From 4a07b15793fc7eee7b6be33430ee424534f7f2a9 Mon Sep 17 00:00:00 2001 From: Bear Carlson Date: Thu, 20 Mar 2025 18:40:35 -0700 Subject: [PATCH 01/13] Add flash hypothesis to build representation --- spine/build/base.py | 8 + spine/build/manager.py | 41 ++++- spine/data/optical.py | 42 +++++ spine/driver.py | 2 + spine/post/optical/__init__.py | 2 +- spine/post/optical/hypothesis.py | 222 +++++++++++++++++++++++++++ spine/post/optical/opt0_interface.py | 1 + 7 files changed, 312 insertions(+), 6 deletions(-) create mode 100644 spine/post/optical/hypothesis.py create mode 100644 spine/post/optical/opt0_interface.py diff --git a/spine/build/base.py b/spine/build/base.py index 16f2cbfe..13dd4fef 100644 --- a/spine/build/base.py +++ b/spine/build/base.py @@ -80,6 +80,7 @@ def __call__(self, data): if np.isscalar(data['index']): # Single entry to process data[out_key] = self.process(data, mode) + print(f'{out_key} has {len(data[out_key])} entries') else: # Batch of data to process @@ -87,6 +88,7 @@ def __call__(self, data): for entry in range(len(data['index'])): const_list.append(self.process(data, mode, entry)) data[out_key] = const_list + print(f'{out_key} has {len(data[out_key])} entries') def process(self, data, mode, entry=None): """Build representations for a single entry. @@ -100,14 +102,20 @@ def process(self, data, mode, entry=None): entry : int, optional Entry to process """ + print(f'Processing {self.name}s for {mode} mode') + print(f'Data keys: {list(data.keys())}') + # Dispatch to the appropriate function key = f'{mode}_{self.name}s' if key in data: func = f'load_{mode}' + print(f'Loading {self.name}s for {mode} mode') else: func = f'build_{mode}' + print(f'Building {self.name}s for {mode} mode') result = self.construct(func, data, entry) + print(f'New data keys: {list(data.keys())}') # When loading, check that the units are as expected if 'load' in func: diff --git a/spine/build/manager.py b/spine/build/manager.py index c5cebc08..ba387bac 100644 --- a/spine/build/manager.py +++ b/spine/build/manager.py @@ -10,7 +10,7 @@ from .fragment import FragmentBuilder from .particle import ParticleBuilder from .interaction import InteractionBuilder - +from .fill_hypothesis import FlashHypothesisBuilder class BuildManager: """Manager which constructs data representations based on the chain output. @@ -40,11 +40,14 @@ class BuildManager: ('particles', ('particles',)), ('neutrinos', ('neutrinos',)), ('flashes', ('flashes',)), - ('crthits', ('crthits',)) + ('crthits', ('crthits',)), + ('flash_hypos', ('flash_hypos',)), ) - def __init__(self, fragments, particles, interactions, - mode='both', units='cm', sources=None): + def __init__(self, fragments, particles, interactions, flash_hypo, volume='tpc', + mode='both', units='cm', sources=None,ref_volume_id=None, + detector=None, parent_path=None, geometry_file=None,truth_point_mode='points', + truth_dep_mode='depositions',cfg=None ): """Initializes the build manager. 
Parameters @@ -55,12 +58,33 @@ def __init__(self, fragments, particles, interactions, Build/load RecoParticle/TruthParticle objects interactions : bool Build/load RecoInteraction/TruthInteraction objects + flash_hypo : bool + Build/load RecoFlashHypothesis/TruthFlashHypothesis objects + volume : str, default 'tpc' + Optical volume to build/load flash hypotheses for mode : str, default 'both' Whether to construct reconstructed objects, true objects or both sources : Dict[str, str], optional Dictionary which maps the necessary data products onto a name in the input/output dictionary of the reconstruction chain. + ref_volume_id : str, optional + If specified, the flash matching expects all interactions/flashes + to live into a specific optical volume. Must shift everything. + detector : str, optional + Detector to get the geometry from + parent_path : str, optional + Path to the parent directory of the main analysis configuration. + geometry_file : str, optional + Path to a `.yaml` geometry file to load the geometry from + truth_point_mode : str, optional + If specified, the flash matching expects all interactions/flashes + to live into a specific optical volume. Must shift everything. + truth_dep_mode : str, optional + Detector to get the geometry from + cfg : str, optional + Flash matching configuration file path """ + print(f'cfg: {cfg}') # Check on the mode, store it assert mode in self._run_modes, ( f"Run mode not recognized: {mode}. Must be one {self._run_modes}") @@ -96,6 +120,8 @@ def __init__(self, fragments, particles, interactions, "Interactions are built from particles. If `interactions` " "is True, so must `particles` be.") self.builders['interaction'] = InteractionBuilder(mode, units) + if flash_hypo: + self.builders['flash_hypo'] = FlashHypothesisBuilder(mode, units,cfg,volume, ref_volume_id, detector, parent_path, geometry_file, truth_point_mode, truth_dep_mode) assert len(self.builders), ( "Do not call the builder unless it does anything.") @@ -133,6 +159,10 @@ def __call__(self, data): # Build representations builder(data) + # TODO:Can't generate match pairs for flash hypotheses + if name == 'flash_hypo': + continue + # Generate match pairs from stored matches if load and self.mode in ['both', 'all']: if np.isscalar(data['index']): @@ -145,6 +175,7 @@ def __call__(self, data): match_dict[key].append(val) data.update(**match_dict) + print(f'Data keys: {list(data.keys())}') def build_sources(self, data, entry=None): """Construct the reference coordinate and value tensors used by @@ -229,7 +260,7 @@ def build_sources(self, data, entry=None): for obj in sources[key]: if obj.units != self.units: obj.to_cm(meta) - + print(f'Update keys: {list(update.keys())}') return update @staticmethod diff --git a/spine/data/optical.py b/spine/data/optical.py index 0515b552..f42942de 100644 --- a/spine/data/optical.py +++ b/spine/data/optical.py @@ -20,6 +20,8 @@ class Flash(PosDataBase): ---------- id : int Index of the flash in the list + interaction_id : int + Index of the interaction in the list volume_id : int Index of the optical volume in which the flahs was recorded time : float @@ -48,6 +50,7 @@ class Flash(PosDataBase): Units in which the position coordinates are expressed """ id: int = -1 + interaction_id: int = -1 volume_id: int = -1 frame: int = -1 in_beam_frame: bool = False @@ -108,3 +111,42 @@ def from_larcv(cls, flash): time_abs=flash.absTime(), time_width=flash.timeWidth(), total_pe=flash.TotalPE(), pe_per_ch=pe_per_ch, center=center, width=width) + + @classmethod + def 
from_hypothesis(cls, flash, interaction_id, id): + """Builds and returns a Flash object from a flashmatch::Flash_t object. + From the hypothesis flash. + + Parameters + ---------- + flash : flashmatch::Flash_t + Flash object + interaction_id : int + Interaction ID to make the flash + id : int + ID of the flash + + Returns + ------- + Flash + Flash object + """ + # Get the number of PEs per optical channel + pe_per_ch = np.array(flash.pe_v, dtype=np.float32) + + # Get the center and width of the flash + center = np.array([flash.x, flash.y, flash.z]) + width = np.array([flash.x_err, flash.y_err, flash.z_err]) + + #Get the volume ID + volume_id = -1 + for attr in ('tpc', 'volume_id'): + if hasattr(flash, attr): + volume_id = getattr(flash, attr)() + + # Create the Flash object + return cls(id=id, interaction_id=interaction_id, volume_id=volume_id, + time=flash.time, + time_width=flash.time_width, + total_pe=flash.TotalPE(), pe_per_ch=pe_per_ch, + center=center, width=width) \ No newline at end of file diff --git a/spine/driver.py b/spine/driver.py index 10a06cf3..54661f3a 100644 --- a/spine/driver.py +++ b/spine/driver.py @@ -562,6 +562,8 @@ def run(self): entry = iteration if self.loader is None else None data = self.process(entry=entry, iteration=iteration) + print(f'(Driver) Data keys: {list(data.keys())}') + # Log the output self.log(data, tstamp, iteration, epoch) diff --git a/spine/post/optical/__init__.py b/spine/post/optical/__init__.py index 5818b57a..03a8be70 100644 --- a/spine/post/optical/__init__.py +++ b/spine/post/optical/__init__.py @@ -1 +1 @@ -from .flash_matching import * +from .flash_matching import * \ No newline at end of file diff --git a/spine/post/optical/hypothesis.py b/spine/post/optical/hypothesis.py new file mode 100644 index 00000000..ac6ecfa0 --- /dev/null +++ b/spine/post/optical/hypothesis.py @@ -0,0 +1,222 @@ +"""Module for storing flash hypotheses into flash objects.""" + +#TODO: Make base class between likelihood and this + +import os +import sys +import numpy as np +import re +from spine.data.optical import Flash + +class Hypothesis: + """Interface class between flash hypothesis and OpT0Finder.""" + + def __init__(self, cfg, detector, parent_path=None,scaling=1., alpha=0.21, + recombination_mip=0.65, legacy=False): + """Initialize the flash hypothesis algorithm. + + Parameters + ---------- + cfg : str + Flash matching configuration file path + detector : str, optional + Detector to get the geometry from + parent_path : str, optional + Path to the parent configuration file (allows for relative paths) + scaling : Union[float, str], default 1. + Global scaling factor for the depositions (can be an expression) + alpha : float, default 0.21 + Number of excitons (Ar*) divided by number of electron-ion pairs (e-,Ar+) + recombination_mip : float, default 0.65 + Recombination factor for MIP-like particles in LAr + legacy : bool, default False + Use the legacy OpT0Finder function(s). 
TODO: remove when dropping legacy + """ + # Initialize the flash manager (OpT0Finder wrapper) + self.initialize_backend(cfg, detector, parent_path) + + # Get the external parameters + self.scaling = scaling + if isinstance(self.scaling, str): + self.scaling = eval(self.scaling) + self.alpha = alpha + if isinstance(self.alpha, str): + self.alpha = eval(self.alpha) + self.recombination_mip = recombination_mip + if isinstance(self.recombination_mip, str): + self.recombination_mip = eval(self.recombination_mip) + self.legacy = legacy + + #Initialize hypotheses + self.hypothesis_v = None + + def initialize_backend(self, cfg, detector, parent_path): + """Initialize the flash manager (OpT0Finder wrapper). + + Expects that the environment variable `FMATCH_BASEDIR` is set. + You can either set it by hand (to the path where one can find + OpT0Finder) or you can source `OpT0Finder/configure.sh` if you + are running code from a command line. + + Parameters + ---------- + cfg : str + Path to config for OpT0Finder + detector : str, optional + Detector to get the geometry from + parent_path : str, optional + Path to the parent configuration file (allows for relative paths) + """ + # Add OpT0finder python interface to the python path + basedir = os.getenv('FMATCH_BASEDIR') + assert basedir is not None, ( + "You need to source OpT0Finder's configure.sh or set the " + "FMATCH_BASEDIR environment variable before running flash " + "matching.") + sys.path.append(os.path.join(basedir, 'python')) + + # Add the OpT0Finder library to the dynamic link loader + lib_path = os.path.join(basedir, 'build/lib') + os.environ['LD_LIBRARY_PATH'] = '{}:{}'.format( + lib_path, os.environ['LD_LIBRARY_PATH']) + + # Add the OpT0Finder data directory if it is not yet set + if 'FMATCH_DATADIR' not in os.environ: + os.environ['FMATCH_DATADIR'] = os.path.join(basedir, 'dat') + + # Load up the detector specifications + if detector is None: + det_cfg = os.path.join(basedir, 'dat/detector_specs.cfg') + else: + det_cfg = os.path.join(basedir, f'dat/detector_specs_{detector}.cfg') + + if not os.path.isfile(det_cfg): + raise FileNotFoundError( + f"Cannot file detector specification file: {det_cfg}.") + + from flashmatch import flashmatch + flashmatch.DetectorSpecs.GetME(det_cfg) + + # Fetch and initialize the OpT0Finder configuration + if parent_path is not None and not os.path.isfile(cfg): + cfg = os.path.join(parent_path, cfg) + if not os.path.isfile(cfg): + raise FileNotFoundError( + f"Cannot find flash-matcher config: {cfg}") + + cfg = flashmatch.CreateFMParamsFromFile(cfg) + + # Get FlashMatchManager configuration + fmatch_params = cfg.get['flashmatch::FMParams']('FlashMatchManager') + + # Parse the configuration dump to find the HypothesisAlgo value + config_dump = fmatch_params.dump() + match = re.search(r'HypothesisAlgo\s*:\s*"([^"]+)"', config_dump) + if match: + algo = match.group(1) + else: + raise ValueError(f"Could not find HypothesisAlgo in configuration: {config_dump}") + + print(f'HypothesisAlgo: {algo}') + + # Get the light path algorithm to produce QCluster_t objects + self.light_path = flashmatch.CustomAlgoFactory.get().create( + 'LightPath', 'ToyMCLightPath') + self.light_path.Configure(cfg.get['flashmatch::FMParams']('LightPath')) + + # Create the hypothesis algorithm based on the extracted name + if algo == 'SemiAnalyticalModel': + self.hypothesis = flashmatch.FlashHypothesisFactory.get().create( + 'SemiAnalyticalModel','SemiAnalyticalModel') + elif algo == 'PhotonLibHypothesis': + self.hypothesis = 
flashmatch.FlashHypothesisFactory.get().create( + 'PhotonLibHypothesis','PhotonLibHypothesis') + else: + raise ValueError(f"Unknown hypothesis algorithm: {algo}") + self.hypothesis.Configure(cfg.get['flashmatch::FMParams'](f'{algo}')) + + def make_qcluster_list(self, interactions): + """Converts a list of SPINE interaction into a list of OpT0Finder + flashmatch.QCluster_t objects. + + Parameters + ---------- + interactions : List[Union[Interaction, TruthInteraction]] + List of TPC interactions + + Returns + ------- + List[QCluster_t] + List of OpT0Finder flashmatch::QCluster_t objects + """ + # Loop over the interacions + from flashmatch import flashmatch + qcluster_v = [] + for idx, inter in enumerate(interactions): + # Produce a mask to remove negative value points (can happen) + valid_mask = np.where(inter.depositions > 0.)[0] + + # Skip interactions with less than 2 points + if len(valid_mask) < 2: + continue + + # Initialize qcluster + qcluster = flashmatch.QCluster_t() + qcluster.idx = idx + qcluster.time = 0 + + # Get the point coordinates + points = inter.points[valid_mask] + + # Get the depositions + depositions = inter.depositions[valid_mask] + + # Fill the trajectory + pytraj = np.hstack([points, depositions[:, None]]) + traj = flashmatch.as_geoalgo_trajectory(pytraj) + if self.legacy: + qcluster += self.light_path.MakeQCluster(traj, self.scaling) + else: + qcluster += self.light_path.MakeQCluster( + traj, self.scaling, self.alpha, self.recombination_mip) + + # Append + qcluster_v.append(qcluster) + + return qcluster_v + + def make_hypothesis_list(self, interactions, id_offset=0): + """ + Runs the hypothesis algorithm on a list of interactions to create + a list of flashmatch::Flash_t objects. + + Parameters + ---------- + interactions : List[Union[Interaction, TruthInteraction]] + List of TPC interactions + id_offset : int, default 0 + Offset to add to the flash ID + """ + # Make the QCluster_t objects + qcluster_v = self.make_qcluster_list(interactions) + + # Initialize the list of flashmatch::Flash_t objects + self.hypothesis_v = [] + + # Run the hypothesis algorithm + for i,int in enumerate(interactions): + # Make the QCluster_t object + qcluster = qcluster_v[i] + + # Run the hypothesis algorithm + flash = self.hypothesis.GetEstimate(qcluster) + print(type(flash)) + + # Create a new Flash object + flash = Flash.from_hypothesis(flash, int.id, i + id_offset) + + # Append + self.hypothesis_v.append(flash) + + return self.hypothesis_v + diff --git a/spine/post/optical/opt0_interface.py b/spine/post/optical/opt0_interface.py new file mode 100644 index 00000000..5579be31 --- /dev/null +++ b/spine/post/optical/opt0_interface.py @@ -0,0 +1 @@ +#TODO: Make this the base class for the likelihood and hypothesis classes \ No newline at end of file From 2a690626c02d49cc8d24b58e8139a90247f97750 Mon Sep 17 00:00:00 2001 From: Bear Carlson Date: Thu, 20 Mar 2025 18:51:29 -0700 Subject: [PATCH 02/13] Add flash hypothesis to build representation --- spine/build/fill_hypothesis.py | 456 +++++++++++++++++++++++++++++++++ 1 file changed, 456 insertions(+) create mode 100644 spine/build/fill_hypothesis.py diff --git a/spine/build/fill_hypothesis.py b/spine/build/fill_hypothesis.py new file mode 100644 index 00000000..707d8c32 --- /dev/null +++ b/spine/build/fill_hypothesis.py @@ -0,0 +1,456 @@ +"""Builder in charge of creating flash hypothesis objects.""" + +import numpy as np + +from spine.build.base import BuilderBase +from spine.utils.geo import Geometry +from spine.post.optical.hypothesis 
import Hypothesis +from spine.data.optical import Flash +from spine.data.out.base import OutBase + +__all__ = ['FlashHypothesisBuilder'] + +class FlashHypothesisBuilder(BuilderBase): + """Builds flash hypothesis objects from interaction data.""" + + # Builder name (used to create keys like 'reco_flash_hypos') + name = 'flash_hypo' + + # Types of objects constructed by the builder + _reco_type = Flash + _truth_type = Flash + + # Override required keys for building + _build_reco_keys = ( + ('reco_interactions', True), + ) + _build_truth_keys = ( + ('truth_interactions', True), + ) + + # Necessary/optional data products to load a reconstructed object + _load_reco_keys = ( + ('reco_flash_hypos', True), + ) + + # Necessary/optional data products to load a truth object + _load_truth_keys = ( + ('truth_flash_hypos', True), + ) + + def __init__(self, mode='reco', units='cm', cfg=None, volume=None, ref_volume_id=None, + detector=None, parent_path=None, geometry_file=None,truth_point_mode='points', truth_dep_mode='depositions', + **kwargs): + """Initialize the flash hypothesis builder. + + Parameters + ---------- + cfg : str + Flash matching configuration file path + volume : str + Physical volume corresponding to each flash ('module' or 'tpc') + mode : str, default 'reco' + Whether to construct reconstructed objects, true objects or both + (one of 'reco', 'truth', 'both' or 'all') + units : str, default 'cm' + Units in which the position arguments of the constructed objects + should be expressed (one of 'cm' or 'px') + ref_volume_id : str, optional + If specified, the flash matching expects all interactions/flashes + to live into a specific optical volume. Must shift everything. + detector : str, optional + Detector to get the geometry from + geometry_file : str, optional + Path to a `.yaml` geometry file to load the geometry from + parent_path : str, optional + Path to the parent directory of the main analysis configuration. + This allows for the use of relative paths in the post-processors. + """ + # Initialize the parent class + super().__init__(mode,units) + + # Initialize the flash hypothesis builder if building the representation + if cfg is not None: #TODO: Implement a way to initialize fig only when building the representation + self.initialize(cfg, volume, mode, units, ref_volume_id, detector, parent_path, geometry_file,truth_point_mode, truth_dep_mode, **kwargs) + + def initialize(self, cfg=None, volume=None, mode='reco', units='cm', ref_volume_id=None, + detector=None, parent_path=None, geometry_file=None,truth_point_mode='points', truth_dep_mode='depositions', + **kwargs): + """Initialize the flash hypothesis builder. Use this only for building the represenation. + + Parameters + ---------- + cfg : str + Flash matching configuration file path + volume : str + Physical volume corresponding to each flash ('module' or 'tpc') + mode : str, default 'reco' + Whether to construct reconstructed objects, true objects or both + (one of 'reco', 'truth', 'both' or 'all') + units : str, default 'cm' + Units in which the position arguments of the constructed objects + should be expressed (one of 'cm' or 'px') + ref_volume_id : str, optional + If specified, the flash matching expects all interactions/flashes + to live into a specific optical volume. Must shift everything. 
+ detector : str, optional + Detector to get the geometry from + geometry_file : str, optional + Path to a `.yaml` geometry file to load the geometry from + parent_path : str, optional + Path to the parent directory of the main analysis configuration. + This allows for the use of relative paths in the post-processors. + truth_point_mode : str, optional + If specified, the flash matching expects all interactions/flashes + to live into a specific optical volume. Must shift everything. + truth_dep_mode : str, optional + Detector to get the geometry from + cfg : str, optional + Flash matching configuration file path + """ + #Store point mode to use for truth objects + self.truth_point_mode = truth_point_mode + self.truth_dep_mode = truth_dep_mode + self.truth_source_mode = truth_point_mode.replace('points', 'sources') + + # Initialize the detector geometry + self.geo = Geometry(detector, geometry_file) + + # Get the volume within which each flash is confined + assert volume in ('tpc', 'module'), ( + "The `volume` must be one of 'tpc' or 'module'.") + self.volume = volume + self.ref_volume_id = ref_volume_id + + # Initialize the hypothesis algorithm + self.hypothesis = Hypothesis(cfg,detector=detector, parent_path=parent_path, **kwargs) + print('Configuring FlashHypothesisBuilder') + + def build_reco(self, data): + """Build flash hypothesis objects from reconstructed interactions. + + Parameters + ---------- + data : dict + Dictionary containing interactions data + + Returns + ------- + list + List of flash hypothesis objects + """ + return self._build_hypotheses( + data['reco_interactions'] + ) + + def build_truth(self, data): + """Build flash hypothesis objects from truth interactions. + + Parameters + ---------- + data : dict + Dictionary containing truth interactions data + + Returns + ------- + list + List of flash hypothesis objects + """ + return self._build_hypotheses( + data['truth_interactions'] + ) + + def load_reco(self, data): + """Load pre-computed reconstructed flash hypotheses. + + Parameters + ---------- + data : dict + Dictionary containing pre-computed reco hypotheses + + Returns + ------- + list + List of flash hypothesis objects + """ + return data['reco_flash_hypos'] + + def load_truth(self, data): + """Load pre-computed truth flash hypotheses. + + Parameters + ---------- + data : dict + Dictionary containing pre-computed truth hypotheses + + Returns + ------- + list + List of flash hypothesis objects + """ + return data['truth_flash_hypos'] + + def get_sources(self, obj): + """Get a certain pre-defined sources attribute of an object. + + The :class:`TruthFragment`, :class:`TruthParticle` and + :class:`TruthInteraction` objects sources are obtained using the + `truth_source_mode` attribute of the class. + + Parameters + ---------- + obj : Union[FragmentBase, ParticleBase, InteractionBase] + Fragment, Particle or Interaction object + + Results + ------- + np.ndarray + (N, 2) Object sources + """ + if not obj.is_truth: + return obj.sources + else: + return getattr(obj, self.truth_source_mode) + + def get_points(self, obj): + """Get a certain pre-defined point attribute of an object. + + The :class:`TruthFragment`, :class:`TruthParticle` and + :class:`TruthInteraction` objects points are obtained using the + `truth_point_mode` attribute of the class. 
+ + Parameters + ---------- + obj : Union[FragmentBase, ParticleBase, InteractionBase] + Fragment, Particle or Interaction object + + Results + ------- + np.ndarray + (N, 3) Point coordinates + """ + if not obj.is_truth: + return obj.points + else: + return getattr(obj, self.truth_point_mode) + def get_depositions(self, obj): + """Get a certain pre-defined deposition attribute of an object. + + The :class:`TruthFragment`, :class:`TruthParticle` and + :class:`TruthInteraction` objects points are obtained using the + `truth_dep_mode` attribute of the class. + + Parameters + ---------- + obj : Union[FragmentBase, ParticleBase, InteractionBase] + Fragment, Particle or Interaction object + + Results + ------- + np.ndarray + (N) Depositions + """ + if not obj.is_truth: + return obj.depositions + else: + return getattr(obj, self.truth_dep_mode) + + + @property + def source_modes(self): + """Dictionary which makes the correspondance between the name of a true + object source attribute with the underlying source tensor it points to. + + Returns + ------- + Dict[str, str] + Dictionary of (attribute, key) mapping for point sources + """ + return dict(self._source_modes) + + def _build_hypotheses(self, interactions): + """Build flash hypotheses from interaction data. + + Parameters + ---------- + interactions : list + List of interaction objects + + Returns + ------- + list + List of flash hypothesis objects + """ + + + volume_ids = [0, 1] # TODO: Assuming 2 modules + hypotheses = [] + id_offset = 0 + + # Loop over optical volumes, make hypotheses for each + for volume_id in volume_ids: + #Crop interactions to only include depositions in the optical volume + interactions_v = [] + for inter in interactions: + #Fetch the points in the current optical volume + sources = self.get_sources(inter) + + # Filter points by volume + if self.volume == 'module': + index = self.geo.get_volume_index(sources, volume_id) + elif self.volume == 'tpc': + num_cpm = self.geo.tpc.num_chambers_per_module + module_id, tpc_id = volume_id//num_cpm, volume_id%num_cpm + index = self.geo.get_volume_index(sources, module_id, tpc_id) + + # If there are no points in this volume, skip + if len(index) == 0: + continue + + # Fetch points and depositions + points = self.get_points(inter)[index] + depositions = self.get_depositions(inter)[index] + if self.ref_volume_id is not None: + # If the reference volume is specified, shift positions + points = self.geo.translate( + points, volume_id, self.ref_volume_id) + + # Create an interaction object for this volume + inter = OutBase( + id=inter.id, points=points, depositions=depositions) + interactions_v.append(inter) + + # Make the hypothesis + _hypo_v = self.hypothesis.make_hypothesis_list(interactions_v, id_offset) + hypotheses.extend(_hypo_v) + id_offset += len(_hypo_v) + + return hypotheses + + +# """ +# Post-processor in charge of filling the hypothesis into the data product. 
+# """ + +# from spine.post.base import PostBase +# from spine.utils.geo import Geometry +# from spine.data.out.base import OutBase +# import numpy as np + +# from .hypothesis import Hypothesis + +# __all__ = ['FillFlashHypothesisProcessor'] + +# class FillFlashHypothesisProcessor(PostBase): +# """Fills the hypothesis into the data product.""" + +# # Name of the post-processor (as specified in the configuration) +# name = 'fill_flash_hypothesis' + +# # Alternative allowed names of the post-processor +# aliases = ('fill_hypothesis',) + +# def __init__(self, volume, ref_volume_id=None, detector=None, parent_path=None, +# geometry_file=None, run_mode='reco', truth_point_mode='points', +# truth_dep_mode='depositions', hypothesis_key='flash_hypo', **kwargs): +# """Initialize the fill hypothesis processor. + +# Parameters +# ---------- +# volume : str +# Physical volume corresponding to each flash ('module' or 'tpc') +# ref_volume_id : str, optional +# If specified, the flash matching expects all interactions/flashes +# to live into a specific optical volume. Must shift everything. +# detector : str, optional +# Detector to get the geometry from +# geometry_file : str, optional +# Path to a `.yaml` geometry file to load the geometry from +# parent_path : str, optional +# Path to the parent directory of the main analysis configuration. +# This allows for the use of relative paths in the post-processors. +# hypothesis_key : str, default 'flash_hypo' +# Key to use for the hypothesis data product +# """ +# # Initialize the parent class +# super().__init__( +# 'interaction', run_mode, truth_point_mode, truth_dep_mode, +# parent_path=parent_path) + +# # Initialize the hypothesis key +# self.hypothesis_key = hypothesis_key + +# # Initialize the detector geometry +# self.geo = Geometry(detector, geometry_file) + +# # Get the volume within which each flash is confined +# assert volume in ('tpc', 'module'), ( +# "The `volume` must be one of 'tpc' or 'module'.") +# self.volume = volume +# self.ref_volume_id = ref_volume_id + +# # Initialize the hypothesis algorithm +# self.hypothesis = Hypothesis(detector=detector, parent_path=self.parent_path, **kwargs) + +# def process(self, data): +# """Fills the hypothesis into the data product. 
+ +# Parameters +# ---------- +# data : dict +# Data product to fill the hypothesis into +# """ + +# #Loop over optical volumes, make the hypotheses in each +# for k in self.interaction_keys: +# # Fetch interactions, nothing to do if there are not any +# interactions = data[k] +# if not len(interactions): +# continue + +# # Make sure the interaction coordinates are expressed in cm +# self.check_units(interactions[0]) + +# # Loop over the optical volumes +# #TODO: Use the specific detector or geometry file to get the list of optical volumes +# id_offset = 0 +# hypothesis_v = [] +# for volume_id in [0,1]: +# # Crop interactions to only include depositions in the optical volume +# interactions_v = [] +# for inter in interactions: +# # Fetch the points in the current optical volume +# sources = self.get_sources(inter) +# if self.volume == 'module': +# index = self.geo.get_volume_index(sources, volume_id) + +# elif self.volume == 'tpc': +# num_cpm = self.geo.tpc.num_chambers_per_module +# module_id, tpc_id = volume_id//num_cpm, volume_id%num_cpm +# index = self.geo.get_volume_index(sources, module_id, tpc_id) + +# # If there are no points in this volume, proceed +# if len(index) == 0: +# continue + +# # Fetch points and depositions +# points = self.get_points(inter)[index] +# depositions = self.get_depositions(inter)[index] +# if self.ref_volume_id is not None: +# # If the reference volume is specified, shift positions +# points = self.geo.translate( +# points, volume_id, self.ref_volume_id) + +# # Create an interaction which holds positions/depositions +# inter_v = OutBase( +# id=inter.id, points=points, depositions=depositions) +# interactions_v.append(inter_v) + +# # Make the hypothesis +# _hypo_v = self.hypothesis.make_hypothesis_list(interactions_v, id_offset) +# hypothesis_v.extend(_hypo_v) +# id_offset += len(_hypo_v) #increment the offset for the next volume + +# # Fill the hypothesis into the data product +# data[self.hypothesis_key] = hypothesis_v +# return data \ No newline at end of file From b57543e6eb35c44575479d3d384cab3ce536e5ee Mon Sep 17 00:00:00 2001 From: Bear Carlson Date: Thu, 20 Mar 2025 18:53:54 -0700 Subject: [PATCH 03/13] Add flash hypothesis to build representation --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 6aec5bca..18f8a87f 100644 --- a/.gitignore +++ b/.gitignore @@ -21,7 +21,7 @@ mlreco/models/cluster_cnn/deprecated # Distribution / packaging .Python -build/ +#build/ #This causes git to ignore spine/build develop-eggs/ dist/ downloads/ From 928acd8c8e65aebca73169c239547d3ede4ac826 Mon Sep 17 00:00:00 2001 From: Bear Carlson Date: Thu, 20 Mar 2025 18:55:36 -0700 Subject: [PATCH 04/13] Add flash hypothesis to build representation --- spine/vis/out.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spine/vis/out.py b/spine/vis/out.py index 3ac60e31..2f9141bc 100644 --- a/spine/vis/out.py +++ b/spine/vis/out.py @@ -155,7 +155,7 @@ def get_index(self, obj): return obj.index else: return getattr(obj, self.truth_index_mode) - + #TODO: Add a function to draw flash hypotheses def get(self, obj_type, attr=None, color_attr=None, draw_raw=False, draw_end_points=False, draw_vertices=False, draw_flashes=False, synchronize=False, titles=None, split_traces=False, From 0ffea1adcf27966087837c4d99413479270823fa Mon Sep 17 00:00:00 2001 From: Francois Drielsma Date: Sun, 2 Mar 2025 20:30:12 -0800 Subject: [PATCH 05/13] Harmonize indexing in the data objects (orig for LArCV, no prefix for others) --- 
spine/build/fill_hypothesis.py | 456 --------------------------------- spine/build/manager.py | 1 - 2 files changed, 457 deletions(-) delete mode 100644 spine/build/fill_hypothesis.py diff --git a/spine/build/fill_hypothesis.py b/spine/build/fill_hypothesis.py deleted file mode 100644 index 707d8c32..00000000 --- a/spine/build/fill_hypothesis.py +++ /dev/null @@ -1,456 +0,0 @@ -"""Builder in charge of creating flash hypothesis objects.""" - -import numpy as np - -from spine.build.base import BuilderBase -from spine.utils.geo import Geometry -from spine.post.optical.hypothesis import Hypothesis -from spine.data.optical import Flash -from spine.data.out.base import OutBase - -__all__ = ['FlashHypothesisBuilder'] - -class FlashHypothesisBuilder(BuilderBase): - """Builds flash hypothesis objects from interaction data.""" - - # Builder name (used to create keys like 'reco_flash_hypos') - name = 'flash_hypo' - - # Types of objects constructed by the builder - _reco_type = Flash - _truth_type = Flash - - # Override required keys for building - _build_reco_keys = ( - ('reco_interactions', True), - ) - _build_truth_keys = ( - ('truth_interactions', True), - ) - - # Necessary/optional data products to load a reconstructed object - _load_reco_keys = ( - ('reco_flash_hypos', True), - ) - - # Necessary/optional data products to load a truth object - _load_truth_keys = ( - ('truth_flash_hypos', True), - ) - - def __init__(self, mode='reco', units='cm', cfg=None, volume=None, ref_volume_id=None, - detector=None, parent_path=None, geometry_file=None,truth_point_mode='points', truth_dep_mode='depositions', - **kwargs): - """Initialize the flash hypothesis builder. - - Parameters - ---------- - cfg : str - Flash matching configuration file path - volume : str - Physical volume corresponding to each flash ('module' or 'tpc') - mode : str, default 'reco' - Whether to construct reconstructed objects, true objects or both - (one of 'reco', 'truth', 'both' or 'all') - units : str, default 'cm' - Units in which the position arguments of the constructed objects - should be expressed (one of 'cm' or 'px') - ref_volume_id : str, optional - If specified, the flash matching expects all interactions/flashes - to live into a specific optical volume. Must shift everything. - detector : str, optional - Detector to get the geometry from - geometry_file : str, optional - Path to a `.yaml` geometry file to load the geometry from - parent_path : str, optional - Path to the parent directory of the main analysis configuration. - This allows for the use of relative paths in the post-processors. - """ - # Initialize the parent class - super().__init__(mode,units) - - # Initialize the flash hypothesis builder if building the representation - if cfg is not None: #TODO: Implement a way to initialize fig only when building the representation - self.initialize(cfg, volume, mode, units, ref_volume_id, detector, parent_path, geometry_file,truth_point_mode, truth_dep_mode, **kwargs) - - def initialize(self, cfg=None, volume=None, mode='reco', units='cm', ref_volume_id=None, - detector=None, parent_path=None, geometry_file=None,truth_point_mode='points', truth_dep_mode='depositions', - **kwargs): - """Initialize the flash hypothesis builder. Use this only for building the represenation. 
- - Parameters - ---------- - cfg : str - Flash matching configuration file path - volume : str - Physical volume corresponding to each flash ('module' or 'tpc') - mode : str, default 'reco' - Whether to construct reconstructed objects, true objects or both - (one of 'reco', 'truth', 'both' or 'all') - units : str, default 'cm' - Units in which the position arguments of the constructed objects - should be expressed (one of 'cm' or 'px') - ref_volume_id : str, optional - If specified, the flash matching expects all interactions/flashes - to live into a specific optical volume. Must shift everything. - detector : str, optional - Detector to get the geometry from - geometry_file : str, optional - Path to a `.yaml` geometry file to load the geometry from - parent_path : str, optional - Path to the parent directory of the main analysis configuration. - This allows for the use of relative paths in the post-processors. - truth_point_mode : str, optional - If specified, the flash matching expects all interactions/flashes - to live into a specific optical volume. Must shift everything. - truth_dep_mode : str, optional - Detector to get the geometry from - cfg : str, optional - Flash matching configuration file path - """ - #Store point mode to use for truth objects - self.truth_point_mode = truth_point_mode - self.truth_dep_mode = truth_dep_mode - self.truth_source_mode = truth_point_mode.replace('points', 'sources') - - # Initialize the detector geometry - self.geo = Geometry(detector, geometry_file) - - # Get the volume within which each flash is confined - assert volume in ('tpc', 'module'), ( - "The `volume` must be one of 'tpc' or 'module'.") - self.volume = volume - self.ref_volume_id = ref_volume_id - - # Initialize the hypothesis algorithm - self.hypothesis = Hypothesis(cfg,detector=detector, parent_path=parent_path, **kwargs) - print('Configuring FlashHypothesisBuilder') - - def build_reco(self, data): - """Build flash hypothesis objects from reconstructed interactions. - - Parameters - ---------- - data : dict - Dictionary containing interactions data - - Returns - ------- - list - List of flash hypothesis objects - """ - return self._build_hypotheses( - data['reco_interactions'] - ) - - def build_truth(self, data): - """Build flash hypothesis objects from truth interactions. - - Parameters - ---------- - data : dict - Dictionary containing truth interactions data - - Returns - ------- - list - List of flash hypothesis objects - """ - return self._build_hypotheses( - data['truth_interactions'] - ) - - def load_reco(self, data): - """Load pre-computed reconstructed flash hypotheses. - - Parameters - ---------- - data : dict - Dictionary containing pre-computed reco hypotheses - - Returns - ------- - list - List of flash hypothesis objects - """ - return data['reco_flash_hypos'] - - def load_truth(self, data): - """Load pre-computed truth flash hypotheses. - - Parameters - ---------- - data : dict - Dictionary containing pre-computed truth hypotheses - - Returns - ------- - list - List of flash hypothesis objects - """ - return data['truth_flash_hypos'] - - def get_sources(self, obj): - """Get a certain pre-defined sources attribute of an object. - - The :class:`TruthFragment`, :class:`TruthParticle` and - :class:`TruthInteraction` objects sources are obtained using the - `truth_source_mode` attribute of the class. 
- - Parameters - ---------- - obj : Union[FragmentBase, ParticleBase, InteractionBase] - Fragment, Particle or Interaction object - - Results - ------- - np.ndarray - (N, 2) Object sources - """ - if not obj.is_truth: - return obj.sources - else: - return getattr(obj, self.truth_source_mode) - - def get_points(self, obj): - """Get a certain pre-defined point attribute of an object. - - The :class:`TruthFragment`, :class:`TruthParticle` and - :class:`TruthInteraction` objects points are obtained using the - `truth_point_mode` attribute of the class. - - Parameters - ---------- - obj : Union[FragmentBase, ParticleBase, InteractionBase] - Fragment, Particle or Interaction object - - Results - ------- - np.ndarray - (N, 3) Point coordinates - """ - if not obj.is_truth: - return obj.points - else: - return getattr(obj, self.truth_point_mode) - def get_depositions(self, obj): - """Get a certain pre-defined deposition attribute of an object. - - The :class:`TruthFragment`, :class:`TruthParticle` and - :class:`TruthInteraction` objects points are obtained using the - `truth_dep_mode` attribute of the class. - - Parameters - ---------- - obj : Union[FragmentBase, ParticleBase, InteractionBase] - Fragment, Particle or Interaction object - - Results - ------- - np.ndarray - (N) Depositions - """ - if not obj.is_truth: - return obj.depositions - else: - return getattr(obj, self.truth_dep_mode) - - - @property - def source_modes(self): - """Dictionary which makes the correspondance between the name of a true - object source attribute with the underlying source tensor it points to. - - Returns - ------- - Dict[str, str] - Dictionary of (attribute, key) mapping for point sources - """ - return dict(self._source_modes) - - def _build_hypotheses(self, interactions): - """Build flash hypotheses from interaction data. - - Parameters - ---------- - interactions : list - List of interaction objects - - Returns - ------- - list - List of flash hypothesis objects - """ - - - volume_ids = [0, 1] # TODO: Assuming 2 modules - hypotheses = [] - id_offset = 0 - - # Loop over optical volumes, make hypotheses for each - for volume_id in volume_ids: - #Crop interactions to only include depositions in the optical volume - interactions_v = [] - for inter in interactions: - #Fetch the points in the current optical volume - sources = self.get_sources(inter) - - # Filter points by volume - if self.volume == 'module': - index = self.geo.get_volume_index(sources, volume_id) - elif self.volume == 'tpc': - num_cpm = self.geo.tpc.num_chambers_per_module - module_id, tpc_id = volume_id//num_cpm, volume_id%num_cpm - index = self.geo.get_volume_index(sources, module_id, tpc_id) - - # If there are no points in this volume, skip - if len(index) == 0: - continue - - # Fetch points and depositions - points = self.get_points(inter)[index] - depositions = self.get_depositions(inter)[index] - if self.ref_volume_id is not None: - # If the reference volume is specified, shift positions - points = self.geo.translate( - points, volume_id, self.ref_volume_id) - - # Create an interaction object for this volume - inter = OutBase( - id=inter.id, points=points, depositions=depositions) - interactions_v.append(inter) - - # Make the hypothesis - _hypo_v = self.hypothesis.make_hypothesis_list(interactions_v, id_offset) - hypotheses.extend(_hypo_v) - id_offset += len(_hypo_v) - - return hypotheses - - -# """ -# Post-processor in charge of filling the hypothesis into the data product. 
-# """ - -# from spine.post.base import PostBase -# from spine.utils.geo import Geometry -# from spine.data.out.base import OutBase -# import numpy as np - -# from .hypothesis import Hypothesis - -# __all__ = ['FillFlashHypothesisProcessor'] - -# class FillFlashHypothesisProcessor(PostBase): -# """Fills the hypothesis into the data product.""" - -# # Name of the post-processor (as specified in the configuration) -# name = 'fill_flash_hypothesis' - -# # Alternative allowed names of the post-processor -# aliases = ('fill_hypothesis',) - -# def __init__(self, volume, ref_volume_id=None, detector=None, parent_path=None, -# geometry_file=None, run_mode='reco', truth_point_mode='points', -# truth_dep_mode='depositions', hypothesis_key='flash_hypo', **kwargs): -# """Initialize the fill hypothesis processor. - -# Parameters -# ---------- -# volume : str -# Physical volume corresponding to each flash ('module' or 'tpc') -# ref_volume_id : str, optional -# If specified, the flash matching expects all interactions/flashes -# to live into a specific optical volume. Must shift everything. -# detector : str, optional -# Detector to get the geometry from -# geometry_file : str, optional -# Path to a `.yaml` geometry file to load the geometry from -# parent_path : str, optional -# Path to the parent directory of the main analysis configuration. -# This allows for the use of relative paths in the post-processors. -# hypothesis_key : str, default 'flash_hypo' -# Key to use for the hypothesis data product -# """ -# # Initialize the parent class -# super().__init__( -# 'interaction', run_mode, truth_point_mode, truth_dep_mode, -# parent_path=parent_path) - -# # Initialize the hypothesis key -# self.hypothesis_key = hypothesis_key - -# # Initialize the detector geometry -# self.geo = Geometry(detector, geometry_file) - -# # Get the volume within which each flash is confined -# assert volume in ('tpc', 'module'), ( -# "The `volume` must be one of 'tpc' or 'module'.") -# self.volume = volume -# self.ref_volume_id = ref_volume_id - -# # Initialize the hypothesis algorithm -# self.hypothesis = Hypothesis(detector=detector, parent_path=self.parent_path, **kwargs) - -# def process(self, data): -# """Fills the hypothesis into the data product. 
- -# Parameters -# ---------- -# data : dict -# Data product to fill the hypothesis into -# """ - -# #Loop over optical volumes, make the hypotheses in each -# for k in self.interaction_keys: -# # Fetch interactions, nothing to do if there are not any -# interactions = data[k] -# if not len(interactions): -# continue - -# # Make sure the interaction coordinates are expressed in cm -# self.check_units(interactions[0]) - -# # Loop over the optical volumes -# #TODO: Use the specific detector or geometry file to get the list of optical volumes -# id_offset = 0 -# hypothesis_v = [] -# for volume_id in [0,1]: -# # Crop interactions to only include depositions in the optical volume -# interactions_v = [] -# for inter in interactions: -# # Fetch the points in the current optical volume -# sources = self.get_sources(inter) -# if self.volume == 'module': -# index = self.geo.get_volume_index(sources, volume_id) - -# elif self.volume == 'tpc': -# num_cpm = self.geo.tpc.num_chambers_per_module -# module_id, tpc_id = volume_id//num_cpm, volume_id%num_cpm -# index = self.geo.get_volume_index(sources, module_id, tpc_id) - -# # If there are no points in this volume, proceed -# if len(index) == 0: -# continue - -# # Fetch points and depositions -# points = self.get_points(inter)[index] -# depositions = self.get_depositions(inter)[index] -# if self.ref_volume_id is not None: -# # If the reference volume is specified, shift positions -# points = self.geo.translate( -# points, volume_id, self.ref_volume_id) - -# # Create an interaction which holds positions/depositions -# inter_v = OutBase( -# id=inter.id, points=points, depositions=depositions) -# interactions_v.append(inter_v) - -# # Make the hypothesis -# _hypo_v = self.hypothesis.make_hypothesis_list(interactions_v, id_offset) -# hypothesis_v.extend(_hypo_v) -# id_offset += len(_hypo_v) #increment the offset for the next volume - -# # Fill the hypothesis into the data product -# data[self.hypothesis_key] = hypothesis_v -# return data \ No newline at end of file diff --git a/spine/build/manager.py b/spine/build/manager.py index ba387bac..15ef9c38 100644 --- a/spine/build/manager.py +++ b/spine/build/manager.py @@ -10,7 +10,6 @@ from .fragment import FragmentBuilder from .particle import ParticleBuilder from .interaction import InteractionBuilder -from .fill_hypothesis import FlashHypothesisBuilder class BuildManager: """Manager which constructs data representations based on the chain output. 
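A minimal usage sketch of the `Hypothesis` wrapper introduced in patch 01 (not part of the patch series itself). It assumes OpT0Finder has been set up via the FMATCH_BASEDIR environment variable; the configuration path, detector name and the `reco_interactions` list are placeholder assumptions, not values taken from these commits.

    # Sketch only: build flash hypotheses directly from a list of interactions
    from spine.post.optical.hypothesis import Hypothesis

    # 'flashmatch.cfg' and 'icarus' are placeholders for a real OpT0Finder
    # configuration file and detector name
    hypo = Hypothesis(cfg='flashmatch.cfg', detector='icarus')

    # `reco_interactions` is assumed to be the list of interaction objects
    # produced upstream by the reconstruction chain
    flash_hypos = hypo.make_hypothesis_list(reco_interactions)

    # Each entry is a spine.data.optical.Flash built via Flash.from_hypothesis()
    for flash in flash_hypos:
        print(flash.interaction_id, flash.total_pe)
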
From 15ac1a3037c190bdd30f600dc40871a91f9de9f1 Mon Sep 17 00:00:00 2001 From: Bear Carlson Date: Fri, 21 Mar 2025 00:08:58 -0700 Subject: [PATCH 06/13] Add flash hypothesis post processor --- spine/build/base.py | 8 -- spine/build/manager.py | 40 +------- spine/post/optical/fill_hypothesis.py | 126 ++++++++++++++++++++++++++ 3 files changed, 131 insertions(+), 43 deletions(-) create mode 100644 spine/post/optical/fill_hypothesis.py diff --git a/spine/build/base.py b/spine/build/base.py index 13dd4fef..16f2cbfe 100644 --- a/spine/build/base.py +++ b/spine/build/base.py @@ -80,7 +80,6 @@ def __call__(self, data): if np.isscalar(data['index']): # Single entry to process data[out_key] = self.process(data, mode) - print(f'{out_key} has {len(data[out_key])} entries') else: # Batch of data to process @@ -88,7 +87,6 @@ def __call__(self, data): for entry in range(len(data['index'])): const_list.append(self.process(data, mode, entry)) data[out_key] = const_list - print(f'{out_key} has {len(data[out_key])} entries') def process(self, data, mode, entry=None): """Build representations for a single entry. @@ -102,20 +100,14 @@ def process(self, data, mode, entry=None): entry : int, optional Entry to process """ - print(f'Processing {self.name}s for {mode} mode') - print(f'Data keys: {list(data.keys())}') - # Dispatch to the appropriate function key = f'{mode}_{self.name}s' if key in data: func = f'load_{mode}' - print(f'Loading {self.name}s for {mode} mode') else: func = f'build_{mode}' - print(f'Building {self.name}s for {mode} mode') result = self.construct(func, data, entry) - print(f'New data keys: {list(data.keys())}') # When loading, check that the units are as expected if 'load' in func: diff --git a/spine/build/manager.py b/spine/build/manager.py index 15ef9c38..c5cebc08 100644 --- a/spine/build/manager.py +++ b/spine/build/manager.py @@ -11,6 +11,7 @@ from .particle import ParticleBuilder from .interaction import InteractionBuilder + class BuildManager: """Manager which constructs data representations based on the chain output. @@ -39,14 +40,11 @@ class BuildManager: ('particles', ('particles',)), ('neutrinos', ('neutrinos',)), ('flashes', ('flashes',)), - ('crthits', ('crthits',)), - ('flash_hypos', ('flash_hypos',)), + ('crthits', ('crthits',)) ) - def __init__(self, fragments, particles, interactions, flash_hypo, volume='tpc', - mode='both', units='cm', sources=None,ref_volume_id=None, - detector=None, parent_path=None, geometry_file=None,truth_point_mode='points', - truth_dep_mode='depositions',cfg=None ): + def __init__(self, fragments, particles, interactions, + mode='both', units='cm', sources=None): """Initializes the build manager. Parameters @@ -57,33 +55,12 @@ def __init__(self, fragments, particles, interactions, flash_hypo, volume='tpc', Build/load RecoParticle/TruthParticle objects interactions : bool Build/load RecoInteraction/TruthInteraction objects - flash_hypo : bool - Build/load RecoFlashHypothesis/TruthFlashHypothesis objects - volume : str, default 'tpc' - Optical volume to build/load flash hypotheses for mode : str, default 'both' Whether to construct reconstructed objects, true objects or both sources : Dict[str, str], optional Dictionary which maps the necessary data products onto a name in the input/output dictionary of the reconstruction chain. - ref_volume_id : str, optional - If specified, the flash matching expects all interactions/flashes - to live into a specific optical volume. Must shift everything. 
- detector : str, optional - Detector to get the geometry from - parent_path : str, optional - Path to the parent directory of the main analysis configuration. - geometry_file : str, optional - Path to a `.yaml` geometry file to load the geometry from - truth_point_mode : str, optional - If specified, the flash matching expects all interactions/flashes - to live into a specific optical volume. Must shift everything. - truth_dep_mode : str, optional - Detector to get the geometry from - cfg : str, optional - Flash matching configuration file path """ - print(f'cfg: {cfg}') # Check on the mode, store it assert mode in self._run_modes, ( f"Run mode not recognized: {mode}. Must be one {self._run_modes}") @@ -119,8 +96,6 @@ def __init__(self, fragments, particles, interactions, flash_hypo, volume='tpc', "Interactions are built from particles. If `interactions` " "is True, so must `particles` be.") self.builders['interaction'] = InteractionBuilder(mode, units) - if flash_hypo: - self.builders['flash_hypo'] = FlashHypothesisBuilder(mode, units,cfg,volume, ref_volume_id, detector, parent_path, geometry_file, truth_point_mode, truth_dep_mode) assert len(self.builders), ( "Do not call the builder unless it does anything.") @@ -158,10 +133,6 @@ def __call__(self, data): # Build representations builder(data) - # TODO:Can't generate match pairs for flash hypotheses - if name == 'flash_hypo': - continue - # Generate match pairs from stored matches if load and self.mode in ['both', 'all']: if np.isscalar(data['index']): @@ -174,7 +145,6 @@ def __call__(self, data): match_dict[key].append(val) data.update(**match_dict) - print(f'Data keys: {list(data.keys())}') def build_sources(self, data, entry=None): """Construct the reference coordinate and value tensors used by @@ -259,7 +229,7 @@ def build_sources(self, data, entry=None): for obj in sources[key]: if obj.units != self.units: obj.to_cm(meta) - print(f'Update keys: {list(update.keys())}') + return update @staticmethod diff --git a/spine/post/optical/fill_hypothesis.py b/spine/post/optical/fill_hypothesis.py new file mode 100644 index 00000000..4174a1a2 --- /dev/null +++ b/spine/post/optical/fill_hypothesis.py @@ -0,0 +1,126 @@ +""" +Post-processor in charge of filling the hypothesis into the data product. +""" + +from spine.post.base import PostBase +from spine.utils.geo import Geometry +from spine.data.out.base import OutBase +import numpy as np + +from .hypothesis import Hypothesis + +__all__ = ['FillFlashHypothesisProcessor'] + +class FillFlashHypothesisProcessor(PostBase): + """Fills the hypothesis into the data product.""" + + # Name of the post-processor (as specified in the configuration) + name = 'fill_flash_hypothesis' + + # Alternative allowed names of the post-processor + aliases = ('fill_hypothesis',) + + def __init__(self, volume, ref_volume_id=None, detector=None, parent_path=None, + geometry_file=None, run_mode='reco', truth_point_mode='points', + truth_dep_mode='depositions', hypothesis_key='flash_hypo', **kwargs): + """Initialize the fill hypothesis processor. + + Parameters + ---------- + volume : str + Physical volume corresponding to each flash ('module' or 'tpc') + ref_volume_id : str, optional + If specified, the flash matching expects all interactions/flashes + to live into a specific optical volume. Must shift everything. 
+ detector : str, optional + Detector to get the geometry from + geometry_file : str, optional + Path to a `.yaml` geometry file to load the geometry from + parent_path : str, optional + Path to the parent directory of the main analysis configuration. + This allows for the use of relative paths in the post-processors. + hypothesis_key : str, default 'flash_hypo' + Key to use for the hypothesis data product + """ + # Initialize the parent class + super().__init__( + 'interaction', run_mode, truth_point_mode, truth_dep_mode, + parent_path=parent_path) + + # Initialize the hypothesis key + self.hypothesis_key = hypothesis_key + + # Initialize the detector geometry + self.geo = Geometry(detector, geometry_file) + + # Get the volume within which each flash is confined + assert volume in ('tpc', 'module'), ( + "The `volume` must be one of 'tpc' or 'module'.") + self.volume = volume + self.ref_volume_id = ref_volume_id + + # Initialize the hypothesis algorithm + self.hypothesis = Hypothesis(detector=detector, parent_path=self.parent_path, **kwargs) + + def process(self, data): + """Fills the hypothesis into the data product. + + Parameters + ---------- + data : dict + Data product to fill the hypothesis into + """ + + #Loop over optical volumes, make the hypotheses in each + for k in self.interaction_keys: + # Fetch interactions, nothing to do if there are not any + interactions = data[k] + if not len(interactions): + continue + + # Make sure the interaction coordinates are expressed in cm + self.check_units(interactions[0]) + + # Loop over the optical volumes + #TODO: Use the specific detector or geometry file to get the list of optical volumes + id_offset = 0 + hypothesis_v = [] + for volume_id in [0,1]: + # Crop interactions to only include depositions in the optical volume + interactions_v = [] + for inter in interactions: + # Fetch the points in the current optical volume + sources = self.get_sources(inter) + if self.volume == 'module': + index = self.geo.get_volume_index(sources, volume_id) + + elif self.volume == 'tpc': + num_cpm = self.geo.tpc.num_chambers_per_module + module_id, tpc_id = volume_id//num_cpm, volume_id%num_cpm + index = self.geo.get_volume_index(sources, module_id, tpc_id) + + # If there are no points in this volume, proceed + if len(index) == 0: + continue + + # Fetch points and depositions + points = self.get_points(inter)[index] + depositions = self.get_depositions(inter)[index] + if self.ref_volume_id is not None: + # If the reference volume is specified, shift positions + points = self.geo.translate( + points, volume_id, self.ref_volume_id) + + # Create an interaction which holds positions/depositions + inter_v = OutBase( + id=inter.id, points=points, depositions=depositions) + interactions_v.append(inter_v) + + # Make the hypothesis + _hypo_v = self.hypothesis.make_hypothesis_list(interactions_v, id_offset) + hypothesis_v.extend(_hypo_v) + id_offset += len(_hypo_v) #increment the offset for the next volume + + # Fill the hypothesis into the data product + data[self.hypothesis_key] = hypothesis_v + return data \ No newline at end of file From 7bc806038596c793b2c370c2a9e40ad55e411188 Mon Sep 17 00:00:00 2001 From: Bear Carlson Date: Wed, 2 Apr 2025 15:02:11 -0700 Subject: [PATCH 07/13] Update visualization tools to display hypotheses --- spine/data/optical.py | 21 ++++-- spine/post/optical/__init__.py | 3 +- spine/post/optical/fill_hypothesis.py | 60 ++++++++++++--- spine/post/optical/flash_matching.py | 4 +- spine/post/optical/hypothesis.py | 7 +- 
spine/vis/ellipsoid.py | 18 +++-- spine/vis/geo.py | 19 ++++- spine/vis/layout.py | 4 + spine/vis/out.py | 102 ++++++++++++++++++++++++-- 9 files changed, 203 insertions(+), 35 deletions(-) diff --git a/spine/data/optical.py b/spine/data/optical.py index f42942de..125a24f8 100644 --- a/spine/data/optical.py +++ b/spine/data/optical.py @@ -113,7 +113,7 @@ def from_larcv(cls, flash): center=center, width=width) @classmethod - def from_hypothesis(cls, flash, interaction_id, id): + def from_hypothesis(cls, flash, interaction_id, id, volume_id=None,negative_id=True): """Builds and returns a Flash object from a flashmatch::Flash_t object. From the hypothesis flash. @@ -125,6 +125,10 @@ def from_hypothesis(cls, flash, interaction_id, id): Interaction ID to make the flash id : int ID of the flash + volume_id : int, optional + Volume ID to use for the flash, set manually if provided + negative_id : bool, default True + If `True`, use the negative of the id as the flash ID. This is used to identify which hypothesis is matched. If it's positive, it's matched. Returns ------- @@ -139,14 +143,19 @@ def from_hypothesis(cls, flash, interaction_id, id): width = np.array([flash.x_err, flash.y_err, flash.z_err]) #Get the volume ID - volume_id = -1 - for attr in ('tpc', 'volume_id'): - if hasattr(flash, attr): - volume_id = getattr(flash, attr)() + if volume_id is None: + volume_id = -1 + for attr in ('tpc', 'volume_id'): + if hasattr(flash, attr): + volume_id = getattr(flash, attr)() # Create the Flash object + if negative_id: + id = -(id+1) #+1 to avoid negative zero return cls(id=id, interaction_id=interaction_id, volume_id=volume_id, time=flash.time, time_width=flash.time_width, total_pe=flash.TotalPE(), pe_per_ch=pe_per_ch, - center=center, width=width) \ No newline at end of file + center=center, width=width) + def __str__(self): + return f"Flash(id={self.id}, interaction_id={self.interaction_id}, volume_id={self.volume_id}, time={self.time}, time_width={self.time_width}, total_pe={self.total_pe}, center={self.center}, width={self.width})" diff --git a/spine/post/optical/__init__.py b/spine/post/optical/__init__.py index 03a8be70..84c31e58 100644 --- a/spine/post/optical/__init__.py +++ b/spine/post/optical/__init__.py @@ -1 +1,2 @@ -from .flash_matching import * \ No newline at end of file +from .flash_matching import * +from .fill_hypothesis import * \ No newline at end of file diff --git a/spine/post/optical/fill_hypothesis.py b/spine/post/optical/fill_hypothesis.py index 4174a1a2..1637cc39 100644 --- a/spine/post/optical/fill_hypothesis.py +++ b/spine/post/optical/fill_hypothesis.py @@ -15,18 +15,21 @@ class FillFlashHypothesisProcessor(PostBase): """Fills the hypothesis into the data product.""" # Name of the post-processor (as specified in the configuration) - name = 'fill_flash_hypothesis' + name = 'fill_hypothesis' # Alternative allowed names of the post-processor - aliases = ('fill_hypothesis',) + aliases = ('fill_hypo',) - def __init__(self, volume, ref_volume_id=None, detector=None, parent_path=None, + def __init__(self, flash_key, volume, ref_volume_id=None, detector=None, parent_path=None, geometry_file=None, run_mode='reco', truth_point_mode='points', - truth_dep_mode='depositions', hypothesis_key='flash_hypo', **kwargs): + truth_dep_mode='depositions', hypothesis_key='flash_hypos', **kwargs): """Initialize the fill hypothesis processor. Parameters ---------- + flash_key : str + Flash data product name. 
In most cases, this is unambiguous, unless
+            there are multiple types of segregated optical detectors
         volume : str
             Physical volume corresponding to each flash ('module' or 'tpc')
         ref_volume_id : str, optional
@@ -48,6 +51,7 @@ def __init__(self, volume, ref_volume_id=None, detector=None, parent_path=None,
             parent_path=parent_path)
 
         # Initialize the hypothesis key
+        self.flash_key = flash_key
         self.hypothesis_key = hypothesis_key
 
         # Initialize the detector geometry
@@ -61,6 +65,37 @@ def __init__(self, volume, ref_volume_id=None, detector=None, parent_path=None,
 
         # Initialize the hypothesis algorithm
         self.hypothesis = Hypothesis(detector=detector, parent_path=self.parent_path, **kwargs)
+
+        # Request the flash data product in addition to the interactions
+        self.update_keys({self.flash_key: True})
+
+    def match_hypothesis(self, hypothesis_v, flash_info_v):
+        """Match each hypothesis to a flash. A hypothesis carries the ID of the interaction
+        it was built from, whereas an interaction carries the IDs of its matched flashes. Match
+        through the interaction ID, then assign the matched flash ID to the hypothesis ID.
+
+        Parameters
+        ----------
+        hypothesis_v : list
+            List of hypothesis objects
+        flash_info_v : list
+            List of (interaction ID, flash ID, flash volume ID) tuples
+
+        Returns
+        -------
+        None
+            Modifies the hypothesis objects in place
+        """
+
+        # Make a dictionary which maps an interaction ID onto (flash ID, flash volume ID)
+        int_id_dict = {ii[0]: (ii[1], ii[2]) for ii in flash_info_v}
+
+        # Modify the hypothesis objects in place
+        for hypo in hypothesis_v:
+            # If the interaction has a matched flash and the volumes agree, set the hypothesis ID to the flash ID
+            if hypo.interaction_id in int_id_dict and int_id_dict[hypo.interaction_id][1] == hypo.volume_id:
+                hypo.id = int_id_dict[hypo.interaction_id][0]
+
     def process(self, data):
        """Fills the hypothesis into the data product.
@@ -82,12 +117,12 @@ def process(self, data): self.check_units(interactions[0]) # Loop over the optical volumes - #TODO: Use the specific detector or geometry file to get the list of optical volumes id_offset = 0 hypothesis_v = [] - for volume_id in [0,1]: + for volume_id in [0,1]: #TODO: Use the specific detector or geometry file to get the list of optical volumes # Crop interactions to only include depositions in the optical volume interactions_v = [] + flash_info_v = [] for inter in interactions: # Fetch the points in the current optical volume sources = self.get_sources(inter) @@ -115,12 +150,17 @@ def process(self, data): inter_v = OutBase( id=inter.id, points=points, depositions=depositions) interactions_v.append(inter_v) - + for fid,fvol in zip(inter.flash_ids,inter.flash_volume_ids): + if fvol == volume_id: + flash_info_v.append((inter.id,fid,fvol)) #needed for matching # Make the hypothesis - _hypo_v = self.hypothesis.make_hypothesis_list(interactions_v, id_offset) + print('volume_id', volume_id) + _hypo_v = self.hypothesis.make_hypothesis_list(interactions_v, id_offset, volume_id) hypothesis_v.extend(_hypo_v) id_offset += len(_hypo_v) #increment the offset for the next volume + + # Match the matched hypothesis to the flash if provided in interactions_v + self.match_hypothesis(hypothesis_v, flash_info_v) # Fill the hypothesis into the data product - data[self.hypothesis_key] = hypothesis_v - return data \ No newline at end of file + return {self.hypothesis_key: hypothesis_v} \ No newline at end of file diff --git a/spine/post/optical/flash_matching.py b/spine/post/optical/flash_matching.py index 9d9e829d..b74120c0 100644 --- a/spine/post/optical/flash_matching.py +++ b/spine/post/optical/flash_matching.py @@ -28,7 +28,7 @@ class FlashMatchProcessor(PostBase): def __init__(self, flash_key, volume, ref_volume_id=None, method='likelihood', detector=None, geometry_file=None, run_mode='reco', truth_point_mode='points', - truth_dep_mode='depositions', parent_path=None, **kwargs): + truth_dep_mode='depositions', parent_path=None,store_matches=False, **kwargs): """Initialize the flash matching algorithm. Parameters @@ -50,6 +50,8 @@ def __init__(self, flash_key, volume, ref_volume_id=None, parent_path : str, optional Path to the parent directory of the main analysis configuration. This allows for the use of relative paths in the post-processors. + store_matches : bool, default False + If True, the matches will be stored in the data product **kwargs : dict Keyword arguments to pass to specific flash matching algorithms """ diff --git a/spine/post/optical/hypothesis.py b/spine/post/optical/hypothesis.py index ac6ecfa0..55eca628 100644 --- a/spine/post/optical/hypothesis.py +++ b/spine/post/optical/hypothesis.py @@ -185,7 +185,7 @@ def make_qcluster_list(self, interactions): return qcluster_v - def make_hypothesis_list(self, interactions, id_offset=0): + def make_hypothesis_list(self, interactions, id_offset=0, volume_id=None): """ Runs the hypothesis algorithm on a list of interactions to create a list of flashmatch::Flash_t objects. 
@@ -196,6 +196,8 @@ def make_hypothesis_list(self, interactions, id_offset=0): List of TPC interactions id_offset : int, default 0 Offset to add to the flash ID + volume_id : int, optional + Volume ID to use for the hypothesis """ # Make the QCluster_t objects qcluster_v = self.make_qcluster_list(interactions) @@ -210,10 +212,9 @@ def make_hypothesis_list(self, interactions, id_offset=0): # Run the hypothesis algorithm flash = self.hypothesis.GetEstimate(qcluster) - print(type(flash)) # Create a new Flash object - flash = Flash.from_hypothesis(flash, int.id, i + id_offset) + flash = Flash.from_hypothesis(flash, int.id, i+id_offset, volume_id) # Append self.hypothesis_v.append(flash) diff --git a/spine/vis/ellipsoid.py b/spine/vis/ellipsoid.py index 424101e5..29e9171a 100644 --- a/spine/vis/ellipsoid.py +++ b/spine/vis/ellipsoid.py @@ -9,7 +9,7 @@ def ellipsoid_trace(points=None, centroid=None, covmat=None, contour=0.5, num_samples=10, color=None, intensity=None, hovertext=None, - showscale=False, **kwargs): + showscale=False, size_scale=1, **kwargs): """Converts a cloud of points or a covariance matrix into a 3D ellipsoid. This function uses the centroid and the covariance matrix of a cloud of @@ -39,6 +39,8 @@ def ellipsoid_trace(points=None, centroid=None, covmat=None, contour=0.5, Text associated with the box showscale : bool, default False If True, show the colorscale of the :class:`plotly.graph_objs.Mesh3d` + size_scale : float, optional, default 1 + Scale factor for the size of the ellipsoid **kwargs : dict, optional Additional parameters to pass to the underlying :class:`plotly.graph_objs.Mesh3d` object @@ -78,8 +80,7 @@ def ellipsoid_trace(points=None, centroid=None, covmat=None, contour=0.5, "The `contour` parameter should be a probability.") radius = np.sqrt(2*gammaincinv(1.5, contour)) - ell_points = centroid + radius*np.dot(unit_points, rotmat) - + ell_points = centroid + size_scale*radius*np.dot(unit_points, rotmat) # Convert the color provided to a set of intensities, if needed if color is not None and not isinstance(color, str): assert intensity is None, ( @@ -105,7 +106,7 @@ def ellipsoid_trace(points=None, centroid=None, covmat=None, contour=0.5, def ellipsoid_traces(centroids, covmat, color=None, hovertext=None, cmin=None, cmax=None, shared_legend=True, legendgroup=None, - showlegend=True, name=None, **kwargs): + showlegend=True, name=None, size_scale=1, **kwargs): """Function which produces a list of plotly traces of ellipsoids given a list of centroids and one covariance matrix in x, y and z. 
@@ -131,6 +132,8 @@ def ellipsoid_traces(centroids, covmat, color=None, hovertext=None, cmin=None,
         Whether to show legends on not
     name : str, optional
         Name of the trace(s)
+    size_scale : Union[float, List[float]], optional
+        Scale factor(s) for the size of the ellipsoids
     **kwargs : dict, optional
         List of additional arguments to pass to the underlying list of
         :class:`plotly.graph_objs.Mesh3D`
@@ -181,10 +184,15 @@ def ellipsoid_traces(centroids, covmat, color=None, hovertext=None, cmin=None,
         else:
             name_i = f'{name} {i}'
 
+        if size_scale is not None and not np.isscalar(size_scale):
+            size_scale_i = size_scale[i]
+        else:
+            size_scale_i = size_scale if size_scale is not None else 1
+
         # Append list of traces
         traces.append(ellipsoid_trace(
             centroid=centroid, covmat=covmat, contour=None, color=col,
             hovertext=hov, cmin=cmin, cmax=cmax, legendgroup=legendgroup,
-            showlegend=showlegend, name=name_i, **kwargs))
+            showlegend=showlegend, name=name_i, size_scale=size_scale_i, **kwargs))
 
     return traces
diff --git a/spine/vis/geo.py b/spine/vis/geo.py
index b1475ed6..73614a51 100644
--- a/spine/vis/geo.py
+++ b/spine/vis/geo.py
@@ -89,7 +89,7 @@ def tpc_traces(self, meta=None, draw_faces=False, shared_legend=True,
 
     def optical_traces(self, meta=None, shared_legend=True, legendgroup=None,
                        name='Optical', color='rgba(0,0,255,0.25)', cmin=None,
-                       cmax=None, zero_supress=False, volume_id=None, **kwargs):
+                       cmax=None, zero_supress=False, volume_id=None, size=1, offset=(0., 0., 0.), **kwargs):
         """Function which produces a list of traces which represent the
         optical detectors in a 3D event display.
 
@@ -115,6 +115,10 @@ def optical_traces(self, meta=None, shared_legend=True, legendgroup=None,
         volume_id : int, optional
             Specifies which optical volume to represent. If not specified,
             all the optical volumes are drawn
+        size : Union[int, np.ndarray], optional
+            Size of the optical detectors. Default is 1 (no scaling)
+        offset : array_like, optional
+            Offset applied to the optical detector positions [x, y, z]
         **kwargs : dict, optional
             List of additional arguments to pass to
             spine.vis.ellipsoid.ellipsoid_traces or spine.vis.box.box_traces
@@ -134,7 +138,7 @@ def optical_traces(self, meta=None, shared_legend=True, legendgroup=None,
         else:
             positions = self.geo.optical.positions[volume_id]
         half_dimensions = self.geo.optical.dimensions/2
-
+        positions = positions + np.asarray(offset)*np.sign(positions)
         # If there is more than one detector shape, fetch shape IDs
         shape_ids = None
         if self.geo.optical.shape_ids is not None:
@@ -174,14 +178,23 @@ def optical_traces(self, meta=None, shared_legend=True, legendgroup=None,
             if shape_ids is None:
                 pos = positions
                 col = color
+                sz = size
             else:
                 index = np.where(np.asarray(shape_ids) == i)[0]
                 pos = positions[index]
+                # Set the color of the optical detectors
                 if color is not None and not np.isscalar(color):
                     col = color[index]
                 else:
                     col = color
+                # Set the size of the optical detectors
+                if not np.isscalar(size):
+                    sz = size[index]
+                else:
+                    sz = size
+
+
             # If zero-supression is requested, only draw the optical detectors
             # which record a non-zero signal
             if zero_supress and color is not None and not np.isscalar(color):
@@ -211,7 +224,7 @@ def optical_traces(self, meta=None, shared_legend=True, legendgroup=None,
                 # Build ellipsoids
                 traces += ellipsoid_traces(
                     pos, covmat, shared_legend=shared_legend, name=name,
-                    color=col, cmin=cmin, cmax=cmax,
+                    color=col, cmin=cmin, cmax=cmax, size_scale=sz,
                     legendgroup=legendgroup, showlegend=showlegend, **kwargs)
 
         return traces
diff --git a/spine/vis/layout.py b/spine/vis/layout.py
index f6069094..b1922401 100644
--- a/spine/vis/layout.py
+++ b/spine/vis/layout.py
@@ -102,6 +102,10 @@ def layout3d(ranges=None, meta=None, detector=None, titles=None,
         ranges[:, 0] -= lengths*0.1
         ranges[:, 1] += lengths*0.1
 
+        # Add extra padding to the x-axis
+        ranges[0, 0] -= lengths[0]*0.2
+        ranges[0, 1] += lengths[0]*0.2
+
     # If pixel coordinates are requested, use meta to make the conversion
     if detector_coords is False:
         assert meta is not None, (
diff --git a/spine/vis/out.py b/spine/vis/out.py
index 2f9141bc..6c066cf1 100644
--- a/spine/vis/out.py
+++ b/spine/vis/out.py
@@ -159,7 +159,7 @@ def get_index(self, obj):
 
     def get(self, obj_type, attr=None, color_attr=None, draw_raw=False,
             draw_end_points=False, draw_vertices=False, draw_flashes=False,
             synchronize=False, titles=None, split_traces=False,
-            matched_flash_only=True):
+            matched_flash_only=True, draw_flash_hypotheses=False, hypo_interaction_id=None):
        """Draw the requested object type with the requested mode.
Parameters @@ -187,6 +187,10 @@ def get(self, obj_type, attr=None, color_attr=None, draw_raw=False, If `True`, one trace is produced for each object matched_flash_only : bool, default True If `True`, only flashes matched to interactions are drawn + draw_flash_hypotheses : bool, default False + If `True`, draw the flash hypotheses for the given interaction ID + hypo_interaction_id : int, optional + Interaction ID of the hypothesis to draw Returns ------- @@ -232,13 +236,26 @@ def get(self, obj_type, attr=None, color_attr=None, draw_raw=False, # Fetch the flashes, if requested if draw_flashes: + # This means we will scale the PDS size by the number of PEs + if draw_flash_hypotheses: + use_size = True + else: + use_size = False assert 'flashes' in self.data, ( "Must provide the `flashes` objects to draw them.") for prefix in self.prefixes: obj_name = f'{prefix}_interactions' assert obj_name in self.data, ( "Must provide interactions to draw matched flashes.") - traces[prefix] += self._flash_trace(obj_name, matched_flash_only) + traces[prefix] += self._flash_trace(obj_name, matched_flash_only, use_size) + + # Fetch the flash hypotheses, if requested + if draw_flash_hypotheses: + assert 'flash_hypos' in self.data, ( + "Must provide the `flash_hypos` objects to draw them.") + for prefix in self.prefixes: + obj_name = f'flash_hypos' + traces[prefix] += self._hypothesis_trace(obj_name, hypo_interaction_id) # Add the TPC traces, if available if self.geo_drawer is not None: @@ -579,7 +596,7 @@ def _point_trace(self, obj_name, point_attr, **kwargs): return scatter_points( points, hovertext=np.array(hovertext), name=name, **kwargs) - def _flash_trace(self, obj_name, matched_only, **kwargs): + def _flash_trace(self, obj_name, matched_only, use_size, **kwargs): """Draw the cumlative PEs of flashes that have been matched to interactions specified by `obj_name`. @@ -589,6 +606,8 @@ def _flash_trace(self, obj_name, matched_only, **kwargs): Name of the object to draw matched_only : bool If `True`, only flashes matched to interactions are drawn + use_size : bool + If `True`, scale the size of the flashes by the number of PEs. Otherwise, the size is fixed and the color is scaled by the number of PEs. 
**kwargs : dict, optional List of additional arguments to pass to :func:`optical_traces` @@ -614,17 +633,88 @@ def _flash_trace(self, obj_name, matched_only, **kwargs): flash_ids = np.arange(len(self.data['flashes'])) # Sum values from each flash to build a a global color scale - color = np.zeros(self.geo_drawer.geo.optical.num_detectors) + size = np.zeros(self.geo_drawer.geo.optical.num_detectors) + color = size.copy() opt_det_ids = self.geo_drawer.geo.optical.det_ids for flash_id in flash_ids: flash = self.data['flashes'][flash_id] index = self.geo_drawer.geo.optical.volume_index(flash.volume_id) pe_per_ch = flash.pe_per_ch + time = flash.time + if opt_det_ids is not None: + pe_per_ch = np.bincount(opt_det_ids, weights=pe_per_ch) + if use_size: + size[index] += pe_per_ch + color[index] += pe_per_ch + else: + color[index] += pe_per_ch + + color = np.where(size == 0, 0, color) + #Normalize the size to be between 0.5 and 2 + size = (size - np.min(size))/(np.max(size) - np.min(size)) + size = size*(2 - 0.5) + 0.5 + + #If we are using size for PE, we need to mask out the flashes with no PE + #if use_size: + + # Return the set of optical detectors with a color scale + return self.geo_drawer.optical_traces( + meta=self.meta, color=color, size=size, zero_supress=True, + colorscale='Inferno', name=name, opacity=0.5) + + def _hypothesis_trace(self, obj_name, interaction_id, **kwargs): + """Draw the hypothesis object. + + Parameters + ---------- + obj_name : str + Name of the object to draw + interaction_id : int + Interaction ID of the hypothesis to draw + **kwargs : dict, optional + List of additional arguments to pass to :func:`optical_traces` + + Returns + ------- + list + List of optical detector traces + """ + # If there was no geometry provided by the user, nothing to do here + assert self.geo_drawer is not None, ( + "Cannot draw optical detectors without geometry information.") + assert interaction_id in [hypo.interaction_id for hypo in self.data['flash_hypos']], ( + f"Interaction ID {interaction_id} not found in flash hypotheses. 
Available IDs: {np.unique([int(hypo.interaction_id) for hypo in self.data['flash_hypos']])}.") + + # Define the name of the trace + name = ' '.join(obj_name.split('_')).capitalize()[:-1] + ' hypothesis' + + # Find the list of flash IDs to draw that match the interaction ID + flash_ids = [hypo.id for hypo in self.data['flash_hypos'] if hypo.interaction_id == interaction_id] + print(f'hypo flash ids: {flash_ids}') + + # Sum values from each flash to build a a global color scale + size = np.zeros(self.geo_drawer.geo.optical.num_detectors) + color = size.copy() + opt_det_ids = self.geo_drawer.geo.optical.det_ids + + for flash_id in flash_ids: + flash = self.data['flash_hypos'][flash_id] + index = self.geo_drawer.geo.optical.volume_index(flash.volume_id) + pe_per_ch = flash.pe_per_ch + time = flash.time if opt_det_ids is not None: pe_per_ch = np.bincount(opt_det_ids, weights=pe_per_ch) + size[index] += pe_per_ch color[index] += pe_per_ch + color = np.where(size > 0, color, 0) + #Normalize the size to be between 0.5 and 2 + size = (size - np.min(size))/(np.max(size) - np.min(size)) + size = size*(2 - 0.5) + 0.5 + + print('draw_flash_hypotheses') # Return the set of optical detectors with a color scale return self.geo_drawer.optical_traces( - meta=self.meta, color=color, zero_supress=True, - colorscale='Inferno', name=name) + meta=self.meta, color=color, size=size, zero_supress=True, + colorscale='Inferno', name=name, offset=[40,0,0], opacity=0.5) + From b6b151d5e11934ee616613e491c6ed56a7974ef1 Mon Sep 17 00:00:00 2001 From: Bear Carlson Date: Tue, 8 Apr 2025 15:43:34 -0700 Subject: [PATCH 08/13] Added interface class between likelihood and hypothesis generation. Updated visualization to support size scaling for hypothesis and flashes if necessary --- spine/driver.py | 2 - spine/post/optical/fill_hypothesis.py | 5 +- spine/post/optical/flash_matching.py | 4 +- spine/post/optical/hypothesis.py | 239 ++++++++++---------------- spine/post/optical/likelihood.py | 188 ++++++-------------- spine/post/optical/opt0_interface.py | 190 +++++++++++++++++++- spine/vis/geo.py | 4 +- spine/vis/out.py | 77 +++++---- 8 files changed, 381 insertions(+), 328 deletions(-) diff --git a/spine/driver.py b/spine/driver.py index 54661f3a..10a06cf3 100644 --- a/spine/driver.py +++ b/spine/driver.py @@ -562,8 +562,6 @@ def run(self): entry = iteration if self.loader is None else None data = self.process(entry=entry, iteration=iteration) - print(f'(Driver) Data keys: {list(data.keys())}') - # Log the output self.log(data, tstamp, iteration, epoch) diff --git a/spine/post/optical/fill_hypothesis.py b/spine/post/optical/fill_hypothesis.py index 1637cc39..60dd638d 100644 --- a/spine/post/optical/fill_hypothesis.py +++ b/spine/post/optical/fill_hypothesis.py @@ -106,6 +106,8 @@ def process(self, data): Data product to fill the hypothesis into """ + volume_ids = np.asarray([f.volume_id for f in data[self.flash_key]]) + #Loop over optical volumes, make the hypotheses in each for k in self.interaction_keys: # Fetch interactions, nothing to do if there are not any @@ -119,7 +121,7 @@ def process(self, data): # Loop over the optical volumes id_offset = 0 hypothesis_v = [] - for volume_id in [0,1]: #TODO: Use the specific detector or geometry file to get the list of optical volumes + for volume_id in np.unique(volume_ids): # Crop interactions to only include depositions in the optical volume interactions_v = [] flash_info_v = [] @@ -154,7 +156,6 @@ def process(self, data): if fvol == volume_id: 
flash_info_v.append((inter.id,fid,fvol)) #needed for matching # Make the hypothesis - print('volume_id', volume_id) _hypo_v = self.hypothesis.make_hypothesis_list(interactions_v, id_offset, volume_id) hypothesis_v.extend(_hypo_v) id_offset += len(_hypo_v) #increment the offset for the next volume diff --git a/spine/post/optical/flash_matching.py b/spine/post/optical/flash_matching.py index b74120c0..9d9e829d 100644 --- a/spine/post/optical/flash_matching.py +++ b/spine/post/optical/flash_matching.py @@ -28,7 +28,7 @@ class FlashMatchProcessor(PostBase): def __init__(self, flash_key, volume, ref_volume_id=None, method='likelihood', detector=None, geometry_file=None, run_mode='reco', truth_point_mode='points', - truth_dep_mode='depositions', parent_path=None,store_matches=False, **kwargs): + truth_dep_mode='depositions', parent_path=None, **kwargs): """Initialize the flash matching algorithm. Parameters @@ -50,8 +50,6 @@ def __init__(self, flash_key, volume, ref_volume_id=None, parent_path : str, optional Path to the parent directory of the main analysis configuration. This allows for the use of relative paths in the post-processors. - store_matches : bool, default False - If True, the matches will be stored in the data product **kwargs : dict Keyword arguments to pass to specific flash matching algorithms """ diff --git a/spine/post/optical/hypothesis.py b/spine/post/optical/hypothesis.py index 55eca628..292182cb 100644 --- a/spine/post/optical/hypothesis.py +++ b/spine/post/optical/hypothesis.py @@ -6,13 +6,23 @@ import sys import numpy as np import re + +# Import the base class and Flash data structure +from .opt0_interface import OpT0Interface from spine.data.optical import Flash -class Hypothesis: - """Interface class between flash hypothesis and OpT0Finder.""" - def __init__(self, cfg, detector, parent_path=None,scaling=1., alpha=0.21, - recombination_mip=0.65, legacy=False): +class Hypothesis(OpT0Interface): + """ + Interface class between flash hypothesis generation and OpT0Finder. + + Inherits common initialization and QCluster creation from OpT0Interface. + Uses an OpT0Finder hypothesis algorithm (e.g., SemiAnalyticalModel, + PhotonLibHypothesis) to generate predicted flash PEs from TPC interactions. + """ + + def __init__(self, cfg, detector, parent_path=None, scaling=1., alpha=0.21, + recombination_mip=0.6, legacy=False): """Initialize the flash hypothesis algorithm. Parameters @@ -27,168 +37,83 @@ def __init__(self, cfg, detector, parent_path=None,scaling=1., alpha=0.21, Global scaling factor for the depositions (can be an expression) alpha : float, default 0.21 Number of excitons (Ar*) divided by number of electron-ion pairs (e-,Ar+) - recombination_mip : float, default 0.65 + recombination_mip : float, default 0.6 Recombination factor for MIP-like particles in LAr legacy : bool, default False Use the legacy OpT0Finder function(s). 
TODO: remove when dropping legacy """ - # Initialize the flash manager (OpT0Finder wrapper) - self.initialize_backend(cfg, detector, parent_path) - - # Get the external parameters - self.scaling = scaling - if isinstance(self.scaling, str): - self.scaling = eval(self.scaling) - self.alpha = alpha - if isinstance(self.alpha, str): - self.alpha = eval(self.alpha) - self.recombination_mip = recombination_mip - if isinstance(self.recombination_mip, str): - self.recombination_mip = eval(self.recombination_mip) - self.legacy = legacy - - #Initialize hypotheses - self.hypothesis_v = None + # Call the parent class initializer for common setup + super().__init__(cfg, detector, parent_path, scaling, alpha, + recombination_mip, legacy) - def initialize_backend(self, cfg, detector, parent_path): - """Initialize the flash manager (OpT0Finder wrapper). + # Initialize hypothesis-specific attributes + self.hypothesis_v = None + # self.hypothesis is initialized in _initialize_algorithm + # self.light_path is initialized in the base class - Expects that the environment variable `FMATCH_BASEDIR` is set. - You can either set it by hand (to the path where one can find - OpT0Finder) or you can source `OpT0Finder/configure.sh` if you - are running code from a command line. + def _initialize_algorithm(self, cfg_params): + """ + Initialize the specific Hypothesis algorithm based on configuration. Parameters ---------- - cfg : str - Path to config for OpT0Finder - detector : str, optional - Detector to get the geometry from - parent_path : str, optional - Path to the parent configuration file (allows for relative paths) + cfg_params : flashmatch::PSet + The loaded OpT0Finder configuration parameters. """ - # Add OpT0finder python interface to the python path - basedir = os.getenv('FMATCH_BASEDIR') - assert basedir is not None, ( - "You need to source OpT0Finder's configure.sh or set the " - "FMATCH_BASEDIR environment variable before running flash " - "matching.") - sys.path.append(os.path.join(basedir, 'python')) - - # Add the OpT0Finder library to the dynamic link loader - lib_path = os.path.join(basedir, 'build/lib') - os.environ['LD_LIBRARY_PATH'] = '{}:{}'.format( - lib_path, os.environ['LD_LIBRARY_PATH']) - - # Add the OpT0Finder data directory if it is not yet set - if 'FMATCH_DATADIR' not in os.environ: - os.environ['FMATCH_DATADIR'] = os.path.join(basedir, 'dat') - - # Load up the detector specifications - if detector is None: - det_cfg = os.path.join(basedir, 'dat/detector_specs.cfg') - else: - det_cfg = os.path.join(basedir, f'dat/detector_specs_{detector}.cfg') - - if not os.path.isfile(det_cfg): - raise FileNotFoundError( - f"Cannot file detector specification file: {det_cfg}.") - from flashmatch import flashmatch - flashmatch.DetectorSpecs.GetME(det_cfg) - - # Fetch and initialize the OpT0Finder configuration - if parent_path is not None and not os.path.isfile(cfg): - cfg = os.path.join(parent_path, cfg) - if not os.path.isfile(cfg): - raise FileNotFoundError( - f"Cannot find flash-matcher config: {cfg}") - - cfg = flashmatch.CreateFMParamsFromFile(cfg) - - # Get FlashMatchManager configuration - fmatch_params = cfg.get['flashmatch::FMParams']('FlashMatchManager') - + # Get FlashMatchManager configuration section to find the HypothesisAlgo name + # Assuming the relevant parameters are under 'FlashMatchManager' PSet + # Adjust 'FlashMatchManager' if your config structure is different + manager_params = cfg_params.get['flashmatch::FMParams']('FlashMatchManager') + # Parse the configuration dump to find 
the HypothesisAlgo value - config_dump = fmatch_params.dump() + config_dump = manager_params.dump() match = re.search(r'HypothesisAlgo\s*:\s*"([^"]+)"', config_dump) if match: - algo = match.group(1) + algo_name = match.group(1) else: - raise ValueError(f"Could not find HypothesisAlgo in configuration: {config_dump}") + # Fallback: Check if the hypothesis algo config exists directly under top level + # This depends on how the .cfg file is structured + found_algo = False + for name in ['SemiAnalyticalModel', 'PhotonLibHypothesis']: # Add other known hypothesis algos + if cfg_params.contains_pset(name): + algo_name = name + found_algo = True + break + if not found_algo: + raise ValueError(f"Could not find HypothesisAlgo parameter within " + f"'FlashMatchManager' PSet in configuration: {config_dump}") - print(f'HypothesisAlgo: {algo}') - - # Get the light path algorithm to produce QCluster_t objects - self.light_path = flashmatch.CustomAlgoFactory.get().create( - 'LightPath', 'ToyMCLightPath') - self.light_path.Configure(cfg.get['flashmatch::FMParams']('LightPath')) + + print(f'HypothesisAlgo: {algo_name}') # Create the hypothesis algorithm based on the extracted name - if algo == 'SemiAnalyticalModel': - self.hypothesis = flashmatch.FlashHypothesisFactory.get().create( - 'SemiAnalyticalModel','SemiAnalyticalModel') - elif algo == 'PhotonLibHypothesis': + # Ensure the factory name matches the class name used in OpT0Finder registration + try: self.hypothesis = flashmatch.FlashHypothesisFactory.get().create( - 'PhotonLibHypothesis','PhotonLibHypothesis') - else: - raise ValueError(f"Unknown hypothesis algorithm: {algo}") - self.hypothesis.Configure(cfg.get['flashmatch::FMParams'](f'{algo}')) + algo_name, algo_name) # Factory name and instance name often match + except Exception as e: + raise ValueError(f"Failed to create hypothesis algorithm '{algo_name}'. " + f"Is it registered correctly in OpT0Finder? Error: {e}") - def make_qcluster_list(self, interactions): - """Converts a list of SPINE interaction into a list of OpT0Finder - flashmatch.QCluster_t objects. + # Configure the hypothesis algorithm using its own PSet + try: + algo_pset = cfg_params.get['flashmatch::FMParams'](algo_name) + self.hypothesis.Configure(algo_pset) + except Exception as e: + raise ValueError(f"Failed to configure hypothesis algorithm '{algo_name}' " + f"using PSet '{algo_name}'. 
Error: {e}") - Parameters - ---------- - interactions : List[Union[Interaction, TruthInteraction]] - List of TPC interactions - - Returns - ------- - List[QCluster_t] - List of OpT0Finder flashmatch::QCluster_t objects - """ - # Loop over the interacions - from flashmatch import flashmatch - qcluster_v = [] - for idx, inter in enumerate(interactions): - # Produce a mask to remove negative value points (can happen) - valid_mask = np.where(inter.depositions > 0.)[0] - - # Skip interactions with less than 2 points - if len(valid_mask) < 2: - continue - - # Initialize qcluster - qcluster = flashmatch.QCluster_t() - qcluster.idx = idx - qcluster.time = 0 - # Get the point coordinates - points = inter.points[valid_mask] + # Remove initialize_backend - handled by base class + # Remove make_qcluster_list - handled by base class - # Get the depositions - depositions = inter.depositions[valid_mask] + # make_qcluster_list is now inherited - # Fill the trajectory - pytraj = np.hstack([points, depositions[:, None]]) - traj = flashmatch.as_geoalgo_trajectory(pytraj) - if self.legacy: - qcluster += self.light_path.MakeQCluster(traj, self.scaling) - else: - qcluster += self.light_path.MakeQCluster( - traj, self.scaling, self.alpha, self.recombination_mip) - - # Append - qcluster_v.append(qcluster) - - return qcluster_v - def make_hypothesis_list(self, interactions, id_offset=0, volume_id=None): """ Runs the hypothesis algorithm on a list of interactions to create - a list of flashmatch::Flash_t objects. + a list of spine Flash objects representing the predicted light. Parameters ---------- @@ -198,25 +123,39 @@ def make_hypothesis_list(self, interactions, id_offset=0, volume_id=None): Offset to add to the flash ID volume_id : int, optional Volume ID to use for the hypothesis + + Returns + ------- + List[Flash] + List of generated spine Flash objects. 
""" - # Make the QCluster_t objects - qcluster_v = self.make_qcluster_list(interactions) + # Make the QCluster_t objects using the base class method + # Store them in self.qcluster_v as the base class expects + self.qcluster_v = self.make_qcluster_list(interactions) + + # Map original interaction index to qcluster object for easy lookup + qcluster_map = {qc.idx: qc for qc in self.qcluster_v} - # Initialize the list of flashmatch::Flash_t objects + # Initialize the list of generated spine Flash objects self.hypothesis_v = [] - # Run the hypothesis algorithm - for i,int in enumerate(interactions): - # Make the QCluster_t object - qcluster = qcluster_v[i] + # Run the hypothesis algorithm for each interaction that produced a valid qcluster + for i, inter in enumerate(interactions): + # Find the corresponding QCluster_t object using the original index + qcluster = qcluster_map.get(inter.id) # Assuming inter.id is the original index used in make_qcluster_list + + # Skip if no valid qcluster was created for this interaction + if qcluster is None: + continue # Run the hypothesis algorithm - flash = self.hypothesis.GetEstimate(qcluster) + flash_hypothesis_fm = self.hypothesis.GetEstimate(qcluster) # flashmatch::Flash_t - # Create a new Flash object - flash = Flash.from_hypothesis(flash, int.id, i+id_offset, volume_id) + # Create a new spine Flash object from the hypothesis result + # Pass the original interaction ID (inter.id) + flash = Flash.from_hypothesis(flash_hypothesis_fm, inter.id, i + id_offset, volume_id) - # Append + # Append the generated spine Flash object self.hypothesis_v.append(flash) return self.hypothesis_v diff --git a/spine/post/optical/likelihood.py b/spine/post/optical/likelihood.py index 3a6ba672..926e84bc 100644 --- a/spine/post/optical/likelihood.py +++ b/spine/post/optical/likelihood.py @@ -2,19 +2,23 @@ import os import sys - import numpy as np +# Import the base class +from .opt0_interface import OpT0Interface + -class LikelihoodFlashMatcher: - """Interface class between full chain outputs and OpT0Finder +class LikelihoodFlashMatcher(OpT0Interface): + """ + Interface class between full chain outputs and OpT0Finder for likelihood matching. + Inherits common initialization and QCluster creation from OpT0Interface. See https://github.com/drinkingkazu/OpT0Finder for more details about it. """ def __init__(self, cfg, detector, parent_path=None, reflash_merging_window=None, scaling=1., alpha=0.21, - recombination_mip=0.65, legacy=False): + recombination_mip=0.6, legacy=False): """Initialize the likelihood-based flash matching algorithm. Parameters @@ -31,96 +35,37 @@ def __init__(self, cfg, detector, parent_path=None, Global scaling factor for the depositions (can be an expression) alpha : float, default 0.21 Number of excitons (Ar*) divided by number of electron-ion pairs (e-,Ar+) - recombination_mip : float, default 0.65 + recombination_mip : float, default 0.6 Recombination factor for MIP-like particles in LAr legacy : bool, default False Use the legacy OpT0Finder function(s). 
TODO: remove when dropping legacy """ - # Initialize the flash manager (OpT0Finder wrapper) - self.initialize_backend(cfg, detector, parent_path) + # Call the parent class initializer for common setup + super().__init__(cfg, detector, parent_path, scaling, alpha, + recombination_mip, legacy) - # Get the external parameters + # Store likelihood-specific parameters self.reflash_merging_window = reflash_merging_window - self.scaling = scaling - if isinstance(self.scaling, str): - self.scaling = eval(self.scaling) - self.alpha = alpha - if isinstance(self.alpha, str): - self.alpha = eval(self.alpha) - self.recombination_mip = recombination_mip - if isinstance(self.recombination_mip, str): - self.recombination_mip = eval(self.recombination_mip) - self.legacy = legacy - - # Initialize flash matching attributes + + # Initialize flash matching attributes specific to likelihood matching self.matches = None - self.qcluster_v = None + # self.qcluster_v is initialized in the base class self.flash_v = None + # self.mgr is initialized in _initialize_algorithm - def initialize_backend(self, cfg, detector, parent_path): - """Initialize OpT0Finder (backend). - - Expects that the environment variable `FMATCH_BASEDIR` is set. - You can either set it by hand (to the path where one can find - OpT0Finder) or you can source `OpT0Finder/configure.sh` if you - are running code from a command line. + def _initialize_algorithm(self, cfg_params): + """ + Initialize the FlashMatchManager for likelihood matching. Parameters ---------- - cfg: str - Path to config for OpT0Finder - detector : str, optional - Detector to get the geometry from - parent_path : str, optional - Path to the parent configuration file (allows for relative paths) + cfg_params : flashmatch::PSet + The loaded OpT0Finder configuration parameters. 
""" - # Add OpT0finder python interface to the python path - basedir = os.getenv('FMATCH_BASEDIR') - assert basedir is not None, ( - "You need to source OpT0Finder's configure.sh or set the " - "FMATCH_BASEDIR environment variable before running flash " - "matching.") - sys.path.append(os.path.join(basedir, 'python')) - - # Add the OpT0Finder library to the dynamic link loader - lib_path = os.path.join(basedir, 'build/lib') - os.environ['LD_LIBRARY_PATH'] = '{}:{}'.format( - lib_path, os.environ['LD_LIBRARY_PATH']) - - # Add the OpT0Finder data directory if it is not yet set - if 'FMATCH_DATADIR' not in os.environ: - os.environ['FMATCH_DATADIR'] = os.path.join(basedir, 'dat') - - # Load up the detector specifications - if detector is None: - det_cfg = os.path.join(basedir, 'dat/detector_specs.cfg') - else: - det_cfg = os.path.join(basedir, f'dat/detector_specs_{detector}.cfg') - - if not os.path.isfile(det_cfg): - raise FileNotFoundError( - f"Cannot file detector specification file: {det_cfg}.") - from flashmatch import flashmatch - flashmatch.DetectorSpecs.GetME(det_cfg) - - # Fetch and initialize the OpT0Finder configuration - if parent_path is not None and not os.path.isfile(cfg): - cfg = os.path.join(parent_path, cfg) - if not os.path.isfile(cfg): - raise FileNotFoundError( - f"Cannot find flash-matcher config: {cfg}") - - cfg = flashmatch.CreateFMParamsFromFile(cfg) - # Initialize The OpT0Finder flash match manager self.mgr = flashmatch.FlashMatchManager() - self.mgr.Configure(cfg) - - # Get the light path algorithm to produce QCluster_t objects - self.light_path = flashmatch.CustomAlgoFactory.get().create( - 'LightPath', 'ToyMCLightPath') - self.light_path.Configure(cfg.get['flashmatch::FMParams']('LightPath')) + self.mgr.Configure(cfg_params) def get_matches(self, interactions, flashes): """Find TPC interactions compatible with optical flashes. 
@@ -142,6 +87,7 @@ def get_matches(self, interactions, flashes): return [] # Build a list of QCluster_t (OpT0Finder interaction representation) + # Use the method from the base class self.qcluster_v = self.make_qcluster_list(interactions) # Build a list of Flash_t (OpT0Finder optical flash representation) @@ -153,61 +99,37 @@ def get_matches(self, interactions, flashes): # Build result, return result = [] for m in self.matches: - tpc_id = self.qcluster_v[m.tpc_id].idx - flash_id = self.flash_v[m.flash_id].idx - result.append((interactions[tpc_id], flashes[flash_id], m)) + # Find the original interaction index from the qcluster index + tpc_idx_orig = -1 + for qc in self.qcluster_v: + if qc.tpc_id == m.tpc_id: # Note: flashmatch::FlashMatch_t uses tpc_id which is the index in the manager's internal vector + tpc_idx_orig = qc.idx # qc.idx stores the original index from the input interactions list + break + if tpc_idx_orig == -1: + raise ValueError(f"Could not find original TPC index for match TPC ID {m.tpc_id}") + + # Find the original flash index from the flash_t index + flash_idx_orig = -1 + for fl in self.flash_v: + if fl.flash_id == m.flash_id: # Note: flashmatch::FlashMatch_t uses flash_id which is the index in the manager's internal vector + flash_idx_orig = fl.idx # fl.idx stores the original index from the input flashes list + break + if flash_idx_orig == -1: + raise ValueError(f"Could not find original Flash index for match Flash ID {m.flash_id}") + + # Find the corresponding interaction and flash objects using original indices + interaction_obj = next((inter for inter in interactions if inter.id == tpc_idx_orig), None) + flash_obj = next((flash for flash in flashes if flash.id == flash_idx_orig), None) # Use the potentially modified flashes list + + if interaction_obj is None: + raise ValueError(f"Could not find interaction with original index {tpc_idx_orig}") + if flash_obj is None: + raise ValueError(f"Could not find flash with original index {flash_idx_orig}") + + result.append((interaction_obj, flash_obj, m)) - return result - - def make_qcluster_list(self, interactions): - """Converts a list of SPINE interaction into a list of OpT0Finder - flashmatch.QCluster_t objects. - - Parameters - ---------- - interactions : List[Union[Interaction, TruthInteraction]] - List of TPC interactions - Returns - ------- - List[QCluster_t] - List of OpT0Finder flashmatch::QCluster_t objects - """ - # Loop over the interacions - from flashmatch import flashmatch - qcluster_v = [] - for idx, inter in enumerate(interactions): - # Produce a mask to remove negative value points (can happen) - valid_mask = np.where(inter.depositions > 0.)[0] - - # Skip interactions with less than 2 points - if len(valid_mask) < 2: - continue - - # Initialize qcluster - qcluster = flashmatch.QCluster_t() - qcluster.idx = idx - qcluster.time = 0 - - # Get the point coordinates - points = inter.points[valid_mask] - - # Get the depositions - depositions = inter.depositions[valid_mask] - - # Fill the trajectory - pytraj = np.hstack([points, depositions[:, None]]) - traj = flashmatch.as_geoalgo_trajectory(pytraj) - if self.legacy: - qcluster += self.light_path.MakeQCluster(traj, self.scaling) - else: - qcluster += self.light_path.MakeQCluster( - traj, self.scaling, self.alpha, self.recombination_mip) - - # Append - qcluster_v.append(qcluster) - - return qcluster_v + return result def make_flash_list(self, flashes): """Creates a list of flashmatch.Flash_t from the local class. 
@@ -341,7 +263,7 @@ def get_flash(self, idx, array=False): if array: return flashmatch.as_ndarray(flash) else: return flash - raise Exception('Flash {idx} does not exist in self.flash_v') + raise Exception(f'Flash {idx} does not exist in self.flash_v') def get_match(self, idx): @@ -387,7 +309,7 @@ def get_matched_flash(self, idx): flash_id = m.flash_id if flash_id is None: return None if flash_id > len(self.flash_v): - raise Exception('Flash {flash_id} does not exist in self.flash_v') + raise Exception(f'Flash {flash_id} does not exist in self.flash_v') return self.flash_v[flash_id] diff --git a/spine/post/optical/opt0_interface.py b/spine/post/optical/opt0_interface.py index 5579be31..eae40e69 100644 --- a/spine/post/optical/opt0_interface.py +++ b/spine/post/optical/opt0_interface.py @@ -1 +1,189 @@ -#TODO: Make this the base class for the likelihood and hypothesis classes \ No newline at end of file +"""Base module for interfacing with OpT0Finder algorithms.""" + +import os +import sys +import numpy as np +import re +from abc import ABC, abstractmethod + +class OpT0Interface(ABC): + """ + Abstract base class for OpT0Finder interfaces (Likelihood and Hypothesis). + + Handles common initialization logic, environment setup, configuration loading, + and QCluster creation. + """ + + def __init__(self, cfg, detector, parent_path=None, scaling=1., alpha=0.21, + recombination_mip=0.6, legacy=False): + """ + Initialize common attributes and the OpT0Finder backend. + + Parameters + ---------- + cfg : str + Flash matching configuration file path + detector : str, optional + Detector to get the geometry from + parent_path : str, optional + Path to the parent configuration file (allows for relative paths) + scaling : Union[float, str], default 1. + Global scaling factor for the depositions (can be an expression) + alpha : float, default 0.21 + Number of excitons (Ar*) divided by number of electron-ion pairs (e-,Ar+) + recombination_mip : float, default 0.6 + Recombination factor for MIP-like particles in LAr + legacy : bool, default False + Use the legacy OpT0Finder function(s). TODO: remove when dropping legacy + """ + # Store external parameters + self.scaling = scaling + if isinstance(self.scaling, str): + self.scaling = eval(self.scaling) + self.alpha = alpha + if isinstance(self.alpha, str): + self.alpha = eval(self.alpha) + self.recombination_mip = recombination_mip + if isinstance(self.recombination_mip, str): + self.recombination_mip = eval(self.recombination_mip) + self.legacy = legacy + + # Initialize the flash manager (OpT0Finder wrapper) + self.initialize_backend(cfg, detector, parent_path) + + # Initialize common attributes potentially used by subclasses + self.qcluster_v = None + + def initialize_backend(self, cfg, detector, parent_path): + """ + Initialize the common OpT0Finder backend components. + + Sets up environment variables, loads detector specs, loads configuration, + and initializes the LightPath algorithm. Calls the abstract method + `_initialize_algorithm` for subclass-specific initialization. 
+ + Parameters + ---------- + cfg : str + Path to config for OpT0Finder + detector : str, optional + Detector to get the geometry from + parent_path : str, optional + Path to the parent configuration file (allows for relative paths) + """ + # Add OpT0finder python interface to the python path + basedir = os.getenv('FMATCH_BASEDIR') + assert basedir is not None, ( + "You need to source OpT0Finder's configure.sh or set the " + "FMATCH_BASEDIR environment variable before running flash " + "matching.") + sys.path.append(os.path.join(basedir, 'python')) + + # Add the OpT0Finder library to the dynamic link loader + lib_path = os.path.join(basedir, 'build/lib') + # Avoid prepending if already present to prevent excessive path length + if lib_path not in os.environ.get('LD_LIBRARY_PATH', ''): + os.environ['LD_LIBRARY_PATH'] = '{}:{}'.format( + lib_path, os.environ.get('LD_LIBRARY_PATH', '')) + + # Add the OpT0Finder data directory if it is not yet set + if 'FMATCH_DATADIR' not in os.environ: + os.environ['FMATCH_DATADIR'] = os.path.join(basedir, 'dat') + + # Load up the detector specifications + if detector is None: + det_cfg = os.path.join(basedir, 'dat/detector_specs.cfg') + else: + det_cfg = os.path.join(basedir, f'dat/detector_specs_{detector}.cfg') + + if not os.path.isfile(det_cfg): + raise FileNotFoundError( + f"Cannot file detector specification file: {det_cfg}.") + + from flashmatch import flashmatch + flashmatch.DetectorSpecs.GetME(det_cfg) + + # Fetch and initialize the OpT0Finder configuration + if parent_path is not None and not os.path.isabs(cfg): + cfg = os.path.join(parent_path, cfg) + if not os.path.isfile(cfg): + raise FileNotFoundError( + f"Cannot find flash-matcher config: {cfg}") + + cfg_params = flashmatch.CreateFMParamsFromFile(cfg) + + # Get the light path algorithm to produce QCluster_t objects + self.light_path = flashmatch.CustomAlgoFactory.get().create( + 'LightPath', 'ToyMCLightPath') + self.light_path.Configure(cfg_params.get['flashmatch::FMParams']('LightPath')) + + # Initialize the specific algorithm (FlashMatchManager or Hypothesis) + self._initialize_algorithm(cfg_params) + + + @abstractmethod + def _initialize_algorithm(self, cfg_params): + """ + Abstract method for initializing the specific OpT0Finder algorithm. + + Subclasses must implement this to initialize their specific backend + (e.g., FlashMatchManager for likelihood, HypothesisAlgo for hypothesis). + + Parameters + ---------- + cfg_params : flashmatch::PSet + The loaded OpT0Finder configuration parameters. + """ + pass + + def make_qcluster_list(self, interactions): + """ + Converts a list of SPINE interaction into a list of OpT0Finder + flashmatch.QCluster_t objects. 
+ + Parameters + ---------- + interactions : List[Union[Interaction, TruthInteraction]] + List of TPC interactions + + Returns + ------- + List[QCluster_t] + List of OpT0Finder flashmatch::QCluster_t objects + """ + # Loop over the interacions + from flashmatch import flashmatch + qcluster_v = [] + for idx, inter in enumerate(interactions): + # Produce a mask to remove negative value points (can happen) + valid_mask = np.where(inter.depositions > 0.)[0] + + # Skip interactions with less than 2 points + if len(valid_mask) < 2: + continue + + # Initialize qcluster + qcluster = flashmatch.QCluster_t() + qcluster.idx = idx + qcluster.time = 0 # Assume t=0 for hypothesis/likelihood generation + + # Get the point coordinates + points = inter.points[valid_mask] + + # Get the depositions + depositions = inter.depositions[valid_mask] + + # Fill the trajectory + pytraj = np.hstack([points, depositions[:, None]]) + traj = flashmatch.as_geoalgo_trajectory(pytraj) + if self.legacy: + qcluster += self.light_path.MakeQCluster(traj, self.scaling) + else: + qcluster += self.light_path.MakeQCluster( + traj, self.scaling, self.alpha, self.recombination_mip) + + # Append + qcluster_v.append(qcluster) + + return qcluster_v + diff --git a/spine/vis/geo.py b/spine/vis/geo.py index 73614a51..633f1669 100644 --- a/spine/vis/geo.py +++ b/spine/vis/geo.py @@ -201,7 +201,7 @@ def optical_traces(self, meta=None, shared_legend=True, legendgroup=None, index = np.where(np.asarray(col) != 0)[0] pos = pos[index] col = col[index] - + sz = sz[index] # Determine wheter to show legends or not showlegend = not shared_legend or i == 0 @@ -210,7 +210,6 @@ def optical_traces(self, meta=None, shared_legend=True, legendgroup=None, if shape == 'box': # Convert the positions/dimensions to box lower/upper bounds lower, upper = pos - hd, pos + hd - # Build boxes traces += box_traces( lower, upper, shared_legend=shared_legend, name=name, @@ -220,7 +219,6 @@ def optical_traces(self, meta=None, shared_legend=True, legendgroup=None, else: # Convert the optical detector dimensions to a covariance matrix covmat = np.diag(hd**2) - # Build ellipsoids traces += ellipsoid_traces( pos, covmat, shared_legend=shared_legend, name=name, diff --git a/spine/vis/out.py b/spine/vis/out.py index 6c066cf1..8c8cecd9 100644 --- a/spine/vis/out.py +++ b/spine/vis/out.py @@ -159,7 +159,8 @@ def get_index(self, obj): def get(self, obj_type, attr=None, color_attr=None, draw_raw=False, draw_end_points=False, draw_vertices=False, draw_flashes=False, synchronize=False, titles=None, split_traces=False, - matched_flash_only=True, draw_flash_hypotheses=False, hypo_interaction_id=None): + matched_flash_only=True, draw_flash_hypotheses=False, hypo_interaction_id=None, + use_size_for_flash=False): """Draw the requested object type with the requested mode. Parameters @@ -191,6 +192,8 @@ def get(self, obj_type, attr=None, color_attr=None, draw_raw=False, If `True`, draw the flash hypotheses for the given interaction ID hypo_interaction_id : int, optional Interaction ID of the hypothesis to draw + use_size_for_flash : bool, default False + If `True`, use the size of the flash to draw the amount of PE for the flash Returns ------- @@ -234,20 +237,21 @@ def get(self, obj_type, attr=None, color_attr=None, draw_raw=False, "Must provide interactions to draw their vertices.") traces[prefix] += self._vertex_trace(obj_name) + # Flash configuration + if draw_flash_hypotheses and draw_flashes: + opacity = 0.5 + else: + opacity = 1. 
+ # Fetch the flashes, if requested if draw_flashes: - # This means we will scale the PDS size by the number of PEs - if draw_flash_hypotheses: - use_size = True - else: - use_size = False assert 'flashes' in self.data, ( "Must provide the `flashes` objects to draw them.") for prefix in self.prefixes: obj_name = f'{prefix}_interactions' assert obj_name in self.data, ( "Must provide interactions to draw matched flashes.") - traces[prefix] += self._flash_trace(obj_name, matched_flash_only, use_size) + traces[prefix] += self._flash_trace(obj_name, matched_flash_only, use_size_for_flash, opacity=opacity) # Fetch the flash hypotheses, if requested if draw_flash_hypotheses: @@ -255,7 +259,7 @@ def get(self, obj_type, attr=None, color_attr=None, draw_raw=False, "Must provide the `flash_hypos` objects to draw them.") for prefix in self.prefixes: obj_name = f'flash_hypos' - traces[prefix] += self._hypothesis_trace(obj_name, hypo_interaction_id) + traces[prefix] += self._hypothesis_trace(obj_name, hypo_interaction_id, use_size_for_flash, opacity=opacity,offset=[40,0,0]) # Add the TPC traces, if available if self.geo_drawer is not None: @@ -596,7 +600,7 @@ def _point_trace(self, obj_name, point_attr, **kwargs): return scatter_points( points, hovertext=np.array(hovertext), name=name, **kwargs) - def _flash_trace(self, obj_name, matched_only, use_size, **kwargs): + def _flash_trace(self, obj_name, matched_only, use_size, opacity, **kwargs): """Draw the cumlative PEs of flashes that have been matched to interactions specified by `obj_name`. @@ -608,6 +612,8 @@ def _flash_trace(self, obj_name, matched_only, use_size, **kwargs): If `True`, only flashes matched to interactions are drawn use_size : bool If `True`, scale the size of the flashes by the number of PEs. Otherwise, the size is fixed and the color is scaled by the number of PEs. + opacity : float + Opacity of the flashes **kwargs : dict, optional List of additional arguments to pass to :func:`optical_traces` @@ -640,29 +646,29 @@ def _flash_trace(self, obj_name, matched_only, use_size, **kwargs): flash = self.data['flashes'][flash_id] index = self.geo_drawer.geo.optical.volume_index(flash.volume_id) pe_per_ch = flash.pe_per_ch - time = flash.time + #Currently, the time of the flash is the same for the entire flash, so it's not a useful attribute + #time = flash.time if opt_det_ids is not None: pe_per_ch = np.bincount(opt_det_ids, weights=pe_per_ch) if use_size: size[index] += pe_per_ch color[index] += pe_per_ch else: + size[index] = 1 color[index] += pe_per_ch color = np.where(size == 0, 0, color) - #Normalize the size to be between 0.5 and 2 - size = (size - np.min(size))/(np.max(size) - np.min(size)) - size = size*(2 - 0.5) + 0.5 - - #If we are using size for PE, we need to mask out the flashes with no PE - #if use_size: + if use_size: + #Normalize the size to be between 0.5 and 2 + size = (size - np.min(size))/(np.max(size) - np.min(size)) + size = size*(2 - 0.5) + 0.5 # Return the set of optical detectors with a color scale return self.geo_drawer.optical_traces( meta=self.meta, color=color, size=size, zero_supress=True, - colorscale='Inferno', name=name, opacity=0.5) + colorscale='Inferno', name=name, opacity=opacity, **kwargs) - def _hypothesis_trace(self, obj_name, interaction_id, **kwargs): + def _hypothesis_trace(self, obj_name, interaction_id, use_size, opacity, **kwargs): """Draw the hypothesis object. 
Parameters @@ -671,6 +677,8 @@ def _hypothesis_trace(self, obj_name, interaction_id, **kwargs): Name of the object to draw interaction_id : int Interaction ID of the hypothesis to draw + opacity : float + Opacity of the hypothesis **kwargs : dict, optional List of additional arguments to pass to :func:`optical_traces` @@ -689,32 +697,33 @@ def _hypothesis_trace(self, obj_name, interaction_id, **kwargs): name = ' '.join(obj_name.split('_')).capitalize()[:-1] + ' hypothesis' # Find the list of flash IDs to draw that match the interaction ID - flash_ids = [hypo.id for hypo in self.data['flash_hypos'] if hypo.interaction_id == interaction_id] - print(f'hypo flash ids: {flash_ids}') + hypo_flashes = [hypo for hypo in self.data['flash_hypos'] if hypo.interaction_id == interaction_id] # Sum values from each flash to build a a global color scale size = np.zeros(self.geo_drawer.geo.optical.num_detectors) color = size.copy() opt_det_ids = self.geo_drawer.geo.optical.det_ids - for flash_id in flash_ids: - flash = self.data['flash_hypos'][flash_id] - index = self.geo_drawer.geo.optical.volume_index(flash.volume_id) - pe_per_ch = flash.pe_per_ch - time = flash.time + for hypo in hypo_flashes: + index = self.geo_drawer.geo.optical.volume_index(hypo.volume_id) + pe_per_ch = hypo.pe_per_ch + #Currently, the time of the flash is the same for the entire flash, so it's not a useful attribute + #time = hypo.time if opt_det_ids is not None: pe_per_ch = np.bincount(opt_det_ids, weights=pe_per_ch) - size[index] += pe_per_ch - color[index] += pe_per_ch - - color = np.where(size > 0, color, 0) - #Normalize the size to be between 0.5 and 2 - size = (size - np.min(size))/(np.max(size) - np.min(size)) - size = size*(2 - 0.5) + 0.5 + if use_size: + size[index] += pe_per_ch + color[index] += pe_per_ch + else: + size[index] = 1. + color[index] += pe_per_ch - print('draw_flash_hypotheses') + if use_size: + #Normalize the size to be between 0.5 and 2 + size = (size - np.min(size))/(np.max(size) - np.min(size)) + size = size*(2 - 0.5) + 0.5 # Return the set of optical detectors with a color scale return self.geo_drawer.optical_traces( meta=self.meta, color=color, size=size, zero_supress=True, - colorscale='Inferno', name=name, offset=[40,0,0], opacity=0.5) + colorscale='Inferno', name=name, opacity=opacity, **kwargs) From 8e7f6eb9b074bc02ea688769c94baec2a8a90563 Mon Sep 17 00:00:00 2001 From: Bear Carlson Date: Tue, 8 Apr 2025 15:49:14 -0700 Subject: [PATCH 09/13] Update .gitignore --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 18f8a87f..6aec5bca 100644 --- a/.gitignore +++ b/.gitignore @@ -21,7 +21,7 @@ mlreco/models/cluster_cnn/deprecated # Distribution / packaging .Python -#build/ #This causes git to ignore spine/build +build/ develop-eggs/ dist/ downloads/ From 5eaf858d1e51ef870e80e0b8395081b8a531101e Mon Sep 17 00:00:00 2001 From: Bear Carlson Date: Mon, 14 Apr 2025 14:04:00 -0700 Subject: [PATCH 10/13] Bugfix for retrieving matches in likelihood module --- spine/post/optical/likelihood.py | 38 +++----------------------------- 1 file changed, 3 insertions(+), 35 deletions(-) diff --git a/spine/post/optical/likelihood.py b/spine/post/optical/likelihood.py index 926e84bc..0ceabfeb 100644 --- a/spine/post/optical/likelihood.py +++ b/spine/post/optical/likelihood.py @@ -40,18 +40,13 @@ def __init__(self, cfg, detector, parent_path=None, legacy : bool, default False Use the legacy OpT0Finder function(s). 
            TODO: remove when dropping legacy
        """
-        # Call the parent class initializer for common setup
        super().__init__(cfg, detector, parent_path, scaling, alpha, recombination_mip, legacy)
-        # Store likelihood-specific parameters
        self.reflash_merging_window = reflash_merging_window
-        # Initialize flash matching attributes specific to likelihood matching
        self.matches = None
-        # self.qcluster_v is initialized in the base class
        self.flash_v = None
-        # self.mgr is initialized in _initialize_algorithm
 
    def _initialize_algorithm(self, cfg_params):
        """
@@ -87,7 +82,6 @@ def get_matches(self, interactions, flashes):
            return []
 
        # Build a list of QCluster_t (OpT0Finder interaction representation)
-        # Use the method from the base class
        self.qcluster_v = self.make_qcluster_list(interactions)
 
        # Build a list of Flash_t (OpT0Finder optical flash representation)
@@ -99,35 +93,9 @@
        # Build result, return
        result = []
        for m in self.matches:
-            # Find the original interaction index from the qcluster index
-            tpc_idx_orig = -1
-            for qc in self.qcluster_v:
-                if qc.tpc_id == m.tpc_id: # Note: flashmatch::FlashMatch_t uses tpc_id which is the index in the manager's internal vector
-                    tpc_idx_orig = qc.idx # qc.idx stores the original index from the input interactions list
-                    break
-            if tpc_idx_orig == -1:
-                raise ValueError(f"Could not find original TPC index for match TPC ID {m.tpc_id}")
-
-            # Find the original flash index from the flash_t index
-            flash_idx_orig = -1
-            for fl in self.flash_v:
-                if fl.flash_id == m.flash_id: # Note: flashmatch::FlashMatch_t uses flash_id which is the index in the manager's internal vector
-                    flash_idx_orig = fl.idx # fl.idx stores the original index from the input flashes list
-                    break
-            if flash_idx_orig == -1:
-                raise ValueError(f"Could not find original Flash index for match Flash ID {m.flash_id}")
-
-            # Find the corresponding interaction and flash objects using original indices
-            interaction_obj = next((inter for inter in interactions if inter.id == tpc_idx_orig), None)
-            flash_obj = next((flash for flash in flashes if flash.id == flash_idx_orig), None) # Use the potentially modified flashes list
-
-            if interaction_obj is None:
-                raise ValueError(f"Could not find interaction with original index {tpc_idx_orig}")
-            if flash_obj is None:
-                raise ValueError(f"Could not find flash with original index {flash_idx_orig}")
-
-            result.append((interaction_obj, flash_obj, m))
-
+            tpc_id = self.qcluster_v[m.tpc_id].idx
+            flash_id = self.flash_v[m.flash_id].idx
+            result.append((interactions[tpc_id], flashes[flash_id], m))
 
        return result

From 896e4ed82c016ae68ffebb2a335dada32a181fb3 Mon Sep 17 00:00:00 2001
From: Bear Carlson
Date: Mon, 14 Apr 2025 14:08:19 -0700
Subject: [PATCH 11/13] Bugfix for retrieving matches in likelihood module

---
 spine/post/optical/hypothesis.py | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/spine/post/optical/hypothesis.py b/spine/post/optical/hypothesis.py
index 292182cb..b75ea730 100644
--- a/spine/post/optical/hypothesis.py
+++ b/spine/post/optical/hypothesis.py
@@ -48,8 +48,6 @@ def __init__(self, cfg, detector, parent_path=None, scaling=1., alpha=0.21,
 
        # Initialize hypothesis-specific attributes
        self.hypothesis_v = None
-        # self.hypothesis is initialized in _initialize_algorithm
-        # self.light_path is initialized in the base class
 
    def _initialize_algorithm(self, cfg_params):
        """
@@ -104,12 +102,6 @@
            raise ValueError(f"Failed to configure hypothesis algorithm '{algo_name}' "
                             f"using PSet '{algo_name}'. Error: {e}")
-
-        # Remove initialize_backend - handled by base class
-        # Remove make_qcluster_list - handled by base class
-
-        # make_qcluster_list is now inherited
-
    def make_hypothesis_list(self, interactions, id_offset=0, volume_id=None):
        """
        Runs the hypothesis algorithm on a list of interactions to create

From 01256433b35e96dc081838c0dbf7fffe290f4f69 Mon Sep 17 00:00:00 2001
From: Bear Carlson
Date: Mon, 14 Apr 2025 14:09:03 -0700
Subject: [PATCH 12/13] Update spine/vis/out.py

---
 spine/vis/out.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/spine/vis/out.py b/spine/vis/out.py
index ea42a25c..543cbfb6 100644
--- a/spine/vis/out.py
+++ b/spine/vis/out.py
@@ -156,7 +156,6 @@ def get_index(self, obj):
            return obj.index
        else:
            return getattr(obj, self.truth_index_mode)
-    #TODO: Add a function to draw flash hypotheses
    def get(self, obj_type, attr=None, color_attr=None, draw_raw=False,
            draw_end_points=False, draw_vertices=False, draw_flashes=False,
            synchronize=False, titles=None, split_traces=False,

From d5bd3b352ee381f45df1b37da6b8cb059c226c64 Mon Sep 17 00:00:00 2001
From: Bear Carlson
Date: Mon, 14 Apr 2025 14:10:48 -0700
Subject: [PATCH 13/13] Bug fix for size of PDS components

---
 spine/vis/out.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/spine/vis/out.py b/spine/vis/out.py
index ea42a25c..1e978838 100644
--- a/spine/vis/out.py
+++ b/spine/vis/out.py
@@ -158,7 +158,7 @@ def get_index(self, obj):
            return getattr(obj, self.truth_index_mode)
    #TODO: Add a function to draw flash hypotheses
    def get(self, obj_type, attr=None, color_attr=None, draw_raw=False,
-            draw_end_points=False, draw_vertices=False, draw_flashes=False,
+            draw_end_points=False, draw_directions=False, draw_vertices=False, draw_flashes=False,
            synchronize=False, titles=None, split_traces=False,
            matched_flash_only=True, draw_flash_hypotheses=False,
            hypo_interaction_id=None, use_size_for_flash=False):
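
A minimal usage sketch of the drawing options introduced in this series (not part of the patches above). Only the keyword names come from the updated `get()` signature; `drawer` is assumed to be an already-constructed instance of the drawer class in spine/vis/out.py, and 'particles' and the interaction ID are illustrative placeholders.

    # Hedged sketch: exercise the new flash-drawing options of `get()`.
    # `drawer` is assumed to be an instance of the drawer class in spine/vis/out.py.
    def draw_flash_overlays(drawer, interaction_id=0):
        """Build traces with matched flashes and one flash hypothesis overlaid."""
        return drawer.get(
            'particles',                 # object type to draw (placeholder)
            draw_flashes=True,           # overlay recorded flashes on the optical detectors
            matched_flash_only=True,     # keep only flashes matched to an interaction
            draw_flash_hypotheses=True,  # overlay the flash hypothesis trace
            hypo_interaction_id=interaction_id,
            use_size_for_flash=False)    # fixed marker size; PE total mapped to color only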