diff --git a/gwsumm/archive.py b/gwsumm/archive.py
index ad9d4ae3..b2c6ac53 100644
--- a/gwsumm/archive.py
+++ b/gwsumm/archive.py
@@ -210,7 +210,8 @@ def read_data_archive(sourcefile, rm_source_on_fail=True):
     # down the whole workflow, requiring manual intervention. Here, we attempt
     # to automatically catch a common failure
     try:
-        h5file = File(sourcefile, 'r')
+        with File(sourcefile, 'r'):
+            pass
     except FileNotFoundError:
         raise
     except OSError as exc:  # file is corrupt, so we remove it to start fresh
@@ -250,7 +251,8 @@ def read_data_archive(sourcefile, rm_source_on_fail=True):
 
     # -- timeseries -------------------------
 
-    for dataset in h5file.get('timeseries', {}).values():
+    for name in h5file.get('timeseries', {}):
+        dataset = h5file['timeseries'][name]
         ts = TimeSeries.read(dataset, format='hdf5')
         if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name) and
                 ts.sample_rate.value == 1.0):
@@ -271,7 +273,8 @@ def read_data_archive(sourcefile, rm_source_on_fail=True):
 
     # -- statevector -- ---------------------
 
-    for dataset in h5file.get('statevector', {}).values():
+    for name in h5file.get('statevector', {}):
+        dataset = h5file['statevector'][name]
         sv = StateVector.read(dataset, format='hdf5')
         sv.channel = get_channel(sv.channel)
         add_timeseries(sv, key=sv.channel.ndsname)
@@ -279,9 +282,11 @@ def read_data_archive(sourcefile, rm_source_on_fail=True):
     # -- spectrogram ------------------------
 
     for tag, add_ in zip(
-            ['spectrogram', 'coherence-components'],
-            [add_spectrogram, add_coherence_component_spectrogram]):
-        for key, dataset in h5file.get(tag, {}).items():
+        ['spectrogram', 'coherence-components'],
+        [add_spectrogram, add_coherence_component_spectrogram],
+    ):
+        for key in h5file.get(tag, {}):
+            dataset = h5file[tag][key]
             key = key.rsplit(',', 1)[0]
             spec = Spectrogram.read(dataset, format='hdf5')
             spec.channel = get_channel(spec.channel)
@@ -289,14 +294,16 @@ def read_data_archive(sourcefile, rm_source_on_fail=True):
 
     # -- segments ---------------------------
 
-    for name, dataset in h5file.get('segments', {}).items():
+    for name in h5file.get('segments', {}):
+        dataset = h5file['segments'][name]
         dqflag = DataQualityFlag.read(h5file, path=dataset.name,
                                       format='hdf5')
         globalv.SEGMENTS += {name: dqflag}
 
     # -- triggers ---------------------------
 
-    for dataset in h5file.get('triggers', {}).values():
+    for name in h5file.get('triggers', {}):
+        dataset = h5file['triggers'][name]
         load_table(dataset)
 
 
diff --git a/gwsumm/data/timeseries.py b/gwsumm/data/timeseries.py
index 6d03ac0e..54f12102 100644
--- a/gwsumm/data/timeseries.py
+++ b/gwsumm/data/timeseries.py
@@ -393,10 +393,12 @@ def all_adc(cache):
     """
     for path in cache:
         try:
-            tag = os.path.basename(path).split('-')[1]
+            path = os.path.basename(path)
         except (AttributeError, TypeError):  # CacheEntry
             tag = path.description
             path = path.path
+        else:
+            tag = path.split('-')[1]
         if not path.endswith('.gwf') or tag not in ADC_TYPES:
             return False
     return True
@@ -711,8 +713,11 @@ def _get_timeseries_dict(channels, segments, config=None,
                 data.override_unit(channel.unit)
 
             # update channel type for trends
-            if data.channel.type is None and (
-                    data.channel.trend is not None):
+            if (
+                data.channel is not None
+                and data.channel.type is None
+                and data.channel.trend is not None
+            ):
                 if data.dt.to('s').value == 1:
                     data.channel.type = 's-trend'
                 elif data.dt.to('s').value == 60:
diff --git a/gwsumm/tests/test_config.py b/gwsumm/tests/test_config.py
index 7d19ba82..d7d442f9 100644
--- a/gwsumm/tests/test_config.py
+++ b/gwsumm/tests/test_config.py
@@ -73,8 +73,8 @@ def new(cls):
         TEST_CONFIG.seek(0)
         return cp
 
+    @pytest.fixture
     @classmethod
-    @pytest.fixture()
     def cnfg(cls):
         return cls.new()
 
diff --git a/gwsumm/tests/test_plot.py b/gwsumm/tests/test_plot.py
index e59a469d..a5099f60 100644
--- a/gwsumm/tests/test_plot.py
+++ b/gwsumm/tests/test_plot.py
@@ -103,8 +103,8 @@ def create(cls, *args, **kwargs):
             return cls.PLOT(*args, **kwargs)
         return cls.PLOT(*cls.DEFAULT_ARGS, **cls.DEFAULT_KWARGS)
 
+    @pytest.fixture
     @classmethod
-    @pytest.fixture()
     def plot(cls):
         return cls.create()
 