"""
First Demo
==========
This demo showcases feature estimation and exemplar
feature analysis using simulated data.
"""
import numpy as np
from matplotlib import pyplot as plt
import py_neuromodulation as nm
# %%
# Data Simulation
# ---------------
# We will now generate exemplar data of 10 seconds duration for 6 channels at a sampling rate of 1 kHz.
def generate_random_walk(NUM_CHANNELS, TIME_DATA_SAMPLES):
    # from https://towardsdatascience.com/random-walks-with-python-8420981bc4bc
    dims = NUM_CHANNELS
    step_n = TIME_DATA_SAMPLES - 1
    step_set = [-1, 0, 1]
    origin = (np.random.random([1, dims]) - 0.5) * 1  # random origin per channel
    step_shape = (step_n, dims)
    steps = np.random.choice(a=step_set, size=step_shape)
    path = np.concatenate([origin, steps]).cumsum(0)
    return path.T
NUM_CHANNELS = 6
sfreq = 1000
TIME_DATA_SAMPLES = 10 * sfreq
data = generate_random_walk(NUM_CHANNELS, TIME_DATA_SAMPLES)
time = np.arange(0, TIME_DATA_SAMPLES / sfreq, 1 / sfreq)
plt.figure(figsize=(8, 4), dpi=100)
for ch_idx in range(data.shape[0]):
    plt.plot(time, data[ch_idx, :])
plt.xlabel("Time [s]")
plt.ylabel("Amplitude")
plt.title("Example random walk data")
# %%
# Now let's define the necessary setup files we will be using for data
# preprocessing and feature estimation. py_neuromodulation is based on two
# parametrization files: the *channels.tsv* and the *default_settings.json*.
#
# nm_channels
# ~~~~~~~~~~~
#
# The *nm_channels* dataframe contains the following columns:
#
# +-----------------------------------+-----------------------------------+
# | Column name                       | Description                       |
# +===================================+===================================+
# | **name**                          | name of the channel               |
# +-----------------------------------+-----------------------------------+
# | **rereference**                   | different channel name for        |
# |                                   | bipolar re-referencing, or        |
# |                                   | average for common average        |
# |                                   | re-referencing                    |
# +-----------------------------------+-----------------------------------+
# | **used**                          | 0 or 1, channel selection         |
# +-----------------------------------+-----------------------------------+
# | **target**                        | 0 or 1, for some decoding         |
# |                                   | applications we can define target |
# |                                   | channels, e.g. EMG channels       |
# +-----------------------------------+-----------------------------------+
# | **type**                          | channel type according to the     |
# |                                   | `mne-python`_ toolbox, e.g. ecog, |
# |                                   | eeg, ecg, emg, dbs, seeg etc.     |
# +-----------------------------------+-----------------------------------+
# | **status**                        | good or bad, used for channel     |
# |                                   | quality indication                |
# +-----------------------------------+-----------------------------------+
# | **new_name**                      | this keyword can be specified to  |
# |                                   | indicate for example the used     |
# |                                   | re-referencing scheme             |
# +-----------------------------------+-----------------------------------+
#
# .. _mne-python: https://mne.tools/stable/auto_tutorials/raw/10_raw_overview.html#sphx-glr-auto-tutorials-raw-10-raw-overview-py
#
# The channels definition can either be provided as a *.tsv* text file, or as a pandas
# DataFrame. There are some helper functions that let you create the
# *nm_channels* DataFrame without much effort:
nm_channels = nm.utils.get_default_channels_from_data(data, car_rereferencing=True)
nm_channels
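# %%
# As a minimal sketch of the manual alternative (column names are taken from
# the table above; the channel names, types, and values here are made up for
# illustration), the channels could also be constructed directly as a pandas
# DataFrame:
import pandas as pd

manual_channels = pd.DataFrame(
    {
        "name": [f"ch{i}" for i in range(NUM_CHANNELS)],
        "rereference": ["average"] * NUM_CHANNELS,  # common average re-referencing
        "used": [1] * NUM_CHANNELS,  # include every channel
        "target": [0] * NUM_CHANNELS,  # no decoding target channels here
        "type": ["ecog"] * NUM_CHANNELS,  # channel type following mne-python
        "status": ["good"] * NUM_CHANNELS,
        "new_name": [f"ch{i}-avgref" for i in range(NUM_CHANNELS)],
    }
)
manual_channels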
# %%
# Using this function, default channel names and a common average re-referencing scheme are specified.
# Alternatively, the *define_nmchannels.set_channels* function can be used to pass values for each column.
#
# nm_settings
# -----------
# Next, we will initialize the settings. Here we use the fast-compute defaults, which enable only a reduced subset of features:
settings = nm.NMSettings.get_fast_compute()
# %%
# The settings are based on a *.json* file which contains the parametrization for preprocessing, feature estimation, postprocessing, and
# the sampling rate at which features are calculated.
# In this example `sampling_rate_features_hz` is specified to be 10 Hz, so every 100 ms a new set of features is calculated
# (a quick sanity check of this number follows after the feature selection below).
#
# For many features the `segment_length_features_ms` specifies the time dimension of the raw signal being used for feature calculation. Here it is specified to be 1000 ms.
#
# We will now enable the features:
#
# * fft
# * fooof
# * bursts
# * sharpwave
#
# and stay with the default preprocessing methods:
#
# * notch_filter
# * re_referencing
#
# and use *z-score* postprocessing normalization.
settings.features.fooof = True
settings.features.fft = True
settings.features.bursts = True
settings.features.sharpwave_analysis = True
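# %%
# As the quick sanity check promised above (a sketch, assuming the settings
# object exposes `sampling_rate_features_hz` as an attribute; the actual row
# count can differ slightly depending on how the first segment is handled):
expected_rows = TIME_DATA_SAMPLES / sfreq * settings.sampling_rate_features_hz
print(f"Expecting roughly {expected_rows:.0f} feature rows for 10 s of data at 10 Hz")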
# %%
# We are now ready to instantiate the *Stream* and call the *run* method for feature estimation:
stream = nm.Stream(
    settings=settings,
    channels=nm_channels,
    verbose=True,
    sfreq=sfreq,
    line_noise=50,
)
features = stream.run(data, save_csv=True)
# %%
# Feature Analysis
# ----------------
#
# There is a lot of output, which we could suppress by setting *verbose* to False, but let's have a look at what was computed.
# We will therefore use the *FeatureReader* class to showcase some functions. For multi-run or multi-subject analyses,
# the *feature_dir* and *feature_file* parameters point to the respective output:
analyzer = nm.FeatureReader(
    feature_dir=stream.out_dir_root, feature_file=stream.experiment_name
)
# %%
# Let's have a look at the resulting "feature_arr" DataFrame:
analyzer.feature_arr.iloc[:10, :7]
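# %%
# As an illustrative sketch (assuming the feature column names contain the
# channel name and the feature modality, as visible in the preview above),
# individual feature groups can be selected by simple string matching:
fft_columns = [
    col for col in analyzer.feature_arr.columns if "fft" in col and "ch1" in col
]
analyzer.feature_arr[fft_columns].head()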
# %%
# Seems like a lot of features were calculated. The `time` column holds the time index of each row.
# For each of the 6 specified channels, 31 features were computed.
# We can now use some in-built plotting functions for visualization.
#
# .. note::
#
#    Due to the nature of the simulated data, some features have constant
#    values, which are not visible after image normalization.
#
#
analyzer.plot_all_features(ch_used="ch1")
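# %%
# Besides the built-in plots, single feature traces can be inspected directly
# with matplotlib. A minimal sketch (assuming a "time" column alongside the
# feature columns, as shown in the DataFrame preview above):
feature_cols = [c for c in analyzer.feature_arr.columns if c != "time"]
plt.figure(figsize=(8, 3), dpi=100)
plt.plot(analyzer.feature_arr["time"], analyzer.feature_arr[feature_cols[0]])
plt.xlabel("time")
plt.ylabel(feature_cols[0])
plt.title("Single feature trace")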
# %%
nm.analysis.plot_corr_matrix(
    figsize=(25, 25),
    show_plot=True,
    feature=analyzer.feature_arr,
)
# %%
# The correlation matrix above shows the correlation of every feature of every channel to every other.
# This notebook demonstrated how features can quickly be generated from example data. For further feature modalities and decoding applications, check out the next notebooks.