"""
.. _tut-dics:

======================
DICS for power mapping
======================

In this tutorial, we'll simulate two signals originating from two
locations on the cortex. These signals will be sinusoids, so we'll be looking
at oscillatory activity (as opposed to evoked activity).

We'll use dynamic imaging of coherent sources (DICS) :footcite:`GrossEtAl2001`
to map out spectral power along the cortex. Let's see if we can find our two
simulated sources.
"""
# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
#
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

# %%
# Setup
# -----
# We first import the required packages to run this tutorial and define a list
# of filenames for various things we'll be using.
import numpy as np
from matplotlib import pyplot as plt
from scipy.signal import coherence, unit_impulse, welch

import mne
from mne.beamformer import apply_dics_csd, make_dics
from mne.datasets import sample
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.simulation import add_noise, simulate_raw
from mne.time_frequency import csd_morlet

# We use the MEG and MRI setup from the MNE-sample dataset
data_path = sample.data_path(download=False)
subjects_dir = data_path / "subjects"

# Filenames for various files we'll be using
meg_path = data_path / "MEG" / "sample"
raw_fname = meg_path / "sample_audvis_raw.fif"
fwd_fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif"
cov_fname = meg_path / "sample_audvis-cov.fif"
fwd = mne.read_forward_solution(fwd_fname)

# Seed for the random number generator
rand = np.random.RandomState(42)

# %%
# Data simulation
# ---------------
#
# The following function generates a timeseries that contains an oscillator,
# whose frequency fluctuates a little over time, but stays close to 10 Hz.
# We'll use this function to generate our two signals.

sfreq = 50.0  # Sampling frequency of the generated signal
n_samp = int(round(10.0 * sfreq))
times = np.arange(n_samp) / sfreq  # 10 seconds of signal
n_times = len(times)


def coh_signal_gen():
    """Generate an oscillating signal.

    Returns
    -------
    signal : ndarray
        The generated signal.
    """
    t_rand = 0.001  # Variation in the instantaneous frequency of the signal
    std = 0.1  # Std-dev of the random fluctuations added to the signal
    base_freq = 10.0  # Base frequency of the oscillators in Hertz
    n_times = len(times)

    # Generate an oscillator with varying frequency and phase lag.
    signal = np.sin(
        2.0
        * np.pi
        * (
            base_freq * np.arange(n_times) / sfreq
            + np.cumsum(t_rand * rand.randn(n_times))
        )
    )

    # Add some random fluctuations to the signal.
    signal += std * rand.randn(n_times)

    # Scale the signal to be in the right order of magnitude (~100 nAm)
    # for MEG data.
    signal *= 100e-9

    return signal


# %%
# Let's simulate two timeseries and plot some basic information about them.
signal1 = coh_signal_gen()
signal2 = coh_signal_gen()

fig, axes = plt.subplots(2, 2, figsize=(8, 4), layout="constrained")

# Plot the timeseries
ax = axes[0][0]
ax.plot(times, 1e9 * signal1, lw=0.5)
ax.set(
    xlabel="Time (s)", xlim=times[[0, -1]], ylabel="Amplitude (nAm)", title="Signal 1"
)
ax = axes[0][1]
ax.plot(times, 1e9 * signal2, lw=0.5)
ax.set(xlabel="Time (s)", xlim=times[[0, -1]], title="Signal 2")

# Power spectrum of the first timeseries
f, p = welch(signal1, fs=sfreq, nperseg=128, nfft=256)
ax = axes[1][0]
# Only plot the first 100 frequencies
ax.plot(f[:100], 10 * np.log10(p[:100]), lw=1.0)  # 10 * log10 for power in dB
ax.set(
    xlabel="Frequency (Hz)",
    xlim=f[[0, 99]],
    ylabel="Power (dB)",
    title="Power spectrum of signal 1",
)

# Compute the coherence between the two timeseries
f, coh = coherence(signal1, signal2, fs=sfreq, nperseg=100, noverlap=64)
ax = axes[1][1]
ax.plot(f[:50], coh[:50], lw=1.0)
ax.set(
    xlabel="Frequency (Hz)",
    xlim=f[[0, 49]],
    ylabel="Coherence",
    title="Coherence between the timeseries",
)

# %%
# Now we put the signals at two locations on the cortex. We construct a
# :class:`mne.SourceEstimate` object to store them in.
#
# The timeseries will have a part where the signal is active and a part where
# it is not. The techniques we'll be using in this tutorial depend on being
# able to contrast data that contains the signal of interest versus data that
# does not (i.e. it contains only noise).

# The locations on the cortex where the signal will originate from. These
# locations are indicated as vertex numbers.
vertices = [[146374], [33830]]

# Construct SourceEstimates that describe the signals at the cortical level.
data = np.vstack((signal1, signal2))
stc_signal = mne.SourceEstimate(
    data, vertices, tmin=0, tstep=1.0 / sfreq, subject="sample"
)
stc_noise = stc_signal * 0.0  # same vertices, but all-zero (noise-only) data

# %%
# Before we simulate the sensor-level data, let's define a signal-to-noise
# ratio. You are encouraged to play with this parameter and see the effect of
# noise on our results.
snr = 1.0  # Signal-to-noise ratio. Decrease to add more noise.

# %%
# Now we run the signal through the forward model to obtain simulated sensor
# data. To save computation time, we'll only simulate gradiometer data. You can
# try simulating other types of sensors as well.
#
# Some noise is added based on an ad hoc noise covariance matrix (defined
# below), scaled to implement the desired SNR.

# Read the info from the sample dataset. This defines the location of the
# sensors and such.
info = mne.io.read_raw(raw_fname).crop(0, 1).resample(50).info

# Only use gradiometers
picks = mne.pick_types(info, meg="grad", stim=True, exclude=())
mne.pick_info(info, picks, copy=False)  # modifies info in-place

# Define a covariance matrix for the simulated noise. In this tutorial, we use
# a simple diagonal matrix.
cov = mne.cov.make_ad_hoc_cov(info)
cov["data"] *= (20.0 / snr) ** 2  # Scale the noise to achieve the desired SNR

# Simulate the raw data, with a lowpass filter on the noise
stcs = [
    (stc_signal, unit_impulse(n_samp, dtype=int) * 1),
    (stc_noise, unit_impulse(n_samp, dtype=int) * 2),
]  # stacked in time
duration = (len(stc_signal.times) * 2) / sfreq
raw = simulate_raw(info, stcs, forward=fwd)
add_noise(raw, cov, iir_filter=[4, -4, 0.8], random_state=rand)


# %%
# We create an :class:`mne.Epochs` object containing two trials: one with
# both noise and signal and one with just noise.

events = mne.find_events(raw, initial_event=True)
tmax = (len(stc_signal.times) - 1) / sfreq
epochs = mne.Epochs(
    raw,
    events,
    event_id=dict(signal=1, noise=2),
    tmin=0,
    tmax=tmax,
    baseline=None,
    preload=True,
)
assert len(epochs) == 2  # ensure that we got the two expected events

# Plot some of the channels of the simulated data that are situated above one
# of our simulated sources.
picks = mne.read_vectorview_selection("Left-frontal")  # contains both mag and grad
picks = [p for p in picks if p in epochs.ch_names]  # now only grads
epochs.plot(picks=picks, events=True)

# %%
# Power mapping
# -------------
# With our simulated dataset ready, we can now pretend to be researchers who
# have just recorded this from a real subject and are going to study which
# parts of the brain communicate with each other.
#
# First, we'll create a source estimate of the MEG data. For this, we'll use
# both a straightforward MNE-dSPM inverse solution and the DICS beamformer,
# which is specifically designed to work with oscillatory data.

# %%
# Computing the inverse using MNE-dSPM:

# Compute the inverse operator
fwd = mne.read_forward_solution(fwd_fname)
inv = make_inverse_operator(epochs.info, fwd, cov)

# Apply the inverse model to the trial that also contains the signal.
s = apply_inverse(epochs["signal"].average(), inv)

# Take the root-mean-square along the time dimension and plot the result.
s_rms = np.sqrt((s**2).mean())
title = "MNE-dSPM inverse (RMS)"
brain = s_rms.plot(
    "sample",
    subjects_dir=subjects_dir,
    hemi="both",
    figure=1,
    size=600,
    time_label=title,
    title=title,
)

# Indicate the true locations of the source activity on the plot.
brain.add_foci(vertices[0][0], coords_as_verts=True, hemi="lh")
brain.add_foci(vertices[1][0], coords_as_verts=True, hemi="rh")

# Rotate the view.
brain.show_view(azimuth=0, elevation=0, distance=550, focalpoint=(0, 0, 0))

# %%
# We will now compute the cortical power map at 10 Hz using a DICS beamformer.
# A beamformer will construct, for each vertex, a spatial filter that aims to
# pass activity originating from that vertex while dampening activity from
# other sources as much as possible (see the toy sketch below).
#
# The :func:`mne.beamformer.make_dics` function has many switches that offer
# precise control over the way the filter weights are computed. Currently,
# there is no clear consensus regarding the best approach. This is why we will
# demonstrate two approaches here:
#
# 1. The approach as described in :footcite:`vanVlietEtAl2018`, which first
#    normalizes the forward solution and computes a vector beamformer.
# 2. The scalar beamforming approach based on
#    :footcite:`SekiharaNagarajan2008`, which uses weight normalization
#    instead of normalizing the forward solution.
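#
# To build intuition for what such a spatial filter does, here is a minimal
# NumPy sketch of a unit-gain beamformer for a single source. This is
# illustrative only: the toy leadfield ``h_toy``, data ``x_toy``, and CSD
# matrix ``c_toy`` are made-up quantities that play no further role in this
# tutorial.

h_toy = rand.randn(10)  # hypothetical leadfield column for one source
x_toy = rand.randn(10, 500)  # hypothetical sensor-level data
c_toy = x_toy @ x_toy.T / x_toy.shape[1]  # toy CSD / covariance matrix
c_inv = np.linalg.inv(c_toy)
w_toy = c_inv @ h_toy / (h_toy @ c_inv @ h_toy)  # unit gain: w_toy @ h_toy ≈ 1
p_toy = w_toy @ c_toy @ w_toy  # beamformer estimate of the source power
print(f"Toy source power estimate: {p_toy:.3e}")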
|
|
# Estimate the cross-spectral density (CSD) matrix on the trial containing the
# signal.
csd_signal = csd_morlet(epochs["signal"], frequencies=[10])

# Compute the spatial filters for each vertex, using two approaches.
filters_approach1 = make_dics(
    info,
    fwd,
    csd_signal,
    reg=0.05,
    pick_ori="max-power",
    depth=1.0,
    inversion="single",
    weight_norm=None,
    real_filter=True,
)
print(filters_approach1)

filters_approach2 = make_dics(
    info,
    fwd,
    csd_signal,
    reg=0.05,
    pick_ori="max-power",
    depth=None,
    inversion="matrix",
    weight_norm="unit-noise-gain",
    real_filter=True,
)
print(filters_approach2)

# You can save these to disk with:
# filters_approach1.save("filters_1-dics.h5")
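# and load them back with:
# filters_approach1 = mne.beamformer.read_beamformer("filters_1-dics.h5")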
|
|
# Compute the DICS power map by applying the spatial filters to the CSD matrix.
power_approach1, f = apply_dics_csd(csd_signal, filters_approach1)
power_approach2, f = apply_dics_csd(csd_signal, filters_approach2)
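
# As an optional sanity check (not used below), the same filters could be
# applied to a CSD estimated from the noise-only trial, to contrast signal
# power against the noise floor:
# csd_noise = csd_morlet(epochs["noise"], frequencies=[10])
# power_noise, f = apply_dics_csd(csd_noise, filters_approach1)
# power_rel = power_approach1.copy()
# power_rel.data /= power_noise.data  # power relative to the noise floor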
|
|
# %%
# Plot the DICS power maps for both approaches, starting with the first:


def plot_approach(power, n):
    """Plot the results on a brain."""
    title = f"DICS power map, approach {n}"
    brain = power.plot(
        "sample",
        subjects_dir=subjects_dir,
        hemi="both",
        size=600,
        time_label=title,
        title=title,
    )
    # Indicate the true locations of the source activity on the plot.
    brain.add_foci(vertices[0][0], coords_as_verts=True, hemi="lh", color="b")
    brain.add_foci(vertices[1][0], coords_as_verts=True, hemi="rh", color="b")
    # Rotate the view.
    brain.show_view(azimuth=0, elevation=0, distance=550, focalpoint=(0, 0, 0))
    return brain


brain1 = plot_approach(power_approach1, 1)

# %%
# Now the second:

brain2 = plot_approach(power_approach2, 2)

# %%
# Excellent! All methods found our two simulated sources. Of course, with a
# signal-to-noise ratio (SNR) of 1, it isn't very hard to find them. You can
# try playing with the SNR and see how the MNE-dSPM and DICS approaches hold
# up as the noise increases. With more noise, you may need to increase the
# regularization parameter of the DICS beamformer.
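#
# For example, in a noisier regime you might re-run ``make_dics`` with a
# stronger ``reg`` value (the ``0.5`` below is only an illustrative choice,
# not a recommendation)::
#
#     filters_noisy = make_dics(info, fwd, csd_signal, reg=0.5,
#                               pick_ori="max-power", inversion="matrix",
#                               weight_norm="unit-noise-gain", real_filter=True)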
#
# References
# ----------
# .. footbibliography::