Note
Go to the end to download the full example code.
Spectro-temporal receptive field (STRF) estimation on continuous data#
This demonstrates how an encoding model can be fit with multiple continuous inputs. In this case, we simulate the model behind a spectro-temporal receptive field (or STRF). First, we create a linear filter that maps patterns in spectro-temporal space onto an output, representing neural activity. We fit a receptive field model that attempts to recover the original linear filter that was used to create this data.
# Authors: Chris Holdgraf <choldgraf@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import loadmat  # reads the MATLAB-format speech_data.mat file
from scipy.stats import multivariate_normal  # Gaussian PDFs used to build the simulated STRF
from sklearn.preprocessing import scale  # z-scoring, used for plotting only
import mne
from mne.decoding import ReceptiveField, TimeDelayingRidge

# Fixed seed so the simulated noise (and therefore all fits/plots) is reproducible
rng = np.random.RandomState(1337)  # To make this example reproducible
Load audio data#
We’ll read in the audio data from [1] in order to simulate a response.
In addition, we’ll downsample the data along the time dimension in order to speed up computation. Note that depending on the input values, this may not be desired — for example, if your input stimulus varies more quickly than half of the downsampled sampling rate, that variation will be lost (aliased) by the downsampling.
# Read in audio that's been recorded in epochs.
path_audio = mne.datasets.mtrf.data_path()  # downloads the mTRF dataset if not cached
data = loadmat(str(path_audio / "speech_data.mat"))
audio = data["spectrogram"].T  # transpose to (n_frequencies, n_times)
sfreq = float(data["Fs"][0, 0])  # sampling rate is stored as a 1x1 MATLAB array
n_decim = 2  # temporal downsampling factor (speeds up the example)
audio = mne.filter.resample(audio, down=n_decim, npad="auto")
sfreq /= n_decim  # keep the stated sampling rate consistent with the resampled data
Create a receptive field#
We’ll simulate a linear receptive field for a theoretical neural signal. This defines how the signal will respond to power in this receptive field space.
n_freqs = 20
tmin, tmax = -0.1, 0.4

# Build the lag axis explicitly (first in samples, then in seconds) so the
# simulated filter lives on the same grid the ReceptiveField model will use.
delays_samp = np.arange(np.round(tmin * sfreq), np.round(tmax * sfreq) + 1).astype(int)
delays_sec = delays_samp / sfreq
freqs = np.linspace(50, 5000, n_freqs)

# (n_freqs, n_delays, 2) array of (lag, frequency) coordinate pairs,
# i.e. one 2-vector per cell of the spectro-temporal grid.
grid = np.stack(np.meshgrid(delays_sec, freqs), axis=-1)

# The "true" STRF: a positive 2-D Gaussian bump at one (lag, frequency)
# location plus a negative bump at another, summed into one weight matrix.
center_pos = [0.1, 500]
center_neg = [0.2, 2500]
cov_mat = [[0.001, 0], [0, 500000]]
bump_pos = multivariate_normal.pdf(grid, center_pos, cov_mat)
bump_neg = -multivariate_normal.pdf(grid, center_neg, cov_mat)
weights = bump_pos + bump_neg  # Combine to create the "true" STRF
# Shared color settings for every STRF image in this example: the limits are
# symmetric about zero so red/blue indicate positive/negative weights of
# equal magnitude on the diverging colormap.
kwargs = dict(
    vmax=np.abs(weights).max(),
    vmin=-np.abs(weights).max(),
    cmap="RdBu_r",
    shading="gouraud",
)
fig, ax = plt.subplots(layout="constrained")
ax.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax.set(title="Simulated STRF", xlabel="Time Lags (s)", ylabel="Frequency (Hz)")
plt.setp(ax.get_xticklabels(), rotation=45)
Simulate a neural response#
Using this receptive field, we’ll create an artificial neural response to a stimulus.
To do this, we’ll create a time-delayed version of the receptive field, and then calculate the dot product between this and the stimulus. Note that this is effectively doing a convolution between the stimulus and the receptive field. See here for more information.
# Reshape audio to split into epochs, then make epochs the first dimension.
n_epochs, n_seconds = 16, 5
# Truncate so the spectrogram divides evenly into n_epochs epochs of n_seconds each
audio = audio[:, : int(n_seconds * sfreq * n_epochs)]
X = audio.reshape([n_freqs, n_epochs, -1]).swapaxes(0, 1)  # (n_epochs, n_freqs, n_times)
n_times = X.shape[-1]

# Delay the spectrogram according to delays so it can be combined w/ the STRF
# Lags will now be in axis 1, then we reshape to vectorize
delays = np.arange(np.round(tmin * sfreq), np.round(tmax * sfreq) + 1).astype(int)

# Iterate through indices and append
X_del = np.zeros((len(delays),) + X.shape)
for ii, ix_delay in enumerate(delays):
    # These arrays will take/put particular indices in the data:
    # `take` selects the source samples, `put` the destination samples,
    # so each X_del[ii] is X shifted along time by ix_delay samples
    # (out-of-range samples are left as the zeros X_del was initialized with).
    take = [slice(None)] * X.ndim
    put = [slice(None)] * X.ndim
    if ix_delay > 0:
        # Positive lag: data moves later in time, zero-padding at the start
        take[-1] = slice(None, -ix_delay)
        put[-1] = slice(ix_delay, None)
    elif ix_delay < 0:
        # Negative lag: data moves earlier in time, zero-padding at the end
        take[-1] = slice(-ix_delay, None)
        put[-1] = slice(None, ix_delay)
    X_del[ii][tuple(put)] = X[tuple(take)]

# Now set the delayed axis to the 2nd dimension
X_del = np.rollaxis(X_del, 0, 3)  # (n_epochs, n_freqs, n_delays, n_times)
X_del = X_del.reshape([n_epochs, -1, n_times])  # flatten (freq, delay) into features
n_features = X_del.shape[1]
weights_sim = weights.ravel()  # flatten the STRF the same way so the dot product aligns

# Simulate a neural response to the sound, given this STRF
y = np.zeros((n_epochs, n_times))
for ii, iep in enumerate(X_del):
    # Simulate this epoch and add random noise
    noise_amp = 0.002
    y[ii] = np.dot(weights_sim, iep) + noise_amp * rng.randn(n_times)
# Plot the first 2 trials of audio and the simulated electrode activity
# (`scale` z-scores the data so both panels share a comparable amplitude scale)
X_plt = scale(np.hstack(X[:2]).T).T
y_plt = scale(np.hstack(y[:2]))
time = np.arange(X_plt.shape[-1]) / sfreq
_, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 6), sharex=True, layout="constrained")
ax1.pcolormesh(time, freqs, X_plt, vmin=0, vmax=4, cmap="Reds", shading="gouraud")
ax1.set_title("Input auditory features")
ax1.set(ylim=[freqs.min(), freqs.max()], ylabel="Frequency (Hz)")
ax2.plot(time, y_plt)
ax2.set(
    xlim=[time.min(), time.max()],
    title="Simulated response",
    xlabel="Time (s)",
    ylabel="Activity (a.u.)",
)
Fit a model to recover this receptive field#
Finally, we’ll use the mne.decoding.ReceptiveField class to recover the linear receptive field of this signal. Note that properties of the receptive field (e.g. smoothness) will depend on the autocorrelation in the inputs and outputs.
# Create training and testing data: hold out the final epoch for evaluation.
train, test = np.arange(n_epochs - 1), n_epochs - 1
X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
# ReceptiveField expects time as the first axis, so roll it to the front.
X_train, X_test, y_train, y_test = (
    np.rollaxis(arr, -1, 0) for arr in (X_train, X_test, y_train, y_test)
)

# Model the simulated data as a function of the spectrogram input, sweeping
# the ridge regularization strength over several orders of magnitude.
alphas = np.logspace(-3, 3, 7)
scores = np.zeros_like(alphas)
models = []
for idx, alpha in enumerate(alphas):
    rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=alpha)
    rf.fit(X_train, y_train)
    # Score predictions on the held-out epoch.
    scores[idx] = rf.score(X_test, y_test).item()
    models.append(rf)

# Lag axis (seconds) shared by every fitted model.
times = rf.delays_ / float(rf.sfreq)

# Choose the model that performed best on the held-out data.
ix_best_alpha = np.argmax(scores)
best_mod = models[ix_best_alpha]
coefs = best_mod.coef_[0]
best_pred = best_mod.predict(X_test)[:, 0]
# Plot the original STRF, and the one that we recovered with modeling.
_, (ax1, ax2) = plt.subplots(
    1,
    2,
    figsize=(6, 3),
    sharey=True,
    sharex=True,
    layout="constrained",
)
# Same symmetric color limits (kwargs) so the two images are directly comparable
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, coefs, **kwargs)
ax1.set_title("Original STRF")
ax2.set_title("Best Reconstructed STRF")
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2]], rotation=45)

# Plot the actual response and the predicted response on a held out stimulus
time_pred = np.arange(best_pred.shape[0]) / sfreq
fig, ax = plt.subplots()
ax.plot(time_pred, y_test, color="k", alpha=0.2, lw=4)
ax.plot(time_pred, best_pred, color="r", lw=1)
ax.set(title="Original and predicted activity", xlabel="Time (s)")
ax.legend(["Original", "Predicted"])
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
0%| | Sample : 1/3450 [00:04<4:42:38, 4.92s/it]
16%|█▌ | Sample : 544/3450 [00:04<00:25, 116.05it/s]
32%|███▏ | Sample : 1107/3450 [00:04<00:09, 241.62it/s]
49%|████▊ | Sample : 1675/3450 [00:04<00:04, 374.00it/s]
65%|██████▌ | Sample : 2243/3450 [00:04<00:02, 512.29it/s]
82%|████████▏ | Sample : 2817/3450 [00:04<00:00, 658.22it/s]
98%|█████████▊| Sample : 3392/3450 [00:05<00:00, 810.81it/s]
100%|██████████| Sample : 3450/3450 [00:05<00:00, 687.97it/s]
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
16%|█▋ | Sample : 561/3450 [00:00<00:00, 35016.51it/s]
33%|███▎ | Sample : 1135/3450 [00:00<00:00, 35455.63it/s]
49%|████▉ | Sample : 1699/3450 [00:00<00:00, 35380.41it/s]
66%|██████▌ | Sample : 2277/3450 [00:00<00:00, 35579.88it/s]
83%|████████▎ | Sample : 2849/3450 [00:00<00:00, 35616.98it/s]
99%|█████████▉| Sample : 3423/3450 [00:00<00:00, 35661.44it/s]
100%|██████████| Sample : 3450/3450 [00:00<00:00, 35609.33it/s]
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
16%|█▋ | Sample : 562/3450 [00:00<00:00, 35121.27it/s]
33%|███▎ | Sample : 1135/3450 [00:00<00:00, 35475.42it/s]
49%|████▉ | Sample : 1707/3450 [00:00<00:00, 35554.38it/s]
66%|██████▌ | Sample : 2267/3450 [00:00<00:00, 35400.86it/s]
82%|████████▏ | Sample : 2834/3450 [00:00<00:00, 35407.19it/s]
99%|█████████▉| Sample : 3414/3450 [00:00<00:00, 35565.40it/s]
100%|██████████| Sample : 3450/3450 [00:00<00:00, 35530.02it/s]
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
16%|█▋ | Sample : 561/3450 [00:00<00:00, 35022.77it/s]
33%|███▎ | Sample : 1128/3450 [00:00<00:00, 35232.61it/s]
49%|████▉ | Sample : 1695/3450 [00:00<00:00, 35292.59it/s]
66%|██████▌ | Sample : 2269/3450 [00:00<00:00, 35440.37it/s]
82%|████████▏ | Sample : 2833/3450 [00:00<00:00, 35394.69it/s]
98%|█████████▊| Sample : 3396/3450 [00:00<00:00, 35354.60it/s]
100%|██████████| Sample : 3450/3450 [00:00<00:00, 35348.02it/s]
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
16%|█▋ | Sample : 565/3450 [00:00<00:00, 35254.64it/s]
33%|███▎ | Sample : 1139/3450 [00:00<00:00, 35556.19it/s]
49%|████▉ | Sample : 1703/3450 [00:00<00:00, 35446.69it/s]
66%|██████▌ | Sample : 2270/3450 [00:00<00:00, 35440.07it/s]
82%|████████▏ | Sample : 2833/3450 [00:00<00:00, 35383.68it/s]
99%|█████████▊| Sample : 3404/3450 [00:00<00:00, 35433.22it/s]
100%|██████████| Sample : 3450/3450 [00:00<00:00, 35423.13it/s]
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
16%|█▌ | Sample : 559/3450 [00:00<00:00, 34898.43it/s]
33%|███▎ | Sample : 1137/3450 [00:00<00:00, 35511.87it/s]
50%|████▉ | Sample : 1713/3450 [00:00<00:00, 35669.45it/s]
66%|██████▌ | Sample : 2284/3450 [00:00<00:00, 35649.11it/s]
83%|████████▎ | Sample : 2848/3450 [00:00<00:00, 35546.13it/s]
99%|█████████▉| Sample : 3419/3450 [00:00<00:00, 35567.47it/s]
100%|██████████| Sample : 3450/3450 [00:00<00:00, 35543.81it/s]
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
16%|█▌ | Sample : 553/3450 [00:00<00:00, 34553.16it/s]
33%|███▎ | Sample : 1124/3450 [00:00<00:00, 35109.54it/s]
49%|████▉ | Sample : 1693/3450 [00:00<00:00, 35251.84it/s]
66%|██████▌ | Sample : 2264/3450 [00:00<00:00, 35364.20it/s]
82%|████████▏ | Sample : 2834/3450 [00:00<00:00, 35417.01it/s]
99%|█████████▉| Sample : 3417/3450 [00:00<00:00, 35602.47it/s]
100%|██████████| Sample : 3450/3450 [00:00<00:00, 35531.42it/s]
Visualize the effects of regularization#
Above we fit a mne.decoding.ReceptiveField model for each of many values of the ridge regularization parameter. Here we will plot the model score as well as the model coefficients for each value, in order to visualize how the coefficients change with different levels of regularization. These issues, as well as the STRF pipeline, are described in detail in [2][3][4].
# Plot model score for each ridge parameter (bottom row), with the STRF
# coefficients fit at that parameter directly above it (top row).
fig = plt.figure(figsize=(10, 4), layout="constrained")
ax = plt.subplot2grid([2, len(alphas)], [1, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores, marker="o", color="r")
ax.annotate(
    "Best parameter",
    (ix_best_alpha, scores[ix_best_alpha]),
    (ix_best_alpha, scores[ix_best_alpha] - 0.1),
    arrowprops={"arrowstyle": "->"},
)
plt.xticks(np.arange(len(alphas)), [f"{ii:.0e}" for ii in alphas])
ax.set(
    xlabel="Ridge regularization value",
    ylabel="Score ($R^2$)",
    xlim=[-0.4, len(alphas) - 0.6],
)

# Plot the STRF of each ridge parameter. Models are stored in the same order
# as `alphas`, so enumerate(models) is sufficient (the previously zipped alpha
# value was unused).
for ii, rf in enumerate(models):
    ax = plt.subplot2grid([2, len(alphas)], [0, ii], 1, 1)
    ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
    plt.xticks([], [])
    plt.yticks([], [])
fig.suptitle("Model coefficients / scores for many ridge parameters", y=1)
Using different regularization types#
In addition to the standard ridge regularization, the mne.decoding.TimeDelayingRidge class also exposes a Laplacian regularization term. This imposes a smoothness constraint across nearby time samples and/or features. Quoting [1] :
Tikhonov [identity] regularization (Equation 5) reduces overfitting by smoothing the TRF estimate in a way that is insensitive to the amplitude of the signal of interest. However, the Laplacian approach (Equation 6) reduces off-sample error whilst preserving signal amplitude (Lalor et al., 2006). As a result, this approach usually leads to an improved estimate of the system’s response (as indexed by MSE) compared to Tikhonov regularization.
# Repeat the regularization sweep, this time with a Laplacian penalty.
scores_lap = np.zeros_like(alphas)
models_lap = []
for idx, reg in enumerate(alphas):
    rf = ReceptiveField(
        tmin,
        tmax,
        sfreq,
        freqs,
        estimator=TimeDelayingRidge(tmin, tmax, sfreq, reg_type="laplacian", alpha=reg),
    )
    rf.fit(X_train, y_train)
    # Score predictions on the held-out epoch.
    scores_lap[idx] = rf.score(X_test, y_test).item()
    models_lap.append(rf)

ix_best_alpha_lap = np.argmax(scores_lap)
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
16%|█▌ | Sample : 546/3450 [00:00<00:00, 34099.52it/s]
32%|███▏ | Sample : 1118/3450 [00:00<00:00, 34918.90it/s]
49%|████▊ | Sample : 1680/3450 [00:00<00:00, 34977.75it/s]
65%|██████▍ | Sample : 2240/3450 [00:00<00:00, 34976.42it/s]
81%|████████ | Sample : 2795/3450 [00:00<00:00, 34906.05it/s]
98%|█████████▊| Sample : 3369/3450 [00:00<00:00, 35077.93it/s]
100%|██████████| Sample : 3450/3450 [00:00<00:00, 35044.89it/s]
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
16%|█▌ | Sample : 557/3450 [00:00<00:00, 34759.08it/s]
32%|███▏ | Sample : 1114/3450 [00:00<00:00, 34764.39it/s]
49%|████▊ | Sample : 1679/3450 [00:00<00:00, 34949.41it/s]
65%|██████▌ | Sample : 2250/3450 [00:00<00:00, 35147.05it/s]
82%|████████▏ | Sample : 2820/3450 [00:00<00:00, 35250.06it/s]
98%|█████████▊| Sample : 3393/3450 [00:00<00:00, 35353.16it/s]
100%|██████████| Sample : 3450/3450 [00:00<00:00, 35317.91it/s]
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
16%|█▋ | Sample : 563/3450 [00:00<00:00, 35130.37it/s]
33%|███▎ | Sample : 1147/3450 [00:00<00:00, 35801.26it/s]
50%|████▉ | Sample : 1715/3450 [00:00<00:00, 35692.76it/s]
67%|██████▋ | Sample : 2301/3450 [00:00<00:00, 35919.59it/s]
83%|████████▎ | Sample : 2876/3450 [00:00<00:00, 35912.07it/s]
100%|██████████| Sample : 3450/3450 [00:00<00:00, 35926.90it/s]
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
16%|█▌ | Sample : 557/3450 [00:00<00:00, 34806.72it/s]
33%|███▎ | Sample : 1137/3450 [00:00<00:00, 35519.90it/s]
50%|████▉ | Sample : 1714/3450 [00:00<00:00, 35690.80it/s]
67%|██████▋ | Sample : 2300/3450 [00:00<00:00, 35937.92it/s]
83%|████████▎ | Sample : 2874/3450 [00:00<00:00, 35917.03it/s]
100%|██████████| Sample : 3450/3450 [00:00<00:00, 36004.94it/s]
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
17%|█▋ | Sample : 581/3450 [00:00<00:00, 36283.77it/s]
33%|███▎ | Sample : 1134/3450 [00:00<00:00, 35382.18it/s]
50%|████▉ | Sample : 1717/3450 [00:00<00:00, 35743.69it/s]
67%|██████▋ | Sample : 2304/3450 [00:00<00:00, 35984.34it/s]
84%|████████▍ | Sample : 2893/3450 [00:00<00:00, 36165.80it/s]
100%|██████████| Sample : 3450/3450 [00:00<00:00, 36235.93it/s]
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
17%|█▋ | Sample : 574/3450 [00:00<00:00, 35835.95it/s]
34%|███▎ | Sample : 1159/3450 [00:00<00:00, 36191.16it/s]
50%|█████ | Sample : 1742/3450 [00:00<00:00, 36274.98it/s]
67%|██████▋ | Sample : 2320/3450 [00:00<00:00, 36230.48it/s]
84%|████████▍ | Sample : 2896/3450 [00:00<00:00, 36172.05it/s]
100%|██████████| Sample : 3450/3450 [00:00<00:00, 36189.08it/s]
Fitting 15 epochs, 20 channels
0%| | Sample : 0/3450 [00:00<?, ?it/s]
16%|█▋ | Sample : 566/3450 [00:00<00:00, 35330.71it/s]
33%|███▎ | Sample : 1148/3450 [00:00<00:00, 35862.86it/s]
50%|█████ | Sample : 1725/3450 [00:00<00:00, 35920.04it/s]
67%|██████▋ | Sample : 2305/3450 [00:00<00:00, 36002.26it/s]
84%|████████▎ | Sample : 2887/3450 [00:00<00:00, 36075.32it/s]
100%|██████████| Sample : 3450/3450 [00:00<00:00, 36146.59it/s]
Compare model performance#
Below we visualize the model performance of each regularization method (ridge vs. Laplacian) for different levels of alpha. As you can see, the Laplacian method performs better in general, because it imposes a smoothness constraint along the time and feature dimensions of the coefficients. This matches the “true” receptive field structure and results in a better model fit.
fig = plt.figure(figsize=(10, 6), layout="constrained")
# Bottom row: scores of both methods on a shared axis (Laplacian in red,
# ridge in dotted gray) so the two sweeps can be compared directly.
ax = plt.subplot2grid([3, len(alphas)], [2, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores_lap, marker="o", color="r")
ax.plot(np.arange(len(alphas)), scores, marker="o", color="0.5", ls=":")
ax.annotate(
    "Best Laplacian",
    (ix_best_alpha_lap, scores_lap[ix_best_alpha_lap]),
    (ix_best_alpha_lap, scores_lap[ix_best_alpha_lap] - 0.1),
    arrowprops={"arrowstyle": "->"},
)
ax.annotate(
    "Best Ridge",
    (ix_best_alpha, scores[ix_best_alpha]),
    (ix_best_alpha, scores[ix_best_alpha] - 0.1),
    arrowprops={"arrowstyle": "->"},
)
plt.xticks(np.arange(len(alphas)), [f"{ii:.0e}" for ii in alphas])
ax.set(
    xlabel="Laplacian regularization value",
    ylabel="Score ($R^2$)",
    xlim=[-0.4, len(alphas) - 0.6],
)

# Plot the STRF of each ridge parameter (row 0: Laplacian, row 1: ridge)
xlim = times[[0, -1]]
# NOTE(review): i_alpha is unused in this loop body
for ii, (rf_lap, rf, i_alpha) in enumerate(zip(models_lap, models, alphas)):
    ax = plt.subplot2grid([3, len(alphas)], [0, ii], 1, 1)
    ax.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
    ax.set(xticks=[], yticks=[], xlim=xlim)
    if ii == 0:
        ax.set(ylabel="Laplacian")
    ax = plt.subplot2grid([3, len(alphas)], [1, ii], 1, 1)
    ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
    ax.set(xticks=[], yticks=[], xlim=xlim)
    if ii == 0:
        ax.set(ylabel="Ridge")
fig.suptitle("Model coefficients / scores for laplacian regularization", y=1)
Plot the original STRF, and the one that we recovered with modeling.
# Select the best-scoring model of each regularization type for a side-by-side
# comparison against the known ("true") simulated STRF.
rf = models[ix_best_alpha]
rf_lap = models_lap[ix_best_alpha_lap]
_, (ax1, ax2, ax3) = plt.subplots(
    1,
    3,
    figsize=(9, 3),
    sharey=True,
    sharex=True,
    layout="constrained",
)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
ax3.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
ax1.set_title("Original STRF")
ax2.set_title("Best Ridge STRF")
ax3.set_title("Best Laplacian STRF")
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2, ax3]], rotation=45)
References#
Total running time of the script: (0 minutes 12.807 seconds)