# Preprocessing MEG data
```python
%matplotlib qt
```
```python
import mne
import numpy as np
import matplotlib.pyplot as plt
from mne.io import read_raw_fif, concatenate_raws
from mne.preprocessing import ICA
```
```python
fif_file = 'derivatives_meg_derivatives_sub-16_ses-meg_meg_sub-16_ses-meg_task-facerecognition_run-01_proc-sss_meg.fif'
data_path = '/Users/kevinhsu/Documents/D/000_course/my2020_neural_decoding/facerecognition'
#data_path + '/sub-16' + '/ses-meg'+ '/meg' + '/' + fif_file
raw = mne.io.read_raw_fif(data_path + '/sub-16' + '/ses-meg'+ '/meg' + '/' + fif_file)
raw.load_data().pick_types(meg=True, stim=True).filter(None, 100, phase='zero-double').resample(500)  # low-pass at 100 Hz, then downsample to 500 Hz
print(raw)
```
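As a quick sanity check (not part of the original pipeline), the filtered run can be inspected to confirm the 100 Hz low-pass and the resampling to 500 Hz took effect. `plot_psd` is the long-standing MNE API; newer releases prefer `raw.compute_psd().plot()`.
```python
# the power spectrum should roll off above the 100 Hz low-pass,
# and the sampling rate should now be 500 Hz
print(raw.info['sfreq'])  # expect 500.0 after resample(500)
raw.plot_psd(fmax=200, average=True)
```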
```python
sid = 16
n_runs = 6 # 6 runs for the full data set; use fewer to speed up computation
#fname = '/Volumes/Neurolang_2/open_neuro/Sub-%.d/ses-meg/meg/sub-%.2d_ses-meg_meg_sub-%.2d_ses-meg_task-facerecognition_run-%.2d_meg.fif'
fname = '/Users/kevinhsu/Documents/D/000_course/my2020_neural_decoding/facerecognition/sub-%.2d/ses-meg/meg/derivatives_meg_derivatives_sub-%.2d_ses-meg_meg_sub-%.2d_ses-meg_task-facerecognition_run-%.2d_proc-sss_meg.fif'
raws = [read_raw_fif(fname % (sid, sid, sid, i + 1), verbose='error')
        for i in range(n_runs)]  # ignore filename warnings
raw = concatenate_raws(raws)
raw.load_data().pick_types(meg=True, stim=True).filter(None, 100, phase='zero-double').resample(500)  # same low-pass and resampling as above
print(raw)
```
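When runs are concatenated, MNE marks the joins with boundary annotations so that filtering and epoching do not straddle two runs. A small, optional check (not in the original notebook) to confirm the runs were stitched together:
```python
# concatenate_raws inserts 'BAD boundary' / 'EDGE boundary' annotations at each join
print(raw.annotations)
n_boundaries = sum('boundary' in desc for desc in raw.annotations.description)
print(f'{n_boundaries} boundary annotations across {n_runs} concatenated runs')
```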
## find events
```python
events = mne.find_events(raw, stim_channel='STI101')  # STI101 is the composite trigger channel in this dataset
print(events)
```
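To see how the triggers are distributed over the recording, the events array can be visualized. A minimal optional sketch using the standard `mne.viz.plot_events` function:
```python
# one marker per event, colored by trigger value, with time on the x-axis
fig = mne.viz.plot_events(events, sfreq=raw.info['sfreq'],
                          first_samp=raw.first_samp)
```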
## create epoch object
```python
# stim_type    trigger  count
# Famous          5      150
#                 6       81
#                 7       63
# Scrambled      17      150
#                18       70
#                19       77
# Unfamiliar     13      150
#                14       65
#                15       82
# the condition/trigger counts above are copied here for reference (3x3 design)
event_id = {'Famous/1st': 5, 'Famous/2nd_lag0': 6, 'Famous/2nd_lagN': 7,
'Scrambled/1st': 17, 'Scrambled/2nd_lag0': 18, 'Scrambled/2nd_lagN': 19,
'Unfamiliar/1st': 13, 'Unfamiliar/2nd_lag0': 14, 'Unfamiliar/2nd_lagN': 15}
tmin = -0.1  # pre-stimulus interval (in seconds)
tmax = 1.0   # post-stimulus interval (in seconds)
# epoch window: 0.1 s before stimulus onset to 1 s after onset
# 0.5 s would be enough to see the N1, but later effects need a longer window
picks = mne.pick_types(raw.info, eeg=False, stim=False)  # meg=True by default, so only MEG channels are kept
# use mag, grad, planar1, planar2, etc. to keep a specific sensor type; exclude=[] can drop specific channels
# a pilot run showed that many epochs were rejected because of EEG, so the EEG channels are dropped entirely
# artifact rejection is applied next
baseline = (None, 0)  # baseline correction from tmin to 0 (i.e. -0.1 s to 0 s)
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=1.5e-12, # T (magnetometers)
)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                    baseline=baseline, picks=picks,  # picks: channels to keep in the epochs
                    reject=reject, preload=True)
```
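After epoching with peak-to-peak rejection it is worth checking how many trials survived per condition and why epochs were dropped. A minimal, optional sketch using the epochs object created above:
```python
# number of epochs kept per condition (the '/' tags allow grouped selection)
for cond in ('Famous', 'Scrambled', 'Unfamiliar'):
    print(cond, len(epochs[cond]))

# summary of why epochs were rejected (e.g. which channel exceeded the threshold)
epochs.plot_drop_log()
```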
## ICA
```python
ica = ICA(n_components=15, random_state=97) #max_iter=800
ica.fit(raw)
ica.plot_sources(raw)
ica.plot_components()
```
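Before deciding which components to exclude, each candidate can be inspected in more detail (topography, spectrum, variance across epochs). A hedged sketch; the component indices [1, 2] simply mirror the exclusion in the next block and are specific to this particular fit:
```python
# detailed view of the candidate artifact components
ica.plot_properties(raw, picks=[1, 2])
```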
## remove components
```python
ica.exclude = [1, 2]  # details on how these components were picked are omitted here
orig_raw = raw.copy()
raw.load_data()
ica.apply(raw)
# note: the epochs above were extracted before ICA cleaning;
# re-run the epoching after ica.apply() if the cleaned data should go into the epochs
```
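To verify that removing components 1 and 2 took out the artifact without distorting the rest of the signal, the cleaned and original data can be overlaid. An optional check; `plot_overlay` applies the exclusion on the fly to the instance passed in:
```python
# compare the original (uncleaned) raw with the same data after zeroing components 1 and 2
ica.plot_overlay(orig_raw, exclude=[1, 2])
```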
## save epoch objects
```python
epochs.save('saved-audiovisual-epo.fif', overwrite=True)
epochs_from_file = mne.read_epochs('saved-audiovisual-epo.fif', preload=False)
```
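A small round-trip check (not part of the original notebook) confirms that the saved file contains the same trials as the in-memory object:
```python
# the reloaded epochs should have the same events as the epochs that were saved
print(epochs_from_file)
assert len(epochs_from_file) == len(epochs)
assert (epochs_from_file.events == epochs.events).all()
```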