"""Check whether a file format is supported by BIDS and then load it."""# Authors: The MNE-BIDS developers# SPDX-License-Identifier: BSD-3-Clauseimportjsonimportosimportos.pathasopimportrefromdatetimeimportdatetime,timedelta,timezonefromdifflibimportget_close_matchesfrompathlibimportPathimportmneimportnumpyasnpfrommneimportevents_from_annotations,io,pick_channels_regexp,read_eventsfrommne.coregimportfit_matched_pointsfrommne.transformsimportapply_transfrommne.utilsimportget_subjects_dir,loggerfrommne_bids.configimport(ALLOWED_DATATYPE_EXTENSIONS,ANNOTATIONS_TO_KEEP,_map_options,reader,)frommne_bids.digimport_read_dig_bidsfrommne_bids.pathimport(BIDSPath,_find_matching_sidecar,_infer_datatype,_parse_ext,get_bids_path_from_fname,)frommne_bids.tsv_handlerimport_drop,_from_tsvfrommne_bids.utilsimport_get_ch_type_mapping,_import_nibabel,verbose,warndef_read_raw(raw_path,electrode=None,hsp=None,hpi=None,allow_maxshield=False,config_path=None,**kwargs,):"""Read a raw file into MNE, making inferences based on extension."""_,ext=_parse_ext(raw_path)# KIT systemsifextin[".con",".sqd"]:raw=io.read_raw_kit(raw_path,elp=electrode,hsp=hsp,mrk=hpi,preload=False,**kwargs)# BTi systemselifext==".pdf":raw=io.read_raw_bti(pdf_fname=raw_path,config_fname=config_path,head_shape_fname=hsp,preload=False,**kwargs,)elifext==".fif":raw=reader[ext](raw_path,allow_maxshield,**kwargs)elifextin[".ds",".vhdr",".set",".edf",".bdf",".EDF",".snirf",".cdt"]:raw_path=Path(raw_path)raw=reader[ext](raw_path,**kwargs)# MEF and NWB are allowed, but not yet implementedelifextin[".mef",".nwb"]:raiseValueError(f'Got "{ext}" as extension. 
This is an allowed 'f"extension but there is no IO support for this "f"file format yet.")# No supported data found ...# ---------------------------else:raiseValueError(f"Raw file name extension must be one "f"of {ALLOWED_DATATYPE_EXTENSIONS}\n"f"Got {ext}")returnrawdef_read_events(events,event_id,raw,bids_path=None):"""Retrieve events (for use in *_events.tsv) from FIFF/array & Annotations. Parameters ---------- events : path-like | np.ndarray | None If a string, a path to an events file. If an array, an MNE events array (shape n_events, 3). If None, events will be generated from ``raw.annotations``. event_id : dict | None The event id dict used to create a 'trial_type' column in events.tsv, mapping a description key to an integer-valued event code. raw : mne.io.Raw The data as MNE-Python Raw object. bids_path : BIDSPath | None Can be used to determine if the data is a resting-state or empty-room recording, and will suppress a warning about missing events in this case. Returns ------- all_events : np.ndarray, shape = (n_events, 3) The first column contains the event time in samples and the third column contains the event id. The second column is ignored for now but typically contains the value of the trigger channel either immediately before the event or immediately after. all_dur : np.ndarray, shape (n_events,) The event durations in seconds. all_desc : dict A dictionary with the keys corresponding to the event descriptions and the values to the event IDs. 
"""# retrieve eventsifisinstance(events,np.ndarray):ifevents.ndim!=2:raiseValueError("Events must have two dimensions, "f"found {events.ndim}")ifevents.shape[1]!=3:raiseValueError("Events must have second dimension of length 3, "f"found {events.shape[1]}")events=eventselifeventsisNone:events=np.empty(shape=(0,3),dtype=int)else:events=read_events(events).astype(int)ifraw.annotations:ifevent_idisNone:logger.info("The provided raw data contains annotations, but you did not "'pass an "event_id" mapping from annotation descriptions to '"event codes. We will generate arbitrary event codes. "'To specify custom event codes, please pass "event_id".')else:special_annots={"BAD_ACQ_SKIP"}desc_without_id=sorted(set(raw.annotations.description)-set(event_id.keys()))# auto-add entries to `event_id` for "special" annotation values# (but only if they're needed)ifset(desc_without_id)&special_annots:forannotinspecial_annots:# use a value guaranteed to not be in useevent_id={annot:max(event_id.values())+90000}|event_id# remove the "special" annots from the list of problematic annotsdesc_without_id=sorted(set(desc_without_id)-special_annots)ifdesc_without_id:raiseValueError(f"The provided raw data contains annotations, but "f'"event_id" does not contain entries for all annotation 'f"descriptions. The following entries are missing: "f'{", ".join(desc_without_id)}')# If we have events, convert them to Annotations so they can be easily# merged with existing Annotations.ifevents.size>0andevent_idisnotNone:ids_without_desc=set(events[:,2])-set(event_id.values())ifids_without_desc:raiseValueError(f"No description was specified for the following event(s): "f'{", ".join([str(x)forxinsorted(ids_without_desc)])}. 'f"Please add them to the event_id dictionary, or drop them "f"from the events array.")# Append events to raw.annotations. 
All event onsets are relative to# measurement beginning.id_to_desc_map=dict(zip(event_id.values(),event_id.keys()))# We don't pass `first_samp`, as set_annotations() below will take# care of this shift automatically.new_annotations=mne.annotations_from_events(events=events,sfreq=raw.info["sfreq"],event_desc=id_to_desc_map,orig_time=raw.annotations.orig_time,)raw=raw.copy()# Don't alter the original.annotations=raw.annotations.copy()# We use `+=` here because `Annotations.__iadd__()` does the right# thing and also performs a sanity check on `Annotations.orig_time`.annotations+=new_annotationsraw.set_annotations(annotations)delid_to_desc_map,annotations,new_annotationsifevents.size>0andevent_idisNone:new_annotations=mne.annotations_from_events(events=events,sfreq=raw.info["sfreq"],orig_time=raw.annotations.orig_time,)raw=raw.copy()# Don't alter the original.annotations=raw.annotations.copy()# We use `+=` here because `Annotations.__iadd__()` does the right# thing and also performs a sanity check on `Annotations.orig_time`.annotations+=new_annotationsraw.set_annotations(annotations)delannotations,new_annotations# Now convert the Annotations to events.all_events,all_desc=events_from_annotations(raw,event_id=event_id,regexp=None,# Include `BAD_` and `EDGE_` Annotations, too.)all_dur=raw.annotations.duration# Warn about missing events if not rest or empty-room dataif(all_events.size==0andbids_path.taskisnotNone)and(notbids_path.task.startswith("rest")andnot(bids_path.subject=="emptyroom"andbids_path.task=="noise")):warn("No events found or provided. Please add annotations to the raw ""data, or provide the events and event_id parameters. 
For ""resting state data, BIDS recommends naming the task using "'labels beginning with "rest".')returnall_events,all_dur,all_descdef_verbose_list_index(lst,val,*,allow_all=False):# try to "return lst.index(val)" for list of str, but be more# informative/verbose when it failstry:returnlst.index(val)exceptValueErrorasexc:# Use str cast here to deal with pathlib.Path instancesextra=get_close_matches(str(val),[str(ll)forllinlst])ifallow_allandnotextra:extra=lstextra=f". Did you mean one of {extra}?"ifextraelse""raiseValueError(f"{exc}{extra}")fromNonedef_handle_participants_reading(participants_fname,raw,subject):participants_tsv=_from_tsv(participants_fname)subjects=participants_tsv["participant_id"]row_ind=_verbose_list_index(subjects,subject,allow_all=True)raw.info["subject_info"]=dict()# start from scratch# set data from participants tsv into subject_info# TODO: Could potentially use "comment" someday to store other options e.g. in JSON# https://github.com/mne-tools/fiff-constants/blob/e27f68cbf74dbfc5193ad429cc77900a59475181/DictionaryTags.txt#L369allowed_keys=set(""" id his_id last_name first_name middle_name birthday sex hand weight height """.strip().split())bad_key_vals=list()forcol_name,valueinparticipants_tsv.items():orig_value=value=value[row_ind]ifcol_namein("sex","hand"):value=_map_options(what=col_name,key=value,fro="bids",to="mne")# We don't know how to translate to MNE, so skip.ifvalueisNone:ifcol_name=="sex":info_str="subject sex"else:info_str="subject handedness"bad_key_vals.append((col_name,orig_value,info_str))elifcol_namein("height","weight"):try:value=float(value)exceptValueError:value=Noneelifcol_name=="age":ifraw.info["meas_date"]isNone:value=NoneelifvalueisnotNone:try:value=float(value)exceptException:value=Noneelse:value=(raw.info["meas_date"]-timedelta(days=int(np.ceil(365.25*value)))).date()else:ifvalue=="n/a":value=None# adjust keys to match MNE 
nomenclaturekey=col_nameifcol_name=="participant_id":key="his_id"elifcol_name=="age":key="birthday"ifkeynotinallowed_keys:bad_key_vals.append((col_name,orig_value,None))continue# add data into raw.InfoifvalueisnotNone:assertkeynotinraw.info["subject_info"]raw.info["subject_info"][key]=valueifbad_key_vals:warn_str="Unable to map the following column(s) to to MNE:"forcol_name,orig_value,info_strinbad_key_vals:warn_str+=f"\n{col_name}"ifinfo_strisnotNone:warn_str+=f" ({info_str})"warn_str+=f": {orig_value}"warn(warn_str)returnrawdef_handle_scans_reading(scans_fname,raw,bids_path):"""Read associated scans.tsv and set meas_date."""scans_tsv=_from_tsv(scans_fname)fname=bids_path.fpath.nameiffname.endswith(".pdf"):# for BTi files, the scan is an entire directoryfname=fname.split(".")[0]# get the row corresponding to the file# use string concatenation instead of os.path# to work nicely with windowsdata_fname=Path(bids_path.datatype)/fnamefnames=scans_tsv["filename"]fnames=[Path(fname)forfnameinfnames]if"acq_time"inscans_tsv:acq_times=scans_tsv["acq_time"]else:acq_times=["n/a"]*len(fnames)# There are three possible extensions for BrainVision# First gather all the possible extensionsacq_suffixes=set(fname.suffixforfnameinfnames)# Add the filename extension for the bids folderacq_suffixes.add(Path(data_fname).suffix)ifall(suffixin(".vhdr",".eeg",".vmrk")forsuffixinacq_suffixes):ext=fnames[0].suffixdata_fname=Path(data_fname).with_suffix(ext)row_ind=_verbose_list_index(fnames,data_fname)# check whether all split files have the same acq_time# and throw an error if they 
don'tif"_split-"infname:split_idx=fname.find("split-")pattern=re.compile(bids_path.datatype+"/"+bids_path.basename[:split_idx]+r"split-\d+_"+bids_path.datatype+bids_path.fpath.suffix)split_fnames=list(filter(lambdax:pattern.match(x.as_posix()),fnames))split_acq_times=[]forsplit_finsplit_fnames:split_acq_times.append(acq_times[_verbose_list_index(fnames,split_f)])iflen(set(split_acq_times))!=1:raiseValueError("Split files must have the same acq_time.")# extract the acquisition time from scans fileacq_time=acq_times[row_ind]ifacq_time!="n/a":# BIDS allows the time to be stored in UTC with a zero time-zone offset, as# indicated by a trailing "Z" in the datetime string. If the "Z" is missing, the# time is represented as "local" time. We have no way to know what the local# time zone is at the *acquisition* site; so we simply assume the same time zone# as the user's current system (this is what the spec demands anyway).acq_time_is_utc=acq_time.endswith("Z")# microseconds part in the acquisition time is optional; add it if missingif"."notinacq_time:ifacq_time_is_utc:acq_time=acq_time.replace("Z",".0Z")else:acq_time+=".0"date_format="%Y-%m-%dT%H:%M:%S.%f"ifacq_time_is_utc:date_format+="Z"acq_time=datetime.strptime(acq_time,date_format)ifacq_time_is_utc:# Enforce setting timezone to UTC without additonal conversionacq_time=acq_time.replace(tzinfo=timezone.utc)else:# Convert time offset to UTCacq_time=acq_time.astimezone(timezone.utc)logger.debug(f"Loaded {scans_fname} scans file to set "f"acq_time as {acq_time}.")# First set measurement date to None and then call call anonymize() to# remove any traces of the measurement date we wish# to replace – it might lurk out in more places than just# raw.info['meas_date'], e.g. in info['meas_id]['secs'] and in# info['file_id'], which are not affected by set_meas_date().# The combined use of set_meas_date(None) and anonymize() is suggested# by the MNE documentation, and in fact we cannot load e.g. 
OpenNeuro# ds003392 without this combination.raw.set_meas_date(None)raw.anonymize(daysback=None,keep_his=True)raw.set_meas_date(acq_time)returnrawdef_handle_info_reading(sidecar_fname,raw):"""Read associated sidecar JSON and populate raw. Handle PowerLineFrequency of recording. """withopen(sidecar_fname,encoding="utf-8-sig")asfin:sidecar_json=json.load(fin)# read in the sidecar JSON's and raw object's line frequencyjson_linefreq=sidecar_json.get("PowerLineFrequency")raw_linefreq=raw.info["line_freq"]# If both are defined, warn if there is a conflict, else all is fineif(json_linefreqisnotNone)and(raw_linefreqisnotNone):ifjson_linefreq!=raw_linefreq:msg=(f"Line frequency in sidecar JSON does not match the info "f"data structure of the mne.Raw object:\n"f"Sidecar JSON is -> {json_linefreq}\n"f"Raw is -> {raw_linefreq}\n\n")ifjson_linefreq=="n/a":msg+="Defaulting to the info from mne.Raw object."raw.info["line_freq"]=raw_linefreqelse:msg+="Defaulting to the info from sidecar JSON."raw.info["line_freq"]=json_linefreqwarn(msg)# Else, try to use JSON, fall back on mne.Rawelif(json_linefreqisnotNone)and(json_linefreq!="n/a"):raw.info["line_freq"]=json_linefreqelse:pass# line freq is either defined or None in mne.Raw# get cHPI infochpi=sidecar_json.get("ContinuousHeadLocalization")ifchpiisNone:# no cHPI info in the sidecar – leave raw.info unchangedpasselifchpiisTrue:frommne.io.ctfimportRawCTFfrommne.io.kit.kitimportRawKITmsg=("Cannot verify that the cHPI frequencies from ""the MEG JSON sidecar file correspond to the raw data{}")ifisinstance(raw,RawCTF):# Pick channels corresponding to the cHPI positionshpi_picks=pick_channels_regexp(raw.info["ch_names"],"HLC00[123][123].*")iflen(hpi_picks)!=9:raiseValueError(f"Could not find all cHPI channels that we expected for "f"CTF data. 
Expected: 9, found: {len(hpi_picks)}")logger.info(msg.format(" for CTF files."))elifisinstance(raw,RawKIT):logger.info(msg.format(" for KIT files."))elif"HeadCoilFrequency"insidecar_json:hpi_freqs_json=sidecar_json["HeadCoilFrequency"]try:hpi_freqs_raw,_,_=mne.chpi.get_chpi_info(raw.info)exceptValueError:logger.info(msg.format("."))else:# XXX: Set chpi info in mne.Raw to what is in the sidecarifnotnp.allclose(hpi_freqs_json,hpi_freqs_raw):warn(f"The cHPI coil frequencies in the sidecar file "f"{sidecar_fname}:\n{hpi_freqs_json}\n "f"differ from what is stored in the raw data:\n"f" {hpi_freqs_raw}.\n"f"Defaulting to the info from mne.Raw object.")else:addmsg=(".\n(Because no 'HeadCoilFrequency' data was found in the sidecar.)")logger.info(msg.format(addmsg))else:ifraw.info["hpi_subsystem"]:logger.info("Dropping cHPI information stored in raw data, ""following specification in sidecar file")withraw.info._unlock():raw.info["hpi_subsystem"]=Noneraw.info["hpi_meas"]=[]returnrawdef_handle_events_reading(events_fname,raw):"""Read associated events.tsv and populate raw. Handle onset, duration, and description of each event. """logger.info(f"Reading events from {events_fname}.")events_dict=_from_tsv(events_fname)# Get the descriptions of the eventsif"trial_type"inevents_dict:trial_type_col_name="trial_type"elif"stim_type"inevents_dict:# Backward-compat with old datasets.trial_type_col_name="stim_type"warn(f'The events file, {events_fname}, contains a "stim_type" 'f'column. 
This column should be renamed to "trial_type" for 'f"BIDS compatibility.")else:trial_type_col_name=Noneiftrial_type_col_nameisnotNone:# Drop events unrelated to a trial typeevents_dict=_drop(events_dict,"n/a",trial_type_col_name)if"value"inevents_dict:# Check whether the `trial_type` <> `value` mapping is unique.trial_types=events_dict[trial_type_col_name]values=np.asarray(events_dict["value"],dtype=str)fortrial_typeinnp.unique(trial_types):idx=np.where(trial_type==np.atleast_1d(trial_types))[0]matching_values=values[idx]iflen(np.unique(matching_values))>1:# Event type descriptors are ambiguous; create hierarchical# event descriptors.logger.info(f'The event "{trial_type}" refers to multiple event 'f"values. Creating hierarchical event names.")foriiinidx:value=values[ii]value="na"ifvalue=="n/a"elsevaluenew_name=f"{trial_type}/{value}"logger.info(f" Renaming event: {trial_type} -> "f"{new_name}")trial_types[ii]=new_namedescriptions=np.asarray(trial_types,dtype=str)else:descriptions=np.asarray(events_dict[trial_type_col_name],dtype=str)elif"value"inevents_dict:# If we don't have a proper description of the events, perhaps we have# at least an event value?# Drop events unrelated to valueevents_dict=_drop(events_dict,"n/a","value")descriptions=np.asarray(events_dict["value"],dtype=str)# Worst case, we go with 'n/a' for all eventselse:descriptions=np.array(["n/a"]*len(events_dict["onset"]),dtype=str)# Deal with "n/a" strings before converting to floatonsets=np.array([np.nanifon=="n/a"elseonforoninevents_dict["onset"]],dtype=float)durations=np.array([0ifdu=="n/a"elseduforduinevents_dict["duration"]],dtype=float)# Keep only events where onset is knowngood_events_idx=~np.isnan(onsets)onsets=onsets[good_events_idx]durations=durations[good_events_idx]descriptions=descriptions[good_events_idx]delgood_events_idx# Add events as Annotations, but keep essential Annotations present in# raw 
fileannot_from_raw=raw.annotations.copy()annot_from_events=mne.Annotations(onset=onsets,duration=durations,description=descriptions)raw.set_annotations(annot_from_events)annot_idx_to_keep=[idxforidx,descrinenumerate(annot_from_raw.description)ifdescrinANNOTATIONS_TO_KEEP]annot_to_keep=annot_from_raw[annot_idx_to_keep]iflen(annot_to_keep):raw.set_annotations(raw.annotations+annot_to_keep)returnrawdef_get_bads_from_tsv_data(tsv_data):"""Extract names of bads from data read from channels.tsv."""idx=[]forch_idx,statusinenumerate(tsv_data["status"]):ifstatus.lower()=="bad":idx.append(ch_idx)bads=[tsv_data["name"][i]foriinidx]returnbadsdef_handle_channels_reading(channels_fname,raw):"""Read associated channels.tsv and populate raw. Updates status (bad) and types of channels. """logger.info(f"Reading channel info from {channels_fname}.")channels_dict=_from_tsv(channels_fname)ch_names_tsv=channels_dict["name"]# Now we can do some work.# The "type" column is mandatory in BIDS. We can use it to set channel# types in the raw data using a mapping between channel typeschannel_type_bids_mne_map=dict()# Get the best mapping we currently have from BIDS to MNE nomenclaturebids_to_mne_ch_types=_get_ch_type_mapping(fro="bids",to="mne")ch_types_json=channels_dict["type"]forch_name,ch_typeinzip(ch_names_tsv,ch_types_json):# We don't map MEG channels for now, as there's no clear 1:1 mapping# from BIDS to MNE coil types.ifch_type.upper()in("MEGGRADAXIAL","MEGMAG","MEGREFGRADAXIAL","MEGGRADPLANAR","MEGREFMAG","MEGOTHER",):continue# Try to map from BIDS nomenclature to MNE, leave channel type# untouched if we are uncertainupdated_ch_type=bids_to_mne_ch_types.get(ch_type,None)ifupdated_ch_typeisNone:# XXX Try again with uppercase spelling – this should be removed# XXX once https://github.com/bids-standard/bids-validator/issues/1018# XXX has been resolved.# XXX x-ref 
https://github.com/mne-tools/mne-bids/issues/481updated_ch_type=bids_to_mne_ch_types.get(ch_type.upper(),None)ifupdated_ch_typeisnotNone:msg=("The BIDS dataset contains channel types in lowercase ""spelling. This violates the BIDS specification and ""will raise an error in the future.")warn(msg)ifupdated_ch_typeisNone:# We don't have an appropriate mapping, so make it a "misc" channelchannel_type_bids_mne_map[ch_name]="misc"warn(f'No BIDS -> MNE mapping found for channel type "{ch_type}". 'f'Type of channel "{ch_name}" will be set to "misc".')else:# We found a mapping, so use itchannel_type_bids_mne_map[ch_name]=updated_ch_type# Special handling for (synthesized) stimulus channelsynthesized_stim_ch_name="STI 014"if(synthesized_stim_ch_nameinraw.ch_namesandsynthesized_stim_ch_namenotinch_names_tsv):logger.info(f'The stimulus channel "{synthesized_stim_ch_name}" is present in 'f"the raw data, but not included in channels.tsv. Removing the "f"channel.")raw.drop_channels([synthesized_stim_ch_name])# Rename channels in loaded Raw to match those read from the BIDS sidecariflen(ch_names_tsv)!=len(raw.ch_names):warn(f"The number of channels in the channels.tsv sidecar file "f"({len(ch_names_tsv)}) does not match the number of channels "f"in the raw data file ({len(raw.ch_names)}). 
Will not try to "f"set channel names.")else:raw.rename_channels(dict(zip(raw.ch_names,ch_names_tsv)))# Set the channel types in the raw data according to channels.tsvchannel_type_bids_mne_map_available_channels={ch_name:ch_typeforch_name,ch_typeinchannel_type_bids_mne_map.items()ifch_nameinraw.ch_names}ch_diff=set(channel_type_bids_mne_map.keys())-set(channel_type_bids_mne_map_available_channels.keys())ifch_diff:warn(f"Cannot set channel type for the following channels, as they "f'are missing in the raw data: {", ".join(sorted(ch_diff))}')raw.set_channel_types(channel_type_bids_mne_map_available_channels,on_unit_change="ignore")# Set bad channels based on _channels.tsv sidecarif"status"inchannels_dict:bads_tsv=_get_bads_from_tsv_data(channels_dict)bads_avail=[ch_nameforch_nameinbads_tsvifch_nameinraw.ch_names]ch_diff=set(bads_tsv)-set(bads_avail)ifch_diff:warn(f'Cannot set "bad" status for the following channels, as 'f"they are missing in the raw data: "f'{", ".join(sorted(ch_diff))}')raw.info["bads"]=bads_availreturnraw
@verbose
def read_raw_bids(bids_path, extra_params=None, verbose=None):
    """Read BIDS compatible data.

    Will attempt to read associated events.tsv and channels.tsv files to
    populate the returned raw object with raw.annotations and raw.info['bads'].

    Parameters
    ----------
    bids_path : BIDSPath
        The file to read. The :class:`mne_bids.BIDSPath` instance passed here
        **must** have the ``.root`` attribute set. The ``.datatype`` attribute
        **may** be set. If ``.datatype`` is not set and only one data type
        (e.g., only EEG or MEG data) is present in the dataset, it will be
        selected automatically.

        .. note::
           If ``bids_path`` points to a symbolic link of a ``.fif`` file
           without a ``split`` entity, the link will be resolved before
           reading.

    extra_params : None | dict
        Extra parameters to be passed to MNE read_raw_* functions.
        Note that the ``exclude`` parameter, which is supported by some
        MNE-Python readers, is not supported; instead, you need to subset
        your channels **after** reading.
    %(verbose)s

    Returns
    -------
    raw : mne.io.Raw
        The data as MNE-Python Raw object.

    Raises
    ------
    RuntimeError
        If multiple recording data types are present in the dataset, but
        ``datatype=None``.
    RuntimeError
        If more than one data files exist for the specified recording.
    RuntimeError
        If no data file in a supported format can be located.
    ValueError
        If the specified ``datatype`` cannot be found in the dataset.

    """
    if not isinstance(bids_path, BIDSPath):
        raise RuntimeError(
            '"bids_path" must be a BIDSPath object. Please '
            "instantiate using mne_bids.BIDSPath()."
        )

    bids_path = bids_path.copy()
    sub = bids_path.subject
    ses = bids_path.session
    bids_root = bids_path.root
    datatype = bids_path.datatype
    suffix = bids_path.suffix

    # check root available
    if bids_root is None:
        raise ValueError(
            'The root of the "bids_path" must be set. '
            'Please use `bids_path.update(root="<root>")` '
            "to set the root of the BIDS folder to read."
        )

    # infer the datatype and suffix if they are not present in the BIDSPath
    if datatype is None:
        datatype = _infer_datatype(root=bids_root, sub=sub, ses=ses)
        bids_path.update(datatype=datatype)
    if suffix is None:
        bids_path.update(suffix=datatype)

    if bids_path.fpath.suffix == ".pdf":
        bids_raw_folder = bids_path.directory / f"{bids_path.basename}"
        # try to find the processed data file ("pdf")
        # see: https://www.fieldtriptoolbox.org/getting_started/bti/
        bti_pdf_patterns = ["0", "c,rf*", "hc,rf*", "e,rf*"]
        pdf_list = []
        for pattern in bti_pdf_patterns:
            pdf_list += sorted(bids_raw_folder.glob(pattern))
        if len(pdf_list) == 0:
            raise RuntimeError(
                "Cannot find BTi 'processed data file' (pdf). Please open an issue on "
                "the mne-bids repository to discuss with the developers:\n\n"
                "https://github.com/mne-tools/mne-bids/issues/new/choose\n\n"
                f"No matches for following patterns:\n\n{bti_pdf_patterns}\n\n"
                f"In: {bids_raw_folder}"
            )
        elif len(pdf_list) > 1:  # pragma: no cover
            # `logger.warn` is a deprecated alias of `logger.warning`
            logger.warning(
                "Found more than one BTi 'processed data file' (pdf). "
                f"Picking:\n\n{pdf_list[0]}\n\nout of the options:\n\n"
                f"{pdf_list}\n\n"
            )
        raw_path = pdf_list[0]
        config_path = bids_raw_folder / "config"
    else:
        raw_path = bids_path.fpath
        # Resolve for FIFF files
        if (
            raw_path.suffix == ".fif"
            and bids_path.split is None
            and raw_path.is_symlink()
        ):
            target_path = raw_path.resolve()
            logger.info(f"Resolving symbolic link: {raw_path} -> {target_path}")
            raw_path = target_path
        config_path = None

    # Special-handle EDF filenames: we accept upper- and lower-case extensions
    if raw_path.suffix.lower() == ".edf":
        for extension in (".edf", ".EDF"):
            candidate_path = raw_path.with_suffix(extension)
            if candidate_path.exists():
                raw_path = candidate_path
                break

    if not raw_path.exists():
        options = os.listdir(bids_path.directory)
        matches = get_close_matches(bids_path.basename, options)
        msg = f"File does not exist:\n{raw_path}"
        if matches:
            msg += (
                "\nDid you mean one of:\n"
                + "\n".join(matches)
                + "\ninstead of:\n"
                + bids_path.basename
            )
        raise FileNotFoundError(msg)
    if config_path is not None and not config_path.exists():
        raise FileNotFoundError(f"config directory not found: {config_path}")

    if extra_params is None:
        extra_params = dict()
    elif "exclude" in extra_params:
        del extra_params["exclude"]
        logger.info('"exclude" parameter is not supported by read_raw_bids')

    if raw_path.suffix == ".fif" and "allow_maxshield" not in extra_params:
        extra_params["allow_maxshield"] = True
    raw = _read_raw(
        raw_path,
        electrode=None,
        hsp=None,
        hpi=None,
        config_path=config_path,
        **extra_params,
    )

    # Try to find an associated events.tsv to get information about the
    # events in the recorded data
    if (
        bids_path.subject == "emptyroom" and bids_path.task == "noise"
    ) or bids_path.task.startswith("rest"):
        on_error = "ignore"
    else:
        on_error = "warn"

    events_fname = _find_matching_sidecar(
        bids_path, suffix="events", extension=".tsv", on_error=on_error
    )
    if events_fname is not None:
        raw = _handle_events_reading(events_fname, raw)

    # Try to find an associated channels.tsv to get information about the
    # status and type of present channels
    channels_fname = _find_matching_sidecar(
        bids_path, suffix="channels", extension=".tsv", on_error="warn"
    )
    if channels_fname is not None:
        raw = _handle_channels_reading(channels_fname, raw)

    # Try to find an associated electrodes.tsv and coordsystem.json
    # to get information about the status and type of present channels
    on_error = "warn" if suffix == "ieeg" else "ignore"
    electrodes_fname = _find_matching_sidecar(
        bids_path, suffix="electrodes", extension=".tsv", on_error=on_error
    )
    coordsystem_fname = _find_matching_sidecar(
        bids_path, suffix="coordsystem", extension=".json", on_error=on_error
    )
    if electrodes_fname is not None:
        if coordsystem_fname is None:
            raise RuntimeError(
                f"BIDS mandates that the coordsystem.json "
                f"should exist if electrodes.tsv does. "
                f"Please create coordsystem.json for"
                f"{bids_path.basename}"
            )
        if datatype in ["meg", "eeg", "ieeg"]:
            _read_dig_bids(
                electrodes_fname, coordsystem_fname, raw=raw, datatype=datatype
            )

    # Try to find an associated sidecar .json to get information about the
    # recording snapshot
    sidecar_fname = _find_matching_sidecar(
        bids_path, suffix=datatype, extension=".json", on_error="warn"
    )
    if sidecar_fname is not None:
        raw = _handle_info_reading(sidecar_fname, raw)

    # read in associated scans filename
    scans_fname = BIDSPath(
        subject=bids_path.subject,
        session=bids_path.session,
        suffix="scans",
        extension=".tsv",
        root=bids_path.root,
    ).fpath

    if scans_fname.exists():
        raw = _handle_scans_reading(scans_fname, raw, bids_path)

    # read in associated subject info from participants.tsv
    participants_tsv_path = bids_root / "participants.tsv"
    subject = f"sub-{bids_path.subject}"
    if op.exists(participants_tsv_path):
        raw = _handle_participants_reading(
            participants_fname=participants_tsv_path, raw=raw, subject=subject
        )
    else:
        warn(f"participants.tsv file not found for {raw_path}")
        raw.info["subject_info"] = dict()

    assert raw.annotations.orig_time == raw.info["meas_date"]
    return raw
@verbose
def get_head_mri_trans(
    bids_path,
    extra_params=None,
    t1_bids_path=None,
    fs_subject=None,
    fs_subjects_dir=None,
    *,
    kind=None,
    verbose=None,
):
    """Produce transformation matrix from MEG and MRI landmark points.

    Will attempt to read the landmarks of Nasion, LPA, and RPA from the sidecar
    files of (i) the MEG and (ii) the T1-weighted MRI data. The two sets of
    points will then be used to calculate a transformation matrix from head
    coordinates to MRI coordinates.

    .. note:: The MEG and MRI data need **not** necessarily be stored in the
              same session or even in the same BIDS dataset. See the
              ``t1_bids_path`` parameter for details.

    Parameters
    ----------
    bids_path : BIDSPath
        The path of the electrophysiology recording. If ``datatype`` and
        ``suffix`` are not present, they will be set to ``'meg'``, and a
        warning will be raised.

        .. versionchanged:: 0.10
           A warning is raised it ``datatype`` or ``suffix`` are not set.
    extra_params : None | dict
        Extra parameters to be passed to :func:`mne.io.read_raw` when reading
        the MEG file.
    t1_bids_path : BIDSPath | None
        If ``None`` (default), will try to discover the T1-weighted MRI file
        based on the name and location of the MEG recording specified via the
        ``bids_path`` parameter. Alternatively, you explicitly specify which
        T1-weighted MRI scan to use for extraction of MRI landmarks. To do
        that, pass a :class:`mne_bids.BIDSPath` pointing to the scan.
        Use this parameter e.g. if the T1 scan was recorded during a different
        session than the MEG. It is even possible to point to a T1 image
        stored in an entirely different BIDS dataset than the MEG data.
    fs_subject : str
        The subject identifier used for FreeSurfer.

        .. versionchanged:: 0.10
           Does not default anymore to ``bids_path.subject`` if ``None``.
    fs_subjects_dir : path-like | None
        The FreeSurfer subjects directory. If ``None``, defaults to the
        ``SUBJECTS_DIR`` environment variable.

        .. versionadded:: 0.8
    kind : str | None
        The suffix of the anatomical landmark names in the JSON sidecar.
        A suffix might be present e.g. to distinguish landmarks between
        sessions. If provided, should not include a leading underscore ``_``.
        For example, if the landmark names in the JSON sidecar file are
        ``LPA_ses-1``, ``RPA_ses-1``, ``NAS_ses-1``, you should pass
        ``'ses-1'`` here. If ``None``, no suffix is appended, the landmarks
        named ``Nasion`` (or ``NAS``), ``LPA``, and ``RPA`` will be used.

        .. versionadded:: 0.10
    %(verbose)s

    Returns
    -------
    trans : mne.transforms.Transform
        The data transformation matrix from head to MRI coordinates.
    """
    nib = _import_nibabel("get a head to MRI transform")

    if not isinstance(bids_path, BIDSPath):
        raise RuntimeError(
            '"bids_path" must be a BIDSPath object. Please '
            "instantiate using mne_bids.BIDSPath()."
        )

    # check root available
    meg_bids_path = bids_path.copy()
    del bids_path
    if meg_bids_path.root is None:
        raise ValueError(
            'The root of the "bids_path" must be set. '
            'Please use `bids_path.update(root="<root>")` '
            "to set the root of the BIDS folder to read."
        )

    # if the bids_path is underspecified, only get info for MEG data
    if meg_bids_path.datatype is None:
        meg_bids_path.datatype = "meg"
        warn(
            'bids_path did not have a datatype set. Assuming "meg". This '
            "will raise an exception in the future.",
            module="mne_bids",
            category=DeprecationWarning,
        )
    if meg_bids_path.suffix is None:
        meg_bids_path.suffix = "meg"
        warn(
            'bids_path did not have a suffix set. Assuming "meg". This '
            "will raise an exception in the future.",
            module="mne_bids",
            category=DeprecationWarning,
        )

    # Get the sidecar file for MRI landmarks
    t1w_bids_path = (
        (meg_bids_path if t1_bids_path is None else t1_bids_path)
        .copy()
        .update(datatype="anat", suffix="T1w", task=None)
    )
    t1w_json_path = _find_matching_sidecar(
        bids_path=t1w_bids_path, extension=".json", on_error="ignore"
    )
    del t1_bids_path

    if t1w_json_path is not None:
        t1w_json_path = Path(t1w_json_path)

    if t1w_json_path is None or not t1w_json_path.exists():
        raise FileNotFoundError(
            f"Did not find T1w JSON sidecar file, tried location: {t1w_json_path}"
        )
    for extension in (".nii", ".nii.gz"):
        t1w_path_candidate = t1w_json_path.with_suffix(extension)
        if t1w_path_candidate.exists():
            t1w_bids_path = get_bids_path_from_fname(fname=t1w_path_candidate)
            break

    if not t1w_bids_path.fpath.exists():
        raise FileNotFoundError(
            f"Did not find T1w recording file, tried location: "
            f'{t1w_path_candidate.name.replace(".nii.gz", "")}[.nii, .nii.gz]'
        )

    # Get MRI landmarks from the JSON sidecar
    t1w_json = json.loads(t1w_json_path.read_text(encoding="utf-8"))
    mri_coords_dict = t1w_json.get("AnatomicalLandmarkCoordinates", dict())

    # landmarks array: rows: [LPA, NAS, RPA]; columns: [x, y, z]
    suffix = f"_{kind}" if kind is not None else ""
    mri_landmarks = np.full((3, 3), np.nan)
    for landmark_name, coords in mri_coords_dict.items():
        if landmark_name.upper() == ("LPA" + suffix).upper():
            mri_landmarks[0, :] = coords
        elif landmark_name.upper() == ("RPA" + suffix).upper():
            mri_landmarks[2, :] = coords
        elif (
            landmark_name.upper() == ("NAS" + suffix).upper()
            or landmark_name.lower() == ("nasion" + suffix).lower()
        ):
            mri_landmarks[1, :] = coords
        else:
            continue

    if np.isnan(mri_landmarks).any():
        raise RuntimeError(
            f"Could not extract fiducial points from T1w sidecar file: "
            f"{t1w_json_path}\n\n"
            f"The sidecar file SHOULD contain a key "
            f'"AnatomicalLandmarkCoordinates" pointing to an '
            f'object with the keys "LPA", "NAS", and "RPA". '
            f"Yet, the following structure was found:\n\n"
            f"{mri_coords_dict}"
        )

    # The MRI landmarks are in "voxels". We need to convert them to the
    # Neuromag RAS coordinate system in order to compare them with MEG
    # landmarks. See also: `mne_bids.write.write_anat`
    if fs_subject is None:
        warn(
            'Passing "fs_subject=None" has been deprecated and will raise '
            "an error in future versions. Please explicitly specify the "
            "FreeSurfer subject name.",
            DeprecationWarning,
        )
        fs_subject = f"sub-{meg_bids_path.subject}"

    fs_subjects_dir = get_subjects_dir(fs_subjects_dir, raise_error=False)
    fs_t1_path = Path(fs_subjects_dir) / fs_subject / "mri" / "T1.mgz"
    if not fs_t1_path.exists():
        raise ValueError(
            f"Could not find {fs_t1_path}. Consider running FreeSurfer's "
            f"'recon-all` for subject {fs_subject}."
        )
    fs_t1_mgh = nib.load(str(fs_t1_path))
    t1_nifti = nib.load(str(t1w_bids_path.fpath))

    # Convert to MGH format to access vox2ras method
    t1_mgh = nib.MGHImage(t1_nifti.dataobj, t1_nifti.affine)

    # convert to scanner RAS
    mri_landmarks = apply_trans(t1_mgh.header.get_vox2ras(), mri_landmarks)

    # convert to FreeSurfer T1 voxels (same scanner RAS as T1)
    mri_landmarks = apply_trans(fs_t1_mgh.header.get_ras2vox(), mri_landmarks)

    # now extract transformation matrix and put back to RAS coordinates of MRI
    vox2ras_tkr = fs_t1_mgh.header.get_vox2ras_tkr()
    mri_landmarks = apply_trans(vox2ras_tkr, mri_landmarks)
    mri_landmarks = mri_landmarks * 1e-3

    # Get MEG landmarks from the raw file
    _, ext = _parse_ext(meg_bids_path)
    if extra_params is None:
        extra_params = dict()
    if ext == ".fif":
        extra_params["allow_maxshield"] = "yes"

    raw = read_raw_bids(bids_path=meg_bids_path, extra_params=extra_params)

    if (
        raw.get_montage() is None
        or raw.get_montage().get_positions() is None
        or any(
            [
                raw.get_montage().get_positions()[fid_key] is None
                for fid_key in ("nasion", "lpa", "rpa")
            ]
        )
    ):
        raise RuntimeError(
            f"Could not extract fiducial points from ``raw`` file: "
            f"{meg_bids_path}\n\n"
            f"The ``raw`` file SHOULD contain digitization points "
            "for the nasion and left and right pre-auricular points "
            "but none were found"
        )
    pos = raw.get_montage().get_positions()
    meg_landmarks = np.asarray((pos["lpa"], pos["nasion"], pos["rpa"]))

    # Given the two sets of points, fit the transform
    trans_fitted = fit_matched_points(src_pts=meg_landmarks, tgt_pts=mri_landmarks)
    trans = mne.transforms.Transform(fro="head", to="mri", trans=trans_fitted)
    return trans