From 3b171584a01417d6cfc3e99fd3fe3d1c61905a69 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Tue, 4 Jul 2023 10:11:05 +0200
Subject: [PATCH 001/132] Prepare for 1.4.0 release (#753)
---
docs/source/v1.4.md.inc | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/docs/source/v1.4.md.inc b/docs/source/v1.4.md.inc
index dfc79ed60..32cddf309 100644
--- a/docs/source/v1.4.md.inc
+++ b/docs/source/v1.4.md.inc
@@ -1,4 +1,4 @@
-## v1.4.0 (unreleased)
+## v1.4.0 (2023-07-04)
### :new: New features & enhancements
@@ -17,7 +17,7 @@
### :bug: Bug fixes
- Fix bug when [`mf_reference_run != runs[0]`]([mne_bids_pipeline._config.mf_reference_run) (#742 by @larsoner)
-- Fix bug with too many JSON files found during empty room matching (#743 by @allermat)
-- Fix bug with outdated info on ch_types config option (#745 by @allermat)
+- Fix bug with too many JSON files found during empty-room discovery (#743 by @allermat)
- Fix bug where SSP projectors were not added to the report (#747 by @larsoner)
-- Fix bug with documentation issue on data_type config option (#751 by @allermat)
\ No newline at end of file
+- Fix documentation of `data_type` configuration option (#751 by @allermat)
+- Fix documentation of `ch_types` configuration option (#745 by @allermat)
From 49c2e31b3e30c19d812fd1da8a68c1926608d457 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Wed, 5 Jul 2023 11:47:10 -0400
Subject: [PATCH 002/132] MAINT: Cleanup dependencies and doc build (#755)
---
docs/build-docs.sh | 4 +++-
docs/mkdocs.yml | 1 +
docs/source/changes.md | 2 ++
docs/source/v1.0.md.inc | 2 +-
docs/source/v1.4.md.inc | 6 +++---
docs/source/v1.5.md.inc | 17 +++++++++++++++++
docs/source/vX.Y.md.inc | 19 +++++++++++++++++++
mne_bids_pipeline/_config.py | 4 ++--
pyproject.toml | 4 ++++
9 files changed, 52 insertions(+), 7 deletions(-)
create mode 100644 docs/source/v1.5.md.inc
create mode 100644 docs/source/vX.Y.md.inc
diff --git a/docs/build-docs.sh b/docs/build-docs.sh
index cac6ba96b..ccb159aae 100755
--- a/docs/build-docs.sh
+++ b/docs/build-docs.sh
@@ -1,4 +1,6 @@
-#!/bin/bash -e
+#!/bin/bash
+
+set -eo pipefail
STEP_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index b30f8df2f..723681c73 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -6,6 +6,7 @@ site_description: The MNE-BIDS-Pipeline is a full-fledged processing pipeline fo
site_author: MNE-BIDS-Pipeline authors
docs_dir: ./source
site_dir: ./site
+strict: true
use_directory_urls: false # For easier navigation on CircleCI
watch: # Additional directories to watch for changes during `mkdocs serve`
- ../mne_bids_pipeline
diff --git a/docs/source/changes.md b/docs/source/changes.md
index 8d0af3b72..3c38f18da 100644
--- a/docs/source/changes.md
+++ b/docs/source/changes.md
@@ -1,3 +1,5 @@
+{% include-markdown "./v1.5.md.inc" %}
+
{% include-markdown "./v1.4.md.inc" %}
{% include-markdown "./v1.3.md.inc" %}
diff --git a/docs/source/v1.0.md.inc b/docs/source/v1.0.md.inc
index c845f9f6a..2bbf200fb 100644
--- a/docs/source/v1.0.md.inc
+++ b/docs/source/v1.0.md.inc
@@ -7,7 +7,7 @@ the release, so we are bumping the patch version number rather than the major
version number.
- The `N_JOBS` parameter has been renamed to
- [`n_jobs`](mne_bids_pipeline._config.n_jobs) for consistency
+ [`n_jobs`][mne_bids_pipeline._config.n_jobs] for consistency
(#694 by @larsoner)
### :bug: Bug fixes
diff --git a/docs/source/v1.4.md.inc b/docs/source/v1.4.md.inc
index 32cddf309..e2a207fac 100644
--- a/docs/source/v1.4.md.inc
+++ b/docs/source/v1.4.md.inc
@@ -3,8 +3,8 @@
### :new: New features & enhancements
- Add movement compensation and cHPI filtering to the Maxwell filtering step, along with additional configuration options (#747 by @larsoner)
-- Add option to specify [`ssp_ecg_channel`]([mne_bids_pipeline._config.ssp_ecg_channel) to override the default value (#747 by @larsoner)
-- Add option [`read_raw_bids_verbose`]([mne_bids_pipeline._config.read_raw_bids_verbose) to set the verbosity level when using `read_raw_bids` to suppress known warnings (#749 by @larsoner)
+- Add option to specify [`ssp_ecg_channel`][mne_bids_pipeline._config.ssp_ecg_channel] to override the default value (#747 by @larsoner)
+- Add option [`read_raw_bids_verbose`][mne_bids_pipeline._config.read_raw_bids_verbose] to set the verbosity level when using `read_raw_bids` to suppress known warnings (#749 by @larsoner)
[//]: # (### :warning: Behavior changes)
@@ -16,7 +16,7 @@
### :bug: Bug fixes
-- Fix bug when [`mf_reference_run != runs[0]`]([mne_bids_pipeline._config.mf_reference_run) (#742 by @larsoner)
+- Fix bug when [`mf_reference_run != runs[0]`][mne_bids_pipeline._config.mf_reference_run] (#742 by @larsoner)
- Fix bug with too many JSON files found during empty-room discovery (#743 by @allermat)
- Fix bug where SSP projectors were not added to the report (#747 by @larsoner)
- Fix documentation of `data_type` configuration option (#751 by @allermat)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
new file mode 100644
index 000000000..eda14d0e7
--- /dev/null
+++ b/docs/source/v1.5.md.inc
@@ -0,0 +1,17 @@
+## v1.5.0 (unreleased)
+
+[//]: # (### :new: New features & enhancements)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :warning: Behavior changes)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+### :medical_symbol: Code health
+
+- Fixed doc build errors and dependency specifications (#755 by @larsoner)
+
+[//]: # (### :bug: Bug fixes)
+
+[//]: # (- Whatever (#000 by @whoever))
diff --git a/docs/source/vX.Y.md.inc b/docs/source/vX.Y.md.inc
new file mode 100644
index 000000000..ea88c02c5
--- /dev/null
+++ b/docs/source/vX.Y.md.inc
@@ -0,0 +1,19 @@
+[//]: # (Don't forget to add this to changes.md as an include!)
+
+## vX.Y.0 (unreleased)
+
+[//]: # (### :new: New features & enhancements)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :warning: Behavior changes)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :medical_symbol: Code health)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :bug: Bug fixes)
+
+[//]: # (- Whatever (#000 by @whoever))
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 9250489ea..868012112 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -618,7 +618,7 @@
1. Choose a reference run. Often one from the middle of the recording session
is a good choice. Set `mf_destination = "reference_run"` and then set
- [`config.mf_reference_run`](mne_bids_pipeline._config.mf_reference_run).
+ [`config.mf_reference_run`][mne_bids_pipeline._config.mf_reference_run].
This will result in a device-to-head transformation that differs between
subjects.
2. Choose a standard position in the MEG coordinate frame. For this, pass
@@ -643,7 +643,7 @@
mf_reference_run: Optional[str] = None
"""
Which run to take as the reference for adjusting the head position of all
-runs when [`mf_destination="reference_run"`](mne_bids_pipeline._config.mf_destination).
+runs when [`mf_destination="reference_run"`][mne_bids_pipeline._config.mf_destination].
If `None`, pick the first run.
???+ example "Example"
diff --git a/pyproject.toml b/pyproject.toml
index 4e866afff..da2187653 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,6 +21,10 @@ dependencies = [
"importlib_metadata; python_version < '3.8'",
"psutil", # for joblib
"packaging",
+ "numpy",
+ "scipy",
+ "matplotlib",
+ "nibabel",
"joblib >= 0.14",
"threadpoolctl",
"dask[distributed]",
From 4d16356d70ac488cbbdc9400602ce3c19e31be17 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Thu, 6 Jul 2023 02:00:48 -0400
Subject: [PATCH 003/132] ENH: Add annotation of head velocity (#757)
---
.../settings/preprocessing/maxfilter.md | 2 +
docs/source/v1.5.md.inc | 4 +-
mne_bids_pipeline/_config.py | 12 ++++++
mne_bids_pipeline/_report.py | 12 +++++-
.../steps/preprocessing/_03_maxfilter.py | 40 +++++++++++++++++++
.../tests/configs/config_ds004229.py | 2 +
6 files changed, 69 insertions(+), 3 deletions(-)
diff --git a/docs/source/settings/preprocessing/maxfilter.md b/docs/source/settings/preprocessing/maxfilter.md
index 2bdd10b26..6eb5e567e 100644
--- a/docs/source/settings/preprocessing/maxfilter.md
+++ b/docs/source/settings/preprocessing/maxfilter.md
@@ -22,4 +22,6 @@ tags:
- mf_mc_t_window
- mf_mc_gof_limit
- mf_mc_dist_limit
+ - mf_mc_rotation_velocity_limit
+ - mf_mc_translation_velocity_limit
- mf_filter_chpi
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index eda14d0e7..4781ca50b 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -1,8 +1,8 @@
## v1.5.0 (unreleased)
-[//]: # (### :new: New features & enhancements)
+### :new: New features & enhancements
-[//]: # (- Whatever (#000 by @whoever))
+- Added support for annotating bad segments based on head movement velocity (#757 by @larsoner)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 868012112..e7eb86dd8 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -707,6 +707,18 @@
Minimum distance (m) to accept for cHPI position fitting.
"""
+mf_mc_rotation_velocity_limit: Optional[float] = None
+"""
+The rotation velocity limit (degrees/second) to use when annotating
+movement-compensated data. If `None`, no annotations will be added.
+"""
+
+mf_mc_translation_velocity_limit: Optional[float] = None
+"""
+The translation velocity limit (meters/second) to use when annotating
+movement-compensated data. If `None`, no annotations will be added.
+"""
+
mf_filter_chpi: Optional[bool] = None
"""
Use mne.chpi.filter_chpi after Maxwell filtering. Can be None to use
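As a usage sketch (assuming a standard pipeline `config.py`), the two new options could be enabled like this; the thresholds mirror the `config_ds004229.py` test config further below and are illustrative only:

```python
# Minimal config sketch: enable movement compensation plus the new
# velocity-based annotation limits added in this patch.
mf_mc = True  # annotation only runs when movement compensation is on
mf_mc_rotation_velocity_limit = 30.0  # degrees/second
mf_mc_translation_velocity_limit = 20e-3  # meters/second
# Leaving either limit at None (the default) skips that annotation type.
```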
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index eefbca167..c4da71025 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -1198,6 +1198,7 @@ def _add_raw(
title: str,
tags: tuple = (),
raw: Optional[BaseRaw] = None,
+ extra_html: Optional[str] = None,
):
if bids_path_in.run is not None:
title += f", run {repr(bids_path_in.run)}"
@@ -1208,13 +1209,22 @@ def _add_raw(
or bids_path_in.run in cfg.plot_psd_for_runs
or bids_path_in.task in cfg.plot_psd_for_runs
)
+ tags = ("raw", f"run-{bids_path_in.run}") + tags
with mne.use_log_level("error"):
report.add_raw(
raw=raw or bids_path_in,
title=title,
butterfly=5,
psd=plot_raw_psd,
- tags=("raw", f"run-{bids_path_in.run}") + tags,
+ tags=tags,
# caption=bids_path_in.basename, # TODO upstream
replace=True,
)
+ if extra_html is not None:
+ report.add_html(
+ extra_html,
+ title=title,
+ tags=tags,
+ section=title,
+ replace=True,
+ )
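For reference, a minimal sketch of the `mne.Report.add_html` call that the new `extra_html` argument ultimately feeds, assuming a standalone report object and placeholder HTML content:

```python
import mne

report = mne.Report(title="demo")
extra_html = "<p>Example movement-annotation summary.</p>"  # placeholder content
# Same pattern as above: the HTML is filed under the same section/title as
# the raw data it describes, and replace=True allows idempotent reruns.
report.add_html(
    extra_html,
    title="Raw (example)",
    tags=("raw",),
    section="Raw (example)",
    replace=True,
)
report.save("demo_report.html", open_browser=False)  # hypothetical output path
```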
diff --git a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
index 255f6bc60..c75cef153 100644
--- a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
@@ -282,6 +282,42 @@ def run_maxwell_filter(
t_window=cfg.mf_mc_t_window,
)
+ if cfg.mf_mc and (
+ cfg.mf_mc_rotation_velocity_limit is not None
+ or cfg.mf_mc_translation_velocity_limit is not None
+ ):
+ movement_annot, _ = mne.preprocessing.annotate_movement(
+ raw_sss,
+ pos=head_pos,
+ rotation_velocity_limit=cfg.mf_mc_rotation_velocity_limit,
+ translation_velocity_limit=cfg.mf_mc_translation_velocity_limit,
+ )
+ perc_time = 100 / raw_sss.times[-1]
+ extra_html = list()
+ for kind, unit in (("translation", "m"), ("rotation", "°")):
+ limit = getattr(cfg, f"mf_mc_{kind}_velocity_limit")
+ if limit is None:
+ continue
+ desc = (f"BAD_mov_{kind[:5]}_vel",)
+ tot_time = np.sum(
+ movement_annot.duration[movement_annot.description == desc]
+ )
+ perc = perc_time * tot_time
+ logger_meth = logger.warning if perc > 20 else logger.info
+ msg = (
+ f"{kind.capitalize()} velocity exceeded {limit} {unit}/s "
+ f"limit for {tot_time:0.1f} s ({perc:0.1f}%)"
+ )
+ logger_meth(**gen_log_kwargs(message=msg))
+ extra_html.append(f"{msg}")
+ extra_html = (
+ "The raw data were annotated with the following movement-related bad "
+ f"segment annotations:
"
+ )
+ raw_sss.set_annotations(raw_sss.annotations + movement_annot)
+ else:
+ movement_annot = extra_html = None
+
out_files["sss_raw"] = bids_path_out
msg = f"Writing {out_files['sss_raw'].fpath.relative_to(cfg.deriv_root)}"
logger.info(**gen_log_kwargs(message=msg))
@@ -307,6 +343,7 @@ def run_maxwell_filter(
) as report:
msg = "Adding Maxwell filtered raw data to report."
logger.info(**gen_log_kwargs(message=msg))
+
_add_raw(
cfg=cfg,
report=report,
@@ -314,6 +351,7 @@ def run_maxwell_filter(
title="Raw (maxwell filtered)",
tags=("sss",),
raw=raw_sss,
+ extra_html=extra_html,
)
assert len(in_files) == 0, in_files.keys()
@@ -345,6 +383,8 @@ def get_config(
mf_destination=config.mf_destination,
mf_int_order=config.mf_int_order,
mf_mc_t_window=config.mf_mc_t_window,
+ mf_mc_rotation_velocity_limit=config.mf_mc_rotation_velocity_limit,
+ mf_mc_translation_velocity_limit=config.mf_mc_translation_velocity_limit,
**_import_data_kwargs(config=config, subject=subject),
)
return cfg
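A standalone sketch of the annotation logic this step now performs, assuming head positions were already estimated (file names are hypothetical):

```python
import mne

raw = mne.io.read_raw_fif("run01_raw.fif")  # hypothetical input file
head_pos = mne.chpi.read_head_pos("run01_raw.pos")  # hypothetical cHPI output
# annotate_movement returns the annotations plus head displacement info
movement_annot, _ = mne.preprocessing.annotate_movement(
    raw,
    pos=head_pos,
    rotation_velocity_limit=30.0,  # degrees/second
    translation_velocity_limit=20e-3,  # meters/second
)
# Merge with any existing annotations, as the step above does for raw_sss
raw.set_annotations(raw.annotations + movement_annot)
```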
diff --git a/mne_bids_pipeline/tests/configs/config_ds004229.py b/mne_bids_pipeline/tests/configs/config_ds004229.py
index b7fd70d05..1c2538eb9 100644
--- a/mne_bids_pipeline/tests/configs/config_ds004229.py
+++ b/mne_bids_pipeline/tests/configs/config_ds004229.py
@@ -27,6 +27,8 @@
mf_mc_t_step_min = 0.5 # just for speed!
mf_mc_t_window = 0.2 # cleaner cHPI filtering on this dataset
mf_filter_chpi = False # for speed, not needed as we low-pass anyway
+mf_mc_rotation_velocity_limit = 30.0 # deg/s for annotations
+mf_mc_translation_velocity_limit = 20e-3 # m/s
ch_types = ["meg"]
l_freq = None
From ccbac54da09e950921c2a26d7c0a009f8109eecd Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Thu, 6 Jul 2023 02:35:58 -0400
Subject: [PATCH 004/132] BUG: Fix bug with cache invalidation (#756)
---
docs/source/v1.5.md.inc | 4 +-
mne_bids_pipeline/_run.py | 116 ++++++++++--------
.../steps/freesurfer/_02_coreg_surfaces.py | 4 +-
.../steps/init/_02_find_empty_room.py | 4 +-
.../steps/preprocessing/_01_data_quality.py | 4 +-
.../steps/preprocessing/_02_head_pos.py | 4 +-
.../steps/preprocessing/_03_maxfilter.py | 4 +-
.../preprocessing/_04_frequency_filter.py | 4 +-
.../steps/preprocessing/_05_make_epochs.py | 3 +-
.../steps/preprocessing/_06a_run_ica.py | 4 +-
.../steps/preprocessing/_06b_run_ssp.py | 4 +-
.../steps/preprocessing/_07a_apply_ica.py | 4 +-
.../steps/preprocessing/_07b_apply_ssp.py | 4 +-
.../steps/preprocessing/_08_ptp_reject.py | 4 +-
.../steps/sensor/_01_make_evoked.py | 4 +-
.../steps/sensor/_02_decoding_full_epochs.py | 4 +-
.../steps/sensor/_03_decoding_time_by_time.py | 4 +-
.../steps/sensor/_04_time_frequency.py | 4 +-
.../steps/sensor/_05_decoding_csp.py | 4 +-
.../steps/sensor/_06_make_cov.py | 4 +-
.../steps/source/_01_make_bem_surfaces.py | 4 +-
.../steps/source/_02_make_bem_solution.py | 4 +-
.../steps/source/_03_setup_source_space.py | 4 +-
.../steps/source/_04_make_forward.py | 4 +-
.../steps/source/_05_make_inverse.py | 4 +-
25 files changed, 115 insertions(+), 96 deletions(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 4781ca50b..ae28b9646 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -12,6 +12,6 @@
- Fixed doc build errors and dependency specifications (#755 by @larsoner)
-[//]: # (### :bug: Bug fixes)
+### :bug: Bug fixes
-[//]: # (- Whatever (#000 by @whoever))
+- Fixed bug where cache would not invalidate properly based on output file changes and steps could be incorrectly skipped. All steps will automatically rerun to accommodate the new, safer caching scheme (#756 by @larsoner)
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index af4e63b4c..7d7bf50f0 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -10,7 +10,7 @@
import sys
import traceback
import time
-from typing import Callable, Optional, Dict, List
+from typing import Callable, Optional, Dict, List, Literal, Union
from types import SimpleNamespace
from filelock import FileLock
@@ -163,20 +163,7 @@ def wrapper(*args, **kwargs):
# If this is ever true, we'll need to improve the logic below
assert not (unknown_inputs and force_run)
- def hash_(k, v):
- if isinstance(v, BIDSPath):
- v = v.fpath
- assert isinstance(
- v, pathlib.Path
- ), f'Bad type {type(v)}: in_files["{k}"] = {v}'
- assert v.exists(), f'missing in_files["{k}"] = {v}'
- if self.memory_file_method == "mtime":
- this_hash = v.lstat().st_mtime
- else:
- assert self.memory_file_method == "hash" # guaranteed
- this_hash = hash_file_path(v)
- return (str(v), this_hash)
-
+ hash_ = functools.partial(_path_to_str_hash, method=self.memory_file_method)
hashes = []
for k, v in in_files.items():
hashes.append(hash_(k, v))
@@ -211,9 +198,12 @@ def hash_(k, v):
memorized_func = self.memory.cache(func, ignore=self.ignore)
msg = emoji = None
short_circuit = False
- subject = kwargs.get("subject", None)
- session = kwargs.get("session", None)
- run = kwargs.get("run", None)
+ # Used for logging automatically
+ subject = kwargs.get("subject", None) # noqa
+ session = kwargs.get("session", None) # noqa
+ run = kwargs.get("run", None) # noqa
+ task = kwargs.get("task", None) # noqa
+ bad_out_files = False
try:
done = memorized_func.check_call_in_cache(*args, **kwargs)
except Exception:
@@ -229,9 +219,25 @@ def hash_(k, v):
msg = "Computation forced despite existing cached result …"
emoji = "🔂"
else:
- msg = "Computation unnecessary (cached) …"
- emoji = "cache"
- # When out_files is not None, we should check if the output files
+ # Check our output file hashes
+ out_files_hashes = memorized_func(*args, **kwargs)
+ for key, (fname, this_hash) in out_files_hashes.items():
+ fname = pathlib.Path(fname)
+ if not fname.exists():
+ msg = "Output file missing, will recompute …"
+ emoji = "🧩"
+ bad_out_files = True
+ break
+ got_hash = hash_(key, fname, kind="out")[1]
+ if this_hash != got_hash:
+ msg = "Output file hash mismatch, will recompute …"
+ emoji = "🚫"
+ bad_out_files = True
+ break
+ else:
+ msg = "Computation unnecessary (cached) …"
+ emoji = "cache"
+ # When out_files_expected is not None, we should check if the output files
# exist and stop if they do (e.g., in bem surface or coreg surface
# creation)
elif out_files is not None:
@@ -246,41 +252,19 @@ def hash_(k, v):
msg = "Computation unnecessary (output files exist) …"
emoji = "🔍"
short_circuit = True
+ del out_files
+
if msg is not None:
step = _short_step_path(pathlib.Path(inspect.getfile(func)))
- logger.info(
- **gen_log_kwargs(
- message=msg,
- subject=subject,
- session=session,
- run=run,
- emoji=emoji,
- step=step,
- )
- )
+ logger.info(**gen_log_kwargs(message=msg, emoji=emoji, step=step))
if short_circuit:
return
# https://joblib.readthedocs.io/en/latest/memory.html#joblib.memory.MemorizedFunc.call # noqa: E501
- if force_run or unknown_inputs:
- out_files, _ = memorized_func.call(*args, **kwargs)
+ if force_run or unknown_inputs or bad_out_files:
+ memorized_func.call(*args, **kwargs)
else:
- out_files = memorized_func(*args, **kwargs)
- assert isinstance(out_files, dict), type(out_files)
- out_files_missing_msg = "\n".join(
- f"- {key}={fname}"
- for key, fname in out_files.items()
- if not pathlib.Path(fname).exists()
- )
- if out_files_missing_msg:
- raise ValueError(
- "Missing at least one output file: \n"
- + out_files_missing_msg
- + "\n"
- + "This should not happen unless some files "
- "have been manually moved or deleted. You "
- "need to flush your cache to fix this."
- )
+ memorized_func(*args, **kwargs)
return wrapper
@@ -381,3 +365,37 @@ def _get_step_path(
def _short_step_path(step_path: pathlib.Path) -> str:
return f"{step_path.parent.name}/{step_path.stem}"
+
+
+def _prep_out_files(
+ *,
+ exec_params: SimpleNamespace,
+ out_files: Dict[str, BIDSPath],
+):
+ for key, fname in out_files.items():
+ out_files[key] = _path_to_str_hash(
+ key,
+ pathlib.Path(fname),
+ method=exec_params.memory_file_method,
+ kind="out",
+ )
+ return out_files
+
+
+def _path_to_str_hash(
+ k: str,
+ v: Union[BIDSPath, pathlib.Path],
+ *,
+ method: Literal["mtime", "hash"],
+ kind: str = "in",
+):
+ if isinstance(v, BIDSPath):
+ v = v.fpath
+ assert isinstance(v, pathlib.Path), f'Bad type {type(v)}: {kind}_files["{k}"] = {v}'
+ assert v.exists(), f'missing {kind}_files["{k}"] = {v}'
+ if method == "mtime":
+ this_hash = v.lstat().st_mtime
+ else:
+ assert method == "hash" # guaranteed
+ this_hash = hash_file_path(v)
+ return (str(v), this_hash)
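To make the two `memory_file_method` modes concrete, here is a minimal sketch of the fingerprinting idea, using `hashlib.sha1` as a stand-in for the pipeline's `hash_file_path` helper (whose exact algorithm isn't shown in this patch):

```python
import hashlib
import pathlib


def fingerprint(path: pathlib.Path, method: str = "mtime") -> tuple:
    """Return a (path, hash) pair the way _path_to_str_hash does."""
    if method == "mtime":
        # Fast: trusts the filesystem modification time
        return (str(path), path.lstat().st_mtime)
    # "hash": content-based, slower but robust to copies and touch
    return (str(path), hashlib.sha1(path.read_bytes()).hexdigest())
```

A cached step result is only trusted while every recorded output fingerprint still matches the file on disk; a missing file or a mismatched fingerprint triggers a recompute, which is the safer scheme this patch introduces.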
diff --git a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
index 2be2d786e..560448713 100644
--- a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
+++ b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
@@ -17,7 +17,7 @@
)
from ..._logging import logger, gen_log_kwargs
from ..._parallel import parallel_func, get_parallel_backend
-from ..._run import failsafe_run
+from ..._run import failsafe_run, _prep_out_files
fs_bids_app = Path(__file__).parent / "contrib" / "run.py"
@@ -62,7 +62,7 @@ def make_coreg_surfaces(
overwrite=True,
)
out_files = get_output_fnames_coreg_surfaces(cfg=cfg, subject=subject)
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(*, config, subject) -> SimpleNamespace:
diff --git a/mne_bids_pipeline/steps/init/_02_find_empty_room.py b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
index 15428bcea..33a65af9e 100644
--- a/mne_bids_pipeline/steps/init/_02_find_empty_room.py
+++ b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
@@ -15,7 +15,7 @@
)
from ..._io import _empty_room_match_path, _write_json
from ..._logging import gen_log_kwargs, logger
-from ..._run import _update_for_splits, failsafe_run, save_logs
+from ..._run import _update_for_splits, failsafe_run, save_logs, _prep_out_files
def get_input_fnames_find_empty_room(
@@ -96,7 +96,7 @@ def find_empty_room(
out_files = dict()
out_files["empty_room_match"] = _empty_room_match_path(raw_path, cfg)
_write_json(out_files["empty_room_match"], dict(fname=fname))
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
index 3f66bebe1..95002ac5f 100644
--- a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
+++ b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
@@ -30,7 +30,7 @@
from ..._logging import gen_log_kwargs, logger
from ..._parallel import parallel_func, get_parallel_backend
from ..._report import _open_report, _add_raw
-from ..._run import failsafe_run, save_logs
+from ..._run import failsafe_run, save_logs, _prep_out_files
from ..._viz import plot_auto_scores
@@ -140,7 +140,7 @@ def assess_data_quality(
plt.close(fig)
assert len(in_files) == 0, in_files.keys()
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def _find_bads_maxwell(
diff --git a/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py b/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
index ba3e9fbac..d78f93628 100644
--- a/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
+++ b/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
@@ -18,7 +18,7 @@
from ..._logging import gen_log_kwargs, logger
from ..._parallel import parallel_func, get_parallel_backend
from ..._report import _open_report
-from ..._run import failsafe_run, save_logs
+from ..._run import failsafe_run, save_logs, _prep_out_files
def get_input_fnames_head_pos(
@@ -140,7 +140,7 @@ def run_head_pos(
plt.close(fig)
del bids_path_in
assert len(in_files) == 0, in_files.keys()
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
index c75cef153..b5e6af567 100644
--- a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
@@ -40,7 +40,7 @@
from ..._logging import gen_log_kwargs, logger
from ..._parallel import parallel_func, get_parallel_backend
from ..._report import _open_report, _add_raw
-from ..._run import failsafe_run, save_logs, _update_for_splits
+from ..._run import failsafe_run, save_logs, _update_for_splits, _prep_out_files
def get_input_fnames_maxwell_filter(
@@ -355,7 +355,7 @@ def run_maxwell_filter(
)
assert len(in_files) == 0, in_files.keys()
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
index 50a3f3da0..b60543121 100644
--- a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
@@ -34,7 +34,7 @@
from ..._logging import gen_log_kwargs, logger
from ..._parallel import parallel_func, get_parallel_backend
from ..._report import _open_report, _add_raw
-from ..._run import failsafe_run, save_logs, _update_for_splits
+from ..._run import failsafe_run, save_logs, _update_for_splits, _prep_out_files
def get_input_fnames_frequency_filter(
@@ -265,7 +265,7 @@ def filter_data(
)
assert len(in_files) == 0, in_files.keys()
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py b/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py
index 516061726..d4deb4078 100644
--- a/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py
+++ b/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py
@@ -28,6 +28,7 @@
save_logs,
_update_for_splits,
_sanitize_callable,
+ _prep_out_files,
)
from ..._parallel import parallel_func, get_parallel_backend
@@ -262,7 +263,7 @@ def run_epochs(
epochs.plot()
epochs.plot_image(combine="gfp", sigma=2.0, cmap="YlGnBu_r")
assert len(in_files) == 0, in_files.keys()
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
# TODO: ideally we wouldn't need this anymore and could refactor the code above
diff --git a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
index c49eaa825..138f23954 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
@@ -34,7 +34,7 @@
from ..._parallel import parallel_func, get_parallel_backend
from ..._reject import _get_reject
from ..._report import _agg_backend
-from ..._run import failsafe_run, _update_for_splits, save_logs
+from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
def filter_for_ica(
@@ -527,7 +527,7 @@ def run_ica(
)
assert len(in_files) == 0, in_files.keys()
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
index 2d73a6ce3..1c0cadab9 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
@@ -22,7 +22,7 @@
from ..._parallel import parallel_func, get_parallel_backend
from ..._reject import _get_reject
from ..._report import _open_report
-from ..._run import failsafe_run, _update_for_splits, save_logs
+from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
def get_input_fnames_run_ssp(
@@ -205,7 +205,7 @@ def run_ssp(
replace=True,
)
plt.close(fig)
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
index 2f0f84bdd..effc99f68 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
@@ -30,7 +30,7 @@
from ..._parallel import parallel_func, get_parallel_backend
from ..._reject import _get_reject
from ..._report import _open_report, _agg_backend
-from ..._run import failsafe_run, _update_for_splits, save_logs
+from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
def get_input_fnames_apply_ica(
@@ -172,7 +172,7 @@ def apply_ica(
replace=True,
)
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py b/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
index 84ee81a3d..d34800bb7 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
@@ -17,7 +17,7 @@
_bids_kwargs,
)
from ..._logging import gen_log_kwargs, logger
-from ..._run import failsafe_run, _update_for_splits, save_logs
+from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
from ..._parallel import parallel_func, get_parallel_backend
@@ -79,7 +79,7 @@ def apply_ssp(
)
_update_for_splits(out_files, "epochs")
assert len(in_files) == 0, in_files.keys()
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
index 9f1607055..141910ad3 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
@@ -23,7 +23,7 @@
from ..._parallel import parallel_func, get_parallel_backend
from ..._reject import _get_reject
from ..._report import _open_report
-from ..._run import failsafe_run, _update_for_splits, save_logs
+from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
def get_input_fnames_drop_ptp(
@@ -154,7 +154,7 @@ def drop_ptp(
drop_log_ignore=(),
replace=True,
)
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
index 0c0ed1ecf..00c6c64ef 100644
--- a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
+++ b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
@@ -16,7 +16,7 @@
from ..._logging import gen_log_kwargs, logger
from ..._parallel import parallel_func, get_parallel_backend
from ..._report import _open_report, _sanitize_cond_tag
-from ..._run import failsafe_run, save_logs, _sanitize_callable
+from ..._run import failsafe_run, save_logs, _sanitize_callable, _prep_out_files
def get_input_fnames_evoked(
@@ -138,7 +138,7 @@ def run_evoked(
# topomap_args=topomap_args)
assert len(in_files) == 0, in_files.keys()
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
index ddc32bf3f..8fdd863fe 100644
--- a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
+++ b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
@@ -35,7 +35,7 @@
from ..._logging import gen_log_kwargs, logger
from ..._decoding import LogReg
from ..._parallel import parallel_func, get_parallel_backend
-from ..._run import failsafe_run, save_logs
+from ..._run import failsafe_run, save_logs, _prep_out_files
from ..._report import (
_open_report,
_contrasts_to_names,
@@ -209,7 +209,7 @@ def run_epochs_decoding(
plt.close(fig)
assert len(in_files) == 0, in_files.keys()
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
index fe4d64fb1..867b48030 100644
--- a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
+++ b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
@@ -38,7 +38,7 @@
)
from ..._decoding import LogReg
from ..._logging import gen_log_kwargs, logger
-from ..._run import failsafe_run, save_logs
+from ..._run import failsafe_run, save_logs, _prep_out_files
from ..._parallel import get_parallel_backend, get_parallel_backend_name
from ..._report import (
_open_report,
@@ -286,7 +286,7 @@ def run_time_decoding(
del decoding_data, cond_1, cond_2, caption
assert len(in_files) == 0, in_files.keys()
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
index 1d88c2813..201bc3f4b 100644
--- a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
+++ b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
@@ -22,7 +22,7 @@
_restrict_analyze_channels,
)
from ..._logging import gen_log_kwargs, logger
-from ..._run import failsafe_run, save_logs
+from ..._run import failsafe_run, save_logs, _prep_out_files
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _open_report, _sanitize_cond_tag
@@ -153,7 +153,7 @@ def run_time_frequency(
del itc
assert len(in_files) == 0, in_files.keys()
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
index a51371c3d..7d375fe28 100644
--- a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
+++ b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
@@ -27,7 +27,7 @@
from ..._decoding import LogReg, _handle_csp_args
from ..._logging import logger, gen_log_kwargs
from ..._parallel import parallel_func, get_parallel_backend
-from ..._run import failsafe_run, save_logs
+from ..._run import failsafe_run, save_logs, _prep_out_files
from ..._report import (
_open_report,
_sanitize_cond_tag,
@@ -504,7 +504,7 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
)
assert len(in_files) == 0, in_files.keys()
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/sensor/_06_make_cov.py b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
index 01f80625f..c32f16cdf 100644
--- a/mne_bids_pipeline/steps/sensor/_06_make_cov.py
+++ b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
@@ -20,7 +20,7 @@
from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _open_report, _sanitize_cond_tag, _all_conditions
-from ..._run import failsafe_run, save_logs, _sanitize_callable
+from ..._run import failsafe_run, save_logs, _sanitize_callable, _prep_out_files
def get_input_fnames_cov(
@@ -274,7 +274,7 @@ def run_covariance(
plt.close(fig)
assert len(in_files) == 0, in_files
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
index 110a9c103..4773e5068 100644
--- a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
+++ b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
@@ -17,7 +17,7 @@
)
from ..._logging import logger, gen_log_kwargs
from ..._parallel import get_parallel_backend, parallel_func
-from ..._run import failsafe_run, save_logs
+from ..._run import failsafe_run, save_logs, _prep_out_files
def _get_bem_params(cfg: SimpleNamespace):
@@ -97,7 +97,7 @@ def make_bem_surfaces(
verbose=cfg.freesurfer_verbose,
)
out_files = get_output_fnames_make_bem_surfaces(cfg=cfg, subject=subject)
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
index c8b3ddc51..67f0c2737 100644
--- a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
+++ b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
@@ -16,7 +16,7 @@
)
from ..._logging import logger, gen_log_kwargs
from ..._parallel import parallel_func, get_parallel_backend
-from ..._run import failsafe_run, save_logs
+from ..._run import failsafe_run, save_logs, _prep_out_files
def get_input_fnames_make_bem_solution(
@@ -69,7 +69,7 @@ def make_bem_solution(
out_files = get_output_fnames_make_bem_solution(cfg=cfg, subject=subject)
mne.write_bem_surfaces(out_files["model"], bem_model, overwrite=True)
mne.write_bem_solution(out_files["sol"], bem_sol, overwrite=True)
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/source/_03_setup_source_space.py b/mne_bids_pipeline/steps/source/_03_setup_source_space.py
index 52c2538dd..4710750f9 100644
--- a/mne_bids_pipeline/steps/source/_03_setup_source_space.py
+++ b/mne_bids_pipeline/steps/source/_03_setup_source_space.py
@@ -9,7 +9,7 @@
from ..._config_utils import get_fs_subject, get_fs_subjects_dir, get_subjects
from ..._logging import logger, gen_log_kwargs
-from ..._run import failsafe_run, save_logs
+from ..._run import failsafe_run, save_logs, _prep_out_files
from ..._parallel import parallel_func, get_parallel_backend
@@ -55,7 +55,7 @@ def run_setup_source_space(
in_files.clear() # all used by setup_source_space
out_files = get_output_fnames_setup_source_space(cfg=cfg, subject=subject)
mne.write_source_spaces(out_files["src"], src, overwrite=True)
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/source/_04_make_forward.py b/mne_bids_pipeline/steps/source/_04_make_forward.py
index c8b7ae7e1..b28916453 100644
--- a/mne_bids_pipeline/steps/source/_04_make_forward.py
+++ b/mne_bids_pipeline/steps/source/_04_make_forward.py
@@ -24,7 +24,7 @@
from ..._logging import logger, gen_log_kwargs
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _open_report
-from ..._run import failsafe_run, save_logs
+from ..._run import failsafe_run, save_logs, _prep_out_files
def _prepare_trans_template(
@@ -233,7 +233,7 @@ def run_forward(
)
assert len(in_files) == 0, in_files
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
diff --git a/mne_bids_pipeline/steps/source/_05_make_inverse.py b/mne_bids_pipeline/steps/source/_05_make_inverse.py
index c60fe44e0..161fc9a06 100644
--- a/mne_bids_pipeline/steps/source/_05_make_inverse.py
+++ b/mne_bids_pipeline/steps/source/_05_make_inverse.py
@@ -27,7 +27,7 @@
from ..._logging import logger, gen_log_kwargs
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _open_report, _sanitize_cond_tag
-from ..._run import failsafe_run, save_logs, _sanitize_callable
+from ..._run import failsafe_run, save_logs, _sanitize_callable, _prep_out_files
def get_input_fnames_inverse(
@@ -152,7 +152,7 @@ def run_inverse(
)
assert len(in_files) == 0, in_files
- return out_files
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
From ea37de0e23fa3babc8cc1e64b66069532d015426 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Thu, 6 Jul 2023 13:59:14 -0400
Subject: [PATCH 005/132] ENH: Add T1 and FLASH BEM to website (#758)
---
.circleci/config.yml | 206 ++++--------------
.circleci/run_dataset_and_copy_files.sh | 29 +++
.circleci/setup_bash.sh | 2 +-
.github/workflows/circleci-redirector.yml | 1 +
docs/mkdocs.yml | 8 +-
docs/source/v1.5.md.inc | 1 +
mne_bids_pipeline/_report.py | 19 ++
.../steps/source/_01_make_bem_surfaces.py | 19 +-
.../steps/source/_04_make_forward.py | 14 +-
.../tests/configs/config_ERP_CORE.py | 2 +
mne_bids_pipeline/tests/conftest.py | 2 +
mne_bids_pipeline/tests/test_documented.py | 14 +-
pyproject.toml | 1 +
13 files changed, 128 insertions(+), 190 deletions(-)
create mode 100755 .circleci/run_dataset_and_copy_files.sh
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 62f559ab4..1427dda16 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -315,13 +315,7 @@ jobs:
- data-cache-ds000117-2
- run:
name: test ds000117
- command: |
- DS=ds000117
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds000117
- codecov/upload
- store_test_results:
path: ./test-results
@@ -347,13 +341,7 @@ jobs:
- data-cache-ds003775-2
- run:
name: test ds003775
- command: |
- DS=ds003775
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds003775
- codecov/upload
- store_test_results:
path: ./test-results
@@ -379,13 +367,7 @@ jobs:
- data-cache-ds001971-2
- run:
name: test ds001971
- command: |
- DS=ds001971
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds001971
- codecov/upload
- store_test_results:
path: ./test-results
@@ -412,13 +394,7 @@ jobs:
- data-cache-ds004107-2
- run:
name: test ds004107
- command: |
- DS=ds004107
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds004107
- codecov/upload
- store_test_results:
path: ./test-results
@@ -445,14 +421,7 @@ jobs:
- run:
name: test ds000246
no_output_timeout: 15m
- command: |
- DS=ds000246
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*.tsv ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds000246
- codecov/upload
- store_test_results:
path: ./test-results
@@ -479,13 +448,7 @@ jobs:
- data-cache-ds000247-2
- run:
name: test ds000247
- command: |
- DS=ds000247
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds000247
- codecov/upload
- store_test_results:
path: ./test-results
@@ -511,15 +474,7 @@ jobs:
- data-cache-ds000248-4
- run:
name: test ds000248_base
- command: |
- DS=ds000248_base
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*.json ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*.tsv ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds000248_base
- codecov/upload
- store_test_results:
path: ./test-results
@@ -547,14 +502,7 @@ jobs:
- data-cache-ds000248-4
- run:
name: test ds000248_ica
- command: |
- DS=ds000248_ica
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*.tsv ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds000248_ica
- codecov/upload
- store_test_results:
path: ./test-results
@@ -581,15 +529,20 @@ jobs:
- data-cache-ds000248-4
- run:
name: test BEM from FLASH
- command: |
- $RUN_TESTS ds000248_FLASH_BEM
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds000248_FLASH_BEM
- codecov/upload
- store_test_results:
path: ./test-results
- store_artifacts:
path: ./test-results
destination: test-results
+ - store_artifacts:
+ path: /home/circleci/reports/ds000248_FLASH_BEM
+ destination: reports/ds000248_FLASH_BEM
+ - persist_to_workspace:
+ root: ~/
+ paths:
+ - mne_data/derivatives/mne-bids-pipeline/ds000248_FLASH_BEM/*/*/*.html
test_ds000248_T1_BEM:
<<: *imageconfig
@@ -603,15 +556,20 @@ jobs:
- run:
name: test BEM from T1 (watershed)
no_output_timeout: 20m
- command: |
- $RUN_TESTS ds000248_T1_BEM
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds000248_T1_BEM
- codecov/upload
- store_test_results:
path: ./test-results
- store_artifacts:
path: ./test-results
destination: test-results
+ - store_artifacts:
+ path: /home/circleci/reports/ds000248_T1_BEM
+ destination: reports/ds000248_T1_BEM
+ - persist_to_workspace:
+ root: ~/
+ paths:
+ - mne_data/derivatives/mne-bids-pipeline/ds000248_T1_BEM/*/*/*.html
test_ds000248_coreg_surfaces:
<<: *imageconfig
@@ -624,9 +582,7 @@ jobs:
- data-cache-ds000248-4
- run:
name: test head surface creation for MNE coregistration
- command: |
- $RUN_TESTS ds000248_coreg_surfaces
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds000248_coreg_surfaces ds000248_coreg_surfaces --no-copy
- codecov/upload
- store_test_results:
path: ./test-results
@@ -645,13 +601,7 @@ jobs:
- data-cache-ds000248-4
- run:
name: test ds000248_no_mri
- command: |
- DS=ds000248_no_mri
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*.html ~/reports/${DS}/
- cp ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds000248_no_mri
- codecov/upload
- store_test_results:
path: ./test-results
@@ -678,13 +628,7 @@ jobs:
- data-cache-ds001810-2
- run:
name: test ds001810
- command: |
- DS=ds001810
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*/*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds001810
- codecov/upload
- store_test_results:
path: ./test-results
@@ -710,13 +654,7 @@ jobs:
- data-cache-ds003104-2
- run:
name: test ds003104
- command: |
- DS=ds003104
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds003104
- codecov/upload
- store_test_results:
path: ./test-results
@@ -742,15 +680,7 @@ jobs:
- data-cache-ds003392-2
- run:
name: test ds003392
- command: |
- DS=ds003392
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*.json ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*.tsv ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds003392
- codecov/upload
- store_test_results:
path: ./test-results
@@ -779,15 +709,7 @@ jobs:
- data-cache-ds004229-2
- run:
name: test ds004229
- command: |
- DS=ds004229
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*.json ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/*/*.tsv ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ds004229
- codecov/upload
- store_test_results:
path: ./test-results
@@ -815,13 +737,7 @@ jobs:
- data-cache-eeg_matchingpennies-1
- run:
name: test eeg_matchingpennies
- command: |
- DS=eeg_matchingpennies
- $RUN_TESTS ${DS}
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS eeg_matchingpennies
- codecov/upload
- store_test_results:
path: ./test-results
@@ -856,13 +772,7 @@ jobs:
google-chrome --version
- run:
name: test ERP CORE N400
- command: |
- DS=ERP_CORE
- $RUN_TESTS ${DS}_N400
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*N400*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*N400*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ERP_CORE_N400 ERP_CORE
- codecov/upload
- store_test_results:
path: ./test-results
@@ -892,13 +802,7 @@ jobs:
command: mkdir -p /home/circleci/.local/share/pyvista
- run:
name: test ERP CORE ERN
- command: |
- DS=ERP_CORE
- $RUN_TESTS ${DS}_ERN
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*ERN*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*ERN*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ERP_CORE_ERN ERP_CORE
- codecov/upload
- store_test_results:
path: ./test-results
@@ -928,13 +832,7 @@ jobs:
command: mkdir -p /home/circleci/.local/share/pyvista
- run:
name: test ERP CORE LRP
- command: |
- DS=ERP_CORE
- $RUN_TESTS ${DS}_LRP
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*LRP*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*LRP*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ERP_CORE_LRP ERP_CORE
- codecov/upload
- store_test_results:
path: ./test-results
@@ -964,13 +862,7 @@ jobs:
command: mkdir -p /home/circleci/.local/share/pyvista
- run:
name: test ERP CORE MMN
- command: |
- DS=ERP_CORE
- $RUN_TESTS ${DS}_MMN
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*MMN*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*MMN*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ERP_CORE_MMN ERP_CORE
- codecov/upload
- store_test_results:
path: ./test-results
@@ -1000,13 +892,7 @@ jobs:
command: mkdir -p /home/circleci/.local/share/pyvista
- run:
name: test ERP CORE N2pc
- command: |
- DS=ERP_CORE
- $RUN_TESTS ${DS}_N2pc
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*N2pc*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*N2pc*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ERP_CORE_N2pc ERP_CORE
- codecov/upload
- store_test_results:
path: ./test-results
@@ -1036,13 +922,7 @@ jobs:
command: mkdir -p /home/circleci/.local/share/pyvista
- run:
name: test ERP CORE N170
- command: |
- DS=ERP_CORE
- $RUN_TESTS ${DS}_N170
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*N170*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*N170*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ERP_CORE_N170 ERP_CORE
- codecov/upload
- store_test_results:
path: ./test-results
@@ -1072,13 +952,7 @@ jobs:
command: mkdir -p /home/circleci/.local/share/pyvista
- run:
name: test ERP CORE P3
- command: |
- DS=ERP_CORE
- $RUN_TESTS ${DS}_P3
- mkdir -p ~/reports/${DS}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*P3*.html ~/reports/${DS}/
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*P3*.xlsx ~/reports/${DS}/
- ls -al test-results/*.xml
+ command: $RUN_TESTS ERP_CORE_P3 ERP_CORE
- codecov/upload
- store_test_results:
path: ./test-results
@@ -1102,6 +976,8 @@ jobs:
- run:
name: Build documentation
command: |
+ set -eo pipefail
+ ls ~/mne_data/derivatives/mne-bids-pipeline/*/*/*/*.html
make doc
- store_artifacts:
path: docs/site
@@ -1346,6 +1222,8 @@ workflows:
- test_ds000248_base
- test_ds000248_ica
- test_ds000248_no_mri
+ - test_ds000248_T1_BEM
+ - test_ds000248_FLASH_BEM
- test_ds001810
- test_ds003104
- test_ds003392
diff --git a/.circleci/run_dataset_and_copy_files.sh b/.circleci/run_dataset_and_copy_files.sh
new file mode 100755
index 000000000..63a49c8b4
--- /dev/null
+++ b/.circleci/run_dataset_and_copy_files.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+set -eo pipefail
+
+DS_RUN=$1
+if [[ "$2" == "" ]]; then
+ DS="$DS_RUN"
+else
+ DS="$2"
+fi
+if [[ "$3" == "--no-copy" ]]; then
+ COPY_FILES="false"
+else
+ COPY_FILES="true"
+fi
+
+pytest mne_bids_pipeline --junit-xml=test-results/junit-results.xml -k ${DS_RUN}
+
+if [[ "$COPY_FILES" == "false" ]]; then
+ exit 0
+fi
+mkdir -p ~/reports/${DS}
+# these should always exist
+cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*.html ~/reports/${DS}/
+cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*.xlsx ~/reports/${DS}/
+# these are allowed to be optional
+cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*.json ~/reports/${DS}/ || :
+cp -av ~/mne_data/derivatives/mne-bids-pipeline/${DS}/*/**/*.tsv ~/reports/${DS}/ || :
+ls -al test-results/*.xml
diff --git a/.circleci/setup_bash.sh b/.circleci/setup_bash.sh
index 6bd99e4a0..073a45b77 100755
--- a/.circleci/setup_bash.sh
+++ b/.circleci/setup_bash.sh
@@ -35,7 +35,7 @@ fi
sudo ln -s /usr/lib/x86_64-linux-gnu/libxcb-util.so.0 /usr/lib/x86_64-linux-gnu/libxcb-util.so.1
wget -q -O- http://neuro.debian.net/lists/focal.us-tn.libre | sudo tee /etc/apt/sources.list.d/neurodebian.sources.list
sudo apt-key adv --recv-keys --keyserver hkps://keyserver.ubuntu.com 0xA5D32F012649A5A9
-echo "export RUN_TESTS=\"pytest mne_bids_pipeline --junit-xml=test-results/junit-results.xml -k\"" >> "$BASH_ENV"
+echo "export RUN_TESTS=\".circleci/run_dataset_and_copy_files.sh\"" >> "$BASH_ENV"
echo "export DOWNLOAD_DATA=\"python -m mne_bids_pipeline._download\"" >> "$BASH_ENV"
# Similar CircleCI setup to mne-python (Xvfb, venv, minimal commands, env vars)
diff --git a/.github/workflows/circleci-redirector.yml b/.github/workflows/circleci-redirector.yml
index 87b6a38cb..812e116ce 100644
--- a/.github/workflows/circleci-redirector.yml
+++ b/.github/workflows/circleci-redirector.yml
@@ -10,5 +10,6 @@ jobs:
uses: larsoner/circleci-artifacts-redirector-action@master
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
+ api-token: ${{ secrets.CIRCLECI_TOKEN }}
artifact-path: 0/site/index.html
circleci-jobs: build_docs
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 723681c73..14d0f4a61 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -107,8 +107,8 @@ nav:
- examples/ds000247.md
- examples/ds000248_base.md
- examples/ds000248_ica.md
- # - examples/ds000248_T1_BEM.md
- # - examples/ds000248_FLASH_BEM.md
+ - examples/ds000248_T1_BEM.md
+ - examples/ds000248_FLASH_BEM.md
- examples/ds000248_no_mri.md
- examples/ds003104.md
- examples/eeg_matchingpennies.md
@@ -127,6 +127,10 @@ plugins:
- tags:
tags_file: tags.md
- include-markdown
+ - exclude:
+ glob:
+ - "*.py" # Python scripts
+ - "*.inc" # includes
- mkdocstrings:
default_handler: python
handlers:
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index ae28b9646..c7a4d8bfe 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -3,6 +3,7 @@
### :new: New features & enhancements
- Added support for annotating bad segments based on head movement velocity (#757 by @larsoner)
+- Added examples of T1 and FLASH BEM to website (#758 by @larsoner)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index c4da71025..bc8ddca4c 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -1228,3 +1228,22 @@ def _add_raw(
section=title,
replace=True,
)
+
+
+def _render_bem(
+ *,
+ cfg: SimpleNamespace,
+ report: mne.report.Report,
+ subject: str,
+ session: Optional[str],
+):
+ logger.info(**gen_log_kwargs(message="Rendering MRI slices with BEM contours."))
+ report.add_bem(
+ subject=cfg.fs_subject,
+ subjects_dir=cfg.fs_subjects_dir,
+ title="BEM",
+ width=256,
+ decim=8,
+ replace=True,
+ n_jobs=1, # prevent automatic parallelization
+ )
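For reference, a minimal sketch of the wrapped `mne.Report.add_bem` call used directly, assuming a FreeSurfer subject named "sample" (paths are hypothetical):

```python
import mne

report = mne.Report(title="BEM demo")
report.add_bem(
    subject="sample",
    subjects_dir="/path/to/subjects_dir",  # hypothetical FreeSurfer directory
    title="BEM",
    width=256,  # smaller images keep the report lightweight
    decim=8,  # render every 8th MRI slice
    replace=True,
    n_jobs=1,  # prevent automatic parallelization, as in _render_bem above
)
```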
diff --git a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
index 4773e5068..fc4051c9f 100644
--- a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
+++ b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
@@ -6,18 +6,22 @@
import glob
from pathlib import Path
from types import SimpleNamespace
+from typing import Optional
import mne
from ..._config_utils import (
get_fs_subject,
get_subjects,
+ get_sessions,
_get_bem_conductivity,
get_fs_subjects_dir,
+ _bids_kwargs,
)
from ..._logging import logger, gen_log_kwargs
from ..._parallel import get_parallel_backend, parallel_func
from ..._run import failsafe_run, save_logs, _prep_out_files
+from ..._report import _open_report, _render_bem
def _get_bem_params(cfg: SimpleNamespace):
@@ -38,6 +42,7 @@ def get_input_fnames_make_bem_surfaces(
*,
cfg: SimpleNamespace,
subject: str,
+ session: Optional[str],
) -> dict:
in_files = dict()
mri_images, mri_dir, flash_dir = _get_bem_params(cfg)
@@ -54,6 +59,7 @@ def get_output_fnames_make_bem_surfaces(
*,
cfg: SimpleNamespace,
subject: str,
+ session: Optional[str],
) -> dict:
out_files = dict()
conductivity, _ = _get_bem_conductivity(cfg)
@@ -73,6 +79,7 @@ def make_bem_surfaces(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
+ session: Optional[str],
in_files: dict,
) -> dict:
mri_images, _, _ = _get_bem_params(cfg)
@@ -96,7 +103,15 @@ def make_bem_surfaces(
show=show,
verbose=cfg.freesurfer_verbose,
)
- out_files = get_output_fnames_make_bem_surfaces(cfg=cfg, subject=subject)
+ with _open_report(
+ cfg=cfg, exec_params=exec_params, subject=subject, session=session
+ ) as report:
+ _render_bem(report=report, cfg=cfg, subject=subject, session=session)
+ out_files = get_output_fnames_make_bem_surfaces(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ )
return _prep_out_files(exec_params=exec_params, out_files=out_files)
@@ -112,6 +127,7 @@ def get_config(
freesurfer_verbose=config.freesurfer_verbose,
use_template_mri=config.use_template_mri,
ch_types=config.ch_types,
+ **_bids_kwargs(config=config),
)
return cfg
@@ -143,6 +159,7 @@ def main(*, config: SimpleNamespace) -> None:
),
exec_params=config.exec_params,
subject=subject,
+ session=get_sessions(config)[0],
force_run=config.recreate_bem,
)
for subject in get_subjects(config)
diff --git a/mne_bids_pipeline/steps/source/_04_make_forward.py b/mne_bids_pipeline/steps/source/_04_make_forward.py
index b28916453..d2c2bfdb6 100644
--- a/mne_bids_pipeline/steps/source/_04_make_forward.py
+++ b/mne_bids_pipeline/steps/source/_04_make_forward.py
@@ -23,7 +23,7 @@
from ..._config_import import _import_config
from ..._logging import logger, gen_log_kwargs
from ..._parallel import get_parallel_backend, parallel_func
-from ..._report import _open_report
+from ..._report import _open_report, _render_bem
from ..._run import failsafe_run, save_logs, _prep_out_files
@@ -200,17 +200,7 @@ def run_forward(
) as report:
msg = "Adding forward information to report"
logger.info(**gen_log_kwargs(message=msg))
- msg = "Rendering MRI slices with BEM contours."
- logger.info(**gen_log_kwargs(message=msg))
- report.add_bem(
- subject=cfg.fs_subject,
- subjects_dir=cfg.fs_subjects_dir,
- title="BEM",
- width=256,
- decim=8,
- replace=True,
- n_jobs=1, # prevent automatic parallelization
- )
+ _render_bem(report=report, cfg=cfg, subject=subject, session=session)
msg = "Rendering sensor alignment (coregistration)"
logger.info(**gen_log_kwargs(message=msg))
report.add_trans(
diff --git a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
index 8d0f9fdfe..91699312a 100644
--- a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
+++ b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
@@ -47,6 +47,8 @@
interactive = False
raw_resample_sfreq = 128
+# Suppress "Data file name in EEG.data (sub-019_task-ERN_eeg.fdt) is incorrect..."
+read_raw_bids_verbose = "error"
eeg_template_montage = mne.channels.make_standard_montage("standard_1005")
eeg_bipolar_channels = {
diff --git a/mne_bids_pipeline/tests/conftest.py b/mne_bids_pipeline/tests/conftest.py
index c06e8694a..020d292d9 100644
--- a/mne_bids_pipeline/tests/conftest.py
+++ b/mne_bids_pipeline/tests/conftest.py
@@ -34,6 +34,8 @@ def pytest_configure(config):
# seaborn calling tight layout, etc.
ignore:The figure layout has changed to tight:UserWarning
ignore:The \S+_cmap function was deprecated.*:DeprecationWarning
+ # Dask distributed with jsonschema 4.18
+ ignore:jsonschema\.RefResolver is deprecated.*:DeprecationWarning
"""
for warning_line in warning_lines.split("\n"):
warning_line = warning_line.strip()
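The loop above (continued in the untouched part of `pytest_configure`) turns each non-comment line into a pytest warning filter. A self-contained sketch of the same pattern, with an illustrative subset of the filters:

```python
# Illustrative conftest.py sketch: comment lines are skipped, every other
# non-empty line becomes a `filterwarnings` ini entry.
def pytest_configure(config):
    warning_lines = r"""
    ignore:jsonschema\.RefResolver is deprecated.*:DeprecationWarning
    # seaborn calling tight layout, etc.
    ignore:The figure layout has changed to tight:UserWarning
    """
    for warning_line in warning_lines.split("\n"):
        warning_line = warning_line.strip()
        if warning_line and not warning_line.startswith("#"):
            config.addinivalue_line("filterwarnings", warning_line)
```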
diff --git a/mne_bids_pipeline/tests/test_documented.py b/mne_bids_pipeline/tests/test_documented.py
index 2175b7af9..727dcf6bc 100644
--- a/mne_bids_pipeline/tests/test_documented.py
+++ b/mne_bids_pipeline/tests/test_documented.py
@@ -80,7 +80,7 @@ def test_datasets_in_doc():
# make sure everything is consistent there (too much work), let's at least
# check that we get the correct number using `.count`.
counts = dict(ERP_CORE=7, ds000248=6)
- counts_noartifact = dict(ds000248=3) # 3 are actually tests, not for docs
+ counts_noartifact = dict(ds000248=1) # 1 is actually a test, not for docs
for name in sorted(caches):
get = f"Get {name}"
n_found = circle_yaml_src.count(get)
@@ -117,19 +117,13 @@ def test_datasets_in_doc():
# jobs: test_*: steps: persist_to_workspace
pw = re.compile(
f"- mne_data/derivatives/mne-bids-pipeline/{name}[^\\.]+\\*.html"
- ) # noqa: E501
+ )
n_found = len(pw.findall(circle_yaml_src))
assert n_found == this_count, f"{pw} ({n_found} != {this_count})"
# jobs: test_*: steps: run test
- cp = re.compile(
- f"""\
- DS={name}.*
- \\$RUN_TESTS \\${{DS}}.*
- mkdir -p ~/reports/\\${{DS}}
- cp -av ~/mne_data/derivatives/mne-bids-pipeline/\\${{DS}}/[^\\.]+.html"""
- ) # noqa: E501
+ cp = re.compile(rf" command: \$RUN_TESTS {name}.*")
n_found = len(cp.findall(circle_yaml_src))
- assert n_found == this_count, f"{cp} ({n_found} != {this_count})"
+ assert n_found == count, f"{cp} ({n_found} != {count})"
# 3. Read examples from docs (being careful about tags we can't read)
class SafeLoaderIgnoreUnknown(yaml.SafeLoader):
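The simplified pattern above directly counts the `$RUN_TESTS <dataset>` invocations in the CircleCI config. A toy demonstration against a hypothetical YAML excerpt:

```python
import re

# Hypothetical excerpt from .circleci/config.yml
circle_yaml_src = """\
      - run:
          name: test ds000246
          command: $RUN_TESTS ds000246
      - run:
          name: test ds000248_base
          command: $RUN_TESTS -r ds000248_base
"""
name = "ds000246"
cp = re.compile(rf" command: \$RUN_TESTS {name}.*")
n_found = len(cp.findall(circle_yaml_src))
assert n_found == 1, f"{cp} ({n_found} != 1)"
```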
diff --git a/pyproject.toml b/pyproject.toml
index da2187653..ae20edf69 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -60,6 +60,7 @@ tests = [
"mkdocs-material-extensions",
"mkdocs-macros-plugin",
"mkdocs-include-markdown-plugin",
+ "mkdocs-exclude",
"mkdocstrings-python",
"mike",
"jinja2",
From e5d5f1f2bfbbcc4881c2837c8cddabe92557f2f8 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Mon, 10 Jul 2023 15:58:01 -0400
Subject: [PATCH 006/132] BUG: Fix bug with empty room processing (#761)
---
docs/source/v1.5.md.inc | 2 +
mne_bids_pipeline/_config_utils.py | 16 +++++---
.../steps/preprocessing/_01_data_quality.py | 2 +-
.../steps/preprocessing/_02_head_pos.py | 5 ++-
.../steps/preprocessing/_03_maxfilter.py | 40 +++++++++++--------
.../steps/sensor/_04_time_frequency.py | 8 ++--
.../tests/configs/config_ds000246.py | 1 +
7 files changed, 45 insertions(+), 29 deletions(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index c7a4d8bfe..352abd4c7 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -16,3 +16,5 @@
### :bug: Bug fixes
- Fixed bug where cache would not invalidate properly based on output file changes and steps could be incorrectly skipped. All steps will automatically rerun to accommodate the new, safer caching scheme (#756 by @larsoner)
+- Fixed bug with parallelization across runs for Maxwell filtering (#761 by @larsoner)
+- Fixed bug where head position files were not written with a proper suffix and extension (#761 by @larsoner)
diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py
index a93e57504..04a61edf7 100644
--- a/mne_bids_pipeline/_config_utils.py
+++ b/mne_bids_pipeline/_config_utils.py
@@ -240,13 +240,19 @@ def get_runs_tasks(
config: SimpleNamespace,
subject: str,
session: Optional[str],
- include_noise: bool = True,
+ which: Tuple[str] = ("runs", "noise", "rest"),
) -> List[Tuple[str]]:
"""Get (run, task) tuples for all runs plus (maybe) rest."""
from ._import_data import _get_noise_path, _get_rest_path
- runs = get_runs(config=config, subject=subject)
- tasks = [get_task(config=config)] * len(runs)
+ assert isinstance(which, tuple)
+ assert all(isinstance(inc, str) for inc in which)
+ assert all(inc in ("runs", "noise", "rest") for inc in which)
+ runs = list()
+ tasks = list()
+ if "runs" in which:
+ runs.extend(get_runs(config=config, subject=subject))
+ tasks.extend([get_task(config=config)] * len(runs))
kwargs = dict(
cfg=config,
subject=subject,
@@ -254,10 +260,10 @@ def get_runs_tasks(
kind="orig",
add_bads=False,
)
- if _get_rest_path(**kwargs):
+ if "rest" in which and _get_rest_path(**kwargs):
runs.append(None)
tasks.append("rest")
- if include_noise:
+ if "noise" in which:
mf_reference_run = get_mf_reference_run(config=config)
if _get_noise_path(mf_reference_run=mf_reference_run, **kwargs):
runs.append(None)
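The `which` tuple replaces the old boolean so callers can request any combination of experimental runs, resting-state, and empty-room recordings. A standalone sketch of the dispatch logic (not the pipeline helper itself, which also resolves paths):

```python
from typing import List, Optional, Tuple

def toy_runs_tasks(
    runs: List[str],
    task: str,
    *,
    have_rest: bool,
    have_noise: bool,
    which: Tuple[str, ...] = ("runs", "noise", "rest"),
) -> List[Tuple[Optional[str], str]]:
    assert all(w in ("runs", "noise", "rest") for w in which)
    pairs: List[Tuple[Optional[str], str]] = []
    if "runs" in which:
        pairs += [(run, task) for run in runs]
    if "rest" in which and have_rest:
        pairs.append((None, "rest"))
    if "noise" in which and have_noise:
        pairs.append((None, "noise"))
    return pairs

print(toy_runs_tasks(["01", "02"], "aud", have_rest=True, have_noise=True,
                     which=("runs", "rest")))
# -> [('01', 'aud'), ('02', 'aud'), (None, 'rest')]
```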
diff --git a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
index 95002ac5f..67413cfdd 100644
--- a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
+++ b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
@@ -311,7 +311,7 @@ def get_config(
def main(*, config: SimpleNamespace) -> None:
- """Run maxwell_filter."""
+ """Run assess_data_quality."""
with get_parallel_backend(config.exec_params):
parallel, run_func = parallel_func(
assess_data_quality, exec_params=config.exec_params
diff --git a/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py b/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
index d78f93628..a75cd7339 100644
--- a/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
+++ b/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
@@ -62,7 +62,8 @@ def run_head_pos(
out_files = dict()
key = f"raw_run-{run}-pos"
out_files[key] = bids_path_in.copy().update(
- extension=".pos",
+ suffix="headpos",
+ extension=".txt",
root=cfg.deriv_root,
check=False,
)
@@ -183,7 +184,7 @@ def main(*, config: SimpleNamespace) -> None:
config=config,
subject=subject,
session=session,
- include_noise=False,
+ which=("runs", "rest"),
)
)
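With the new naming, estimated head positions land in the derivatives tree as `*_headpos.txt` instead of a bare `.pos` file, and stay readable by `mne.chpi.read_head_pos` (as the Maxwell-filter step below shows). A sketch with hypothetical entities and roots:

```python
from mne_bids import BIDSPath

bids_path_in = BIDSPath(
    subject="01", session="01", task="aud", run="01",
    suffix="meg", extension=".fif", root="/data/bids", check=False,
)
fname_pos = bids_path_in.copy().update(
    suffix="headpos",   # previously: extension=".pos" with no suffix
    extension=".txt",
    root="/data/derivatives/mne-bids-pipeline",
    check=False,        # "headpos" is not a standard BIDS suffix
)
print(fname_pos.basename)
# -> sub-01_ses-01_task-aud_run-01_headpos.txt
```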
diff --git a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
index b5e6af567..df40ad936 100644
--- a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
@@ -78,7 +78,8 @@ def get_input_fnames_maxwell_filter(
**kwargs,
)[f"raw_task-{pos_task}_run-{pos_run}"]
in_files[f"raw_task-{task}_run-{run}-pos"] = path.update(
- extension=".pos",
+ suffix="headpos",
+ extension=".txt",
root=cfg.deriv_root,
check=False,
task=pos_task,
@@ -401,22 +402,27 @@ def main(*, config: SimpleNamespace) -> None:
parallel, run_func = parallel_func(
run_maxwell_filter, exec_params=config.exec_params
)
- logs = parallel(
- run_func(
- cfg=get_config(config=config, subject=subject, session=session),
- exec_params=config.exec_params,
- subject=subject,
- session=session,
- run=run,
- task=task,
- )
- for subject in get_subjects(config)
- for session in get_sessions(config)
- for run, task in get_runs_tasks(
- config=config,
- subject=subject,
- session=session,
+ # We need to guarantee that the reference_run completes before the
+ # noise/rest runs are processed, so we split the loops.
+ logs = list()
+ for which in [("runs",), ("noise", "rest")]:
+ logs += parallel(
+ run_func(
+ cfg=get_config(config=config, subject=subject, session=session),
+ exec_params=config.exec_params,
+ subject=subject,
+ session=session,
+ run=run,
+ task=task,
+ )
+ for subject in get_subjects(config)
+ for session in get_sessions(config)
+ for run, task in get_runs_tasks(
+ config=config,
+ subject=subject,
+ session=session,
+ which=which,
+ )
)
- )
save_logs(config=config, logs=logs)
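The split works because joblib only parallelizes within a single `parallel(...)` call; successive calls run sequentially, so everything in the first phase finishes before the noise/rest phase starts. A generic, runnable sketch of the pattern with a stand-in work function:

```python
from joblib import Parallel, delayed

def process(kind, run):  # stand-in for run_maxwell_filter
    return f"{kind}:{run}"

phases = [
    [("runs", "01"), ("runs", "02")],   # pass 1: reference + task runs
    [("noise", None), ("rest", None)],  # pass 2: depends on pass-1 output
]
logs = []
for phase in phases:             # phases execute one after another ...
    logs += Parallel(n_jobs=2)(  # ... jobs within a phase run concurrently
        delayed(process)(kind, run) for kind, run in phase
    )
print(logs)
```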
diff --git a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
index 201bc3f4b..f5b7c3381 100644
--- a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
+++ b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
@@ -106,8 +106,8 @@ def run_time_frequency(
# conform to MNE filename checks. This is because BIDS has not
# finalized how derivatives should be named. Once this is done, we
# should update our names and/or MNE's checks.
- power.save(out_files[power_key], overwrite=True, verbose="error")
- itc.save(out_files[itc_key], overwrite=True, verbose="error")
+ power.save(out_files[power_key].fpath, overwrite=True, verbose="error")
+ itc.save(out_files[itc_key].fpath, overwrite=True, verbose="error")
# Report
with _open_report(
@@ -117,8 +117,8 @@ def run_time_frequency(
logger.info(**gen_log_kwargs(message=msg))
for condition in cfg.time_frequency_conditions:
cond = sanitize_cond_name(condition)
- fname_tfr_pow_cond = out_files[f"power-{cond}"]
- fname_tfr_itc_cond = out_files[f"itc-{cond}"]
+ fname_tfr_pow_cond = out_files[f"power-{cond}"].fpath
+ fname_tfr_itc_cond = out_files[f"itc-{cond}"].fpath
with mne.use_log_level("error"): # filename convention
power = mne.time_frequency.read_tfrs(fname_tfr_pow_cond, condition=0)
power.apply_baseline(
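The `.fpath` fix matters because a `BIDSPath` is an object, not a plain filename; its `.fpath` attribute yields the `pathlib.Path` that MNE's TFR writer and reader expect. A sketch with hypothetical entities:

```python
from mne_bids import BIDSPath

out_path = BIDSPath(
    subject="01", task="aud", suffix="meg", extension=".h5",
    root="/data/derivatives/mne-bids-pipeline", check=False,
)
print(type(out_path))        # mne_bids BIDSPath object
print(type(out_path.fpath))  # pathlib.Path — what the writer expects
# power.save(out_path.fpath, overwrite=True, verbose="error")
```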
diff --git a/mne_bids_pipeline/tests/configs/config_ds000246.py b/mne_bids_pipeline/tests/configs/config_ds000246.py
index a32267b00..6cb3a8148 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000246.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000246.py
@@ -11,6 +11,7 @@
runs = ["01"]
crop_runs = (0, 120) # Reduce memory usage on CI system
+read_raw_bids_verbose = "error" # No BIDS -> MNE mapping found for channel ...
l_freq = 0.3
h_freq = 100
epochs_decim = 4
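`read_raw_bids_verbose = "error"` presumably forwards the verbosity level to `mne_bids.read_raw_bids` when the raw data are loaded, so only messages at ERROR level or above surface. The equivalent direct call, with hypothetical entities:

```python
from mne_bids import BIDSPath, read_raw_bids

bids_path = BIDSPath(subject="0001", task="auditory", run="01",
                     root="~/mne_data/ds000246")  # hypothetical entities
raw = read_raw_bids(bids_path, verbose="error")   # silence benign warnings
```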
From 721162ab041b305d77ff68368649f3890a0356f6 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Tue, 18 Jul 2023 09:32:36 -0400
Subject: [PATCH 007/132] ENH: More compact logging (#764)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: Richard Höchenberger
---
docs/source/v1.5.md.inc | 1 +
mne_bids_pipeline/_config_import.py | 8 +--
mne_bids_pipeline/_config_template.py | 4 +-
mne_bids_pipeline/_logging.py | 72 ++++++++-----------
mne_bids_pipeline/_main.py | 17 ++---
mne_bids_pipeline/_report.py | 4 +-
mne_bids_pipeline/_run.py | 3 +-
.../steps/init/_01_init_derivatives_dir.py | 7 +-
8 files changed, 47 insertions(+), 69 deletions(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 352abd4c7..22cfa7863 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -4,6 +4,7 @@
- Added support for annotating bad segments based on head movement velocity (#757 by @larsoner)
- Added examples of T1 and FLASH BEM to website (#758 by @larsoner)
+- Improved the spacing of the log output (#764 by @larsoner)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index 8755c32c6..ca507ba02 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -147,9 +147,7 @@ def _update_with_user_config(
val = getattr(overrides, name)
if log:
msg = f"Overriding config.{name} = {repr(val)}"
- logger.info(
- **gen_log_kwargs(message=msg, step="", emoji="override", box="╶╴")
- )
+ logger.info(**gen_log_kwargs(message=msg, emoji="override"))
setattr(config, name, val)
# 4. Env vars and other triaging
@@ -168,7 +166,7 @@ def _update_with_user_config(
config.deriv_root = pathlib.Path(config.deriv_root).expanduser().resolve()
# 5. Consistency
- log_kwargs = dict(emoji="override", box=" ", step="")
+ log_kwargs = dict(emoji="override")
if config.interactive:
if log and config.on_error != "debug":
msg = 'Setting config.on_error="debug" because of interactive mode'
@@ -427,4 +425,4 @@ def _handle_config_error(
raise ValueError(msg)
elif config.config_validation == "warn":
if log:
- logger.warning(**gen_log_kwargs(message=msg, step="", emoji="🛟"))
+ logger.warning(**gen_log_kwargs(message=msg, emoji="🛟"))
diff --git a/mne_bids_pipeline/_config_template.py b/mne_bids_pipeline/_config_template.py
index 9954811ad..1925e020e 100644
--- a/mne_bids_pipeline/_config_template.py
+++ b/mne_bids_pipeline/_config_template.py
@@ -27,7 +27,7 @@ def create_template_config(
target_path.write_text("".join(config), encoding="utf-8")
message = f"Successfully created template configuration file at: " f"{target_path}"
- logger.info(**gen_log_kwargs(message=message, emoji="✅", step=""))
+ logger.info(**gen_log_kwargs(message=message, emoji="✅"))
message = "Please edit the file before running the pipeline."
- logger.info(**gen_log_kwargs(message=message, emoji="💡", step=""))
+ logger.info(**gen_log_kwargs(message=message, emoji="💡"))
diff --git a/mne_bids_pipeline/_logging.py b/mne_bids_pipeline/_logging.py
index fc6085d6a..56eea901f 100644
--- a/mne_bids_pipeline/_logging.py
+++ b/mne_bids_pipeline/_logging.py
@@ -31,21 +31,28 @@ def _console(self):
kwargs["theme"] = rich.theme.Theme(
dict(
default="white",
+ # Rule
+ title="bold green",
# Prefixes
asctime="green",
- step="bold cyan",
+ prefix="bold cyan",
# Messages
debug="dim",
- info="bold",
- warning="bold magenta",
- error="bold red",
+ info="",
+ warning="magenta",
+ error="red",
)
)
self.__console = rich.console.Console(**kwargs)
return self.__console
- def rule(self, title="", *, align="center"):
- self.__console.rule(title=title, characters="─", style="rule.line", align=align)
+ def title(self, title):
+ # Align left with ASCTIME offset
+ title = f"[title]┌────────┬ {title}[/]"
+ self._console.rule(title=title, characters="─", style="title", align="left")
+
+ def end(self, msg=""):
+ self._console.print(f"[title]└────────┴ {msg}[/]")
@property
def level(self):
@@ -75,29 +82,18 @@ def _log_message(
subject: Optional[Union[str, int]] = None,
session: Optional[Union[str, int]] = None,
run: Optional[Union[str, int]] = None,
- step: Optional[str] = None,
emoji: str = "",
- box: str = "",
):
this_level = getattr(logging, kind.upper())
if this_level < self.level:
return
- if not subject:
- subject = ""
- if not session:
- session = ""
- if not run:
- run = ""
- if not step:
- step = ""
- if step and emoji:
- step = f"{emoji} {step}"
- asctime = datetime.datetime.now().strftime("[%H:%M:%S]")
- msg = (
- f"[asctime]{asctime}[/asctime] "
- f"[step]{box}{step}{subject}{session}{run}[/step]"
- f"[{kind}]{msg}[/{kind}]"
- )
+ # Construct str
+ essr = [x for x in [emoji, subject, session, run] if x]
+ essr = " ".join(essr)
+ if essr:
+ essr += " "
+ asctime = datetime.datetime.now().strftime("│%H:%M:%S│")
+ msg = f"[asctime]{asctime} [/][prefix]{essr}[/][{kind}]{msg}[/]"
self._console.print(msg)
@@ -111,12 +107,8 @@ def gen_log_kwargs(
session: Optional[Union[str, int]] = None,
run: Optional[Union[str, int]] = None,
task: Optional[str] = None,
- step: Optional[str] = None,
emoji: str = "⏳️",
- box: str = "│ ",
) -> LogKwargsT:
- from ._run import _get_step_path, _short_step_path
-
# Try to figure these out
stack = inspect.stack()
up_locals = stack[1].frame.f_locals
@@ -130,23 +122,14 @@ def gen_log_kwargs(
task = task or up_locals.get("task", None)
if task in ("noise", "rest"):
run = task
- if step is None:
- step_path = _get_step_path(stack)
- if step_path:
- step = _short_step_path(_get_step_path())
- else:
- step = ""
# Do some nice formatting
if subject is not None:
- subject = f" sub-{subject}"
+ subject = f"sub-{subject}"
if session is not None:
- session = f" ses-{session}"
+ session = f"ses-{session}"
if run is not None:
- run = f" run-{run}"
- if step != "":
- # need an extra space
- message = f" {message}"
+ run = f"run-{run}"
# Choose some to be our standards
emoji = dict(
@@ -154,10 +137,7 @@ def gen_log_kwargs(
skip="⏩",
override="❌",
).get(emoji, emoji)
- extra = {
- "step": f"{emoji} {step}",
- "box": box,
- }
+ extra = {"emoji": emoji}
if subject:
extra["subject"] = subject
if session:
@@ -170,3 +150,7 @@ def gen_log_kwargs(
"extra": extra,
}
return kwargs
+
+
+def _linkfile(uri):
+ return f"[link=file://{uri}]{uri}[/link]"
diff --git a/mne_bids_pipeline/_main.py b/mne_bids_pipeline/_main.py
index 7bfbb392a..3c54a177d 100755
--- a/mne_bids_pipeline/_main.py
+++ b/mne_bids_pipeline/_main.py
@@ -193,22 +193,19 @@ def main():
# them twice.
step_modules = [*STEP_MODULES["init"], *step_modules]
- msg = "Welcome aboard the MNE BIDS Pipeline!"
- logger.info(**gen_log_kwargs(message=msg, emoji="👋", box="╶╴", step=""))
+ logger.title("Welcome aboard MNE-BIDS-Pipeline! 👋")
msg = f"Using configuration: {config}"
- logger.info(**gen_log_kwargs(message=msg, emoji="🧾", box="╶╴", step=""))
+ logger.info(**gen_log_kwargs(message=msg, emoji="📝"))
+ logger.end()
config_imported = _import_config(
config_path=config_path,
overrides=overrides,
)
- for si, step_module in enumerate(step_modules):
+ for step_module in step_modules:
start = time.time()
step = _short_step_path(pathlib.Path(step_module.__file__))
- if si == 0:
- logger.rule()
- msg = "Now running 👇"
- logger.info(**gen_log_kwargs(message=msg, box="┌╴", emoji="🚀", step=step))
+ logger.title(title=f"{step}")
step_module.main(config=config_imported)
elapsed = time.time() - start
hours, remainder = divmod(elapsed, 3600)
@@ -221,6 +218,4 @@ def main():
elapsed = f"{minutes}m {elapsed}"
if hours:
elapsed = f"{hours}h {elapsed}"
- msg = f"Done running 👆 [{elapsed}]"
- logger.info(**gen_log_kwargs(message=msg, box="└╴", emoji="🎉", step=step))
- logger.rule()
+ logger.end(f"done ({elapsed})")
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index bc8ddca4c..90f651b4e 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -20,7 +20,7 @@
from ._config_utils import sanitize_cond_name, get_subjects, _restrict_analyze_channels
from ._decoding import _handle_csp_args
-from ._logging import logger, gen_log_kwargs
+from ._logging import logger, gen_log_kwargs, _linkfile
@contextlib.contextmanager
@@ -83,7 +83,7 @@ def _open_report(
except Exception as exc:
logger.warning(f"Failed: {exc}")
fname_report_html = fname_report.with_suffix(".html")
- msg = f"Saving report: {fname_report_html}"
+ msg = f"Saving report: {_linkfile(fname_report_html)}"
logger.info(**gen_log_kwargs(message=msg))
report.save(fname_report, overwrite=True)
report.save(fname_report_html, overwrite=True, open_browser=False)
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index 7d7bf50f0..6d86b1e4a 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -255,8 +255,7 @@ def wrapper(*args, **kwargs):
del out_files
if msg is not None:
- step = _short_step_path(pathlib.Path(inspect.getfile(func)))
- logger.info(**gen_log_kwargs(message=msg, emoji=emoji, step=step))
+ logger.info(**gen_log_kwargs(message=msg, emoji=emoji))
if short_circuit:
return
diff --git a/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py b/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
index 40a879374..921891a3c 100644
--- a/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
+++ b/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
@@ -18,9 +18,10 @@ def init_dataset(cfg) -> None:
"""Prepare the pipeline directory in /derivatives."""
fname_json = cfg.deriv_root / "dataset_description.json"
if fname_json.is_file():
- return # already exists
- msg = "Initializing output directories."
- logger.info(**gen_log_kwargs(message=msg))
+ msg = "Output directories already exist …"
+ logger.info(**gen_log_kwargs(message=msg, emoji="✅"))
+ return
+ logger.info(**gen_log_kwargs(message="Initializing output directories."))
cfg.deriv_root.mkdir(exist_ok=True, parents=True)
From 62dde414dbef52733c01eb147c677281a33ce2f9 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Tue, 18 Jul 2023 13:31:28 -0400
Subject: [PATCH 008/132] ENH: Add eSSS (#762)
---
.../settings/preprocessing/maxfilter.md | 2 +
docs/source/v1.5.md.inc | 1 +
mne_bids_pipeline/_config.py | 10 +
mne_bids_pipeline/_config_utils.py | 7 +
mne_bids_pipeline/_import_data.py | 2 +-
mne_bids_pipeline/_run.py | 8 +-
.../steps/init/_02_find_empty_room.py | 2 +-
.../steps/preprocessing/_01_data_quality.py | 2 +-
.../steps/preprocessing/_03_maxfilter.py | 236 +++++++++++++++++-
.../steps/preprocessing/_06b_run_ssp.py | 2 +-
.../tests/configs/config_ds004229.py | 2 +
mne_bids_pipeline/tests/conftest.py | 3 +
mne_bids_pipeline/tests/test_run.py | 20 +-
13 files changed, 274 insertions(+), 23 deletions(-)
diff --git a/docs/source/settings/preprocessing/maxfilter.md b/docs/source/settings/preprocessing/maxfilter.md
index 6eb5e567e..3cd32d9d7 100644
--- a/docs/source/settings/preprocessing/maxfilter.md
+++ b/docs/source/settings/preprocessing/maxfilter.md
@@ -17,6 +17,8 @@ tags:
- mf_reference_run
- mf_cal_fname
- mf_ctc_fname
+ - mf_esss
+ - mf_esss_reject
- mf_mc
- mf_mc_t_step_min
- mf_mc_t_window
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 22cfa7863..0d95dc8c5 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -4,6 +4,7 @@
- Added support for annotating bad segments based on head movement velocity (#757 by @larsoner)
- Added examples of T1 and FLASH BEM to website (#758 by @larsoner)
+- Added support for extended SSS (eSSS) in Maxwell filtering (#762 by @larsoner)
- Improved the spacing of the log output (#764 by @larsoner)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index e7eb86dd8..812248ee4 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -681,6 +681,16 @@
```
""" # noqa : E501
+mf_esss: int = 0
+"""
+Number of extended SSS (eSSS) basis projectors to use from empty-room data.
+"""
+
+mf_esss_reject: Optional[Dict[str, float]] = None
+"""
+Rejection parameters to use when computing the extended SSS (eSSS) basis.
+"""
+
mf_mc: bool = False
"""
If True, perform movement compensation on the data.
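A hypothetical user-config excerpt enabling the new options (the rejection values mirror the ds004229 test config later in this patch):

```python
# config.py (illustrative values)
mf_esss = 8  # compute 8 eSSS projectors from the empty-room recording
mf_esss_reject = {"grad": 10000e-13, "mag": 40000e-15}  # peak-to-peak limits
```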
diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py
index 04a61edf7..e08cfb06e 100644
--- a/mne_bids_pipeline/_config_utils.py
+++ b/mne_bids_pipeline/_config_utils.py
@@ -615,3 +615,10 @@ def _bids_kwargs(*, config: SimpleNamespace) -> dict:
def _do_mf_autobad(*, cfg: SimpleNamespace) -> bool:
return cfg.find_noisy_channels_meg or cfg.find_flat_channels_meg
+
+
+# Adapted from MNE-Python
+def _pl(x, *, non_pl="", pl="s"):
+ """Determine if plural should be used."""
+ len_x = x if isinstance(x, (int, np.generic)) else len(x)
+ return non_pl if len_x == 1 else pl
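Usage sketch of the vendored helper (self-contained copy for illustration):

```python
import numpy as np

def _pl(x, *, non_pl="", pl="s"):
    """Determine if plural should be used."""
    len_x = x if isinstance(x, (int, np.generic)) else len(x)
    return non_pl if len_x == 1 else pl

print(f"found 3 file{_pl(3)}")         # found 3 files
print(f"found 1 file{_pl(1)}")         # found 1 file
print(f"match{_pl([1, 2], pl='es')}")  # matches
```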
diff --git a/mne_bids_pipeline/_import_data.py b/mne_bids_pipeline/_import_data.py
index ce1964604..075dc8e8c 100644
--- a/mne_bids_pipeline/_import_data.py
+++ b/mne_bids_pipeline/_import_data.py
@@ -2,7 +2,6 @@
from typing import Dict, Optional, Iterable, Union, List, Literal
import mne
-from mne.utils import _pl
from mne_bids import BIDSPath, read_raw_bids, get_bids_path_from_fname
import numpy as np
import pandas as pd
@@ -14,6 +13,7 @@
get_task,
_bids_kwargs,
_do_mf_autobad,
+ _pl,
)
from ._io import _read_json, _empty_room_match_path
from ._logging import gen_log_kwargs, logger
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index 6d86b1e4a..fc6742581 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -220,7 +220,9 @@ def wrapper(*args, **kwargs):
emoji = "🔂"
else:
# Check our output file hashes
- out_files_hashes = memorized_func(*args, **kwargs)
+ # Need to make a copy of kwargs["in_files"] in particular
+ use_kwargs = copy.deepcopy(kwargs)
+ out_files_hashes = memorized_func(*args, **use_kwargs)
for key, (fname, this_hash) in out_files_hashes.items():
fname = pathlib.Path(fname)
if not fname.exists():
@@ -302,8 +304,8 @@ def save_logs(*, config: SimpleNamespace, logs) -> None: # TODO add type
def _update_for_splits(
- files_dict: Dict[str, BIDSPath],
- key: str,
+ files_dict: Union[Dict[str, BIDSPath], BIDSPath],
+ key: Optional[str],
*,
single: bool = False,
allow_missing: bool = False,
diff --git a/mne_bids_pipeline/steps/init/_02_find_empty_room.py b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
index 33a65af9e..d9334a9cf 100644
--- a/mne_bids_pipeline/steps/init/_02_find_empty_room.py
+++ b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
@@ -3,7 +3,6 @@
from types import SimpleNamespace
from typing import Dict, Optional
-from mne.utils import _pl
from mne_bids import BIDSPath
from ..._config_utils import (
@@ -12,6 +11,7 @@
get_subjects,
get_mf_reference_run,
_bids_kwargs,
+ _pl,
)
from ..._io import _empty_room_match_path, _write_json
from ..._logging import gen_log_kwargs, logger
diff --git a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
index 67413cfdd..655280e52 100644
--- a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
+++ b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
@@ -6,7 +6,6 @@
import pandas as pd
import mne
-from mne.utils import _pl
from mne_bids import BIDSPath
from ..._config_utils import (
@@ -16,6 +15,7 @@
get_sessions,
get_runs_tasks,
_do_mf_autobad,
+ _pl,
)
from ..._import_data import (
_get_run_rest_noise_path,
diff --git a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
index df40ad936..099336c5c 100644
--- a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
@@ -14,6 +14,7 @@
The function loads machine-specific calibration files.
"""
+from copy import deepcopy
import gc
from typing import Optional
from types import SimpleNamespace
@@ -28,6 +29,7 @@
get_subjects,
get_sessions,
get_runs_tasks,
+ _pl,
)
from ..._import_data import (
import_experimental_data,
@@ -43,6 +45,139 @@
from ..._run import failsafe_run, save_logs, _update_for_splits, _prep_out_files
+# %% eSSS
+def get_input_fnames_esss(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+) -> dict:
+ kwargs = dict(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ )
+ in_files = _get_run_rest_noise_path(
+ run=None,
+ task="noise",
+ kind="orig",
+ mf_reference_run=cfg.mf_reference_run,
+ **kwargs,
+ )
+ in_files.update(_get_mf_reference_run_path(add_bads=True, **kwargs))
+ return in_files
+
+
+@failsafe_run(
+ get_input_fnames=get_input_fnames_esss,
+)
+def compute_esss_proj(
+ *,
+ cfg: SimpleNamespace,
+ exec_params: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ in_files: dict,
+) -> dict:
+ import matplotlib.pyplot as plt
+
+ run, task = None, "noise"
+ in_key = f"raw_task-{task}_run-{run}"
+ bids_path_in = in_files.pop(in_key)
+ bids_path_bads_in = in_files.pop(f"{in_key}-bads", None) # noqa
+ bids_path_ref_in = in_files.pop("raw_ref_run")
+ bids_path_ref_bads_in = in_files.pop("raw_ref_run-bads", None)
+ raw_noise = import_er_data(
+ cfg=cfg,
+ bids_path_er_in=bids_path_in,
+ bids_path_ref_in=bids_path_ref_in,
+ # TODO: This must match below, so we don't pass it
+ # bids_path_er_bads_in=bids_path_bads_in,
+ bids_path_er_bads_in=None,
+ bids_path_ref_bads_in=bids_path_ref_bads_in,
+ prepare_maxwell_filter=True,
+ )
+ logger.info(
+ **gen_log_kwargs(
+ f"Computing eSSS basis with {cfg.mf_esss} component{_pl(cfg.mf_esss)}"
+ )
+ )
+ projs = mne.compute_proj_raw(
+ raw_noise,
+ n_grad=cfg.mf_esss,
+ n_mag=cfg.mf_esss,
+ reject=cfg.mf_esss_reject,
+ meg="combined",
+ )
+ out_files = dict()
+ out_files["esss_basis"] = bids_path_in.copy().update(
+ subject=subject, # need these in the case of an empty room match
+ session=session,
+ run=run,
+ task=task,
+ suffix="esssproj",
+ split=None,
+ extension=".fif",
+ root=cfg.deriv_root,
+ check=False,
+ )
+ mne.write_proj(out_files["esss_basis"], projs, overwrite=True)
+
+ with _open_report(
+ cfg=cfg,
+ exec_params=exec_params,
+ subject=subject,
+ session=session,
+ run=run,
+ task=task,
+ ) as report:
+ msg = "Adding eSSS projectors to report."
+ logger.info(**gen_log_kwargs(message=msg))
+ kinds_picks = list()
+ for kind in ("mag", "grad"):
+ picks = mne.pick_types(raw_noise.info, meg=kind, exclude="bads")
+ if not len(picks):
+ continue
+ kinds_picks.append([kind, picks])
+ n_row, n_col = len(kinds_picks), cfg.mf_esss
+ fig, axes = plt.subplots(
+ n_row,
+ n_col,
+ figsize=(n_col + 0.5, n_row + 0.5),
+ constrained_layout=True,
+ squeeze=False,
+ )
+ # TODO: plot_projs_topomap doesn't handle meg="combined" well:
+ # https://github.com/mne-tools/mne-python/pull/11792
+ for ax_row, (kind, picks) in zip(axes, kinds_picks):
+ info = mne.pick_info(raw_noise.info, picks)
+ ch_names = info["ch_names"]
+ these_projs = deepcopy(projs)
+ for proj in these_projs:
+ sub_idx = [proj["data"]["col_names"].index(name) for name in ch_names]
+ proj["data"]["data"] = proj["data"]["data"][:, sub_idx]
+ proj["data"]["col_names"] = ch_names
+ mne.viz.plot_projs_topomap(
+ these_projs,
+ info=info,
+ axes=ax_row,
+ )
+ for ai, ax in enumerate(ax_row):
+ ax.set_title(f"{kind} {ai + 1}")
+ report.add_figure(
+ fig,
+ title="eSSS projectors",
+ tags=("sss", "raw"),
+ replace=True,
+ )
+ plt.close(fig)
+
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
+
+
+# %% maxwell_filter
+
+
def get_input_fnames_maxwell_filter(
*,
cfg: SimpleNamespace,
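The core of `compute_esss_proj` is a single MNE-Python call: projectors are estimated jointly across magnetometers and gradiometers (`meg="combined"`), which is what `maxwell_filter` later needs as an extended basis. A minimal sketch with a hypothetical empty-room file:

```python
import mne

raw_noise = mne.io.read_raw_fif("sub-emptyroom_task-noise_meg.fif",
                                preload=True)  # hypothetical file
projs = mne.compute_proj_raw(
    raw_noise,
    n_grad=8,  # mirrors mf_esss
    n_mag=8,
    reject=dict(grad=10000e-13, mag=40000e-15),  # mirrors mf_esss_reject
    meg="combined",  # one joint mag+grad basis, required for eSSS
)
mne.write_proj("sub-01_task-noise_esss-proj.fif", projs, overwrite=True)
```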
@@ -64,6 +199,8 @@ def get_input_fnames_maxwell_filter(
mf_reference_run=cfg.mf_reference_run,
**kwargs,
)
+ in_key = f"raw_task-{task}_run-{run}"
+ assert in_key in in_files
# head positions
if cfg.mf_mc:
if run is None and task == "noise":
@@ -77,7 +214,7 @@ def get_input_fnames_maxwell_filter(
kind="orig",
**kwargs,
)[f"raw_task-{pos_task}_run-{pos_run}"]
- in_files[f"raw_task-{task}_run-{run}-pos"] = path.update(
+ in_files[f"{in_key}-pos"] = path.update(
suffix="headpos",
extension=".txt",
root=cfg.deriv_root,
@@ -86,9 +223,42 @@ def get_input_fnames_maxwell_filter(
run=pos_run,
)
+ if cfg.mf_esss:
+ in_files["esss_basis"] = (
+ in_files[in_key]
+ .copy()
+ .update(
+ subject=subject,
+ session=session,
+ run=None,
+ task="noise",
+ suffix="esssproj",
+ split=None,
+ extension=".fif",
+ root=cfg.deriv_root,
+ check=False,
+ )
+ )
+
# reference run (used for `destination` and also bad channels for noise)
in_files.update(_get_mf_reference_run_path(add_bads=True, **kwargs))
+ is_rest_noise = run is None and task in ("noise", "rest")
+ if is_rest_noise:
+ key = "raw_ref_run_sss"
+ in_files[key] = (
+ in_files["raw_ref_run"]
+ .copy()
+ .update(
+ processing="sss",
+ suffix="raw",
+ extension=".fif",
+ root=cfg.deriv_root,
+ check=False,
+ )
+ )
+ _update_for_splits(in_files, key, single=True)
+
# standard files
in_files["mf_cal_fname"] = cfg.mf_cal_fname
in_files["mf_ctc_fname"] = cfg.mf_ctc_fname
@@ -143,11 +313,6 @@ def run_maxwell_filter(
)
bids_path_out = bids_path_in.copy().update(**bids_path_out_kwargs)
- # Now take everything from the bids_path_in and overwrite the parameters
- subject = bids_path_in.subject # noqa: F841
- session = bids_path_in.session # noqa: F841
- run = bids_path_in.run
-
out_files = dict()
# Load dev_head_t and digitization points from MaxFilter reference run.
msg = f"Loading reference run: {cfg.mf_reference_run}."
@@ -168,15 +333,23 @@ def run_maxwell_filter(
# Maxwell-filter experimental data.
apply_msg = "Applying "
+ extra = list()
if cfg.mf_st_duration:
apply_msg += f"tSSS ({cfg.mf_st_duration} sec, corr={cfg.mf_st_correlation})"
else:
apply_msg += "SSS"
if cfg.mf_mc:
- apply_msg += " with MC"
+ extra.append("MC")
head_pos = mne.chpi.read_head_pos(in_files.pop(f"{in_key}-pos"))
else:
head_pos = None
+ if cfg.mf_esss:
+ extra.append("eSSS")
+ extended_proj = mne.read_proj(in_files.pop("esss_basis"))
+ else:
+ extended_proj = ()
+ if extra:
+ apply_msg += " with " + "/".join(extra)
apply_msg += " to"
mf_kws = dict(
@@ -188,6 +361,7 @@ def run_maxwell_filter(
coord_frame="head",
destination=destination,
head_pos=head_pos,
+ extended_proj=extended_proj,
)
logger.info(**gen_log_kwargs(message=f"{apply_msg} {recording_type} data"))
@@ -250,7 +424,7 @@ def run_maxwell_filter(
# copy the bad channel selection from the reference run over to
# the resting-state recording.
- bids_path_ref_sss = bids_path_ref_in.copy().update(**bids_path_out_kwargs)
+ bids_path_ref_sss = in_files.pop("raw_ref_run_sss")
raw_exp = mne.io.read_raw_fif(bids_path_ref_sss)
rank_exp = mne.compute_rank(raw_exp, rank="info")["meg"]
rank_noise = mne.compute_rank(raw_sss, rank="info")["meg"]
@@ -359,7 +533,21 @@ def run_maxwell_filter(
return _prep_out_files(exec_params=exec_params, out_files=out_files)
-def get_config(
+def get_config_esss(
+ *,
+ config: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+) -> SimpleNamespace:
+ cfg = SimpleNamespace(
+ mf_esss=config.mf_esss,
+ mf_esss_reject=config.mf_esss_reject,
+ **_import_data_kwargs(config=config, subject=subject),
+ )
+ return cfg
+
+
+def get_config_maxwell_filter(
*,
config: SimpleNamespace,
subject: str,
@@ -386,6 +574,7 @@ def get_config(
mf_mc_t_window=config.mf_mc_t_window,
mf_mc_rotation_velocity_limit=config.mf_mc_rotation_velocity_limit,
mf_mc_translation_velocity_limit=config.mf_mc_translation_velocity_limit,
+ mf_esss=config.mf_esss,
**_import_data_kwargs(config=config, subject=subject),
)
return cfg
@@ -399,16 +588,41 @@ def main(*, config: SimpleNamespace) -> None:
return
with get_parallel_backend(config.exec_params):
+ logs = list()
+ # First step: compute eSSS projectors
+ if config.mf_esss:
+ parallel, run_func = parallel_func(
+ compute_esss_proj, exec_params=config.exec_params
+ )
+ logs += parallel(
+ run_func(
+ cfg=get_config_esss(
+ config=config,
+ subject=subject,
+ session=session,
+ ),
+ exec_params=config.exec_params,
+ subject=subject,
+ session=session,
+ )
+ for subject in get_subjects(config)
+ for session in get_sessions(config)
+ )
+
+ # Second: maxwell_filter
parallel, run_func = parallel_func(
run_maxwell_filter, exec_params=config.exec_params
)
# We need to guarantee that the reference_run completes before the
# noise/rest runs are processed, so we split the loops.
- logs = list()
for which in [("runs",), ("noise", "rest")]:
logs += parallel(
run_func(
- cfg=get_config(config=config, subject=subject, session=session),
+ cfg=get_config_maxwell_filter(
+ config=config,
+ subject=subject,
+ session=session,
+ ),
exec_params=config.exec_params,
subject=subject,
session=session,
diff --git a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
index 1c0cadab9..eeb22cf36 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
@@ -10,13 +10,13 @@
from mne.preprocessing import create_eog_epochs, create_ecg_epochs
from mne import compute_proj_evoked, compute_proj_epochs
from mne_bids import BIDSPath
-from mne.utils import _pl
from ..._config_utils import (
get_runs,
get_sessions,
get_subjects,
_bids_kwargs,
+ _pl,
)
from ..._logging import gen_log_kwargs, logger
from ..._parallel import parallel_func, get_parallel_backend
diff --git a/mne_bids_pipeline/tests/configs/config_ds004229.py b/mne_bids_pipeline/tests/configs/config_ds004229.py
index 1c2538eb9..1c625eb90 100644
--- a/mne_bids_pipeline/tests/configs/config_ds004229.py
+++ b/mne_bids_pipeline/tests/configs/config_ds004229.py
@@ -29,6 +29,8 @@
mf_filter_chpi = False # for speed, not needed as we low-pass anyway
mf_mc_rotation_velocity_limit = 30.0 # deg/s for annotations
mf_mc_translation_velocity_limit = 20e-3 # m/s
+mf_esss = 8
+mf_esss_reject = {"grad": 10000e-13, "mag": 40000e-15}
ch_types = ["meg"]
l_freq = None
diff --git a/mne_bids_pipeline/tests/conftest.py b/mne_bids_pipeline/tests/conftest.py
index 020d292d9..e17b46076 100644
--- a/mne_bids_pipeline/tests/conftest.py
+++ b/mne_bids_pipeline/tests/conftest.py
@@ -34,6 +34,9 @@ def pytest_configure(config):
# seaborn calling tight layout, etc.
ignore:The figure layout has changed to tight:UserWarning
ignore:The \S+_cmap function was deprecated.*:DeprecationWarning
+ # seaborn->pandas
+ ignore:is_categorical_dtype is deprecated.*:FutureWarning
+ ignore:use_inf_as_na option is deprecated.*:FutureWarning
# Dask distributed with jsonschema 4.18
ignore:jsonschema\.RefResolver is deprecated.*:DeprecationWarning
"""
diff --git a/mne_bids_pipeline/tests/test_run.py b/mne_bids_pipeline/tests/test_run.py
index 593a5968e..28e2a7e71 100644
--- a/mne_bids_pipeline/tests/test_run.py
+++ b/mne_bids_pipeline/tests/test_run.py
@@ -3,6 +3,7 @@
import shutil
from pathlib import Path
from typing import Collection, Dict, Optional, TypedDict
+import os
import pytest
@@ -23,15 +24,17 @@ class _TestOptionsT(TypedDict, total=False):
steps: Collection[str]
task: Optional[str]
env: Dict[str, str]
+ requires: Collection[str]
# If not supplied below, the defaults are:
# key: {
-# 'dataset': key.split('_')[0],
-# 'config': f'config_{key}.py',
-# 'steps': ('preprocessing', 'sensor'),
-# 'env': {},
-# 'task': None,
+# "dataset": key.split("_")[0],
+# "config": f"config_{key}.py",
+# "steps": ("preprocessing", "sensor"),
+# "env": {},
+# "task": None,
+# "requires": (),
# }
#
TEST_SUITE: Dict[str, _TestOptionsT] = {
@@ -56,16 +59,20 @@ class _TestOptionsT(TypedDict, total=False):
},
"ds000248_base": {
"steps": ("preprocessing", "sensor", "source"),
+ "requires": ("freesurfer",),
},
"ds000248_ica": {},
"ds000248_T1_BEM": {
"steps": ("source/make_bem_surfaces",),
+ "requires": ("freesurfer",),
},
"ds000248_FLASH_BEM": {
"steps": ("source/make_bem_surfaces",),
+ "requires": ("freesurfer",),
},
"ds000248_coreg_surfaces": {
"steps": ("freesurfer/coreg_surfaces",),
+ "requires": ("freesurfer",),
},
"ds000248_no_mri": {
"steps": ("preprocessing", "sensor", "source"),
@@ -120,6 +127,9 @@ def dataset_test(request):
capsys = request.getfixturevalue("capsys")
dataset = request.getfixturevalue("dataset")
test_options = TEST_SUITE[dataset]
+ if "freesurfer" in test_options.get("requires", ()):
+ if "FREESURFER_HOME" not in os.environ:
+ pytest.skip("FREESURFER_HOME required but not found")
dataset_name = test_options.get("dataset", dataset.split("_")[0])
with capsys.disabled():
if request.config.getoption("--download", False): # download requested
From 81d6f55ecc1356f139f589ac5a91e59ab2a73417 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Tue, 18 Jul 2023 21:24:18 -0400
Subject: [PATCH 009/132] ENH: Cache grand-average steps (#765)
---
.circleci/config.yml | 35 +-
.circleci/run_dataset_and_copy_files.sh | 41 +-
.circleci/setup_bash.sh | 2 +-
docs/source/v1.5.md.inc | 1 +
mne_bids_pipeline/__init__.py | 2 +-
mne_bids_pipeline/_config.py | 2 +-
mne_bids_pipeline/_config_import.py | 60 +-
mne_bids_pipeline/_logging.py | 4 +
mne_bids_pipeline/_main.py | 10 +-
mne_bids_pipeline/_parallel.py | 42 +-
mne_bids_pipeline/_report.py | 776 ++++---------
mne_bids_pipeline/_run.py | 9 +-
.../steps/preprocessing/_07a_apply_ica.py | 4 +-
.../steps/sensor/_01_make_evoked.py | 12 +-
.../steps/sensor/_02_decoding_full_epochs.py | 4 +-
.../steps/sensor/_03_decoding_time_by_time.py | 4 +-
.../steps/sensor/_04_time_frequency.py | 12 +-
.../steps/sensor/_06_make_cov.py | 10 +-
.../steps/sensor/_99_group_average.py | 1019 +++++++++++------
.../steps/source/_05_make_inverse.py | 30 +-
.../steps/source/_99_group_average.py | 263 +++--
.../tests/configs/config_ds000248_base.py | 2 -
.../tests/configs/config_ds001971.py | 12 +
mne_bids_pipeline/tests/conftest.py | 4 +
mne_bids_pipeline/tests/test_documented.py | 2 +-
mne_bids_pipeline/tests/test_run.py | 39 +-
26 files changed, 1292 insertions(+), 1109 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 1427dda16..03c9de23e 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -82,6 +82,7 @@ jobs:
name: Get ds000117
command: |
$DOWNLOAD_DATA ds000117
+ - codecov/upload
- save_cache:
key: data-cache-ds000117-2
paths:
@@ -118,6 +119,7 @@ jobs:
name: Get ds001971
command: |
$DOWNLOAD_DATA ds001971
+ - codecov/upload
- save_cache:
key: data-cache-ds001971-2
paths:
@@ -136,6 +138,7 @@ jobs:
name: Get ds004107
command: |
$DOWNLOAD_DATA ds004107
+ - codecov/upload
- save_cache:
key: data-cache-ds004107-2
paths:
@@ -154,6 +157,7 @@ jobs:
name: Get ds000246
command: |
$DOWNLOAD_DATA ds000246
+ - codecov/upload
- save_cache:
key: data-cache-ds000246-2
paths:
@@ -172,6 +176,7 @@ jobs:
name: Get ds000247
command: |
$DOWNLOAD_DATA ds000247
+ - codecov/upload
- save_cache:
key: data-cache-ds000247-2
paths:
@@ -190,6 +195,7 @@ jobs:
name: Get ds000248
command: |
$DOWNLOAD_DATA ds000248
+ - codecov/upload
- save_cache:
key: data-cache-ds000248-4
paths:
@@ -208,6 +214,7 @@ jobs:
name: Get ds001810
command: |
$DOWNLOAD_DATA ds001810
+ - codecov/upload
- save_cache:
key: data-cache-ds001810-2
paths:
@@ -226,6 +233,7 @@ jobs:
name: Get ds003104
command: |
$DOWNLOAD_DATA ds003104
+ - codecov/upload
- save_cache:
key: data-cache-ds003104-2
paths:
@@ -244,6 +252,7 @@ jobs:
name: Get ds003392
command: |
$DOWNLOAD_DATA ds003392
+ - codecov/upload
- save_cache:
key: data-cache-ds003392-2
paths:
@@ -262,6 +271,7 @@ jobs:
name: Get ds004229
command: |
$DOWNLOAD_DATA ds004229
+ - codecov/upload
- save_cache:
key: data-cache-ds004229-2
paths:
@@ -281,6 +291,7 @@ jobs:
name: Get eeg_matchingpennies
command: |
$DOWNLOAD_DATA eeg_matchingpennies
+ - codecov/upload
- save_cache:
key: data-cache-eeg_matchingpennies-1
paths:
@@ -299,6 +310,7 @@ jobs:
name: Get ERP_CORE
command: |
$DOWNLOAD_DATA ERP_CORE
+ - codecov/upload
- save_cache:
key: data-cache-ERP_CORE-1
paths:
@@ -474,7 +486,8 @@ jobs:
- data-cache-ds000248-4
- run:
name: test ds000248_base
- command: $RUN_TESTS ds000248_base
+      # cov and FLASH BEM always force a rerun here, so skip the rerun (caching) check
+ command: $RUN_TESTS -r ds000248_base
- codecov/upload
- store_test_results:
path: ./test-results
@@ -529,7 +542,7 @@ jobs:
- data-cache-ds000248-4
- run:
name: test BEM from FLASH
- command: $RUN_TESTS ds000248_FLASH_BEM
+ command: $RUN_TESTS -r ds000248_FLASH_BEM
- codecov/upload
- store_test_results:
path: ./test-results
@@ -556,7 +569,7 @@ jobs:
- run:
name: test BEM from T1 (watershed)
no_output_timeout: 20m
- command: $RUN_TESTS ds000248_T1_BEM
+ command: $RUN_TESTS -r ds000248_T1_BEM
- codecov/upload
- store_test_results:
path: ./test-results
@@ -582,7 +595,7 @@ jobs:
- data-cache-ds000248-4
- run:
name: test head surface creation for MNE coregistration
- command: $RUN_TESTS ds000248_coreg_surfaces ds000248_coreg_surfaces --no-copy
+ command: $RUN_TESTS -c -r ds000248_coreg_surfaces
- codecov/upload
- store_test_results:
path: ./test-results
@@ -772,7 +785,7 @@ jobs:
google-chrome --version
- run:
name: test ERP CORE N400
- command: $RUN_TESTS ERP_CORE_N400 ERP_CORE
+ command: $RUN_TESTS ERP_CORE_N400
- codecov/upload
- store_test_results:
path: ./test-results
@@ -802,7 +815,7 @@ jobs:
command: mkdir -p /home/circleci/.local/share/pyvista
- run:
name: test ERP CORE ERN
- command: $RUN_TESTS ERP_CORE_ERN ERP_CORE
+ command: $RUN_TESTS ERP_CORE_ERN
- codecov/upload
- store_test_results:
path: ./test-results
@@ -832,7 +845,7 @@ jobs:
command: mkdir -p /home/circleci/.local/share/pyvista
- run:
name: test ERP CORE LRP
- command: $RUN_TESTS ERP_CORE_LRP ERP_CORE
+ command: $RUN_TESTS ERP_CORE_LRP
- codecov/upload
- store_test_results:
path: ./test-results
@@ -862,7 +875,7 @@ jobs:
command: mkdir -p /home/circleci/.local/share/pyvista
- run:
name: test ERP CORE MMN
- command: $RUN_TESTS ERP_CORE_MMN ERP_CORE
+ command: $RUN_TESTS ERP_CORE_MMN
- codecov/upload
- store_test_results:
path: ./test-results
@@ -892,7 +905,7 @@ jobs:
command: mkdir -p /home/circleci/.local/share/pyvista
- run:
name: test ERP CORE N2pc
- command: $RUN_TESTS ERP_CORE_N2pc ERP_CORE
+ command: $RUN_TESTS ERP_CORE_N2pc
- codecov/upload
- store_test_results:
path: ./test-results
@@ -922,7 +935,7 @@ jobs:
command: mkdir -p /home/circleci/.local/share/pyvista
- run:
name: test ERP CORE N170
- command: $RUN_TESTS ERP_CORE_N170 ERP_CORE
+ command: $RUN_TESTS ERP_CORE_N170
- codecov/upload
- store_test_results:
path: ./test-results
@@ -952,7 +965,7 @@ jobs:
command: mkdir -p /home/circleci/.local/share/pyvista
- run:
name: test ERP CORE P3
- command: $RUN_TESTS ERP_CORE_P3 ERP_CORE
+ command: $RUN_TESTS ERP_CORE_P3
- codecov/upload
- store_test_results:
path: ./test-results
diff --git a/.circleci/run_dataset_and_copy_files.sh b/.circleci/run_dataset_and_copy_files.sh
index 63a49c8b4..34dcfa14f 100755
--- a/.circleci/run_dataset_and_copy_files.sh
+++ b/.circleci/run_dataset_and_copy_files.sh
@@ -2,21 +2,48 @@
set -eo pipefail
+COPY_FILES="true"
+RERUN_TEST="true"
+while getopts "cr" option; do
+ echo $option
+ case $option in
+ c)
+ COPY_FILES="false";;
+ r)
+ RERUN_TEST="false";;
+ esac
+done
+shift "$(($OPTIND -1))"
+
DS_RUN=$1
-if [[ "$2" == "" ]]; then
- DS="$DS_RUN"
-else
- DS="$2"
+if [[ -z $1 ]]; then
+ echo "Missing dataset argument"
+ exit 1
fi
-if [[ "$3" == "--no-copy" ]]; then
- COPY_FILES="false"
+if [[ "$DS_RUN" == "ERP_CORE_"* ]]; then
+ DS="ERP_CORE"
else
- COPY_FILES="true"
+ DS="$1"
fi
+SECONDS=0
pytest mne_bids_pipeline --junit-xml=test-results/junit-results.xml -k ${DS_RUN}
+echo "Runtime: ${SECONDS} seconds"
+
+# rerun test (check caching)!
+SECONDS=0
+if [[ "$RERUN_TEST" == "false" ]]; then
+ echo "Skipping rerun test"
+ RUN_TIME=0
+else
+ pytest mne_bids_pipeline --cov-append -k $DS_RUN
+ RUN_TIME=$SECONDS
+ echo "Runtime: ${RUN_TIME} seconds (should be < 20)"
+fi
+test $RUN_TIME -lt 20
if [[ "$COPY_FILES" == "false" ]]; then
+ echo "Not copying files"
exit 0
fi
mkdir -p ~/reports/${DS}
diff --git a/.circleci/setup_bash.sh b/.circleci/setup_bash.sh
index 073a45b77..ee44b317b 100755
--- a/.circleci/setup_bash.sh
+++ b/.circleci/setup_bash.sh
@@ -36,7 +36,7 @@ sudo ln -s /usr/lib/x86_64-linux-gnu/libxcb-util.so.0 /usr/lib/x86_64-linux-gnu/
wget -q -O- http://neuro.debian.net/lists/focal.us-tn.libre | sudo tee /etc/apt/sources.list.d/neurodebian.sources.list
sudo apt-key adv --recv-keys --keyserver hkps://keyserver.ubuntu.com 0xA5D32F012649A5A9
echo "export RUN_TESTS=\".circleci/run_dataset_and_copy_files.sh\"" >> "$BASH_ENV"
-echo "export DOWNLOAD_DATA=\"python -m mne_bids_pipeline._download\"" >> "$BASH_ENV"
+echo "export DOWNLOAD_DATA=\"coverage run -m mne_bids_pipeline._download\"" >> "$BASH_ENV"
# Similar CircleCI setup to mne-python (Xvfb, venv, minimal commands, env vars)
wget -q https://raw.githubusercontent.com/mne-tools/mne-python/main/tools/setup_xvfb.sh
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 0d95dc8c5..7b2a70b0c 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -6,6 +6,7 @@
- Added examples of T1 and FLASH BEM to website (#758 by @larsoner)
- Added support for extended SSS (eSSS) in Maxwell filtering (#762 by @larsoner)
- Improved the spacing of the log output (#764 by @larsoner)
+- Added caching of sensor and source average steps (#765 by @larsoner)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/__init__.py b/mne_bids_pipeline/__init__.py
index 39d9f4177..2826b97e6 100644
--- a/mne_bids_pipeline/__init__.py
+++ b/mne_bids_pipeline/__init__.py
@@ -2,6 +2,6 @@
try:
__version__ = version("mne_bids_pipeline")
-except PackageNotFoundError:
+except PackageNotFoundError: # pragma: no cover
# package is not installed
__version__ = "0.0.0"
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 812248ee4..85b0335e1 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1572,7 +1572,7 @@
time_frequency_subtract_evoked: bool = False
"""
-Whether to subtract the evoked signal (averaged across all epochs) from the
+Whether to subtract the evoked response (averaged across all epochs) from the
epochs before passing them to time-frequency analysis. Set this to `True` to
highlight induced activity.
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index ca507ba02..36568b1f2 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -36,6 +36,17 @@ def _import_config(
log=log,
)
+ extra_exec_params_keys = ()
+ extra_config = os.getenv("_MNE_BIDS_STUDY_TESTING_EXTRA_CONFIG", "")
+ if extra_config:
+ msg = f"With testing config: {extra_config}"
+ logger.info(**gen_log_kwargs(message=msg, emoji="override"))
+ _update_config_from_path(
+ config=config,
+ config_path=extra_config,
+ )
+ extra_exec_params_keys = ("_n_jobs",)
+
# Check it
if check:
_check_config(config)
@@ -69,7 +80,7 @@ def _import_config(
# Misc
"deriv_root",
"config_path",
- )
+ ) + extra_exec_params_keys
in_both = {"deriv_root"}
exec_params = SimpleNamespace(**{k: getattr(config, k) for k in keys})
for k in keys:
@@ -102,6 +113,32 @@ def _get_default_config():
return config
+def _update_config_from_path(
+ *,
+ config: SimpleNamespace,
+ config_path: PathLike,
+):
+ user_names = list()
+ config_path = pathlib.Path(config_path).expanduser().resolve(strict=True)
+ # Import configuration from an arbitrary path without having to fiddle
+ # with `sys.path`.
+ spec = importlib.util.spec_from_file_location(
+ name="custom_config", location=config_path
+ )
+ custom_cfg = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(custom_cfg)
+ for key in dir(custom_cfg):
+ if not key.startswith("__"):
+ # don't validate private vars, but do add to config
+ # (e.g., so that our hidden _raw_split_size is included)
+ if not key.startswith("_"):
+ user_names.append(key)
+ val = getattr(custom_cfg, key)
+ logger.debug("Overwriting: %s -> %s" % (key, val))
+ setattr(config, key, val)
+ return user_names
+
+
def _update_with_user_config(
*,
config: SimpleNamespace, # modified in-place
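The extracted helper's core trick, shown standalone: execute a Python config file from an arbitrary location without touching `sys.path`:

```python
import importlib.util
import pathlib

def load_config_module(config_path):
    config_path = pathlib.Path(config_path).expanduser().resolve(strict=True)
    spec = importlib.util.spec_from_file_location(
        name="custom_config", location=config_path
    )
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)  # runs the file in its own namespace
    return module

# cfg = load_config_module("~/study/config.py")  # hypothetical path
# print(cfg.n_jobs)
```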
@@ -121,23 +158,12 @@ def _update_with_user_config(
# 2. User config
user_names = list()
if config_path is not None:
- config_path = pathlib.Path(config_path).expanduser().resolve(strict=True)
- # Import configuration from an arbitrary path without having to fiddle
- # with `sys.path`.
- spec = importlib.util.spec_from_file_location(
- name="custom_config", location=config_path
+ user_names.extend(
+ _update_config_from_path(
+ config=config,
+ config_path=config_path,
+ )
)
- custom_cfg = importlib.util.module_from_spec(spec)
- spec.loader.exec_module(custom_cfg)
- for key in dir(custom_cfg):
- if not key.startswith("__"):
- # don't validate private vars, but do add to config
- # (e.g., so that our hidden _raw_split_size is included)
- if not key.startswith("_"):
- user_names.append(key)
- val = getattr(custom_cfg, key)
- logger.debug("Overwriting: %s -> %s" % (key, val))
- setattr(config, key, val)
config.config_path = config_path
# 3. Overrides via command-line switches
diff --git a/mne_bids_pipeline/_logging.py b/mne_bids_pipeline/_logging.py
index 56eea901f..6bcb21d73 100644
--- a/mne_bids_pipeline/_logging.py
+++ b/mne_bids_pipeline/_logging.py
@@ -154,3 +154,7 @@ def gen_log_kwargs(
def _linkfile(uri):
return f"[link=file://{uri}]{uri}[/link]"
+
+
+def _is_testing() -> bool:
+ return os.getenv("_MNE_BIDS_STUDY_TESTING", "") == "true"
diff --git a/mne_bids_pipeline/_main.py b/mne_bids_pipeline/_main.py
index 3c54a177d..cd9ee1037 100755
--- a/mne_bids_pipeline/_main.py
+++ b/mne_bids_pipeline/_main.py
@@ -11,6 +11,7 @@
from ._config_import import _import_config
from ._config_template import create_template_config
from ._logging import logger, gen_log_kwargs
+from ._parallel import get_parallel_backend
from ._run import _short_step_path
@@ -195,13 +196,18 @@ def main():
logger.title("Welcome aboard MNE-BIDS-Pipeline! 👋")
msg = f"Using configuration: {config}"
+ __mne_bids_pipeline_step__ = pathlib.Path(__file__) # used for logging
logger.info(**gen_log_kwargs(message=msg, emoji="📝"))
- logger.end()
-
config_imported = _import_config(
config_path=config_path,
overrides=overrides,
)
+ # Initialize dask now
+ with get_parallel_backend(config_imported.exec_params):
+ pass
+ del __mne_bids_pipeline_step__
+ logger.end()
+
for step_module in step_modules:
start = time.time()
step = _short_step_path(pathlib.Path(step_module.__file__))
diff --git a/mne_bids_pipeline/_parallel.py b/mne_bids_pipeline/_parallel.py
index c2f9430ae..12f70cf57 100644
--- a/mne_bids_pipeline/_parallel.py
+++ b/mne_bids_pipeline/_parallel.py
@@ -5,15 +5,25 @@
import joblib
-from ._logging import logger
+from ._logging import logger, gen_log_kwargs, _is_testing
-def get_n_jobs(*, exec_params: SimpleNamespace) -> int:
+def get_n_jobs(*, exec_params: SimpleNamespace, log_override: bool = False) -> int:
n_jobs = exec_params.n_jobs
if n_jobs < 0:
n_cores = joblib.cpu_count()
n_jobs = min(n_cores + n_jobs + 1, n_cores)
+ # Shim to allow overriding n_jobs for specific steps
+ if _is_testing() and hasattr(exec_params, "_n_jobs"):
+ from ._run import _get_step_path, _short_step_path
+
+ step_path = _short_step_path(_get_step_path())
+ orig_n_jobs = n_jobs
+ n_jobs = exec_params._n_jobs.get(step_path, n_jobs)
+ if log_override and n_jobs != orig_n_jobs:
+ msg = f"Overriding n_jobs: {orig_n_jobs}→{n_jobs}"
+ logger.info(**gen_log_kwargs(message=msg, emoji="override"))
return n_jobs
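A toy sketch of the testing shim (hypothetical step names): a per-step mapping can override the globally configured `n_jobs` without touching user configs:

```python
from types import SimpleNamespace

def resolve_n_jobs(exec_params, step_path, *, testing):
    n_jobs = exec_params.n_jobs
    if testing and hasattr(exec_params, "_n_jobs"):
        n_jobs = exec_params._n_jobs.get(step_path, n_jobs)
    return n_jobs

exec_params = SimpleNamespace(n_jobs=4,
                              _n_jobs={"sensor/_99_group_average": 1})
print(resolve_n_jobs(exec_params, "sensor/_99_group_average", testing=True))
# -> 1
print(resolve_n_jobs(exec_params, "preprocessing/_03_maxfilter", testing=True))
# -> 4
```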
@@ -30,14 +40,16 @@ def setup_dask_client(*, exec_params: SimpleNamespace) -> None:
return
n_workers = get_n_jobs(exec_params=exec_params)
- logger.info(f"👾 Initializing Dask client with {n_workers} workers …")
+ msg = f"Dask initializing with {n_workers} workers …"
+ logger.info(**gen_log_kwargs(message=msg, emoji="👾"))
if exec_params.dask_temp_dir is None:
this_dask_temp_dir = exec_params.deriv_root / ".dask-worker-space"
else:
this_dask_temp_dir = exec_params.dask_temp_dir
- logger.info(f"📂 Temporary directory is: {this_dask_temp_dir}")
+ msg = f"Dask temporary directory: {this_dask_temp_dir}"
+ logger.info(**gen_log_kwargs(message=msg, emoji="📂"))
dask.config.set(
{
"temporary-directory": this_dask_temp_dir,
@@ -61,10 +73,8 @@ def setup_dask_client(*, exec_params: SimpleNamespace) -> None:
client.auto_restart = False # don't restart killed workers
dashboard_url = client.dashboard_link
- logger.info(
- f"⏱ The Dask client is ready. Open {dashboard_url} "
- f"to monitor the workers.\n"
- )
+ msg = "Dask client dashboard: " f"[link={dashboard_url}]{dashboard_url}[/link]"
+ logger.info(**gen_log_kwargs(message=msg, emoji="🌎"))
if exec_params.dask_open_dashboard:
import webbrowser
@@ -76,29 +86,37 @@ def setup_dask_client(*, exec_params: SimpleNamespace) -> None:
def get_parallel_backend_name(
- *, exec_params: SimpleNamespace
+ *,
+ exec_params: SimpleNamespace,
) -> Literal["dask", "loky"]:
if (
exec_params.parallel_backend == "loky"
or get_n_jobs(exec_params=exec_params) == 1
):
- return "loky"
+ backend = "loky"
elif exec_params.parallel_backend == "dask":
# Disable interactive plotting backend
import matplotlib
matplotlib.use("Agg")
- return "dask"
+ backend = "dask"
else:
# TODO: Move to value validation step
raise ValueError(f"Unknown parallel backend: {exec_params.parallel_backend}")
+ return backend
+
def get_parallel_backend(exec_params: SimpleNamespace) -> joblib.parallel_backend:
import joblib
backend = get_parallel_backend_name(exec_params=exec_params)
- kwargs = {"n_jobs": get_n_jobs(exec_params=exec_params)}
+ kwargs = {
+ "n_jobs": get_n_jobs(
+ exec_params=exec_params,
+ log_override=True,
+ )
+ }
if backend == "loky":
kwargs["inner_max_num_threads"] = 1
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index 90f651b4e..3d9736c95 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -1,8 +1,6 @@
import contextlib
from functools import lru_cache
from io import StringIO
-import os.path as op
-from pathlib import Path
from typing import Optional, List, Literal
from types import SimpleNamespace
@@ -18,7 +16,7 @@
from mne_bids import BIDSPath
from mne_bids.stats import count_events
-from ._config_utils import sanitize_cond_name, get_subjects, _restrict_analyze_channels
+from ._config_utils import get_all_contrasts
from ._decoding import _handle_csp_args
from ._logging import logger, gen_log_kwargs, _linkfile
@@ -472,7 +470,8 @@ def add_event_counts(
except ValueError:
msg = "Could not read events."
logger.warning(**gen_log_kwargs(message=msg))
- df_events = None
+ return
+ logger.info(**gen_log_kwargs(message="Adding event counts to report …"))
if df_events is not None:
css_classes = ("table", "table-striped", "table-borderless", "table-hover")
@@ -549,330 +548,13 @@ def _all_conditions(*, cfg):
conditions = list(cfg.conditions.keys())
else:
conditions = cfg.conditions.copy()
- conditions.extend([contrast["name"] for contrast in cfg.all_contrasts])
+ all_contrasts = get_all_contrasts(cfg)
+ conditions.extend([contrast["name"] for contrast in all_contrasts])
return conditions
-def run_report_average_sensor(
- *,
- cfg: SimpleNamespace,
- exec_params: SimpleNamespace,
- subject: str,
- session: Optional[str],
-) -> None:
- msg = "Generating grand average report …"
- logger.info(**gen_log_kwargs(message=msg))
- assert matplotlib.get_backend() == "agg", matplotlib.get_backend()
-
- evoked_fname = BIDSPath(
- subject=subject,
- session=session,
- task=cfg.task,
- acquisition=cfg.acq,
- run=None,
- recording=cfg.rec,
- space=cfg.space,
- suffix="ave",
- extension=".fif",
- datatype=cfg.datatype,
- root=cfg.deriv_root,
- check=False,
- )
-
- title = f"sub-{subject}"
- if session is not None:
- title += f", ses-{session}"
- if cfg.task is not None:
- title += f", task-{cfg.task}"
-
- all_evokeds = mne.read_evokeds(evoked_fname)
- for evoked in all_evokeds:
- _restrict_analyze_channels(evoked, cfg)
- conditions = _all_conditions(cfg=cfg)
- assert len(conditions) == len(all_evokeds)
- all_evokeds = {cond: evoked for cond, evoked in zip(conditions, all_evokeds)}
-
- with _open_report(
- cfg=cfg, exec_params=exec_params, subject=subject, session=session
- ) as report:
- #######################################################################
- #
- # Add event stats.
- #
- add_event_counts(
- cfg=cfg,
- report=report,
- subject=subject,
- session=session,
- )
-
- #######################################################################
- #
- # Visualize evoked responses.
- #
- if all_evokeds:
- msg = (
- f"Adding {len(all_evokeds)} evoked signals and contrasts to "
- "the report."
- )
- else:
- msg = "No evoked conditions or contrasts found."
- logger.info(**gen_log_kwargs(message=msg))
- for condition, evoked in all_evokeds.items():
- tags = ("evoked", _sanitize_cond_tag(condition))
- if condition in cfg.conditions:
- title = f"Condition: {condition}"
- else: # It's a contrast of two conditions.
- title = f"Contrast: {condition}"
- tags = tags + ("contrast",)
-
- report.add_evokeds(
- evokeds=evoked,
- titles=title,
- projs=False,
- tags=tags,
- n_time_points=cfg.report_evoked_n_time_points,
- # captions=evoked.comment, # TODO upstream
- replace=True,
- n_jobs=1, # don't auto parallelize
- )
-
- #######################################################################
- #
- # Visualize decoding results.
- #
- if cfg.decode and cfg.decoding_contrasts:
- msg = "Adding decoding results."
- logger.info(**gen_log_kwargs(message=msg))
- add_decoding_grand_average(session=session, cfg=cfg, report=report)
-
- if cfg.decode and cfg.decoding_csp:
- # No need for a separate message here because these are very quick
- # and the general message above is sufficient
- add_csp_grand_average(session=session, cfg=cfg, report=report)
-
-
-def run_report_average_source(
- *,
- cfg: SimpleNamespace,
- exec_params: SimpleNamespace,
- subject: str,
- session: Optional[str],
-) -> None:
- #######################################################################
- #
- # Visualize forward solution, inverse operator, and inverse solutions.
- #
- evoked_fname = BIDSPath(
- subject=subject,
- session=session,
- task=cfg.task,
- acquisition=cfg.acq,
- run=None,
- recording=cfg.rec,
- space=cfg.space,
- suffix="ave",
- extension=".fif",
- datatype=cfg.datatype,
- root=cfg.deriv_root,
- check=False,
- )
- evokeds = mne.read_evokeds(evoked_fname)
- method = cfg.inverse_method
- inverse_str = method
- hemi_str = "hemi" # MNE will auto-append '-lh' and '-rh'.
- morph_str = "morph2fsaverage"
- conditions = _all_conditions(cfg=cfg)
- with _open_report(
- cfg=cfg, exec_params=exec_params, subject=subject, session=session
- ) as report:
- for condition, evoked in zip(conditions, evokeds):
- tags = (
- "source-estimate",
- _sanitize_cond_tag(condition),
- )
- if condition in cfg.conditions:
- title = f"Average: {condition}"
- else: # It's a contrast of two conditions.
- title = f"Average contrast: {condition}"
- tags = tags + ("contrast",)
- cond_str = sanitize_cond_name(condition)
- fname_stc_avg = evoked_fname.copy().update(
- suffix=f"{cond_str}+{inverse_str}+{morph_str}+{hemi_str}",
- extension=None,
- )
- if not Path(f"{fname_stc_avg.fpath}-lh.stc").exists():
- continue
- report.add_stc(
- stc=fname_stc_avg,
- title=title,
- subject="fsaverage",
- subjects_dir=cfg.fs_subjects_dir,
- n_time_points=cfg.report_stc_n_time_points,
- tags=tags,
- replace=True,
- )
-
-
-def add_decoding_grand_average(
- *,
- session: Optional[str],
- cfg: SimpleNamespace,
- report: mne.Report,
-):
- """Add decoding results to the grand average report."""
- import matplotlib.pyplot as plt # nested import to help joblib
-
- bids_path = BIDSPath(
- subject="average",
- session=session,
- task=cfg.task,
- acquisition=cfg.acq,
- run=None,
- recording=cfg.rec,
- space=cfg.space,
- suffix="ave",
- extension=".fif",
- datatype=cfg.datatype,
- root=cfg.deriv_root,
- check=False,
- )
-
- # Full-epochs decoding
- all_decoding_scores = []
- for contrast in cfg.decoding_contrasts:
- cond_1, cond_2 = contrast
- a_vs_b = f"{cond_1}+{cond_2}".replace(op.sep, "")
- processing = f"{a_vs_b}+FullEpochs+{cfg.decoding_metric}"
- processing = processing.replace("_", "-").replace("-", "")
- fname_decoding = bids_path.copy().update(
- processing=processing, suffix="decoding", extension=".mat"
- )
- decoding_data = loadmat(fname_decoding)
- all_decoding_scores.append(np.atleast_1d(decoding_data["scores"].squeeze()))
- del fname_decoding, processing, a_vs_b, decoding_data
-
- fig, caption = _plot_full_epochs_decoding_scores(
- contrast_names=_contrasts_to_names(cfg.decoding_contrasts),
- scores=all_decoding_scores,
- metric=cfg.decoding_metric,
- kind="grand-average",
- )
- title = f"Full-epochs decoding: {cond_1} vs. {cond_2}"
- report.add_figure(
- fig=fig,
- title=title,
- section="Decoding: full-epochs",
- caption=caption,
- tags=(
- "epochs",
- "contrast",
- "decoding",
- *[
- f"{_sanitize_cond_tag(cond_1)}–{_sanitize_cond_tag(cond_2)}"
- for cond_1, cond_2 in cfg.decoding_contrasts
- ],
- ),
- replace=True,
- )
- # close figure to save memory
- plt.close(fig)
- del fig, caption, title
-
- # Time-by-time decoding
- for contrast in cfg.decoding_contrasts:
- cond_1, cond_2 = contrast
- a_vs_b = f"{cond_1}+{cond_2}".replace(op.sep, "")
- section = "Decoding: time-by-time"
- tags = (
- "epochs",
- "contrast",
- "decoding",
- f"{_sanitize_cond_tag(cond_1)}–{_sanitize_cond_tag(cond_2)}",
- )
- processing = f"{a_vs_b}+TimeByTime+{cfg.decoding_metric}"
- processing = processing.replace("_", "-").replace("-", "")
- fname_decoding = bids_path.copy().update(
- processing=processing, suffix="decoding", extension=".mat"
- )
- decoding_data = loadmat(fname_decoding)
- del fname_decoding, processing, a_vs_b
-
- # Plot scores
- fig = _plot_time_by_time_decoding_scores_gavg(
- cfg=cfg,
- decoding_data=decoding_data,
- )
- caption = (
- f'Based on N={decoding_data["N"].squeeze()} '
- f"subjects. Standard error and confidence interval "
- f"of the mean were bootstrapped with {cfg.n_boot} "
- f"resamples. CI must not be used for statistical inference here, "
- f"as it is not corrected for multiple testing."
- )
- if len(get_subjects(cfg)) > 1:
- caption += (
- f" Time periods with decoding performance significantly above "
- f"chance, if any, were derived with a one-tailed "
- f"cluster-based permutation test "
- f'({decoding_data["cluster_n_permutations"].squeeze()} '
- f"permutations) and are highlighted in yellow."
- )
- title = f"Decoding over time: {cond_1} vs. {cond_2}"
- report.add_figure(
- fig=fig,
- title=title,
- caption=caption,
- section=section,
- tags=tags,
- replace=True,
- )
- plt.close(fig)
-
- # Plot t-values used to form clusters
- if len(get_subjects(cfg)) > 1:
- fig = plot_time_by_time_decoding_t_values(decoding_data=decoding_data)
- t_threshold = np.round(decoding_data["cluster_t_threshold"], 3).item()
- caption = (
- f"Observed t-values. Time points with "
- f"t-values > {t_threshold} were used to form clusters."
- )
- report.add_figure(
- fig=fig,
- title=f"t-values across time: {cond_1} vs. {cond_2}",
- caption=caption,
- section=section,
- tags=tags,
- replace=True,
- )
- plt.close(fig)
-
- if cfg.decoding_time_generalization:
- fig = _plot_decoding_time_generalization(
- decoding_data=decoding_data,
- metric=cfg.decoding_metric,
- kind="grand-average",
- )
- caption = (
- f"Time generalization (generalization across time, GAT): "
- f"each classifier is trained on each time point, and tested "
- f"on all other time points. The results were averaged across "
- f'N={decoding_data["N"].item()} subjects.'
- )
- title = f"Time generalization: {cond_1} vs. {cond_2}"
- report.add_figure(
- fig=fig,
- title=title,
- caption=caption,
- section=section,
- tags=tags,
- replace=True,
- )
- plt.close(fig)
-
-
def _sanitize_cond_tag(cond):
- return cond.lower().replace(" ", "-")
+ return str(cond).lower().replace(" ", "-")
def _imshow_tf(
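The `str()` cast is the substance of this one-line fix: condition labels are not guaranteed to be strings, and calling `.lower()` on, say, an integer event code would raise. A quick illustration:

def _sanitize_cond_tag(cond):
    return str(cond).lower().replace(" ", "-")

print(_sanitize_cond_tag("Auditory Left"))  # auditory-left
print(_sanitize_cond_tag(1))                # 1 (previously: AttributeError)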
@@ -913,27 +595,18 @@ def _imshow_tf(
def add_csp_grand_average(
*,
- session: str,
cfg: SimpleNamespace,
+ subject: str,
+ session: str,
report: mne.Report,
+ cond_1: str,
+ cond_2: str,
+ fname_csp_freq_results: BIDSPath,
+ fname_csp_cluster_results: pd.DataFrame,
):
"""Add CSP decoding results to the grand average report."""
import matplotlib.pyplot as plt # nested import to help joblib
- bids_path = BIDSPath(
- subject="average",
- session=session,
- task=cfg.task,
- acquisition=cfg.acq,
- run=None,
- recording=cfg.rec,
- space=cfg.space,
- suffix="decoding",
- datatype=cfg.datatype,
- root=cfg.deriv_root,
- check=False,
- )
-
# First, plot decoding scores across frequency bins (entire epochs).
section = "Decoding: CSP"
freq_name_to_bins_map = _handle_csp_args(
@@ -941,242 +614,217 @@ def add_csp_grand_average(
cfg.decoding_csp_freqs,
cfg.decoding_metric,
)
- for contrast in cfg.decoding_contrasts:
- cond_1, cond_2 = contrast
- a_vs_b = f"{cond_1}+{cond_2}".replace(op.sep, "")
- processing = f"{a_vs_b}+CSP+{cfg.decoding_metric}"
- processing = processing.replace("_", "-").replace("-", "")
- fname_csp_freq_results = bids_path.copy().update(
- processing=processing,
- extension=".xlsx",
- )
- csp_freq_results = pd.read_excel(
- fname_csp_freq_results, sheet_name="CSP Frequency"
- )
- freq_bin_starts = list()
- freq_bin_widths = list()
- decoding_scores = list()
- error_bars = list()
- for freq_range_name, freq_bins in freq_name_to_bins_map.items():
- results = csp_freq_results.loc[
- csp_freq_results["freq_range_name"] == freq_range_name, :
- ]
- results.reset_index(drop=True, inplace=True)
- assert len(results) == len(freq_bins)
- for bi, freq_bin in enumerate(freq_bins):
- freq_bin_starts.append(freq_bin[0])
- freq_bin_widths.append(np.diff(freq_bin)[0])
- decoding_scores.append(results["mean"][bi])
- cis_lower = results["mean_ci_lower"][bi]
- cis_upper = results["mean_ci_upper"][bi]
- error_bars_lower = decoding_scores[-1] - cis_lower
- error_bars_upper = cis_upper - decoding_scores[-1]
- error_bars.append(np.stack([error_bars_lower, error_bars_upper]))
- assert len(error_bars[-1]) == 2 # lower, upper
- del cis_lower, cis_upper, error_bars_lower, error_bars_upper
- error_bars = np.array(error_bars, float).T
-
- if cfg.decoding_metric == "roc_auc":
- metric = "ROC AUC"
-
- fig, ax = plt.subplots(constrained_layout=True)
- ax.bar(
- x=freq_bin_starts,
- width=freq_bin_widths,
- height=decoding_scores,
- align="edge",
- yerr=error_bars,
- edgecolor="black",
- )
- ax.set_ylim([0, 1.02])
- offset = matplotlib.transforms.offset_copy(
- ax.transData, fig, 0, 5, units="points"
- )
- for freq_range_name, freq_bins in freq_name_to_bins_map.items():
- start = freq_bins[0][0]
- stop = freq_bins[-1][1]
- width = stop - start
- ax.text(
- x=start + width / 2,
- y=0.0,
- transform=offset,
- s=freq_range_name,
- ha="center",
- va="bottom",
- )
- ax.axhline(0.5, color="black", linestyle="--", label="chance")
- ax.legend()
- ax.set_xlabel("Frequency (Hz)")
- ax.set_ylabel(f"Mean decoding score ({metric})")
- tags = (
- "epochs",
- "contrast",
- "decoding",
- "csp",
- f"{_sanitize_cond_tag(cond_1)}–{_sanitize_cond_tag(cond_2)}",
- )
- title = f"CSP decoding: {cond_1} vs. {cond_2}"
- report.add_figure(
- fig=fig,
- title=title,
- section=section,
- caption="Mean decoding scores. Error bars represent "
- "bootstrapped 95% confidence intervals.",
- tags=tags,
- replace=True,
+ freq_bin_starts = list()
+ freq_bin_widths = list()
+ decoding_scores = list()
+ error_bars = list()
+ csp_freq_results = pd.read_excel(fname_csp_freq_results, sheet_name="CSP Frequency")
+ for freq_range_name, freq_bins in freq_name_to_bins_map.items():
+ results = csp_freq_results.loc[
+ csp_freq_results["freq_range_name"] == freq_range_name, :
+ ]
+ results.reset_index(drop=True, inplace=True)
+ assert len(results) == len(freq_bins)
+ for bi, freq_bin in enumerate(freq_bins):
+ freq_bin_starts.append(freq_bin[0])
+ freq_bin_widths.append(np.diff(freq_bin)[0])
+ decoding_scores.append(results["mean"][bi])
+ cis_lower = results["mean_ci_lower"][bi]
+ cis_upper = results["mean_ci_upper"][bi]
+ error_bars_lower = decoding_scores[-1] - cis_lower
+ error_bars_upper = cis_upper - decoding_scores[-1]
+ error_bars.append(np.stack([error_bars_lower, error_bars_upper]))
+ assert len(error_bars[-1]) == 2 # lower, upper
+ del cis_lower, cis_upper, error_bars_lower, error_bars_upper
+ error_bars = np.array(error_bars, float).T
+
+ if cfg.decoding_metric == "roc_auc":
+ metric = "ROC AUC"
+
+ fig, ax = plt.subplots(constrained_layout=True)
+ ax.bar(
+ x=freq_bin_starts,
+ width=freq_bin_widths,
+ height=decoding_scores,
+ align="edge",
+ yerr=error_bars,
+ edgecolor="black",
+ )
+ ax.set_ylim([0, 1.02])
+ offset = matplotlib.transforms.offset_copy(ax.transData, fig, 0, 5, units="points")
+ for freq_range_name, freq_bins in freq_name_to_bins_map.items():
+ start = freq_bins[0][0]
+ stop = freq_bins[-1][1]
+ width = stop - start
+ ax.text(
+ x=start + width / 2,
+ y=0.0,
+ transform=offset,
+ s=freq_range_name,
+ ha="center",
+ va="bottom",
)
+ ax.axhline(0.5, color="black", linestyle="--", label="chance")
+ ax.legend()
+ ax.set_xlabel("Frequency (Hz)")
+ ax.set_ylabel(f"Mean decoding score ({metric})")
+ tags = (
+ "epochs",
+ "contrast",
+ "decoding",
+ "csp",
+ f"{_sanitize_cond_tag(cond_1)}–{_sanitize_cond_tag(cond_2)}",
+ )
+ title = f"CSP decoding: {cond_1} vs. {cond_2}"
+ report.add_figure(
+ fig=fig,
+ title=title,
+ section=section,
+ caption="Mean decoding scores. Error bars represent "
+ "bootstrapped 95% confidence intervals.",
+ tags=tags,
+ replace=True,
+ )
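The asymmetric error bars built above are offsets from the mean, stacked as (lower, upper) and transposed to the (2, N) shape Matplotlib expects for `yerr`. A self-contained sketch with made-up scores and CI bounds:

import numpy as np
import matplotlib.pyplot as plt

scores = np.array([0.55, 0.62, 0.58])    # hypothetical mean decoding scores
ci_lower = np.array([0.50, 0.57, 0.52])  # hypothetical 95% CI bounds
ci_upper = np.array([0.60, 0.68, 0.63])
# yerr wants positive offsets of shape (2, N): row 0 below, row 1 above.
yerr = np.stack([scores - ci_lower, ci_upper - scores])
assert yerr.shape == (2, 3)
fig, ax = plt.subplots(constrained_layout=True)
ax.bar(x=[4.0, 8.0, 13.0], width=[4.0, 5.0, 17.0], height=scores,
       align="edge", yerr=yerr, edgecolor="black")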
# Now, plot decoding scores across time-frequency bins.
- for contrast in cfg.decoding_contrasts:
- cond_1, cond_2 = contrast
- a_vs_b = f"{cond_1}+{cond_2}".replace(op.sep, "")
- processing = f"{a_vs_b}+CSP+{cfg.decoding_metric}"
- processing = processing.replace("_", "-").replace("-", "")
- fname_csp_cluster_results = bids_path.copy().update(
- processing=processing,
- extension=".mat",
+ csp_cluster_results = loadmat(fname_csp_cluster_results)
+ fig, ax = plt.subplots(
+ nrows=1, ncols=2, sharex=True, sharey=True, constrained_layout=True
+ )
+ n_clu = 0
+ cbar = None
+ lims = [np.inf, -np.inf, np.inf, -np.inf]
+ for freq_range_name, bins in freq_name_to_bins_map.items():
+ results = csp_cluster_results[freq_range_name][0][0]
+ mean_crossval_scores = results["mean_crossval_scores"].ravel()
+ # t_vals = results['t_vals']
+ clusters = results["clusters"]
+ cluster_p_vals = np.atleast_1d(results["cluster_p_vals"].squeeze())
+ tmin = results["time_bin_edges"].ravel()
+ tmin, tmax = tmin[:-1], tmin[1:]
+ fmin = results["freq_bin_edges"].ravel()
+ fmin, fmax = fmin[:-1], fmin[1:]
+ lims[0] = min(lims[0], tmin.min())
+ lims[1] = max(lims[1], tmax.max())
+ lims[2] = min(lims[2], fmin.min())
+ lims[3] = max(lims[3], fmax.max())
+ # replicate, matching time-frequency order during clustering
+ fmin, fmax = np.tile(fmin, len(tmin)), np.tile(fmax, len(tmax))
+ tmin, tmax = np.repeat(tmin, len(bins)), np.repeat(tmax, len(bins))
+ assert fmin.shape == fmax.shape == tmin.shape == tmax.shape
+ assert fmin.shape == mean_crossval_scores.shape
+ cluster_t_threshold = results["cluster_t_threshold"].ravel().item()
+
+ significant_cluster_idx = np.where(
+ cluster_p_vals < cfg.cluster_permutation_p_threshold
+ )[0]
+ significant_clusters = clusters[significant_cluster_idx]
+ n_clu += len(significant_cluster_idx)
+
+ # XXX Add support for more metrics
+ assert cfg.decoding_metric == "roc_auc"
+ metric = "ROC AUC"
+ vmax = (
+ max(
+ np.abs(mean_crossval_scores.min() - 0.5),
+ np.abs(mean_crossval_scores.max() - 0.5),
+ )
+ + 0.5
)
- csp_cluster_results = loadmat(fname_csp_cluster_results)
-
- fig, ax = plt.subplots(
- nrows=1, ncols=2, sharex=True, sharey=True, constrained_layout=True
+ vmin = 0.5 - (vmax - 0.5)
+ # For diverging gray colormap, we need to combine two existing
+ # colormaps, as there is no diverging colormap with gray/black at
+ # both endpoints.
+ from matplotlib.cm import gray, gray_r
+ from matplotlib.colors import ListedColormap
+
+ black_to_white = gray(np.linspace(start=0, stop=1, endpoint=False, num=128))
+ white_to_black = gray_r(np.linspace(start=0, stop=1, endpoint=False, num=128))
+ black_to_white_to_black = np.vstack((black_to_white, white_to_black))
+ diverging_gray_cmap = ListedColormap(
+ black_to_white_to_black, name="DivergingGray"
)
- n_clu = 0
- cbar = None
- lims = [np.inf, -np.inf, np.inf, -np.inf]
- for freq_range_name, bins in freq_name_to_bins_map.items():
- results = csp_cluster_results[freq_range_name][0][0]
- mean_crossval_scores = results["mean_crossval_scores"].ravel()
- # t_vals = results['t_vals']
- clusters = results["clusters"]
- cluster_p_vals = np.atleast_1d(results["cluster_p_vals"].squeeze())
- tmin = results["time_bin_edges"].ravel()
- tmin, tmax = tmin[:-1], tmin[1:]
- fmin = results["freq_bin_edges"].ravel()
- fmin, fmax = fmin[:-1], fmin[1:]
- lims[0] = min(lims[0], tmin.min())
- lims[1] = max(lims[1], tmax.max())
- lims[2] = min(lims[2], fmin.min())
- lims[3] = max(lims[3], fmax.max())
- # replicate, matching time-frequency order during clustering
- fmin, fmax = np.tile(fmin, len(tmin)), np.tile(fmax, len(tmax))
- tmin, tmax = np.repeat(tmin, len(bins)), np.repeat(tmax, len(bins))
- assert fmin.shape == fmax.shape == tmin.shape == tmax.shape
- assert fmin.shape == mean_crossval_scores.shape
- cluster_t_threshold = results["cluster_t_threshold"].ravel().item()
-
- significant_cluster_idx = np.where(
- cluster_p_vals < cfg.cluster_permutation_p_threshold
- )[0]
- significant_clusters = clusters[significant_cluster_idx]
- n_clu += len(significant_cluster_idx)
-
- # XXX Add support for more metrics
- assert cfg.decoding_metric == "roc_auc"
- metric = "ROC AUC"
- vmax = (
- max(
- np.abs(mean_crossval_scores.min() - 0.5),
- np.abs(mean_crossval_scores.max() - 0.5),
- )
- + 0.5
- )
- vmin = 0.5 - (vmax - 0.5)
- # For diverging gray colormap, we need to combine two existing
- # colormaps, as there is no diverging colormap with gray/black at
- # both endpoints.
- from matplotlib.cm import gray, gray_r
- from matplotlib.colors import ListedColormap
-
- black_to_white = gray(np.linspace(start=0, stop=1, endpoint=False, num=128))
- white_to_black = gray_r(
- np.linspace(start=0, stop=1, endpoint=False, num=128)
- )
- black_to_white_to_black = np.vstack((black_to_white, white_to_black))
- diverging_gray_cmap = ListedColormap(
- black_to_white_to_black, name="DivergingGray"
- )
- cmap_gray = diverging_gray_cmap
- img = _imshow_tf(
- mean_crossval_scores,
- ax[0],
- tmin=tmin,
- tmax=tmax,
- fmin=fmin,
- fmax=fmax,
- vmin=vmin,
- vmax=vmax,
- )
- if cbar is None:
- ax[0].set_xlabel("Time (s)")
- ax[0].set_ylabel("Frequency (Hz)")
- ax[1].set_xlabel("Time (s)")
- cbar = fig.colorbar(
- ax=ax[1], shrink=0.75, orientation="vertical", mappable=img
- )
- cbar.set_label(f"Mean decoding score ({metric})")
- offset = matplotlib.transforms.offset_copy(
- ax[0].transData, fig, 6, 0, units="points"
- )
- ax[0].text(
- tmin.min(),
- 0.5 * fmin.min() + 0.5 * fmax.max(),
- freq_range_name,
- transform=offset,
- ha="left",
- va="center",
- rotation=90,
+ cmap_gray = diverging_gray_cmap
+ img = _imshow_tf(
+ mean_crossval_scores,
+ ax[0],
+ tmin=tmin,
+ tmax=tmax,
+ fmin=fmin,
+ fmax=fmax,
+ vmin=vmin,
+ vmax=vmax,
+ )
+ if cbar is None:
+ ax[0].set_xlabel("Time (s)")
+ ax[0].set_ylabel("Frequency (Hz)")
+ ax[1].set_xlabel("Time (s)")
+ cbar = fig.colorbar(
+ ax=ax[1], shrink=0.75, orientation="vertical", mappable=img
)
+ cbar.set_label(f"Mean decoding score ({metric})")
+ offset = matplotlib.transforms.offset_copy(
+ ax[0].transData, fig, 6, 0, units="points"
+ )
+ ax[0].text(
+ tmin.min(),
+ 0.5 * fmin.min() + 0.5 * fmax.max(),
+ freq_range_name,
+ transform=offset,
+ ha="left",
+ va="center",
+ rotation=90,
+ )
- if len(significant_clusters):
- # Create a masked array that only shows the T-values for
- # time-frequency bins that belong to significant clusters.
- if len(significant_clusters) == 1:
- mask = ~significant_clusters[0].astype(bool)
- else:
- mask = ~np.logical_or(*significant_clusters)
- mask = mask.ravel()
+ if len(significant_clusters):
+ # Create a masked array that only shows the T-values for
+ # time-frequency bins that belong to significant clusters.
+ if len(significant_clusters) == 1:
+ mask = ~significant_clusters[0].astype(bool)
else:
- mask = np.ones(mean_crossval_scores.shape, dtype=bool)
- _imshow_tf(
- mean_crossval_scores,
- ax[1],
- tmin=tmin,
- tmax=tmax,
- fmin=fmin,
- fmax=fmax,
- vmin=vmin,
- vmax=vmax,
- mask=mask,
- cmap_masked=cmap_gray,
- )
-
- ax[0].set_xlim(lims[:2])
- ax[0].set_ylim(lims[2:])
- ax[0].set_title("Scores")
- ax[1].set_title("Masked")
- tags = (
- "epochs",
- "contrast",
- "decoding",
- "csp",
- f"{_sanitize_cond_tag(cond_1)}–{_sanitize_cond_tag(cond_2)}",
- )
- title = f"CSP TF decoding: {cond_1} vs. {cond_2}"
- report.add_figure(
- fig=fig,
- title=title,
- section=section,
- caption=f"Found {n_clu} "
- f"cluster{_pl(n_clu)} with "
- f"p < {cfg.cluster_permutation_p_threshold} "
- f"(clustering bins with absolute t-values > "
- f"{round(cluster_t_threshold, 3)}).",
- tags=tags,
- replace=True,
+ mask = ~np.logical_or(*significant_clusters)
+ mask = mask.ravel()
+ else:
+ mask = np.ones(mean_crossval_scores.shape, dtype=bool)
+ _imshow_tf(
+ mean_crossval_scores,
+ ax[1],
+ tmin=tmin,
+ tmax=tmax,
+ fmin=fmin,
+ fmax=fmax,
+ vmin=vmin,
+ vmax=vmax,
+ mask=mask,
+ cmap_masked=cmap_gray,
)
+ ax[0].set_xlim(lims[:2])
+ ax[0].set_ylim(lims[2:])
+ ax[0].set_title("Scores")
+ ax[1].set_title("Masked")
+ tags = (
+ "epochs",
+ "contrast",
+ "decoding",
+ "csp",
+ f"{_sanitize_cond_tag(cond_1)}–{_sanitize_cond_tag(cond_2)}",
+ )
+ title = f"CSP TF decoding: {cond_1} vs. {cond_2}"
+ report.add_figure(
+ fig=fig,
+ title=title,
+ section=section,
+ caption=f"Found {n_clu} "
+ f"cluster{_pl(n_clu)} with "
+ f"p < {cfg.cluster_permutation_p_threshold} "
+ f"(clustering bins with absolute t-values > "
+ f"{round(cluster_t_threshold, 3)}).",
+ tags=tags,
+ replace=True,
+ )
+
@contextlib.contextmanager
def _agg_backend():
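A side note on the masking logic kept above: the cluster arrays mark bins inside significant clusters, and the inverted mask handed to `_imshow_tf` hides everything else. A small NumPy-only sketch with made-up data (`_imshow_tf` itself is pipeline-internal):

import numpy as np

# Hypothetical 2x3 time-frequency grid with one significant cluster.
mean_scores = np.array([[0.52, 0.61, 0.55],
                        [0.50, 0.63, 0.58]])
cluster = np.array([[0, 1, 0],
                    [0, 1, 1]], dtype=bool)  # True = bin belongs to the cluster
mask = ~cluster                              # True = hide this bin
masked = np.ma.masked_where(mask, mean_scores)
print(masked)  # only the in-cluster scores remain visible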
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index fc6742581..5908e4b0c 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -4,7 +4,6 @@
import functools
import hashlib
import inspect
-import os
import pathlib
import pdb
import sys
@@ -20,7 +19,7 @@
from mne_bids import BIDSPath
from ._config_utils import get_task
-from ._logging import logger, gen_log_kwargs
+from ._logging import logger, gen_log_kwargs, _is_testing
def failsafe_run(
@@ -85,7 +84,7 @@ def __mne_bids_pipeline_failsafe_wrapper__(*args, **kwargs):
if on_error == "abort":
message += f"\n\nAborting pipeline run. The traceback is:\n\n{tb}"
- if os.getenv("_MNE_BIDS_STUDY_TESTING", "") == "true":
+ if _is_testing():
raise
logger.error(
**gen_log_kwargs(message=message, **kwargs_copy, emoji="❌")
@@ -357,8 +356,10 @@ def _get_step_path(
if "steps" in fname.parts:
return fname
else: # pragma: no cover
- if frame.function == "__mne_bids_pipeline_failsafe_wrapper__":
+ try:
return frame.frame.f_locals["__mne_bids_pipeline_step__"]
+ except KeyError:
+ pass
else: # pragma: no cover
paths = "\n".join(paths)
raise RuntimeError(f"Could not find step path in call stack:\n{paths}")
diff --git a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
index effc99f68..f36ce8e11 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
@@ -151,11 +151,13 @@ def apply_ica(
assert len(in_files) == 0, in_files.keys()
# Report
+ kwargs = dict()
if ica.exclude:
msg = "Adding ICA to report."
else:
msg = "Skipping ICA addition to report, no components marked as bad."
- logger.info(**gen_log_kwargs(message=msg))
+ kwargs["emoji"] = "skip"
+ logger.info(**gen_log_kwargs(message=msg, **kwargs))
if ica.exclude:
with _open_report(
cfg=cfg, exec_params=exec_params, subject=subject, session=session
diff --git a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
index 00c6c64ef..a0f7d1e3e 100644
--- a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
+++ b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
@@ -12,10 +12,11 @@
get_all_contrasts,
_bids_kwargs,
_restrict_analyze_channels,
+ _pl,
)
from ..._logging import gen_log_kwargs, logger
from ..._parallel import parallel_func, get_parallel_backend
-from ..._report import _open_report, _sanitize_cond_tag
+from ..._report import _open_report, _sanitize_cond_tag, _all_conditions
from ..._run import failsafe_run, save_logs, _sanitize_callable, _prep_out_files
@@ -98,10 +99,17 @@ def run_evoked(
# Report
if evokeds:
- msg = f"Adding {len(evokeds)} evoked signals and contrasts to the " f"report."
+ n_contrasts = len(cfg.contrasts)
+ n_signals = len(evokeds) - n_contrasts
+ msg = (
+ f"Adding {n_signals} evoked response{_pl(n_signals)} and "
+ f"{n_contrasts} contrast{_pl(n_contrasts)} to the report."
+ )
else:
msg = "No evoked conditions or contrasts found."
logger.info(**gen_log_kwargs(message=msg))
+ all_conditions = _all_conditions(cfg=cfg)
+ assert list(all_conditions) == list(all_evoked) # otherwise we have a bug
with _open_report(
cfg=cfg, exec_params=exec_params, subject=subject, session=session
) as report:
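`_pl` is imported from `_config_utils`; judging from its use here it returns a plural suffix for counts other than one. A hypothetical stand-in, for illustration only:

def _pl(n):
    # hypothetical stand-in for the pipeline's pluralization helper
    return "" if n == 1 else "s"

n_signals, n_contrasts = 3, 1
print(f"Adding {n_signals} evoked response{_pl(n_signals)} and "
      f"{n_contrasts} contrast{_pl(n_contrasts)} to the report.")
# -> Adding 3 evoked responses and 1 contrast to the report.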
diff --git a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
index 8fdd863fe..287ace7bc 100644
--- a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
+++ b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
@@ -237,12 +237,12 @@ def main(*, config: SimpleNamespace) -> None:
"""Run time-by-time decoding."""
if not config.contrasts:
msg = "No contrasts specified; not performing decoding."
- logger.info(**gen_log_kwargs(message=msg))
+ logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
return
if not config.decode:
msg = "No decoding requested by user."
- logger.info(**gen_log_kwargs(message=msg))
+ logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
return
with get_parallel_backend(config.exec_params):
diff --git a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
index 867b48030..f78e1d0cf 100644
--- a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
+++ b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
@@ -314,12 +314,12 @@ def main(*, config: SimpleNamespace) -> None:
"""Run time-by-time decoding."""
if not config.contrasts:
msg = "No contrasts specified; not performing decoding."
- logger.info(**gen_log_kwargs(message=msg))
+ logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
return
if not config.decode:
msg = "No decoding requested by user."
- logger.info(**gen_log_kwargs(message=msg))
+ logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
return
# Here we go parallel inside the :class:`mne.decoding.SlidingEstimator`
diff --git a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
index f5b7c3381..0e7b9b720 100644
--- a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
+++ b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
@@ -69,11 +69,12 @@ def run_time_frequency(
) -> dict:
import matplotlib.pyplot as plt
- msg = f'Input: {in_files["epochs"].basename}'
+ epochs_path = in_files.pop("epochs")
+ msg = f"Reading {epochs_path.basename}"
logger.info(**gen_log_kwargs(message=msg))
- bids_path = in_files["epochs"].copy().update(processing=None)
-
- epochs = mne.read_epochs(in_files.pop("epochs"))
+ epochs = mne.read_epochs(epochs_path)
+ bids_path = epochs_path.copy().update(processing=None)
+ del epochs_path
_restrict_analyze_channels(epochs, cfg)
if cfg.time_frequency_subtract_evoked:
@@ -87,6 +88,7 @@ def run_time_frequency(
out_files = dict()
for condition in cfg.time_frequency_conditions:
+ logger.info(**gen_log_kwargs(message=f"Computing TFR for {condition}"))
this_epochs = epochs[condition]
power, itc = mne.time_frequency.tfr_morlet(
this_epochs, freqs=freqs, return_itc=True, n_cycles=time_frequency_cycles
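For context, a minimal self-contained sketch of the per-condition Morlet TFR call above, using simulated epochs (the frequency grid and cycle rule are illustrative, not the pipeline's defaults):

import numpy as np
import mne

info = mne.create_info(["EEG 001"], sfreq=250.0, ch_types="eeg")
data = np.random.default_rng(0).normal(size=(5, 1, 250))  # 5 epochs of 1 s
epochs = mne.EpochsArray(data, info, verbose="error")
freqs = np.arange(8.0, 13.0)  # hypothetical alpha-band grid
n_cycles = freqs / 2.0        # a common choice of Morlet cycles
power, itc = mne.time_frequency.tfr_morlet(
    epochs, freqs=freqs, n_cycles=n_cycles, return_itc=True, verbose="error"
)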
@@ -182,7 +184,7 @@ def main(*, config: SimpleNamespace) -> None:
"""Run Time-frequency decomposition."""
if not config.time_frequency_conditions:
msg = "Skipping …"
- logger.info(**gen_log_kwargs(message=msg))
+ logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
return
parallel, run_func = parallel_func(
diff --git a/mne_bids_pipeline/steps/sensor/_06_make_cov.py b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
index c32f16cdf..27323d680 100644
--- a/mne_bids_pipeline/steps/sensor/_06_make_cov.py
+++ b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
@@ -16,7 +16,7 @@
_bids_kwargs,
)
from ..._config_import import _import_config
-from ..._config_utils import _restrict_analyze_channels, get_all_contrasts
+from ..._config_utils import _restrict_analyze_channels
from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _open_report, _sanitize_cond_tag, _all_conditions
@@ -258,10 +258,8 @@ def run_covariance(
for evoked, condition in zip(all_evoked, conditions):
_restrict_analyze_channels(evoked, cfg)
tags = ("evoked", "covariance", _sanitize_cond_tag(condition))
- if condition in cfg.conditions:
- title = f"Whitening: {condition}"
- else: # It's a contrast of two conditions.
- title = f"Whitening: {condition}"
+ title = f"Whitening: {condition}"
+ if condition not in cfg.conditions:
tags = tags + ("contrast",)
fig = evoked.plot_white(cov, verbose="error")
report.add_figure(
@@ -287,7 +285,7 @@ def get_config(
run_source_estimation=config.run_source_estimation,
noise_cov=_sanitize_callable(config.noise_cov),
conditions=config.conditions,
- all_contrasts=get_all_contrasts(config),
+ contrasts=config.contrasts,
analyze_channels=config.analyze_channels,
**_bids_kwargs(config=config),
)
diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py
index b126eecec..008e8b7dd 100644
--- a/mne_bids_pipeline/steps/sensor/_99_group_average.py
+++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py
@@ -5,7 +5,7 @@
import os
import os.path as op
-from collections import defaultdict
+from functools import partial
from typing import Optional, TypedDict, List, Tuple
from types import SimpleNamespace
@@ -21,27 +21,37 @@
get_subjects,
get_eeg_reference,
get_decoding_contrasts,
- get_all_contrasts,
_bids_kwargs,
+ _restrict_analyze_channels,
+ _pl,
)
from ..._decoding import _handle_csp_args
from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
-from ..._run import failsafe_run, save_logs
-from ..._report import run_report_average_sensor
+from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits
+from ..._report import (
+ _open_report,
+ _sanitize_cond_tag,
+ add_event_counts,
+ add_csp_grand_average,
+ _plot_full_epochs_decoding_scores,
+ _plot_time_by_time_decoding_scores_gavg,
+ plot_time_by_time_decoding_t_values,
+ _plot_decoding_time_generalization,
+ _contrasts_to_names,
+ _all_conditions,
+)
-def average_evokeds(
+def get_input_fnames_average_evokeds(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
-) -> List[mne.Evoked]:
- # Container for all conditions:
- all_evokeds = defaultdict(list)
-
+ session: Optional[str],
+) -> dict:
+ in_files = dict()
for this_subject in cfg.subjects:
- fname_in = BIDSPath(
+ in_files[f"evoked-{this_subject}"] = BIDSPath(
subject=this_subject,
session=session,
task=cfg.task,
@@ -55,22 +65,43 @@ def average_evokeds(
root=cfg.deriv_root,
check=False,
)
+ return in_files
- msg = f"Input: {fname_in.basename}"
- logger.info(**gen_log_kwargs(message=msg))
- evokeds = mne.read_evokeds(fname_in)
- for idx, evoked in enumerate(evokeds):
- all_evokeds[idx].append(evoked) # Insert into the container
+@failsafe_run(
+ get_input_fnames=get_input_fnames_average_evokeds,
+)
+def average_evokeds(
+ *,
+ cfg: SimpleNamespace,
+ exec_params: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ in_files: dict,
+) -> dict:
+ logger.info(**gen_log_kwargs(message="Creating grand averages"))
+ # Container for all conditions:
+ conditions = _all_conditions(cfg=cfg)
+ evokeds = [list() for _ in range(len(conditions))]
- for idx, evokeds in all_evokeds.items():
- all_evokeds[idx] = mne.grand_average(
- evokeds, interpolate_bads=cfg.interpolate_bads_grand_average
+ keys = list(in_files)
+ for key in keys:
+ if not key.startswith("evoked-"):
+ continue
+ fname_in = in_files.pop(key)
+ these_evokeds = mne.read_evokeds(fname_in)
+ for idx, evoked in enumerate(these_evokeds):
+ evokeds[idx].append(evoked) # Insert into the container
+
+ for idx, these_evokeds in enumerate(evokeds):
+ evokeds[idx] = mne.grand_average(
+ these_evokeds, interpolate_bads=cfg.interpolate_bads_grand_average
) # Combine subjects
# Keep condition in comment
- all_evokeds[idx].comment = "Grand average: " + evokeds[0].comment
+ evokeds[idx].comment = "Grand average: " + these_evokeds[0].comment
- fname_out = BIDSPath(
+ out_files = dict()
+ fname_out = out_files["evokeds"] = BIDSPath(
subject=subject,
session=session,
task=cfg.task,
@@ -91,8 +122,56 @@ def average_evokeds(
msg = f"Saving grand-averaged evoked sensor data: {fname_out.basename}"
logger.info(**gen_log_kwargs(message=msg))
- mne.write_evokeds(fname_out, list(all_evokeds.values()), overwrite=True)
- return list(all_evokeds.values())
+ mne.write_evokeds(fname_out, evokeds, overwrite=True)
+ if exec_params.interactive:
+ for evoked in evokeds:
+ evoked.plot()
+
+ # Reporting
+ evokeds = [_restrict_analyze_channels(evoked, cfg) for evoked in evokeds]
+ with _open_report(
+ cfg=cfg, exec_params=exec_params, subject=subject, session=session
+ ) as report:
+ # Add event stats.
+ add_event_counts(
+ cfg=cfg,
+ report=report,
+ subject=subject,
+ session=session,
+ )
+
+ # Evoked responses
+ if evokeds:
+ n_contrasts = len(cfg.contrasts)
+ n_signals = len(evokeds) - n_contrasts
+ msg = (
+ f"Adding {n_signals} evoked response{_pl(n_signals)} and "
+ f"{n_contrasts} contrast{_pl(n_contrasts)} to the report."
+ )
+ else:
+ msg = "No evoked conditions or contrasts found."
+ logger.info(**gen_log_kwargs(message=msg))
+ for condition, evoked in zip(conditions, evokeds):
+ tags = ("evoked", _sanitize_cond_tag(condition))
+ if condition in cfg.conditions:
+ title = f"Average (sensor): {condition}"
+ else: # It's a contrast of two conditions.
+ title = f"Average (sensor) contrast: {condition}"
+ tags = tags + ("contrast",)
+
+ report.add_evokeds(
+ evokeds=evoked,
+ titles=title,
+ projs=False,
+ tags=tags,
+ n_time_points=cfg.report_evoked_n_time_points,
+ # captions=evoked.comment, # TODO upstream
+ replace=True,
+ n_jobs=1, # don't auto parallelize
+ )
+
+ assert len(in_files) == 0, list(in_files)
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
class ClusterAcrossTime(TypedDict):
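The refactored `average_evokeds` keeps the old grouping logic but indexes conditions by position instead of a defaultdict. A sketch of that core, assuming two hypothetical -ave.fif paths holding identically ordered conditions:

import mne

fnames = ["sub-01_task-x_ave.fif", "sub-02_task-x_ave.fif"]  # hypothetical paths
per_condition = None
for fname in fnames:
    these_evokeds = mne.read_evokeds(fname)
    if per_condition is None:
        per_condition = [[] for _ in these_evokeds]
    for idx, evoked in enumerate(these_evokeds):
        per_condition[idx].append(evoked)  # group by condition index
grand_averages = [
    mne.grand_average(these, interpolate_bads=True) for these in per_condition
]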
@@ -119,7 +198,7 @@ def _decoding_cluster_permutation_test(
out_type="mask",
tail=1, # one-sided: significantly above chance level
seed=random_seed,
- verbose=True,
+ verbose="error", # ignore No clusters found
)
n_permutations = H0.size - 1
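The `verbose="error"` change silences MNE's "No clusters found" message when a contrast has no supra-threshold bins. For context, a self-contained sketch of the underlying call with simulated scores (chance level already subtracted), using only MNE's documented API:

import numpy as np
from mne.stats import permutation_cluster_1samp_test

rng = np.random.default_rng(0)
X = rng.normal(loc=0.02, scale=0.1, size=(10, 50))  # 10 subjects x 50 time points
t_vals, clusters, cluster_p_vals, H0 = permutation_cluster_1samp_test(
    X,
    threshold=2.0,      # cluster-forming t threshold
    n_permutations=1024,
    tail=1,             # one-sided: significantly above chance
    out_type="mask",
    seed=42,
    verbose="error",    # as in this diff: ignore "No clusters found"
)
n_permutations = H0.size - 1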
@@ -134,10 +213,14 @@ def _decoding_cluster_permutation_test(
return t_vals, clusters, n_permutations
-def average_time_by_time_decoding(cfg: SimpleNamespace, session: str):
- # Get the time points from the very first subject. They are identical
- # across all subjects and conditions, so this should suffice.
- fname_epo = BIDSPath(
+def _get_epochs_in_files(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+) -> dict:
+ in_files = dict()
+ in_files["epochs"] = BIDSPath(
subject=cfg.subjects[0],
session=session,
task=cfg.task,
@@ -151,276 +234,494 @@ def average_time_by_time_decoding(cfg: SimpleNamespace, session: str):
root=cfg.deriv_root,
check=False,
)
- epochs = mne.read_epochs(fname_epo)
+ _update_for_splits(in_files, "epochs", single=True)
+ return in_files
+
+
+def _decoding_out_fname(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ cond_1: str,
+ cond_2: str,
+ kind: str,
+ extension: str = ".mat",
+):
+ processing = (
+ f"{cond_1}+{cond_2}+{kind}+{cfg.decoding_metric}".replace(op.sep, "")
+ .replace("_", "-")
+ .replace("-", "")
+ )
+ return BIDSPath(
+ subject=subject,
+ session=session,
+ task=cfg.task,
+ acquisition=cfg.acq,
+ run=None,
+ recording=cfg.rec,
+ space=cfg.space,
+ processing=processing,
+ suffix="decoding",
+ extension=extension,
+ datatype=cfg.datatype,
+ root=cfg.deriv_root,
+ check=False,
+ )
+
+
+def _get_input_fnames_decoding(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ cond_1: str,
+ cond_2: str,
+ kind: str,
+ extension: str = ".mat",
+) -> dict:
+ in_files = _get_epochs_in_files(cfg=cfg, subject=subject, session=session)
+ for this_subject in cfg.subjects:
+ in_files[f"scores-{this_subject}"] = _decoding_out_fname(
+ cfg=cfg,
+ subject=this_subject,
+ session=session,
+ cond_1=cond_1,
+ cond_2=cond_2,
+ kind=kind,
+ extension=extension,
+ )
+ return in_files
+
+
+@failsafe_run(
+ get_input_fnames=partial(
+ _get_input_fnames_decoding,
+ kind="TimeByTime",
+ ),
+)
+def average_time_by_time_decoding(
+ *,
+ cfg: SimpleNamespace,
+ exec_params: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ cond_1: str,
+ cond_2: str,
+ in_files: dict,
+) -> dict:
+ logger.info(**gen_log_kwargs(message="Averaging time-by-time decoding results"))
+ # Get the time points from the very first subject. They are identical
+ # across all subjects and conditions, so this should suffice.
+ epochs = mne.read_epochs(in_files.pop("epochs"), preload=False)
dtg_decim = cfg.decoding_time_generalization_decim
if cfg.decoding_time_generalization and dtg_decim > 1:
epochs.decimate(dtg_decim, verbose="error")
times = epochs.times
- subjects = cfg.subjects
- del epochs, fname_epo
+ del epochs
+
+ if cfg.decoding_time_generalization:
+ time_points_shape = (len(times), len(times))
+ else:
+ time_points_shape = (len(times),)
+
+ n_subjects = len(cfg.subjects)
+ contrast_score_stats = {
+ "cond_1": cond_1,
+ "cond_2": cond_2,
+ "times": times,
+ "N": n_subjects,
+ "decim": dtg_decim,
+ "mean": np.empty(time_points_shape),
+ "mean_min": np.empty(time_points_shape),
+ "mean_max": np.empty(time_points_shape),
+ "mean_se": np.empty(time_points_shape),
+ "mean_ci_lower": np.empty(time_points_shape),
+ "mean_ci_upper": np.empty(time_points_shape),
+ "cluster_all_times": np.array([]),
+ "cluster_all_t_values": np.array([]),
+ "cluster_t_threshold": np.nan,
+ "cluster_n_permutations": np.nan,
+ "clusters": list(),
+ }
+
+ # Extract mean CV scores from all subjects.
+ mean_scores = np.empty((n_subjects, *time_points_shape))
+
+ # Remaining in_files are all decoding data
+ assert len(in_files) == n_subjects, list(in_files.keys())
+ for sub_idx, key in enumerate(list(in_files)):
+ decoding_data = loadmat(in_files.pop(key))
+ mean_scores[sub_idx, :] = decoding_data["scores"].mean(axis=0)
+
+ # Cluster permutation test.
+ # We can only permute for two or more subjects
+ #
+ # If we've performed time generalization, we will only use the diagonal
+ # CV scores here (classifiers trained and tested at the same time
+ # points).
+
+ if n_subjects > 1:
+ # Constrain cluster permutation test to time points of the
+ # time-locked event or later.
+ # We subtract the chance level from the scores as we'll be
+ # performing a 1-sample test (i.e., test against 0)!
+ idx = np.where(times >= 0)[0]
- for contrast in cfg.decoding_contrasts:
- cond_1, cond_2 = contrast
if cfg.decoding_time_generalization:
- time_points_shape = (len(times), len(times))
+ cluster_permutation_scores = mean_scores[:, idx, idx] - 0.5
else:
- time_points_shape = (len(times),)
-
- contrast_score_stats = {
- "cond_1": cond_1,
- "cond_2": cond_2,
- "times": times,
- "N": len(subjects),
- "decim": dtg_decim,
- "mean": np.empty(time_points_shape),
- "mean_min": np.empty(time_points_shape),
- "mean_max": np.empty(time_points_shape),
- "mean_se": np.empty(time_points_shape),
- "mean_ci_lower": np.empty(time_points_shape),
- "mean_ci_upper": np.empty(time_points_shape),
- "cluster_all_times": np.array([]),
- "cluster_all_t_values": np.array([]),
- "cluster_t_threshold": np.nan,
- "cluster_n_permutations": np.nan,
- "clusters": list(),
- }
+ cluster_permutation_scores = mean_scores[:, idx] - 0.5
+
+ cluster_permutation_times = times[idx]
+ if cfg.cluster_forming_t_threshold is None:
+ import scipy.stats
+
+ cluster_forming_t_threshold = scipy.stats.t.ppf(
+ 1 - 0.05, len(cluster_permutation_scores) - 1
+ )
+ else:
+ cluster_forming_t_threshold = cfg.cluster_forming_t_threshold
- processing = (
- f"{cond_1}+{cond_2}+TimeByTime+{cfg.decoding_metric}".replace(op.sep, "")
- .replace("_", "-")
- .replace("-", "")
+ t_vals, clusters, n_perm = _decoding_cluster_permutation_test(
+ scores=cluster_permutation_scores,
+ times=cluster_permutation_times,
+ cluster_forming_t_threshold=cluster_forming_t_threshold,
+ n_permutations=cfg.cluster_n_permutations,
+ random_seed=cfg.random_state,
)
- # Extract mean CV scores from all subjects.
- mean_scores = np.empty((len(subjects), *time_points_shape))
+ contrast_score_stats.update(
+ {
+ "cluster_all_times": cluster_permutation_times,
+ "cluster_all_t_values": t_vals,
+ "cluster_t_threshold": cluster_forming_t_threshold,
+ "clusters": clusters,
+ "cluster_n_permutations": n_perm,
+ }
+ )
- for sub_idx, subject in enumerate(subjects):
- fname_mat = BIDSPath(
- subject=subject,
- session=session,
- task=cfg.task,
- acquisition=cfg.acq,
- run=None,
- recording=cfg.rec,
- space=cfg.space,
- processing=processing,
- suffix="decoding",
- extension=".mat",
- datatype=cfg.datatype,
- root=cfg.deriv_root,
- check=False,
- )
+ del cluster_permutation_scores, cluster_permutation_times, n_perm
+
+ # Now we can calculate some descriptive statistics on the mean scores.
+ # We use the [:] here as a safeguard to ensure we don't mess up the
+ # dimensions.
+ #
+ # For time generalization, all values (each time point vs each other)
+ # are considered.
+ contrast_score_stats["mean"][:] = mean_scores.mean(axis=0)
+ contrast_score_stats["mean_min"][:] = mean_scores.min(axis=0)
+ contrast_score_stats["mean_max"][:] = mean_scores.max(axis=0)
+
+ # Finally, for each time point, bootstrap the mean, and calculate the
+ # SD of the bootstrapped distribution: this is the standard error of
+ # the mean. We also derive 95% confidence intervals.
+ rng = np.random.default_rng(seed=cfg.random_state)
+ for time_idx in range(len(times)):
+ if cfg.decoding_time_generalization:
+ data = mean_scores[:, time_idx, time_idx]
+ else:
+ data = mean_scores[:, time_idx]
+ scores_resampled = rng.choice(data, size=(cfg.n_boot, n_subjects), replace=True)
+ bootstrapped_means = scores_resampled.mean(axis=1)
- decoding_data = loadmat(fname_mat)
- mean_scores[sub_idx, :] = decoding_data["scores"].mean(axis=0)
-
- # Cluster permutation test.
- # We can only permute for two or more subjects
- #
- # If we've performed time generalization, we will only use the diagonal
- # CV scores here (classifiers trained and tested at the same time
- # points).
-
- if len(subjects) > 1:
- # Constrain cluster permutation test to time points of the
- # time-locked event or later.
- # We subtract the chance level from the scores as we'll be
- # performing a 1-sample test (i.e., test against 0)!
- idx = np.where(times >= 0)[0]
-
- if cfg.decoding_time_generalization:
- cluster_permutation_scores = mean_scores[:, idx, idx] - 0.5
- else:
- cluster_permutation_scores = mean_scores[:, idx] - 0.5
-
- cluster_permutation_times = times[idx]
- if cfg.cluster_forming_t_threshold is None:
- import scipy.stats
-
- cluster_forming_t_threshold = scipy.stats.t.ppf(
- 1 - 0.05, len(cluster_permutation_scores) - 1
- )
- else:
- cluster_forming_t_threshold = cfg.cluster_forming_t_threshold
+ # SD of the bootstrapped distribution == SE of the metric.
+ se = bootstrapped_means.std(ddof=1)
+ ci_lower = np.quantile(bootstrapped_means, q=0.025)
+ ci_upper = np.quantile(bootstrapped_means, q=0.975)
- t_vals, clusters, n_perm = _decoding_cluster_permutation_test(
- scores=cluster_permutation_scores,
- times=cluster_permutation_times,
- cluster_forming_t_threshold=cluster_forming_t_threshold,
- n_permutations=cfg.cluster_n_permutations,
- random_seed=cfg.random_state,
+ contrast_score_stats["mean_se"][time_idx] = se
+ contrast_score_stats["mean_ci_lower"][time_idx] = ci_lower
+ contrast_score_stats["mean_ci_upper"][time_idx] = ci_upper
+
+ del bootstrapped_means, se, ci_lower, ci_upper
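A self-contained sketch of the bootstrap above, with made-up numbers: resample the per-subject means with replacement, take the SD of the bootstrapped means as the standard error, and the 2.5th/97.5th percentiles as a 95% CI.

import numpy as np

rng = np.random.default_rng(42)
data = rng.normal(loc=0.6, scale=0.05, size=10)  # hypothetical per-subject scores
n_boot = 5000
scores_resampled = rng.choice(data, size=(n_boot, data.size), replace=True)
bootstrapped_means = scores_resampled.mean(axis=1)
se = bootstrapped_means.std(ddof=1)  # SD of bootstrapped means == SE of the mean
ci_lower = np.quantile(bootstrapped_means, q=0.025)
ci_upper = np.quantile(bootstrapped_means, q=0.975)
print(f"mean={data.mean():.3f} se={se:.3f} CI=({ci_lower:.3f}, {ci_upper:.3f})")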
+
+ out_files = dict()
+ out_files["mat"] = _decoding_out_fname(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ cond_1=cond_1,
+ cond_2=cond_2,
+ kind="TimeByTime",
+ )
+ savemat(out_files["mat"], contrast_score_stats)
+
+ section = "Decoding: time-by-time"
+ with _open_report(
+ cfg=cfg, exec_params=exec_params, subject=subject, session=session
+ ) as report:
+ logger.info(**gen_log_kwargs(message="Adding time-by-time decoding results"))
+ import matplotlib.pyplot as plt
+
+ tags = (
+ "epochs",
+ "contrast",
+ "decoding",
+ f"{_sanitize_cond_tag(cond_1)}–{_sanitize_cond_tag(cond_2)}",
+ )
+ decoding_data = loadmat(out_files["mat"])
+
+ # Plot scores
+ fig = _plot_time_by_time_decoding_scores_gavg(
+ cfg=cfg,
+ decoding_data=decoding_data,
+ )
+ caption = (
+ f'Based on N={decoding_data["N"].squeeze()} '
+ f"subjects. Standard error and confidence interval "
+ f"of the mean were bootstrapped with {cfg.n_boot} "
+ f"resamples. CI must not be used for statistical inference here, "
+ f"as it is not corrected for multiple testing."
+ )
+ if len(get_subjects(cfg)) > 1:
+ caption += (
+ f" Time periods with decoding performance significantly above "
+ f"chance, if any, were derived with a one-tailed "
+ f"cluster-based permutation test "
+ f'({decoding_data["cluster_n_permutations"].squeeze()} '
+ f"permutations) and are highlighted in yellow."
+ )
+ title = f"Decoding over time: {cond_1} vs. {cond_2}"
+ report.add_figure(
+ fig=fig,
+ title=title,
+ caption=caption,
+ section=section,
+ tags=tags,
+ replace=True,
+ )
+ plt.close(fig)
+
+ # Plot t-values used to form clusters
+ if len(get_subjects(cfg)) > 1:
+ fig = plot_time_by_time_decoding_t_values(decoding_data=decoding_data)
+ t_threshold = np.round(decoding_data["cluster_t_threshold"], 3).item()
+ caption = (
+ f"Observed t-values. Time points with "
+ f"t-values > {t_threshold} were used to form clusters."
+ )
+ report.add_figure(
+ fig=fig,
+ title=f"t-values across time: {cond_1} vs. {cond_2}",
+ caption=caption,
+ section=section,
+ tags=tags,
+ replace=True,
)
+ plt.close(fig)
- contrast_score_stats.update(
- {
- "cluster_all_times": cluster_permutation_times,
- "cluster_all_t_values": t_vals,
- "cluster_t_threshold": cluster_forming_t_threshold,
- "clusters": clusters,
- "cluster_n_permutations": n_perm,
- }
+ if cfg.decoding_time_generalization:
+ fig = _plot_decoding_time_generalization(
+ decoding_data=decoding_data,
+ metric=cfg.decoding_metric,
+ kind="grand-average",
+ )
+ caption = (
+ f"Time generalization (generalization across time, GAT): "
+ f"each classifier is trained on each time point, and tested "
+ f"on all other time points. The results were averaged across "
+ f'N={decoding_data["N"].item()} subjects.'
)
+ title = f"Time generalization: {cond_1} vs. {cond_2}"
+ report.add_figure(
+ fig=fig,
+ title=title,
+ caption=caption,
+ section=section,
+ tags=tags,
+ replace=True,
+ )
+ plt.close(fig)
- del cluster_permutation_scores, cluster_permutation_times, n_perm
+ return _prep_out_files(out_files=out_files, exec_params=exec_params)
- # Now we can calculate some descriptive statistics on the mean scores.
- # We use the [:] here as a safeguard to ensure we don't mess up the
- # dimensions.
- #
- # For time generalization, all values (each time point vs each other)
- # are considered.
- contrast_score_stats["mean"][:] = mean_scores.mean(axis=0)
- contrast_score_stats["mean_min"][:] = mean_scores.min(axis=0)
- contrast_score_stats["mean_max"][:] = mean_scores.max(axis=0)
- # Finally, for each time point, bootstrap the mean, and calculate the
- # SD of the bootstrapped distribution: this is the standard error of
- # the mean. We also derive 95% confidence intervals.
- rng = np.random.default_rng(seed=cfg.random_state)
- for time_idx in range(len(times)):
- if cfg.decoding_time_generalization:
- data = mean_scores[:, time_idx, time_idx]
- else:
- data = mean_scores[:, time_idx]
- scores_resampled = rng.choice(
- data, size=(cfg.n_boot, len(subjects)), replace=True
- )
- bootstrapped_means = scores_resampled.mean(axis=1)
+@failsafe_run(
+ get_input_fnames=partial(
+ _get_input_fnames_decoding,
+ kind="FullEpochs",
+ ),
+)
+def average_full_epochs_decoding(
+ *,
+ cfg: SimpleNamespace,
+ exec_params: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ cond_1: str,
+ cond_2: str,
+ in_files: dict,
+) -> dict:
+ n_subjects = len(cfg.subjects)
+ in_files.pop("epochs") # not used but okay to include
+
+ contrast_score_stats = {
+ "cond_1": cond_1,
+ "cond_2": cond_2,
+ "N": n_subjects,
+ "subjects": cfg.subjects,
+ "scores": np.nan,
+ "mean": np.nan,
+ "mean_min": np.nan,
+ "mean_max": np.nan,
+ "mean_se": np.nan,
+ "mean_ci_lower": np.nan,
+ "mean_ci_upper": np.nan,
+ }
- # SD of the bootstrapped distribution == SE of the metric.
- se = bootstrapped_means.std(ddof=1)
- ci_lower = np.quantile(bootstrapped_means, q=0.025)
- ci_upper = np.quantile(bootstrapped_means, q=0.975)
-
- contrast_score_stats["mean_se"][time_idx] = se
- contrast_score_stats["mean_ci_lower"][time_idx] = ci_lower
- contrast_score_stats["mean_ci_upper"][time_idx] = ci_upper
-
- del bootstrapped_means, se, ci_lower, ci_upper
-
- fname_out = fname_mat.copy().update(subject="average")
- savemat(fname_out, contrast_score_stats)
- del contrast_score_stats, fname_out
-
-
-def average_full_epochs_decoding(cfg: SimpleNamespace, session: str):
- for contrast in cfg.decoding_contrasts:
- cond_1, cond_2 = contrast
- n_subjects = len(cfg.subjects)
-
- contrast_score_stats = {
- "cond_1": cond_1,
- "cond_2": cond_2,
- "N": n_subjects,
- "subjects": cfg.subjects,
- "scores": np.nan,
- "mean": np.nan,
- "mean_min": np.nan,
- "mean_max": np.nan,
- "mean_se": np.nan,
- "mean_ci_lower": np.nan,
- "mean_ci_upper": np.nan,
- }
+ # Extract mean CV scores from all subjects.
+ mean_scores = np.empty(n_subjects)
+ for sub_idx, key in enumerate(list(in_files)):
+ decoding_data = loadmat(in_files.pop(key))
+ mean_scores[sub_idx] = decoding_data["scores"].mean()
+
+ # Now we can calculate some descriptive statistics on the mean scores.
+ # (These are plain scalars here, so the [:] dimension safeguard used in the
+ # time-by-time version is not needed.)
+ contrast_score_stats["scores"] = mean_scores
+ contrast_score_stats["mean"] = mean_scores.mean()
+ contrast_score_stats["mean_min"] = mean_scores.min()
+ contrast_score_stats["mean_max"] = mean_scores.max()
+
+ # Finally, bootstrap the mean, and calculate the
+ # SD of the bootstrapped distribution: this is the standard error of
+ # the mean. We also derive 95% confidence intervals.
+ rng = np.random.default_rng(seed=cfg.random_state)
+ scores_resampled = rng.choice(
+ mean_scores, size=(cfg.n_boot, n_subjects), replace=True
+ )
+ bootstrapped_means = scores_resampled.mean(axis=1)
- processing = (
- f"{cond_1}+{cond_2}+FullEpochs+{cfg.decoding_metric}".replace(op.sep, "")
- .replace("_", "-")
- .replace("-", "")
- )
+ # SD of the bootstrapped distribution == SE of the metric.
+ se = bootstrapped_means.std(ddof=1)
+ ci_lower = np.quantile(bootstrapped_means, q=0.025)
+ ci_upper = np.quantile(bootstrapped_means, q=0.975)
- # Extract mean CV scores from all subjects.
- mean_scores = np.empty(n_subjects)
- for sub_idx, subject in enumerate(cfg.subjects):
- fname_mat = BIDSPath(
- subject=subject,
- session=session,
- task=cfg.task,
- acquisition=cfg.acq,
- run=None,
- recording=cfg.rec,
- space=cfg.space,
- processing=processing,
- suffix="decoding",
- extension=".mat",
- datatype=cfg.datatype,
- root=cfg.deriv_root,
- check=False,
- )
+ contrast_score_stats["mean_se"] = se
+ contrast_score_stats["mean_ci_lower"] = ci_lower
+ contrast_score_stats["mean_ci_upper"] = ci_upper
- decoding_data = loadmat(fname_mat)
- mean_scores[sub_idx] = decoding_data["scores"].mean()
+ del bootstrapped_means, se, ci_lower, ci_upper
- # Now we can calculate some descriptive statistics on the mean scores.
- # We use the [:] here as a safeguard to ensure we don't mess up the
- # dimensions.
- contrast_score_stats["scores"] = mean_scores
- contrast_score_stats["mean"] = mean_scores.mean()
- contrast_score_stats["mean_min"] = mean_scores.min()
- contrast_score_stats["mean_max"] = mean_scores.max()
+ out_files = dict()
+ fname_out = out_files["mat"] = _decoding_out_fname(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ cond_1=cond_1,
+ cond_2=cond_2,
+ kind="FullEpochs",
+ )
+ if not fname_out.fpath.parent.exists():
+ os.makedirs(fname_out.fpath.parent)
+ savemat(fname_out, contrast_score_stats)
+ return _prep_out_files(out_files=out_files, exec_params=exec_params)
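The bootstrap above resamples subjects with replacement, takes the SD of the bootstrapped means as the standard error, and reads the 95% CI off the 2.5/97.5 percentiles. A minimal standalone sketch with hypothetical per-subject scores (plain NumPy, no pipeline API):

import numpy as np

rng = np.random.default_rng(seed=42)
mean_scores = np.array([0.55, 0.61, 0.58, 0.64, 0.52])  # hypothetical per-subject means
n_boot, n_subjects = 5000, len(mean_scores)

# Resample subjects with replacement, then average each bootstrap sample.
resampled = rng.choice(mean_scores, size=(n_boot, n_subjects), replace=True)
boot_means = resampled.mean(axis=1)

se = boot_means.std(ddof=1)  # SD of the bootstrap distribution == SE of the mean
ci_lower, ci_upper = np.quantile(boot_means, [0.025, 0.975])  # 95% CI
print(f"mean={mean_scores.mean():.3f} se={se:.3f} CI=[{ci_lower:.3f}, {ci_upper:.3f}]")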
- # Finally, bootstrap the mean, and calculate the
- # SD of the bootstrapped distribution: this is the standard error of
- # the mean. We also derive 95% confidence intervals.
- rng = np.random.default_rng(seed=cfg.random_state)
- scores_resampled = rng.choice(
- mean_scores, size=(cfg.n_boot, n_subjects), replace=True
- )
- bootstrapped_means = scores_resampled.mean(axis=1)
- # SD of the bootstrapped distribution == SE of the metric.
- se = bootstrapped_means.std(ddof=1)
- ci_lower = np.quantile(bootstrapped_means, q=0.025)
- ci_upper = np.quantile(bootstrapped_means, q=0.975)
+def get_input_files_average_full_epochs_report(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ decoding_contrasts: List[List[str]],
+) -> dict:
+ in_files = dict()
+ for contrast in decoding_contrasts:
+ in_files[f"decoding-full-epochs-{contrast}"] = _decoding_out_fname(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ cond_1=contrast[0],
+ cond_2=contrast[1],
+ kind="FullEpochs",
+ )
+ return in_files
- contrast_score_stats["mean_se"] = se
- contrast_score_stats["mean_ci_lower"] = ci_lower
- contrast_score_stats["mean_ci_upper"] = ci_upper
- del bootstrapped_means, se, ci_lower, ci_upper
+@failsafe_run(
+ get_input_fnames=get_input_files_average_full_epochs_report,
+)
+def average_full_epochs_report(
+ *,
+ cfg: SimpleNamespace,
+ exec_params: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ decoding_contrasts: List[List[str]],
+ in_files: dict,
+) -> dict:
+ """Add decoding results to the grand average report."""
+ with _open_report(
+ cfg=cfg, exec_params=exec_params, subject=subject, session=session
+ ) as report:
+ import matplotlib.pyplot as plt # nested import to help joblib
+
+ logger.info(
+ **gen_log_kwargs(message="Adding full-epochs decoding results to report")
+ )
- fname_out = fname_mat.copy().update(subject="average")
- if not fname_out.fpath.parent.exists():
- os.makedirs(fname_out.fpath.parent)
- savemat(fname_out, contrast_score_stats)
- del contrast_score_stats, fname_out
+ # Full-epochs decoding
+ all_decoding_scores = []
+ for key in list(in_files):
+ if not key.startswith("decoding-full-epochs-"):
+ continue
+ decoding_data = loadmat(in_files.pop(key))
+ all_decoding_scores.append(np.atleast_1d(decoding_data["scores"].squeeze()))
+ del decoding_data
+
+ fig, caption = _plot_full_epochs_decoding_scores(
+ contrast_names=_contrasts_to_names(decoding_contrasts),
+ scores=all_decoding_scores,
+ metric=cfg.decoding_metric,
+ kind="grand-average",
+ )
+ report.add_figure(
+ fig=fig,
+ title="Full-epochs decoding",
+ section="Decoding: full-epochs",
+ caption=caption,
+ tags=(
+ "epochs",
+ "contrast",
+ "decoding",
+ *[
+ f"{_sanitize_cond_tag(cond_1)}–{_sanitize_cond_tag(cond_2)}"
+ for cond_1, cond_2 in cfg.decoding_contrasts
+ ],
+ ),
+ replace=True,
+ )
+ # close figure to save memory
+ plt.close(fig)
+ return _prep_out_files(exec_params=exec_params, out_files=dict())
+@failsafe_run(
+ get_input_fnames=partial(
+ _get_input_fnames_decoding,
+ kind="CSP",
+ extension=".xlsx",
+ ),
+)
def average_csp_decoding(
+ *,
cfg: SimpleNamespace,
- session: str,
+ exec_params: SimpleNamespace,
subject: str,
- condition_1: str,
- condition_2: str,
+ session: Optional[str],
+ cond_1: str,
+ cond_2: str,
+ in_files: dict,
):
- msg = f"Summarizing CSP results: {condition_1} - {condition_2}."
+ msg = f"Summarizing CSP results: {cond_1} - {cond_2}."
logger.info(**gen_log_kwargs(message=msg))
-
- # Extract mean CV scores from all subjects.
- a_vs_b = f"{condition_1}+{condition_2}".replace(op.sep, "")
- processing = f"{a_vs_b}+CSP+{cfg.decoding_metric}"
- processing = processing.replace("_", "-").replace("-", "")
+ in_files.pop("epochs")
all_decoding_data_freq = []
all_decoding_data_time_freq = []
-
- # First load the data.
- fname_out = BIDSPath(
- subject="average",
- session=session,
- task=cfg.task,
- acquisition=cfg.acq,
- run=None,
- recording=cfg.rec,
- space=cfg.space,
- processing=processing,
- suffix="decoding",
- extension=".xlsx",
- datatype=cfg.datatype,
- root=cfg.deriv_root,
- check=False,
- )
- for subject in cfg.subjects:
- fname_xlsx = fname_out.copy().update(subject=subject)
+ for key in list(in_files):
+ fname_xlsx = in_files.pop(key)
decoding_data_freq = pd.read_excel(
fname_xlsx,
sheet_name="CSP Frequency",
@@ -438,14 +739,28 @@ def average_csp_decoding(
    # Now calculate descriptive statistics and bootstrap CIs.
grand_average_freq = _average_csp_time_freq(
cfg=cfg,
+ subject=subject,
+ session=session,
data=all_decoding_data_freq,
)
grand_average_time_freq = _average_csp_time_freq(
cfg=cfg,
+ subject=subject,
+ session=session,
data=all_decoding_data_time_freq,
)
- with pd.ExcelWriter(fname_out) as w:
+ out_files = dict()
+ out_files["freq"] = _decoding_out_fname(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ cond_1=cond_1,
+ cond_2=cond_2,
+ kind="CSP",
+ extension=".xlsx",
+ )
+ with pd.ExcelWriter(out_files["freq"]) as w:
grand_average_freq.to_excel(w, sheet_name="CSP Frequency", index=False)
grand_average_time_freq.to_excel(
w, sheet_name="CSP Time-Frequency", index=False
@@ -476,9 +791,9 @@ def average_csp_decoding(
["subject", "freq_range_name", "t_min", "t_max"]
)
- for (subject, freq_range_name, t_min, t_max), df in g:
+ for (subject_, freq_range_name, t_min, t_max), df in g:
scores = df["mean_crossval_score"]
- sub_idx = subjects.index(subject)
+ sub_idx = subjects.index(subject_)
time_bin_idx = time_bins.loc[
(np.isclose(time_bins["t_min"], t_min))
& (np.isclose(time_bins["t_max"], t_max)),
@@ -499,20 +814,24 @@ def average_csp_decoding(
cluster_permutation_results = {}
for freq_range_name, X in data_for_clustering.items():
- (
- t_vals,
- all_clusters,
- cluster_p_vals,
- H0,
- ) = mne.stats.permutation_cluster_1samp_test( # noqa: E501
- X=X - 0.5, # One-sample test against zero.
- threshold=cluster_forming_t_threshold,
- n_permutations=cfg.cluster_n_permutations,
- adjacency=None, # each time & freq bin connected to its neighbors
- out_type="mask",
- tail=1, # one-sided: significantly above chance level
- seed=cfg.random_state,
- )
+ if len(X) < 2:
+ t_vals = np.full(X.shape[1:], np.nan)
+ H0 = all_clusters = cluster_p_vals = np.array([])
+ else:
+ (
+ t_vals,
+ all_clusters,
+ cluster_p_vals,
+ H0,
+ ) = mne.stats.permutation_cluster_1samp_test( # noqa: E501
+ X=X - 0.5, # One-sample test against zero.
+ threshold=cluster_forming_t_threshold,
+ n_permutations=cfg.cluster_n_permutations,
+ adjacency=None, # each time & freq bin connected to its neighbors
+ out_type="mask",
+ tail=1, # one-sided: significantly above chance level
+ seed=cfg.random_state,
+ )
n_permutations = H0.size - 1
all_clusters = np.array(all_clusters) # preserve "empty" 0th dimension
cluster_permutation_results[freq_range_name] = {
@@ -526,20 +845,38 @@ def average_csp_decoding(
"freq_bin_edges": cfg.decoding_csp_freqs[freq_range_name],
}
- fname_out.update(extension=".mat")
- savemat(file_name=fname_out, mdict=cluster_permutation_results)
+ out_files["cluster"] = out_files["freq"].copy().update(extension=".mat")
+ savemat(file_name=out_files["cluster"], mdict=cluster_permutation_results)
+
+ assert subject == "average"
+ with _open_report(
+ cfg=cfg, exec_params=exec_params, subject=subject, session=session
+ ) as report:
+ add_csp_grand_average(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ report=report,
+ cond_1=cond_1,
+ cond_2=cond_2,
+ fname_csp_freq_results=out_files["freq"],
+ fname_csp_cluster_results=out_files["cluster"],
+ )
+ return _prep_out_files(out_files=out_files, exec_params=exec_params)
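For reference, the guarded call above uses the standard MNE cluster-permutation API; a standalone sketch on synthetic decoding scores (all shapes and values hypothetical):

import numpy as np
import mne

rng = np.random.default_rng(0)
# 10 subjects x 4 time bins x 3 frequency bins, simulated slightly above chance (0.5).
X = 0.52 + 0.05 * rng.standard_normal((10, 4, 3))
t_vals, clusters, cluster_p_vals, H0 = mne.stats.permutation_cluster_1samp_test(
    X - 0.5,  # one-sample test against chance level
    threshold=None,  # auto-derive the t threshold from p=0.05
    n_permutations=1024,
    adjacency=None,  # lattice adjacency over the trailing dimensions
    out_type="mask",
    tail=1,  # one-sided: above chance only
    seed=0,
)
print(f"{len(clusters)} clusters found")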
def _average_csp_time_freq(
*,
cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
data: pd.DataFrame,
) -> pd.DataFrame:
# Prepare a dataframe for storing the results.
grand_average = data[0].copy()
del grand_average["mean_crossval_score"]
- grand_average["subject"] = "average"
+ grand_average["subject"] = subject
grand_average["mean"] = np.nan
grand_average["mean_se"] = np.nan
grand_average["mean_ci_lower"] = np.nan
@@ -567,7 +904,8 @@ def _average_csp_time_freq(
bootstrapped_means = scores_resampled.mean(axis=1)
# SD of the bootstrapped distribution == SE of the metric.
- se = bootstrapped_means.std(ddof=1)
+ with np.errstate(over="raise"):
+ se = bootstrapped_means.std(ddof=1)
ci_lower = np.quantile(bootstrapped_means, q=0.025)
ci_upper = np.quantile(bootstrapped_means, q=0.975)
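The np.errstate(over="raise") guard added above turns silent floating-point overflow into an exception, so degenerate inputs fail fast instead of propagating inf/NaN into the report; a minimal sketch of the mechanism:

import numpy as np

with np.errstate(over="raise"):
    try:
        np.float64(1e308) * 10  # overflows float64
    except FloatingPointError as err:
        print(f"caught overflow: {err}")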
@@ -598,7 +936,7 @@ def get_config(
subjects=get_subjects(config),
task_is_rest=config.task_is_rest,
conditions=config.conditions,
- contrasts=get_all_contrasts(config),
+ contrasts=config.contrasts,
decode=config.decode,
decoding_metric=config.decoding_metric,
decoding_n_splits=config.decoding_n_splits,
@@ -618,7 +956,6 @@ def get_config(
eeg_reference=get_eeg_reference(config),
sessions=get_sessions(config),
exclude_subjects=config.exclude_subjects,
- all_contrasts=get_all_contrasts(config),
report_evoked_n_time_points=config.report_evoked_n_time_points,
cluster_permutation_p_threshold=config.cluster_permutation_p_threshold,
# TODO: needed because get_datatype gets called again...
@@ -628,67 +965,93 @@ def get_config(
return cfg
-@failsafe_run()
-def run_group_average_sensor(
- *,
- cfg: SimpleNamespace,
- exec_params: SimpleNamespace,
- subject: str,
-) -> None:
- if cfg.task_is_rest:
+def main(*, config: SimpleNamespace) -> None:
+ if config.task_is_rest:
msg = ' … skipping: for "rest" task.'
logger.info(**gen_log_kwargs(message=msg))
return
-
- sessions = get_sessions(cfg)
- if not sessions:
- sessions = [None]
-
+ cfg = get_config(
+ config=config,
+ )
+ exec_params = config.exec_params
+ subject = "average"
+ sessions = get_sessions(config=config)
+ if cfg.decode or cfg.decoding_csp:
+ decoding_contrasts = get_decoding_contrasts(config=cfg)
+ else:
+ decoding_contrasts = []
+ logs = list()
with get_parallel_backend(exec_params):
- for session in sessions:
- evokeds = average_evokeds(
+ # 1. Evoked data
+ logs += [
+ average_evokeds(
cfg=cfg,
+ exec_params=exec_params,
subject=subject,
session=session,
)
- if exec_params.interactive:
- for evoked in evokeds:
- evoked.plot()
-
- if cfg.decode:
- average_full_epochs_decoding(cfg, session)
- average_time_by_time_decoding(cfg, session)
- if cfg.decoding_csp:
+ for session in sessions
+ ]
+
+ # 2. Time decoding
+ if cfg.decode and decoding_contrasts:
+        # Full epochs (a single report function plots across all contrasts,
+        # so it's a separate cached step)
+ logs += [
+ average_full_epochs_decoding(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ cond_1=contrast[0],
+ cond_2=contrast[1],
+ exec_params=exec_params,
+ )
+ for session in sessions
+ for contrast in decoding_contrasts
+ ]
+ logs += [
+ average_full_epochs_report(
+ cfg=cfg,
+ exec_params=exec_params,
+ subject=subject,
+ session=session,
+ decoding_contrasts=decoding_contrasts,
+ )
+ for session in sessions
+ ]
+ # Time-by-time
parallel, run_func = parallel_func(
- average_csp_decoding, exec_params=exec_params
+ average_time_by_time_decoding, exec_params=exec_params
)
- parallel(
+ logs += parallel(
run_func(
cfg=cfg,
- session=session,
+ exec_params=exec_params,
subject=subject,
- condition_1=contrast[0],
- condition_2=contrast[1],
+ session=session,
+ cond_1=contrast[0],
+ cond_2=contrast[1],
)
- for session in get_sessions(config=cfg)
- for contrast in get_decoding_contrasts(config=cfg)
+ for session in sessions
+ for contrast in decoding_contrasts
)
- for session in sessions:
- run_report_average_sensor(
- cfg=cfg,
- exec_params=exec_params,
- subject=subject,
- session=session,
+ # 3. CSP
+ if cfg.decoding_csp and decoding_contrasts:
+ parallel, run_func = parallel_func(
+ average_csp_decoding, exec_params=exec_params
+ )
+ logs += parallel(
+ run_func(
+ cfg=cfg,
+ exec_params=exec_params,
+ subject=subject,
+ session=session,
+ cond_1=contrast[0],
+ cond_2=contrast[1],
+ )
+ for contrast in get_decoding_contrasts(config=cfg)
+ for session in sessions
)
-
-def main(*, config: SimpleNamespace) -> None:
- log = run_group_average_sensor(
- cfg=get_config(
- config=config,
- ),
- exec_params=config.exec_params,
- subject="average",
- )
- save_logs(config=config, logs=[log])
+ save_logs(config=config, logs=logs)
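The parallel_func/logs pattern used throughout main() mirrors joblib's Parallel/delayed idiom; a rough standalone equivalent (plain joblib with a hypothetical work function, not the pipeline helper):

from joblib import Parallel, delayed

def process(subject, session):  # hypothetical work unit returning a "log"
    return f"processed {subject}/{session}"

subjects, sessions = ["01", "02"], ["a", "b"]
logs = Parallel(n_jobs=2)(
    delayed(process)(subject, session)
    for session in sessions
    for subject in subjects
)
print(logs)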
diff --git a/mne_bids_pipeline/steps/source/_05_make_inverse.py b/mne_bids_pipeline/steps/source/_05_make_inverse.py
index 161fc9a06..6e96e13ef 100644
--- a/mne_bids_pipeline/steps/source/_05_make_inverse.py
+++ b/mne_bids_pipeline/steps/source/_05_make_inverse.py
@@ -3,7 +3,6 @@
Compute and apply an inverse solution for each evoked data set.
"""
-import pathlib
from types import SimpleNamespace
from typing import Optional
@@ -26,7 +25,7 @@
)
from ..._logging import logger, gen_log_kwargs
from ..._parallel import get_parallel_backend, parallel_func
-from ..._report import _open_report, _sanitize_cond_tag
+from ..._report import _open_report, _sanitize_cond_tag, _all_conditions
from ..._run import failsafe_run, save_logs, _sanitize_callable, _prep_out_files
@@ -97,22 +96,18 @@ def run_inverse(
# Apply inverse
snr = 3.0
lambda2 = 1.0 / snr**2
-
- if isinstance(cfg.conditions, dict):
- conditions = list(cfg.conditions.keys())
- else:
- conditions = cfg.conditions
-
+ conditions = _all_conditions(cfg=cfg)
method = cfg.inverse_method
if "evoked" in in_files:
fname_ave = in_files.pop("evoked")
evokeds = mne.read_evokeds(fname_ave)
for condition, evoked in zip(conditions, evokeds):
- pick_ori = None
- cond_str = sanitize_cond_name(condition)
- key = f"{cond_str}+{method}+hemi"
- out_files[key] = fname_ave.copy().update(suffix=key, extension=None)
+ suffix = f"{sanitize_cond_name(condition)}+{method}+hemi"
+ out_files[condition] = fname_ave.copy().update(
+ suffix=suffix,
+ extension=".h5",
+ )
if "eeg" in cfg.ch_types:
evoked.set_eeg_reference("average", projection=True)
@@ -122,10 +117,9 @@ def run_inverse(
inverse_operator=inverse_operator,
lambda2=lambda2,
method=method,
- pick_ori=pick_ori,
+ pick_ori=None,
)
- stc.save(out_files[key], overwrite=True)
- out_files[key] = pathlib.Path(str(out_files[key]) + "-lh.stc")
+ stc.save(out_files[condition], ftype="h5", overwrite=True)
with _open_report(
cfg=cfg, exec_params=exec_params, subject=subject, session=session
@@ -139,10 +133,11 @@ def run_inverse(
continue
msg = f"Rendering inverse solution for {condition}"
logger.info(**gen_log_kwargs(message=msg))
- fname_stc = out_files[key]
tags = ("source-estimate", _sanitize_cond_tag(condition))
+ if condition not in cfg.conditions:
+ tags = tags + ("contrast",)
report.add_stc(
- stc=fname_stc,
+ stc=out_files[condition],
title=f"Source: {condition}",
subject=cfg.fs_subject,
subjects_dir=cfg.fs_subjects_dir,
@@ -165,6 +160,7 @@ def get_config(
inverse_targets=config.inverse_targets,
ch_types=config.ch_types,
conditions=config.conditions,
+ contrasts=config.contrasts,
loose=config.loose,
depth=config.depth,
inverse_method=config.inverse_method,
diff --git a/mne_bids_pipeline/steps/source/_99_group_average.py b/mne_bids_pipeline/steps/source/_99_group_average.py
index 3212e0249..9e855d6df 100644
--- a/mne_bids_pipeline/steps/source/_99_group_average.py
+++ b/mne_bids_pipeline/steps/source/_99_group_average.py
@@ -4,7 +4,7 @@
"""
from types import SimpleNamespace
-from typing import Optional, List
+from typing import Optional
import numpy as np
@@ -17,17 +17,28 @@
sanitize_cond_name,
get_fs_subject,
get_sessions,
- get_all_contrasts,
_bids_kwargs,
)
from ..._logging import logger, gen_log_kwargs
from ..._parallel import get_parallel_backend, parallel_func
-from ..._report import run_report_average_source
-from ..._run import failsafe_run, save_logs
+from ..._report import _all_conditions, _open_report
+from ..._run import failsafe_run, save_logs, _prep_out_files
-def morph_stc(cfg, subject, fs_subject, session=None):
- bids_path = BIDSPath(
+def _stc_path(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ condition: str,
+ morphed: bool,
+) -> BIDSPath:
+ cond_str = sanitize_cond_name(condition)
+ suffix = [cond_str, cfg.inverse_method, "hemi"]
+ if morphed:
+ suffix.insert(2, "morph2fsaverage")
+ suffix = "+".join(suffix)
+ return BIDSPath(
subject=subject,
session=session,
task=cfg.task,
@@ -37,35 +48,47 @@ def morph_stc(cfg, subject, fs_subject, session=None):
space=cfg.space,
datatype=cfg.datatype,
root=cfg.deriv_root,
+ suffix=suffix,
+ extension=".h5",
check=False,
)
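To illustrate the suffix scheme _stc_path assembles (condition and method names here are hypothetical):

cond_str, method = "auditory", "dSPM"  # hypothetical values
suffix = [cond_str, method, "hemi"]
suffix.insert(2, "morph2fsaverage")  # only when morphed=True
print("+".join(suffix))  # auditory+dSPM+morph2fsaverage+hemi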
- morphed_stcs = []
-
- if cfg.task_is_rest:
- conditions = [cfg.task.lower()]
- else:
- if isinstance(cfg.conditions, dict):
- conditions = list(cfg.conditions.keys())
- else:
- conditions = cfg.conditions
- for condition in conditions:
- method = cfg.inverse_method
- cond_str = sanitize_cond_name(condition)
- inverse_str = method
- hemi_str = "hemi" # MNE will auto-append '-lh' and '-rh'.
- morph_str = "morph2fsaverage"
-
- fname_stc = bids_path.copy().update(
- suffix=f"{cond_str}+{inverse_str}+{hemi_str}"
- )
- fname_stc_fsaverage = bids_path.copy().update(
- suffix=f"{cond_str}+{inverse_str}+{morph_str}+{hemi_str}"
+def get_input_fnames_morph_stc(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ fs_subject: str,
+ session: Optional[str],
+) -> dict:
+ in_files = dict()
+ for condition in _all_conditions(cfg=cfg):
+ in_files[f"original-{condition}"] = _stc_path(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ condition=condition,
+ morphed=False,
)
+ return in_files
- stc = mne.read_source_estimate(fname_stc)
+@failsafe_run(
+ get_input_fnames=get_input_fnames_morph_stc,
+)
+def morph_stc(
+ *,
+ cfg: SimpleNamespace,
+ exec_params: SimpleNamespace,
+ subject: str,
+ fs_subject: str,
+ session: Optional[str],
+ in_files: dict,
+) -> dict:
+ out_files = dict()
+ for condition in _all_conditions(cfg=cfg):
+ fname_stc = in_files.pop(f"original-{condition}")
+ stc = mne.read_source_estimate(fname_stc)
morph = mne.compute_source_morph(
stc,
subject_from=fs_subject,
@@ -73,51 +96,98 @@ def morph_stc(cfg, subject, fs_subject, session=None):
subjects_dir=cfg.fs_subjects_dir,
)
stc_fsaverage = morph.apply(stc)
- stc_fsaverage.save(fname_stc_fsaverage, overwrite=True)
- morphed_stcs.append(stc_fsaverage)
+ key = f"morphed-{condition}"
+ out_files[key] = _stc_path(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ condition=condition,
+ morphed=True,
+ )
+ stc_fsaverage.save(out_files[key], ftype="h5", overwrite=True)
+
+ assert len(in_files) == 0, in_files
+ return _prep_out_files(out_files=out_files, exec_params=exec_params)
- del fname_stc, fname_stc_fsaverage
- return morphed_stcs
+def get_input_fnames_run_average(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+) -> dict:
+ in_files = dict()
+ assert subject == "average"
+ for condition in _all_conditions(cfg=cfg):
+ for this_subject in cfg.subjects:
+ in_files[f"{this_subject}-{condition}"] = _stc_path(
+ cfg=cfg,
+ subject=this_subject,
+ session=session,
+ condition=condition,
+ morphed=True,
+ )
+ return in_files
+@failsafe_run(
+ get_input_fnames=get_input_fnames_run_average,
+)
def run_average(
*,
cfg: SimpleNamespace,
+ exec_params: SimpleNamespace,
subject: str,
session: Optional[str],
- mean_morphed_stcs: List[mne.SourceEstimate],
+ in_files: dict,
):
- bids_path = BIDSPath(
- subject=subject,
- session=session,
- task=cfg.task,
- acquisition=cfg.acq,
- run=None,
- processing=cfg.proc,
- recording=cfg.rec,
- space=cfg.space,
- datatype=cfg.datatype,
- root=cfg.deriv_root,
- check=False,
- )
-
- if isinstance(cfg.conditions, dict):
- conditions = list(cfg.conditions.keys())
- else:
- conditions = cfg.conditions
-
- for condition, stc in zip(conditions, mean_morphed_stcs):
- method = cfg.inverse_method
- cond_str = sanitize_cond_name(condition)
- inverse_str = method
- hemi_str = "hemi" # MNE will auto-append '-lh' and '-rh'.
- morph_str = "morph2fsaverage"
-
- fname_stc_avg = bids_path.copy().update(
- suffix=f"{cond_str}+{inverse_str}+{morph_str}+{hemi_str}"
+ assert subject == "average"
+ out_files = dict()
+ conditions = _all_conditions(cfg=cfg)
+ for condition in conditions:
+ stc = np.array(
+ [
+ mne.read_source_estimate(in_files.pop(f"{this_subject}-{condition}"))
+ for this_subject in cfg.subjects
+ ]
+ ).mean(axis=0)
+ out_files[condition] = _stc_path(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ condition=condition,
+ morphed=True,
)
- stc.save(fname_stc_avg, overwrite=True)
+ stc.save(out_files[condition], ftype="h5", overwrite=True)
+
+ #######################################################################
+ #
+    # Visualize the grand-average source estimates.
+ #
+ with _open_report(
+ cfg=cfg, exec_params=exec_params, subject=subject, session=session
+ ) as report:
+ for condition in conditions:
+ msg = f"Rendering inverse solution for {condition}"
+ logger.info(**gen_log_kwargs(message=msg))
+ cond_str = sanitize_cond_name(condition)
+ tags = ("source-estimate", cond_str)
+ if condition in cfg.conditions:
+ title = f"Average (source): {condition}"
+ else: # It's a contrast of two conditions.
+ title = f"Average (source) contrast: {condition}"
+ tags = tags + ("contrast",)
+ report.add_stc(
+ stc=out_files[condition],
+ title=title,
+ subject="fsaverage",
+ subjects_dir=cfg.fs_subjects_dir,
+ n_time_points=cfg.report_stc_n_time_points,
+ tags=tags,
+ replace=True,
+ )
+ assert len(in_files) == 0, in_files
+ return _prep_out_files(out_files=out_files, exec_params=exec_params)
def get_config(
@@ -131,11 +201,11 @@ def get_config(
fs_subjects_dir=get_fs_subjects_dir(config),
subjects_dir=get_fs_subjects_dir(config),
ch_types=config.ch_types,
- subjects=config.subjects,
+ subjects=get_subjects(config=config),
exclude_subjects=config.exclude_subjects,
sessions=get_sessions(config),
use_template_mri=config.use_template_mri,
- all_contrasts=get_all_contrasts(config),
+ contrasts=config.contrasts,
report_stc_n_time_points=config.report_stc_n_time_points,
# TODO: needed because get_datatype gets called again...
data_type=config.data_type,
@@ -144,64 +214,39 @@ def get_config(
return cfg
-# pass 'average' subject for logging
-@failsafe_run()
-def run_group_average_source(
- *,
- cfg: SimpleNamespace,
- exec_params: SimpleNamespace,
- subject: str,
-) -> None:
- """Run group average in source space"""
+def main(*, config: SimpleNamespace) -> None:
+ if not config.run_source_estimation:
+ msg = "Skipping, run_source_estimation is set to False …"
+ logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
+ return
- mne.datasets.fetch_fsaverage(subjects_dir=get_fs_subjects_dir(cfg))
+ mne.datasets.fetch_fsaverage(subjects_dir=get_fs_subjects_dir(config))
+ cfg = get_config(config=config)
+ exec_params = config.exec_params
+ subjects = get_subjects(config)
+ sessions = get_sessions(config)
+ logs = list()
with get_parallel_backend(exec_params):
parallel, run_func = parallel_func(morph_stc, exec_params=exec_params)
- all_morphed_stcs = parallel(
+ logs += parallel(
run_func(
cfg=cfg,
+ exec_params=exec_params,
subject=subject,
fs_subject=get_fs_subject(config=cfg, subject=subject),
session=session,
)
- for subject in get_subjects(cfg)
- for session in get_sessions(cfg)
+ for subject in subjects
+ for session in sessions
)
- mean_morphed_stcs = np.array(all_morphed_stcs).mean(axis=0)
-
- # XXX to fix
- sessions = get_sessions(cfg)
- if sessions:
- session = sessions[0]
- else:
- session = None
-
+ logs += [
run_average(
- cfg=cfg,
- session=session,
- subject=subject,
- mean_morphed_stcs=mean_morphed_stcs,
- )
- run_report_average_source(
cfg=cfg,
exec_params=exec_params,
- subject=subject,
session=session,
+ subject="average",
)
-
-
-def main(*, config: SimpleNamespace) -> None:
- if not config.run_source_estimation:
- msg = "Skipping, run_source_estimation is set to False …"
- logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
- return
-
- log = run_group_average_source(
- cfg=get_config(
- config=config,
- ),
- exec_params=config.exec_params,
- subject="average",
- )
- save_logs(config=config, logs=[log])
+ for session in sessions
+ ]
+ save_logs(config=config, logs=logs)
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_base.py b/mne_bids_pipeline/tests/configs/config_ds000248_base.py
index 795ed618b..22f29b35f 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_base.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_base.py
@@ -21,8 +21,6 @@
find_flat_channels_meg = True
find_noisy_channels_meg = True
use_maxwell_filter = True
-_raw_split_size = "60MB" # hits both task-noise and task-audiovisual
-_epochs_split_size = "30MB"
def noise_cov(bp):
diff --git a/mne_bids_pipeline/tests/configs/config_ds001971.py b/mne_bids_pipeline/tests/configs/config_ds001971.py
index 2f3307c85..c78d3c858 100644
--- a/mne_bids_pipeline/tests/configs/config_ds001971.py
+++ b/mne_bids_pipeline/tests/configs/config_ds001971.py
@@ -13,6 +13,18 @@
ch_types = ["eeg"]
reject = {"eeg": 150e-6}
conditions = ["AdvanceTempo", "DelayTempo"]
+contrasts = [("AdvanceTempo", "DelayTempo")]
subjects = ["001"]
runs = ["01"]
+epochs_decim = 5 # to 100 Hz
+
+# This is mostly for testing purposes!
+decode = True
+decoding_time_generalization = True
+decoding_time_generalization_decim = 2
+decoding_csp = True
+decoding_csp_freqs = {
+ "beta": [13, 20, 30],
+}
+decoding_csp_times = [-0.2, 0.0, 0.2, 0.4]
diff --git a/mne_bids_pipeline/tests/conftest.py b/mne_bids_pipeline/tests/conftest.py
index e17b46076..b75d009ec 100644
--- a/mne_bids_pipeline/tests/conftest.py
+++ b/mne_bids_pipeline/tests/conftest.py
@@ -39,6 +39,10 @@ def pytest_configure(config):
ignore:use_inf_as_na option is deprecated.*:FutureWarning
# Dask distributed with jsonschema 4.18
ignore:jsonschema\.RefResolver is deprecated.*:DeprecationWarning
+ # seaborn->pandas
+ ignore:is_categorical_dtype is deprecated.*:FutureWarning
+ ignore:All-NaN axis encountered.*:RuntimeWarning
"""
for warning_line in warning_lines.split("\n"):
warning_line = warning_line.strip()
diff --git a/mne_bids_pipeline/tests/test_documented.py b/mne_bids_pipeline/tests/test_documented.py
index 727dcf6bc..097fc1032 100644
--- a/mne_bids_pipeline/tests/test_documented.py
+++ b/mne_bids_pipeline/tests/test_documented.py
@@ -121,7 +121,7 @@ def test_datasets_in_doc():
n_found = len(pw.findall(circle_yaml_src))
assert n_found == this_count, f"{pw} ({n_found} != {this_count})"
# jobs: test_*: steps: run test
- cp = re.compile(rf" command: \$RUN_TESTS {name}.*")
+ cp = re.compile(rf" command: \$RUN_TESTS[ -rc]+{name}.*")
n_found = len(cp.findall(circle_yaml_src))
assert n_found == count, f"{cp} ({n_found} != {count})"
diff --git a/mne_bids_pipeline/tests/test_run.py b/mne_bids_pipeline/tests/test_run.py
index 28e2a7e71..909d6d091 100644
--- a/mne_bids_pipeline/tests/test_run.py
+++ b/mne_bids_pipeline/tests/test_run.py
@@ -35,8 +35,9 @@ class _TestOptionsT(TypedDict, total=False):
# "env": {},
# "task": None,
# "requires": (),
+# "extra_config": "",
# }
-#
+
TEST_SUITE: Dict[str, _TestOptionsT] = {
"ds003392": {},
"ds004229": {},
@@ -60,6 +61,12 @@ class _TestOptionsT(TypedDict, total=False):
"ds000248_base": {
"steps": ("preprocessing", "sensor", "source"),
"requires": ("freesurfer",),
+ "extra_config": """
+_raw_split_size = "60MB" # hits both task-noise and task-audiovisual
+_epochs_split_size = "30MB"
+# use n_jobs=1 here to ensure that we get coverage for metadata_query
+_n_jobs = {"preprocessing/_05_make_epochs": 1}
+""",
},
"ds000248_ica": {},
"ds000248_T1_BEM": {
@@ -92,6 +99,13 @@ class _TestOptionsT(TypedDict, total=False):
"dataset": "ERP_CORE",
"config": "config_ERP_CORE.py",
"task": "ERN",
+ "extra_config": """
+# use n_jobs = 1 with loky to ensure that the CSP steps get proper coverage
+_n_jobs = {
+ "sensor/_05_decoding_csp": 1,
+ "sensor/_99_group_average": 1,
+}
+""",
},
"ERP_CORE_LRP": {
"dataset": "ERP_CORE",
@@ -139,33 +153,30 @@ def dataset_test(request):
@pytest.mark.dataset_test
@pytest.mark.parametrize("dataset", list(TEST_SUITE))
-def test_run(dataset, monkeypatch, dataset_test, capsys):
+def test_run(dataset, monkeypatch, dataset_test, capsys, tmp_path):
"""Test running a dataset."""
test_options = TEST_SUITE[dataset]
-
- # export the environment variables
- monkeypatch.setenv("DATASET", dataset)
- for key, value in test_options.get("env", {}).items():
- monkeypatch.setenv(key, value)
-
config = test_options.get("config", f"config_{dataset}.py")
config_path = BIDS_PIPELINE_DIR / "tests" / "configs" / config
+ extra_config = TEST_SUITE[dataset].get("extra_config", "")
+ if extra_config:
+ extra_path = tmp_path / "extra_config.py"
+ extra_path.write_text(extra_config)
+ monkeypatch.setenv("_MNE_BIDS_STUDY_TESTING_EXTRA_CONFIG", str(extra_path))
# XXX Workaround for buggy date in ds000247. Remove this and the
# XXX file referenced here once fixed!!!
fix_path = Path(__file__).parent
if dataset == "ds000247":
- shutil.copy(
- src=fix_path / "ds000247_scans.tsv",
- dst=Path(
- "~/mne_data/ds000247/sub-0002/ses-01/" "sub-0002_ses-01_scans.tsv"
- ).expanduser(),
+ dst = (
+ DATA_DIR / "ds000247" / "sub-0002" / "ses-01" / "sub-0002_ses-01_scans.tsv"
)
+ shutil.copy(src=fix_path / "ds000247_scans.tsv", dst=dst)
# XXX Workaround for buggy participant_id in ds001971
elif dataset == "ds001971":
shutil.copy(
src=fix_path / "ds001971_participants.tsv",
- dst=Path("~/mne_data/ds001971/participants.tsv").expanduser(),
+ dst=DATA_DIR / "ds001971" / "participants.tsv",
)
# Run the tests.
From 018552e40aa86a1eafdbe958d804d2a22ff455b8 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Wed, 19 Jul 2023 14:21:07 -0400
Subject: [PATCH 010/132] MAINT: Test MD5 hashing too (#768)
---
docs/source/v1.5.md.inc | 1 +
mne_bids_pipeline/tests/configs/config_ds001971.py | 3 +++
2 files changed, 4 insertions(+)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 7b2a70b0c..a4aa314f2 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -15,6 +15,7 @@
### :medical_symbol: Code health
- Fixed doc build errors and dependency specifications (#755 by @larsoner)
+- Ensure `memory_file_method = "hash"` is tested (#768 by @larsoner)
### :bug: Bug fixes
diff --git a/mne_bids_pipeline/tests/configs/config_ds001971.py b/mne_bids_pipeline/tests/configs/config_ds001971.py
index c78d3c858..7a64f940d 100644
--- a/mne_bids_pipeline/tests/configs/config_ds001971.py
+++ b/mne_bids_pipeline/tests/configs/config_ds001971.py
@@ -28,3 +28,6 @@
"beta": [13, 20, 30],
}
decoding_csp_times = [-0.2, 0.0, 0.2, 0.4]
+
+# Just to test that MD5 works
+memory_file_method = "hash"
From ea95979fdb0ef807b3cd262da409cf010ada3da3 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Wed, 19 Jul 2023 15:50:44 -0400
Subject: [PATCH 011/132] ENH: Better logging of distances (#769)
---
docs/source/v1.5.md.inc | 1 +
.../steps/source/_04_make_forward.py | 23 +++++++++++++++----
2 files changed, 19 insertions(+), 5 deletions(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index a4aa314f2..18e8a2693 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -7,6 +7,7 @@
- Added support for extended SSS (eSSS) in Maxwell filtering (#762 by @larsoner)
- Output logging spacing improved (#764 by @larsoner)
- Added caching of sensor and source average steps (#765 by @larsoner)
+- Improved logging of coregistration distances (#769 by @larsoner)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/steps/source/_04_make_forward.py b/mne_bids_pipeline/steps/source/_04_make_forward.py
index d2c2bfdb6..a2c1fc211 100644
--- a/mne_bids_pipeline/steps/source/_04_make_forward.py
+++ b/mne_bids_pipeline/steps/source/_04_make_forward.py
@@ -6,6 +6,8 @@
from types import SimpleNamespace
from typing import Optional
+import numpy as np
+
import mne
from mne.coreg import Coregistration
from mne_bids import BIDSPath, get_head_mri_trans
@@ -30,6 +32,8 @@
def _prepare_trans_template(
*,
cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
info: mne.Info,
) -> mne.transforms.Transform:
assert isinstance(cfg.use_template_mri, str)
@@ -47,25 +51,30 @@ def _prepare_trans_template(
)
else:
fiducials = "estimated" # get fiducials from fsaverage
+ logger.info(**gen_log_kwargs("Matching template MRI using fiducials"))
coreg = Coregistration(
info, cfg.fs_subject, cfg.fs_subjects_dir, fiducials=fiducials
)
- coreg.fit_fiducials(verbose=True)
+ # Adapted from MNE-Python
+ coreg.fit_fiducials(verbose=False)
+ dist = np.median(coreg.compute_dig_mri_distances() * 1000)
+ logger.info(**gen_log_kwargs(f"Median dig ↔ MRI distance: {dist:6.2f} mm"))
trans = coreg.trans
return trans
-def _prepare_trans(
+def _prepare_trans_subject(
*,
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
bids_path: BIDSPath,
) -> mne.transforms.Transform:
# Generate a head ↔ MRI transformation matrix from the
# electrophysiological and MRI sidecar files, and save it to an MNE
# "trans" file in the derivatives folder.
- subject, session = bids_path.subject, bids_path.session
# TODO: This breaks our encapsulation
config = _import_config(
@@ -90,7 +99,7 @@ def _prepare_trans(
BIDSPath(subject=subject, session=session)
)
- msg = "Estimating head ↔ MRI transform"
+ msg = "Computing head ↔ MRI transform from matched fiducials"
logger.info(**gen_log_kwargs(message=msg))
trans = get_head_mri_trans(
@@ -174,11 +183,15 @@ def run_forward(
if cfg.use_template_mri is not None:
trans = _prepare_trans_template(
cfg=cfg,
+ subject=subject,
+ session=session,
info=info,
)
else:
- trans = _prepare_trans(
+ trans = _prepare_trans_subject(
cfg=cfg,
+ subject=subject,
+ session=session,
exec_params=exec_params,
bids_path=bids_path,
)
From 4b289c9e528e2972303b28d8ba1bf32e469db6e2 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Mon, 7 Aug 2023 10:27:14 -0400
Subject: [PATCH 012/132] MAINT: pre-commit.ci (#774)
---
.pre-commit-config.yaml | 11 +++++++++--
docs/source/v1.5.md.inc | 1 +
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 66dfbf03c..9fac19fcc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,7 +1,9 @@
---
-# Eventually we should use yamllint, too
files: ^(.*\.(py|yaml))$
-exclude: ^(\.[^/]*cache/.*)$
+# We need to match the exclude list in pyproject.toml because pre-commit.ci
+# passes filenames explicitly, and those bypass per-tool exclude filters
+# (e.g., tool.black's)
+exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/psf/black
rev: 22.10.0
@@ -20,3 +22,8 @@ repos:
- id: codespell
additional_dependencies:
- tomli
+ - repo: https://github.com/adrienverge/yamllint.git
+ rev: v1.29.0
+ hooks:
+ - id: yamllint
+ args: [--strict]
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 18e8a2693..234232028 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -17,6 +17,7 @@
- Fixed doc build errors and dependency specifications (#755 by @larsoner)
- Ensure `memory_file_method = "hash"` is tested (#768 by @larsoner)
+- Enable [pre-commit.ci](https://pre-commit.ci) (#774 by @larsoner)
### :bug: Bug fixes
From 721e7288366200545c10fd9d9f860541a0a014e0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Victor=20F=C3=A9rat?=
Date: Mon, 7 Aug 2023 19:01:32 +0200
Subject: [PATCH 013/132] Added deriv_root argument to CLI (#773)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: Richard Höchenberger
---
docs/source/v1.5.md.inc | 1 +
mne_bids_pipeline/_main.py | 17 +++++++++++++++++
2 files changed, 18 insertions(+)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 234232028..af661698d 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -2,6 +2,7 @@
### :new: New features & enhancements
+- Added `deriv_root` argument to CLI (#773 by @vferat)
- Added support for annotating bad segments based on head movement velocity (#757 by @larsoner)
- Added examples of T1 and FLASH BEM to website (#758 by @larsoner)
- Added support for extended SSS (eSSS) in Maxwell filtering (#762 by @larsoner)
diff --git a/mne_bids_pipeline/_main.py b/mne_bids_pipeline/_main.py
index cd9ee1037..9489a2cca 100755
--- a/mne_bids_pipeline/_main.py
+++ b/mne_bids_pipeline/_main.py
@@ -59,6 +59,18 @@ def main():
default=None,
help="BIDS root directory of the data to process.",
)
+ parser.add_argument(
+ "--deriv_root",
+ dest="deriv_root",
+ default=None,
+ help=dedent(
+ """\
+ The root of the derivatives directory
+ in which the pipeline will store the processing results.
+ If unspecified, this will be derivatives/mne-bids-pipeline
+ inside the BIDS root."""
+ ),
+    )
parser.add_argument(
"--subject", dest="subject", default=None, help="The subject to process."
)
@@ -115,6 +127,7 @@ def main():
)
steps = options.steps
root_dir = options.root_dir
+ deriv_root = options.deriv_root
subject, session = options.subject, options.session
task, run = options.task, options.run
n_jobs = options.n_jobs
@@ -148,6 +161,10 @@ def main():
overrides = SimpleNamespace()
if root_dir:
overrides.bids_root = pathlib.Path(root_dir).expanduser().resolve(strict=True)
+ if deriv_root:
+ overrides.deriv_root = (
+ pathlib.Path(deriv_root).expanduser().resolve(strict=False)
+ )
if subject:
overrides.subjects = [subject]
if session:
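The override above deliberately resolves with strict=False: unlike bids_root, the derivatives directory may not exist yet. A quick sketch of the difference:

import pathlib

deriv_root = "~/scratch/derivatives"  # hypothetical CLI value
# strict=False resolves the path even if the directory does not exist yet.
print(pathlib.Path(deriv_root).expanduser().resolve(strict=False))
# strict=True (as used for bids_root) would raise FileNotFoundError instead.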
From 5c19b941e9f6a172277d1b4195b8561af5e6da74 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Mon, 7 Aug 2023 18:23:17 -0400
Subject: [PATCH 014/132] MAINT: Use pooch for downloading web data (#775)
---
docs/source/examples/gen_examples.py | 14 ++--
docs/source/v1.5.md.inc | 1 +
mne_bids_pipeline/_download.py | 105 +++++++++------------------
mne_bids_pipeline/tests/datasets.py | 72 ++++--------------
mne_bids_pipeline/tests/test_run.py | 25 ++-----
pyproject.toml | 1 +
6 files changed, 68 insertions(+), 150 deletions(-)
diff --git a/docs/source/examples/gen_examples.py b/docs/source/examples/gen_examples.py
index 08aa7d94f..8fa4f114a 100755
--- a/docs/source/examples/gen_examples.py
+++ b/docs/source/examples/gen_examples.py
@@ -142,7 +142,7 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict:
logger.warning(f"Dataset {dataset_name} has no HTML report.")
continue
- options = DATASET_OPTIONS[dataset_options_key]
+ options = DATASET_OPTIONS[dataset_options_key].copy() # we modify locally
report_str = "\n## Generated output\n\n"
example_target_dir = this_dir / dataset_name
@@ -198,20 +198,24 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict:
f"{fname.name} :fontawesome-solid-square-poll-vertical:\n\n"
)
- if options["openneuro"]:
+ assert sum(key in options for key in ("openneuro", "git", "web", "datalad")) == 1
+ if "openneuro" in options:
url = f'https://openneuro.org/datasets/{options["openneuro"]}'
- elif options["git"]:
+ elif "git" in options:
url = options["git"]
- elif options["web"]:
+ elif "web" in options:
url = options["web"]
else:
+ assert "datalad" in options # guaranteed above
url = ""
source_str = (
f"## Dataset source\n\nThis dataset was acquired from " f"[{url}]({url})\n"
)
- if options["openneuro"]:
+ if "openneuro" in options:
+ for key in ("include", "exclude"):
+ options[key] = options.get(key, [])
download_str = (
f'\n??? example "How to download this dataset"\n'
f" Run in your terminal:\n"
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index af661698d..14da9b83a 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -19,6 +19,7 @@
- Fixed doc build errors and dependency specifications (#755 by @larsoner)
- Ensure `memory_file_method = "hash"` is tested (#768 by @larsoner)
- Enable [pre-commit.ci](https://pre-commit.ci) (#774 by @larsoner)
+- Use `pooch` for web downloads (#775 by @larsoner)
### :bug: Bug fixes
diff --git a/mne_bids_pipeline/_download.py b/mne_bids_pipeline/_download.py
index bec670897..33e565207 100644
--- a/mne_bids_pipeline/_download.py
+++ b/mne_bids_pipeline/_download.py
@@ -13,7 +13,10 @@ def _download_via_datalad(*, ds_name: str, ds_path: Path):
import datalad.api as dl
print('datalad installing "{}"'.format(ds_name))
- git_url = DATASET_OPTIONS[ds_name]["git"]
+ options = DATASET_OPTIONS[ds_name]
+ git_url = options["git"]
+ assert "exclude" not in options
+ assert "hash" not in options
dataset = dl.install(path=ds_path, source=git_url)
# XXX: git-annex bug:
@@ -24,7 +27,7 @@ def _download_via_datalad(*, ds_name: str, ds_path: Path):
else:
n_jobs = 1
- for to_get in DATASET_OPTIONS[ds_name]["include"]:
+ for to_get in DATASET_OPTIONS[ds_name].get("include", []):
print('datalad get data "{}" for "{}"'.format(to_get, ds_name))
dataset.get(to_get, jobs=n_jobs)
@@ -32,23 +35,27 @@ def _download_via_datalad(*, ds_name: str, ds_path: Path):
def _download_via_openneuro(*, ds_name: str, ds_path: Path):
import openneuro
+ options = DATASET_OPTIONS[ds_name]
+ assert "hash" not in options
+
openneuro.download(
- dataset=DATASET_OPTIONS[ds_name]["openneuro"],
+ dataset=options["openneuro"],
target_dir=ds_path,
- include=DATASET_OPTIONS[ds_name]["include"],
- exclude=DATASET_OPTIONS[ds_name]["exclude"],
+ include=options.get("include", []),
+ exclude=options.get("exclude", []),
verify_size=False,
)
def _download_from_web(*, ds_name: str, ds_path: Path):
"""Retrieve Zip archives from a web URL."""
- import cgi
- import zipfile
- import httpx
- from tqdm import tqdm
+ import pooch
- url = DATASET_OPTIONS[ds_name]["web"]
+ options = DATASET_OPTIONS[ds_name]
+ url = options["web"]
+ known_hash = options["hash"]
+ assert "exclude" not in options
+ assert "include" not in options
if ds_path.exists():
print(
"Dataset directory already exists; remove it if you wish to "
@@ -57,65 +64,26 @@ def _download_from_web(*, ds_name: str, ds_path: Path):
return
ds_path.mkdir(parents=True, exist_ok=True)
-
- with httpx.Client(follow_redirects=True) as client:
- with client.stream("GET", url=url) as response:
- if not response.is_error:
- pass # All good!
- else:
- raise RuntimeError(
- f"Error {response.status_code} when trying " f"to download {url}"
- )
-
- header = response.headers["content-disposition"]
- _, params = cgi.parse_header(header)
- # where to store the archive
- outfile = ds_path / params["filename"]
- remote_file_size = int(response.headers["content-length"])
-
- with open(outfile, mode="wb") as f:
- with tqdm(
- desc=params["filename"],
- initial=0,
- total=remote_file_size,
- unit="B",
- unit_scale=True,
- unit_divisor=1024,
- leave=False,
- ) as progress:
- num_bytes_downloaded = response.num_bytes_downloaded
-
- for chunk in response.iter_bytes():
- f.write(chunk)
- progress.update(
- response.num_bytes_downloaded - num_bytes_downloaded
- )
- num_bytes_downloaded = response.num_bytes_downloaded
-
- assert outfile.suffix == ".zip"
-
- with zipfile.ZipFile(outfile) as zip:
- for zip_info in zip.infolist():
- path_in_zip = Path(zip_info.filename)
- # omit top-level directory from Zip archive
- target_path = str(Path(*path_in_zip.parts[1:]))
- if str(target_path) in (".", ".."):
- continue
- if zip_info.filename.endswith("/"):
- (ds_path / target_path).mkdir(parents=True, exist_ok=True)
- continue
- zip_info.filename = target_path
- print(f"Extracting: {target_path}")
- zip.extract(zip_info, ds_path)
-
- outfile.unlink()
+ path = ds_path.parent.resolve(strict=True)
+ fname = f"{ds_name}.zip"
+ pooch.retrieve(
+ url=url,
+ path=path,
+ fname=fname,
+ processor=pooch.Unzip(extract_dir="."), # relative to path
+ progressbar=True,
+ known_hash=known_hash,
+ )
+ (path / f"{ds_name}.zip").unlink()
def _download(*, ds_name: str, ds_path: Path):
- openneuro_name = DATASET_OPTIONS[ds_name]["openneuro"]
- git_url = DATASET_OPTIONS[ds_name]["git"]
- osf_node = DATASET_OPTIONS[ds_name]["osf"]
- web_url = DATASET_OPTIONS[ds_name]["web"]
+ options = DATASET_OPTIONS[ds_name]
+ openneuro_name = options.get("openneuro", "")
+ git_url = options.get("git", "")
+ osf_node = options.get("osf", "")
+ web_url = options.get("web", "")
+ assert sum(bool(x) for x in (openneuro_name, git_url, osf_node, web_url)) == 1
if openneuro_name:
download_func = _download_via_openneuro
@@ -123,10 +91,9 @@ def _download(*, ds_name: str, ds_path: Path):
download_func = _download_via_datalad
elif osf_node:
raise RuntimeError("OSF downloads are currently not supported.")
- elif web_url:
- download_func = _download_from_web
else:
- raise ValueError("No download location was specified.")
+ assert web_url
+ download_func = _download_from_web
download_func(ds_name=ds_name, ds_path=ds_path)
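A minimal standalone sketch of the pooch call pattern adopted above (URL and paths hypothetical; real calls should pass a known_hash such as "sha256:<digest>" so downloads are verified):

import pooch

pooch.retrieve(
    url="https://example.com/dataset.zip",  # hypothetical archive URL
    known_hash=None,  # skip verification here; use "sha256:<digest>" in practice
    fname="dataset.zip",
    path="/tmp/mne_data",
    processor=pooch.Unzip(extract_dir="."),  # unpack next to the archive
    progressbar=True,
)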
diff --git a/mne_bids_pipeline/tests/datasets.py b/mne_bids_pipeline/tests/datasets.py
index f93f3206d..c27d1ab4e 100644
--- a/mne_bids_pipeline/tests/datasets.py
+++ b/mne_bids_pipeline/tests/datasets.py
@@ -3,23 +3,22 @@
from typing import Dict, List, TypedDict
-class DATASET_OPTIONS_T(TypedDict):
- git: str
- openneuro: str
- osf: str
- web: str
- include: List[str]
- exclude: List[str]
+# If not supplied below, the effective defaults are listed in comments
+class DATASET_OPTIONS_T(TypedDict, total=False):
+ git: str # ""
+ openneuro: str # ""
+ osf: str # ""
+ web: str # ""
+ include: List[str] # []
+ exclude: List[str] # []
+ hash: str # ""
DATASET_OPTIONS: Dict[str, DATASET_OPTIONS_T] = {
"ERP_CORE": {
- "git": "",
- "openneuro": "",
- "osf": "", # original dataset: '9f5w7'
+ # original dataset: "osf": "9f5w7"
"web": "https://osf.io/3zk6n/download?version=2",
- "include": [],
- "exclude": [],
+ "hash": "sha256:ddc94a7c9ba1922637f2770592dd51c019d341bf6bc8558e663e1979a4cb002f", # noqa: E501
},
"eeg_matchingpennies": {
# This dataset started out on osf.io as dataset https://osf.io/cj2dr
@@ -31,18 +30,12 @@ class DATASET_OPTIONS_T(TypedDict):
# "include": ["sub-05"],
#
# So now we mirror this datalad-fetched git repo back on osf.io!
- "git": "",
- "openneuro": "",
- "osf": "", # original dataset: 'cj2dr'
+ # original dataset: "osf": "cj2dr"
"web": "https://osf.io/download/8rbfk?version=1",
- "include": [],
- "exclude": [],
+ "hash": "sha256:06bfbe52c50b9343b6b8d2a5de3dd33e66ad9303f7f6bfbe6868c3c7c375fafd", # noqa: E501
},
"ds003104": { # Anonymized "somato" dataset.
- "git": "",
"openneuro": "ds003104",
- "osf": "",
- "web": "",
"include": ["sub-01", "derivatives/freesurfer/subjects"],
"exclude": [
"derivatives/freesurfer/subjects/01/mri/aparc+aseg.mgz",
@@ -51,30 +44,19 @@ class DATASET_OPTIONS_T(TypedDict):
],
},
"ds000246": {
- "git": "",
"openneuro": "ds000246",
- "osf": "",
- "web": "",
"include": [
"sub-0001/meg/sub-0001_task-AEF_run-01_meg.ds",
"sub-0001/meg/sub-0001_task-AEF_run-01_meg.json",
"sub-0001/meg/sub-0001_task-AEF_run-01_channels.tsv",
],
- "exclude": [],
},
"ds000247": {
- "git": "",
"openneuro": "ds000247",
- "osf": "",
- "web": "",
"include": ["sub-0002/ses-01/meg"],
- "exclude": [],
},
"ds000248": {
- "git": "",
"openneuro": "ds000248",
- "osf": "",
- "web": "",
"include": ["sub-01", "sub-emptyroom", "derivatives/freesurfer/subjects"],
"exclude": [
"derivatives/freesurfer/subjects/fsaverage/mri/aparc.a2005s+aseg.mgz", # noqa: E501
@@ -88,10 +70,7 @@ class DATASET_OPTIONS_T(TypedDict):
],
},
"ds000117": {
- "git": "",
"openneuro": "ds000117",
- "osf": "",
- "web": "",
"include": [
"sub-01/ses-meg/meg/sub-01_ses-meg_task-facerecognition_run-01_*", # noqa: E501
"sub-01/ses-meg/meg/sub-01_ses-meg_task-facerecognition_run-02_*", # noqa: E501
@@ -102,29 +81,17 @@ class DATASET_OPTIONS_T(TypedDict):
"derivatives/meg_derivatives/ct_sparse.fif",
"derivatives/meg_derivatives/sss_cal.dat",
],
- "exclude": [],
},
"ds003775": {
- "git": "",
"openneuro": "ds003775",
- "osf": "",
- "web": "",
"include": ["sub-010"],
- "exclude": [],
},
"ds001810": {
- "git": "",
"openneuro": "ds001810",
- "osf": "",
- "web": "",
"include": ["sub-01"],
- "exclude": [],
},
"ds001971": {
- "git": "",
"openneuro": "ds001971",
- "osf": "",
- "web": "",
"include": [
"sub-001/eeg/sub-001_task-AudioCueWalkingStudy_run-01_events.tsv",
"sub-001/eeg/sub-001_task-AudioCueWalkingStudy_run-01_eeg.set",
@@ -134,38 +101,25 @@ class DATASET_OPTIONS_T(TypedDict):
"sub-001/eeg/sub-001_task-AudioCueWalkingStudy_run-01_coordsystem.json", # noqa: E501
"sub-001/eeg/sub-001_task-AudioCueWalkingStudy_run-01_channels.tsv", # noqa: E501
],
- "exclude": [],
},
"ds003392": {
- "git": "",
"openneuro": "ds003392",
- "osf": "",
- "web": "",
"include": ["sub-01", "sub-emptyroom/ses-19111211"],
- "exclude": [],
},
"ds004107": {
- "git": "",
"openneuro": "ds004107",
- "osf": "",
- "web": "",
"include": [
"sub-mind002/ses-01/meg/*coordsystem*",
"sub-mind002/ses-01/meg/*auditory*",
],
- "exclude": [],
},
"ds004229": {
- "git": "",
"openneuro": "ds004229",
- "osf": "",
- "web": "",
"include": [
"sub-102",
"sub-emptyroom/ses-20000101",
"derivatives/meg_derivatives/ct_sparse.fif",
"derivatives/meg_derivatives/sss_cal.dat",
],
- "exclude": [],
},
}
diff --git a/mne_bids_pipeline/tests/test_run.py b/mne_bids_pipeline/tests/test_run.py
index 909d6d091..eb07233b1 100644
--- a/mne_bids_pipeline/tests/test_run.py
+++ b/mne_bids_pipeline/tests/test_run.py
@@ -18,26 +18,17 @@
# Once PEP655 lands in 3.11 we can use NotRequired instead of total=False
+# Effective defaults are listed in comments
class _TestOptionsT(TypedDict, total=False):
- dataset: str
- config: str
- steps: Collection[str]
- task: Optional[str]
- env: Dict[str, str]
- requires: Collection[str]
+ dataset: str # key.split("_")[0]
+ config: str # f"config_{key}.py"
+ steps: Collection[str] # ("preprocessing", "sensor")
+ task: Optional[str] # None
+ env: Dict[str, str] # {}
+ requires: Collection[str] # ()
+ extra_config: str # ""
-# If not supplied below, the defaults are:
-# key: {
-# "dataset": key.split("_")[0],
-# "config": f"config_{key}.py",
-# "steps": ("preprocessing", "sensor"),
-# "env": {},
-# "task": None,
-# "requires": (),
-# "extra_config": "",
-# }
-
TEST_SUITE: Dict[str, _TestOptionsT] = {
"ds003392": {},
"ds004229": {},
diff --git a/pyproject.toml b/pyproject.toml
index ae20edf69..f080347f2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -52,6 +52,7 @@ dynamic = ["version"]
tests = [
"pytest",
"pytest-cov",
+ "pooch",
"psutil",
"datalad",
"ruff",
From a931cae9ed41c9acdfef8b8e725eb1b817f1e697 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 8 Aug 2023 09:18:31 -0400
Subject: [PATCH 015/132] [pre-commit.ci] pre-commit autoupdate (#776)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 9fac19fcc..92cf44ae9 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,24 +6,24 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/psf/black
- rev: 22.10.0
+ rev: 23.7.0
hooks:
- id: black
args:
- --safe
- --quiet
- - repo: https://github.com/charliermarsh/ruff-pre-commit
- rev: v0.0.178
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.0.282
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
- rev: v2.2.2
+ rev: v2.2.5
hooks:
- id: codespell
additional_dependencies:
- tomli
- repo: https://github.com/adrienverge/yamllint.git
- rev: v1.29.0
+ rev: v1.32.0
hooks:
- id: yamllint
args: [--strict]
From 918cc3bb1ed69d12cf23e3295eb9b512fc914523 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Mon, 14 Aug 2023 16:40:30 -0400
Subject: [PATCH 016/132] API: Change default to _cache (#778)
---
docs/source/settings/general.md | 1 +
docs/source/v1.5.md.inc | 2 ++
mne_bids_pipeline/_config.py | 9 ++++++++-
mne_bids_pipeline/_config_import.py | 1 +
mne_bids_pipeline/_run.py | 2 +-
5 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/docs/source/settings/general.md b/docs/source/settings/general.md
index d0814cce6..2640f5f2b 100644
--- a/docs/source/settings/general.md
+++ b/docs/source/settings/general.md
@@ -39,6 +39,7 @@
- random_state
- shortest_event
- memory_location
+ - memory_subdir
- memory_file_method
- memory_verbose
- config_validation
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 14da9b83a..7434586ad 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -12,6 +12,8 @@
[//]: # (### :warning: Behavior changes)
+- The default cache directory is now `_cache` within the derivatives folder when using `memory_location=True`; set [`memory_subdir="joblib"`][mne_bids_pipeline._config.memory_subdir] to restore the v1.4 behavior (#778 by @larsoner)
+
[//]: # (- Whatever (#000 by @whoever))
### :medical_symbol: Code health
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 85b0335e1..0ca130754 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -2107,7 +2107,14 @@ def noise_cov(bids_path):
"""
If not None (or False), caching will be enabled and the cache files will be
stored in the given directory. The default (True) will use a
-`'joblib'` subdirectory in the BIDS derivative root of the dataset.
+`"_cache"` subdirectory (name configurable via the
+[`memory_subdir`][mne_bids_pipeline._config.memory_subdir]
+variable) in the BIDS derivative root of the dataset.
+"""
+
+memory_subdir: str = "_cache"
+"""
+The caching directory name to use if `memory_location` is `True`.
"""
memory_file_method: Literal["mtime", "hash"] = "mtime"
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index 36568b1f2..43f0c3725 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -75,6 +75,7 @@ def _import_config(
"interactive",
# Caching
"memory_location",
+ "memory_subdir",
"memory_verbose",
"memory_file_method",
# Misc
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index 5908e4b0c..c76126ea2 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -123,7 +123,7 @@ class ConditionalStepMemory:
def __init__(self, *, exec_params, get_input_fnames, get_output_fnames):
memory_location = exec_params.memory_location
if memory_location is True:
- use_location = exec_params.deriv_root / "joblib"
+ use_location = exec_params.deriv_root / exec_params.memory_subdir
elif not memory_location:
use_location = None
else:
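In a user configuration file, the previous cache layout can be restored with the options introduced above (a sketch showing the relevant knobs, not new defaults):

memory_location = True       # cache step outputs in the derivatives root
memory_subdir = "joblib"     # v1.4 layout; the new default is "_cache"
memory_file_method = "hash"  # optional: invalidate on content, not mtime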
From 34350ce6d795a3b20489ab42f6dc27e29be823f6 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 15 Aug 2023 10:47:39 +0200
Subject: [PATCH 017/132] [pre-commit.ci] pre-commit autoupdate (#780)
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 92cf44ae9..a088f558c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,7 +13,7 @@ repos:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.0.282
+ rev: v0.0.284
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From 7a5d01d6f2958e3b40d71b05379bf2e1b6f51564 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Tue, 15 Aug 2023 13:52:28 -0400
Subject: [PATCH 018/132] ENH: Validate inputs with pydantic (#779)
---
docs/source/v1.5.md.inc | 2 +
mne_bids_pipeline/_config.py | 34 ++---
mne_bids_pipeline/_config_import.py | 132 ++++++++++--------
mne_bids_pipeline/_decoding.py | 27 +++-
mne_bids_pipeline/_import_data.py | 4 +-
mne_bids_pipeline/_report.py | 12 +-
.../steps/sensor/_05_decoding_csp.py | 16 ++-
.../steps/sensor/_99_group_average.py | 15 +-
mne_bids_pipeline/typing.py | 38 ++++-
pyproject.toml | 1 +
10 files changed, 194 insertions(+), 87 deletions(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 7434586ad..1a9d6b3e2 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -9,6 +9,7 @@
- Output logging spacing improved (#764 by @larsoner)
- Added caching of sensor and source average steps (#765 by @larsoner)
- Improved logging of coregistration distances (#769 by @larsoner)
+- Input validation has been improved by leveraging [pydantic](https://docs.pydantic.dev) (#779 by @larsoner)
[//]: # (### :warning: Behavior changes)
@@ -28,3 +29,4 @@
- Fixed bug where cache would not invalidate properly based on output file changes and steps could be incorrectly skipped. All steps will automatically rerun to accommodate the new, safer caching scheme (#756 by @larsoner)
- Fixed bug with parallelization across runs for Maxwell filtering (#761 by @larsoner)
- Fixed bug where head position files were not written with a proper suffix and extension (#761 by @larsoner)
+- Fixed bug where default values for `decoding_csp_times` and `decoding_csp_freqs` were not set dynamically based on the config parameters (#779 by @larsoner)
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 0ca130754..d87c97c34 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -2,13 +2,15 @@
from typing import Optional, Union, Iterable, List, Tuple, Dict, Callable, Literal
-from numpy.typing import ArrayLike
-
-import mne
+from mne import Covariance
from mne_bids import BIDSPath
-import numpy as np
-from mne_bids_pipeline.typing import PathLike, ArbitraryContrast
+from mne_bids_pipeline.typing import (
+ PathLike,
+ ArbitraryContrast,
+ FloatArrayLike,
+ DigMontageType,
+)
###############################################################################
@@ -342,7 +344,7 @@
```
"""
-eeg_template_montage: Optional[str] = None
+eeg_template_montage: Optional[Union[str, DigMontageType]] = None
"""
In situations where you wish to process EEG data and no individual
digitization points (measured channel locations) are available, you can apply
@@ -594,7 +596,7 @@
```
"""
-mf_head_origin: Union[Literal["auto"], ArrayLike] = "auto"
+mf_head_origin: Union[Literal["auto"], FloatArrayLike] = "auto"
"""
`mf_head_origin` : array-like, shape (3,) | 'auto'
Origin of internal and external multipolar moment space in meters.
@@ -609,7 +611,7 @@
```
"""
-mf_destination: Union[Literal["reference_run"], ArrayLike] = "reference_run"
+mf_destination: Union[Literal["reference_run"], FloatArrayLike] = "reference_run"
"""
Despite all possible care to avoid movements in the MEG, the participant
will likely slowly drift down from the Dewar or slightly shift the head
@@ -1561,7 +1563,7 @@
```
"""
-time_frequency_cycles: Optional[Union[float, ArrayLike]] = None
+time_frequency_cycles: Optional[Union[float, FloatArrayLike]] = None
"""
The number of cycles to use in the Morlet wavelet. This can be a single number
or one per frequency, where frequencies are calculated via
@@ -1592,9 +1594,7 @@
time and frequency.
"""
-decoding_csp_times: Optional[ArrayLike] = np.linspace(
- max(0, epochs_tmin), epochs_tmax, num=6
-)
+decoding_csp_times: Optional[FloatArrayLike] = None
"""
The edges of the time bins to use for CSP decoding.
Must contain at least two elements. By default, 5 equally-spaced bins are
@@ -1614,13 +1614,7 @@
```
"""
-decoding_csp_freqs: Dict[str, ArrayLike] = {
- "custom": [
- time_frequency_freq_min,
- (time_frequency_freq_max + time_frequency_freq_min) / 2, # noqa: E501
- time_frequency_freq_max,
- ]
-}
+decoding_csp_freqs: Optional[Dict[str, FloatArrayLike]] = None
"""
The edges of the frequency bins to use for CSP decoding.
@@ -1897,7 +1891,7 @@ def mri_landmarks_kind(bids_path):
noise_cov: Union[
Tuple[Optional[float], Optional[float]],
Literal["emptyroom", "rest", "ad-hoc"],
- Callable[[BIDSPath], mne.Covariance],
+ Callable[[BIDSPath], Covariance],
] = (None, 0)
"""
Specify how to estimate the noise covariance matrix, which is used in
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index 43f0c3725..af135dcef 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -1,6 +1,8 @@
import ast
import copy
+from dataclasses import field
import difflib
+from functools import partial
import importlib
import os
import pathlib
@@ -10,7 +12,9 @@
import matplotlib
import numpy as np
import mne
-from mne.utils import _check_option, _validate_type
+
+from pydantic import ValidationError
+from pydantic.dataclasses import dataclass
from ._logging import logger, gen_log_kwargs
from .typing import PathLike
@@ -49,7 +53,7 @@ def _import_config(
# Check it
if check:
- _check_config(config)
+ _check_config(config, config_path)
_check_misspellings_removals(
config,
valid_names=valid_names,
@@ -216,10 +220,11 @@ def _update_with_user_config(
return user_names
-def _check_config(config: SimpleNamespace) -> None:
- # TODO: Use pydantic to do these validations
- # https://github.com/mne-tools/mne-bids-pipeline/issues/646
- _check_option("config.parallel_backend", config.parallel_backend, ("dask", "loky"))
+def _check_config(config: SimpleNamespace, config_path: Optional[PathLike]) -> None:
+ _pydantic_validate(config=config, config_path=config_path)
+
+ # Eventually all of these could be pydantic-validated, but for now we'll
+ # just change the ones that are easy
config.bids_root.resolve(strict=True)
@@ -232,12 +237,6 @@ def _check_config(config: SimpleNamespace) -> None:
reject = config.reject
ica_reject = config.ica_reject
if config.spatial_filter == "ica":
- _check_option(
- "config.ica_algorithm",
- config.ica_algorithm,
- ("picard", "fastica", "extended_infomax"),
- )
-
if config.ica_l_freq < 1:
raise ValueError(
"You requested to high-pass filter the data before ICA with "
@@ -282,19 +281,6 @@ def _check_config(config: SimpleNamespace) -> None:
f"{_VALID_TYPES}"
)
- _check_option("config.on_error", config.on_error, ("continue", "abort", "debug"))
- _check_option(
- "config.memory_file_method", config.memory_file_method, ("mtime", "hash")
- )
-
- if isinstance(config.noise_cov, str):
- _check_option(
- "config.noise_cov",
- config.noise_cov,
- ("emptyroom", "ad-hoc", "rest"),
- extra="when a string",
- )
-
if config.noise_cov == "emptyroom" and "eeg" in config.ch_types:
raise ValueError(
"You requested to process data that contains EEG channels. In "
@@ -311,10 +297,6 @@ def _check_config(config: SimpleNamespace) -> None:
"Please set process_empty_room = True"
)
- _check_option(
- "config.bem_mri_images", config.bem_mri_images, ("FLASH", "T1", "auto")
- )
-
bl = config.baseline
if bl is not None:
if (bl[0] is not None and bl[0] < config.epochs_tmin) or (
@@ -355,32 +337,7 @@ def _check_config(config: SimpleNamespace) -> None:
"This is only allowed for resting-state analysis."
)
- _check_option(
- "config.on_rename_missing_events",
- config.on_rename_missing_events,
- ("raise", "warn", "ignore"),
- )
-
- _validate_type(config.n_jobs, int, "n_jobs")
-
- _check_option(
- "config.config_validation",
- config.config_validation,
- ("raise", "warn", "ignore"),
- )
-
- _validate_type(
- config.mf_destination,
- (str, list, tuple, np.ndarray),
- "config.mf_destination",
- )
- if isinstance(config.mf_destination, str):
- _check_option(
- "config.mf_destination",
- config.mf_destination,
- ("reference_run",),
- )
- else:
+ if not isinstance(config.mf_destination, str):
destination = np.array(config.mf_destination, float)
if destination.shape != (4, 4):
raise ValueError(
@@ -389,6 +346,71 @@ def _check_config(config: SimpleNamespace) -> None:
)
+def _default_factory(key, val):
+    # Convert a default into a default factory if needed, using an explicit
+    # allowlist of the non-empty defaults
+ allowlist = [
+ {"n_mag": 1, "n_grad": 1, "n_eeg": 1}, # n_proj_*
+ {"custom": (8, 24.0, 40)}, # decoding_csp_freqs
+ {"suffix": "ave"}, # source_info_path_update
+ ["evoked"], # inverse_targets
+ ]
+ for typ in (dict, list):
+ if isinstance(val, typ):
+ try:
+ idx = allowlist.index(val)
+ except ValueError:
+ assert val == typ(), (key, val)
+ default_factory = typ
+ else:
+ if typ is dict:
+ default_factory = partial(typ, **allowlist[idx])
+ else:
+ assert typ is list
+ default_factory = partial(typ, allowlist[idx])
+ return field(default_factory=default_factory)
+ return val
+
+
+def _pydantic_validate(
+ config: SimpleNamespace,
+ config_path: Optional[PathLike],
+):
+ """Create dataclass from config type hints and validate with pydantic."""
+ # https://docs.pydantic.dev/latest/usage/dataclasses/
+ from . import _config as root_config
+
+ annotations = copy.deepcopy(root_config.__annotations__) # just be safe
+ attrs = {
+ key: _default_factory(key, val)
+ for key, val in root_config.__dict__.items()
+ if key in annotations
+ }
+ # everything should be type annotated, make sure they are
+ asym = set(attrs).symmetric_difference(set(annotations))
+ assert asym == set(), asym
+ name = "user configuration"
+ if config_path is not None:
+ name += f" from {config_path}"
+ UserConfig = type(
+ name,
+ (object,),
+ {"__annotations__": annotations, **attrs},
+ )
+ dataclass_config = dict(
+ arbitrary_types_allowed=False,
+ validate_assignment=True,
+ strict=True, # do not allow float for int for example
+ )
+ UserConfig = dataclass(config=dataclass_config)(UserConfig)
+ # Now use pydantic to automagically validate
+ user_vals = {key: val for key, val in config.__dict__.items() if key in annotations}
+ try:
+ UserConfig(**user_vals)
+ except ValidationError as err:
+ raise ValueError(str(err)) from None
+
+
_REMOVED_NAMES = {
"debug": dict(
new_name="on_error",
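
To make the validation strategy above concrete, here is a minimal, self-contained sketch of the same pattern: build a pydantic dataclass from type annotations and validate a namespace of user values in strict mode. The two fields are stand-ins for the real config options.

```python
from types import SimpleNamespace

from pydantic import ValidationError
from pydantic.dataclasses import dataclass


@dataclass(config=dict(strict=True))  # strict: no float-for-int coercion etc.
class TinyConfig:
    n_jobs: int = 1
    on_error: str = "abort"


user = SimpleNamespace(n_jobs="4", on_error="abort")  # wrong type for n_jobs
try:
    TinyConfig(**vars(user))
except ValidationError as err:
    print(err)  # strict mode rejects the string "4" for an int field
```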
diff --git a/mne_bids_pipeline/_decoding.py b/mne_bids_pipeline/_decoding.py
index 5b8ebab43..2b6be3cfc 100644
--- a/mne_bids_pipeline/_decoding.py
+++ b/mne_bids_pipeline/_decoding.py
@@ -13,8 +13,21 @@ def fit(self, *args, **kwargs):
return super().fit(*args, **kwargs)
-def _handle_csp_args(decoding_csp_times, decoding_csp_freqs, decoding_metric):
- _validate_type(decoding_csp_times, (list, tuple, np.ndarray), "decoding_csp_times")
+def _handle_csp_args(
+ decoding_csp_times,
+ decoding_csp_freqs,
+ decoding_metric,
+ *,
+ epochs_tmin,
+ epochs_tmax,
+ time_frequency_freq_min,
+ time_frequency_freq_max,
+):
+ _validate_type(
+ decoding_csp_times, (None, list, tuple, np.ndarray), "decoding_csp_times"
+ )
+ if decoding_csp_times is None:
+ decoding_csp_times = np.linspace(max(0, epochs_tmin), epochs_tmax, num=6)
if len(decoding_csp_times) < 2:
raise ValueError("decoding_csp_times should contain at least 2 values.")
if not np.array_equal(decoding_csp_times, np.sort(decoding_csp_times)):
@@ -25,7 +38,15 @@ def _handle_csp_args(decoding_csp_times, decoding_csp_freqs, decoding_metric):
f"decoding metric, but received "
f'decoding_metric="{decoding_metric}"'
)
- _validate_type(decoding_csp_freqs, dict, "config.decoding_csp_freqs")
+ _validate_type(decoding_csp_freqs, (None, dict), "config.decoding_csp_freqs")
+ if decoding_csp_freqs is None:
+ decoding_csp_freqs = {
+ "custom": (
+ time_frequency_freq_min,
+ (time_frequency_freq_max + time_frequency_freq_min) / 2, # noqa: E501
+ time_frequency_freq_max,
+ ),
+ }
freq_name_to_bins_map = dict()
for freq_range_name, edges in decoding_csp_freqs.items():
_validate_type(freq_range_name, str, "config.decoding_csp_freqs key")
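
The effect of these dynamic defaults can be seen in a small standalone sketch; the config values here are assumed example inputs:

```python
import numpy as np

# Assumed user-config values; in the pipeline they come from the loaded config.
epochs_tmin, epochs_tmax = -0.2, 0.5
time_frequency_freq_min, time_frequency_freq_max = 8.0, 40.0

# Default time bins: 6 edges -> 5 equally spaced bins, never starting before 0 s.
decoding_csp_times = np.linspace(max(0, epochs_tmin), epochs_tmax, num=6)

# Default frequency bins: one "custom" range from fmin via the midpoint to fmax.
decoding_csp_freqs = {
    "custom": (
        time_frequency_freq_min,
        (time_frequency_freq_max + time_frequency_freq_min) / 2,
        time_frequency_freq_max,
    ),
}
print(decoding_csp_times)  # [0.  0.1 0.2 0.3 0.4 0.5]
print(decoding_csp_freqs)  # {'custom': (8.0, 24.0, 40.0)}
```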
diff --git a/mne_bids_pipeline/_import_data.py b/mne_bids_pipeline/_import_data.py
index 075dc8e8c..9782f1fea 100644
--- a/mne_bids_pipeline/_import_data.py
+++ b/mne_bids_pipeline/_import_data.py
@@ -225,7 +225,7 @@ def _load_data(cfg: SimpleNamespace, bids_path: BIDSPath) -> mne.io.BaseRaw:
subject = bids_path.subject
raw = read_raw_bids(
bids_path=bids_path,
- extra_params=cfg.reader_extra_params,
+ extra_params=cfg.reader_extra_params or {},
verbose=cfg.read_raw_bids_verbose,
)
@@ -458,7 +458,7 @@ def import_er_data(
# Load reference run plus its auto-bads
raw_ref = read_raw_bids(
bids_path_ref_in,
- extra_params=cfg.reader_extra_params,
+ extra_params=cfg.reader_extra_params or {},
verbose=cfg.read_raw_bids_verbose,
)
if bids_path_ref_bads_in is not None:
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index 3d9736c95..f0ddd8d3e 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -613,6 +613,10 @@ def add_csp_grand_average(
cfg.decoding_csp_times,
cfg.decoding_csp_freqs,
cfg.decoding_metric,
+ epochs_tmin=cfg.epochs_tmin,
+ epochs_tmax=cfg.epochs_tmax,
+ time_frequency_freq_min=cfg.time_frequency_freq_min,
+ time_frequency_freq_max=cfg.time_frequency_freq_max,
)
freq_bin_starts = list()
@@ -831,11 +835,15 @@ def _agg_backend():
import matplotlib
backend = matplotlib.get_backend()
- matplotlib.use("Agg", force=True)
+ matplotlib.use("agg", force=True)
try:
yield
finally:
- matplotlib.use(backend, force=True)
+ if backend.lower() != "agg":
+ import matplotlib.pyplot as plt
+
+ plt.close("all")
+ matplotlib.use(backend, force=True)
def _add_raw(
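
Reassembled from the hunk above, the whole context manager reads roughly as follows; this is a sketch, not a verbatim copy of `_agg_backend`:

```python
import contextlib

import matplotlib


@contextlib.contextmanager
def agg_backend():
    """Temporarily switch to the non-interactive Agg backend."""
    backend = matplotlib.get_backend()
    matplotlib.use("agg", force=True)
    try:
        yield
    finally:
        if backend.lower() != "agg":
            import matplotlib.pyplot as plt

            # Close figures created under Agg before restoring an interactive
            # backend, so stale figures cannot linger across backends.
            plt.close("all")
            matplotlib.use(backend, force=True)
```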
diff --git a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
index 7d375fe28..a18385d93 100644
--- a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
+++ b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
@@ -199,7 +199,13 @@ def one_subject_decoding(
# Loop over frequencies (all time points lumped together)
freq_name_to_bins_map = _handle_csp_args(
- cfg.decoding_csp_times, cfg.decoding_csp_freqs, cfg.decoding_metric
+ cfg.decoding_csp_times,
+ cfg.decoding_csp_freqs,
+ cfg.decoding_metric,
+ epochs_tmin=cfg.epochs_tmin,
+ epochs_tmax=cfg.epochs_tmax,
+ time_frequency_freq_min=cfg.time_frequency_freq_min,
+ time_frequency_freq_max=cfg.time_frequency_freq_max,
)
freq_decoding_table_rows = []
for freq_range_name, freq_bins in freq_name_to_bins_map.items():
@@ -365,6 +371,10 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
cfg.decoding_csp_times,
cfg.decoding_csp_freqs,
cfg.decoding_metric,
+ epochs_tmin=cfg.epochs_tmin,
+ epochs_tmax=cfg.epochs_tmax,
+ time_frequency_freq_min=cfg.time_frequency_freq_min,
+ time_frequency_freq_max=cfg.time_frequency_freq_max,
)
all_csp_tf_results = dict()
for contrast in cfg.decoding_contrasts:
@@ -517,6 +527,10 @@ def get_config(
ch_types=config.ch_types,
eeg_reference=get_eeg_reference(config),
# Processing parameters
+ epochs_tmin=config.epochs_tmin,
+ epochs_tmax=config.epochs_tmax,
+ time_frequency_freq_min=config.time_frequency_freq_min,
+ time_frequency_freq_max=config.time_frequency_freq_max,
time_frequency_subtract_evoked=config.time_frequency_subtract_evoked,
decoding_metric=config.decoding_metric,
decoding_csp_freqs=config.decoding_csp_freqs,
diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py
index 008e8b7dd..a05a85a96 100644
--- a/mne_bids_pipeline/steps/sensor/_99_group_average.py
+++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py
@@ -6,8 +6,9 @@
import os
import os.path as op
from functools import partial
-from typing import Optional, TypedDict, List, Tuple
+from typing import Optional, List, Tuple
from types import SimpleNamespace
+from ...typing import TypedDict
import numpy as np
import pandas as pd
@@ -773,7 +774,13 @@ def average_csp_decoding(
time_bins = np.array(list(zip(time_bins[:-1], time_bins[1:])))
time_bins = pd.DataFrame(time_bins, columns=["t_min", "t_max"])
freq_name_to_bins_map = _handle_csp_args(
- cfg.decoding_csp_times, cfg.decoding_csp_freqs, cfg.decoding_metric
+ cfg.decoding_csp_times,
+ cfg.decoding_csp_freqs,
+ cfg.decoding_metric,
+ epochs_tmin=cfg.epochs_tmin,
+ epochs_tmax=cfg.epochs_tmax,
+ time_frequency_freq_min=cfg.time_frequency_freq_min,
+ time_frequency_freq_max=cfg.time_frequency_freq_max,
)
data_for_clustering = {}
for freq_range_name in freq_name_to_bins_map:
@@ -937,6 +944,10 @@ def get_config(
task_is_rest=config.task_is_rest,
conditions=config.conditions,
contrasts=config.contrasts,
+ epochs_tmin=config.epochs_tmin,
+ epochs_tmax=config.epochs_tmax,
+ time_frequency_freq_min=config.time_frequency_freq_min,
+ time_frequency_freq_max=config.time_frequency_freq_max,
decode=config.decode,
decoding_metric=config.decoding_metric,
decoding_n_splits=config.decoding_n_splits,
diff --git a/mne_bids_pipeline/typing.py b/mne_bids_pipeline/typing.py
index 555012713..7b989309c 100644
--- a/mne_bids_pipeline/typing.py
+++ b/mne_bids_pipeline/typing.py
@@ -1,8 +1,18 @@
"""Typing."""
import pathlib
-from typing import Union, List, Dict, TypedDict
-
+import sys
+from typing import Union, List, Dict
+from typing_extensions import Annotated
+
+if sys.version_info < (3, 12):
+ from typing_extensions import TypedDict
+else:
+ from typing import TypedDict
+
+import numpy as np
+from numpy.typing import ArrayLike
+from pydantic import PlainValidator
import mne
PathLike = Union[str, pathlib.Path]
@@ -22,3 +32,27 @@ class LogKwargsT(TypedDict):
class ReferenceRunParams(TypedDict):
montage: mne.channels.DigMontage
dev_head_t: mne.Transform
+
+
+def assert_float_array_like(val):
+ # https://docs.pydantic.dev/latest/errors/errors/#custom-errors
+ # Should raise ValueError or AssertionError... NumPy should do this for us
+ return np.array(val, dtype="float")
+
+
+FloatArrayLike = Annotated[
+ ArrayLike,
+ # PlainValidator will skip internal validation attempts for ArrayLike
+ PlainValidator(assert_float_array_like),
+]
+
+
+def assert_dig_montage(val):
+ assert isinstance(val, mne.channels.DigMontage)
+ return val
+
+
+DigMontageType = Annotated[
+ mne.channels.DigMontage,
+ PlainValidator(assert_dig_montage),
+]
diff --git a/pyproject.toml b/pyproject.toml
index f080347f2..5d217e36d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -34,6 +34,7 @@ dependencies = [
"pandas",
"seaborn",
"json_tricks",
+ "pydantic >= 2.0.0",
"rich",
"python-picard",
"qtpy",
From 866c2bbdd14f6f3a58548c74e247667b780ead55 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Fri, 18 Aug 2023 10:10:40 -0400
Subject: [PATCH 019/132] TST: Rerun CIs (#781)
---
.../steps/preprocessing/_07a_apply_ica.py | 1 +
.../steps/preprocessing/_07b_apply_ssp.py | 3 ++-
.../steps/preprocessing/_08_ptp_reject.py | 10 +++++++++-
.../steps/sensor/_01_make_evoked.py | 18 ++++++++++++++++--
.../steps/sensor/_02_decoding_full_epochs.py | 5 +++--
.../steps/sensor/_03_decoding_time_by_time.py | 5 +++--
.../steps/sensor/_04_time_frequency.py | 5 +++--
.../steps/sensor/_05_decoding_csp.py | 5 +++--
mne_bids_pipeline/steps/sensor/_06_make_cov.py | 14 ++++++++++----
.../tests/configs/config_ds000248_base.py | 2 ++
10 files changed, 52 insertions(+), 16 deletions(-)
diff --git a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
index f36ce8e11..bf7a1cac9 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
@@ -56,6 +56,7 @@ def get_input_fnames_apply_ica(
processing="ica", suffix="components", extension=".tsv"
)
in_files["epochs"] = bids_basename.copy().update(suffix="epo", extension=".fif")
+ _update_for_splits(in_files, "epochs", single=True)
return in_files
diff --git a/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py b/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
index d34800bb7..65fc27b70 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
@@ -41,6 +41,7 @@ def get_input_fnames_apply_ssp(
)
in_files = dict()
in_files["epochs"] = bids_basename.copy().update(suffix="epo", check=False)
+ _update_for_splits(in_files, "epochs", single=True)
in_files["proj"] = bids_basename.copy().update(suffix="proj", check=False)
return in_files
@@ -60,7 +61,7 @@ def apply_ssp(
# compute SSP on first run of raw
out_files = dict()
out_files["epochs"] = (
- in_files["epochs"].copy().update(processing="ssp", check=False)
+ in_files["epochs"].copy().update(processing="ssp", split=None, check=False)
)
msg = f"Input epochs: {in_files['epochs'].basename}"
logger.info(**gen_log_kwargs(message=msg))
diff --git a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
index 141910ad3..7f7caefbb 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
@@ -48,6 +48,7 @@ def get_input_fnames_drop_ptp(
)
in_files = dict()
in_files["epochs"] = bids_path.copy().update(processing=cfg.spatial_filter)
+ _update_for_splits(in_files, "epochs", single=True)
return in_files
@@ -63,7 +64,14 @@ def drop_ptp(
in_files: dict,
) -> dict:
out_files = dict()
- out_files["epochs"] = in_files["epochs"].copy().update(processing="clean")
+ out_files["epochs"] = (
+ in_files["epochs"]
+ .copy()
+ .update(
+ processing="clean",
+ split=None,
+ )
+ )
msg = f'Input: {in_files["epochs"].basename}'
logger.info(**gen_log_kwargs(message=msg))
msg = f'Output: {out_files["epochs"].basename}'
diff --git a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
index a0f7d1e3e..2ec0ea714 100644
--- a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
+++ b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
@@ -17,7 +17,13 @@
from ..._logging import gen_log_kwargs, logger
from ..._parallel import parallel_func, get_parallel_backend
from ..._report import _open_report, _sanitize_cond_tag, _all_conditions
-from ..._run import failsafe_run, save_logs, _sanitize_callable, _prep_out_files
+from ..._run import (
+ failsafe_run,
+ save_logs,
+ _sanitize_callable,
+ _prep_out_files,
+ _update_for_splits,
+)
def get_input_fnames_evoked(
@@ -43,6 +49,7 @@ def get_input_fnames_evoked(
)
in_files = dict()
in_files["epochs"] = fname_epochs
+ _update_for_splits(in_files, "epochs", single=True)
return in_files
@@ -59,7 +66,14 @@ def run_evoked(
) -> dict:
out_files = dict()
out_files["evoked"] = (
- in_files["epochs"].copy().update(suffix="ave", processing=None, check=False)
+ in_files["epochs"]
+ .copy()
+ .update(
+ suffix="ave",
+ processing=None,
+ check=False,
+ split=None,
+ )
)
msg = f'Input: {in_files["epochs"].basename}'
diff --git a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
index 287ace7bc..9960c2670 100644
--- a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
+++ b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
@@ -35,7 +35,7 @@
from ..._logging import gen_log_kwargs, logger
from ..._decoding import LogReg
from ..._parallel import parallel_func, get_parallel_backend
-from ..._run import failsafe_run, save_logs, _prep_out_files
+from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits
from ..._report import (
_open_report,
_contrasts_to_names,
@@ -68,6 +68,7 @@ def get_input_fnames_epochs_decoding(
)
in_files = dict()
in_files["epochs"] = fname_epochs
+ _update_for_splits(in_files, "epochs", single=True)
return in_files
@@ -89,7 +90,7 @@ def run_epochs_decoding(
msg = f"Contrasting conditions: {condition1} – {condition2}"
logger.info(**gen_log_kwargs(message=msg))
out_files = dict()
- bids_path = in_files["epochs"].copy()
+ bids_path = in_files["epochs"].copy().update(split=None)
epochs = mne.read_epochs(in_files.pop("epochs"))
_restrict_analyze_channels(epochs, cfg)
diff --git a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
index f78e1d0cf..1dda99dad 100644
--- a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
+++ b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
@@ -38,7 +38,7 @@
)
from ..._decoding import LogReg
from ..._logging import gen_log_kwargs, logger
-from ..._run import failsafe_run, save_logs, _prep_out_files
+from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits
from ..._parallel import get_parallel_backend, get_parallel_backend_name
from ..._report import (
_open_report,
@@ -73,6 +73,7 @@ def get_input_fnames_time_decoding(
)
in_files = dict()
in_files["epochs"] = fname_epochs
+ _update_for_splits(in_files, "epochs", single=True)
return in_files
@@ -98,7 +99,7 @@ def run_time_decoding(
msg = f"Contrasting conditions ({kind}): {condition1} – {condition2}"
logger.info(**gen_log_kwargs(message=msg))
out_files = dict()
- bids_path = in_files["epochs"].copy()
+ bids_path = in_files["epochs"].copy().update(split=None)
epochs = mne.read_epochs(in_files.pop("epochs"))
_restrict_analyze_channels(epochs, cfg)
diff --git a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
index 0e7b9b720..92a403c11 100644
--- a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
+++ b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
@@ -22,7 +22,7 @@
_restrict_analyze_channels,
)
from ..._logging import gen_log_kwargs, logger
-from ..._run import failsafe_run, save_logs, _prep_out_files
+from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _open_report, _sanitize_cond_tag
@@ -53,6 +53,7 @@ def get_input_fnames_time_frequency(
)
in_files = dict()
in_files["epochs"] = fname_epochs
+ _update_for_splits(in_files, "epochs", single=True)
return in_files
@@ -73,7 +74,7 @@ def run_time_frequency(
msg = f"Reading {epochs_path.basename}"
logger.info(**gen_log_kwargs(message=msg))
epochs = mne.read_epochs(epochs_path)
- bids_path = epochs_path.copy().update(processing=None)
+ bids_path = epochs_path.copy().update(processing=None, split=None)
del epochs_path
_restrict_analyze_channels(epochs, cfg)
diff --git a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
index a18385d93..14087fba7 100644
--- a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
+++ b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
@@ -27,7 +27,7 @@
from ..._decoding import LogReg, _handle_csp_args
from ..._logging import logger, gen_log_kwargs
from ..._parallel import parallel_func, get_parallel_backend
-from ..._run import failsafe_run, save_logs, _prep_out_files
+from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits
from ..._report import (
_open_report,
_sanitize_cond_tag,
@@ -132,6 +132,7 @@ def get_input_fnames_csp(
)
in_files = dict()
in_files["epochs"] = fname_epochs
+ _update_for_splits(in_files, "epochs", single=True)
return in_files
@@ -157,7 +158,7 @@ def one_subject_decoding(
msg = f"Contrasting conditions: {condition1} – {condition2}"
logger.info(**gen_log_kwargs(msg))
- bids_path = in_files["epochs"].copy().update(processing=None)
+ bids_path = in_files["epochs"].copy().update(processing=None, split=None)
epochs = mne.read_epochs(in_files.pop("epochs"))
_restrict_analyze_channels(epochs, cfg)
diff --git a/mne_bids_pipeline/steps/sensor/_06_make_cov.py b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
index 27323d680..4c8d25eac 100644
--- a/mne_bids_pipeline/steps/sensor/_06_make_cov.py
+++ b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
@@ -20,7 +20,13 @@
from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _open_report, _sanitize_cond_tag, _all_conditions
-from ..._run import failsafe_run, save_logs, _sanitize_callable, _prep_out_files
+from ..._run import (
+ failsafe_run,
+ save_logs,
+ _sanitize_callable,
+ _prep_out_files,
+ _update_for_splits,
+)
def get_input_fnames_cov(
@@ -47,9 +53,8 @@ def get_input_fnames_cov(
root=cfg.deriv_root,
check=False,
)
- in_files["report_info"] = fname_epochs.copy().update(
- processing="clean", suffix="epo"
- )
+ in_files["report_info"] = fname_epochs.copy().update(processing="clean")
+ _update_for_splits(in_files, "report_info", single=True)
fname_evoked = fname_epochs.copy().update(
suffix="ave", processing=None, check=False
)
@@ -83,6 +88,7 @@ def get_input_fnames_cov(
else:
assert cov_type == "epochs", cov_type
in_files["epochs"] = fname_epochs
+ _update_for_splits(in_files, "epochs", single=True)
return in_files
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_base.py b/mne_bids_pipeline/tests/configs/config_ds000248_base.py
index 22f29b35f..b80b6f0f0 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_base.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_base.py
@@ -26,6 +26,8 @@
def noise_cov(bp):
# Use pre-stimulus period as noise source
bp = bp.copy().update(processing="clean", suffix="epo")
+ if not bp.fpath.exists():
+ bp.update(split="01")
epo = mne.read_epochs(bp)
cov = mne.compute_covariance(epo, rank="info", tmax=0)
return cov
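
The pattern this patch applies throughout (prefer the unsplit file, fall back to the first split part) can be sketched as below. `_update_for_splits` is the pipeline's real helper; the file-name convention here is only an assumption for illustration:

```python
from pathlib import Path


def resolve_epochs_path(base: Path) -> Path:
    """Prefer the unsplit epochs file; fall back to its first split part."""
    if base.exists():
        return base
    # Assumed BIDS-style naming for split files, e.g. *_split-01_epo.fif
    split = base.with_name(base.name.replace("_epo.fif", "_split-01_epo.fif"))
    if split.exists():
        return split
    raise FileNotFoundError(base)
```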
From f512fe92cacab8609592f931da4c2c562dd2641b Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Fri, 18 Aug 2023 10:57:27 -0400
Subject: [PATCH 020/132] BUG: Fix bug where individual invs were not rendered
(#782)
---
mne_bids_pipeline/steps/source/_05_make_inverse.py | 4 ----
1 file changed, 4 deletions(-)
diff --git a/mne_bids_pipeline/steps/source/_05_make_inverse.py b/mne_bids_pipeline/steps/source/_05_make_inverse.py
index 6e96e13ef..449675817 100644
--- a/mne_bids_pipeline/steps/source/_05_make_inverse.py
+++ b/mne_bids_pipeline/steps/source/_05_make_inverse.py
@@ -127,10 +127,6 @@ def run_inverse(
msg = "Adding inverse information to report"
logger.info(**gen_log_kwargs(message=msg))
for condition in conditions:
- cond_str = sanitize_cond_name(condition)
- key = f"{cond_str}+{method}+hemi"
- if key not in out_files:
- continue
msg = f"Rendering inverse solution for {condition}"
logger.info(**gen_log_kwargs(message=msg))
tags = ("source-estimate", _sanitize_cond_tag(condition))
From dad6aa907a0f4973e0e198e8f522d4460e3f9fa5 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 22 Aug 2023 08:10:26 +0200
Subject: [PATCH 021/132] [pre-commit.ci] pre-commit autoupdate (#784)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a088f558c..bbe9c9ff4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,7 +13,7 @@ repos:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.0.284
+ rev: v0.0.285
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From 8ee8d8d09db58090f2d499f37cdc8bb11bbf6acb Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 29 Aug 2023 09:14:34 +0200
Subject: [PATCH 022/132] [pre-commit.ci] pre-commit autoupdate (#786)
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index bbe9c9ff4..6774334a2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,7 +13,7 @@ repos:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.0.285
+ rev: v0.0.286
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From e58675c468ff71b8c8b99dfea1d387c58fdc632e Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 5 Sep 2023 08:10:26 +0200
Subject: [PATCH 023/132] [pre-commit.ci] pre-commit autoupdate (#787)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 6774334a2..b490a7578 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,7 +13,7 @@ repos:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.0.286
+ rev: v0.0.287
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From 0b4bfcc3d03d63e750c5fab585799907a62fa9e4 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 12 Sep 2023 10:16:12 +0200
Subject: [PATCH 024/132] [pre-commit.ci] pre-commit autoupdate (#788)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b490a7578..f06cd2e20 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,14 +6,14 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/psf/black
- rev: 23.7.0
+ rev: 23.9.1
hooks:
- id: black
args:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.0.287
+ rev: v0.0.288
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From 38138d339e4b9b81b99d601c6a7531963735949c Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 19 Sep 2023 08:52:41 +0200
Subject: [PATCH 025/132] [pre-commit.ci] pre-commit autoupdate (#789)
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f06cd2e20..d0cb292a6 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,7 +13,7 @@ repos:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.0.288
+ rev: v0.0.290
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From 5dac5cbae64b7350c4b1a8271c8d0c2c3ee8b381 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 26 Sep 2023 11:21:50 +0200
Subject: [PATCH 026/132] [pre-commit.ci] pre-commit autoupdate (#790)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d0cb292a6..99588ac55 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,7 +13,7 @@ repos:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.0.290
+ rev: v0.0.291
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From f781b593e107f24945e250bdecbf911f9dc4e0be Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Tue, 26 Sep 2023 12:12:23 -0400
Subject: [PATCH 027/132] MAINT: Fix bug with item (#791)
---
mne_bids_pipeline/_report.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index f0ddd8d3e..35ed8ea8c 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -359,8 +359,8 @@ def plot_time_by_time_decoding_t_values(decoding_data):
# We squeeze() to make Matplotlib happy.
all_times = decoding_data["cluster_all_times"].squeeze()
all_t_values = decoding_data["cluster_all_t_values"].squeeze()
- t_threshold = decoding_data["cluster_t_threshold"]
- decim = decoding_data["decim"]
+ t_threshold = decoding_data["cluster_t_threshold"].item()
+ decim = decoding_data["decim"].item()
fig, ax = plt.subplots(constrained_layout=True)
ax.plot(all_times, all_t_values, ls="-", color="black", label="observed $t$-values")
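
The fix relies on `numpy.ndarray.item()`, which turns a 0-dimensional array (as produced when scalars round-trip through saved files) back into a plain Python scalar. A tiny illustration with made-up values:

```python
import numpy as np

t_threshold = np.array(2.31)  # 0-d array, e.g. read back from disk
decim = np.array(4)

# Plotting and string formatting want plain Python scalars:
print(f"threshold={t_threshold.item():.2f}, decim={decim.item()}")
```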
From 754c85f21abaac847631bd8710671424a9ca8aad Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 4 Oct 2023 17:56:23 -0400
Subject: [PATCH 028/132] [pre-commit.ci] pre-commit autoupdate (#792)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Richard Höchenberger
---
.pre-commit-config.yaml | 4 ++--
docs/source/v1.0.md.inc | 2 +-
mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 99588ac55..ce0555f92 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,11 +13,11 @@ repos:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.0.291
+ rev: v0.0.292
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
- rev: v2.2.5
+ rev: v2.2.6
hooks:
- id: codespell
additional_dependencies:
diff --git a/docs/source/v1.0.md.inc b/docs/source/v1.0.md.inc
index 2bbf200fb..9c4d8dd18 100644
--- a/docs/source/v1.0.md.inc
+++ b/docs/source/v1.0.md.inc
@@ -109,7 +109,7 @@ Changes were only tracked starting April 15, 2021.
[`ssp_reject_ecg`][mne_bids_pipeline._config.ssp_reject_ecg].
(#392 by @agramfort, @dengemann,
@apmellot and @hoechenberger)
-- You can now use autoreject for exclusing artifacts before SSP estimation via
+- You can now use autoreject for excluding artifacts before SSP estimation via
the `autoreject_global` option in [`ssp_reject_eog`][mne_bids_pipeline._config.ssp_reject_eog]
and [`ssp_reject_ecg`][mne_bids_pipeline._config.ssp_reject_ecg].
(#396 by @agramfort, @dengemann,
diff --git a/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py b/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
index 921891a3c..a964e6d59 100644
--- a/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
+++ b/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
@@ -75,7 +75,7 @@ def main(*, config):
"""Initialize the output directories."""
init_dataset(cfg=get_config(config=config))
# Don't bother with parallelization here as I/O operations are generally
- # not well paralellized (and this should be very fast anyway)
+ # not well parallelized (and this should be very fast anyway)
for subject in get_subjects(config):
for session in get_sessions(config):
init_subject_dirs(
From f99ada6a672c6a9afbff83de92c1cf0448ba5d5c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Thu, 19 Oct 2023 20:41:10 +0200
Subject: [PATCH 029/132] Ensure link in "You're not viewing the latest stable
version" banner is readable in light and dark theme (#793)
Co-authored-by: Eric Larson
---
.circleci/config.yml | 2 +-
docs/mkdocs.yml | 4 ++--
docs/source/css/extra.css | 6 ++++++
3 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 03c9de23e..92f446287 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -48,7 +48,7 @@ jobs:
name: Get Python running
command: |
pip install --upgrade --progress-bar off pip setuptools
- pip install --upgrade --progress-bar off "autoreject @ https://api.github.com/repos/autoreject/autoreject/zipball/master" "mne[hdf5] @ git+https://github.com/mne-tools/mne-python" "mne-bids[full] @ https://api.github.com/repos/mne-tools/mne-bids/zipball/main" numba
+ pip install --upgrade --progress-bar off "autoreject @ https://api.github.com/repos/autoreject/autoreject/zipball/master" "mne[hdf5] @ git+https://github.com/mne-tools/mne-python@main" "mne-bids[full] @ https://api.github.com/repos/mne-tools/mne-bids/zipball/main" numba
pip install -ve .[tests]
pip install PyQt6
- run:
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 14d0f4a61..990fe80da 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -155,8 +155,8 @@ markdown_extensions:
- attr_list # Allows to turn any element into a button
- pymdownx.details
- pymdownx.emoji:
- emoji_index: !!python/name:materialx.emoji.twemoji
- emoji_generator: !!python/name:materialx.emoji.to_svg
+ emoji_index: !!python/name:material.extensions.emoji.twemoji
+ emoji_generator: !!python/name:material.extensions.emoji.to_svg
- pymdownx.superfences:
custom_fences:
- name: mermaid
diff --git a/docs/source/css/extra.css b/docs/source/css/extra.css
index 7ca920b15..d66d71ec9 100644
--- a/docs/source/css/extra.css
+++ b/docs/source/css/extra.css
@@ -101,3 +101,9 @@ td p {
.md-button {
margin-top: 1rem !important;
}
+
+/* Ensure the link in the "You're not viewing the latest stable version" banner is
+ readable in both dark and light theme. */
+:root {
+ --md-typeset-a-color: var(--md-default-fg-color);
+}
From 9904fb3f31b4144c4efbcae2b657adb20298331c Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 23 Oct 2023 17:06:01 -0400
Subject: [PATCH 030/132] [pre-commit.ci] pre-commit autoupdate (#795)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ce0555f92..d1269aaed 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,14 +6,14 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/psf/black
- rev: 23.9.1
+ rev: 23.10.0
hooks:
- id: black
args:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.0.292
+ rev: v0.1.1
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From 327cb943db5e3b11ea46863d656e2938710914d6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Mon, 30 Oct 2023 16:00:28 +0100
Subject: [PATCH 031/132] Reduce logging output when creating and saving
reports (#799)
---
docs/source/v1.5.md.inc | 1 +
mne_bids_pipeline/_report.py | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 1a9d6b3e2..1213193f8 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -10,6 +10,7 @@
- Added caching of sensor and source average steps (#765 by @larsoner)
- Improved logging of coregistration distances (#769 by @larsoner)
- Input validation has been improved by leveraging [pydantic](https://docs.pydantic.dev) (#779 by @larsoner)
+- Reduced logging when reports are created and saved (#799 by @hoechenberger)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index 35ed8ea8c..bf42a27a2 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -454,7 +454,7 @@ def _gen_empty_report(
if cfg.task is not None:
title += f", task-{cfg.task}"
- report = mne.Report(title=title, raw_psd=True)
+ report = mne.Report(title=title, raw_psd=True, verbose=False)
return report
From 2ff1a352bcf2a4dff11935041e4da0c8c76f3763 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Mon, 30 Oct 2023 16:38:58 +0100
Subject: [PATCH 032/132] Use inst.pick() instead of inst.pick_types() (#800)
---
docs/source/v1.5.md.inc | 1 +
mne_bids_pipeline/_import_data.py | 2 +-
mne_bids_pipeline/steps/sensor/_05_decoding_csp.py | 7 ++-----
3 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 1213193f8..eebf44a0e 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -24,6 +24,7 @@
- Ensure `memory_file_method = "hash"` is tested (#768 by @larsoner)
- Enable [pre-commit.ci](https://pre-commit.ci) (#774 by @larsoner)
- Use `pooch` for web downloads (#775 by @larsoner)
+- Ensure compatibility with MNE-Python 1.6 (#800 by @hoechenberger)
### :bug: Bug fixes
diff --git a/mne_bids_pipeline/_import_data.py b/mne_bids_pipeline/_import_data.py
index 9782f1fea..ca52c59e1 100644
--- a/mne_bids_pipeline/_import_data.py
+++ b/mne_bids_pipeline/_import_data.py
@@ -449,7 +449,7 @@ def import_er_data(
cfg=cfg,
bids_path_bads=bids_path_er_bads_in,
)
- raw_er.pick_types(meg=True, exclude=[])
+ raw_er.pick("meg", exclude=[])
# Don't deal with ref for now (initial data quality / auto bad step)
if bids_path_ref_in is None:
diff --git a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
index 14087fba7..09a124224 100644
--- a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
+++ b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
@@ -81,15 +81,12 @@ def prepare_epochs_and_y(
*, epochs: mne.BaseEpochs, contrast: Tuple[str, str], cfg, fmin: float, fmax: float
) -> Tuple[mne.BaseEpochs, np.ndarray]:
"""Band-pass between, sub-select the desired epochs, and prepare y."""
- epochs_filt = epochs.copy().pick_types(
- meg=True,
- eeg=True,
- )
+ epochs_filt = epochs.copy().pick(["meg", "eeg"])
# We only take mag to speed up computation
# because the information is redundant between grad and mag
if cfg.datatype == "meg" and cfg.use_maxwell_filter:
- epochs_filt.pick_types(meg="mag")
+ epochs_filt.pick("mag")
# filtering out the conditions we are not interested in, to ensure here we
# have a valid partition between the condition of the contrast.
From 8843a386265bdb367ec3922644a2eb91a133705e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Mon, 30 Oct 2023 20:41:29 +0100
Subject: [PATCH 033/132] Use cleaned epochs for sensor-space decoding (#796)
---
docs/source/v1.5.md.inc | 4 ++++
mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py | 6 +++++-
mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py | 1 +
mne_bids_pipeline/steps/sensor/_04_time_frequency.py | 5 +----
mne_bids_pipeline/steps/sensor/_05_decoding_csp.py | 1 +
mne_bids_pipeline/steps/sensor/_06_make_cov.py | 3 +--
mne_bids_pipeline/tests/configs/config_ds001810.py | 1 +
mne_bids_pipeline/tests/conftest.py | 2 ++
8 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index eebf44a0e..c6e42c459 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -1,5 +1,8 @@
## v1.5.0 (unreleased)
+This release contains a number of very important bug fixes that address problems related to decoding, time-frequency analysis, and inverse modeling.
+All users are encouraged to update.
+
### :new: New features & enhancements
- Added `deriv_root` argument to CLI (#773 by @vferat)
@@ -32,3 +35,4 @@
- Fixed bug with parallelization across runs for Maxwell filtering (#761 by @larsoner)
- Fixed bug where head position files were not written with a proper suffix and extension (#761 by @larsoner)
- Fixed bug where default values for `decoding_csp_times` and `decoding_csp_freqs` were not set dynamically based on the config parameters (#779 by @larsoner)
- A number of processing steps erroneously **always** operated on un-cleaned epochs (`sensor/decoding_full_epochs`, `sensor/decoding_time_by_time`, `sensor/decoding_csp`), or operated on un-cleaned epochs (without PTP rejection) if no ICA or SSP was requested (`sensor/time_frequency`, `sensor/make_cov`). The bug in `sensor/make_cov` could propagate to the source level, as the covariance matrix is used for inverse modeling. (#796 by @hoechenberger)
\ No newline at end of file
diff --git a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
index 9960c2670..d1d8157a1 100644
--- a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
+++ b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
@@ -60,6 +60,7 @@ def get_input_fnames_epochs_decoding(
run=None,
recording=cfg.rec,
space=cfg.space,
+ processing="clean",
suffix="epo",
extension=".fif",
datatype=cfg.datatype,
@@ -94,7 +95,6 @@ def run_epochs_decoding(
epochs = mne.read_epochs(in_files.pop("epochs"))
_restrict_analyze_channels(epochs, cfg)
- epochs.crop(cfg.decoding_epochs_tmin, cfg.decoding_epochs_tmax)
# We define the epochs and the labels
if isinstance(cfg.conditions, dict):
@@ -111,6 +111,10 @@ def run_epochs_decoding(
[epochs[epochs_conds[0]], epochs[epochs_conds[1]]], verbose="error"
)
+ # Crop to the desired analysis interval. Do it only after the concatenation to work
+ # around https://github.com/mne-tools/mne-python/issues/12153
+ epochs.crop(cfg.decoding_epochs_tmin, cfg.decoding_epochs_tmax)
+
n_cond1 = len(epochs[epochs_conds[0]])
n_cond2 = len(epochs[epochs_conds[1]])
diff --git a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
index 1dda99dad..02ec357dd 100644
--- a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
+++ b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
@@ -65,6 +65,7 @@ def get_input_fnames_time_decoding(
run=None,
recording=cfg.rec,
space=cfg.space,
+ processing="clean",
suffix="epo",
extension=".fif",
datatype=cfg.datatype,
diff --git a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
index 92a403c11..e1e7b440c 100644
--- a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
+++ b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
@@ -33,9 +33,6 @@ def get_input_fnames_time_frequency(
subject: str,
session: Optional[str],
) -> dict:
- processing = None
- if cfg.spatial_filter is not None:
- processing = "clean"
fname_epochs = BIDSPath(
subject=subject,
session=session,
@@ -46,7 +43,7 @@ def get_input_fnames_time_frequency(
space=cfg.space,
datatype=cfg.datatype,
root=cfg.deriv_root,
- processing=processing,
+ processing="clean",
suffix="epo",
extension=".fif",
check=False,
diff --git a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
index 09a124224..75b24a854 100644
--- a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
+++ b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
@@ -121,6 +121,7 @@ def get_input_fnames_csp(
run=None,
recording=cfg.rec,
space=cfg.space,
+ processing="clean",
suffix="epo",
extension=".fif",
datatype=cfg.datatype,
diff --git a/mne_bids_pipeline/steps/sensor/_06_make_cov.py b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
index 4c8d25eac..2cb3b8ebf 100644
--- a/mne_bids_pipeline/steps/sensor/_06_make_cov.py
+++ b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
@@ -37,7 +37,6 @@ def get_input_fnames_cov(
) -> dict:
cov_type = _get_cov_type(cfg)
in_files = dict()
- processing = "clean" if cfg.spatial_filter is not None else None
fname_epochs = BIDSPath(
subject=subject,
session=session,
@@ -48,7 +47,7 @@ def get_input_fnames_cov(
space=cfg.space,
extension=".fif",
suffix="epo",
- processing=processing,
+ processing="clean",
datatype=cfg.datatype,
root=cfg.deriv_root,
check=False,
diff --git a/mne_bids_pipeline/tests/configs/config_ds001810.py b/mne_bids_pipeline/tests/configs/config_ds001810.py
index 064e8ddb7..508a99e64 100644
--- a/mne_bids_pipeline/tests/configs/config_ds001810.py
+++ b/mne_bids_pipeline/tests/configs/config_ds001810.py
@@ -15,6 +15,7 @@
conditions = ["61450", "61511"]
contrasts = [("61450", "61511")]
decode = True
+decoding_n_splits = 3 # only for testing, use 5 otherwise
l_freq = 0.3
diff --git a/mne_bids_pipeline/tests/conftest.py b/mne_bids_pipeline/tests/conftest.py
index b75d009ec..295b2309a 100644
--- a/mne_bids_pipeline/tests/conftest.py
+++ b/mne_bids_pipeline/tests/conftest.py
@@ -43,6 +43,8 @@ def pytest_configure(config):
ignore:is_categorical_dtype is deprecated.*:FutureWarning
ignore:use_inf_as_na option is deprecated.*:FutureWarning
ignore:All-NaN axis encountered.*:RuntimeWarning
+    # sklearn: the least populated class has too few samples for cv=5
+ always:The least populated class in y has only.*:UserWarning
"""
for warning_line in warning_lines.split("\n"):
warning_line = warning_line.strip()
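
For reference, the corrected input lookup always requests the cleaned epochs. A sketch with made-up BIDS entities:

```python
from mne_bids import BIDSPath

fname_epochs = BIDSPath(
    subject="01",  # illustrative entities, not from the patch
    session="01",
    task="audvis",
    processing="clean",  # previously None whenever spatial_filter was None
    suffix="epo",
    extension=".fif",
    datatype="meg",
    root="/data/bids/derivatives/mne-bids-pipeline",
    check=False,
)
print(fname_epochs.basename)
# sub-01_ses-01_task-audvis_proc-clean_epo.fif
```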
From 4f5e4654f508982092432effbd85eaee2a72a12d Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 30 Oct 2023 21:30:58 +0100
Subject: [PATCH 034/132] [pre-commit.ci] pre-commit autoupdate (#803)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d1269aaed..880e7a007 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,14 +6,14 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/psf/black
- rev: 23.10.0
+ rev: 23.10.1
hooks:
- id: black
args:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.1
+ rev: v0.1.3
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From 1bc1e8226d70b10b6d3a76d69d355cd906775b39 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Tue, 31 Oct 2023 17:23:16 +0100
Subject: [PATCH 035/132] Add "picard-extended_infomax" ICA algorithm (#801)
---
docs/source/v1.5.md.inc | 1 +
mne_bids_pipeline/_config.py | 8 ++++++--
mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py | 3 +++
mne_bids_pipeline/tests/configs/config_ds003392.py | 1 +
4 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index c6e42c459..bdc8b8089 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -14,6 +14,7 @@ All users are encouraged to update.
- Improved logging of coregistration distances (#769 by @larsoner)
- Input validation has been improved by leveraging [pydantic](https://docs.pydantic.dev) (#779 by @larsoner)
- Reduced logging when reports are created and saved (#799 by @hoechenberger)
+- Added [`"picard-extended_infomax"`][mne_bids_pipeline._config.ica_algorithm] ICA algorithm to perform "extended Infomax"-like ICA decomposition using Picard (#801 by @hoechenberger)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index d87c97c34..db48a98b6 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1258,9 +1258,13 @@
```
"""
-ica_algorithm: Literal["picard", "fastica", "extended_infomax"] = "picard"
+ica_algorithm: Literal[
+ "picard", "fastica", "extended_infomax", "picard-extended_infomax"
+] = "picard"
"""
-The ICA algorithm to use.
+The ICA algorithm to use. `"picard-extended_infomax"` runs `picard` such that the
+resulting ICA decomposition is identical to the one produced by the extended Infomax
+algorithm (but may converge in less time).
"""
ica_l_freq: Optional[float] = 1.0
diff --git a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
index 138f23954..029f0682f 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
@@ -70,6 +70,9 @@ def fit_ica(
if algorithm == "picard":
fit_params = dict(fastica_it=5)
+ elif algorithm == "picard-extended_infomax":
+ algorithm = "picard"
+ fit_params = dict(ortho=False, extended=True)
elif algorithm == "extended_infomax":
algorithm = "infomax"
fit_params = dict(extended=True)
diff --git a/mne_bids_pipeline/tests/configs/config_ds003392.py b/mne_bids_pipeline/tests/configs/config_ds003392.py
index 02134768a..edc30228f 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003392.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003392.py
@@ -20,6 +20,7 @@
# Artifact correction.
spatial_filter = "ica"
+ica_algorithm = "picard-extended_infomax"
ica_max_iterations = 500
ica_l_freq = 1.0
ica_n_components = 0.99
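
Condensing the dispatch in `fit_ica` above into a standalone sketch (parameter values taken from the hunk; the fallback branch is an assumption):

```python
def ica_fit_params(algorithm: str):
    """Map a config `ica_algorithm` value to MNE's (method, fit_params)."""
    if algorithm == "picard":
        return "picard", dict(fastica_it=5)
    elif algorithm == "picard-extended_infomax":
        # Picard configured to reproduce an extended-Infomax decomposition.
        return "picard", dict(ortho=False, extended=True)
    elif algorithm == "extended_infomax":
        return "infomax", dict(extended=True)
    return algorithm, None  # e.g. "fastica" needs no extra fit parameters


print(ica_fit_params("picard-extended_infomax"))
# ('picard', {'ortho': False, 'extended': True})
```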
From 03cc03a3bc15f3e5bf94054db9229fffe67cf95e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Thu, 2 Nov 2023 16:08:31 +0100
Subject: [PATCH 036/132] Do not reject epochs based on PTP threshold before
ICA cleaning (#806)
---
docs/source/v1.5.md.inc | 5 +++++
mne_bids_pipeline/_config.py | 15 +++++++++++----
.../steps/preprocessing/_07a_apply_ica.py | 11 +----------
3 files changed, 17 insertions(+), 14 deletions(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index bdc8b8089..eb3e1656d 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -19,6 +19,11 @@ All users are encouraged to update.
[//]: # (### :warning: Behavior changes)
- The default cache directory is now `_cache` within the derivatives folder when using `memory_location=True`, set [`memory_subdir="joblib"`][mne_bids_pipeline._config.memory_subdir] to get the behavior from v1.4 (#778 by @larsoner)
+- Before cleaning epochs via ICA, we used to reject any epochs exceeding the [`ica_reject`][mne_bids_pipeline._config.ica_reject]
+ criteria. However, this may lead to the unnecessary exclusion of epochs that could have been salvaged through ICA cleaning. Now,
+ we only apply `ica_reject` to the epochs used for ICA fitting. After the experimental epochs have been cleaned with ICA
+ (`preprocessing/apply_ica` step), any remaining large-amplitude artifacts can be removed via
+ [`reject`][mne_bids_pipeline._config.reject], which is used in the last preprocessing step, `preprocessing/ptp_reject`. (#806 by @hoechenberger)
[//]: # (- Whatever (#000 by @whoever))
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index db48a98b6..bf05b8d4b 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1235,13 +1235,20 @@
# ~~~~~~~~~~~~~~~~~~~~~~
ica_reject: Optional[Dict[str, float]] = None
"""
-Peak-to-peak amplitude limits to exclude epochs from ICA fitting.
-
-This allows you to remove strong transient artifacts, which could negatively
-affect ICA performance.
+Peak-to-peak amplitude limits for excluding epochs from ICA fitting: epochs exceeding
+these limits will not be used when fitting ICA. This allows you to remove strong
+transient artifacts, which could negatively affect ICA performance.
This will also be applied to ECG and EOG epochs created during preprocessing.
+???+ info
+ This setting is applied only to the epochs that are used for **fitting** ICA. The
+ goal is to make it easier for ICA to produce a good decomposition. After fitting,
+ ICA is applied to the epochs to be analyzed, usually with one or more components
+    removed (so as to remove artifacts). But even after ICA cleaning, some epochs may still
+ contain large-amplitude artifacts. Those epochs can then be rejected by using
+ the [`reject`][mne_bids_pipeline._config.reject] parameter.
+
The BIDS Pipeline will automatically try to detect EOG and ECG artifacts in
your data, and remove them. For this to work properly, it is recommended
to **not** specify rejection thresholds for EOG and ECG channels here –
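A hedged configuration sketch of the division of labor described above: `ica_reject` now guards only the ICA fit, while `reject` handles whatever survives cleaning. The threshold values are illustrative, not recommendations:

```python
# Illustrative MNE-BIDS-Pipeline configuration snippet (example values only):
spatial_filter = "ica"

# Applied only to the epochs used for *fitting* ICA (and to the ECG/EOG
# epochs created during preprocessing), not to the epochs being analyzed.
ica_reject = dict(grad=10e-10, mag=20e-12, eeg=400e-6)

# Applied after ICA cleaning, in the final `preprocessing/ptp_reject` step.
reject = dict(grad=4000e-13, mag=4e-12, eeg=100e-6)
```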
diff --git a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
index bf7a1cac9..4b906a106 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
@@ -28,7 +28,6 @@
)
from ..._logging import gen_log_kwargs, logger
from ..._parallel import parallel_func, get_parallel_backend
-from ..._reject import _get_reject
from ..._report import _open_report, _agg_backend
from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
@@ -93,21 +92,13 @@ def apply_ica(
tsv_data = pd.read_csv(in_files.pop("components"), sep="\t")
ica.exclude = tsv_data.loc[tsv_data["status"] == "bad", "component"].to_list()
- # Load epochs to reject ICA components.
+ # Load epochs.
msg = f'Input: {in_files["epochs"].basename}'
logger.info(**gen_log_kwargs(message=msg))
msg = f'Output: {out_files["epochs"].basename}'
logger.info(**gen_log_kwargs(message=msg))
epochs = mne.read_epochs(in_files.pop("epochs"), preload=True)
- ica_reject = _get_reject(
- subject=subject,
- session=session,
- reject=cfg.ica_reject,
- ch_types=cfg.ch_types,
- param="ica_reject",
- )
- epochs.drop_bad(ica_reject)
# Now actually reject the components.
msg = f'Rejecting ICs: {", ".join([str(ic) for ic in ica.exclude])}'
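The resulting flow of this step can be sketched as follows; the filenames are hypothetical placeholders for the pipeline's derivatives, and note that no PTP-based dropping happens here anymore:

```python
import mne
import pandas as pd

# Hypothetical input paths, for illustration only:
fname_epochs = "sub-01_task-x_epo.fif"
fname_ica = "sub-01_task-x_ica.fif"
fname_components = "sub-01_task-x_components.tsv"

epochs = mne.read_epochs(fname_epochs, preload=True)
ica = mne.preprocessing.read_ica(fname_ica)

# Mark the components previously flagged as bad in the TSV file ...
tsv_data = pd.read_csv(fname_components, sep="\t")
ica.exclude = tsv_data.loc[tsv_data["status"] == "bad", "component"].to_list()

# ... and apply ICA directly; epochs are no longer dropped via `ica_reject` here.
epochs_cleaned = ica.apply(epochs.copy())
```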
From 418a2e7d138eed2b1964bae4fae4ca0a3c8943c9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Fri, 3 Nov 2023 19:15:31 +0100
Subject: [PATCH 037/132] Add `"autoreject_local"` as new option for
`preprocessing/ptp_reject` (#807)
---
.../settings/preprocessing/artifacts.md | 1 +
docs/source/v1.5.md.inc | 4 +
mne_bids_pipeline/_config.py | 72 ++++++-----
mne_bids_pipeline/_config_import.py | 3 +-
.../steps/preprocessing/_08_ptp_reject.py | 119 ++++++++++++------
.../tests/configs/config_ERP_CORE.py | 18 +--
6 files changed, 146 insertions(+), 71 deletions(-)
diff --git a/docs/source/settings/preprocessing/artifacts.md b/docs/source/settings/preprocessing/artifacts.md
index 9a81b4d15..88407cd2c 100644
--- a/docs/source/settings/preprocessing/artifacts.md
+++ b/docs/source/settings/preprocessing/artifacts.md
@@ -17,3 +17,4 @@ tags:
- reject
- reject_tmin
- reject_tmax
+ - autoreject_n_interpolate
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index eb3e1656d..60aa4326d 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -15,6 +15,10 @@ All users are encouraged to update.
- Input validation has been improved by leveraging [pydantic](https://docs.pydantic.dev) (#779 by @larsoner)
- Reduced logging when reports are created and saved (#799 by @hoechenberger)
- Added [`"picard-extended_infomax"`][mne_bids_pipeline._config.ica_algorithm] ICA algorithm to perform "extended Infomax"-like ICA decomposition using Picard (#801 by @hoechenberger)
+- Added support for using "local" [`autoreject`](https://autoreject.github.io) to find (and repair) bad channels on a
+ per-epochs basis; this can be enabled by setting [`reject`][mne_bids_pipeline._config.reject] to `"autoreject_local"`.
+ The behavior can further be controlled via the new setting
+ [`autoreject_n_interpolate`][mne_bids_pipeline._config.autoreject_n_interpolate]. (#807 by @hoechenberger)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index bf05b8d4b..f2ed6015a 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1355,54 +1355,51 @@
# Rejection based on peak-to-peak amplitude
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-reject: Optional[Union[Dict[str, float], Literal["autoreject_global"]]] = None
+reject: Optional[
+ Union[Dict[str, float], Literal["autoreject_global", "autoreject_local"]]
+] = None
"""
Peak-to-peak amplitude limits to mark epochs as bad. This allows you to remove
epochs with strong transient artifacts.
-If `None` (default), do not apply artifact rejection. If a dictionary,
-manually specify rejection thresholds (see examples). If
-`'autoreject_global'`, use [`autoreject`](https://autoreject.github.io) to find
-suitable "global" rejection thresholds for each channel type, i.e. `autoreject`
-will generate a dictionary with (hopefully!) optimal thresholds for each
-channel type.
+!!! info
+ The rejection is performed **after** SSP or ICA, if any of those methods
+ is used. To reject epochs **before** fitting ICA, see the
+ [`ica_reject`][mne_bids_pipeline._config.ica_reject] setting.
+If `None` (default), do not apply artifact rejection.
+
+If a dictionary, manually specify rejection thresholds (see examples).
The thresholds provided here must be at least as stringent as those in
[`ica_reject`][mne_bids_pipeline._config.ica_reject] if using ICA. In case of
`'autoreject_global'`, thresholds for any channel that do not meet this
requirement will be automatically replaced with those used in `ica_reject`.
-!!! info
- The rejection is performed **after** SSP or ICA, if any of those methods
- is used. To reject epochs **before** fitting ICA, see the
- [`ica_reject`][mne_bids_pipeline._config.ica_reject] setting.
-
-If `None` (default), do not apply automated rejection. If a dictionary,
-manually specify rejection thresholds (see examples). If `'auto'`, use
-[`autoreject`](https://autoreject.github.io) to find suitable "global"
-rejection thresholds for each channel type, i.e. `autoreject` will generate
-a dictionary with (hopefully!) optimal thresholds for each channel type. Note
-that using `autoreject` can be a time-consuming process.
+If `"autoreject_global"`, use [`autoreject`](https://autoreject.github.io) to find
+suitable "global" rejection thresholds for each channel type, i.e., `autoreject`
+will generate a dictionary with (hopefully!) optimal thresholds for each
+channel type.
-!!! info
- `autoreject` basically offers two modes of operation: "global" and
- "local". In "global" mode, it will try to estimate one rejection
- threshold **per channel type.** In "local" mode, it will generate
- thresholds **for each individual channel.** Currently, the BIDS Pipeline
- only supports the "global" mode.
+If `"autoreject_local"`, use "local" `autoreject` to detect (and potentially repair) bad
+channels in each epoch. Use [`autoreject_n_interpolate`][mne_bids_pipeline._config.autoreject_n_interpolate]
+to control how many channels are allowed to be bad before an epoch gets dropped.
???+ example "Example"
```python
- reject = {'grad': 4000e-13, 'mag': 4e-12, 'eog': 150e-6}
- reject = {'eeg': 100e-6, 'eog': 250e-6}
+ reject = {"grad": 4000e-13, 'mag': 4e-12, 'eog': 150e-6}
+ reject = {"eeg": 100e-6, "eog": 250e-6}
reject = None # no rejection based on PTP amplitude
+ reject = "autoreject_global" # find global (per channel type) PTP thresholds
+ reject = "autoreject_local" # find local (per channel) thresholds and repair epochs
```
"""
reject_tmin: Optional[float] = None
"""
Start of the time window used to reject epochs. If `None`, the window will
-start with the first time point.
+start with the first time point. Has no effect if
+[`reject`][mne_bids_pipeline._config.reject] has been set to `"autoreject_local"`.
+
???+ example "Example"
```python
reject_tmin = -0.1 # 100 ms before event onset.
@@ -1412,13 +1409,32 @@
reject_tmax: Optional[float] = None
"""
End of the time window used to reject epochs. If `None`, the window will end
-with the last time point.
+with the last time point. Has no effect if
+[`reject`][mne_bids_pipeline._config.reject] has been set to `"autoreject_local"`.
+
???+ example "Example"
```python
reject_tmax = 0.3 # 300 ms after event onset.
```
"""
+autoreject_n_interpolate: FloatArrayLike = [4, 8, 16]
+"""
+The maximum number of bad channels in an epoch that local `autoreject` will try to
+interpolate. The optimal number among this list will be estimated using a
+cross-validation procedure; this means that the more elements are provided here, the
+longer the `autoreject` run will take. If the number of bad channels in an epoch
+exceeds the estimated optimum, the channels won't be interpolated and the epoch will
+be dropped.
+
+!!! info
+ This setting only takes effect if [`reject`][mne_bids_pipeline._config.reject] has
+ been set to `"autoreject_local"`.
+
+!!! info
+    Channels marked as globally bad in the BIDS dataset (in `*_channels.tsv`) will not
+ be considered (i.e., will remain marked as bad and not analyzed by autoreject).
+"""
+
###############################################################################
# DECODING
# --------
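A short sketch of how this list reaches `autoreject`, mirroring the `_08_ptp_reject.py` hunk below; `epochs` is assumed to exist, and the candidate values shown are the defaults:

```python
import autoreject
import numpy as np

# `epochs` is an assumed, already-loaded mne.Epochs object.
ar = autoreject.AutoReject(
    n_interpolate=np.array([4, 8, 16]),  # candidates; best value found by cross-validation
    random_state=42,  # illustrative
    n_jobs=1,
    verbose=False,
)
epochs_clean, reject_log = ar.fit_transform(epochs, return_log=True)
print(f"Cross-validated n_interpolate: {ar.n_interpolate_}")
print(f"Epochs dropped: {reject_log.bad_epochs.sum()}")
```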
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index af135dcef..14a55df2e 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -259,7 +259,7 @@ def _check_config(config: SimpleNamespace, config_path: Optional[PathLike]) -> N
if (
ica_reject is not None
and reject is not None
- and reject != "autoreject_global"
+ and reject not in ["autoreject_global", "autoreject_local"]
):
for ch_type in reject:
if ch_type in ica_reject and reject[ch_type] > ica_reject[ch_type]:
@@ -354,6 +354,7 @@ def _default_factory(key, val):
{"custom": (8, 24.0, 40)}, # decoding_csp_freqs
{"suffix": "ave"}, # source_info_path_update
["evoked"], # inverse_targets
+ [4, 8, 16], # autoreject_n_interpolate
]
for typ in (dict, list):
if isinstance(val, typ):
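To make the check above concrete, a hypothetical configuration it would reject: `reject` thresholds must be at least as stringent as (i.e., not larger than) the corresponding `ica_reject` thresholds:

```python
# Hypothetical config that would fail the validation above:
ica_reject = dict(eeg=100e-6)  # threshold used before ICA fitting
reject = dict(eeg=200e-6)      # looser than ica_reject["eeg"] -> raises an error

# To pass, use reject["eeg"] <= ica_reject["eeg"], or set reject to
# "autoreject_global" / "autoreject_local", which skip this check.
```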
diff --git a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
index 7f7caefbb..54124eaeb 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
@@ -11,6 +11,9 @@
from types import SimpleNamespace
from typing import Optional
+import numpy as np
+import autoreject
+
import mne
from mne_bids import BIDSPath
@@ -79,46 +82,72 @@ def drop_ptp(
# Get rejection parameters and drop bad epochs
epochs = mne.read_epochs(in_files.pop("epochs"), preload=True)
- reject = _get_reject(
- subject=subject,
- session=session,
- reject=cfg.reject,
- ch_types=cfg.ch_types,
- param="reject",
- epochs=epochs,
- )
- if cfg.spatial_filter == "ica":
- ica_reject = _get_reject(
+
+ if cfg.reject == "autoreject_local":
+ msg = "Using autoreject to find and repair bad epochs"
+ logger.info(**gen_log_kwargs(message=msg))
+
+ ar = autoreject.AutoReject(
+ n_interpolate=np.array(cfg.autoreject_n_interpolate),
+ random_state=cfg.random_state,
+ n_jobs=exec_params.n_jobs,
+ verbose=False,
+ )
+ n_epochs_before_reject = len(epochs)
+ epochs, reject_log = ar.fit_transform(epochs, return_log=True)
+ n_epochs_after_reject = len(epochs)
+ assert (
+ n_epochs_before_reject - n_epochs_after_reject
+ == reject_log.bad_epochs.sum()
+ )
+
+ msg = (
+ f"autoreject marked {reject_log.bad_epochs.sum()} epochs as bad "
+ f"(cross-validated n_interpolate limit: {ar.n_interpolate_})"
+ )
+ logger.info(**gen_log_kwargs(message=msg))
+ else:
+ reject = _get_reject(
subject=subject,
session=session,
- reject=cfg.ica_reject,
+ reject=cfg.reject,
ch_types=cfg.ch_types,
- param="ica_reject",
+ param="reject",
+ epochs=epochs,
)
- else:
- ica_reject = None
-
- if ica_reject is not None:
- for ch_type, threshold in ica_reject.items():
- if ch_type in reject and threshold < reject[ch_type]:
- # This can only ever happen in case of
- # reject = 'autoreject_global'
- msg = (
- f"Adjusting PTP rejection threshold proposed by "
- f"autoreject, as it is greater than ica_reject: "
- f"{ch_type}: {reject[ch_type]} -> {threshold}"
- )
- logger.info(**gen_log_kwargs(message=msg))
- reject[ch_type] = threshold
-
- msg = f"Using PTP rejection thresholds: {reject}"
- logger.info(**gen_log_kwargs(message=msg))
- n_epochs_before_reject = len(epochs)
- epochs.reject_tmin = cfg.reject_tmin
- epochs.reject_tmax = cfg.reject_tmax
- epochs.drop_bad(reject=reject)
- n_epochs_after_reject = len(epochs)
+ if cfg.spatial_filter == "ica":
+ ica_reject = _get_reject(
+ subject=subject,
+ session=session,
+ reject=cfg.ica_reject,
+ ch_types=cfg.ch_types,
+ param="ica_reject",
+ )
+ else:
+ ica_reject = None
+
+ if ica_reject is not None:
+ for ch_type, threshold in ica_reject.items():
+ if ch_type in reject and threshold < reject[ch_type]:
+ # This can only ever happen in case of
+ # reject = 'autoreject_global'
+ msg = (
+ f"Adjusting PTP rejection threshold proposed by "
+ f"autoreject, as it is greater than ica_reject: "
+ f"{ch_type}: {reject[ch_type]} -> {threshold}"
+ )
+ logger.info(**gen_log_kwargs(message=msg))
+ reject[ch_type] = threshold
+
+ msg = f"Using PTP rejection thresholds: {reject}"
+ logger.info(**gen_log_kwargs(message=msg))
+
+ n_epochs_before_reject = len(epochs)
+ epochs.reject_tmin = cfg.reject_tmin
+ epochs.reject_tmax = cfg.reject_tmax
+ epochs.drop_bad(reject=reject)
+ n_epochs_after_reject = len(epochs)
if 0 < n_epochs_after_reject < 0.5 * n_epochs_before_reject:
msg = (
@@ -155,6 +184,24 @@ def drop_ptp(
with _open_report(
cfg=cfg, exec_params=exec_params, subject=subject, session=session
) as report:
+ if cfg.reject == "autoreject_local":
+ caption = (
+ f"Autoreject was run to produce cleaner epochs. "
+ f"{reject_log.bad_epochs.sum()} epochs were rejected because more than "
+ f"{ar.n_interpolate_} channels were bad (cross-validated n_interpolate "
+ f"limit; excluding globally bad and non-data channels, shown in white)."
+ )
+ report.add_figure(
+ fig=reject_log.plot(
+ orientation="horizontal", aspect="auto", show=False
+ ),
+ title="Epochs: Autoreject cleaning",
+ caption=caption,
+ tags=("epochs", "autoreject"),
+ replace=True,
+ )
+ del caption
+
report.add_epochs(
epochs=epochs,
title="Epochs: after cleaning",
@@ -176,6 +223,8 @@ def get_config(
spatial_filter=config.spatial_filter,
ica_reject=config.ica_reject,
reject=config.reject,
+ autoreject_n_interpolate=config.autoreject_n_interpolate,
+ random_state=config.random_state,
ch_types=config.ch_types,
_epochs_split_size=config._epochs_split_size,
**_bids_kwargs(config=config),
diff --git a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
index 91699312a..45e5e9477 100644
--- a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
+++ b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
@@ -71,13 +71,18 @@
t_break_annot_start_after_previous_event = 3.0
t_break_annot_stop_before_next_event = 1.5
-ica_reject = dict(eeg=350e-6, eog=500e-6)
-reject = "autoreject_global"
+if task == "N400": # test autoreject local without ICA
+ spatial_filter = None
+ reject = "autoreject_local"
+ autoreject_n_interpolate = [2, 4]
+else:
+ ica_reject = dict(eeg=350e-6, eog=500e-6)
+ reject = "autoreject_global"
-spatial_filter = "ica"
-ica_max_iterations = 1000
-ica_eog_threshold = 2
-ica_decim = 2 # speed up ICA fitting
+ spatial_filter = "ica"
+ ica_max_iterations = 1000
+ ica_eog_threshold = 2
+ ica_decim = 2 # speed up ICA fitting
run_source_estimation = False
@@ -104,7 +109,6 @@
}
eeg_reference = ["P9", "P10"]
- ica_n_components = 30 - len(eeg_reference)
epochs_tmin = -0.2
epochs_tmax = 0.8
epochs_metadata_tmin = 0
From db45d21534a0eea18e5666e2cface98c263fe17c Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Sat, 4 Nov 2023 07:14:10 -0400
Subject: [PATCH 038/132] ENH: Table sorting and ds004229 updates (#809)
---
.circleci/config.yml | 6 +++---
docs/mkdocs.yml | 5 +++++
docs/source/examples/gen_examples.py | 3 ++-
docs/source/javascripts/tablesort.js | 6 ++++++
docs/source/v1.5.md.inc | 2 ++
mne_bids_pipeline/_config_utils.py | 2 +-
mne_bids_pipeline/tests/configs/config_ds004229.py | 2 --
mne_bids_pipeline/tests/datasets.py | 2 --
8 files changed, 19 insertions(+), 9 deletions(-)
create mode 100644 docs/source/javascripts/tablesort.js
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 92f446287..aa0f22479 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -265,7 +265,7 @@ jobs:
at: ~/
- restore_cache:
keys:
- - data-cache-ds004229-2
+ - data-cache-ds004229-103
- bash_env
- run:
name: Get ds004229
@@ -273,7 +273,7 @@ jobs:
$DOWNLOAD_DATA ds004229
- codecov/upload
- save_cache:
- key: data-cache-ds004229-2
+ key: data-cache-ds004229-103
paths:
- ~/mne_data/ds004229
@@ -719,7 +719,7 @@ jobs:
- bash_env
- restore_cache:
keys:
- - data-cache-ds004229-2
+ - data-cache-ds004229-103
- run:
name: test ds004229
command: $RUN_TESTS ds004229
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 990fe80da..8763aa9c0 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -65,6 +65,10 @@ copyright: Copyright © MNE-BIDS-Pipeline authors
extra_css:
- css/extra.css
+# https://squidfunk.github.io/mkdocs-material/reference/data-tables/
+extra_javascript:
+ - https://unpkg.com/tablesort@5.3.0/dist/tablesort.min.js
+ - javascripts/tablesort.js
nav:
- Home: index.md
- Getting started:
@@ -172,6 +176,7 @@ markdown_extensions:
repo_url_shorthand: true
repo: mne-bids-pipeline
user: mne-tools
+ - tables
- toc:
permalink: true # Add paragraph symbol to link to current headline
- pymdownx.tabbed:
diff --git a/docs/source/examples/gen_examples.py b/docs/source/examples/gen_examples.py
index 8fa4f114a..f24c0d29f 100755
--- a/docs/source/examples/gen_examples.py
+++ b/docs/source/examples/gen_examples.py
@@ -261,6 +261,7 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict:
demonstrated_funcs = _gen_demonstrated_funcs(config_path)
all_demonstrated[dataset_name] = demonstrated_funcs
del config, config_options
+ # Add the subsection and table header
funcs = [
"## Demonstrated features\n",
"Feature | This example",
@@ -279,7 +280,7 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict:
f.write(config_str)
f.write(report_str)
-# Finally, write our examples.html file
+# Finally, write our examples.html file with a table of examples
_example_header = """\
# Examples
diff --git a/docs/source/javascripts/tablesort.js b/docs/source/javascripts/tablesort.js
new file mode 100644
index 000000000..2e9fd4e51
--- /dev/null
+++ b/docs/source/javascripts/tablesort.js
@@ -0,0 +1,6 @@
+document$.subscribe(function() {
+ var tables = document.querySelectorAll("article table:not([class])")
+ tables.forEach(function(table) {
+ new Tablesort(table)
+ })
+ })
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 60aa4326d..eb0d96932 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -19,6 +19,7 @@ All users are encouraged to update.
per-epochs basis; this can be enabled by setting [`reject`][mne_bids_pipeline._config.reject] to `"autoreject_local"`.
The behavior can further be controlled via the new setting
[`autoreject_n_interpolate`][mne_bids_pipeline._config.autoreject_n_interpolate]. (#807 by @hoechenberger)
+- Website documentation tables can now be sorted (e.g., to find examples that use a specific feature) (#808 by @larsoner)
[//]: # (### :warning: Behavior changes)
@@ -38,6 +39,7 @@ All users are encouraged to update.
- Enable [pre-commit.ci](https://pre-commit.ci) (#774 by @larsoner)
- Use `pooch` for web downloads (#775 by @larsoner)
- Ensure compatibility with MNE-Python 1.6 (#800 by @hoechenberger)
+- Updated the ds004229 testing dataset to v1.0.3 (#808 by @larsoner)
### :bug: Bug fixes
diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py
index e08cfb06e..35ed07512 100644
--- a/mne_bids_pipeline/_config_utils.py
+++ b/mne_bids_pipeline/_config_utils.py
@@ -361,7 +361,7 @@ def get_mf_cal_fname(
root=config.bids_root,
).meg_calibration_fpath
if mf_cal_fpath is None:
- raise ValueError("Could not find Maxwell Filter Calibration " "file.")
+ raise ValueError("Could not find Maxwell Filter Calibration file.")
else:
mf_cal_fpath = pathlib.Path(config.mf_cal_fname).expanduser().absolute()
if not mf_cal_fpath.exists():
diff --git a/mne_bids_pipeline/tests/configs/config_ds004229.py b/mne_bids_pipeline/tests/configs/config_ds004229.py
index 1c625eb90..e4ca6d449 100644
--- a/mne_bids_pipeline/tests/configs/config_ds004229.py
+++ b/mne_bids_pipeline/tests/configs/config_ds004229.py
@@ -16,8 +16,6 @@
find_flat_channels_meg = True
find_noisy_channels_meg = True
use_maxwell_filter = True
-mf_cal_fname = bids_root + "/derivatives/meg_derivatives/sss_cal.dat"
-mf_ctc_fname = bids_root + "/derivatives/meg_derivatives/ct_sparse.fif"
mf_destination = mne.transforms.translation( # rotate backward and move up
z=0.055,
) @ mne.transforms.rotation(x=np.deg2rad(-15))
diff --git a/mne_bids_pipeline/tests/datasets.py b/mne_bids_pipeline/tests/datasets.py
index c27d1ab4e..60ace0c48 100644
--- a/mne_bids_pipeline/tests/datasets.py
+++ b/mne_bids_pipeline/tests/datasets.py
@@ -118,8 +118,6 @@ class DATASET_OPTIONS_T(TypedDict, total=False):
"include": [
"sub-102",
"sub-emptyroom/ses-20000101",
- "derivatives/meg_derivatives/ct_sparse.fif",
- "derivatives/meg_derivatives/sss_cal.dat",
],
},
}
From 3931daf791f3dbf976c5a01bc959e5557937d31a Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 6 Nov 2023 14:52:12 -0500
Subject: [PATCH 039/132] [pre-commit.ci] pre-commit autoupdate (#811)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 880e7a007..e9ddf043a 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,7 +13,7 @@ repos:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.3
+ rev: v0.1.4
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From abd545870d294d4f636ea1aaca736e4f2b3da673 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Tue, 7 Nov 2023 18:01:05 +0100
Subject: [PATCH 040/132] Add support for `autoreject` (local) before running
ICA (#810)
---
docs/source/v1.5.md.inc | 7 +-
mne_bids_pipeline/_config.py | 47 ++++++++----
.../steps/preprocessing/_06a_run_ica.py | 76 ++++++++++++++-----
.../steps/preprocessing/_08_ptp_reject.py | 10 ++-
.../tests/configs/config_ERP_CORE.py | 15 ++--
5 files changed, 110 insertions(+), 45 deletions(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index eb0d96932..cdafca0d3 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -16,9 +16,12 @@ All users are encouraged to update.
- Reduced logging when reports are created and saved (#799 by @hoechenberger)
- Added [`"picard-extended_infomax"`][mne_bids_pipeline._config.ica_algorithm] ICA algorithm to perform "extended Infomax"-like ICA decomposition using Picard (#801 by @hoechenberger)
- Added support for using "local" [`autoreject`](https://autoreject.github.io) to find (and repair) bad channels on a
- per-epochs basis; this can be enabled by setting [`reject`][mne_bids_pipeline._config.reject] to `"autoreject_local"`.
- The behavior can further be controlled via the new setting
+ per-epoch basis as the last preprocessing step; this can be enabled by setting [`reject`][mne_bids_pipeline._config.reject]
+ to `"autoreject_local"`. The behavior can further be controlled via the new setting
[`autoreject_n_interpolate`][mne_bids_pipeline._config.autoreject_n_interpolate]. (#807 by @hoechenberger)
+- Added support for "local" [`autoreject`](https://autoreject.github.io) to find (and repair) bad channels on a per-epoch
+ basis before submitting them to ICA fitting. This can be enabled by setting [`ica_reject`][mne_bids_pipeline._config.ica_reject]
+ to `"autoreject_local"`. (#810 by @hoechenberger)
- Website documentation tables can now be sorted (e.g., to find examples that use a specific feature) (#808 by @larsoner)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index f2ed6015a..32e0c9735 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1233,13 +1233,34 @@
# Rejection based on ICA
# ~~~~~~~~~~~~~~~~~~~~~~
-ica_reject: Optional[Dict[str, float]] = None
-"""
-Peak-to-peak amplitude limits to exclude epochs from ICA fitting. Epochs exceeding these
-limits will be excluded from ICA fitting. This allows you to remove strong transient
-artifacts, which could negatively affect ICA performance.
-
-This will also be applied to ECG and EOG epochs created during preprocessing.
+ica_reject: Optional[Union[Dict[str, float], Literal["autoreject_local"]]] = None
+"""
+Peak-to-peak amplitude limits to exclude epochs from ICA fitting. This allows you to
+remove strong transient artifacts from the epochs used for fitting ICA, which could
+negatively affect ICA performance.
+
+The parameter values are the same as for [`reject`][mne_bids_pipeline._config.reject],
+but `"autoreject_global"` is not supported.
+
+???+ info
+ We don't support `"autoreject_global"` here (as opposed to
+ [`reject`][mne_bids_pipeline._config.reject]) because in the past, we found that
+ rejection thresholds were too strict before running ICA, i.e., too many epochs
+    got rejected. `"autoreject_local"`, however, usually performed well.
+ The `autoreject` documentation
+    [recommends](https://autoreject.github.io/stable/auto_examples/plot_autoreject_workflow.html)
+    running local `autoreject` before and after ICA, which can be achieved by setting
+    both `ica_reject` and [`reject`][mne_bids_pipeline._config.reject] to
+ `"autoreject_local"`.
+
+If passing a dictionary, the rejection limits will also be applied to the ECG and EOG
+epochs created to find heart beats and ocular artifacts.
+
+???+ info
+ MNE-BIDS-Pipeline will automatically try to detect EOG and ECG artifacts in
+ your data, and remove them. For this to work properly, it is recommended
+ to **not** specify rejection thresholds for EOG and ECG channels here –
+ otherwise, ICA won't be able to "see" these artifacts.
???+ info
This setting is applied only to the epochs that are used for **fitting** ICA. The
@@ -1249,19 +1270,13 @@
contain large-amplitude artifacts. Those epochs can then be rejected by using
the [`reject`][mne_bids_pipeline._config.reject] parameter.
-The BIDS Pipeline will automatically try to detect EOG and ECG artifacts in
-your data, and remove them. For this to work properly, it is recommended
-to **not** specify rejection thresholds for EOG and ECG channels here –
-otherwise, ICA won't be able to "see" these artifacts.
-
-If `None` (default), do not apply artifact rejection. If a dictionary,
-manually specify peak-to-peak rejection thresholds (see examples).
-
???+ example "Example"
```python
ica_reject = {'grad': 10e-10, 'mag': 20e-12, 'eeg': 400e-6}
ica_reject = {'grad': 15e-10}
- ica_reject = None # no rejection
+ ica_reject = None # no rejection before fitting ICA
+ ica_reject = "autoreject_global" # find global (per channel type) PTP thresholds before fitting ICA
+ ica_reject = "autoreject_local" # find local (per channel) thresholds and repair epochs before fitting ICA
```
"""
diff --git a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
index 029f0682f..efd8bec84 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
@@ -16,6 +16,7 @@
import pandas as pd
import numpy as np
+import autoreject
import mne
from mne.report import Report
@@ -191,7 +192,7 @@ def detect_bad_components(
ica: mne.preprocessing.ICA,
ch_names: Optional[List[str]],
subject: str,
- session: str,
+ session: Optional[str],
) -> Tuple[List[int], np.ndarray]:
artifact = which.upper()
msg = f"Performing automated {artifact} artifact detection …"
@@ -390,33 +391,51 @@ def run_ica(
epochs_ecg = ecg_epochs_all_runs
epochs_eog = eog_epochs_all_runs
- del epochs_all_runs, eog_epochs_all_runs, ecg_epochs_all_runs
+ del epochs_all_runs, eog_epochs_all_runs, ecg_epochs_all_runs, run
# Set an EEG reference
if "eeg" in cfg.ch_types:
projection = True if cfg.eeg_reference == "average" else False
epochs.set_eeg_reference(cfg.eeg_reference, projection=projection)
- # Reject epochs based on peak-to-peak rejection thresholds
- ica_reject = _get_reject(
- subject=subject,
- session=session,
- reject=cfg.ica_reject,
- ch_types=cfg.ch_types,
- param="ica_reject",
- )
+ if cfg.ica_reject == "autoreject_local":
+ msg = "Using autoreject to find and repair bad epochs before fitting ICA"
+ logger.info(**gen_log_kwargs(message=msg))
- msg = f"Using PTP rejection thresholds: {ica_reject}"
- logger.info(**gen_log_kwargs(message=msg))
+ ar = autoreject.AutoReject(
+ n_interpolate=cfg.autoreject_n_interpolate,
+ random_state=cfg.random_state,
+ n_jobs=exec_params.n_jobs,
+ verbose=False,
+ )
+ ar.fit(epochs)
+ epochs, reject_log = ar.transform(epochs, return_log=True)
+ msg = (
+ f"autoreject marked {reject_log.bad_epochs.sum()} epochs as bad "
+ f"(cross-validated n_interpolate limit: {ar.n_interpolate_})"
+ )
+ logger.info(**gen_log_kwargs(message=msg))
+ else:
+ # Reject epochs based on peak-to-peak rejection thresholds
+ ica_reject = _get_reject(
+ subject=subject,
+ session=session,
+ reject=cfg.ica_reject,
+ ch_types=cfg.ch_types,
+ param="ica_reject",
+ )
+
+ msg = f"Using PTP rejection thresholds: {ica_reject}"
+ logger.info(**gen_log_kwargs(message=msg))
- epochs.drop_bad(reject=ica_reject)
- if epochs_eog is not None:
- epochs_eog.drop_bad(reject=ica_reject)
- if epochs_ecg is not None:
- epochs_ecg.drop_bad(reject=ica_reject)
+ epochs.drop_bad(reject=ica_reject)
+ if epochs_eog is not None:
+ epochs_eog.drop_bad(reject=ica_reject)
+ if epochs_ecg is not None:
+ epochs_ecg.drop_bad(reject=ica_reject)
# Now actually perform ICA.
- msg = "Calculating ICA solution."
+ msg = f"Calculating ICA solution using method: {cfg.ica_algorithm}."
logger.info(**gen_log_kwargs(message=msg))
ica = fit_ica(cfg=cfg, epochs=epochs, subject=subject, session=session)
@@ -497,6 +516,24 @@ def run_ica(
eog_scores = None if len(eog_scores) == 0 else eog_scores
with _agg_backend():
+ if cfg.ica_reject == "autoreject_local":
+ caption = (
+ f"Autoreject was run to produce cleaner epochs before fitting ICA. "
+ f"{reject_log.bad_epochs.sum()} epochs were rejected because more than "
+ f"{ar.n_interpolate_} channels were bad (cross-validated n_interpolate "
+ f"limit; excluding globally bad and non-data channels, shown in white)."
+ )
+ report.add_figure(
+ fig=reject_log.plot(
+ orientation="horizontal", aspect="auto", show=False
+ ),
+ title="Epochs: Autoreject cleaning",
+ caption=caption,
+ tags=("ica", "epochs", "autoreject"),
+ replace=True,
+ )
+ del caption
+
report.add_epochs(
epochs=epochs,
title="Epochs used for ICA fitting",
@@ -536,7 +573,7 @@ def run_ica(
def get_config(
*,
config: SimpleNamespace,
- subject: Optional[str] = None,
+ subject: str,
session: Optional[str] = None,
) -> SimpleNamespace:
cfg = SimpleNamespace(
@@ -551,6 +588,7 @@ def get_config(
ica_reject=config.ica_reject,
ica_eog_threshold=config.ica_eog_threshold,
ica_ctps_ecg_threshold=config.ica_ctps_ecg_threshold,
+ autoreject_n_interpolate=config.autoreject_n_interpolate,
random_state=config.random_state,
ch_types=config.ch_types,
l_freq=config.l_freq,
diff --git a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
index 54124eaeb..3c02ad91c 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
@@ -116,7 +116,7 @@ def drop_ptp(
epochs=epochs,
)
- if cfg.spatial_filter == "ica":
+ if cfg.spatial_filter == "ica" and cfg.ica_reject != "autoreject_local":
ica_reject = _get_reject(
subject=subject,
session=session,
@@ -156,9 +156,13 @@ def drop_ptp(
)
logger.warning(**gen_log_kwargs(message=msg))
elif n_epochs_after_reject == 0:
+ rejection_type = (
+ cfg.reject
+ if cfg.reject in ["autoreject_global", "autoreject_local"]
+ else "PTP-based"
+ )
raise RuntimeError(
- "No epochs remaining after peak-to-peak-based "
- "rejection. Cannot continue."
+ f"No epochs remaining after {rejection_type} rejection. Cannot continue."
)
msg = "Saving cleaned, baseline-corrected epochs …"
diff --git a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
index 45e5e9477..47fcb5846 100644
--- a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
+++ b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
@@ -75,17 +75,22 @@
spatial_filter = None
reject = "autoreject_local"
autoreject_n_interpolate = [2, 4]
+elif task == "N170": # test autoreject local before ICA
+ spatial_filter = "ica"
+ ica_reject = "autoreject_local"
+ reject = "autoreject_global"
+ autoreject_n_interpolate = [2, 4]
else:
+ spatial_filter = "ica"
ica_reject = dict(eeg=350e-6, eog=500e-6)
reject = "autoreject_global"
- spatial_filter = "ica"
- ica_max_iterations = 1000
- ica_eog_threshold = 2
- ica_decim = 2 # speed up ICA fitting
+# These settings are only used for the cases where spatial_filter="ica"
+ica_max_iterations = 1000
+ica_eog_threshold = 2
+ica_decim = 2 # speed up ICA fitting
run_source_estimation = False
-
on_rename_missing_events = "ignore"
parallel_backend = "dask"
From 8d8717d6d31b040ecde86977e30026219a2a8534 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Tue, 7 Nov 2023 16:51:26 -0500
Subject: [PATCH 041/132] BUG: Fix bug with verbosity
---
docs/source/v1.5.md.inc | 1 +
mne_bids_pipeline/_parallel.py | 25 ++++++++++++++-----------
2 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index cdafca0d3..5522271b1 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -50,4 +50,5 @@ All users are encouraged to update.
- Fixed bug with parallelization across runs for Maxwell filtering (#761 by @larsoner)
- Fixed bug where head position files were not written with a proper suffix and extension (#761 by @larsoner)
- Fixed bug where default values for `decoding_csp_times` and `decoding_csp_freqs` were not set dynamically based on the config parameters (#779 by @larsoner)
+- Fixed bug where the MNE logger verbosity was not respected inside parallel jobs (#813 by @larsoner)
- A number of processing steps erroneously **always** operated on un-cleaned epochs (`sensor/decoding_full_epochs`, `sensor/decoding_time_by_time`, `sensor/decoding_csp`); or operated on un-cleaned epochs (without PTP rejection) if no ICA or SSP was requested (`sensor/time_frequency`, `sensor/make_cov`). The bug in `sensor/make_cov` could propagate to the source level, as the covariance matrix is used for inverse modeling. (#796 by @hoechenberger)
\ No newline at end of file
diff --git a/mne_bids_pipeline/_parallel.py b/mne_bids_pipeline/_parallel.py
index 12f70cf57..e79ae5151 100644
--- a/mne_bids_pipeline/_parallel.py
+++ b/mne_bids_pipeline/_parallel.py
@@ -4,6 +4,7 @@
from types import SimpleNamespace
import joblib
+from mne.utils import use_log_level, logger as mne_logger
from ._logging import logger, gen_log_kwargs, _is_testing
@@ -127,19 +128,21 @@ def get_parallel_backend(exec_params: SimpleNamespace) -> joblib.parallel_backen
def parallel_func(func: Callable, *, exec_params: SimpleNamespace):
- if get_parallel_backend_name(exec_params=exec_params) == "loky":
- if get_n_jobs(exec_params=exec_params) == 1:
- my_func = func
- parallel = list
- else:
- from joblib import Parallel, delayed
-
- parallel = Parallel()
- my_func = delayed(func)
- else: # Dask
+ if (
+ get_parallel_backend_name(exec_params=exec_params) == "loky"
+ and get_n_jobs(exec_params=exec_params) == 1
+ ):
+ my_func = func
+ parallel = list
+ else: # Dask or n_jobs > 1
from joblib import Parallel, delayed
parallel = Parallel()
- my_func = delayed(func)
+
+ def run_verbose(*args, verbose=mne_logger.level, **kwargs):
+ with use_log_level(verbose=verbose):
+ return func(*args, **kwargs)
+
+ my_func = delayed(run_verbose)
return parallel, my_func
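The pattern used here, capturing the MNE log level at submission time through a default argument so it travels into the worker, can be sketched standalone (a minimal example, not the pipeline's exact code):

```python
import joblib
from mne.utils import logger as mne_logger, use_log_level


def wrap_with_verbosity(func):
    # Default-argument binding captures the current MNE log level *now*,
    # so it is restored inside each (possibly remote) joblib worker.
    def run_verbose(*args, verbose=mne_logger.level, **kwargs):
        with use_log_level(verbose=verbose):
            return func(*args, **kwargs)

    return joblib.delayed(run_verbose)


# Usage sketch:
parallel = joblib.Parallel(n_jobs=2)
my_func = wrap_with_verbosity(print)
parallel(my_func(i) for i in range(4))
```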
From 76b1dd7b91c5a510572ff3f613ccdb23cad2805e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Mon, 13 Nov 2023 18:23:42 +0100
Subject: [PATCH 042/132] Do not interpolate bad segments using autoreject
before ICA (#816)
---
docs/source/v1.5.md.inc | 6 +++---
mne_bids_pipeline/_config.py | 4 +++-
.../steps/preprocessing/_06a_run_ica.py | 14 +++++++++++---
.../steps/preprocessing/_08_ptp_reject.py | 5 ++++-
4 files changed, 21 insertions(+), 8 deletions(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 5522271b1..eb270912d 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -19,9 +19,9 @@ All users are encouraged to update.
per-epoch basis as the last preprocessing step; this can be enabled by setting [`reject`][mne_bids_pipeline._config.reject]
to `"autoreject_local"`. The behavior can further be controlled via the new setting
[`autoreject_n_interpolate`][mne_bids_pipeline._config.autoreject_n_interpolate]. (#807 by @hoechenberger)
-- Added support for "local" [`autoreject`](https://autoreject.github.io) to find (and repair) bad channels on a per-epoch
- basis before submitting them to ICA fitting. This can be enabled by setting [`ica_reject`][mne_bids_pipeline._config.ica_reject]
- to `"autoreject_local"`. (#810 by @hoechenberger)
+- Added support for "local" [`autoreject`](https://autoreject.github.io) to remove bad epochs
+ before submitting the data to ICA fitting. This can be enabled by setting [`ica_reject`][mne_bids_pipeline._config.ica_reject]
+ to `"autoreject_local"`. (#810, #816 by @hoechenberger)
- Website documentation tables can now be sorted (e.g., to find examples that use a specific feature) (#808 by @larsoner)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 32e0c9735..807d0a495 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1240,7 +1240,9 @@
negatively affect ICA performance.
The parameter values are the same as for [`reject`][mne_bids_pipeline._config.reject],
-but `"autoreject_global"` is not supported.
+but `"autoreject_global"` is not supported. `"autoreject_local"` here behaves
+differently, too: it is only used to exclude bad epochs from ICA fitting; we do not
+perform any interpolation.
???+ info
We don't support `"autoreject_global"` here (as opposed to
diff --git a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
index efd8bec84..294d05a26 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
@@ -399,7 +399,10 @@ def run_ica(
epochs.set_eeg_reference(cfg.eeg_reference, projection=projection)
if cfg.ica_reject == "autoreject_local":
- msg = "Using autoreject to find and repair bad epochs before fitting ICA"
+ msg = (
+ "Using autoreject to find bad epochs before fitting ICA "
+ "(no interpolation will be performend)"
+ )
logger.info(**gen_log_kwargs(message=msg))
ar = autoreject.AutoReject(
@@ -409,7 +412,8 @@ def run_ica(
verbose=False,
)
ar.fit(epochs)
- epochs, reject_log = ar.transform(epochs, return_log=True)
+ reject_log = ar.get_reject_log(epochs)
+ epochs = epochs[~reject_log.bad_epochs]
msg = (
f"autoreject marked {reject_log.bad_epochs.sum()} epochs as bad "
f"(cross-validated n_interpolate limit: {ar.n_interpolate_})"
@@ -521,7 +525,11 @@ def run_ica(
f"Autoreject was run to produce cleaner epochs before fitting ICA. "
f"{reject_log.bad_epochs.sum()} epochs were rejected because more than "
f"{ar.n_interpolate_} channels were bad (cross-validated n_interpolate "
- f"limit; excluding globally bad and non-data channels, shown in white)."
+ f"limit; excluding globally bad and non-data channels, shown in "
+ f"white). Note that none of the blue segments were actually "
+ f"interpolated before submitting the data to ICA. This is following "
+ f"the recommended approach for ICA described in the the Autoreject "
+ f"documentation."
)
report.add_figure(
fig=reject_log.plot(
diff --git a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
index 3c02ad91c..b4a29f4e7 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
@@ -84,7 +84,10 @@ def drop_ptp(
epochs = mne.read_epochs(in_files.pop("epochs"), preload=True)
if cfg.reject == "autoreject_local":
- msg = "Using autoreject to find and repair bad epochs"
+ msg = (
+ "Using autoreject to find and repair bad epochs (interpolating bad "
+ "segments)"
+ )
logger.info(**gen_log_kwargs(message=msg))
ar = autoreject.AutoReject(
From 1705142aa3d2d5f5c20ac957f99108ed590f6739 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Mon, 13 Nov 2023 18:26:56 +0100
Subject: [PATCH 043/132] Omit bad channels when gathering data used for
decoding (#817)
---
docs/source/v1.5.md.inc | 4 +++-
mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py | 2 +-
mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py | 2 +-
mne_bids_pipeline/steps/sensor/_05_decoding_csp.py | 4 ++--
4 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index eb270912d..d1def9b4f 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -51,4 +51,6 @@ All users are encouraged to update.
- Fixed bug where head position files were not written with a proper suffix and extension (#761 by @larsoner)
- Fixed bug where default values for `decoding_csp_times` and `decoding_csp_freqs` were not set dynamically based on the config parameters (#779 by @larsoner)
- Fixed bug where the MNE logger verbosity was not respected inside parallel jobs (#813 by @larsoner)
-- A number of processing steps erroneously **always** operated on un-cleaned epochs (`sensor/decoding_full_epochs`, `sensor/decoding_time_by_time`, `sensor/decoding_csp`); or operated on un-cleaned epochs (without PTP rejection) if no ICA or SSP was requested (`sensor/time_frequency`, `sensor/make_cov`). The bug in `sensor/make_cov` could propagate to the source level, as the covariance matrix is used for inverse modeling. (#796 by @hoechenberger)
\ No newline at end of file
+- A number of processing steps erroneously **always** operated on un-cleaned epochs (`sensor/decoding_full_epochs`, `sensor/decoding_time_by_time`, `sensor/decoding_csp`); or operated on un-cleaned epochs (without PTP rejection) if no ICA or SSP was requested (`sensor/time_frequency`, `sensor/make_cov`). The bug in `sensor/make_cov` could propagate to the source level, as the covariance matrix is used for inverse modeling. (#796 by @hoechenberger)
+- Bad channels may have been submitted to MVPA (full epochs decoding, time-by-time decoding, CSP-based decoding) when not using Maxwell filtering
+ (i.e., usually only EEG data was affected). This has now been fixed and data from bad channels is omitted from decoding. (#817 by @hoechenberger)
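The fix itself is a single argument: `picks="data"` restricts the extracted array to good data channels, so channels marked as bad no longer leak into the classifiers. A minimal sketch, assuming an existing `epochs` object:

```python
# `epochs` is an assumed mne.Epochs object; epochs.info["bads"] may be non-empty.
X_all = epochs.get_data()               # includes bad channels
X_good = epochs.get_data(picks="data")  # good data channels only

# The channel dimension shrinks by the number of bad data channels.
print(X_all.shape, X_good.shape)
```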
diff --git a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
index d1d8157a1..3fe37aa30 100644
--- a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
+++ b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
@@ -118,7 +118,7 @@ def run_epochs_decoding(
n_cond1 = len(epochs[epochs_conds[0]])
n_cond2 = len(epochs[epochs_conds[1]])
- X = epochs.get_data()
+ X = epochs.get_data(picks="data") # omit bad channels
y = np.r_[np.ones(n_cond1), np.zeros(n_cond2)]
classification_pipeline = make_pipeline(
diff --git a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
index 02ec357dd..39103e472 100644
--- a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
+++ b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
@@ -129,7 +129,7 @@ def run_time_decoding(
if cfg.decoding_time_generalization and decim > 1:
epochs.decimate(decim, verbose="error")
- X = epochs.get_data()
+ X = epochs.get_data(picks="data") # omit bad channels
y = np.r_[np.ones(n_cond1), np.zeros(n_cond2)]
# ProgressBar does not work on dask, so only enable it if not using dask
verbose = get_parallel_backend_name(exec_params=exec_params) != "dask"
diff --git a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
index 75b24a854..97304924d 100644
--- a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
+++ b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
@@ -254,7 +254,7 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
epochs=epochs, contrast=contrast, fmin=fmin, fmax=fmax, cfg=cfg
)
# Get the data for all time points
- X = epochs_filt.get_data()
+ X = epochs_filt.get_data(picks="data") # omit bad channels
# We apply PCA before running CSP:
# - much faster CSP processing
@@ -327,7 +327,7 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
if tmax is not None: # avoid warnings about outside the interval
tmax = min(tmax, epochs_filt.times[-1])
epochs_filt.crop(tmin, tmax)
- X = epochs_filt.get_data()
+ X = epochs_filt.get_data(picks="data") # omit bad channels
X_pca = pca.transform(X)
del X
From 08ee388c0e4efb4c2087cd27179169e606056495 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 20 Nov 2023 21:53:48 +0100
Subject: [PATCH 044/132] [pre-commit.ci] pre-commit autoupdate (#818)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e9ddf043a..0775526de 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,14 +6,14 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/psf/black
- rev: 23.10.1
+ rev: 23.11.0
hooks:
- id: black
args:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.4
+ rev: v0.1.6
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
@@ -23,7 +23,7 @@ repos:
additional_dependencies:
- tomli
- repo: https://github.com/adrienverge/yamllint.git
- rev: v1.32.0
+ rev: v1.33.0
hooks:
- id: yamllint
args: [--strict]
From 87a64b10e431176760673a2b09fe0cebf641e5f8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Tue, 21 Nov 2023 22:47:57 +0100
Subject: [PATCH 045/132] Allow decoding using epochs at different stages of
pre-processing (#819)
---
docs/source/settings/sensor/mvpa.md | 2 ++
docs/source/v1.5.md.inc | 2 ++
mne_bids_pipeline/_config.py | 35 +++++++++++++++++--
mne_bids_pipeline/_config_utils.py | 13 +++++++
.../steps/sensor/_02_decoding_full_epochs.py | 5 ++-
.../steps/sensor/_03_decoding_time_by_time.py | 6 ++--
.../steps/sensor/_05_decoding_csp.py | 5 ++-
7 files changed, 62 insertions(+), 6 deletions(-)
diff --git a/docs/source/settings/sensor/mvpa.md b/docs/source/settings/sensor/mvpa.md
index 8131cd428..3a56d22d7 100644
--- a/docs/source/settings/sensor/mvpa.md
+++ b/docs/source/settings/sensor/mvpa.md
@@ -4,12 +4,14 @@ tags:
- evoked
- contrast
- decoding
+ - mvpa
---
::: mne_bids_pipeline._config
options:
members:
- decode
+ - decoding_which_epochs
- decoding_epochs_tmin
- decoding_epochs_tmax
- decoding_metric
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index d1def9b4f..c11e79e6b 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -22,6 +22,7 @@ All users are encouraged to update.
- Added support for "local" [`autoreject`](https://autoreject.github.io) to remove bad epochs
before submitting the data to ICA fitting. This can be enabled by setting [`ica_reject`][mne_bids_pipeline._config.ica_reject]
to `"autoreject_local"`. (#810, #816 by @hoechenberger)
+- The new setting [`decoding_which_epochs`][mne_bids_pipeline._config.decoding_which_epochs] controls which epochs (e.g., uncleaned, after ICA/SSP, cleaned) shall be used for decoding. (#819 by @hoechenberger)
- Website documentation tables can now be sorted (e.g., to find examples that use a specific feature) (#808 by @larsoner)
[//]: # (### :warning: Behavior changes)
@@ -32,6 +33,7 @@ All users are encouraged to update.
we only apply `ica_reject` to the epochs used for ICA fitting. After the experimental epochs have been cleaned with ICA
(`preprocessing/apply_ica` step), any remaining large-amplitude artifacts can be removed via
[`reject`][mne_bids_pipeline._config.reject], which is used in the last preprocessing step, `preprocessing/ptp_reject`. (#806 by @hoechenberger)
+- MVPA / decoding used to be performed on un-cleaned epochs. Now, cleaned epochs are used by default (please also see the "Bug fixes" section below). (#796 by @hoechenberger)
[//]: # (- Whatever (#000 by @whoever))
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 807d0a495..cc3793ea9 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1465,10 +1465,41 @@
conditions.
"""
+decoding_which_epochs: Literal[
+ "uncleaned", "after_ica", "after_ssp", "cleaned"
+] = "cleaned"
+"""
+This setting controls which epochs will be fed into the decoding algorithms.
+
+!!! info
+    Decoding is a very powerful tool that can often deal with noisy data surprisingly
+    well. Depending on the specific type of data, artifacts, and analysis performed,
+    decoding performance may even improve with less pre-processed data, as
+    processing steps such as ICA or SSP may remove parts of the signal along with
+    the noise. By default, MNE-BIDS-Pipeline uses cleaned epochs for decoding,
+    but you may choose to use entirely uncleaned epochs, or epochs before the final
+    PTP-based rejection or Autoreject step.
+
+!!! info
+    All other sensor- and source-level processing steps are unaffected by this
+    setting and always use the cleaned epochs.
+
+If `"uncleaned"`, use the "raw" epochs before any ICA / SSP, PTP-based, or Autoreject
+cleaning (epochs with the filename `*_epo.fif`, without a `proc-` part).
+
+If `"after_ica"` or `"after_ssp"`, use the epochs that were cleaned via ICA or SSP, but
+before any follow-up cleaning through PTP-based rejection or Autoreject (epochs with the
+filename `*proc-ica_epo.fif` or `*proc-ssp_epo.fif`).
+
+If `"cleaned"`, use the epochs after ICA / SSP and the following cleaning through
+PTP-based rejection or Autoreject (epochs with the filename `*proc-clean_epo.fif`).
+"""
+
decoding_epochs_tmin: Optional[float] = 0.0
"""
The first time sample to use for full epochs decoding. By default it starts
-at 0. If `None`,, it starts at the beginning of the epoch.
+at 0. If `None`, it starts at the beginning of the epoch. Does not affect time-by-time
+decoding.
"""
decoding_epochs_tmax: Optional[float] = None
@@ -1488,7 +1519,7 @@
decoding_n_splits: int = 5
"""
-The number of folds (also called "splits") to use in the cross-validation
+The number of folds (also called "splits") to use in the K-fold cross-validation
scheme.
"""
diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py
index 35ed07512..3e7a1ec95 100644
--- a/mne_bids_pipeline/_config_utils.py
+++ b/mne_bids_pipeline/_config_utils.py
@@ -538,6 +538,19 @@ def get_decoding_contrasts(config: SimpleNamespace) -> Iterable[Tuple[str, str]]
return normalized_contrasts
+# Map _config.decoding_which_epochs to a BIDS proc- entity
+_EPOCHS_DESCRIPTION_TO_PROC_MAP = {
+ "uncleaned": None,
+ "after_ica": "ica",
+ "after_ssp": "ssp",
+ "cleaned": "clean",
+}
+
+
+def _get_decoding_proc(config: SimpleNamespace) -> Optional[str]:
+ return _EPOCHS_DESCRIPTION_TO_PROC_MAP[config.decoding_which_epochs]
+
+
def get_eeg_reference(
config: SimpleNamespace,
) -> Union[Literal["average"], Iterable[str]]:
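To see what this mapping produces in terms of filenames, a small hypothetical example (entity values invented for illustration):

```python
from mne_bids import BIDSPath

proc = {"uncleaned": None, "after_ica": "ica", "after_ssp": "ssp", "cleaned": "clean"}[
    "after_ica"
]
path = BIDSPath(
    subject="01", task="rest", processing=proc,
    suffix="epo", extension=".fif", datatype="eeg", check=False,
)
print(path.basename)  # sub-01_task-rest_proc-ica_epo.fif
```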
diff --git a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
index 3fe37aa30..81243bcd9 100644
--- a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
+++ b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
@@ -31,6 +31,7 @@
get_decoding_contrasts,
_bids_kwargs,
_restrict_analyze_channels,
+ _get_decoding_proc,
)
from ..._logging import gen_log_kwargs, logger
from ..._decoding import LogReg
@@ -52,6 +53,7 @@ def get_input_fnames_epochs_decoding(
condition1: str,
condition2: str,
) -> dict:
+ proc = _get_decoding_proc(config=cfg)
fname_epochs = BIDSPath(
subject=subject,
session=session,
@@ -60,7 +62,7 @@ def get_input_fnames_epochs_decoding(
run=None,
recording=cfg.rec,
space=cfg.space,
- processing="clean",
+ processing=proc,
suffix="epo",
extension=".fif",
datatype=cfg.datatype,
@@ -225,6 +227,7 @@ def get_config(
conditions=config.conditions,
contrasts=get_decoding_contrasts(config),
decode=config.decode,
+ decoding_which_epochs=config.decoding_which_epochs,
decoding_metric=config.decoding_metric,
decoding_epochs_tmin=config.decoding_epochs_tmin,
decoding_epochs_tmax=config.decoding_epochs_tmax,
diff --git a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
index 39103e472..b435cf6ae 100644
--- a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
+++ b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
@@ -35,6 +35,7 @@
get_decoding_contrasts,
_bids_kwargs,
_restrict_analyze_channels,
+ _get_decoding_proc,
)
from ..._decoding import LogReg
from ..._logging import gen_log_kwargs, logger
@@ -56,7 +57,7 @@ def get_input_fnames_time_decoding(
condition1: str,
condition2: str,
) -> dict:
- # TODO: Shouldn't this at least use the PTP-rejected epochs if available?
+ proc = _get_decoding_proc(config=cfg)
fname_epochs = BIDSPath(
subject=subject,
session=session,
@@ -65,7 +66,7 @@ def get_input_fnames_time_decoding(
run=None,
recording=cfg.rec,
space=cfg.space,
- processing="clean",
+ processing=proc,
suffix="epo",
extension=".fif",
datatype=cfg.datatype,
@@ -299,6 +300,7 @@ def get_config(
conditions=config.conditions,
contrasts=get_decoding_contrasts(config),
decode=config.decode,
+ decoding_which_epochs=config.decoding_which_epochs,
decoding_metric=config.decoding_metric,
decoding_n_splits=config.decoding_n_splits,
decoding_time_generalization=config.decoding_time_generalization,
diff --git a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
index 97304924d..1614854c1 100644
--- a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
+++ b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
@@ -23,6 +23,7 @@
get_decoding_contrasts,
_bids_kwargs,
_restrict_analyze_channels,
+ _get_decoding_proc,
)
from ..._decoding import LogReg, _handle_csp_args
from ..._logging import logger, gen_log_kwargs
@@ -113,6 +114,7 @@ def get_input_fnames_csp(
session: Optional[str],
contrast: Tuple[str],
) -> dict:
+ proc = _get_decoding_proc(config=cfg)
fname_epochs = BIDSPath(
subject=subject,
session=session,
@@ -121,7 +123,7 @@ def get_input_fnames_csp(
run=None,
recording=cfg.rec,
space=cfg.space,
- processing="clean",
+ processing=proc,
suffix="epo",
extension=".fif",
datatype=cfg.datatype,
@@ -531,6 +533,7 @@ def get_config(
time_frequency_freq_min=config.time_frequency_freq_min,
time_frequency_freq_max=config.time_frequency_freq_max,
time_frequency_subtract_evoked=config.time_frequency_subtract_evoked,
+ decoding_which_epochs=config.decoding_which_epochs,
decoding_metric=config.decoding_metric,
decoding_csp_freqs=config.decoding_csp_freqs,
decoding_csp_times=config.decoding_csp_times,
From 6dd86f937111733004afd2de6bcd7f0b7e1c9a80 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Thu, 30 Nov 2023 16:14:44 -0500
Subject: [PATCH 046/132] Prepare for 1.5.0 release (#821)
---
docs/source/v1.5.md.inc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index c11e79e6b..c5cdc0db6 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -1,4 +1,4 @@
-## v1.5.0 (unreleased)
+## v1.5.0 (2023-11-30)
This release contains a number of very important bug fixes that address problems related to decoding, time-frequency analysis, and inverse modeling.
All users are encouraged to update.
From bae0b2ee3c6e1941522b65b3bf9355ab172896d2 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Thu, 30 Nov 2023 16:20:21 -0500
Subject: [PATCH 047/132] FIX: PyQt6 (#822)
---
.circleci/config.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index aa0f22479..8962099c5 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -50,7 +50,7 @@ jobs:
pip install --upgrade --progress-bar off pip setuptools
pip install --upgrade --progress-bar off "autoreject @ https://api.github.com/repos/autoreject/autoreject/zipball/master" "mne[hdf5] @ git+https://github.com/mne-tools/mne-python@main" "mne-bids[full] @ https://api.github.com/repos/mne-tools/mne-bids/zipball/main" numba
pip install -ve .[tests]
- pip install PyQt6
+ pip install PyQt6 "PyQt6-Qt6!=6.6.1"
- run:
name: Check Qt
command: |
From bc6c166ec6c7b6c5f63e38b2907e8e08e3a89ae2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Thu, 7 Dec 2023 10:47:31 +0100
Subject: [PATCH 048/132] The "Behavior changes" heading was incorrectly
commented out in the changelog (#823)
---
docs/source/v1.5.md.inc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index c5cdc0db6..0c0d0b93a 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -25,7 +25,7 @@ All users are encouraged to update.
- The new setting [`decoding_which_epochs`][mne_bids_pipeline._config.decoding_which_epochs] controls which epochs (e.g., uncleaned, after ICA/SSP, cleaned) shall be used for decoding. (#819 by @hoechenberger)
- Website documentation tables can now be sorted (e.g., to find examples that use a specific feature) (#808 by @larsoner)
-[//]: # (### :warning: Behavior changes)
+### :warning: Behavior changes
- The default cache directory is now `_cache` within the derivatives folder when using `memory_location=True`, set [`memory_subdir="joblib"`][mne_bids_pipeline._config.memory_subdir] to get the behavior from v1.4 (#778 by @larsoner)
- Before cleaning epochs via ICA, we used to reject any epochs exceeding the [`ica_reject`][mne_bids_pipeline._config.ica_reject]
From cc63122548e180bc2c52bbf8eaf844bb344660fb Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Thu, 7 Dec 2023 15:36:51 -0500
Subject: [PATCH 049/132] MAINT: Fix CIs (#824)
---
.circleci/config.yml | 2 +-
mne_bids_pipeline/tests/conftest.py | 10 ++++++++++
2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 8962099c5..a4957b023 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -50,7 +50,7 @@ jobs:
pip install --upgrade --progress-bar off pip setuptools
pip install --upgrade --progress-bar off "autoreject @ https://api.github.com/repos/autoreject/autoreject/zipball/master" "mne[hdf5] @ git+https://github.com/mne-tools/mne-python@main" "mne-bids[full] @ https://api.github.com/repos/mne-tools/mne-bids/zipball/main" numba
pip install -ve .[tests]
- pip install PyQt6 "PyQt6-Qt6!=6.6.1"
+ pip install "PyQt6!=6.6.1" "PyQt6-Qt6!=6.6.1"
- run:
name: Check Qt
command: |
diff --git a/mne_bids_pipeline/tests/conftest.py b/mne_bids_pipeline/tests/conftest.py
index 295b2309a..97e380ffc 100644
--- a/mne_bids_pipeline/tests/conftest.py
+++ b/mne_bids_pipeline/tests/conftest.py
@@ -45,6 +45,16 @@ def pytest_configure(config):
ignore:All-NaN axis encountered.*:RuntimeWarning
# sklearn class not enough samples for cv=5
always:The least populated class in y has only.*:UserWarning
+ # constrained layout fails on ds003392
+ # mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py:551: in run_ica
+ # report.add_ica(
+ #../python_env/lib/python3.10/site-packages/mne/report/report.py:1974: in add_ica
+ # self._add_ica(
+ #../python_env/lib/python3.10/site-packages/mne/report/report.py:1872: in _add_ica
+ # self._add_ica_artifact_sources(
+ #../python_env/lib/python3.10/site-packages/mne/report/report.py:1713: in _add_ica_artifact_sources
+ # self._add_figure(
+ always:constrained_layout not applied.*:UserWarning
"""
for warning_line in warning_lines.split("\n"):
warning_line = warning_line.strip()
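For context, the `warning_lines` string extended in this hunk is parsed line by line and registered with pytest's `filterwarnings` mechanism, as the surrounding loop suggests. A self-contained sketch of that pattern (the warning patterns are examples taken from the hunk):

```python
# Sketch of the conftest.py pattern: each non-comment line uses pytest's
# standard "action:message_regex:category" filterwarnings syntax.
def pytest_configure(config):
    warning_lines = r"""
    always:The least populated class in y has only.*:UserWarning
    always:constrained_layout not applied.*:UserWarning
    """
    for warning_line in warning_lines.split("\n"):
        warning_line = warning_line.strip()
        if warning_line and not warning_line.startswith("#"):
            config.addinivalue_line("filterwarnings", warning_line)
```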
From a6995abc39fab333ab957baa45b0026bdb12a3f9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Fri, 8 Dec 2023 23:52:17 +0100
Subject: [PATCH 050/132] Switch build backend to hatchling (#825)
---
.circleci/config.yml | 2 +-
.git_archival.txt | 4 +
.gitattributes | 1 +
.github/workflows/run-tests.yml | 4 +-
docs/source/changes.md | 2 +
docs/source/v1.6.md.inc | 23 +++++
docs/source/vX.Y.md.inc | 6 +-
pyproject.toml | 150 ++++++++++++++++----------------
8 files changed, 114 insertions(+), 78 deletions(-)
create mode 100644 .git_archival.txt
create mode 100644 .gitattributes
create mode 100644 docs/source/v1.6.md.inc
diff --git a/.circleci/config.yml b/.circleci/config.yml
index a4957b023..62e687cba 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -47,7 +47,7 @@ jobs:
- run:
name: Get Python running
command: |
- pip install --upgrade --progress-bar off pip setuptools
+ pip install --upgrade --progress-bar off pip
pip install --upgrade --progress-bar off "autoreject @ https://api.github.com/repos/autoreject/autoreject/zipball/master" "mne[hdf5] @ git+https://github.com/mne-tools/mne-python@main" "mne-bids[full] @ https://api.github.com/repos/mne-tools/mne-bids/zipball/main" numba
pip install -ve .[tests]
pip install "PyQt6!=6.6.1" "PyQt6-Qt6!=6.6.1"
diff --git a/.git_archival.txt b/.git_archival.txt
new file mode 100644
index 000000000..8fb235d70
--- /dev/null
+++ b/.git_archival.txt
@@ -0,0 +1,4 @@
+node: $Format:%H$
+node-date: $Format:%cI$
+describe-name: $Format:%(describe:tags=true,match=*[0-9]*)$
+ref-names: $Format:%D$
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 000000000..00a7b00c9
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+.git_archival.txt export-subst
diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
index 7f7ae75ea..eba837d23 100644
--- a/.github/workflows/run-tests.yml
+++ b/.github/workflows/run-tests.yml
@@ -29,8 +29,8 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- - run: pip install --upgrade setuptools[toml] pip
- - run: pip install --no-build-isolation -ve .[tests]
+ - run: pip install --upgrade pip
+ - run: pip install -ve .[tests]
- run: pytest mne_bids_pipeline -m "not dataset_test"
- uses: codecov/codecov-action@v3
if: success()
diff --git a/docs/source/changes.md b/docs/source/changes.md
index 3c38f18da..a64ada4b7 100644
--- a/docs/source/changes.md
+++ b/docs/source/changes.md
@@ -1,3 +1,5 @@
+{% include-markdown "./v1.6.md.inc" %}
+
{% include-markdown "./v1.5.md.inc" %}
{% include-markdown "./v1.4.md.inc" %}
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
new file mode 100644
index 000000000..837058f17
--- /dev/null
+++ b/docs/source/v1.6.md.inc
@@ -0,0 +1,23 @@
+[//]: # (Don't forget to add this to changes.md as an include!)
+
+## vX.Y.0 (unreleased)
+
+[//]: # (### :new: New features & enhancements)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :warning: Behavior changes)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+### :package: Requirements
+
+- MNE-BIDS-Pipeline now requires Python 3.9 or newer. (#825 by @hoechenberger)
+
+[//]: # (### :bug: Bug fixes)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+### :medical_symbol: Code health
+
+- The package build backend has been switched from `setuptools` to `hatchling`. (#825 by @hoechenberger)
diff --git a/docs/source/vX.Y.md.inc b/docs/source/vX.Y.md.inc
index ea88c02c5..36bf65f57 100644
--- a/docs/source/vX.Y.md.inc
+++ b/docs/source/vX.Y.md.inc
@@ -10,10 +10,14 @@
[//]: # (- Whatever (#000 by @whoever))
-[//]: # (### :medical_symbol: Code health)
+[//]: # (### :package: Requirements)
[//]: # (- Whatever (#000 by @whoever))
[//]: # (### :bug: Bug fixes)
[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :medical_symbol: Code health)
+
+[//]: # (- Whatever (#000 by @whoever))
diff --git a/pyproject.toml b/pyproject.toml
index 5d217e36d..21fc0671a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,78 +1,79 @@
+[build-system]
+requires = ["hatchling", "hatch-vcs"]
+build-backend = "hatchling.build"
+
[project]
name = "mne-bids-pipeline"
# Keep in sync with README.md:
description = "A full-flegded processing pipeline for your MEG and EEG data"
readme = "README.md"
-requires-python = ">=3.8"
-license = {file = "LICENSE.txt"}
+requires-python = ">=3.9"
+license = { file = "LICENSE.txt" }
keywords = ["science", "neuroscience", "psychology"]
authors = [
- {name = "Eric Larson"},
- {name = "Alexandre Gramfort"},
- {name = "Mainak Jas"},
- {name = "Richard Höchenberger", email = "richard.hoechenberger@gmail.com"},
+ { name = "Eric Larson" },
+ { name = "Alexandre Gramfort" },
+ { name = "Mainak Jas" },
+ { name = "Richard Höchenberger", email = "richard.hoechenberger@gmail.com" },
]
classifiers = [
"Intended Audience :: Science/Research",
- "Programming Language :: Python"
+ "Programming Language :: Python",
]
dependencies = [
- "typing_extensions; python_version < '3.8'",
- "importlib_metadata; python_version < '3.8'",
- "psutil", # for joblib
- "packaging",
- "numpy",
- "scipy",
- "matplotlib",
- "nibabel",
- "joblib >= 0.14",
- "threadpoolctl",
- "dask[distributed]",
- "bokeh < 3", # for distributed dashboard
- "jupyter-server-proxy", # to have dask and jupyter working together
- "scikit-learn",
- "pandas",
- "seaborn",
- "json_tricks",
- "pydantic >= 2.0.0",
- "rich",
- "python-picard",
- "qtpy",
- "pyvista",
- "pyvistaqt",
- "openpyxl",
- "autoreject",
- "mne[hdf5] >=1.2",
- "mne-bids[full]",
- "filelock",
- "setuptools >=65",
+ "psutil", # for joblib
+ "packaging",
+ "numpy",
+ "scipy",
+ "matplotlib",
+ "nibabel",
+ "joblib >= 0.14",
+ "threadpoolctl",
+ "dask[distributed]",
+ "bokeh < 3", # for distributed dashboard
+ "jupyter-server-proxy", # to have dask and jupyter working together
+ "scikit-learn",
+ "pandas",
+ "seaborn",
+ "json_tricks",
+ "pydantic >= 2.0.0",
+ "rich",
+ "python-picard",
+ "qtpy",
+ "pyvista",
+ "pyvistaqt",
+ "openpyxl",
+ "autoreject",
+ "mne[hdf5] >=1.2",
+ "mne-bids[full]",
+ "filelock",
]
dynamic = ["version"]
[project.optional-dependencies]
tests = [
- "pytest",
- "pytest-cov",
- "pooch",
- "psutil",
- "datalad",
- "ruff",
- "mkdocs",
- "mkdocs-material >= 9.0.4",
- "mkdocs-material-extensions",
- "mkdocs-macros-plugin",
- "mkdocs-include-markdown-plugin",
- "mkdocs-exclude",
- "mkdocstrings-python",
- "mike",
- "jinja2",
- "black", # function signature formatting
- "livereload",
- "openneuro-py >= 2022.2.0",
- "httpx >= 0.20",
- "tqdm",
- "Pygments",
- "pyyaml",
+ "pytest",
+ "pytest-cov",
+ "pooch",
+ "psutil",
+ "datalad",
+ "ruff",
+ "mkdocs",
+ "mkdocs-material >= 9.0.4",
+ "mkdocs-material-extensions",
+ "mkdocs-macros-plugin",
+ "mkdocs-include-markdown-plugin",
+ "mkdocs-exclude",
+ "mkdocstrings-python",
+ "mike",
+ "jinja2",
+ "black", # function signature formatting
+ "livereload",
+ "openneuro-py >= 2022.2.0",
+ "httpx >= 0.20",
+ "tqdm",
+ "Pygments",
+ "pyyaml",
]
[project.scripts]
@@ -83,19 +84,22 @@ homepage = "https://mne.tools/mne-bids-pipeline"
repository = "https://github.com/mne-tools/mne-bids-pipeline"
changelog = "http://mne.tools/mne-bids-pipeline/changes.html"
-[build-system]
-requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2", "wheel"]
-build-backend = "setuptools.build_meta"
-
-[tool.setuptools_scm]
-tag_regex = "^(?P<prefix>v)?(?P<version>[0-9.]+)(?P<suffix>.*)?$"
-version_scheme = "release-branch-semver"
+[tool.hatch.version]
+source = "vcs"
+raw-options = { version_scheme = "release-branch-semver" }
-[tool.setuptools.packages.find]
-exclude = ["false"] # on CircleCI this folder appears during pip install -ve. for an unknown reason
-
-[tool.setuptools.package-data]
-"mne_bids_pipeline.steps.freesurfer.contrib" = ["version"]
+[tool.hatch.build]
+exclude = [
+ "/.*",
+ "/codecov.yml",
+ "**/tests",
+ "/docs",
+ "/docs/source/examples/gen_examples.py", # specify explicitly because its exclusion is negated in .gitignore
+ "/Makefile",
+ "/BUILDING.md",
+ "/CONTRIBUTING.md",
+ "ignore_words.txt",
+]
[tool.codespell]
skip = "docs/site/*,*.html,steps/freesurfer/contrib/*"
@@ -108,13 +112,11 @@ count = ""
[tool.pytest.ini_options]
addopts = "-ra -vv --tb=short --cov=mne_bids_pipeline --cov-report= --junit-xml=junit-results.xml --durations=10"
-testpaths = [
- "mne_bids_pipeline",
-]
+testpaths = ["mne_bids_pipeline"]
junit_family = "xunit2"
[tool.ruff]
-exclude = ["**/freesurfer/contrib", "dist/" , "build/"]
+exclude = ["**/freesurfer/contrib", "dist/", "build/"]
[tool.black]
exclude = "(.*/freesurfer/contrib/.*)|(dist/)|(build/)"
From c624a10153af91ac45216fb6783af31bead7ed5d Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 11 Dec 2023 21:11:54 +0100
Subject: [PATCH 051/132] [pre-commit.ci] pre-commit autoupdate (#826)
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 0775526de..2ef1e2b38 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,7 +13,7 @@ repos:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.6
+ rev: v0.1.7
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From 00daafe81afa9f5a2b16f9cb836a95a193887887 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 18 Dec 2023 21:21:51 +0100
Subject: [PATCH 052/132] [pre-commit.ci] pre-commit autoupdate (#827)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2ef1e2b38..a0eea24c0 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,14 +6,14 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/psf/black
- rev: 23.11.0
+ rev: 23.12.0
hooks:
- id: black
args:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.7
+ rev: v0.1.8
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From 885fd15146fa71d157169b1538f09a1903cbc807 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 25 Dec 2023 22:30:07 +0100
Subject: [PATCH 053/132] [pre-commit.ci] pre-commit autoupdate (#828)
---
.pre-commit-config.yaml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a0eea24c0..ef1dc332b 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,14 +6,14 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/psf/black
- rev: 23.12.0
+ rev: 23.12.1
hooks:
- id: black
args:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.8
+ rev: v0.1.9
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From ffab60aecf9389d5f0e4eb65a38f8d4b9070feb6 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 8 Jan 2024 21:37:47 +0100
Subject: [PATCH 054/132] [pre-commit.ci] pre-commit autoupdate (#829)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ef1dc332b..81e71646f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,7 +13,7 @@ repos:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.9
+ rev: v0.1.11
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From 9055e9cf5e4b7b02d12491f24597897c77f50831 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 15 Jan 2024 15:14:16 -0500
Subject: [PATCH 055/132] [pre-commit.ci] pre-commit autoupdate (#830)
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 81e71646f..cbd9b0a06 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,7 +13,7 @@ repos:
- --safe
- --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.11
+ rev: v0.1.13
hooks:
- id: ruff
- repo: https://github.com/codespell-project/codespell
From 860c5c454475cf6b0442a1c8437a2cf10b0d6d9e Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Wed, 24 Jan 2024 11:01:32 -0500
Subject: [PATCH 056/132] FIX: Paths and pyarrow (#834)
---
.pre-commit-config.yaml | 10 ++--------
docs/source/v1.6.md.inc | 5 +++--
mne_bids_pipeline/_config_utils.py | 10 +++++++---
mne_bids_pipeline/_main.py | 4 ++--
.../steps/preprocessing/_04_frequency_filter.py | 6 +++++-
mne_bids_pipeline/steps/sensor/_99_group_average.py | 3 ++-
mne_bids_pipeline/tests/configs/config_ds003392.py | 2 +-
mne_bids_pipeline/tests/datasets.py | 2 ++
mne_bids_pipeline/tests/sub-010_ses-t1_scans.tsv | 2 ++
mne_bids_pipeline/tests/test_run.py | 9 +++++++++
pyproject.toml | 1 +
11 files changed, 36 insertions(+), 18 deletions(-)
create mode 100644 mne_bids_pipeline/tests/sub-010_ses-t1_scans.tsv
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index cbd9b0a06..363ef00c2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -5,17 +5,11 @@ files: ^(.*\.(py|yaml))$
# for example
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- - repo: https://github.com/psf/black
- rev: 23.12.1
- hooks:
- - id: black
- args:
- - --safe
- - --quiet
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.13
+ rev: v0.1.14
hooks:
- id: ruff
+ - id: ruff-format
- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
hooks:
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 837058f17..4ee244b1d 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -14,10 +14,11 @@
- MNE-BIDS-Pipeline now requires Python 3.9 or newer. (#825 by @hoechenberger)
-[//]: # (### :bug: Bug fixes)
+### :bug: Bug fixes
-[//]: # (- Whatever (#000 by @whoever))
+- Fix minor issues with path handling for cross-talk and calibration files (#834 by @larsoner)
### :medical_symbol: Code health
- The package build backend has been switched from `setuptools` to `hatchling`. (#825 by @hoechenberger)
+- Code formatting now uses `ruff format` instead of `black` (#834 by @larsoner)
diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py
index 3e7a1ec95..6f7ce1f0d 100644
--- a/mne_bids_pipeline/_config_utils.py
+++ b/mne_bids_pipeline/_config_utils.py
@@ -353,15 +353,19 @@ def get_mf_cal_fname(
*, config: SimpleNamespace, subject: str, session: str
) -> pathlib.Path:
if config.mf_cal_fname is None:
- mf_cal_fpath = BIDSPath(
+ bids_path = BIDSPath(
subject=subject,
session=session,
suffix="meg",
datatype="meg",
root=config.bids_root,
- ).meg_calibration_fpath
+ ).match()[0]
+ mf_cal_fpath = bids_path.meg_calibration_fpath
if mf_cal_fpath is None:
- raise ValueError("Could not find Maxwell Filter Calibration file.")
+ raise ValueError(
+ "Could not determine Maxwell Filter Calibration file from BIDS "
+ f"definition for file {bids_path}."
+ )
else:
mf_cal_fpath = pathlib.Path(config.mf_cal_fname).expanduser().absolute()
if not mf_cal_fpath.exists():
diff --git a/mne_bids_pipeline/_main.py b/mne_bids_pipeline/_main.py
index 9489a2cca..ddbb49c6a 100755
--- a/mne_bids_pipeline/_main.py
+++ b/mne_bids_pipeline/_main.py
@@ -37,7 +37,7 @@ def main():
metavar="FILE",
help="Create a template configuration file with the specified name. "
"If specified, all other parameters will be ignored.",
- ),
+ )
parser.add_argument(
"--steps",
dest="steps",
@@ -70,7 +70,7 @@ def main():
If unspecified, this will be derivatives/mne-bids-pipeline
inside the BIDS root."""
),
- ),
+ )
parser.add_argument(
"--subject", dest="subject", default=None, help="The subject to process."
)
diff --git a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
index b60543121..d026539ee 100644
--- a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
@@ -173,7 +173,7 @@ def filter_data(
raw = import_er_data(
cfg=cfg,
bids_path_er_in=bids_path_in,
- bids_path_ref_in=in_files.pop("raw_ref_run"),
+ bids_path_ref_in=in_files.pop("raw_ref_run", None),
bids_path_er_bads_in=bids_path_bads_in,
# take bads from this run (0)
bids_path_ref_bads_in=in_files.pop("raw_ref_run-bads", None),
@@ -196,6 +196,7 @@ def filter_data(
split=None,
task=task,
run=run,
+ check=False,
)
raw.load_data()
@@ -232,6 +233,9 @@ def filter_data(
run_type=run_type,
)
+ # For example, might need to create
+ # derivatives/mne-bids-pipeline/sub-emptyroom/ses-20230412/meg
+ out_files[in_key].fpath.parent.mkdir(exist_ok=True, parents=True)
raw.save(
out_files[in_key],
overwrite=True,
diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py
index a05a85a96..98e275336 100644
--- a/mne_bids_pipeline/steps/sensor/_99_group_average.py
+++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py
@@ -814,7 +814,8 @@ def average_csp_decoding(
import scipy.stats
cluster_forming_t_threshold = scipy.stats.t.ppf(
- 1 - 0.05, len(cfg.subjects) - 1 # one-sided test
+ 1 - 0.05,
+ len(cfg.subjects) - 1, # one-sided test
)
else:
cluster_forming_t_threshold = cfg.cluster_forming_t_threshold
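The reformatted call above computes a one-sided cluster-forming threshold: the critical t-value at alpha = 0.05 with `len(cfg.subjects) - 1` degrees of freedom. A quick worked example with a hypothetical 20-subject study:

```python
import scipy.stats

n_subjects = 20  # hypothetical
cluster_forming_t_threshold = scipy.stats.t.ppf(
    1 - 0.05,        # one-sided test at alpha = 0.05
    n_subjects - 1,  # degrees of freedom
)
print(round(cluster_forming_t_threshold, 3))  # 1.729 for df = 19
```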
diff --git a/mne_bids_pipeline/tests/configs/config_ds003392.py b/mne_bids_pipeline/tests/configs/config_ds003392.py
index edc30228f..0decbacc9 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003392.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003392.py
@@ -21,7 +21,7 @@
# Artifact correction.
spatial_filter = "ica"
ica_algorithm = "picard-extended_infomax"
-ica_max_iterations = 500
+ica_max_iterations = 1000
ica_l_freq = 1.0
ica_n_components = 0.99
ica_reject_components = "auto"
diff --git a/mne_bids_pipeline/tests/datasets.py b/mne_bids_pipeline/tests/datasets.py
index 60ace0c48..b50454251 100644
--- a/mne_bids_pipeline/tests/datasets.py
+++ b/mne_bids_pipeline/tests/datasets.py
@@ -85,6 +85,8 @@ class DATASET_OPTIONS_T(TypedDict, total=False):
"ds003775": {
"openneuro": "ds003775",
"include": ["sub-010"],
+ # See https://github.com/OpenNeuroOrg/openneuro/issues/2976
+ "exclude": ["sub-010/ses-t1/sub-010_ses-t1_scans.tsv"],
},
"ds001810": {
"openneuro": "ds001810",
diff --git a/mne_bids_pipeline/tests/sub-010_ses-t1_scans.tsv b/mne_bids_pipeline/tests/sub-010_ses-t1_scans.tsv
new file mode 100644
index 000000000..54b711284
--- /dev/null
+++ b/mne_bids_pipeline/tests/sub-010_ses-t1_scans.tsv
@@ -0,0 +1,2 @@
+filename acq_time
+eeg/sub-010_ses-t1_task-resteyesc_eeg.edf 2017-05-09T12:11:44
diff --git a/mne_bids_pipeline/tests/test_run.py b/mne_bids_pipeline/tests/test_run.py
index eb07233b1..b394d6f0b 100644
--- a/mne_bids_pipeline/tests/test_run.py
+++ b/mne_bids_pipeline/tests/test_run.py
@@ -169,6 +169,15 @@ def test_run(dataset, monkeypatch, dataset_test, capsys, tmp_path):
src=fix_path / "ds001971_participants.tsv",
dst=DATA_DIR / "ds001971" / "participants.tsv",
)
+ elif dataset == "ds003775":
+ shutil.copy(
+ src=fix_path / "sub-010_ses-t1_scans.tsv",
+ dst=DATA_DIR
+ / "ds003775"
+ / "sub-010"
+ / "ses-t1"
+ / "sub-010_ses-t1_scans.tsv",
+ )
# Run the tests.
steps = test_options.get("steps", ("preprocessing", "sensor"))
diff --git a/pyproject.toml b/pyproject.toml
index 21fc0671a..c576c3710 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -34,6 +34,7 @@ dependencies = [
"jupyter-server-proxy", # to have dask and jupyter working together
"scikit-learn",
"pandas",
+ "pyarrow", # from pandas
"seaborn",
"json_tricks",
"pydantic >= 2.0.0",
From 742e27eeff0bba67a20b2fcdb13e529c54b0bd70 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Wed, 24 Jan 2024 13:04:33 -0500
Subject: [PATCH 057/132] MAINT: Test result caching (#836)
---
.github/workflows/run-tests.yml | 68 ++++++++++++++++++++++-------
Makefile | 4 --
docs/source/v1.6.md.inc | 1 +
mne_bids_pipeline/_logging.py | 9 +++-
mne_bids_pipeline/tests/conftest.py | 2 +
pyproject.toml | 1 -
6 files changed, 63 insertions(+), 22 deletions(-)
diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
index eba837d23..998e597bb 100644
--- a/.github/workflows/run-tests.yml
+++ b/.github/workflows/run-tests.yml
@@ -6,22 +6,8 @@ concurrency:
on: [push, pull_request]
jobs:
- check-style:
- name: Style
- runs-on: "ubuntu-latest"
- defaults:
- run:
- shell: bash -l {0}
- steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
- - name: Install ruff and codespell
- run: pip install ruff codespell tomli
- - run: make ruff
- - run: make codespell-error
- - uses: psf/black@stable
check-doc:
- name: Doc consistency
+ name: Doc consistency and codespell
runs-on: ubuntu-latest
defaults:
run:
@@ -30,8 +16,58 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- run: pip install --upgrade pip
- - run: pip install -ve .[tests]
+ - run: pip install -ve .[tests] codespell tomli
+ - run: make codespell-error
- run: pytest mne_bids_pipeline -m "not dataset_test"
- uses: codecov/codecov-action@v3
if: success()
name: 'Upload coverage to CodeCov'
+ caching:
+ name: 'Caching on ${{ matrix.os }}'
+ timeout-minutes: 30
+ continue-on-error: true
+ runs-on: ${{ matrix.os }}
+ defaults:
+ run:
+ shell: bash -el {0}
+ strategy:
+ matrix:
+ include:
+ - os: ubuntu-latest
+ - os: macos-latest
+ - os: windows-latest
+ env:
+ MNE_BIDS_PIPELINE_LEGACY_WINDOWS: "false"
+ PYTHONIOENCODING: 'utf8' # for Windows
+ steps:
+ - uses: actions/checkout@v4
+ - uses: pyvista/setup-headless-display-action@main
+ with:
+ qt: true
+ pyvista: false
+ - uses: actions/setup-python@v5
+ with:
+ python-version: "3.11" # no "multidict" wheels on 3.12 yet
+ - run: pip install -ve .[tests]
+ - uses: actions/cache@v4
+ with:
+ key: ds001971
+ path: ~/mne_data/ds001971
+ id: ds001971-cache
+ - run: python -m mne_bids_pipeline._download ds001971
+ if: steps.ds001971-cache.outputs.cache-hit != 'true'
+ - run: pytest --cov-append -k ds001971 mne_bids_pipeline/
+ - run: pytest --cov-append -k ds001971 mne_bids_pipeline/ # uses "hash" method
+ timeout-minutes: 1
+ - uses: actions/cache@v4
+ with:
+ key: ds003392
+ path: ~/mne_data/ds003392
+ id: ds003392-cache
+ - run: python -m mne_bids_pipeline._download ds003392
+ if: steps.ds003392-cache.outputs.cache-hit != 'true'
+ - run: pytest --cov-append -k ds003392 mne_bids_pipeline/
+ - run: pytest --cov-append -k ds003392 mne_bids_pipeline/ # uses "mtime" method
+ timeout-minutes: 1
+ - uses: codecov/codecov-action@v3
+ if: success()
diff --git a/Makefile b/Makefile
index 8af267201..4e491526b 100644
--- a/Makefile
+++ b/Makefile
@@ -33,10 +33,6 @@ check:
trailing-spaces:
find . -name "*.py" | xargs perl -pi -e 's/[ \t]*$$//'
-ruff:
- ruff .
- @echo "ruff passed"
-
codespell: # running manually; auto-fix spelling mistakes
@codespell --write-changes $(CODESPELL_DIRS)
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 4ee244b1d..026401106 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -22,3 +22,4 @@
- The package build backend has been switched from `setuptools` to `hatchling`. (#825 by @hoechenberger)
- Code formatting now uses `ruff format` instead of `black` (#834 by @larsoner)
+- Code caching is now tested using GitHub Actions (#836 by @larsoner)
diff --git a/mne_bids_pipeline/_logging.py b/mne_bids_pipeline/_logging.py
index 6bcb21d73..931ee393d 100644
--- a/mne_bids_pipeline/_logging.py
+++ b/mne_bids_pipeline/_logging.py
@@ -27,7 +27,14 @@ def _console(self):
force_terminal = os.getenv("MNE_BIDS_PIPELINE_FORCE_TERMINAL", None)
if force_terminal is not None:
force_terminal = force_terminal.lower() in ("true", "1")
- kwargs = dict(soft_wrap=True, force_terminal=force_terminal)
+ legacy_windows = os.getenv("MNE_BIDS_PIPELINE_LEGACY_WINDOWS", None)
+ if legacy_windows is not None:
+ legacy_windows = legacy_windows.lower() in ("true", "1")
+ kwargs = dict(
+ soft_wrap=True,
+ force_terminal=force_terminal,
+ legacy_windows=legacy_windows,
+ )
kwargs["theme"] = rich.theme.Theme(
dict(
default="white",
diff --git a/mne_bids_pipeline/tests/conftest.py b/mne_bids_pipeline/tests/conftest.py
index 97e380ffc..fa2014634 100644
--- a/mne_bids_pipeline/tests/conftest.py
+++ b/mne_bids_pipeline/tests/conftest.py
@@ -55,6 +55,8 @@ def pytest_configure(config):
#../python_env/lib/python3.10/site-packages/mne/report/report.py:1713: in _add_ica_artifact_sources
# self._add_figure(
always:constrained_layout not applied.*:UserWarning
+ ignore:datetime\.datetime\.utcfromtimestamp.*:DeprecationWarning
+ ignore:datetime\.datetime\.utcnow.*:DeprecationWarning
"""
for warning_line in warning_lines.split("\n"):
warning_line = warning_line.strip()
diff --git a/pyproject.toml b/pyproject.toml
index c576c3710..c3c5dbb2b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -68,7 +68,6 @@ tests = [
"mkdocstrings-python",
"mike",
"jinja2",
- "black", # function signature formatting
"livereload",
"openneuro-py >= 2022.2.0",
"httpx >= 0.20",
From cc93c660e928019435842ab776059c096bab93c6 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Fri, 26 Jan 2024 16:19:06 -0500
Subject: [PATCH 058/132] MAINT: Enable more ruff rules (#838)
---
.github/workflows/run-tests.yml | 2 +
.pre-commit-config.yaml | 1 +
docs/hooks.py | 6 +-
docs/source/examples/gen_examples.py | 14 +++--
docs/source/features/gen_steps.py | 1 +
docs/source/v1.6.md.inc | 2 +-
mne_bids_pipeline/__init__.py | 2 +-
mne_bids_pipeline/_config.py | 55 ++++++++---------
mne_bids_pipeline/_config_import.py | 17 +++---
mne_bids_pipeline/_config_template.py | 8 +--
mne_bids_pipeline/_config_utils.py | 61 +++++++++----------
mne_bids_pipeline/_decoding.py | 3 +-
mne_bids_pipeline/_download.py | 4 +-
mne_bids_pipeline/_import_data.py | 27 ++++----
mne_bids_pipeline/_io.py | 2 +-
mne_bids_pipeline/_main.py | 9 ++-
mne_bids_pipeline/_parallel.py | 7 ++-
mne_bids_pipeline/_reject.py | 9 +--
mne_bids_pipeline/_report.py | 17 +++---
mne_bids_pipeline/_run.py | 16 ++---
mne_bids_pipeline/_viz.py | 5 +-
.../steps/freesurfer/_01_recon_all.py | 4 +-
.../steps/freesurfer/_02_coreg_surfaces.py | 10 +--
.../steps/freesurfer/__init__.py | 3 +-
.../steps/init/_01_init_derivatives_dir.py | 4 +-
.../steps/init/_02_find_empty_room.py | 18 +++---
mne_bids_pipeline/steps/init/__init__.py | 3 +-
.../steps/preprocessing/_01_data_quality.py | 27 ++++----
.../steps/preprocessing/_02_head_pos.py | 12 ++--
.../steps/preprocessing/_03_maxfilter.py | 24 ++++----
.../preprocessing/_04_frequency_filter.py | 17 +++---
.../steps/preprocessing/_05_make_epochs.py | 16 ++---
.../steps/preprocessing/_06a_run_ica.py | 24 ++++----
.../steps/preprocessing/_06b_run_ssp.py | 14 ++---
.../steps/preprocessing/_07a_apply_ica.py | 13 ++--
.../steps/preprocessing/_07b_apply_ssp.py | 6 +-
.../steps/preprocessing/_08_ptp_reject.py | 9 ++-
.../steps/preprocessing/__init__.py | 22 ++++---
.../steps/sensor/_01_make_evoked.py | 18 +++---
.../steps/sensor/_02_decoding_full_epochs.py | 29 ++++-----
.../steps/sensor/_03_decoding_time_by_time.py | 25 ++++----
.../steps/sensor/_04_time_frequency.py | 12 ++--
.../steps/sensor/_05_decoding_csp.py | 44 +++++++------
.../steps/sensor/_06_make_cov.py | 18 +++---
.../steps/sensor/_99_group_average.py | 39 ++++++------
mne_bids_pipeline/steps/sensor/__init__.py | 16 ++---
.../steps/source/_01_make_bem_surfaces.py | 12 ++--
.../steps/source/_02_make_bem_solution.py | 8 +--
.../steps/source/_03_setup_source_space.py | 6 +-
.../steps/source/_04_make_forward.py | 17 +++---
.../steps/source/_05_make_inverse.py | 16 ++---
.../steps/source/_99_group_average.py | 13 ++--
mne_bids_pipeline/steps/source/__init__.py | 14 +++--
.../tests/configs/config_ERP_CORE.py | 6 +-
.../tests/configs/config_ds000117.py | 4 +-
.../tests/configs/config_ds000246.py | 3 +-
.../tests/configs/config_ds000247.py | 5 +-
.../configs/config_ds000248_FLASH_BEM.py | 4 +-
.../tests/configs/config_ds000248_T1_BEM.py | 4 +-
.../tests/configs/config_ds000248_base.py | 4 +-
.../configs/config_ds000248_coreg_surfaces.py | 4 +-
.../tests/configs/config_ds000248_ica.py | 4 +-
.../tests/configs/config_ds000248_no_mri.py | 4 +-
.../tests/configs/config_ds001810.py | 4 +-
.../tests/configs/config_ds003104.py | 3 +-
.../tests/configs/config_ds003392.py | 4 +-
.../tests/configs/config_ds003775.py | 4 +-
.../tests/configs/config_ds004107.py | 3 +-
.../tests/configs/config_ds004229.py | 3 +-
.../configs/config_eeg_matchingpennies.py | 4 +-
mne_bids_pipeline/tests/conftest.py | 3 +-
mne_bids_pipeline/tests/datasets.py | 8 +--
mne_bids_pipeline/tests/test_cli.py | 2 +
mne_bids_pipeline/tests/test_documented.py | 13 ++--
mne_bids_pipeline/tests/test_run.py | 13 ++--
mne_bids_pipeline/tests/test_validation.py | 1 +
mne_bids_pipeline/typing.py | 11 ++--
pyproject.toml | 14 ++++-
78 files changed, 444 insertions(+), 469 deletions(-)
diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
index 998e597bb..43e692304 100644
--- a/.github/workflows/run-tests.yml
+++ b/.github/workflows/run-tests.yml
@@ -15,6 +15,8 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
+ with:
+ python-version: "3.11"
- run: pip install --upgrade pip
- run: pip install -ve .[tests] codespell tomli
- run: make codespell-error
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 363ef00c2..d8ceaa9ff 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -9,6 +9,7 @@ repos:
rev: v0.1.14
hooks:
- id: ruff
+ args: ["--fix"]
- id: ruff-format
- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
diff --git a/docs/hooks.py b/docs/hooks.py
index ab7192f6e..41ece9a61 100644
--- a/docs/hooks.py
+++ b/docs/hooks.py
@@ -1,9 +1,9 @@
import logging
-from typing import Dict, Any
+from typing import Any
from mkdocs.config.defaults import MkDocsConfig
-from mkdocs.structure.pages import Page
from mkdocs.structure.files import Files
+from mkdocs.structure.pages import Page
logger = logging.getLogger("mkdocs")
@@ -13,7 +13,7 @@
# Ideally there would be a better hook, but it's unclear if context can
# be obtained any earlier
def on_template_context(
- context: Dict[str, Any],
+ context: dict[str, Any],
template_name: str,
config: MkDocsConfig,
) -> None:
diff --git a/docs/source/examples/gen_examples.py b/docs/source/examples/gen_examples.py
index f24c0d29f..1f2514274 100755
--- a/docs/source/examples/gen_examples.py
+++ b/docs/source/examples/gen_examples.py
@@ -1,19 +1,21 @@
#!/usr/bin/env python
-from collections import defaultdict
import contextlib
import logging
import shutil
-from pathlib import Path
import sys
-from typing import Union, Iterable
+from collections import defaultdict
+from collections.abc import Iterable
+from pathlib import Path
+from typing import Union
+
+from tqdm import tqdm
import mne_bids_pipeline
-from mne_bids_pipeline._config_import import _import_config
import mne_bids_pipeline.tests.datasets
-from mne_bids_pipeline.tests.test_run import TEST_SUITE
+from mne_bids_pipeline._config_import import _import_config
from mne_bids_pipeline.tests.datasets import DATASET_OPTIONS
-from tqdm import tqdm
+from mne_bids_pipeline.tests.test_run import TEST_SUITE
this_dir = Path(__file__).parent
root = Path(mne_bids_pipeline.__file__).parent.resolve(strict=True)
diff --git a/docs/source/features/gen_steps.py b/docs/source/features/gen_steps.py
index fffc61ddf..86ea6283f 100755
--- a/docs/source/features/gen_steps.py
+++ b/docs/source/features/gen_steps.py
@@ -3,6 +3,7 @@
import importlib
from pathlib import Path
+
from mne_bids_pipeline._config_utils import _get_step_modules
pre = """\
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 026401106..cf5596cb1 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -21,5 +21,5 @@
### :medical_symbol: Code health
- The package build backend has been switched from `setuptools` to `hatchling`. (#825 by @hoechenberger)
-- Code formatting now uses `ruff format` instead of `black` (#834 by @larsoner)
+- Code formatting now uses `ruff format` instead of `black` (#834, #838 by @larsoner)
- Code caching is now tested using GitHub Actions (#836 by @larsoner)
diff --git a/mne_bids_pipeline/__init__.py b/mne_bids_pipeline/__init__.py
index 2826b97e6..2474edb8a 100644
--- a/mne_bids_pipeline/__init__.py
+++ b/mne_bids_pipeline/__init__.py
@@ -1,4 +1,4 @@
-from importlib.metadata import version, PackageNotFoundError
+from importlib.metadata import PackageNotFoundError, version
try:
__version__ = version("mne_bids_pipeline")
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index cc3793ea9..652e5ebfb 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1,18 +1,17 @@
# Default settings for data processing and analysis.
-from typing import Optional, Union, Iterable, List, Tuple, Dict, Callable, Literal
+from typing import Callable, Iterable, Literal, Optional, Union
from mne import Covariance
from mne_bids import BIDSPath
from mne_bids_pipeline.typing import (
- PathLike,
ArbitraryContrast,
- FloatArrayLike,
DigMontageType,
+ FloatArrayLike,
+ PathLike,
)
-
###############################################################################
# Config parameters
# -----------------
@@ -84,7 +83,7 @@
Enabling interactive mode deactivates parallel processing.
"""
-sessions: Union[List, Literal["all"]] = "all"
+sessions: Union[list, Literal["all"]] = "all"
"""
The sessions to process. If `'all'`, will process all sessions found in the
BIDS dataset.
@@ -101,7 +100,7 @@
BIDS dataset.
"""
-exclude_runs: Optional[Dict[str, List[str]]] = None
+exclude_runs: Optional[dict[str, list[str]]] = None
"""
Specify runs to exclude from analysis, for each participant individually.
@@ -117,7 +116,7 @@
did not understand the instructions, etc.).
"""
-crop_runs: Optional[Tuple[float, float]] = None
+crop_runs: Optional[tuple[float, float]] = None
"""
Crop the raw data of each run to the specified time interval `[tmin, tmax]`,
in seconds. The runs will be cropped before Maxwell or frequency filtering is
@@ -288,7 +287,7 @@
```
"""
-eeg_bipolar_channels: Optional[Dict[str, Tuple[str, str]]] = None
+eeg_bipolar_channels: Optional[dict[str, tuple[str, str]]] = None
"""
Combine two channels into a bipolar channel, whose signal is the **difference**
between the two combined channels, and add it to the data.
@@ -688,7 +687,7 @@
Number of extended SSS (eSSS) basis projectors to use from empty-room data.
"""
-mf_esss_reject: Optional[Dict[str, float]] = None
+mf_esss_reject: Optional[dict[str, float]] = None
"""
Rejection parameters to use when computing the extended SSS (eSSS) basis.
"""
@@ -980,7 +979,7 @@
```
""" # noqa: E501
-conditions: Optional[Union[Iterable[str], Dict[str, str]]] = None
+conditions: Optional[Union[Iterable[str], dict[str, str]]] = None
"""
The time-locked events based on which to create evoked responses.
This can either be name of the experimental condition as specified in the
@@ -1048,7 +1047,7 @@
and when the annotations do not contain any stimulation or behavior events.
"""
-baseline: Optional[Tuple[Optional[float], Optional[float]]] = (None, 0)
+baseline: Optional[tuple[Optional[float], Optional[float]]] = (None, 0)
"""
Specifies which time interval to use for baseline correction of epochs;
if `None`, no baseline correction is applied.
@@ -1059,7 +1058,7 @@
```
"""
-contrasts: Iterable[Union[Tuple[str, str], ArbitraryContrast]] = []
+contrasts: Iterable[Union[tuple[str, str], ArbitraryContrast]] = []
"""
The conditions to contrast via a subtraction of ERPs / ERFs. The list elements
can either be tuples or dictionaries (or a mix of both). Each element in the
@@ -1156,12 +1155,12 @@
# Rejection based on SSP
# ~~~~~~~~~~~~~~~~~~~~~~
-n_proj_eog: Dict[str, float] = dict(n_mag=1, n_grad=1, n_eeg=1)
+n_proj_eog: dict[str, float] = dict(n_mag=1, n_grad=1, n_eeg=1)
"""
Number of SSP vectors to create for EOG artifacts for each channel type.
"""
-n_proj_ecg: Dict[str, float] = dict(n_mag=1, n_grad=1, n_eeg=1)
+n_proj_ecg: dict[str, float] = dict(n_mag=1, n_grad=1, n_eeg=1)
"""
Number of SSP vectors to create for ECG artifacts for each channel type.
"""
@@ -1189,7 +1188,7 @@
`'separate'` otherwise.
"""
-ssp_reject_ecg: Optional[Union[Dict[str, float], Literal["autoreject_global"]]] = None
+ssp_reject_ecg: Optional[Union[dict[str, float], Literal["autoreject_global"]]] = None
"""
Peak-to-peak amplitude limits of the ECG epochs to exclude from SSP fitting.
This allows you to remove strong transient artifacts, which could negatively
@@ -1207,7 +1206,7 @@
```
"""
-ssp_reject_eog: Optional[Union[Dict[str, float], Literal["autoreject_global"]]] = None
+ssp_reject_eog: Optional[Union[dict[str, float], Literal["autoreject_global"]]] = None
"""
Peak-to-peak amplitude limits of the EOG epochs to exclude from SSP fitting.
This allows you to remove strong transient artifacts, which could negatively
@@ -1233,11 +1232,11 @@
# Rejection based on ICA
# ~~~~~~~~~~~~~~~~~~~~~~
-ica_reject: Optional[Union[Dict[str, float], Literal["autoreject_local"]]] = None
+ica_reject: Optional[Union[dict[str, float], Literal["autoreject_local"]]] = None
"""
Peak-to-peak amplitude limits to exclude epochs from ICA fitting. This allows you to
remove strong transient artifacts from the epochs used for fitting ICA, which could
-negatively affect ICA performance.
+negatively affect ICA performance.
The parameter values are the same as for [`reject`][mne_bids_pipeline._config.reject],
but `"autoreject_global"` is not supported. `"autoreject_local"` here behaves
@@ -1264,7 +1263,7 @@
to **not** specify rejection thresholds for EOG and ECG channels here –
otherwise, ICA won't be able to "see" these artifacts.
-???+ info
+???+ info
This setting is applied only to the epochs that are used for **fitting** ICA. The
goal is to make it easier for ICA to produce a good decomposition. After fitting,
ICA is applied to the epochs to be analyzed, usually with one or more components
@@ -1280,7 +1279,7 @@
ica_reject = "autoreject_global" # find global (per channel type) PTP thresholds before fitting ICA
ica_reject = "autoreject_local" # find local (per channel) thresholds and repair epochs before fitting ICA
```
-"""
+""" # noqa: E501
ica_algorithm: Literal[
"picard", "fastica", "extended_infomax", "picard-extended_infomax"
@@ -1373,7 +1372,7 @@
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
reject: Optional[
- Union[Dict[str, float], Literal["autoreject_global", "autoreject_local"]]
+ Union[dict[str, float], Literal["autoreject_global", "autoreject_local"]]
] = None
"""
Peak-to-peak amplitude limits to mark epochs as bad. This allows you to remove
@@ -1386,7 +1385,7 @@
If `None` (default), do not apply artifact rejection.
-If a dictionary, manually specify rejection thresholds (see examples).
+If a dictionary, manually specify rejection thresholds (see examples).
The thresholds provided here must be at least as stringent as those in
[`ica_reject`][mne_bids_pipeline._config.ica_reject] if using ICA. In case of
`'autoreject_global'`, thresholds for any channel that do not meet this
@@ -1409,7 +1408,7 @@
reject = "autoreject_global" # find global (per channel type) PTP thresholds
reject = "autoreject_local" # find local (per channel) thresholds and repair epochs
```
-"""
+""" # noqa: E501
reject_tmin: Optional[float] = None
"""
@@ -1689,7 +1688,7 @@
```
"""
-decoding_csp_freqs: Optional[Dict[str, FloatArrayLike]] = None
+decoding_csp_freqs: Optional[dict[str, FloatArrayLike]] = None
"""
The edges of the frequency bins to use for CSP decoding.
@@ -1733,7 +1732,7 @@
}
"""
-time_frequency_baseline: Optional[Tuple[float, float]] = None
+time_frequency_baseline: Optional[tuple[float, float]] = None
"""
Baseline period to use for the time-frequency analysis. If `None`, no baseline.
???+ example "Example"
@@ -1964,7 +1963,7 @@ def mri_landmarks_kind(bids_path):
"""
noise_cov: Union[
- Tuple[Optional[float], Optional[float]],
+ tuple[Optional[float], Optional[float]],
Literal["emptyroom", "rest", "ad-hoc"],
Callable[[BIDSPath], Covariance],
] = (None, 0)
@@ -2031,7 +2030,7 @@ def noise_cov(bids_path):
```
"""
-source_info_path_update: Optional[Dict[str, str]] = dict(suffix="ave")
+source_info_path_update: Optional[dict[str, str]] = dict(suffix="ave")
"""
When computing the forward and inverse solutions, by default the pipeline
retrieves the `mne.Info` object from the cleaned evoked data. However, in
@@ -2049,7 +2048,7 @@ def noise_cov(bids_path):
```
"""
-inverse_targets: List[Literal["evoked"]] = ["evoked"]
+inverse_targets: list[Literal["evoked"]] = ["evoked"]
"""
On which data to apply the inverse operator. Currently, the only supported
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index 14a55df2e..66fe9583a 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -1,22 +1,21 @@
import ast
import copy
-from dataclasses import field
import difflib
-from functools import partial
import importlib
import os
import pathlib
+from dataclasses import field
+from functools import partial
from types import SimpleNamespace
-from typing import Optional, List
+from typing import Optional
import matplotlib
-import numpy as np
import mne
-
+import numpy as np
from pydantic import ValidationError
from pydantic.dataclasses import dataclass
-from ._logging import logger, gen_log_kwargs
+from ._logging import gen_log_kwargs, logger
from .typing import PathLike
@@ -150,7 +149,7 @@ def _update_with_user_config(
config_path: Optional[PathLike],
overrides: Optional[SimpleNamespace],
log: bool = False,
-) -> List[str]:
+) -> list[str]:
# 1. Basics and hidden vars
from . import __version__
@@ -433,8 +432,8 @@ def _pydantic_validate(
def _check_misspellings_removals(
config: SimpleNamespace,
*,
- valid_names: List[str],
- user_names: List[str],
+ valid_names: list[str],
+ user_names: list[str],
log: bool,
) -> None:
# for each name in the user names, check if it's in the valid names but
diff --git a/mne_bids_pipeline/_config_template.py b/mne_bids_pipeline/_config_template.py
index 1925e020e..9c5a0ff29 100644
--- a/mne_bids_pipeline/_config_template.py
+++ b/mne_bids_pipeline/_config_template.py
@@ -1,8 +1,6 @@
from pathlib import Path
-from typing import List
-
-from ._logging import logger, gen_log_kwargs
+from ._logging import gen_log_kwargs, logger
CONFIG_SOURCE_PATH = Path(__file__).parent / "_config.py"
@@ -17,8 +15,8 @@ def create_template_config(
raise FileExistsError(f"The specified path already exists: {target_path}")
# Create a template by commenting out most of the lines in _config.py
- config: List[str] = []
- with open(CONFIG_SOURCE_PATH, "r", encoding="utf-8") as f:
+ config: list[str] = []
+ with open(CONFIG_SOURCE_PATH, encoding="utf-8") as f:
for line in f:
line = (
line if line.startswith(("#", "\n", "import", "from")) else f"# {line}"
diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py
index 6f7ce1f0d..321ccf0f0 100644
--- a/mne_bids_pipeline/_config_utils.py
+++ b/mne_bids_pipeline/_config_utils.py
@@ -3,15 +3,16 @@
import copy
import functools
import pathlib
-from typing import List, Optional, Union, Iterable, Tuple, Dict, TypeVar, Literal, Any
-from types import SimpleNamespace, ModuleType
+from collections.abc import Iterable
+from types import ModuleType, SimpleNamespace
+from typing import Any, Literal, Optional, TypeVar, Union
-import numpy as np
import mne
import mne_bids
+import numpy as np
from mne_bids import BIDSPath
-from ._logging import logger, gen_log_kwargs
+from ._logging import gen_log_kwargs, logger
from .typing import ArbitraryContrast
try:
@@ -47,8 +48,8 @@ def get_fs_subject(config: SimpleNamespace, subject: str) -> str:
return f"sub-{subject}"
-@functools.lru_cache(maxsize=None)
-def _get_entity_vals_cached(*args, **kwargs) -> List[str]:
+@functools.cache
+def _get_entity_vals_cached(*args, **kwargs) -> list[str]:
return mne_bids.get_entity_vals(*args, **kwargs)
@@ -73,18 +74,18 @@ def get_datatype(config: SimpleNamespace) -> Literal["meg", "eeg"]:
)
-@functools.lru_cache(maxsize=None)
+@functools.cache
def _get_datatypes_cached(root):
return mne_bids.get_datatypes(root=root)
-def _get_ignore_datatypes(config: SimpleNamespace) -> Tuple[str]:
- _all_datatypes: List[str] = _get_datatypes_cached(root=config.bids_root)
+def _get_ignore_datatypes(config: SimpleNamespace) -> tuple[str]:
+ _all_datatypes: list[str] = _get_datatypes_cached(root=config.bids_root)
_ignore_datatypes = set(_all_datatypes) - set([get_datatype(config)])
return tuple(sorted(_ignore_datatypes))
-def get_subjects(config: SimpleNamespace) -> List[str]:
+def get_subjects(config: SimpleNamespace) -> list[str]:
_valid_subjects = _get_entity_vals_cached(
root=config.bids_root,
entity_key="subject",
@@ -102,7 +103,7 @@ def get_subjects(config: SimpleNamespace) -> List[str]:
return sorted(subjects)
-def get_sessions(config: SimpleNamespace) -> Union[List[None], List[str]]:
+def get_sessions(config: SimpleNamespace) -> Union[list[None], list[str]]:
sessions = copy.deepcopy(config.sessions)
_all_sessions = _get_entity_vals_cached(
root=config.bids_root,
@@ -120,8 +121,8 @@ def get_sessions(config: SimpleNamespace) -> Union[List[None], List[str]]:
def get_runs_all_subjects(
config: SimpleNamespace,
-) -> Dict[str, Union[List[None], List[str]]]:
- """Gives the mapping between subjects and their runs.
+) -> dict[str, Union[list[None], list[str]]]:
+ """Give the mapping between subjects and their runs.
Returns
-------
@@ -142,10 +143,10 @@ def get_runs_all_subjects(
)
-@functools.lru_cache(maxsize=None)
+@functools.cache
def _get_runs_all_subjects_cached(
- **config_dict: Dict[str, Any],
-) -> Dict[str, Union[List[None], List[str]]]:
+ **config_dict: dict[str, Any],
+) -> dict[str, Union[list[None], list[str]]]:
config = SimpleNamespace(**config_dict)
# Sometimes we check list equivalence for ch_types, so convert it back
config.ch_types = list(config.ch_types)
@@ -172,8 +173,8 @@ def _get_runs_all_subjects_cached(
return subj_runs
-def get_intersect_run(config: SimpleNamespace) -> List[str]:
- """Returns the intersection of all the runs of all subjects."""
+def get_intersect_run(config: SimpleNamespace) -> list[str]:
+ """Return the intersection of all the runs of all subjects."""
subj_runs = get_runs_all_subjects(config)
return list(set.intersection(*map(set, subj_runs.values())))
@@ -183,8 +184,8 @@ def get_runs(
config: SimpleNamespace,
subject: str,
verbose: bool = False,
-) -> Union[List[str], List[None]]:
- """Returns a list of runs in the BIDS input data.
+) -> Union[list[str], list[None]]:
+ """Return a list of runs in the BIDS input data.
Parameters
----------
@@ -240,8 +241,8 @@ def get_runs_tasks(
config: SimpleNamespace,
subject: str,
session: Optional[str],
- which: Tuple[str] = ("runs", "noise", "rest"),
-) -> List[Tuple[str]]:
+ which: tuple[str] = ("runs", "noise", "rest"),
+) -> list[tuple[str]]:
"""Get (run, task) tuples for all runs plus (maybe) rest."""
from ._import_data import _get_noise_path, _get_rest_path
@@ -311,7 +312,7 @@ def get_task(config: SimpleNamespace) -> Optional[str]:
return _valid_tasks[0]
-def get_channels_to_analyze(info: mne.Info, config: SimpleNamespace) -> List[str]:
+def get_channels_to_analyze(info: mne.Info, config: SimpleNamespace) -> list[str]:
# Return names of the channels of the channel types we wish to analyze.
# We also include channels marked as "bad" here.
# `exclude=[]`: keep "bad" channels, too.
@@ -428,7 +429,7 @@ def _restrict_analyze_channels(
return inst
-def _get_scalp_in_files(cfg: SimpleNamespace) -> Dict[str, pathlib.Path]:
+def _get_scalp_in_files(cfg: SimpleNamespace) -> dict[str, pathlib.Path]:
subject_path = pathlib.Path(cfg.subjects_dir) / cfg.fs_subject
seghead = subject_path / "surf" / "lh.seghead"
in_files = dict()
@@ -439,7 +440,7 @@ def _get_scalp_in_files(cfg: SimpleNamespace) -> Dict[str, pathlib.Path]:
return in_files
-def _get_bem_conductivity(cfg: SimpleNamespace) -> Tuple[Tuple[float], str]:
+def _get_bem_conductivity(cfg: SimpleNamespace) -> tuple[tuple[float], str]:
if cfg.fs_subject in ("fsaverage", cfg.use_template_mri):
conductivity = None # should never be used
tag = "5120-5120-5120"
@@ -522,7 +523,7 @@ def get_all_contrasts(config: SimpleNamespace) -> Iterable[ArbitraryContrast]:
return normalized_contrasts
-def get_decoding_contrasts(config: SimpleNamespace) -> Iterable[Tuple[str, str]]:
+def get_decoding_contrasts(config: SimpleNamespace) -> Iterable[tuple[str, str]]:
_validate_contrasts(config.contrasts)
normalized_contrasts = []
for contrast in config.contrasts:
@@ -583,12 +584,8 @@ def _validate_contrasts(contrasts: SimpleNamespace) -> None:
raise ValueError("Contrasts must be tuples or well-formed dicts")
-def _get_step_modules() -> Dict[str, Tuple[ModuleType]]:
- from .steps import init
- from .steps import preprocessing
- from .steps import sensor
- from .steps import source
- from .steps import freesurfer
+def _get_step_modules() -> dict[str, tuple[ModuleType]]:
+ from .steps import freesurfer, init, preprocessing, sensor, source
INIT_STEPS = init._STEPS
PREPROCESSING_STEPS = preprocessing._STEPS
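Among the modernizations in this file, `functools.lru_cache(maxsize=None)` becomes the equivalent `functools.cache` (available since Python 3.9, matching the new minimum version). Note that `_get_runs_all_subjects_cached` caches on its keyword arguments, so every value passed via `**config_dict` must be hashable. A minimal sketch of that pattern with made-up field names:

```python
import functools
from types import SimpleNamespace


@functools.cache
def _expensive_lookup(**config_dict) -> tuple:
    # Rebuild a namespace from the (hashable) keyword arguments.
    config = SimpleNamespace(**config_dict)
    return tuple(f"{config.task}-{run}" for run in config.runs)


print(_expensive_lookup(task="rest", runs=("01", "02")))  # computed
print(_expensive_lookup(task="rest", runs=("01", "02")))  # cache hit
```

This is also why the cached variant converts `config.ch_types` back to a list internally: callers must pass tuples, and the function restores the mutable types it needs.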
diff --git a/mne_bids_pipeline/_decoding.py b/mne_bids_pipeline/_decoding.py
index 2b6be3cfc..4d895395b 100644
--- a/mne_bids_pipeline/_decoding.py
+++ b/mne_bids_pipeline/_decoding.py
@@ -1,8 +1,7 @@
import numpy as np
-from sklearn.linear_model import LogisticRegression
from joblib import parallel_backend
-
from mne.utils import _validate_type
+from sklearn.linear_model import LogisticRegression
class LogReg(LogisticRegression):
diff --git a/mne_bids_pipeline/_download.py b/mne_bids_pipeline/_download.py
index 33e565207..45de893ed 100644
--- a/mne_bids_pipeline/_download.py
+++ b/mne_bids_pipeline/_download.py
@@ -12,7 +12,7 @@
def _download_via_datalad(*, ds_name: str, ds_path: Path):
import datalad.api as dl
- print('datalad installing "{}"'.format(ds_name))
+ print(f'datalad installing "{ds_name}"')
options = DATASET_OPTIONS[ds_name]
git_url = options["git"]
assert "exclude" not in options
@@ -28,7 +28,7 @@ def _download_via_datalad(*, ds_name: str, ds_path: Path):
n_jobs = 1
for to_get in DATASET_OPTIONS[ds_name].get("include", []):
- print('datalad get data "{}" for "{}"'.format(to_get, ds_name))
+ print(f'datalad get data "{to_get}" for "{ds_name}"')
dataset.get(to_get, jobs=n_jobs)
diff --git a/mne_bids_pipeline/_import_data.py b/mne_bids_pipeline/_import_data.py
index ca52c59e1..d7f22240d 100644
--- a/mne_bids_pipeline/_import_data.py
+++ b/mne_bids_pipeline/_import_data.py
@@ -1,21 +1,22 @@
+from collections.abc import Iterable
from types import SimpleNamespace
-from typing import Dict, Optional, Iterable, Union, List, Literal
+from typing import Literal, Optional, Union
import mne
-from mne_bids import BIDSPath, read_raw_bids, get_bids_path_from_fname
import numpy as np
import pandas as pd
+from mne_bids import BIDSPath, get_bids_path_from_fname, read_raw_bids
from ._config_utils import (
- get_mf_reference_run,
- get_runs,
- get_datatype,
- get_task,
_bids_kwargs,
_do_mf_autobad,
_pl,
+ get_datatype,
+ get_mf_reference_run,
+ get_runs,
+ get_task,
)
-from ._io import _read_json, _empty_room_match_path
+from ._io import _empty_room_match_path, _read_json
from ._logging import gen_log_kwargs, logger
from ._run import _update_for_splits
from .typing import PathLike
@@ -27,8 +28,8 @@ def make_epochs(
subject: str,
session: Optional[str],
raw: mne.io.BaseRaw,
- event_id: Optional[Union[Dict[str, int], Literal["auto"]]],
- conditions: Union[Iterable[str], Dict[str, str]],
+ event_id: Optional[Union[dict[str, int], Literal["auto"]]],
+ conditions: Union[Iterable[str], dict[str, str]],
tmin: float,
tmax: float,
metadata_tmin: Optional[float],
@@ -147,12 +148,12 @@ def make_epochs(
return epochs
-def annotations_to_events(*, raw_paths: List[PathLike]) -> Dict[str, int]:
+def annotations_to_events(*, raw_paths: list[PathLike]) -> dict[str, int]:
"""Generate a unique event name -> event code mapping.
The mapping can be used across all passed raws.
"""
- event_names: List[str] = []
+ event_names: list[str] = []
for raw_fname in raw_paths:
raw = mne.io.read_raw_fif(raw_fname)
_, event_id = mne.events_from_annotations(raw=raw)
@@ -434,6 +435,8 @@ def import_er_data(
The BIDS path to the empty room bad channels file.
bids_path_ref_bads_in
The BIDS path to the reference data bad channels file.
+ prepare_maxwell_filter
+ Whether to prepare the empty-room data for Maxwell filtering.
Returns
-------
@@ -753,7 +756,7 @@ def _read_bads_tsv(
*,
cfg: SimpleNamespace,
bids_path_bads: BIDSPath,
-) -> List[str]:
+) -> list[str]:
bads_tsv = pd.read_csv(bids_path_bads.fpath, sep="\t", header=0)
return bads_tsv[bads_tsv.columns[0]].tolist()
diff --git a/mne_bids_pipeline/_io.py b/mne_bids_pipeline/_io.py
index 0b7485f76..f1a2b0ce3 100644
--- a/mne_bids_pipeline/_io.py
+++ b/mne_bids_pipeline/_io.py
@@ -14,7 +14,7 @@ def _write_json(fname: PathLike, data: dict) -> None:
def _read_json(fname: PathLike) -> dict:
- with open(fname, "r", encoding="utf-8") as f:
+ with open(fname, encoding="utf-8") as f:
return json_tricks.load(f)
diff --git a/mne_bids_pipeline/_main.py b/mne_bids_pipeline/_main.py
index ddbb49c6a..04ddabe1e 100755
--- a/mne_bids_pipeline/_main.py
+++ b/mne_bids_pipeline/_main.py
@@ -1,16 +1,15 @@
import argparse
import pathlib
-from textwrap import dedent
import time
-from typing import List
+from textwrap import dedent
from types import ModuleType, SimpleNamespace
import numpy as np
-from ._config_utils import _get_step_modules
from ._config_import import _import_config
from ._config_template import create_template_config
-from ._logging import logger, gen_log_kwargs
+from ._config_utils import _get_step_modules
+from ._logging import gen_log_kwargs, logger
from ._parallel import get_parallel_backend
from ._run import _short_step_path
@@ -182,7 +181,7 @@ def main():
if not cache:
overrides.memory_location = False
- step_modules: List[ModuleType] = []
+ step_modules: list[ModuleType] = []
STEP_MODULES = _get_step_modules()
for stage, step in zip(processing_stages, processing_steps):
if stage not in STEP_MODULES.keys():
diff --git a/mne_bids_pipeline/_parallel.py b/mne_bids_pipeline/_parallel.py
index e79ae5151..9c74e6474 100644
--- a/mne_bids_pipeline/_parallel.py
+++ b/mne_bids_pipeline/_parallel.py
@@ -1,12 +1,13 @@
"""Parallelization."""
-from typing import Literal, Callable
from types import SimpleNamespace
+from typing import Callable, Literal
import joblib
-from mne.utils import use_log_level, logger as mne_logger
+from mne.utils import logger as mne_logger
+from mne.utils import use_log_level
-from ._logging import logger, gen_log_kwargs, _is_testing
+from ._logging import _is_testing, gen_log_kwargs, logger
def get_n_jobs(*, exec_params: SimpleNamespace, log_override: bool = False) -> int:
diff --git a/mne_bids_pipeline/_reject.py b/mne_bids_pipeline/_reject.py
index 5b3729dc2..ca506239d 100644
--- a/mne_bids_pipeline/_reject.py
+++ b/mne_bids_pipeline/_reject.py
@@ -1,21 +1,22 @@
"""Rejection."""
-from typing import Optional, Union, Iterable, Dict, Literal
+from collections.abc import Iterable
+from typing import Literal, Optional, Union
import mne
-from ._logging import logger, gen_log_kwargs
+from ._logging import gen_log_kwargs, logger
def _get_reject(
*,
subject: str,
session: Optional[str],
- reject: Union[Dict[str, float], Literal["autoreject_global"]],
+ reject: Union[dict[str, float], Literal["autoreject_global"]],
ch_types: Iterable[Literal["meg", "mag", "grad", "eeg"]],
param: str,
epochs: Optional[mne.BaseEpochs] = None,
-) -> Dict[str, float]:
+) -> dict[str, float]:
if reject is None:
return dict()
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index bf42a27a2..ed514925d 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -1,24 +1,23 @@
import contextlib
from functools import lru_cache
from io import StringIO
-from typing import Optional, List, Literal
from types import SimpleNamespace
+from typing import Literal, Optional
-from filelock import FileLock
import matplotlib.transforms
+import mne
import numpy as np
import pandas as pd
-from scipy.io import loadmat
-
-import mne
+from filelock import FileLock
from mne.io import BaseRaw
from mne.utils import _pl
from mne_bids import BIDSPath
from mne_bids.stats import count_events
+from scipy.io import loadmat
from ._config_utils import get_all_contrasts
from ._decoding import _handle_csp_args
-from ._logging import logger, gen_log_kwargs, _linkfile
+from ._logging import _linkfile, gen_log_kwargs, logger
@contextlib.contextmanager
@@ -123,8 +122,8 @@ def _open_report(
def _plot_full_epochs_decoding_scores(
- contrast_names: List[str],
- scores: List[np.ndarray],
+ contrast_names: list[str],
+ scores: list[np.ndarray],
metric: str,
kind: Literal["single-subject", "grand-average"] = "single-subject",
):
@@ -458,7 +457,7 @@ def _gen_empty_report(
return report
-def _contrasts_to_names(contrasts: List[List[str]]) -> List[str]:
+def _contrasts_to_names(contrasts: list[list[str]]) -> list[str]:
return [f"{c[0]} vs.\n{c[1]}" for c in contrasts]
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index c76126ea2..128b876ed 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -7,19 +7,19 @@
import pathlib
import pdb
import sys
-import traceback
import time
-from typing import Callable, Optional, Dict, List, Literal, Union
+import traceback
from types import SimpleNamespace
+from typing import Callable, Literal, Optional, Union
-from filelock import FileLock
-from joblib import Memory
import json_tricks
import pandas as pd
+from filelock import FileLock
+from joblib import Memory
from mne_bids import BIDSPath
from ._config_utils import get_task
-from ._logging import logger, gen_log_kwargs, _is_testing
+from ._logging import _is_testing, gen_log_kwargs, logger
def failsafe_run(
@@ -303,7 +303,7 @@ def save_logs(*, config: SimpleNamespace, logs) -> None: # TODO add type
def _update_for_splits(
- files_dict: Union[Dict[str, BIDSPath], BIDSPath],
+ files_dict: Union[dict[str, BIDSPath], BIDSPath],
key: Optional[str],
*,
single: bool = False,
@@ -346,7 +346,7 @@ def _sanitize_callable(val):
def _get_step_path(
- stack: Optional[List[inspect.FrameInfo]] = None,
+ stack: Optional[list[inspect.FrameInfo]] = None,
) -> pathlib.Path:
if stack is None:
stack = inspect.stack()
@@ -372,7 +372,7 @@ def _short_step_path(step_path: pathlib.Path) -> str:
def _prep_out_files(
*,
exec_params: SimpleNamespace,
- out_files: Dict[str, BIDSPath],
+ out_files: dict[str, BIDSPath],
):
for key, fname in out_files.items():
out_files[key] = _path_to_str_hash(
diff --git a/mne_bids_pipeline/_viz.py b/mne_bids_pipeline/_viz.py
index 8e49af509..4055ab7c4 100644
--- a/mne_bids_pipeline/_viz.py
+++ b/mne_bids_pipeline/_viz.py
@@ -1,10 +1,9 @@
-from typing import List
import numpy as np
import pandas as pd
from matplotlib.figure import Figure
-def plot_auto_scores(auto_scores, *, ch_types) -> List[Figure]:
+def plot_auto_scores(auto_scores, *, ch_types) -> list[Figure]:
# Plot scores of automated bad channel detection.
import matplotlib.pyplot as plt
import seaborn as sns
@@ -15,7 +14,7 @@ def plot_auto_scores(auto_scores, *, ch_types) -> List[Figure]:
ch_types_[idx] = "grad"
ch_types_.insert(idx + 1, "mag")
- figs: List[Figure] = []
+ figs: list[Figure] = []
for ch_type in ch_types_:
# Only select the data for mag or grad channels.
ch_subset = auto_scores["ch_types"] == ch_type
diff --git a/mne_bids_pipeline/steps/freesurfer/_01_recon_all.py b/mne_bids_pipeline/steps/freesurfer/_01_recon_all.py
index ee803c800..0633a9db0 100755
--- a/mne_bids_pipeline/steps/freesurfer/_01_recon_all.py
+++ b/mne_bids_pipeline/steps/freesurfer/_01_recon_all.py
@@ -11,8 +11,8 @@
from mne.utils import run_subprocess
from ..._config_utils import get_fs_subjects_dir, get_subjects
-from ..._logging import logger, gen_log_kwargs
-from ..._parallel import parallel_func, get_parallel_backend
+from ..._logging import gen_log_kwargs, logger
+from ..._parallel import get_parallel_backend, parallel_func
fs_bids_app = Path(__file__).parent / "contrib" / "run.py"
diff --git a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
index 560448713..eb5f86151 100644
--- a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
+++ b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
@@ -10,14 +10,14 @@
import mne.bem
from ..._config_utils import (
- get_fs_subjects_dir,
+ _get_scalp_in_files,
get_fs_subject,
+ get_fs_subjects_dir,
get_subjects,
- _get_scalp_in_files,
)
-from ..._logging import logger, gen_log_kwargs
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._run import failsafe_run, _prep_out_files
+from ..._logging import gen_log_kwargs, logger
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._run import _prep_out_files, failsafe_run
fs_bids_app = Path(__file__).parent / "contrib" / "run.py"
diff --git a/mne_bids_pipeline/steps/freesurfer/__init__.py b/mne_bids_pipeline/steps/freesurfer/__init__.py
index 84e37008a..7f4d9d088 100644
--- a/mne_bids_pipeline/steps/freesurfer/__init__.py
+++ b/mne_bids_pipeline/steps/freesurfer/__init__.py
@@ -3,7 +3,6 @@
Surface reconstruction via FreeSurfer. These steps are not run by default.
"""
-from . import _01_recon_all
-from . import _02_coreg_surfaces
+from . import _01_recon_all, _02_coreg_surfaces
_STEPS = (_01_recon_all, _02_coreg_surfaces)
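
The many `from . import ...` rewrites throughout this patch are ruff's isort-compatible `I` rules at work: imports from the same module are merged into one statement, and import blocks are sorted (stdlib, third-party, local). Assuming the usual invocation, `ruff check --select I --fix .` produces exactly this kind of change. A self-contained sketch using stdlib names:

    # Before (two statements importing from the same module):
    #     from os import sep
    #     from os import path
    # After isort-style merging and alphabetical sorting:
    from os import path, sep

    print(path.join("a", "b"), sep)
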
diff --git a/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py b/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
index a964e6d59..2f17b0c77 100644
--- a/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
+++ b/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
@@ -3,13 +3,13 @@
Initialize the derivatives directory.
"""
-from typing import Optional
from types import SimpleNamespace
+from typing import Optional
from mne_bids.config import BIDS_VERSION
from mne_bids.utils import _write_json
-from ..._config_utils import get_subjects, get_sessions, _bids_kwargs
+from ..._config_utils import _bids_kwargs, get_sessions, get_subjects
from ..._logging import gen_log_kwargs, logger
from ..._run import failsafe_run
diff --git a/mne_bids_pipeline/steps/init/_02_find_empty_room.py b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
index d9334a9cf..fcb0536c5 100644
--- a/mne_bids_pipeline/steps/init/_02_find_empty_room.py
+++ b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
@@ -1,26 +1,26 @@
"""Find empty-room data matches."""
from types import SimpleNamespace
-from typing import Dict, Optional
+from typing import Optional
from mne_bids import BIDSPath
from ..._config_utils import (
+ _bids_kwargs,
+ _pl,
get_datatype,
+ get_mf_reference_run,
get_sessions,
get_subjects,
- get_mf_reference_run,
- _bids_kwargs,
- _pl,
)
from ..._io import _empty_room_match_path, _write_json
from ..._logging import gen_log_kwargs, logger
-from ..._run import _update_for_splits, failsafe_run, save_logs, _prep_out_files
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
def get_input_fnames_find_empty_room(
*, subject: str, session: Optional[str], run: Optional[str], cfg: SimpleNamespace
-) -> Dict[str, BIDSPath]:
+) -> dict[str, BIDSPath]:
"""Get paths of files required by find_empty_room function."""
bids_path_in = BIDSPath(
subject=subject,
@@ -35,7 +35,7 @@ def get_input_fnames_find_empty_room(
root=cfg.bids_root,
check=False,
)
- in_files: Dict[str, BIDSPath] = dict()
+ in_files: dict[str, BIDSPath] = dict()
in_files[f"raw_run-{run}"] = bids_path_in
_update_for_splits(in_files, f"raw_run-{run}", single=True)
if hasattr(bids_path_in, "find_matching_sidecar"):
@@ -64,8 +64,8 @@ def find_empty_room(
subject: str,
session: Optional[str],
run: Optional[str],
- in_files: Dict[str, BIDSPath],
-) -> Dict[str, BIDSPath]:
+ in_files: dict[str, BIDSPath],
+) -> dict[str, BIDSPath]:
raw_path = in_files.pop(f"raw_run-{run}")
in_files.pop("sidecar", None)
try:
diff --git a/mne_bids_pipeline/steps/init/__init__.py b/mne_bids_pipeline/steps/init/__init__.py
index 72a80cf13..6435ffdfe 100644
--- a/mne_bids_pipeline/steps/init/__init__.py
+++ b/mne_bids_pipeline/steps/init/__init__.py
@@ -1,7 +1,6 @@
"""Filesystem initialization and dataset inspection."""
-from . import _01_init_derivatives_dir
-from . import _02_find_empty_room
+from . import _01_init_derivatives_dir, _02_find_empty_room
_STEPS = (
_01_init_derivatives_dir,
diff --git a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
index 655280e52..3b64c5659 100644
--- a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
+++ b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
@@ -3,34 +3,33 @@
from types import SimpleNamespace
from typing import Optional
-import pandas as pd
-
import mne
+import pandas as pd
from mne_bids import BIDSPath
from ..._config_utils import (
+ _do_mf_autobad,
+ _pl,
get_mf_cal_fname,
get_mf_ctc_fname,
- get_subjects,
- get_sessions,
get_runs_tasks,
- _do_mf_autobad,
- _pl,
+ get_sessions,
+ get_subjects,
)
from ..._import_data import (
- _get_run_rest_noise_path,
- _get_mf_reference_run_path,
- import_experimental_data,
- import_er_data,
- _bads_path,
_auto_scores_path,
+ _bads_path,
+ _get_mf_reference_run_path,
+ _get_run_rest_noise_path,
_import_data_kwargs,
+ import_er_data,
+ import_experimental_data,
)
from ..._io import _write_json
from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._report import _open_report, _add_raw
-from ..._run import failsafe_run, save_logs, _prep_out_files
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._report import _add_raw, _open_report
+from ..._run import _prep_out_files, failsafe_run, save_logs
from ..._viz import plot_auto_scores
diff --git a/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py b/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
index a75cd7339..d4a6a2c6b 100644
--- a/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
+++ b/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
@@ -1,24 +1,24 @@
"""Estimate head positions."""
-from typing import Optional
from types import SimpleNamespace
+from typing import Optional
import mne
from ..._config_utils import (
- get_subjects,
- get_sessions,
get_runs_tasks,
+ get_sessions,
+ get_subjects,
)
from ..._import_data import (
- import_experimental_data,
_get_run_rest_noise_path,
_import_data_kwargs,
+ import_experimental_data,
)
from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
+from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _open_report
-from ..._run import failsafe_run, save_logs, _prep_out_files
+from ..._run import _prep_out_files, failsafe_run, save_logs
def get_input_fnames_head_pos(
diff --git a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
index 099336c5c..c5b58e2b6 100644
--- a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
@@ -14,35 +14,35 @@
The function loads machine-specific calibration files.
"""
-from copy import deepcopy
import gc
-from typing import Optional
+from copy import deepcopy
from types import SimpleNamespace
+from typing import Optional
-import numpy as np
import mne
+import numpy as np
from mne_bids import read_raw_bids
from ..._config_utils import (
+ _pl,
get_mf_cal_fname,
get_mf_ctc_fname,
- get_subjects,
- get_sessions,
get_runs_tasks,
- _pl,
+ get_sessions,
+ get_subjects,
)
from ..._import_data import (
- import_experimental_data,
- import_er_data,
+ _get_mf_reference_run_path,
_get_run_path,
_get_run_rest_noise_path,
- _get_mf_reference_run_path,
_import_data_kwargs,
+ import_er_data,
+ import_experimental_data,
)
from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._report import _open_report, _add_raw
-from ..._run import failsafe_run, save_logs, _update_for_splits, _prep_out_files
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._report import _add_raw, _open_report
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
# %% eSSS
diff --git a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
index d026539ee..a44a1c70e 100644
--- a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
@@ -14,27 +14,28 @@
If config.interactive = True, plots raw data and power spectral density.
""" # noqa: E501
-import numpy as np
+from collections.abc import Iterable
from types import SimpleNamespace
-from typing import Optional, Union, Literal, Iterable
+from typing import Literal, Optional, Union
import mne
+import numpy as np
from ..._config_utils import (
- get_sessions,
get_runs_tasks,
+ get_sessions,
get_subjects,
)
from ..._import_data import (
- import_experimental_data,
- import_er_data,
_get_run_rest_noise_path,
_import_data_kwargs,
+ import_er_data,
+ import_experimental_data,
)
from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._report import _open_report, _add_raw
-from ..._run import failsafe_run, save_logs, _update_for_splits, _prep_out_files
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._report import _add_raw, _open_report
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
def get_input_fnames_frequency_filter(
diff --git a/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py b/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py
index d4deb4078..0cebb033e 100644
--- a/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py
+++ b/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py
@@ -14,23 +14,23 @@
from mne_bids import BIDSPath
from ..._config_utils import (
- get_runs,
- get_subjects,
+ _bids_kwargs,
get_eeg_reference,
+ get_runs,
get_sessions,
- _bids_kwargs,
+ get_subjects,
)
-from ..._import_data import make_epochs, annotations_to_events
+from ..._import_data import annotations_to_events, make_epochs
from ..._logging import gen_log_kwargs, logger
+from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _open_report
from ..._run import (
+ _prep_out_files,
+ _sanitize_callable,
+ _update_for_splits,
failsafe_run,
save_logs,
- _update_for_splits,
- _sanitize_callable,
- _prep_out_files,
)
-from ..._parallel import parallel_func, get_parallel_backend
def get_input_fnames_epochs(
diff --git a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
index 294d05a26..00346df25 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
@@ -11,31 +11,31 @@
run 05a-apply_ica.py.
"""
-from typing import List, Optional, Iterable, Tuple, Literal
+from collections.abc import Iterable
from types import SimpleNamespace
+from typing import Literal, Optional
-import pandas as pd
-import numpy as np
import autoreject
-
import mne
-from mne.report import Report
+import numpy as np
+import pandas as pd
from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
+from mne.report import Report
from mne_bids import BIDSPath
from ..._config_utils import (
+ _bids_kwargs,
+ get_eeg_reference,
get_runs,
get_sessions,
get_subjects,
- get_eeg_reference,
- _bids_kwargs,
)
-from ..._import_data import make_epochs, annotations_to_events
+from ..._import_data import annotations_to_events, make_epochs
from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
+from ..._parallel import get_parallel_backend, parallel_func
from ..._reject import _get_reject
from ..._report import _agg_backend
-from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
def filter_for_ica(
@@ -190,10 +190,10 @@ def detect_bad_components(
which: Literal["eog", "ecg"],
epochs: mne.BaseEpochs,
ica: mne.preprocessing.ICA,
- ch_names: Optional[List[str]],
+ ch_names: Optional[list[str]],
subject: str,
session: Optional[str],
-) -> Tuple[List[int], np.ndarray]:
+) -> tuple[list[int], np.ndarray]:
artifact = which.upper()
msg = f"Performing automated {artifact} artifact detection …"
logger.info(**gen_log_kwargs(message=msg))
diff --git a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
index eeb22cf36..46b88ee90 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
@@ -3,26 +3,26 @@
These are often also referred to as PCA vectors.
"""
-from typing import Optional
from types import SimpleNamespace
+from typing import Optional
import mne
-from mne.preprocessing import create_eog_epochs, create_ecg_epochs
-from mne import compute_proj_evoked, compute_proj_epochs
+from mne import compute_proj_epochs, compute_proj_evoked
+from mne.preprocessing import create_ecg_epochs, create_eog_epochs
from mne_bids import BIDSPath
from ..._config_utils import (
+ _bids_kwargs,
+ _pl,
get_runs,
get_sessions,
get_subjects,
- _bids_kwargs,
- _pl,
)
from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
+from ..._parallel import get_parallel_backend, parallel_func
from ..._reject import _get_reject
from ..._report import _open_report
-from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
def get_input_fnames_run_ssp(
diff --git a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
index 4b906a106..c24d8e015 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
@@ -14,22 +14,21 @@
from types import SimpleNamespace
from typing import Optional
-import pandas as pd
import mne
+import pandas as pd
from mne.preprocessing import read_ica
from mne.report import Report
-
from mne_bids import BIDSPath
from ..._config_utils import (
- get_subjects,
- get_sessions,
_bids_kwargs,
+ get_sessions,
+ get_subjects,
)
from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._report import _open_report, _agg_backend
-from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._report import _agg_backend, _open_report
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
def get_input_fnames_apply_ica(
diff --git a/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py b/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
index 65fc27b70..9b1a83fc9 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
@@ -12,13 +12,13 @@
from mne_bids import BIDSPath
from ..._config_utils import (
+ _bids_kwargs,
get_sessions,
get_subjects,
- _bids_kwargs,
)
from ..._logging import gen_log_kwargs, logger
-from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
-from ..._parallel import parallel_func, get_parallel_backend
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
def get_input_fnames_apply_ssp(
diff --git a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
index b4a29f4e7..7f0bf0607 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
@@ -11,22 +11,21 @@
from types import SimpleNamespace
from typing import Optional
-import numpy as np
import autoreject
-
import mne
+import numpy as np
from mne_bids import BIDSPath
from ..._config_utils import (
+ _bids_kwargs,
get_sessions,
get_subjects,
- _bids_kwargs,
)
from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
+from ..._parallel import get_parallel_backend, parallel_func
from ..._reject import _get_reject
from ..._report import _open_report
-from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
def get_input_fnames_drop_ptp(
diff --git a/mne_bids_pipeline/steps/preprocessing/__init__.py b/mne_bids_pipeline/steps/preprocessing/__init__.py
index 95637ecab..686b7cf27 100644
--- a/mne_bids_pipeline/steps/preprocessing/__init__.py
+++ b/mne_bids_pipeline/steps/preprocessing/__init__.py
@@ -1,15 +1,17 @@
"""Preprocessing."""
-from . import _01_data_quality
-from . import _02_head_pos
-from . import _03_maxfilter
-from . import _04_frequency_filter
-from . import _05_make_epochs
-from . import _06a_run_ica
-from . import _06b_run_ssp
-from . import _07a_apply_ica
-from . import _07b_apply_ssp
-from . import _08_ptp_reject
+from . import (
+ _01_data_quality,
+ _02_head_pos,
+ _03_maxfilter,
+ _04_frequency_filter,
+ _05_make_epochs,
+ _06a_run_ica,
+ _06b_run_ssp,
+ _07a_apply_ica,
+ _07b_apply_ssp,
+ _08_ptp_reject,
+)
_STEPS = (
_01_data_quality,
diff --git a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
index 2ec0ea714..63d1854ae 100644
--- a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
+++ b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
@@ -7,22 +7,22 @@
from mne_bids import BIDSPath
from ..._config_utils import (
- get_sessions,
- get_subjects,
- get_all_contrasts,
_bids_kwargs,
- _restrict_analyze_channels,
_pl,
+ _restrict_analyze_channels,
+ get_all_contrasts,
+ get_sessions,
+ get_subjects,
)
from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._report import _open_report, _sanitize_cond_tag, _all_conditions
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._report import _all_conditions, _open_report, _sanitize_cond_tag
from ..._run import (
- failsafe_run,
- save_logs,
- _sanitize_callable,
_prep_out_files,
+ _sanitize_callable,
_update_for_splits,
+ failsafe_run,
+ save_logs,
)
diff --git a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
index 81243bcd9..58a354c1c 100644
--- a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
+++ b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
@@ -12,37 +12,34 @@
from types import SimpleNamespace
from typing import Optional
+import mne
import numpy as np
import pandas as pd
-from scipy.io import savemat, loadmat
-
-from sklearn.model_selection import cross_val_score
-from sklearn.pipeline import make_pipeline
-from sklearn.model_selection import StratifiedKFold
-
-import mne
from mne.decoding import Scaler, Vectorizer
from mne_bids import BIDSPath
+from scipy.io import loadmat, savemat
+from sklearn.model_selection import StratifiedKFold, cross_val_score
+from sklearn.pipeline import make_pipeline
from ..._config_utils import (
- get_sessions,
- get_subjects,
- get_eeg_reference,
- get_decoding_contrasts,
_bids_kwargs,
- _restrict_analyze_channels,
_get_decoding_proc,
+ _restrict_analyze_channels,
+ get_decoding_contrasts,
+ get_eeg_reference,
+ get_sessions,
+ get_subjects,
)
-from ..._logging import gen_log_kwargs, logger
from ..._decoding import LogReg
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits
+from ..._logging import gen_log_kwargs, logger
+from ..._parallel import get_parallel_backend, parallel_func
from ..._report import (
- _open_report,
_contrasts_to_names,
+ _open_report,
_plot_full_epochs_decoding_scores,
_sanitize_cond_tag,
)
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
def get_input_fnames_epochs_decoding(
diff --git a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
index b435cf6ae..d61e865c4 100644
--- a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
+++ b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
@@ -15,38 +15,35 @@
from types import SimpleNamespace
from typing import Optional
+import mne
import numpy as np
import pandas as pd
-from scipy.io import savemat, loadmat
-
-import mne
from mne.decoding import GeneralizingEstimator, SlidingEstimator, cross_val_multiscore
-
from mne_bids import BIDSPath
-
-from sklearn.preprocessing import StandardScaler
-from sklearn.pipeline import make_pipeline
+from scipy.io import loadmat, savemat
from sklearn.model_selection import StratifiedKFold
+from sklearn.pipeline import make_pipeline
+from sklearn.preprocessing import StandardScaler
from ..._config_utils import (
- get_sessions,
- get_subjects,
- get_eeg_reference,
- get_decoding_contrasts,
_bids_kwargs,
- _restrict_analyze_channels,
_get_decoding_proc,
+ _restrict_analyze_channels,
+ get_decoding_contrasts,
+ get_eeg_reference,
+ get_sessions,
+ get_subjects,
)
from ..._decoding import LogReg
from ..._logging import gen_log_kwargs, logger
-from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits
from ..._parallel import get_parallel_backend, get_parallel_backend_name
from ..._report import (
_open_report,
_plot_decoding_time_generalization,
- _sanitize_cond_tag,
_plot_time_by_time_decoding_scores,
+ _sanitize_cond_tag,
)
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
def get_input_fnames_time_decoding(
diff --git a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
index e1e7b440c..0ab3aa3ea 100644
--- a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
+++ b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
@@ -7,24 +7,22 @@
from types import SimpleNamespace
from typing import Optional
-import numpy as np
-
import mne
-
+import numpy as np
from mne_bids import BIDSPath
from ..._config_utils import (
+ _bids_kwargs,
+ _restrict_analyze_channels,
+ get_eeg_reference,
get_sessions,
get_subjects,
- get_eeg_reference,
sanitize_cond_name,
- _bids_kwargs,
- _restrict_analyze_channels,
)
from ..._logging import gen_log_kwargs, logger
-from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _open_report, _sanitize_cond_tag
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
def get_input_fnames_time_frequency(
diff --git a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
index 1614854c1..c9d3ee077 100644
--- a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
+++ b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
@@ -1,15 +1,13 @@
-"""
-Decoding based on common spatial patterns (CSP).
-"""
+"""Decoding based on common spatial patterns (CSP)."""
import os.path as op
from types import SimpleNamespace
-from typing import Dict, Optional, Tuple
+from typing import Optional
+import matplotlib.transforms
import mne
import numpy as np
import pandas as pd
-import matplotlib.transforms
from mne.decoding import CSP, UnsupervisedSpatialFilter
from mne_bids import BIDSPath
from sklearn.decomposition import PCA
@@ -17,35 +15,35 @@
from sklearn.pipeline import make_pipeline
from ..._config_utils import (
- get_sessions,
- get_subjects,
- get_eeg_reference,
- get_decoding_contrasts,
_bids_kwargs,
- _restrict_analyze_channels,
_get_decoding_proc,
+ _restrict_analyze_channels,
+ get_decoding_contrasts,
+ get_eeg_reference,
+ get_sessions,
+ get_subjects,
)
from ..._decoding import LogReg, _handle_csp_args
-from ..._logging import logger, gen_log_kwargs
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits
+from ..._logging import gen_log_kwargs, logger
+from ..._parallel import get_parallel_backend, parallel_func
from ..._report import (
+ _imshow_tf,
_open_report,
- _sanitize_cond_tag,
_plot_full_epochs_decoding_scores,
- _imshow_tf,
+ _sanitize_cond_tag,
)
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
-def _prepare_labels(*, epochs: mne.BaseEpochs, contrast: Tuple[str, str]) -> np.ndarray:
+def _prepare_labels(*, epochs: mne.BaseEpochs, contrast: tuple[str, str]) -> np.ndarray:
"""Return the projection of the events_id on a boolean vector.
This projection is useful in the case of hierarchical events:
we project the different events contained in one condition into
just one label.
- Returns:
- --------
+ Returns
+ -------
A boolean numpy array containing the labels.
"""
epochs_cond_0 = epochs[contrast[0]]
@@ -79,8 +77,8 @@ def _prepare_labels(*, epochs: mne.BaseEpochs, contrast: Tuple[str, str]) -> np.
def prepare_epochs_and_y(
- *, epochs: mne.BaseEpochs, contrast: Tuple[str, str], cfg, fmin: float, fmax: float
-) -> Tuple[mne.BaseEpochs, np.ndarray]:
+ *, epochs: mne.BaseEpochs, contrast: tuple[str, str], cfg, fmin: float, fmax: float
+) -> tuple[mne.BaseEpochs, np.ndarray]:
"""Band-pass between, sub-select the desired epochs, and prepare y."""
epochs_filt = epochs.copy().pick(["meg", "eeg"])
@@ -112,7 +110,7 @@ def get_input_fnames_csp(
cfg: SimpleNamespace,
subject: str,
session: Optional[str],
- contrast: Tuple[str],
+ contrast: tuple[str],
) -> dict:
proc = _get_decoding_proc(config=cfg)
fname_epochs = BIDSPath(
@@ -143,8 +141,8 @@ def one_subject_decoding(
exec_params: SimpleNamespace,
subject: str,
session: str,
- contrast: Tuple[str, str],
- in_files: Dict[str, BIDSPath],
+ contrast: tuple[str, str],
+ in_files: dict[str, BIDSPath],
) -> dict:
"""Run one subject.
diff --git a/mne_bids_pipeline/steps/sensor/_06_make_cov.py b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
index 2cb3b8ebf..a9c211df4 100644
--- a/mne_bids_pipeline/steps/sensor/_06_make_cov.py
+++ b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
@@ -3,29 +3,29 @@
Covariance matrices are computed and saved.
"""
-from typing import Optional
from types import SimpleNamespace
+from typing import Optional
import mne
from mne_bids import BIDSPath
+from ..._config_import import _import_config
from ..._config_utils import (
+ _bids_kwargs,
+ _restrict_analyze_channels,
+ get_noise_cov_bids_path,
get_sessions,
get_subjects,
- get_noise_cov_bids_path,
- _bids_kwargs,
)
-from ..._config_import import _import_config
-from ..._config_utils import _restrict_analyze_channels
from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
-from ..._report import _open_report, _sanitize_cond_tag, _all_conditions
+from ..._report import _all_conditions, _open_report, _sanitize_cond_tag
from ..._run import (
- failsafe_run,
- save_logs,
- _sanitize_callable,
_prep_out_files,
+ _sanitize_callable,
_update_for_splits,
+ failsafe_run,
+ save_logs,
)
diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py
index 98e275336..7ac19e7de 100644
--- a/mne_bids_pipeline/steps/sensor/_99_group_average.py
+++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py
@@ -6,42 +6,41 @@
import os
import os.path as op
from functools import partial
-from typing import Optional, List, Tuple
from types import SimpleNamespace
-from ...typing import TypedDict
+from typing import Optional
+import mne
import numpy as np
import pandas as pd
-from scipy.io import loadmat, savemat
-
-import mne
from mne_bids import BIDSPath
+from scipy.io import loadmat, savemat
from ..._config_utils import (
- get_sessions,
- get_subjects,
- get_eeg_reference,
- get_decoding_contrasts,
_bids_kwargs,
- _restrict_analyze_channels,
_pl,
+ _restrict_analyze_channels,
+ get_decoding_contrasts,
+ get_eeg_reference,
+ get_sessions,
+ get_subjects,
)
from ..._decoding import _handle_csp_args
from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
-from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits
from ..._report import (
+ _all_conditions,
+ _contrasts_to_names,
_open_report,
- _sanitize_cond_tag,
- add_event_counts,
- add_csp_grand_average,
+ _plot_decoding_time_generalization,
_plot_full_epochs_decoding_scores,
_plot_time_by_time_decoding_scores_gavg,
+ _sanitize_cond_tag,
+ add_csp_grand_average,
+ add_event_counts,
plot_time_by_time_decoding_t_values,
- _plot_decoding_time_generalization,
- _contrasts_to_names,
- _all_conditions,
)
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
+from ...typing import TypedDict
def get_input_fnames_average_evokeds(
@@ -186,7 +185,7 @@ def _decoding_cluster_permutation_test(
cluster_forming_t_threshold: Optional[float],
n_permutations: int,
random_seed: int,
-) -> Tuple[np.ndarray, List[ClusterAcrossTime], int]:
+) -> tuple[np.ndarray, list[ClusterAcrossTime], int]:
"""Perform a cluster permutation test on decoding scores.
The clusters are formed across time points.
@@ -625,7 +624,7 @@ def get_input_files_average_full_epochs_report(
cfg: SimpleNamespace,
subject: str,
session: Optional[str],
- decoding_contrasts: List[List[str]],
+ decoding_contrasts: list[list[str]],
) -> dict:
in_files = dict()
for contrast in decoding_contrasts:
@@ -649,7 +648,7 @@ def average_full_epochs_report(
exec_params: SimpleNamespace,
subject: str,
session: Optional[str],
- decoding_contrasts: List[List[str]],
+ decoding_contrasts: list[list[str]],
in_files: dict,
) -> dict:
"""Add decoding results to the grand average report."""
diff --git a/mne_bids_pipeline/steps/sensor/__init__.py b/mne_bids_pipeline/steps/sensor/__init__.py
index fc76bf551..848efadf8 100644
--- a/mne_bids_pipeline/steps/sensor/__init__.py
+++ b/mne_bids_pipeline/steps/sensor/__init__.py
@@ -1,12 +1,14 @@
"""Sensor-space analysis."""
-from . import _01_make_evoked
-from . import _02_decoding_full_epochs
-from . import _03_decoding_time_by_time
-from . import _04_time_frequency
-from . import _05_decoding_csp
-from . import _06_make_cov
-from . import _99_group_average
+from . import (
+ _01_make_evoked,
+ _02_decoding_full_epochs,
+ _03_decoding_time_by_time,
+ _04_time_frequency,
+ _05_decoding_csp,
+ _06_make_cov,
+ _99_group_average,
+)
_STEPS = (
_01_make_evoked,
diff --git a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
index fc4051c9f..da2b64890 100644
--- a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
+++ b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
@@ -11,17 +11,17 @@
import mne
from ..._config_utils import (
- get_fs_subject,
- get_subjects,
- get_sessions,
+ _bids_kwargs,
_get_bem_conductivity,
+ get_fs_subject,
get_fs_subjects_dir,
- _bids_kwargs,
+ get_sessions,
+ get_subjects,
)
-from ..._logging import logger, gen_log_kwargs
+from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
-from ..._run import failsafe_run, save_logs, _prep_out_files
from ..._report import _open_report, _render_bem
+from ..._run import _prep_out_files, failsafe_run, save_logs
def _get_bem_params(cfg: SimpleNamespace):
diff --git a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
index 67f0c2737..a09d063e2 100644
--- a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
+++ b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
@@ -10,13 +10,13 @@
from ..._config_utils import (
_get_bem_conductivity,
- get_fs_subjects_dir,
get_fs_subject,
+ get_fs_subjects_dir,
get_subjects,
)
-from ..._logging import logger, gen_log_kwargs
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._run import failsafe_run, save_logs, _prep_out_files
+from ..._logging import gen_log_kwargs, logger
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._run import _prep_out_files, failsafe_run, save_logs
def get_input_fnames_make_bem_solution(
diff --git a/mne_bids_pipeline/steps/source/_03_setup_source_space.py b/mne_bids_pipeline/steps/source/_03_setup_source_space.py
index 4710750f9..64e7314ed 100644
--- a/mne_bids_pipeline/steps/source/_03_setup_source_space.py
+++ b/mne_bids_pipeline/steps/source/_03_setup_source_space.py
@@ -8,9 +8,9 @@
import mne
from ..._config_utils import get_fs_subject, get_fs_subjects_dir, get_subjects
-from ..._logging import logger, gen_log_kwargs
-from ..._run import failsafe_run, save_logs, _prep_out_files
-from ..._parallel import parallel_func, get_parallel_backend
+from ..._logging import gen_log_kwargs, logger
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._run import _prep_out_files, failsafe_run, save_logs
def get_input_fnames_setup_source_space(*, cfg, subject):
diff --git a/mne_bids_pipeline/steps/source/_04_make_forward.py b/mne_bids_pipeline/steps/source/_04_make_forward.py
index a2c1fc211..28586b742 100644
--- a/mne_bids_pipeline/steps/source/_04_make_forward.py
+++ b/mne_bids_pipeline/steps/source/_04_make_forward.py
@@ -6,27 +6,26 @@
from types import SimpleNamespace
from typing import Optional
-import numpy as np
-
import mne
+import numpy as np
from mne.coreg import Coregistration
from mne_bids import BIDSPath, get_head_mri_trans
+from ..._config_import import _import_config
from ..._config_utils import (
- get_fs_subject,
- get_subjects,
+ _bids_kwargs,
_get_bem_conductivity,
+ _meg_in_ch_types,
+ get_fs_subject,
get_fs_subjects_dir,
get_runs,
- _meg_in_ch_types,
get_sessions,
- _bids_kwargs,
+ get_subjects,
)
-from ..._config_import import _import_config
-from ..._logging import logger, gen_log_kwargs
+from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _open_report, _render_bem
-from ..._run import failsafe_run, save_logs, _prep_out_files
+from ..._run import _prep_out_files, failsafe_run, save_logs
def _prepare_trans_template(
diff --git a/mne_bids_pipeline/steps/source/_05_make_inverse.py b/mne_bids_pipeline/steps/source/_05_make_inverse.py
index 449675817..54f9fd0ae 100644
--- a/mne_bids_pipeline/steps/source/_05_make_inverse.py
+++ b/mne_bids_pipeline/steps/source/_05_make_inverse.py
@@ -8,25 +8,25 @@
import mne
from mne.minimum_norm import (
- make_inverse_operator,
apply_inverse,
+ make_inverse_operator,
write_inverse_operator,
)
from mne_bids import BIDSPath
from ..._config_utils import (
+ _bids_kwargs,
+ get_fs_subject,
+ get_fs_subjects_dir,
get_noise_cov_bids_path,
+ get_sessions,
get_subjects,
sanitize_cond_name,
- get_sessions,
- get_fs_subjects_dir,
- get_fs_subject,
- _bids_kwargs,
)
-from ..._logging import logger, gen_log_kwargs
+from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
-from ..._report import _open_report, _sanitize_cond_tag, _all_conditions
-from ..._run import failsafe_run, save_logs, _sanitize_callable, _prep_out_files
+from ..._report import _all_conditions, _open_report, _sanitize_cond_tag
+from ..._run import _prep_out_files, _sanitize_callable, failsafe_run, save_logs
def get_input_fnames_inverse(
diff --git a/mne_bids_pipeline/steps/source/_99_group_average.py b/mne_bids_pipeline/steps/source/_99_group_average.py
index 9e855d6df..eb26c1c5f 100644
--- a/mne_bids_pipeline/steps/source/_99_group_average.py
+++ b/mne_bids_pipeline/steps/source/_99_group_average.py
@@ -6,23 +6,22 @@
from types import SimpleNamespace
from typing import Optional
-import numpy as np
-
import mne
+import numpy as np
from mne_bids import BIDSPath
from ..._config_utils import (
+ _bids_kwargs,
+ get_fs_subject,
get_fs_subjects_dir,
+ get_sessions,
get_subjects,
sanitize_cond_name,
- get_fs_subject,
- get_sessions,
- _bids_kwargs,
)
-from ..._logging import logger, gen_log_kwargs
+from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _all_conditions, _open_report
-from ..._run import failsafe_run, save_logs, _prep_out_files
+from ..._run import _prep_out_files, failsafe_run, save_logs
def _stc_path(
diff --git a/mne_bids_pipeline/steps/source/__init__.py b/mne_bids_pipeline/steps/source/__init__.py
index c748f7f8b..89b757670 100644
--- a/mne_bids_pipeline/steps/source/__init__.py
+++ b/mne_bids_pipeline/steps/source/__init__.py
@@ -1,11 +1,13 @@
"""Source-space analysis."""
-from . import _01_make_bem_surfaces
-from . import _02_make_bem_solution
-from . import _03_setup_source_space
-from . import _04_make_forward
-from . import _05_make_inverse
-from . import _99_group_average
+from . import (
+ _01_make_bem_surfaces,
+ _02_make_bem_solution,
+ _03_setup_source_space,
+ _04_make_forward,
+ _05_make_inverse,
+ _99_group_average,
+)
_STEPS = (
_01_make_bem_surfaces,
diff --git a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
index 47fcb5846..3adfbab82 100644
--- a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
+++ b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
@@ -1,5 +1,4 @@
-"""
-ERP CORE
+"""ERP CORE.
This example demonstrates how to process 5 participants from the
[ERP CORE](https://erpinfo.org/erp-core) dataset. It shows how to obtain 7 ERP
@@ -24,9 +23,10 @@
[https://doi.org/10.1016/j.neuroimage.2020.117465](https://doi.org/10.1016/j.neuroimage.2020.117465)
"""
import argparse
-import mne
import sys
+import mne
+
study_name = "ERP-CORE"
bids_root = "~/mne_data/ERP_CORE"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ERP_CORE"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000117.py b/mne_bids_pipeline/tests/configs/config_ds000117.py
index b46db99bd..65e213e24 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000117.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000117.py
@@ -1,6 +1,4 @@
-"""
-Faces dataset
-"""
+"""Faces dataset."""
study_name = "ds000117"
bids_root = "~/mne_data/ds000117"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000246.py b/mne_bids_pipeline/tests/configs/config_ds000246.py
index 6cb3a8148..0c516796d 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000246.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000246.py
@@ -1,5 +1,4 @@
-"""
-Brainstorm - Auditory Dataset.
+"""Brainstorm - Auditory Dataset.
See https://openneuro.org/datasets/ds000246/versions/1.0.0 for more
information.
diff --git a/mne_bids_pipeline/tests/configs/config_ds000247.py b/mne_bids_pipeline/tests/configs/config_ds000247.py
index 8d2b0451f..0a321d8fe 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000247.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000247.py
@@ -1,9 +1,6 @@
-"""
-OMEGA Resting State Sample Data
-"""
+"""OMEGA Resting State Sample Data."""
import numpy as np
-
study_name = "ds000247"
bids_root = f"~/mne_data/{study_name}"
deriv_root = f"~/mne_data/derivatives/mne-bids-pipeline/{study_name}"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py b/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py
index f09fdc6d5..9b77f36b5 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py
@@ -1,6 +1,4 @@
-"""
-MNE Sample Data: BEM from FLASH images
-"""
+"""MNE Sample Data: BEM from FLASH images."""
study_name = "ds000248"
bids_root = "~/mne_data/ds000248"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000248_FLASH_BEM"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_T1_BEM.py b/mne_bids_pipeline/tests/configs/config_ds000248_T1_BEM.py
index df315e035..76fee45e3 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_T1_BEM.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_T1_BEM.py
@@ -1,6 +1,4 @@
-"""
-MNE Sample Data: BEM from T1 images
-"""
+"""MNE Sample Data: BEM from T1 images."""
study_name = "ds000248"
bids_root = "~/mne_data/ds000248"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_base.py b/mne_bids_pipeline/tests/configs/config_ds000248_base.py
index b80b6f0f0..6ffd9644e 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_base.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_base.py
@@ -1,6 +1,4 @@
-"""
-MNE Sample Data: M/EEG combined processing
-"""
+"""MNE Sample Data: M/EEG combined processing."""
import mne
study_name = "ds000248"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_coreg_surfaces.py b/mne_bids_pipeline/tests/configs/config_ds000248_coreg_surfaces.py
index 9262fdcb8..475ca5d67 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_coreg_surfaces.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_coreg_surfaces.py
@@ -1,6 +1,4 @@
-"""
-MNE Sample Data: Head surfaces from FreeSurfer surfaces for coregistration step
-"""
+"""MNE Sample Data: Head surfaces from FreeSurfer surfaces for coregistration step."""
study_name = "ds000248"
bids_root = "~/mne_data/ds000248"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_ica.py b/mne_bids_pipeline/tests/configs/config_ds000248_ica.py
index 176a2f592..ebc0ddc88 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_ica.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_ica.py
@@ -1,6 +1,4 @@
-"""
-MNE Sample Data: ICA
-"""
+"""MNE Sample Data: ICA."""
study_name = 'MNE "sample" dataset'
bids_root = "~/mne_data/ds000248"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000248_ica"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_no_mri.py b/mne_bids_pipeline/tests/configs/config_ds000248_no_mri.py
index 9941d2842..3b83b0e6e 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_no_mri.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_no_mri.py
@@ -1,6 +1,4 @@
-"""
-MNE Sample Data: Using the `fsaverage` template MRI
-"""
+"""MNE Sample Data: Using the `fsaverage` template MRI."""
study_name = "ds000248"
bids_root = "~/mne_data/ds000248"
diff --git a/mne_bids_pipeline/tests/configs/config_ds001810.py b/mne_bids_pipeline/tests/configs/config_ds001810.py
index 508a99e64..606fee3c8 100644
--- a/mne_bids_pipeline/tests/configs/config_ds001810.py
+++ b/mne_bids_pipeline/tests/configs/config_ds001810.py
@@ -1,6 +1,4 @@
-"""
-tDCS EEG
-"""
+"""tDCS EEG."""
study_name = "ds001810"
bids_root = "~/mne_data/ds001810"
diff --git a/mne_bids_pipeline/tests/configs/config_ds003104.py b/mne_bids_pipeline/tests/configs/config_ds003104.py
index c88d07161..2414371c0 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003104.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003104.py
@@ -1,5 +1,4 @@
-"""Somato
-"""
+"""Somato."""
study_name = "MNE-somato-data-anonymized"
bids_root = "~/mne_data/ds003104"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds003104"
diff --git a/mne_bids_pipeline/tests/configs/config_ds003392.py b/mne_bids_pipeline/tests/configs/config_ds003392.py
index 0decbacc9..756d36fbc 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003392.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003392.py
@@ -1,6 +1,4 @@
-"""
-hMT+ Localizer
-"""
+"""hMT+ Localizer."""
study_name = "localizer"
bids_root = "~/mne_data/ds003392"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds003392"
diff --git a/mne_bids_pipeline/tests/configs/config_ds003775.py b/mne_bids_pipeline/tests/configs/config_ds003775.py
index 4dae88993..980bed232 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003775.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003775.py
@@ -1,6 +1,4 @@
-"""
-SRM Resting-state EEG
-"""
+"""SRM Resting-state EEG."""
study_name = "ds003775"
bids_root = "~/mne_data/ds003775"
diff --git a/mne_bids_pipeline/tests/configs/config_ds004107.py b/mne_bids_pipeline/tests/configs/config_ds004107.py
index 7a32d952c..6e0eb1cc6 100644
--- a/mne_bids_pipeline/tests/configs/config_ds004107.py
+++ b/mne_bids_pipeline/tests/configs/config_ds004107.py
@@ -1,5 +1,4 @@
-"""
-MIND DATA
+"""MIND DATA.
M.P. Weisend, F.M. Hanlon, R. Montaño, S.P. Ahlfors, A.C. Leuthold,
D. Pantazis, J.C. Mosher, A.P. Georgopoulos, M.S. Hämäläinen, C.J.
diff --git a/mne_bids_pipeline/tests/configs/config_ds004229.py b/mne_bids_pipeline/tests/configs/config_ds004229.py
index e4ca6d449..956f92010 100644
--- a/mne_bids_pipeline/tests/configs/config_ds004229.py
+++ b/mne_bids_pipeline/tests/configs/config_ds004229.py
@@ -1,5 +1,4 @@
-"""
-Single-subject infant dataset for testing maxwell_filter with movecomp.
+"""Single-subject infant dataset for testing maxwell_filter with movecomp.
https://openneuro.org/datasets/ds004229
"""
diff --git a/mne_bids_pipeline/tests/configs/config_eeg_matchingpennies.py b/mne_bids_pipeline/tests/configs/config_eeg_matchingpennies.py
index 643e51799..fbe34b11a 100644
--- a/mne_bids_pipeline/tests/configs/config_eeg_matchingpennies.py
+++ b/mne_bids_pipeline/tests/configs/config_eeg_matchingpennies.py
@@ -1,6 +1,4 @@
-"""
-Matchingpennies EEG experiment
-"""
+"""Matchingpennies EEG experiment."""
study_name = "eeg_matchingpennies"
bids_root = "~/mne_data/eeg_matchingpennies"
diff --git a/mne_bids_pipeline/tests/conftest.py b/mne_bids_pipeline/tests/conftest.py
index fa2014634..bd3a1f485 100644
--- a/mne_bids_pipeline/tests/conftest.py
+++ b/mne_bids_pipeline/tests/conftest.py
@@ -52,7 +52,8 @@ def pytest_configure(config):
# self._add_ica(
#../python_env/lib/python3.10/site-packages/mne/report/report.py:1872: in _add_ica
# self._add_ica_artifact_sources(
- #../python_env/lib/python3.10/site-packages/mne/report/report.py:1713: in _add_ica_artifact_sources
+ #../python_env/lib/python3.10/site-packages/mne/report/report.py:1713:
+ # in _add_ica_artifact_sources
# self._add_figure(
always:constrained_layout not applied.*:UserWarning
ignore:datetime\.datetime\.utcfromtimestamp.*:DeprecationWarning
diff --git a/mne_bids_pipeline/tests/datasets.py b/mne_bids_pipeline/tests/datasets.py
index b50454251..f96a01042 100644
--- a/mne_bids_pipeline/tests/datasets.py
+++ b/mne_bids_pipeline/tests/datasets.py
@@ -1,6 +1,6 @@
"""Definition of the testing datasets."""
-from typing import Dict, List, TypedDict
+from typing import TypedDict
# If not supplied below, the effective defaults are listed in comments
@@ -9,12 +9,12 @@ class DATASET_OPTIONS_T(TypedDict, total=False):
openneuro: str # ""
osf: str # ""
web: str # ""
- include: List[str] # []
- exclude: List[str] # []
+ include: list[str] # []
+ exclude: list[str] # []
hash: str # ""
-DATASET_OPTIONS: Dict[str, DATASET_OPTIONS_T] = {
+DATASET_OPTIONS: dict[str, DATASET_OPTIONS_T] = {
"ERP_CORE": {
# original dataset: "osf": "9f5w7"
"web": "https://osf.io/3zk6n/download?version=2",
diff --git a/mne_bids_pipeline/tests/test_cli.py b/mne_bids_pipeline/tests/test_cli.py
index 607cbdd67..45532c3ce 100644
--- a/mne_bids_pipeline/tests/test_cli.py
+++ b/mne_bids_pipeline/tests/test_cli.py
@@ -2,7 +2,9 @@
import importlib
import sys
+
import pytest
+
from mne_bids_pipeline._main import main
diff --git a/mne_bids_pipeline/tests/test_documented.py b/mne_bids_pipeline/tests/test_documented.py
index 097fc1032..dd90f7ad5 100644
--- a/mne_bids_pipeline/tests/test_documented.py
+++ b/mne_bids_pipeline/tests/test_documented.py
@@ -1,13 +1,14 @@
"""Test that all config values are documented."""
import ast
-from pathlib import Path
import os
import re
+from pathlib import Path
+
import yaml
+from mne_bids_pipeline._config_import import _get_default_config
from mne_bids_pipeline.tests.datasets import DATASET_OPTIONS
from mne_bids_pipeline.tests.test_run import TEST_SUITE
-from mne_bids_pipeline._config_import import _get_default_config
root_path = Path(__file__).parent.parent
@@ -15,7 +16,7 @@
def test_options_documented():
"""Test that all options are suitably documented."""
# use ast to parse _config.py for assignments
- with open(root_path / "_config.py", "r") as fid:
+ with open(root_path / "_config.py") as fid:
contents = fid.read()
contents = ast.parse(contents)
in_config = [
@@ -41,7 +42,7 @@ def test_options_documented():
if not fname.endswith(".md"):
continue
# This is a .md file
- with open(Path(dirpath) / fname, "r") as fid:
+ with open(Path(dirpath) / fname) as fid:
for line in fid:
if not line.startswith(key):
continue
@@ -67,7 +68,7 @@ def test_datasets_in_doc():
# So let's make sure they stay in sync.
# 1. Read cache, test, etc. entries from CircleCI
- with open(root_path.parent / ".circleci" / "config.yml", "r") as fid:
+ with open(root_path.parent / ".circleci" / "config.yml") as fid:
circle_yaml_src = fid.read()
circle_yaml = yaml.safe_load(circle_yaml_src)
caches = [job[6:] for job in circle_yaml["jobs"] if job.startswith("cache_")]
@@ -134,7 +135,7 @@ def ignore_unknown(self, node):
None, SafeLoaderIgnoreUnknown.ignore_unknown
)
- with open(root_path.parent / "docs" / "mkdocs.yml", "r") as fid:
+ with open(root_path.parent / "docs" / "mkdocs.yml") as fid:
examples = yaml.load(fid.read(), Loader=SafeLoaderIgnoreUnknown)
examples = [n for n in examples["nav"] if list(n)[0] == "Examples"][0]
examples = [ex for ex in examples["Examples"] if isinstance(ex, str)]
diff --git a/mne_bids_pipeline/tests/test_run.py b/mne_bids_pipeline/tests/test_run.py
index b394d6f0b..4eee1aa02 100644
--- a/mne_bids_pipeline/tests/test_run.py
+++ b/mne_bids_pipeline/tests/test_run.py
@@ -1,14 +1,15 @@
"""Download test data and run a test suite."""
-import sys
+import os
import shutil
+import sys
+from collections.abc import Collection
from pathlib import Path
-from typing import Collection, Dict, Optional, TypedDict
-import os
+from typing import Optional, TypedDict
import pytest
-from mne_bids_pipeline._main import main
from mne_bids_pipeline._download import main as download_main
+from mne_bids_pipeline._main import main
BIDS_PIPELINE_DIR = Path(__file__).absolute().parents[1]
@@ -24,12 +25,12 @@ class _TestOptionsT(TypedDict, total=False):
config: str # f"config_{key}.py"
steps: Collection[str] # ("preprocessing", "sensor")
task: Optional[str] # None
- env: Dict[str, str] # {}
+ env: dict[str, str] # {}
requires: Collection[str] # ()
extra_config: str # ""
-TEST_SUITE: Dict[str, _TestOptionsT] = {
+TEST_SUITE: dict[str, _TestOptionsT] = {
"ds003392": {},
"ds004229": {},
"ds001971": {},
diff --git a/mne_bids_pipeline/tests/test_validation.py b/mne_bids_pipeline/tests/test_validation.py
index 25d5abdaa..c47432155 100644
--- a/mne_bids_pipeline/tests/test_validation.py
+++ b/mne_bids_pipeline/tests/test_validation.py
@@ -1,4 +1,5 @@
import pytest
+
from mne_bids_pipeline._config_import import _import_config
diff --git a/mne_bids_pipeline/typing.py b/mne_bids_pipeline/typing.py
index 7b989309c..c52484f15 100644
--- a/mne_bids_pipeline/typing.py
+++ b/mne_bids_pipeline/typing.py
@@ -2,31 +2,30 @@
import pathlib
import sys
-from typing import Union, List, Dict
-from typing_extensions import Annotated
+from typing import Annotated, Union
if sys.version_info < (3, 12):
from typing_extensions import TypedDict
else:
from typing import TypedDict
+import mne
import numpy as np
from numpy.typing import ArrayLike
from pydantic import PlainValidator
-import mne
PathLike = Union[str, pathlib.Path]
class ArbitraryContrast(TypedDict):
name: str
- conditions: List[str]
- weights: List[float]
+ conditions: list[str]
+ weights: list[float]
class LogKwargsT(TypedDict):
msg: str
- extra: Dict[str, str]
+ extra: dict[str, str]
class ReferenceRunParams(TypedDict):
diff --git a/pyproject.toml b/pyproject.toml
index c3c5dbb2b..bac831873 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -116,7 +116,17 @@ testpaths = ["mne_bids_pipeline"]
junit_family = "xunit2"
[tool.ruff]
+select = ["A", "B006", "D", "E", "F", "I", "W", "UP"]
exclude = ["**/freesurfer/contrib", "dist/", "build/"]
+ignore = [
+ "D100", # Missing docstring in public module
+ "D101", # Missing docstring in public class
+ "D103", # Missing docstring in public function
+ "D104", # Missing docstring in public package
+ "D413", # Missing blank line after last section
+ "UP031", # Use format specifiers instead of percent format
+ "UP035", # Import Iterable from collections.abc
+]
-[tool.black]
-exclude = "(.*/freesurfer/contrib/.*)|(dist/)|(build/)"
+[tool.ruff.pydocstyle]
+convention = "numpy"
From ce233bdd53b2e8df31a9b82898efd432cfa14d71 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Sat, 27 Jan 2024 11:58:13 -0500
Subject: [PATCH 059/132] ENH: Add artifact regression (#837)
---
.circleci/config.yml | 56 ++++++
docs/mkdocs.yml | 3 +-
docs/source/examples/gen_examples.py | 14 +-
docs/source/settings/preprocessing/ssp_ica.md | 1 +
docs/source/v1.6.md.inc | 4 +-
mne_bids_pipeline/_config.py | 57 ++++--
mne_bids_pipeline/_config_import.py | 54 ++----
mne_bids_pipeline/_download.py | 15 +-
mne_bids_pipeline/_import_data.py | 33 ++--
mne_bids_pipeline/_report.py | 8 +-
mne_bids_pipeline/_run.py | 9 +-
.../preprocessing/_04_frequency_filter.py | 34 +++-
.../preprocessing/_05_regress_artifact.py | 172 ++++++++++++++++++
.../steps/preprocessing/_06a_run_ica.py | 3 +-
.../steps/preprocessing/_06b_run_ssp.py | 5 +-
...{_05_make_epochs.py => _07_make_epochs.py} | 0
.../{_07a_apply_ica.py => _08a_apply_ica.py} | 2 +-
.../{_07b_apply_ssp.py => _08b_apply_ssp.py} | 4 +-
.../{_08_ptp_reject.py => _09_ptp_reject.py} | 0
.../steps/preprocessing/__init__.py | 18 +-
.../configs/config_MNE_phantom_KIT_data.py | 28 +++
mne_bids_pipeline/tests/datasets.py | 4 +
mne_bids_pipeline/tests/test_run.py | 3 +
mne_bids_pipeline/tests/test_validation.py | 2 +-
24 files changed, 427 insertions(+), 102 deletions(-)
create mode 100644 mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
rename mne_bids_pipeline/steps/preprocessing/{_05_make_epochs.py => _07_make_epochs.py} (100%)
rename mne_bids_pipeline/steps/preprocessing/{_07a_apply_ica.py => _08a_apply_ica.py} (99%)
rename mne_bids_pipeline/steps/preprocessing/{_07b_apply_ssp.py => _08b_apply_ssp.py} (96%)
rename mne_bids_pipeline/steps/preprocessing/{_08_ptp_reject.py => _09_ptp_reject.py} (100%)
create mode 100644 mne_bids_pipeline/tests/configs/config_MNE_phantom_KIT_data.py
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 62e687cba..ceb51dfbf 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -297,6 +297,26 @@ jobs:
paths:
- ~/mne_data/eeg_matchingpennies
+ cache_MNE-phantom-KIT-data:
+ <<: *imageconfig
+ steps:
+ - attach_workspace:
+ at: ~/
+ - restore_cache:
+ keys:
+ - data-cache-MNE-phantom-KIT-data-1
+ - bash_env
+ - gitconfig # email address is needed for datalad
+ - run:
+ name: Get MNE-phantom-KIT-data
+ command: |
+ $DOWNLOAD_DATA MNE-phantom-KIT-data
+ - codecov/upload
+ - save_cache:
+ key: data-cache-MNE-phantom-KIT-data-1
+ paths:
+ - ~/mne_data/MNE-phantom-KIT-data
+
cache_ERP_CORE:
<<: *imageconfig
steps:
@@ -765,6 +785,32 @@ jobs:
paths:
- mne_data/derivatives/mne-bids-pipeline/eeg_matchingpennies/*/*/*.html
+ test_MNE-phantom-KIT-data:
+ <<: *imageconfig
+ steps:
+ - attach_workspace:
+ at: ~/
+ - bash_env
+ - restore_cache:
+ keys:
+ - data-cache-MNE-phantom-KIT-data-1
+ - run:
+ name: test MNE-phantom-KIT-data
+ command: $RUN_TESTS MNE-phantom-KIT-data
+ - codecov/upload
+ - store_test_results:
+ path: ./test-results
+ - store_artifacts:
+ path: ./test-results
+ destination: test-results
+ - store_artifacts:
+ path: /home/circleci/reports/MNE-phantom-KIT-data
+ destination: reports/MNE-phantom-KIT-data
+ - persist_to_workspace:
+ root: ~/
+ paths:
+ - mne_data/derivatives/mne-bids-pipeline/MNE-phantom-KIT-data/*/*/*.html
+
test_ERP_CORE_N400:
<<: *imageconfig
resource_class: large
@@ -1191,6 +1237,15 @@ workflows:
- cache_eeg_matchingpennies
<<: *filter_tags
+ - cache_MNE-phantom-KIT-data:
+ requires:
+ - setup_env
+ <<: *filter_tags
+ - test_MNE-phantom-KIT-data:
+ requires:
+ - cache_MNE-phantom-KIT-data
+ <<: *filter_tags
+
- cache_ERP_CORE:
requires:
- setup_env
@@ -1242,6 +1297,7 @@ workflows:
- test_ds003392
- test_ds004229
- test_eeg_matchingpennies
+ - test_MNE-phantom-KIT-data
- test_ERP_CORE_N400
- test_ERP_CORE_ERN
- test_ERP_CORE_LRP
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 8763aa9c0..29107ff32 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -90,7 +90,7 @@ nav:
- Epoching: settings/preprocessing/epochs.md
- Artifact removal:
- Stimulation artifact: settings/preprocessing/stim_artifact.md
- - SSP & ICA: settings/preprocessing/ssp_ica.md
+ - SSP, ICA, and artifact regression: settings/preprocessing/ssp_ica.md
- Amplitude-based artifact rejection: settings/preprocessing/artifacts.md
- Sensor-level analysis:
- Condition contrasts: settings/sensor/contrasts.md
@@ -116,6 +116,7 @@ nav:
- examples/ds000248_no_mri.md
- examples/ds003104.md
- examples/eeg_matchingpennies.md
+ - examples/MNE-phantom-KIT-data.md
- examples/ds001810.md
- examples/ds000117.md
- examples/ds003775.md
diff --git a/docs/source/examples/gen_examples.py b/docs/source/examples/gen_examples.py
index 1f2514274..b55e526d8 100755
--- a/docs/source/examples/gen_examples.py
+++ b/docs/source/examples/gen_examples.py
@@ -63,6 +63,8 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict:
key = "Maxwell filter"
funcs[key] = funcs[key] or config.use_maxwell_filter
funcs["Frequency filter"] = config.l_freq or config.h_freq
+ key = "Artifact regression"
+ funcs[key] = funcs[key] or (config.regress_artifact is not None)
key = "SSP"
funcs[key] = funcs[key] or (config.spatial_filter == "ssp")
key = "ICA"
@@ -144,6 +146,7 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict:
logger.warning(f"Dataset {dataset_name} has no HTML report.")
continue
+ assert dataset_options_key in DATASET_OPTIONS, dataset_options_key
options = DATASET_OPTIONS[dataset_options_key].copy() # we modify locally
report_str = "\n## Generated output\n\n"
@@ -200,13 +203,18 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict:
f"{fname.name} :fontawesome-solid-square-poll-vertical:\n\n"
)
- assert sum(key in options for key in ("openneuro", "git", "web", "datalad")) == 1
+ assert (
+ sum(key in options for key in ("openneuro", "git", "web", "datalad", "mne"))
+ == 1
+ )
if "openneuro" in options:
url = f'https://openneuro.org/datasets/{options["openneuro"]}'
elif "git" in options:
url = options["git"]
elif "web" in options:
url = options["web"]
+ elif "mne" in options:
+ url = f"https://mne.tools/dev/generated/mne.datasets.{options['mne']}.data_path.html" # noqa: E501
else:
assert "datalad" in options # guaranteed above
url = ""
@@ -246,7 +254,9 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict:
# TODO: For things like ERP_CORE_ERN, decoding_csp are not populated
# properly by the root config
- config_path = root / "tests" / "configs" / f"config_{dataset_name}.py"
+ config_path = (
+ root / "tests" / "configs" / f"config_{dataset_name.replace('-', '_')}.py"
+ )
config = config_path.read_text(encoding="utf-8-sig").strip()
descr_end_idx = config[2:].find('"""')
config_descr = "# " + config[: descr_end_idx + 1].replace('"""', "").strip()
diff --git a/docs/source/settings/preprocessing/ssp_ica.md b/docs/source/settings/preprocessing/ssp_ica.md
index b132ef4bf..f25110729 100644
--- a/docs/source/settings/preprocessing/ssp_ica.md
+++ b/docs/source/settings/preprocessing/ssp_ica.md
@@ -11,6 +11,7 @@ tags:
::: mne_bids_pipeline._config
options:
members:
+ - regress_artifact
- spatial_filter
- min_ecg_epochs
- min_eog_epochs
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index cf5596cb1..afb7835c3 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -2,9 +2,9 @@
## vX.Y.0 (unreleased)
-[//]: # (### :new: New features & enhancements)
+### :new: New features & enhancements
-[//]: # (- Whatever (#000 by @whoever))
+- Added [`regress_artifact`][mne_bids_pipeline._config.regress_artifact] to allow artifact regression (e.g., of MEG reference sensors in KIT systems) (#837 by @larsoner)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 652e5ebfb..e3c7626bb 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1,7 +1,8 @@
# Default settings for data processing and analysis.
-from typing import Callable, Iterable, Literal, Optional, Union
+from typing import Annotated, Any, Callable, Literal, Optional, Sequence, Union
+from annotated_types import Ge, Interval, Len
from mne import Covariance
from mne_bids import BIDSPath
@@ -94,7 +95,7 @@
The task to process.
"""
-runs: Union[Iterable, Literal["all"]] = "all"
+runs: Union[Sequence, Literal["all"]] = "all"
"""
The runs to process. If `'all'`, will process all runs found in the
BIDS dataset.
@@ -143,7 +144,7 @@
The BIDS `space` entity.
"""
-plot_psd_for_runs: Union[Literal["all"], Iterable[str]] = "all"
+plot_psd_for_runs: Union[Literal["all"], Sequence[str]] = "all"
"""
For which runs to add a power spectral density (PSD) plot to the generated
report. This can take a considerable amount of time if you have many long
@@ -151,7 +152,7 @@
plotting.
"""
-subjects: Union[Iterable[str], Literal["all"]] = "all"
+subjects: Union[Sequence[str], Literal["all"]] = "all"
"""
Subjects to analyze. If `'all'`, include all subjects. To only
include a subset of subjects, pass a list of their identifiers. Even
@@ -171,7 +172,7 @@
```
"""
-exclude_subjects: Iterable[str] = []
+exclude_subjects: Sequence[str] = []
"""
Specify subjects to exclude from analysis. The MEG empty-room mock-subject
is automatically excluded from regular analysis.
@@ -201,7 +202,7 @@
covariance (via `noise_cov='rest'`).
"""
-ch_types: Iterable[Literal["meg", "mag", "grad", "eeg"]] = []
+ch_types: Annotated[Sequence[Literal["meg", "mag", "grad", "eeg"]], Len(1, 4)] = []
"""
The channel types to consider.
@@ -252,7 +253,7 @@
```
"""
-eog_channels: Optional[Iterable[str]] = None
+eog_channels: Optional[Sequence[str]] = None
"""
Specify EOG channels to use, or create virtual EOG channels.
@@ -320,7 +321,7 @@
```
"""
-eeg_reference: Union[Literal["average"], str, Iterable["str"]] = "average"
+eeg_reference: Union[Literal["average"], str, Sequence[str]] = "average"
"""
The EEG reference to use. If `average`, will use the average reference,
i.e. the average across all channels. If a string, must be the name of a single
@@ -371,7 +372,7 @@
```
"""
-drop_channels: Iterable[str] = []
+drop_channels: Sequence[str] = []
"""
Names of channels to remove from the data. This can be useful, for example,
if you have added a new bipolar channel via `eeg_bipolar_channels` and now wish
@@ -385,7 +386,7 @@
"""
analyze_channels: Union[
- Literal["all"], Literal["ch_types"], Iterable["str"]
+ Literal["all"], Literal["ch_types"], Sequence["str"]
] = "ch_types"
"""
The names of the channels to analyze during ERP/ERF and time-frequency analysis
@@ -789,7 +790,7 @@
Keep it `None` if no lowpass filtering should be applied.
"""
-notch_freq: Optional[Union[float, Iterable[float]]] = None
+notch_freq: Optional[Union[float, Sequence[float]]] = None
"""
Notch filter frequency. More than one frequency can be supplied, e.g. to remove
harmonics. Keep it `None` if no notch filter should be applied.
@@ -827,7 +828,7 @@
Specifies the transition bandwidth of the notch filter. The default is `1.`.
"""
-notch_widths: Optional[Union[float, Iterable[float]]] = None
+notch_widths: Optional[Union[float, Sequence[float]]] = None
"""
Specifies the width of each stop band. `None` uses the MNE default.
"""
@@ -931,7 +932,7 @@
window for metadata generation.
"""
-epochs_metadata_keep_first: Optional[Iterable[str]] = None
+epochs_metadata_keep_first: Optional[Sequence[str]] = None
"""
Event groupings using hierarchical event descriptors (HEDs) for which to store
the time of the **first** occurrence of any event of this group in a new column
@@ -959,7 +960,7 @@
and `first_stimulus`.
"""
-epochs_metadata_keep_last: Optional[Iterable[str]] = None
+epochs_metadata_keep_last: Optional[Sequence[str]] = None
"""
Same as `epochs_metadata_keep_first`, but for keeping the **last**
occurrence of matching event types. The columns indicating the event types
@@ -979,7 +980,7 @@
```
""" # noqa: E501
-conditions: Optional[Union[Iterable[str], dict[str, str]]] = None
+conditions: Optional[Union[Sequence[str], dict[str, str]]] = None
"""
The time-locked events based on which to create evoked responses.
This can either be name of the experimental condition as specified in the
@@ -1058,7 +1059,7 @@
```
"""
-contrasts: Iterable[Union[tuple[str, str], ArbitraryContrast]] = []
+contrasts: Sequence[Union[tuple[str, str], ArbitraryContrast]] = []
"""
The conditions to contrast via a subtraction of ERPs / ERFs. The list elements
can either be tuples or dictionaries (or a mix of both). Each element in the
@@ -1125,6 +1126,24 @@
#
# Currently you cannot use both.
+regress_artifact: Optional[dict[str, Any]] = None
+"""
+Keyword arguments to pass to the `mne.preprocessing.EOGRegression` model used
+in `mne.preprocessing.regress_artifact`. If `None`, no time-domain regression will
+be applied. Note that any channels picked in `regress_artifact["picks_artifact"]` will
+have the same time-domain filters applied to them as the experimental data.
+
+Artifact regression is applied before SSP or ICA.
+
+???+ example "Example"
+ For example, if you have MEG reference channel data recorded in three
+ miscellaneous channels, you could do:
+
+ ```python
+ regress_artifact = {"picks": "meg", "picks_artifact": ["MISC 001", "MISC 002", "MISC 003"]}
+ ```
+""" # noqa: E501
+
spatial_filter: Optional[Literal["ssp", "ica"]] = None
"""
Whether to use a spatial filter to detect and remove artifacts. The BIDS
@@ -1516,7 +1535,7 @@
you don't need to be worried about **exactly** balancing class sizes.
"""
-decoding_n_splits: int = 5
+decoding_n_splits: Annotated[int, Ge(2)] = 5
"""
The number of folds (also called "splits") to use in the K-fold cross-validation
scheme.
@@ -1577,7 +1596,7 @@
test to determine the significance of the decoding scores across participants.
"""
-cluster_permutation_p_threshold: float = 0.05
+cluster_permutation_p_threshold: Annotated[float, Interval(gt=0, lt=1)] = 0.05
"""
The alpha level (p-value, p threshold) to use for rejecting the null hypothesis
that the clusters show no significant difference between conditions. This is
@@ -1609,7 +1628,7 @@
# TIME-FREQUENCY
# --------------
-time_frequency_conditions: Iterable[str] = []
+time_frequency_conditions: Sequence[str] = []
"""
The conditions to compute time-frequency decomposition on.
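A recurring theme in this `_config.py` diff: constraints move out of imperative checks and into the annotations themselves (`Ge(2)`, `Interval(gt=0, lt=1)`, `Len(1, 4)`), so pydantic can enforce them during validation. A minimal sketch of how such annotations behave, using a hypothetical model rather than the pipeline's generated one:

```python
from typing import Annotated

from annotated_types import Ge, Interval, Len
from pydantic import BaseModel, ValidationError


class DemoConfig(BaseModel):  # hypothetical stand-in for the validated config
    decoding_n_splits: Annotated[int, Ge(2)] = 5
    cluster_permutation_p_threshold: Annotated[float, Interval(gt=0, lt=1)] = 0.05
    ch_types: Annotated[list[str], Len(1, 4)] = ["meg"]


try:
    DemoConfig(decoding_n_splits=1)  # rejected: violates Ge(2)
except ValidationError as err:
    print(err)
```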
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index 66fe9583a..db5487cb7 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -12,8 +12,7 @@
import matplotlib
import mne
import numpy as np
-from pydantic import ValidationError
-from pydantic.dataclasses import dataclass
+from pydantic import BaseModel, ConfigDict, ValidationError
from ._logging import gen_log_kwargs, logger
from .typing import PathLike
@@ -269,17 +268,6 @@ def _check_config(config: SimpleNamespace, config_path: Optional[PathLike]) -> N
f'ica_reject["{ch_type}"] ({ica_reject[ch_type]})'
)
- if not config.ch_types:
- raise ValueError("Please specify ch_types in your configuration.")
-
- _VALID_TYPES = ("meg", "mag", "grad", "eeg")
- if any(ch_type not in _VALID_TYPES for ch_type in config.ch_types):
- raise ValueError(
- "Invalid channel type passed. Please adjust `ch_types` in your "
- f"configuration, got {config.ch_types} but supported types are "
- f"{_VALID_TYPES}"
- )
-
if config.noise_cov == "emptyroom" and "eeg" in config.ch_types:
raise ValueError(
"You requested to process data that contains EEG channels. In "
@@ -312,16 +300,7 @@ def _check_config(config: SimpleNamespace, config_path: Optional[PathLike]) -> N
f"but you set baseline={bl}"
)
- # check decoding parameters
- if config.decoding_n_splits < 2:
- raise ValueError("decoding_n_splits should be at least 2.")
-
# check cluster permutation parameters
- if not 0 < config.cluster_permutation_p_threshold < 1:
- raise ValueError(
- "cluster_permutation_p_threshold should be in the (0, 1) interval."
- )
-
if config.cluster_n_permutations < 10 / config.cluster_permutation_p_threshold:
raise ValueError(
"cluster_n_permutations is not big enough to calculate "
@@ -380,33 +359,30 @@ def _pydantic_validate(
# https://docs.pydantic.dev/latest/usage/dataclasses/
from . import _config as root_config
- annotations = copy.deepcopy(root_config.__annotations__) # just be safe
- attrs = {
- key: _default_factory(key, val)
- for key, val in root_config.__dict__.items()
- if key in annotations
- }
- # everything should be type annotated, make sure they are
- asym = set(attrs).symmetric_difference(set(annotations))
- assert asym == set(), asym
+ # Modify annotations to add nested strict parsing
+ annotations = dict()
+ attrs = dict()
+ for key, annot in root_config.__annotations__.items():
+ annotations[key] = annot
+ attrs[key] = _default_factory(key, root_config.__dict__[key])
name = "user configuration"
if config_path is not None:
name += f" from {config_path}"
- UserConfig = type(
- name,
- (object,),
- {"__annotations__": annotations, **attrs},
- )
- dataclass_config = dict(
+ model_config = ConfigDict(
arbitrary_types_allowed=False,
validate_assignment=True,
strict=True, # do not allow float for int for example
+ extra="forbid",
+ )
+ UserConfig = type(
+ name,
+ (BaseModel,),
+ {"__annotations__": annotations, "model_config": model_config, **attrs},
)
- UserConfig = dataclass(config=dataclass_config)(UserConfig)
# Now use pydantic to automagically validate
user_vals = {key: val for key, val in config.__dict__.items() if key in annotations}
try:
- UserConfig(**user_vals)
+ UserConfig.model_validate(user_vals)
except ValidationError as err:
raise ValueError(str(err)) from None
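The rewritten `_pydantic_validate` builds a `BaseModel` subclass dynamically instead of decorating a plain class as a pydantic dataclass. Because `type()` defers to the most derived metaclass of its bases, the three-argument call yields a fully functional pydantic model; a reduced sketch with hypothetical annotations and defaults:

```python
from pydantic import BaseModel, ConfigDict, ValidationError

# Hypothetical stand-ins for the annotations/defaults read from _config.py
annotations = {"n_jobs": int, "task": str}
defaults = {"n_jobs": 1, "task": ""}

UserConfig = type(
    "user configuration",
    (BaseModel,),
    {
        "__annotations__": annotations,
        "model_config": ConfigDict(strict=True, extra="forbid"),
        **defaults,
    },
)

try:
    UserConfig.model_validate({"n_jobs": 1.5})  # strict mode: float is not int
except ValidationError as err:
    print(err)
```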
diff --git a/mne_bids_pipeline/_download.py b/mne_bids_pipeline/_download.py
index 45de893ed..46cf17e7a 100644
--- a/mne_bids_pipeline/_download.py
+++ b/mne_bids_pipeline/_download.py
@@ -77,13 +77,24 @@ def _download_from_web(*, ds_name: str, ds_path: Path):
(path / f"{ds_name}.zip").unlink()
+def _download_via_mne(*, ds_name: str, ds_path: Path):
+ assert ds_path.stem == ds_name, ds_path
+ getattr(mne.datasets, DATASET_OPTIONS[ds_name]["mne"]).data_path(
+ ds_path.parent,
+ verbose=True,
+ )
+
+
def _download(*, ds_name: str, ds_path: Path):
options = DATASET_OPTIONS[ds_name]
openneuro_name = options.get("openneuro", "")
git_url = options.get("git", "")
osf_node = options.get("osf", "")
web_url = options.get("web", "")
- assert sum(bool(x) for x in (openneuro_name, git_url, osf_node, web_url)) == 1
+ mne_mod = options.get("mne", "")
+ assert (
+ sum(bool(x) for x in (openneuro_name, git_url, osf_node, web_url, mne_mod)) == 1
+ )
if openneuro_name:
download_func = _download_via_openneuro
@@ -91,6 +102,8 @@ def _download(*, ds_name: str, ds_path: Path):
download_func = _download_via_datalad
elif osf_node:
raise RuntimeError("OSF downloads are currently not supported.")
+ elif mne_mod:
+ download_func = _download_via_mne
else:
assert web_url
download_func = _download_from_web
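The new `mne` dataset option delegates fetching to MNE-Python's built-in dataset downloaders. For the KIT phantom dataset registered later in this patch, the effective call is roughly as follows (a sketch; the destination directory is an assumption matching the test configs):

```python
import mne

# Roughly what _download_via_mne does for "MNE-phantom-KIT-data"
path = mne.datasets.phantom_kit.data_path("~/mne_data", verbose=True)
print(path)  # ends in .../MNE-phantom-KIT-data
```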
diff --git a/mne_bids_pipeline/_import_data.py b/mne_bids_pipeline/_import_data.py
index d7f22240d..be892576b 100644
--- a/mne_bids_pipeline/_import_data.py
+++ b/mne_bids_pipeline/_import_data.py
@@ -452,7 +452,6 @@ def import_er_data(
cfg=cfg,
bids_path_bads=bids_path_er_bads_in,
)
- raw_er.pick("meg", exclude=[])
# Don't deal with ref for now (initial data quality / auto bad step)
if bids_path_ref_in is None:
@@ -530,7 +529,7 @@ def _get_bids_path_in(
session: Optional[str],
run: Optional[str],
task: Optional[str],
- kind: Literal["orig", "sss"] = "orig",
+ kind: Literal["orig", "sss", "filt"] = "orig",
) -> BIDSPath:
# b/c can be used before this is updated
path_kwargs = dict(
@@ -544,13 +543,13 @@ def _get_bids_path_in(
datatype=get_datatype(config=cfg),
check=False,
)
- if kind == "sss":
+ if kind != "orig":
+ assert kind in ("sss", "filt"), kind
path_kwargs["root"] = cfg.deriv_root
path_kwargs["suffix"] = "raw"
path_kwargs["extension"] = ".fif"
- path_kwargs["processing"] = "sss"
+ path_kwargs["processing"] = kind
else:
- assert kind == "orig", kind
path_kwargs["root"] = cfg.bids_root
path_kwargs["suffix"] = None
path_kwargs["extension"] = None
@@ -566,7 +565,7 @@ def _get_run_path(
session: Optional[str],
run: Optional[str],
task: Optional[str],
- kind: Literal["orig", "sss"],
+ kind: Literal["orig", "sss", "filt"],
add_bads: Optional[bool] = None,
allow_missing: bool = False,
key: Optional[str] = None,
@@ -594,7 +593,7 @@ def _get_rest_path(
cfg: SimpleNamespace,
subject: str,
session: Optional[str],
- kind: Literal["orig", "sss"],
+ kind: Literal["orig", "sss", "filt"],
add_bads: Optional[bool] = None,
) -> dict:
if not (cfg.process_rest and not cfg.task_is_rest):
@@ -616,13 +615,14 @@ def _get_noise_path(
cfg: SimpleNamespace,
subject: str,
session: Optional[str],
- kind: Literal["orig", "sss"],
+ kind: Literal["orig", "sss", "filt"],
mf_reference_run: Optional[str],
add_bads: Optional[bool] = None,
) -> dict:
if not (cfg.process_empty_room and get_datatype(config=cfg) == "meg"):
return dict()
- if kind == "sss":
+ if kind != "orig":
+ assert kind in ("sss", "filt")
raw_fname = _get_bids_path_in(
cfg=cfg,
subject=subject,
@@ -661,7 +661,7 @@ def _get_run_rest_noise_path(
session: Optional[str],
run: Optional[str],
task: Optional[str],
- kind: Literal["orig", "sss"],
+ kind: Literal["orig", "sss", "filt"],
mf_reference_run: Optional[str],
add_bads: Optional[bool] = None,
) -> dict:
@@ -705,7 +705,7 @@ def _path_dict(
cfg: SimpleNamespace,
bids_path_in: BIDSPath,
add_bads: Optional[bool] = None,
- kind: Literal["orig", "sss"],
+ kind: Literal["orig", "sss", "filt"],
allow_missing: bool,
key: Optional[str] = None,
) -> dict:
@@ -805,3 +805,14 @@ def _import_data_kwargs(*, config: SimpleNamespace, subject: str) -> dict:
runs=get_runs(config=config, subject=subject), # XXX needs to accept session!
**_bids_kwargs(config=config),
)
+
+
+def _get_run_type(
+ run: Optional[str],
+ task: Optional[str],
+) -> str:
+ if run is None and task in ("noise", "rest"):
+ run_type = dict(rest="resting-state", noise="empty-room")[task]
+ else:
+ run_type = "experimental"
+ return run_type
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index ed514925d..80f2f1962 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -68,14 +68,13 @@ def _open_report(
yield report
finally:
try:
- msg = "Adding config and sys info to report"
- logger.info(**gen_log_kwargs(message=msg))
_finalize(
report=report,
exec_params=exec_params,
subject=subject,
session=session,
run=run,
+ task=task,
)
except Exception as exc:
logger.warning(f"Failed: {exc}")
@@ -506,12 +505,17 @@ def _finalize(
subject: str,
session: Optional[str],
run: Optional[str],
+ task: Optional[str],
) -> None:
"""Add system information and the pipeline configuration to the report."""
# ensure they are always appended
titles = ["Configuration file", "System information"]
for title in titles:
report.remove(title=title, remove_all=True)
+ # Print this exactly once
+ if _cached_sys_info.cache_info()[-1] == 0: # never run
+ msg = "Adding config and sys info to report"
+ logger.info(**gen_log_kwargs(message=msg))
# No longer need replace=True in these
report.add_code(
code=exec_params.config_path,
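The `_finalize` change logs its message only the first time by checking whether a cached helper has ever run. The same trick in isolation, with a hypothetical helper in place of the pipeline's `_cached_sys_info`:

```python
import functools


@functools.lru_cache(maxsize=1)
def _cached_sys_info():  # hypothetical stand-in
    return "sys info"


def _finalize_once() -> None:
    # CacheInfo is (hits, misses, maxsize, currsize); currsize == 0
    # means the helper has never run, so the message prints exactly once
    if _cached_sys_info.cache_info()[-1] == 0:
        print("Adding config and sys info to report")
    _cached_sys_info()


_finalize_once()  # prints
_finalize_once()  # silent
```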
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index 128b876ed..c7e46267b 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -225,13 +225,18 @@ def wrapper(*args, **kwargs):
for key, (fname, this_hash) in out_files_hashes.items():
fname = pathlib.Path(fname)
if not fname.exists():
- msg = "Output file missing, will recompute …"
+ msg = f"Output file missing {fname}, will recompute …"
emoji = "🧩"
bad_out_files = True
break
got_hash = hash_(key, fname, kind="out")[1]
if this_hash != got_hash:
- msg = "Output file hash mismatch, will recompute …"
+ msg = f"Output file hash mismatch for {fname}, will recompute …"
emoji = "🚫"
bad_out_files = True
break
diff --git a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
index a44a1c70e..fd9c6c874 100644
--- a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
@@ -20,6 +20,8 @@
import mne
import numpy as np
+from mne.io.pick import _picks_to_idx
+from mne.preprocessing import EOGRegression
from ..._config_utils import (
get_runs_tasks,
@@ -28,6 +30,7 @@
)
from ..._import_data import (
_get_run_rest_noise_path,
+ _get_run_type,
_import_data_kwargs,
import_er_data,
import_experimental_data,
@@ -69,6 +72,7 @@ def notch_filter(
trans_bandwidth: Union[float, Literal["auto"]],
notch_widths: Optional[Union[float, Iterable[float]]],
run_type: Literal["experimental", "empty-room", "resting-state"],
+ picks: Optional[np.ndarray],
) -> None:
"""Filter data channels (MEG and EEG)."""
if freqs is None:
@@ -86,6 +90,7 @@ def notch_filter(
trans_bandwidth=trans_bandwidth,
notch_widths=notch_widths,
n_jobs=1,
+ picks=picks,
)
@@ -100,6 +105,7 @@ def bandpass_filter(
l_trans_bandwidth: Union[float, Literal["auto"]],
h_trans_bandwidth: Union[float, Literal["auto"]],
run_type: Literal["experimental", "empty-room", "resting-state"],
+ picks: Optional[np.ndarray],
) -> None:
"""Filter data channels (MEG and EEG)."""
if l_freq is not None and h_freq is None:
@@ -122,6 +128,7 @@ def bandpass_filter(
l_trans_bandwidth=l_trans_bandwidth,
h_trans_bandwidth=h_trans_bandwidth,
n_jobs=1,
+ picks=picks,
)
@@ -161,14 +168,10 @@ def filter_data(
bids_path_in = in_files.pop(in_key)
bids_path_bads_in = in_files.pop(f"{in_key}-bads", None)
- if run is None and task in ("noise", "rest"):
- run_type = dict(rest="resting-state", noise="empty-room")[task]
- else:
- run_type = "experimental"
-
+ run_type = _get_run_type(run=run, task=task)
+ msg = f"Reading {run_type} recording: " f"{bids_path_in.basename}"
+ logger.info(**gen_log_kwargs(message=msg))
if cfg.use_maxwell_filter:
- msg = f"Reading {run_type} recording: " f"{bids_path_in.basename}"
- logger.info(**gen_log_kwargs(message=msg))
raw = mne.io.read_raw_fif(bids_path_in)
elif run is None and task == "noise":
raw = import_er_data(
@@ -191,6 +194,8 @@ def filter_data(
out_files[in_key] = bids_path_in.copy().update(
root=cfg.deriv_root,
+ subject=subject, # save under subject's directory so all files are there
+ session=session,
processing="filt",
extension=".fif",
suffix="raw",
@@ -200,6 +205,18 @@ def filter_data(
check=False,
)
+ if cfg.regress_artifact is None:
+ picks = None
+ else:
+ # Need to figure out the correct picks to use
+ model = EOGRegression(**cfg.regress_artifact)
+ picks_regress = _picks_to_idx(
+ raw.info, model.picks, none="data", exclude=model.exclude
+ )
+ picks_artifact = _picks_to_idx(raw.info, model.picks_artifact)
+ picks_data = _picks_to_idx(raw.info, "data", exclude=()) # raw.filter default
+ picks = np.unique(np.r_[picks_regress, picks_artifact, picks_data])
+
raw.load_data()
notch_filter(
raw=raw,
@@ -211,6 +228,7 @@ def filter_data(
trans_bandwidth=cfg.notch_trans_bandwidth,
notch_widths=cfg.notch_widths,
run_type=run_type,
+ picks=picks,
)
bandpass_filter(
raw=raw,
@@ -223,6 +241,7 @@ def filter_data(
h_trans_bandwidth=cfg.h_trans_bandwidth,
l_trans_bandwidth=cfg.l_trans_bandwidth,
run_type=run_type,
+ picks=picks,
)
resample(
raw=raw,
@@ -287,6 +306,7 @@ def get_config(
notch_trans_bandwidth=config.notch_trans_bandwidth,
notch_widths=config.notch_widths,
raw_resample_sfreq=config.raw_resample_sfreq,
+ regress_artifact=config.regress_artifact,
**_import_data_kwargs(config=config, subject=subject),
)
return cfg
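The picks computed above ensure that when regression is configured, the artifact channels (e.g., MISC reference sensors) receive the same notch and band-pass filters as the data channels they will later be regressed from. The union logic, reduced to plain NumPy with hypothetical channel indices:

```python
import numpy as np

picks_regress = np.array([0, 1, 2])   # channels the model will correct
picks_artifact = np.array([10, 11])   # e.g., MISC reference channels
picks_data = np.array([0, 1, 2, 3])   # raw.filter's default data picks

# Filter everything involved in the regression identically
picks = np.unique(np.r_[picks_regress, picks_artifact, picks_data])
print(picks)  # [ 0  1  2  3 10 11]
```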
diff --git a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
new file mode 100644
index 000000000..8a2b2a0f6
--- /dev/null
+++ b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
@@ -0,0 +1,172 @@
+"""Run Signal Subspace Projections (SSP) for artifact correction.
+
+These are often also referred to as PCA vectors.
+"""
+
+from types import SimpleNamespace
+from typing import Optional
+
+import mne
+from mne.io.pick import _picks_to_idx
+from mne.preprocessing import EOGRegression
+
+from ..._config_utils import (
+ get_runs_tasks,
+ get_sessions,
+ get_subjects,
+)
+from ..._import_data import _get_run_rest_noise_path, _get_run_type, _import_data_kwargs
+from ..._logging import gen_log_kwargs, logger
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._report import _add_raw, _open_report
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
+
+
+def get_input_fnames_regress_artifact(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ run: str,
+ task: Optional[str],
+) -> dict:
+ """Get paths of files required by regress_artifact function."""
+ out = _get_run_rest_noise_path(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ run=run,
+ task=task,
+ kind="filt",
+ mf_reference_run=cfg.mf_reference_run,
+ )
+ assert len(out)
+ return out
+
+
+@failsafe_run(
+ get_input_fnames=get_input_fnames_regress_artifact,
+)
+def run_regress_artifact(
+ *,
+ cfg: SimpleNamespace,
+ exec_params: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ run: str,
+ task: Optional[str],
+ in_files: dict,
+) -> dict:
+ model = EOGRegression(proj=False, **cfg.regress_artifact)
+ out_files = dict()
+ in_key = f"raw_task-{task}_run-{run}"
+ bids_path_in = in_files.pop(in_key)
+ out_files[in_key] = bids_path_in.copy().update(processing="regress")
+ run_type = _get_run_type(run=run, task=task)
+ msg = f"Reading {run_type} recording: " f"{bids_path_in.basename}"
+ logger.info(**gen_log_kwargs(message=msg))
+ raw = mne.io.read_raw_fif(bids_path_in).load_data()
+ projs = raw.info["projs"]
+ raw.del_proj()
+ model.fit(raw)
+ all_types = raw.get_channel_types()
+ picks = _picks_to_idx(raw.info, model.picks, none="data", exclude=model.exclude)
+ ch_types = set(all_types[pick] for pick in picks)
+ del picks
+ out_files["regress"] = bids_path_in.copy().update(
+ processing=None,
+ split=None,
+ run=None,
+ suffix="regress",
+ extension=".h5",
+ )
+ model.apply(raw, copy=False)
+ if projs:
+ raw.add_proj(projs)
+ raw.save(out_files[in_key], overwrite=True)
+ _update_for_splits(out_files, in_key)
+ model.save(out_files["regress"], overwrite=True)
+ assert len(in_files) == 0, in_files.keys()
+
+ # Report
+ with _open_report(
+ cfg=cfg,
+ exec_params=exec_params,
+ subject=subject,
+ session=session,
+ run=run,
+ task=task,
+ ) as report:
+ msg = "Adding regressed raw data to report"
+ logger.info(**gen_log_kwargs(message=msg))
+ figs, captions = list(), list()
+ for kind in ("mag", "grad", "eeg"):
+ if kind not in ch_types:
+ continue
+ figs.append(model.plot(ch_type=kind))
+ captions.append(f"Run {run}: {kind}")
+ if figs:
+ report.add_figure(
+ fig=figs,
+ caption=captions,
+ title="Regression weights",
+ tags=("raw", f"run-{run}", "regression"),
+ replace=True,
+ )
+ _add_raw(
+ cfg=cfg,
+ report=report,
+ bids_path_in=out_files[in_key],
+ title="Raw (regression)",
+ tags=("regression",),
+ raw=raw,
+ )
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
+
+
+def get_config(
+ *,
+ config: SimpleNamespace,
+ subject: str,
+) -> SimpleNamespace:
+ cfg = SimpleNamespace(
+ regress_artifact=config.regress_artifact,
+ **_import_data_kwargs(config=config, subject=subject),
+ )
+ return cfg
+
+
+def main(*, config: SimpleNamespace) -> None:
+ """Run artifact regression."""
+ if config.regress_artifact is None:
+ msg = "Skipping …"
+ logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
+ return
+
+ with get_parallel_backend(config.exec_params):
+ parallel, run_func = parallel_func(
+ run_regress_artifact, exec_params=config.exec_params
+ )
+
+ logs = parallel(
+ run_func(
+ cfg=get_config(
+ config=config,
+ subject=subject,
+ ),
+ exec_params=config.exec_params,
+ subject=subject,
+ session=session,
+ run=run,
+ task=task,
+ )
+ for subject in get_subjects(config)
+ for session in get_sessions(config)
+ for run, task in get_runs_tasks(
+ config=config,
+ subject=subject,
+ session=session,
+ )
+ )
+
+ save_logs(config=config, logs=logs)
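Stripped of BIDS path handling and reporting, the heart of this new step is a fit/apply cycle of `mne.preprocessing.EOGRegression` on projection-free data. A condensed sketch with hypothetical file names:

```python
import mne
from mne.preprocessing import EOGRegression

raw = mne.io.read_raw_fif("sub-01_proc-filt_raw.fif").load_data()  # hypothetical
model = EOGRegression(
    proj=False, picks="meg",
    picks_artifact=["MISC 001", "MISC 002", "MISC 003"],
)
projs = raw.info["projs"]  # the model must be fit without projectors applied
raw.del_proj()
model.fit(raw)
model.apply(raw, copy=False)
if projs:
    raw.add_proj(projs)
model.save("sub-01_regress.h5", overwrite=True)  # hypothetical output name
```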
diff --git a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
index 00346df25..7bfef3c56 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
@@ -253,7 +253,7 @@ def get_input_fnames_run_ica(
for run in cfg.runs:
key = f"raw_run-{run}"
in_files[key] = bids_basename.copy().update(
- run=run, processing="filt", suffix="raw"
+ run=run, processing=cfg.processing, suffix="raw"
)
_update_for_splits(in_files, key, single=True)
return in_files
@@ -614,6 +614,7 @@ def get_config(
eog_channels=config.eog_channels,
rest_epochs_duration=config.rest_epochs_duration,
rest_epochs_overlap=config.rest_epochs_overlap,
+ processing="filt" if config.regress_artifact is None else "regress",
**_bids_kwargs(config=config),
)
return cfg
diff --git a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
index 46b88ee90..7aa0e97de 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
@@ -47,7 +47,7 @@ def get_input_fnames_run_ssp(
for run in cfg.runs:
key = f"raw_run-{run}"
in_files[key] = bids_basename.copy().update(
- run=run, processing="filt", suffix="raw"
+ run=run, processing=cfg.processing, suffix="raw"
)
_update_for_splits(in_files, key, single=True)
return in_files
@@ -66,7 +66,7 @@ def run_ssp(
) -> dict:
import matplotlib.pyplot as plt
- # compute SSP on first run of raw
+ # compute SSP on all runs of raw data
raw_fnames = [in_files.pop(f"raw_run-{run}") for run in cfg.runs]
# when saving proj, use run=None
@@ -229,6 +229,7 @@ def get_config(
epochs_decim=config.epochs_decim,
use_maxwell_filter=config.use_maxwell_filter,
runs=get_runs(config=config, subject=subject),
+ processing="filt" if config.regress_artifact is None else "regress",
**_bids_kwargs(config=config),
)
return cfg
diff --git a/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py b/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
similarity index 100%
rename from mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py
rename to mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
diff --git a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
similarity index 99%
rename from mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
rename to mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
index c24d8e015..f4b999cc8 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
@@ -1,4 +1,4 @@
-"""Apply ICA and obtain the cleaned epochs.
+"""Apply ICA and obtain the cleaned epochs and raw data.
Blinks and ECG artifacts are automatically detected and the corresponding ICA
components are removed from the data.
diff --git a/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py b/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py
similarity index 96%
rename from mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
rename to mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py
index 9b1a83fc9..b1eda9cd1 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py
@@ -1,4 +1,4 @@
-"""Apply SSP projections and obtain the cleaned epochs.
+"""Apply SSP projections and obtain the cleaned epochs and raw data.
Blinks and ECG artifacts are automatically detected and the corresponding SSP
projection components are removed from the data.
@@ -57,8 +57,6 @@ def apply_ssp(
session: Optional[str],
in_files: dict,
) -> dict:
- # load epochs to reject ICA components
- # compute SSP on first run of raw
out_files = dict()
out_files["epochs"] = (
in_files["epochs"].copy().update(processing="ssp", split=None, check=False)
diff --git a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
similarity index 100%
rename from mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
rename to mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
diff --git a/mne_bids_pipeline/steps/preprocessing/__init__.py b/mne_bids_pipeline/steps/preprocessing/__init__.py
index 686b7cf27..07d65224a 100644
--- a/mne_bids_pipeline/steps/preprocessing/__init__.py
+++ b/mne_bids_pipeline/steps/preprocessing/__init__.py
@@ -5,12 +5,13 @@
_02_head_pos,
_03_maxfilter,
_04_frequency_filter,
- _05_make_epochs,
+ _05_regress_artifact,
_06a_run_ica,
_06b_run_ssp,
- _07a_apply_ica,
- _07b_apply_ssp,
- _08_ptp_reject,
+ _07_make_epochs,
+ _08a_apply_ica,
+ _08b_apply_ssp,
+ _09_ptp_reject,
)
_STEPS = (
@@ -18,10 +19,11 @@
_02_head_pos,
_03_maxfilter,
_04_frequency_filter,
- _05_make_epochs,
+ _05_regress_artifact,
_06a_run_ica,
_06b_run_ssp,
- _07a_apply_ica,
- _07b_apply_ssp,
- _08_ptp_reject,
+ _07_make_epochs,
+ _08a_apply_ica,
+ _08b_apply_ssp,
+ _09_ptp_reject,
)
diff --git a/mne_bids_pipeline/tests/configs/config_MNE_phantom_KIT_data.py b/mne_bids_pipeline/tests/configs/config_MNE_phantom_KIT_data.py
new file mode 100644
index 000000000..ef3347a53
--- /dev/null
+++ b/mne_bids_pipeline/tests/configs/config_MNE_phantom_KIT_data.py
@@ -0,0 +1,28 @@
+"""
+KIT phantom data.
+
+https://mne.tools/dev/documentation/datasets.html#kit-phantom-dataset
+"""
+
+study_name = "MNE-phantom-KIT-data"
+bids_root = "~/mne_data/MNE-phantom-KIT-data"
+deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/MNE-phantom-KIT-data"
+task = "phantom"
+ch_types = ["meg"]
+
+# Preprocessing
+l_freq = None
+h_freq = 40.0
+regress_artifact = dict(
+ picks="meg", picks_artifact=["MISC 001", "MISC 002", "MISC 003"]
+)
+
+# Epochs
+epochs_tmin = -0.08
+epochs_tmax = 0.18
+epochs_decim = 10 # 2000->200 Hz
+baseline = (None, 0)
+conditions = ["dip01", "dip13", "dip25", "dip37", "dip49"]
+
+# Decoding
+decode = True  # decoding performance should be very good on this dataset
diff --git a/mne_bids_pipeline/tests/datasets.py b/mne_bids_pipeline/tests/datasets.py
index f96a01042..c559f06ca 100644
--- a/mne_bids_pipeline/tests/datasets.py
+++ b/mne_bids_pipeline/tests/datasets.py
@@ -9,6 +9,7 @@ class DATASET_OPTIONS_T(TypedDict, total=False):
openneuro: str # ""
osf: str # ""
web: str # ""
+ mne: str # ""
include: list[str] # []
exclude: list[str] # []
hash: str # ""
@@ -122,4 +123,7 @@ class DATASET_OPTIONS_T(TypedDict, total=False):
"sub-emptyroom/ses-20000101",
],
},
+ "MNE-phantom-KIT-data": {
+ "mne": "phantom_kit",
+ },
}
diff --git a/mne_bids_pipeline/tests/test_run.py b/mne_bids_pipeline/tests/test_run.py
index 4eee1aa02..2e068ef70 100644
--- a/mne_bids_pipeline/tests/test_run.py
+++ b/mne_bids_pipeline/tests/test_run.py
@@ -124,6 +124,9 @@ class _TestOptionsT(TypedDict, total=False):
"config": "config_ERP_CORE.py",
"task": "P3",
},
+ "MNE-phantom-KIT-data": {
+ "config": "config_MNE_phantom_KIT_data.py",
+ },
}
diff --git a/mne_bids_pipeline/tests/test_validation.py b/mne_bids_pipeline/tests/test_validation.py
index c47432155..e99bfecf9 100644
--- a/mne_bids_pipeline/tests/test_validation.py
+++ b/mne_bids_pipeline/tests/test_validation.py
@@ -14,7 +14,7 @@ def test_validation(tmp_path, capsys):
bad_text += f"bids_root = '{tmp_path}'\n"
# no ch_types
config_path.write_text(bad_text)
- with pytest.raises(ValueError, match="Please specify ch_types"):
+ with pytest.raises(ValueError, match="Value should have at least 1 item"):
_import_config(config_path=config_path)
bad_text += "ch_types = ['eeg']\n"
# conditions
From a65f278d92eb7d36914d3c630209dce12a4c6a8a Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Tue, 30 Jan 2024 14:37:03 -0500
Subject: [PATCH 060/132] BUG: Fix several bugs (#839)
---
docs/source/v1.6.md.inc | 4 ++
mne_bids_pipeline/_config_import.py | 2 +-
mne_bids_pipeline/_config_utils.py | 40 ++++++++++---------
mne_bids_pipeline/_main.py | 1 -
mne_bids_pipeline/_reject.py | 10 ++---
mne_bids_pipeline/_run.py | 14 ++++++-
.../steps/freesurfer/_02_coreg_surfaces.py | 22 +++++++---
.../preprocessing/_05_regress_artifact.py | 1 -
.../steps/preprocessing/_09_ptp_reject.py | 19 +++++++--
.../steps/source/_01_make_bem_surfaces.py | 4 +-
.../steps/source/_02_make_bem_solution.py | 4 +-
.../steps/source/_03_setup_source_space.py | 4 +-
12 files changed, 85 insertions(+), 40 deletions(-)
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index afb7835c3..01bfd87e4 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -5,6 +5,7 @@
### :new: New features & enhancements
- Added [`regress_artifact`][mne_bids_pipeline._config.regress_artifact] to allow artifact regression (e.g., of MEG reference sensors in KIT systems) (#837 by @larsoner)
+- Chosen `reject` parameters are now saved in the generated HTML reports (#839 by @larsoner)
[//]: # (### :warning: Behavior changes)
@@ -17,6 +18,9 @@
### :bug: Bug fixes
- Fix minor issues with path handling for cross-talk and calibration files (#834 by @larsoner)
+- Fix bug where EEG `reject` params were not used for `ch_types = ["meg", "eeg"]` (#839 by @larsoner)
+- Fix bug where implicit `mf_reference_run` could change across invocations of `mne_bids_pipeline`, breaking caching (#839 by @larsoner)
+- Fix bug where `--no-cache` had no effect (#839 by @larsoner)
### :medical_symbol: Code health
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index db5487cb7..fa8fb6772 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -369,7 +369,7 @@ def _pydantic_validate(
if config_path is not None:
name += f" from {config_path}"
model_config = ConfigDict(
- arbitrary_types_allowed=False,
+ arbitrary_types_allowed=True, # needed in pydantic 2.6.0 to allow e.g. DigMontage
validate_assignment=True,
strict=True, # do not allow float for int for example
extra="forbid",
diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py
index 321ccf0f0..7b555a2a4 100644
--- a/mne_bids_pipeline/_config_utils.py
+++ b/mne_bids_pipeline/_config_utils.py
@@ -16,9 +16,9 @@
from .typing import ArbitraryContrast
try:
- _keys_arbitrary_contrast = set(ArbitraryContrast.__required_keys__)
+ _set_keys_arbitrary_contrast = set(ArbitraryContrast.__required_keys__)
except Exception:
- _keys_arbitrary_contrast = set(ArbitraryContrast.__annotations__.keys())
+ _set_keys_arbitrary_contrast = set(ArbitraryContrast.__annotations__.keys())
def get_fs_subjects_dir(config: SimpleNamespace) -> pathlib.Path:
@@ -96,11 +96,14 @@ def get_subjects(config: SimpleNamespace) -> list[str]:
else:
s = config.subjects
- subjects = set(s) - set(config.exclude_subjects)
- # Drop empty-room subject.
- subjects = subjects - set(["emptyroom"])
+ # Preserve order and remove excluded subjects
+ subjects = [
+ subject
+ for subject in s
+ if subject not in config.exclude_subjects and subject != "emptyroom"
+ ]
- return sorted(subjects)
+ return subjects
def get_sessions(config: SimpleNamespace) -> Union[list[None], list[str]]:
@@ -176,7 +179,17 @@ def _get_runs_all_subjects_cached(
def get_intersect_run(config: SimpleNamespace) -> list[str]:
"""Return the intersection of all the runs of all subjects."""
subj_runs = get_runs_all_subjects(config)
- return list(set.intersection(*map(set, subj_runs.values())))
+ # Do not use something like:
+ # list(set.intersection(*map(set, subj_runs.values())))
+ # as it will not preserve order. Instead just be explicit and preserve order.
+ # We could use "sorted", but it's probably better to use the order provided by
+ # the user (if they pass runs=["02", "01"], then "02" should be used first)
+ all_runs = list()
+ for runs in subj_runs.values():
+ for run in runs:
+ if run not in all_runs:
+ all_runs.append(run)
+ return all_runs
def get_runs(
@@ -429,17 +442,6 @@ def _restrict_analyze_channels(
return inst
-def _get_scalp_in_files(cfg: SimpleNamespace) -> dict[str, pathlib.Path]:
- subject_path = pathlib.Path(cfg.subjects_dir) / cfg.fs_subject
- seghead = subject_path / "surf" / "lh.seghead"
- in_files = dict()
- if seghead.is_file():
- in_files["seghead"] = seghead
- else:
- in_files["t1"] = subject_path / "mri" / "T1.mgz"
- return in_files
-
-
def _get_bem_conductivity(cfg: SimpleNamespace) -> tuple[tuple[float], str]:
if cfg.fs_subject in ("fsaverage", cfg.use_template_mri):
conductivity = None # should never be used
@@ -573,7 +575,7 @@ def _validate_contrasts(contrasts: SimpleNamespace) -> None:
if len(contrast) != 2:
raise ValueError("Contrasts' tuples MUST be two conditions")
elif isinstance(contrast, dict):
- if not _keys_arbitrary_contrast.issubset(set(contrast.keys())):
+ if not _set_keys_arbitrary_contrast.issubset(set(contrast.keys())):
raise ValueError(f"Missing key(s) in contrast {contrast}")
if len(contrast["conditions"]) != len(contrast["weights"]):
raise ValueError(
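The order-preserving accumulation in `get_intersect_run` can also be expressed with `dict.fromkeys`, which dedupes while keeping first-seen order; the explicit loop above just spells the intent out. A sketch with hypothetical run lists:

```python
subj_runs = {"sub-01": ["02", "01"], "sub-02": ["01", "03"]}  # hypothetical

all_runs = list(
    dict.fromkeys(run for runs in subj_runs.values() for run in runs)
)
print(all_runs)  # ['02', '01', '03'] -- user-provided order, no sorting
```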
diff --git a/mne_bids_pipeline/_main.py b/mne_bids_pipeline/_main.py
index 04ddabe1e..56d14a010 100755
--- a/mne_bids_pipeline/_main.py
+++ b/mne_bids_pipeline/_main.py
@@ -141,7 +141,6 @@ def main():
steps = (steps,)
on_error = "debug" if debug else None
- cache = "1" if cache else "0"
processing_stages = []
processing_steps = []
diff --git a/mne_bids_pipeline/_reject.py b/mne_bids_pipeline/_reject.py
index ca506239d..707984732 100644
--- a/mne_bids_pipeline/_reject.py
+++ b/mne_bids_pipeline/_reject.py
@@ -45,11 +45,11 @@ def _get_reject(
# Only keep thresholds for channel types of interest
reject = reject.copy()
- if ch_types == ["eeg"]:
- ch_types_to_remove = ("mag", "grad")
- else:
- ch_types_to_remove = ("eeg",)
-
+ ch_types_to_remove = list()
+ if "meg" not in ch_types:
+ ch_types_to_remove.extend(("mag", "grad"))
+ if "eeg" not in ch_types:
+ ch_types_to_remove.append("eeg")
for ch_type in ch_types_to_remove:
try:
del reject[ch_type]
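The `_get_reject` fix makes threshold pruning additive per channel family, so `ch_types = ["meg", "eeg"]` retains both MEG and EEG thresholds; the old branch dropped `eeg` whenever `ch_types` was not exactly `["eeg"]`. The logic in isolation, as a hypothetical helper:

```python
def _keep_relevant_thresholds(reject: dict, ch_types: list[str]) -> dict:
    """Drop thresholds only for channel families absent from ch_types."""
    reject = reject.copy()
    ch_types_to_remove = []
    if "meg" not in ch_types:
        ch_types_to_remove.extend(("mag", "grad"))
    if "eeg" not in ch_types:
        ch_types_to_remove.append("eeg")
    for ch_type in ch_types_to_remove:
        reject.pop(ch_type, None)
    return reject


thresholds = {"mag": 4e-12, "grad": 4e-11, "eeg": 150e-6}
print(_keep_relevant_thresholds(thresholds, ["meg", "eeg"]))  # keeps all three
```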
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index c7e46267b..04deef839 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -378,11 +378,21 @@ def _prep_out_files(
*,
exec_params: SimpleNamespace,
out_files: dict[str, BIDSPath],
+ check_relative: Optional[pathlib.Path] = None,
):
+ if check_relative is None:
+ check_relative = exec_params.deriv_root
for key, fname in out_files.items():
+ # Sanity check that we only ever write to the derivatives directory
+ fname = pathlib.Path(fname)
+ if not fname.is_relative_to(check_relative):
+ raise RuntimeError(
+ f"Output BIDSPath not relative to expected root {check_relative}:"
+ f"\n{fname}"
+ )
out_files[key] = _path_to_str_hash(
key,
- pathlib.Path(fname),
+ fname,
method=exec_params.memory_file_method,
kind="out",
)
@@ -401,7 +411,7 @@ def _path_to_str_hash(
assert isinstance(v, pathlib.Path), f'Bad type {type(v)}: {kind}_files["{k}"] = {v}'
assert v.exists(), f'missing {kind}_files["{k}"] = {v}'
if method == "mtime":
- this_hash = v.lstat().st_mtime
+ this_hash = v.stat().st_mtime
else:
assert method == "hash" # guaranteed
this_hash = hash_file_path(v)
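The new `check_relative` guard relies on `pathlib.Path.is_relative_to` (Python ≥ 3.9) to catch any step that tries to register an output outside its expected root; note also the `lstat()` → `stat()` switch above, which hashes the mtime of a symlink's target rather than that of the link itself. A minimal sketch with hypothetical paths:

```python
import pathlib

deriv_root = pathlib.Path("/data/derivatives/mne-bids-pipeline")  # hypothetical
fname = pathlib.Path("/data/bids/sub-01/meg/sub-01_raw.fif")      # hypothetical

if not fname.is_relative_to(deriv_root):
    raise RuntimeError(
        f"Output BIDSPath not relative to expected root {deriv_root}:\n{fname}"
    )
```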
diff --git a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
index eb5f86151..b2e2f8090 100644
--- a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
+++ b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
@@ -10,7 +10,6 @@
import mne.bem
from ..._config_utils import (
- _get_scalp_in_files,
get_fs_subject,
get_fs_subjects_dir,
get_subjects,
@@ -22,6 +21,17 @@
fs_bids_app = Path(__file__).parent / "contrib" / "run.py"
+def _get_scalp_in_files(cfg: SimpleNamespace) -> dict[str, Path]:
+ subject_path = Path(cfg.fs_subjects_dir) / cfg.fs_subject
+ seghead = subject_path / "surf" / "lh.seghead"
+ in_files = dict()
+ if seghead.is_file():
+ in_files["seghead"] = seghead
+ else:
+ in_files["t1"] = subject_path / "mri" / "T1.mgz"
+ return in_files
+
+
def get_input_fnames_coreg_surfaces(
*,
cfg: SimpleNamespace,
@@ -32,7 +42,7 @@ def get_input_fnames_coreg_surfaces(
def get_output_fnames_coreg_surfaces(*, cfg: SimpleNamespace, subject: str) -> dict:
out_files = dict()
- subject_path = Path(cfg.subjects_dir) / cfg.fs_subject
+ subject_path = Path(cfg.fs_subjects_dir) / cfg.fs_subject
out_files["seghead"] = subject_path / "surf" / "lh.seghead"
for key in ("dense", "medium", "sparse"):
out_files[f"head-{key}"] = (
@@ -57,19 +67,21 @@ def make_coreg_surfaces(
in_files.pop("t1" if "t1" in in_files else "seghead")
mne.bem.make_scalp_surfaces(
subject=cfg.fs_subject,
- subjects_dir=cfg.subjects_dir,
+ subjects_dir=cfg.fs_subjects_dir,
force=True,
overwrite=True,
)
out_files = get_output_fnames_coreg_surfaces(cfg=cfg, subject=subject)
- return _prep_out_files(exec_params=exec_params, out_files=out_files)
+ return _prep_out_files(
+ exec_params=exec_params, out_files=out_files, check_relative=cfg.fs_subjects_dir
+ )
def get_config(*, config, subject) -> SimpleNamespace:
cfg = SimpleNamespace(
subject=subject,
fs_subject=get_fs_subject(config, subject),
- subjects_dir=get_fs_subjects_dir(config),
+ fs_subjects_dir=get_fs_subjects_dir(config),
)
return cfg
diff --git a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
index 8a2b2a0f6..5ab1119a6 100644
--- a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
+++ b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
@@ -76,7 +76,6 @@ def run_regress_artifact(
out_files["regress"] = bids_path_in.copy().update(
processing=None,
split=None,
- run=None,
suffix="regress",
extension=".h5",
)
diff --git a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
index 7f0bf0607..3584aa72f 100644
--- a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
+++ b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
@@ -187,6 +187,9 @@ def drop_ptp(
psd = True
else:
psd = 30
+ tags = ("epochs", "reject")
+ kind = cfg.reject if isinstance(cfg.reject, str) else "Rejection"
+ title = "Epochs: after cleaning"
with _open_report(
cfg=cfg, exec_params=exec_params, subject=subject, session=session
) as report:
@@ -201,18 +204,28 @@ def drop_ptp(
fig=reject_log.plot(
orientation="horizontal", aspect="auto", show=False
),
- title="Epochs: Autoreject cleaning",
+ title=f"{kind} cleaning",
caption=caption,
- tags=("epochs", "autoreject"),
+ section=title,
+ tags=tags,
replace=True,
)
del caption
+ else:
+ report.add_html(
+ html=f"{reject}
",
+ title=f"{kind} thresholds",
+ section=title,
+ replace=True,
+ tags=tags,
+ )
report.add_epochs(
epochs=epochs,
- title="Epochs: after cleaning",
+ title=title,
psd=psd,
drop_log_ignore=(),
+ tags=tags,
replace=True,
)
return _prep_out_files(exec_params=exec_params, out_files=out_files)
diff --git a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
index da2b64890..f77593107 100644
--- a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
+++ b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
@@ -112,7 +112,9 @@ def make_bem_surfaces(
subject=subject,
session=session,
)
- return _prep_out_files(exec_params=exec_params, out_files=out_files)
+ return _prep_out_files(
+ exec_params=exec_params, out_files=out_files, check_relative=cfg.fs_subjects_dir
+ )
def get_config(
diff --git a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
index a09d063e2..33f7b870c 100644
--- a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
+++ b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
@@ -69,7 +69,9 @@ def make_bem_solution(
out_files = get_output_fnames_make_bem_solution(cfg=cfg, subject=subject)
mne.write_bem_surfaces(out_files["model"], bem_model, overwrite=True)
mne.write_bem_solution(out_files["sol"], bem_sol, overwrite=True)
- return _prep_out_files(exec_params=exec_params, out_files=out_files)
+ return _prep_out_files(
+ exec_params=exec_params, out_files=out_files, check_relative=cfg.fs_subjects_dir
+ )
def get_config(
diff --git a/mne_bids_pipeline/steps/source/_03_setup_source_space.py b/mne_bids_pipeline/steps/source/_03_setup_source_space.py
index 64e7314ed..52c342dbf 100644
--- a/mne_bids_pipeline/steps/source/_03_setup_source_space.py
+++ b/mne_bids_pipeline/steps/source/_03_setup_source_space.py
@@ -55,7 +55,9 @@ def run_setup_source_space(
in_files.clear() # all used by setup_source_space
out_files = get_output_fnames_setup_source_space(cfg=cfg, subject=subject)
mne.write_source_spaces(out_files["src"], src, overwrite=True)
- return _prep_out_files(exec_params=exec_params, out_files=out_files)
+ return _prep_out_files(
+ exec_params=exec_params, out_files=out_files, check_relative=cfg.fs_subjects_dir
+ )
def get_config(
From 2796d017ccc2797a09d29f771443184948b9834b Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 5 Feb 2024 15:03:26 -0500
Subject: [PATCH 061/132] [pre-commit.ci] pre-commit autoupdate (#843)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d8ceaa9ff..6a8375286 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,7 +6,7 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.14
+ rev: v0.2.0
hooks:
- id: ruff
args: ["--fix"]
From 92be6039ce7b644f1a049c255c49a742ff591de8 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Tue, 6 Feb 2024 11:20:29 -0500
Subject: [PATCH 062/132] ENH: Write out raw data and SSP events (#840)
---
docs/source/v1.6.md.inc | 3 +
mne_bids_pipeline/_config.py | 8 +-
mne_bids_pipeline/_config_utils.py | 23 ++-
.../preprocessing/_05_regress_artifact.py | 2 +-
.../steps/preprocessing/_06b_run_ssp.py | 56 ++++---
.../steps/preprocessing/_07_make_epochs.py | 5 +-
.../steps/preprocessing/_08a_apply_ica.py | 145 ++++++++++++++++--
.../steps/preprocessing/_08b_apply_ssp.py | 132 +++++++++++++---
.../steps/preprocessing/_09_ptp_reject.py | 2 +-
.../steps/sensor/_06_make_cov.py | 4 +-
.../steps/sensor/_99_group_average.py | 2 +-
.../tests/configs/config_ds000248_base.py | 2 +-
12 files changed, 317 insertions(+), 67 deletions(-)
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 01bfd87e4..3c87dac23 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -6,6 +6,8 @@
- Added [`regress_artifact`][mne_bids_pipeline._config.regress_artifact] to allow artifact regression (e.g., of MEG reference sensors in KIT systems) (#837 by @larsoner)
- Chosen `reject` parameters are now saved in the generated HTML reports (#839 by @larsoner)
+- Added saving of clean raw data in addition to epochs (#840 by @larsoner)
+- Added saving of detected blink and cardiac events used to calculate SSP projectors (#840 by @larsoner)
[//]: # (### :warning: Behavior changes)
@@ -21,6 +23,7 @@
- Fix bug where EEG `reject` params were not used for `ch_types = ["meg", "eeg"]` (#839 by @larsoner)
- Fix bug where implicit `mf_reference_run` could change across invocations of `mne_bids_pipeline`, breaking caching (#839 by @larsoner)
- Fix bug where `--no-cache` had no effect (#839 by @larsoner)
+- Fix bug where raw, empty-room, and custom noise covariances were errantly calculated on data without ICA or SSP applied (#840 by @larsoner)
### :medical_symbol: Code health
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index e3c7626bb..041331da5 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1161,14 +1161,14 @@
ways using the configuration options you can find below.
"""
-min_ecg_epochs: int = 5
+min_ecg_epochs: Annotated[int, Ge(1)] = 5
"""
-Minimal number of ECG epochs needed to compute SSP or ICA rejection.
+Minimal number of ECG epochs needed to compute SSP projectors.
"""
-min_eog_epochs: int = 5
+min_eog_epochs: Annotated[int, Ge(1)] = 5
"""
-Minimal number of EOG epochs needed to compute SSP or ICA rejection.
+Minimal number of EOG epochs needed to compute SSP projectors.
"""
diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py
index 7b555a2a4..784752028 100644
--- a/mne_bids_pipeline/_config_utils.py
+++ b/mne_bids_pipeline/_config_utils.py
@@ -484,7 +484,7 @@ def get_noise_cov_bids_path(
task=cfg.task,
acquisition=cfg.acq,
run=None,
- processing=cfg.proc,
+ processing="clean",
recording=cfg.rec,
space=cfg.space,
suffix="cov",
@@ -638,3 +638,24 @@ def _pl(x, *, non_pl="", pl="s"):
"""Determine if plural should be used."""
len_x = x if isinstance(x, (int, np.generic)) else len(x)
return non_pl if len_x == 1 else pl
+
+
+def _proj_path(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+) -> BIDSPath:
+ return BIDSPath(
+ subject=subject,
+ session=session,
+ task=cfg.task,
+ acquisition=cfg.acq,
+ recording=cfg.rec,
+ space=cfg.space,
+ datatype=cfg.datatype,
+ root=cfg.deriv_root,
+ extension=".fif",
+ suffix="proj",
+ check=False,
+ )
diff --git a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
index 5ab1119a6..cb31df04d 100644
--- a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
+++ b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
@@ -82,7 +82,7 @@ def run_regress_artifact(
model.apply(raw, copy=False)
if projs:
raw.add_proj(projs)
- raw.save(out_files[in_key], overwrite=True)
+ raw.save(out_files[in_key], overwrite=True, split_size=cfg._raw_split_size)
_update_for_splits(out_files, in_key)
model.save(out_files["regress"], overwrite=True)
assert len(in_files) == 0, in_files.keys()
diff --git a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
index 7aa0e97de..7ec75ef91 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
@@ -7,13 +7,15 @@
from typing import Optional
import mne
+import numpy as np
from mne import compute_proj_epochs, compute_proj_evoked
-from mne.preprocessing import create_ecg_epochs, create_eog_epochs
+from mne.preprocessing import find_ecg_events, find_eog_events
from mne_bids import BIDSPath
from ..._config_utils import (
_bids_kwargs,
_pl,
+ _proj_path,
get_runs,
get_sessions,
get_subjects,
@@ -25,6 +27,11 @@
from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
+def _find_ecg_events(raw: mne.io.Raw, ch_name: Optional[str]) -> np.ndarray:
+ """Wrap find_ecg_events to use the same defaults as create_ecg_events."""
+ return find_ecg_events(raw, ch_name=ch_name, l_freq=8, h_freq=16)[0]
+
+
def get_input_fnames_run_ssp(
*,
cfg: SimpleNamespace,
@@ -69,14 +76,7 @@ def run_ssp(
# compute SSP on all runs of raw
raw_fnames = [in_files.pop(f"raw_run-{run}") for run in cfg.runs]
- # when saving proj, use run=None
- out_files = dict()
- out_files["proj"] = (
- raw_fnames[0]
- .copy()
- .update(run=None, suffix="proj", split=None, processing=None, check=False)
- )
-
+ out_files = dict(proj=_proj_path(cfg=cfg, subject=subject, session=session))
msg = (
f"Input{_pl(raw_fnames)} ({len(raw_fnames)}): "
f'{raw_fnames[0].basename}{_pl(raw_fnames, pl=" ...")}'
@@ -93,7 +93,7 @@ def run_ssp(
projs = dict()
proj_kinds = ("ecg", "eog")
rate_names = dict(ecg="heart", eog="blink")
- epochs_fun = dict(ecg=create_ecg_epochs, eog=create_eog_epochs)
+ events_fun = dict(ecg=_find_ecg_events, eog=find_eog_events)
minimums = dict(ecg=cfg.min_ecg_epochs, eog=cfg.min_eog_epochs)
rejects = dict(ecg=cfg.ssp_reject_ecg, eog=cfg.ssp_reject_eog)
avg = dict(ecg=cfg.ecg_proj_from_average, eog=cfg.eog_proj_from_average)
@@ -111,17 +111,38 @@ def run_ssp(
projs[kind] = []
if not any(n_projs[kind].values()):
continue
- proj_epochs = epochs_fun[kind](
- raw,
- ch_name=ch_name[kind],
- decim=cfg.epochs_decim,
- )
- n_orig = len(proj_epochs.selection)
+ events = events_fun[kind](raw=raw, ch_name=ch_name[kind])
+ n_orig = len(events)
rate = n_orig / raw.times[-1] * 60
bpm_msg = f"{rate:5.1f} bpm"
msg = f"Detected {rate_names[kind]} rate: {bpm_msg}"
logger.info(**gen_log_kwargs(message=msg))
- # Enough to start
+ # Enough to create epochs
+ if len(events) < minimums[kind]:
+ msg = (
+ f"No {kind.upper()} projectors computed: got "
+ f"{len(events)} original events < {minimums[kind]} {bpm_msg}"
+ )
+ logger.warning(**gen_log_kwargs(message=msg))
+ continue
+ out_files[f"events_{kind}"] = (
+ out_files["proj"]
+ .copy()
+ .update(suffix=f"{kind}-eve", split=None, check=False, extension=".txt")
+ )
+ mne.write_events(out_files[f"events_{kind}"], events, overwrite=True)
+ proj_epochs = mne.Epochs(
+ raw,
+ events=events,
+ event_id=events[0, 2],
+ tmin=-0.5,
+ tmax=0.5,
+ proj=False,
+ baseline=(None, None),
+ reject_by_annotation=True,
+ preload=True,
+ decim=cfg.epochs_decim,
+ )
if len(proj_epochs) >= minimums[kind]:
reject_ = _get_reject(
subject=subject,
@@ -134,7 +155,6 @@ def run_ssp(
proj_epochs.drop_bad(reject=reject_)
# Still enough after rejection
if len(proj_epochs) >= minimums[kind]:
- proj_epochs.apply_baseline((None, None))
use = proj_epochs.average() if avg[kind] else proj_epochs
fun = compute_proj_evoked if avg[kind] else compute_proj_epochs
desc_prefix = (
diff --git a/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py b/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
index 0cebb033e..42bf721df 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
@@ -54,7 +54,7 @@ def get_input_fnames_epochs(
extension=".fif",
datatype=cfg.datatype,
root=cfg.deriv_root,
- processing="filt",
+ processing=cfg.processing,
).update(suffix="raw", check=False)
# Generate a list of raw data paths (i.e., paths of individual runs)
@@ -276,7 +276,7 @@ def _get_events(cfg, subject, session):
acquisition=cfg.acq,
recording=cfg.rec,
space=cfg.space,
- processing="filt",
+ processing=cfg.processing,
suffix="raw",
extension=".fif",
datatype=cfg.datatype,
@@ -322,6 +322,7 @@ def get_config(
rest_epochs_overlap=config.rest_epochs_overlap,
_epochs_split_size=config._epochs_split_size,
runs=get_runs(config=config, subject=subject),
+ processing="filt" if config.regress_artifact is None else "regress",
**_bids_kwargs(config=config),
)
return cfg
diff --git a/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
index f4b999cc8..e53a4758f 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
@@ -21,22 +21,23 @@
from mne_bids import BIDSPath
from ..._config_utils import (
- _bids_kwargs,
+ get_runs_tasks,
get_sessions,
get_subjects,
)
+from ..._import_data import _get_run_rest_noise_path, _import_data_kwargs
from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
-from ..._report import _agg_backend, _open_report
+from ..._report import _add_raw, _agg_backend, _open_report
from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
-def get_input_fnames_apply_ica(
+def _ica_paths(
*,
cfg: SimpleNamespace,
subject: str,
session: Optional[str],
-) -> dict:
+):
bids_basename = BIDSPath(
subject=subject,
session=session,
@@ -53,15 +54,56 @@ def get_input_fnames_apply_ica(
in_files["components"] = bids_basename.copy().update(
processing="ica", suffix="components", extension=".tsv"
)
- in_files["epochs"] = bids_basename.copy().update(suffix="epo", extension=".fif")
+ return in_files
+
+
+def _read_ica_and_exclude(
+ in_files: dict,
+) -> None:
+ ica = read_ica(fname=in_files.pop("ica"))
+ tsv_data = pd.read_csv(in_files.pop("components"), sep="\t")
+ ica.exclude = tsv_data.loc[tsv_data["status"] == "bad", "component"].to_list()
+ return ica
+
+
+def get_input_fnames_apply_ica_epochs(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+) -> dict:
+ in_files = _ica_paths(cfg=cfg, subject=subject, session=session)
+ in_files["epochs"] = in_files["ica"].copy().update(suffix="epo", extension=".fif")
_update_for_splits(in_files, "epochs", single=True)
return in_files
+def get_input_fnames_apply_ica_raw(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ run: str,
+ task: Optional[str],
+) -> dict:
+ in_files = _get_run_rest_noise_path(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ run=run,
+ task=task,
+ kind="filt",
+ mf_reference_run=cfg.mf_reference_run,
+ )
+ assert len(in_files)
+ in_files.update(_ica_paths(cfg=cfg, subject=subject, session=session))
+ return in_files
+
+
@failsafe_run(
- get_input_fnames=get_input_fnames_apply_ica,
+ get_input_fnames=get_input_fnames_apply_ica_epochs,
)
-def apply_ica(
+def apply_ica_epochs(
*,
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
@@ -85,11 +127,7 @@ def apply_ica(
# Load ICA.
msg = f"Reading ICA: {in_files['ica']}"
logger.debug(**gen_log_kwargs(message=msg))
- ica = read_ica(fname=in_files.pop("ica"))
-
- # Select ICs to remove.
- tsv_data = pd.read_csv(in_files.pop("components"), sep="\t")
- ica.exclude = tsv_data.loc[tsv_data["status"] == "bad", "component"].to_list()
+ ica = _read_ica_and_exclude(in_files)
# Load epochs.
msg = f'Input: {in_files["epochs"].basename}'
@@ -168,16 +206,65 @@ def apply_ica(
return _prep_out_files(exec_params=exec_params, out_files=out_files)
+@failsafe_run(
+ get_input_fnames=get_input_fnames_apply_ica_raw,
+)
+def apply_ica_raw(
+ *,
+ cfg: SimpleNamespace,
+ exec_params: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ run: str,
+ task: Optional[str],
+ in_files: dict,
+) -> dict:
+ ica = _read_ica_and_exclude(in_files)
+ in_key = list(in_files)[0]
+ assert in_key.startswith("raw"), in_key
+ raw_fname = in_files.pop(in_key)
+ assert len(in_files) == 0, in_files
+ out_files = dict()
+ out_files[in_key] = raw_fname.copy().update(processing="clean")
+ msg = f"Writing {out_files[in_key].basename} …"
+ logger.info(**gen_log_kwargs(message=msg))
+ raw = mne.io.read_raw_fif(raw_fname, preload=True)
+ ica.apply(raw)
+ raw.save(out_files[in_key], overwrite=True, split_size=cfg._raw_split_size)
+ _update_for_splits(out_files, in_key)
+ # Report
+ with _open_report(
+ cfg=cfg,
+ exec_params=exec_params,
+ subject=subject,
+ session=session,
+ run=run,
+ task=task,
+ ) as report:
+ msg = "Adding cleaned raw data to report"
+ logger.info(**gen_log_kwargs(message=msg))
+ _add_raw(
+ cfg=cfg,
+ report=report,
+ bids_path_in=out_files[in_key],
+ title="Raw (clean)",
+ tags=("clean",),
+ raw=raw,
+ )
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
+
+
def get_config(
*,
config: SimpleNamespace,
+ subject: str,
) -> SimpleNamespace:
cfg = SimpleNamespace(
baseline=config.baseline,
ica_reject=config.ica_reject,
- ch_types=config.ch_types,
+ processing="filt" if config.regress_artifact is None else "regress",
_epochs_split_size=config._epochs_split_size,
- **_bids_kwargs(config=config),
+ **_import_data_kwargs(config=config, subject=subject),
)
return cfg
@@ -190,17 +277,45 @@ def main(*, config: SimpleNamespace) -> None:
return
with get_parallel_backend(config.exec_params):
- parallel, run_func = parallel_func(apply_ica, exec_params=config.exec_params)
+ # Epochs
+ parallel, run_func = parallel_func(
+ apply_ica_epochs, exec_params=config.exec_params
+ )
logs = parallel(
run_func(
cfg=get_config(
config=config,
+ subject=subject,
+ ),
+ exec_params=config.exec_params,
+ subject=subject,
+ session=session,
+ )
+ for subject in get_subjects(config)
+ for session in get_sessions(config)
+ )
+ # Raw
+ parallel, run_func = parallel_func(
+ apply_ica_raw, exec_params=config.exec_params
+ )
+ logs += parallel(
+ run_func(
+ cfg=get_config(
+ config=config,
+ subject=subject,
),
exec_params=config.exec_params,
subject=subject,
session=session,
+ run=run,
+ task=task,
)
for subject in get_subjects(config)
for session in get_sessions(config)
+ for run, task in get_runs_tasks(
+ config=config,
+ subject=subject,
+ session=session,
+ )
)
save_logs(config=config, logs=logs)
diff --git a/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py b/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py
index b1eda9cd1..e6fad4b8f 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py
@@ -9,47 +9,37 @@
from typing import Optional
import mne
-from mne_bids import BIDSPath
from ..._config_utils import (
- _bids_kwargs,
+ _proj_path,
+ get_runs_tasks,
get_sessions,
get_subjects,
)
+from ..._import_data import _get_run_rest_noise_path, _import_data_kwargs
from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
+from ..._report import _add_raw, _open_report
from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
-def get_input_fnames_apply_ssp(
+def get_input_fnames_apply_ssp_epochs(
*,
cfg: SimpleNamespace,
subject: str,
session: Optional[str],
) -> dict:
- bids_basename = BIDSPath(
- subject=subject,
- session=session,
- task=cfg.task,
- acquisition=cfg.acq,
- recording=cfg.rec,
- space=cfg.space,
- datatype=cfg.datatype,
- root=cfg.deriv_root,
- extension=".fif",
- check=False,
- )
in_files = dict()
- in_files["epochs"] = bids_basename.copy().update(suffix="epo", check=False)
+ in_files["proj"] = _proj_path(cfg=cfg, subject=subject, session=session)
+ in_files["epochs"] = in_files["proj"].copy().update(suffix="epo", check=False)
_update_for_splits(in_files, "epochs", single=True)
- in_files["proj"] = bids_basename.copy().update(suffix="proj", check=False)
return in_files
@failsafe_run(
- get_input_fnames=get_input_fnames_apply_ssp,
+ get_input_fnames=get_input_fnames_apply_ssp_epochs,
)
-def apply_ssp(
+def apply_ssp_epochs(
*,
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
@@ -81,13 +71,85 @@ def apply_ssp(
return _prep_out_files(exec_params=exec_params, out_files=out_files)
+def get_input_fnames_apply_ssp_raw(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ run: str,
+ task: Optional[str],
+) -> dict:
+ in_files = _get_run_rest_noise_path(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ run=run,
+ task=task,
+ kind="filt",
+ mf_reference_run=cfg.mf_reference_run,
+ )
+ assert len(in_files)
+ in_files["proj"] = _proj_path(cfg=cfg, subject=subject, session=session)
+ return in_files
+
+
+@failsafe_run(
+ get_input_fnames=get_input_fnames_apply_ssp_raw,
+)
+def apply_ssp_raw(
+ *,
+ cfg: SimpleNamespace,
+ exec_params: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ run: str,
+ task: Optional[str],
+ in_files: dict,
+) -> dict:
+ projs = mne.read_proj(in_files.pop("proj"))
+ in_key = list(in_files.keys())[0]
+ assert in_key.startswith("raw"), in_key
+ raw_fname = in_files.pop(in_key)
+ assert len(in_files) == 0, in_files.keys()
+ raw = mne.io.read_raw_fif(raw_fname)
+ raw.add_proj(projs)
+ out_files = dict()
+ out_files[in_key] = raw_fname.copy().update(processing="clean")
+ msg = f"Writing {out_files[in_key].basename} …"
+ logger.info(**gen_log_kwargs(message=msg))
+ raw.save(out_files[in_key], overwrite=True, split_size=cfg._raw_split_size)
+ _update_for_splits(out_files, in_key)
+ # Report
+ with _open_report(
+ cfg=cfg,
+ exec_params=exec_params,
+ subject=subject,
+ session=session,
+ run=run,
+ task=task,
+ ) as report:
+ msg = "Adding cleaned raw data to report"
+ logger.info(**gen_log_kwargs(message=msg))
+ _add_raw(
+ cfg=cfg,
+ report=report,
+ bids_path_in=out_files[in_key],
+ title="Raw (clean)",
+ tags=("clean",),
+ raw=raw,
+ )
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
+
+
def get_config(
*,
config: SimpleNamespace,
+ subject: str,
) -> SimpleNamespace:
cfg = SimpleNamespace(
+ processing="filt" if config.regress_artifact is None else "regress",
_epochs_split_size=config._epochs_split_size,
- **_bids_kwargs(config=config),
+ **_import_data_kwargs(config=config, subject=subject),
)
return cfg
@@ -100,11 +162,15 @@ def main(*, config: SimpleNamespace) -> None:
return
with get_parallel_backend(config.exec_params):
- parallel, run_func = parallel_func(apply_ssp, exec_params=config.exec_params)
+ # Epochs
+ parallel, run_func = parallel_func(
+ apply_ssp_epochs, exec_params=config.exec_params
+ )
logs = parallel(
run_func(
cfg=get_config(
config=config,
+ subject=subject,
),
exec_params=config.exec_params,
subject=subject,
@@ -113,4 +179,28 @@ def main(*, config: SimpleNamespace) -> None:
for subject in get_subjects(config)
for session in get_sessions(config)
)
+ # Raw
+ parallel, run_func = parallel_func(
+ apply_ssp_raw, exec_params=config.exec_params
+ )
+ logs += parallel(
+ run_func(
+ cfg=get_config(
+ config=config,
+ subject=subject,
+ ),
+ exec_params=config.exec_params,
+ subject=subject,
+ session=session,
+ run=run,
+ task=task,
+ )
+ for subject in get_subjects(config)
+ for session in get_sessions(config)
+ for run, task in get_runs_tasks(
+ config=config,
+ subject=subject,
+ session=session,
+ )
+ )
save_logs(config=config, logs=logs)
diff --git a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
index 3584aa72f..d08469b3c 100644
--- a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
+++ b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
@@ -187,7 +187,7 @@ def drop_ptp(
psd = True
else:
psd = 30
- tags = ("epochs", "reject")
+ tags = ("epochs", "clean")
kind = cfg.reject if isinstance(cfg.reject, str) else "Rejection"
title = "Epochs: after cleaning"
with _open_report(
diff --git a/mne_bids_pipeline/steps/sensor/_06_make_cov.py b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
index a9c211df4..5a210d45f 100644
--- a/mne_bids_pipeline/steps/sensor/_06_make_cov.py
+++ b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
@@ -71,7 +71,7 @@ def get_input_fnames_cov(
run=None,
recording=cfg.rec,
space=cfg.space,
- processing="filt",
+ processing="clean",
suffix="raw",
extension=".fif",
datatype=cfg.datatype,
@@ -173,7 +173,7 @@ def retrieve_custom_cov(
task=cfg.task,
acquisition=cfg.acq,
run=None,
- processing=cfg.proc,
+ processing="clean",
recording=cfg.rec,
space=cfg.space,
suffix="ave",
diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py
index 7ac19e7de..63e4e6ea2 100644
--- a/mne_bids_pipeline/steps/sensor/_99_group_average.py
+++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py
@@ -107,7 +107,7 @@ def average_evokeds(
task=cfg.task,
acquisition=cfg.acq,
run=None,
- processing=cfg.proc,
+ processing="clean",
recording=cfg.rec,
space=cfg.space,
suffix="ave",
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_base.py b/mne_bids_pipeline/tests/configs/config_ds000248_base.py
index 6ffd9644e..9888e1cee 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_base.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_base.py
@@ -23,7 +23,7 @@
def noise_cov(bp):
# Use pre-stimulus period as noise source
- bp = bp.copy().update(processing="clean", suffix="epo")
+ bp = bp.copy().update(suffix="epo")
if not bp.fpath.exists():
bp.update(split="01")
epo = mne.read_epochs(bp)
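Note on patch 062: SSP projector computation now builds epochs from explicitly detected events (so the events can also be written to disk) instead of calling `create_ecg_epochs`/`create_eog_epochs`. A hedged sketch of the ECG path, mirroring the diff; the `min_events` guard value is illustrative.

```python
from typing import Optional

import mne
from mne.preprocessing import find_ecg_events


def ecg_proj_epochs(raw: mne.io.Raw, min_events: int = 5) -> Optional[mne.Epochs]:
    # Same detection defaults the patch wraps in _find_ecg_events
    events = find_ecg_events(raw, l_freq=8, h_freq=16)[0]
    if len(events) < min_events:  # the pipeline skips projector computation here
        return None
    return mne.Epochs(
        raw,
        events=events,
        event_id=events[0, 2],  # all detected events share one ID
        tmin=-0.5,
        tmax=0.5,
        proj=False,
        baseline=(None, None),  # replaces the former apply_baseline() call
        reject_by_annotation=True,
        preload=True,
    )
```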
From fe56c011c8810f5d1acf04b81b147ed08295cd6f Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Wed, 7 Feb 2024 09:01:42 -0500
Subject: [PATCH 063/132] BUG: Fix bug with Maxwell step when
find_noise_channels_meg=False (#847)
---
docs/source/v1.6.md.inc | 1 +
mne_bids_pipeline/_import_data.py | 3 ++-
mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py | 5 +++--
mne_bids_pipeline/tests/configs/config_ds003392.py | 5 +++--
4 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 3c87dac23..a38943374 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -23,6 +23,7 @@
- Fix bug where EEG `reject` params were not used for `ch_types = ["meg", "eeg"]` (#839 by @larsoner)
- Fix bug where implicit `mf_reference_run` could change across invocations of `mne_bids_pipeline`, breaking caching (#839 by @larsoner)
- Fix bug where `--no-cache` had no effect (#839 by @larsoner)
+- Fix bug where the Maxwell filtering step would fail if [`find_noisy_channels_meg = False`][mne_bids_pipeline._config.find_noisy_channels_meg] was used (#847 by @larsoner)
- Fix bug where raw, empty-room, and custom noise covariances were errantly calculated on data without ICA or SSP applied (#840 by @larsoner)
### :medical_symbol: Code health
diff --git a/mne_bids_pipeline/_import_data.py b/mne_bids_pipeline/_import_data.py
index be892576b..c3c319f44 100644
--- a/mne_bids_pipeline/_import_data.py
+++ b/mne_bids_pipeline/_import_data.py
@@ -683,10 +683,11 @@ def _get_run_rest_noise_path(
def _get_mf_reference_run_path(
+ *,
cfg: SimpleNamespace,
subject: str,
session: Optional[str],
- add_bads: bool,
+ add_bads: Optional[bool] = None,
) -> dict:
return _get_run_path(
cfg=cfg,
diff --git a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
index c5b58e2b6..5e5e30318 100644
--- a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
@@ -64,7 +64,7 @@ def get_input_fnames_esss(
mf_reference_run=cfg.mf_reference_run,
**kwargs,
)
- in_files.update(_get_mf_reference_run_path(add_bads=True, **kwargs))
+ in_files.update(_get_mf_reference_run_path(**kwargs))
return in_files
@@ -241,7 +241,8 @@ def get_input_fnames_maxwell_filter(
)
# reference run (used for `destination` and also bad channels for noise)
- in_files.update(_get_mf_reference_run_path(add_bads=True, **kwargs))
+ # use add_bads=None here to mean "add if autobad is turned on"
+ in_files.update(_get_mf_reference_run_path(**kwargs))
is_rest_noise = run is None and task in ("noise", "rest")
if is_rest_noise:
diff --git a/mne_bids_pipeline/tests/configs/config_ds003392.py b/mne_bids_pipeline/tests/configs/config_ds003392.py
index 756d36fbc..3f225e50c 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003392.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003392.py
@@ -6,8 +6,9 @@
subjects = ["01"]
task = "localizer"
-find_flat_channels_meg = True
-find_noisy_channels_meg = True
+# usually a good idea to use True, but we know no bads are detected for this dataset
+find_flat_channels_meg = False
+find_noisy_channels_meg = False
use_maxwell_filter = True
ch_types = ["meg"]
From d76abaa0cc1c0fbf7dc25e6ba6bcc6e0d4ed4284 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 12 Feb 2024 19:45:40 -0500
Subject: [PATCH 064/132] [pre-commit.ci] pre-commit autoupdate (#851)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 6a8375286..e579e4d82 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,7 +6,7 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.2.0
+ rev: v0.2.1
hooks:
- id: ruff
args: ["--fix"]
@@ -18,7 +18,7 @@ repos:
additional_dependencies:
- tomli
- repo: https://github.com/adrienverge/yamllint.git
- rev: v1.33.0
+ rev: v1.34.0
hooks:
- id: yamllint
args: [--strict]
From 9f8b17091a0db3914b324c92219b08ff62d4eb7c Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Thu, 15 Feb 2024 12:53:00 -0500
Subject: [PATCH 065/132] BUG: Fix bug with multichannel classification (#853)
---
docs/source/v1.6.md.inc | 2 +
mne_bids_pipeline/_config.py | 5 +-
mne_bids_pipeline/_decoding.py | 29 ++++++++++
.../steps/sensor/_02_decoding_full_epochs.py | 27 ++++++---
.../steps/sensor/_03_decoding_time_by_time.py | 31 ++++++++--
.../steps/sensor/_05_decoding_csp.py | 56 +++++++------------
.../tests/configs/config_ERP_CORE.py | 5 --
.../tests/configs/config_ds003392.py | 1 -
pyproject.toml | 4 +-
9 files changed, 103 insertions(+), 57 deletions(-)
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index a38943374..a4e521f0a 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -25,6 +25,8 @@
- Fix bug where `--no-cache` had no effect (#839 by @larsoner)
- Fix bug where the Maxwell filtering step would fail if [`find_noisy_channels_meg = False`][mne_bids_pipeline._config.find_noisy_channels_meg] was used (#847 by @larsoner)
- Fix bug where raw, empty-room, and custom noise covariances were errantly calculated on data without ICA or SSP applied (#840 by @larsoner)
+- Fix bug where multiple channel types (e.g., MEG and EEG) were not handled correctly in decoding (#853 by @larsoner)
+- Fix bug where the previous default for [`ica_n_components`][mne_bids_pipeline._config.ica_n_components] of `0.8` was too conservative, changed the default to `None` to match MNE-Python (#853 by @larsoner)
### :medical_symbol: Code health
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 041331da5..2232134c8 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1343,7 +1343,7 @@
limit may be too low to achieve convergence.
"""
-ica_n_components: Optional[Union[float, int]] = 0.8
+ica_n_components: Optional[Union[float, int]] = None
"""
MNE conducts ICA as a sort of a two-step procedure: First, a PCA is run
on the data (trying to exclude zero-valued components in rank-deficient
@@ -1362,7 +1362,8 @@
explained variance less than the value specified here will be passed to
ICA.
-If `None`, **all** principal components will be used.
+If `None` (default), `0.999999` will be used to avoid issues when working with
+rank-deficient data.
This setting may drastically alter the time required to compute ICA.
"""
diff --git a/mne_bids_pipeline/_decoding.py b/mne_bids_pipeline/_decoding.py
index 4d895395b..df10d6f1f 100644
--- a/mne_bids_pipeline/_decoding.py
+++ b/mne_bids_pipeline/_decoding.py
@@ -1,8 +1,15 @@
+from typing import Optional
+
+import mne
import numpy as np
from joblib import parallel_backend
from mne.utils import _validate_type
+from sklearn.base import BaseEstimator
+from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
+from ._logging import gen_log_kwargs, logger
+
class LogReg(LogisticRegression):
"""Hack to avoid a warning with n_jobs != 1 when using dask."""
@@ -70,3 +77,25 @@ def _handle_csp_args(
freq_bins = list(zip(edges[:-1], edges[1:]))
freq_name_to_bins_map[freq_range_name] = freq_bins
return freq_name_to_bins_map
+
+
+def _decoding_preproc_steps(
+ subject: str,
+ session: Optional[str],
+ epochs: mne.Epochs,
+ pca: bool = True,
+) -> list[BaseEstimator]:
+ scaler = mne.decoding.Scaler(epochs.info)
+ steps = [scaler]
+ if pca:
+ ranks = mne.compute_rank(inst=epochs, rank="info")
+ rank = sum(ranks.values())
+ msg = f"Reducing data dimension via PCA; new rank: {rank} (from {ranks})."
+ logger.info(**gen_log_kwargs(message=msg))
+ steps.append(
+ mne.decoding.UnsupervisedSpatialFilter(
+ PCA(rank, whiten=True),
+ average=False,
+ )
+ )
+ return steps
diff --git a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
index 58a354c1c..4034245ff 100644
--- a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
+++ b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
@@ -15,7 +15,7 @@
import mne
import numpy as np
import pandas as pd
-from mne.decoding import Scaler, Vectorizer
+from mne.decoding import Vectorizer
from mne_bids import BIDSPath
from scipy.io import loadmat, savemat
from sklearn.model_selection import StratifiedKFold, cross_val_score
@@ -30,7 +30,7 @@
get_sessions,
get_subjects,
)
-from ..._decoding import LogReg
+from ..._decoding import LogReg, _decoding_preproc_steps
from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import (
@@ -113,16 +113,23 @@ def run_epochs_decoding(
# Crop to the desired analysis interval. Do it only after the concatenation to work
# around https://github.com/mne-tools/mne-python/issues/12153
epochs.crop(cfg.decoding_epochs_tmin, cfg.decoding_epochs_tmax)
+ # omit bad channels and reference MEG sensors
+ epochs.pick_types(meg=True, eeg=True, ref_meg=False, exclude="bads")
+ pre_steps = _decoding_preproc_steps(
+ subject=subject,
+ session=session,
+ epochs=epochs,
+ )
n_cond1 = len(epochs[epochs_conds[0]])
n_cond2 = len(epochs[epochs_conds[1]])
- X = epochs.get_data(picks="data") # omit bad channels
+ X = epochs.get_data()
y = np.r_[np.ones(n_cond1), np.zeros(n_cond2)]
- classification_pipeline = make_pipeline(
- Scaler(scalings="mean"),
- Vectorizer(), # So we can pass the data to scikit-learn
+ clf = make_pipeline(
+ *pre_steps,
+ Vectorizer(),
LogReg(
solver="liblinear", # much faster than the default
random_state=cfg.random_state,
@@ -138,7 +145,13 @@ def run_epochs_decoding(
n_splits=cfg.decoding_n_splits,
)
scores = cross_val_score(
- estimator=classification_pipeline, X=X, y=y, cv=cv, scoring="roc_auc", n_jobs=1
+ estimator=clf,
+ X=X,
+ y=y,
+ cv=cv,
+ scoring="roc_auc",
+ n_jobs=1,
+ error_score="raise",
)
# Save the scores
diff --git a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
index d61e865c4..5ad221d67 100644
--- a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
+++ b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
@@ -18,12 +18,16 @@
import mne
import numpy as np
import pandas as pd
-from mne.decoding import GeneralizingEstimator, SlidingEstimator, cross_val_multiscore
+from mne.decoding import (
+ GeneralizingEstimator,
+ SlidingEstimator,
+ Vectorizer,
+ cross_val_multiscore,
+)
from mne_bids import BIDSPath
from scipy.io import loadmat, savemat
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import make_pipeline
-from sklearn.preprocessing import StandardScaler
from ..._config_utils import (
_bids_kwargs,
@@ -34,7 +38,7 @@
get_sessions,
get_subjects,
)
-from ..._decoding import LogReg
+from ..._decoding import LogReg, _decoding_preproc_steps
from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, get_parallel_backend_name
from ..._report import (
@@ -122,18 +126,35 @@ def run_time_decoding(
epochs = mne.concatenate_epochs([epochs[epochs_conds[0]], epochs[epochs_conds[1]]])
n_cond1 = len(epochs[epochs_conds[0]])
n_cond2 = len(epochs[epochs_conds[1]])
+ epochs.pick_types(meg=True, eeg=True, ref_meg=False, exclude="bads")
+ # We can't use the full rank here because the number of samples can just be the
+ # number of epochs (which can be fewer than the number of channels)
+ pre_steps = _decoding_preproc_steps(
+ subject=subject,
+ session=session,
+ epochs=epochs,
+ pca=False,
+ )
+ # At some point we might want to enable this, but it's really slow and arguably
+ # unnecessary so let's omit it for now:
+ # pre_steps.append(
+ # mne.decoding.UnsupervisedSpatialFilter(
+ # PCA(n_components=0.999, whiten=True),
+ # )
+ # )
decim = cfg.decoding_time_generalization_decim
if cfg.decoding_time_generalization and decim > 1:
epochs.decimate(decim, verbose="error")
- X = epochs.get_data(picks="data") # omit bad channels
+ X = epochs.get_data()
y = np.r_[np.ones(n_cond1), np.zeros(n_cond2)]
# ProgressBar does not work on dask, so only enable it if not using dask
verbose = get_parallel_backend_name(exec_params=exec_params) != "dask"
with get_parallel_backend(exec_params):
clf = make_pipeline(
- StandardScaler(),
+ *pre_steps,
+ Vectorizer(),
LogReg(
solver="liblinear", # much faster than the default
random_state=cfg.random_state,
diff --git a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
index c9d3ee077..9b93c0c32 100644
--- a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
+++ b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
@@ -8,9 +8,8 @@
import mne
import numpy as np
import pandas as pd
-from mne.decoding import CSP, UnsupervisedSpatialFilter
+from mne.decoding import CSP
from mne_bids import BIDSPath
-from sklearn.decomposition import PCA
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.pipeline import make_pipeline
@@ -23,7 +22,7 @@
get_sessions,
get_subjects,
)
-from ..._decoding import LogReg, _handle_csp_args
+from ..._decoding import LogReg, _decoding_preproc_steps, _handle_csp_args
from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import (
@@ -159,30 +158,24 @@ def one_subject_decoding(
bids_path = in_files["epochs"].copy().update(processing=None, split=None)
epochs = mne.read_epochs(in_files.pop("epochs"))
_restrict_analyze_channels(epochs, cfg)
+ epochs.pick_types(meg=True, eeg=True, ref_meg=False, exclude="bads")
if cfg.time_frequency_subtract_evoked:
epochs.subtract_evoked()
- # Perform rank reduction via PCA.
- #
- # Select the channel type with the smallest rank.
- # Limit it to a maximum of 100.
- ranks = mne.compute_rank(inst=epochs, rank="info")
- ch_type_smallest_rank = min(ranks, key=ranks.get)
- rank = min(ranks[ch_type_smallest_rank], 100)
- del ch_type_smallest_rank, ranks
-
- msg = f"Reducing data dimension via PCA; new rank: {rank}."
- logger.info(**gen_log_kwargs(msg))
- pca = UnsupervisedSpatialFilter(PCA(rank), average=False)
+ preproc_steps = _decoding_preproc_steps(
+ subject=subject,
+ session=session,
+ epochs=epochs,
+ )
# Classifier
csp = CSP(
n_components=4, # XXX revisit
reg=0.1, # XXX revisit
- rank="info",
)
clf = make_pipeline(
+ *preproc_steps,
csp,
LogReg(
solver="liblinear", # much faster than the default
@@ -254,17 +247,11 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
epochs=epochs, contrast=contrast, fmin=fmin, fmax=fmax, cfg=cfg
)
# Get the data for all time points
- X = epochs_filt.get_data(picks="data") # omit bad channels
-
- # We apply PCA before running CSP:
- # - much faster CSP processing
- # - reduced risk of numerical instabilities.
- X_pca = pca.fit_transform(X)
- del X
+ X = epochs_filt.get_data()
cv_scores = cross_val_score(
estimator=clf,
- X=X_pca,
+ X=X,
y=y,
scoring=cfg.decoding_metric,
cv=cv,
@@ -326,14 +313,11 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
# Crop data to the time window of interest
if tmax is not None: # avoid warnings about outside the interval
tmax = min(tmax, epochs_filt.times[-1])
- epochs_filt.crop(tmin, tmax)
- X = epochs_filt.get_data(picks="data") # omit bad channels
- X_pca = pca.transform(X)
- del X
-
+ X = epochs_filt.crop(tmin, tmax).get_data()
+ del epochs_filt
cv_scores = cross_val_score(
estimator=clf,
- X=X_pca,
+ X=X,
y=y,
scoring=cfg.decoding_metric,
cv=cv,
@@ -454,11 +438,13 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
results = all_csp_tf_results[contrast]
mean_crossval_scores = list()
tmin, tmax, fmin, fmax = list(), list(), list(), list()
- mean_crossval_scores.extend(results["mean_crossval_score"].ravel())
- tmin.extend(results["t_min"].ravel())
- tmax.extend(results["t_max"].ravel())
- fmin.extend(results["f_min"].ravel())
- fmax.extend(results["f_max"].ravel())
+ mean_crossval_scores.extend(
+ results["mean_crossval_score"].to_numpy().ravel()
+ )
+ tmin.extend(results["t_min"].to_numpy().ravel())
+ tmax.extend(results["t_max"].to_numpy().ravel())
+ fmin.extend(results["f_min"].to_numpy().ravel())
+ fmax.extend(results["f_max"].to_numpy().ravel())
mean_crossval_scores = np.array(mean_crossval_scores, float)
fig, ax = plt.subplots(constrained_layout=True)
# XXX Add support for more metrics
diff --git a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
index 3adfbab82..14fcfc998 100644
--- a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
+++ b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
@@ -147,7 +147,6 @@
}
eeg_reference = ["P9", "P10"]
- ica_n_components = 30 - len(eeg_reference)
epochs_tmin = -0.6
epochs_tmax = 0.4
baseline = (-0.4, -0.2)
@@ -180,7 +179,6 @@
}
eeg_reference = ["P9", "P10"]
- ica_n_components = 30 - len(eeg_reference)
epochs_tmin = -0.8
epochs_tmax = 0.2
baseline = (None, -0.6)
@@ -193,7 +191,6 @@
}
eeg_reference = ["P9", "P10"]
- ica_n_components = 30 - len(eeg_reference)
epochs_tmin = -0.2
epochs_tmax = 0.8
baseline = (None, 0)
@@ -214,7 +211,6 @@
}
eeg_reference = ["P9", "P10"]
- ica_n_components = 30 - len(eeg_reference)
epochs_tmin = -0.2
epochs_tmax = 0.8
baseline = (None, 0)
@@ -281,7 +277,6 @@
}
eeg_reference = ["P9", "P10"]
- ica_n_components = 30 - len(eeg_reference)
epochs_tmin = -0.2
epochs_tmax = 0.8
baseline = (None, 0)
diff --git a/mne_bids_pipeline/tests/configs/config_ds003392.py b/mne_bids_pipeline/tests/configs/config_ds003392.py
index 3f225e50c..d84475cff 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003392.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003392.py
@@ -23,7 +23,6 @@
ica_max_iterations = 1000
ica_l_freq = 1.0
ica_n_components = 0.99
-ica_reject_components = "auto"
# Epochs
epochs_tmin = -0.2
diff --git a/pyproject.toml b/pyproject.toml
index bac831873..20a020d24 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -115,7 +115,7 @@ addopts = "-ra -vv --tb=short --cov=mne_bids_pipeline --cov-report= --junit-xml=
testpaths = ["mne_bids_pipeline"]
junit_family = "xunit2"
-[tool.ruff]
+[tool.ruff.lint]
select = ["A", "B006", "D", "E", "F", "I", "W", "UP"]
exclude = ["**/freesurfer/contrib", "dist/", "build/"]
ignore = [
@@ -128,5 +128,5 @@ ignore = [
"UP035", # Import Iterable from collections.abc
]
-[tool.ruff.pydocstyle]
+[tool.ruff.lint.pydocstyle]
convention = "numpy"
From 5d4e3c3096024f41081680e1c679269feb2158ee Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Fri, 16 Feb 2024 18:13:09 -0500
Subject: [PATCH 066/132] ENH: Better event count rep and noise_cov_method
(#854)
---
docs/source/settings/source/inverse.md | 1 +
docs/source/v1.6.md.inc | 2 +
mne_bids_pipeline/_config.py | 16 ++++
mne_bids_pipeline/_main.py | 6 +-
mne_bids_pipeline/_report.py | 84 ++++++++++++++-----
.../steps/sensor/_06_make_cov.py | 3 +-
6 files changed, 91 insertions(+), 21 deletions(-)
diff --git a/docs/source/settings/source/inverse.md b/docs/source/settings/source/inverse.md
index 4a10f1aef..367275071 100644
--- a/docs/source/settings/source/inverse.md
+++ b/docs/source/settings/source/inverse.md
@@ -10,5 +10,6 @@ tags:
- depth
- inverse_method
- noise_cov
+ - noise_cov_method
- source_info_path_update
- inverse_targets
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index a4e521f0a..b66b0eb17 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -8,6 +8,7 @@
- Chosen `reject` parameters are now saved in the generated HTML reports (#839 by @larsoner)
- Added saving of clean raw data in addition to epochs (#840 by @larsoner)
- Added saving of detected blink and cardiac events used to calculate SSP projectors (#840 by @larsoner)
+- Added [`noise_cov_method`][mne_bids_pipeline._config.noise_cov_method] to allow for the use of methods other than `"shrunk"` for noise covariance estimation (#854 by @larsoner)
[//]: # (### :warning: Behavior changes)
@@ -27,6 +28,7 @@
- Fix bug where raw, empty-room, and custom noise covariances were errantly calculated on data without ICA or SSP applied (#840 by @larsoner)
- Fix bug where multiple channel types (e.g., MEG and EEG) were not handled correctly in decoding (#853 by @larsoner)
- Fix bug where the previous default for [`ica_n_components`][mne_bids_pipeline._config.ica_n_components] of `0.8` was too conservative, changed the default to `None` to match MNE-Python (#853 by @larsoner)
+- Fix bug where the events table for the average subject could overflow in reports (#854 by @larsoner)
### :medical_symbol: Code health
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 2232134c8..b45753966 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -2050,6 +2050,22 @@ def noise_cov(bids_path):
```
"""
+noise_cov_method: Literal[
+ "shrunk",
+ "empirical",
+ "diagonal_fixed",
+ "oas",
+ "ledoit_wolf",
+ "factor_analysis",
+ "shrinkage",
+ "pca",
+ "auto",
+] = "shrunk"
+"""
+The noise covariance estimation method to use. See the MNE-Python documentation
+of `mne.compute_covariance` for details.
+"""
+
source_info_path_update: Optional[dict[str, str]] = dict(suffix="ave")
"""
When computing the forward and inverse solutions, by default the pipeline
diff --git a/mne_bids_pipeline/_main.py b/mne_bids_pipeline/_main.py
index 56d14a010..634018a13 100755
--- a/mne_bids_pipeline/_main.py
+++ b/mne_bids_pipeline/_main.py
@@ -94,7 +94,11 @@ def main():
help="Enable interactive mode.",
)
parser.add_argument(
- "--debug", dest="debug", action="store_true", help="Enable debugging on error."
+ "--debug",
+ "--pdb",
+ dest="debug",
+ action="store_true",
+ help="Enable debugging on error.",
)
parser.add_argument(
"--no-cache",
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index 80f2f1962..4df0be691 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -1,4 +1,5 @@
import contextlib
+import re
from functools import lru_cache
from io import StringIO
from types import SimpleNamespace
@@ -472,30 +473,13 @@ def add_event_counts(
logger.info(**gen_log_kwargs(message="Adding event counts to report …"))
if df_events is not None:
- css_classes = ("table", "table-striped", "table-borderless", "table-hover")
+ df_events.reset_index(drop=False, inplace=True, col_level=1)
report.add_html(
-            f'<div class="event-counts">\n'
-            f"{df_events.to_html(classes=css_classes, border=0)}\n"
-            f"</div>",
+ _df_bootstrap_table(df=df_events, data_id="events"),
title="Event counts",
tags=("events",),
replace=True,
)
- css = (
- ".event-counts {\n"
- " display: -webkit-box;\n"
- " display: -ms-flexbox;\n"
- " display: -webkit-flex;\n"
- " display: flex;\n"
- " justify-content: center;\n"
- " text-align: center;\n"
- "}\n\n"
- "th, td {\n"
- " text-align: center;\n"
- "}\n"
- )
- if css not in report.include:
- report.add_custom_css(css=css)
def _finalize(
@@ -906,3 +890,65 @@ def _render_bem(
replace=True,
n_jobs=1, # prevent automatic parallelization
)
+
+
+# Copied from mne/report/report.py
+
+try:
+ from mne.report.report import _df_bootstrap_table
+except ImportError: # MNE < 1.7
+
+ def _df_bootstrap_table(*, df, data_id):
+ html = df.to_html(
+ border=0,
+ index=False,
+ show_dimensions=True,
+ justify="unset",
+ float_format=lambda x: f"{x:.3f}",
+ classes="table table-hover table-striped table-sm table-responsive small",
+ na_rep="",
+ )
+ htmls = html.split("\n")
+        header_pattern = "<th>(.*)</th>"
+
+        for idx, html in enumerate(htmls):
+            if "<table" in html:
+                htmls[idx] = html.replace(
+                    "<table",
+                    f'<table id="mytable" data-toggle="table" data-id="{data_id}"',
+                )
+                continue
+
+            col_headers = re.findall(pattern=header_pattern, string=html)
+            if col_headers:
+                # Make columns sortable
+                assert len(col_headers) == 1
+                col_header = col_headers[0]
+                htmls[idx] = html.replace(
+                    "<th>",
+                    f'<th data-sortable="true" data-field="{col_header.lower()}">',
+ )
+
+ html = "\n".join(htmls)
+ return html
diff --git a/mne_bids_pipeline/steps/sensor/_06_make_cov.py b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
index 5a210d45f..1fb467e0a 100644
--- a/mne_bids_pipeline/steps/sensor/_06_make_cov.py
+++ b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
@@ -116,7 +116,7 @@ def compute_cov_from_epochs(
epochs,
tmin=tmin,
tmax=tmax,
- method="shrunk",
+ method=cfg.noise_cov_method,
rank="info",
verbose="error", # TODO: not baseline corrected, maybe problematic?
)
@@ -292,6 +292,7 @@ def get_config(
conditions=config.conditions,
contrasts=config.contrasts,
analyze_channels=config.analyze_channels,
+ noise_cov_method=config.noise_cov_method,
**_bids_kwargs(config=config),
)
return cfg
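Note on patch 066: the new `noise_cov_method` option is forwarded to `mne.compute_covariance(method=...)`, as the `_06_make_cov.py` hunk shows. A hypothetical user-config snippet exercising it; the values are illustrative, and any of the `Literal` choices listed in the patch is accepted.

```python
# In a user configuration file:
noise_cov = "emptyroom"  # existing option: where the noise data comes from
noise_cov_method = "ledoit_wolf"  # new option: covariance estimator to use
```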
From 3c9a22c009a54cef5fff7be62586fd7b1c62dc4e Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Mon, 19 Feb 2024 15:02:14 -0500
Subject: [PATCH 067/132] BUG: Fix bug with split epochs and ICA (#855)
---
.circleci/config.yml | 2 +-
docs/source/v1.6.md.inc | 19 ++++++++++---------
.../steps/preprocessing/_08a_apply_ica.py | 2 +-
mne_bids_pipeline/tests/conftest.py | 2 ++
mne_bids_pipeline/tests/test_run.py | 8 +++++++-
5 files changed, 21 insertions(+), 12 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index ceb51dfbf..8a1e87683 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -50,7 +50,7 @@ jobs:
pip install --upgrade --progress-bar off pip
pip install --upgrade --progress-bar off "autoreject @ https://api.github.com/repos/autoreject/autoreject/zipball/master" "mne[hdf5] @ git+https://github.com/mne-tools/mne-python@main" "mne-bids[full] @ https://api.github.com/repos/mne-tools/mne-bids/zipball/main" numba
pip install -ve .[tests]
- pip install "PyQt6!=6.6.1" "PyQt6-Qt6!=6.6.1"
+ pip install "PyQt6!=6.6.1,!=6.6.2" "PyQt6-Qt6!=6.6.1,!=6.6.2"
- run:
name: Check Qt
command: |
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index b66b0eb17..3abc9a081 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -20,15 +20,16 @@
### :bug: Bug fixes
-- Fix minor issues with path handling for cross-talk and calibration files (#834 by @larsoner)
-- Fix bug where EEG `reject` params were not used for `ch_types = ["meg", "eeg"]` (#839 by @larsoner)
-- Fix bug where implicit `mf_reference_run` could change across invocations of `mne_bids_pipeline`, breaking caching (#839 by @larsoner)
-- Fix bug where `--no-cache` had no effect (#839 by @larsoner)
-- Fix bug where the Maxwell filtering step would fail if [`find_noisy_channels_meg = False`][mne_bids_pipeline._config.find_noisy_channels_meg] was used (#847 by @larsoner)
-- Fix bug where raw, empty-room, and custom noise covariances were errantly calculated on data without ICA or SSP applied (#840 by @larsoner)
-- Fix bug where multiple channel types (e.g., MEG and EEG) were not handled correctly in decoding (#853 by @larsoner)
-- Fix bug where the previous default for [`ica_n_components`][mne_bids_pipeline._config.ica_n_components] of `0.8` was too conservative, changed the default to `None` to match MNE-Python (#853 by @larsoner)
-- Fix bug where the events table for the average subject could overflow in reports (#854 by @larsoner)
+- Fixed minor issues with path handling for cross-talk and calibration files (#834 by @larsoner)
+- Fixed EEG `reject` use for `ch_types = ["meg", "eeg"]` in epoch cleaning (#839 by @larsoner)
+- Fixed bug where implicit `mf_reference_run` could change across invocations of `mne_bids_pipeline`, breaking caching (#839 by @larsoner)
+- Fixed `--no-cache` behavior having no effect (#839 by @larsoner)
+- Fixed Maxwell filtering failures when [`find_noisy_channels_meg = False`][mne_bids_pipeline._config.find_noisy_channels_meg] is used (#847 by @larsoner)
+- Fixed raw, empty-room, and custom noise covariance calculation; previously these could errantly be calculated on data without ICA or SSP applied (#840 by @larsoner)
+- Fixed multiple channel type handling (e.g., MEG and EEG) in decoding (#853 by @larsoner)
+- Changed the default for [`ica_n_components`][mne_bids_pipeline._config.ica_n_components] from `0.8` (too conservative) to `None` to match MNE-Python's default (#853 by @larsoner)
+- Prevent events table for the average subject overflowing in reports (#854 by @larsoner)
+- Fixed split file behavior for Epochs when using ICA (#855 by @larsoner)
### :medical_symbol: Code health
diff --git a/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
index e53a4758f..8fcc8141c 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
@@ -113,7 +113,7 @@ def apply_ica_epochs(
) -> dict:
bids_basename = in_files["ica"].copy().update(processing=None)
out_files = dict()
- out_files["epochs"] = in_files["epochs"].copy().update(processing="ica")
+ out_files["epochs"] = in_files["epochs"].copy().update(processing="ica", split=None)
out_files["report"] = bids_basename.copy().update(
processing="ica", suffix="report", extension=".html"
)
diff --git a/mne_bids_pipeline/tests/conftest.py b/mne_bids_pipeline/tests/conftest.py
index bd3a1f485..64571c4a2 100644
--- a/mne_bids_pipeline/tests/conftest.py
+++ b/mne_bids_pipeline/tests/conftest.py
@@ -58,6 +58,8 @@ def pytest_configure(config):
always:constrained_layout not applied.*:UserWarning
ignore:datetime\.datetime\.utcfromtimestamp.*:DeprecationWarning
ignore:datetime\.datetime\.utcnow.*:DeprecationWarning
+ # pandas with no good workaround
+ ignore:The behavior of DataFrame concatenation with empty.*:FutureWarning
"""
for warning_line in warning_lines.split("\n"):
warning_line = warning_line.strip()
diff --git a/mne_bids_pipeline/tests/test_run.py b/mne_bids_pipeline/tests/test_run.py
index 2e068ef70..041fef894 100644
--- a/mne_bids_pipeline/tests/test_run.py
+++ b/mne_bids_pipeline/tests/test_run.py
@@ -60,7 +60,13 @@ class _TestOptionsT(TypedDict, total=False):
_n_jobs = {"preprocessing/_05_make_epochs": 1}
""",
},
- "ds000248_ica": {},
+ "ds000248_ica": {
+ "extra_config": """
+_raw_split_size = "60MB"
+_epochs_split_size = "30MB"
+_n_jobs = {}
+"""
+ },
"ds000248_T1_BEM": {
"steps": ("source/make_bem_surfaces",),
"requires": ("freesurfer",),
From 4e6733d7115305395e978e53f0ea3a54d805b784 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 19 Feb 2024 19:20:39 -0500
Subject: [PATCH 068/132] [pre-commit.ci] pre-commit autoupdate (#856)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e579e4d82..ae9404554 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,7 +6,7 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.2.1
+ rev: v0.2.2
hooks:
- id: ruff
args: ["--fix"]
@@ -18,7 +18,7 @@ repos:
additional_dependencies:
- tomli
- repo: https://github.com/adrienverge/yamllint.git
- rev: v1.34.0
+ rev: v1.35.1
hooks:
- id: yamllint
args: [--strict]
From 61b35530b16a61753b25fa663b575d9b2f7a6ed5 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Mon, 26 Feb 2024 09:22:56 -0500
Subject: [PATCH 069/132] DOC: Automatic flowchart (#860)
---
docs/source/.gitignore | 1 +
docs/source/features/gen_steps.py | 145 +++++++++++++++++-
docs/source/features/overview.md | 53 -------
docs/source/v1.6.md.inc | 1 +
mne_bids_pipeline/_run.py | 7 +
.../steps/freesurfer/_02_coreg_surfaces.py | 5 +-
.../steps/preprocessing/_03_maxfilter.py | 1 +
.../preprocessing/_05_regress_artifact.py | 5 +-
.../steps/preprocessing/_06a_run_ica.py | 9 +-
.../steps/preprocessing/_06b_run_ssp.py | 3 +-
.../steps/preprocessing/_07_make_epochs.py | 5 +-
.../steps/preprocessing/_08a_apply_ica.py | 5 +-
.../steps/preprocessing/_08b_apply_ssp.py | 5 +-
.../steps/preprocessing/_09_ptp_reject.py | 4 +-
.../steps/source/_01_make_bem_surfaces.py | 5 +-
.../steps/source/_02_make_bem_solution.py | 5 +-
.../steps/source/_03_setup_source_space.py | 5 +-
17 files changed, 182 insertions(+), 82 deletions(-)
delete mode 100644 docs/source/features/overview.md
diff --git a/docs/source/.gitignore b/docs/source/.gitignore
index 77afb012b..ce1332a62 100644
--- a/docs/source/.gitignore
+++ b/docs/source/.gitignore
@@ -1 +1,2 @@
features/steps.md
+features/overview.md
diff --git a/docs/source/features/gen_steps.py b/docs/source/features/gen_steps.py
index 86ea6283f..2b3cc3bd7 100755
--- a/docs/source/features/gen_steps.py
+++ b/docs/source/features/gen_steps.py
@@ -6,20 +6,92 @@
from mne_bids_pipeline._config_utils import _get_step_modules
-pre = """\
-# Detailed lis of processing steps
+autogen_header = f"""\
+[//]: # (AUTO-GENERATED, TO CHANGE EDIT {'/'.join(Path(__file__).parts[-4:])})
+"""
+
+steps_pre = f"""\
+{autogen_header}
+
+# Detailed list of processing steps
The following table provides a concise summary of each processing step. The
step names can be used to run individual steps or entire groups of steps by
passing their name(s) to `mne_bids_pipeline` via the `steps=...` argument.
+""" # noqa: E501
+
+overview_pre = f"""\
+{autogen_header}
+
+MNE-BIDS-Pipeline processes your data in a sequential manner, i.e., one step
+at a time. The next step is only run after the previous steps have been
+successfully completed. There are, of course, exceptions; for example, if you
+chose not to apply ICA, the respective steps will simply be omitted and we'll
+directly move to the subsequent steps. The following flow chart aims to give
+you a brief overview of which steps are included in the pipeline, in which
+order they are run, and how we group them together.
+
+!!! info
+ All intermediate results are saved to disk for later
+ inspection, and an **extensive report** is generated.
+
+!!! info
+ Analyses are conducted on individual (per-subject) as well as group level.
"""
+icon_map = {
+ "Filesystem initialization and dataset inspection": ":open_file_folder:",
+ "Preprocessing": ":broom:",
+ "Sensor-space analysis": ":satellite:",
+ "Source-space analysis": ":brain:",
+ "FreeSurfer-related processing": ":person_surfing:",
+}
+out_dir = Path(__file__).parent
+
print("Generating steps …")
step_modules = _get_step_modules()
+char_start = ord("A")
+
+# In principle we could try to sort this out based on naming, but for now let's just
+# set our hierarchy manually and update it when we move files around since that's easy
+# (and rare) enough to do.
+manual_order = {
+ "Preprocessing": (
+ ("01", "02"),
+ ("02", "03"),
+ ("03", "04"),
+ ("04", "05"),
+ ("05", "06a"),
+ ("05", "06b"),
+ ("05", "07"),
+ # technically we could have the raw data flow here, but it doesn't really help
+ # ("05", "08a"),
+ # ("05", "08b"),
+ ("06a", "08a"),
+ ("07", "08a"),
+ # Force the artifact-fitting and epoching steps on the same level, in this order
+ """\
+ subgraph Z[" "]
+ direction LR
+ B06a
+ B07
+ B06b
+ end
+ style Z fill:#0000,stroke-width:0px
+""",
+ ("06b", "08b"),
+ ("07", "08b"),
+ ("08a", "09"),
+ ("08b", "09"),
+ ),
+}
# Construct the lines of steps.md
-lines = [pre]
+lines = [steps_pre]
+overview_lines = [overview_pre]
+used_titles = set()
for di, (dir_, modules) in enumerate(step_modules.items(), 1):
+ # Steps
if dir_ == "all":
continue # this is an alias
dir_module = importlib.import_module(f"mne_bids_pipeline.steps.{dir_}")
@@ -29,7 +101,9 @@
dir_body = dir_body[1].strip()
else:
dir_body = ""
- lines.append(f"## {di}. {dir_header}\n")
+ icon = icon_map[dir_header]
+ module_header = f"{di}. {icon} {dir_header}"
+ lines.append(f"## {module_header}\n")
if dir_body:
lines.append(f"{dir_body}\n")
lines.append("| Step name | Description |")
@@ -42,5 +116,64 @@
step_title = module.__doc__.split("\n")[0]
lines.append(f"`{step_name}` | {step_title} |")
lines.append("")
-with open(Path(__file__).parent / "steps.md", "w") as fid:
- fid.write("\n".join(lines))
+
+ # Overview
+ overview_lines.append(
+ f"""\
+## {module_header}
+
+```mermaid
+flowchart TD"""
+ )
+ chr_pre = chr(char_start + di - 1) # A, B, C, ...
+ start = None
+ prev_idx = None
+ title_map = {}
+ for mi, module in enumerate(modules, 1):
+ step_title = module.__doc__.split("\n")[0].rstrip(".")
+ idx = module.__name__.split(".")[-1].split("_")[1] # 01, 05a, etc.
+ # Need to quote the title to deal with parens, and sanitize quotes
+ step_title = step_title.replace('"', "'")
+ assert step_title not in used_titles, f"Redundant title: {step_title}"
+ used_titles.add(step_title)
+ this_block = f'{chr_pre}{idx}["{step_title}"]'
+ # special case: manual order
+ title_map[idx] = step_title
+ if dir_header in manual_order:
+ continue
+ if mi == 1:
+ start = this_block
+ assert prev_idx is None
+ continue
+ if start is not None:
+ assert mi == 2, mi
+ overview_lines.append(f" {start} --> {this_block}")
+ start = None
+ else:
+ overview_lines.append(f" {chr_pre}{prev_idx} --> {this_block}")
+ prev_idx = idx
+ if dir_header in manual_order:
+ mapped = set()
+ for a_b in manual_order[dir_header]:
+ if isinstance(a_b, str): # insert directly
+ overview_lines.append(a_b)
+ continue
+ assert isinstance(a_b, tuple), type(a_b)
+ a_b = list(a_b) # allow modification
+ for ii, idx in enumerate(a_b):
+ assert idx in title_map, (dir_header, sorted(title_map))
+ if idx not in mapped:
+ mapped.add(idx)
+ a_b[ii] = f'{idx}["{title_map[idx]}"]'
+ overview_lines.append(f" {chr_pre}{a_b[0]} --> {chr_pre}{a_b[1]}")
+ all_steps = set(
+ sum(
+ [a_b for a_b in manual_order[dir_header] if not isinstance(a_b, str)],
+ (),
+ )
+ )
+ assert mapped == all_steps, all_steps.symmetric_difference(mapped)
+ overview_lines.append("```\n")
+
+(out_dir / "steps.md").write_text("\n".join(lines), encoding="utf8")
+(out_dir / "overview.md").write_text("\n".join(overview_lines), encoding="utf8")
diff --git a/docs/source/features/overview.md b/docs/source/features/overview.md
deleted file mode 100644
index 9fe044038..000000000
--- a/docs/source/features/overview.md
+++ /dev/null
@@ -1,53 +0,0 @@
-MNE-BIDS-Pipeline processes your data in a sequential manner, i.e., one step
-at a time. The next step is only run after the previous steps have been
-successfully completed. There are, of course, exceptions; for example, if you
-chose not to apply ICA, the respective steps will simply be omitted and we'll
-directly move to the subsequent steps. The following flow chart aims to give
-you a brief overview of which steps are included in the pipeline, in which
-order they are run, and how we group them together.
-
-!!! info
- All intermediate results are saved to disk for later
- inspection, and an **extensive report** is generated.
-
-!!! info
- Analyses are conducted on individual (per-subject) as well as group level.
-
-
-## :open_file_folder: Filesystem initialization and dataset inspection
-```mermaid
-flowchart TD
- A1[initialize the target directories] --> A2[locate empty-room recordings]
-```
-
-## :broom: Preprocessing
-```mermaid
- flowchart TD
- B1[Noisy & flat channel detection] --> B2[Maxwell filter]
- B2 --> B3[Frequency filter]
- B3 --> B4[Epoch creation]
- B4 --> B5[SSP or ICA fitting]
- B5 --> B6[Artifact removal via SSP or ICA]
- B6 --> B7[Amplitude-based epoch rejection]
-```
-
-## :satellite: Sensor-space processing
-```mermaid
- flowchart TD
- C1[ERP / ERF calculation] --> C2[MVPA: full epochs]
- C2 --> C3[MVPA: time-by-time decoding]
- C3 --> C4[Time-frequency decomposition]
- C4 --> C5[MVPA: CSP]
- C5 --> C6[Noise covariance estimation]
- C6 --> C7[Grand average]
-```
-
-## :brain: Source-space processing
-```mermaid
- flowchart TD
- D1[BEM surface creation] --> D2[BEM solution]
- D2 --> D3[Source space creation]
- D3 --> D4[Forward model creation]
- D4 --> D5[Inverse solution]
- D5 --> D6[Grand average]
-```
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 3abc9a081..aba57baea 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -36,3 +36,4 @@
- The package build backend has been switched from `setuptools` to `hatchling`. (#825 by @hoechenberger)
- Code formatting now uses `ruff format` instead of `black` (#834, #838 by @larsoner)
- Code caching is now tested using GitHub Actions (#836 by @larsoner)
+- Steps in the documentation are now automatically parsed into flowcharts (#859 by @larsoner)
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index 04deef839..748fd83ca 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -379,11 +379,18 @@ def _prep_out_files(
exec_params: SimpleNamespace,
out_files: dict[str, BIDSPath],
check_relative: Optional[pathlib.Path] = None,
+ bids_only: bool = True,
):
if check_relative is None:
check_relative = exec_params.deriv_root
for key, fname in out_files.items():
# Sanity check that we only ever write to the derivatives directory
+ if bids_only:
+ assert isinstance(fname, BIDSPath), (type(fname), fname)
+ # raw and epochs can split on write, and .save should check for us now, so
+ # we only need to check *other* types (these should never split)
+ if isinstance(fname, BIDSPath) and fname.suffix not in ("raw", "epo"):
+ assert fname.split is None, fname
fname = pathlib.Path(fname)
if not fname.is_relative_to(check_relative):
raise RuntimeError(
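Read in isolation, the new checks amount to the following (a minimal sketch, not the pipeline's actual helper; the `bids_only=False` escape hatch exists because the FreeSurfer-related steps below write plain, non-BIDS paths):

```python
from mne_bids import BIDSPath

def check_out_fname(fname, *, bids_only: bool = True) -> None:
    """Sketch of the new _prep_out_files sanity checks."""
    if bids_only:
        # Everything written to the derivatives tree should be a BIDSPath
        assert isinstance(fname, BIDSPath), (type(fname), fname)
    # raw and epochs may legitimately split on write; other suffixes never should
    if isinstance(fname, BIDSPath) and fname.suffix not in ("raw", "epo"):
        assert fname.split is None, fname
```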
diff --git a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
index b2e2f8090..f7613fa68 100644
--- a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
+++ b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
@@ -73,7 +73,10 @@ def make_coreg_surfaces(
)
out_files = get_output_fnames_coreg_surfaces(cfg=cfg, subject=subject)
return _prep_out_files(
- exec_params=exec_params, out_files=out_files, check_relative=cfg.fs_subjects_dir
+ exec_params=exec_params,
+ out_files=out_files,
+ check_relative=cfg.fs_subjects_dir,
+ bids_only=False,
)
diff --git a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
index 5e5e30318..00ce5ad0a 100644
--- a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
@@ -311,6 +311,7 @@ def run_maxwell_filter(
extension=".fif",
root=cfg.deriv_root,
check=False,
+ split=None,
)
bids_path_out = bids_path_in.copy().update(**bids_path_out_kwargs)
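The `split=None` addition matters because the output path is derived via `bids_path_in.copy()`: if the input raw file was written as, say, `split-01`, the copied path would otherwise carry that entity into the output name. A hypothetical illustration (entities invented for the example):

```python
from mne_bids import BIDSPath

# Hypothetical input path inherited from a split recording
bids_path_in = BIDSPath(
    subject="01", task="rest", split="01", suffix="raw",
    extension=".fif", root="/tmp/derivatives", check=False,
)
# Resetting split lets raw.save() decide anew whether splitting is needed
bids_path_out = bids_path_in.copy().update(processing="sss", split=None)
print(bids_path_out.basename)  # e.g. sub-01_task-rest_proc-sss_raw.fif
```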
diff --git a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
index cb31df04d..9fce737cc 100644
--- a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
+++ b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
@@ -1,7 +1,4 @@
-"""Run Signal Subspace Projections (SSP) for artifact correction.
-
-These are often also referred to as PCA vectors.
-"""
+"""Temporal regression for artifact removal."""
from types import SimpleNamespace
from typing import Optional
diff --git a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
index 7bfef3c56..fb6f1b089 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
@@ -1,14 +1,13 @@
-"""Run Independent Component Analysis (ICA) for artifact correction.
+"""Fit ICA.
-This fits ICA on epoched data filtered with 1 Hz highpass,
-for this purpose only using fastICA. Separate ICAs are fitted and stored for
-MEG and EEG data.
+This fits Independent Component Analysis (ICA) on raw data filtered with 1 Hz highpass,
+temporarily creating task-related epochs.
Before performing ICA, we reject epochs based on peak-to-peak amplitude above
the 'ica_reject' thresholds to filter out massive non-biological artifacts.
To actually remove designated ICA components from your data, you will have to
-run 05a-apply_ica.py.
+run the apply_ica step.
"""
from collections.abc import Iterable
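Conceptually, the revised step now does something like the following (an illustrative sketch only; event extraction, channel-type handling, and the actual `ica_reject` logic are simplified):

```python
import mne

# Fit ICA on raw data high-passed at 1 Hz, using temporary task epochs
raw_filt = raw.copy().load_data().filter(l_freq=1.0, h_freq=None)
events, event_id = mne.events_from_annotations(raw_filt)
epochs = mne.Epochs(raw_filt, events, event_id, tmin=-0.2, tmax=0.5,
                    baseline=None, preload=True)
epochs.drop_bad(reject=dict(mag=4e-12))  # stand-in for ica_reject
ica = mne.preprocessing.ICA(n_components=0.8, random_state=42)
ica.fit(epochs)
```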
diff --git a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
index 7ec75ef91..1580836ca 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
@@ -1,5 +1,6 @@
-"""Run Signal Subspace Projections (SSP) for artifact correction.
+"""Compute SSP.
+Signal subspace projections (SSP) vectors are computed from EOG and ECG signals.
These are often also referred to as PCA vectors.
"""
diff --git a/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py b/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
index 42bf721df..e32fbd0ed 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
@@ -214,7 +214,10 @@ def run_epochs(
logger.info(**gen_log_kwargs(message=msg))
out_files = dict()
out_files["epochs"] = bids_path_in.copy().update(
- suffix="epo", processing=None, check=False
+ suffix="epo",
+ processing=None,
+ check=False,
+ split=None,
)
epochs.save(
out_files["epochs"],
diff --git a/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
index 8fcc8141c..aecab29e4 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
@@ -1,4 +1,4 @@
-"""Apply ICA and obtain the cleaned epochs and raw data.
+"""Apply ICA.
Blinks and ECG artifacts are automatically detected and the corresponding ICA
components are removed from the data.
@@ -8,7 +8,6 @@
make sure you did not re-run the ICA in the meantime. Otherwise (especially if
the random state was not set, or you used a different machine), the component
order might differ.
-
"""
from types import SimpleNamespace
@@ -225,7 +224,7 @@ def apply_ica_raw(
raw_fname = in_files.pop(in_key)
assert len(in_files) == 0, in_files
out_files = dict()
- out_files[in_key] = raw_fname.copy().update(processing="clean")
+ out_files[in_key] = raw_fname.copy().update(processing="clean", split=None)
msg = f"Writing {out_files[in_key].basename} …"
logger.info(**gen_log_kwargs(message=msg))
raw = mne.io.read_raw_fif(raw_fname, preload=True)
diff --git a/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py b/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py
index e6fad4b8f..9a0026a78 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py
@@ -1,8 +1,7 @@
-"""Apply SSP projections and obtain the cleaned epochs and raw data.
+"""Apply SSP.
Blinks and ECG artifacts are automatically detected and the corresponding SSP
projection components are removed from the data.
-
"""
from types import SimpleNamespace
@@ -114,7 +113,7 @@ def apply_ssp_raw(
raw = mne.io.read_raw_fif(raw_fname)
raw.add_proj(projs)
out_files = dict()
- out_files[in_key] = raw_fname.copy().update(processing="clean")
+ out_files[in_key] = raw_fname.copy().update(processing="clean", split=None)
msg = f"Writing {out_files[in_key].basename} …"
logger.info(**gen_log_kwargs(message=msg))
raw.save(out_files[in_key], overwrite=True, split_size=cfg._raw_split_size)
diff --git a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
index d08469b3c..434b235ec 100644
--- a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
+++ b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
@@ -1,6 +1,6 @@
-"""Remove epochs based on peak-to-peak (PTP) amplitudes.
+"""Remove epochs based on PTP amplitudes.
-Epochs containing peak-to-peak above the thresholds defined
+Epochs containing peak-to-peak (PTP) amplitudes above the thresholds defined
in the 'reject' parameter are removed from the data.
This step will drop epochs containing non-biological artifacts
diff --git a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
index f77593107..22c67c235 100644
--- a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
+++ b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
@@ -113,7 +113,10 @@ def make_bem_surfaces(
session=session,
)
return _prep_out_files(
- exec_params=exec_params, out_files=out_files, check_relative=cfg.fs_subjects_dir
+ exec_params=exec_params,
+ out_files=out_files,
+ check_relative=cfg.fs_subjects_dir,
+ bids_only=False,
)
diff --git a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
index 33f7b870c..1320d6dc7 100644
--- a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
+++ b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
@@ -70,7 +70,10 @@ def make_bem_solution(
mne.write_bem_surfaces(out_files["model"], bem_model, overwrite=True)
mne.write_bem_solution(out_files["sol"], bem_sol, overwrite=True)
return _prep_out_files(
- exec_params=exec_params, out_files=out_files, check_relative=cfg.fs_subjects_dir
+ exec_params=exec_params,
+ out_files=out_files,
+ check_relative=cfg.fs_subjects_dir,
+ bids_only=False,
)
diff --git a/mne_bids_pipeline/steps/source/_03_setup_source_space.py b/mne_bids_pipeline/steps/source/_03_setup_source_space.py
index 52c342dbf..bcd4bef34 100644
--- a/mne_bids_pipeline/steps/source/_03_setup_source_space.py
+++ b/mne_bids_pipeline/steps/source/_03_setup_source_space.py
@@ -56,7 +56,10 @@ def run_setup_source_space(
out_files = get_output_fnames_setup_source_space(cfg=cfg, subject=subject)
mne.write_source_spaces(out_files["src"], src, overwrite=True)
return _prep_out_files(
- exec_params=exec_params, out_files=out_files, check_relative=cfg.fs_subjects_dir
+ exec_params=exec_params,
+ out_files=out_files,
+ check_relative=cfg.fs_subjects_dir,
+ bids_only=False,
)
From e663d93a687732f6b6d841e6cc58ece4b409c32f Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Wed, 28 Feb 2024 09:41:23 -0500
Subject: [PATCH 070/132] DOC: Automatically document settings (#863)
---
docs/build-docs.sh | 3 +
docs/mkdocs.yml | 1 +
docs/source/.gitignore | 2 +
docs/source/settings/gen_settings.py | 182 ++++++
docs/source/settings/general.md | 48 --
.../settings/preprocessing/artifacts.md | 20 -
.../source/settings/preprocessing/autobads.md | 27 -
docs/source/settings/preprocessing/breaks.md | 15 -
docs/source/settings/preprocessing/epochs.md | 26 -
docs/source/settings/preprocessing/filter.md | 37 --
.../settings/preprocessing/maxfilter.md | 29 -
.../source/settings/preprocessing/resample.md | 21 -
docs/source/settings/preprocessing/ssp_ica.md | 33 -
.../settings/preprocessing/stim_artifact.md | 19 -
.../settings/reports/report_generation.md | 10 -
docs/source/settings/sensor/contrasts.md | 11 -
docs/source/settings/sensor/group_level.md | 10 -
docs/source/settings/sensor/mvpa.md | 27 -
docs/source/settings/sensor/time_freq.md | 18 -
docs/source/settings/source/bem.md | 16 -
docs/source/settings/source/forward.md | 14 -
docs/source/settings/source/general.md | 9 -
docs/source/settings/source/inverse.md | 15 -
docs/source/v1.6.md.inc | 1 +
mne_bids_pipeline/_config.py | 572 +++++++++---------
mne_bids_pipeline/tests/test_documented.py | 34 +-
26 files changed, 511 insertions(+), 689 deletions(-)
create mode 100755 docs/source/settings/gen_settings.py
delete mode 100644 docs/source/settings/general.md
delete mode 100644 docs/source/settings/preprocessing/artifacts.md
delete mode 100644 docs/source/settings/preprocessing/autobads.md
delete mode 100644 docs/source/settings/preprocessing/breaks.md
delete mode 100644 docs/source/settings/preprocessing/epochs.md
delete mode 100644 docs/source/settings/preprocessing/filter.md
delete mode 100644 docs/source/settings/preprocessing/maxfilter.md
delete mode 100644 docs/source/settings/preprocessing/resample.md
delete mode 100644 docs/source/settings/preprocessing/ssp_ica.md
delete mode 100644 docs/source/settings/preprocessing/stim_artifact.md
delete mode 100644 docs/source/settings/reports/report_generation.md
delete mode 100644 docs/source/settings/sensor/contrasts.md
delete mode 100644 docs/source/settings/sensor/group_level.md
delete mode 100644 docs/source/settings/sensor/mvpa.md
delete mode 100644 docs/source/settings/sensor/time_freq.md
delete mode 100644 docs/source/settings/source/bem.md
delete mode 100644 docs/source/settings/source/forward.md
delete mode 100644 docs/source/settings/source/general.md
delete mode 100644 docs/source/settings/source/inverse.md
diff --git a/docs/build-docs.sh b/docs/build-docs.sh
index ccb159aae..4a94ab206 100755
--- a/docs/build-docs.sh
+++ b/docs/build-docs.sh
@@ -10,6 +10,9 @@ python $STEP_DIR/source/examples/gen_examples.py
echo "Generating pipeline table …"
python $STEP_DIR/source/features/gen_steps.py
+echo "Generating config docs …"
+python $STEP_DIR/source/settings/gen_settings.py
+
echo "Building the documentation …"
cd $STEP_DIR
mkdocs build
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 29107ff32..ab4e493e4 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -103,6 +103,7 @@ nav:
- Source space & forward solution: settings/source/forward.md
- Inverse solution: settings/source/inverse.md
- Report generation: settings/reports/report_generation.md
+ - Execution: settings/execution.md
- Examples:
- Examples Gallery: examples/examples.md
- examples/ds003392.md
diff --git a/docs/source/.gitignore b/docs/source/.gitignore
index ce1332a62..b909cc5d3 100644
--- a/docs/source/.gitignore
+++ b/docs/source/.gitignore
@@ -1,2 +1,4 @@
features/steps.md
features/overview.md
+settings/**/
+settings/*.md
diff --git a/docs/source/settings/gen_settings.py b/docs/source/settings/gen_settings.py
new file mode 100755
index 000000000..6f7eaf7d3
--- /dev/null
+++ b/docs/source/settings/gen_settings.py
@@ -0,0 +1,182 @@
+"""Generate settings .md files."""
+
+# Any changes to the overall structure need to be reflected in mkdocs.yml nav section.
+
+import re
+from pathlib import Path
+
+from tqdm import tqdm
+
+import mne_bids_pipeline._config
+
+config_path = Path(mne_bids_pipeline._config.__file__)
+settings_dir = Path(__file__).parent
+
+# Mapping between first two lower-case words in the section name and the desired
+# file or folder name
+section_to_file = { # .md will be added to the files
+ # root file
+ "general settings": "general",
+ # folder
+ "preprocessing": "preprocessing",
+ "break detection": "breaks",
+ "bad channel": "autobads",
+ "maxwell filter": "maxfilter",
+ "filtering": "filter",
+ "resampling": "resample",
+ "epoching": "epochs",
+ "filtering &": None, # just a header
+ "artifact removal": None,
+ "stimulation artifact": "stim_artifact",
+ "ssp, ica,": "ssp_ica",
+ "amplitude-based artifact": "artifacts",
+ # folder
+ "sensor-level analysis": "sensor",
+ "condition contrasts": "contrasts",
+ "decoding /": "mvpa",
+ "time-frequency analysis": "time_freq",
+ "group-level analysis": "group_level",
+ # folder
+ "source-level analysis": "source",
+ "general source": "general",
+ "bem surface": "bem",
+ "source space": "forward",
+ "inverse solution": "inverse",
+ # folder
+ "reports": "reports",
+ "report generation": "report_generation",
+ # root file
+ "execution": "execution",
+}
+# TODO: Make sure these are consistent, autogenerate some based on section names,
+# and/or autogenerate based on inputs/outputs of actual functions.
+section_tags = {
+ "general settings": (),
+ "preprocessing": (),
+ "filtering &": (),
+ "artifact removal": (),
+ "break detection": ("preprocessing", "artifact-removal", "raw", "events"),
+ "bad channel": ("preprocessing", "raw", "bad-channels"),
+ "maxwell filter": ("preprocessing", "maxwell-filter", "raw"),
+ "filtering": ("preprocessing", "frequency-filter", "raw"),
+ "resampling": ("preprocessing", "resampling", "decimation", "raw", "epochs"),
+ "epoching": ("preprocessing", "epochs", "events", "metadata", "resting-state"),
+ "stimulation artifact": ("preprocessing", "artifact-removal", "raw", "epochs"),
+ "ssp, ica,": ("preprocessing", "artifact-removal", "raw", "epochs", "ssp", "ica"),
+ "amplitude-based artifact": ("preprocessing", "artifact-removal", "epochs"),
+ "sensor-level analysis": (),
+ "condition contrasts": ("epochs", "evoked", "contrast"),
+ "decoding /": ("epochs", "evoked", "contrast", "decoding", "mvpa"),
+ "time-frequency analysis": ("epochs", "evoked", "time-frequency"),
+ "group-level analysis": ("evoked", "group-level"),
+ "source-level analysis": (),
+ "general source": ("inverse-solution",),
+ "bem surface": ("inverse-solution", "bem", "freesurfer"),
+ "source space": ("inverse-solution", "forward-model"),
+ "inverse solution": ("inverse-solution",),
+ "reports": (),
+ "report generation": ("report",),
+ "execution": (),
+}
+
+option_header = """\
+::: mne_bids_pipeline._config
+ options:
+ members:"""
+prefix = """\
+ - """
+
+# We cannot use ast for this because it doesn't preserve comments. We could use
+# something like redbaron, but our code is hopefully simple enough!
+assign_re = re.compile(
+ # Line starts with annotation syntax (name captured by the first group).
+ r"^(\w+): "
+ # Then the annotation can be ...
+ "("
+ # ... a standard assignment ...
+ ".+ = .+"
+ # ... or ...
+ "|"
+ # ... the start of a multiline type annotation like "a: Union["
+ r"(Union|Optional|Literal)\["
+ # To the end of the line.
+ ")$",
+ re.MULTILINE,
+)
+
+
+def main():
+ print(f"Parsing {config_path} to generate settings .md files.")
+ # max file-level depth is 2 even though we have 3 subsection levels
+ levels = [None, None]
+ current_path, current_lines = None, list()
+ text = config_path.read_text("utf-8")
+ lines = text.splitlines()
+ lines += ["# #"] # add a dummy line to trigger the last write
+ in_header = False
+ have_params = False
+ for li, line in enumerate(tqdm(lines)):
+ line = line.rstrip()
+ if line.startswith("# #"): # a new (sub)section / file
+ this_def = line[2:]
+ this_level = this_def.split()[0]
+ assert this_level.count("#") == len(this_level), this_level
+ this_level = this_level.count("#") - 1
+ if this_level == 2:
+ # flatten preprocessing/filtering/filter to preprocessing/filter
+ # for example
+ this_level = 1
+ assert this_level in (0, 1), (this_level, this_def)
+ this_def = this_def[this_level + 2 :]
+ levels[this_level] = this_def
+ # Write current lines and reset
+ if have_params: # more than just the header
+ assert current_path is not None, levels
+ if current_lines[0] == "": # this happens with tags
+ current_lines = current_lines[1:]
+ current_path.write_text("\n".join(current_lines + [""]), "utf-8")
+ have_params = False
+ if this_level == 0:
+ this_root = settings_dir
+ else:
+ this_root = settings_dir / f"{section_to_file[levels[0].lower()]}"
+ this_root.mkdir(exist_ok=True)
+ key = " ".join(this_def.split()[:2]).lower()
+ if key == "":
+ assert li == len(lines) - 1, (li, line)
+ continue # our dummy line
+ fname = section_to_file[key]
+ if fname is None:
+ current_path = None
+ else:
+ current_path = this_root / f"{fname}.md"
+ in_header = True
+ current_lines = list()
+ if len(section_tags[key]):
+ current_lines += ["---", "tags:"]
+ current_lines += [f" - {tag}" for tag in section_tags[key]]
+ current_lines += ["---"]
+ continue
+
+ if in_header:
+ if line == "":
+ in_header = False
+ if current_lines:
+ current_lines.append("")
+ current_lines.append(option_header)
+ else:
+ assert line == "#" or line.startswith("# "), (li, line) # a comment
+ current_lines.append(line[2:])
+ continue
+
+ # Could be an option
+ match = assign_re.match(line)
+ if match is not None:
+ have_params = True
+ name, typ, desc = match.groups()
+ current_lines.append(f"{prefix}{name}")
+ continue
+
+
+if __name__ == "__main__":
+ main()
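The `assign_re` pattern above is the crux of the parser. A condensed, equivalent form, with a few sample lines it should and should not match (the sample lines are illustrative, not copied from `_config.py`):

```python
import re

# Condensed equivalent of assign_re from gen_settings.py above
assign_re = re.compile(r"^(\w+): (.+ = .+|(Union|Optional|Literal)\[)$", re.MULTILINE)

assert assign_re.match('study_name: str = ""')            # simple annotated assignment
assert assign_re.match("runs: Union[")                    # start of multiline annotation
assert assign_re.match("l_freq: Optional[float] = None")
assert assign_re.match("    indented: int = 1") is None   # must start at column 0
assert assign_re.match("# just a comment") is None
```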
diff --git a/docs/source/settings/general.md b/docs/source/settings/general.md
deleted file mode 100644
index 2640f5f2b..000000000
--- a/docs/source/settings/general.md
+++ /dev/null
@@ -1,48 +0,0 @@
-::: mne_bids_pipeline._config
- options:
- members:
- - study_name
- - bids_root
- - deriv_root
- - subjects_dir
- - interactive
- - sessions
- - task
- - task_is_rest
- - runs
- - exclude_runs
- - crop_runs
- - acq
- - proc
- - rec
- - space
- - subjects
- - exclude_subjects
- - process_empty_room
- - process_rest
- - ch_types
- - data_type
- - eog_channels
- - eeg_bipolar_channels
- - eeg_reference
- - eeg_template_montage
- - drop_channels
- - reader_extra_params
- - read_raw_bids_verbose
- - analyze_channels
- - plot_psd_for_runs
- - n_jobs
- - parallel_backend
- - dask_open_dashboard
- - dask_temp_dir
- - dask_worker_memory_limit
- - random_state
- - shortest_event
- - memory_location
- - memory_subdir
- - memory_file_method
- - memory_verbose
- - config_validation
- - log_level
- - mne_log_level
- - on_error
diff --git a/docs/source/settings/preprocessing/artifacts.md b/docs/source/settings/preprocessing/artifacts.md
deleted file mode 100644
index 88407cd2c..000000000
--- a/docs/source/settings/preprocessing/artifacts.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-tags:
- - preprocessing
- - artifact-removal
- - epochs
----
-
-???+ info "Good Practice / Advice"
- Have a look at your raw data and train yourself to detect a blink, a heart
- beat and an eye movement.
- You can do a quick average of blink data and check what the amplitude looks
- like.
-
-::: mne_bids_pipeline._config
- options:
- members:
- - reject
- - reject_tmin
- - reject_tmax
- - autoreject_n_interpolate
diff --git a/docs/source/settings/preprocessing/autobads.md b/docs/source/settings/preprocessing/autobads.md
deleted file mode 100644
index a118917a1..000000000
--- a/docs/source/settings/preprocessing/autobads.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-tags:
- - preprocessing
- - raw
- - bad-channels
----
-
-!!! warning
- This functionality will soon be removed from the pipeline, and
- will be integrated into MNE-BIDS.
-
-"Bad", i.e. flat and overly noisy channels, can be automatically detected
-using a procedure inspired by the commercial MaxFilter by Elekta. First,
-a copy of the data is low-pass filtered at 40 Hz. Then, channels with
-unusually low variability are flagged as "flat", while channels with
-excessively high variability are flagged as "noisy". Flat and noisy channels
-are marked as "bad" and excluded from subsequent analysis. See
-:func:`mne.preprocssessing.find_bad_channels_maxwell` for more information
-on this procedure. The list of bad channels detected through this procedure
-will be merged with the list of bad channels already present in the dataset,
-if any.
-
-::: mne_bids_pipeline._config
- options:
- members:
- - find_flat_channels_meg
- - find_noisy_channels_meg
diff --git a/docs/source/settings/preprocessing/breaks.md b/docs/source/settings/preprocessing/breaks.md
deleted file mode 100644
index 01e3159eb..000000000
--- a/docs/source/settings/preprocessing/breaks.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-tags:
- - preprocessing
- - artifact-removal
- - raw
- - events
----
-
-::: mne_bids_pipeline._config
- options:
- members:
- - find_breaks
- - min_break_duration
- - t_break_annot_start_after_previous_event
- - t_break_annot_stop_before_next_event
diff --git a/docs/source/settings/preprocessing/epochs.md b/docs/source/settings/preprocessing/epochs.md
deleted file mode 100644
index 02dd1f71d..000000000
--- a/docs/source/settings/preprocessing/epochs.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-tags:
- - preprocessing
- - epochs
- - events
- - metadata
- - resting-state
----
-
-::: mne_bids_pipeline._config
- options:
- members:
- - rename_events
- - on_rename_missing_events
- - event_repeated
- - conditions
- - epochs_tmin
- - epochs_tmax
- - baseline
- - epochs_metadata_tmin
- - epochs_metadata_tmax
- - epochs_metadata_keep_first
- - epochs_metadata_keep_last
- - epochs_metadata_query
- - rest_epochs_duration
- - rest_epochs_overlap
diff --git a/docs/source/settings/preprocessing/filter.md b/docs/source/settings/preprocessing/filter.md
deleted file mode 100644
index 9d1301412..000000000
--- a/docs/source/settings/preprocessing/filter.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-tags:
- - preprocessing
- - frequency-filter
- - raw
----
-
-It is typically better to set your filtering properties on the raw data so
-as to avoid what we call border (or edge) effects.
-
-If you use this pipeline for evoked responses, you could consider
-a low-pass filter cut-off of h_freq = 40 Hz
-and possibly a high-pass filter cut-off of l_freq = 1 Hz
-so you would preserve only the power in the 1Hz to 40 Hz band.
-Note that highpass filtering is not necessarily recommended as it can
-distort waveforms of evoked components, or simply wash out any low
-frequency that can may contain brain signal. It can also act as
-a replacement for baseline correction in Epochs. See below.
-
-If you use this pipeline for time-frequency analysis, a default filtering
-could be a high-pass filter cut-off of l_freq = 1 Hz
-a low-pass filter cut-off of h_freq = 120 Hz
-so you would preserve only the power in the 1Hz to 120 Hz band.
-
-If you need more fancy analysis, you are already likely past this kind
-of tips! 😇
-
-::: mne_bids_pipeline._config
- options:
- members:
- - l_freq
- - h_freq
- - l_trans_bandwidth
- - h_trans_bandwidth
- - notch_freq
- - notch_trans_bandwidth
- - notch_widths
diff --git a/docs/source/settings/preprocessing/maxfilter.md b/docs/source/settings/preprocessing/maxfilter.md
deleted file mode 100644
index 3cd32d9d7..000000000
--- a/docs/source/settings/preprocessing/maxfilter.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-tags:
- - preprocessing
- - maxwell-filter
- - raw
----
-
-::: mne_bids_pipeline._config
- options:
- members:
- - use_maxwell_filter
- - mf_st_duration
- - mf_st_correlation
- - mf_head_origin
- - mf_destination
- - mf_int_order
- - mf_reference_run
- - mf_cal_fname
- - mf_ctc_fname
- - mf_esss
- - mf_esss_reject
- - mf_mc
- - mf_mc_t_step_min
- - mf_mc_t_window
- - mf_mc_gof_limit
- - mf_mc_dist_limit
- - mf_mc_rotation_velocity_limit
- - mf_mc_translation_velocity_limit
- - mf_filter_chpi
diff --git a/docs/source/settings/preprocessing/resample.md b/docs/source/settings/preprocessing/resample.md
deleted file mode 100644
index 6aa824a6c..000000000
--- a/docs/source/settings/preprocessing/resample.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-tags:
- - preprocessing
- - resampling
- - decimation
- - raw
- - epochs
----
-
-If you have acquired data with a very high sampling frequency (e.g. 2 kHz)
-you will likely want to downsample to lighten up the size of the files you
-are working with (pragmatics)
-If you are interested in typical analysis (up to 120 Hz) you can typically
-resample your data down to 500 Hz without preventing reliable time-frequency
-exploration of your data.
-
-::: mne_bids_pipeline._config
- options:
- members:
- - raw_resample_sfreq
- - epochs_decim
diff --git a/docs/source/settings/preprocessing/ssp_ica.md b/docs/source/settings/preprocessing/ssp_ica.md
deleted file mode 100644
index f25110729..000000000
--- a/docs/source/settings/preprocessing/ssp_ica.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-tags:
- - preprocessing
- - artifact-removal
- - raw
- - epochs
- - ssp
- - ica
----
-
-::: mne_bids_pipeline._config
- options:
- members:
- - regress_artifact
- - spatial_filter
- - min_ecg_epochs
- - min_eog_epochs
- - n_proj_eog
- - n_proj_ecg
- - ssp_meg
- - ecg_proj_from_average
- - eog_proj_from_average
- - ssp_reject_eog
- - ssp_reject_ecg
- - ssp_ecg_channel
- - ica_reject
- - ica_algorithm
- - ica_l_freq
- - ica_max_iterations
- - ica_n_components
- - ica_decim
- - ica_ctps_ecg_threshold
- - ica_eog_threshold
diff --git a/docs/source/settings/preprocessing/stim_artifact.md b/docs/source/settings/preprocessing/stim_artifact.md
deleted file mode 100644
index cbc142550..000000000
--- a/docs/source/settings/preprocessing/stim_artifact.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-tags:
- - preprocessing
- - artifact-removal
- - raw
- - epochs
----
-
-When using electric stimulation systems, e.g. for median nerve or index
-stimulation, it is frequent to have a stimulation artifact. This option
-allows to fix it by linear interpolation early in the pipeline on the raw
-data.
-
-::: mne_bids_pipeline._config
- options:
- members:
- - fix_stim_artifact
- - stim_artifact_tmin
- - stim_artifact_tmax
diff --git a/docs/source/settings/reports/report_generation.md b/docs/source/settings/reports/report_generation.md
deleted file mode 100644
index 2ccf520ed..000000000
--- a/docs/source/settings/reports/report_generation.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-tags:
- - report
----
-
-::: mne_bids_pipeline._config
- options:
- members:
- - report_evoked_n_time_points
- - report_stc_n_time_points
diff --git a/docs/source/settings/sensor/contrasts.md b/docs/source/settings/sensor/contrasts.md
deleted file mode 100644
index 576e45ee3..000000000
--- a/docs/source/settings/sensor/contrasts.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-tags:
- - epochs
- - evoked
- - contrast
----
-
-::: mne_bids_pipeline._config
- options:
- members:
- - contrasts
diff --git a/docs/source/settings/sensor/group_level.md b/docs/source/settings/sensor/group_level.md
deleted file mode 100644
index a330a9703..000000000
--- a/docs/source/settings/sensor/group_level.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-tags:
- - evoked
- - group-level
----
-
-::: mne_bids_pipeline._config
- options:
- members:
- - interpolate_bads_grand_average
diff --git a/docs/source/settings/sensor/mvpa.md b/docs/source/settings/sensor/mvpa.md
deleted file mode 100644
index 3a56d22d7..000000000
--- a/docs/source/settings/sensor/mvpa.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-tags:
- - epochs
- - evoked
- - contrast
- - decoding
- - mvpa
----
-
-::: mne_bids_pipeline._config
- options:
- members:
- - decode
- - decoding_which_epochs
- - decoding_epochs_tmin
- - decoding_epochs_tmax
- - decoding_metric
- - decoding_n_splits
- - decoding_time_generalization
- - decoding_time_generalization_decim
- - decoding_csp
- - decoding_csp_times
- - decoding_csp_freqs
- - n_boot
- - cluster_forming_t_threshold
- - cluster_n_permutations
- - cluster_permutation_p_threshold
diff --git a/docs/source/settings/sensor/time_freq.md b/docs/source/settings/sensor/time_freq.md
deleted file mode 100644
index 492296dc0..000000000
--- a/docs/source/settings/sensor/time_freq.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-tags:
- - epochs
- - evoked
- - time-frequency
----
-
-::: mne_bids_pipeline._config
- options:
- members:
- - time_frequency_conditions
- - time_frequency_freq_min
- - time_frequency_freq_max
- - time_frequency_cycles
- - time_frequency_subtract_evoked
- - time_frequency_baseline
- - time_frequency_baseline_mode
- - time_frequency_crop
diff --git a/docs/source/settings/source/bem.md b/docs/source/settings/source/bem.md
deleted file mode 100644
index f55972baf..000000000
--- a/docs/source/settings/source/bem.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-tags:
- - inverse-solution
- - bem
- - freesurfer
----
-
-::: mne_bids_pipeline._config
- options:
- members:
- - use_template_mri
- - adjust_coreg
- - bem_mri_images
- - recreate_bem
- - recreate_scalp_surface
- - freesurfer_verbose
diff --git a/docs/source/settings/source/forward.md b/docs/source/settings/source/forward.md
deleted file mode 100644
index 8ce5c87ad..000000000
--- a/docs/source/settings/source/forward.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-tags:
- - inverse-solution
- - forward-model
----
-
-::: mne_bids_pipeline._config
- options:
- members:
- - mri_t1_path_generator
- - mri_landmarks_kind
- - spacing
- - mindist
- - source_info_path_update
diff --git a/docs/source/settings/source/general.md b/docs/source/settings/source/general.md
deleted file mode 100644
index 09eac741f..000000000
--- a/docs/source/settings/source/general.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-tags:
- - inverse-solution
----
-
-::: mne_bids_pipeline._config
- options:
- members:
- - run_source_estimation
diff --git a/docs/source/settings/source/inverse.md b/docs/source/settings/source/inverse.md
deleted file mode 100644
index 367275071..000000000
--- a/docs/source/settings/source/inverse.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-tags:
- - inverse-solution
----
-
-::: mne_bids_pipeline._config
- options:
- members:
- - loose
- - depth
- - inverse_method
- - noise_cov
- - noise_cov_method
- - source_info_path_update
- - inverse_targets
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index aba57baea..0c0e204c0 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -37,3 +37,4 @@
- Code formatting now uses `ruff format` instead of `black` (#834, #838 by @larsoner)
- Code caching is now tested using GitHub Actions (#836 by @larsoner)
- Steps in the documentation are now automatically parsed into flowcharts (#859 by @larsoner)
+- New configuration options are now automatically added to the docs (#863 by @larsoner)
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index b45753966..06f468cb7 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -13,9 +13,8 @@
PathLike,
)
-###############################################################################
-# Config parameters
-# -----------------
+# %%
+# # General settings
study_name: str = ""
"""
@@ -95,6 +94,11 @@
The task to process.
"""
+task_is_rest: bool = False
+"""
+Whether the task should be treated as resting-state data.
+"""
+
runs: Union[Sequence, Literal["all"]] = "all"
"""
The runs to process. If `'all'`, will process all runs found in the
@@ -144,14 +148,6 @@
The BIDS `space` entity.
"""
-plot_psd_for_runs: Union[Literal["all"], Sequence[str]] = "all"
-"""
-For which runs to add a power spectral density (PSD) plot to the generated
-report. This can take a considerable amount of time if you have many long
-runs. In this case, specify the runs, or pass an empty list to disable raw PSD
-plotting.
-"""
-
subjects: Union[Sequence[str], Literal["all"]] = "all"
"""
Subjects to analyze. If `'all'`, include all subjects. To only
@@ -426,9 +422,32 @@
`'error'` to suppress warnings emitted by read_raw_bids.
"""
-###############################################################################
-# BREAK DETECTION
-# ---------------
+plot_psd_for_runs: Union[Literal["all"], Sequence[str]] = "all"
+"""
+For which runs to add a power spectral density (PSD) plot to the generated
+report. This can take a considerable amount of time if you have many long
+runs. In this case, specify the runs, or pass an empty list to disable raw PSD
+plotting.
+"""
+
+random_state: Optional[int] = 42
+"""
+You can specify the seed of the random number generator (RNG).
+This setting is passed to the ICA algorithm and to the decoding function,
+ensuring reproducible results. Set to `None` to avoid setting the RNG
+to a defined state.
+"""
+
+shortest_event: int = 1
+"""
+Minimum number of samples an event must last. If the
+duration is less than this, an exception will be raised.
+"""
+
+# %%
+# # Preprocessing
+
+# ## Break detection
find_breaks: bool = False
"""
@@ -527,10 +546,23 @@
```
"""
-###############################################################################
-# MAXWELL FILTER PARAMETERS
-# -------------------------
-# done in 01-import_and_maxfilter.py
+# %%
+# ## Bad channel detection
+#
+# !!! warning
+# This functionality will soon be removed from the pipeline, and
+# will be integrated into MNE-BIDS.
+#
+# "Bad", i.e. flat and overly noisy channels, can be automatically detected
+# using a procedure inspired by the commercial MaxFilter by Elekta. First,
+# a copy of the data is low-pass filtered at 40 Hz. Then, channels with
+# unusually low variability are flagged as "flat", while channels with
+# excessively high variability are flagged as "noisy". Flat and noisy channels
+# are marked as "bad" and excluded from subsequent analysis. See
+# :func:`mne.preprocessing.find_bad_channels_maxwell` for more information
+# on this procedure. The list of bad channels detected through this procedure
+# will be merged with the list of bad channels already present in the dataset,
+# if any.
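This detection is essentially a thin wrapper around MNE's Maxwell-based routine; a rough equivalent manual call, assuming a loaded `raw` (the function low-passes a copy of the data internally, 40 Hz by default):

```python
import mne

noisy, flat = mne.preprocessing.find_bad_channels_maxwell(raw, h_freq=40.0)
# Merge with any bads already present in the dataset
raw.info["bads"] = sorted(set(raw.info["bads"]) | set(noisy) | set(flat))
```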
find_flat_channels_meg: bool = False
"""
@@ -543,6 +575,9 @@
Auto-detect "noisy" channels and mark them as bad.
"""
+# %%
+# ## Maxwell filter
+
use_maxwell_filter: bool = False
"""
Whether or not to use Maxwell filtering to preprocess the data.
@@ -738,45 +773,29 @@
Only used when [`use_maxwell_filter=True`][mne_bids_pipeline._config.use_maxwell_filter]
""" # noqa: E501
-###############################################################################
-# STIMULATION ARTIFACT
-# --------------------
-# used in 01-import_and_maxfilter.py
+# ## Filtering & resampling
-fix_stim_artifact: bool = False
-"""
-Apply interpolation to fix stimulation artifact.
-
-???+ example "Example"
- ```python
- fix_stim_artifact = False
- ```
-"""
-
-stim_artifact_tmin: float = 0.0
-"""
-Start time of the interpolation window in seconds.
-
-???+ example "Example"
- ```python
- stim_artifact_tmin = 0. # on stim onset
- ```
-"""
-
-stim_artifact_tmax: float = 0.01
-"""
-End time of the interpolation window in seconds.
-
-???+ example "Example"
- ```python
- stim_artifact_tmax = 0.01 # up to 10ms post-stimulation
- ```
-"""
-
-###############################################################################
-# FREQUENCY FILTERING & RESAMPLING
-# --------------------------------
-# done in 02-frequency_filter.py
+# ### Filtering
+#
+# It is typically better to set your filtering properties on the raw data so
+# as to avoid what we call border (or edge) effects.
+#
+# If you use this pipeline for evoked responses, you could consider
+# a low-pass filter cut-off of h_freq = 40 Hz
+# and possibly a high-pass filter cut-off of l_freq = 1 Hz
+# so you would preserve only the power in the 1 Hz to 40 Hz band.
+# Note that highpass filtering is not necessarily recommended as it can
+# distort waveforms of evoked components, or simply wash out any low
+# frequency that may contain brain signal. It can also act as
+# a replacement for baseline correction in Epochs. See below.
+#
+# If you use this pipeline for time-frequency analysis, a default filtering
+# could be a high-pass filter cut-off of l_freq = 1 Hz
+# and a low-pass filter cut-off of h_freq = 120 Hz,
+# so you would preserve only the power in the 1 Hz to 120 Hz band.
+#
+# If you need fancier analyses, you are likely already past this kind
+# of tip! 😇
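Following that advice, a user config for an evoked-response analysis might look like this (illustrative values only, not defaults):

```python
# Hypothetical user configuration for an evoked-response analysis
l_freq = 1.0    # high-pass cut-off in Hz (or None to skip high-pass filtering)
h_freq = 40.0   # low-pass cut-off in Hz
l_trans_bandwidth = "auto"  # let MNE pick the transition bandwidths
h_trans_bandwidth = "auto"
```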
l_freq: Optional[float] = None
"""
@@ -790,6 +809,20 @@
Keep it `None` if no lowpass filtering should be applied.
"""
+l_trans_bandwidth: Union[float, Literal["auto"]] = "auto"
+"""
+Specifies the transition bandwidth of the
+highpass filter. By default it's `'auto'` and uses default MNE
+parameters.
+"""
+
+h_trans_bandwidth: Union[float, Literal["auto"]] = "auto"
+"""
+Specifies the transition bandwidth of the
+lowpass filter. By default it's `'auto'` and uses default MNE
+parameters.
+"""
+
notch_freq: Optional[Union[float, Sequence[float]]] = None
"""
Notch filter frequency. More than one frequency can be supplied, e.g. to remove
@@ -809,20 +842,6 @@
```
"""
-l_trans_bandwidth: Union[float, Literal["auto"]] = "auto"
-"""
-Specifies the transition bandwidth of the
-highpass filter. By default it's `'auto'` and uses default MNE
-parameters.
-"""
-
-h_trans_bandwidth: Union[float, Literal["auto"]] = "auto"
-"""
-Specifies the transition bandwidth of the
-lowpass filter. By default it's `'auto'` and uses default MNE
-parameters.
-"""
-
notch_trans_bandwidth: float = 1.0
"""
Specifies the transition bandwidth of the notch filter. The default is `1.`.
@@ -833,6 +852,15 @@
Specifies the width of each stop band. `None` uses the MNE default.
"""
+# ### Resampling
+#
+# If you have acquired data with a very high sampling frequency (e.g., 2 kHz),
+# you will likely want to downsample it to reduce the size of the files you
+# are working with (pragmatics).
+# If you are interested in typical analyses (up to 120 Hz), you can usually
+# resample your data down to 500 Hz without preventing reliable time-frequency
+# exploration of your data.
+
raw_resample_sfreq: Optional[float] = None
"""
Specifies at which sampling frequency the data should be resampled.
@@ -845,10 +873,6 @@
```
"""
-###############################################################################
-# DECIMATION
-# ----------
-
epochs_decim: int = 1
"""
Says how much to decimate data at the epochs level.
@@ -867,9 +891,7 @@
"""
-###############################################################################
-# RENAME EXPERIMENTAL EVENTS
-# --------------------------
+# ## Epoching
rename_events: dict = dict()
"""
@@ -895,10 +917,6 @@
to only get a warning instead, or `'ignore'` to ignore it completely.
"""
-###############################################################################
-# HANDLING OF REPEATED EVENTS
-# ---------------------------
-
event_repeated: Literal["error", "drop", "merge"] = "error"
"""
How to handle repeated events. We call events "repeated" if more than one event
@@ -914,10 +932,6 @@
April 1st, 2021.
"""
-###############################################################################
-# EPOCHING
-# --------
-
epochs_metadata_tmin: Optional[float] = None
"""
The beginning of the time window for metadata generation, in seconds,
@@ -1032,11 +1046,6 @@
```
"""
-task_is_rest: bool = False
-"""
-Whether the task should be treated as resting-state data.
-"""
-
rest_epochs_duration: Optional[float] = None
"""
Duration of epochs in seconds.
@@ -1059,72 +1068,46 @@
```
"""
-contrasts: Sequence[Union[tuple[str, str], ArbitraryContrast]] = []
-"""
-The conditions to contrast via a subtraction of ERPs / ERFs. The list elements
-can either be tuples or dictionaries (or a mix of both). Each element in the
-list corresponds to a single contrast.
-
-A tuple specifies a one-vs-one contrast, where the second condition is
-subtracted from the first.
-
-If a dictionary, must contain the following keys:
+# ## Artifact removal
-- `name`: a custom name of the contrast
-- `conditions`: the conditions to contrast
-- `weights`: the weights associated with each condition.
-
-Pass an empty list to avoid calculation of any contrasts.
+# ### Stimulation artifact
+#
+# When using electric stimulation systems, e.g. for median nerve or index
+# stimulation, it is common to have a stimulation artifact. This option
+# allows you to fix it by linear interpolation early in the pipeline, on the
+# raw data.
-For the contrasts to be computed, the appropriate conditions must have been
-epoched, and therefore the conditions should either match or be subsets of
-`conditions` above.
+fix_stim_artifact: bool = False
+"""
+Apply interpolation to fix stimulation artifact.
???+ example "Example"
- Contrast the "left" and the "right" conditions by calculating
- `left - right` at every time point of the evoked responses:
```python
- contrasts = [('left', 'right')] # Note we pass a tuple inside the list!
+ fix_stim_artifact = False
```
+"""
- Contrast the "left" and the "right" conditions within the "auditory" and
- the "visual" modality, and "auditory" vs "visual" regardless of side:
+stim_artifact_tmin: float = 0.0
+"""
+Start time of the interpolation window in seconds.
+
+???+ example "Example"
```python
- contrasts = [('auditory/left', 'auditory/right'),
- ('visual/left', 'visual/right'),
- ('auditory', 'visual')]
+ stim_artifact_tmin = 0. # on stim onset
```
+"""
- Contrast the "left" and the "right" regardless of side, and compute an
- arbitrary contrast with a gradient of weights:
+stim_artifact_tmax: float = 0.01
+"""
+End time of the interpolation window in seconds.
+
+???+ example "Example"
```python
- contrasts = [
- ('auditory/left', 'auditory/right'),
- {
- 'name': 'gradedContrast',
- 'conditions': [
- 'auditory/left',
- 'auditory/right',
- 'visual/left',
- 'visual/right'
- ],
- 'weights': [-1.5, -.5, .5, 1.5]
- }
- ]
+ stim_artifact_tmax = 0.01 # up to 10ms post-stimulation
```
"""
-###############################################################################
-# ARTIFACT REMOVAL
-# ----------------
-#
-# You can choose between ICA and SSP to remove eye and heart artifacts.
-# SSP: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ssp.html?highlight=ssp # noqa
-# ICA: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ica.html?highlight=ica # noqa
-# if you choose ICA, run steps 5a and 6a
-# if you choose SSP, run steps 5b and 6b
-#
-# Currently you cannot use both.
+# ### SSP, ICA, and artifact regression
regress_artifact: Optional[dict[str, Any]] = None
"""
@@ -1171,9 +1154,6 @@
Minimal number of EOG epochs needed to compute SSP projectors.
"""
-
-# Rejection based on SSP
-# ~~~~~~~~~~~~~~~~~~~~~~
n_proj_eog: dict[str, float] = dict(n_mag=1, n_grad=1, n_eeg=1)
"""
Number of SSP vectors to create for EOG artifacts for each channel type.
@@ -1249,8 +1229,6 @@
is not reliable.
"""
-# Rejection based on ICA
-# ~~~~~~~~~~~~~~~~~~~~~~
ica_reject: Optional[Union[dict[str, float], Literal["autoreject_local"]]] = None
"""
Peak-to-peak amplitude limits to exclude epochs from ICA fitting. This allows you to
@@ -1388,8 +1366,13 @@
false-alarm rate increases dramatically.
"""
-# Rejection based on peak-to-peak amplitude
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# ### Amplitude-based artifact rejection
+#
+# ???+ info "Good Practice / Advice"
+# Have a look at your raw data and train yourself to detect a blink, a heart
+# beat and an eye movement.
+# You can do a quick average of blink data and check what the amplitude looks
+# like.
reject: Optional[
Union[dict[str, float], Literal["autoreject_global", "autoreject_local"]]
@@ -1471,9 +1454,67 @@
be considered (i.e., will remain marked as bad and not analyzed by autoreject).
"""
-###############################################################################
-# DECODING
-# --------
+# %%
+# # Sensor-level analysis
+
+# ## Condition contrasts
+
+contrasts: Sequence[Union[tuple[str, str], ArbitraryContrast]] = []
+"""
+The conditions to contrast via a subtraction of ERPs / ERFs. The list elements
+can either be tuples or dictionaries (or a mix of both). Each element in the
+list corresponds to a single contrast.
+
+A tuple specifies a one-vs-one contrast, where the second condition is
+subtracted from the first.
+
+If a dictionary, must contain the following keys:
+
+- `name`: a custom name of the contrast
+- `conditions`: the conditions to contrast
+- `weights`: the weights associated with each condition.
+
+Pass an empty list to avoid calculation of any contrasts.
+
+For the contrasts to be computed, the appropriate conditions must have been
+epoched, and therefore the conditions should either match or be subsets of
+`conditions` above.
+
+???+ example "Example"
+ Contrast the "left" and the "right" conditions by calculating
+ `left - right` at every time point of the evoked responses:
+ ```python
+ contrasts = [('left', 'right')] # Note we pass a tuple inside the list!
+ ```
+
+ Contrast the "left" and the "right" conditions within the "auditory" and
+ the "visual" modality, and "auditory" vs "visual" regardless of side:
+ ```python
+ contrasts = [('auditory/left', 'auditory/right'),
+ ('visual/left', 'visual/right'),
+ ('auditory', 'visual')]
+ ```
+
+ Contrast the "left" and the "right" regardless of side, and compute an
+ arbitrary contrast with a gradient of weights:
+ ```python
+ contrasts = [
+ ('auditory/left', 'auditory/right'),
+ {
+ 'name': 'gradedContrast',
+ 'conditions': [
+ 'auditory/left',
+ 'auditory/right',
+ 'visual/left',
+ 'visual/right'
+ ],
+ 'weights': [-1.5, -.5, .5, 1.5]
+ }
+ ]
+ ```
+"""
+
+# ## Decoding / MVPA
decode: bool = True
"""
@@ -1572,6 +1613,78 @@
resolution in the resulting matrix.
"""
+decoding_csp: bool = False
+"""
+Whether to run decoding via Common Spatial Patterns (CSP) analysis on the
+data. CSP takes as input data covariances that are estimated on different
+time and frequency ranges. This makes it possible to obtain decoding scores
+defined over time and frequency.
+"""
+
+decoding_csp_times: Optional[FloatArrayLike] = None
+"""
+The edges of the time bins to use for CSP decoding.
+Must contain at least two elements. By default, 5 equally-spaced bins are
+created across the non-negative time range of the epochs.
+All specified time points must be contained in the epochs interval.
+If `None`, do not perform **time-frequency** analysis, and only run CSP on
+**frequency** data.
+
+???+ example "Example"
+ Create 3 equidistant time bins (0–0.2, 0.2–0.4, 0.4–0.6 sec):
+ ```python
+ decoding_csp_times = np.linspace(start=0, stop=0.6, num=4)
+ ```
+ Create 2 time bins of different durations (0–0.4, 0.4–0.6 sec):
+ ```python
+ decoding_csp_times = [0, 0.4, 0.6]
+ ```
+"""
+
+decoding_csp_freqs: Optional[dict[str, FloatArrayLike]] = None
+"""
+The edges of the frequency bins to use for CSP decoding.
+
+This parameter must be a dictionary with:
+- keys specifying the unique identifier or "name" to use for the frequency
+ range to be treated jointly during statistical testing (such as "alpha" or
+ "beta"), and
+- values that are list-like objects containing at least two scalar values,
+ specifying the edges of the respective frequency bin(s), e.g., `[8, 12]`.
+
+Defaults to two frequency bins, one from
+[`time_frequency_freq_min`][mne_bids_pipeline._config.time_frequency_freq_min]
+to the midpoint between this value and
+[`time_frequency_freq_max`][mne_bids_pipeline._config.time_frequency_freq_max];
+and the other from that midpoint to `time_frequency_freq_max`.
+
+???+ example "Example"
+ Create two frequency bins, one for 4–8 Hz, and another for 8–14 Hz, which
+ will be clustered together during statistical testing (in the
+ time-frequency plane):
+ ```python
+ decoding_csp_freqs = {
+ 'custom_range': [4, 8, 14]
+ }
+ ```
+ Create the same two frequency bins, but treat them separately during
+ statistical testing (i.e., temporal clustering only):
+ ```python
+ decoding_csp_freqs = {
+ 'theta': [4, 8],
+ 'alpha': [8, 14]
+ }
+ ```
+ Create 5 equidistant frequency bins from 4 to 14 Hz:
+ ```python
+ decoding_csp_freqs = {
+ 'custom_range': np.linspace(
+ start=4,
+ stop=14,
+ num=5+1 # We need one more to account for the endpoint!
+ )
+    }
+    ```
+"""
+
n_boot: int = 5000
"""
The number of bootstrap resamples when estimating the standard error and
@@ -1608,26 +1721,7 @@
[`cluster_forming_t_threshold`][mne_bids_pipeline._config.cluster_forming_t_threshold].
"""
-###############################################################################
-# GROUP AVERAGE SENSORS
-# ---------------------
-
-interpolate_bads_grand_average: bool = True
-"""
-Interpolate bad sensors in each dataset before calculating the grand
-average. This parameter is passed to the `mne.grand_average` function via
-the keyword argument `interpolate_bads`. It requires to have channel
-locations set.
-
-???+ example "Example"
- ```python
- interpolate_bads_grand_average = True
- ```
-"""
-
-###############################################################################
-# TIME-FREQUENCY
-# --------------
+# ## Time-frequency analysis
time_frequency_conditions: Sequence[str] = []
"""
@@ -1676,82 +1770,6 @@
This also applies to CSP analysis.
"""
-###############################################################################
-# TIME-FREQUENCY CSP
-# ------------------
-
-decoding_csp: bool = False
-"""
-Whether to run decoding via Common Spatial Patterns (CSP) analysis on the
-data. CSP takes as input data covariances that are estimated on different
-time and frequency ranges. This allows to obtain decoding scores defined over
-time and frequency.
-"""
-
-decoding_csp_times: Optional[FloatArrayLike] = None
-"""
-The edges of the time bins to use for CSP decoding.
-Must contain at least two elements. By default, 5 equally-spaced bins are
-created across the non-negative time range of the epochs.
-All specified time points must be contained in the epochs interval.
-If `None`, do not perform **time-frequency** analysis, and only run CSP on
-**frequency** data.
-
-???+ example "Example"
- Create 3 equidistant time bins (0–0.2, 0.2–0.4, 0.4–0.6 sec):
- ```python
- decoding_csp_times = np.linspace(start=0, stop=0.6, num=4)
- ```
- Create 2 time bins of different durations (0–0.4, 0.4–0.6 sec):
- ```python
- decoding_csp_times = [0, 0.4, 0.6]
- ```
-"""
-
-decoding_csp_freqs: Optional[dict[str, FloatArrayLike]] = None
-"""
-The edges of the frequency bins to use for CSP decoding.
-
-This parameter must be a dictionary with:
-- keys specifying the unique identifier or "name" to use for the frequency
- range to be treated jointly during statistical testing (such as "alpha" or
- "beta"), and
-- values must be list-like objects containing at least two scalar values,
- specifying the edges of the respective frequency bin(s), e.g., `[8, 12]`.
-
-Defaults to two frequency bins, one from
-[`time_frequency_freq_min`][mne_bids_pipeline._config.time_frequency_freq_min]
-to the midpoint between this value and
-[`time_frequency_freq_max`][mne_bids_pipeline._config.time_frequency_freq_max];
-and the other from that midpoint to `time_frequency_freq_max`.
-???+ example "Example"
- Create two frequency bins, one for 4–8 Hz, and another for 8–14 Hz, which
- will be clustered together during statistical testing (in the
- time-frequency plane):
- ```python
- decoding_csp_freqs = {
- 'custom_range': [4, 8, 14]
- }
- ```
- Create the same two frequency bins, but treat them separately during
- statistical testing (i.e., temporal clustering only):
- ```python
- decoding_csp_freqs = {
- 'theta': [4, 8],
- 'alpha': [8, 14]
- }
- ```
- Create 5 equidistant frequency bins from 4 to 14 Hz:
- ```python
- decoding_csp_freqs = {
- 'custom_range': np.linspace(
- start=4,
- stop=14,
- num=5+1 # We need one more to account for the endpoint!
- )
- }
-"""
-
time_frequency_baseline: Optional[tuple[float, float]] = None
"""
Baseline period to use for the time-frequency analysis. If `None`, no baseline.
@@ -1782,16 +1800,33 @@
```
"""
-###############################################################################
-# SOURCE ESTIMATION PARAMETERS
-# ----------------------------
-#
+# ## Group-level analysis
+
+interpolate_bads_grand_average: bool = True
+"""
+Interpolate bad sensors in each dataset before calculating the grand
+average. This parameter is passed to the `mne.grand_average` function via
+the keyword argument `interpolate_bads`. It requires channel locations
+to be set.
+
+???+ example "Example"
+ ```python
+ interpolate_bads_grand_average = True
+ ```
+"""
+
+# %%
+# # Source-level analysis
+
+# ## General source analysis settings
run_source_estimation: bool = True
"""
Whether to run source estimation processing steps if not explicitly requested.
"""
+# ## BEM surface
+
use_template_mri: Optional[str] = None
"""
Whether to use a template MRI subject such as FreeSurfer's `fsaverage` subject.
@@ -1864,6 +1899,8 @@
Whether to print the complete output of FreeSurfer commands. Note that if
`False`, FreeSurfer output might not be displayed at all!"""
+# ## Source space & forward solution
+
mri_t1_path_generator: Optional[Callable[[BIDSPath], BIDSPath]] = None
"""
To perform source-level analyses, the Pipeline needs to generate a
@@ -1956,6 +1993,8 @@ def mri_landmarks_kind(bids_path):
Exclude points closer than this distance (mm) to the bounding surface.
"""
+# ## Inverse solution
+
loose: Union[float, Literal["auto"]] = 0.2
"""
Value that weights the source variances of the dipole components
@@ -2103,9 +2142,10 @@ def noise_cov(bids_path):
```
"""
-###############################################################################
-# Report generation
-# -----------------
+# %%
+# # Reports
+
+# ## Report generation
report_evoked_n_time_points: Optional[int] = None
"""
@@ -2131,9 +2171,11 @@ def noise_cov(bids_path):
```
"""
-###############################################################################
-# Execution
-# ---------
+# %%
+# # Execution
+#
+# These options control how the pipeline is executed but should not affect
+# what outputs get produced.
n_jobs: int = 1
"""
@@ -2173,20 +2215,6 @@ def noise_cov(bids_path):
The maximum amount of RAM per Dask worker.
"""
-random_state: Optional[int] = 42
-"""
-You can specify the seed of the random number generator (RNG).
-This setting is passed to the ICA algorithm and to the decoding function,
-ensuring reproducible results. Set to `None` to avoid setting the RNG
-to a defined state.
-"""
-
-shortest_event: int = 1
-"""
-Minimum number of samples an event must last. If the
-duration is less than this, an exception will be raised.
-"""
-
log_level: Literal["info", "error"] = "info"
"""
Set the pipeline logging verbosity.
diff --git a/mne_bids_pipeline/tests/test_documented.py b/mne_bids_pipeline/tests/test_documented.py
index dd90f7ad5..906bba3f6 100644
--- a/mne_bids_pipeline/tests/test_documented.py
+++ b/mne_bids_pipeline/tests/test_documented.py
@@ -2,6 +2,7 @@
import ast
import os
import re
+import sys
from pathlib import Path
import yaml
@@ -29,31 +30,40 @@ def test_options_documented():
config_names = set(d for d in dir(config) if not d.startswith("_"))
assert in_config == config_names
settings_path = root_path.parent / "docs" / "source" / "settings"
+ sys.path.append(str(settings_path))
+ try:
+ from gen_settings import main
+ finally:
+ sys.path.pop()
+ main()
assert settings_path.is_dir()
- in_doc = set()
+ in_doc = dict()
key = " - "
- allowed_duplicates = set(
- [
- "source_info_path_update",
- ]
- )
for dirpath, _, fnames in os.walk(settings_path):
for fname in fnames:
if not fname.endswith(".md"):
continue
# This is a .md file
- with open(Path(dirpath) / fname) as fid:
+ # convert to relative path
+ fname = os.path.join(os.path.relpath(dirpath, settings_path), fname)
+ assert fname not in in_doc
+ in_doc[fname] = set()
+ with open(settings_path / fname) as fid:
for line in fid:
if not line.startswith(key):
continue
# The line starts with our magic key
val = line[len(key) :].strip()
- if val not in allowed_duplicates:
- assert val not in in_doc, "Duplicate documentation"
- in_doc.add(val)
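+                    # Each setting may be documented in only one settings page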
+ for other in in_doc:
+ why = f"Duplicate docs in {fname} and {other} for {val}"
+ assert val not in in_doc[other], why
+ in_doc[fname].add(val)
what = "docs/source/settings doc"
- assert in_doc.difference(in_config) == set(), f"Extra values in {what}"
- assert in_config.difference(in_doc) == set(), f"Values missing from {what}"
+ in_doc_all = set()
+ for vals in in_doc.values():
+ in_doc_all.update(vals)
+ assert in_doc_all.difference(in_config) == set(), f"Extra values in {what}"
+ assert in_config.difference(in_doc_all) == set(), f"Values missing from {what}"
def test_datasets_in_doc():
From 4c73c2c24d1f797fcff70e25bb231705b2364ce2 Mon Sep 17 00:00:00 2001
From: Sophie Herbst
Date: Thu, 29 Feb 2024 11:16:05 +0100
Subject: [PATCH 071/132] add image_kwargs to report (#859)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Eric Larson
---
docs/source/v1.6.md.inc | 1 +
mne_bids_pipeline/_config.py | 16 ++++++++++++++++
.../steps/preprocessing/_07_make_epochs.py | 11 +++++++++++
.../steps/preprocessing/_09_ptp_reject.py | 3 +++
.../tests/configs/config_ds004229.py | 4 ++++
5 files changed, 35 insertions(+)
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 0c0e204c0..2c099e8ad 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -9,6 +9,7 @@
- Added saving of clean raw data in addition to epochs (#840 by @larsoner)
- Added saving of detected blink and cardiac events used to calculate SSP projectors (#840 by @larsoner)
- Added [`noise_cov_method`][mne_bids_pipeline._config.noise_cov_method] to allow for the use of methods other than `"shrunk"` for noise covariance estimation (#854 by @larsoner)
+- Added option to pass `image_kwargs` to [`mne.Report.add_epochs`] to allow adjusting e.g. `"vmin"` and `"vmax"` of the epochs image in the report via [`report_add_epochs_image_kwargs`][mne_bids_pipeline._config.report_add_epochs_image_kwargs] (#848 by @SophieHerbst)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 06f468cb7..b8c918301 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -2171,6 +2171,22 @@ def noise_cov(bids_path):
```
"""
+report_add_epochs_image_kwargs: Optional[dict] = None
+"""
+Specifies the limits for the color scales of the epochs image in the report.
+If `None`, MNE-Python's current defaults are used.
+
+???+ example "Example"
+ Set vmin and vmax to the epochs rejection thresholds (with unit conversion):
+
+ ```python
+ report_add_epochs_image_kwargs = {
+ "grad": {"vmin": 0, "vmax": 1e13 * reject["grad"]}, # fT/cm
+ "mag": {"vmin": 0, "vmax": 1e15 * reject["mag"]}, # fT
+ }
+ ```
+"""
+
# %%
# # Execution
#
diff --git a/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py b/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
index e32fbd0ed..cc010d0b8 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
@@ -7,6 +7,7 @@
To save space, the epoch data can be decimated.
"""
+import inspect
from types import SimpleNamespace
from typing import Optional
@@ -259,6 +260,7 @@ def run_epochs(
psd=psd,
drop_log_ignore=(),
replace=True,
+ **_add_epochs_image_kwargs(cfg),
)
# Interactive
@@ -269,6 +271,14 @@ def run_epochs(
return _prep_out_files(exec_params=exec_params, out_files=out_files)
+def _add_epochs_image_kwargs(cfg: SimpleNamespace) -> dict:
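+    # Only forward `image_kwargs` when the installed MNE version supports it;
+    # older versions of mne.Report.add_epochs lack this keyword-only argument.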
+ arg_spec = inspect.getfullargspec(mne.Report.add_epochs)
+ kwargs = dict()
+ if cfg.report_add_epochs_image_kwargs and "image_kwargs" in arg_spec.kwonlyargs:
+ kwargs["image_kwargs"] = cfg.report_add_epochs_image_kwargs
+ return kwargs
+
+
# TODO: ideally we wouldn't need this anymore and could refactor the code above
def _get_events(cfg, subject, session):
raws_filt = []
@@ -318,6 +328,7 @@ def get_config(
epochs_metadata_query=config.epochs_metadata_query,
event_repeated=config.event_repeated,
epochs_decim=config.epochs_decim,
+ report_add_epochs_image_kwargs=config.report_add_epochs_image_kwargs,
ch_types=config.ch_types,
noise_cov=_sanitize_callable(config.noise_cov),
eeg_reference=get_eeg_reference(config),
diff --git a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
index 434b235ec..51ca1149b 100644
--- a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
+++ b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
@@ -26,6 +26,7 @@
from ..._reject import _get_reject
from ..._report import _open_report
from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
+from ._07_make_epochs import _add_epochs_image_kwargs
def get_input_fnames_drop_ptp(
@@ -227,6 +228,7 @@ def drop_ptp(
drop_log_ignore=(),
tags=tags,
replace=True,
+ **_add_epochs_image_kwargs(cfg=cfg),
)
return _prep_out_files(exec_params=exec_params, out_files=out_files)
@@ -246,6 +248,7 @@ def get_config(
random_state=config.random_state,
ch_types=config.ch_types,
_epochs_split_size=config._epochs_split_size,
+ report_add_epochs_image_kwargs=config.report_add_epochs_image_kwargs,
**_bids_kwargs(config=config),
)
return cfg
diff --git a/mne_bids_pipeline/tests/configs/config_ds004229.py b/mne_bids_pipeline/tests/configs/config_ds004229.py
index 956f92010..355dbcf6d 100644
--- a/mne_bids_pipeline/tests/configs/config_ds004229.py
+++ b/mne_bids_pipeline/tests/configs/config_ds004229.py
@@ -45,6 +45,10 @@
epochs_tmax = 1
epochs_decim = 6 # 1200->200 Hz
baseline = (None, 0)
+report_add_epochs_image_kwargs = {
+ "grad": {"vmin": 0, "vmax": 1e13 * reject["grad"]}, # fT/cm
+ "mag": {"vmin": 0, "vmax": 1e15 * reject["mag"]}, # fT
+}
# Conditions / events to consider when epoching
conditions = ["auditory"]
From 324d4b49edb79737e656b74c238d93d8cdf940b2 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Fri, 1 Mar 2024 00:16:43 -0500
Subject: [PATCH 072/132] MAINT: Remove datalad (#867)
---
.circleci/config.yml | 2 --
.circleci/setup_bash.sh | 4 +---
CONTRIBUTING.md | 8 +------
Makefile | 2 --
docs/source/examples/gen_examples.py | 13 +++--------
docs/source/v1.6.md.inc | 1 +
mne_bids_pipeline/_download.py | 33 +---------------------------
mne_bids_pipeline/tests/datasets.py | 10 ---------
pyproject.toml | 1 -
9 files changed, 7 insertions(+), 67 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 8a1e87683..5dc0d8901 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -286,7 +286,6 @@ jobs:
keys:
- data-cache-eeg_matchingpennies-1
- bash_env
- - gitconfig # email address is needed for datalad
- run:
name: Get eeg_matchingpennies
command: |
@@ -306,7 +305,6 @@ jobs:
keys:
- data-cache-MNE-phantom-KIT-data-1
- bash_env
- - gitconfig # email address is needed for datalad
- run:
name: Get MNE-phantom-KIT-data
command: |
diff --git a/.circleci/setup_bash.sh b/.circleci/setup_bash.sh
index ee44b317b..a7a00ad71 100755
--- a/.circleci/setup_bash.sh
+++ b/.circleci/setup_bash.sh
@@ -33,15 +33,13 @@ fi
# Set up image
sudo ln -s /usr/lib/x86_64-linux-gnu/libxcb-util.so.0 /usr/lib/x86_64-linux-gnu/libxcb-util.so.1
-wget -q -O- http://neuro.debian.net/lists/focal.us-tn.libre | sudo tee /etc/apt/sources.list.d/neurodebian.sources.list
-sudo apt-key adv --recv-keys --keyserver hkps://keyserver.ubuntu.com 0xA5D32F012649A5A9
echo "export RUN_TESTS=\".circleci/run_dataset_and_copy_files.sh\"" >> "$BASH_ENV"
echo "export DOWNLOAD_DATA=\"coverage run -m mne_bids_pipeline._download\"" >> "$BASH_ENV"
# Similar CircleCI setup to mne-python (Xvfb, venv, minimal commands, env vars)
wget -q https://raw.githubusercontent.com/mne-tools/mne-python/main/tools/setup_xvfb.sh
bash setup_xvfb.sh
-sudo apt install -qq tcsh git-annex-standalone python3.10-venv python3-venv libxft2
+sudo apt install -qq tcsh python3.10-venv python3-venv libxft2
python3.10 -m venv ~/python_env
wget -q https://raw.githubusercontent.com/mne-tools/mne-python/main/tools/get_minimal_commands.sh
source get_minimal_commands.sh
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a403bc8f2..3fafbf0d0 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -11,17 +11,11 @@ Once this is done, you should be able to run this in a terminal:
`$ python -c "import mne; mne.sys_info()"`
-You can then install the following additional packages via `pip`. Note that
+You can then install the following additional package via `pip`. Note that
the URL points to the bleeding edge version of `mne_bids`:
-`$ pip install datalad`
`$ pip install https://github.com/mne-tools/mne-bids/zipball/main`
-To get the test data, you need to install `git-annex` on your system. If you
-installed MNE-Python via `conda`, you can simply call:
-
-`conda install -c conda-forge git-annex`
-
Now, get the pipeline through git:
`$ git clone https://github.com/mne-tools/mne-bids-pipeline.git`
diff --git a/Makefile b/Makefile
index 4e491526b..3199402d6 100644
--- a/Makefile
+++ b/Makefile
@@ -23,8 +23,6 @@ doc:
check:
which python
- git-annex version
- datalad --version
openneuro-py --version
mri_convert --version
mne_bids --version
diff --git a/docs/source/examples/gen_examples.py b/docs/source/examples/gen_examples.py
index b55e526d8..6e6363519 100755
--- a/docs/source/examples/gen_examples.py
+++ b/docs/source/examples/gen_examples.py
@@ -203,21 +203,14 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict:
f"{fname.name} :fontawesome-solid-square-poll-vertical:\n\n"
)
- assert (
- sum(key in options for key in ("openneuro", "git", "web", "datalad", "mne"))
- == 1
- )
+ assert sum(key in options for key in ("openneuro", "web", "mne")) == 1
if "openneuro" in options:
url = f'https://openneuro.org/datasets/{options["openneuro"]}'
- elif "git" in options:
- url = options["git"]
elif "web" in options:
url = options["web"]
- elif "mne" in options:
- url = f"https://mne.tools/dev/generated/mne.datasets.{options['mne']}.data_path.html" # noqa: E501
else:
- assert "datalad" in options # guaranteed above
- url = ""
+ assert "mne" in options
+ url = f"https://mne.tools/dev/generated/mne.datasets.{options['mne']}.data_path.html" # noqa: E501
source_str = (
f"## Dataset source\n\nThis dataset was acquired from " f"[{url}]({url})\n"
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 2c099e8ad..9abe51c96 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -35,6 +35,7 @@
### :medical_symbol: Code health
- The package build backend has been switched from `setuptools` to `hatchling`. (#825 by @hoechenberger)
+- Removed dependencies on `datalad` and `git-annex` for testing (#867 by @larsoner)
- Code formatting now uses `ruff format` instead of `black` (#834, #838 by @larsoner)
- Code caching is now tested using GitHub Actions (#836 by @larsoner)
- Steps in the documentation are now automatically parsed into flowcharts (#859 by @larsoner)
diff --git a/mne_bids_pipeline/_download.py b/mne_bids_pipeline/_download.py
index 46cf17e7a..a310f0f67 100644
--- a/mne_bids_pipeline/_download.py
+++ b/mne_bids_pipeline/_download.py
@@ -9,29 +9,6 @@
DEFAULT_DATA_DIR = Path("~/mne_data").expanduser()
-def _download_via_datalad(*, ds_name: str, ds_path: Path):
- import datalad.api as dl
-
- print(f'datalad installing "{ds_name}"')
- options = DATASET_OPTIONS[ds_name]
- git_url = options["git"]
- assert "exclude" not in options
- assert "hash" not in options
- dataset = dl.install(path=ds_path, source=git_url)
-
- # XXX: git-annex bug:
- # https://github.com/datalad/datalad/issues/3583
- # if datalad fails, use "get" twice, or set `n_jobs=1`
- if ds_name == "ds003104":
- n_jobs = 16
- else:
- n_jobs = 1
-
- for to_get in DATASET_OPTIONS[ds_name].get("include", []):
- print(f'datalad get data "{to_get}" for "{ds_name}"')
- dataset.get(to_get, jobs=n_jobs)
-
-
def _download_via_openneuro(*, ds_name: str, ds_path: Path):
import openneuro
@@ -88,20 +65,12 @@ def _download_via_mne(*, ds_name: str, ds_path: Path):
def _download(*, ds_name: str, ds_path: Path):
options = DATASET_OPTIONS[ds_name]
openneuro_name = options.get("openneuro", "")
- git_url = options.get("git", "")
- osf_node = options.get("osf", "")
web_url = options.get("web", "")
mne_mod = options.get("mne", "")
- assert (
- sum(bool(x) for x in (openneuro_name, git_url, osf_node, web_url, mne_mod)) == 1
- )
+ assert sum(bool(x) for x in (openneuro_name, web_url, mne_mod)) == 1
if openneuro_name:
download_func = _download_via_openneuro
- elif git_url:
- download_func = _download_via_datalad
- elif osf_node:
- raise RuntimeError("OSF downloads are currently not supported.")
elif mne_mod:
download_func = _download_via_mne
else:
diff --git a/mne_bids_pipeline/tests/datasets.py b/mne_bids_pipeline/tests/datasets.py
index c559f06ca..95c846bd3 100644
--- a/mne_bids_pipeline/tests/datasets.py
+++ b/mne_bids_pipeline/tests/datasets.py
@@ -22,16 +22,6 @@ class DATASET_OPTIONS_T(TypedDict, total=False):
"hash": "sha256:ddc94a7c9ba1922637f2770592dd51c019d341bf6bc8558e663e1979a4cb002f", # noqa: E501
},
"eeg_matchingpennies": {
- # This dataset started out on osf.io as dataset https://osf.io/cj2dr
- # then moved to g-node.org. As of 2023/02/28 when we download it via
- # datalad it's too (~200 kB/sec!) and times out at the end:
- #
- # "git": "https://gin.g-node.org/sappelhoff/eeg_matchingpennies",
- # "web": "",
- # "include": ["sub-05"],
- #
- # So now we mirror this datalad-fetched git repo back on osf.io!
- # original dataset: "osf": "cj2dr"
"web": "https://osf.io/download/8rbfk?version=1",
"hash": "sha256:06bfbe52c50b9343b6b8d2a5de3dd33e66ad9303f7f6bfbe6868c3c7c375fafd", # noqa: E501
},
diff --git a/pyproject.toml b/pyproject.toml
index 20a020d24..0b5465fdf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -57,7 +57,6 @@ tests = [
"pytest-cov",
"pooch",
"psutil",
- "datalad",
"ruff",
"mkdocs",
"mkdocs-material >= 9.0.4",
From 94256f70ee93be3342a35cd56097b3fe34119a90 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Fri, 1 Mar 2024 08:24:16 -0500
Subject: [PATCH 073/132] ENH: Split ICA into multiple steps (#865)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: Richard Höchenberger
---
.circleci/run_dataset_and_copy_files.sh | 5 +-
docs/mkdocs.yml | 2 +-
docs/source/features/gen_steps.py | 59 +-
docs/source/v1.6.md.inc | 2 +
mne_bids_pipeline/_report.py | 43 +-
mne_bids_pipeline/_run.py | 36 +-
.../steps/preprocessing/_06a1_fit_ica.py | 354 ++++++++++
.../preprocessing/_06a2_find_ica_artifacts.py | 399 +++++++++++
.../steps/preprocessing/_06a_run_ica.py | 641 ------------------
.../steps/preprocessing/_08a_apply_ica.py | 33 +-
.../steps/preprocessing/__init__.py | 6 +-
.../tests/configs/config_ERP_CORE.py | 2 +-
12 files changed, 876 insertions(+), 706 deletions(-)
create mode 100644 mne_bids_pipeline/steps/preprocessing/_06a1_fit_ica.py
create mode 100644 mne_bids_pipeline/steps/preprocessing/_06a2_find_ica_artifacts.py
delete mode 100644 mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
diff --git a/.circleci/run_dataset_and_copy_files.sh b/.circleci/run_dataset_and_copy_files.sh
index 34dcfa14f..f8ae2d31f 100755
--- a/.circleci/run_dataset_and_copy_files.sh
+++ b/.circleci/run_dataset_and_copy_files.sh
@@ -32,15 +32,16 @@ echo "Runtime: ${SECONDS} seconds"
# rerun test (check caching)!
SECONDS=0
+RERUN_LIMIT=30
if [[ "$RERUN_TEST" == "false" ]]; then
echo "Skipping rerun test"
RUN_TIME=0
else
pytest mne_bids_pipeline --cov-append -k $DS_RUN
RUN_TIME=$SECONDS
- echo "Runtime: ${RUN_TIME} seconds (should be < 20)"
+ echo "Runtime: ${RUN_TIME} seconds (should be <= $RERUN_LIMIT)"
fi
-test $RUN_TIME -lt 20
+test $RUN_TIME -le $RERUN_LIMIT
if [[ "$COPY_FILES" == "false" ]]; then
echo "Not copying files"
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index ab4e493e4..1e881f1b7 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -77,7 +77,7 @@ nav:
- Preparations for source-level analyses: getting_started/freesurfer.md
- Processing steps:
- Overview: features/overview.md
- - Detailed list of processing steps: features/steps.md
+ - List of processing steps: features/steps.md
- Configuration options:
- General settings: settings/general.md
- Preprocessing:
diff --git a/docs/source/features/gen_steps.py b/docs/source/features/gen_steps.py
index 2b3cc3bd7..ad4d7ae1c 100755
--- a/docs/source/features/gen_steps.py
+++ b/docs/source/features/gen_steps.py
@@ -13,30 +13,49 @@
steps_pre = f"""\
{autogen_header}
-# Detailed list of processing steps
+# List of processing steps
The following table provides a concise summary of each processing step. The
step names can be used to run individual steps or entire groups of steps by
-passing their name(s) to `mne_bids_pipeline` via the `steps=...` argument.
+passing their name(s) to `mne_bids_pipeline` via the `steps=...` argument. However,
+we recommend running the entire pipeline via `mne_bids_pipeline config.py`
+instead, to ensure that all steps affected by a given change are re-run.
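+
+For example, to run only the preprocessing steps (a sketch -- consult the
+command-line help for the exact invocation on your version):
+
+```
+mne_bids_pipeline config.py steps=preprocessing
+```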
""" # noqa: E501
overview_pre = f"""\
{autogen_header}
+# MNE-BIDS-Pipeline overview
+
MNE-BIDS-Pipeline processes your data in a sequential manner, i.e., one step
at a time. The next step is only run after the previous steps have been
successfully completed. There are, of course, exceptions; for example, if you
-chose not to apply ICA, the respective steps will simply be omitted and we'll
-directly move to the subsequent steps. The following flow chart aims to give
-you a brief overview of which steps are included in the pipeline, in which
-order they are run, and how we group them together.
+chose not to apply ICA or SSP, the spatial filtering steps will simply be omitted and
+we'll directly move to the subsequent steps. See [the flowchart below](#flowchart) for
+a visualization of the steps, or check out the
+[list of processing steps](steps.md) for more information.
+
+All intermediate results are saved to disk for later inspection, and an
+**extensive report** is generated. Analyses are conducted at the individual
+(per-subject) as well as the group level.
+
+## Caching
+
+MNE-BIDS-Pipeline offers automated caching of intermediate results. This means that
+running `mne_bids_pipeline config.py` once will generate all outputs, and running it
+again will only re-run the steps that need rerunning based on:
+
+1. Changes to files on disk (e.g., updates to `bids_root` files), and
+2. Changes to `config.py`
-!!! info
- All intermediate results are saved to disk for later
- inspection, and an **extensive report** is generated.
+This is particularly useful while you are developing your analysis, as you can
+quickly iterate over configuration changes without having to re-run the entire
+pipeline every time -- only the steps that actually need re-running will be
+executed.
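+
+For example, a typical development loop (a sketch) might look like:
+
+```
+mne_bids_pipeline config.py  # first run: every step executes
+# ... edit config.py ...
+mne_bids_pipeline config.py  # second run: only affected steps re-run
+```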
-!!! info
- Analyses are conducted on individual (per-subject) as well as group level.
+## Flowchart
+
+For more detailed information on each step, please refer to the [detailed list
+of processing steps](steps.md).
"""
icon_map = {
@@ -61,25 +80,26 @@
("02", "03"),
("03", "04"),
("04", "05"),
- ("05", "06a"),
+ ("05", "06a1"),
+ ("06a1", "06a2"),
("05", "06b"),
("05", "07"),
# technically we could have the raw data flow here, but it doesn't really help
# ("05", "08a"),
# ("05", "08b"),
- ("06a", "08a"),
- ("07", "08a"),
+ ("06a2", "08a"),
# Force the artifact-fitting and epoching steps on the same level, in this order
"""\
subgraph Z[" "]
direction LR
- B06a
+ B06a1
B07
B06b
end
style Z fill:#0000,stroke-width:0px
""",
("06b", "08b"),
+ ("07", "08a"),
("07", "08b"),
("08a", "09"),
("08b", "09"),
@@ -120,7 +140,10 @@
# Overview
overview_lines.append(
f"""\
-## {module_header}
+### {module_header}
+
+<details>
+<summary>Click to expand</summary>
```mermaid
flowchart TD"""
@@ -161,7 +184,7 @@
assert isinstance(a_b, tuple), type(a_b)
a_b = list(a_b) # allow modification
for ii, idx in enumerate(a_b):
- assert idx in title_map, (dir_header, sorted(title_map))
+ assert idx in title_map, (dir_header, idx, sorted(title_map))
if idx not in mapped:
mapped.add(idx)
a_b[ii] = f'{idx}["{title_map[idx]}"]'
@@ -173,7 +196,7 @@
)
)
assert mapped == all_steps, all_steps.symmetric_difference(mapped)
- overview_lines.append("```\n")
+    overview_lines.append("```\n\n</details>\n")
(out_dir / "steps.md").write_text("\n".join(lines), encoding="utf8")
(out_dir / "overview.md").write_text("\n".join(overview_lines), encoding="utf8")
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 9abe51c96..99d31f48d 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -10,6 +10,7 @@
- Added saving of detected blink and cardiac events used to calculate SSP projectors (#840 by @larsoner)
- Added [`noise_cov_method`][mne_bids_pipeline._config.noise_cov_method] to allow for the use of methods other than `"shrunk"` for noise covariance estimation (#854 by @larsoner)
- Added option to pass `image_kwargs` to [`mne.Report.add_epochs`] to allow adjusting e.g. `"vmin"` and `"vmax"` of the epochs image in the report via [`report_add_epochs_image_kwargs`][mne_bids_pipeline._config.report_add_epochs_image_kwargs] (#848 by @SophieHerbst)
+- Split ICA fitting and artifact detection into separate steps. This means that now, ICA is split into a total of three consecutive steps: fitting, artifact detection, and the actual data cleaning step ("applying ICA"). This makes it easier to experiment with different settings for artifact detection without needing to re-fit ICA. (#865 by @larsoner)
[//]: # (### :warning: Behavior changes)
@@ -31,6 +32,7 @@
- Changed the default for [`ica_n_components`][mne_bids_pipeline._config.ica_n_components] from `0.8` (too conservative) to `None` to match MNE-Python's default (#853 by @larsoner)
- Prevent events table for the average subject overflowing in reports (#854 by @larsoner)
- Fixed split file behavior for Epochs when using ICA (#855 by @larsoner)
+- Fixed a bug where users could not set `_components.tsv` as it would be detected as a cache miss and overwritten on next pipeline run (#865 by @larsoner)
### :medical_symbol: Code health
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index 4df0be691..e607de98f 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -30,27 +30,32 @@ def _open_report(
session: Optional[str],
run: Optional[str] = None,
task: Optional[str] = None,
+ fname_report: Optional[BIDSPath] = None,
+ name: str = "report",
):
- fname_report = BIDSPath(
- subject=subject,
- session=session,
- # Report is across all runs, but for logging purposes it's helpful
- # to pass the run and task for gen_log_kwargs
- run=None,
- task=cfg.task,
- acquisition=cfg.acq,
- recording=cfg.rec,
- space=cfg.space,
- extension=".h5",
- datatype=cfg.datatype,
- root=cfg.deriv_root,
- suffix="report",
- check=False,
- ).fpath
+ if fname_report is None:
+ fname_report = BIDSPath(
+ subject=subject,
+ session=session,
+ # Report is across all runs, but for logging purposes it's helpful
+ # to pass the run and task for gen_log_kwargs
+ run=None,
+ task=cfg.task,
+ acquisition=cfg.acq,
+ recording=cfg.rec,
+ space=cfg.space,
+ extension=".h5",
+ datatype=cfg.datatype,
+ root=cfg.deriv_root,
+ suffix="report",
+ check=False,
+ )
+ fname_report = fname_report.fpath
+ assert fname_report.suffix == ".h5", fname_report.suffix
# prevent parallel file access
with FileLock(f"{fname_report}.lock"), _agg_backend():
if not fname_report.is_file():
- msg = "Initializing report HDF5 file"
+ msg = f"Initializing {name} HDF5 file"
logger.info(**gen_log_kwargs(message=msg))
report = _gen_empty_report(
cfg=cfg,
@@ -62,7 +67,7 @@ def _open_report(
report = mne.open_report(fname_report)
except Exception as exc:
raise exc.__class__(
- f"Could not open report HDF5 file:\n{fname_report}\n"
+ f"Could not open {name} HDF5 file:\n{fname_report}\n"
f"Got error:\n{exc}\nPerhaps you need to delete it?"
) from None
try:
@@ -80,7 +85,7 @@ def _open_report(
except Exception as exc:
logger.warning(f"Failed: {exc}")
fname_report_html = fname_report.with_suffix(".html")
- msg = f"Saving report: {_linkfile(fname_report_html)}"
+ msg = f"Saving {name}: {_linkfile(fname_report_html)}"
logger.info(**gen_log_kwargs(message=msg))
report.save(fname_report, overwrite=True)
report.save(fname_report_html, overwrite=True, open_browser=False)
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index 748fd83ca..1f033bc5b 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -4,6 +4,7 @@
import functools
import hashlib
import inspect
+import json
import pathlib
import pdb
import sys
@@ -277,23 +278,36 @@ def clear(self) -> None:
self.memory.clear()
-def save_logs(*, config: SimpleNamespace, logs) -> None: # TODO add type
+def save_logs(*, config: SimpleNamespace, logs: list[pd.Series]) -> None:
fname = config.deriv_root / f"task-{get_task(config)}_log.xlsx"
# Get the script from which the function is called for logging
sheet_name = _short_step_path(_get_step_path()).replace("/", "-")
sheet_name = sheet_name[-30:] # shorten due to limit of excel format
- df = pd.DataFrame(logs)
-
- columns = df.columns
- if "cfg" in columns:
- columns = list(columns)
- idx = columns.index("cfg")
- del columns[idx]
- columns.insert(-3, "cfg") # put it before time, success & err cols
-
- df = df[columns]
+ # We need to make the logs more compact to be able to write Excel format
+ # (32767 char limit per cell), in particular the "cfg" column has very large
+ # cells, so replace the "cfg" column with separated cfg.* columns (still truncated
+ # to the 32767 char limit)
+ compact_logs = list()
+ for log in logs:
+ log = log.copy()
+ # 1. Remove indentation (e.g., 220814 chars to 54416)
+ cfg = json.loads(log["cfg"])
+ del log["cfg"]
+ assert cfg["__instance_type__"] == ["types", "SimpleNamespace"], cfg[
+ "__instance_type__"
+ ]
+ for key, val in cfg["attributes"].items():
+ if isinstance(val, dict) and list(val.keys()) == ["__pathlib__"]:
+ val = val["__pathlib__"]
+ val = json.dumps(val, separators=(",", ":"))
+ if len(val) > 32767:
+ val = val[:32765] + " …"
+ log[f"cfg.{key}"] = val
+ compact_logs.append(log)
+ df = pd.DataFrame(compact_logs)
+ del logs, compact_logs
with FileLock(fname.with_suffix(fname.suffix + ".lock")):
append = fname.exists()
diff --git a/mne_bids_pipeline/steps/preprocessing/_06a1_fit_ica.py b/mne_bids_pipeline/steps/preprocessing/_06a1_fit_ica.py
new file mode 100644
index 000000000..79e15a235
--- /dev/null
+++ b/mne_bids_pipeline/steps/preprocessing/_06a1_fit_ica.py
@@ -0,0 +1,354 @@
+"""Fit ICA.
+
+This fits Independent Component Analysis (ICA) on high-pass filtered raw data,
+temporarily creating task-related epochs. The epochs created here are used for
+the purpose of fitting ICA only, and will not enter any other processing steps.
+
+Before performing ICA, we reject epochs based on peak-to-peak amplitude above
+the 'ica_reject' limits to remove high-amplitude non-biological artifacts
+(e.g., voltage or flux spikes).
+"""
+
+from types import SimpleNamespace
+from typing import Optional
+
+import autoreject
+import mne
+import numpy as np
+from mne.preprocessing import ICA
+from mne_bids import BIDSPath
+
+from ..._config_utils import (
+ _bids_kwargs,
+ get_eeg_reference,
+ get_runs,
+ get_sessions,
+ get_subjects,
+)
+from ..._import_data import annotations_to_events, make_epochs
+from ..._logging import gen_log_kwargs, logger
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._reject import _get_reject
+from ..._report import _open_report
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
+
+
+def get_input_fnames_run_ica(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+) -> dict:
+ bids_basename = BIDSPath(
+ subject=subject,
+ session=session,
+ task=cfg.task,
+ acquisition=cfg.acq,
+ recording=cfg.rec,
+ space=cfg.space,
+ datatype=cfg.datatype,
+ root=cfg.deriv_root,
+ check=False,
+ extension=".fif",
+ )
+ in_files = dict()
+ for run in cfg.runs:
+ key = f"raw_run-{run}"
+ in_files[key] = bids_basename.copy().update(
+ run=run, processing=cfg.processing, suffix="raw"
+ )
+ _update_for_splits(in_files, key, single=True)
+ return in_files
+
+
+@failsafe_run(
+ get_input_fnames=get_input_fnames_run_ica,
+)
+def run_ica(
+ *,
+ cfg: SimpleNamespace,
+ exec_params: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ in_files: dict,
+) -> dict:
+ """Run ICA."""
+ import matplotlib.pyplot as plt
+
+ raw_fnames = [in_files.pop(f"raw_run-{run}") for run in cfg.runs]
+ out_files = dict()
+ bids_basename = raw_fnames[0].copy().update(processing=None, split=None, run=None)
+ out_files["ica"] = bids_basename.copy().update(processing="icafit", suffix="ica")
+ out_files["epochs"] = (
+ out_files["ica"].copy().update(suffix="epo", processing="icafit")
+ )
+ out_files["report"] = bids_basename.copy().update(
+ processing="icafit", suffix="report", extension=".h5"
+ )
+ del bids_basename
+
+ # Generate a list of raw data paths (i.e., paths of individual runs)
+ # we want to create epochs from.
+
+ # Generate a unique event name -> event code mapping that can be used
+ # across all runs.
+ event_name_to_code_map = annotations_to_events(raw_paths=raw_fnames)
+
+ epochs = None
+ for idx, (run, raw_fname) in enumerate(zip(cfg.runs, raw_fnames)):
+ msg = f"Processing raw data from {raw_fname.basename}"
+ logger.info(**gen_log_kwargs(message=msg))
+ raw = mne.io.read_raw_fif(raw_fname, preload=True)
+
+ # Produce high-pass filtered version of the data for ICA.
+ # Sanity check – make sure we're using the correct data!
+ if cfg.raw_resample_sfreq is not None:
+ assert np.allclose(raw.info["sfreq"], cfg.raw_resample_sfreq)
+ if cfg.l_freq is not None:
+ assert np.allclose(raw.info["highpass"], cfg.l_freq)
+
+ if idx == 0:
+ if cfg.ica_l_freq is None:
+ msg = (
+ f"Not applying high-pass filter (data is already filtered, "
+ f'cutoff: {raw.info["highpass"]} Hz).'
+ )
+ logger.info(**gen_log_kwargs(message=msg))
+ else:
+ msg = f"Applying high-pass filter with {cfg.ica_l_freq} Hz cutoff …"
+ logger.info(**gen_log_kwargs(message=msg))
+ raw.filter(l_freq=cfg.ica_l_freq, h_freq=None, n_jobs=1)
+
+ # Only keep the subset of the mapping that applies to the current run
+ event_id = event_name_to_code_map.copy()
+ for event_name in event_id.copy().keys():
+ if event_name not in raw.annotations.description:
+ del event_id[event_name]
+
+ if idx == 0:
+ msg = "Creating task-related epochs …"
+ logger.info(**gen_log_kwargs(message=msg))
+ these_epochs = make_epochs(
+ subject=subject,
+ session=session,
+ task=cfg.task,
+ conditions=cfg.conditions,
+ raw=raw,
+ event_id=event_id,
+ tmin=cfg.epochs_tmin,
+ tmax=cfg.epochs_tmax,
+ metadata_tmin=cfg.epochs_metadata_tmin,
+ metadata_tmax=cfg.epochs_metadata_tmax,
+ metadata_keep_first=cfg.epochs_metadata_keep_first,
+ metadata_keep_last=cfg.epochs_metadata_keep_last,
+ metadata_query=cfg.epochs_metadata_query,
+ event_repeated=cfg.event_repeated,
+ epochs_decim=cfg.epochs_decim,
+ task_is_rest=cfg.task_is_rest,
+ rest_epochs_duration=cfg.rest_epochs_duration,
+ rest_epochs_overlap=cfg.rest_epochs_overlap,
+ )
+
+ these_epochs.load_data() # Remove reference to raw
+ del raw # free memory
+
+ if epochs is None:
+ epochs = these_epochs
+ else:
+ epochs = mne.concatenate_epochs([epochs, these_epochs], on_mismatch="warn")
+
+ del these_epochs
+ del run
+
+ # Set an EEG reference
+ if "eeg" in cfg.ch_types:
+ projection = True if cfg.eeg_reference == "average" else False
+ epochs.set_eeg_reference(cfg.eeg_reference, projection=projection)
+
+ ar_reject_log = ar_n_interpolate_ = None
+ if cfg.ica_reject == "autoreject_local":
+ msg = (
+ "Using autoreject to find bad epochs for ICA "
+ "(no interpolation will be performend)"
+ )
+ logger.info(**gen_log_kwargs(message=msg))
+ ar = autoreject.AutoReject(
+ n_interpolate=cfg.autoreject_n_interpolate,
+ random_state=cfg.random_state,
+ n_jobs=exec_params.n_jobs,
+ verbose=False,
+ )
+ ar.fit(epochs)
+ ar_reject_log = ar.get_reject_log(epochs)
+ epochs = epochs[~ar_reject_log.bad_epochs]
+ ar_n_interpolate_ = ar.n_interpolate_
+ msg = (
+ f"autoreject marked {ar_reject_log.bad_epochs.sum()} epochs as bad "
+ f"(cross-validated n_interpolate limit: {ar_n_interpolate_})"
+ )
+ logger.info(**gen_log_kwargs(message=msg))
+ del ar
+ else:
+ # Reject epochs based on peak-to-peak rejection thresholds
+ ica_reject = _get_reject(
+ subject=subject,
+ session=session,
+ reject=cfg.ica_reject,
+ ch_types=cfg.ch_types,
+ param="ica_reject",
+ )
+ msg = f"Using PTP rejection thresholds: {ica_reject}"
+ logger.info(**gen_log_kwargs(message=msg))
+ epochs.drop_bad(reject=ica_reject)
+ ar = None
+ msg = "Saving ICA epochs to disk."
+ logger.info(**gen_log_kwargs(message=msg))
+ epochs.save(
+ out_files["epochs"],
+ overwrite=True,
+ split_naming="bids",
+ split_size=cfg._epochs_split_size,
+ )
+ _update_for_splits(out_files, "epochs")
+
+ msg = f"Calculating ICA solution using method: {cfg.ica_algorithm}."
+ logger.info(**gen_log_kwargs(message=msg))
+
+ algorithm = cfg.ica_algorithm
+ fit_params = None
+
+ if algorithm == "picard":
+ fit_params = dict(fastica_it=5)
+ elif algorithm == "picard-extended_infomax":
+ algorithm = "picard"
+ fit_params = dict(ortho=False, extended=True)
+ elif algorithm == "extended_infomax":
+ algorithm = "infomax"
+ fit_params = dict(extended=True)
+
+ ica = ICA(
+ method=algorithm,
+ random_state=cfg.random_state,
+ n_components=cfg.ica_n_components,
+ fit_params=fit_params,
+ max_iter=cfg.ica_max_iterations,
+ )
+ ica.fit(epochs, decim=cfg.ica_decim)
+ explained_var = (
+ ica.pca_explained_variance_[: ica.n_components_].sum()
+ / ica.pca_explained_variance_.sum()
+ )
+ msg = (
+ f"Fit {ica.n_components_} components (explaining "
+ f"{round(explained_var * 100, 1)}% of the variance) in "
+ f"{ica.n_iter_} iterations."
+ )
+ logger.info(**gen_log_kwargs(message=msg))
+ msg = "Saving ICA solution to disk."
+ logger.info(**gen_log_kwargs(message=msg))
+ ica.save(out_files["ica"], overwrite=True)
+
+ # Start a report
+ with _open_report(
+ cfg=cfg,
+ exec_params=exec_params,
+ subject=subject,
+ session=session,
+ task=cfg.task,
+ fname_report=out_files["report"],
+ name="ICA.fit report",
+ ) as report:
+ report.title = f"ICA – {report.title}"
+ if cfg.ica_reject == "autoreject_local":
+ caption = (
+ f"Autoreject was run to produce cleaner epochs before fitting ICA. "
+ f"{ar_reject_log.bad_epochs.sum()} epochs were rejected because more "
+ f"than {ar_n_interpolate_} channels were bad (cross-validated "
+ f"n_interpolate limit; excluding globally bad and non-data channels, "
+ f"shown in white). Note that none of the blue segments were actually "
+ f"interpolated before submitting the data to ICA. This is following "
+ f"the recommended approach for ICA described in the the Autoreject "
+ f"documentation."
+ )
+ fig = ar_reject_log.plot(
+ orientation="horizontal", aspect="auto", show=False
+ )
+ report.add_figure(
+ fig=fig,
+ title="Epochs: Autoreject cleaning",
+ caption=caption,
+ tags=("ica", "epochs", "autoreject"),
+ replace=True,
+ )
+ plt.close(fig)
+ del caption
+ report.add_epochs(
+ epochs=epochs,
+ title="Epochs used for ICA fitting",
+ drop_log_ignore=(),
+ replace=True,
+ )
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
+
+
+def get_config(
+ *,
+ config: SimpleNamespace,
+ subject: str,
+ session: Optional[str] = None,
+) -> SimpleNamespace:
+ cfg = SimpleNamespace(
+ conditions=config.conditions,
+ runs=get_runs(config=config, subject=subject),
+ task_is_rest=config.task_is_rest,
+ ica_l_freq=config.ica_l_freq,
+ ica_algorithm=config.ica_algorithm,
+ ica_n_components=config.ica_n_components,
+ ica_max_iterations=config.ica_max_iterations,
+ ica_decim=config.ica_decim,
+ ica_reject=config.ica_reject,
+ autoreject_n_interpolate=config.autoreject_n_interpolate,
+ random_state=config.random_state,
+ ch_types=config.ch_types,
+ l_freq=config.l_freq,
+ epochs_decim=config.epochs_decim,
+ raw_resample_sfreq=config.raw_resample_sfreq,
+ event_repeated=config.event_repeated,
+ epochs_tmin=config.epochs_tmin,
+ epochs_tmax=config.epochs_tmax,
+ epochs_metadata_tmin=config.epochs_metadata_tmin,
+ epochs_metadata_tmax=config.epochs_metadata_tmax,
+ epochs_metadata_keep_first=config.epochs_metadata_keep_first,
+ epochs_metadata_keep_last=config.epochs_metadata_keep_last,
+ epochs_metadata_query=config.epochs_metadata_query,
+ eeg_reference=get_eeg_reference(config),
+ eog_channels=config.eog_channels,
+ rest_epochs_duration=config.rest_epochs_duration,
+ rest_epochs_overlap=config.rest_epochs_overlap,
+ processing="filt" if config.regress_artifact is None else "regress",
+ _epochs_split_size=config._epochs_split_size,
+ **_bids_kwargs(config=config),
+ )
+ return cfg
+
+
+def main(*, config: SimpleNamespace) -> None:
+ """Run ICA."""
+ if config.spatial_filter != "ica":
+ msg = "Skipping …"
+ logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
+ return
+
+ with get_parallel_backend(config.exec_params):
+ parallel, run_func = parallel_func(run_ica, exec_params=config.exec_params)
+ logs = parallel(
+ run_func(
+ cfg=get_config(config=config, subject=subject),
+ exec_params=config.exec_params,
+ subject=subject,
+ session=session,
+ )
+ for subject in get_subjects(config)
+ for session in get_sessions(config)
+ )
+ save_logs(config=config, logs=logs)
diff --git a/mne_bids_pipeline/steps/preprocessing/_06a2_find_ica_artifacts.py b/mne_bids_pipeline/steps/preprocessing/_06a2_find_ica_artifacts.py
new file mode 100644
index 000000000..d85398bfc
--- /dev/null
+++ b/mne_bids_pipeline/steps/preprocessing/_06a2_find_ica_artifacts.py
@@ -0,0 +1,399 @@
+"""Find ICA artifacts.
+
+This step automatically finds ECG- and EOG-related ICs in your data, and sets them
+as bad components.
+
+To actually remove designated ICA components from your data, you will have to
+run the apply_ica step.
+"""
+
+import shutil
+from types import SimpleNamespace
+from typing import Literal, Optional
+
+import mne
+import numpy as np
+import pandas as pd
+from mne.preprocessing import create_ecg_epochs, create_eog_epochs
+from mne_bids import BIDSPath
+
+from ..._config_utils import (
+ _bids_kwargs,
+ get_eeg_reference,
+ get_runs,
+ get_sessions,
+ get_subjects,
+)
+from ..._logging import gen_log_kwargs, logger
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._report import _open_report
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
+
+
+def detect_bad_components(
+ *,
+ cfg,
+ which: Literal["eog", "ecg"],
+ epochs: Optional[mne.BaseEpochs],
+ ica: mne.preprocessing.ICA,
+ ch_names: Optional[list[str]],
+ subject: str,
+ session: Optional[str],
+) -> tuple[list[int], np.ndarray]:
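+    """Find EOG- or ECG-related ICs via MNE's find_bads_* routines."""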
+ artifact = which.upper()
+ if epochs is None:
+ msg = (
+ f"No {artifact} events could be found. "
+ f"Not running {artifact} artifact detection."
+ )
+ logger.info(**gen_log_kwargs(message=msg))
+ return [], []
+ msg = f"Performing automated {artifact} artifact detection …"
+ logger.info(**gen_log_kwargs(message=msg))
+
+ if which == "eog":
+ inds, scores = ica.find_bads_eog(
+ epochs,
+ threshold=cfg.ica_eog_threshold,
+ ch_name=ch_names,
+ )
+ else:
+ inds, scores = ica.find_bads_ecg(
+ epochs,
+ method="ctps",
+ threshold=cfg.ica_ctps_ecg_threshold,
+ ch_name=ch_names,
+ )
+
+ if not inds:
+ adjust_setting = (
+ "ica_eog_threshold" if which == "eog" else "ica_ctps_ecg_threshold"
+ )
+ warn = (
+ f"No {artifact}-related ICs detected, this is highly "
+ f"suspicious. A manual check is suggested. You may wish to "
+ f'lower "{adjust_setting}".'
+ )
+ logger.warning(**gen_log_kwargs(message=warn))
+ else:
+ msg = (
+ f"Detected {len(inds)} {artifact}-related ICs in "
+ f"{len(epochs)} {artifact} epochs."
+ )
+ logger.info(**gen_log_kwargs(message=msg))
+
+ return inds, scores
+
+
+def get_input_fnames_find_ica_artifacts(
+ *,
+ cfg: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+) -> dict:
+ bids_basename = BIDSPath(
+ subject=subject,
+ session=session,
+ task=cfg.task,
+ acquisition=cfg.acq,
+ recording=cfg.rec,
+ space=cfg.space,
+ datatype=cfg.datatype,
+ root=cfg.deriv_root,
+ check=False,
+ extension=".fif",
+ )
+ in_files = dict()
+ in_files["epochs"] = bids_basename.copy().update(processing="icafit", suffix="epo")
+ _update_for_splits(in_files, "epochs", single=True)
+ for run in cfg.runs:
+ key = f"raw_run-{run}"
+ in_files[key] = bids_basename.copy().update(
+ run=run, processing=cfg.processing, suffix="raw"
+ )
+ _update_for_splits(in_files, key, single=True)
+ in_files["ica"] = bids_basename.copy().update(processing="icafit", suffix="ica")
+ in_files["report"] = bids_basename.copy().update(
+ processing="icafit", suffix="report", extension=".h5"
+ )
+ return in_files
+
+
+@failsafe_run(
+ get_input_fnames=get_input_fnames_find_ica_artifacts,
+)
+def find_ica_artifacts(
+ *,
+ cfg: SimpleNamespace,
+ exec_params: SimpleNamespace,
+ subject: str,
+ session: Optional[str],
+ in_files: dict,
+) -> dict:
+ """Run ICA."""
+ raw_fnames = [in_files.pop(f"raw_run-{run}") for run in cfg.runs]
+ bids_basename = raw_fnames[0].copy().update(processing=None, split=None, run=None)
+ out_files = dict()
+ out_files["ica"] = bids_basename.copy().update(processing="ica", suffix="ica")
+    # DO NOT add this to out_files, because we expect it to be modified by users.
+    # If they modify it and it's in out_files, caching will detect the hash change and
+ # consider *this step* a cache miss, and it will run again, overwriting the user's
+ # changes. Instead, we want the ica.apply step to rerun (which it will if the
+ # file changes).
+ out_files_components = bids_basename.copy().update(
+ processing="ica", suffix="components", extension=".tsv"
+ )
+ out_files["report"] = bids_basename.copy().update(
+ processing="ica+components", suffix="report", extension=".h5"
+ )
+ del bids_basename
+ msg = "Loading ICA solution"
+ logger.info(**gen_log_kwargs(message=msg))
+ ica = mne.preprocessing.read_ica(in_files.pop("ica"))
+
+ # Epochs used for ICA fitting
+ epochs = mne.read_epochs(in_files.pop("epochs"), preload=True)
+
+ # ECG component detection
+ epochs_ecg = None
+ ecg_ics, ecg_scores = [], []
+ for ri, raw_fname in enumerate(raw_fnames):
+        # Check whether we have the channels needed to make ECG epochs
+ raw = mne.io.read_raw(raw_fname, preload=False)
+ # ECG epochs
+ if not (
+ "ecg" in raw.get_channel_types()
+ or "meg" in cfg.ch_types
+ or "mag" in cfg.ch_types
+ ):
+ msg = (
+ "No ECG or magnetometer channels are present, cannot "
+ "automate artifact detection for ECG."
+ )
+ logger.info(**gen_log_kwargs(message=msg))
+ break
+ elif ri == 0:
+ msg = "Creating ECG epochs …"
+ logger.info(**gen_log_kwargs(message=msg))
+
+ # We want to extract a total of 5 min of data for ECG epochs generation
+ # (across all runs)
+ total_ecg_dur = 5 * 60
+ ecg_dur_per_run = total_ecg_dur / len(raw_fnames)
+ t_mid = (raw.times[-1] + raw.times[0]) / 2
+ raw = raw.crop(
+ tmin=max(t_mid - 1 / 2 * ecg_dur_per_run, 0),
+ tmax=min(t_mid + 1 / 2 * ecg_dur_per_run, raw.times[-1]),
+ ).load_data()
+
+ these_ecg_epochs = create_ecg_epochs(
+ raw,
+ baseline=(None, -0.2),
+ tmin=-0.5,
+ tmax=0.5,
+ )
+ del raw # Free memory
+ if len(these_ecg_epochs):
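+            # Apply the same PTP rejection limits that were used for the
+            # ICA-fit epochs (stored on the loaded epochs object)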
+ if epochs.reject is not None:
+ these_ecg_epochs.drop_bad(reject=epochs.reject)
+ if len(these_ecg_epochs):
+ if epochs_ecg is None:
+ epochs_ecg = these_ecg_epochs
+ else:
+ epochs_ecg = mne.concatenate_epochs(
+ [epochs_ecg, these_ecg_epochs], on_mismatch="warn"
+ )
+ del these_ecg_epochs
+ else: # did not break so had usable channels
+ ecg_ics, ecg_scores = detect_bad_components(
+ cfg=cfg,
+ which="ecg",
+ epochs=epochs_ecg,
+ ica=ica,
+ ch_names=None, # we currently don't allow for custom channels
+ subject=subject,
+ session=session,
+ )
+
+ # EOG component detection
+ epochs_eog = None
+ eog_ics = eog_scores = []
+ for ri, raw_fname in enumerate(raw_fnames):
+ raw = mne.io.read_raw_fif(raw_fname, preload=True)
+ if cfg.eog_channels:
+ ch_names = cfg.eog_channels
+ assert all([ch_name in raw.ch_names for ch_name in ch_names])
+ else:
+ eog_picks = mne.pick_types(raw.info, meg=False, eog=True)
+ ch_names = [raw.ch_names[pick] for pick in eog_picks]
+ if not ch_names:
+ msg = "No EOG channel is present, cannot automate IC detection for EOG."
+ logger.info(**gen_log_kwargs(message=msg))
+ break
+ elif ri == 0:
+ msg = "Creating EOG epochs …"
+ logger.info(**gen_log_kwargs(message=msg))
+ these_eog_epochs = create_eog_epochs(
+ raw,
+ ch_name=ch_names,
+ baseline=(None, -0.2),
+ )
+ if len(these_eog_epochs):
+ if epochs.reject is not None:
+ these_eog_epochs.drop_bad(reject=epochs.reject)
+ if len(these_eog_epochs):
+ if epochs_eog is None:
+ epochs_eog = these_eog_epochs
+ else:
+ epochs_eog = mne.concatenate_epochs(
+ [epochs_eog, these_eog_epochs], on_mismatch="warn"
+ )
+ else: # did not break
+ eog_ics, eog_scores = detect_bad_components(
+ cfg=cfg,
+ which="eog",
+ epochs=epochs_eog,
+ ica=ica,
+ ch_names=cfg.eog_channels,
+ subject=subject,
+ session=session,
+ )
+
+ # Save updated ICA to disk.
+ # We also store the automatically identified ECG- and EOG-related ICs.
+ msg = "Saving ICA solution and detected artifacts to disk."
+ logger.info(**gen_log_kwargs(message=msg))
+ ica.exclude = sorted(set(ecg_ics + eog_ics))
+ ica.save(out_files["ica"], overwrite=True)
+
+ # Create TSV.
+ tsv_data = pd.DataFrame(
+ dict(
+ component=list(range(ica.n_components_)),
+ type=["ica"] * ica.n_components_,
+ description=["Independent Component"] * ica.n_components_,
+ status=["good"] * ica.n_components_,
+ status_description=["n/a"] * ica.n_components_,
+ )
+ )
+
+ for component in ecg_ics:
+ row_idx = tsv_data["component"] == component
+ tsv_data.loc[row_idx, "status"] = "bad"
+ tsv_data.loc[row_idx, "status_description"] = "Auto-detected ECG artifact"
+
+ for component in eog_ics:
+ row_idx = tsv_data["component"] == component
+ tsv_data.loc[row_idx, "status"] = "bad"
+ tsv_data.loc[row_idx, "status_description"] = "Auto-detected EOG artifact"
+
+ tsv_data.to_csv(out_files_components, sep="\t", index=False)
+
+ # Lastly, add info about the epochs used for the ICA fit, and plot all ICs
+ # for manual inspection.
+
+ ecg_evoked = None if epochs_ecg is None else epochs_ecg.average()
+ eog_evoked = None if epochs_eog is None else epochs_eog.average()
+ ecg_scores = None if len(ecg_scores) == 0 else ecg_scores
+ eog_scores = None if len(eog_scores) == 0 else eog_scores
+
+ shutil.copyfile(in_files.pop("report"), out_files["report"])
+ with _open_report(
+ cfg=cfg,
+ exec_params=exec_params,
+ subject=subject,
+ session=session,
+ task=cfg.task,
+ fname_report=out_files["report"],
+ name="ICA report",
+ ) as report:
+ report.add_ica(
+ ica=ica,
+ title="ICA cleaning",
+ inst=epochs,
+ ecg_evoked=ecg_evoked,
+ eog_evoked=eog_evoked,
+ ecg_scores=ecg_scores,
+ eog_scores=eog_scores,
+ replace=True,
+ n_jobs=1, # avoid automatic parallelization
+ )
+
+ msg = (
+ f"ICA completed. Please carefully review the extracted ICs in the "
+ f"report {out_files['report'].basename}, and mark all components "
+ f"you wish to reject as 'bad' in "
+ f"{out_files_components.basename}"
+ )
+ logger.info(**gen_log_kwargs(message=msg))
+
+ report.save(
+ out_files["report"],
+ overwrite=True,
+ open_browser=exec_params.interactive,
+ )
+
+ assert len(in_files) == 0, in_files.keys()
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
+
+
+def get_config(
+ *,
+ config: SimpleNamespace,
+ subject: str,
+ session: Optional[str] = None,
+) -> SimpleNamespace:
+ cfg = SimpleNamespace(
+ conditions=config.conditions,
+ runs=get_runs(config=config, subject=subject),
+ task_is_rest=config.task_is_rest,
+ ica_l_freq=config.ica_l_freq,
+ ica_reject=config.ica_reject,
+ ica_eog_threshold=config.ica_eog_threshold,
+ ica_ctps_ecg_threshold=config.ica_ctps_ecg_threshold,
+ autoreject_n_interpolate=config.autoreject_n_interpolate,
+ random_state=config.random_state,
+ ch_types=config.ch_types,
+ l_freq=config.l_freq,
+ epochs_decim=config.epochs_decim,
+ raw_resample_sfreq=config.raw_resample_sfreq,
+ event_repeated=config.event_repeated,
+ epochs_tmin=config.epochs_tmin,
+ epochs_tmax=config.epochs_tmax,
+ epochs_metadata_tmin=config.epochs_metadata_tmin,
+ epochs_metadata_tmax=config.epochs_metadata_tmax,
+ epochs_metadata_keep_first=config.epochs_metadata_keep_first,
+ epochs_metadata_keep_last=config.epochs_metadata_keep_last,
+ epochs_metadata_query=config.epochs_metadata_query,
+ eeg_reference=get_eeg_reference(config),
+ eog_channels=config.eog_channels,
+ rest_epochs_duration=config.rest_epochs_duration,
+ rest_epochs_overlap=config.rest_epochs_overlap,
+ processing="filt" if config.regress_artifact is None else "regress",
+ **_bids_kwargs(config=config),
+ )
+ return cfg
+
+
+def main(*, config: SimpleNamespace) -> None:
+ """Run ICA."""
+ if config.spatial_filter != "ica":
+ msg = "Skipping …"
+ logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
+ return
+
+ with get_parallel_backend(config.exec_params):
+ parallel, run_func = parallel_func(
+ find_ica_artifacts, exec_params=config.exec_params
+ )
+ logs = parallel(
+ run_func(
+ cfg=get_config(config=config, subject=subject),
+ exec_params=config.exec_params,
+ subject=subject,
+ session=session,
+ )
+ for subject in get_subjects(config)
+ for session in get_sessions(config)
+ )
+ save_logs(config=config, logs=logs)
diff --git a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
deleted file mode 100644
index fb6f1b089..000000000
--- a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
+++ /dev/null
@@ -1,641 +0,0 @@
-"""Fit ICA.
-
-This fits Independent Component Analysis (ICA) on raw data filtered with 1 Hz highpass,
-temporarily creating task-related epochs.
-
-Before performing ICA, we reject epochs based on peak-to-peak amplitude above
-the 'ica_reject' to filter massive non-biological artifacts.
-
-To actually remove designated ICA components from your data, you will have to
-run the apply_ica step.
-"""
-
-from collections.abc import Iterable
-from types import SimpleNamespace
-from typing import Literal, Optional
-
-import autoreject
-import mne
-import numpy as np
-import pandas as pd
-from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
-from mne.report import Report
-from mne_bids import BIDSPath
-
-from ..._config_utils import (
- _bids_kwargs,
- get_eeg_reference,
- get_runs,
- get_sessions,
- get_subjects,
-)
-from ..._import_data import annotations_to_events, make_epochs
-from ..._logging import gen_log_kwargs, logger
-from ..._parallel import get_parallel_backend, parallel_func
-from ..._reject import _get_reject
-from ..._report import _agg_backend
-from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
-
-
-def filter_for_ica(
- *,
- cfg,
- raw: mne.io.BaseRaw,
- subject: str,
- session: Optional[str],
- run: Optional[str] = None,
-) -> None:
- """Apply a high-pass filter if needed."""
- if cfg.ica_l_freq is None:
- msg = (
- f"Not applying high-pass filter (data is already filtered, "
- f'cutoff: {raw.info["highpass"]} Hz).'
- )
- logger.info(**gen_log_kwargs(message=msg))
- else:
- msg = f"Applying high-pass filter with {cfg.ica_l_freq} Hz cutoff …"
- logger.info(**gen_log_kwargs(message=msg))
- raw.filter(l_freq=cfg.ica_l_freq, h_freq=None, n_jobs=1)
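
For context, `filter_for_ica` applies only a high-pass: ICA solutions degrade in the presence of slow drifts, so the data are filtered at `ica_l_freq` while the low-pass is left untouched. A self-contained sketch of the equivalent MNE call on synthetic data:

```python
import mne
import numpy as np

# Synthetic Raw object so the snippet runs without data on disk.
info = mne.create_info(ch_names=["EEG 001"], sfreq=250.0, ch_types="eeg")
raw = mne.io.RawArray(np.random.default_rng(0).standard_normal((1, 2500)), info)

# A 1 Hz high-pass is the common choice before ICA; h_freq=None keeps
# the low-pass untouched.
raw.filter(l_freq=1.0, h_freq=None)
print(raw.info["highpass"])  # 1.0
```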
-
-
-def fit_ica(
- *,
- cfg,
- epochs: mne.BaseEpochs,
- subject: str,
- session: Optional[str],
-) -> mne.preprocessing.ICA:
- algorithm = cfg.ica_algorithm
- fit_params = None
-
- if algorithm == "picard":
- fit_params = dict(fastica_it=5)
- elif algorithm == "picard-extended_infomax":
- algorithm = "picard"
- fit_params = dict(ortho=False, extended=True)
- elif algorithm == "extended_infomax":
- algorithm = "infomax"
- fit_params = dict(extended=True)
-
- ica = ICA(
- method=algorithm,
- random_state=cfg.random_state,
- n_components=cfg.ica_n_components,
- fit_params=fit_params,
- max_iter=cfg.ica_max_iterations,
- )
-
- ica.fit(epochs, decim=cfg.ica_decim)
-
- explained_var = (
- ica.pca_explained_variance_[: ica.n_components_].sum()
- / ica.pca_explained_variance_.sum()
- )
- msg = (
- f"Fit {ica.n_components_} components (explaining "
- f"{round(explained_var * 100, 1)}% of the variance) in "
- f"{ica.n_iter_} iterations."
- )
- logger.info(**gen_log_kwargs(message=msg))
- return ica
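
The algorithm-name mapping at the top of `fit_ica` is worth spelling out: the user-facing names `picard-extended_infomax` and `extended_infomax` are translated into an MNE `method` plus `fit_params`. A sketch of just that mapping (a hypothetical helper, not pipeline API):

```python
from mne.preprocessing import ICA

def make_ica(algorithm: str, random_state: int = 42) -> ICA:
    # Translate a user-facing algorithm name into method + fit_params.
    fit_params = None
    if algorithm == "picard":
        fit_params = dict(fastica_it=5)
    elif algorithm == "picard-extended_infomax":
        algorithm, fit_params = "picard", dict(ortho=False, extended=True)
    elif algorithm == "extended_infomax":
        algorithm, fit_params = "infomax", dict(extended=True)
    return ICA(method=algorithm, fit_params=fit_params, random_state=random_state)

ica = make_ica("picard-extended_infomax")
print(ica.method)  # picard
```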
-
-
-def make_ecg_epochs(
- *,
- cfg,
- raw_path: BIDSPath,
- subject: str,
- session: Optional[str],
- run: Optional[str] = None,
- n_runs: int,
-) -> Optional[mne.BaseEpochs]:
-    # ECG detection needs either an ECG channel or the average of the magnetometers (i.e., MEG data)
- raw = mne.io.read_raw(raw_path, preload=False)
-
- if (
- "ecg" in raw.get_channel_types()
- or "meg" in cfg.ch_types
- or "mag" in cfg.ch_types
- ):
- msg = "Creating ECG epochs …"
- logger.info(**gen_log_kwargs(message=msg))
-
- # We want to extract a total of 5 min of data for ECG epochs generation
- # (across all runs)
- total_ecg_dur = 5 * 60
- ecg_dur_per_run = total_ecg_dur / n_runs
- t_mid = (raw.times[-1] + raw.times[0]) / 2
- raw = raw.crop(
- tmin=max(t_mid - 1 / 2 * ecg_dur_per_run, 0),
- tmax=min(t_mid + 1 / 2 * ecg_dur_per_run, raw.times[-1]),
- ).load_data()
-
- ecg_epochs = create_ecg_epochs(raw, baseline=(None, -0.2), tmin=-0.5, tmax=0.5)
- del raw # Free memory
-
- if len(ecg_epochs) == 0:
-            msg = "No ECG events could be found. Not running ECG artifact detection."
- logger.info(**gen_log_kwargs(message=msg))
- ecg_epochs = None
- else:
- msg = (
- "No ECG or magnetometer channels are present. Cannot "
- "automate artifact detection for ECG"
- )
- logger.info(**gen_log_kwargs(message=msg))
- ecg_epochs = None
-
- return ecg_epochs
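
The cropping above aims for roughly five minutes of ECG data in total across runs, taking each run's share from around its midpoint. A worked example of the arithmetic with hypothetical numbers:

```python
# With 3 runs, each run contributes 100 s of the 5-minute ECG budget,
# centered on the run's temporal midpoint.
total_ecg_dur = 5 * 60
n_runs = 3
run_start, run_stop = 0.0, 480.0  # a hypothetical 8-minute run

ecg_dur_per_run = total_ecg_dur / n_runs           # 100.0 s
t_mid = (run_stop + run_start) / 2                 # 240.0 s
tmin = max(t_mid - ecg_dur_per_run / 2, 0)         # 190.0 s
tmax = min(t_mid + ecg_dur_per_run / 2, run_stop)  # 290.0 s
assert (tmin, tmax) == (190.0, 290.0)
```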
-
-
-def make_eog_epochs(
- *,
- raw: mne.io.BaseRaw,
- eog_channels: Optional[Iterable[str]],
- subject: str,
- session: Optional[str],
- run: Optional[str] = None,
-) -> Optional[mne.Epochs]:
- """Create EOG epochs. No rejection thresholds will be applied."""
- if eog_channels:
- ch_names = eog_channels
- assert all([ch_name in raw.ch_names for ch_name in ch_names])
- else:
- ch_idx = mne.pick_types(raw.info, meg=False, eog=True)
- ch_names = [raw.ch_names[i] for i in ch_idx]
- del ch_idx
-
- if ch_names:
- msg = "Creating EOG epochs …"
- logger.info(**gen_log_kwargs(message=msg))
-
- eog_epochs = create_eog_epochs(raw, ch_name=ch_names, baseline=(None, -0.2))
-
- if len(eog_epochs) == 0:
-            msg = "No EOG events could be found. Not running EOG artifact detection."
- logger.warning(**gen_log_kwargs(message=msg))
- eog_epochs = None
- else:
-        msg = "No EOG channel is present. Cannot automate IC detection for EOG."
- logger.info(**gen_log_kwargs(message=msg))
- eog_epochs = None
-
- return eog_epochs
-
-
-def detect_bad_components(
- *,
- cfg,
- which: Literal["eog", "ecg"],
- epochs: mne.BaseEpochs,
- ica: mne.preprocessing.ICA,
- ch_names: Optional[list[str]],
- subject: str,
- session: Optional[str],
-) -> tuple[list[int], np.ndarray]:
- artifact = which.upper()
- msg = f"Performing automated {artifact} artifact detection …"
- logger.info(**gen_log_kwargs(message=msg))
-
- if which == "eog":
- inds, scores = ica.find_bads_eog(
- epochs,
- threshold=cfg.ica_eog_threshold,
- ch_name=ch_names,
- )
- else:
- inds, scores = ica.find_bads_ecg(
- epochs,
- method="ctps",
- threshold=cfg.ica_ctps_ecg_threshold,
- ch_name=ch_names,
- )
-
- if not inds:
- adjust_setting = (
- "ica_eog_threshold" if which == "eog" else "ica_ctps_ecg_threshold"
- )
- warn = (
- f"No {artifact}-related ICs detected, this is highly "
- f"suspicious. A manual check is suggested. You may wish to "
- f'lower "{adjust_setting}".'
- )
- logger.warning(**gen_log_kwargs(message=warn))
- else:
- msg = (
- f"Detected {len(inds)} {artifact}-related ICs in "
- f"{len(epochs)} {artifact} epochs."
- )
- logger.info(**gen_log_kwargs(message=msg))
-
- return inds, scores
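
`detect_bad_components` wraps MNE's `ICA.find_bads_eog` and `ICA.find_bads_ecg` and, crucially, warns rather than fails when nothing is detected, pointing the user at the threshold to loosen. A sketch of just that reporting branch:

```python
def summarize_detection(which: str, inds: list[int]) -> str:
    # Mirrors the logging above: an empty hit list is suspicious and
    # names the config option the user may want to lower.
    adjust = "ica_eog_threshold" if which == "eog" else "ica_ctps_ecg_threshold"
    if not inds:
        return f"No {which.upper()}-related ICs detected; consider lowering {adjust!r}."
    return f"Detected {len(inds)} {which.upper()}-related ICs."

print(summarize_detection("ecg", []))
print(summarize_detection("eog", [0, 3]))
```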
-
-
-def get_input_fnames_run_ica(
- *,
- cfg: SimpleNamespace,
- subject: str,
- session: Optional[str],
-) -> dict:
- bids_basename = BIDSPath(
- subject=subject,
- session=session,
- task=cfg.task,
- acquisition=cfg.acq,
- recording=cfg.rec,
- space=cfg.space,
- datatype=cfg.datatype,
- root=cfg.deriv_root,
- check=False,
- )
- in_files = dict()
- for run in cfg.runs:
- key = f"raw_run-{run}"
- in_files[key] = bids_basename.copy().update(
- run=run, processing=cfg.processing, suffix="raw"
- )
- _update_for_splits(in_files, key, single=True)
- return in_files
-
-
-@failsafe_run(
- get_input_fnames=get_input_fnames_run_ica,
-)
-def run_ica(
- *,
- cfg: SimpleNamespace,
- exec_params: SimpleNamespace,
- subject: str,
- session: Optional[str],
- in_files: dict,
-) -> dict:
- """Run ICA."""
- raw_fnames = [in_files.pop(f"raw_run-{run}") for run in cfg.runs]
- bids_basename = raw_fnames[0].copy().update(processing=None, split=None, run=None)
- out_files = dict()
- out_files["ica"] = bids_basename.copy().update(suffix="ica", extension=".fif")
- out_files["components"] = bids_basename.copy().update(
- processing="ica", suffix="components", extension=".tsv"
- )
- out_files["report"] = bids_basename.copy().update(
- processing="ica+components", suffix="report", extension=".html"
- )
- del bids_basename
-
- # Generate a list of raw data paths (i.e., paths of individual runs)
- # we want to create epochs from.
-
- # Generate a unique event name -> event code mapping that can be used
- # across all runs.
- event_name_to_code_map = annotations_to_events(raw_paths=raw_fnames)
-
- # Now, generate epochs from each individual run
- eog_epochs_all_runs = None
- ecg_epochs_all_runs = None
-
- for idx, (run, raw_fname) in enumerate(zip(cfg.runs, raw_fnames)):
- msg = f"Loading filtered raw data from {raw_fname.basename}"
- logger.info(**gen_log_kwargs(message=msg))
-
- # ECG epochs
- ecg_epochs = make_ecg_epochs(
- cfg=cfg,
- raw_path=raw_fname,
- subject=subject,
- session=session,
- run=run,
- n_runs=len(cfg.runs),
- )
- if ecg_epochs is not None:
- if idx == 0:
- ecg_epochs_all_runs = ecg_epochs
- else:
- ecg_epochs_all_runs = mne.concatenate_epochs(
- [ecg_epochs_all_runs, ecg_epochs], on_mismatch="warn"
- )
-
- del ecg_epochs
-
- # EOG epochs
- raw = mne.io.read_raw_fif(raw_fname, preload=True)
- eog_epochs = make_eog_epochs(
- raw=raw,
- eog_channels=cfg.eog_channels,
- subject=subject,
- session=session,
- run=run,
- )
- if eog_epochs is not None:
- if idx == 0:
- eog_epochs_all_runs = eog_epochs
- else:
- eog_epochs_all_runs = mne.concatenate_epochs(
- [eog_epochs_all_runs, eog_epochs], on_mismatch="warn"
- )
-
- del eog_epochs
-
- # Produce high-pass filtered version of the data for ICA.
- # Sanity check – make sure we're using the correct data!
- if cfg.raw_resample_sfreq is not None:
- assert np.allclose(raw.info["sfreq"], cfg.raw_resample_sfreq)
- if cfg.l_freq is not None:
- assert np.allclose(raw.info["highpass"], cfg.l_freq)
-
- filter_for_ica(cfg=cfg, raw=raw, subject=subject, session=session, run=run)
-
- # Only keep the subset of the mapping that applies to the current run
- event_id = event_name_to_code_map.copy()
- for event_name in event_id.copy().keys():
- if event_name not in raw.annotations.description:
- del event_id[event_name]
-
- msg = "Creating task-related epochs …"
- logger.info(**gen_log_kwargs(message=msg))
- epochs = make_epochs(
- subject=subject,
- session=session,
- task=cfg.task,
- conditions=cfg.conditions,
- raw=raw,
- event_id=event_id,
- tmin=cfg.epochs_tmin,
- tmax=cfg.epochs_tmax,
- metadata_tmin=cfg.epochs_metadata_tmin,
- metadata_tmax=cfg.epochs_metadata_tmax,
- metadata_keep_first=cfg.epochs_metadata_keep_first,
- metadata_keep_last=cfg.epochs_metadata_keep_last,
- metadata_query=cfg.epochs_metadata_query,
- event_repeated=cfg.event_repeated,
- epochs_decim=cfg.epochs_decim,
- task_is_rest=cfg.task_is_rest,
- rest_epochs_duration=cfg.rest_epochs_duration,
- rest_epochs_overlap=cfg.rest_epochs_overlap,
- )
-
- epochs.load_data() # Remove reference to raw
- del raw # free memory
-
- if idx == 0:
- epochs_all_runs = epochs
- else:
- epochs_all_runs = mne.concatenate_epochs(
- [epochs_all_runs, epochs], on_mismatch="warn"
- )
-
- del epochs
-
- # Clean up namespace
- epochs = epochs_all_runs
- epochs_ecg = ecg_epochs_all_runs
- epochs_eog = eog_epochs_all_runs
-
- del epochs_all_runs, eog_epochs_all_runs, ecg_epochs_all_runs, run
-
- # Set an EEG reference
- if "eeg" in cfg.ch_types:
- projection = True if cfg.eeg_reference == "average" else False
- epochs.set_eeg_reference(cfg.eeg_reference, projection=projection)
-
- if cfg.ica_reject == "autoreject_local":
- msg = (
- "Using autoreject to find bad epochs before fitting ICA "
-            "(no interpolation will be performed)"
- )
- logger.info(**gen_log_kwargs(message=msg))
-
- ar = autoreject.AutoReject(
- n_interpolate=cfg.autoreject_n_interpolate,
- random_state=cfg.random_state,
- n_jobs=exec_params.n_jobs,
- verbose=False,
- )
- ar.fit(epochs)
- reject_log = ar.get_reject_log(epochs)
- epochs = epochs[~reject_log.bad_epochs]
- msg = (
- f"autoreject marked {reject_log.bad_epochs.sum()} epochs as bad "
- f"(cross-validated n_interpolate limit: {ar.n_interpolate_})"
- )
- logger.info(**gen_log_kwargs(message=msg))
- else:
- # Reject epochs based on peak-to-peak rejection thresholds
- ica_reject = _get_reject(
- subject=subject,
- session=session,
- reject=cfg.ica_reject,
- ch_types=cfg.ch_types,
- param="ica_reject",
- )
-
- msg = f"Using PTP rejection thresholds: {ica_reject}"
- logger.info(**gen_log_kwargs(message=msg))
-
- epochs.drop_bad(reject=ica_reject)
- if epochs_eog is not None:
- epochs_eog.drop_bad(reject=ica_reject)
- if epochs_ecg is not None:
- epochs_ecg.drop_bad(reject=ica_reject)
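
Two rejection paths exist here: `autoreject_local` cross-validates how many channels may be interpolated and drops epochs that exceed it, while the default path drops epochs on plain peak-to-peak (PTP) thresholds. A self-contained sketch of the PTP branch on synthetic epochs:

```python
import mne
import numpy as np

# Ten synthetic EEG epochs; epoch 0 gets an artificially huge amplitude.
info = mne.create_info(["EEG 001"], sfreq=100.0, ch_types="eeg")
data = np.random.default_rng(0).standard_normal((10, 1, 100)) * 1e-6
data[0] *= 50
epochs = mne.EpochsArray(data, info)

# Epochs whose peak-to-peak amplitude exceeds the threshold are dropped,
# as in the non-autoreject branch above.
epochs.drop_bad(reject=dict(eeg=40e-6))
print(len(epochs))  # 9
```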
-
- # Now actually perform ICA.
- msg = f"Calculating ICA solution using method: {cfg.ica_algorithm}."
- logger.info(**gen_log_kwargs(message=msg))
- ica = fit_ica(cfg=cfg, epochs=epochs, subject=subject, session=session)
-
- # Start a report
- title = f"ICA – sub-{subject}"
- if session is not None:
- title += f", ses-{session}"
- if cfg.task is not None:
- title += f", task-{cfg.task}"
-
- # ECG and EOG component detection
- if epochs_ecg:
- ecg_ics, ecg_scores = detect_bad_components(
- cfg=cfg,
- which="ecg",
- epochs=epochs_ecg,
- ica=ica,
- ch_names=None, # we currently don't allow for custom channels
- subject=subject,
- session=session,
- )
- else:
- ecg_ics = ecg_scores = []
-
- if epochs_eog:
- eog_ics, eog_scores = detect_bad_components(
- cfg=cfg,
- which="eog",
- epochs=epochs_eog,
- ica=ica,
- ch_names=cfg.eog_channels,
- subject=subject,
- session=session,
- )
- else:
- eog_ics = eog_scores = []
-
- # Save ICA to disk.
- # We also store the automatically identified ECG- and EOG-related ICs.
- msg = "Saving ICA solution and detected artifacts to disk."
- logger.info(**gen_log_kwargs(message=msg))
- ica.exclude = sorted(set(ecg_ics + eog_ics))
- ica.save(out_files["ica"], overwrite=True)
- _update_for_splits(out_files, "ica")
-
- # Create TSV.
- tsv_data = pd.DataFrame(
- dict(
- component=list(range(ica.n_components_)),
- type=["ica"] * ica.n_components_,
- description=["Independent Component"] * ica.n_components_,
- status=["good"] * ica.n_components_,
- status_description=["n/a"] * ica.n_components_,
- )
- )
-
- for component in ecg_ics:
- row_idx = tsv_data["component"] == component
- tsv_data.loc[row_idx, "status"] = "bad"
- tsv_data.loc[row_idx, "status_description"] = "Auto-detected ECG artifact"
-
- for component in eog_ics:
- row_idx = tsv_data["component"] == component
- tsv_data.loc[row_idx, "status"] = "bad"
- tsv_data.loc[row_idx, "status_description"] = "Auto-detected EOG artifact"
-
- tsv_data.to_csv(out_files["components"], sep="\t", index=False)
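
The resulting `*_components.tsv` is the hand-off point for manual review: one row per IC, with a `status` column that users flip from `good` to `bad`. A sketch of that round trip with pandas (column values are illustrative):

```python
import pandas as pd

# Minimal stand-in for the components TSV written above.
tsv = pd.DataFrame(
    dict(
        component=[0, 1, 2],
        type=["ica"] * 3,
        status=["good"] * 3,
        status_description=["n/a"] * 3,
    )
)

# Manually rejecting component 2, as the report asks reviewers to do:
tsv.loc[tsv["component"] == 2, ["status", "status_description"]] = [
    "bad",
    "Manually rejected: muscle artifact",
]
print(tsv.to_csv(sep="\t", index=False))
```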
-
- # Lastly, add info about the epochs used for the ICA fit, and plot all ICs
- # for manual inspection.
- msg = "Adding diagnostic plots for all ICA components to the HTML report …"
- logger.info(**gen_log_kwargs(message=msg))
-
- report = Report(info_fname=epochs, title=title, verbose=False)
- ecg_evoked = None if epochs_ecg is None else epochs_ecg.average()
- eog_evoked = None if epochs_eog is None else epochs_eog.average()
- ecg_scores = None if len(ecg_scores) == 0 else ecg_scores
- eog_scores = None if len(eog_scores) == 0 else eog_scores
-
- with _agg_backend():
- if cfg.ica_reject == "autoreject_local":
- caption = (
- f"Autoreject was run to produce cleaner epochs before fitting ICA. "
- f"{reject_log.bad_epochs.sum()} epochs were rejected because more than "
- f"{ar.n_interpolate_} channels were bad (cross-validated n_interpolate "
- f"limit; excluding globally bad and non-data channels, shown in "
- f"white). Note that none of the blue segments were actually "
-                f"interpolated before submitting the data to ICA. This follows "
-                f"the recommended approach for ICA described in the Autoreject "
-                f"documentation."
- )
- report.add_figure(
- fig=reject_log.plot(
- orientation="horizontal", aspect="auto", show=False
- ),
- title="Epochs: Autoreject cleaning",
- caption=caption,
- tags=("ica", "epochs", "autoreject"),
- replace=True,
- )
- del caption
-
- report.add_epochs(
- epochs=epochs,
- title="Epochs used for ICA fitting",
- drop_log_ignore=(),
- replace=True,
- )
- report.add_ica(
- ica=ica,
- title="ICA cleaning",
- inst=epochs,
- ecg_evoked=ecg_evoked,
- eog_evoked=eog_evoked,
- ecg_scores=ecg_scores,
- eog_scores=eog_scores,
- replace=True,
- n_jobs=1, # avoid automatic parallelization
- )
-
- msg = (
- f"ICA completed. Please carefully review the extracted ICs in the "
- f"report {out_files['report'].basename}, and mark all components "
- f"you wish to reject as 'bad' in "
- f"{out_files['components'].basename}"
- )
- logger.info(**gen_log_kwargs(message=msg))
-
- report.save(
- out_files["report"],
- overwrite=True,
- open_browser=exec_params.interactive,
- )
-
- assert len(in_files) == 0, in_files.keys()
- return _prep_out_files(exec_params=exec_params, out_files=out_files)
-
-
-def get_config(
- *,
- config: SimpleNamespace,
- subject: str,
- session: Optional[str] = None,
-) -> SimpleNamespace:
- cfg = SimpleNamespace(
- conditions=config.conditions,
- runs=get_runs(config=config, subject=subject),
- task_is_rest=config.task_is_rest,
- ica_l_freq=config.ica_l_freq,
- ica_algorithm=config.ica_algorithm,
- ica_n_components=config.ica_n_components,
- ica_max_iterations=config.ica_max_iterations,
- ica_decim=config.ica_decim,
- ica_reject=config.ica_reject,
- ica_eog_threshold=config.ica_eog_threshold,
- ica_ctps_ecg_threshold=config.ica_ctps_ecg_threshold,
- autoreject_n_interpolate=config.autoreject_n_interpolate,
- random_state=config.random_state,
- ch_types=config.ch_types,
- l_freq=config.l_freq,
- epochs_decim=config.epochs_decim,
- raw_resample_sfreq=config.raw_resample_sfreq,
- event_repeated=config.event_repeated,
- epochs_tmin=config.epochs_tmin,
- epochs_tmax=config.epochs_tmax,
- epochs_metadata_tmin=config.epochs_metadata_tmin,
- epochs_metadata_tmax=config.epochs_metadata_tmax,
- epochs_metadata_keep_first=config.epochs_metadata_keep_first,
- epochs_metadata_keep_last=config.epochs_metadata_keep_last,
- epochs_metadata_query=config.epochs_metadata_query,
- eeg_reference=get_eeg_reference(config),
- eog_channels=config.eog_channels,
- rest_epochs_duration=config.rest_epochs_duration,
- rest_epochs_overlap=config.rest_epochs_overlap,
- processing="filt" if config.regress_artifact is None else "regress",
- **_bids_kwargs(config=config),
- )
- return cfg
-
-
-def main(*, config: SimpleNamespace) -> None:
- """Run ICA."""
- if config.spatial_filter != "ica":
- msg = "Skipping …"
- logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
- return
-
- with get_parallel_backend(config.exec_params):
- parallel, run_func = parallel_func(run_ica, exec_params=config.exec_params)
- logs = parallel(
- run_func(
- cfg=get_config(config=config, subject=subject),
- exec_params=config.exec_params,
- subject=subject,
- session=session,
- )
- for subject in get_subjects(config)
- for session in get_sessions(config)
- )
- save_logs(config=config, logs=logs)
diff --git a/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
index aecab29e4..b64e99f3a 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
@@ -1,13 +1,8 @@
"""Apply ICA.
-Blinks and ECG artifacts are automatically detected and the corresponding ICA
-components are removed from the data.
-This relies on the ICAs computed in 04-run_ica.py
-
-!! If you manually add components to remove (config.rejcomps_man),
-make sure you did not re-run the ICA in the meantime. Otherwise (especially if
-the random state was not set, or you used a different machine, the component
-order might differ).
+!! If you manually add components to remove, make sure you did not re-run the ICA in
+the meantime. Otherwise (especially if the random state was not set, or you used a
+different machine) the component order might differ.
"""
from types import SimpleNamespace
@@ -49,7 +44,11 @@ def _ica_paths(
check=False,
)
in_files = dict()
- in_files["ica"] = bids_basename.copy().update(suffix="ica", extension=".fif")
+ in_files["ica"] = bids_basename.copy().update(
+ processing="ica",
+ suffix="ica",
+ extension=".fif",
+ )
in_files["components"] = bids_basename.copy().update(
processing="ica", suffix="components", extension=".tsv"
)
@@ -72,7 +71,15 @@ def get_input_fnames_apply_ica_epochs(
session: Optional[str],
) -> dict:
in_files = _ica_paths(cfg=cfg, subject=subject, session=session)
- in_files["epochs"] = in_files["ica"].copy().update(suffix="epo", extension=".fif")
+ in_files["epochs"] = (
+ in_files["ica"]
+ .copy()
+ .update(
+ suffix="epo",
+ extension=".fif",
+ processing=None,
+ )
+ )
_update_for_splits(in_files, "epochs", single=True)
return in_files
@@ -188,7 +195,11 @@ def apply_ica_epochs(
logger.info(**gen_log_kwargs(message=msg, **kwargs))
if ica.exclude:
with _open_report(
- cfg=cfg, exec_params=exec_params, subject=subject, session=session
+ cfg=cfg,
+ exec_params=exec_params,
+ subject=subject,
+ session=session,
+ name="ICA.apply report",
) as report:
report.add_ica(
ica=ica,
diff --git a/mne_bids_pipeline/steps/preprocessing/__init__.py b/mne_bids_pipeline/steps/preprocessing/__init__.py
index 07d65224a..f9072617c 100644
--- a/mne_bids_pipeline/steps/preprocessing/__init__.py
+++ b/mne_bids_pipeline/steps/preprocessing/__init__.py
@@ -6,7 +6,8 @@
_03_maxfilter,
_04_frequency_filter,
_05_regress_artifact,
- _06a_run_ica,
+ _06a1_fit_ica,
+ _06a2_find_ica_artifacts,
_06b_run_ssp,
_07_make_epochs,
_08a_apply_ica,
@@ -20,7 +21,8 @@
_03_maxfilter,
_04_frequency_filter,
_05_regress_artifact,
- _06a_run_ica,
+ _06a1_fit_ica,
+ _06a2_find_ica_artifacts,
_06b_run_ssp,
_07_make_epochs,
_08a_apply_ica,
diff --git a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
index 14fcfc998..0cd21a5a7 100644
--- a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
+++ b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
@@ -94,7 +94,7 @@
on_rename_missing_events = "ignore"
parallel_backend = "dask"
-dask_worker_memory_limit = "2G"
+dask_worker_memory_limit = "2.5G"
n_jobs = 4
if task == "N400":
From 525646ab3de33f81cd0bc13dc2a31600c37b1bd3 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Fri, 1 Mar 2024 08:26:05 -0500
Subject: [PATCH 074/132] ENH: Document relevant steps (#866)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: Richard Höchenberger
---
docs/hooks.py | 252 ++++++++++++++++++
docs/mkdocs.yml | 2 +
docs/source/settings/gen_settings.py | 12 +
docs/source/v1.6.md.inc | 1 +
.../python/material/_base/attribute.html | 101 +++++++
.../steps/freesurfer/_02_coreg_surfaces.py | 1 -
.../steps/preprocessing/_01_data_quality.py | 1 +
.../steps/sensor/_99_group_average.py | 3 +-
.../steps/source/_04_make_forward.py | 49 ++--
9 files changed, 392 insertions(+), 30 deletions(-)
create mode 100644 docs/templates/python/material/_base/attribute.html
diff --git a/docs/hooks.py b/docs/hooks.py
index 41ece9a61..003247550 100644
--- a/docs/hooks.py
+++ b/docs/hooks.py
@@ -1,15 +1,267 @@
+import ast
+import inspect
import logging
+from collections import defaultdict
+from pathlib import Path
from typing import Any
from mkdocs.config.defaults import MkDocsConfig
from mkdocs.structure.files import Files
from mkdocs.structure.pages import Page
+from tqdm import tqdm
+
+from mne_bids_pipeline import _config_utils
logger = logging.getLogger("mkdocs")
config_updated = False
+class _ParseConfigSteps:
+ def __init__(self):
+ self.steps = defaultdict(list)
+ # We don't need to parse the config itself, just the steps
+ no_config = {
+ "freesurfer/_01_recon_all",
+ }
+ ignore_options = {
+ "PIPELINE_NAME",
+ "VERSION",
+ "CODE_URL",
+ }
+ ignore_calls = {
+ # TODO: These are used a lot at the very beginning, so adding them will lead
+ # to long lists. Instead, let's just mention at the top of General that
+ # messing with basic BIDS params will affect almost every step.
+ "_bids_kwargs",
+ "_import_data_kwargs",
+ "get_runs",
+ "get_subjects",
+ "get_sessions",
+ }
+ manual_kws = {
+ "source/_04_make_forward:get_config:t1_bids_path": (
+ "mri_t1_path_generator",
+ ),
+ "source/_04_make_forward:get_config:landmarks_kind": (
+ "mri_landmarks_kind",
+ ),
+ "preprocessing/_01_data_quality:get_config:extra_kwargs": (
+ "mf_cal_fname",
+ "mf_ctc_fname",
+ "mf_head_origin",
+ "find_flat_channels_meg",
+ "find_noisy_channels_meg",
+ ),
+ }
+ # Add a few helper functions
+ for func in (
+ _config_utils.get_eeg_reference,
+ _config_utils.get_all_contrasts,
+ _config_utils.get_decoding_contrasts,
+ _config_utils.get_fs_subject,
+ _config_utils.get_fs_subjects_dir,
+ _config_utils.get_mf_cal_fname,
+ _config_utils.get_mf_ctc_fname,
+ ):
+ this_list = []
+ for attr in ast.walk(ast.parse(inspect.getsource(func))):
+ if not isinstance(attr, ast.Attribute):
+ continue
+ if not (isinstance(attr.value, ast.Name) and attr.value.id == "config"):
+ continue
+ if attr.attr not in this_list:
+ this_list.append(attr.attr)
+ manual_kws[func.__name__] = tuple(this_list)
+
+ for module in tqdm(
+ sum(_config_utils._get_step_modules().values(), tuple()),
+ desc="Generating option->step mapping",
+ ):
+ step = "/".join(module.__name__.split(".")[-2:])
+ found = False # found at least one?
+ # Walk the module file for "get_config*" functions (can be multiple!)
+ for func in ast.walk(ast.parse(Path(module.__file__).read_text("utf-8"))):
+ if not isinstance(func, ast.FunctionDef):
+ continue
+ where = f"{step}:{func.name}"
+ # Also look at config.* args in main(), e.g. config.recreate_bem
+ # and config.recreate_scalp_surface
+ if func.name == "main":
+ for call in ast.walk(func):
+ if not isinstance(call, ast.Call):
+ continue
+ for keyword in call.keywords:
+ if not isinstance(keyword.value, ast.Attribute):
+ continue
+ if keyword.value.value.id != "config":
+ continue
+ if keyword.value.attr in ("exec_params",):
+ continue
+ self._add_step_option(step, keyword.value.attr)
+ # Also look for root-level conditionals like use_maxwell_filter
+ # or spatial_filter
+ for cond in ast.iter_child_nodes(func):
+ # is a conditional
+ if not isinstance(cond, ast.If):
+ continue
+ # has a return statement
+ if not any(isinstance(c, ast.Return) for c in ast.walk(cond)):
+ continue
+ # look at all attributes in the conditional
+ for attr in ast.walk(cond.test):
+ if not isinstance(attr, ast.Attribute):
+ continue
+ if attr.value.id != "config":
+ continue
+ self._add_step_option(step, attr.attr)
+ # Now look at get_config* functions
+ if not func.name.startswith("get_config"):
+ continue
+ found = True
+ for call in ast.walk(func):
+ if not isinstance(call, ast.Call):
+ continue
+ if call.func.id != "SimpleNamespace":
+ continue
+ break
+ else:
+ raise RuntimeError(f"Could not find SimpleNamespace in {func}")
+ assert call.args == []
+ for keyword in call.keywords:
+ if isinstance(keyword.value, ast.Call):
+ key = keyword.value.func.id
+ if key in ignore_calls:
+ continue
+ if key in manual_kws:
+ for option in manual_kws[key]:
+ self._add_step_option(step, option)
+ continue
+ if keyword.value.func.id == "_sanitize_callable":
+ assert len(keyword.value.args) == 1
+ assert isinstance(keyword.value.args[0], ast.Attribute)
+ assert keyword.value.args[0].value.id == "config"
+ self._add_step_option(step, keyword.value.args[0].attr)
+ continue
+ raise RuntimeError(
+ f"{where} cannot handle call {keyword.value.func.id=}"
+ )
+ if isinstance(keyword.value, ast.Name):
+ key = f"{where}:{keyword.value.id}"
+ if key in manual_kws:
+ for option in manual_kws[f"{where}:{keyword.value.id}"]:
+ self._add_step_option(step, option)
+ continue
+ raise RuntimeError(f"{where} cannot handle Name {key=}")
+ if isinstance(keyword.value, ast.IfExp): # conditional
+ if keyword.arg == "processing": # inline conditional for proc
+ continue
+ if not isinstance(keyword.value, ast.Attribute):
+ raise RuntimeError(
+ f"{where} cannot handle type {keyword.value=}"
+ )
+ option = keyword.value.attr
+ if option in ignore_options:
+ continue
+ assert keyword.value.value.id == "config", f"{where} {keyword.value.value.id}" # noqa: E501 # fmt: skip
+ self._add_step_option(step, option)
+ if step in no_config:
+ assert not found, f"Found unexpected get_config* in {step}"
+ else:
+ assert found, f"Could not find get_config* in {step}"
+ # Some don't show up so force them to be empty
+ force_empty = (
+ # Eventually we could deduplicate these with the execution.md list
+ "n_jobs",
+ "parallel_backend",
+ "dask_open_dashboard",
+ "dask_temp_dir",
+ "dask_worker_memory_limit",
+ "log_level",
+ "mne_log_level",
+ "on_error",
+ "memory_location",
+ "memory_file_method",
+ "memory_subdir",
+ "memory_verbose",
+ "config_validation",
+ "interactive",
+            # Plus some BIDS options we don't detect because of _bids_kwargs etc. above,
+ # which we could cross-check against the general.md list. A notable
+ # exception is random_state, since this does have more localized effects.
+ "study_name",
+ "bids_root",
+ "deriv_root",
+ "subjects_dir",
+ "sessions",
+ "acq",
+ "proc",
+ "rec",
+ "space",
+ "task",
+ "runs",
+ "exclude_runs",
+ "subjects",
+ "crop_runs",
+ "process_empty_room",
+ "process_rest",
+ "eeg_bipolar_channels",
+ "eeg_reference",
+ "eeg_template_montage",
+ "drop_channels",
+ "reader_extra_params",
+ "read_raw_bids_verbose",
+ "plot_psd_for_runs",
+ "shortest_event",
+ "find_breaks",
+ "min_break_duration",
+ "t_break_annot_start_after_previous_event",
+ "t_break_annot_stop_before_next_event",
+ "rename_events",
+ "on_rename_missing_events",
+ "mf_reference_run", # TODO: Make clearer that this changes a lot
+ "fix_stim_artifact",
+ "stim_artifact_tmin",
+ "stim_artifact_tmax",
+ # And some that we force to be empty because they affect too many things
+ # and what they affect is an incomplete list anyway
+ "exclude_subjects",
+ "ch_types",
+ "task_is_rest",
+ "data_type",
+ )
+ for key in force_empty:
+ self.steps[key] = list()
+ for key, val in self.steps.items():
+ assert len(val) == len(set(val)), f"{key} {val}"
+ self.steps = {k: tuple(v) for k, v in self.steps.items()} # no defaultdict
+
+ def _add_step_option(self, step, option):
+ if step not in self.steps[option]:
+ self.steps[option].append(step)
+
+ def __call__(self, option: str) -> list[str]:
+ return self.steps[option]
+
+
+_parse_config_steps = _ParseConfigSteps()
+
+
+# This hack can be cleaned up once this is resolved:
+# https://github.com/mkdocstrings/mkdocstrings/issues/615#issuecomment-1971568301
+def on_pre_build(config: MkDocsConfig) -> None:
+ """Monkey patch mkdocstrings-python jinja template to have global vars."""
+ import mkdocstrings_handlers.python.handler
+
+ old_update_env = mkdocstrings_handlers.python.handler.PythonHandler.update_env
+
+ def update_env(self, md, config: dict) -> None:
+ old_update_env(self, md=md, config=config)
+ self.env.globals["pipeline_steps"] = _parse_config_steps
+
+ mkdocstrings_handlers.python.handler.PythonHandler.update_env = update_env
+
+
# Ideally there would be a better hook, but it's unclear if context can
# be obtained any earlier
def on_template_context(
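
The heavy lifting in `_ParseConfigSteps` is an AST walk: every step module is parsed (never imported), and each `config.<attr>` access found in its `get_config*` and `main` functions is recorded against that step. The core pattern, reduced to a few lines:

```python
import ast

# Find every `config.<attr>` access in a source string without importing it,
# the same trick _ParseConfigSteps applies to each step module.
src = '''
def get_config(*, config):
    return dict(l_freq=config.l_freq, runs=config.runs)
'''

options = set()
for node in ast.walk(ast.parse(src)):
    if (
        isinstance(node, ast.Attribute)
        and isinstance(node.value, ast.Name)
        and node.value.id == "config"
    ):
        options.add(node.attr)

print(sorted(options))  # ['l_freq', 'runs']
```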
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 1e881f1b7..190d0f517 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -139,6 +139,8 @@ plugins:
- "*.inc" # includes
- mkdocstrings:
default_handler: python
+ enable_inventory: true
+ custom_templates: templates
handlers:
python:
paths: # Where to find the packages and modules to import
diff --git a/docs/source/settings/gen_settings.py b/docs/source/settings/gen_settings.py
index 6f7eaf7d3..021d271d1 100755
--- a/docs/source/settings/gen_settings.py
+++ b/docs/source/settings/gen_settings.py
@@ -79,6 +79,16 @@
"execution": (),
}
+extra_headers = {
+ "general settings": """\
+!!! info
+ Many settings in this section control the pipeline behavior very early in the
+    Many settings in this section affect the pipeline's behavior at a very early
+    stage. Therefore, for most of them (e.g., `bids_root`) we do not list the
+ lists (e.g., `random_state`) have more localized effects.
+"""
+}
+
option_header = """\
::: mne_bids_pipeline._config
options:
@@ -156,6 +166,8 @@ def main():
current_lines += ["---", "tags:"]
current_lines += [f" - {tag}" for tag in section_tags[key]]
current_lines += ["---"]
+ if key in extra_headers:
+ current_lines.extend(["", extra_headers[key]])
continue
if in_header:
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 99d31f48d..790b818d3 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -42,3 +42,4 @@
- Code caching is now tested using GitHub Actions (#836 by @larsoner)
- Steps in the documentation are now automatically parsed into flowcharts (#859 by @larsoner)
- New configuration options are now automatically added to the docs (#863 by @larsoner)
+- Configuration options now have relevant steps listed in the docs (#866 by @larsoner)
diff --git a/docs/templates/python/material/_base/attribute.html b/docs/templates/python/material/_base/attribute.html
new file mode 100644
index 000000000..c06e20391
--- /dev/null
+++ b/docs/templates/python/material/_base/attribute.html
@@ -0,0 +1,101 @@
+{# Modified from https://github.com/mkdocstrings/python/blob/master/src/mkdocstrings_handlers/python/templates/material/_base/attribute.html #}
+
+{{ log.debug("Rendering " + attribute.path) }}
+
+
+{% with obj = attribute, html_id = attribute.path %}
+
+ {% if root %}
+ {% set show_full_path = config.show_root_full_path %}
+ {% set root_members = True %}
+ {% elif root_members %}
+ {% set show_full_path = config.show_root_members_full_path or config.show_object_full_path %}
+ {% set root_members = False %}
+ {% else %}
+ {% set show_full_path = config.show_object_full_path %}
+ {% endif %}
+
+ {% set attribute_name = attribute.path if show_full_path else attribute.name %}
+
+ {% if not root or config.show_root_heading %}
+ {% filter heading(
+ heading_level,
+ role="data" if attribute.parent.kind.value == "module" else "attr",
+ id=html_id,
+ class="doc doc-heading",
+      toc_label=('<code class="doc-symbol doc-symbol-toc doc-symbol-attribute"></code>&nbsp;'|safe if config.show_symbol_type_toc else '') + attribute.name,
+ ) %}
+
+ {% block heading scoped %}
+      {% if config.show_symbol_type_heading %}<code class="doc-symbol doc-symbol-heading doc-symbol-attribute"></code>{% endif %}
+ {% if config.separate_signature %}
+ {{ attribute_name }}
+ {% else %}
+ {%+ filter highlight(language="python", inline=True) %}
+ {{ attribute_name }}{% if attribute.annotation %}: {{ attribute.annotation }}{% endif %}
+ {% if attribute.value %} = {{ attribute.value }}{% endif %}
+ {% endfilter %}
+ {% endif %}
+ {% endblock heading %}
+
+ {% block labels scoped %}
+ {% with labels = attribute.labels %}
+ {% include "labels.html" with context %}
+ {% endwith %}
+ {% endblock labels %}
+
+ {% endfilter %}
+
+ {% block signature scoped %}
+ {% if config.separate_signature %}
+ {% filter format_attribute(attribute, config.line_length, crossrefs=config.signature_crossrefs) %}
+ {{ attribute.name }}
+ {% endfilter %}
+ {% endif %}
+ {% endblock signature %}
+
+ {% else %}
+
+ {% if config.show_root_toc_entry %}
+ {% filter heading(heading_level,
+ role="data" if attribute.parent.kind.value == "module" else "attr",
+ id=html_id,
+      toc_label=('<code class="doc-symbol doc-symbol-toc doc-symbol-attribute"></code>&nbsp;'|safe if config.show_symbol_type_toc else '') + attribute.name,
+ hidden=True,
+ ) %}
+ {% endfilter %}
+ {% endif %}
+ {% set heading_level = heading_level - 1 %}
+ {% endif %}
+
+
+ {% block contents scoped %}
+ {% block docstring scoped %}
+ {% with docstring_sections = attribute.docstring.parsed %}
+ {% include "docstring.html" with context %}
+ {% endwith %}
+ {% endblock docstring %}
+ {% endblock contents %}
+
+ {# START NEW CODE #}
+ {% if pipeline_steps(attribute_name) %}
+ {# https://squidfunk.github.io/mkdocs-material/reference/admonitions/#collapsible-blocks #}
+  <details class="example">
+    <summary>Pipeline steps using this setting</summary>
+    <p>The following steps are directly affected by changes to
+    <code>{{ attribute_name }}</code>:</p>
+    <ul>
+    {% for step in pipeline_steps(attribute_name) %}
+      <li><code>{{ step }}</code></li>
+    {% endfor %}
+    </ul>
+  </details>
+ {% endif %}
+ {# END NEW CODE #}
+
+
+
+{% endwith %}
+
diff --git a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
index f7613fa68..c9fc0e61c 100644
--- a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
+++ b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
@@ -82,7 +82,6 @@ def make_coreg_surfaces(
def get_config(*, config, subject) -> SimpleNamespace:
cfg = SimpleNamespace(
- subject=subject,
fs_subject=get_fs_subject(config, subject),
fs_subjects_dir=get_fs_subjects_dir(config),
)
diff --git a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
index 3b64c5659..c12dd6a26 100644
--- a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
+++ b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
@@ -287,6 +287,7 @@ def get_config(
) -> SimpleNamespace:
extra_kwargs = dict()
if config.find_noisy_channels_meg or config.find_flat_channels_meg:
+ # If these change, need to update hooks.py in doc build
extra_kwargs["mf_cal_fname"] = get_mf_cal_fname(
config=config,
subject=subject,
diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py
index 63e4e6ea2..e84877683 100644
--- a/mne_bids_pipeline/steps/sensor/_99_group_average.py
+++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py
@@ -938,7 +938,6 @@ def get_config(
*,
config,
) -> SimpleNamespace:
- dtg_decim = config.decoding_time_generalization_decim
cfg = SimpleNamespace(
subjects=get_subjects(config),
task_is_rest=config.task_is_rest,
@@ -952,7 +951,7 @@ def get_config(
decoding_metric=config.decoding_metric,
decoding_n_splits=config.decoding_n_splits,
decoding_time_generalization=config.decoding_time_generalization,
- decoding_time_generalization_decim=dtg_decim,
+ decoding_time_generalization_decim=config.decoding_time_generalization_decim,
decoding_csp=config.decoding_csp,
decoding_csp_freqs=config.decoding_csp_freqs,
decoding_csp_times=config.decoding_csp_times,
diff --git a/mne_bids_pipeline/steps/source/_04_make_forward.py b/mne_bids_pipeline/steps/source/_04_make_forward.py
index 28586b742..87bcf6fd9 100644
--- a/mne_bids_pipeline/steps/source/_04_make_forward.py
+++ b/mne_bids_pipeline/steps/source/_04_make_forward.py
@@ -11,7 +11,6 @@
from mne.coreg import Coregistration
from mne_bids import BIDSPath, get_head_mri_trans
-from ..._config_import import _import_config
from ..._config_utils import (
_bids_kwargs,
_get_bem_conductivity,
@@ -75,38 +74,15 @@ def _prepare_trans_subject(
# electrophysiological and MRI sidecar files, and save it to an MNE
# "trans" file in the derivatives folder.
- # TODO: This breaks our encapsulation
- config = _import_config(
- config_path=exec_params.config_path,
- check=False,
- log=False,
- )
- if config.mri_t1_path_generator is None:
- t1_bids_path = None
- else:
- t1_bids_path = BIDSPath(subject=subject, session=session, root=cfg.bids_root)
- t1_bids_path = config.mri_t1_path_generator(t1_bids_path.copy())
- if t1_bids_path.suffix is None:
- t1_bids_path.update(suffix="T1w")
- if t1_bids_path.datatype is None:
- t1_bids_path.update(datatype="anat")
-
- if config.mri_landmarks_kind is None:
- landmarks_kind = None
- else:
- landmarks_kind = config.mri_landmarks_kind(
- BIDSPath(subject=subject, session=session)
- )
-
msg = "Computing head ↔ MRI transform from matched fiducials"
logger.info(**gen_log_kwargs(message=msg))
trans = get_head_mri_trans(
bids_path.copy().update(run=cfg.runs[0], root=cfg.bids_root, extension=None),
- t1_bids_path=t1_bids_path,
+ t1_bids_path=cfg.t1_bids_path,
fs_subject=cfg.fs_subject,
fs_subjects_dir=cfg.fs_subjects_dir,
- kind=landmarks_kind,
+ kind=cfg.landmarks_kind,
)
return trans
@@ -242,7 +218,24 @@ def get_config(
*,
config: SimpleNamespace,
subject: str,
+ session: Optional[str],
) -> SimpleNamespace:
+ if config.mri_t1_path_generator is None:
+ t1_bids_path = None
+ else:
+ t1_bids_path = BIDSPath(subject=subject, session=session, root=config.bids_root)
+ t1_bids_path = config.mri_t1_path_generator(t1_bids_path.copy())
+ if t1_bids_path.suffix is None:
+ t1_bids_path.update(suffix="T1w")
+ if t1_bids_path.datatype is None:
+ t1_bids_path.update(datatype="anat")
+ if config.mri_landmarks_kind is None:
+ landmarks_kind = None
+ else:
+ landmarks_kind = config.mri_landmarks_kind(
+ BIDSPath(subject=subject, session=session)
+ )
+
cfg = SimpleNamespace(
runs=get_runs(config=config, subject=subject),
mindist=config.mindist,
@@ -253,6 +246,8 @@ def get_config(
ch_types=config.ch_types,
fs_subject=get_fs_subject(config=config, subject=subject),
fs_subjects_dir=get_fs_subjects_dir(config),
+ t1_bids_path=t1_bids_path,
+ landmarks_kind=landmarks_kind,
**_bids_kwargs(config=config),
)
return cfg
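
Moving the `mri_t1_path_generator` / `mri_landmarks_kind` evaluation into `get_config` removes the `_import_config` round trip that previously broke encapsulation. For reference, the generator is a user-supplied callable that rewrites a `BIDSPath`; a hypothetical example of one:

```python
from mne_bids import BIDSPath

# Hypothetical user-config callable of the kind consumed above: it takes a
# subject/session-initialized BIDSPath and points it at the T1-weighted image.
def mri_t1_path_generator(bids_path: BIDSPath) -> BIDSPath:
    return bids_path.copy().update(datatype="anat", suffix="T1w")

t1 = mri_t1_path_generator(BIDSPath(subject="01", session="a", check=False))
print(t1.basename)  # sub-01_ses-a_T1w
```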
@@ -269,7 +264,7 @@ def main(*, config: SimpleNamespace) -> None:
parallel, run_func = parallel_func(run_forward, exec_params=config.exec_params)
logs = parallel(
run_func(
- cfg=get_config(config=config, subject=subject),
+ cfg=get_config(config=config, subject=subject, session=session),
exec_params=config.exec_params,
subject=subject,
session=session,
From 5a1ef0ed7f663b157c8b26e158e6ee172bdb925a Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Fri, 1 Mar 2024 14:28:42 -0500
Subject: [PATCH 075/132] ENH: Doc config in sheet (#869)
---
docs/source/v1.6.md.inc | 3 +-
mne_bids_pipeline/_config_import.py | 29 ++++++++++---
mne_bids_pipeline/_run.py | 66 +++++++++++++----------------
3 files changed, 54 insertions(+), 44 deletions(-)
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 790b818d3..8a0dfade4 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -9,8 +9,9 @@
- Added saving of clean raw data in addition to epochs (#840 by @larsoner)
- Added saving of detected blink and cardiac events used to calculate SSP projectors (#840 by @larsoner)
- Added [`noise_cov_method`][mne_bids_pipeline._config.noise_cov_method] to allow for the use of methods other than `"shrunk"` for noise covariance estimation (#854 by @larsoner)
-- Added option to pass `image_kwargs` to [`mne.Report.add_epochs`] to allow adjusting e.g. `"vmin"` and `"vmax"` of the epochs image in the report via [`report_add_epochs_image_kwargs`][mne_bids_pipeline._config.report_add_epochs_image_kwargs] (#848 by @SophieHerbst)
+- Added option to pass `image_kwargs` to [`mne.Report.add_epochs`] to allow adjusting e.g. `"vmin"` and `"vmax"` of the epochs image in the report via [`report_add_epochs_image_kwargs`][mne_bids_pipeline._config.report_add_epochs_image_kwargs]. This feature requires MNE-Python 1.7 or newer. (#848 by @SophieHerbst)
- Split ICA fitting and artifact detection into separate steps. This means that now, ICA is split into a total of three consecutive steps: fitting, artifact detection, and the actual data cleaning step ("applying ICA"). This makes it easier to experiment with different settings for artifact detection without needing to re-fit ICA. (#865 by @larsoner)
+- The configuration used for the pipeline is now saved in a separate spreadsheet in the `.xlsx` log file (#869 by @larsoner)
[//]: # (### :warning: Behavior changes)
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index fa8fb6772..0d46f7455 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -28,7 +28,18 @@ def _import_config(
"""Import the default config and the user's config."""
# Get the default
config = _get_default_config()
+ # Public names users generally will have in their config
valid_names = [d for d in dir(config) if not d.startswith("_")]
+    # Names that we will reduce the SimpleNamespace to before returning
+ # (see _update_with_user_config)
+ keep_names = [d for d in dir(config) if not d.startswith("__")] + [
+ "config_path",
+ "PIPELINE_NAME",
+ "VERSION",
+ "CODE_URL",
+ "_raw_split_size",
+ "_epochs_split_size",
+ ]
# Update with user config
user_names = _update_with_user_config(
@@ -48,17 +59,21 @@ def _import_config(
config_path=extra_config,
)
extra_exec_params_keys = ("_n_jobs",)
+ keep_names.extend(extra_exec_params_keys)
# Check it
if check:
_check_config(config, config_path)
_check_misspellings_removals(
- config,
valid_names=valid_names,
user_names=user_names,
log=log,
+ config_validation=config.config_validation,
)
+ # Finally, reduce to our actual supported params (all keep_names should be present)
+ config = SimpleNamespace(**{k: getattr(config, k) for k in keep_names})
+
# Take some standard actions
mne.set_log_level(verbose=config.mne_log_level.upper())
@@ -406,11 +421,11 @@ def _pydantic_validate(
def _check_misspellings_removals(
- config: SimpleNamespace,
*,
valid_names: list[str],
user_names: list[str],
log: bool,
+ config_validation: str,
) -> None:
# for each name in the user names, check if it's in the valid names but
# the correct one is not defined
@@ -427,7 +442,7 @@ def _check_misspellings_removals(
"the variable to reduce ambiguity and avoid this message, "
"or set config.config_validation to 'warn' or 'ignore'."
)
- _handle_config_error(this_msg, log, config)
+ _handle_config_error(this_msg, log, config_validation)
if user_name in _REMOVED_NAMES:
new = _REMOVED_NAMES[user_name]["new_name"]
if new not in user_names:
@@ -438,16 +453,16 @@ def _check_misspellings_removals(
f"{msg} this variable has been removed as a valid "
f"config option, {instead}."
)
- _handle_config_error(this_msg, log, config)
+ _handle_config_error(this_msg, log, config_validation)
def _handle_config_error(
msg: str,
log: bool,
- config: SimpleNamespace,
+ config_validation: str,
) -> None:
- if config.config_validation == "raise":
+ if config_validation == "raise":
raise ValueError(msg)
- elif config.config_validation == "warn":
+ elif config_validation == "warn":
if log:
logger.warning(**gen_log_kwargs(message=msg, emoji="🛟"))
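
Passing the `config_validation` string instead of the whole config namespace decouples error handling from the (now reduced) `SimpleNamespace`. A sketch of the three policies, with `warnings` standing in for the pipeline logger:

```python
import warnings

def handle_config_error(msg: str, config_validation: str) -> None:
    # "raise" aborts, "warn" logs and continues, "ignore" stays silent.
    if config_validation == "raise":
        raise ValueError(msg)
    elif config_validation == "warn":
        warnings.warn(msg)

handle_config_error("unknown config option 'decodng_metric'", "warn")
```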
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index 1f033bc5b..3e4e5da51 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -4,7 +4,6 @@
import functools
import hashlib
import inspect
-import json
import pathlib
import pdb
import sys
@@ -38,14 +37,10 @@ def __mne_bids_pipeline_failsafe_wrapper__(*args, **kwargs):
get_input_fnames=get_input_fnames,
get_output_fnames=get_output_fnames,
)
- kwargs_copy = copy.deepcopy(kwargs)
t0 = time.time()
- kwargs_copy["cfg"] = json_tricks.dumps(
- kwargs_copy["cfg"], sort_keys=False, indent=4
- )
log_info = pd.concat(
[
- pd.Series(kwargs_copy, dtype=object),
+ pd.Series(kwargs, dtype=object),
pd.Series(index=["time", "success", "error_message"], dtype=object),
]
)
@@ -58,10 +53,10 @@ def __mne_bids_pipeline_failsafe_wrapper__(*args, **kwargs):
log_info["error_message"] = ""
except Exception as e:
# Only keep what gen_log_kwargs() can handle
- kwargs_copy = {
- k: v
- for k, v in kwargs_copy.items()
- if k in ("subject", "session", "task", "run")
+ kwargs_log = {
+ k: kwargs[k]
+ for k in ("subject", "session", "task", "run")
+ if k in kwargs
}
message = (
f"A critical error occurred. " f"The error message was: {str(e)}"
@@ -88,13 +83,13 @@ def __mne_bids_pipeline_failsafe_wrapper__(*args, **kwargs):
if _is_testing():
raise
logger.error(
- **gen_log_kwargs(message=message, **kwargs_copy, emoji="❌")
+ **gen_log_kwargs(message=message, **kwargs_log, emoji="❌")
)
sys.exit(1)
elif on_error == "debug":
message += "\n\nStarting post-mortem debugger."
logger.error(
- **gen_log_kwargs(message=message, **kwargs_copy, emoji="🐛")
+ **gen_log_kwargs(message=message, **kwargs_log, emoji="🐛")
)
extype, value, tb = sys.exc_info()
print(tb)
@@ -103,7 +98,7 @@ def __mne_bids_pipeline_failsafe_wrapper__(*args, **kwargs):
else:
message += "\n\nContinuing pipeline run."
logger.error(
- **gen_log_kwargs(message=message, **kwargs_copy, emoji="🔂")
+ **gen_log_kwargs(message=message, **kwargs_log, emoji="🔂")
)
log_info["time"] = round(time.time() - t0, ndigits=1)
return log_info
@@ -285,29 +280,8 @@ def save_logs(*, config: SimpleNamespace, logs: list[pd.Series]) -> None:
sheet_name = _short_step_path(_get_step_path()).replace("/", "-")
sheet_name = sheet_name[-30:] # shorten due to limit of excel format
- # We need to make the logs more compact to be able to write Excel format
- # (32767 char limit per cell), in particular the "cfg" column has very large
- # cells, so replace the "cfg" column with separated cfg.* columns (still truncated
- # to the 32767 char limit)
- compact_logs = list()
- for log in logs:
- log = log.copy()
- # 1. Remove indentation (e.g., 220814 chars to 54416)
- cfg = json.loads(log["cfg"])
- del log["cfg"]
- assert cfg["__instance_type__"] == ["types", "SimpleNamespace"], cfg[
- "__instance_type__"
- ]
- for key, val in cfg["attributes"].items():
- if isinstance(val, dict) and list(val.keys()) == ["__pathlib__"]:
- val = val["__pathlib__"]
- val = json.dumps(val, separators=(",", ":"))
- if len(val) > 32767:
- val = val[:32765] + " …"
- log[f"cfg.{key}"] = val
- compact_logs.append(log)
- df = pd.DataFrame(compact_logs)
- del logs, compact_logs
+ df = pd.DataFrame(logs)
+ del logs
with FileLock(fname.with_suffix(fname.suffix + ".lock")):
append = fname.exists()
@@ -317,7 +291,27 @@ def save_logs(*, config: SimpleNamespace, logs: list[pd.Series]) -> None:
mode="a" if append else "w",
if_sheet_exists="replace" if append else None,
)
+ assert isinstance(config, SimpleNamespace), type(config)
+ cf_df = dict()
+ for key, val in config.__dict__.items():
+ # We need to be careful about functions, json_tricks does not work with them
+ if inspect.isfunction(val):
+ new_val = ""
+ if func_file := inspect.getfile(val):
+ new_val += f"{func_file}:"
+ if getattr(val, "__qualname__", None):
+ new_val += val.__qualname__
+ val = "custom callable" if not new_val else new_val
+ val = json_tricks.dumps(val, indent=4, sort_keys=False)
+ # 32767 char limit per cell (could split over lines but if something is
+ # this long, you'll probably get the gist from the first 32k chars)
+ if len(val) > 32767:
+ val = val[:32765] + " …"
+ cf_df[key] = val
+ cf_df = pd.DataFrame([cf_df], dtype=object)
with writer:
+ # Config first then the data
+ cf_df.to_excel(writer, sheet_name="config", index=False)
df.to_excel(writer, sheet_name=sheet_name, index=False)
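
The new `config` sheet serializes each option and truncates every cell to Excel's 32767-character limit. A sketch of that per-cell rule, using the stdlib `json` here for portability (the pipeline itself uses `json_tricks` to handle richer types):

```python
import json

def to_cell(val, limit: int = 32767) -> str:
    # Serialize, then truncate to Excel's per-cell character limit.
    text = json.dumps(val, indent=4, sort_keys=False)
    if len(text) > limit:
        text = text[: limit - 2] + " …"
    return text

cell = to_cell(list(range(20000)))
assert len(cell) <= 32767
```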
From fbee2d1aeadef9b87217773b7cd0ba665ddf15c1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Sun, 3 Mar 2024 19:40:39 +0100
Subject: [PATCH 076/132] Fix a typo in the readme (#871)
---
README.md | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/README.md b/README.md
index a18948e22..075e1e2b8 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ Structure (BIDS)](https://bids.neuroimaging.io/).
* 👣 Data processing as a sequence of standard processing steps.
* ⏩ Steps are cached to avoid unnecessary recomputation.
* ⏏️ Data can be "ejected" from the pipeline at any stage. No lock-in!
-* ☁️ Runs on your laptop, on a powerful server, or on a high-performance cluster via Dash.
+* ☁️ Runs on your laptop, on a powerful server, or on a high-performance cluster via Dask.
@@ -44,7 +44,7 @@ developed for this publication:
> M. Jas, E. Larson, D. A. Engemann, J. Leppäkangas, S. Taulu, M. Hämäläinen,
> A. Gramfort (2018). A reproducible MEG/EEG group study with the MNE software:
> recommendations, quality assessments, and good practices. Frontiers in
-> neuroscience, 12. https://doi.org/10.3389/fnins.2018.00530
+> neuroscience, 12. <https://doi.org/10.3389/fnins.2018.00530>
The current iteration is based on BIDS and relies on the extensions to BIDS
for EEG and MEG. See the following two references:
@@ -52,10 +52,10 @@ for EEG and MEG. See the following two references:
> Pernet, C. R., Appelhoff, S., Gorgolewski, K. J., Flandin, G.,
> Phillips, C., Delorme, A., Oostenveld, R. (2019). EEG-BIDS, an extension
> to the brain imaging data structure for electroencephalography. Scientific
-> Data, 6, 103. https://doi.org/10.1038/s41597-019-0104-8
+> Data, 6, 103. <https://doi.org/10.1038/s41597-019-0104-8>
> Niso, G., Gorgolewski, K. J., Bock, E., Brooks, T. L., Flandin, G., Gramfort, A.,
> Henson, R. N., Jas, M., Litvak, V., Moreau, J., Oostenveld, R., Schoffelen, J.,
> Tadel, F., Wexler, J., Baillet, S. (2018). MEG-BIDS, the brain imaging data
> structure extended to magnetoencephalography. Scientific Data, 5, 180110.
-> https://doi.org/10.1038/sdata.2018.110
+> <https://doi.org/10.1038/sdata.2018.110>
From 022c59b924c1ccfc672e21212ba6d129dae7e910 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Mon, 4 Mar 2024 17:58:38 +0100
Subject: [PATCH 077/132] Enable stricter linting (#872)
---
.pre-commit-config.yaml | 2 +-
docs/hooks.py | 3 +++
docs/source/changes.md | 2 ++
docs/source/doc-config.py | 2 --
docs/source/examples/gen_examples.py | 2 ++
docs/source/settings/gen_settings.py | 1 +
docs/source/v1.5.md.inc | 2 +-
docs/source/v1.7.md.inc | 23 +++++++++++++++++++
mne_bids_pipeline/_config.py | 13 +++++------
mne_bids_pipeline/_config_import.py | 2 +-
mne_bids_pipeline/_download.py | 1 +
mne_bids_pipeline/_logging.py | 1 +
.../steps/freesurfer/_01_recon_all.py | 1 +
.../steps/freesurfer/_02_coreg_surfaces.py | 1 +
.../tests/configs/config_ERP_CORE.py | 1 +
.../tests/configs/config_ds000247.py | 1 +
.../configs/config_ds000248_FLASH_BEM.py | 1 +
.../tests/configs/config_ds000248_base.py | 3 +++
.../tests/configs/config_ds000248_ica.py | 1 +
.../tests/configs/config_ds001971.py | 1 -
.../tests/configs/config_ds003104.py | 1 +
.../tests/configs/config_ds003392.py | 1 +
.../tests/configs/config_ds004107.py | 1 +
.../tests/configs/config_ds004229.py | 1 +
mne_bids_pipeline/tests/conftest.py | 2 ++
mne_bids_pipeline/tests/datasets.py | 2 ++
mne_bids_pipeline/tests/test_cli.py | 1 +
mne_bids_pipeline/tests/test_documented.py | 1 +
mne_bids_pipeline/tests/test_run.py | 2 ++
mne_bids_pipeline/tests/test_validation.py | 2 ++
mne_bids_pipeline/typing.py | 13 ++++++-----
pyproject.toml | 10 ++------
32 files changed, 74 insertions(+), 27 deletions(-)
delete mode 100644 docs/source/doc-config.py
create mode 100644 docs/source/v1.7.md.inc
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ae9404554..ff6efb603 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,7 +6,7 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.2.2
+ rev: v0.3.0
hooks:
- id: ruff
args: ["--fix"]
diff --git a/docs/hooks.py b/docs/hooks.py
index 003247550..af79eef0d 100644
--- a/docs/hooks.py
+++ b/docs/hooks.py
@@ -1,3 +1,5 @@
+"""Custom hooks for MkDocs-Material."""
+
import ast
import inspect
import logging
@@ -298,6 +300,7 @@ def on_page_markdown(
config: MkDocsConfig,
files: Files,
) -> str:
+ """Replace emojis."""
if page.file.name == "index" and page.title == "Home":
for rd, md in _EMOJI_MAP.items():
markdown = markdown.replace(rd, md)
diff --git a/docs/source/changes.md b/docs/source/changes.md
index a64ada4b7..3388f2f40 100644
--- a/docs/source/changes.md
+++ b/docs/source/changes.md
@@ -1,3 +1,5 @@
+{% include-markdown "./v1.7.md.inc" %}
+
{% include-markdown "./v1.6.md.inc" %}
{% include-markdown "./v1.5.md.inc" %}
diff --git a/docs/source/doc-config.py b/docs/source/doc-config.py
deleted file mode 100644
index e3846c3a7..000000000
--- a/docs/source/doc-config.py
+++ /dev/null
@@ -1,2 +0,0 @@
-bids_root = "/tmp"
-ch_types = ["meg"]
diff --git a/docs/source/examples/gen_examples.py b/docs/source/examples/gen_examples.py
index 6e6363519..922c5c569 100755
--- a/docs/source/examples/gen_examples.py
+++ b/docs/source/examples/gen_examples.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+"""Generate documentation pages for our examples gallery."""
+
import contextlib
import logging
import shutil
diff --git a/docs/source/settings/gen_settings.py b/docs/source/settings/gen_settings.py
index 021d271d1..2b749fc10 100755
--- a/docs/source/settings/gen_settings.py
+++ b/docs/source/settings/gen_settings.py
@@ -116,6 +116,7 @@
def main():
+ """Parse the configuration and generate the markdown documentation."""
print(f"Parsing {config_path} to generate settings .md files.")
# max file-level depth is 2 even though we have 3 subsection levels
levels = [None, None]
diff --git a/docs/source/v1.5.md.inc b/docs/source/v1.5.md.inc
index 0c0d0b93a..6ef152c1e 100644
--- a/docs/source/v1.5.md.inc
+++ b/docs/source/v1.5.md.inc
@@ -22,7 +22,7 @@ All users are encouraged to update.
- Added support for "local" [`autoreject`](https://autoreject.github.io) to remove bad epochs
before submitting the data to ICA fitting. This can be enabled by setting [`ica_reject`][mne_bids_pipeline._config.ica_reject]
to `"autoreject_local"`. (#810, #816 by @hoechenberger)
-- The new setting [`decoding_which_epochs`][mne_bids_pipeline._config.decoding_which_epochs] controls which epochs (e.g., uncleaned, after ICA/SSP, cleaned) shall be used for decoding. (#819 by @hoechenber)
+- The new setting [`decoding_which_epochs`][mne_bids_pipeline._config.decoding_which_epochs] controls which epochs (e.g., uncleaned, after ICA/SSP, cleaned) shall be used for decoding. (#819 by @hoechenberger)
- Website documentation tables can now be sorted (e.g., to find examples that use a specific feature) (#808 by @larsoner)
### :warning: Behavior changes
diff --git a/docs/source/v1.7.md.inc b/docs/source/v1.7.md.inc
new file mode 100644
index 000000000..fb3be1914
--- /dev/null
+++ b/docs/source/v1.7.md.inc
@@ -0,0 +1,23 @@
+## v1.7.0 (unreleased)
+
+[//]: # (### :new: New features & enhancements)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :warning: Behavior changes)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :package: Requirements)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :bug: Bug fixes)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+### :medical_symbol: Code health
+
+- We enabled stricter linting to guarantee a consistently high code quality! (#872 by @hoechenberger)
+
+[//]: # (- Whatever (#000 by @whoever))
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index b8c918301..1d5406410 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1,6 +1,7 @@
# Default settings for data processing and analysis.
-from typing import Annotated, Any, Callable, Literal, Optional, Sequence, Union
+from collections.abc import Sequence
+from typing import Annotated, Any, Callable, Literal, Optional, Union
from annotated_types import Ge, Interval, Len
from mne import Covariance
@@ -381,9 +382,7 @@
```
"""
-analyze_channels: Union[
- Literal["all"], Literal["ch_types"], Sequence["str"]
-] = "ch_types"
+analyze_channels: Union[Literal["all", "ch_types"], Sequence["str"]] = "ch_types"
"""
The names of the channels to analyze during ERP/ERF and time-frequency analysis
steps. For certain paradigms, e.g. EEG ERP research, it is common to constrain
@@ -1525,9 +1524,9 @@
conditions.
"""
-decoding_which_epochs: Literal[
- "uncleaned", "after_ica", "after_ssp", "cleaned"
-] = "cleaned"
+decoding_which_epochs: Literal["uncleaned", "after_ica", "after_ssp", "cleaned"] = (
+ "cleaned"
+)
"""
This setting controls which epochs will be fed into the decoding algorithms.
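For context, a hypothetical user-config sketch showing how this reformatted option might be set (the default, per the assignment above, is `"cleaned"`):

```python
# Hypothetical user config sketch: decode epochs before ICA/SSP cleanup.
# Valid values: "uncleaned", "after_ica", "after_ssp", "cleaned" (default).
decoding_which_epochs = "uncleaned"
```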
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index 0d46f7455..25eea4b36 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -152,7 +152,7 @@ def _update_config_from_path(
if not key.startswith("_"):
user_names.append(key)
val = getattr(custom_cfg, key)
- logger.debug("Overwriting: %s -> %s" % (key, val))
+ logger.debug(f"Overwriting: {key} -> {val}")
setattr(config, key, val)
return user_names
diff --git a/mne_bids_pipeline/_download.py b/mne_bids_pipeline/_download.py
index a310f0f67..2a5308868 100644
--- a/mne_bids_pipeline/_download.py
+++ b/mne_bids_pipeline/_download.py
@@ -1,4 +1,5 @@
"""Download test data."""
+
import argparse
from pathlib import Path
diff --git a/mne_bids_pipeline/_logging.py b/mne_bids_pipeline/_logging.py
index 931ee393d..2f54757a6 100644
--- a/mne_bids_pipeline/_logging.py
+++ b/mne_bids_pipeline/_logging.py
@@ -1,4 +1,5 @@
"""Logging."""
+
import datetime
import inspect
import logging
diff --git a/mne_bids_pipeline/steps/freesurfer/_01_recon_all.py b/mne_bids_pipeline/steps/freesurfer/_01_recon_all.py
index 0633a9db0..1889867cf 100755
--- a/mne_bids_pipeline/steps/freesurfer/_01_recon_all.py
+++ b/mne_bids_pipeline/steps/freesurfer/_01_recon_all.py
@@ -3,6 +3,7 @@
This will run FreeSurfer's ``recon-all --all`` if necessary.
"""
+
import os
import shutil
import sys
diff --git a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
index c9fc0e61c..a76c037ef 100644
--- a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
+++ b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
@@ -4,6 +4,7 @@
Use FreeSurfer's ``mkheadsurf`` and related utilities to make head surfaces
suitable for coregistration.
"""
+
from pathlib import Path
from types import SimpleNamespace
diff --git a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
index 0cd21a5a7..953b19498 100644
--- a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
+++ b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
@@ -22,6 +22,7 @@
event-related potential research. *NeuroImage* 225: 117465.
[https://doi.org/10.1016/j.neuroimage.2020.117465](https://doi.org/10.1016/j.neuroimage.2020.117465)
"""
+
import argparse
import sys
diff --git a/mne_bids_pipeline/tests/configs/config_ds000247.py b/mne_bids_pipeline/tests/configs/config_ds000247.py
index 0a321d8fe..395b538ab 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000247.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000247.py
@@ -1,4 +1,5 @@
"""OMEGA Resting State Sample Data."""
+
import numpy as np
study_name = "ds000247"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py b/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py
index 9b77f36b5..547721753 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py
@@ -1,4 +1,5 @@
"""MNE Sample Data: BEM from FLASH images."""
+
study_name = "ds000248"
bids_root = "~/mne_data/ds000248"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000248_FLASH_BEM"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_base.py b/mne_bids_pipeline/tests/configs/config_ds000248_base.py
index 9888e1cee..89f39cd8b 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_base.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_base.py
@@ -1,4 +1,5 @@
"""MNE Sample Data: M/EEG combined processing."""
+
import mne
study_name = "ds000248"
@@ -22,6 +23,7 @@
def noise_cov(bp):
+ """Estimate the noise covariance."""
# Use pre-stimulus period as noise source
bp = bp.copy().update(suffix="epo")
if not bp.fpath.exists():
@@ -46,5 +48,6 @@ def noise_cov(bp):
def mri_t1_path_generator(bids_path):
+ """Return the path to a T1 image."""
# don't really do any modifications – just for testing!
return bids_path
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_ica.py b/mne_bids_pipeline/tests/configs/config_ds000248_ica.py
index ebc0ddc88..e1b090e30 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_ica.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_ica.py
@@ -1,4 +1,5 @@
"""MNE Sample Data: ICA."""
+
study_name = 'MNE "sample" dataset'
bids_root = "~/mne_data/ds000248"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000248_ica"
diff --git a/mne_bids_pipeline/tests/configs/config_ds001971.py b/mne_bids_pipeline/tests/configs/config_ds001971.py
index 7a64f940d..befc0f30e 100644
--- a/mne_bids_pipeline/tests/configs/config_ds001971.py
+++ b/mne_bids_pipeline/tests/configs/config_ds001971.py
@@ -3,7 +3,6 @@
See ds001971 on OpenNeuro: https://github.com/OpenNeuroDatasets/ds001971
"""
-
study_name = "ds001971"
bids_root = "~/mne_data/ds001971"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds001971"
diff --git a/mne_bids_pipeline/tests/configs/config_ds003104.py b/mne_bids_pipeline/tests/configs/config_ds003104.py
index 2414371c0..3e4b9e44d 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003104.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003104.py
@@ -1,4 +1,5 @@
"""Somato."""
+
study_name = "MNE-somato-data-anonymized"
bids_root = "~/mne_data/ds003104"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds003104"
diff --git a/mne_bids_pipeline/tests/configs/config_ds003392.py b/mne_bids_pipeline/tests/configs/config_ds003392.py
index d84475cff..b004c4345 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003392.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003392.py
@@ -1,4 +1,5 @@
"""hMT+ Localizer."""
+
study_name = "localizer"
bids_root = "~/mne_data/ds003392"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds003392"
diff --git a/mne_bids_pipeline/tests/configs/config_ds004107.py b/mne_bids_pipeline/tests/configs/config_ds004107.py
index 6e0eb1cc6..a46679e54 100644
--- a/mne_bids_pipeline/tests/configs/config_ds004107.py
+++ b/mne_bids_pipeline/tests/configs/config_ds004107.py
@@ -6,6 +6,7 @@
Paving the way for cross-site pooling of magnetoencephalography (MEG) data.
International Congress Series, Volume 1300, Pages 615-618.
"""
+
# This has auditory, median, indx, visual, rest, and emptyroom but let's just
# process the auditory (it's the smallest after rest)
study_name = "ds004107"
diff --git a/mne_bids_pipeline/tests/configs/config_ds004229.py b/mne_bids_pipeline/tests/configs/config_ds004229.py
index 355dbcf6d..9cedd6491 100644
--- a/mne_bids_pipeline/tests/configs/config_ds004229.py
+++ b/mne_bids_pipeline/tests/configs/config_ds004229.py
@@ -2,6 +2,7 @@
https://openneuro.org/datasets/ds004229
"""
+
import mne
import numpy as np
diff --git a/mne_bids_pipeline/tests/conftest.py b/mne_bids_pipeline/tests/conftest.py
index 64571c4a2..2ac1e9403 100644
--- a/mne_bids_pipeline/tests/conftest.py
+++ b/mne_bids_pipeline/tests/conftest.py
@@ -2,6 +2,7 @@
def pytest_addoption(parser):
+ """Add pytest command line options."""
parser.addoption(
"--download",
action="store_true",
@@ -10,6 +11,7 @@ def pytest_addoption(parser):
def pytest_configure(config):
+ """Add pytest configuration settings."""
# register an additional marker
config.addinivalue_line("markers", "dataset_test: mark that a test runs a dataset")
warning_lines = r"""
diff --git a/mne_bids_pipeline/tests/datasets.py b/mne_bids_pipeline/tests/datasets.py
index 95c846bd3..92e365cda 100644
--- a/mne_bids_pipeline/tests/datasets.py
+++ b/mne_bids_pipeline/tests/datasets.py
@@ -5,6 +5,8 @@
# If not supplied below, the effective defaults are listed in comments
class DATASET_OPTIONS_T(TypedDict, total=False):
+ """A container for sources, hash, include and excludes of a dataset."""
+
git: str # ""
openneuro: str # ""
osf: str # ""
diff --git a/mne_bids_pipeline/tests/test_cli.py b/mne_bids_pipeline/tests/test_cli.py
index 45532c3ce..5b8b33b5a 100644
--- a/mne_bids_pipeline/tests/test_cli.py
+++ b/mne_bids_pipeline/tests/test_cli.py
@@ -9,6 +9,7 @@
def test_config_generation(tmp_path, monkeypatch):
+ """Test the generation of a default config file."""
cmd = ["mne_bids_pipeline", "--create-config"]
monkeypatch.setattr(sys, "argv", cmd)
with pytest.raises(SystemExit, match="2"):
diff --git a/mne_bids_pipeline/tests/test_documented.py b/mne_bids_pipeline/tests/test_documented.py
index 906bba3f6..db5bbbd06 100644
--- a/mne_bids_pipeline/tests/test_documented.py
+++ b/mne_bids_pipeline/tests/test_documented.py
@@ -1,4 +1,5 @@
"""Test that all config values are documented."""
+
import ast
import os
import re
diff --git a/mne_bids_pipeline/tests/test_run.py b/mne_bids_pipeline/tests/test_run.py
index 041fef894..cc69c9efc 100644
--- a/mne_bids_pipeline/tests/test_run.py
+++ b/mne_bids_pipeline/tests/test_run.py
@@ -1,4 +1,5 @@
"""Download test data and run a test suite."""
+
import os
import shutil
import sys
@@ -138,6 +139,7 @@ class _TestOptionsT(TypedDict, total=False):
@pytest.fixture()
def dataset_test(request):
+ """Provide a defined context for our dataset tests."""
# There is probably a cleaner way to get this param, but this works for now
capsys = request.getfixturevalue("capsys")
dataset = request.getfixturevalue("dataset")
diff --git a/mne_bids_pipeline/tests/test_validation.py b/mne_bids_pipeline/tests/test_validation.py
index e99bfecf9..c76130cc0 100644
--- a/mne_bids_pipeline/tests/test_validation.py
+++ b/mne_bids_pipeline/tests/test_validation.py
@@ -1,3 +1,5 @@
+"""Test the pipeline configuration import validator."""
+
import pytest
from mne_bids_pipeline._config_import import _import_config
diff --git a/mne_bids_pipeline/typing.py b/mne_bids_pipeline/typing.py
index c52484f15..61f2abdeb 100644
--- a/mne_bids_pipeline/typing.py
+++ b/mne_bids_pipeline/typing.py
@@ -1,4 +1,4 @@
-"""Typing."""
+"""Custom data types for MNE-BIDS-Pipeline."""
import pathlib
import sys
@@ -18,22 +18,22 @@
class ArbitraryContrast(TypedDict):
+ """Statistical contrast with arbitrary weights."""
+
name: str
conditions: list[str]
weights: list[float]
class LogKwargsT(TypedDict):
+ """Container for logger keyword arguments."""
+
msg: str
extra: dict[str, str]
-class ReferenceRunParams(TypedDict):
- montage: mne.channels.DigMontage
- dev_head_t: mne.Transform
-
-
def assert_float_array_like(val):
+ """Convert the input into a NumPy float array."""
# https://docs.pydantic.dev/latest/errors/errors/#custom-errors
# Should raise ValueError or AssertionError... NumPy should do this for us
return np.array(val, dtype="float")
@@ -47,6 +47,7 @@ def assert_float_array_like(val):
def assert_dig_montage(val):
+ """Assert that the input is a DigMontage."""
assert isinstance(val, mne.channels.DigMontage)
return val
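A sketch illustrating the `ArbitraryContrast` shape documented above; the condition names are invented for demonstration:

```python
# Hypothetical contrast: weight "faces" against "scrambled" equally.
contrast: ArbitraryContrast = {
    "name": "faces_vs_scrambled",
    "conditions": ["faces", "scrambled"],
    "weights": [1.0, -1.0],
}
```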
diff --git a/pyproject.toml b/pyproject.toml
index 0b5465fdf..e2b222fa6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -34,7 +34,7 @@ dependencies = [
"jupyter-server-proxy", # to have dask and jupyter working together
"scikit-learn",
"pandas",
- "pyarrow", # from pandas
+ "pyarrow", # from pandas
"seaborn",
"json_tricks",
"pydantic >= 2.0.0",
@@ -118,13 +118,7 @@ junit_family = "xunit2"
select = ["A", "B006", "D", "E", "F", "I", "W", "UP"]
exclude = ["**/freesurfer/contrib", "dist/", "build/"]
ignore = [
- "D100", # Missing docstring in public module
- "D101", # Missing docstring in public class
- "D103", # Missing docstring in public function
- "D104", # Missing docstring in public package
- "D413", # Missing blank line after last section
- "UP031", # Use format specifiers instead of percent format
- "UP035", # Import Iterable from collections.abc
+ "D104", # Missing docstring in public package
]
[tool.ruff.lint.pydocstyle]
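With D100, D101, and D103 no longer ignored, public modules, classes, and functions must carry docstrings — which explains the many one-line docstring additions in this patch. A minimal sketch of code satisfying the newly enforced rules:

```python
"""Module docstring, now required by rule D100."""


class Widget:
    """Class docstring, now required by rule D101."""


def run() -> None:
    """Function docstring, now required by rule D103."""
```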
From 5afe560f257dfd40c35cbb9bcb940fdcfd9727ca Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Thu, 7 Mar 2024 21:04:20 +0100
Subject: [PATCH 078/132] Improve logging during cache invalidation (#877)
---
docs/source/v1.7.md.inc | 6 +++++-
mne_bids_pipeline/_run.py | 4 ++--
2 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/docs/source/v1.7.md.inc b/docs/source/v1.7.md.inc
index fb3be1914..8af37eb05 100644
--- a/docs/source/v1.7.md.inc
+++ b/docs/source/v1.7.md.inc
@@ -1,6 +1,10 @@
## v1.7.0 (unreleased)
-[//]: # (### :new: New features & enhancements)
+### :new: New features & enhancements
+
+- Improved logging message during cache invalidation: We now print the selected
+ [`memory_file_method`][mne_bids_pipeline._config.memory_file_method] ("hash" or "mtime").
+ Previously, we'd always print "hash". (#876 by @hoechenberger)
[//]: # (- Whatever (#000 by @whoever))
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index 3e4e5da51..73e4c6082 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -230,8 +230,8 @@ def wrapper(*args, **kwargs):
got_hash = hash_(key, fname, kind="out")[1]
if this_hash != got_hash:
msg = (
- f"Output file hash mismatch for {str(fname)}, "
- "will recompute …"
+ f"Output file {self.memory_file_method} mismatch for "
+ f"{str(fname)}, will recompute …"
)
emoji = "🚫"
bad_out_files = True
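For reference, a hypothetical user-config sketch selecting the method that the improved message now reports:

```python
# Hypothetical user config sketch: check cached outputs by modification
# time; the cache-invalidation log message above reflects this choice.
memory_file_method = "mtime"  # or "hash"
```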
From b52742e0b1a08deac963f59dafff4ad2d0578a62 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Thu, 7 Mar 2024 21:04:41 +0100
Subject: [PATCH 079/132] Update contributing instructions and remove outdated
building instructions (#875)
---
BUILDING.md | 9 ---------
CONTRIBUTING.md | 14 +++++++-------
pyproject.toml | 1 -
3 files changed, 7 insertions(+), 17 deletions(-)
delete mode 100644 BUILDING.md
diff --git a/BUILDING.md b/BUILDING.md
deleted file mode 100644
index 41f1e327d..000000000
--- a/BUILDING.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Building a release
-
-* Tag a new release with `git` if necessary.
-* Create `sdist` distribution:
-
- ```shell
- pip install -q build
- python -m build # will build sdist and wheel
- ```
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3fafbf0d0..cea32e59d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,9 +1,9 @@
-# Overview
+# Contributing to MNE-BIDS-Pipeline
Contributors to MNE-BIDS-Pipeline are expected to follow our
[Code of Conduct](https://github.com/mne-tools/.github/blob/main/CODE_OF_CONDUCT.md).
-# Installation
+## Installation
First, you need to make sure you have MNE-Python installed and working on your
system. See the [installation instructions](http://martinos.org/mne/stable/install_mne_python.html).
@@ -26,9 +26,9 @@ If you do not know how to use git, download the pipeline as a zip file
Finally, for source analysis you'll also need `FreeSurfer`, follow the
instructions on [their website](https://surfer.nmr.mgh.harvard.edu/).
-# Testing
+## Testing
-## Running the tests, and continuous integration
+### Running the tests and continuous integration
The tests are run using `pytest`. You can run them by calling
`pytest mne_bids_pipeline` to run
@@ -42,7 +42,7 @@ For every pull request or merge into the `main` branch of the
[CircleCI](https://circleci.com/gh/brainthemind/CogBrainDyn_MEG_Pipeline)
will run tests as defined in `./circleci/config.yml`.
-## Debugging
+### Debugging
To run the test in debugging mode, just pass `--pdb` to the `pytest` call
as usual. This will place you in debugging mode on failure.
@@ -50,9 +50,9 @@ See the
[pdb help](https://docs.python.org/3/library/pdb.html#debugger-commands)
for more commands.
-## Config files
+### Config files
-Nested in the `/tests` directory is a `/configs` directory, which contains
+Nested in the `tests` directory is a `configs` directory, which contains
config files for specific test datasets. For example, the `config_ds001810.py`
file specifies parameters only for the `ds001810` data, which should overwrite
the more general parameters in the main `_config.py` file.
diff --git a/pyproject.toml b/pyproject.toml
index e2b222fa6..ef0db82ec 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -95,7 +95,6 @@ exclude = [
"/docs",
"/docs/source/examples/gen_examples.py", # specify explicitly because its exclusion is negated in .gitignore
"/Makefile",
- "/BUILDING.md",
"/CONTRIBUTING.md",
"ignore_words.txt",
]
From 60e6a54a88b9faa317b6a5b1a87abd5966b5af8e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Tue, 12 Mar 2024 18:40:30 +0100
Subject: [PATCH 080/132] Fix handling of `analyze_channels` for EEG data,
where some steps (like ERP calculation) would previously fail (#883)
Co-authored-by: Eric Larson
---
docs/source/v1.7.md.inc | 4 +-
mne_bids_pipeline/_config.py | 6 +-
mne_bids_pipeline/_config_utils.py | 31 ++++----
.../steps/sensor/_01_make_evoked.py | 2 +
.../steps/sensor/_06_make_cov.py | 2 +
.../tests/configs/config_ERP_CORE.py | 70 +++++++++++++++++++
6 files changed, 96 insertions(+), 19 deletions(-)
diff --git a/docs/source/v1.7.md.inc b/docs/source/v1.7.md.inc
index 8af37eb05..7f708cc84 100644
--- a/docs/source/v1.7.md.inc
+++ b/docs/source/v1.7.md.inc
@@ -16,7 +16,9 @@
[//]: # (- Whatever (#000 by @whoever))
-[//]: # (### :bug: Bug fixes)
+### :bug: Bug fixes
+
+- Fixed an error when using [`analyze_channels`][mne_bids_pipeline._config.analyze_channels] with EEG data that caused some steps (e.g., ERP creation) to fail. (#883 by @hoechenberger)
[//]: # (- Whatever (#000 by @whoever))
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 1d5406410..93a3b26bb 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -3,7 +3,7 @@
from collections.abc import Sequence
from typing import Annotated, Any, Callable, Literal, Optional, Union
-from annotated_types import Ge, Interval, Len
+from annotated_types import Ge, Interval, Len, MinLen
from mne import Covariance
from mne_bids import BIDSPath
@@ -382,7 +382,9 @@
```
"""
-analyze_channels: Union[Literal["all", "ch_types"], Sequence["str"]] = "ch_types"
+analyze_channels: Union[
+ Literal["all", "ch_types"], Annotated[Sequence["str"], MinLen(1)]
+] = "ch_types"
"""
The names of the channels to analyze during ERP/ERF and time-frequency analysis
steps. For certain paradigms, e.g. EEG ERP research, it is common to constrain
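A minimal sketch (assuming pydantic v2 and `annotated-types`, both dependencies in this patch series) of how the `MinLen(1)` annotation rejects an empty channel list while still accepting the literals:

```python
from collections.abc import Sequence
from typing import Annotated, Literal, Union

from annotated_types import MinLen
from pydantic import TypeAdapter, ValidationError

AnalyzeChannels = Union[
    Literal["all", "ch_types"], Annotated[Sequence[str], MinLen(1)]
]
adapter = TypeAdapter(AnalyzeChannels)

adapter.validate_python(["Pz", "Cz"])  # OK: non-empty sequence
adapter.validate_python("ch_types")    # OK: literal
try:
    adapter.validate_python([])        # rejected: fewer than 1 item
except ValidationError as err:
    print(err)
```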
diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py
index 784752028..701cd93a3 100644
--- a/mne_bids_pipeline/_config_utils.py
+++ b/mne_bids_pipeline/_config_utils.py
@@ -423,22 +423,21 @@ def get_mf_ctc_fname(
def _restrict_analyze_channels(
inst: RawEpochsEvokedT, cfg: SimpleNamespace
) -> RawEpochsEvokedT:
- if cfg.analyze_channels:
- analyze_channels = cfg.analyze_channels
- if cfg.analyze_channels == "ch_types":
- analyze_channels = cfg.ch_types
- inst.apply_proj()
- # We special-case the average reference here to work around a situation
- # where e.g. `analyze_channels` might contain only a single channel:
- # `concatenate_epochs` below will then fail when trying to create /
- # apply the projection. We can avoid this by removing an existing
- # average reference projection here, and applying the average reference
- # directly – without going through a projector.
- elif "eeg" in cfg.ch_types and cfg.eeg_reference == "average":
- inst.set_eeg_reference("average")
- else:
- inst.apply_proj()
- inst.pick(analyze_channels)
+ analyze_channels = cfg.analyze_channels
+ if cfg.analyze_channels == "ch_types":
+ analyze_channels = cfg.ch_types
+ inst.apply_proj()
+ # We special-case the average reference here to work around a situation
+ # where e.g. `analyze_channels` might contain only a single channel:
+ # `concatenate_epochs` below will then fail when trying to create /
+ # apply the projection. We can avoid this by removing an existing
+ # average reference projection here, and applying the average reference
+ # directly – without going through a projector.
+ elif "eeg" in cfg.ch_types and cfg.eeg_reference == "average":
+ inst.set_eeg_reference("average")
+ else:
+ inst.apply_proj()
+ inst.pick(analyze_channels)
return inst
diff --git a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
index 63d1854ae..71835faee 100644
--- a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
+++ b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
@@ -11,6 +11,7 @@
_pl,
_restrict_analyze_channels,
get_all_contrasts,
+ get_eeg_reference,
get_sessions,
get_subjects,
)
@@ -172,6 +173,7 @@ def get_config(
contrasts=get_all_contrasts(config),
noise_cov=_sanitize_callable(config.noise_cov),
analyze_channels=config.analyze_channels,
+ eeg_reference=get_eeg_reference(config),
ch_types=config.ch_types,
report_evoked_n_time_points=config.report_evoked_n_time_points,
**_bids_kwargs(config=config),
diff --git a/mne_bids_pipeline/steps/sensor/_06_make_cov.py b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
index 1fb467e0a..075abe472 100644
--- a/mne_bids_pipeline/steps/sensor/_06_make_cov.py
+++ b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
@@ -13,6 +13,7 @@
from ..._config_utils import (
_bids_kwargs,
_restrict_analyze_channels,
+ get_eeg_reference,
get_noise_cov_bids_path,
get_sessions,
get_subjects,
@@ -292,6 +293,7 @@ def get_config(
conditions=config.conditions,
contrasts=config.contrasts,
analyze_channels=config.analyze_channels,
+ eeg_reference=get_eeg_reference(config),
noise_cov_method=config.noise_cov_method,
**_bids_kwargs(config=config),
)
diff --git a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
index 953b19498..0ae706aa9 100644
--- a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
+++ b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
@@ -212,6 +212,41 @@
}
eeg_reference = ["P9", "P10"]
+ # Analyze all EEG channels -- we only specify the channels here for the purpose of
+ # demonstration
+ analyze_channels = [
+ "FP1",
+ "F3",
+ "F7",
+ "FC3",
+ "C3",
+ "C5",
+ "P3",
+ "P7",
+ "P9",
+ "PO7",
+ "PO3",
+ "O1",
+ "Oz",
+ "Pz",
+ "CPz",
+ "FP2",
+ "Fz",
+ "F4",
+ "F8",
+ "FC4",
+ "FCz",
+ "Cz",
+ "C4",
+ "C6",
+ "P4",
+ "P8",
+ "P10",
+ "PO8",
+ "PO4",
+ "O2",
+ ]
+
epochs_tmin = -0.2
epochs_tmax = 0.8
baseline = (None, 0)
@@ -224,6 +259,41 @@
}
eeg_reference = "average"
+ # Analyze all EEG channels -- we only specify the channels here for the purpose of
+ # demonstration
+ analyze_channels = [
+ "FP1",
+ "F3",
+ "F7",
+ "FC3",
+ "C3",
+ "C5",
+ "P3",
+ "P7",
+ "P9",
+ "PO7",
+ "PO3",
+ "O1",
+ "Oz",
+ "Pz",
+ "CPz",
+ "FP2",
+ "Fz",
+ "F4",
+ "F8",
+ "FC4",
+ "FCz",
+ "Cz",
+ "C4",
+ "C6",
+ "P4",
+ "P8",
+ "P10",
+ "PO8",
+ "PO4",
+ "O2",
+ ]
+
ica_n_components = 30 - 1
for i in range(1, 180 + 1):
orig_name = f"stimulus/{i}"
From 0d2b87a1b85aa9de08911dee318f7ffed4310cbf Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 13 Mar 2024 02:26:15 +0000
Subject: [PATCH 081/132] [pre-commit.ci] pre-commit autoupdate (#885)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ff6efb603..6014fece7 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,7 +6,7 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.3.0
+ rev: v0.3.2
hooks:
- id: ruff
args: ["--fix"]
From f0f0baf69cbaa3d06e96d6b7aa403e6775821d4a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Thu, 14 Mar 2024 15:37:41 +0100
Subject: [PATCH 082/132] Remove ancient config parameter `shortest_event`
(#888)
---
docs/hooks.py | 1 -
docs/source/changes.md | 2 ++
docs/source/v1.8.md.inc | 24 ++++++++++++++++++++++++
mne_bids_pipeline/_config.py | 6 ------
4 files changed, 26 insertions(+), 7 deletions(-)
create mode 100644 docs/source/v1.8.md.inc
diff --git a/docs/hooks.py b/docs/hooks.py
index af79eef0d..b1d505916 100644
--- a/docs/hooks.py
+++ b/docs/hooks.py
@@ -214,7 +214,6 @@ def __init__(self):
"reader_extra_params",
"read_raw_bids_verbose",
"plot_psd_for_runs",
- "shortest_event",
"find_breaks",
"min_break_duration",
"t_break_annot_start_after_previous_event",
diff --git a/docs/source/changes.md b/docs/source/changes.md
index 3388f2f40..6f23f2563 100644
--- a/docs/source/changes.md
+++ b/docs/source/changes.md
@@ -1,3 +1,5 @@
+{% include-markdown "./v1.8.md.inc" %}
+
{% include-markdown "./v1.7.md.inc" %}
{% include-markdown "./v1.6.md.inc" %}
diff --git a/docs/source/v1.8.md.inc b/docs/source/v1.8.md.inc
new file mode 100644
index 000000000..c4e5db937
--- /dev/null
+++ b/docs/source/v1.8.md.inc
@@ -0,0 +1,24 @@
+## v1.8.0 (unreleased)
+
+[//]: # (### :new: New features & enhancements)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :warning: Behavior changes)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :package: Requirements)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :bug: Bug fixes)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+### :medical_symbol: Code health
+
+- We removed the unused setting `shortest_event`. It was a relic of the early days of the pipeline
+ and hasn't been in use for a long time. (#888 by @hoechenberger)
+
+[//]: # (- Whatever (#000 by @whoever))
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 93a3b26bb..213d26651 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -439,12 +439,6 @@
to a defined state.
"""
-shortest_event: int = 1
-"""
-Minimum number of samples an event must last. If the
-duration is less than this, an exception will be raised.
-"""
-
# %%
# # Preprocessing
From 5f046fca80b1a702328c5dd6d7f03fa135f62ddf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Thu, 14 Mar 2024 16:28:10 +0100
Subject: [PATCH 083/132] Add missing release dates to changelog (#887)
---
docs/source/v1.6.md.inc | 4 +---
docs/source/v1.7.md.inc | 2 +-
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 8a0dfade4..b29871c11 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -1,6 +1,4 @@
-[//]: # (Don't forget to add this to changes.md as an include!)
-
-## vX.Y.0 (unreleased)
+## v1.6.0 (2024-03-01)
:new: New features & enhancements
diff --git a/docs/source/v1.7.md.inc b/docs/source/v1.7.md.inc
index 7f708cc84..3db0f1dfd 100644
--- a/docs/source/v1.7.md.inc
+++ b/docs/source/v1.7.md.inc
@@ -1,4 +1,4 @@
-## v1.7.0 (unreleased)
+## v1.7.0 (2024-03-13)
### :new: New features & enhancements
From 04b9facf2b9c2e5cde6b476cb95a20e9a6263e94 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Thu, 14 Mar 2024 16:30:40 +0100
Subject: [PATCH 084/132] Explicitly depend on `annotated-types` (#886)
Co-authored-by: Eric Larson
---
docs/source/v1.8.md.inc | 4 +++-
pyproject.toml | 1 +
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/docs/source/v1.8.md.inc b/docs/source/v1.8.md.inc
index c4e5db937..dfce7a2a7 100644
--- a/docs/source/v1.8.md.inc
+++ b/docs/source/v1.8.md.inc
@@ -8,7 +8,9 @@
[//]: # (- Whatever (#000 by @whoever))
-[//]: # (### :package: Requirements)
+### :package: Requirements
+
+- MNE-BIDS-Pipeline now explicitly depends on `annotated-types` (#886 by @hoechenberger)
[//]: # (- Whatever (#000 by @whoever))
diff --git a/pyproject.toml b/pyproject.toml
index ef0db82ec..7c4928f35 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,6 +38,7 @@ dependencies = [
"seaborn",
"json_tricks",
"pydantic >= 2.0.0",
+ "annotated-types",
"rich",
"python-picard",
"qtpy",
From 1c2a081997922870494797eead449f65df8b22c5 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Thu, 14 Mar 2024 12:13:59 -0400
Subject: [PATCH 085/132] MAINT: Check that all options are used (#889)
---
docs/hooks.py | 235 +---------------
docs/source/v1.8.md.inc | 4 +-
mne_bids_pipeline/_config.py | 11 -
mne_bids_pipeline/_docs.py | 260 ++++++++++++++++++
.../tests/configs/config_ERP_CORE.py | 1 -
.../configs/config_MNE_phantom_KIT_data.py | 1 -
.../tests/configs/config_ds000117.py | 1 -
.../tests/configs/config_ds000246.py | 1 -
.../tests/configs/config_ds000247.py | 5 +-
.../configs/config_ds000248_FLASH_BEM.py | 1 -
.../tests/configs/config_ds000248_T1_BEM.py | 1 -
.../tests/configs/config_ds000248_base.py | 1 -
.../configs/config_ds000248_coreg_surfaces.py | 1 -
.../tests/configs/config_ds000248_ica.py | 1 -
.../tests/configs/config_ds000248_no_mri.py | 1 -
.../tests/configs/config_ds001810.py | 1 -
.../tests/configs/config_ds001971.py | 1 -
.../tests/configs/config_ds003104.py | 1 -
.../tests/configs/config_ds003392.py | 1 -
.../tests/configs/config_ds003775.py | 1 -
.../tests/configs/config_ds004107.py | 5 +-
.../tests/configs/config_ds004229.py | 1 -
.../configs/config_eeg_matchingpennies.py | 1 -
mne_bids_pipeline/tests/test_documented.py | 18 ++
24 files changed, 285 insertions(+), 270 deletions(-)
create mode 100644 mne_bids_pipeline/_docs.py
diff --git a/docs/hooks.py b/docs/hooks.py
index b1d505916..81213a9c2 100644
--- a/docs/hooks.py
+++ b/docs/hooks.py
@@ -1,250 +1,17 @@
"""Custom hooks for MkDocs-Material."""
-import ast
-import inspect
import logging
-from collections import defaultdict
-from pathlib import Path
from typing import Any
from mkdocs.config.defaults import MkDocsConfig
from mkdocs.structure.files import Files
from mkdocs.structure.pages import Page
-from tqdm import tqdm
-from mne_bids_pipeline import _config_utils
+from mne_bids_pipeline._docs import _ParseConfigSteps
logger = logging.getLogger("mkdocs")
config_updated = False
-
-
-class _ParseConfigSteps:
- def __init__(self):
- self.steps = defaultdict(list)
- # We don't need to parse the config itself, just the steps
- no_config = {
- "freesurfer/_01_recon_all",
- }
- ignore_options = {
- "PIPELINE_NAME",
- "VERSION",
- "CODE_URL",
- }
- ignore_calls = {
- # TODO: These are used a lot at the very beginning, so adding them will lead
- # to long lists. Instead, let's just mention at the top of General that
- # messing with basic BIDS params will affect almost every step.
- "_bids_kwargs",
- "_import_data_kwargs",
- "get_runs",
- "get_subjects",
- "get_sessions",
- }
- manual_kws = {
- "source/_04_make_forward:get_config:t1_bids_path": (
- "mri_t1_path_generator",
- ),
- "source/_04_make_forward:get_config:landmarks_kind": (
- "mri_landmarks_kind",
- ),
- "preprocessing/_01_data_quality:get_config:extra_kwargs": (
- "mf_cal_fname",
- "mf_ctc_fname",
- "mf_head_origin",
- "find_flat_channels_meg",
- "find_noisy_channels_meg",
- ),
- }
- # Add a few helper functions
- for func in (
- _config_utils.get_eeg_reference,
- _config_utils.get_all_contrasts,
- _config_utils.get_decoding_contrasts,
- _config_utils.get_fs_subject,
- _config_utils.get_fs_subjects_dir,
- _config_utils.get_mf_cal_fname,
- _config_utils.get_mf_ctc_fname,
- ):
- this_list = []
- for attr in ast.walk(ast.parse(inspect.getsource(func))):
- if not isinstance(attr, ast.Attribute):
- continue
- if not (isinstance(attr.value, ast.Name) and attr.value.id == "config"):
- continue
- if attr.attr not in this_list:
- this_list.append(attr.attr)
- manual_kws[func.__name__] = tuple(this_list)
-
- for module in tqdm(
- sum(_config_utils._get_step_modules().values(), tuple()),
- desc="Generating option->step mapping",
- ):
- step = "/".join(module.__name__.split(".")[-2:])
- found = False # found at least one?
- # Walk the module file for "get_config*" functions (can be multiple!)
- for func in ast.walk(ast.parse(Path(module.__file__).read_text("utf-8"))):
- if not isinstance(func, ast.FunctionDef):
- continue
- where = f"{step}:{func.name}"
- # Also look at config.* args in main(), e.g. config.recreate_bem
- # and config.recreate_scalp_surface
- if func.name == "main":
- for call in ast.walk(func):
- if not isinstance(call, ast.Call):
- continue
- for keyword in call.keywords:
- if not isinstance(keyword.value, ast.Attribute):
- continue
- if keyword.value.value.id != "config":
- continue
- if keyword.value.attr in ("exec_params",):
- continue
- self._add_step_option(step, keyword.value.attr)
- # Also look for root-level conditionals like use_maxwell_filter
- # or spatial_filter
- for cond in ast.iter_child_nodes(func):
- # is a conditional
- if not isinstance(cond, ast.If):
- continue
- # has a return statement
- if not any(isinstance(c, ast.Return) for c in ast.walk(cond)):
- continue
- # look at all attributes in the conditional
- for attr in ast.walk(cond.test):
- if not isinstance(attr, ast.Attribute):
- continue
- if attr.value.id != "config":
- continue
- self._add_step_option(step, attr.attr)
- # Now look at get_config* functions
- if not func.name.startswith("get_config"):
- continue
- found = True
- for call in ast.walk(func):
- if not isinstance(call, ast.Call):
- continue
- if call.func.id != "SimpleNamespace":
- continue
- break
- else:
- raise RuntimeError(f"Could not find SimpleNamespace in {func}")
- assert call.args == []
- for keyword in call.keywords:
- if isinstance(keyword.value, ast.Call):
- key = keyword.value.func.id
- if key in ignore_calls:
- continue
- if key in manual_kws:
- for option in manual_kws[key]:
- self._add_step_option(step, option)
- continue
- if keyword.value.func.id == "_sanitize_callable":
- assert len(keyword.value.args) == 1
- assert isinstance(keyword.value.args[0], ast.Attribute)
- assert keyword.value.args[0].value.id == "config"
- self._add_step_option(step, keyword.value.args[0].attr)
- continue
- raise RuntimeError(
- f"{where} cannot handle call {keyword.value.func.id=}"
- )
- if isinstance(keyword.value, ast.Name):
- key = f"{where}:{keyword.value.id}"
- if key in manual_kws:
- for option in manual_kws[f"{where}:{keyword.value.id}"]:
- self._add_step_option(step, option)
- continue
- raise RuntimeError(f"{where} cannot handle Name {key=}")
- if isinstance(keyword.value, ast.IfExp): # conditional
- if keyword.arg == "processing": # inline conditional for proc
- continue
- if not isinstance(keyword.value, ast.Attribute):
- raise RuntimeError(
- f"{where} cannot handle type {keyword.value=}"
- )
- option = keyword.value.attr
- if option in ignore_options:
- continue
- assert keyword.value.value.id == "config", f"{where} {keyword.value.value.id}" # noqa: E501 # fmt: skip
- self._add_step_option(step, option)
- if step in no_config:
- assert not found, f"Found unexpected get_config* in {step}"
- else:
- assert found, f"Could not find get_config* in {step}"
- # Some don't show up so force them to be empty
- force_empty = (
- # Eventually we could deduplicate these with the execution.md list
- "n_jobs",
- "parallel_backend",
- "dask_open_dashboard",
- "dask_temp_dir",
- "dask_worker_memory_limit",
- "log_level",
- "mne_log_level",
- "on_error",
- "memory_location",
- "memory_file_method",
- "memory_subdir",
- "memory_verbose",
- "config_validation",
- "interactive",
- # Plus some BIDS one we don't detect because _bids_kwargs etc. above,
- # which we could cross-check against the general.md list. A notable
- # exception is random_state, since this does have more localized effects.
- "study_name",
- "bids_root",
- "deriv_root",
- "subjects_dir",
- "sessions",
- "acq",
- "proc",
- "rec",
- "space",
- "task",
- "runs",
- "exclude_runs",
- "subjects",
- "crop_runs",
- "process_empty_room",
- "process_rest",
- "eeg_bipolar_channels",
- "eeg_reference",
- "eeg_template_montage",
- "drop_channels",
- "reader_extra_params",
- "read_raw_bids_verbose",
- "plot_psd_for_runs",
- "find_breaks",
- "min_break_duration",
- "t_break_annot_start_after_previous_event",
- "t_break_annot_stop_before_next_event",
- "rename_events",
- "on_rename_missing_events",
- "mf_reference_run", # TODO: Make clearer that this changes a lot
- "fix_stim_artifact",
- "stim_artifact_tmin",
- "stim_artifact_tmax",
- # And some that we force to be empty because they affect too many things
- # and what they affect is an incomplete list anyway
- "exclude_subjects",
- "ch_types",
- "task_is_rest",
- "data_type",
- )
- for key in force_empty:
- self.steps[key] = list()
- for key, val in self.steps.items():
- assert len(val) == len(set(val)), f"{key} {val}"
- self.steps = {k: tuple(v) for k, v in self.steps.items()} # no defaultdict
-
- def _add_step_option(self, step, option):
- if step not in self.steps[option]:
- self.steps[option].append(step)
-
- def __call__(self, option: str) -> list[str]:
- return self.steps[option]
-
-
_parse_config_steps = _ParseConfigSteps()
diff --git a/docs/source/v1.8.md.inc b/docs/source/v1.8.md.inc
index dfce7a2a7..f9f583fb9 100644
--- a/docs/source/v1.8.md.inc
+++ b/docs/source/v1.8.md.inc
@@ -20,7 +20,7 @@
### :medical_symbol: Code health
-- We removed the unused setting `shortest_event`. It was a relic of the early days of the pipeline
-  and hasn't been in use for a long time. (#888 by @hoechenberger)
+- We removed the unused settings `shortest_event` and `study_name`. They were relics of the early days of the pipeline
+  and haven't been in use for a long time. (#888, #889 by @hoechenberger and @larsoner)
[//]: # (- Whatever (#000 by @whoever))
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 213d26651..07c41ece7 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -17,17 +17,6 @@
# %%
# # General settings
-study_name: str = ""
-"""
-Specify the name of your study. It will be used to populate filenames for
-saving the analysis results.
-
-???+ example "Example"
- ```python
- study_name = 'my-study'
- ```
-"""
-
bids_root: Optional[PathLike] = None
"""
Specify the BIDS root directory. Pass an empty string or `None` to use
diff --git a/mne_bids_pipeline/_docs.py b/mne_bids_pipeline/_docs.py
new file mode 100644
index 000000000..440b34690
--- /dev/null
+++ b/mne_bids_pipeline/_docs.py
@@ -0,0 +1,260 @@
+import ast
+import inspect
+import re
+from collections import defaultdict
+from pathlib import Path
+
+from tqdm import tqdm
+
+from . import _config_utils, _import_data
+
+_CONFIG_RE = re.compile(r"config\.([a-zA-Z_]+)")
+
+_NO_CONFIG = {
+ "freesurfer/_01_recon_all",
+}
+_IGNORE_OPTIONS = {
+ "PIPELINE_NAME",
+ "VERSION",
+ "CODE_URL",
+}
+# We don't need to parse the config itself, just the steps
+_MANUAL_KWS = {
+ "source/_04_make_forward:get_config:t1_bids_path": ("mri_t1_path_generator",),
+ "source/_04_make_forward:get_config:landmarks_kind": ("mri_landmarks_kind",),
+ "preprocessing/_01_data_quality:get_config:extra_kwargs": (
+ "mf_cal_fname",
+ "mf_ctc_fname",
+ "mf_head_origin",
+ "find_flat_channels_meg",
+ "find_noisy_channels_meg",
+ ),
+}
+# Some don't show up so force them to be empty
+_EXECUTION_OPTIONS = (
+ # Eventually we could deduplicate these with the execution.md list
+ "n_jobs",
+ "parallel_backend",
+ "dask_open_dashboard",
+ "dask_temp_dir",
+ "dask_worker_memory_limit",
+ "log_level",
+ "mne_log_level",
+ "on_error",
+ "memory_location",
+ "memory_file_method",
+ "memory_subdir",
+ "memory_verbose",
+ "config_validation",
+ "interactive",
+)
+_FORCE_EMPTY = _EXECUTION_OPTIONS + (
+    # Plus some BIDS ones we don't detect because of _bids_kwargs etc. above,
+ # which we could cross-check against the general.md list. A notable
+ # exception is random_state, since this does have more localized effects.
+ # These are used a lot at the very beginning, so adding them will lead
+ # to long lists. Instead, let's just mention at the top of General that
+ # messing with basic BIDS params will affect almost every step.
+ "bids_root",
+ "deriv_root",
+ "subjects_dir",
+ "sessions",
+ "acq",
+ "proc",
+ "rec",
+ "space",
+ "task",
+ "runs",
+ "exclude_runs",
+ "subjects",
+ "crop_runs",
+ "process_empty_room",
+ "process_rest",
+ "eeg_bipolar_channels",
+ "eeg_reference",
+ "eeg_template_montage",
+ "drop_channels",
+ "reader_extra_params",
+ "read_raw_bids_verbose",
+ "plot_psd_for_runs",
+ "shortest_event",
+ "find_breaks",
+ "min_break_duration",
+ "t_break_annot_start_after_previous_event",
+ "t_break_annot_stop_before_next_event",
+ "rename_events",
+ "on_rename_missing_events",
+ "mf_reference_run", # TODO: Make clearer that this changes a lot
+ "fix_stim_artifact",
+ "stim_artifact_tmin",
+ "stim_artifact_tmax",
+ # And some that we force to be empty because they affect too many things
+ # and what they affect is an incomplete list anyway
+ "exclude_subjects",
+ "ch_types",
+ "task_is_rest",
+ "data_type",
+)
+# Eventually we could parse AST to get these, but this is simple enough
+_EXTRA_FUNCS = {
+ "_bids_kwargs": ("get_task",),
+ "_import_data_kwargs": ("get_mf_reference_run",),
+ "get_runs": ("get_runs_all_subjects",),
+}
+
+
+class _ParseConfigSteps:
+ def __init__(self, force_empty=None):
+ self._force_empty = _FORCE_EMPTY if force_empty is None else force_empty
+ self.steps = defaultdict(list)
+ # Add a few helper functions
+ for func in (
+ _config_utils.get_eeg_reference,
+ _config_utils.get_all_contrasts,
+ _config_utils.get_decoding_contrasts,
+ _config_utils.get_fs_subject,
+ _config_utils.get_fs_subjects_dir,
+ _config_utils.get_mf_cal_fname,
+ _config_utils.get_mf_ctc_fname,
+ ):
+ this_list = []
+ for attr in ast.walk(ast.parse(inspect.getsource(func))):
+ if not isinstance(attr, ast.Attribute):
+ continue
+ if not (isinstance(attr.value, ast.Name) and attr.value.id == "config"):
+ continue
+ if attr.attr not in this_list:
+ this_list.append(attr.attr)
+ _MANUAL_KWS[func.__name__] = tuple(this_list)
+
+ for module in tqdm(
+ sum(_config_utils._get_step_modules().values(), tuple()),
+ desc="Generating option->step mapping",
+ ):
+ step = "/".join(module.__name__.split(".")[-2:])
+ found = False # found at least one?
+ # Walk the module file for "get_config*" functions (can be multiple!)
+ for func in ast.walk(ast.parse(Path(module.__file__).read_text("utf-8"))):
+ if not isinstance(func, ast.FunctionDef):
+ continue
+ where = f"{step}:{func.name}"
+ # Also look at config.* args in main(), e.g. config.recreate_bem
+ # and config.recreate_scalp_surface
+ if func.name == "main":
+ for call in ast.walk(func):
+ if not isinstance(call, ast.Call):
+ continue
+ for keyword in call.keywords:
+ if not isinstance(keyword.value, ast.Attribute):
+ continue
+ if keyword.value.value.id != "config":
+ continue
+ if keyword.value.attr in ("exec_params",):
+ continue
+ self._add_step_option(step, keyword.value.attr)
+ # Also look for root-level conditionals like use_maxwell_filter
+ # or spatial_filter
+ for cond in ast.iter_child_nodes(func):
+ # is a conditional
+ if not isinstance(cond, ast.If):
+ continue
+ # has a return statement
+ if not any(isinstance(c, ast.Return) for c in ast.walk(cond)):
+ continue
+ # look at all attributes in the conditional
+ for attr in ast.walk(cond.test):
+ if not isinstance(attr, ast.Attribute):
+ continue
+ if attr.value.id != "config":
+ continue
+ self._add_step_option(step, attr.attr)
+ # Now look at get_config* functions
+ if not func.name.startswith("get_config"):
+ continue
+ found = True
+ for call in ast.walk(func):
+ if not isinstance(call, ast.Call):
+ continue
+ if call.func.id != "SimpleNamespace":
+ continue
+ break
+ else:
+ raise RuntimeError(f"Could not find SimpleNamespace in {func}")
+ assert call.args == []
+ for keyword in call.keywords:
+ if isinstance(keyword.value, ast.Call):
+ key = keyword.value.func.id
+ if key in _MANUAL_KWS:
+ for option in _MANUAL_KWS[key]:
+ self._add_step_option(step, option)
+ continue
+ if keyword.value.func.id == "_sanitize_callable":
+ assert len(keyword.value.args) == 1
+ assert isinstance(keyword.value.args[0], ast.Attribute)
+ assert keyword.value.args[0].value.id == "config"
+ self._add_step_option(step, keyword.value.args[0].attr)
+ continue
+ if key not in (
+ "_bids_kwargs",
+ "_import_data_kwargs",
+ "get_runs",
+ "get_subjects",
+ "get_sessions",
+ ):
+ raise RuntimeError(
+ f"{where} cannot handle call {keyword.value.func.id=} "
+ f"for {key}"
+ )
+ # Get the source and regex for config values
+ if key == "_import_data_kwargs":
+ funcs = [getattr(_import_data, key)]
+ else:
+ funcs = [getattr(_config_utils, key)]
+ for func_name in _EXTRA_FUNCS.get(key, ()):
+ funcs.append(getattr(_config_utils, func_name))
+ for fi, func in enumerate(funcs):
+ source = inspect.getsource(func)
+ assert "config: SimpleNamespace" in source, key
+ if fi == 0:
+ for func_name in _EXTRA_FUNCS.get(key, ()):
+ assert f"{func_name}(" in source, (key, func_name)
+ attrs = _CONFIG_RE.findall(source)
+ assert len(attrs), f"No config.* found in source of {key}"
+ for attr in attrs:
+ self._add_step_option(step, attr)
+ continue
+ if isinstance(keyword.value, ast.Name):
+ key = f"{where}:{keyword.value.id}"
+ if key in _MANUAL_KWS:
+ for option in _MANUAL_KWS[f"{where}:{keyword.value.id}"]:
+ self._add_step_option(step, option)
+ continue
+ raise RuntimeError(f"{where} cannot handle Name {key=}")
+ if isinstance(keyword.value, ast.IfExp): # conditional
+ if keyword.arg == "processing": # inline conditional for proc
+ continue
+ if not isinstance(keyword.value, ast.Attribute):
+ raise RuntimeError(
+ f"{where} cannot handle type {keyword.value=}"
+ )
+ option = keyword.value.attr
+ if option in _IGNORE_OPTIONS:
+ continue
+ assert keyword.value.value.id == "config", f"{where} {keyword.value.value.id}" # noqa: E501 # fmt: skip
+ self._add_step_option(step, option)
+ if step in _NO_CONFIG:
+ assert not found, f"Found unexpected get_config* in {step}"
+ else:
+ assert found, f"Could not find get_config* in {step}"
+ for key in self._force_empty:
+ self.steps[key] = list()
+ for key, val in self.steps.items():
+ assert len(val) == len(set(val)), f"{key} {val}"
+ self.steps = {k: tuple(v) for k, v in self.steps.items()} # no defaultdict
+
+ def _add_step_option(self, step, option):
+ if step not in self.steps[option]:
+ self.steps[option].append(step)
+
+ def __call__(self, option: str) -> list[str]:
+ return self.steps[option]
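To illustrate the regex-based fallback above, a small sketch (the helper body here is invented) of what `_CONFIG_RE.findall` extracts from a function's source:

```python
import re

_CONFIG_RE = re.compile(r"config\.([a-zA-Z_]+)")

# Hypothetical helper source; only the config.* attribute accesses matter.
source = '''
def _bids_kwargs(*, config):
    return dict(task=config.task, runs=config.runs, acq=config.acq)
'''
print(_CONFIG_RE.findall(source))  # ['task', 'runs', 'acq']
```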
diff --git a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
index 0ae706aa9..34a534791 100644
--- a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
+++ b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py
@@ -28,7 +28,6 @@
import mne
-study_name = "ERP-CORE"
bids_root = "~/mne_data/ERP_CORE"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ERP_CORE"
diff --git a/mne_bids_pipeline/tests/configs/config_MNE_phantom_KIT_data.py b/mne_bids_pipeline/tests/configs/config_MNE_phantom_KIT_data.py
index ef3347a53..49689bd3e 100644
--- a/mne_bids_pipeline/tests/configs/config_MNE_phantom_KIT_data.py
+++ b/mne_bids_pipeline/tests/configs/config_MNE_phantom_KIT_data.py
@@ -4,7 +4,6 @@
https://mne.tools/dev/documentation/datasets.html#kit-phantom-dataset
"""
-study_name = "MNE-phantom-KIT-data"
bids_root = "~/mne_data/MNE-phantom-KIT-data"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/MNE-phantom-KIT-data"
task = "phantom"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000117.py b/mne_bids_pipeline/tests/configs/config_ds000117.py
index 65e213e24..14fd77499 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000117.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000117.py
@@ -1,6 +1,5 @@
"""Faces dataset."""
-study_name = "ds000117"
bids_root = "~/mne_data/ds000117"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000117"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000246.py b/mne_bids_pipeline/tests/configs/config_ds000246.py
index 0c516796d..d1a9610d4 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000246.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000246.py
@@ -4,7 +4,6 @@
information.
"""
-study_name = "ds000246"
bids_root = "~/mne_data/ds000246"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000246"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000247.py b/mne_bids_pipeline/tests/configs/config_ds000247.py
index 395b538ab..fc4f42464 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000247.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000247.py
@@ -2,9 +2,8 @@
import numpy as np
-study_name = "ds000247"
-bids_root = f"~/mne_data/{study_name}"
-deriv_root = f"~/mne_data/derivatives/mne-bids-pipeline/{study_name}"
+bids_root = "~/mne_data/ds000247"
+deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000247"
subjects = ["0002"]
sessions = ["01"]
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py b/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py
index 547721753..5d37fde67 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py
@@ -1,6 +1,5 @@
"""MNE Sample Data: BEM from FLASH images."""
-study_name = "ds000248"
bids_root = "~/mne_data/ds000248"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000248_FLASH_BEM"
subjects_dir = f"{bids_root}/derivatives/freesurfer/subjects"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_T1_BEM.py b/mne_bids_pipeline/tests/configs/config_ds000248_T1_BEM.py
index 76fee45e3..0fdfdbf76 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_T1_BEM.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_T1_BEM.py
@@ -1,6 +1,5 @@
"""MNE Sample Data: BEM from T1 images."""
-study_name = "ds000248"
bids_root = "~/mne_data/ds000248"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000248_T1_BEM"
subjects_dir = f"{bids_root}/derivatives/freesurfer/subjects"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_base.py b/mne_bids_pipeline/tests/configs/config_ds000248_base.py
index 89f39cd8b..2f4db6a10 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_base.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_base.py
@@ -2,7 +2,6 @@
import mne
-study_name = "ds000248"
bids_root = "~/mne_data/ds000248"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000248_base"
subjects_dir = f"{bids_root}/derivatives/freesurfer/subjects"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_coreg_surfaces.py b/mne_bids_pipeline/tests/configs/config_ds000248_coreg_surfaces.py
index 475ca5d67..dba51f97d 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_coreg_surfaces.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_coreg_surfaces.py
@@ -1,6 +1,5 @@
"""MNE Sample Data: Head surfaces from FreeSurfer surfaces for coregistration step."""
-study_name = "ds000248"
bids_root = "~/mne_data/ds000248"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000248_coreg_surfaces"
subjects_dir = f"{bids_root}/derivatives/freesurfer/subjects"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_ica.py b/mne_bids_pipeline/tests/configs/config_ds000248_ica.py
index e1b090e30..6dfb49c7b 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_ica.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_ica.py
@@ -1,6 +1,5 @@
"""MNE Sample Data: ICA."""
-study_name = 'MNE "sample" dataset'
bids_root = "~/mne_data/ds000248"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000248_ica"
diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_no_mri.py b/mne_bids_pipeline/tests/configs/config_ds000248_no_mri.py
index 3b83b0e6e..08b98e9bb 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000248_no_mri.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000248_no_mri.py
@@ -1,6 +1,5 @@
"""MNE Sample Data: Using the `fsaverage` template MRI."""
-study_name = "ds000248"
bids_root = "~/mne_data/ds000248"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000248_no_mri"
subjects_dir = f"{bids_root}/derivatives/freesurfer/subjects"
diff --git a/mne_bids_pipeline/tests/configs/config_ds001810.py b/mne_bids_pipeline/tests/configs/config_ds001810.py
index 606fee3c8..3771d6cd3 100644
--- a/mne_bids_pipeline/tests/configs/config_ds001810.py
+++ b/mne_bids_pipeline/tests/configs/config_ds001810.py
@@ -1,6 +1,5 @@
"""tDCS EEG."""
-study_name = "ds001810"
bids_root = "~/mne_data/ds001810"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds001810"
diff --git a/mne_bids_pipeline/tests/configs/config_ds001971.py b/mne_bids_pipeline/tests/configs/config_ds001971.py
index befc0f30e..349dbe23e 100644
--- a/mne_bids_pipeline/tests/configs/config_ds001971.py
+++ b/mne_bids_pipeline/tests/configs/config_ds001971.py
@@ -3,7 +3,6 @@
See ds001971 on OpenNeuro: https://github.com/OpenNeuroDatasets/ds001971
"""
-study_name = "ds001971"
bids_root = "~/mne_data/ds001971"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds001971"
diff --git a/mne_bids_pipeline/tests/configs/config_ds003104.py b/mne_bids_pipeline/tests/configs/config_ds003104.py
index 3e4b9e44d..d47a0a64c 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003104.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003104.py
@@ -1,6 +1,5 @@
"""Somato."""
-study_name = "MNE-somato-data-anonymized"
bids_root = "~/mne_data/ds003104"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds003104"
subjects_dir = f"{bids_root}/derivatives/freesurfer/subjects"
diff --git a/mne_bids_pipeline/tests/configs/config_ds003392.py b/mne_bids_pipeline/tests/configs/config_ds003392.py
index b004c4345..b8ee82d2e 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003392.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003392.py
@@ -1,6 +1,5 @@
"""hMT+ Localizer."""
-study_name = "localizer"
bids_root = "~/mne_data/ds003392"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds003392"
diff --git a/mne_bids_pipeline/tests/configs/config_ds003775.py b/mne_bids_pipeline/tests/configs/config_ds003775.py
index 980bed232..219b1e23a 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003775.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003775.py
@@ -1,6 +1,5 @@
"""SRM Resting-state EEG."""
-study_name = "ds003775"
bids_root = "~/mne_data/ds003775"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds003775"
diff --git a/mne_bids_pipeline/tests/configs/config_ds004107.py b/mne_bids_pipeline/tests/configs/config_ds004107.py
index a46679e54..0dd70a5ef 100644
--- a/mne_bids_pipeline/tests/configs/config_ds004107.py
+++ b/mne_bids_pipeline/tests/configs/config_ds004107.py
@@ -9,9 +9,8 @@
# This has auditory, median, indx, visual, rest, and emptyroom but let's just
# process the auditory (it's the smallest after rest)
-study_name = "ds004107"
-bids_root = f"~/mne_data/{study_name}"
-deriv_root = f"~/mne_data/derivatives/mne-bids-pipeline/{study_name}"
+bids_root = "~/mne_data/ds004107"
+deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds004107"
subjects = ["mind002"]
sessions = ["01"]
conditions = ["left", "right"] # there are also tone and noise
diff --git a/mne_bids_pipeline/tests/configs/config_ds004229.py b/mne_bids_pipeline/tests/configs/config_ds004229.py
index 9cedd6491..878b743b0 100644
--- a/mne_bids_pipeline/tests/configs/config_ds004229.py
+++ b/mne_bids_pipeline/tests/configs/config_ds004229.py
@@ -6,7 +6,6 @@
import mne
import numpy as np
-study_name = "amnoise"
bids_root = "~/mne_data/ds004229"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds004229"
diff --git a/mne_bids_pipeline/tests/configs/config_eeg_matchingpennies.py b/mne_bids_pipeline/tests/configs/config_eeg_matchingpennies.py
index fbe34b11a..5cb0b1390 100644
--- a/mne_bids_pipeline/tests/configs/config_eeg_matchingpennies.py
+++ b/mne_bids_pipeline/tests/configs/config_eeg_matchingpennies.py
@@ -1,6 +1,5 @@
"""Matchingpennies EEG experiment."""
-study_name = "eeg_matchingpennies"
bids_root = "~/mne_data/eeg_matchingpennies"
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/eeg_matchingpennies"
diff --git a/mne_bids_pipeline/tests/test_documented.py b/mne_bids_pipeline/tests/test_documented.py
index db5bbbd06..a6275a4c8 100644
--- a/mne_bids_pipeline/tests/test_documented.py
+++ b/mne_bids_pipeline/tests/test_documented.py
@@ -9,6 +9,7 @@
import yaml
from mne_bids_pipeline._config_import import _get_default_config
+from mne_bids_pipeline._docs import _EXECUTION_OPTIONS, _ParseConfigSteps
from mne_bids_pipeline.tests.datasets import DATASET_OPTIONS
from mne_bids_pipeline.tests.test_run import TEST_SUITE
@@ -67,6 +68,23 @@ def test_options_documented():
assert in_config.difference(in_doc_all) == set(), f"Values missing from {what}"
+def test_config_options_used():
+ """Test that all config options are used somewhere."""
+ config = _get_default_config()
+ config_names = set(d for d in dir(config) if not d.startswith("__"))
+ for key in ("_epochs_split_size", "_raw_split_size"):
+ config_names.add(key)
+ for key in _EXECUTION_OPTIONS:
+ config_names.remove(key)
+ pcs = _ParseConfigSteps(force_empty=())
+ missing_from_config = sorted(set(pcs.steps) - config_names)
+ assert missing_from_config == [], f"Missing from config: {missing_from_config}"
+ missing_from_steps = sorted(config_names - set(pcs.steps))
+ assert missing_from_steps == [], f"Missing from steps: {missing_from_steps}"
+ for key, val in pcs.steps.items():
+ assert val, f"No steps for {key}"
+
+
def test_datasets_in_doc():
"""Test that all datasets in tests are in the doc."""
# There are four things to keep in sync:
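For readers unfamiliar with the consistency check performed by the new test_config_options_used above, here is a minimal self-contained sketch of the two-way set-difference pattern it relies on (the option names are illustrative, not the pipeline's real data):

# Sketch: every option a step consumes must exist in the config, and
# every config option must be consumed by at least one step.
config_names = {"l_freq", "h_freq", "decoding_csp_times"}
options_used_by_steps = {"l_freq", "h_freq", "decoding_csp_times"}

missing_from_config = sorted(options_used_by_steps - config_names)
assert missing_from_config == [], f"Missing from config: {missing_from_config}"
missing_from_steps = sorted(config_names - options_used_by_steps)
assert missing_from_steps == [], f"Missing from steps: {missing_from_steps}"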
From 45e4c13507e13fa3eb9883ac618eee8ade4d44c5 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Fri, 15 Mar 2024 10:00:46 -0400
Subject: [PATCH 086/132] BUG: Fix bug with CSP computation and Maxwell filter
(#890)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: Richard Höchenberger
---
docs/source/examples/gen_examples.py | 2 +-
docs/source/v1.8.md.inc | 9 +-
mne_bids_pipeline/_config.py | 2 +-
mne_bids_pipeline/_config_import.py | 2 +-
mne_bids_pipeline/_config_utils.py | 2 +-
mne_bids_pipeline/_decoding.py | 15 +-
mne_bids_pipeline/_import_data.py | 40 ++--
mne_bids_pipeline/_io.py | 9 -
mne_bids_pipeline/_report.py | 6 +-
mne_bids_pipeline/_run.py | 11 +-
.../steps/init/_02_find_empty_room.py | 3 +-
.../steps/preprocessing/_01_data_quality.py | 18 +-
.../steps/sensor/_05_decoding_csp.py | 66 +++---
.../steps/sensor/_06_make_cov.py | 2 +-
.../steps/sensor/_99_group_average.py | 196 +++++++++---------
.../steps/source/_02_make_bem_solution.py | 2 +-
.../tests/configs/config_ds000117.py | 1 +
.../tests/configs/config_ds003392.py | 5 +
mne_bids_pipeline/tests/conftest.py | 4 +
19 files changed, 209 insertions(+), 186 deletions(-)
diff --git a/docs/source/examples/gen_examples.py b/docs/source/examples/gen_examples.py
index 922c5c569..2ae1a1b5a 100755
--- a/docs/source/examples/gen_examples.py
+++ b/docs/source/examples/gen_examples.py
@@ -139,7 +139,7 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict:
)
if dataset_name in all_demonstrated:
logger.warning(
- f"Duplicate dataset name {test_dataset_name} -> {dataset_name}, " "skipping"
+ f"Duplicate dataset name {test_dataset_name} -> {dataset_name}, skipping"
)
continue
del test_dataset_options, test_dataset_name
diff --git a/docs/source/v1.8.md.inc b/docs/source/v1.8.md.inc
index f9f583fb9..552d65272 100644
--- a/docs/source/v1.8.md.inc
+++ b/docs/source/v1.8.md.inc
@@ -1,8 +1,8 @@
## v1.8.0 (unreleased)
-[//]: # (### :new: New features & enhancements)
+### :new: New features & enhancements
-[//]: # (- Whatever (#000 by @whoever))
+- Disabling CSP time-frequency mode is now supported by passing an empty list to [`decoding_csp_times`][mne_bids_pipeline._config.decoding_csp_times] (#890 by @larsoner)
[//]: # (### :warning: Behavior changes)
@@ -14,9 +14,10 @@
[//]: # (- Whatever (#000 by @whoever))
-[//]: # (### :bug: Bug fixes)
+### :bug: Bug fixes
-[//]: # (- Whatever (#000 by @whoever))
+- Fix handling of Maxwell-filtered data in CSP (#890 by @larsoner)
+- Avoid recomputation / cache miss when the same empty-room file is matched to multiple subjects (#890 by @larsoner)
### :medical_symbol: Code health
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 07c41ece7..0df4096e1 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1611,7 +1611,7 @@
Must contain at least two elements. By default, 5 equally-spaced bins are
created across the non-negative time range of the epochs.
All specified time points must be contained in the epochs interval.
-If `None`, do not perform **time-frequency** analysis, and only run CSP on
+If an empty list, do not perform **time-frequency** analysis, and only run CSP on
**frequency** data.
???+ example "Example"
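Concretely, the behavior documented above lets a user configuration disable the time-frequency pass while keeping frequency-domain CSP; the following sketch mirrors the test configuration added for ds003392 later in this patch:

# Hypothetical user config: frequency-domain CSP only.
decoding_csp = True
decoding_csp_times = []  # empty list disables time-frequency decoding
decoding_csp_freqs = {
    "alpha": (8, 12),  # band edges in Hz
}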
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index 25eea4b36..98286d4ba 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -434,7 +434,7 @@ def _check_misspellings_removals(
if user_name not in valid_names:
# find the closest match
closest_match = difflib.get_close_matches(user_name, valid_names, n=1)
- msg = f"Found a variable named {repr(user_name)} in your custom " "config,"
+ msg = f"Found a variable named {repr(user_name)} in your custom config,"
if closest_match and closest_match[0] not in user_names:
this_msg = (
f"{msg} did you mean {repr(closest_match[0])}? "
diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py
index 701cd93a3..d6bcb0ce5 100644
--- a/mne_bids_pipeline/_config_utils.py
+++ b/mne_bids_pipeline/_config_utils.py
@@ -403,7 +403,7 @@ def get_mf_ctc_fname(
root=config.bids_root,
).meg_crosstalk_fpath
if mf_ctc_fpath is None:
- raise ValueError("Could not find Maxwell Filter cross-talk " "file.")
+ raise ValueError("Could not find Maxwell Filter cross-talk file.")
else:
mf_ctc_fpath = pathlib.Path(config.mf_ctc_fname).expanduser().absolute()
if not mf_ctc_fpath.exists():
diff --git a/mne_bids_pipeline/_decoding.py b/mne_bids_pipeline/_decoding.py
index df10d6f1f..3968fcf3c 100644
--- a/mne_bids_pipeline/_decoding.py
+++ b/mne_bids_pipeline/_decoding.py
@@ -34,10 +34,19 @@ def _handle_csp_args(
)
if decoding_csp_times is None:
decoding_csp_times = np.linspace(max(0, epochs_tmin), epochs_tmax, num=6)
- if len(decoding_csp_times) < 2:
- raise ValueError("decoding_csp_times should contain at least 2 values.")
+ else:
+ decoding_csp_times = np.array(decoding_csp_times, float)
+ if decoding_csp_times.ndim != 1 or len(decoding_csp_times) == 1:
+ raise ValueError(
+ "decoding_csp_times should be 1 dimensional and contain at least 2 values "
+ "to define time intervals, or be empty to disable time-frequency mode, got "
+ f"shape {decoding_csp_times.shape}"
+ )
if not np.array_equal(decoding_csp_times, np.sort(decoding_csp_times)):
raise ValueError("decoding_csp_times should be sorted.")
+ time_bins = np.c_[decoding_csp_times[:-1], decoding_csp_times[1:]]
+ assert time_bins.ndim == 2 and time_bins.shape[1] == 2, time_bins.shape
+
if decoding_metric != "roc_auc":
raise ValueError(
f'CSP decoding currently only supports the "roc_auc" '
@@ -76,7 +85,7 @@ def _handle_csp_args(
freq_bins = list(zip(edges[:-1], edges[1:]))
freq_name_to_bins_map[freq_range_name] = freq_bins
- return freq_name_to_bins_map
+ return freq_name_to_bins_map, time_bins
def _decoding_preproc_steps(
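The np.c_ construction introduced above pairs consecutive bin edges into (start, stop) intervals; a quick standalone sketch of what it produces:

import numpy as np

edges = np.array([0.0, 0.2, 0.4, 0.6])   # validated decoding_csp_times
time_bins = np.c_[edges[:-1], edges[1:]]  # pair consecutive edges
print(time_bins)
# [[0.  0.2]
#  [0.2 0.4]
#  [0.4 0.6]]
# An empty edges array yields shape (0, 2), which downstream code uses
# to detect that time-frequency mode is disabled.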
diff --git a/mne_bids_pipeline/_import_data.py b/mne_bids_pipeline/_import_data.py
index c3c319f44..aaf7b56e3 100644
--- a/mne_bids_pipeline/_import_data.py
+++ b/mne_bids_pipeline/_import_data.py
@@ -16,7 +16,7 @@
get_runs,
get_task,
)
-from ._io import _empty_room_match_path, _read_json
+from ._io import _read_json
from ._logging import gen_log_kwargs, logger
from ._run import _update_for_splits
from .typing import PathLike
@@ -403,6 +403,7 @@ def import_experimental_data(
_fix_stim_artifact_func(cfg=cfg, raw=raw)
if bids_path_bads_in is not None:
+ run = "rest" if data_is_rest else run # improve logging
bads = _read_bads_tsv(cfg=cfg, bids_path_bads=bids_path_bads_in)
msg = f"Marking {len(bads)} channel{_pl(bads)} as bad."
logger.info(**gen_log_kwargs(message=msg))
@@ -585,6 +586,8 @@ def _get_run_path(
add_bads=add_bads,
kind=kind,
allow_missing=allow_missing,
+ subject=subject,
+ session=session,
)
@@ -651,6 +654,8 @@ def _get_noise_path(
add_bads=add_bads,
kind=kind,
allow_missing=True,
+ subject=subject,
+ session=session,
)
@@ -701,6 +706,12 @@ def _get_mf_reference_run_path(
)
+def _empty_room_match_path(run_path: BIDSPath, cfg: SimpleNamespace) -> BIDSPath:
+ return run_path.copy().update(
+ extension=".json", suffix="emptyroommatch", root=cfg.deriv_root
+ )
+
+
def _path_dict(
*,
cfg: SimpleNamespace,
@@ -709,6 +720,8 @@ def _path_dict(
kind: Literal["orig", "sss", "filt"],
allow_missing: bool,
key: Optional[str] = None,
+ subject: str,
+ session: Optional[str],
) -> dict:
if add_bads is None:
add_bads = kind == "orig" and _do_mf_autobad(cfg=cfg)
@@ -719,35 +732,30 @@ def _path_dict(
if allow_missing and not in_files[key].fpath.exists():
return dict()
if add_bads:
- bads_tsv_fname = _bads_path(cfg=cfg, bids_path_in=bids_path_in)
+ bads_tsv_fname = _bads_path(
+ cfg=cfg,
+ bids_path_in=bids_path_in,
+ subject=subject,
+ session=session,
+ )
if bads_tsv_fname.fpath.is_file() or not allow_missing:
in_files[f"{key}-bads"] = bads_tsv_fname
return in_files
-def _auto_scores_path(
- *,
- cfg: SimpleNamespace,
- bids_path_in: BIDSPath,
-) -> BIDSPath:
- return bids_path_in.copy().update(
- suffix="scores",
- extension=".json",
- root=cfg.deriv_root,
- split=None,
- check=False,
- )
-
-
def _bads_path(
*,
cfg: SimpleNamespace,
bids_path_in: BIDSPath,
+ subject: str,
+ session: Optional[str],
) -> BIDSPath:
return bids_path_in.copy().update(
suffix="bads",
extension=".tsv",
root=cfg.deriv_root,
+ subject=subject,
+ session=session,
split=None,
check=False,
)
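The path helpers above all derive sidecar file names from an existing BIDSPath via copy()/update(). A minimal sketch of the pattern with illustrative entities; the explicit subject/session arguments introduced in this patch ensure the derived path uses the entities of the subject being processed, even when the input is a shared empty-room recording:

from mne_bids import BIDSPath

run_path = BIDSPath(
    subject="01", session="01", task="localizer", run="01",
    suffix="meg", root="~/mne_data/ds003392",
)
bads_path = run_path.copy().update(
    suffix="bads",
    extension=".tsv",
    root="~/mne_data/derivatives/mne-bids-pipeline/ds003392",
    subject="01",   # pin explicitly rather than inheriting
    session="01",
    split=None,
    check=False,    # "bads" is a pipeline-specific, non-BIDS suffix
)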
diff --git a/mne_bids_pipeline/_io.py b/mne_bids_pipeline/_io.py
index f1a2b0ce3..dc894cb6b 100644
--- a/mne_bids_pipeline/_io.py
+++ b/mne_bids_pipeline/_io.py
@@ -1,9 +1,6 @@
"""I/O helpers."""
-from types import SimpleNamespace
-
import json_tricks
-from mne_bids import BIDSPath
from .typing import PathLike
@@ -16,9 +13,3 @@ def _write_json(fname: PathLike, data: dict) -> None:
def _read_json(fname: PathLike) -> dict:
with open(fname, encoding="utf-8") as f:
return json_tricks.load(f)
-
-
-def _empty_room_match_path(run_path: BIDSPath, cfg: SimpleNamespace) -> BIDSPath:
- return run_path.copy().update(
- extension=".json", suffix="emptyroommatch", root=cfg.deriv_root
- )
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index e607de98f..5a4136cef 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -594,14 +594,14 @@ def add_csp_grand_average(
cond_1: str,
cond_2: str,
fname_csp_freq_results: BIDSPath,
- fname_csp_cluster_results: pd.DataFrame,
+ fname_csp_cluster_results: Optional[pd.DataFrame],
):
"""Add CSP decoding results to the grand average report."""
import matplotlib.pyplot as plt # nested import to help joblib
# First, plot decoding scores across frequency bins (entire epochs).
section = "Decoding: CSP"
- freq_name_to_bins_map = _handle_csp_args(
+ freq_name_to_bins_map, _ = _handle_csp_args(
cfg.decoding_csp_times,
cfg.decoding_csp_freqs,
cfg.decoding_metric,
@@ -684,6 +684,8 @@ def add_csp_grand_average(
)
# Now, plot decoding scores across time-frequency bins.
+ if fname_csp_cluster_results is None:
+ return
csp_cluster_results = loadmat(fname_csp_cluster_results)
fig, ax = plt.subplots(
nrows=1, ncols=2, sharex=True, sharey=True, constrained_layout=True
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index 73e4c6082..ca3bd1faf 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -67,13 +67,13 @@ def __mne_bids_pipeline_failsafe_wrapper__(*args, **kwargs):
# Find the limit / step where the error occurred
step_dir = pathlib.Path(__file__).parent / "steps"
tb = traceback.extract_tb(e.__traceback__)
- for fi, frame in enumerate(inspect.stack()):
+ for fi, frame in enumerate(tb):
is_step = pathlib.Path(frame.filename).parent.parent == step_dir
del frame
if is_step:
# omit everything before the "step" dir, which will
# generally be stuff from this file and joblib
- tb = tb[-fi:]
+ tb = tb[fi:]
break
tb = "".join(traceback.format_list(tb))
@@ -221,9 +221,7 @@ def wrapper(*args, **kwargs):
for key, (fname, this_hash) in out_files_hashes.items():
fname = pathlib.Path(fname)
if not fname.exists():
- msg = (
- f"Output file missing {str(fname)}, " "will recompute …"
- )
+ msg = f"Output file missing: {fname}, will recompute …"
emoji = "🧩"
bad_out_files = True
break
@@ -231,7 +229,8 @@ def wrapper(*args, **kwargs):
if this_hash != got_hash:
msg = (
f"Output file {self.memory_file_method} mismatch for "
- f"{str(fname)}, will recompute …"
+ f"{fname} ({this_hash} != {got_hash}), will "
+ "recompute …"
)
emoji = "🚫"
bad_out_files = True
diff --git a/mne_bids_pipeline/steps/init/_02_find_empty_room.py b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
index fcb0536c5..d56318365 100644
--- a/mne_bids_pipeline/steps/init/_02_find_empty_room.py
+++ b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
@@ -13,7 +13,8 @@
get_sessions,
get_subjects,
)
-from ..._io import _empty_room_match_path, _write_json
+from ..._import_data import _empty_room_match_path
+from ..._io import _write_json
from ..._logging import gen_log_kwargs, logger
from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
diff --git a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
index c12dd6a26..1cbeca387 100644
--- a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
+++ b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
@@ -17,7 +17,6 @@
get_subjects,
)
from ..._import_data import (
- _auto_scores_path,
_bads_path,
_get_mf_reference_run_path,
_get_run_rest_noise_path,
@@ -159,7 +158,7 @@ def _find_bads_maxwell(
elif cfg.find_noisy_channels_meg and not cfg.find_flat_channels_meg:
msg = "Finding noisy channels using Maxwell filtering."
else:
- msg = "Finding flat channels and noisy channels using " "Maxwell filtering."
+ msg = "Finding flat channels and noisy channels using Maxwell filtering."
logger.info(**gen_log_kwargs(message=msg))
if run is None and task == "noise":
@@ -232,18 +231,23 @@ def _find_bads_maxwell(
logger.info(**gen_log_kwargs(message=msg))
if cfg.find_noisy_channels_meg:
- out_files["auto_scores"] = _auto_scores_path(
- cfg=cfg,
- bids_path_in=bids_path_in,
+ out_files["auto_scores"] = bids_path_in.copy().update(
+ suffix="scores",
+ extension=".json",
+ root=cfg.deriv_root,
+ split=None,
+ check=False,
+ session=session,
+ subject=subject,
)
- if not out_files["auto_scores"].fpath.parent.exists():
- out_files["auto_scores"].fpath.parent.mkdir(parents=True)
_write_json(out_files["auto_scores"], auto_scores)
# Write the bad channels to disk.
out_files["bads_tsv"] = _bads_path(
cfg=cfg,
bids_path_in=bids_path_in,
+ subject=subject,
+ session=session,
)
bads_for_tsv = []
reasons = []
diff --git a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
index 9b93c0c32..ca7791fd4 100644
--- a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
+++ b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
@@ -79,21 +79,15 @@ def prepare_epochs_and_y(
*, epochs: mne.BaseEpochs, contrast: tuple[str, str], cfg, fmin: float, fmax: float
) -> tuple[mne.BaseEpochs, np.ndarray]:
"""Band-pass between, sub-select the desired epochs, and prepare y."""
- epochs_filt = epochs.copy().pick(["meg", "eeg"])
-
- # We only take mag to speed up computation
- # because the information is redundant between grad and mag
- if cfg.datatype == "meg" and cfg.use_maxwell_filter:
- epochs_filt.pick("mag")
-
# filtering out the conditions we are not interested in, to ensure here we
# have a valid partition between the condition of the contrast.
- #
+
# XXX Hack for handling epochs selection via metadata
+ # This also makes a copy
if contrast[0].startswith("event_name.isin"):
- epochs_filt = epochs_filt[f"{contrast[0]} or {contrast[1]}"]
+ epochs_filt = epochs[f"{contrast[0]} or {contrast[1]}"]
else:
- epochs_filt = epochs_filt[contrast]
+ epochs_filt = epochs[contrast]
# Filtering is costly, so do it last, after the selection of the channels
# and epochs. We know that often the filter will be longer than the signal,
@@ -190,7 +184,7 @@ def one_subject_decoding(
)
# Loop over frequencies (all time points lumped together)
- freq_name_to_bins_map = _handle_csp_args(
+ freq_name_to_bins_map, time_bins = _handle_csp_args(
cfg.decoding_csp_times,
cfg.decoding_csp_freqs,
cfg.decoding_metric,
@@ -264,11 +258,6 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
#
# Note: We don't support varying time ranges for different frequency
# ranges to avoid leaking of information.
- time_bins = np.array(cfg.decoding_csp_times)
- if time_bins.ndim == 1:
- time_bins = np.array(list(zip(time_bins[:-1], time_bins[1:])))
- assert time_bins.ndim == 2
-
tf_decoding_table_rows = []
for freq_range_name, freq_bins in freq_name_to_bins_map.items():
@@ -292,13 +281,18 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
}
tf_decoding_table_rows.append(row)
- tf_decoding_table = pd.concat(
- [pd.DataFrame.from_dict(row) for row in tf_decoding_table_rows],
- ignore_index=True,
- )
+ if len(tf_decoding_table_rows):
+ tf_decoding_table = pd.concat(
+ [pd.DataFrame.from_dict(row) for row in tf_decoding_table_rows],
+ ignore_index=True,
+ )
+ else:
+ tf_decoding_table = pd.DataFrame()
del tf_decoding_table_rows
for idx, row in tf_decoding_table.iterrows():
+ if len(row) == 0:
+ break # no data
tmin = row["t_min"]
tmax = row["t_max"]
fmin = row["f_min"]
@@ -340,8 +334,10 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
)
with pd.ExcelWriter(fname_results) as w:
freq_decoding_table.to_excel(w, sheet_name="CSP Frequency", index=False)
- tf_decoding_table.to_excel(w, sheet_name="CSP Time-Frequency", index=False)
+ if not tf_decoding_table.empty:
+ tf_decoding_table.to_excel(w, sheet_name="CSP Time-Frequency", index=False)
out_files = {"csp-excel": fname_results}
+ del freq_decoding_table
# Report
with _open_report(
@@ -350,15 +346,6 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
msg = "Adding CSP decoding results to the report."
logger.info(**gen_log_kwargs(message=msg))
section = "Decoding: CSP"
- freq_name_to_bins_map = _handle_csp_args(
- cfg.decoding_csp_times,
- cfg.decoding_csp_freqs,
- cfg.decoding_metric,
- epochs_tmin=cfg.epochs_tmin,
- epochs_tmax=cfg.epochs_tmax,
- time_frequency_freq_min=cfg.time_frequency_freq_min,
- time_frequency_freq_max=cfg.time_frequency_freq_max,
- )
all_csp_tf_results = dict()
for contrast in cfg.decoding_contrasts:
cond_1, cond_2 = contrast
@@ -381,14 +368,15 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
csp_freq_results["scores"] = csp_freq_results["scores"].apply(
lambda x: np.array(x[1:-1].split(), float)
)
- csp_tf_results = pd.read_excel(
- fname_decoding, sheet_name="CSP Time-Frequency"
- )
- csp_tf_results["scores"] = csp_tf_results["scores"].apply(
- lambda x: np.array(x[1:-1].split(), float)
- )
- all_csp_tf_results[contrast] = csp_tf_results
- del csp_tf_results
+ if not tf_decoding_table.empty:
+ csp_tf_results = pd.read_excel(
+ fname_decoding, sheet_name="CSP Time-Frequency"
+ )
+ csp_tf_results["scores"] = csp_tf_results["scores"].apply(
+ lambda x: np.array(x[1:-1].split(), float)
+ )
+ all_csp_tf_results[contrast] = csp_tf_results
+ del csp_tf_results
all_decoding_scores = list()
contrast_names = list()
@@ -497,6 +485,8 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
tags=tags,
replace=True,
)
+ plt.close(fig)
+ del fig, title
assert len(in_files) == 0, in_files.keys()
return _prep_out_files(exec_params=exec_params, out_files=out_files)
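Because the time-frequency sheet is now written only when there is data, readers of the workbook must probe for it. A compact sketch of the resulting write/read contract (requires openpyxl; the file name is illustrative):

import pandas as pd

freq_df = pd.DataFrame({"freq_range_name": ["alpha"], "mean_crossval_score": [0.61]})
tf_df = pd.DataFrame()  # empty when decoding_csp_times == []

with pd.ExcelWriter("csp_results.xlsx") as w:
    freq_df.to_excel(w, sheet_name="CSP Frequency", index=False)
    if not tf_df.empty:
        tf_df.to_excel(w, sheet_name="CSP Time-Frequency", index=False)

with pd.ExcelFile("csp_results.xlsx") as xf:
    freq_back = pd.read_excel(xf, sheet_name="CSP Frequency")
    if "CSP Time-Frequency" in xf.sheet_names:  # sheet may be absent
        tf_back = pd.read_excel(xf, sheet_name="CSP Time-Frequency")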
diff --git a/mne_bids_pipeline/steps/sensor/_06_make_cov.py b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
index 075abe472..e3c8cdc9e 100644
--- a/mne_bids_pipeline/steps/sensor/_06_make_cov.py
+++ b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
@@ -184,7 +184,7 @@ def retrieve_custom_cov(
check=False,
)
- msg = "Retrieving noise covariance matrix from custom user-supplied " "function"
+ msg = "Retrieving noise covariance matrix from custom user-supplied function"
logger.info(**gen_log_kwargs(message=msg))
msg = f'Output: {out_files["cov"].basename}'
logger.info(**gen_log_kwargs(message=msg))
diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py
index e84877683..b3747c147 100644
--- a/mne_bids_pipeline/steps/sensor/_99_group_average.py
+++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py
@@ -722,18 +722,20 @@ def average_csp_decoding(
all_decoding_data_time_freq = []
for key in list(in_files):
fname_xlsx = in_files.pop(key)
- decoding_data_freq = pd.read_excel(
- fname_xlsx,
- sheet_name="CSP Frequency",
- dtype={"subject": str}, # don't drop trailing zeros
- )
- decoding_data_time_freq = pd.read_excel(
- fname_xlsx,
- sheet_name="CSP Time-Frequency",
- dtype={"subject": str}, # don't drop trailing zeros
- )
- all_decoding_data_freq.append(decoding_data_freq)
- all_decoding_data_time_freq.append(decoding_data_time_freq)
+ with pd.ExcelFile(fname_xlsx) as xf:
+ decoding_data_freq = pd.read_excel(
+ xf,
+ sheet_name="CSP Frequency",
+ dtype={"subject": str}, # don't drop trailing zeros
+ )
+ all_decoding_data_freq.append(decoding_data_freq)
+ if "CSP Time-Frequency" in xf.sheet_names:
+ decoding_data_time_freq = pd.read_excel(
+ xf,
+ sheet_name="CSP Time-Frequency",
+ dtype={"subject": str}, # don't drop trailing zeros
+ )
+ all_decoding_data_time_freq.append(decoding_data_time_freq)
del fname_xlsx
# Now calculate descriptive statistics and bootstrap CIs.
@@ -743,12 +745,15 @@ def average_csp_decoding(
session=session,
data=all_decoding_data_freq,
)
- grand_average_time_freq = _average_csp_time_freq(
- cfg=cfg,
- subject=subject,
- session=session,
- data=all_decoding_data_time_freq,
- )
+ if len(all_decoding_data_time_freq):
+ grand_average_time_freq = _average_csp_time_freq(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ data=all_decoding_data_time_freq,
+ )
+ else:
+ grand_average_time_freq = None
out_files = dict()
out_files["freq"] = _decoding_out_fname(
@@ -762,17 +767,15 @@ def average_csp_decoding(
)
with pd.ExcelWriter(out_files["freq"]) as w:
grand_average_freq.to_excel(w, sheet_name="CSP Frequency", index=False)
- grand_average_time_freq.to_excel(
- w, sheet_name="CSP Time-Frequency", index=False
- )
+ if grand_average_time_freq is not None:
+ grand_average_time_freq.to_excel(
+ w, sheet_name="CSP Time-Frequency", index=False
+ )
+ del grand_average_time_freq
# Perform a cluster-based permutation test.
subjects = cfg.subjects
- time_bins = np.array(cfg.decoding_csp_times)
- if time_bins.ndim == 1:
- time_bins = np.array(list(zip(time_bins[:-1], time_bins[1:])))
- time_bins = pd.DataFrame(time_bins, columns=["t_min", "t_max"])
- freq_name_to_bins_map = _handle_csp_args(
+ freq_name_to_bins_map, time_bins = _handle_csp_args(
cfg.decoding_csp_times,
cfg.decoding_csp_freqs,
cfg.decoding_metric,
@@ -781,79 +784,84 @@ def average_csp_decoding(
time_frequency_freq_min=cfg.time_frequency_freq_min,
time_frequency_freq_max=cfg.time_frequency_freq_max,
)
- data_for_clustering = {}
- for freq_range_name in freq_name_to_bins_map:
- a = np.empty(
- shape=(
- len(subjects),
- len(time_bins),
- len(freq_name_to_bins_map[freq_range_name]),
+ if not len(time_bins):
+ fname_csp_cluster_results = None
+ else:
+ time_bins = pd.DataFrame(time_bins, columns=["t_min", "t_max"])
+ data_for_clustering = {}
+ for freq_range_name in freq_name_to_bins_map:
+ a = np.empty(
+ shape=(
+ len(subjects),
+ len(time_bins),
+ len(freq_name_to_bins_map[freq_range_name]),
+ )
)
+ a.fill(np.nan)
+ data_for_clustering[freq_range_name] = a
+
+ g = pd.concat(all_decoding_data_time_freq).groupby(
+ ["subject", "freq_range_name", "t_min", "t_max"]
)
- a.fill(np.nan)
- data_for_clustering[freq_range_name] = a
- g = pd.concat(all_decoding_data_time_freq).groupby(
- ["subject", "freq_range_name", "t_min", "t_max"]
- )
+ for (subject_, freq_range_name, t_min, t_max), df in g:
+ scores = df["mean_crossval_score"]
+ sub_idx = subjects.index(subject_)
+ time_bin_idx = time_bins.loc[
+ (np.isclose(time_bins["t_min"], t_min))
+ & (np.isclose(time_bins["t_max"], t_max)),
+ :,
+ ].index
+ assert len(time_bin_idx) == 1
+ time_bin_idx = time_bin_idx[0]
+ data_for_clustering[freq_range_name][sub_idx][time_bin_idx] = scores
- for (subject_, freq_range_name, t_min, t_max), df in g:
- scores = df["mean_crossval_score"]
- sub_idx = subjects.index(subject_)
- time_bin_idx = time_bins.loc[
- (np.isclose(time_bins["t_min"], t_min))
- & (np.isclose(time_bins["t_max"], t_max)),
- :,
- ].index
- assert len(time_bin_idx) == 1
- time_bin_idx = time_bin_idx[0]
- data_for_clustering[freq_range_name][sub_idx][time_bin_idx] = scores
-
- if cfg.cluster_forming_t_threshold is None:
- import scipy.stats
-
- cluster_forming_t_threshold = scipy.stats.t.ppf(
- 1 - 0.05,
- len(cfg.subjects) - 1, # one-sided test
- )
- else:
- cluster_forming_t_threshold = cfg.cluster_forming_t_threshold
+ if cfg.cluster_forming_t_threshold is None:
+ import scipy.stats
- cluster_permutation_results = {}
- for freq_range_name, X in data_for_clustering.items():
- if len(X) < 2:
- t_vals = np.full(X.shape[1:], np.nan)
- H0 = all_clusters = cluster_p_vals = np.array([])
- else:
- (
- t_vals,
- all_clusters,
- cluster_p_vals,
- H0,
- ) = mne.stats.permutation_cluster_1samp_test( # noqa: E501
- X=X - 0.5, # One-sample test against zero.
- threshold=cluster_forming_t_threshold,
- n_permutations=cfg.cluster_n_permutations,
- adjacency=None, # each time & freq bin connected to its neighbors
- out_type="mask",
- tail=1, # one-sided: significantly above chance level
- seed=cfg.random_state,
+ cluster_forming_t_threshold = scipy.stats.t.ppf(
+ 1 - 0.05,
+ len(cfg.subjects) - 1, # one-sided test
)
- n_permutations = H0.size - 1
- all_clusters = np.array(all_clusters) # preserve "empty" 0th dimension
- cluster_permutation_results[freq_range_name] = {
- "mean_crossval_scores": X.mean(axis=0),
- "t_vals": t_vals,
- "clusters": all_clusters,
- "cluster_p_vals": cluster_p_vals,
- "cluster_t_threshold": cluster_forming_t_threshold,
- "n_permutations": n_permutations,
- "time_bin_edges": cfg.decoding_csp_times,
- "freq_bin_edges": cfg.decoding_csp_freqs[freq_range_name],
- }
-
- out_files["cluster"] = out_files["freq"].copy().update(extension=".mat")
- savemat(file_name=out_files["cluster"], mdict=cluster_permutation_results)
+ else:
+ cluster_forming_t_threshold = cfg.cluster_forming_t_threshold
+
+ cluster_permutation_results = {}
+ for freq_range_name, X in data_for_clustering.items():
+ if len(X) < 2:
+ t_vals = np.full(X.shape[1:], np.nan)
+ H0 = all_clusters = cluster_p_vals = np.array([])
+ else:
+ (
+ t_vals,
+ all_clusters,
+ cluster_p_vals,
+ H0,
+ ) = mne.stats.permutation_cluster_1samp_test( # noqa: E501
+ X=X - 0.5, # One-sample test against zero.
+ threshold=cluster_forming_t_threshold,
+ n_permutations=cfg.cluster_n_permutations,
+ adjacency=None, # each time & freq bin connected to its neighbors
+ out_type="mask",
+ tail=1, # one-sided: significantly above chance level
+ seed=cfg.random_state,
+ )
+ n_permutations = H0.size - 1
+ all_clusters = np.array(all_clusters) # preserve "empty" 0th dimension
+ cluster_permutation_results[freq_range_name] = {
+ "mean_crossval_scores": X.mean(axis=0),
+ "t_vals": t_vals,
+ "clusters": all_clusters,
+ "cluster_p_vals": cluster_p_vals,
+ "cluster_t_threshold": cluster_forming_t_threshold,
+ "n_permutations": n_permutations,
+ "time_bin_edges": cfg.decoding_csp_times,
+ "freq_bin_edges": cfg.decoding_csp_freqs[freq_range_name],
+ }
+
+ out_files["cluster"] = out_files["freq"].copy().update(extension=".mat")
+ savemat(file_name=out_files["cluster"], mdict=cluster_permutation_results)
+ fname_csp_cluster_results = out_files["cluster"]
assert subject == "average"
with _open_report(
@@ -867,7 +875,7 @@ def average_csp_decoding(
cond_1=cond_1,
cond_2=cond_2,
fname_csp_freq_results=out_files["freq"],
- fname_csp_cluster_results=out_files["cluster"],
+ fname_csp_cluster_results=fname_csp_cluster_results,
)
return _prep_out_files(out_files=out_files, exec_params=exec_params)
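For readers unfamiliar with the clustering call now guarded above, a minimal sketch with synthetic decoding scores (shapes and values invented for illustration):

import numpy as np
import mne

rng = np.random.default_rng(42)
# 10 "subjects" x 4 time bins x 3 frequency bins of decoding scores
X = rng.normal(loc=0.55, scale=0.05, size=(10, 4, 3))
t_vals, clusters, cluster_p_vals, H0 = mne.stats.permutation_cluster_1samp_test(
    X - 0.5,          # one-sample test against chance level (0.5)
    threshold=None,   # auto t-threshold from the number of subjects
    n_permutations=1024,
    adjacency=None,   # lattice adjacency: neighbors in time and frequency
    out_type="mask",
    tail=1,           # one-sided: significantly above chance
    seed=0,
)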
diff --git a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
index 1320d6dc7..1f2947d01 100644
--- a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
+++ b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py
@@ -99,7 +99,7 @@ def main(*, config) -> None:
return
if config.use_template_mri is not None:
- msg = "Skipping, BEM solution computation not needed for " "MRI template …"
+ msg = "Skipping, BEM solution computation not needed for MRI template …"
logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
if config.use_template_mri == "fsaverage":
# Ensure we have the BEM
diff --git a/mne_bids_pipeline/tests/configs/config_ds000117.py b/mne_bids_pipeline/tests/configs/config_ds000117.py
index 14fd77499..2e49f1a4e 100644
--- a/mne_bids_pipeline/tests/configs/config_ds000117.py
+++ b/mne_bids_pipeline/tests/configs/config_ds000117.py
@@ -15,6 +15,7 @@
find_flat_channels_meg = True
find_noisy_channels_meg = True
use_maxwell_filter = True
+process_empty_room = True
mf_reference_run = "02"
mf_cal_fname = bids_root + "/derivatives/meg_derivatives/sss_cal.dat"
diff --git a/mne_bids_pipeline/tests/configs/config_ds003392.py b/mne_bids_pipeline/tests/configs/config_ds003392.py
index b8ee82d2e..37c8a46c3 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003392.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003392.py
@@ -37,6 +37,11 @@
decoding_time_generalization = True
decoding_time_generalization_decim = 4
contrasts = [("incoherent", "coherent")]
+decoding_csp = True
+decoding_csp_times = []
+decoding_csp_freqs = {
+ "alpha": (8, 12),
+}
# Noise estimation
noise_cov = "emptyroom"
diff --git a/mne_bids_pipeline/tests/conftest.py b/mne_bids_pipeline/tests/conftest.py
index 2ac1e9403..dbefd583c 100644
--- a/mne_bids_pipeline/tests/conftest.py
+++ b/mne_bids_pipeline/tests/conftest.py
@@ -62,6 +62,10 @@ def pytest_configure(config):
ignore:datetime\.datetime\.utcnow.*:DeprecationWarning
# pandas with no good workaround
ignore:The behavior of DataFrame concatenation with empty.*:FutureWarning
+ # joblib on Windows sometimes
+ ignore:Persisting input arguments took.*:UserWarning
+ # matplotlib needs to update
+ ignore:Conversion of an array with ndim.*:DeprecationWarning
"""
for warning_line in warning_lines.split("\n"):
warning_line = warning_line.strip()
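For context, the block above feeds pytest's standard filterwarnings mechanism; a minimal sketch of the surrounding registration loop, following the same pattern as the pipeline's conftest:

def pytest_configure(config):
    warning_lines = r"""
    error
    # joblib on Windows sometimes
    ignore:Persisting input arguments took.*:UserWarning
    """
    for warning_line in warning_lines.split("\n"):
        warning_line = warning_line.strip()
        if warning_line and not warning_line.startswith("#"):
            config.addinivalue_line("filterwarnings", warning_line)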
From 541eb9b4058a6201570fd511f87674d3158adda9 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 18 Mar 2024 18:32:49 -0400
Subject: [PATCH 087/132] [pre-commit.ci] pre-commit autoupdate (#891)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 6014fece7..cc8ee6752 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,7 +6,7 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.3.2
+ rev: v0.3.3
hooks:
- id: ruff
args: ["--fix"]
From 313dc18e2c46a16c48ca43774fd119cf623a028d Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Wed, 20 Mar 2024 05:49:19 -0400
Subject: [PATCH 088/132] MAINT: Print longer traceback on report error (#892)
---
mne_bids_pipeline/_report.py | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index 5a4136cef..a9f9b48a4 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -1,7 +1,9 @@
import contextlib
import re
+import traceback
from functools import lru_cache
from io import StringIO
+from textwrap import indent
from types import SimpleNamespace
from typing import Literal, Optional
@@ -67,8 +69,9 @@ def _open_report(
report = mne.open_report(fname_report)
except Exception as exc:
raise exc.__class__(
- f"Could not open {name} HDF5 file:\n{fname_report}\n"
- f"Got error:\n{exc}\nPerhaps you need to delete it?"
+ f"Could not open {name} HDF5 file:\n{fname_report}, "
+ "Perhaps you need to delete it? Got error:\n\n"
+ f'{indent(traceback.format_exc(), " ")}'
) from None
try:
yield report
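A compact sketch of the re-raise pattern introduced above: preserve the original exception class, embed the indented traceback text, and suppress implicit chaining with `from None` (the failing call here is a stand-in for mne.open_report):

import traceback
from textwrap import indent

def open_report_or_explain(fname_report: str):
    try:
        raise OSError("file is locked")  # stand-in for mne.open_report(fname_report)
    except Exception as exc:
        raise exc.__class__(
            f"Could not open report HDF5 file:\n{fname_report}, "
            "Perhaps you need to delete it? Got error:\n\n"
            f'{indent(traceback.format_exc(), "    ")}'
        ) from None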
From 06d2bb91354a9745a05e36c6237880f45bebc352 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Wed, 20 Mar 2024 14:16:08 +0100
Subject: [PATCH 089/132] Set up Dependabot to automatically keep GitHub
Actions up-to-date (#893)
---
.github/dependabot.yml | 15 +++++++++++++++
docs/source/changes.md | 2 ++
docs/source/v1.8.md.inc | 2 +-
docs/source/v1.9.md.inc | 21 +++++++++++++++++++++
4 files changed, 39 insertions(+), 1 deletion(-)
create mode 100644 .github/dependabot.yml
create mode 100644 docs/source/v1.9.md.inc
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 000000000..4a47c7a99
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,15 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
+
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ labels:
+ - "dependabot"
+ commit-message:
+ prefix: "[dependabot]"
diff --git a/docs/source/changes.md b/docs/source/changes.md
index 6f23f2563..8101723ca 100644
--- a/docs/source/changes.md
+++ b/docs/source/changes.md
@@ -1,3 +1,5 @@
+{% include-markdown "./v1.9.md.inc" %}
+
{% include-markdown "./v1.8.md.inc" %}
{% include-markdown "./v1.7.md.inc" %}
diff --git a/docs/source/v1.8.md.inc b/docs/source/v1.8.md.inc
index 552d65272..d4bb7f867 100644
--- a/docs/source/v1.8.md.inc
+++ b/docs/source/v1.8.md.inc
@@ -1,4 +1,4 @@
-## v1.8.0 (unreleased)
+## v1.8.0 (2024-03-20)
### :new: New features & enhancements
diff --git a/docs/source/v1.9.md.inc b/docs/source/v1.9.md.inc
new file mode 100644
index 000000000..a4a001f15
--- /dev/null
+++ b/docs/source/v1.9.md.inc
@@ -0,0 +1,21 @@
+## v1.9.0 (unreleased)
+
+[//]: # (### :new: New features & enhancements)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :warning: Behavior changes)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :package: Requirements)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+[//]: # (### :bug: Bug fixes)
+
+[//]: # (- Whatever (#000 by @whoever))
+
+### :medical_symbol: Code health and infrastructure
+
+- Use GitHub's `dependabot` service to automatically keep GitHub Actions up-to-date. (#893 by @hoechenberger)
From d77087981cf7040183981819ec68c001f375999b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 20 Mar 2024 14:09:36 +0000
Subject: [PATCH 090/132] [dependabot]: Bump actions/setup-python from 4 to 5
(#894)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/release.yml | 2 +-
.github/workflows/run-tests.yml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 1d5a786d7..7e6de72b9 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -20,7 +20,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Set up Python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: '3.10'
- name: Install dependencies
diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
index 43e692304..2684b5310 100644
--- a/.github/workflows/run-tests.yml
+++ b/.github/workflows/run-tests.yml
@@ -14,7 +14,7 @@ jobs:
shell: bash -l {0}
steps:
- uses: actions/checkout@v3
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@v5
with:
python-version: "3.11"
- run: pip install --upgrade pip
From 8f237804d2b21d278d61da20abae329d5fdbd849 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 20 Mar 2024 14:10:06 +0000
Subject: [PATCH 091/132] [dependabot]: Bump actions/download-artifact from 3
to 4 (#895)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/release.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 7e6de72b9..5ea7d5bce 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest
if: github.event_name == 'release'
steps:
- - uses: actions/download-artifact@v3
+ - uses: actions/download-artifact@v4
with:
name: dist
path: dist
From 90baed9c140553ec8604b845bde643b2e2461004 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 20 Mar 2024 14:10:44 +0000
Subject: [PATCH 092/132] [dependabot]: Bump actions/upload-artifact from 3 to
4 (#896)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/release.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 5ea7d5bce..4fe4b4210 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -34,7 +34,7 @@ jobs:
- name: Check env vars
run: |
echo "Triggered by: ${{ github.event_name }}"
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
with:
name: dist
path: dist
From 71f9f188be41c4c71ce003bea7f293478c33cf3f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 20 Mar 2024 10:28:16 -0400
Subject: [PATCH 093/132] [dependabot]: Bump codecov/codecov-action from 3 to 4
(#898)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/run-tests.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
index 2684b5310..d2785baf5 100644
--- a/.github/workflows/run-tests.yml
+++ b/.github/workflows/run-tests.yml
@@ -21,7 +21,7 @@ jobs:
- run: pip install -ve .[tests] codespell tomli
- run: make codespell-error
- run: pytest mne_bids_pipeline -m "not dataset_test"
- - uses: codecov/codecov-action@v3
+ - uses: codecov/codecov-action@v4
if: success()
name: 'Upload coverage to CodeCov'
caching:
@@ -71,5 +71,5 @@ jobs:
- run: pytest --cov-append -k ds003392 mne_bids_pipeline/
- run: pytest --cov-append -k ds003392 mne_bids_pipeline/ # uses "mtime" method
timeout-minutes: 1
- - uses: codecov/codecov-action@v3
+ - uses: codecov/codecov-action@v4
if: success()
From be6850ad68579dc672b3ea9faf5cabc4b8a22668 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 20 Mar 2024 10:56:12 -0400
Subject: [PATCH 094/132] [dependabot]: Bump actions/checkout from 3 to 4
(#897)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/release.yml | 2 +-
.github/workflows/run-tests.yml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 4fe4b4210..497c09832 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -18,7 +18,7 @@ jobs:
package:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
index d2785baf5..879eab4f9 100644
--- a/.github/workflows/run-tests.yml
+++ b/.github/workflows/run-tests.yml
@@ -13,7 +13,7 @@ jobs:
run:
shell: bash -l {0}
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.11"
From 8e5546fa207b4cdb66d2b18bbee2462684fccf32 Mon Sep 17 00:00:00 2001
From: Sophie Herbst
Date: Mon, 25 Mar 2024 15:18:01 +0100
Subject: [PATCH 095/132] Add number of subjects to report (#902)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Alexandre Gramfort
Co-authored-by: Eric Larson
---
docs/source/v1.9.md.inc | 4 +++-
mne_bids_pipeline/steps/sensor/_99_group_average.py | 11 +++++++----
2 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/docs/source/v1.9.md.inc b/docs/source/v1.9.md.inc
index a4a001f15..67c724e07 100644
--- a/docs/source/v1.9.md.inc
+++ b/docs/source/v1.9.md.inc
@@ -1,6 +1,8 @@
## v1.9.0 (unreleased)
-[//]: # (### :new: New features & enhancements)
+### :new: New features & enhancements
+
+- Added number of subjects to sub-average report (#902 by @SophieHerbst)
[//]: # (- Whatever (#000 by @whoever))
diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py
index b3747c147..923b61ccb 100644
--- a/mne_bids_pipeline/steps/sensor/_99_group_average.py
+++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py
@@ -154,9 +154,12 @@ def average_evokeds(
for condition, evoked in zip(conditions, evokeds):
tags = ("evoked", _sanitize_cond_tag(condition))
if condition in cfg.conditions:
- title = f"Average (sensor): {condition}"
+ title = f"Average (sensor): {condition}, N = {len(cfg.subjects)}"
else: # It's a contrast of two conditions.
- title = f"Average (sensor) contrast: {condition}"
+ title = (
+ f"Average (sensor) contrast: {condition}, "
+ f"N = {len(cfg.subjects)}"
+ )
tags = tags + ("contrast",)
report.add_evokeds(
@@ -447,7 +450,7 @@ def average_time_by_time_decoding(
)
savemat(out_files["mat"], contrast_score_stats)
- section = "Decoding: time-by-time"
+ section = f"Decoding: time-by-time, N = {len(cfg.subjects)}"
with _open_report(
cfg=cfg, exec_params=exec_params, subject=subject, session=session
) as report:
@@ -679,7 +682,7 @@ def average_full_epochs_report(
report.add_figure(
fig=fig,
title="Full-epochs decoding",
- section="Decoding: full-epochs",
+ section=f"Decoding: full-epochs, N = {len(cfg.subjects)}",
caption=caption,
tags=(
"epochs",
From ed5939a1d8c820197c0d69d5455693c5b4898e43 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 25 Mar 2024 18:38:59 -0400
Subject: [PATCH 096/132] [pre-commit.ci] pre-commit autoupdate (#903)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index cc8ee6752..00b8889fe 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,7 +6,7 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.3.3
+ rev: v0.3.4
hooks:
- id: ruff
args: ["--fix"]
From 96251a8338e0bd0d2aa4458d02b24a4d7d86bbc5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Thu, 28 Mar 2024 05:38:53 +0100
Subject: [PATCH 097/132] Drop support for Python 3.9, drop `Optional` and
`Union` from type hints (#908)
Co-authored-by: Eric Larson
---
docs/source/examples/gen_examples.py | 3 +-
docs/source/settings/gen_settings.py | 25 +--
docs/source/v1.9.md.inc | 8 +-
mne_bids_pipeline/_config.py | 188 +++++++++---------
mne_bids_pipeline/_config_import.py | 15 +-
mne_bids_pipeline/_config_utils.py | 26 +--
mne_bids_pipeline/_decoding.py | 4 +-
mne_bids_pipeline/_import_data.py | 98 ++++-----
mne_bids_pipeline/_logging.py | 23 +--
mne_bids_pipeline/_parallel.py | 3 +-
mne_bids_pipeline/_reject.py | 8 +-
mne_bids_pipeline/_report.py | 28 +--
mne_bids_pipeline/_run.py | 17 +-
.../steps/init/_01_init_derivatives_dir.py | 3 +-
.../steps/init/_02_find_empty_room.py | 7 +-
.../steps/preprocessing/_01_data_quality.py | 23 +--
.../steps/preprocessing/_02_head_pos.py | 15 +-
.../steps/preprocessing/_03_maxfilter.py | 21 +-
.../preprocessing/_04_frequency_filter.py | 40 ++--
.../preprocessing/_05_regress_artifact.py | 9 +-
.../steps/preprocessing/_06a1_fit_ica.py | 7 +-
.../preprocessing/_06a2_find_ica_artifacts.py | 14 +-
.../steps/preprocessing/_06b_run_ssp.py | 7 +-
.../steps/preprocessing/_07_make_epochs.py | 5 +-
.../steps/preprocessing/_08a_apply_ica.py | 15 +-
.../steps/preprocessing/_08b_apply_ssp.py | 13 +-
.../steps/preprocessing/_09_ptp_reject.py | 5 +-
.../steps/sensor/_01_make_evoked.py | 5 +-
.../steps/sensor/_02_decoding_full_epochs.py | 5 +-
.../steps/sensor/_03_decoding_time_by_time.py | 5 +-
.../steps/sensor/_04_time_frequency.py | 5 +-
.../steps/sensor/_05_decoding_csp.py | 5 +-
.../steps/sensor/_06_make_cov.py | 15 +-
.../steps/sensor/_99_group_average.py | 25 ++-
.../steps/source/_01_make_bem_surfaces.py | 7 +-
.../steps/source/_04_make_forward.py | 9 +-
.../steps/source/_05_make_inverse.py | 5 +-
.../steps/source/_99_group_average.py | 11 +-
mne_bids_pipeline/tests/test_run.py | 4 +-
mne_bids_pipeline/typing.py | 4 +-
pyproject.toml | 2 +-
41 files changed, 356 insertions(+), 381 deletions(-)
diff --git a/docs/source/examples/gen_examples.py b/docs/source/examples/gen_examples.py
index 2ae1a1b5a..24564754f 100755
--- a/docs/source/examples/gen_examples.py
+++ b/docs/source/examples/gen_examples.py
@@ -9,7 +9,6 @@
from collections import defaultdict
from collections.abc import Iterable
from pathlib import Path
-from typing import Union
from tqdm import tqdm
@@ -24,7 +23,7 @@
logger = logging.getLogger()
-def _bool_to_icon(x: Union[bool, Iterable]) -> str:
+def _bool_to_icon(x: bool | Iterable) -> str:
if x:
return "✅"
else:
diff --git a/docs/source/settings/gen_settings.py b/docs/source/settings/gen_settings.py
index 2b749fc10..8300245fe 100755
--- a/docs/source/settings/gen_settings.py
+++ b/docs/source/settings/gen_settings.py
@@ -99,18 +99,16 @@
# We cannot use ast for this because it doesn't preserve comments. We could use
# something like redbaron, but our code is hopefully simple enough!
assign_re = re.compile(
- # Line starts with annotation syntax (name captured by the first group).
- r"^(\w+): "
- # Then the annotation can be ...
- "("
- # ... a standard assignment ...
- ".+ = .+"
- # ... or ...
- "|"
- # ... the start of a multiline type annotation like "a: Union["
- r"(Union|Optional|Literal)\["
- # To the end of the line.
- ")$",
+ "^" # The line starts, then is followed by
+ r"(\w+): " # annotation syntax (name captured by the first group),
+ "(?:" # then the rest of the line can be (in a non-capturing group):
+ ".+ = .+" # 1. a standard assignment
+ "|" # 2. or
+ r"Literal\[" # 3. the start of a multiline type annotation like "a: Literal["
+ "|" # 4. or
+ r"\(" # 5. the start of a multiline 3.9+ type annotation like "a: ("
+ ")" # Then the end of our group
+ "$", # and immediately the end of the line.
re.MULTILINE,
)
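To see what the rewritten pattern accepts, a quick standalone check (the example lines are invented but follow the shapes the regex targets):

import re

assign_re = re.compile(r"^(\w+): (?:.+ = .+|Literal\[|\()$", re.MULTILINE)

source = "\n".join([
    "bids_root: PathLike | None = None",  # 1. standard assignment
    "sessions: Literal[",                 # 3. multiline Literal[...] start
    "analyze_channels: (",                # 5. parenthesized multiline start
    "not_an_annotation = 1",              # no annotation -> no match
])
print([m.group(1) for m in assign_re.finditer(source)])
# ['bids_root', 'sessions', 'analyze_channels']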
@@ -186,8 +184,7 @@ def main():
match = assign_re.match(line)
if match is not None:
have_params = True
- name, typ, desc = match.groups()
- current_lines.append(f"{prefix}{name}")
+ current_lines.append(f"{prefix}{match.groups()[0]}")
continue
diff --git a/docs/source/v1.9.md.inc b/docs/source/v1.9.md.inc
index 67c724e07..0ff3240cc 100644
--- a/docs/source/v1.9.md.inc
+++ b/docs/source/v1.9.md.inc
@@ -2,7 +2,9 @@
### :new: New features & enhancements
-- Added number of subjects to sub-average report (#902 by @SophieHerbst)
+- Added number of subjects to `sub-average` report (#902 by @SophieHerbst)
+- The type annotations in the default configuration file are now easier to read: We
+ replaced `Union[X, Y]` with `X | Y` and `Optional[X]` with `X | None`. (#908 by @hoechenberger)
[//]: # (- Whatever (#000 by @whoever))
@@ -10,7 +12,9 @@
[//]: # (- Whatever (#000 by @whoever))
-[//]: # (### :package: Requirements)
+### :package: Requirements
+
+- We dropped support for Python 3.9. You now need Python 3.10 or newer.
[//]: # (- Whatever (#000 by @whoever))
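The annotation change described above is purely syntactic; a minimal before/after sketch (names taken from the config, values illustrative):

from typing import Literal, Optional, Union

# Before: Python 3.9-compatible spellings
mf_st_duration_old: Optional[float] = None
sessions_old: Union[list, Literal["all"]] = "all"

# After: PEP 604 unions; these are evaluated at runtime in the config,
# hence the new Python 3.10+ requirement
mf_st_duration: float | None = None
sessions: list | Literal["all"] = "all"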
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 0df4096e1..175b96cd2 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1,7 +1,7 @@
# Default settings for data processing and analysis.
-from collections.abc import Sequence
-from typing import Annotated, Any, Callable, Literal, Optional, Union
+from collections.abc import Callable, Sequence
+from typing import Annotated, Any, Literal
from annotated_types import Ge, Interval, Len, MinLen
from mne import Covariance
@@ -17,7 +17,7 @@
# %%
# # General settings
-bids_root: Optional[PathLike] = None
+bids_root: PathLike | None = None
"""
Specify the BIDS root directory. Pass an empty string or `None` to use
the value specified in the `BIDS_ROOT` environment variable instead.
@@ -30,7 +30,7 @@
```
"""
-deriv_root: Optional[PathLike] = None
+deriv_root: PathLike | None = None
"""
The root of the derivatives directory in which the pipeline will store
the processing results. If `None`, this will be
@@ -41,7 +41,7 @@
set [`subjects_dir`][mne_bids_pipeline._config.subjects_dir] as well.
"""
-subjects_dir: Optional[PathLike] = None
+subjects_dir: PathLike | None = None
"""
Path to the directory that contains the FreeSurfer reconstructions of all
subjects. Specifically, this defines the `SUBJECTS_DIR` that is used by
@@ -73,7 +73,7 @@
Enabling interactive mode deactivates parallel processing.
"""
-sessions: Union[list, Literal["all"]] = "all"
+sessions: list | Literal["all"] = "all"
"""
The sessions to process. If `'all'`, will process all sessions found in the
BIDS dataset.
@@ -89,13 +89,13 @@
Whether the task should be treated as resting-state data.
"""
-runs: Union[Sequence, Literal["all"]] = "all"
+runs: Sequence | Literal["all"] = "all"
"""
The runs to process. If `'all'`, will process all runs found in the
BIDS dataset.
"""
-exclude_runs: Optional[dict[str, list[str]]] = None
+exclude_runs: dict[str, list[str]] | None = None
"""
Specify runs to exclude from analysis, for each participant individually.
@@ -111,34 +111,34 @@
did not understand the instructions, etc.).
"""
-crop_runs: Optional[tuple[float, float]] = None
+crop_runs: tuple[float, float] | None = None
"""
Crop the raw data of each run to the specified time interval `[tmin, tmax]`,
in seconds. The runs will be cropped before Maxwell or frequency filtering is
applied. If `None`, do not crop the data.
"""
-acq: Optional[str] = None
+acq: str | None = None
"""
The BIDS `acquisition` entity.
"""
-proc: Optional[str] = None
+proc: str | None = None
"""
The BIDS `processing` entity.
"""
-rec: Optional[str] = None
+rec: str | None = None
"""
The BIDS `recording` entity.
"""
-space: Optional[str] = None
+space: str | None = None
"""
The BIDS `space` entity.
"""
-subjects: Union[Sequence[str], Literal["all"]] = "all"
+subjects: Sequence[str] | Literal["all"] = "all"
"""
Subjects to analyze. If `'all'`, include all subjects. To only
include a subset of subjects, pass a list of their identifiers. Even
@@ -205,7 +205,7 @@
```
"""
-data_type: Optional[Literal["meg", "eeg"]] = None
+data_type: Literal["meg", "eeg"] | None = None
"""
The BIDS data type.
@@ -239,7 +239,7 @@
```
"""
-eog_channels: Optional[Sequence[str]] = None
+eog_channels: Sequence[str] | None = None
"""
Specify EOG channels to use, or create virtual EOG channels.
@@ -274,7 +274,7 @@
```
"""
-eeg_bipolar_channels: Optional[dict[str, tuple[str, str]]] = None
+eeg_bipolar_channels: dict[str, tuple[str, str]] | None = None
"""
Combine two channels into a bipolar channel, whose signal is the **difference**
between the two combined channels, and add it to the data.
@@ -307,7 +307,7 @@
```
"""
-eeg_reference: Union[Literal["average"], str, Sequence["str"]] = "average"
+eeg_reference: Literal["average"] | str | Sequence["str"] = "average"
"""
The EEG reference to use. If `average`, will use the average reference,
i.e. the average across all channels. If a string, must be the name of a single
@@ -330,7 +330,7 @@
```
"""
-eeg_template_montage: Optional[Union[str, DigMontageType]] = None
+eeg_template_montage: str | DigMontageType | None = None
"""
In situations where you wish to process EEG data and no individual
digitization points (measured channel locations) are available, you can apply
@@ -371,9 +371,9 @@
```
"""
-analyze_channels: Union[
- Literal["all", "ch_types"], Annotated[Sequence["str"], MinLen(1)]
-] = "ch_types"
+analyze_channels: Literal["all", "ch_types"] | Annotated[Sequence["str"], MinLen(1)] = (
+ "ch_types"
+)
"""
The names of the channels to analyze during ERP/ERF and time-frequency analysis
steps. For certain paradigms, e.g. EEG ERP research, it is common to constrain
@@ -404,7 +404,7 @@
```
"""
-read_raw_bids_verbose: Optional[Literal["error"]] = None
+read_raw_bids_verbose: Literal["error"] | None = None
"""
Verbosity level to pass to `read_raw_bids(..., verbose=read_raw_bids_verbose)`.
If you know your dataset will contain files that are not perfectly BIDS
@@ -412,7 +412,7 @@
`'error'` to suppress warnings emitted by read_raw_bids.
"""
-plot_psd_for_runs: Union[Literal["all"], Sequence[str]] = "all"
+plot_psd_for_runs: Literal["all"] | Sequence[str] = "all"
"""
For which runs to add a power spectral density (PSD) plot to the generated
report. This can take a considerable amount of time if you have many long
@@ -420,7 +420,7 @@
plotting.
"""
-random_state: Optional[int] = 42
+random_state: int | None = 42
"""
You can specify the seed of the random number generator (RNG).
This setting is passed to the ICA algorithm and to the decoding function,
@@ -574,7 +574,7 @@
before applying Maxwell filter.
"""
-mf_st_duration: Optional[float] = None
+mf_st_duration: float | None = None
"""
There are two kinds of Maxwell filtering: SSS (signal space separation) and
tSSS (temporal signal space separation)
@@ -615,7 +615,7 @@
```
"""
-mf_head_origin: Union[Literal["auto"], FloatArrayLike] = "auto"
+mf_head_origin: Literal["auto"] | FloatArrayLike = "auto"
"""
`mf_head_origin` : array-like, shape (3,) | 'auto'
Origin of internal and external multipolar moment space in meters.
@@ -630,7 +630,7 @@
```
"""
-mf_destination: Union[Literal["reference_run"], FloatArrayLike] = "reference_run"
+mf_destination: Literal["reference_run"] | FloatArrayLike = "reference_run"
"""
Despite all possible care to avoid movements in the MEG, the participant
will likely slowly drift down from the Dewar or slightly shift the head
@@ -661,7 +661,7 @@
is expected.
"""
-mf_reference_run: Optional[str] = None
+mf_reference_run: str | None = None
"""
Which run to take as the reference for adjusting the head position of all
runs when [`mf_destination="reference_run"`][mne_bids_pipeline._config.mf_destination].
@@ -673,7 +673,7 @@
```
"""
-mf_cal_fname: Optional[str] = None
+mf_cal_fname: str | None = None
"""
!!! warning
This parameter should only be used for BIDS datasets that don't store
@@ -687,7 +687,7 @@
```
""" # noqa : E501
-mf_ctc_fname: Optional[str] = None
+mf_ctc_fname: str | None = None
"""
Path to the Maxwell Filter cross-talk file. If `None`, the recommended
location is used.
@@ -707,7 +707,7 @@
Number of extended SSS (eSSS) basis projectors to use from empty-room data.
"""
-mf_esss_reject: Optional[dict[str, float]] = None
+mf_esss_reject: dict[str, float] | None = None
"""
Rejection parameters to use when computing the extended SSS (eSSS) basis.
"""
@@ -722,7 +722,7 @@
Minimum time step to use during cHPI coil amplitude estimation.
"""
-mf_mc_t_window: Union[float, Literal["auto"]] = "auto"
+mf_mc_t_window: float | Literal["auto"] = "auto"
"""
The window to use during cHPI coil amplitude estimation and in cHPI filtering.
Can be "auto" to autodetect a reasonable value or a float (in seconds).
@@ -738,19 +738,19 @@
Minimum distance (m) to accept for cHPI position fitting.
"""
-mf_mc_rotation_velocity_limit: Optional[float] = None
+mf_mc_rotation_velocity_limit: float | None = None
"""
The rotation velocity limit (degrees/second) to use when annotating
movement-compensated data. If `None`, no annotations will be added.
"""
-mf_mc_translation_velocity_limit: Optional[float] = None
+mf_mc_translation_velocity_limit: float | None = None
"""
The translation velocity limit (meters/second) to use when annotating
movement-compensated data. If `None`, no annotations will be added.
"""
-mf_filter_chpi: Optional[bool] = None
+mf_filter_chpi: bool | None = None
"""
Use mne.chpi.filter_chpi after Maxwell filtering. Can be None to use
the same value as [`mf_mc`][mne_bids_pipeline._config.mf_mc].
@@ -781,33 +781,33 @@
# If you need more fancy analysis, you are already likely past this kind
# of tips! 😇
-l_freq: Optional[float] = None
+l_freq: float | None = None
"""
The low-frequency cut-off in the highpass filtering step.
Keep it `None` if no highpass filtering should be applied.
"""
-h_freq: Optional[float] = 40.0
+h_freq: float | None = 40.0
"""
The high-frequency cut-off in the lowpass filtering step.
Keep it `None` if no lowpass filtering should be applied.
"""
-l_trans_bandwidth: Union[float, Literal["auto"]] = "auto"
+l_trans_bandwidth: float | Literal["auto"] = "auto"
"""
Specifies the transition bandwidth of the
highpass filter. By default it's `'auto'` and uses default MNE
parameters.
"""
-h_trans_bandwidth: Union[float, Literal["auto"]] = "auto"
+h_trans_bandwidth: float | Literal["auto"] = "auto"
"""
Specifies the transition bandwidth of the
lowpass filter. By default it's `'auto'` and uses default MNE
parameters.
"""
-notch_freq: Optional[Union[float, Sequence[float]]] = None
+notch_freq: float | Sequence[float] | None = None
"""
Notch filter frequency. More than one frequency can be supplied, e.g. to remove
harmonics. Keep it `None` if no notch filter should be applied.
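For example, to remove power-line noise and its harmonics (the frequencies below assume a 50 Hz grid; use 60/120/180 where applicable):

```python
notch_freq = [50, 100, 150]  # line frequency plus two harmonics
```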
@@ -831,7 +831,7 @@
Specifies the transition bandwidth of the notch filter. The default is `1.`.
"""
-notch_widths: Optional[Union[float, Sequence[float]]] = None
+notch_widths: float | Sequence[float] | None = None
"""
Specifies the width of each stop band. `None` uses the MNE default.
"""
@@ -845,7 +845,7 @@
# resample your data down to 500 Hz without preventing reliable time-frequency
# exploration of your data.
-raw_resample_sfreq: Optional[float] = None
+raw_resample_sfreq: float | None = None
"""
Specifies at which sampling frequency the data should be resampled.
If `None`, then no resampling will be done.
@@ -916,7 +916,7 @@
April 1st, 2021.
"""
-epochs_metadata_tmin: Optional[float] = None
+epochs_metadata_tmin: float | None = None
"""
The beginning of the time window for metadata generation, in seconds,
relative to the time-locked event of the respective epoch. This may be less
@@ -924,13 +924,13 @@
time point of the epoch.
"""
-epochs_metadata_tmax: Optional[float] = None
+epochs_metadata_tmax: float | None = None
"""
Same as `epochs_metadata_tmin`, but specifying the **end** of the time
window for metadata generation.
"""
-epochs_metadata_keep_first: Optional[Sequence[str]] = None
+epochs_metadata_keep_first: Sequence[str] | None = None
"""
Event groupings using hierarchical event descriptors (HEDs) for which to store
the time of the **first** occurrence of any event of this group in a new column
@@ -958,14 +958,14 @@
and `first_stimulus`.
"""
-epochs_metadata_keep_last: Optional[Sequence[str]] = None
+epochs_metadata_keep_last: Sequence[str] | None = None
"""
Same as `epochs_metadata_keep_first`, but for keeping the **last**
occurrence of matching event types. The columns indicating the event types
will be named with a `last_` instead of a `first_` prefix.
"""
-epochs_metadata_query: Optional[str] = None
+epochs_metadata_query: str | None = None
"""
A [metadata query](https://mne.tools/stable/auto_tutorials/epochs/30_epochs_metadata.html)
specifying which epochs to keep. If the query fails because it refers to an
@@ -978,7 +978,7 @@
```
""" # noqa: E501
-conditions: Optional[Union[Sequence[str], dict[str, str]]] = None
+conditions: Sequence[str] | dict[str, str] | None = None
"""
The time-locked events based on which to create evoked responses.
This can either be the name of the experimental condition as specified in the
@@ -1030,18 +1030,18 @@
```
"""
-rest_epochs_duration: Optional[float] = None
+rest_epochs_duration: float | None = None
"""
Duration of epochs in seconds.
"""
-rest_epochs_overlap: Optional[float] = None
+rest_epochs_overlap: float | None = None
"""
Overlap between epochs in seconds. This is used if the task is `'rest'`
and the annotations do not contain any stimulation or behavior events.
"""
-baseline: Optional[tuple[Optional[float], Optional[float]]] = (None, 0)
+baseline: tuple[float | None, float | None] | None = (None, 0)
"""
Specifies which time interval to use for baseline correction of epochs;
if `None`, no baseline correction is applied.
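A sketch of the two common settings, using the default shown in the hunk above:

```python
baseline = (None, 0)  # from the start of the epoch up to the time-locked event
# baseline = None     # disable baseline correction entirely
```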
@@ -1093,7 +1093,7 @@
# ### SSP, ICA, and artifact regression
-regress_artifact: Optional[dict[str, Any]] = None
+regress_artifact: dict[str, Any] | None = None
"""
Keyword arguments to pass to the `mne.preprocessing.EOGRegression` model used
in `mne.preprocessing.regress_artifact`. If `None`, no time-domain regression will
@@ -1111,7 +1111,7 @@
```
""" # noqa: E501
-spatial_filter: Optional[Literal["ssp", "ica"]] = None
+spatial_filter: Literal["ssp", "ica"] | None = None
"""
Whether to use a spatial filter to detect and remove artifacts. The BIDS
Pipeline offers the use of signal-space projection (SSP) and independent
@@ -1171,7 +1171,7 @@
`'separate'` otherwise.
"""
-ssp_reject_ecg: Optional[Union[dict[str, float], Literal["autoreject_global"]]] = None
+ssp_reject_ecg: dict[str, float] | Literal["autoreject_global"] | None = None
"""
Peak-to-peak amplitude limits of the ECG epochs to exclude from SSP fitting.
This allows you to remove strong transient artifacts, which could negatively
@@ -1189,7 +1189,7 @@
```
"""
-ssp_reject_eog: Optional[Union[dict[str, float], Literal["autoreject_global"]]] = None
+ssp_reject_eog: dict[str, float] | Literal["autoreject_global"] | None = None
"""
Peak-to-peak amplitude limits of the EOG epochs to exclude from SSP fitting.
This allows you to remove strong transient artifacts, which could negatively
@@ -1207,13 +1207,13 @@
```
"""
-ssp_ecg_channel: Optional[str] = None
+ssp_ecg_channel: str | None = None
"""
Channel to use for ECG SSP. Can be useful when the autodetected ECG channel
is not reliable.
"""
-ica_reject: Optional[Union[dict[str, float], Literal["autoreject_local"]]] = None
+ica_reject: dict[str, float] | Literal["autoreject_local"] | None = None
"""
Peak-to-peak amplitude limits to exclude epochs from ICA fitting. This allows you to
remove strong transient artifacts from the epochs used for fitting ICA, which could
@@ -1271,7 +1271,7 @@
algorithm (but may converge in less time).
"""
-ica_l_freq: Optional[float] = 1.0
+ica_l_freq: float | None = 1.0
"""
The cutoff frequency of the high-pass filter to apply before running ICA.
Using a relatively high cutoff like 1 Hz will remove slow drifts from the
@@ -1305,7 +1305,7 @@
limit may be too low to achieve convergence.
"""
-ica_n_components: Optional[Union[float, int]] = None
+ica_n_components: float | int | None = None
"""
MNE conducts ICA as a sort of two-step procedure: First, a PCA is run
on the data (trying to exclude zero-valued components in rank-deficient
@@ -1330,7 +1330,7 @@
This setting may drastically alter the time required to compute ICA.
"""
-ica_decim: Optional[int] = None
+ica_decim: int | None = None
"""
The decimation parameter to compute ICA. If 5, it means
that only every 5th sample is used by the ICA solver. The higher the faster
@@ -1358,9 +1358,9 @@
# You can do a quick average of blink data and check what the amplitude looks
# like.
-reject: Optional[
- Union[dict[str, float], Literal["autoreject_global", "autoreject_local"]]
-] = None
+reject: dict[str, float] | Literal["autoreject_global", "autoreject_local"] | None = (
+ None
+)
"""
Peak-to-peak amplitude limits to mark epochs as bad. This allows you to remove
epochs with strong transient artifacts.
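An illustrative configuration (the thresholds are assumptions, not recommendations); the `"autoreject_global"` literal comes from the annotation above:

```python
reject = {"grad": 4000e-13, "mag": 4e-12, "eeg": 150e-6}  # peak-to-peak limits
# reject = "autoreject_global"  # estimate thresholds with autoreject instead
```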
@@ -1397,7 +1397,7 @@
```
""" # noqa: E501
-reject_tmin: Optional[float] = None
+reject_tmin: float | None = None
"""
Start of the time window used to reject epochs. If `None`, the window will
start with the first time point. Has no effect if
@@ -1409,7 +1409,7 @@
```
"""
-reject_tmax: Optional[float] = None
+reject_tmax: float | None = None
"""
End of the time window used to reject epochs. If `None`, the window will end
with the last time point. Has no effect if
@@ -1443,7 +1443,7 @@
# ## Condition contrasts
-contrasts: Sequence[Union[tuple[str, str], ArbitraryContrast]] = []
+contrasts: Sequence[tuple[str, str] | ArbitraryContrast] = []
"""
The conditions to contrast via a subtraction of ERPs / ERFs. The list elements
can either be tuples or dictionaries (or a mix of both). Each element in the
@@ -1539,14 +1539,14 @@
PTP-based rejection or Autoreject (epochs with the filename `*proc-clean_epo.fif`).
"""
-decoding_epochs_tmin: Optional[float] = 0.0
+decoding_epochs_tmin: float | None = 0.0
"""
The first time sample to use for full epochs decoding. By default it starts
at 0. If `None`, it starts at the beginning of the epoch. Does not affect time-by-time
decoding.
"""
-decoding_epochs_tmax: Optional[float] = None
+decoding_epochs_tmax: float | None = None
"""
The last time sample to use for full epochs decoding. By default it is set
to None so it ends at the end of the epoch.
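For instance, to decode only an assumed 0–500 ms window after the time-locked event:

```python
decoding_epochs_tmin = 0.0
decoding_epochs_tmax = 0.5
```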
@@ -1605,7 +1605,7 @@
time and frequency.
"""
-decoding_csp_times: Optional[FloatArrayLike] = None
+decoding_csp_times: FloatArrayLike | None = None
"""
The edges of the time bins to use for CSP decoding.
Must contain at least two elements. By default, 5 equally-spaced bins are
@@ -1625,7 +1625,7 @@
```
"""
-decoding_csp_freqs: Optional[dict[str, FloatArrayLike]] = None
+decoding_csp_freqs: dict[str, FloatArrayLike] | None = None
"""
The edges of the frequency bins to use for CSP decoding.
@@ -1675,7 +1675,7 @@
confidence interval of the mean decoding scores.
"""
-cluster_forming_t_threshold: Optional[float] = None
+cluster_forming_t_threshold: float | None = None
"""
The t-value threshold to use for forming clusters in the cluster-based
permutation test run on the time-by-time decoding scores.
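One common way to choose this threshold, sketched under the assumption of a one-sample t-test across subjects (this is not pipeline-internal code):

```python
# Two-tailed t critical value for p < 0.001 (n_subjects is illustrative):
from scipy import stats

n_subjects = 20
cluster_forming_t_threshold = stats.t.ppf(1 - 0.001 / 2, df=n_subjects - 1)
```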
@@ -1717,7 +1717,7 @@
```
"""
-time_frequency_freq_min: Optional[float] = 8
+time_frequency_freq_min: float | None = 8
"""
Minimum frequency for the time frequency analysis, in Hz.
???+ example "Example"
@@ -1726,7 +1726,7 @@
```
"""
-time_frequency_freq_max: Optional[float] = 40
+time_frequency_freq_max: float | None = 40
"""
Maximum frequency for the time frequency analysis, in Hz.
???+ example "Example"
@@ -1735,7 +1735,7 @@
```
"""
-time_frequency_cycles: Optional[Union[float, FloatArrayLike]] = None
+time_frequency_cycles: float | FloatArrayLike | None = None
"""
The number of cycles to use in the Morlet wavelet. This can be a single number
or one per frequency, where frequencies are calculated via
@@ -1754,7 +1754,7 @@
This also applies to CSP analysis.
"""
-time_frequency_baseline: Optional[tuple[float, float]] = None
+time_frequency_baseline: tuple[float, float] | None = None
"""
Baseline period to use for the time-frequency analysis. If `None`, no baseline.
???+ example "Example"
@@ -1773,7 +1773,7 @@
```
"""
-time_frequency_crop: Optional[dict] = None
+time_frequency_crop: dict | None = None
"""
Period and frequency range to crop the time-frequency analysis to.
If `None`, no cropping.
@@ -1811,7 +1811,7 @@
# ## BEM surface
-use_template_mri: Optional[str] = None
+use_template_mri: str | None = None
"""
Whether to use a template MRI subject such as FreeSurfer's `fsaverage` subject.
This may come in handy if you don't have individual MR scans of your
@@ -1885,7 +1885,7 @@
# ## Source space & forward solution
-mri_t1_path_generator: Optional[Callable[[BIDSPath], BIDSPath]] = None
+mri_t1_path_generator: Callable[[BIDSPath], BIDSPath] | None = None
"""
To perform source-level analyses, the Pipeline needs to generate a
transformation matrix that translates coordinates from MEG and EEG sensor
@@ -1945,7 +1945,7 @@ def get_t1_from_meeg(bids_path):
```
"""
-mri_landmarks_kind: Optional[Callable[[BIDSPath], str]] = None
+mri_landmarks_kind: Callable[[BIDSPath], str] | None = None
"""
This config option allows you to look for specific landmarks in the JSON
sidecar file of the T1 MRI file. This can be useful when we have different
@@ -1962,7 +1962,7 @@ def mri_landmarks_kind(bids_path):
```
"""
-spacing: Union[Literal["oct5", "oct6", "ico4", "ico5", "all"], int] = "oct6"
+spacing: Literal["oct5", "oct6", "ico4", "ico5", "all"] | int = "oct6"
"""
The spacing to use. Can be `'ico#'` for a recursively subdivided
icosahedron, `'oct#'` for a recursively subdivided octahedron,
@@ -1979,7 +1979,7 @@ def mri_landmarks_kind(bids_path):
# ## Inverse solution
-loose: Union[float, Literal["auto"]] = 0.2
+loose: float | Literal["auto"] = 0.2
"""
Value that weights the source variances of the dipole components
that are parallel (tangential) to the cortical surface. If `0`, then the
@@ -1990,7 +1990,7 @@ def mri_landmarks_kind(bids_path):
unless `fixed is True` in which case the value 0. is used.
"""
-depth: Optional[Union[float, dict]] = 0.8
+depth: float | dict | None = 0.8
"""
If float (default 0.8), it acts as the depth weighting exponent (`exp`)
to use (must be between 0 and 1). None is equivalent to 0, meaning no
@@ -2005,11 +2005,11 @@ def mri_landmarks_kind(bids_path):
solution.
"""
-noise_cov: Union[
- tuple[Optional[float], Optional[float]],
- Literal["emptyroom", "rest", "ad-hoc"],
- Callable[[BIDSPath], Covariance],
-] = (None, 0)
+noise_cov: (
+ tuple[float | None, float | None]
+ | Literal["emptyroom", "rest", "ad-hoc"]
+ | Callable[[BIDSPath], Covariance]
+) = (None, 0)
"""
Specify how to estimate the noise covariance matrix, which is used in
inverse modeling.
@@ -2089,7 +2089,7 @@ def noise_cov(bids_path):
of `mne.compute_covariance` for details.
"""
-source_info_path_update: Optional[dict[str, str]] = dict(suffix="ave")
+source_info_path_update: dict[str, str] | None = dict(suffix="ave")
"""
When computing the forward and inverse solutions, by default the pipeline
retrieves the `mne.Info` object from the cleaned evoked data. However, in
@@ -2131,7 +2131,7 @@ def noise_cov(bids_path):
# ## Report generation
-report_evoked_n_time_points: Optional[int] = None
+report_evoked_n_time_points: int | None = None
"""
Specifies the number of time points to display for each evoked
in the report. If `None`, it defaults to the current default in MNE-Python.
@@ -2143,7 +2143,7 @@ def noise_cov(bids_path):
```
"""
-report_stc_n_time_points: Optional[int] = None
+report_stc_n_time_points: int | None = None
"""
Specifies the number of time points to display for each source estimate
in the report. If `None`, it defaults to the current default in MNE-Python.
@@ -2155,7 +2155,7 @@ def noise_cov(bids_path):
```
"""
-report_add_epochs_image_kwargs: Optional[dict] = None
+report_add_epochs_image_kwargs: dict | None = None
"""
Specifies the limits for the color scales of the epochs_image in the report.
If `None`, it defaults to the current default in MNE-Python.
@@ -2197,7 +2197,7 @@ def noise_cov(bids_path):
Ignored if `parallel_backend` is not `'dask'`.
"""
-dask_temp_dir: Optional[PathLike] = None
+dask_temp_dir: PathLike | None = None
"""
The temporary directory to use by Dask. Dask places lock-files in this
directory, and also uses it to "spill" RAM contents to disk if the amount of
@@ -2235,7 +2235,7 @@ def noise_cov(bids_path):
Enabling debug mode deactivates parallel processing.
"""
-memory_location: Optional[Union[PathLike, bool]] = True
+memory_location: PathLike | bool | None = True
"""
If not None (or False), caching will be enabled and the cache files will be
stored in the given directory. The default (True) will use a
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index 98286d4ba..a52c82119 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -7,7 +7,6 @@
from dataclasses import field
from functools import partial
from types import SimpleNamespace
-from typing import Optional
import matplotlib
import mne
@@ -20,8 +19,8 @@
def _import_config(
*,
- config_path: Optional[PathLike],
- overrides: Optional[SimpleNamespace] = None,
+ config_path: PathLike | None,
+ overrides: SimpleNamespace | None = None,
check: bool = True,
log: bool = True,
) -> SimpleNamespace:
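The mechanical pattern applied throughout this patch, sketched on a toy function (names are illustrative):

```python
# Before: typing.Optional / typing.Union spellings.
from typing import Optional, Union

def pick_run_old(run: Optional[str], runs: Union[list[str], None]) -> Optional[str]:
    return run if run is not None else (runs[0] if runs else None)

# After: PEP 604 unions (Python 3.10+); no typing imports required.
def pick_run_new(run: str | None, runs: list[str] | None) -> str | None:
    return run if run is not None else (runs[0] if runs else None)

# The | operator also produces a valid second argument for isinstance(),
# which is why isinstance(x, (A, B)) can become isinstance(x, A | B) below.
assert isinstance(3, int | float)
```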
@@ -118,7 +117,7 @@ def _get_default_config():
ignore_keys = {
name.asname or name.name
for element in tree.body
- if isinstance(element, (ast.Import, ast.ImportFrom))
+ if isinstance(element, ast.Import | ast.ImportFrom)
for name in element.names
}
config = SimpleNamespace(
@@ -160,8 +159,8 @@ def _update_config_from_path(
def _update_with_user_config(
*,
config: SimpleNamespace, # modified in-place
- config_path: Optional[PathLike],
- overrides: Optional[SimpleNamespace],
+ config_path: PathLike | None,
+ overrides: SimpleNamespace | None,
log: bool = False,
) -> list[str]:
# 1. Basics and hidden vars
@@ -233,7 +232,7 @@ def _update_with_user_config(
return user_names
-def _check_config(config: SimpleNamespace, config_path: Optional[PathLike]) -> None:
+def _check_config(config: SimpleNamespace, config_path: PathLike | None) -> None:
_pydantic_validate(config=config, config_path=config_path)
# Eventually all of these could be pydantic-validated, but for now we'll
@@ -368,7 +367,7 @@ def _default_factory(key, val):
def _pydantic_validate(
config: SimpleNamespace,
- config_path: Optional[PathLike],
+ config_path: PathLike | None,
):
"""Create dataclass from config type hints and validate with pydantic."""
# https://docs.pydantic.dev/latest/usage/dataclasses/
diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py
index d6bcb0ce5..46990a623 100644
--- a/mne_bids_pipeline/_config_utils.py
+++ b/mne_bids_pipeline/_config_utils.py
@@ -5,7 +5,7 @@
import pathlib
from collections.abc import Iterable
from types import ModuleType, SimpleNamespace
-from typing import Any, Literal, Optional, TypeVar, Union
+from typing import Any, Literal, TypeVar
import mne
import mne_bids
@@ -106,7 +106,7 @@ def get_subjects(config: SimpleNamespace) -> list[str]:
return subjects
-def get_sessions(config: SimpleNamespace) -> Union[list[None], list[str]]:
+def get_sessions(config: SimpleNamespace) -> list[None] | list[str]:
sessions = copy.deepcopy(config.sessions)
_all_sessions = _get_entity_vals_cached(
root=config.bids_root,
@@ -124,7 +124,7 @@ def get_sessions(config: SimpleNamespace) -> Union[list[None], list[str]]:
def get_runs_all_subjects(
config: SimpleNamespace,
-) -> dict[str, Union[list[None], list[str]]]:
+) -> dict[str, list[None] | list[str]]:
"""Give the mapping between subjects and their runs.
Returns
@@ -149,7 +149,7 @@ def get_runs_all_subjects(
@functools.cache
def _get_runs_all_subjects_cached(
**config_dict: dict[str, Any],
-) -> dict[str, Union[list[None], list[str]]]:
+) -> dict[str, list[None] | list[str]]:
config = SimpleNamespace(**config_dict)
# Sometimes we check list equivalence for ch_types, so convert it back
config.ch_types = list(config.ch_types)
@@ -197,7 +197,7 @@ def get_runs(
config: SimpleNamespace,
subject: str,
verbose: bool = False,
-) -> Union[list[str], list[None]]:
+) -> list[str] | list[None]:
"""Return a list of runs in the BIDS input data.
Parameters
@@ -253,7 +253,7 @@ def get_runs_tasks(
*,
config: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
which: tuple[str] = ("runs", "noise", "rest"),
) -> list[tuple[str]]:
"""Get (run, task) tuples for all runs plus (maybe) rest."""
@@ -310,7 +310,7 @@ def get_mf_reference_run(config: SimpleNamespace) -> str:
)
-def get_task(config: SimpleNamespace) -> Optional[str]:
+def get_task(config: SimpleNamespace) -> str | None:
task = config.task
if task:
return task
@@ -416,7 +416,7 @@ def get_mf_ctc_fname(
RawEpochsEvokedT = TypeVar(
- "RawEpochsEvokedT", bound=Union[mne.io.BaseRaw, mne.BaseEpochs, mne.Evoked]
+ "RawEpochsEvokedT", bound=mne.io.BaseRaw | mne.BaseEpochs | mne.Evoked
)
@@ -459,7 +459,7 @@ def _meg_in_ch_types(ch_types: str) -> bool:
def get_noise_cov_bids_path(
- cfg: SimpleNamespace, subject: str, session: Optional[str]
+ cfg: SimpleNamespace, subject: str, session: str | None
) -> BIDSPath:
"""Retrieve the path to the noise covariance file.
@@ -553,13 +553,13 @@ def get_decoding_contrasts(config: SimpleNamespace) -> Iterable[tuple[str, str]]
}
-def _get_decoding_proc(config: SimpleNamespace) -> Optional[str]:
+def _get_decoding_proc(config: SimpleNamespace) -> str | None:
return _EPOCHS_DESCRIPTION_TO_PROC_MAP[config.decoding_which_epochs]
def get_eeg_reference(
config: SimpleNamespace,
-) -> Union[Literal["average"], Iterable[str]]:
+) -> Literal["average"] | Iterable[str]:
if config.eeg_reference == "average":
return config.eeg_reference
elif isinstance(config.eeg_reference, str):
@@ -635,7 +635,7 @@ def _do_mf_autobad(*, cfg: SimpleNamespace) -> bool:
# Adapted from MNE-Python
def _pl(x, *, non_pl="", pl="s"):
"""Determine if plural should be used."""
- len_x = x if isinstance(x, (int, np.generic)) else len(x)
+ len_x = x if isinstance(x, int | np.generic) else len(x)
return non_pl if len_x == 1 else pl
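A self-contained usage sketch of this helper (the function body is copied from the hunk; the calls are illustrative):

```python
import numpy as np

def _pl(x, *, non_pl="", pl="s"):
    """Determine if plural should be used."""
    len_x = x if isinstance(x, int | np.generic) else len(x)
    return non_pl if len_x == 1 else pl

print(f"1 subject{_pl(1)}")         # -> "1 subject"
print(f"3 subject{_pl(3)}")         # -> "3 subjects"
print(f"2 run{_pl(['01', '02'])}")  # -> "2 runs"
```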
@@ -643,7 +643,7 @@ def _proj_path(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> BIDSPath:
return BIDSPath(
subject=subject,
diff --git a/mne_bids_pipeline/_decoding.py b/mne_bids_pipeline/_decoding.py
index 3968fcf3c..9c34ded27 100644
--- a/mne_bids_pipeline/_decoding.py
+++ b/mne_bids_pipeline/_decoding.py
@@ -1,5 +1,3 @@
-from typing import Optional
-
import mne
import numpy as np
from joblib import parallel_backend
@@ -90,7 +88,7 @@ def _handle_csp_args(
def _decoding_preproc_steps(
subject: str,
- session: Optional[str],
+ session: str | None,
epochs: mne.Epochs,
pca: bool = True,
) -> list[BaseEstimator]:
diff --git a/mne_bids_pipeline/_import_data.py b/mne_bids_pipeline/_import_data.py
index aaf7b56e3..1043db0d5 100644
--- a/mne_bids_pipeline/_import_data.py
+++ b/mne_bids_pipeline/_import_data.py
@@ -1,6 +1,6 @@
from collections.abc import Iterable
from types import SimpleNamespace
-from typing import Literal, Optional, Union
+from typing import Literal
import mne
import numpy as np
@@ -26,17 +26,17 @@ def make_epochs(
*,
task: str,
subject: str,
- session: Optional[str],
+ session: str | None,
raw: mne.io.BaseRaw,
- event_id: Optional[Union[dict[str, int], Literal["auto"]]],
- conditions: Union[Iterable[str], dict[str, str]],
+ event_id: dict[str, int] | Literal["auto"] | None,
+ conditions: Iterable[str] | dict[str, str],
tmin: float,
tmax: float,
- metadata_tmin: Optional[float],
- metadata_tmax: Optional[float],
- metadata_keep_first: Optional[Iterable[str]],
- metadata_keep_last: Optional[Iterable[str]],
- metadata_query: Optional[str],
+ metadata_tmin: float | None,
+ metadata_tmax: float | None,
+ metadata_keep_first: Iterable[str] | None,
+ metadata_keep_last: Iterable[str] | None,
+ metadata_query: str | None,
event_repeated: Literal["error", "drop", "merge"],
epochs_decim: int,
task_is_rest: bool,
@@ -173,8 +173,8 @@ def _rename_events_func(
cfg: SimpleNamespace,
raw: mne.io.BaseRaw,
subject: str,
- session: Optional[str],
- run: Optional[str],
+ session: str | None,
+ run: str | None,
) -> None:
"""Rename events (actually, annotations descriptions) in ``raw``.
@@ -256,7 +256,7 @@ def _drop_channels_func(
cfg: SimpleNamespace,
raw: mne.io.BaseRaw,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> None:
"""Drop channels from the data.
@@ -272,8 +272,8 @@ def _create_bipolar_channels(
cfg: SimpleNamespace,
raw: mne.io.BaseRaw,
subject: str,
- session: Optional[str],
- run: Optional[str],
+ session: str | None,
+ run: str | None,
) -> None:
"""Create a channel from a bipolar referencing scheme..
@@ -318,8 +318,8 @@ def _set_eeg_montage(
cfg: SimpleNamespace,
raw: mne.io.BaseRaw,
subject: str,
- session: Optional[str],
- run: Optional[str],
+ session: str | None,
+ run: str | None,
) -> None:
"""Set an EEG template montage if requested.
@@ -356,8 +356,8 @@ def import_experimental_data(
*,
cfg: SimpleNamespace,
bids_path_in: BIDSPath,
- bids_path_bads_in: Optional[BIDSPath],
- data_is_rest: Optional[bool],
+ bids_path_bads_in: BIDSPath | None,
+ data_is_rest: bool | None,
) -> mne.io.BaseRaw:
"""Run the data import.
@@ -417,9 +417,9 @@ def import_er_data(
*,
cfg: SimpleNamespace,
bids_path_er_in: BIDSPath,
- bids_path_ref_in: Optional[BIDSPath],
- bids_path_er_bads_in: Optional[BIDSPath],
- bids_path_ref_bads_in: Optional[BIDSPath],
+ bids_path_ref_in: BIDSPath | None,
+ bids_path_er_bads_in: BIDSPath | None,
+ bids_path_ref_bads_in: BIDSPath | None,
prepare_maxwell_filter: bool,
) -> mne.io.BaseRaw:
"""Import empty-room data.
@@ -495,8 +495,8 @@ def _find_breaks_func(
cfg,
raw: mne.io.BaseRaw,
subject: str,
- session: Optional[str],
- run: Optional[str],
+ session: str | None,
+ run: str | None,
) -> None:
if not cfg.find_breaks:
return
@@ -527,9 +527,9 @@ def _get_bids_path_in(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
- run: Optional[str],
- task: Optional[str],
+ session: str | None,
+ run: str | None,
+ task: str | None,
kind: Literal["orig", "sss", "filt"] = "orig",
) -> BIDSPath:
# b/c can be used before this is updated
@@ -563,13 +563,13 @@ def _get_run_path(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
- run: Optional[str],
- task: Optional[str],
+ session: str | None,
+ run: str | None,
+ task: str | None,
kind: Literal["orig", "sss", "filt"],
- add_bads: Optional[bool] = None,
+ add_bads: bool | None = None,
allow_missing: bool = False,
- key: Optional[str] = None,
+ key: str | None = None,
) -> dict:
bids_path_in = _get_bids_path_in(
cfg=cfg,
@@ -595,9 +595,9 @@ def _get_rest_path(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
kind: Literal["orig", "sss", "filt"],
- add_bads: Optional[bool] = None,
+ add_bads: bool | None = None,
) -> dict:
if not (cfg.process_rest and not cfg.task_is_rest):
return dict()
@@ -617,10 +617,10 @@ def _get_noise_path(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
kind: Literal["orig", "sss", "filt"],
- mf_reference_run: Optional[str],
- add_bads: Optional[bool] = None,
+ mf_reference_run: str | None,
+ add_bads: bool | None = None,
) -> dict:
if not (cfg.process_empty_room and get_datatype(config=cfg) == "meg"):
return dict()
@@ -663,12 +663,12 @@ def _get_run_rest_noise_path(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
- run: Optional[str],
- task: Optional[str],
+ session: str | None,
+ run: str | None,
+ task: str | None,
kind: Literal["orig", "sss", "filt"],
- mf_reference_run: Optional[str],
- add_bads: Optional[bool] = None,
+ mf_reference_run: str | None,
+ add_bads: bool | None = None,
) -> dict:
kwargs = dict(
cfg=cfg,
@@ -691,8 +691,8 @@ def _get_mf_reference_run_path(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
- add_bads: Optional[bool] = None,
+ session: str | None,
+ add_bads: bool | None = None,
) -> dict:
return _get_run_path(
cfg=cfg,
@@ -716,12 +716,12 @@ def _path_dict(
*,
cfg: SimpleNamespace,
bids_path_in: BIDSPath,
- add_bads: Optional[bool] = None,
+ add_bads: bool | None = None,
kind: Literal["orig", "sss", "filt"],
allow_missing: bool,
- key: Optional[str] = None,
+ key: str | None = None,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
if add_bads is None:
add_bads = kind == "orig" and _do_mf_autobad(cfg=cfg)
@@ -748,7 +748,7 @@ def _bads_path(
cfg: SimpleNamespace,
bids_path_in: BIDSPath,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> BIDSPath:
return bids_path_in.copy().update(
suffix="bads",
@@ -817,8 +817,8 @@ def _import_data_kwargs(*, config: SimpleNamespace, subject: str) -> dict:
def _get_run_type(
- run: Optional[str],
- task: Optional[str],
+ run: str | None,
+ task: str | None,
) -> str:
if run is None and task in ("noise", "rest"):
run_type = dict(rest="resting-state", noise="empty-room")[task]
diff --git a/mne_bids_pipeline/_logging.py b/mne_bids_pipeline/_logging.py
index 2f54757a6..187930be2 100644
--- a/mne_bids_pipeline/_logging.py
+++ b/mne_bids_pipeline/_logging.py
@@ -4,7 +4,6 @@
import inspect
import logging
import os
-from typing import Optional, Union
import rich.console
import rich.theme
@@ -71,25 +70,25 @@ def level(self, level):
level = int(level)
self._level = level
- def debug(self, msg: str, *, extra: Optional[LogKwargsT] = None) -> None:
+ def debug(self, msg: str, *, extra: LogKwargsT | None = None) -> None:
self._log_message(kind="debug", msg=msg, **(extra or {}))
- def info(self, msg: str, *, extra: Optional[LogKwargsT] = None) -> None:
+ def info(self, msg: str, *, extra: LogKwargsT | None = None) -> None:
self._log_message(kind="info", msg=msg, **(extra or {}))
- def warning(self, msg: str, *, extra: Optional[LogKwargsT] = None) -> None:
+ def warning(self, msg: str, *, extra: LogKwargsT | None = None) -> None:
self._log_message(kind="warning", msg=msg, **(extra or {}))
- def error(self, msg: str, *, extra: Optional[LogKwargsT] = None) -> None:
+ def error(self, msg: str, *, extra: LogKwargsT | None = None) -> None:
self._log_message(kind="error", msg=msg, **(extra or {}))
def _log_message(
self,
kind: str,
msg: str,
- subject: Optional[Union[str, int]] = None,
- session: Optional[Union[str, int]] = None,
- run: Optional[Union[str, int]] = None,
+ subject: str | int | None = None,
+ session: str | int | None = None,
+ run: str | int | None = None,
emoji: str = "",
):
this_level = getattr(logging, kind.upper())
@@ -111,10 +110,10 @@ def _log_message(
def gen_log_kwargs(
message: str,
*,
- subject: Optional[Union[str, int]] = None,
- session: Optional[Union[str, int]] = None,
- run: Optional[Union[str, int]] = None,
- task: Optional[str] = None,
+ subject: str | int | None = None,
+ session: str | int | None = None,
+ run: str | int | None = None,
+ task: str | None = None,
emoji: str = "⏳️",
) -> LogKwargsT:
# Try to figure these out
diff --git a/mne_bids_pipeline/_parallel.py b/mne_bids_pipeline/_parallel.py
index 9c74e6474..acee195c0 100644
--- a/mne_bids_pipeline/_parallel.py
+++ b/mne_bids_pipeline/_parallel.py
@@ -1,7 +1,8 @@
"""Parallelization."""
+from collections.abc import Callable
from types import SimpleNamespace
-from typing import Callable, Literal
+from typing import Literal
import joblib
from mne.utils import logger as mne_logger
diff --git a/mne_bids_pipeline/_reject.py b/mne_bids_pipeline/_reject.py
index 707984732..3837daa97 100644
--- a/mne_bids_pipeline/_reject.py
+++ b/mne_bids_pipeline/_reject.py
@@ -1,7 +1,7 @@
"""Rejection."""
from collections.abc import Iterable
-from typing import Literal, Optional, Union
+from typing import Literal
import mne
@@ -11,11 +11,11 @@
def _get_reject(
*,
subject: str,
- session: Optional[str],
- reject: Union[dict[str, float], Literal["autoreject_global"]],
+ session: str | None,
+ reject: dict[str, float] | Literal["autoreject_global"],
ch_types: Iterable[Literal["meg", "mag", "grad", "eeg"]],
param: str,
- epochs: Optional[mne.BaseEpochs] = None,
+ epochs: mne.BaseEpochs | None = None,
) -> dict[str, float]:
if reject is None:
return dict()
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index a9f9b48a4..db14020fa 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -5,7 +5,7 @@
from io import StringIO
from textwrap import indent
from types import SimpleNamespace
-from typing import Literal, Optional
+from typing import Literal
import matplotlib.transforms
import mne
@@ -29,10 +29,10 @@ def _open_report(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
- run: Optional[str] = None,
- task: Optional[str] = None,
- fname_report: Optional[BIDSPath] = None,
+ session: str | None,
+ run: str | None = None,
+ task: str | None = None,
+ fname_report: BIDSPath | None = None,
name: str = "report",
):
if fname_report is None:
@@ -453,7 +453,7 @@ def _plot_decoding_time_generalization(
def _gen_empty_report(
- *, cfg: SimpleNamespace, subject: str, session: Optional[str]
+ *, cfg: SimpleNamespace, subject: str, session: str | None
) -> mne.Report:
title = f"sub-{subject}"
if session is not None:
@@ -470,7 +470,7 @@ def _contrasts_to_names(contrasts: list[list[str]]) -> list[str]:
def add_event_counts(
- *, cfg, subject: Optional[str], session: Optional[str], report: mne.Report
+ *, cfg, subject: str | None, session: str | None, report: mne.Report
) -> None:
try:
df_events = count_events(BIDSPath(root=cfg.bids_root, session=session))
@@ -495,9 +495,9 @@ def _finalize(
report: mne.Report,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
- run: Optional[str],
- task: Optional[str],
+ session: str | None,
+ run: str | None,
+ task: str | None,
) -> None:
"""Add system information and the pipeline configuration to the report."""
# ensure they are always appended
@@ -597,7 +597,7 @@ def add_csp_grand_average(
cond_1: str,
cond_2: str,
fname_csp_freq_results: BIDSPath,
- fname_csp_cluster_results: Optional[pd.DataFrame],
+ fname_csp_cluster_results: pd.DataFrame | None,
):
"""Add CSP decoding results to the grand average report."""
import matplotlib.pyplot as plt # nested import to help joblib
@@ -850,8 +850,8 @@ def _add_raw(
bids_path_in: BIDSPath,
title: str,
tags: tuple = (),
- raw: Optional[BaseRaw] = None,
- extra_html: Optional[str] = None,
+ raw: BaseRaw | None = None,
+ extra_html: str | None = None,
):
if bids_path_in.run is not None:
title += f", run {repr(bids_path_in.run)}"
@@ -888,7 +888,7 @@ def _render_bem(
cfg: SimpleNamespace,
report: mne.report.Report,
subject: str,
- session: Optional[str],
+ session: str | None,
):
logger.info(**gen_log_kwargs(message="Rendering MRI slices with BEM contours."))
report.add_bem(
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index ca3bd1faf..de1321352 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -9,8 +9,9 @@
import sys
import time
import traceback
+from collections.abc import Callable
from types import SimpleNamespace
-from typing import Callable, Literal, Optional, Union
+from typing import Literal
import json_tricks
import pandas as pd
@@ -23,8 +24,8 @@
def failsafe_run(
- get_input_fnames: Optional[Callable] = None,
- get_output_fnames: Optional[Callable] = None,
+ get_input_fnames: Callable | None = None,
+ get_output_fnames: Callable | None = None,
) -> Callable:
def failsafe_run_decorator(func):
@functools.wraps(func) # Preserve "identity" of original function
@@ -315,8 +316,8 @@ def save_logs(*, config: SimpleNamespace, logs: list[pd.Series]) -> None:
def _update_for_splits(
- files_dict: Union[dict[str, BIDSPath], BIDSPath],
- key: Optional[str],
+ files_dict: dict[str, BIDSPath] | BIDSPath,
+ key: str | None,
*,
single: bool = False,
allow_missing: bool = False,
@@ -358,7 +359,7 @@ def _sanitize_callable(val):
def _get_step_path(
- stack: Optional[list[inspect.FrameInfo]] = None,
+ stack: list[inspect.FrameInfo] | None = None,
) -> pathlib.Path:
if stack is None:
stack = inspect.stack()
@@ -385,7 +386,7 @@ def _prep_out_files(
*,
exec_params: SimpleNamespace,
out_files: dict[str, BIDSPath],
- check_relative: Optional[pathlib.Path] = None,
+ check_relative: pathlib.Path | None = None,
bids_only: bool = True,
):
if check_relative is None:
@@ -415,7 +416,7 @@ def _prep_out_files(
def _path_to_str_hash(
k: str,
- v: Union[BIDSPath, pathlib.Path],
+ v: BIDSPath | pathlib.Path,
*,
method: Literal["mtime", "hash"],
kind: str = "in",
diff --git a/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py b/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
index 2f17b0c77..de7a2de0d 100644
--- a/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
+++ b/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
@@ -4,7 +4,6 @@
"""
from types import SimpleNamespace
-from typing import Optional
from mne_bids.config import BIDS_VERSION
from mne_bids.utils import _write_json
@@ -47,7 +46,7 @@ def init_subject_dirs(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> None:
"""Create processing data output directories for individual participants."""
out_dir = cfg.deriv_root / f"sub-{subject}"
diff --git a/mne_bids_pipeline/steps/init/_02_find_empty_room.py b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
index d56318365..02515021a 100644
--- a/mne_bids_pipeline/steps/init/_02_find_empty_room.py
+++ b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
@@ -1,7 +1,6 @@
"""Find empty-room data matches."""
from types import SimpleNamespace
-from typing import Optional
from mne_bids import BIDSPath
@@ -20,7 +19,7 @@
def get_input_fnames_find_empty_room(
- *, subject: str, session: Optional[str], run: Optional[str], cfg: SimpleNamespace
+ *, subject: str, session: str | None, run: str | None, cfg: SimpleNamespace
) -> dict[str, BIDSPath]:
"""Get paths of files required by find_empty_room function."""
bids_path_in = BIDSPath(
@@ -63,8 +62,8 @@ def find_empty_room(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
- run: Optional[str],
+ session: str | None,
+ run: str | None,
in_files: dict[str, BIDSPath],
) -> dict[str, BIDSPath]:
raw_path = in_files.pop(f"raw_run-{run}")
diff --git a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
index 1cbeca387..0c8f81e96 100644
--- a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
+++ b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
@@ -1,7 +1,6 @@
"""Assess data quality and find bad (and flat) channels."""
from types import SimpleNamespace
-from typing import Optional
import mne
import pandas as pd
@@ -36,9 +35,9 @@ def get_input_fnames_data_quality(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
- run: Optional[str],
- task: Optional[str],
+ session: str | None,
+ run: str | None,
+ task: str | None,
) -> dict:
"""Get paths of files required by assess_data_quality function."""
kwargs = dict(
@@ -68,9 +67,9 @@ def assess_data_quality(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
- run: Optional[str],
- task: Optional[str],
+ session: str | None,
+ run: str | None,
+ task: str | None,
in_files: dict,
) -> dict:
"""Assess data quality and find and mark bad channels."""
@@ -146,11 +145,11 @@ def _find_bads_maxwell(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
bids_path_in: BIDSPath,
- bids_path_ref_in: Optional[BIDSPath],
+ bids_path_ref_in: BIDSPath | None,
subject: str,
- session: Optional[str],
- run: Optional[str],
- task: Optional[str],
+ session: str | None,
+ run: str | None,
+ task: str | None,
out_files: dict,
):
if cfg.find_flat_channels_meg and not cfg.find_noisy_channels_meg:
@@ -287,7 +286,7 @@ def get_config(
*,
config: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> SimpleNamespace:
extra_kwargs = dict()
if config.find_noisy_channels_meg or config.find_flat_channels_meg:
diff --git a/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py b/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
index d4a6a2c6b..de8996338 100644
--- a/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
+++ b/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
@@ -1,7 +1,6 @@
"""Estimate head positions."""
from types import SimpleNamespace
-from typing import Optional
import mne
@@ -25,9 +24,9 @@ def get_input_fnames_head_pos(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
- run: Optional[str],
- task: Optional[str],
+ session: str | None,
+ run: str | None,
+ task: str | None,
) -> dict:
"""Get paths of files required by run_head_pos function."""
return _get_run_rest_noise_path(
@@ -49,9 +48,9 @@ def run_head_pos(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
- run: Optional[str],
- task: Optional[str],
+ session: str | None,
+ run: str | None,
+ task: str | None,
in_files: dict,
) -> dict:
import matplotlib.pyplot as plt
@@ -148,7 +147,7 @@ def get_config(
*,
config: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> SimpleNamespace:
cfg = SimpleNamespace(
mf_mc_t_step_min=config.mf_mc_t_step_min,
diff --git a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
index 00ce5ad0a..e1e178395 100644
--- a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
@@ -17,7 +17,6 @@
import gc
from copy import deepcopy
from types import SimpleNamespace
-from typing import Optional
import mne
import numpy as np
@@ -50,7 +49,7 @@ def get_input_fnames_esss(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
kwargs = dict(
cfg=cfg,
@@ -76,7 +75,7 @@ def compute_esss_proj(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
import matplotlib.pyplot as plt
@@ -182,9 +181,9 @@ def get_input_fnames_maxwell_filter(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
- run: Optional[str],
- task: Optional[str],
+ session: str | None,
+ run: str | None,
+ task: str | None,
) -> dict:
"""Get paths of files required by maxwell_filter function."""
kwargs = dict(
@@ -274,9 +273,9 @@ def run_maxwell_filter(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
- run: Optional[str],
- task: Optional[str],
+ session: str | None,
+ run: str | None,
+ task: str | None,
in_files: dict,
) -> dict:
if cfg.proc and "sss" in cfg.proc and cfg.use_maxwell_filter:
@@ -539,7 +538,7 @@ def get_config_esss(
*,
config: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> SimpleNamespace:
cfg = SimpleNamespace(
mf_esss=config.mf_esss,
@@ -553,7 +552,7 @@ def get_config_maxwell_filter(
*,
config: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> SimpleNamespace:
cfg = SimpleNamespace(
mf_cal_fname=get_mf_cal_fname(
diff --git a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
index fd9c6c874..330829ab8 100644
--- a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
@@ -16,7 +16,7 @@
from collections.abc import Iterable
from types import SimpleNamespace
-from typing import Literal, Optional, Union
+from typing import Literal
import mne
import numpy as np
@@ -45,9 +45,9 @@ def get_input_fnames_frequency_filter(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
run: str,
- task: Optional[str],
+ task: str | None,
) -> dict:
"""Get paths of files required by filter_data function."""
kind = "sss" if cfg.use_maxwell_filter else "orig"
@@ -65,14 +65,14 @@ def get_input_fnames_frequency_filter(
def notch_filter(
raw: mne.io.BaseRaw,
subject: str,
- session: Optional[str],
+ session: str | None,
run: str,
- task: Optional[str],
- freqs: Optional[Union[float, Iterable[float]]],
- trans_bandwidth: Union[float, Literal["auto"]],
- notch_widths: Optional[Union[float, Iterable[float]]],
+ task: str | None,
+ freqs: float | Iterable[float] | None,
+ trans_bandwidth: float | Literal["auto"],
+ notch_widths: float | Iterable[float] | None,
run_type: Literal["experimental", "empty-room", "resting-state"],
- picks: Optional[np.ndarray],
+ picks: np.ndarray | None,
) -> None:
"""Filter data channels (MEG and EEG)."""
if freqs is None:
@@ -97,15 +97,15 @@ def notch_filter(
def bandpass_filter(
raw: mne.io.BaseRaw,
subject: str,
- session: Optional[str],
+ session: str | None,
run: str,
- task: Optional[str],
- l_freq: Optional[float],
- h_freq: Optional[float],
- l_trans_bandwidth: Union[float, Literal["auto"]],
- h_trans_bandwidth: Union[float, Literal["auto"]],
+ task: str | None,
+ l_freq: float | None,
+ h_freq: float | None,
+ l_trans_bandwidth: float | Literal["auto"],
+ h_trans_bandwidth: float | Literal["auto"],
run_type: Literal["experimental", "empty-room", "resting-state"],
- picks: Optional[np.ndarray],
+ picks: np.ndarray | None,
) -> None:
"""Filter data channels (MEG and EEG)."""
if l_freq is not None and h_freq is None:
@@ -135,9 +135,9 @@ def bandpass_filter(
def resample(
raw: mne.io.BaseRaw,
subject: str,
- session: Optional[str],
+ session: str | None,
run: str,
- task: Optional[str],
+ task: str | None,
sfreq: float,
run_type: Literal["experimental", "empty-room", "resting-state"],
) -> None:
@@ -157,9 +157,9 @@ def filter_data(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
run: str,
- task: Optional[str],
+ task: str | None,
in_files: dict,
) -> dict:
"""Filter data from a single subject."""
diff --git a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
index 9fce737cc..bc63acd64 100644
--- a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
+++ b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
@@ -1,7 +1,6 @@
"""Temporal regression for artifact removal."""
from types import SimpleNamespace
-from typing import Optional
import mne
from mne.io.pick import _picks_to_idx
@@ -23,9 +22,9 @@ def get_input_fnames_regress_artifact(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
run: str,
- task: Optional[str],
+ task: str | None,
) -> dict:
"""Get paths of files required by regress_artifact function."""
out = _get_run_rest_noise_path(
@@ -49,9 +48,9 @@ def run_regress_artifact(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
run: str,
- task: Optional[str],
+ task: str | None,
in_files: dict,
) -> dict:
model = EOGRegression(proj=False, **cfg.regress_artifact)
diff --git a/mne_bids_pipeline/steps/preprocessing/_06a1_fit_ica.py b/mne_bids_pipeline/steps/preprocessing/_06a1_fit_ica.py
index 79e15a235..5776f3abf 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06a1_fit_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06a1_fit_ica.py
@@ -10,7 +10,6 @@
"""
from types import SimpleNamespace
-from typing import Optional
import autoreject
import mne
@@ -37,7 +36,7 @@ def get_input_fnames_run_ica(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
bids_basename = BIDSPath(
subject=subject,
@@ -69,7 +68,7 @@ def run_ica(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
"""Run ICA."""
@@ -295,7 +294,7 @@ def get_config(
*,
config: SimpleNamespace,
subject: str,
- session: Optional[str] = None,
+ session: str | None = None,
) -> SimpleNamespace:
cfg = SimpleNamespace(
conditions=config.conditions,
diff --git a/mne_bids_pipeline/steps/preprocessing/_06a2_find_ica_artifacts.py b/mne_bids_pipeline/steps/preprocessing/_06a2_find_ica_artifacts.py
index d85398bfc..93788eccc 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06a2_find_ica_artifacts.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06a2_find_ica_artifacts.py
@@ -9,7 +9,7 @@
import shutil
from types import SimpleNamespace
-from typing import Literal, Optional
+from typing import Literal
import mne
import numpy as np
@@ -34,11 +34,11 @@ def detect_bad_components(
*,
cfg,
which: Literal["eog", "ecg"],
- epochs: Optional[mne.BaseEpochs],
+ epochs: mne.BaseEpochs | None,
ica: mne.preprocessing.ICA,
- ch_names: Optional[list[str]],
+ ch_names: list[str] | None,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> tuple[list[int], np.ndarray]:
artifact = which.upper()
if epochs is None:
@@ -89,7 +89,7 @@ def get_input_fnames_find_ica_artifacts(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
bids_basename = BIDSPath(
subject=subject,
@@ -127,7 +127,7 @@ def find_ica_artifacts(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
"""Run ICA."""
@@ -341,7 +341,7 @@ def get_config(
*,
config: SimpleNamespace,
subject: str,
- session: Optional[str] = None,
+ session: str | None = None,
) -> SimpleNamespace:
cfg = SimpleNamespace(
conditions=config.conditions,
diff --git a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
index 1580836ca..b17816a7e 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
@@ -5,7 +5,6 @@
"""
from types import SimpleNamespace
-from typing import Optional
import mne
import numpy as np
@@ -28,7 +27,7 @@
from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs
-def _find_ecg_events(raw: mne.io.Raw, ch_name: Optional[str]) -> np.ndarray:
+def _find_ecg_events(raw: mne.io.Raw, ch_name: str | None) -> np.ndarray:
"""Wrap find_ecg_events to use the same defaults as create_ecg_events."""
return find_ecg_events(raw, ch_name=ch_name, l_freq=8, h_freq=16)[0]
@@ -37,7 +36,7 @@ def get_input_fnames_run_ssp(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
bids_basename = BIDSPath(
subject=subject,
@@ -69,7 +68,7 @@ def run_ssp(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
import matplotlib.pyplot as plt
diff --git a/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py b/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
index cc010d0b8..47f717959 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py
@@ -9,7 +9,6 @@
import inspect
from types import SimpleNamespace
-from typing import Optional
import mne
from mne_bids import BIDSPath
@@ -38,7 +37,7 @@ def get_input_fnames_epochs(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
"""Get paths of files required by filter_data function."""
# Construct the basenames of the files we wish to load, and of the empty-
@@ -79,7 +78,7 @@ def run_epochs(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
"""Extract epochs for one subject."""
diff --git a/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
index b64e99f3a..4cea12ba4 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py
@@ -6,7 +6,6 @@
"""
from types import SimpleNamespace
-from typing import Optional
import mne
import pandas as pd
@@ -30,7 +29,7 @@ def _ica_paths(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
):
bids_basename = BIDSPath(
subject=subject,
@@ -68,7 +67,7 @@ def get_input_fnames_apply_ica_epochs(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
in_files = _ica_paths(cfg=cfg, subject=subject, session=session)
in_files["epochs"] = (
@@ -88,9 +87,9 @@ def get_input_fnames_apply_ica_raw(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
run: str,
- task: Optional[str],
+ task: str | None,
) -> dict:
in_files = _get_run_rest_noise_path(
cfg=cfg,
@@ -114,7 +113,7 @@ def apply_ica_epochs(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
bids_basename = in_files["ica"].copy().update(processing=None)
@@ -224,9 +223,9 @@ def apply_ica_raw(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
run: str,
- task: Optional[str],
+ task: str | None,
in_files: dict,
) -> dict:
ica = _read_ica_and_exclude(in_files)
diff --git a/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py b/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py
index 9a0026a78..ee2c56cb9 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py
@@ -5,7 +5,6 @@
"""
from types import SimpleNamespace
-from typing import Optional
import mne
@@ -26,7 +25,7 @@ def get_input_fnames_apply_ssp_epochs(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
in_files = dict()
in_files["proj"] = _proj_path(cfg=cfg, subject=subject, session=session)
@@ -43,7 +42,7 @@ def apply_ssp_epochs(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
out_files = dict()
@@ -74,9 +73,9 @@ def get_input_fnames_apply_ssp_raw(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
run: str,
- task: Optional[str],
+ task: str | None,
) -> dict:
in_files = _get_run_rest_noise_path(
cfg=cfg,
@@ -100,9 +99,9 @@ def apply_ssp_raw(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
run: str,
- task: Optional[str],
+ task: str | None,
in_files: dict,
) -> dict:
projs = mne.read_proj(in_files.pop("proj"))
diff --git a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
index 51ca1149b..e9de67a27 100644
--- a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
+++ b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py
@@ -9,7 +9,6 @@
"""
from types import SimpleNamespace
-from typing import Optional
import autoreject
import mne
@@ -33,7 +32,7 @@ def get_input_fnames_drop_ptp(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
bids_path = BIDSPath(
subject=subject,
@@ -63,7 +62,7 @@ def drop_ptp(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
out_files = dict()
diff --git a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
index 71835faee..879baf298 100644
--- a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
+++ b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py
@@ -1,7 +1,6 @@
"""Extract evoked data for each condition."""
from types import SimpleNamespace
-from typing import Optional
import mne
from mne_bids import BIDSPath
@@ -31,7 +30,7 @@ def get_input_fnames_evoked(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
fname_epochs = BIDSPath(
subject=subject,
@@ -62,7 +61,7 @@ def run_evoked(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
out_files = dict()
diff --git a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
index 4034245ff..58ccc7f2e 100644
--- a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
+++ b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
@@ -10,7 +10,6 @@
import os.path as op
from types import SimpleNamespace
-from typing import Optional
import mne
import numpy as np
@@ -46,7 +45,7 @@ def get_input_fnames_epochs_decoding(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
condition1: str,
condition2: str,
) -> dict:
@@ -80,7 +79,7 @@ def run_epochs_decoding(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
condition1: str,
condition2: str,
in_files: dict,
diff --git a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
index 5ad221d67..8e5402d96 100644
--- a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
+++ b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py
@@ -13,7 +13,6 @@
import os.path as op
from types import SimpleNamespace
-from typing import Optional
import mne
import numpy as np
@@ -54,7 +53,7 @@ def get_input_fnames_time_decoding(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
condition1: str,
condition2: str,
) -> dict:
@@ -88,7 +87,7 @@ def run_time_decoding(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
condition1: str,
condition2: str,
in_files: dict,
diff --git a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
index 0ab3aa3ea..be04ca547 100644
--- a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
+++ b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py
@@ -5,7 +5,6 @@
"""
from types import SimpleNamespace
-from typing import Optional
import mne
import numpy as np
@@ -29,7 +28,7 @@ def get_input_fnames_time_frequency(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
fname_epochs = BIDSPath(
subject=subject,
@@ -60,7 +59,7 @@ def run_time_frequency(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
import matplotlib.pyplot as plt
diff --git a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
index ca7791fd4..368caaeb3 100644
--- a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
+++ b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
@@ -2,7 +2,6 @@
import os.path as op
from types import SimpleNamespace
-from typing import Optional
import matplotlib.transforms
import mne
@@ -102,7 +101,7 @@ def get_input_fnames_csp(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
contrast: tuple[str],
) -> dict:
proc = _get_decoding_proc(config=cfg)
@@ -493,7 +492,7 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
def get_config(
- *, config: SimpleNamespace, subject: str, session: Optional[str]
+ *, config: SimpleNamespace, subject: str, session: str | None
) -> SimpleNamespace:
cfg = SimpleNamespace(
# Data parameters
diff --git a/mne_bids_pipeline/steps/sensor/_06_make_cov.py b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
index e3c8cdc9e..a9e23cc60 100644
--- a/mne_bids_pipeline/steps/sensor/_06_make_cov.py
+++ b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
@@ -4,7 +4,6 @@
"""
from types import SimpleNamespace
-from typing import Optional
import mne
from mne_bids import BIDSPath
@@ -34,7 +33,7 @@ def get_input_fnames_cov(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
cov_type = _get_cov_type(cfg)
in_files = dict()
@@ -94,12 +93,12 @@ def get_input_fnames_cov(
def compute_cov_from_epochs(
*,
- tmin: Optional[float],
- tmax: Optional[float],
+ tmin: float | None,
+ tmax: float | None,
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
out_files: dict,
) -> mne.Covariance:
@@ -129,7 +128,7 @@ def compute_cov_from_raw(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
out_files: dict,
) -> mne.Covariance:
@@ -152,7 +151,7 @@ def retrieve_custom_cov(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
out_files: dict,
) -> mne.Covariance:
@@ -213,7 +212,7 @@ def run_covariance(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str] = None,
+ session: str | None = None,
in_files: dict,
) -> dict:
import matplotlib.pyplot as plt
diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py
index 923b61ccb..2403bce3c 100644
--- a/mne_bids_pipeline/steps/sensor/_99_group_average.py
+++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py
@@ -7,7 +7,6 @@
import os.path as op
from functools import partial
from types import SimpleNamespace
-from typing import Optional
import mne
import numpy as np
@@ -47,7 +46,7 @@ def get_input_fnames_average_evokeds(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[dict],
+ session: dict | None,
) -> dict:
in_files = dict()
for this_subject in cfg.subjects:
@@ -76,7 +75,7 @@ def average_evokeds(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
logger.info(**gen_log_kwargs(message="Creating grand averages"))
@@ -185,7 +184,7 @@ class ClusterAcrossTime(TypedDict):
def _decoding_cluster_permutation_test(
scores: np.ndarray,
times: np.ndarray,
- cluster_forming_t_threshold: Optional[float],
+ cluster_forming_t_threshold: float | None,
n_permutations: int,
random_seed: int,
) -> tuple[np.ndarray, list[ClusterAcrossTime], int]:
@@ -220,7 +219,7 @@ def _get_epochs_in_files(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
in_files = dict()
in_files["epochs"] = BIDSPath(
@@ -245,7 +244,7 @@ def _decoding_out_fname(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
cond_1: str,
cond_2: str,
kind: str,
@@ -277,7 +276,7 @@ def _get_input_fnames_decoding(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
cond_1: str,
cond_2: str,
kind: str,
@@ -308,7 +307,7 @@ def average_time_by_time_decoding(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
cond_1: str,
cond_2: str,
in_files: dict,
@@ -551,7 +550,7 @@ def average_full_epochs_decoding(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
cond_1: str,
cond_2: str,
in_files: dict,
@@ -626,7 +625,7 @@ def get_input_files_average_full_epochs_report(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
decoding_contrasts: list[list[str]],
) -> dict:
in_files = dict()
@@ -650,7 +649,7 @@ def average_full_epochs_report(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
decoding_contrasts: list[list[str]],
in_files: dict,
) -> dict:
@@ -712,7 +711,7 @@ def average_csp_decoding(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
cond_1: str,
cond_2: str,
in_files: dict,
@@ -887,7 +886,7 @@ def _average_csp_time_freq(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
data: pd.DataFrame,
) -> pd.DataFrame:
# Prepare a dataframe for storing the results.
diff --git a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
index 22c67c235..95c451327 100644
--- a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
+++ b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py
@@ -6,7 +6,6 @@
import glob
from pathlib import Path
from types import SimpleNamespace
-from typing import Optional
import mne
@@ -42,7 +41,7 @@ def get_input_fnames_make_bem_surfaces(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
in_files = dict()
mri_images, mri_dir, flash_dir = _get_bem_params(cfg)
@@ -59,7 +58,7 @@ def get_output_fnames_make_bem_surfaces(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
out_files = dict()
conductivity, _ = _get_bem_conductivity(cfg)
@@ -79,7 +78,7 @@ def make_bem_surfaces(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
mri_images, _, _ = _get_bem_params(cfg)
diff --git a/mne_bids_pipeline/steps/source/_04_make_forward.py b/mne_bids_pipeline/steps/source/_04_make_forward.py
index 87bcf6fd9..12342cfa6 100644
--- a/mne_bids_pipeline/steps/source/_04_make_forward.py
+++ b/mne_bids_pipeline/steps/source/_04_make_forward.py
@@ -4,7 +4,6 @@
"""
from types import SimpleNamespace
-from typing import Optional
import mne
import numpy as np
@@ -31,7 +30,7 @@ def _prepare_trans_template(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
info: mne.Info,
) -> mne.transforms.Transform:
assert isinstance(cfg.use_template_mri, str)
@@ -67,7 +66,7 @@ def _prepare_trans_subject(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
bids_path: BIDSPath,
) -> mne.transforms.Transform:
# Generate a head ↔ MRI transformation matrix from the
@@ -119,7 +118,7 @@ def run_forward(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
bids_path = BIDSPath(
@@ -218,7 +217,7 @@ def get_config(
*,
config: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> SimpleNamespace:
if config.mri_t1_path_generator is None:
t1_bids_path = None
diff --git a/mne_bids_pipeline/steps/source/_05_make_inverse.py b/mne_bids_pipeline/steps/source/_05_make_inverse.py
index 54f9fd0ae..c623ef7ee 100644
--- a/mne_bids_pipeline/steps/source/_05_make_inverse.py
+++ b/mne_bids_pipeline/steps/source/_05_make_inverse.py
@@ -4,7 +4,6 @@
"""
from types import SimpleNamespace
-from typing import Optional
import mne
from mne.minimum_norm import (
@@ -33,7 +32,7 @@ def get_input_fnames_inverse(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
):
bids_path = BIDSPath(
subject=subject,
@@ -68,7 +67,7 @@ def run_inverse(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
# TODO: Eventually we should maybe loop over ch_types, e.g., to create
diff --git a/mne_bids_pipeline/steps/source/_99_group_average.py b/mne_bids_pipeline/steps/source/_99_group_average.py
index eb26c1c5f..81de0a01b 100644
--- a/mne_bids_pipeline/steps/source/_99_group_average.py
+++ b/mne_bids_pipeline/steps/source/_99_group_average.py
@@ -4,7 +4,6 @@
"""
from types import SimpleNamespace
-from typing import Optional
import mne
import numpy as np
@@ -28,7 +27,7 @@ def _stc_path(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
condition: str,
morphed: bool,
) -> BIDSPath:
@@ -58,7 +57,7 @@ def get_input_fnames_morph_stc(
cfg: SimpleNamespace,
subject: str,
fs_subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
in_files = dict()
for condition in _all_conditions(cfg=cfg):
@@ -81,7 +80,7 @@ def morph_stc(
exec_params: SimpleNamespace,
subject: str,
fs_subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
) -> dict:
out_files = dict()
@@ -113,7 +112,7 @@ def get_input_fnames_run_average(
*,
cfg: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
) -> dict:
in_files = dict()
assert subject == "average"
@@ -137,7 +136,7 @@ def run_average(
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
subject: str,
- session: Optional[str],
+ session: str | None,
in_files: dict,
):
assert subject == "average"
diff --git a/mne_bids_pipeline/tests/test_run.py b/mne_bids_pipeline/tests/test_run.py
index cc69c9efc..952be5f13 100644
--- a/mne_bids_pipeline/tests/test_run.py
+++ b/mne_bids_pipeline/tests/test_run.py
@@ -5,7 +5,7 @@
import sys
from collections.abc import Collection
from pathlib import Path
-from typing import Optional, TypedDict
+from typing import TypedDict
import pytest
@@ -25,7 +25,7 @@ class _TestOptionsT(TypedDict, total=False):
dataset: str # key.split("_")[0]
config: str # f"config_{key}.py"
steps: Collection[str] # ("preprocessing", "sensor")
- task: Optional[str] # None
+ task: str | None # None
env: dict[str, str] # {}
requires: Collection[str] # ()
extra_config: str # ""
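
As an aside, the `_TestOptionsT` pattern above (a `TypedDict` with `total=False`) makes every key optional while keeping per-key types checked. A minimal, self-contained sketch of the same pattern; the names and values are illustrative, not taken from the pipeline:

```python
from typing import TypedDict


class _DemoOptionsT(TypedDict, total=False):
    dataset: str
    task: str | None  # PEP 604 union, available on Python 3.10+


# total=False makes every key optional:
opts: _DemoOptionsT = {"dataset": "demo"}
opts["task"] = None  # an explicit "no task" is also valid
print(opts)
```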
diff --git a/mne_bids_pipeline/typing.py b/mne_bids_pipeline/typing.py
index 61f2abdeb..8ac9ecfe4 100644
--- a/mne_bids_pipeline/typing.py
+++ b/mne_bids_pipeline/typing.py
@@ -2,7 +2,7 @@
import pathlib
import sys
-from typing import Annotated, Union
+from typing import Annotated
if sys.version_info < (3, 12):
from typing_extensions import TypedDict
@@ -14,7 +14,7 @@
from numpy.typing import ArrayLike
from pydantic import PlainValidator
-PathLike = Union[str, pathlib.Path]
+PathLike = str | pathlib.Path
class ArbitraryContrast(TypedDict):
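
The `PathLike` alias now uses a PEP 604 union, which also works at runtime (e.g., with `isinstance`) on Python 3.10+. A small standalone sketch:

```python
import pathlib

PathLike = str | pathlib.Path  # PEP 604 union as a runtime type alias


def normalize(path: PathLike) -> pathlib.Path:
    # isinstance() accepts PEP 604 unions directly on Python 3.10+
    if not isinstance(path, PathLike):
        raise TypeError(f"expected str or Path, got {type(path).__name__}")
    return pathlib.Path(path)


print(normalize("data/sub-01"))
print(normalize(pathlib.Path("data") / "sub-02"))
```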
diff --git a/pyproject.toml b/pyproject.toml
index 7c4928f35..fae794214 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,7 +7,7 @@ name = "mne-bids-pipeline"
# Keep in sync with README.md:
description = "A full-flegded processing pipeline for your MEG and EEG data"
readme = "README.md"
-requires-python = ">=3.9"
+requires-python = ">=3.10"
license = { file = "LICENSE.txt" }
keywords = ["science", "neuroscience", "psychology"]
authors = [
From f74d9ebf423b61b12fc82a4a6b88f4fe2ccd76d6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Thu, 28 Mar 2024 17:08:41 +0100
Subject: [PATCH 098/132] Render type annotations in the documentation again
(#909)
---
docs/mkdocs.yml | 2 ++
1 file changed, 2 insertions(+)
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 190d0f517..6288ba69e 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -152,6 +152,8 @@ plugins:
show_root_toc_entry: false
show_root_full_path: false
separate_signature: true
+ show_signature_annotations: true
+ signature_crossrefs: true
line_length: 88 # Black's default
show_bases: false
docstring_style: numpy
From 59d83fa65a657b5570ca59ae61b0cd03d8e0228e Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Thu, 28 Mar 2024 12:53:45 -0400
Subject: [PATCH 099/132] MAINT: Ensure input changes cause output changes
(#904)
---
docs/source/v1.9.md.inc | 8 +-
mne_bids_pipeline/_import_data.py | 7 +-
mne_bids_pipeline/_logging.py | 1 +
mne_bids_pipeline/_report.py | 2 +-
mne_bids_pipeline/_run.py | 30 ++-
.../steps/init/_01_init_derivatives_dir.py | 21 +-
.../steps/preprocessing/_01_data_quality.py | 191 +++++++++---------
.../preprocessing/_04_frequency_filter.py | 6 +-
.../preprocessing/_05_regress_artifact.py | 5 +-
.../steps/sensor/_02_decoding_full_epochs.py | 2 +-
.../steps/sensor/_05_decoding_csp.py | 2 +-
.../steps/sensor/_06_make_cov.py | 7 +-
.../steps/sensor/_99_group_average.py | 29 ++-
mne_bids_pipeline/tests/test_functions.py | 64 ++++++
14 files changed, 246 insertions(+), 129 deletions(-)
create mode 100644 mne_bids_pipeline/tests/test_functions.py
diff --git a/docs/source/v1.9.md.inc b/docs/source/v1.9.md.inc
index 0ff3240cc..8d56bc9a5 100644
--- a/docs/source/v1.9.md.inc
+++ b/docs/source/v1.9.md.inc
@@ -18,9 +18,13 @@
[//]: # (- Whatever (#000 by @whoever))
-[//]: # (### :bug: Bug fixes)
+### :bug: Bug fixes
-[//]: # (- Whatever (#000 by @whoever))
+- When running the pipeline with [`find_bad_channels_meg`][mne_bids_pipeline._config.find_bad_channels_meg] enabled,
+  then disabling it and running the pipeline again, the pipeline would incorrectly still use the automatically detected
+  bad channels from the first run. Now, we ensure that the original bad channels are used and the
+  related section is removed from the report in this case. (#902 by @larsoner)
+- Fixed a bug where group-average decoding statistics were not updated in some cases, even if relevant configuration options had been changed. (#902 by @larsoner)
### :medical_symbol: Code health and infrastructure
diff --git a/mne_bids_pipeline/_import_data.py b/mne_bids_pipeline/_import_data.py
index 1043db0d5..e001406f1 100644
--- a/mne_bids_pipeline/_import_data.py
+++ b/mne_bids_pipeline/_import_data.py
@@ -816,12 +816,13 @@ def _import_data_kwargs(*, config: SimpleNamespace, subject: str) -> dict:
)
-def _get_run_type(
+def _read_raw_msg(
+ bids_path_in: BIDSPath,
run: str | None,
task: str | None,
-) -> str:
+) -> tuple[str, str]:
if run is None and task in ("noise", "rest"):
run_type = dict(rest="resting-state", noise="empty-room")[task]
else:
run_type = "experimental"
- return run_type
+ return f"Reading {run_type} recording: {bids_path_in.basename}", run_type
diff --git a/mne_bids_pipeline/_logging.py b/mne_bids_pipeline/_logging.py
index 187930be2..4d561a488 100644
--- a/mne_bids_pipeline/_logging.py
+++ b/mne_bids_pipeline/_logging.py
@@ -117,6 +117,7 @@ def gen_log_kwargs(
emoji: str = "⏳️",
) -> LogKwargsT:
# Try to figure these out
+ assert isinstance(message, str), type(message)
stack = inspect.stack()
up_locals = stack[1].frame.f_locals
if subject is None:
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index db14020fa..1ef1efbb6 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -209,7 +209,7 @@ def _plot_mean_cv_score(x, **kwargs):
g.set_xlabels("")
fig = g.fig
- return fig, caption
+ return fig, caption, data
def _plot_time_by_time_decoding_scores(
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index de1321352..8f5cdfcf0 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -24,8 +24,10 @@
def failsafe_run(
+ *,
get_input_fnames: Callable | None = None,
get_output_fnames: Callable | None = None,
+ require_output: bool = True,
) -> Callable:
def failsafe_run_decorator(func):
@functools.wraps(func) # Preserve "identity" of original function
@@ -37,6 +39,8 @@ def __mne_bids_pipeline_failsafe_wrapper__(*args, **kwargs):
exec_params=exec_params,
get_input_fnames=get_input_fnames,
get_output_fnames=get_output_fnames,
+ require_output=require_output,
+ func_name=f"{__mne_bids_pipeline_step__}::{func.__name__}",
)
t0 = time.time()
log_info = pd.concat(
@@ -117,7 +121,15 @@ def hash_file_path(path: pathlib.Path) -> str:
class ConditionalStepMemory:
- def __init__(self, *, exec_params, get_input_fnames, get_output_fnames):
+ def __init__(
+ self,
+ *,
+ exec_params: SimpleNamespace,
+ get_input_fnames: Callable | None,
+ get_output_fnames: Callable | None,
+ require_output: bool,
+ func_name: str,
+ ):
memory_location = exec_params.memory_location
if memory_location is True:
use_location = exec_params.deriv_root / exec_params.memory_subdir
@@ -135,6 +147,8 @@ def __init__(self, *, exec_params, get_input_fnames, get_output_fnames):
self.get_input_fnames = get_input_fnames
self.get_output_fnames = get_output_fnames
self.memory_file_method = exec_params.memory_file_method
+ self.require_output = require_output
+ self.func_name = func_name
def cache(self, func):
def wrapper(*args, **kwargs):
@@ -263,9 +277,19 @@ def wrapper(*args, **kwargs):
# https://joblib.readthedocs.io/en/latest/memory.html#joblib.memory.MemorizedFunc.call # noqa: E501
if force_run or unknown_inputs or bad_out_files:
- memorized_func.call(*args, **kwargs)
+ out_files, _ = memorized_func.call(*args, **kwargs)
+ else:
+ out_files = memorized_func(*args, **kwargs)
+ if self.require_output:
+ assert isinstance(out_files, dict) and len(out_files), (
+ f"Internal error: step must return non-empty out_files dict, got "
+ f"{type(out_files).__name__} for:\n{self.func_name}"
+ )
else:
- memorized_func(*args, **kwargs)
+ assert out_files is None, (
+ f"Internal error: step must return None, got {type(out_files)} "
+ f"for:\n{self.func_name}"
+ )
return wrapper
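
The net effect is a contract: steps decorated with `failsafe_run()` must return a non-empty `out_files` dict unless `require_output=False`. A condensed sketch of that check, with a toy step (names are illustrative):

```python
from types import SimpleNamespace


def check_step_output(out_files, *, require_output: bool, func_name: str) -> None:
    # Condensed version of the assertions the wrapper adds above
    if require_output:
        assert isinstance(out_files, dict) and len(out_files), (
            f"step must return a non-empty out_files dict: {func_name}"
        )
    else:
        assert out_files is None, f"step must return None: {func_name}"


def toy_step(cfg: SimpleNamespace) -> dict:
    # A real step would write files and return their paths keyed by name
    return {"report": f"{cfg.deriv_root}/report.html"}


out = toy_step(SimpleNamespace(deriv_root="/tmp/derivatives"))
check_step_output(out, require_output=True, func_name="demo::toy_step")
print("contract satisfied:", out)
```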
diff --git a/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py b/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
index de7a2de0d..e779b1382 100644
--- a/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
+++ b/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
@@ -3,6 +3,7 @@
Initialize the derivatives directory.
"""
+from pathlib import Path
from types import SimpleNamespace
from mne_bids.config import BIDS_VERSION
@@ -10,16 +11,14 @@
from ..._config_utils import _bids_kwargs, get_sessions, get_subjects
from ..._logging import gen_log_kwargs, logger
-from ..._run import failsafe_run
+from ..._run import _prep_out_files, failsafe_run
-def init_dataset(cfg) -> None:
+@failsafe_run()
+def init_dataset(cfg: SimpleNamespace, exec_params: SimpleNamespace) -> dict[str, Path]:
"""Prepare the pipeline directory in /derivatives."""
- fname_json = cfg.deriv_root / "dataset_description.json"
- if fname_json.is_file():
- msg = "Output directories already exist …"
- logger.info(**gen_log_kwargs(message=msg, emoji="✅"))
- return
+ out_files = dict()
+ out_files["json"] = cfg.deriv_root / "dataset_description.json"
logger.info(**gen_log_kwargs(message="Initializing output directories."))
cfg.deriv_root.mkdir(exist_ok=True, parents=True)
@@ -37,10 +36,12 @@ def init_dataset(cfg) -> None:
"URL": "n/a",
}
- _write_json(fname_json, ds_json, overwrite=True)
+ _write_json(out_files["json"], ds_json, overwrite=True)
+ return _prep_out_files(
+ exec_params=exec_params, out_files=out_files, bids_only=False
+ )
-@failsafe_run()
def init_subject_dirs(
*,
cfg: SimpleNamespace,
@@ -72,7 +73,7 @@ def get_config(
def main(*, config):
"""Initialize the output directories."""
- init_dataset(cfg=get_config(config=config))
+ init_dataset(cfg=get_config(config=config), exec_params=config.exec_params)
# Don't bother with parallelization here as I/O operations are generally
# not well parallelized (and this should be very fast anyway)
for subject in get_subjects(config):
diff --git a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
index 0c8f81e96..2f5f98bb7 100644
--- a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
+++ b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
@@ -4,7 +4,6 @@
import mne
import pandas as pd
-from mne_bids import BIDSPath
from ..._config_utils import (
_do_mf_autobad,
@@ -20,6 +19,7 @@
_get_mf_reference_run_path,
_get_run_rest_noise_path,
_import_data_kwargs,
+ _read_raw_msg,
import_er_data,
import_experimental_data,
)
@@ -78,25 +78,96 @@ def assess_data_quality(
out_files = dict()
key = f"raw_task-{task}_run-{run}"
bids_path_in = in_files.pop(key)
+ if key == "raw_task-noise_run-None":
+ bids_path_ref_in = in_files.pop("raw_ref_run", None)
+ else:
+ bids_path_ref_in = None
+ msg, _ = _read_raw_msg(bids_path_in=bids_path_in, run=run, task=task)
+ logger.info(**gen_log_kwargs(message=msg))
+ if run is None and task == "noise":
+ raw = import_er_data(
+ cfg=cfg,
+ bids_path_er_in=bids_path_in,
+ bids_path_er_bads_in=None,
+ bids_path_ref_in=bids_path_ref_in,
+ bids_path_ref_bads_in=None,
+ prepare_maxwell_filter=True,
+ )
+ else:
+ data_is_rest = run is None and task == "rest"
+ raw = import_experimental_data(
+ bids_path_in=bids_path_in,
+ bids_path_bads_in=None,
+ cfg=cfg,
+ data_is_rest=data_is_rest,
+ )
+ preexisting_bads = set(raw.info["bads"])
+
if _do_mf_autobad(cfg=cfg):
- if key == "raw_task-noise_run-None":
- bids_path_ref_in = in_files.pop("raw_ref_run")
- else:
- bids_path_ref_in = None
- auto_scores = _find_bads_maxwell(
+ (
+ auto_noisy_chs,
+ auto_flat_chs,
+ auto_scores,
+ ) = _find_bads_maxwell(
cfg=cfg,
exec_params=exec_params,
- bids_path_in=bids_path_in,
- bids_path_ref_in=bids_path_ref_in,
+ raw=raw,
subject=subject,
session=session,
run=run,
task=task,
- out_files=out_files,
)
+ bads = sorted(set(raw.info["bads"] + auto_noisy_chs + auto_flat_chs))
+ msg = f"Found {len(bads)} channel{_pl(bads)} as bad."
+ raw.info["bads"] = bads
+ del bads
+ logger.info(**gen_log_kwargs(message=msg))
else:
- auto_scores = None
- del key
+ auto_scores = auto_noisy_chs = auto_flat_chs = None
+ del key, raw
+
+ # Always output the scores and bads TSV
+ out_files["auto_scores"] = bids_path_in.copy().update(
+ suffix="scores",
+ extension=".json",
+ root=cfg.deriv_root,
+ split=None,
+ check=False,
+ session=session,
+ subject=subject,
+ )
+ _write_json(out_files["auto_scores"], auto_scores)
+
+ # Write the bad channels to disk.
+ out_files["bads_tsv"] = _bads_path(
+ cfg=cfg,
+ bids_path_in=bids_path_in,
+ subject=subject,
+ session=session,
+ )
+ bads_for_tsv = []
+ reasons = []
+
+ if auto_flat_chs:
+ bads_for_tsv.extend(auto_flat_chs)
+ reasons.extend(["auto-flat"] * len(auto_flat_chs))
+ preexisting_bads -= set(auto_flat_chs)
+
+ if auto_noisy_chs is not None:
+ bads_for_tsv.extend(auto_noisy_chs)
+ reasons.extend(["auto-noisy"] * len(auto_noisy_chs))
+ preexisting_bads -= set(auto_noisy_chs)
+
+ preexisting_bads = sorted(preexisting_bads)
+ if preexisting_bads:
+ bads_for_tsv.extend(preexisting_bads)
+ reasons.extend(
+ ["pre-existing (before MNE-BIDS-pipeline was run)"] * len(preexisting_bads)
+ )
+
+ tsv_data = pd.DataFrame(dict(name=bads_for_tsv, reason=reasons))
+ tsv_data = tsv_data.sort_values(by="name")
+ tsv_data.to_csv(out_files["bads_tsv"], sep="\t", index=False)
# Report
with _open_report(
@@ -118,6 +189,7 @@ def assess_data_quality(
title=f"Raw ({kind})",
tags=("data-quality",),
)
+ title = f"Bad channel detection: {run}"
if cfg.find_noisy_channels_meg:
assert auto_scores is not None
msg = "Adding noisy channel detection to report"
@@ -129,12 +201,14 @@ def assess_data_quality(
fig=figs,
caption=captions,
section="Data quality",
- title=f"Bad channel detection: {run}",
+ title=title,
tags=tags,
replace=True,
)
for fig in figs:
plt.close(fig)
+ else:
+ report.remove(title=title)
assert len(in_files) == 0, in_files.keys()
return _prep_out_files(exec_params=exec_params, out_files=out_files)
@@ -144,45 +218,25 @@ def _find_bads_maxwell(
*,
cfg: SimpleNamespace,
exec_params: SimpleNamespace,
- bids_path_in: BIDSPath,
- bids_path_ref_in: BIDSPath | None,
+ raw: mne.io.Raw,
subject: str,
session: str | None,
run: str | None,
task: str | None,
- out_files: dict,
):
- if cfg.find_flat_channels_meg and not cfg.find_noisy_channels_meg:
- msg = "Finding flat channels."
- elif cfg.find_noisy_channels_meg and not cfg.find_flat_channels_meg:
- msg = "Finding noisy channels using Maxwell filtering."
+ if cfg.find_flat_channels_meg:
+ if cfg.find_noisy_channels_meg:
+ msg = "Finding flat channels and noisy channels using Maxwell filtering."
+ else:
+ msg = "Finding flat channels."
else:
- msg = "Finding flat channels and noisy channels using Maxwell filtering."
+ assert cfg.find_noisy_channels_meg
+ msg = "Finding noisy channels using Maxwell filtering."
logger.info(**gen_log_kwargs(message=msg))
- if run is None and task == "noise":
- raw = import_er_data(
- cfg=cfg,
- bids_path_er_in=bids_path_in,
- bids_path_er_bads_in=None,
- bids_path_ref_in=bids_path_ref_in,
- bids_path_ref_bads_in=None,
- prepare_maxwell_filter=True,
- )
- else:
- data_is_rest = run is None and task == "rest"
- raw = import_experimental_data(
- bids_path_in=bids_path_in,
- bids_path_bads_in=None,
- cfg=cfg,
- data_is_rest=data_is_rest,
- )
-
# Filter the data manually before passing it to find_bad_channels_maxwell()
# This reduces memory usage, as we can control the number of jobs used
# during filtering.
- preexisting_bads = raw.info["bads"].copy()
- bads = preexisting_bads.copy()
raw_filt = raw.copy().filter(l_freq=None, h_freq=40, n_jobs=1)
(
auto_noisy_chs,
@@ -208,7 +262,8 @@ def _find_bads_maxwell(
else:
msg = "Found no flat channels."
logger.info(**gen_log_kwargs(message=msg))
- bads.extend(auto_flat_chs)
+ else:
+ auto_flat_chs = []
if cfg.find_noisy_channels_meg:
if auto_noisy_chs:
@@ -221,56 +276,8 @@ def _find_bads_maxwell(
msg = "Found no noisy channels."
logger.info(**gen_log_kwargs(message=msg))
- bads.extend(auto_noisy_chs)
-
- bads = sorted(set(bads))
- msg = f"Found {len(bads)} channel{_pl(bads)} as bad."
- raw.info["bads"] = bads
- del bads
- logger.info(**gen_log_kwargs(message=msg))
-
- if cfg.find_noisy_channels_meg:
- out_files["auto_scores"] = bids_path_in.copy().update(
- suffix="scores",
- extension=".json",
- root=cfg.deriv_root,
- split=None,
- check=False,
- session=session,
- subject=subject,
- )
- _write_json(out_files["auto_scores"], auto_scores)
-
- # Write the bad channels to disk.
- out_files["bads_tsv"] = _bads_path(
- cfg=cfg,
- bids_path_in=bids_path_in,
- subject=subject,
- session=session,
- )
- bads_for_tsv = []
- reasons = []
-
- if cfg.find_flat_channels_meg:
- bads_for_tsv.extend(auto_flat_chs)
- reasons.extend(["auto-flat"] * len(auto_flat_chs))
- preexisting_bads = set(preexisting_bads) - set(auto_flat_chs)
-
- if cfg.find_noisy_channels_meg:
- bads_for_tsv.extend(auto_noisy_chs)
- reasons.extend(["auto-noisy"] * len(auto_noisy_chs))
- preexisting_bads = set(preexisting_bads) - set(auto_noisy_chs)
-
- preexisting_bads = list(preexisting_bads)
- if preexisting_bads:
- bads_for_tsv.extend(preexisting_bads)
- reasons.extend(
- ["pre-existing (before MNE-BIDS-pipeline was run)"] * len(preexisting_bads)
- )
-
- tsv_data = pd.DataFrame(dict(name=bads_for_tsv, reason=reasons))
- tsv_data = tsv_data.sort_values(by="name")
- tsv_data.to_csv(out_files["bads_tsv"], sep="\t", index=False)
+ else:
+ auto_noisy_chs = []
# Interaction
if exec_params.interactive and cfg.find_noisy_channels_meg:
@@ -279,7 +286,7 @@ def _find_bads_maxwell(
plot_auto_scores(auto_scores, ch_types=cfg.ch_types)
plt.show()
- return auto_scores
+ return auto_noisy_chs, auto_flat_chs, auto_scores
def get_config(
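
With the I/O moved out of `_find_bads_maxwell`, the bads TSV is assembled in one place from the auto-detected and pre-existing channels. A minimal pandas sketch of the resulting table (channel names are made up):

```python
import pandas as pd

auto_flat_chs = ["MEG 0111"]
auto_noisy_chs = ["MEG 1421", "MEG 2443"]
preexisting_bads = ["EEG 053"]  # bads already marked in the raw data

names = auto_flat_chs + auto_noisy_chs + preexisting_bads
reasons = (
    ["auto-flat"] * len(auto_flat_chs)
    + ["auto-noisy"] * len(auto_noisy_chs)
    + ["pre-existing (before MNE-BIDS-pipeline was run)"] * len(preexisting_bads)
)

tsv_data = pd.DataFrame(dict(name=names, reason=reasons)).sort_values(by="name")
print(tsv_data.to_csv(sep="\t", index=False))
```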
diff --git a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
index 330829ab8..aec4b609a 100644
--- a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
@@ -30,8 +30,8 @@
)
from ..._import_data import (
_get_run_rest_noise_path,
- _get_run_type,
_import_data_kwargs,
+ _read_raw_msg,
import_er_data,
import_experimental_data,
)
@@ -167,9 +167,7 @@ def filter_data(
in_key = f"raw_task-{task}_run-{run}"
bids_path_in = in_files.pop(in_key)
bids_path_bads_in = in_files.pop(f"{in_key}-bads", None)
-
- run_type = _get_run_type(run=run, task=task)
- msg = f"Reading {run_type} recording: " f"{bids_path_in.basename}"
+ msg, run_type = _read_raw_msg(bids_path_in=bids_path_in, run=run, task=task)
logger.info(**gen_log_kwargs(message=msg))
if cfg.use_maxwell_filter:
raw = mne.io.read_raw_fif(bids_path_in)
diff --git a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
index bc63acd64..d52f78ed1 100644
--- a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
+++ b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
@@ -11,7 +11,7 @@
get_sessions,
get_subjects,
)
-from ..._import_data import _get_run_rest_noise_path, _get_run_type, _import_data_kwargs
+from ..._import_data import _get_run_rest_noise_path, _import_data_kwargs, _read_raw_msg
from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _add_raw, _open_report
@@ -58,8 +58,7 @@ def run_regress_artifact(
in_key = f"raw_task-{task}_run-{run}"
bids_path_in = in_files.pop(in_key)
out_files[in_key] = bids_path_in.copy().update(processing="regress")
- run_type = _get_run_type(run=run, task=task)
- msg = f"Reading {run_type} recording: " f"{bids_path_in.basename}"
+ msg, _ = _read_raw_msg(bids_path_in=bids_path_in, run=run, task=task)
logger.info(**gen_log_kwargs(message=msg))
raw = mne.io.read_raw_fif(bids_path_in).load_data()
projs = raw.info["projs"]
diff --git a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
index 58ccc7f2e..597ee409f 100644
--- a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
+++ b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py
@@ -200,7 +200,7 @@ def run_epochs_decoding(
all_contrasts.append(contrast)
del fname_decoding, processing, a_vs_b, decoding_data
- fig, caption = _plot_full_epochs_decoding_scores(
+ fig, caption, _ = _plot_full_epochs_decoding_scores(
contrast_names=_contrasts_to_names(all_contrasts),
scores=all_decoding_scores,
metric=cfg.decoding_metric,
diff --git a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
index 368caaeb3..4f3cd0ae2 100644
--- a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
+++ b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py
@@ -392,7 +392,7 @@ def _fmt_contrast(cond1, cond2, fmin, fmax, freq_range_name, tmin=None, tmax=Non
contrast_names.append(
f"{freq_range_name}\n" f"({f_min:0.1f}-{f_max:0.1f} Hz)"
)
- fig, caption = _plot_full_epochs_decoding_scores(
+ fig, caption, _ = _plot_full_epochs_decoding_scores(
contrast_names=contrast_names,
scores=all_decoding_scores,
metric=cfg.decoding_metric,
diff --git a/mne_bids_pipeline/steps/sensor/_06_make_cov.py b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
index a9e23cc60..0b907b679 100644
--- a/mne_bids_pipeline/steps/sensor/_06_make_cov.py
+++ b/mne_bids_pipeline/steps/sensor/_06_make_cov.py
@@ -78,8 +78,7 @@ def get_input_fnames_cov(
root=cfg.deriv_root,
check=False,
)
- run_type = "resting-state" if cfg.noise_cov == "rest" else "empty-room"
- if run_type == "resting-state":
+ if cfg.noise_cov == "rest":
bids_path_raw_noise.task = "rest"
else:
bids_path_raw_noise.task = "noise"
@@ -133,8 +132,8 @@ def compute_cov_from_raw(
out_files: dict,
) -> mne.Covariance:
fname_raw = in_files.pop("raw")
- run_type = "resting-state" if fname_raw.task == "rest" else "empty-room"
- msg = f"Computing regularized covariance based on {run_type} recording."
+ run_msg = "resting-state" if fname_raw.task == "rest" else "empty-room"
+ msg = f"Computing regularized covariance based on {run_msg} recording."
logger.info(**gen_log_kwargs(message=msg))
msg = f"Input: {fname_raw.basename}"
logger.info(**gen_log_kwargs(message=msg))
diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py
index 2403bce3c..ffc645cdf 100644
--- a/mne_bids_pipeline/steps/sensor/_99_group_average.py
+++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py
@@ -245,13 +245,19 @@ def _decoding_out_fname(
cfg: SimpleNamespace,
subject: str,
session: str | None,
- cond_1: str,
- cond_2: str,
+ cond_1: str | None,
+ cond_2: str | None,
kind: str,
extension: str = ".mat",
):
+ if cond_1 is None:
+ assert cond_2 is None
+ processing = ""
+ else:
+ assert cond_2 is not None
+ processing = f"{cond_1}+{cond_2}+"
processing = (
- f"{cond_1}+{cond_2}+{kind}+{cfg.decoding_metric}".replace(op.sep, "")
+ f"{processing}{kind}+{cfg.decoding_metric}".replace(op.sep, "")
.replace("_", "-")
.replace("-", "")
)
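
The BIDS `processing` entity is built from the contrast (now optional), kind, and metric, then stripped of path separators, underscores, and hyphens. A quick standalone sketch of that string munging:

```python
import os.path as op


def make_processing(cond_1, cond_2, kind, metric):
    # cond_1/cond_2 may be None for outputs aggregating over all contrasts
    prefix = "" if cond_1 is None else f"{cond_1}+{cond_2}+"
    proc = f"{prefix}{kind}+{metric}"
    return proc.replace(op.sep, "").replace("_", "-").replace("-", "")


print(make_processing("auditory", "visual", "TimeByTime", "roc_auc"))
# -> auditory+visual+TimeByTime+rocauc
print(make_processing(None, None, "FullEpochs", "roc_auc"))
# -> FullEpochs+rocauc
```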
@@ -654,6 +660,17 @@ def average_full_epochs_report(
in_files: dict,
) -> dict:
"""Add decoding results to the grand average report."""
+ out_files = dict()
+ out_files["cluster"] = _decoding_out_fname(
+ cfg=cfg,
+ subject=subject,
+ session=session,
+ cond_1=None,
+ cond_2=None,
+ kind="FullEpochs",
+ extension=".xlsx",
+ )
+
with _open_report(
cfg=cfg, exec_params=exec_params, subject=subject, session=session
) as report:
@@ -672,12 +689,14 @@ def average_full_epochs_report(
all_decoding_scores.append(np.atleast_1d(decoding_data["scores"].squeeze()))
del decoding_data
- fig, caption = _plot_full_epochs_decoding_scores(
+ fig, caption, data = _plot_full_epochs_decoding_scores(
contrast_names=_contrasts_to_names(decoding_contrasts),
scores=all_decoding_scores,
metric=cfg.decoding_metric,
kind="grand-average",
)
+ with pd.ExcelWriter(out_files["cluster"]) as w:
+ data.to_excel(w, sheet_name="FullEpochs", index=False)
report.add_figure(
fig=fig,
title="Full-epochs decoding",
@@ -696,7 +715,7 @@ def average_full_epochs_report(
)
# close figure to save memory
plt.close(fig)
- return _prep_out_files(exec_params=exec_params, out_files=dict())
+ return _prep_out_files(exec_params=exec_params, out_files=out_files)
@failsafe_run(
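
The new `.xlsx` output uses a plain `pandas.ExcelWriter` context manager. A self-contained sketch (the default engine for `.xlsx` requires `openpyxl`; the column names here are illustrative):

```python
import pandas as pd

data = pd.DataFrame(
    {
        "contrast": ["auditory vs. visual"],
        "mean_score": [0.87],
    }
)

with pd.ExcelWriter("decoding_FullEpochs.xlsx") as w:
    data.to_excel(w, sheet_name="FullEpochs", index=False)
print("wrote decoding_FullEpochs.xlsx")
```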
diff --git a/mne_bids_pipeline/tests/test_functions.py b/mne_bids_pipeline/tests/test_functions.py
new file mode 100644
index 000000000..f4d64adf4
--- /dev/null
+++ b/mne_bids_pipeline/tests/test_functions.py
@@ -0,0 +1,64 @@
+"""Test some properties of our core processing-step functions."""
+
+import ast
+import inspect
+
+import pytest
+
+from mne_bids_pipeline._config_utils import _get_step_modules
+
+# e.g. "mne_bids_pipeline.steps.init._01_init_derivatives_dir" -> module
+FLAT_MODULES = {x.__name__: x for x in sum(_get_step_modules().values(), ())}
+
+
+@pytest.mark.parametrize("module_name", list(FLAT_MODULES))
+def test_all_functions_return(module_name):
+ """Test that all functions decorated with failsafe_run return a dict."""
+ # Find the functions within the module that use the failsafe_run decorator
+ module = FLAT_MODULES[module_name]
+ funcs = list()
+ for name in dir(module):
+ obj = getattr(module, name)
+ if not callable(obj):
+ continue
+ if getattr(obj, "__module__", None) != module_name:
+ continue
+ if not hasattr(obj, "__wrapped__"):
+ continue
+ # All our failsafe_run decorated functions should look like this
+ assert "__mne_bids_pipeline_failsafe_wrapper__" in repr(obj.__code__)
+ funcs.append(obj)
+ # Some module names we know don't have any
+ if module_name.split(".")[-1] in ("_01_recon_all",):
+ assert len(funcs) == 0
+ return
+
+    assert len(funcs) != 0, f"No failsafe_run functions found in {module_name}"
+
+ # Adapted from numpydoc RT01 validation
+ def get_returns_not_on_nested_functions(node):
+ returns = [node] if isinstance(node, ast.Return) else []
+ for child in ast.iter_child_nodes(node):
+ # Ignore nested functions and its subtrees.
+ if not isinstance(child, ast.FunctionDef):
+ child_returns = get_returns_not_on_nested_functions(child)
+ returns.extend(child_returns)
+ return returns
+
+ for func in funcs:
+ what = f"{module_name}.{func.__name__}"
+ tree = ast.parse(inspect.getsource(func.__wrapped__)).body
+ if func.__closure__[-1].cell_contents is False:
+ continue # last closure node is require_output=False
+ assert tree, f"Failed to parse source code for {what}"
+ returns = get_returns_not_on_nested_functions(tree[0])
+ return_values = [r.value for r in returns]
+        # Replace Constant nodes whose value is None with plain None.
+ for i, v in enumerate(return_values):
+ if isinstance(v, ast.Constant) and v.value is None:
+ return_values[i] = None
+ assert len(return_values), f"Function does not return anything: {what}"
+ for r in return_values:
+ assert (
+ isinstance(r, ast.Call) and r.func.id == "_prep_out_files"
+ ), f"Function does _prep_out_files: {what}"
From 8cc07545e52af2de72f2c3809aba8a56594dacfe Mon Sep 17 00:00:00 2001
From: Sophie Herbst
Date: Fri, 29 Mar 2024 13:40:40 +0100
Subject: [PATCH 100/132] Add number of subjects to grand-average report
(cont'd) (#910)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: Richard Höchenberger
---
docs/source/v1.9.md.inc | 2 +-
mne_bids_pipeline/_report.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/source/v1.9.md.inc b/docs/source/v1.9.md.inc
index 8d56bc9a5..d42c91b31 100644
--- a/docs/source/v1.9.md.inc
+++ b/docs/source/v1.9.md.inc
@@ -2,7 +2,7 @@
### :new: New features & enhancements
-- Added number of subject to `sub-average` report (#902 by @SophieHerbst)
+- Added number of subjects to `sub-average` report (#902, #910 by @SophieHerbst)
- The type annotations in the default configuration file are now easier to read: We
replaced `Union[X, Y]` with `X | Y` and `Optional[X]` with `X | None`. (#908 by @hoechenberger)
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index 1ef1efbb6..fe77d7951 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -603,7 +603,7 @@ def add_csp_grand_average(
import matplotlib.pyplot as plt # nested import to help joblib
# First, plot decoding scores across frequency bins (entire epochs).
- section = "Decoding: CSP"
+ section = f"Decoding: CSP, N = {len(cfg.subjects)}"
freq_name_to_bins_map, _ = _handle_csp_args(
cfg.decoding_csp_times,
cfg.decoding_csp_freqs,
From 97069467934c7dfa483cb32a0f1f1b7c797271af Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Fri, 29 Mar 2024 16:10:57 +0100
Subject: [PATCH 101/132] Do not show `Annotated` types in configuration
options documentation (#911)
---
.circleci/config.yml | 4 ++++
docs/mkdocs.yml | 1 +
docs/source/v1.9.md.inc | 2 +-
pyproject.toml | 15 +++++++++------
4 files changed, 15 insertions(+), 7 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 5dc0d8901..255f62063 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1030,6 +1030,10 @@ jobs:
- attach_workspace:
at: ~/
- bash_env
+ - run:
+ name: Install dependencies
+ command: |
+ pip install -ve .[docs]
- run:
name: Build documentation
command: |
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 6288ba69e..b8e187b5c 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -153,6 +153,7 @@ plugins:
show_root_full_path: false
separate_signature: true
show_signature_annotations: true
+ unwrap_annotated: true
signature_crossrefs: true
line_length: 88 # Black's default
show_bases: false
diff --git a/docs/source/v1.9.md.inc b/docs/source/v1.9.md.inc
index d42c91b31..c429abbcc 100644
--- a/docs/source/v1.9.md.inc
+++ b/docs/source/v1.9.md.inc
@@ -4,7 +4,7 @@
- Added number of subjects to `sub-average` report (#902, #910 by @SophieHerbst)
- The type annotations in the default configuration file are now easier to read: We
- replaced `Union[X, Y]` with `X | Y` and `Optional[X]` with `X | None`. (#908 by @hoechenberger)
+ replaced `Union[X, Y]` with `X | Y` and `Optional[X]` with `X | None`. (#908, #911 by @hoechenberger)
[//]: # (- Whatever (#000 by @whoever))
diff --git a/pyproject.toml b/pyproject.toml
index fae794214..738fe2a6a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -59,6 +59,14 @@ tests = [
"pooch",
"psutil",
"ruff",
+ "jinja2",
+ "openneuro-py >= 2022.2.0",
+ "httpx >= 0.20",
+ "tqdm",
+ "Pygments",
+ "pyyaml",
+]
+docs = [
"mkdocs",
"mkdocs-material >= 9.0.4",
"mkdocs-material-extensions",
@@ -67,13 +75,8 @@ tests = [
"mkdocs-exclude",
"mkdocstrings-python",
"mike",
- "jinja2",
"livereload",
- "openneuro-py >= 2022.2.0",
- "httpx >= 0.20",
- "tqdm",
- "Pygments",
- "pyyaml",
+ "black", # docstring reformatting
]
[project.scripts]
From 635b49aad4192decdc33d9bb82a2f9e3b9ac2d1d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Fri, 29 Mar 2024 17:39:24 +0100
Subject: [PATCH 102/132] Try to fix documentation deployment (#913)
---
.circleci/config.yml | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 255f62063..66d173fb4 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1062,6 +1062,10 @@ jobs:
at: ~/
- bash_env
- gitconfig
+ - run:
+ name: Install dependencies
+ command: |
+ pip install -ve .[docs]
- run:
# This is a bit computationally inefficient, but it should be much
# faster to "cp" directly on the machine rather than persist
From 00d390cff75016b3ec47f279465010f4ad69a4a6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Sat, 30 Mar 2024 17:52:49 +0100
Subject: [PATCH 103/132] Restructure configuration options documentation
sections (#914)
---
docs/mkdocs.yml | 6 ++-
docs/source/settings/gen_settings.py | 13 ++++-
docs/source/v1.9.md.inc | 10 ++--
mne_bids_pipeline/_config.py | 81 +++++++++++++++++-----------
4 files changed, 72 insertions(+), 38 deletions(-)
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index b8e187b5c..2cba8080e 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -103,7 +103,11 @@ nav:
- Source space & forward solution: settings/source/forward.md
- Inverse solution: settings/source/inverse.md
- Report generation: settings/reports/report_generation.md
- - Execution: settings/execution.md
+ - Caching: settings/caching.md
+ - Parallelization: settings/parallelization.md
+ - Logging: settings/logging.md
+ - Error handling: settings/error_handling.md
+
- Examples:
- Examples Gallery: examples/examples.md
- examples/ds003392.md
diff --git a/docs/source/settings/gen_settings.py b/docs/source/settings/gen_settings.py
index 8300245fe..7dc56d32f 100755
--- a/docs/source/settings/gen_settings.py
+++ b/docs/source/settings/gen_settings.py
@@ -46,7 +46,13 @@
"reports": "reports",
"report generation": "report_generation",
# root file
- "execution": "execution",
+ "caching": "caching",
+ # root file
+ "parallelization": "parallelization",
+ # root file
+ "logging": "logging",
+ # root file
+ "error handling": "error_handling",
}
# TODO: Make sure these are consistent, autogenerate some based on section names,
# and/or autogenerate based on inputs/outputs of actual functions.
@@ -76,7 +82,10 @@
"inverse solution": ("inverse-solution",),
"reports": (),
"report generation": ("report",),
- "execution": (),
+ "caching": ("cache",),
+ "parallelization": ("paralleliation", "dask", "out-of-core"),
+ "logging": ("logging", "error-handling"),
+ "error handling": ("error-handling",),
}
extra_headers = {
diff --git a/docs/source/v1.9.md.inc b/docs/source/v1.9.md.inc
index c429abbcc..a8f897da5 100644
--- a/docs/source/v1.9.md.inc
+++ b/docs/source/v1.9.md.inc
@@ -6,18 +6,22 @@
- The type annotations in the default configuration file are now easier to read: We
replaced `Union[X, Y]` with `X | Y` and `Optional[X]` with `X | None`. (#908, #911 by @hoechenberger)
-[//]: # (- Whatever (#000 by @whoever))
-
[//]: # (### :warning: Behavior changes)
[//]: # (- Whatever (#000 by @whoever))
### :package: Requirements
-- We dropped support for Python 3.9. You now need Python 3.10 or newer.
+- We dropped support for Python 3.9. You now need Python 3.10 or newer. (#908 by @hoechenberger)
[//]: # (- Whatever (#000 by @whoever))
+### :book: Documentation
+
+- We removed the `Execution` section from configuration options documentation and
+ replaced it with new, more explicit sections (namely, Caching, Parallelization,
+ Logging, and Error handling). (#914 by @hoechenberger)
+
### :bug: Bug fixes
- When running the pipeline with [`find_bad_channels_meg`][mne_bids_pipeline._config.find_bad_channels_meg] enabled,
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index 175b96cd2..c4d382a3f 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -2172,10 +2172,44 @@ def noise_cov(bids_path):
"""
# %%
-# # Execution
+# # Caching
#
-# These options control how the pipeline is executed but should not affect
-# what outputs get produced.
+# These settings control if and how pipeline output is being cached to avoid unnecessary
+# computations on a re-run.
+
+memory_location: PathLike | bool | None = True
+"""
+If not None (or False), caching will be enabled and the cache files will be
+stored in the given directory. The default (True) will use a
+`"_cache"` subdirectory (name configurable via the
+[`memory_subdir`][mne_bids_pipeline._config.memory_subdir]
+variable) in the BIDS derivative root of the dataset.
+"""
+
+memory_subdir: str = "_cache"
+"""
+The caching directory name to use if `memory_location` is `True`.
+"""
+
+memory_file_method: Literal["mtime", "hash"] = "mtime"
+"""
+The method to use for cache invalidation (i.e., detecting changes). Using the
+"modified time" reported by the filesystem (`'mtime'`, default) is very fast
+but requires that the filesystem supports proper mtime reporting. Using file
+hashes (`'hash'`) is slower and requires reading all input files but should
+work on any filesystem.
+"""
+
+memory_verbose: int = 0
+"""
+The verbosity to use when using memory. The default (0) does not print, while
+1 will print the function calls that will be cached. See the documentation for
+the joblib.Memory class for more information."""
+
+# %%
+# # Parallelization
+#
+# These options control parallel processing (e.g., multiple subjects at once).
n_jobs: int = 1
"""
@@ -2215,6 +2249,11 @@ def noise_cov(bids_path):
The maximum amount of RAM per Dask worker.
"""
+# %%
+# # Logging
+#
+# These options control how much logging output is produced.
+
log_level: Literal["info", "error"] = "info"
"""
Set the pipeline logging verbosity.
@@ -2225,6 +2264,13 @@ def noise_cov(bids_path):
Set the MNE-Python logging verbosity.
"""
+
+# %%
+# # Error handling
+#
+# These options control how errors while processing the data or the configuration file
+# are handled.
+
on_error: Literal["continue", "abort", "debug"] = "abort"
"""
Whether to abort processing as soon as an error occurs, continue with all other
@@ -2235,35 +2281,6 @@ def noise_cov(bids_path):
Enabling debug mode deactivates parallel processing.
"""
-memory_location: PathLike | bool | None = True
-"""
-If not None (or False), caching will be enabled and the cache files will be
-stored in the given directory. The default (True) will use a
-`"_cache"` subdirectory (name configurable via the
-[`memory_subdir`][mne_bids_pipeline._config.memory_subdir]
-variable) in the BIDS derivative root of the dataset.
-"""
-
-memory_subdir: str = "_cache"
-"""
-The caching directory name to use if `memory_location` is `True`.
-"""
-
-memory_file_method: Literal["mtime", "hash"] = "mtime"
-"""
-The method to use for cache invalidation (i.e., detecting changes). Using the
-"modified time" reported by the filesystem (`'mtime'`, default) is very fast
-but requires that the filesystem supports proper mtime reporting. Using file
-hashes (`'hash'`) is slower and requires reading all input files but should
-work on any filesystem.
-"""
-
-memory_verbose: int = 0
-"""
-The verbosity to use when using memory. The default (0) does not print, while
-1 will print the function calls that will be cached. See the documentation for
-the joblib.Memory class for more information."""
-
config_validation: Literal["raise", "warn", "ignore"] = "raise"
"""
How strictly to validate the configuration. Errors are always raised for
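
For reference, the relocated caching options can be overridden in a user configuration file like any other setting. An illustrative excerpt (values chosen for demonstration, not recommendations):

```python
# Excerpt from a hypothetical MNE-BIDS-Pipeline configuration file.
# memory_location=True caches into "<deriv_root>/_cache" (see memory_subdir).
memory_location = True
memory_subdir = "_cache"
memory_file_method = "hash"  # more robust than "mtime" on some filesystems
memory_verbose = 1           # have joblib log which cached calls are made
```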
From d8c1e4b683f45567f0c1aad59ab87c89355bf34b Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 2 Apr 2024 07:24:58 +0200
Subject: [PATCH 104/132] [pre-commit.ci] pre-commit autoupdate (#917)
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 00b8889fe..b5268cfa0 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,7 +6,7 @@ files: ^(.*\.(py|yaml))$
exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.3.4
+ rev: v0.3.5
hooks:
- id: ruff
args: ["--fix"]
From 3eccc1dde190b498b21783a76789304e56e3ab54 Mon Sep 17 00:00:00 2001
From: Sophie Herbst
Date: Tue, 2 Apr 2024 16:02:37 +0200
Subject: [PATCH 105/132] enhance documentation of caching, continuation of
#914 (#918)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: Richard Höchenberger
---
docs/source/v1.9.md.inc | 2 +-
mne_bids_pipeline/_config.py | 8 ++++++--
2 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/docs/source/v1.9.md.inc b/docs/source/v1.9.md.inc
index a8f897da5..b81a3d8c6 100644
--- a/docs/source/v1.9.md.inc
+++ b/docs/source/v1.9.md.inc
@@ -20,7 +20,7 @@
- We removed the `Execution` section from configuration options documentation and
replaced it with new, more explicit sections (namely, Caching, Parallelization,
- Logging, and Error handling). (#914 by @hoechenberger)
+ Logging, and Error handling), and enhanced documentation. (#914 by @hoechenberger, #916 by @SophieHerbst)
### :bug: Bug fixes
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index c4d382a3f..e285a0e9a 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -2174,8 +2174,12 @@ def noise_cov(bids_path):
# %%
# # Caching
#
-# These settings control if and how pipeline output is being cached to avoid unnecessary
-# computations on a re-run.
+# By default, the pipeline output is cached (temporarily stored)
+# to avoid unnecessary reruns of previously computed steps.
+# For consistency, changes in configuration parameters trigger
+# automatic reruns of previous steps.
+# !!! info
+# To force rerunning a given step, run the pipeline with the option: `--no-cache`.
memory_location: PathLike | bool | None = True
"""
From 628877087730e8c7e6b3b9a30ff5b4d313e1a545 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Richard=20H=C3=B6chenberger?=
Date: Tue, 2 Apr 2024 18:14:50 +0200
Subject: [PATCH 106/132] Improve documentation and config validation of
`loose` and `depth` parameters; drop support for `loose=None` (#915)
Co-authored-by: Eric Larson
---
docs/source/v1.9.md.inc | 7 +++----
mne_bids_pipeline/_config.py | 37 ++++++++++++++++++++++--------------
2 files changed, 26 insertions(+), 18 deletions(-)
diff --git a/docs/source/v1.9.md.inc b/docs/source/v1.9.md.inc
index b81a3d8c6..6ac23551a 100644
--- a/docs/source/v1.9.md.inc
+++ b/docs/source/v1.9.md.inc
@@ -6,16 +6,15 @@
- The type annotations in the default configuration file are now easier to read: We
replaced `Union[X, Y]` with `X | Y` and `Optional[X]` with `X | None`. (#908, #911 by @hoechenberger)
-[//]: # (### :warning: Behavior changes)
+### :warning: Behavior changes
-[//]: # (- Whatever (#000 by @whoever))
+- The [`depth`][mne_bids_pipeline._config.depth] parameter doesn't accept `None`
+ anymore. Please use `0` instead. (#915 by @hoechenberger)
### :package: Requirements
- We dropped support for Python 3.9. You now need Python 3.10 or newer. (#908 by @hoechenberger)
-[//]: # (- Whatever (#000 by @whoever))
-
### :book: Documentation
- We removed the `Execution` section from configuration options documentation and
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index e285a0e9a..b28c096dd 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1979,24 +1979,33 @@ def mri_landmarks_kind(bids_path):
# ## Inverse solution
-loose: float | Literal["auto"] = 0.2
+loose: Annotated[float, Interval(ge=0, le=1)] | Literal["auto"] = 0.2
"""
-Value that weights the source variances of the dipole components
-that are parallel (tangential) to the cortical surface. If `0`, then the
-inverse solution is computed with **fixed orientation.**
-If `1`, it corresponds to **free orientation.**
-The default value, `'auto'`, is set to `0.2` for surface-oriented source
-spaces, and to `1.0` for volumetric, discrete, or mixed source spaces,
-unless `fixed is True` in which case the value 0. is used.
+A value between 0 and 1 that weights the source variances of the dipole components
+that are parallel (tangential) to the cortical surface.
+
+If `0`, then the inverse solution is computed with **fixed orientation**, i.e.,
+only dipole components perpendicular to the cortical surface are considered.
+
+If `1`, it corresponds to **free orientation**, i.e., dipole components with any
+orientation are considered.
+
+The default value, `0.2`, is suitable for surface-oriented source spaces.
+
+For volume or mixed source spaces, choose `1.0`.
+
+!!! info
+ Support for modeling volume and mixed source spaces will be added in a future
+ version of MNE-BIDS-Pipeline.
"""
-depth: float | dict | None = 0.8
+depth: Annotated[float, Interval(ge=0, le=1)] | dict = 0.8
"""
-If float (default 0.8), it acts as the depth weighting exponent (`exp`)
-to use (must be between 0 and 1). None is equivalent to 0, meaning no
-depth weighting is performed. Can also be a `dict` containing additional
-keyword arguments to pass to :func:`mne.forward.compute_depth_prior`
-(see docstring for details and defaults).
+If a number, it acts as the depth weighting exponent to use
+(must be between `0` and `1`), with `0` meaning no depth weighting is performed.
+
+Can also be a dictionary containing additional keyword arguments to pass to
+`mne.forward.compute_depth_prior` (see docstring for details and defaults).
"""
inverse_method: Literal["MNE", "dSPM", "sLORETA", "eLORETA"] = "dSPM"
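
The `Annotated[float, Interval(ge=0, le=1)]` form lets pydantic reject out-of-range values at config-validation time. A small sketch of how such a constraint behaves, using `annotated_types.Interval` with a pydantic `TypeAdapter`; treat the exact validation plumbing as an assumption, since the pipeline's own wiring isn't shown in this diff:

```python
from typing import Annotated, Literal

from annotated_types import Interval
from pydantic import TypeAdapter, ValidationError

LooseT = Annotated[float, Interval(ge=0, le=1)] | Literal["auto"]

adapter = TypeAdapter(LooseT)
print(adapter.validate_python(0.2))     # accepted
print(adapter.validate_python("auto"))  # accepted
try:
    adapter.validate_python(1.5)        # outside [0, 1] -> rejected
except ValidationError as err:
    print("rejected:", err.errors()[0]["type"])
```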
From cbeeb98d0625ac948f8a6951305de11520eac5d5 Mon Sep 17 00:00:00 2001
From: Sophie Herbst
Date: Tue, 2 Apr 2024 18:52:51 +0200
Subject: [PATCH 107/132] change default for info to use for inverse #905
(#919)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Eric Larson
---
docs/source/v1.9.md.inc | 3 +++
mne_bids_pipeline/_config.py | 23 +++++++++++++++----
mne_bids_pipeline/_config_import.py | 1 -
.../steps/source/_04_make_forward.py | 16 +++++++++++--
.../steps/source/_05_make_inverse.py | 14 ++++++++++-
5 files changed, 48 insertions(+), 9 deletions(-)
diff --git a/docs/source/v1.9.md.inc b/docs/source/v1.9.md.inc
index 6ac23551a..443a20ebe 100644
--- a/docs/source/v1.9.md.inc
+++ b/docs/source/v1.9.md.inc
@@ -8,6 +8,9 @@
### :warning: Behavior changes
+- Changed default for `source_info_path_update` to `None`. In `_04_make_forward.py`
+ and `_05_make_inverse.py`, we retrieve the info from the file from which
+  the `noise_cov` is computed. (#919 by @SophieHerbst)
- The [`depth`][mne_bids_pipeline._config.depth] parameter doesn't accept `None`
anymore. Please use `0` instead. (#915 by @hoechenberger)
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index b28c096dd..1d14b03bf 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -2098,15 +2098,17 @@ def noise_cov(bids_path):
of `mne.compute_covariance` for details.
"""
-source_info_path_update: dict[str, str] | None = dict(suffix="ave")
+source_info_path_update: dict[str, str] | None = None
"""
-When computing the forward and inverse solutions, by default the pipeline
-retrieves the `mne.Info` object from the cleaned evoked data. However, in
-certain situations you may wish to use a different `Info`.
-
+When computing the forward and inverse solutions, it is important to
+provide the `mne.Info` object from the data on which the noise covariance was
+computed, to avoid problems resulting from mismatching ranks.
This parameter allows you to explicitly specify from which file to retrieve the
`mne.Info` object. Use this parameter to supply a dictionary to
`BIDSPath.update()` during the forward and inverse processing steps.
+If set to `None` (default), the info is retrieved from the cleaned raw file
+used to compute the `noise_cov` (if `noise_cov` is `'rest'` or `'noise'`),
+and from the cleaned evoked data otherwise (i.e., if `noise_cov` is `None`
+or `'ad-hoc'`).
???+ example "Example"
Use the `Info` object stored in the cleaned epochs:
@@ -2114,6 +2116,17 @@ def noise_cov(bids_path):
source_info_path_update = {'processing': 'clean',
'suffix': 'epo'}
```
+
+ Use the `Info` object stored in a raw file (e.g. resting state):
+ ```python
+ source_info_path_update = {'processing': 'clean',
+ 'suffix': 'raw',
+ 'task': 'rest'}
+ ```
+    If you set `noise_cov = 'rest'` and `source_info_path_update = None`,
+    the behavior is identical to the example above: the pipeline will
+    automatically use the resting-state data, as in the sketch below.
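+    ```python
+    noise_cov = 'rest'
+    # With the default of None, the info is read from the cleaned
+    # resting-state raw file used to compute the noise covariance.
+    source_info_path_update = None
+    ```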
+
"""
inverse_targets: list[Literal["evoked"]] = ["evoked"]
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index a52c82119..63c176ffa 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -344,7 +344,6 @@ def _default_factory(key, val):
allowlist = [
{"n_mag": 1, "n_grad": 1, "n_eeg": 1}, # n_proj_*
{"custom": (8, 24.0, 40)}, # decoding_csp_freqs
- {"suffix": "ave"}, # source_info_path_update
["evoked"], # inverse_targets
[4, 8, 16], # autoreject_n_interpolate
]
diff --git a/mne_bids_pipeline/steps/source/_04_make_forward.py b/mne_bids_pipeline/steps/source/_04_make_forward.py
index 12342cfa6..1596cadff 100644
--- a/mne_bids_pipeline/steps/source/_04_make_forward.py
+++ b/mne_bids_pipeline/steps/source/_04_make_forward.py
@@ -23,7 +23,7 @@
from ..._logging import gen_log_kwargs, logger
from ..._parallel import get_parallel_backend, parallel_func
from ..._report import _open_report, _render_bem
-from ..._run import _prep_out_files, failsafe_run, save_logs
+from ..._run import _prep_out_files, _sanitize_callable, failsafe_run, save_logs
def _prepare_trans_template(
@@ -102,7 +102,18 @@ def get_input_fnames_forward(*, cfg, subject, session):
check=False,
)
in_files = dict()
- in_files["info"] = bids_path.copy().update(**cfg.source_info_path_update)
+ # for consistency with 05_make_inverse, read the info from the
+ # data used for the noise_cov
+ if cfg.source_info_path_update is None:
+ if cfg.noise_cov in ("rest", "noise"):
+ source_info_path_update = dict(
+ processing="clean", suffix="raw", task=cfg.noise_cov
+ )
+ else:
+ source_info_path_update = dict(suffix="ave")
+ else:
+ source_info_path_update = cfg.source_info_path_update
+ in_files["info"] = bids_path.copy().update(**source_info_path_update)
bem_path = cfg.fs_subjects_dir / cfg.fs_subject / "bem"
_, tag = _get_bem_conductivity(cfg)
in_files["bem"] = bem_path / f"{cfg.fs_subject}-{tag}-bem-sol.fif"
@@ -242,6 +253,7 @@ def get_config(
use_template_mri=config.use_template_mri,
adjust_coreg=config.adjust_coreg,
source_info_path_update=config.source_info_path_update,
+ noise_cov=_sanitize_callable(config.noise_cov),
ch_types=config.ch_types,
fs_subject=get_fs_subject(config=config, subject=subject),
fs_subjects_dir=get_fs_subjects_dir(config),
diff --git a/mne_bids_pipeline/steps/source/_05_make_inverse.py b/mne_bids_pipeline/steps/source/_05_make_inverse.py
index c623ef7ee..9cc01b74f 100644
--- a/mne_bids_pipeline/steps/source/_05_make_inverse.py
+++ b/mne_bids_pipeline/steps/source/_05_make_inverse.py
@@ -48,7 +48,19 @@ def get_input_fnames_inverse(
check=False,
)
in_files = dict()
- in_files["info"] = bids_path.copy().update(**cfg.source_info_path_update)
+    # make sure the info matches the data from which the noise cov
+    # is computed, to avoid a rank mismatch
+ if cfg.source_info_path_update is None:
+ if cfg.noise_cov in ("rest", "noise"):
+ source_info_path_update = dict(
+ processing="clean", suffix="raw", task=cfg.noise_cov
+ )
+ else:
+ source_info_path_update = dict(suffix="ave")
+ # XXX is this the right solution also for noise_cov = 'ad-hoc'?
+ else:
+ source_info_path_update = cfg.source_info_path_update
+ in_files["info"] = bids_path.copy().update(**source_info_path_update)
in_files["forward"] = bids_path.copy().update(suffix="fwd")
if cfg.noise_cov != "ad-hoc":
in_files["cov"] = get_noise_cov_bids_path(
From 856dfe2f0111dc1afe84cd4c5c9729ed7672a446 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Tue, 16 Apr 2024 16:03:49 -0400
Subject: [PATCH 108/132] ENH: Streamline ICA reporting (#899)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: Richard Höchenberger
---
.circleci/config.yml | 2 +-
.circleci/remove_examples.sh | 33 +++++++++++++++++
.circleci/setup_bash.sh | 1 +
docs/source/examples/gen_examples.py | 12 ++++++
docs/source/v1.9.md.inc | 7 +++-
mne_bids_pipeline/_config.py | 10 +++++
mne_bids_pipeline/_run.py | 7 +++-
.../steps/preprocessing/_06a1_fit_ica.py | 28 +++++++-------
.../preprocessing/_06a2_find_ica_artifacts.py | 31 ++++------------
.../steps/preprocessing/_08a_apply_ica.py | 37 ++-----------------
.../tests/configs/config_ERP_CORE.py | 2 +-
mne_bids_pipeline/tests/conftest.py | 4 ++
12 files changed, 96 insertions(+), 78 deletions(-)
create mode 100755 .circleci/remove_examples.sh
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 66d173fb4..e808443a9 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -50,7 +50,7 @@ jobs:
pip install --upgrade --progress-bar off pip
pip install --upgrade --progress-bar off "autoreject @ https://api.github.com/repos/autoreject/autoreject/zipball/master" "mne[hdf5] @ git+https://github.com/mne-tools/mne-python@main" "mne-bids[full] @ https://api.github.com/repos/mne-tools/mne-bids/zipball/main" numba
pip install -ve .[tests]
- pip install "PyQt6!=6.6.1,!=6.6.2" "PyQt6-Qt6!=6.6.1,!=6.6.2"
+ pip install "PyQt6!=6.6.1" "PyQt6-Qt6!=6.6.1,!=6.6.2,!=6.6.3"
- run:
name: Check Qt
command: |
diff --git a/.circleci/remove_examples.sh b/.circleci/remove_examples.sh
new file mode 100755
index 000000000..ee4004442
--- /dev/null
+++ b/.circleci/remove_examples.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+set -eo pipefail
+
+VER=$1
+if [ -z "$VER" ]; then
+ echo "Usage: $0 "
+ exit 1
+fi
+ROOT="$PWD/$VER/examples/"
+if [ ! -d ${ROOT} ]; then
+ echo "Version directory does not exist or appears incorrect:"
+ echo
+ echo "$ROOT"
+ echo
+ echo "Are you on the gh-pages branch and is the ds000117 directory present?"
+ exit 1
+fi
+if [ ! -d ${ROOT}ds000117 ]; then
+ echo "Directory does not exist:"
+ echo
+ echo "${ROOT}ds000117"
+ echo
+ echo "Assuming already pruned and exiting."
+ exit 0
+fi
+echo "Pruning examples in ${ROOT} ..."
+
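+# Delete every subdirectory of $ROOT; "tail -n +2" skips the first match,
+# which is $ROOT itself.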
+find $ROOT -type d -name "*" | tail -n +2 | xargs rm -Rf
+find $ROOT -name "*.html" -exec sed -i /^\ |