Skip to content
This repository has been archived by the owner on Jan 27, 2025. It is now read-only.

Commit

Permalink
Merge pull request #256 from nipreps/enh/bold-registration-experiment
Browse files Browse the repository at this point in the history
ENH: Add bold realignment notebook (ISMRM'25 Experiment 2)
  • Loading branch information
oesteban authored Dec 19, 2024
2 parents 3cba47d + 5123f50 commit 68f199d
Show file tree
Hide file tree
Showing 6 changed files with 1,835 additions and 47 deletions.
1,777 changes: 1,777 additions & 0 deletions docs/notebooks/bold_realignment.ipynb

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,8 @@ test = [

antsopt = [
"ConfigSpace",
"nipreps",
"skimage",
"smac",
]

Expand Down
2 changes: 1 addition & 1 deletion src/eddymotion/data/dmri.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2022 The NiPreps Developers <nipreps@gmail.com>
# Copyright The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
Expand Down
54 changes: 27 additions & 27 deletions src/eddymotion/estimator.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ class EddyMotionEstimator:

@staticmethod
def estimate(
dwdata,
data,
*,
align_kwargs=None,
iter_kwargs=None,
Expand All @@ -53,7 +53,7 @@ def estimate(
Parameters
----------
dwdata : :obj:`~eddymotion.dmri.DWI`
data : :obj:`~eddymotion.dmri.DWI`
The target DWI dataset, represented by this tool's internal
type. The object is used in-place, and will contain the estimated
parameters in its ``em_affines`` property, as well as the rotated
Expand Down Expand Up @@ -88,7 +88,7 @@ def estimate(
"seed": None,
"bvals": None, # TODO: extract b-vals here if pertinent
} | iter_kwargs
iter_kwargs["size"] = len(dwdata)
iter_kwargs["size"] = len(data)

iterfunc = getattr(eutils, f'{iter_kwargs.pop("strategy", "random")}_iterator')
index_order = list(iterfunc(**iter_kwargs))
Expand All @@ -107,9 +107,9 @@ def estimate(

for i_iter, model in enumerate(models):
# When downsampling these need to be set per-level
bmask_img = _prepare_brainmask_data(dwdata.brainmask, dwdata.affine)
bmask_img = _prepare_brainmask_data(data.brainmask, data.affine)

_prepare_kwargs(dwdata, kwargs)
_prepare_kwargs(data, kwargs)

single_model = model.lower() in (
"b0",
Expand All @@ -130,7 +130,7 @@ def estimate(
model=model,
**kwargs,
)
dwmodel.fit(dwdata.dataobj, n_jobs=n_jobs)
dwmodel.fit(data.dataobj, n_jobs=n_jobs)

with TemporaryDirectory() as tmp_dir:
print(f"Processing in <{tmp_dir}>")
Expand All @@ -141,12 +141,12 @@ def estimate(
pbar.set_description_str(
f"Pass {i_iter + 1}/{n_iter} | Fit and predict b-index <{i}>"
)
data_train, data_test = lovo_split(dwdata, i, with_b0=True)
data_train, data_test = lovo_split(data, i, with_b0=True)
grad_str = f"{i}, {data_test[1][:3]}, b={int(data_test[1][3])}"
pbar.set_description_str(f"[{grad_str}], {n_jobs} jobs")

if not single_model: # A true LOGO estimator
if hasattr(dwdata, "gradients"):
if hasattr(data, "gradients"):
kwargs["gtab"] = data_train[1]
# Factory creates the appropriate model and pipes arguments
dwmodel = ModelFactory.init(
Expand All @@ -166,7 +166,7 @@ def estimate(

# prepare data for running ANTs
fixed, moving = _prepare_registration_data(
data_test[0], predicted, dwdata.affine, i, ptmp_dir, reg_target_type
data_test[0], predicted, data.affine, i, ptmp_dir, reg_target_type
)

pbar.set_description_str(
Expand All @@ -177,11 +177,11 @@ def estimate(
fixed,
moving,
bmask_img,
dwdata.em_affines,
dwdata.affine,
dwdata.dataobj.shape[:3],
data.em_affines,
data.affine,
data.dataobj.shape[:3],
data_test[1][3],
dwdata.fieldmap,
data.fieldmap,
i_iter,
i,
ptmp_dir,
Expand All @@ -190,10 +190,10 @@ def estimate(
)

# update
dwdata.set_transform(i, xform.matrix)
data.set_transform(i, xform.matrix)
pbar.update()

return dwdata.em_affines
return data.em_affines


def _prepare_brainmask_data(brainmask, affine):
Expand All @@ -219,7 +219,7 @@ def _prepare_brainmask_data(brainmask, affine):
return bmask_img


def _prepare_kwargs(dwdata, kwargs):
def _prepare_kwargs(data, kwargs):
"""Prepare the keyword arguments depending on the DWI data: add attributes corresponding to
the ``brainmask``, ``bzero``, ``gradients``, ``frame_time``, and ``total_duration`` DWI data
properties.
Expand All @@ -228,24 +228,24 @@ def _prepare_kwargs(dwdata, kwargs):
Parameters
----------
dwdata : :class:`eddymotion.data.dmri.DWI`
data : :class:`eddymotion.data.dmri.DWI`
DWI data object.
kwargs : :obj:`dict`
Keyword arguments.
"""
from eddymotion.data.filtering import advanced_clip as _advanced_clip

if dwdata.brainmask is not None:
kwargs["mask"] = dwdata.brainmask
if data.brainmask is not None:
kwargs["mask"] = data.brainmask

if hasattr(dwdata, "bzero") and dwdata.bzero is not None:
kwargs["S0"] = _advanced_clip(dwdata.bzero)
if hasattr(data, "bzero") and data.bzero is not None:
kwargs["S0"] = _advanced_clip(data.bzero)

if hasattr(dwdata, "gradients"):
kwargs["gtab"] = dwdata.gradients
if hasattr(data, "gradients"):
kwargs["gtab"] = data.gradients

if hasattr(dwdata, "frame_time"):
kwargs["timepoints"] = dwdata.frame_time
if hasattr(data, "frame_time"):
kwargs["timepoints"] = data.frame_time

if hasattr(dwdata, "total_duration"):
kwargs["xlim"] = dwdata.total_duration
if hasattr(data, "total_duration"):
kwargs["xlim"] = data.total_duration
17 changes: 10 additions & 7 deletions src/eddymotion/model/gpr.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,11 +104,12 @@ class EddyMotionGPR(GaussianProcessRegressor):
The reason for that is that such methods typically use fewer steps, and
when the cost of calculating the derivatives is small/moderate compared
to calculating the functions itself (as is the case for Eq. (12)) then
execution time can be much shorter. However, we found that for the
multi-shell case a heuristic optimisation method such as the Nelder-Mead
simplex method (Nelder and Mead, 1965) was frequently better at avoiding
local maxima. Hence, that was the method we used for all optimisations
in the present paper.
execution time can be much shorter.
However, we found that for the multi-shell case a heuristic optimisation
method such as the Nelder-Mead simplex method (Nelder and Mead, 1965) was
frequently better at avoiding local maxima.
Hence, that was the method we used for all optimisations in the present
paper.
**Multi-shell regression (TODO).**
For multi-shell modeling, the kernel :math:`k(\textbf{x}, \textbf{x'})`
Expand All @@ -135,8 +136,10 @@ class EddyMotionGPR(GaussianProcessRegressor):
\mathbf{K} = \left[
\begin{matrix}
\lambda C_{\theta}(\theta (\mathbf{G}_{1}); a) + \sigma_{1}^{2} \mathbf{I} &
\lambda C_{\theta}(\theta (\mathbf{G}_{2}, \mathbf{G}_{1}); a) C_{b}(b_{2}, b_{1}; \ell) \\
\lambda C_{\theta}(\theta (\mathbf{G}_{1}, \mathbf{G}_{2}); a) C_{b}(b_{1}, b_{2}; \ell) &
\lambda C_{\theta}(\theta (\mathbf{G}_{2},
\mathbf{G}_{1}); a) C_{b}(b_{2}, b_{1}; \ell) \\
\lambda C_{\theta}(\theta (\mathbf{G}_{1}, \mathbf{G}_{2});
a) C_{b}(b_{1}, b_{2}; \ell) &
\lambda C_{\theta}(\theta (\mathbf{G}_{2}); a) + \sigma_{2}^{2} \mathbf{I} \\
\end{matrix}
\right]
Expand Down
30 changes: 18 additions & 12 deletions test/test_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,13 +30,15 @@

from eddymotion.data.dmri import DWI
from eddymotion.estimator import EddyMotionEstimator
from eddymotion.registration.utils import displacements_within_mask


def test_proximity_estimator_trivial_model(datadir):
def test_proximity_estimator_trivial_model(datadir, tmp_path):
"""Check the proximity of transforms estimated by the estimator with a trivial B0 model."""

dwdata = DWI.from_filename(datadir / "dwi.h5")
b0nii = nb.Nifti1Image(dwdata.bzero, dwdata.affine, None)
masknii = nb.Nifti1Image(dwdata.brainmask.astype(np.uint8), dwdata.affine, None)

# Generate a list of large-yet-plausible bulk-head motion.
xfms = nt.linear.LinearTransformsMapping(
Expand All @@ -56,8 +58,8 @@ def test_proximity_estimator_trivial_model(datadir):
moved_nii = (~xfms).apply(b0nii, reference=b0nii)

    # Write the moved dataset to disk so it can be inspected visually
# moved_nii.to_filename(tmp_path / "test.nii.gz")
# xfms.apply(moved_nii).to_filename(tmp_path / "ground_truth.nii.gz")
moved_nii.to_filename(tmp_path / "test.nii.gz")
xfms.apply(moved_nii).to_filename(tmp_path / "ground_truth.nii.gz")

# Wrap into dataset object
dwi_motion = DWI(
Expand All @@ -70,7 +72,7 @@ def test_proximity_estimator_trivial_model(datadir):

estimator = EddyMotionEstimator()
em_affines = estimator.estimate(
dwdata=dwi_motion,
data=dwi_motion,
models=("b0",),
seed=None,
align_kwargs={
Expand All @@ -81,14 +83,18 @@ def test_proximity_estimator_trivial_model(datadir):
)

    # Write the realigned dataset to disk so it can be inspected visually
# nt.linear.LinearTransformsMapping(
# em_affines,
# reference=b0nii,
# ).apply(moved_nii).to_filename(tmp_path / "realigned.nii.gz")
nt.linear.LinearTransformsMapping(
em_affines,
reference=b0nii,
).apply(moved_nii).to_filename(tmp_path / "realigned.nii.gz")

# For each moved b0 volume
coords = xfms.reference.ndcoords.T
for i, est in enumerate(em_affines):
xfm = nt.linear.Affine(xfms.matrix[i], reference=b0nii)
est = nt.linear.Affine(est, reference=b0nii)
assert np.sqrt(((xfm.map(coords) - est.map(coords)) ** 2).sum(1)).mean() < 0.2
assert (
displacements_within_mask(
masknii,
nt.linear.Affine(est),
xfms[i],
).max()
< 0.2
)

0 comments on commit 68f199d

Please sign in to comment.