Wrapper dev #77

Merged
merged 23 commits into main from wrapper_dev on Jan 3, 2025
Changes from 12 commits
Commits
23 commits
e6b1126
Bounds fixes
IvanARashid Jan 8, 2024
6a1cff1
Minor change to super inputs
IvanARashid Jan 19, 2024
f60cd1e
Testing for >1D inputs
IvanARashid Jan 19, 2024
ff486c3
Added support for >1D fits
IvanARashid Jan 19, 2024
05cf746
Fixed support for >1D inputs
IvanARashid Jan 19, 2024
8f5d07b
Merge branch 'main' into wrapper_dev
IvanARashid May 4, 2024
09f5c4f
Merge branch 'main' into wrapper_dev
IvanARashid Jul 8, 2024
9799018
Standardized fit results as dictionaries. Modified all of fitting alg…
IvanARashid Jul 8, 2024
aed14cb
Added osipi_fit_full_volume
IvanARashid Aug 14, 2024
0fe7c55
Some adjustments to osipi_fit_full_volume
IvanARashid Aug 14, 2024
6d87890
Added ivim_fit_full_volume to the class
IvanARashid Aug 14, 2024
c927dbc
Adjusted doc string of osipi_fit_full_volume
IvanARashid Aug 14, 2024
89c03ff
Merge branch 'main' into wrapper_dev
IvanARashid Aug 27, 2024
07a89d2
Dictionary output support
IvanARashid Aug 27, 2024
b62e2c6
Changed to dictionary outputs
IvanARashid Aug 27, 2024
51e7264
new version with fixed dictionary output
IvanARashid Sep 27, 2024
563205f
Create wrapper_usage_example.ipynb
IvanARashid Sep 27, 2024
dc99c0c
Merge branch 'main' into wrapper_dev
IvanARashid Dec 9, 2024
2505164
Changed to dictionary outputs
IvanARashid Dec 9, 2024
5fb922f
Dictionary output support + fix of small error
IvanARashid Dec 9, 2024
14e77f9
Merge branch 'wrapper_dev' of https://github.com/OSIPI/TF2.4_IVIM-MRI…
IvanARashid Dec 9, 2024
bce6ec7
Changed to dictionary outputs from osipi_fit
IvanARashid Dec 10, 2024
8d57500
Changed to dictionary outputs from osipi_fit
IvanARashid Dec 10, 2024
16 changes: 13 additions & 3 deletions src/standardized/ETP_SRI_LinearFitting.py
@@ -64,11 +64,21 @@ def ivim_fit(self, signals, bvalues=None, linear_fit_option=False, **kwargs):
ETP_object = LinearFit()
else:
ETP_object = LinearFit(self.thresholds[0])


results = {}
if linear_fit_option:
f, Dstar = ETP_object.linear_fit(bvalues, signals)
return f, Dstar

results["f"] = f
results["D*"] = Dstar

return results
else:
f, D, Dstar = ETP_object.ivim_fit(bvalues, signals)
return f, Dstar, D

results["f"] = f
results["D*"] = Dstar
results["D"] = D

return results
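For reference, a minimal sketch of how a caller might consume the new dictionary-style return. The class name matches the file above, but the import path, constructor arguments, and b-values are assumptions for illustration, not part of this diff.

import numpy as np
from standardized.ETP_SRI_LinearFitting import ETP_SRI_LinearFitting  # import path assumed

bvalues = np.array([0, 50, 100, 200, 400, 800])
signals = np.exp(-0.001 * bvalues)                 # synthetic single-voxel decay

model = ETP_SRI_LinearFitting(thresholds=[200])    # constructor argument assumed
fit = model.ivim_fit(signals, bvalues)             # now returns a dict instead of a tuple

f, Dstar, D = fit["f"], fit["D*"], fit["D"]        # keys introduced in this PR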

41 changes: 37 additions & 4 deletions src/standardized/IAR_LU_biexp.py
@@ -76,8 +76,41 @@ def ivim_fit(self, signals, bvalues, **kwargs):

fit_results = self.IAR_algorithm.fit(signals)

f = fit_results.model_params[1]
Dstar = fit_results.model_params[2]
D = fit_results.model_params[3]
results = {}
results["f"] = fit_results.model_params[1]
results["D*"] = fit_results.model_params[2]
results["D"] = fit_results.model_params[3]

return f, Dstar, D
return results

def ivim_fit_full_volume(self, signals, bvalues, **kwargs):
"""Perform the IVIM fit

Args:
signals (array-like)
bvalues (array-like, optional): b-values for the signals. If None, self.bvalues will be used. Default is None.

Returns:
_type_: _description_
"""

if self.IAR_algorithm is None:
if bvalues is None:
bvalues = self.bvalues
else:
bvalues = np.asarray(bvalues)

bvec = np.zeros((bvalues.size, 3))
bvec[:,2] = 1
gtab = gradient_table(bvalues, bvec, b0_threshold=0)

self.IAR_algorithm = IvimModelBiExp(gtab)

fit_results = self.IAR_algorithm.fit(signals)

results = {}
results["f"] = fit_results.model_params[..., 1]
results["D*"] = fit_results.model_params[..., 2]
results["D"] = fit_results.model_params[..., 3]

return results
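The practical difference between the single-voxel and full-volume paths above is the indexing of model_params: model_params[1] reads one entry of a 1D parameter vector, while model_params[..., 1] slices the last axis of an N-D array and yields a parametric map with the spatial shape of the input (index 0 presumably being S0). A NumPy-only sketch of that indexing, with illustrative shapes:

import numpy as np

# Pretend fit output for a 4x4x3 volume with 4 model parameters per voxel
model_params = np.random.rand(4, 4, 3, 4)

results = {
    "f":  model_params[..., 1],   # each map has shape (4, 4, 3)
    "D*": model_params[..., 2],
    "D":  model_params[..., 3],
}
print(results["f"].shape)          # (4, 4, 3)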
12 changes: 8 additions & 4 deletions src/standardized/IAR_LU_modified_mix.py
@@ -76,8 +76,12 @@ def ivim_fit(self, signals, bvalues, **kwargs):

fit_results = self.IAR_algorithm.fit(signals)

f = fit_results.model_params[1]
Dstar = fit_results.model_params[2]
D = fit_results.model_params[3]
#f = fit_results.model_params[1]
#Dstar = fit_results.model_params[2]
#D = fit_results.model_params[3]
results = {}
results["f"] = fit_results.model_params[1]
results["D*"] = fit_results.model_params[2]
results["D"] = fit_results.model_params[3]

return f, Dstar, D
return results
14 changes: 10 additions & 4 deletions src/standardized/IAR_LU_modified_topopro.py
@@ -76,8 +76,14 @@ def ivim_fit(self, signals, bvalues, **kwargs):

fit_results = self.IAR_algorithm.fit(signals)

f = fit_results.model_params[1]
Dstar = fit_results.model_params[2]
D = fit_results.model_params[3]
#f = fit_results.model_params[1]
#Dstar = fit_results.model_params[2]
#D = fit_results.model_params[3]

return f, Dstar, D
#return f, Dstar, D
results = {}
results["f"] = fit_results.model_params[1]
results["D*"] = fit_results.model_params[2]
results["D"] = fit_results.model_params[3]

return results
14 changes: 10 additions & 4 deletions src/standardized/IAR_LU_segmented_2step.py
@@ -77,8 +77,14 @@ def ivim_fit(self, signals, bvalues, thresholds=None, **kwargs):

fit_results = self.IAR_algorithm.fit(signals)

f = fit_results.model_params[1]
Dstar = fit_results.model_params[2]
D = fit_results.model_params[3]
#f = fit_results.model_params[1]
#Dstar = fit_results.model_params[2]
#D = fit_results.model_params[3]

return f, Dstar, D
#return f, Dstar, D
results = {}
results["f"] = fit_results.model_params[1]
results["D*"] = fit_results.model_params[2]
results["D"] = fit_results.model_params[3]

return results
14 changes: 10 additions & 4 deletions src/standardized/IAR_LU_segmented_3step.py
@@ -76,8 +76,14 @@ def ivim_fit(self, signals, bvalues, **kwargs):

fit_results = self.IAR_algorithm.fit(signals)

f = fit_results.model_params[1]
Dstar = fit_results.model_params[2]
D = fit_results.model_params[3]
#f = fit_results.model_params[1]
#Dstar = fit_results.model_params[2]
#D = fit_results.model_params[3]

return f, Dstar, D
#return f, Dstar, D
results = {}
results["f"] = fit_results.model_params[1]
results["D*"] = fit_results.model_params[2]
results["D"] = fit_results.model_params[3]

return results
14 changes: 10 additions & 4 deletions src/standardized/IAR_LU_subtracted.py
@@ -76,8 +76,14 @@ def ivim_fit(self, signals, bvalues, **kwargs):

fit_results = self.IAR_algorithm.fit(signals)

f = fit_results.model_params[1]
Dstar = fit_results.model_params[2]
D = fit_results.model_params[3]
#f = fit_results.model_params[1]
#Dstar = fit_results.model_params[2]
#D = fit_results.model_params[3]

return f, Dstar, D
#return f, Dstar, D
results = {}
results["f"] = fit_results.model_params[1]
results["D*"] = fit_results.model_params[2]
results["D"] = fit_results.model_params[3]

return results
9 changes: 5 additions & 4 deletions src/standardized/OGC_AmsterdamUMC_Bayesian_biexp.py
@@ -78,8 +78,9 @@ def ivim_fit(self, signals, bvalues, initial_guess=None, **kwargs):
fit_results=fit_results+(1,)
fit_results = self.OGC_algorithm(bvalues, signals, self.neg_log_prior, x0=fit_results, fitS0=self.fitS0)

D = fit_results[0]
f = fit_results[1]
Dstar = fit_results[2]
results = {}
results["D"] = fit_results[0]
results["f"] = fit_results[1]
results["D*"] = fit_results[2]

return f, Dstar, D
return results
16 changes: 10 additions & 6 deletions src/standardized/OGC_AmsterdamUMC_biexp.py
@@ -35,9 +35,11 @@ def __init__(self, bvalues=None, thresholds=None, bounds=([0, 0, 0.005, 0.7],[0.
Our OsipiBase object could contain functions that compare the inputs with
the requirements.
"""
super(OGC_AmsterdamUMC_biexp, self).__init__(bvalues, bounds, initial_guess, fitS0)
#super(OGC_AmsterdamUMC_biexp, self).__init__(bvalues, bounds, initial_guess, fitS0)
super(OGC_AmsterdamUMC_biexp, self).__init__(bvalues=bvalues, bounds=bounds, initial_guess=initial_guess)
self.OGC_algorithm = fit_least_squares
self.initialize(bounds, initial_guess, fitS0)
#self.initialize(bounds, initial_guess, fitS0)
self.fitS0=fitS0

def initialize(self, bounds, initial_guess, fitS0):
if bounds is None:
@@ -60,13 +62,15 @@ def ivim_fit(self, signals, bvalues, initial_guess=None, **kwargs):
Returns:
_type_: _description_
"""

if initial_guess is not None and len(initial_guess) == 4:
self.initial_guess = initial_guess
bvalues=np.array(bvalues)
fit_results = self.OGC_algorithm(bvalues, signals, p0=self.initial_guess, bounds=self.bounds, fitS0=self.fitS0)

D = fit_results[0]
f = fit_results[1]
Dstar = fit_results[2]
results = {}
results["D"] = fit_results[0]
results["f"] = fit_results[1]
results["D*"] = fit_results[2]

return f, Dstar, D
return results
9 changes: 5 additions & 4 deletions src/standardized/OGC_AmsterdamUMC_biexp_segmented.py
@@ -69,8 +69,9 @@ def ivim_fit(self, signals, bvalues, initial_guess=None, **kwargs):
bvalues=np.array(bvalues)
fit_results = self.OGC_algorithm(bvalues, signals, bounds=self.bounds, cutoff=self.thresholds, p0=self.initial_guess)

D = fit_results[0]
f = fit_results[1]
Dstar = fit_results[2]
results = {}
results["D"] = fit_results[0]
results["f"] = fit_results[1]
results["D*"] = fit_results[2]

return f, Dstar, D
return results
11 changes: 6 additions & 5 deletions src/standardized/OJ_GU_seg.py
@@ -63,9 +63,10 @@ def ivim_fit(self, signals, bvalues=None):
bthr = self.thresholds[0]

fit_results = seg(signals, bvalues, bthr)

results = {}
results["f"] = fit_results['f']
results["D*"] = fit_results['Dstar']
results["D"] = fit_results['D']

f = fit_results['f']
Dstar = fit_results['Dstar']
D = fit_results['D']

return f, Dstar, D
return results
11 changes: 6 additions & 5 deletions src/standardized/PV_MUMC_biexp.py
@@ -48,9 +48,10 @@ def ivim_fit(self, signals, bvalues=None):


fit_results = self.PV_algorithm(bvalues, signals)

results = {}
results["f"] = fit_results[1]
results["D*"] = fit_results[2]
results["D"] = fit_results[0]

f = fit_results[1]
Dstar = fit_results[2]
D = fit_results[0]

return f, Dstar, D
return results
9 changes: 5 additions & 4 deletions src/standardized/PvH_KB_NKI_IVIMfit.py
@@ -55,8 +55,9 @@ def ivim_fit(self, signals, bvalues=None):
signals = np.reshape(signals, (1, 1, 1, len(signals))) # assuming that in this test the signals are always single voxel
fit_results = self.NKI_algorithm(signals,bvalues)

D = fit_results[0][0,0,0]/1000
f = fit_results[1][0,0,0]
Dstar = fit_results[2][0,0,0]/1000
results = {}
results["D"] = fit_results[0][0,0,0]/1000
results["f"] = fit_results[1][0,0,0]
results["D*"] = fit_results[2][0,0,0]/1000

return f, Dstar, D
return results
72 changes: 69 additions & 3 deletions src/wrappers/OsipiBase.py
@@ -3,6 +3,7 @@
from scipy.stats import norm
import pathlib
import sys
from tqdm import tqdm

class OsipiBase:
"""The base class for OSIPI IVIM fitting"""
@@ -47,7 +48,7 @@ def initialize(**kwargs):
pass

#def osipi_fit(self, data=None, bvalues=None, thresholds=None, bounds=None, initial_guess=None, **kwargs):
def osipi_fit(self, data, bvalues, **kwargs):
def osipi_fit(self, data, bvalues=None, **kwargs):
"""Fits the data with the bvalues
Returns [S0, f, Dstar, D]
"""
@@ -68,7 +69,6 @@ def osipi_fit(self, data, bvalues, **kwargs):
#kwargs["bvalues"] = use_bvalues

#args = [data, use_bvalues, use_thresholds]
args = [data, use_bvalues]
#if self.required_bounds or self.required_bounds_optional:
#args.append(use_bounds)
#if self.required_initial_guess or self.required_initial_guess_optional:
@@ -83,11 +83,77 @@

#args = [data, use_bvalues, use_initial_guess, use_bounds, use_thresholds]
#args = [arg for arg in args if arg is not None]
results = self.ivim_fit(*args, **kwargs)

# Check if there is an attribute that defines the result dictionary keys
if hasattr(self, "result_keys"):
# result_keys is a list of strings of parameter names, e.g. "S0", "f1", "f2", etc.
result_keys = self.result_keys
else:
# Default is ["f", "D*", "D"]
self.result_keys = ["f", "D*", "D"]

results = {}
for key in self.result_keys:
results[key] = np.empty(list(data.shape[:-1]))

# Assuming the last dimension of the data is the signal values of each b-value
#results = np.empty(list(data.shape[:-1])+[3]) # Create an array with the voxel dimensions + the ones required for the fit
#for ijk in tqdm(np.ndindex(data.shape[:-1]), total=np.prod(data.shape[:-1])):
#args = [data[ijk], use_bvalues]
#fit = list(self.ivim_fit(*args, **kwargs))
#results[ijk] = fit

for ijk in tqdm(np.ndindex(data.shape[:-1]), total=np.prod(data.shape[:-1])):
Contributor:
I've been thinking about parallelizing the fitting and have something similar running as a separate script. I'm thinking this structure is fine for now but will eventually be too rigid. I think we'd want each algorithm to determine what is fed into each step; perhaps some combine all directions in one input, for example. Perhaps there's a generic generator that does essentially this, but can be overridden by the algorithm to supply data as it sees fit.

I should just finish up my changes and push them; maybe it would make more sense to have it all in context.

Contributor (author):
I was working on fixing the notebook yesterday evening and ran into the volume being too large: fitting would take an hour if I didn't use Paulien and Merel's selective indexing (I tried to avoid the 1D data reformatting). I agree that a loop like this would be a good place for some parallelization, although I'd rather not be the one doing it, as it has been a great source of headaches in the past :)

But regarding multi-directional data, the only way I've seen and handled such data has been as a long list of b-values with separate header-info to discriminate the directions. And it is sort of the easiest way to do it. I think DIPY essentially forces you to do it that way to avoid these formatting issues.

I'm gonna push my fixes and we could hold off on this merge until you've pushed yours?
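As an aside for readers unfamiliar with the convention mentioned above: multi-directional acquisitions are usually flattened into one long list of b-values with a matching list of gradient directions to tell the repeats apart. A minimal sketch in the DIPY style already used in this PR; the values are illustrative only.

import numpy as np
from dipy.core.gradients import gradient_table

# Two directions acquired at the same b-values -> five entries in one flat list
bvals = np.array([0, 200, 800, 200, 800])
bvecs = np.array([[0, 0, 0],
                  [1, 0, 0],
                  [1, 0, 0],
                  [0, 1, 0],
                  [0, 1, 0]], dtype=float)

gtab = gradient_table(bvals, bvecs, b0_threshold=0)
print(gtab.bvals.shape, gtab.bvecs.shape)   # (5,) (5, 3)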

Collaborator:
Hey! My commits already do parallel fits. I'm not sure whether it is the most efficient implementation, but it is a simple one.

# here we try parallel computing, but if fails, go back to computing one single core.
single = False
if njobs > 2:
    try:
        # define the parallel function
        def parfun(i):
            return fit_segmented(bvalues, dw_data[i, :], bounds=bounds, cutoff=cutoff,p0=p0)


        output = Parallel(n_jobs=njobs)(delayed(parfun)(i) for i in tqdm(range(len(dw_data)), position=0, leave=True))
        Dt, Fp, Dp = np.transpose(output)
    except:
        # if fails, retry using single core
        single = True
else:
    # or, if specified, immediately go to single core
    single = True
if single:
    # initialize empty arrays
    Dp = np.zeros(len(dw_data))
    Dt = np.zeros(len(dw_data))
    Fp = np.zeros(len(dw_data))
    for i in tqdm(range(len(dw_data)), position=0, leave=True):
        # fill arrays with fit results on a per voxel base:
        Dt[i], Fp[i], Dp[i] = fit_segmented(bvalues, dw_data[i, :], bounds=bounds, cutoff=cutoff,p0=p0)

args = [data[ijk], use_bvalues]
fit = self.ivim_fit(*args, **kwargs) # For single voxel fits, we assume this is a dict with a float value per key.
for key in list(fit.keys()):
results[key][ijk] = fit[key]

#self.parameter_estimates = self.ivim_fit(data, bvalues)
return results

def osipi_fit_full_volume(self, data, bvalues=None, **kwargs):
"""Sends a full volume in one go to the fitting algorithm. The osipi_fit method only sends one voxel at a time.

Args:
data (array): 3D (single slice) or 4D (multi slice) DWI data.
bvalues (array, optional): The b-values of the DWI data. Defaults to None.

Returns:
results (dict): Dict with key each containing an array which is a parametric map.
"""

try:
use_bvalues = bvalues if bvalues is not None else self.bvalues
if use_bvalues is not None: use_bvalues = np.asarray(use_bvalues)

# Check if there is an attribute that defines the result dictionary keys
if hasattr(self, "result_keys"):
# result_keys is a list of strings of parameter names, e.g. "S0", "f1", "f2", etc.
result_keys = self.result_keys
else:
# Default is ["f", "D*", "D"]
self.result_keys = ["f", "D*", "D"]

# Create the results dictionary
results = {}
for key in self.result_keys:
results[key] = np.empty(list(data.shape[:-1]))

args = [data, use_bvalues]
fit = self.ivim_fit_full_volume(*args, **kwargs) # Assume this is a dict with an array per key representing the parametric maps
for key in list(fit.keys()):
results[key] = fit[key]

return results

except:
# Check if the problem is that full volume fitting is simply not supported in the standardized implementation
if not hasattr(self, "ivim_fit_full_volume"): #and callable(getattr(self, "ivim_fit_full_volume")):
print("Full volume fitting not supported for this algorithm")

return False

def osipi_print_requirements(self):
"""
Prints the requirements of the algorithm.
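To summarize the new OsipiBase behaviour: osipi_fit loops voxel-wise over every axis except the last (b-value) axis and assembles one parametric map per key in result_keys, while osipi_fit_full_volume hands the whole array to the algorithm in a single call and returns False when the algorithm has no full-volume implementation. A usage sketch; the algorithm class, import path, and constructor arguments are assumptions, and the data is synthetic.

import numpy as np
from standardized.IAR_LU_biexp import IAR_LU_biexp    # import path assumed

bvalues = np.array([0, 50, 100, 200, 400, 800])
data = np.random.rand(32, 32, 10, len(bvalues))       # synthetic 4D volume: x, y, z, b

model = IAR_LU_biexp(bvalues=bvalues)                  # constructor signature assumed

# Voxel-wise path: dict of 3D maps, built one voxel at a time
maps = model.osipi_fit(data, bvalues)
print(maps["f"].shape, maps["D*"].shape, maps["D"].shape)   # each (32, 32, 10)

# Full-volume path: same dict layout, but the algorithm sees the whole array at once
maps_fv = model.osipi_fit_full_volume(data, bvalues)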