diff --git a/requirements.txt b/requirements.txt
index e845f8033b..5dee8fa175 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -15,7 +15,7 @@ nilearn==0.6.*
 numpy==1.21.*
 Pillow==9.0.1
 bids-validator==1.6.0
-pybids==0.10.*
+pybids==0.15.*
 pyparsing==2.2.*
 PySocks==1.7.*
 python-dateutil==2.7.*
@@ -38,3 +38,4 @@ dmri-commit==1.4.*
 openpyxl==2.6.*
 cvxpy==1.1.*
 dmri-amico==1.2.*
+formulaic==0.2.4
diff --git a/scilpy/gradientsampling/optimize_gradient_sampling.py b/scilpy/gradientsampling/optimize_gradient_sampling.py
index 1f56dc4f29..27737a96ce 100644
--- a/scilpy/gradientsampling/optimize_gradient_sampling.py
+++ b/scilpy/gradientsampling/optimize_gradient_sampling.py
@@ -49,7 +49,8 @@ def swap_sampling_eddy(points, shell_idx, verbose=1):
 
     # System energy matrix
     # TODO: test other energy functions such as electron repulsion
-    dist = squareform(pdist(shell_pts, 'Euclidean')) + 2 * np.eye(shell_pts.shape[0])
+    dist = squareform(pdist(shell_pts, 'Euclidean')) \
+        + 2 * np.eye(shell_pts.shape[0])
 
     it = 0
     converged = False
@@ -183,10 +184,9 @@ def correct_b0s_philips(points, shell_idx, verbose=1):
 
     new_points = points.copy()
 
-    non_b0_pts = points[np.where(shell_idx != -1)]
-
     # Assume non-collinearity of non-b0s bvecs (i.e. Caruyer sampler type)
-    new_points[np.where(shell_idx == -1)[0]] = non_b0_pts
+    new_points[np.where(shell_idx == -1)[0][1:]] \
+        = new_points[np.where(shell_idx == -1)[0][1:] - 1]
 
     logging.info('Done adapting b0s for Philips scanner.')
 
@@ -260,7 +260,8 @@ def compute_min_duty_cycle_bruteforce(points, shell_idx, bvals, ker_size=10,
         logging.debug('Iter {} / {} : {}'.format(it, Niter, power_best))
 
         ordering_current = np.random.permutation(N_dir)
-        q_scheme_current[non_b0s_mask] = q_scheme[non_b0s_mask][ordering_current]
+        q_scheme_current[non_b0s_mask] \
+            = q_scheme[non_b0s_mask][ordering_current]
 
         power_current = compute_peak_power(q_scheme_current, ker_size=ker_size)
 
diff --git a/scilpy/reconst/dti.py b/scilpy/reconst/dti.py
new file mode 100644
index 0000000000..3bb764f70b
--- /dev/null
+++ b/scilpy/reconst/dti.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+
+import numpy as np
+
+supported_tensor_formats = ['fsl', 'nifti', 'mrtrix', 'dipy']
+tensor_format_description = \
+    """
+    Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]
+    Shape: [i, j, k, 6].
+    Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639
+
+    MRTRIX's order is [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]
+    Shape: [i, j, k, 6].
+    Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html
+
+    ANTS's order ('nifti format') is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].
+    Shape: [i, j, k, 1, 6] (Careful, file is 5D).
+    Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software
+
+    FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]
+    Shape: [i, j, k, 6].
+    Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide
+    (Also used for the Fibernavigator)
+    """
+
+
+def convert_tensor_to_dipy_format(tensor, initial_format):
+    """
+    See the format descriptions at the top of this file.
+    """
+    assert initial_format in supported_tensor_formats, \
+        "Tensor format not supported"
+
+    if initial_format in ['nifti', 'dipy']:
+        correct_order = [0, 1, 2, 3, 4, 5]
+        tensor = np.squeeze(tensor)
+    elif initial_format == 'mrtrix':
+        correct_order = [0, 3, 1, 4, 5, 2]
+    else:  # initial_format == 'fsl':
+        correct_order = [0, 1, 3, 2, 4, 5]
+
+    return tensor[..., correct_order]
+
+
+def convert_tensor_from_dipy_format(tensor, final_format):
+    """
+    See the format descriptions at the top of this file.
+    """
+    assert final_format in supported_tensor_formats, \
+        "Tensor format not supported"
+
+    if final_format in ['nifti', 'dipy']:
+        correct_order = [0, 1, 2, 3, 4, 5]
+    elif final_format == 'mrtrix':
+        correct_order = [0, 2, 5, 1, 3, 4]
+    else:  # final_format == 'fsl'.
+        correct_order = [0, 1, 3, 2, 4, 5]
+
+    tensor_reordered = tensor[..., correct_order]
+
+    if final_format == 'nifti':
+        # The nifti format is 5D: re-insert the singleton 4th axis.
+        tensor_reordered = tensor_reordered[:, :, :, None, :]
+
+    return tensor_reordered
+
+
+def convert_tensor_format(tensor, initial_format, final_format):
+    """
+    See the format descriptions at the top of this file.
+    """
+    tensor = convert_tensor_to_dipy_format(tensor, initial_format)
+    return convert_tensor_from_dipy_format(tensor, final_format)
+
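As a review aid (not part of the patch): both converters are pure reindexings through the dipy order, so the tables are easy to sanity-check on a synthetic volume. A minimal sketch, assuming scilpy with this branch installed; note that convert_tensor_to_dipy_format() squeezes singleton axes (for the 5D nifti layout), so the volume below avoids singleton spatial dimensions:

    import numpy as np
    from scilpy.reconst.dti import convert_tensor_format

    # 2x2x2 volume; every voxel holds [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz] = 1..6
    # in the dipy order.
    dipy_tensor = np.tile(np.arange(1.0, 7.0), (2, 2, 2, 1))

    # dipy -> mrtrix reorders each voxel to [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz].
    mrtrix_tensor = convert_tensor_format(dipy_tensor, 'dipy', 'mrtrix')
    print(mrtrix_tensor[0, 0, 0])  # [1. 3. 6. 2. 4. 5.]

    # Converting back through the dipy pivot is the identity.
    assert np.array_equal(
        convert_tensor_format(mrtrix_tensor, 'mrtrix', 'dipy'), dipy_tensor)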
+ """ + assert initial_format in supported_tensor_formats, \ + "Tensor format not supported" + + if initial_format == 'nifti' or initial_format == 'dipy': + correct_order = [0, 1, 2, 3, 4, 5] + tensor = np.squeeze(tensor) + elif initial_format == 'mrtrix': + correct_order = [0, 3, 1, 4, 5, 2] + else: # initial_format == 'fsl': + correct_order = [0, 1, 3, 2, 4, 5] + + return tensor[..., correct_order] + + +def convert_tensor_from_dipy_format(tensor, final_format): + """ + See description of formats at the top of this file. + """ + assert final_format in supported_tensor_formats, \ + "Tensor format not supported" + + if final_format == 'nifti' or final_format == 'dipy': + correct_order = [0, 1, 2, 3, 4, 5] + elif final_format == 'mrtrix': + correct_order = [0, 2, 5, 1, 3, 4] + else: # final_format == 'fsl'. + correct_order = [0, 1, 3, 2, 4, 5] + + tensor_reordered = tensor[..., correct_order] + + if final_format == 'nifti': + # We need to add the fifth dimension + tensor_reordered = tensor_reordered[:, :, :, None, :] + + return tensor_reordered + + +def convert_tensor_format(tensor, initial_format, final_format): + """ + See description of formats at the top of this file. + """ + tensor = convert_tensor_to_dipy_format(tensor, initial_format) + return convert_tensor_from_dipy_format(tensor, final_format) + diff --git a/scripts/scil_compute_dti_metrics.py b/scripts/scil_compute_dti_metrics.py index 13fa30acc9..d9817960b6 100755 --- a/scripts/scil_compute_dti_metrics.py +++ b/scripts/scil_compute_dti_metrics.py @@ -38,9 +38,12 @@ radial_diffusivity, lower_triangular) # Aliased to avoid clashes with images called mode. from dipy.reconst.dti import mode as dipy_mode + from scilpy.io.image import get_data_as_mask from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist, assert_outputs_exist, add_force_b0_arg) +from scilpy.reconst.dti import convert_tensor_from_dipy_format, \ + supported_tensor_formats, tensor_format_description from scilpy.utils.bvec_bval_tools import (normalize_bvecs, is_normalized_bvecs, check_b0_threshold) from scilpy.utils.filenames import add_filename_suffix, split_name_with_nii @@ -75,7 +78,8 @@ def _build_arg_parser(): help='Tensor fit method.\nWLS for weighted least squares' + '\nLS for ordinary least squares' + '\nNLLS for non-linear least-squares' + - '\nrestore for RESTORE robust tensor fitting. (Default: %(default)s)') + '\nrestore for RESTORE robust tensor fitting. ' + '(Default: %(default)s)') p.add_argument( '--not_all', action='store_true', dest='not_all', help='If set, will only save the metrics explicitly specified using ' @@ -114,6 +118,11 @@ def _build_arg_parser(): g.add_argument( '--tensor', dest='tensor', metavar='file', default='', help='Output filename for the tensor coefficients.') + g.add_argument('--tensor_format', choices=supported_tensor_formats, + default='fsl', + help=("Format used for the tensors saved in --tensor file." + "(default: %(default)s)\n" + + tensor_format_description)) g = p.add_argument_group(title='Quality control files flags') g.add_argument( @@ -203,11 +212,12 @@ def main(): FA = np.clip(FA, 0, 1) if args.tensor: - # Get the Tensor values and format them for visualisation - # in the Fibernavigator. + # Get the Tensor values + # Format them for visualization in various software. 
diff --git a/scripts/scil_convert_tensors.py b/scripts/scil_convert_tensors.py
new file mode 100644
index 0000000000..c88da642e4
--- /dev/null
+++ b/scripts/scil_convert_tensors.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Conversion of tensors (the six values of the triangular matrix) between
+various software standards. The input format cannot be detected
+automatically: the user must know how the tensors were created.
+"""
+
+import argparse
+
+import nibabel as nib
+import numpy as np
+
+from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist,
+                             assert_outputs_exist)
+from scilpy.reconst.dti import (supported_tensor_formats,
+                                tensor_format_description,
+                                convert_tensor_format)
+
+
+def _build_arg_parser():
+    p = argparse.ArgumentParser(description=__doc__ + tensor_format_description,
+                                formatter_class=argparse.RawTextHelpFormatter)
+
+    p.add_argument('in_file',
+                   help='Input tensors filename.')
+    p.add_argument('out_file',
+                   help='Output tensors filename.')
+    p.add_argument('in_format', metavar='in_format',
+                   choices=supported_tensor_formats,
+                   help='Input format. Choices: {}'
+                        .format(supported_tensor_formats))
+    p.add_argument('out_format', metavar='out_format',
+                   choices=supported_tensor_formats,
+                   help='Output format. Choices: {}'
+                        .format(supported_tensor_formats))
+    add_overwrite_arg(p)
+
+    return p
+
+
+def main():
+    parser = _build_arg_parser()
+    args = parser.parse_args()
+
+    assert_inputs_exist(parser, args.in_file)
+    assert_outputs_exist(parser, args, args.out_file)
+
+    in_tensors_img = nib.load(args.in_file)
+    in_tensors = in_tensors_img.get_fdata(dtype=np.float32)
+
+    out_tensors = convert_tensor_format(in_tensors, args.in_format,
+                                        args.out_format)
+    out_tensors_img = nib.Nifti1Image(
+        out_tensors.astype(np.float32), in_tensors_img.affine)
+    nib.save(out_tensors_img, args.out_file)
+
+
+if __name__ == "__main__":
+    main()
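Typical chained usage of the new flag and script, mirroring the test added later in this patch (filenames are illustrative):

    # Fit tensors and save them in the FSL convention, then re-order the
    # coefficients to the MRtrix convention:
    scil_compute_dti_metrics.py dwi.nii.gz dwi.bval dwi.bvec --not_all \
        --tensor tensors_fsl.nii.gz --tensor_format fsl
    scil_convert_tensors.py tensors_fsl.nii.gz tensors_mrtrix.nii.gz fsl mrtrix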
diff --git a/scripts/scil_generate_gradient_sampling.py b/scripts/scil_generate_gradient_sampling.py
index ce3b8c2e7b..67262a4f31 100755
--- a/scripts/scil_generate_gradient_sampling.py
+++ b/scripts/scil_generate_gradient_sampling.py
@@ -5,8 +5,8 @@
 Generate multi-shell gradient sampling with various processing to accelerate
 acquisition and help artefact correction.
 
-Multi-shell gradient sampling is generated as in [1], the bvecs are then flipped
-to maximize spread for eddy current correction, b0s are interleaved
+Multi-shell gradient sampling is generated as in [1], the bvecs are then
+flipped to maximize spread for eddy current correction, b0s are interleaved
 at equal spacing and the non-b0 samples are finally shuffled to minimize the
 total diffusion gradient amplitude over a few TR.
 """
@@ -21,6 +21,7 @@
 from scilpy.gradientsampling.gen_gradient_sampling import generate_gradient_sampling
 from scilpy.gradientsampling.optimize_gradient_sampling import (add_b0s,
                                                                 add_bvalue_b0,
+                                                                correct_b0s_philips,
                                                                 compute_bvalue_lin_b,
                                                                 compute_bvalue_lin_q,
                                                                 compute_min_duty_cycle_bruteforce,
@@ -73,6 +74,10 @@ def _build_arg_parser():
     p.add_argument('--b0_value',
                    type=float, default=0.0,
                    help='b-value of the b0s. [%(default)s]')
+    p.add_argument('--b0_philips',
+                   action='store_true',
+                   help='Replace the bvec of each b0 with the preceding '
+                        'bvec, for Philips scanners. [%(default)s]')
 
     bvals_group = p.add_mutually_exclusive_group(required=True)
     bvals_group.add_argument('--bvals',
@@ -138,6 +143,7 @@ def main():
     b0_every = args.b0_every
     b0_end = args.b0_end
     b0_value = args.b0_value
+    b0_philips = args.b0_philips
 
     # Only a b0 at the beginning
     if (b0_every > K) or (b0_every < 0):
@@ -181,6 +187,10 @@ def main():
         points, shell_idx = compute_min_duty_cycle_bruteforce(
             points, shell_idx, bvals)
 
+    # Correct b0s bvecs for Philips scanners
+    if b0_philips and np.sum(shell_idx == -1) > 1:
+        points, shell_idx = correct_b0s_philips(points, shell_idx)
+
     if fsl:
         save_gradient_sampling_fsl(points, shell_idx, bvals,
                                    out_filename[0], out_filename[1])
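What --b0_philips does to the sampling, on toy values; a minimal sketch (not part of the patch), assuming points is an (N, 3) array of bvecs and shell_idx == -1 flags b0s:

    import numpy as np
    from scilpy.gradientsampling.optimize_gradient_sampling import \
        correct_b0s_philips

    points = np.array([[0.0, 0.0, 0.0],   # first b0, left untouched
                       [0.0, 1.0, 0.0],
                       [0.0, 0.0, 1.0],
                       [0.0, 0.0, 0.0],   # interleaved b0
                       [0.6, 0.8, 0.0]])
    shell_idx = np.array([-1, 0, 0, -1, 0])

    points, shell_idx = correct_b0s_philips(points, shell_idx)
    # Every b0 except the first now duplicates the bvec just before it:
    # points[3] == [0.0, 0.0, 1.0]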
diff --git a/scripts/scil_validate_and_correct_eddy_gradients.py b/scripts/scil_validate_and_correct_eddy_gradients.py
new file mode 100644
index 0000000000..326766c9a6
--- /dev/null
+++ b/scripts/scil_validate_and_correct_eddy_gradients.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+Validate and correct gradients from eddy outputs.
+With full AP-PA eddy outputs, the bvec/bval files contain twice the number
+of directions and do not match the output DWI (which has them only once).
+"""
+
+import argparse
+
+import numpy as np
+
+from scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist,
+                             assert_outputs_exist)
+
+
+def _build_arg_parser():
+    p = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawTextHelpFormatter)
+    p.add_argument('in_bvec',
+                   help='In bvec file.')
+    p.add_argument('in_bval',
+                   help='In bval file.')
+    p.add_argument('nb_dirs', type=int,
+                   help='Number of directions per DWI.')
+    p.add_argument('out_bvec',
+                   help='Out bvec file.')
+    p.add_argument('out_bval',
+                   help='Out bval file.')
+    add_overwrite_arg(p)
+    return p
+
+
+def main():
+    parser = _build_arg_parser()
+    args = parser.parse_args()
+
+    assert_inputs_exist(parser, [args.in_bvec, args.in_bval])
+    assert_outputs_exist(parser, args, [args.out_bval, args.out_bvec])
+
+    # Split the bvecs in chunks of nb_dirs; with a full AP-PA acquisition,
+    # average the two phase-encoding directions.
+    in_bvec = np.genfromtxt(args.in_bvec)
+    split_dirs = in_bvec.shape[1] / args.nb_dirs
+    if int(split_dirs) != split_dirs:
+        parser.error('Number of directions in bvec ({}) can\'t be split '
+                     'into even parts using nb_dirs ({}).'.format(
+                         in_bvec.shape[1], args.nb_dirs))
+    in_bvec_split = np.hsplit(in_bvec, int(split_dirs))
+    if len(in_bvec_split) == 2:
+        out_bvec = np.mean(np.array([in_bvec_split[0],
+                                     in_bvec_split[1]]), axis=0)
+    else:
+        out_bvec = in_bvec_split[0]
+    np.savetxt(args.out_bvec, out_bvec, '%.8f')
+
+    # Keep only the first nb_dirs b-values.
+    in_bval = np.genfromtxt(args.in_bval)
+    np.savetxt(args.out_bval, in_bval[0:out_bvec.shape[1]], '%.0f')
+
+
+if __name__ == '__main__':
+    main()
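The correction on toy numbers; a minimal sketch (not part of the patch) of what the script computes when the bvec file holds a full AP-PA pair:

    import numpy as np

    # Six columns = 2 x 3 directions concatenated by eddy (nb_dirs = 3).
    in_bvec = np.array([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0, 0.0, 0.9, 0.0],
                        [0.0, 0.0, 1.0, 0.0, 0.1, 1.0]])
    halves = np.hsplit(in_bvec, 2)
    out_bvec = np.mean(np.array(halves), axis=0)
    # Each output direction is the mean of its AP and PA estimates, e.g.
    # out_bvec[:, 1] == [0.0, 0.95, 0.05]; note that the mean is not
    # re-normalized. The bvals are then simply truncated to the first
    # nb_dirs entries.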
"bvec" in associations[dwi.filename].keys(): - bvec_path = associations[dwi.filename]['bvec'] - if "fmap" in associations[dwi.filename].keys(): - fmaps = associations[dwi.filename]['fmap'] - - dwi_PE = 'todo' - dwi_revPE = -1 - conversion = {"i": "x", "j": "y", "k": "z"} - dwi_metadata = get_metadata(dwi) - if 'PhaseEncodingDirection' in dwi_metadata: - dwi_PE = dwi_metadata['PhaseEncodingDirection'] - dwi_PE = dwi_PE.replace(dwi_PE[0], conversion[dwi_PE[0]]) - if len(dwi_PE) == 1: - dwi_revPE = dwi_PE + '-' + if 'run' in dwi[0].entities: + nRun = dwi[0].entities['run'] + + for index, curr_dwi in enumerate(dwi): + dwi_path[index] = curr_dwi.path + + if curr_dwi.filename in associations.keys(): + if "bval" in associations[curr_dwi.filename].keys(): + bval_path[index] = associations[curr_dwi.filename]['bval'] + if "bvec" in associations[curr_dwi.filename].keys(): + bvec_path[index] = associations[curr_dwi.filename]['bvec'] + if "fmap" in associations[curr_dwi.filename].keys(): + fmaps[index] = associations[curr_dwi.filename]['fmap'] + if len(fmaps[index]) == 1 and isinstance(fmaps[index][0], list): + fmaps[index] = [x for xs in fmaps[index] for x in xs] + if "sbref" in associations[curr_dwi.filename].keys(): + sbref[index] = associations[curr_dwi.filename]['sbref'] + if len(sbref[index]) == 1 and isinstance(sbref[index][0], list): + sbref[index] = [x for xs in sbref[index] for x in xs] + + conversion = {"i": "x", "j": "y", "k": "z"} + dwi_metadata = get_metadata(curr_dwi) + if 'PhaseEncodingDirection' in dwi_metadata and index == 0: + dwi_PE = dwi_metadata['PhaseEncodingDirection'] + dwi_PE = dwi_PE.replace(dwi_PE[0], conversion[dwi_PE[0]]) + if len(dwi_PE) == 1: + PE[index] = dwi_PE + PE[index+1] = dwi_PE + '-' + else: + PE[index] = dwi_PE + PE[index+1] = dwi_PE[0] + elif clean: + return {} + + # Find b0 for topup, take the first one + # Check fMAP + totalreadout = default_readout + fmaps = [fmap for fmap in fmaps if fmap != ''] + if not fmaps: + if 'TotalReadoutTime' in dwi_metadata: + totalreadout = dwi_metadata['TotalReadoutTime'] else: - dwi_revPE = dwi_PE[0] - elif clean: - return {} - - # Find b0 for topup, take the first one - revb0_path = '' - totalreadout = default_readout - if len(fmaps) == 0: - if 'TotalReadoutTime' in dwi_metadata: - totalreadout = dwi_metadata['TotalReadoutTime'] - else: - for nfmap in fmaps: - nfmap_metadata = get_metadata(nfmap) - if 'PhaseEncodingDirection' in nfmap_metadata: - fmap_PE = nfmap_metadata['PhaseEncodingDirection'] - fmap_PE = fmap_PE.replace(fmap_PE[0], conversion[fmap_PE[0]]) - if fmap_PE == dwi_revPE: + if isinstance(fmaps[0], list): + fmaps = [x for xs in fmaps for x in xs] + + for nfmap in fmaps: + nfmap_metadata = get_metadata(nfmap) + if 'PhaseEncodingDirection' in nfmap_metadata: + fmap_PE = nfmap_metadata['PhaseEncodingDirection'] + fmap_PE = fmap_PE.replace(fmap_PE[0], conversion[fmap_PE[0]]) + + opposite_PE = PE.index(fmap_PE) if 'TotalReadoutTime' in dwi_metadata: if 'TotalReadoutTime' in nfmap_metadata: dwi_RT = dwi_metadata['TotalReadoutTime'] fmap_RT = nfmap_metadata['TotalReadoutTime'] if dwi_RT != fmap_RT and totalreadout == '': totalreadout = 'error_readout' - revb0_path = 'error_readout' + topup_fmap[opposite_PE] = 'error_readout' elif dwi_RT == fmap_RT: - revb0_path = nfmap.path + topup_fmap[opposite_PE] = nfmap.path totalreadout = dwi_RT - break else: - revb0_path = nfmap.path + topup_fmap[opposite_PE] = nfmap.path totalreadout = default_readout + if sbref[index] != '' and len(sbref[index]) == 1: + topup_sbref[index] = 
-def get_data(nSub, dwi, t1s, associations, default_readout, clean):
+def get_data(nSub, dwi, t1s, fs, associations, default_readout, clean):
     """ Return subject data
     Parameters
     ----------
@@ -127,8 +172,8 @@
     nSub : String
         Subject name
 
-    dwi : BIDSFile object
-        DWI object
+    dwi : list of BIDSFile object
+        DWI objects
 
     t1s : List of BIDSFile object
         List of T1s associated to the current subject
@@ -143,135 +188,281 @@
     -------
     Dictionnary containing the metadata
     """
+    bvec_path = ['todo', '']
+    bval_path = ['todo', '']
+    dwi_path = ['todo', '']
+    PE = ['todo', '']
+    topup_fmap = ['', '']
+    topup_sbref = ['', '']
+    fmaps = ['', '']
+    sbref = ['', '']
     nSess = 0
-    if 'session' in dwi.get_entities().keys():
-        nSess = dwi.get_entities()['session']
+    if 'session' in dwi[0].entities:
+        nSess = dwi[0].entities['session']
 
     nRun = 0
-    if 'run' in dwi.get_entities().keys():
-        nRun = dwi.get_entities()['run']
-
-    fmaps = []
-    bval_path = 'todo'
-    bvec_path = 'todo'
-    if dwi.filename in associations.keys():
-        if "bval" in associations[dwi.filename].keys():
-            bval_path = associations[dwi.filename]['bval']
-        if "bvec" in associations[dwi.filename].keys():
-            bvec_path = associations[dwi.filename]['bvec']
-        if "fmap" in associations[dwi.filename].keys():
-            fmaps = associations[dwi.filename]['fmap']
-
-    dwi_PE = 'todo'
-    dwi_revPE = -1
-    conversion = {"i": "x", "j": "y", "k": "z"}
-    dwi_metadata = get_metadata(dwi)
-    if 'PhaseEncodingDirection' in dwi_metadata:
-        dwi_PE = dwi_metadata['PhaseEncodingDirection']
-        dwi_PE = dwi_PE.replace(dwi_PE[0], conversion[dwi_PE[0]])
-        if len(dwi_PE) == 1:
-            dwi_revPE = dwi_PE + '-'
+    if 'run' in dwi[0].entities:
+        nRun = dwi[0].entities['run']
+
+    for index, curr_dwi in enumerate(dwi):
+        dwi_path[index] = curr_dwi.path
+
+        if curr_dwi.filename in associations.keys():
+            if "bval" in associations[curr_dwi.filename].keys():
+                bval_path[index] = associations[curr_dwi.filename]['bval']
+            if "bvec" in associations[curr_dwi.filename].keys():
+                bvec_path[index] = associations[curr_dwi.filename]['bvec']
+            if "fmap" in associations[curr_dwi.filename].keys():
+                fmaps[index] = associations[curr_dwi.filename]['fmap']
+                if len(fmaps[index]) == 1 and isinstance(fmaps[index][0], list):
+                    fmaps[index] = [x for xs in fmaps[index] for x in xs]
+            if "sbref" in associations[curr_dwi.filename].keys():
+                sbref[index] = associations[curr_dwi.filename]['sbref']
+                if len(sbref[index]) == 1 and isinstance(sbref[index][0], list):
+                    sbref[index] = [x for xs in sbref[index] for x in xs]
+
+        conversion = {"i": "x", "j": "y", "k": "z"}
+        dwi_metadata = get_metadata(curr_dwi)
+        if 'PhaseEncodingDirection' in dwi_metadata and index == 0:
+            dwi_PE = dwi_metadata['PhaseEncodingDirection']
+            dwi_PE = dwi_PE.replace(dwi_PE[0], conversion[dwi_PE[0]])
+            if len(dwi_PE) == 1:
+                PE[index] = dwi_PE
+                PE[index+1] = dwi_PE + '-'
+            else:
+                PE[index] = dwi_PE
+                PE[index+1] = dwi_PE[0]
+        elif clean:
+            return {}
+
+        # Find b0 for topup, take the first one
+        # Check fmaps
+        totalreadout = default_readout
+        fmaps = [fmap for fmap in fmaps if fmap != '']
+        if not fmaps:
+            if 'TotalReadoutTime' in dwi_metadata:
+                totalreadout = dwi_metadata['TotalReadoutTime']
         else:
-            dwi_revPE = dwi_PE[0]
-    elif clean:
-        return {}
-
-    # Find b0 for topup, take the first one
-    revb0_path = ''
-    totalreadout = default_readout
-    if len(fmaps) == 0:
-        if 'TotalReadoutTime' in dwi_metadata:
-            totalreadout = dwi_metadata['TotalReadoutTime']
-    else:
-        for nfmap in fmaps:
-            nfmap_metadata = get_metadata(nfmap)
-            if 'PhaseEncodingDirection' in nfmap_metadata:
-                fmap_PE = nfmap_metadata['PhaseEncodingDirection']
-                fmap_PE = fmap_PE.replace(fmap_PE[0], conversion[fmap_PE[0]])
-                if fmap_PE == dwi_revPE:
+            if isinstance(fmaps[0], list):
+                fmaps = [x for xs in fmaps for x in xs]
+
+            for nfmap in fmaps:
+                nfmap_metadata = get_metadata(nfmap)
+                if 'PhaseEncodingDirection' in nfmap_metadata:
+                    fmap_PE = nfmap_metadata['PhaseEncodingDirection']
+                    fmap_PE = fmap_PE.replace(fmap_PE[0], conversion[fmap_PE[0]])
+
+                    opposite_PE = PE.index(fmap_PE)
                     if 'TotalReadoutTime' in dwi_metadata:
                         if 'TotalReadoutTime' in nfmap_metadata:
                             dwi_RT = dwi_metadata['TotalReadoutTime']
                             fmap_RT = nfmap_metadata['TotalReadoutTime']
                             if dwi_RT != fmap_RT and totalreadout == '':
                                 totalreadout = 'error_readout'
-                                revb0_path = 'error_readout'
+                                topup_fmap[opposite_PE] = 'error_readout'
                             elif dwi_RT == fmap_RT:
-                                revb0_path = nfmap.path
+                                topup_fmap[opposite_PE] = nfmap.path
                                 totalreadout = dwi_RT
-                                break
                     else:
-                        revb0_path = nfmap.path
+                        topup_fmap[opposite_PE] = nfmap.path
                         totalreadout = default_readout
+        if sbref[index] != '' and len(sbref[index]) == 1:
+            topup_sbref[index] = sbref[index][0].path
+
+    if len(dwi) == 2:
+        if not any(s == '' for s in topup_sbref):
+            topup = topup_sbref
+        elif not any(s == '' for s in topup_fmap):
+            topup = topup_fmap
+        else:
+            topup = ['todo', 'todo']
+    elif len(dwi) == 1:
+        if topup_fmap[1] != '':
+            topup = topup_fmap
+        else:
+            topup = ['', '']
+    else:
+        # Avoid an undefined `topup` below; flag the entry for the user.
+        topup = ['todo', 'todo']
+        print('BIDS structure unknown. Please open an issue: '
+              'https://github.com/scilus/scilpy/issues')
 
     t1_path = 'todo'
-    t1_nSess = []
-    if not t1s and clean:
-        return {}
-
-    for t1 in t1s:
-        if 'session' in t1.get_entities().keys() and\
-                t1.get_entities()['session'] == nSess:
-            t1_nSess.append(t1)
-        elif 'session' not in t1.get_entities().keys():
-            t1_nSess.append(t1)
-
-    # Take the right T1, if multiple T1s the field must be completed ('todo')
-    if len(t1_nSess) == 1:
-        t1_path = t1_nSess[0].path
-    elif 'run' in dwi.path:
-        for t1 in t1_nSess:
-            if 'run-' + str(nRun) in t1.path:
-                t1_path = t1.path
+    wmparc_path = ''
+    aparc_aseg_path = ''
+    if fs:
+        t1_path = fs[0]
+        wmparc_path = fs[1]
+        aparc_aseg_path = fs[2]
+    else:
+        t1_nSess = []
+        if not t1s and clean:
+            return {}
+
+        for t1 in t1s:
+            if 'session' in t1.get_entities().keys() and\
+                    t1.get_entities()['session'] == nSess:
+                t1_nSess.append(t1)
+            elif 'session' not in t1.get_entities().keys():
+                t1_nSess.append(t1)
+
+        if len(t1_nSess) == 1:
+            t1_path = t1_nSess[0].path
 
     return {'subject': nSub,
             'session': nSess,
             'run': nRun,
             't1': t1_path,
-            'dwi': dwi.path,
-            'bvec': bvec_path,
-            'bval': bval_path,
-            'rev_b0': revb0_path,
-            'DWIPhaseEncodingDir': dwi_PE,
+            'wmparc': wmparc_path,
+            'aparc_aseg': aparc_aseg_path,
+            'dwi': dwi_path[0],
+            'bvec': bvec_path[0],
+            'bval': bval_path[0],
+            'rev_dwi': dwi_path[1],
+            'rev_bvec': bvec_path[1],
+            'rev_bval': bval_path[1],
+            'topup': topup[0],
+            'rev_topup': topup[1],
+            'DWIPhaseEncodingDir': PE[0],
+            'rev_DWIPhaseEncodingDir': PE[1],
             'TotalReadoutTime': totalreadout}
 
 
+def associate_dwis(layout, nSub):
+    """ Return the DWI acquisitions of a subject, pairing reverse-PE runs
+    Parameters
+    ----------
+    layout: pyBIDS layout
+        BIDS layout
+    nSub: String
+        Current subject to analyse
+    Returns
+    -------
+    all_dwis: list
+        List of dwi runs (one or two files each)
+    """
+    all_dwis = []
+    if layout.get_sessions(subject=nSub):
+        for curr_sess in layout.get_sessions(subject=nSub):
+            dwis = layout.get(subject=nSub,
+                              session=curr_sess,
+                              datatype='dwi', extension='nii.gz',
+                              suffix='dwi')
+
+            if len(dwis) == 1:
+                all_dwis.append(dwis)
+            elif len(dwis) > 1:
+                all_runs = [curr_dwi.entities['run'] for curr_dwi in dwis if 'run' in curr_dwi.entities]
+                if all_runs:
+                    for curr_run in all_runs:
+                        dwis = layout.get(subject=nSub,
+                                          session=curr_sess,
+                                          run=curr_run,
+                                          datatype='dwi', extension='nii.gz',
+                                          suffix='dwi')
+                        if len(dwis) == 2:
+                            all_dwis.append(dwis)
+                        else:
+                            logging.error('Too many DWIs for subject %s', nSub)
+                elif len(dwis) == 2:
+                    all_dwis.append(dwis)
+                else:
+                    logging.error(dwis)
+                    logging.error('Too many DWIs for subject %s', nSub)
+    else:
+        dwis = layout.get(subject=nSub,
+                          datatype='dwi', extension='nii.gz',
+                          suffix='dwi')
+        if len(dwis) == 1:
+            all_dwis.append(dwis)
+        elif len(dwis) > 1:
+            all_runs = [curr_dwi.entities['run'] for curr_dwi in dwis if 'run' in curr_dwi.entities]
+            if all_runs:
+                for curr_run in all_runs:
+                    dwis = layout.get(subject=nSub,
+                                      run=curr_run,
+                                      datatype='dwi', extension='nii.gz',
+                                      suffix='dwi')
+                    if len(dwis) <= 2:
+                        all_dwis.append(dwis)
+                    else:
+                        logging.error('Too many DWIs for subject %s', nSub)
+            elif len(dwis) == 2:
+                all_dwis.append(dwis)
+        else:
+            logging.error('Too many DWIs for subject %s', nSub)
+
+    return all_dwis
+
+
 def main():
     parser = _build_arg_parser()
     args = parser.parse_args()
 
     assert_outputs_exist(parser, args, args.out_json)
 
+    log_level = logging.WARNING
+    if args.verbose:
+        log_level = logging.INFO
+    logging.basicConfig(level=log_level)
+    coloredlogs.install(level=log_level)
+
     data = []
-    layout = BIDSLayout(args.in_bids, index_metadata=False)
+    layout = BIDSLayout(args.in_bids, validate=False,
+                        ignore=_load_bidsignore_(args.in_bids))
 
     subjects = layout.get_subjects()
 
     if args.participants_label:
         subjects = [nSub for nSub in args.participants_label if nSub in subjects]
 
+    subjects.sort()
+
+    logging.info("Found {} subject(s)".format(len(subjects)))
+
     for nSub in subjects:
-        dwis = layout.get(subject=nSub,
-                          datatype='dwi', extension='nii.gz',
-                          suffix='dwi')
-        t1s = layout.get(subject=nSub,
-                         datatype='anat', extension='nii.gz',
-                         suffix='T1w')
+        mess = '# Validating subject: {}'.format(nSub)
+        logging.info("-" * len(mess))
+        logging.info(mess)
+        dwis = associate_dwis(layout, nSub)
+
+        fs_inputs = []
+        t1s = []
+
+        if args.fs:
+            logging.info("# Looking for FS files")
+            t1_fs = glob(os.path.join(args.fs, 'sub-' + nSub, 'mri/T1.mgz'))
+            wmparc = glob(os.path.join(args.fs, 'sub-' + nSub,
                                       'mri/wmparc.mgz'))
+            aparc_aseg = glob(os.path.join(args.fs, 'sub-' + nSub,
+                                           'mri/aparc+aseg.mgz'))
+            if len(t1_fs) == 1 and len(wmparc) == 1 and len(aparc_aseg) == 1:
+                fs_inputs = [t1_fs[0], wmparc[0], aparc_aseg[0]]
+        else:
+            logging.info("# Looking for T1 files")
+            t1s = layout.get(subject=nSub,
+                             datatype='anat', extension='nii.gz',
+                             suffix='T1w')
+
         fmaps = layout.get(subject=nSub,
                            datatype='fmap', extension='nii.gz',
                            suffix='epi')
+
         bvals = layout.get(subject=nSub,
                            datatype='dwi', extension='bval',
                            suffix='dwi')
         bvecs = layout.get(subject=nSub,
                            datatype='dwi', extension='bvec',
                            suffix='dwi')
+        sbrefs = layout.get(subject=nSub,
+                            datatype='dwi', extension='nii.gz',
+                            suffix='sbref')
 
         # Get associations relatives to DWIs
-        associations = get_dwi_associations(fmaps, bvals, bvecs)
+        associations = get_dwi_associations(fmaps, bvals, bvecs, sbrefs)
 
         # Get the data for each run of DWIs
         for dwi in dwis:
-            data.append(get_data(nSub, dwi, t1s, associations,
+            data.append(get_data(nSub, dwi, t1s, fs_inputs, associations,
                                  args.readout, args.clean))
 
     if args.clean:
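For reviewers, the shape of one entry in the output json after this change; a sketch with illustrative values ('todo' marks fields the user must complete, '' marks data that was not found):

    entry = {
        'subject': '01',
        'session': 0,
        'run': 0,
        't1': 'sub-01/anat/sub-01_T1w.nii.gz',
        'wmparc': '',        # set when --fs is given
        'aparc_aseg': '',    # set when --fs is given
        'dwi': 'sub-01/dwi/sub-01_dir-AP_dwi.nii.gz',
        'bvec': 'sub-01/dwi/sub-01_dir-AP_dwi.bvec',
        'bval': 'sub-01/dwi/sub-01_dir-AP_dwi.bval',
        'rev_dwi': 'sub-01/dwi/sub-01_dir-PA_dwi.nii.gz',
        'rev_bvec': 'sub-01/dwi/sub-01_dir-PA_dwi.bvec',
        'rev_bval': 'sub-01/dwi/sub-01_dir-PA_dwi.bval',
        'topup': 'sub-01/fmap/sub-01_dir-AP_epi.nii.gz',
        'rev_topup': 'sub-01/fmap/sub-01_dir-PA_epi.nii.gz',
        'DWIPhaseEncodingDir': 'y',
        'rev_DWIPhaseEncodingDir': 'y-',
        'TotalReadoutTime': 0.062,
    }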
diff --git a/scripts/tests/test_convert_tensors.py b/scripts/tests/test_convert_tensors.py
new file mode 100644
index 0000000000..e3bfae93fc
--- /dev/null
+++ b/scripts/tests/test_convert_tensors.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import os
+import tempfile
+
+from scilpy.io.fetcher import fetch_data, get_home, get_testing_files_dict
+
+# If they already exist, this only takes 5 seconds (check md5sum)
+fetch_data(get_testing_files_dict(), keys=['processing.zip'])
+tmp_dir = tempfile.TemporaryDirectory()
+
+
+def test_help_option(script_runner):
+    ret = script_runner.run('scil_convert_tensors.py', '--help')
+    assert ret.success
+
+
+def test_execution_processing(script_runner):
+    os.chdir(os.path.expanduser(tmp_dir.name))
+
+    # No tensor in the current test data: run scil_compute_dti_metrics.py
+    # first to create one.
+    in_dwi = os.path.join(get_home(), 'processing',
+                          'dwi_crop_1000.nii.gz')
+    in_bval = os.path.join(get_home(), 'processing',
+                           '1000.bval')
+    in_bvec = os.path.join(get_home(), 'processing',
+                           '1000.bvec')
+    script_runner.run('scil_compute_dti_metrics.py', in_dwi,
+                      in_bval, in_bvec, '--not_all',
+                      '--tensor', 'tensors.nii.gz', '--tensor_format', 'fsl')
+
+    ret = script_runner.run('scil_convert_tensors.py', 'tensors.nii.gz',
+                            'converted_tensors.nii.gz', 'fsl', 'mrtrix')
+
+    assert ret.success
diff --git a/scripts/tests/test_validate_and_correct_eddy_gradients.py b/scripts/tests/test_validate_and_correct_eddy_gradients.py
new file mode 100644
index 0000000000..8457d8f65f
--- /dev/null
+++ b/scripts/tests/test_validate_and_correct_eddy_gradients.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import os
+import tempfile
+
+from scilpy.io.fetcher import fetch_data, get_home, get_testing_files_dict
+
+# If they already exist, this only takes 5 seconds (check md5sum)
+fetch_data(get_testing_files_dict(), keys=['processing.zip'])
+tmp_dir = tempfile.TemporaryDirectory()
+
+
+def test_help_option(script_runner):
+    ret = script_runner.run('scil_validate_and_correct_eddy_gradients.py',
+                            '--help')
+    assert ret.success
+
+
+def test_execution_extract_half(script_runner):
+    os.chdir(os.path.expanduser(tmp_dir.name))
+    in_bvec = os.path.join(get_home(), 'processing', 'dwi.bvec')
+    in_bval = os.path.join(get_home(), 'processing', 'dwi.bval')
+    ret = script_runner.run('scil_validate_and_correct_eddy_gradients.py',
+                            in_bvec, in_bval, "32",
+                            'out.bvec',
+                            'out.bval',
+                            '-f')
+    assert ret.success
+
+
+def test_execution_extract_total(script_runner):
+    os.chdir(os.path.expanduser(tmp_dir.name))
+    in_bvec = os.path.join(get_home(), 'processing', 'dwi.bvec')
+    in_bval = os.path.join(get_home(), 'processing', 'dwi.bval')
+    ret = script_runner.run('scil_validate_and_correct_eddy_gradients.py',
+                            in_bvec, in_bval, "64",
+                            'out.bvec',
+                            'out.bval',
+                            '-f')
+    assert ret.success