From 8563352df3b557bc693246d6180b5da107837ce6 Mon Sep 17 00:00:00 2001
From: Robert Smith
Date: Thu, 26 Oct 2023 22:29:08 +1100
Subject: [PATCH 01/11] Python: Initial work toward wrappers for executables

Change the filesystem structure of python/. There is no need to preserve
the lib/mrtrix3/ sub-directory structure in the repository; that structure
can be constructed exclusively in the build directory. Executables now
reside in the python/scripts/ sub-directory. For algorithm-based scripts,
all code is placed in a sub-directory of python/scripts/, with the previous
bin/ file contents now residing in that directory's __init__.py, thereby
co-locating all relevant source code for such commands. The algorithm
module is simplified somewhat, since the algorithm files are now co-located
with the interface source file. Delete the file python/bin/mrtrix3.py;
script source files no longer load the mrtrix3 module and invoke the
execute() function at the end of the file. An alternative mechanism for
loading the API and the script entrypoint will be introduced in a
subsequent commit.
---
 python/{lib/mrtrix3 => }/__init__.py | 7 --
 python/{lib/mrtrix3 => }/_version.py.in | 0
 python/algorithm.py | 45 ++++++++++++
 python/{lib/mrtrix3 => }/app.py | 4 --
 python/bin/mrtrix3.py | 66 ------------------
 python/{lib/mrtrix3 => }/fsl.py | 0
 python/{lib/mrtrix3 => }/image.py | 0
 python/lib/mrtrix3/_5ttgen/__init__.py | 0
 python/lib/mrtrix3/algorithm.py | 69 ------------------
 python/lib/mrtrix3/dwi2mask/__init__.py | 0
 python/lib/mrtrix3/dwi2response/__init__.py | 0
 python/lib/mrtrix3/dwibiascorrect/__init__.py | 0
 python/lib/mrtrix3/dwinormalise/__init__.py | 0
 python/{lib/mrtrix3 => }/matrix.py | 0
 python/{lib/mrtrix3 => }/path.py | 24 +------
 python/{lib/mrtrix3 => }/phaseencoding.py | 0
 python/{lib/mrtrix3 => }/run.py | 0
 .../5ttgen => scripts/5ttgen/__init__.py} | 8 +--
 .../_5ttgen => scripts/5ttgen}/freesurfer.py | 0
 .../mrtrix3/_5ttgen => scripts/5ttgen}/fsl.py | 0
 .../mrtrix3/_5ttgen => scripts/5ttgen}/gif.py | 0
 .../_5ttgen => scripts/5ttgen}/hsvs.py | 0
 python/{bin/blend => scripts/blend.py} | 0
 .../convert_bruker.py} | 0
 .../dwi2mask/3dautomask.py | 0
 .../dwi2mask => scripts/dwi2mask/__init__.py} | 8 +--
 .../{lib/mrtrix3 => scripts}/dwi2mask/ants.py | 0
 .../dwi2mask/b02template.py | 0
 .../mrtrix3 => scripts}/dwi2mask/consensus.py | 0
 .../mrtrix3 => scripts}/dwi2mask/fslbet.py | 0
 .../mrtrix3 => scripts}/dwi2mask/hdbet.py | 0
 .../mrtrix3 => scripts}/dwi2mask/legacy.py | 0
 .../{lib/mrtrix3 => scripts}/dwi2mask/mean.py | 0
 .../mrtrix3 => scripts}/dwi2mask/mtnorm.py | 0
 .../dwi2mask/synthstrip.py | 0
 .../mrtrix3 => scripts}/dwi2mask/trace.py | 0
 .../dwi2response/__init__.py} | 11 +--
 .../dwi2response/dhollander.py | 0
 .../mrtrix3 => scripts}/dwi2response/fa.py | 0
 .../dwi2response/manual.py | 0
 .../dwi2response/msmt_5tt.py | 0
 .../mrtrix3 => scripts}/dwi2response/tax.py | 0
 .../dwi2response/tournier.py | 0
 .../dwibiascorrect/__init__.py} | 10 +--
 .../dwibiascorrect/ants.py | 0
 .../mrtrix3 => scripts}/dwibiascorrect/fsl.py | 0
 .../dwibiascorrect/mtnorm.py | 0
 .../dwibiasnormmask.py} | 6 --
 python/{bin/dwicat => scripts/dwicat.py} | 7 --
 .../dwifslpreproc.py} | 9 ---
 .../dwigradcheck => scripts/dwigradcheck.py} | 5 --
 .../dwinormalise/__init__.py} | 10 +--
 .../mrtrix3 => scripts}/dwinormalise/group.py | 0
 .../dwinormalise/manual.py | 0
 .../dwinormalise/mtnorm.py | 0
 .../dwishellmath => scripts/dwishellmath.py} | 5 --
 python/{bin/for_each => scripts/for_each.py} | 9 ---
 .../{bin/gen_scheme => scripts/gen_scheme.py} | 0
 .../labelsgmfix => 
scripts/labelsgmfix.py} | 8 --- .../{bin/mask2glass => scripts/mask2glass.py} | 4 -- .../mrtrix_cleanup.py} | 6 -- python/{bin/notfound => scripts/notfound.py} | 0 .../population_template.py} | 6 -- .../responsemean => scripts/responsemean.py} | 7 -- python/{lib/mrtrix3 => }/sh.py | 0 python/{lib/mrtrix3 => }/utils.py | 0 .../{_5ttgen => 5ttgen}/FreeSurfer2ACT.txt | 0 .../FreeSurfer2ACT_sgm_amyg_hipp.txt | 0 .../hsvs/AmygSubfields.txt | 0 .../hsvs/HippSubfields.txt | 0 70 files changed, 51 insertions(+), 283 deletions(-) rename python/{lib/mrtrix3 => }/__init__.py (93%) rename python/{lib/mrtrix3 => }/_version.py.in (100%) create mode 100644 python/algorithm.py rename python/{lib/mrtrix3 => }/app.py (99%) delete mode 100644 python/bin/mrtrix3.py rename python/{lib/mrtrix3 => }/fsl.py (100%) rename python/{lib/mrtrix3 => }/image.py (100%) delete mode 100644 python/lib/mrtrix3/_5ttgen/__init__.py delete mode 100644 python/lib/mrtrix3/algorithm.py delete mode 100644 python/lib/mrtrix3/dwi2mask/__init__.py delete mode 100644 python/lib/mrtrix3/dwi2response/__init__.py delete mode 100644 python/lib/mrtrix3/dwibiascorrect/__init__.py delete mode 100644 python/lib/mrtrix3/dwinormalise/__init__.py rename python/{lib/mrtrix3 => }/matrix.py (100%) rename python/{lib/mrtrix3 => }/path.py (90%) rename python/{lib/mrtrix3 => }/phaseencoding.py (100%) rename python/{lib/mrtrix3 => }/run.py (100%) rename python/{bin/5ttgen => scripts/5ttgen/__init__.py} (94%) mode change 100755 => 100644 rename python/{lib/mrtrix3/_5ttgen => scripts/5ttgen}/freesurfer.py (100%) rename python/{lib/mrtrix3/_5ttgen => scripts/5ttgen}/fsl.py (100%) rename python/{lib/mrtrix3/_5ttgen => scripts/5ttgen}/gif.py (100%) rename python/{lib/mrtrix3/_5ttgen => scripts/5ttgen}/hsvs.py (100%) rename python/{bin/blend => scripts/blend.py} (100%) mode change 100755 => 100644 rename python/{bin/convert_bruker => scripts/convert_bruker.py} (100%) mode change 100755 => 100644 rename python/{lib/mrtrix3 => scripts}/dwi2mask/3dautomask.py (100%) rename python/{bin/dwi2mask => scripts/dwi2mask/__init__.py} (96%) mode change 100755 => 100644 rename python/{lib/mrtrix3 => scripts}/dwi2mask/ants.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2mask/b02template.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2mask/consensus.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2mask/fslbet.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2mask/hdbet.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2mask/legacy.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2mask/mean.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2mask/mtnorm.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2mask/synthstrip.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2mask/trace.py (100%) rename python/{bin/dwi2response => scripts/dwi2response/__init__.py} (97%) mode change 100755 => 100644 rename python/{lib/mrtrix3 => scripts}/dwi2response/dhollander.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2response/fa.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2response/manual.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2response/msmt_5tt.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2response/tax.py (100%) rename python/{lib/mrtrix3 => scripts}/dwi2response/tournier.py (100%) rename python/{bin/dwibiascorrect => scripts/dwibiascorrect/__init__.py} (95%) mode change 100755 => 100644 rename python/{lib/mrtrix3 => scripts}/dwibiascorrect/ants.py (100%) rename python/{lib/mrtrix3 => scripts}/dwibiascorrect/fsl.py (100%) rename 
python/{lib/mrtrix3 => scripts}/dwibiascorrect/mtnorm.py (100%) rename python/{bin/dwibiasnormmask => scripts/dwibiasnormmask.py} (99%) mode change 100755 => 100644 rename python/{bin/dwicat => scripts/dwicat.py} (98%) mode change 100755 => 100644 rename python/{bin/dwifslpreproc => scripts/dwifslpreproc.py} (99%) mode change 100755 => 100644 rename python/{bin/dwigradcheck => scripts/dwigradcheck.py} (98%) mode change 100755 => 100644 rename python/{bin/dwinormalise => scripts/dwinormalise/__init__.py} (91%) mode change 100755 => 100644 rename python/{lib/mrtrix3 => scripts}/dwinormalise/group.py (100%) rename python/{lib/mrtrix3 => scripts}/dwinormalise/manual.py (100%) rename python/{lib/mrtrix3 => scripts}/dwinormalise/mtnorm.py (100%) rename python/{bin/dwishellmath => scripts/dwishellmath.py} (96%) mode change 100755 => 100644 rename python/{bin/for_each => scripts/for_each.py} (99%) mode change 100755 => 100644 rename python/{bin/gen_scheme => scripts/gen_scheme.py} (100%) mode change 100755 => 100644 rename python/{bin/labelsgmfix => scripts/labelsgmfix.py} (98%) mode change 100755 => 100644 rename python/{bin/mask2glass => scripts/mask2glass.py} (97%) mode change 100755 => 100644 rename python/{bin/mrtrix_cleanup => scripts/mrtrix_cleanup.py} (98%) mode change 100755 => 100644 rename python/{bin/notfound => scripts/notfound.py} (100%) mode change 100755 => 100644 rename python/{bin/population_template => scripts/population_template.py} (99%) mode change 100755 => 100644 rename python/{bin/responsemean => scripts/responsemean.py} (97%) mode change 100755 => 100644 rename python/{lib/mrtrix3 => }/sh.py (100%) rename python/{lib/mrtrix3 => }/utils.py (100%) rename share/mrtrix3/{_5ttgen => 5ttgen}/FreeSurfer2ACT.txt (100%) rename share/mrtrix3/{_5ttgen => 5ttgen}/FreeSurfer2ACT_sgm_amyg_hipp.txt (100%) rename share/mrtrix3/{_5ttgen => 5ttgen}/hsvs/AmygSubfields.txt (100%) rename share/mrtrix3/{_5ttgen => 5ttgen}/hsvs/HippSubfields.txt (100%) diff --git a/python/lib/mrtrix3/__init__.py b/python/__init__.py similarity index 93% rename from python/lib/mrtrix3/__init__.py rename to python/__init__.py index a4f29bcd41..12dc5ab0b8 100644 --- a/python/lib/mrtrix3/__init__.py +++ b/python/__init__.py @@ -81,10 +81,3 @@ def setup_ansi(): if sys.stderr.isatty() and not ('TerminalColor' in CONFIG and CONFIG['TerminalColor'].lower() in ['no', 'false', '0']): ANSI = ANSICodes('\033[0K', '\033[0m', '\033[03;32m', '\033[03;34m', '\033[01;31m', '\033[03;36m', '\033[00;31m') #pylint: disable=unused-variable setup_ansi() - - - -# Execute a command -def execute(): #pylint: disable=unused-variable - from . import app #pylint: disable=import-outside-toplevel - app._execute(inspect.getmodule(inspect.stack()[1][0])) # pylint: disable=protected-access diff --git a/python/lib/mrtrix3/_version.py.in b/python/_version.py.in similarity index 100% rename from python/lib/mrtrix3/_version.py.in rename to python/_version.py.in diff --git a/python/algorithm.py b/python/algorithm.py new file mode 100644 index 0000000000..c21623b5fb --- /dev/null +++ b/python/algorithm.py @@ -0,0 +1,45 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +# Set of functionalities for when a single script has many 'algorithms' that may be invoked, +# i.e. the script deals with generating a particular output, but there are a number of +# processes to select from, each of which is capable of generating that output. + + +import importlib, inspect, pkgutil, sys + + + +# Note: This function essentially duplicates the current state of app.cmdline in order for command-line +# options common to all algorithms of a particular script to be applicable once any particular sub-parser +# is invoked. Therefore this function must be called _after_ all such options are set up. +def usage(cmdline): #pylint: disable=unused-variable + from mrtrix3 import app #pylint: disable=import-outside-toplevel + module_name = inspect.currentframe().f_back.f_globals["__name__"] + submodules = [submodule_info.name for submodule_info in pkgutil.walk_packages(sys.modules[module_name].__spec__.submodule_search_locations)] + base_parser = app.Parser(description='Base parser for construction of subparsers', parents=[cmdline]) + subparsers = cmdline.add_subparsers(title='Algorithm choices', + help='Select the algorithm to be used to complete the script operation; ' + 'additional details and options become available once an algorithm is nominated. ' + 'Options are: ' + ', '.join(submodules), dest='algorithm') + for submodule in submodules: + module = importlib.import_module(module_name + '.' + submodule) + module.usage(base_parser, subparsers) + return + + + +def get(name): #pylint: disable=unused-variable + return sys.modules[inspect.currentframe().f_back.f_globals["__name__"] + '.' + name] diff --git a/python/lib/mrtrix3/app.py b/python/app.py similarity index 99% rename from python/lib/mrtrix3/app.py rename to python/app.py index c80a413ad5..6d285abb03 100644 --- a/python/lib/mrtrix3/app.py +++ b/python/app.py @@ -102,10 +102,6 @@ -# Generally preferable to use: -# "import mrtrix3" -# "mrtrix3.execute()" -# , rather than executing this function directly def _execute(module): #pylint: disable=unused-variable from mrtrix3 import run #pylint: disable=import-outside-toplevel global ARGS, CMDLINE, CONTINUE_OPTION, DO_CLEANUP, FORCE_OVERWRITE, NUM_THREADS, SCRATCH_DIR, VERBOSITY diff --git a/python/bin/mrtrix3.py b/python/bin/mrtrix3.py deleted file mode 100644 index c697f1a62d..0000000000 --- a/python/bin/mrtrix3.py +++ /dev/null @@ -1,66 +0,0 @@ - -# Copyright (c) 2008-2019 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. 
- -import imp, os, sys - -def imported(lib_path): - success = False - fp = None - try: - fp, pathname, description = imp.find_module('mrtrix3', [ lib_path ]) - imp.load_module('mrtrix3', fp, pathname, description) - success = True - except ImportError: - pass - finally: - if fp: - fp.close() - return success - -# Can the MRtrix3 Python modules be found based on their relative location to this file? -# Note that this includes the case where this file is a softlink within an external module, -# which provides a direct link to the core installation -if not imported (os.path.normpath (os.path.join ( \ - os.path.dirname (os.path.realpath (__file__)), os.pardir, 'lib') )): - - # If this file is a duplicate, which has been stored in an external module, - # we may be able to figure out the location of the core library using the - # build script. - - # case 1: build is a symbolic link: - if not imported (os.path.join (os.path.dirname (os.path.realpath ( \ - os.path.join (os.path.dirname(__file__), os.pardir, 'build'))), 'lib')): - - # case 2: build is a file containing the path to the core build script: - try: - with open (os.path.join (os.path.dirname(__file__), os.pardir, 'build')) as fp: - for line in fp: - build_path = line.split ('#',1)[0].strip() - if build_path: - break - except IOError: - pass - - if not imported (os.path.join (os.path.dirname (build_path), 'lib')): - - sys.stderr.write(''' -ERROR: Unable to locate MRtrix3 Python modules - -For detailed instructions, please refer to: -https://mrtrix.readthedocs.io/en/latest/tips_and_tricks/external_modules.html -''') - sys.stderr.flush() - sys.exit(1) diff --git a/python/lib/mrtrix3/fsl.py b/python/fsl.py similarity index 100% rename from python/lib/mrtrix3/fsl.py rename to python/fsl.py diff --git a/python/lib/mrtrix3/image.py b/python/image.py similarity index 100% rename from python/lib/mrtrix3/image.py rename to python/image.py diff --git a/python/lib/mrtrix3/_5ttgen/__init__.py b/python/lib/mrtrix3/_5ttgen/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/python/lib/mrtrix3/algorithm.py b/python/lib/mrtrix3/algorithm.py deleted file mode 100644 index 88386e2acc..0000000000 --- a/python/lib/mrtrix3/algorithm.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Set of functionalities for when a single script has many 'algorithms' that may be invoked, -# i.e. the script deals with generating a particular output, but there are a number of -# processes to select from, each of which is capable of generating that output. 
- - -import importlib, inspect, os, pkgutil, sys - - - -# Helper function for finding where the files representing different script algorithms will be stored -# These will be in a sub-directory relative to this library file -def _algorithms_path(): - from mrtrix3 import path #pylint: disable=import-outside-toplevel - return os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(inspect.getouterframes(inspect.currentframe())[-1][1])), os.pardir, 'lib', 'mrtrix3', path.script_subdir_name())) - - - -# This function needs to be safe to run in order to populate the help page; that is, no app initialisation has been run -def get_list(): #pylint: disable=unused-variable - from mrtrix3 import app #pylint: disable=import-outside-toplevel - algorithm_list = [ ] - for filename in os.listdir(_algorithms_path()): - filename = filename.split('.') - if len(filename) == 2 and filename[1] == 'py' and filename[0] != '__init__': - algorithm_list.append(filename[0]) - algorithm_list = sorted(algorithm_list) - app.debug('Found algorithms: ' + str(algorithm_list)) - return algorithm_list - - - -# Note: This function essentially duplicates the current state of app.cmdline in order for command-line -# options common to all algorithms of a particular script to be applicable once any particular sub-parser -# is invoked. Therefore this function must be called _after_ all such options are set up. -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import app, path #pylint: disable=import-outside-toplevel - sys.path.insert(0, os.path.realpath(os.path.join(_algorithms_path(), os.pardir))) - initlist = [ ] - # Don't let Python 3 try to read incompatible .pyc files generated by Python 2 for no-longer-existent .py files - pylist = get_list() - base_parser = app.Parser(description='Base parser for construction of subparsers', parents=[cmdline]) - subparsers = cmdline.add_subparsers(title='Algorithm choices', help='Select the algorithm to be used to complete the script operation; additional details and options become available once an algorithm is nominated. Options are: ' + ', '.join(get_list()), dest='algorithm') - for dummy_importer, package_name, dummy_ispkg in pkgutil.iter_modules( [ _algorithms_path() ] ): - if package_name in pylist: - module = importlib.import_module(path.script_subdir_name() + '.' + package_name) - module.usage(base_parser, subparsers) - initlist.extend(package_name) - app.debug('Initialised algorithms: ' + str(initlist)) - - - -def get_module(name): #pylint: disable=unused-variable - from mrtrix3 import path #pylint: disable=import-outside-toplevel - return sys.modules[path.script_subdir_name() + '.' 
+ name] diff --git a/python/lib/mrtrix3/dwi2mask/__init__.py b/python/lib/mrtrix3/dwi2mask/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/python/lib/mrtrix3/dwi2response/__init__.py b/python/lib/mrtrix3/dwi2response/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/python/lib/mrtrix3/dwibiascorrect/__init__.py b/python/lib/mrtrix3/dwibiascorrect/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/python/lib/mrtrix3/dwinormalise/__init__.py b/python/lib/mrtrix3/dwinormalise/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/python/lib/mrtrix3/matrix.py b/python/matrix.py similarity index 100% rename from python/lib/mrtrix3/matrix.py rename to python/matrix.py diff --git a/python/lib/mrtrix3/path.py b/python/path.py similarity index 90% rename from python/lib/mrtrix3/path.py rename to python/path.py index 485730b4b7..96b73e36e0 100644 --- a/python/lib/mrtrix3/path.py +++ b/python/path.py @@ -116,31 +116,9 @@ def name_temporary(suffix): #pylint: disable=unused-variable -# Determine the name of a sub-directory containing additional data / source files for a script -# This can be algorithm files in lib/mrtrix3/, or data files in share/mrtrix3/ -# This function appears here rather than in the algorithm module as some scripts may -# need to access the shared data directory but not actually be using the algorithm module -def script_subdir_name(): #pylint: disable=unused-variable - from mrtrix3 import app #pylint: disable=import-outside-toplevel - frameinfo = inspect.stack()[-1] - try: - frame = frameinfo.frame - except AttributeError: # Prior to Version 3.5 - frame = frameinfo[0] - # If the script has been run through a softlink, we need the name of the original - # script in order to locate the additional data - name = os.path.basename(os.path.realpath(inspect.getfile(frame))) - if not name[0].isalpha(): - name = '_' + name - app.debug(name) - return name - - - # Find data in the relevant directory # Some scripts come with additional requisite data files; this function makes it easy to find them. -# For data that is stored in a named sub-directory specifically for a particular script, this function will -# need to be used in conjunction with scriptSubDirName() +# TODO Perhaps this should be looking relative to the executable rather than the library API file? 
def shared_data_path(): #pylint: disable=unused-variable from mrtrix3 import app #pylint: disable=import-outside-toplevel result = os.path.realpath(os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, 'share', 'mrtrix3'))) diff --git a/python/lib/mrtrix3/phaseencoding.py b/python/phaseencoding.py similarity index 100% rename from python/lib/mrtrix3/phaseencoding.py rename to python/phaseencoding.py diff --git a/python/lib/mrtrix3/run.py b/python/run.py similarity index 100% rename from python/lib/mrtrix3/run.py rename to python/run.py diff --git a/python/bin/5ttgen b/python/scripts/5ttgen/__init__.py old mode 100755 new mode 100644 similarity index 94% rename from python/bin/5ttgen rename to python/scripts/5ttgen/__init__.py index 8fc341f1f3..f15a63ae0e --- a/python/bin/5ttgen +++ b/python/scripts/5ttgen/__init__.py @@ -39,7 +39,7 @@ def execute(): #pylint: disable=unused-variable from mrtrix3 import algorithm, app, run #pylint: disable=no-name-in-module, import-outside-toplevel # Find out which algorithm the user has requested - alg = algorithm.get_module(app.ARGS.algorithm) + alg = algorithm.get(app.ARGS.algorithm) alg.check_output_paths() @@ -54,9 +54,3 @@ def execute(): #pylint: disable=unused-variable app.warn('Generated image does not perfectly conform to 5TT format:') for line in stderr.splitlines(): app.warn(line) - - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/lib/mrtrix3/_5ttgen/freesurfer.py b/python/scripts/5ttgen/freesurfer.py similarity index 100% rename from python/lib/mrtrix3/_5ttgen/freesurfer.py rename to python/scripts/5ttgen/freesurfer.py diff --git a/python/lib/mrtrix3/_5ttgen/fsl.py b/python/scripts/5ttgen/fsl.py similarity index 100% rename from python/lib/mrtrix3/_5ttgen/fsl.py rename to python/scripts/5ttgen/fsl.py diff --git a/python/lib/mrtrix3/_5ttgen/gif.py b/python/scripts/5ttgen/gif.py similarity index 100% rename from python/lib/mrtrix3/_5ttgen/gif.py rename to python/scripts/5ttgen/gif.py diff --git a/python/lib/mrtrix3/_5ttgen/hsvs.py b/python/scripts/5ttgen/hsvs.py similarity index 100% rename from python/lib/mrtrix3/_5ttgen/hsvs.py rename to python/scripts/5ttgen/hsvs.py diff --git a/python/bin/blend b/python/scripts/blend.py old mode 100755 new mode 100644 similarity index 100% rename from python/bin/blend rename to python/scripts/blend.py diff --git a/python/bin/convert_bruker b/python/scripts/convert_bruker.py old mode 100755 new mode 100644 similarity index 100% rename from python/bin/convert_bruker rename to python/scripts/convert_bruker.py diff --git a/python/lib/mrtrix3/dwi2mask/3dautomask.py b/python/scripts/dwi2mask/3dautomask.py similarity index 100% rename from python/lib/mrtrix3/dwi2mask/3dautomask.py rename to python/scripts/dwi2mask/3dautomask.py diff --git a/python/bin/dwi2mask b/python/scripts/dwi2mask/__init__.py old mode 100755 new mode 100644 similarity index 96% rename from python/bin/dwi2mask rename to python/scripts/dwi2mask/__init__.py index 80a8996a06..4806ebcc74 --- a/python/bin/dwi2mask +++ b/python/scripts/dwi2mask/__init__.py @@ -41,7 +41,7 @@ def execute(): #pylint: disable=unused-variable from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel # Find out which algorithm the user has requested - alg = algorithm.get_module(app.ARGS.algorithm) + alg = algorithm.get(app.ARGS.algorithm) app.check_output_path(app.ARGS.output) @@ 
-98,9 +98,3 @@ def execute(): #pylint: disable=unused-variable + ' -datatype bit', mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) - - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/lib/mrtrix3/dwi2mask/ants.py b/python/scripts/dwi2mask/ants.py similarity index 100% rename from python/lib/mrtrix3/dwi2mask/ants.py rename to python/scripts/dwi2mask/ants.py diff --git a/python/lib/mrtrix3/dwi2mask/b02template.py b/python/scripts/dwi2mask/b02template.py similarity index 100% rename from python/lib/mrtrix3/dwi2mask/b02template.py rename to python/scripts/dwi2mask/b02template.py diff --git a/python/lib/mrtrix3/dwi2mask/consensus.py b/python/scripts/dwi2mask/consensus.py similarity index 100% rename from python/lib/mrtrix3/dwi2mask/consensus.py rename to python/scripts/dwi2mask/consensus.py diff --git a/python/lib/mrtrix3/dwi2mask/fslbet.py b/python/scripts/dwi2mask/fslbet.py similarity index 100% rename from python/lib/mrtrix3/dwi2mask/fslbet.py rename to python/scripts/dwi2mask/fslbet.py diff --git a/python/lib/mrtrix3/dwi2mask/hdbet.py b/python/scripts/dwi2mask/hdbet.py similarity index 100% rename from python/lib/mrtrix3/dwi2mask/hdbet.py rename to python/scripts/dwi2mask/hdbet.py diff --git a/python/lib/mrtrix3/dwi2mask/legacy.py b/python/scripts/dwi2mask/legacy.py similarity index 100% rename from python/lib/mrtrix3/dwi2mask/legacy.py rename to python/scripts/dwi2mask/legacy.py diff --git a/python/lib/mrtrix3/dwi2mask/mean.py b/python/scripts/dwi2mask/mean.py similarity index 100% rename from python/lib/mrtrix3/dwi2mask/mean.py rename to python/scripts/dwi2mask/mean.py diff --git a/python/lib/mrtrix3/dwi2mask/mtnorm.py b/python/scripts/dwi2mask/mtnorm.py similarity index 100% rename from python/lib/mrtrix3/dwi2mask/mtnorm.py rename to python/scripts/dwi2mask/mtnorm.py diff --git a/python/lib/mrtrix3/dwi2mask/synthstrip.py b/python/scripts/dwi2mask/synthstrip.py similarity index 100% rename from python/lib/mrtrix3/dwi2mask/synthstrip.py rename to python/scripts/dwi2mask/synthstrip.py diff --git a/python/lib/mrtrix3/dwi2mask/trace.py b/python/scripts/dwi2mask/trace.py similarity index 100% rename from python/lib/mrtrix3/dwi2mask/trace.py rename to python/scripts/dwi2mask/trace.py diff --git a/python/bin/dwi2response b/python/scripts/dwi2response/__init__.py old mode 100755 new mode 100644 similarity index 97% rename from python/bin/dwi2response rename to python/scripts/dwi2response/__init__.py index 8461a66378..3ae0310db4 --- a/python/bin/dwi2response +++ b/python/scripts/dwi2response/__init__.py @@ -52,7 +52,7 @@ def execute(): #pylint: disable=unused-variable from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel # Find out which algorithm the user has requested - alg = algorithm.get_module(app.ARGS.algorithm) + alg = algorithm.get(app.ARGS.algorithm) # Check for prior existence of output files, and grab any input files, used by the particular algorithm if app.ARGS.voxels: @@ -120,12 +120,3 @@ def execute(): #pylint: disable=unused-variable # From here, the script splits depending on what estimation algorithm is being used alg.execute() - - - - - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/lib/mrtrix3/dwi2response/dhollander.py b/python/scripts/dwi2response/dhollander.py similarity index 100% rename from 
python/lib/mrtrix3/dwi2response/dhollander.py rename to python/scripts/dwi2response/dhollander.py diff --git a/python/lib/mrtrix3/dwi2response/fa.py b/python/scripts/dwi2response/fa.py similarity index 100% rename from python/lib/mrtrix3/dwi2response/fa.py rename to python/scripts/dwi2response/fa.py diff --git a/python/lib/mrtrix3/dwi2response/manual.py b/python/scripts/dwi2response/manual.py similarity index 100% rename from python/lib/mrtrix3/dwi2response/manual.py rename to python/scripts/dwi2response/manual.py diff --git a/python/lib/mrtrix3/dwi2response/msmt_5tt.py b/python/scripts/dwi2response/msmt_5tt.py similarity index 100% rename from python/lib/mrtrix3/dwi2response/msmt_5tt.py rename to python/scripts/dwi2response/msmt_5tt.py diff --git a/python/lib/mrtrix3/dwi2response/tax.py b/python/scripts/dwi2response/tax.py similarity index 100% rename from python/lib/mrtrix3/dwi2response/tax.py rename to python/scripts/dwi2response/tax.py diff --git a/python/lib/mrtrix3/dwi2response/tournier.py b/python/scripts/dwi2response/tournier.py similarity index 100% rename from python/lib/mrtrix3/dwi2response/tournier.py rename to python/scripts/dwi2response/tournier.py diff --git a/python/bin/dwibiascorrect b/python/scripts/dwibiascorrect/__init__.py old mode 100755 new mode 100644 similarity index 95% rename from python/bin/dwibiascorrect rename to python/scripts/dwibiascorrect/__init__.py index 64387392b9..92e0700938 --- a/python/bin/dwibiascorrect +++ b/python/scripts/dwibiascorrect/__init__.py @@ -40,7 +40,7 @@ def execute(): #pylint: disable=unused-variable from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel # Find out which algorithm the user has requested - alg = algorithm.get_module(app.ARGS.algorithm) + alg = algorithm.get(app.ARGS.algorithm) app.check_output_path(app.ARGS.output) app.check_output_path(app.ARGS.bias) @@ -75,11 +75,3 @@ def execute(): #pylint: disable=unused-variable # From here, the script splits depending on what estimation algorithm is being used alg.execute() - - - - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/lib/mrtrix3/dwibiascorrect/ants.py b/python/scripts/dwibiascorrect/ants.py similarity index 100% rename from python/lib/mrtrix3/dwibiascorrect/ants.py rename to python/scripts/dwibiascorrect/ants.py diff --git a/python/lib/mrtrix3/dwibiascorrect/fsl.py b/python/scripts/dwibiascorrect/fsl.py similarity index 100% rename from python/lib/mrtrix3/dwibiascorrect/fsl.py rename to python/scripts/dwibiascorrect/fsl.py diff --git a/python/lib/mrtrix3/dwibiascorrect/mtnorm.py b/python/scripts/dwibiascorrect/mtnorm.py similarity index 100% rename from python/lib/mrtrix3/dwibiascorrect/mtnorm.py rename to python/scripts/dwibiascorrect/mtnorm.py diff --git a/python/bin/dwibiasnormmask b/python/scripts/dwibiasnormmask.py old mode 100755 new mode 100644 similarity index 99% rename from python/bin/dwibiasnormmask rename to python/scripts/dwibiasnormmask.py index d3a18e6dcd..2263081f43 --- a/python/bin/dwibiasnormmask +++ b/python/scripts/dwibiasnormmask.py @@ -454,9 +454,3 @@ def msg(): run.command(['mrconvert', tissue_sum_image, path.from_user(app.ARGS.output_tissuesum, False)], mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) - - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/bin/dwicat 
b/python/scripts/dwicat.py old mode 100755 new mode 100644 similarity index 98% rename from python/bin/dwicat rename to python/scripts/dwicat.py index 22a135e71c..0c648fd3f3 --- a/python/bin/dwicat +++ b/python/scripts/dwicat.py @@ -150,10 +150,3 @@ def check_header(header): json.dump(keyval, output_json_file) run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval='result_final.json', force=app.FORCE_OVERWRITE) - - - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/bin/dwifslpreproc b/python/scripts/dwifslpreproc.py old mode 100755 new mode 100644 similarity index 99% rename from python/bin/dwifslpreproc rename to python/scripts/dwifslpreproc.py index cd6f7f6d7f..9a9d55789b --- a/python/bin/dwifslpreproc +++ b/python/scripts/dwifslpreproc.py @@ -1405,12 +1405,3 @@ def scheme_times_match(one, two): # Finish! run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output) + grad_export_option, mrconvert_keyval='output.json', force=app.FORCE_OVERWRITE) - - - - - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/bin/dwigradcheck b/python/scripts/dwigradcheck.py old mode 100755 new mode 100644 similarity index 98% rename from python/bin/dwigradcheck rename to python/scripts/dwigradcheck.py index 91161664ae..1c9b9f6656 --- a/python/bin/dwigradcheck +++ b/python/scripts/dwigradcheck.py @@ -204,8 +204,3 @@ def execute(): #pylint: disable=unused-variable elif best[3] == 'image': grad_import_option = ' -fslgrad bvecs' + suffix + ' bvals' run.command('mrinfo data.mif' + grad_import_option + grad_export_option, force=app.FORCE_OVERWRITE) - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/bin/dwinormalise b/python/scripts/dwinormalise/__init__.py old mode 100755 new mode 100644 similarity index 91% rename from python/bin/dwinormalise rename to python/scripts/dwinormalise/__init__.py index e838a18508..7c7f91deed --- a/python/bin/dwinormalise +++ b/python/scripts/dwinormalise/__init__.py @@ -33,16 +33,8 @@ def execute(): #pylint: disable=unused-variable from mrtrix3 import algorithm, app #pylint: disable=no-name-in-module, import-outside-toplevel # Find out which algorithm the user has requested - alg = algorithm.get_module(app.ARGS.algorithm) + alg = algorithm.get(app.ARGS.algorithm) alg.check_output_paths() # From here, the script splits depending on what algorithm is being used alg.execute() - - - - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/lib/mrtrix3/dwinormalise/group.py b/python/scripts/dwinormalise/group.py similarity index 100% rename from python/lib/mrtrix3/dwinormalise/group.py rename to python/scripts/dwinormalise/group.py diff --git a/python/lib/mrtrix3/dwinormalise/manual.py b/python/scripts/dwinormalise/manual.py similarity index 100% rename from python/lib/mrtrix3/dwinormalise/manual.py rename to python/scripts/dwinormalise/manual.py diff --git a/python/lib/mrtrix3/dwinormalise/mtnorm.py b/python/scripts/dwinormalise/mtnorm.py similarity index 100% rename from python/lib/mrtrix3/dwinormalise/mtnorm.py rename to python/scripts/dwinormalise/mtnorm.py diff --git a/python/bin/dwishellmath b/python/scripts/dwishellmath.py old mode 100755 new mode 100644 similarity index 
96% rename from python/bin/dwishellmath rename to python/scripts/dwishellmath.py index 30ce230c01..92ff77317d --- a/python/bin/dwishellmath +++ b/python/scripts/dwishellmath.py @@ -63,8 +63,3 @@ def execute(): #pylint: disable=unused-variable # make a 4D image with one volume app.warn('Only one unique b-value present in DWI data; command mrmath with -axis 3 option may be preferable') run.command('mrconvert ' + files[0] + ' ' + path.from_user(app.ARGS.output) + ' -axes 0,1,2,-1', mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/bin/for_each b/python/scripts/for_each.py old mode 100755 new mode 100644 similarity index 99% rename from python/bin/for_each rename to python/scripts/for_each.py index d0b4c8d893..0f07b06704 --- a/python/bin/for_each +++ b/python/scripts/for_each.py @@ -297,12 +297,3 @@ def execute_parallel(): app.console('No output from command for any inputs') app.console('Script reported successful completion for all inputs') - - - - - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/bin/gen_scheme b/python/scripts/gen_scheme.py old mode 100755 new mode 100644 similarity index 100% rename from python/bin/gen_scheme rename to python/scripts/gen_scheme.py diff --git a/python/bin/labelsgmfix b/python/scripts/labelsgmfix.py old mode 100755 new mode 100644 similarity index 98% rename from python/bin/labelsgmfix rename to python/scripts/labelsgmfix.py index 0b55bd24ef..17b8470501 --- a/python/bin/labelsgmfix +++ b/python/scripts/labelsgmfix.py @@ -165,11 +165,3 @@ def execute(): #pylint: disable=unused-variable # Enforce unsigned integer datatype of output image run.command('mrcalc sgm_new_labels.mif 0.5 -gt sgm_new_labels.mif parc.mif -if result.mif -datatype uint32') run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.parc, False), force=app.FORCE_OVERWRITE) - - - - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/bin/mask2glass b/python/scripts/mask2glass.py old mode 100755 new mode 100644 similarity index 97% rename from python/bin/mask2glass rename to python/scripts/mask2glass.py index bcc9b9ce51..3b89038138 --- a/python/bin/mask2glass +++ b/python/scripts/mask2glass.py @@ -77,7 +77,3 @@ def execute(): #pylint: disable=unused-variable run.command('mrconvert out.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/bin/mrtrix_cleanup b/python/scripts/mrtrix_cleanup.py old mode 100755 new mode 100644 similarity index 98% rename from python/bin/mrtrix_cleanup rename to python/scripts/mrtrix_cleanup.py index 4b938a1cc9..cefc59c8c3 --- a/python/bin/mrtrix_cleanup +++ b/python/scripts/mrtrix_cleanup.py @@ -132,9 +132,3 @@ def print_freed(): app.console('All items deleted successfully' + print_freed()) else: app.console('No files or directories found') - - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/bin/notfound b/python/scripts/notfound.py old mode 
100755 new mode 100644 similarity index 100% rename from python/bin/notfound rename to python/scripts/notfound.py diff --git a/python/bin/population_template b/python/scripts/population_template.py old mode 100755 new mode 100644 similarity index 99% rename from python/bin/population_template rename to python/scripts/population_template.py index 0095ef4c44..a4e8b4c612 --- a/python/bin/population_template +++ b/python/scripts/population_template.py @@ -1483,9 +1483,3 @@ def nonlinear_msg(): if app.ARGS.template_mask: run.command('mrconvert ' + current_template_mask + ' ' + path.from_user(app.ARGS.template_mask, True), mrconvert_keyval='NULL', force=app.FORCE_OVERWRITE) - - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/bin/responsemean b/python/scripts/responsemean.py old mode 100755 new mode 100644 similarity index 97% rename from python/bin/responsemean rename to python/scripts/responsemean.py index 115e7557d9..9e8e847dcf --- a/python/bin/responsemean +++ b/python/scripts/responsemean.py @@ -76,10 +76,3 @@ def execute(): #pylint: disable=unused-variable mean_coeffs = [ [ f/len(data) for f in line ] for line in weighted_sum_coeffs ] matrix.save_matrix(app.ARGS.output, mean_coeffs, force=app.FORCE_OVERWRITE) - - - - -# Execute the script -import mrtrix3 #pylint: disable=wrong-import-position -mrtrix3.execute() #pylint: disable=no-member diff --git a/python/lib/mrtrix3/sh.py b/python/sh.py similarity index 100% rename from python/lib/mrtrix3/sh.py rename to python/sh.py diff --git a/python/lib/mrtrix3/utils.py b/python/utils.py similarity index 100% rename from python/lib/mrtrix3/utils.py rename to python/utils.py diff --git a/share/mrtrix3/_5ttgen/FreeSurfer2ACT.txt b/share/mrtrix3/5ttgen/FreeSurfer2ACT.txt similarity index 100% rename from share/mrtrix3/_5ttgen/FreeSurfer2ACT.txt rename to share/mrtrix3/5ttgen/FreeSurfer2ACT.txt diff --git a/share/mrtrix3/_5ttgen/FreeSurfer2ACT_sgm_amyg_hipp.txt b/share/mrtrix3/5ttgen/FreeSurfer2ACT_sgm_amyg_hipp.txt similarity index 100% rename from share/mrtrix3/_5ttgen/FreeSurfer2ACT_sgm_amyg_hipp.txt rename to share/mrtrix3/5ttgen/FreeSurfer2ACT_sgm_amyg_hipp.txt diff --git a/share/mrtrix3/_5ttgen/hsvs/AmygSubfields.txt b/share/mrtrix3/5ttgen/hsvs/AmygSubfields.txt similarity index 100% rename from share/mrtrix3/_5ttgen/hsvs/AmygSubfields.txt rename to share/mrtrix3/5ttgen/hsvs/AmygSubfields.txt diff --git a/share/mrtrix3/_5ttgen/hsvs/HippSubfields.txt b/share/mrtrix3/5ttgen/hsvs/HippSubfields.txt similarity index 100% rename from share/mrtrix3/_5ttgen/hsvs/HippSubfields.txt rename to share/mrtrix3/5ttgen/hsvs/HippSubfields.txt From 48b648c874317238e14531799c42475a2c7e31b3 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Sat, 28 Oct 2023 21:30:41 +1100 Subject: [PATCH 02/11] cmake: Updates for Python restructure proposal --- CMakeLists.txt | 2 + python/CMakeLists.txt | 108 ++++++++++++++++++++++++++++++------------ 2 files changed, 80 insertions(+), 30 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 841f322a13..743a2cd615 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -59,6 +59,8 @@ include(FindFFTW) include(CompilerCache) include(ECMEnableSanitizers) +find_package(Python3 COMPONENTS Interpreter) + use_compiler_cache() if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/.git AND NOT EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/.git/hooks/pre-commit) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 46187b89c6..758d63f614 100644 --- 
a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,68 +1,116 @@ -set(PYTHON_VERSION_FILE ${CMAKE_CURRENT_SOURCE_DIR}/lib/mrtrix3/_version.py) +set(PYTHON_VERSION_FILE ${CMAKE_CURRENT_SOURCE_DIR}/_version.py) + +set(CMAKE_INSTALL_SRCDIR ${CMAKE_INSTALL_LIBDIR}/../src/) find_package(Git QUIET) -file(GLOB_RECURSE PYTHON_BIN_FILES - ${CMAKE_CURRENT_SOURCE_DIR}/bin/* +file(GLOB PYTHON_LIB_FILES + ${CMAKE_CURRENT_SOURCE_DIR}/*.py ) -file(GLOB_RECURSE PYTHON_LIB_FILES - ${CMAKE_CURRENT_SOURCE_DIR}/lib/* +file(GLOB_RECURSE PYTHON_SRC_FILES + ${CMAKE_CURRENT_SOURCE_DIR}/scripts/*.py ) -add_custom_target(Python SOURCES - ${PYTHON_BIN_FILES} +file(GLOB PYTHON_COMMANDS + ${CMAKE_CURRENT_SOURCE_DIR}/scripts/* ) # We generate the version file at configure time, # so tools like Pylint can run without building the project execute_process( - COMMAND ${CMAKE_COMMAND} + COMMAND ${CMAKE_COMMAND} -D GIT_EXECUTABLE=${GIT_EXECUTABLE} -D MRTRIX_BASE_VERSION=${MRTRIX_BASE_VERSION} -D DST=${PYTHON_VERSION_FILE} - -D SRC=${CMAKE_CURRENT_SOURCE_DIR}/lib/mrtrix3/_version.py.in + -D SRC=${CMAKE_CURRENT_SOURCE_DIR}/_version.py.in -P ${PROJECT_SOURCE_DIR}/cmake/FindVersion.cmake WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} ) add_custom_target(CopyPythonFiles ALL) -set(PYTHON_BUILD_BIN_FILES "") - -foreach(BIN_FILE ${PYTHON_BIN_FILES}) - get_filename_component(BIN_FILE_NAME ${BIN_FILE} NAME) - set(DST_BIN_FILE ${PROJECT_BINARY_DIR}/bin/${BIN_FILE_NAME}) +set(PYTHON_BUILD_LIB_FILES "") +set(PYTHON_BUILD_SRC_FILES "") +foreach(LIB_FILE ${PYTHON_LIB_FILES}) + get_filename_component(LIB_FILE_NAME ${LIB_FILE} NAME) + set(DST_LIB_FILE ${PROJECT_BINARY_DIR}/lib/mrtrix3/${LIB_FILE_NAME}) add_custom_command( TARGET CopyPythonFiles - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${BIN_FILE} ${DST_BIN_FILE} - DEPENDS ${BIN_FILE} + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${LIB_FILE} ${DST_LIB_FILE} + DEPENDS ${LIB_FILE} ) - list(APPEND PYTHON_BUILD_BIN_FILES ${DST_BIN_FILE}) + list(APPEND PYTHON_BUILD_LIB_FILES ${DST_LIB_FILE}) +endforeach() +foreach(SRC_FILE ${PYTHON_SRC_FILES}) + file(RELATIVE_PATH SRC_RELPATH ${CMAKE_CURRENT_SOURCE_DIR}/scripts ${SRC_FILE}) + # Modify path to 5ttgen, since it's not a valid Python module name + if(SRC_RELPATH MATCHES "^[0-9].*$") + set(SRC_RELPATH _${SRC_RELPATH}) + endif() + set(DST_SRC_FILE ${PROJECT_BINARY_DIR}/src/mrtrix3/${SRC_RELPATH}) + add_custom_command( + TARGET CopyPythonFiles + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${SRC_FILE} ${DST_SRC_FILE} + DEPENDS ${SRC_FILE} + ) + list(APPEND PYTHON_BUILD_SRC_FILES ${DST_SRC_FILE}) +endforeach() +foreach(CMDNAME ${PYTHON_COMMANDS}) + # Strip .py extension + # TODO What will happen to module directory names? 
+ get_filename_component(BINNAME ${CMDNAME} NAME_WE) + set(MODULENAME ${BINNAME}) + if(MODULENAME MATCHES "^[0-9].*$") + set(MODULENAME _${MODULENAME}) + endif() + if(CMDNAME MATCHES "^.*\.py$") + set(SPECPATH "'${MODULENAME}.py'") + else() + set(SPECPATH "'${MODULENAME}', '__init__.py'") + endif() + set(BINPATH ${PROJECT_BINARY_DIR}/python/${BINNAME}) + file(WRITE ${BINPATH} "#!${Python3_EXECUTABLE}\n") + file(APPEND ${BINPATH} "# -*- coding: utf-8 -*-\n") + file(APPEND ${BINPATH} "import importlib.util\n") + file(APPEND ${BINPATH} "import os\n") + file(APPEND ${BINPATH} "import sys\n") + file(APPEND ${BINPATH} "\n") + file(APPEND ${BINPATH} "api_location = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'lib', 'mrtrix3', '__init__.py'))\n") + file(APPEND ${BINPATH} "api_spec = importlib.util.spec_from_file_location('mrtrix3', api_location)\n") + file(APPEND ${BINPATH} "api_module = importlib.util.module_from_spec(api_spec)\n") + file(APPEND ${BINPATH} "sys.modules['mrtrix3'] = api_module\n") + file(APPEND ${BINPATH} "api_spec.loader.exec_module(api_module)\n") + file(APPEND ${BINPATH} "\n") + file(APPEND ${BINPATH} "src_spec = importlib.util.spec_from_file_location('${MODULENAME}',\n") + file(APPEND ${BINPATH} " os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'src', 'mrtrix3', ${SPECPATH})))\n") + file(APPEND ${BINPATH} "src_module = importlib.util.module_from_spec(src_spec)\n") + file(APPEND ${BINPATH} "sys.modules[src_spec.name] = src_module\n") + file(APPEND ${BINPATH} "src_spec.loader.exec_module(src_module)\n") + file(APPEND ${BINPATH} "\n") + file(APPEND ${BINPATH} "from mrtrix3.app import _execute\n") + file(APPEND ${BINPATH} "import ${MODULENAME}\n") + file(APPEND ${BINPATH} "_execute(${MODULENAME})\n") + file(COPY ${BINPATH} DESTINATION ${PROJECT_BINARY_DIR}/bin + FILE_PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_EXECUTE GROUP_WRITE GROUP_READ WORLD_EXECUTE WORLD_READ) endforeach() -add_custom_command( - TARGET CopyPythonFiles - COMMAND ${CMAKE_COMMAND} -E copy_directory - "${CMAKE_CURRENT_SOURCE_DIR}/lib" "${PROJECT_BINARY_DIR}/lib" -) - -set_target_properties(CopyPythonFiles +set_target_properties(CopyPythonFiles PROPERTIES ADDITIONAL_CLEAN_FILES - "${PYTHON_BUILD_BIN_FILES};${PROJECT_BINARY_DIR}/lib" + "${PYTHON_BUILD_LIB_FILES};${PROJECT_BINARY_DIR}/lib;${PYTHON_BUILD_SRC_FILES};${PROJECT_BINARY_DIR}/src;${PROJECT_BINARY_DIR}/python" ) -install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/bin/ - DESTINATION ${CMAKE_INSTALL_BINDIR} +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/lib/ + DESTINATION ${CMAKE_INSTALL_LIBDIR} USE_SOURCE_PERMISSIONS PATTERN "__pycache__" EXCLUDE + PATTERN "*.py.in" EXCLUDE PATTERN ".pyc" EXCLUDE ) -install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/lib/ - DESTINATION ${CMAKE_INSTALL_LIBDIR} +install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/src/ + DESTINATION ${CMAKE_INSTALL_SRCDIR} USE_SOURCE_PERMISSIONS PATTERN "__pycache__" EXCLUDE - PATTERN "*.py.in" EXCLUDE PATTERN ".pyc" EXCLUDE ) From 9e75d884f89d45443beca7e3b722a34217d04ff0 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Sun, 29 Oct 2023 20:40:34 +1100 Subject: [PATCH 03/11] cmake: Update installation for Python changes --- python/CMakeLists.txt | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 758d63f614..0ce4a48a9b 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -8,11 +8,11 @@ file(GLOB PYTHON_LIB_FILES 
${CMAKE_CURRENT_SOURCE_DIR}/*.py ) -file(GLOB_RECURSE PYTHON_SRC_FILES +file(GLOB_RECURSE PYTHON_ALL_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/scripts/*.py ) -file(GLOB PYTHON_COMMANDS +file(GLOB PYTHON_ALL_COMMANDS ${CMAKE_CURRENT_SOURCE_DIR}/scripts/* ) @@ -28,6 +28,7 @@ execute_process( WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} ) +# TODO Attempt making symlinks during build; see if possible to then copy during install add_custom_target(CopyPythonFiles ALL) set(PYTHON_BUILD_LIB_FILES "") set(PYTHON_BUILD_SRC_FILES "") @@ -41,7 +42,7 @@ foreach(LIB_FILE ${PYTHON_LIB_FILES}) ) list(APPEND PYTHON_BUILD_LIB_FILES ${DST_LIB_FILE}) endforeach() -foreach(SRC_FILE ${PYTHON_SRC_FILES}) +foreach(SRC_FILE ${PYTHON_ALL_SRC_FILES}) file(RELATIVE_PATH SRC_RELPATH ${CMAKE_CURRENT_SOURCE_DIR}/scripts ${SRC_FILE}) # Modify path to 5ttgen, since it's not a valid Python module name if(SRC_RELPATH MATCHES "^[0-9].*$") @@ -53,11 +54,13 @@ foreach(SRC_FILE ${PYTHON_SRC_FILES}) COMMAND ${CMAKE_COMMAND} -E copy_if_different ${SRC_FILE} ${DST_SRC_FILE} DEPENDS ${SRC_FILE} ) + get_filename_component(SRC_RELPATH_DIRECTORY ${SRC_RELPATH} DIRECTORY) + install(FILES ${SRC_FILE} DESTINATION ${CMAKE_INSTALL_SRCDIR}/mrtrix3/${SRC_RELPATH_DIRECTORY}) list(APPEND PYTHON_BUILD_SRC_FILES ${DST_SRC_FILE}) endforeach() -foreach(CMDNAME ${PYTHON_COMMANDS}) +set(PYTHON_BIN_FILES "") +foreach(CMDNAME ${PYTHON_ALL_COMMANDS}) # Strip .py extension - # TODO What will happen to module directory names? get_filename_component(BINNAME ${CMDNAME} NAME_WE) set(MODULENAME ${BINNAME}) if(MODULENAME MATCHES "^[0-9].*$") @@ -92,6 +95,7 @@ foreach(CMDNAME ${PYTHON_COMMANDS}) file(APPEND ${BINPATH} "_execute(${MODULENAME})\n") file(COPY ${BINPATH} DESTINATION ${PROJECT_BINARY_DIR}/bin FILE_PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_EXECUTE GROUP_WRITE GROUP_READ WORLD_EXECUTE WORLD_READ) + list(APPEND PYTHON_BIN_FILES ${PROJECT_BINARY_DIR}/bin/${BINNAME}) endforeach() set_target_properties(CopyPythonFiles @@ -99,19 +103,13 @@ set_target_properties(CopyPythonFiles "${PYTHON_BUILD_LIB_FILES};${PROJECT_BINARY_DIR}/lib;${PYTHON_BUILD_SRC_FILES};${PROJECT_BINARY_DIR}/src;${PROJECT_BINARY_DIR}/python" ) -install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/lib/ - DESTINATION ${CMAKE_INSTALL_LIBDIR} - USE_SOURCE_PERMISSIONS - PATTERN "__pycache__" EXCLUDE - PATTERN "*.py.in" EXCLUDE - PATTERN ".pyc" EXCLUDE +install(FILES ${PYTHON_BIN_FILES} + PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_EXECUTE GROUP_WRITE GROUP_READ WORLD_EXECUTE WORLD_READ + DESTINATION ${CMAKE_INSTALL_BINDIR} ) -install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/src/ - DESTINATION ${CMAKE_INSTALL_SRCDIR} - USE_SOURCE_PERMISSIONS - PATTERN "__pycache__" EXCLUDE - PATTERN ".pyc" EXCLUDE +install(FILES ${PYTHON_LIB_FILES} + DESTINATION ${CMAKE_INSTALL_LIBDIR}/mrtrix3 ) install(FILES ${PYTHON_VERSION_FILE} From d39459076ee92c4c5ee15b086f0bbad71b0adfd3 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Sun, 29 Oct 2023 22:24:15 +1100 Subject: [PATCH 04/11] cmake: Use softlinks to Python files in build directory --- python/CMakeLists.txt | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 0ce4a48a9b..fc4571145d 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -28,30 +28,49 @@ execute_process( WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} ) -# TODO Attempt making symlinks during build; see if possible to then copy during install 
-add_custom_target(CopyPythonFiles ALL) +add_custom_target(LinkPythonFiles ALL) +add_custom_command( + TARGET LinkPythonFiles + COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/lib/mrtrix3 +) +add_custom_command( + TARGET LinkPythonFiles + COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/src/mrtrix3 +) +foreach(SUBDIR ${PYTHON_ALL_COMMANDS}) + if(IS_DIRECTORY ${SUBDIR}) + get_filename_component(CMDNAME ${SUBDIR} NAME) + set(MODULENAME ${CMDNAME}) + if(MODULENAME MATCHES "^[0-9].*$") + set(MODULENAME _${MODULENAME}) + endif() + add_custom_command( + TARGET LinkPythonFiles + COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/src/mrtrix3/${MODULENAME} + ) + endif() +endforeach() set(PYTHON_BUILD_LIB_FILES "") set(PYTHON_BUILD_SRC_FILES "") foreach(LIB_FILE ${PYTHON_LIB_FILES}) get_filename_component(LIB_FILE_NAME ${LIB_FILE} NAME) set(DST_LIB_FILE ${PROJECT_BINARY_DIR}/lib/mrtrix3/${LIB_FILE_NAME}) add_custom_command( - TARGET CopyPythonFiles - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${LIB_FILE} ${DST_LIB_FILE} + TARGET LinkPythonFiles + COMMAND ${CMAKE_COMMAND} -E create_symlink ${LIB_FILE} ${DST_LIB_FILE} DEPENDS ${LIB_FILE} ) list(APPEND PYTHON_BUILD_LIB_FILES ${DST_LIB_FILE}) endforeach() foreach(SRC_FILE ${PYTHON_ALL_SRC_FILES}) file(RELATIVE_PATH SRC_RELPATH ${CMAKE_CURRENT_SOURCE_DIR}/scripts ${SRC_FILE}) - # Modify path to 5ttgen, since it's not a valid Python module name if(SRC_RELPATH MATCHES "^[0-9].*$") set(SRC_RELPATH _${SRC_RELPATH}) endif() set(DST_SRC_FILE ${PROJECT_BINARY_DIR}/src/mrtrix3/${SRC_RELPATH}) add_custom_command( - TARGET CopyPythonFiles - COMMAND ${CMAKE_COMMAND} -E copy_if_different ${SRC_FILE} ${DST_SRC_FILE} + TARGET LinkPythonFiles + COMMAND ${CMAKE_COMMAND} -E create_symlink ${SRC_FILE} ${DST_SRC_FILE} DEPENDS ${SRC_FILE} ) get_filename_component(SRC_RELPATH_DIRECTORY ${SRC_RELPATH} DIRECTORY) @@ -98,7 +117,7 @@ foreach(CMDNAME ${PYTHON_ALL_COMMANDS}) list(APPEND PYTHON_BIN_FILES ${PROJECT_BINARY_DIR}/bin/${BINNAME}) endforeach() -set_target_properties(CopyPythonFiles +set_target_properties(LinkPythonFiles PROPERTIES ADDITIONAL_CLEAN_FILES "${PYTHON_BUILD_LIB_FILES};${PROJECT_BINARY_DIR}/lib;${PYTHON_BUILD_SRC_FILES};${PROJECT_BINARY_DIR}/src;${PROJECT_BINARY_DIR}/python" ) From 72ee94f37066dc10b14c4cc099f9ff69fbab16cd Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 31 Oct 2023 22:15:01 +1100 Subject: [PATCH 05/11] Python: Remove erroneous shebangs --- python/scripts/5ttgen/__init__.py | 2 -- python/scripts/blend.py | 2 -- python/scripts/convert_bruker.py | 2 -- python/scripts/dwi2mask/__init__.py | 2 -- python/scripts/dwi2response/__init__.py | 2 -- python/scripts/dwibiascorrect/__init__.py | 2 -- python/scripts/dwibiasnormmask.py | 2 -- python/scripts/dwicat.py | 2 -- python/scripts/dwifslpreproc.py | 2 -- python/scripts/dwigradcheck.py | 2 -- python/scripts/dwinormalise/__init__.py | 2 -- python/scripts/dwishellmath.py | 2 -- python/scripts/for_each.py | 2 -- python/scripts/gen_scheme.py | 2 -- python/scripts/labelsgmfix.py | 2 -- python/scripts/mask2glass.py | 2 -- python/scripts/mrtrix_cleanup.py | 2 -- python/scripts/notfound.py | 2 -- python/scripts/population_template.py | 2 -- python/scripts/responsemean.py | 2 -- 20 files changed, 40 deletions(-) diff --git a/python/scripts/5ttgen/__init__.py b/python/scripts/5ttgen/__init__.py index f15a63ae0e..10938536b7 100644 --- a/python/scripts/5ttgen/__init__.py +++ b/python/scripts/5ttgen/__init__.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright 
(c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/blend.py b/python/scripts/blend.py index ebe03f07cd..e722b2d24c 100644 --- a/python/scripts/blend.py +++ b/python/scripts/blend.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/convert_bruker.py b/python/scripts/convert_bruker.py index 59fae16c51..624e94edae 100644 --- a/python/scripts/convert_bruker.py +++ b/python/scripts/convert_bruker.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/dwi2mask/__init__.py b/python/scripts/dwi2mask/__init__.py index 4806ebcc74..9692f45dc7 100644 --- a/python/scripts/dwi2mask/__init__.py +++ b/python/scripts/dwi2mask/__init__.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/dwi2response/__init__.py b/python/scripts/dwi2response/__init__.py index 3ae0310db4..768914190d 100644 --- a/python/scripts/dwi2response/__init__.py +++ b/python/scripts/dwi2response/__init__.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/dwibiascorrect/__init__.py b/python/scripts/dwibiascorrect/__init__.py index 92e0700938..aec62396a4 100644 --- a/python/scripts/dwibiascorrect/__init__.py +++ b/python/scripts/dwibiascorrect/__init__.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/dwibiasnormmask.py b/python/scripts/dwibiasnormmask.py index 2263081f43..5b559a067e 100644 --- a/python/scripts/dwibiasnormmask.py +++ b/python/scripts/dwibiasnormmask.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/dwicat.py b/python/scripts/dwicat.py index 0c648fd3f3..039a1d0bd2 100644 --- a/python/scripts/dwicat.py +++ b/python/scripts/dwicat.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/dwifslpreproc.py b/python/scripts/dwifslpreproc.py index 9a9d55789b..cb944f7ea3 100644 --- a/python/scripts/dwifslpreproc.py +++ b/python/scripts/dwifslpreproc.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/dwigradcheck.py b/python/scripts/dwigradcheck.py index 1c9b9f6656..0deacf64e6 100644 --- a/python/scripts/dwigradcheck.py +++ b/python/scripts/dwigradcheck.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. 
# # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/dwinormalise/__init__.py b/python/scripts/dwinormalise/__init__.py index 7c7f91deed..0225cdde37 100644 --- a/python/scripts/dwinormalise/__init__.py +++ b/python/scripts/dwinormalise/__init__.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/dwishellmath.py b/python/scripts/dwishellmath.py index 92ff77317d..49d0922ebf 100644 --- a/python/scripts/dwishellmath.py +++ b/python/scripts/dwishellmath.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/for_each.py b/python/scripts/for_each.py index 0f07b06704..294b70aae5 100644 --- a/python/scripts/for_each.py +++ b/python/scripts/for_each.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/gen_scheme.py b/python/scripts/gen_scheme.py index 897a8f27a1..0ffe18cfd5 100644 --- a/python/scripts/gen_scheme.py +++ b/python/scripts/gen_scheme.py @@ -1,5 +1,3 @@ -#!/bin/bash - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/labelsgmfix.py b/python/scripts/labelsgmfix.py index 17b8470501..f2d1b63dc0 100644 --- a/python/scripts/labelsgmfix.py +++ b/python/scripts/labelsgmfix.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/mask2glass.py b/python/scripts/mask2glass.py index 3b89038138..13ddbae8fd 100644 --- a/python/scripts/mask2glass.py +++ b/python/scripts/mask2glass.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/mrtrix_cleanup.py b/python/scripts/mrtrix_cleanup.py index cefc59c8c3..0f8282f1a0 100644 --- a/python/scripts/mrtrix_cleanup.py +++ b/python/scripts/mrtrix_cleanup.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/notfound.py b/python/scripts/notfound.py index d52003924c..828163e110 100644 --- a/python/scripts/notfound.py +++ b/python/scripts/notfound.py @@ -1,5 +1,3 @@ -#!/bin/bash - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/population_template.py b/python/scripts/population_template.py index a4e8b4c612..5d35ec27b6 100644 --- a/python/scripts/population_template.py +++ b/python/scripts/population_template.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. # # This Source Code Form is subject to the terms of the Mozilla Public diff --git a/python/scripts/responsemean.py b/python/scripts/responsemean.py index 9e8e847dcf..a3fb17e43d 100644 --- a/python/scripts/responsemean.py +++ b/python/scripts/responsemean.py @@ -1,5 +1,3 @@ -#!/usr/bin/python3 - # Copyright (c) 2008-2023 the MRtrix3 contributors. 
# # This Source Code Form is subject to the terms of the Mozilla Public From d71efa66ac63aaa2ce3163e71cb48ea5372f508c Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 6 Mar 2024 19:04:57 +1100 Subject: [PATCH 06/11] Python: Experimenting with alternative filesystem structure - Do not split between "lib" and "src": All Python content goes into lib/mrtrix3/. - Rather than having some executable scripts living as standalone .py files and others as sub-directories (to deal with separate algorithm files), place all files that are not part of the API into sub-directory trees. For now, all files have been renamed to __init__.py. --- python/CMakeLists.txt | 136 +-------------- .../{scripts => mrtrix3}/5ttgen/__init__.py | 0 .../5ttgen/freesurfer/__init__.py} | 0 .../fsl.py => mrtrix3/5ttgen/fsl/__init__.py} | 0 .../gif.py => mrtrix3/5ttgen/gif/__init__.py} | 0 .../5ttgen/hsvs/__init__.py} | 0 python/mrtrix3/CMakeLists.txt | 158 ++++++++++++++++++ python/{ => mrtrix3}/__init__.py | 0 python/{ => mrtrix3}/_version.py.in | 0 python/{ => mrtrix3}/algorithm.py | 0 python/{ => mrtrix3}/app.py | 0 .../blend.py => mrtrix3/blend/__init__.py} | 0 .../convert_bruker/__init__.py} | 0 .../dwi2mask/3dautomask/__init__.py} | 0 .../{scripts => mrtrix3}/dwi2mask/__init__.py | 0 .../dwi2mask/ants/__init__.py} | 0 .../dwi2mask/b02template/__init__.py} | 0 .../dwi2mask/consensus/__init__.py} | 0 .../dwi2mask/fslbet/__init__.py} | 0 .../dwi2mask/hdbet/__init__.py} | 0 .../dwi2mask/legacy/__init__.py} | 0 .../dwi2mask/mean/__init__.py} | 0 .../dwi2mask/mtnorm/__init__.py} | 0 .../dwi2mask/synthstrip/__init__.py} | 0 .../dwi2mask/trace/__init__.py} | 0 .../dwi2response/__init__.py | 0 .../dwi2response/dhollander/__init__.py} | 0 .../dwi2response/fa/__init__.py} | 0 .../dwi2response/manual/__init__.py} | 0 .../dwi2response/msmt_5tt/__init__.py} | 0 .../dwi2response/tax/__init__.py} | 0 .../dwi2response/tournier/__init__.py} | 0 .../dwibiascorrect/__init__.py | 0 .../dwibiascorrect/ants/__init__.py} | 0 .../dwibiascorrect/fsl/__init__.py} | 0 .../dwibiascorrect/mtnorm/__init__.py} | 0 .../dwibiasnormmask/__init__.py} | 0 .../dwicat.py => mrtrix3/dwicat/__init__.py} | 0 .../dwifslpreproc/__init__.py} | 0 .../dwigradcheck/__init__.py} | 0 .../dwinormalise/__init__.py | 0 .../dwinormalise/group/__init__.py} | 0 .../dwinormalise/manual/__init__.py} | 0 .../dwinormalise/mtnorm/__init__.py} | 0 .../dwishellmath/__init__.py} | 0 .../for_each/__init__.py} | 0 python/{ => mrtrix3}/fsl.py | 0 .../gen_scheme/__init__.py} | 0 python/{ => mrtrix3}/image.py | 0 .../labelsgmfix/__init__.py} | 0 .../mask2glass/__init__.py} | 0 python/{ => mrtrix3}/matrix.py | 0 .../mrtrix_cleanup/__init__.py} | 0 .../notfound/__init__.py} | 0 python/{ => mrtrix3}/path.py | 0 python/{ => mrtrix3}/phaseencoding.py | 0 .../population_template/__init__.py} | 0 .../responsemean/__init__.py} | 0 python/{ => mrtrix3}/run.py | 0 python/{ => mrtrix3}/sh.py | 0 python/{ => mrtrix3}/utils.py | 0 61 files changed, 159 insertions(+), 135 deletions(-) rename python/{scripts => mrtrix3}/5ttgen/__init__.py (100%) rename python/{scripts/5ttgen/freesurfer.py => mrtrix3/5ttgen/freesurfer/__init__.py} (100%) rename python/{scripts/5ttgen/fsl.py => mrtrix3/5ttgen/fsl/__init__.py} (100%) rename python/{scripts/5ttgen/gif.py => mrtrix3/5ttgen/gif/__init__.py} (100%) rename python/{scripts/5ttgen/hsvs.py => mrtrix3/5ttgen/hsvs/__init__.py} (100%) create mode 100644 python/mrtrix3/CMakeLists.txt rename python/{ => mrtrix3}/__init__.py (100%) rename python/{ => 
mrtrix3}/_version.py.in (100%) rename python/{ => mrtrix3}/algorithm.py (100%) rename python/{ => mrtrix3}/app.py (100%) rename python/{scripts/blend.py => mrtrix3/blend/__init__.py} (100%) rename python/{scripts/convert_bruker.py => mrtrix3/convert_bruker/__init__.py} (100%) rename python/{scripts/dwi2mask/3dautomask.py => mrtrix3/dwi2mask/3dautomask/__init__.py} (100%) rename python/{scripts => mrtrix3}/dwi2mask/__init__.py (100%) rename python/{scripts/dwi2mask/ants.py => mrtrix3/dwi2mask/ants/__init__.py} (100%) rename python/{scripts/dwi2mask/b02template.py => mrtrix3/dwi2mask/b02template/__init__.py} (100%) rename python/{scripts/dwi2mask/consensus.py => mrtrix3/dwi2mask/consensus/__init__.py} (100%) rename python/{scripts/dwi2mask/fslbet.py => mrtrix3/dwi2mask/fslbet/__init__.py} (100%) rename python/{scripts/dwi2mask/hdbet.py => mrtrix3/dwi2mask/hdbet/__init__.py} (100%) rename python/{scripts/dwi2mask/legacy.py => mrtrix3/dwi2mask/legacy/__init__.py} (100%) rename python/{scripts/dwi2mask/mean.py => mrtrix3/dwi2mask/mean/__init__.py} (100%) rename python/{scripts/dwi2mask/mtnorm.py => mrtrix3/dwi2mask/mtnorm/__init__.py} (100%) rename python/{scripts/dwi2mask/synthstrip.py => mrtrix3/dwi2mask/synthstrip/__init__.py} (100%) rename python/{scripts/dwi2mask/trace.py => mrtrix3/dwi2mask/trace/__init__.py} (100%) rename python/{scripts => mrtrix3}/dwi2response/__init__.py (100%) rename python/{scripts/dwi2response/dhollander.py => mrtrix3/dwi2response/dhollander/__init__.py} (100%) rename python/{scripts/dwi2response/fa.py => mrtrix3/dwi2response/fa/__init__.py} (100%) rename python/{scripts/dwi2response/manual.py => mrtrix3/dwi2response/manual/__init__.py} (100%) rename python/{scripts/dwi2response/msmt_5tt.py => mrtrix3/dwi2response/msmt_5tt/__init__.py} (100%) rename python/{scripts/dwi2response/tax.py => mrtrix3/dwi2response/tax/__init__.py} (100%) rename python/{scripts/dwi2response/tournier.py => mrtrix3/dwi2response/tournier/__init__.py} (100%) rename python/{scripts => mrtrix3}/dwibiascorrect/__init__.py (100%) rename python/{scripts/dwibiascorrect/ants.py => mrtrix3/dwibiascorrect/ants/__init__.py} (100%) rename python/{scripts/dwibiascorrect/fsl.py => mrtrix3/dwibiascorrect/fsl/__init__.py} (100%) rename python/{scripts/dwibiascorrect/mtnorm.py => mrtrix3/dwibiascorrect/mtnorm/__init__.py} (100%) rename python/{scripts/dwibiasnormmask.py => mrtrix3/dwibiasnormmask/__init__.py} (100%) rename python/{scripts/dwicat.py => mrtrix3/dwicat/__init__.py} (100%) rename python/{scripts/dwifslpreproc.py => mrtrix3/dwifslpreproc/__init__.py} (100%) rename python/{scripts/dwigradcheck.py => mrtrix3/dwigradcheck/__init__.py} (100%) rename python/{scripts => mrtrix3}/dwinormalise/__init__.py (100%) rename python/{scripts/dwinormalise/group.py => mrtrix3/dwinormalise/group/__init__.py} (100%) rename python/{scripts/dwinormalise/manual.py => mrtrix3/dwinormalise/manual/__init__.py} (100%) rename python/{scripts/dwinormalise/mtnorm.py => mrtrix3/dwinormalise/mtnorm/__init__.py} (100%) rename python/{scripts/dwishellmath.py => mrtrix3/dwishellmath/__init__.py} (100%) rename python/{scripts/for_each.py => mrtrix3/for_each/__init__.py} (100%) rename python/{ => mrtrix3}/fsl.py (100%) rename python/{scripts/gen_scheme.py => mrtrix3/gen_scheme/__init__.py} (100%) rename python/{ => mrtrix3}/image.py (100%) rename python/{scripts/labelsgmfix.py => mrtrix3/labelsgmfix/__init__.py} (100%) rename python/{scripts/mask2glass.py => mrtrix3/mask2glass/__init__.py} (100%) rename python/{ => 
mrtrix3}/matrix.py (100%) rename python/{scripts/mrtrix_cleanup.py => mrtrix3/mrtrix_cleanup/__init__.py} (100%) rename python/{scripts/notfound.py => mrtrix3/notfound/__init__.py} (100%) rename python/{ => mrtrix3}/path.py (100%) rename python/{ => mrtrix3}/phaseencoding.py (100%) rename python/{scripts/population_template.py => mrtrix3/population_template/__init__.py} (100%) rename python/{scripts/responsemean.py => mrtrix3/responsemean/__init__.py} (100%) rename python/{ => mrtrix3}/run.py (100%) rename python/{ => mrtrix3}/sh.py (100%) rename python/{ => mrtrix3}/utils.py (100%) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index fc4571145d..bf439217e3 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,136 +1,2 @@ -set(PYTHON_VERSION_FILE ${CMAKE_CURRENT_SOURCE_DIR}/_version.py) +add_subdirectory(mrtrix3) -set(CMAKE_INSTALL_SRCDIR ${CMAKE_INSTALL_LIBDIR}/../src/) - -find_package(Git QUIET) - -file(GLOB PYTHON_LIB_FILES - ${CMAKE_CURRENT_SOURCE_DIR}/*.py -) - -file(GLOB_RECURSE PYTHON_ALL_SRC_FILES - ${CMAKE_CURRENT_SOURCE_DIR}/scripts/*.py -) - -file(GLOB PYTHON_ALL_COMMANDS - ${CMAKE_CURRENT_SOURCE_DIR}/scripts/* -) - -# We generate the version file at configure time, -# so tools like Pylint can run without building the project -execute_process( - COMMAND ${CMAKE_COMMAND} - -D GIT_EXECUTABLE=${GIT_EXECUTABLE} - -D MRTRIX_BASE_VERSION=${MRTRIX_BASE_VERSION} - -D DST=${PYTHON_VERSION_FILE} - -D SRC=${CMAKE_CURRENT_SOURCE_DIR}/_version.py.in - -P ${PROJECT_SOURCE_DIR}/cmake/FindVersion.cmake - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} -) - -add_custom_target(LinkPythonFiles ALL) -add_custom_command( - TARGET LinkPythonFiles - COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/lib/mrtrix3 -) -add_custom_command( - TARGET LinkPythonFiles - COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/src/mrtrix3 -) -foreach(SUBDIR ${PYTHON_ALL_COMMANDS}) - if(IS_DIRECTORY ${SUBDIR}) - get_filename_component(CMDNAME ${SUBDIR} NAME) - set(MODULENAME ${CMDNAME}) - if(MODULENAME MATCHES "^[0-9].*$") - set(MODULENAME _${MODULENAME}) - endif() - add_custom_command( - TARGET LinkPythonFiles - COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/src/mrtrix3/${MODULENAME} - ) - endif() -endforeach() -set(PYTHON_BUILD_LIB_FILES "") -set(PYTHON_BUILD_SRC_FILES "") -foreach(LIB_FILE ${PYTHON_LIB_FILES}) - get_filename_component(LIB_FILE_NAME ${LIB_FILE} NAME) - set(DST_LIB_FILE ${PROJECT_BINARY_DIR}/lib/mrtrix3/${LIB_FILE_NAME}) - add_custom_command( - TARGET LinkPythonFiles - COMMAND ${CMAKE_COMMAND} -E create_symlink ${LIB_FILE} ${DST_LIB_FILE} - DEPENDS ${LIB_FILE} - ) - list(APPEND PYTHON_BUILD_LIB_FILES ${DST_LIB_FILE}) -endforeach() -foreach(SRC_FILE ${PYTHON_ALL_SRC_FILES}) - file(RELATIVE_PATH SRC_RELPATH ${CMAKE_CURRENT_SOURCE_DIR}/scripts ${SRC_FILE}) - if(SRC_RELPATH MATCHES "^[0-9].*$") - set(SRC_RELPATH _${SRC_RELPATH}) - endif() - set(DST_SRC_FILE ${PROJECT_BINARY_DIR}/src/mrtrix3/${SRC_RELPATH}) - add_custom_command( - TARGET LinkPythonFiles - COMMAND ${CMAKE_COMMAND} -E create_symlink ${SRC_FILE} ${DST_SRC_FILE} - DEPENDS ${SRC_FILE} - ) - get_filename_component(SRC_RELPATH_DIRECTORY ${SRC_RELPATH} DIRECTORY) - install(FILES ${SRC_FILE} DESTINATION ${CMAKE_INSTALL_SRCDIR}/mrtrix3/${SRC_RELPATH_DIRECTORY}) - list(APPEND PYTHON_BUILD_SRC_FILES ${DST_SRC_FILE}) -endforeach() -set(PYTHON_BIN_FILES "") -foreach(CMDNAME ${PYTHON_ALL_COMMANDS}) - # Strip .py extension - get_filename_component(BINNAME ${CMDNAME} NAME_WE) - 
set(MODULENAME ${BINNAME}) - if(MODULENAME MATCHES "^[0-9].*$") - set(MODULENAME _${MODULENAME}) - endif() - if(CMDNAME MATCHES "^.*\.py$") - set(SPECPATH "'${MODULENAME}.py'") - else() - set(SPECPATH "'${MODULENAME}', '__init__.py'") - endif() - set(BINPATH ${PROJECT_BINARY_DIR}/python/${BINNAME}) - file(WRITE ${BINPATH} "#!${Python3_EXECUTABLE}\n") - file(APPEND ${BINPATH} "# -*- coding: utf-8 -*-\n") - file(APPEND ${BINPATH} "import importlib.util\n") - file(APPEND ${BINPATH} "import os\n") - file(APPEND ${BINPATH} "import sys\n") - file(APPEND ${BINPATH} "\n") - file(APPEND ${BINPATH} "api_location = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'lib', 'mrtrix3', '__init__.py'))\n") - file(APPEND ${BINPATH} "api_spec = importlib.util.spec_from_file_location('mrtrix3', api_location)\n") - file(APPEND ${BINPATH} "api_module = importlib.util.module_from_spec(api_spec)\n") - file(APPEND ${BINPATH} "sys.modules['mrtrix3'] = api_module\n") - file(APPEND ${BINPATH} "api_spec.loader.exec_module(api_module)\n") - file(APPEND ${BINPATH} "\n") - file(APPEND ${BINPATH} "src_spec = importlib.util.spec_from_file_location('${MODULENAME}',\n") - file(APPEND ${BINPATH} " os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'src', 'mrtrix3', ${SPECPATH})))\n") - file(APPEND ${BINPATH} "src_module = importlib.util.module_from_spec(src_spec)\n") - file(APPEND ${BINPATH} "sys.modules[src_spec.name] = src_module\n") - file(APPEND ${BINPATH} "src_spec.loader.exec_module(src_module)\n") - file(APPEND ${BINPATH} "\n") - file(APPEND ${BINPATH} "from mrtrix3.app import _execute\n") - file(APPEND ${BINPATH} "import ${MODULENAME}\n") - file(APPEND ${BINPATH} "_execute(${MODULENAME})\n") - file(COPY ${BINPATH} DESTINATION ${PROJECT_BINARY_DIR}/bin - FILE_PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_EXECUTE GROUP_WRITE GROUP_READ WORLD_EXECUTE WORLD_READ) - list(APPEND PYTHON_BIN_FILES ${PROJECT_BINARY_DIR}/bin/${BINNAME}) -endforeach() - -set_target_properties(LinkPythonFiles - PROPERTIES ADDITIONAL_CLEAN_FILES - "${PYTHON_BUILD_LIB_FILES};${PROJECT_BINARY_DIR}/lib;${PYTHON_BUILD_SRC_FILES};${PROJECT_BINARY_DIR}/src;${PROJECT_BINARY_DIR}/python" -) - -install(FILES ${PYTHON_BIN_FILES} - PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_EXECUTE GROUP_WRITE GROUP_READ WORLD_EXECUTE WORLD_READ - DESTINATION ${CMAKE_INSTALL_BINDIR} -) - -install(FILES ${PYTHON_LIB_FILES} - DESTINATION ${CMAKE_INSTALL_LIBDIR}/mrtrix3 -) - -install(FILES ${PYTHON_VERSION_FILE} - DESTINATION ${CMAKE_INSTALL_LIBDIR}/mrtrix3 -) diff --git a/python/scripts/5ttgen/__init__.py b/python/mrtrix3/5ttgen/__init__.py similarity index 100% rename from python/scripts/5ttgen/__init__.py rename to python/mrtrix3/5ttgen/__init__.py diff --git a/python/scripts/5ttgen/freesurfer.py b/python/mrtrix3/5ttgen/freesurfer/__init__.py similarity index 100% rename from python/scripts/5ttgen/freesurfer.py rename to python/mrtrix3/5ttgen/freesurfer/__init__.py diff --git a/python/scripts/5ttgen/fsl.py b/python/mrtrix3/5ttgen/fsl/__init__.py similarity index 100% rename from python/scripts/5ttgen/fsl.py rename to python/mrtrix3/5ttgen/fsl/__init__.py diff --git a/python/scripts/5ttgen/gif.py b/python/mrtrix3/5ttgen/gif/__init__.py similarity index 100% rename from python/scripts/5ttgen/gif.py rename to python/mrtrix3/5ttgen/gif/__init__.py diff --git a/python/scripts/5ttgen/hsvs.py b/python/mrtrix3/5ttgen/hsvs/__init__.py similarity index 100% rename from python/scripts/5ttgen/hsvs.py 
rename to python/mrtrix3/5ttgen/hsvs/__init__.py diff --git a/python/mrtrix3/CMakeLists.txt b/python/mrtrix3/CMakeLists.txt new file mode 100644 index 0000000000..809738a8d9 --- /dev/null +++ b/python/mrtrix3/CMakeLists.txt @@ -0,0 +1,158 @@ +set(PYTHON_VERSION_FILE ${CMAKE_CURRENT_SOURCE_DIR}/_version.py) + +find_package(Git QUIET) + +file(GLOB PYTHON_LIB_FILES + ${CMAKE_CURRENT_SOURCE_DIR}/*.py +) + +file(GLOB PYTHON_ROOT_ENTRIES + ${CMAKE_CURRENT_SOURCE_DIR}/* +) + +# TODO LIST_DIRECTORIES seems to not be having an effect here... +file(GLOB_RECURSE PYTHON_SRC_PATHS + ${CMAKE_CURRENT_SOURCE_DIR}/*/* +) + +message(STATUS "PYTHON_LIB_FILES=${PYTHON_LIB_FILES}") +message(STATUS "PYTHON_SRC_PATHS=${PYTHON_SRC_PATHS}") + +# TODO Test to see if this can be done with glob exclusion +set(PYTHON_COMMAND_LIST "") +foreach(PYTHON_PATH ${PYTHON_ROOT_ENTRIES}) + if(IS_DIRECTORY ${PYTHON_PATH}) + get_filename_component(CMDNAME ${PYTHON_PATH} NAME) + list(APPEND PYTHON_COMMAND_LIST ${CMDNAME}) + endif() +endforeach() + +message(STATUS "PYTHON_COMMAND_LIST=${PYTHON_COMMAND_LIST}") + +# We generate the version file at configure time, +# so tools like Pylint can run without building the project +execute_process( + COMMAND ${CMAKE_COMMAND} + -D GIT_EXECUTABLE=${GIT_EXECUTABLE} + -D MRTRIX_BASE_VERSION=${MRTRIX_BASE_VERSION} + -D DST=${PYTHON_VERSION_FILE} + -D SRC=${CMAKE_CURRENT_SOURCE_DIR}/_version.py.in + -P ${PROJECT_SOURCE_DIR}/cmake/FindVersion.cmake + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} +) + +add_custom_target(LinkPythonFiles ALL) +add_custom_command( + TARGET LinkPythonFiles + COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/lib/mrtrix3 +) + +set(PYTHON_BUILD_FILES "") + +foreach(PYTHON_LIB_FILE ${PYTHON_LIB_FILES}) + get_filename_component(LIB_FILE_NAME ${PYTHON_LIB_FILE} NAME) + set(DST_LIB_FILE ${PROJECT_BINARY_DIR}/lib/mrtrix3/${LIB_FILE_NAME}) + add_custom_command( + TARGET LinkPythonFiles + COMMAND ${CMAKE_COMMAND} -E create_symlink ${PYTHON_LIB_FILE} ${DST_LIB_FILE} + DEPENDS ${LIB_FILE} + ) + list(APPEND PYTHON_BUILD_FILES ${DST_LIB_FILE}) +endforeach() + +# Have to append commands to create all directories +# before commands to symlink files can appear +set(PYTHON_DST_SUBDIRS "") +foreach(PYTHON_SRC_PATH ${PYTHON_SRC_PATHS}) + file(RELATIVE_PATH DST_PATH ${CMAKE_CURRENT_SOURCE_DIR} ${PYTHON_SRC_PATH}) + # TODO Ideally would perform this check for every child directory + if(DST_PATH MATCHES "^[0-9].*$") + set(DST_PATH _${DST_PATH}) + endif() + set(DST_PATH ${PROJECT_BINARY_DIR}/lib/mrtrix3/${DST_PATH}) + get_filename_component(FILENAME ${PYTHON_SRC_PATH} NAME) + if(${FILENAME} STREQUAL "__init__.py") + get_filename_component(DST_PARENTDIR ${DST_PATH} DIRECTORY) + list(APPEND PYTHON_DST_SUBDIRS ${DST_PARENTDIR}) + endif() +endforeach() + +message(STATUS "PYTHON_DST_SUBDIRS=${PYTHON_DST_SUBDIRS}") + +foreach(PYTHON_DST_SUBDIR ${PYTHON_DST_SUBDIRS}) + add_custom_command( + TARGET LinkPythonFiles + COMMAND ${CMAKE_COMMAND} -E make_directory ${PYTHON_DST_SUBDIR} + ) +endforeach() + +foreach(PYTHON_SRC_PATH ${PYTHON_SRC_PATHS}) + file(RELATIVE_PATH DST_RELPATH ${CMAKE_CURRENT_SOURCE_DIR} ${PYTHON_SRC_PATH}) + # TODO Ideally would perform this check for every child directory + if(DST_RELPATH MATCHES "^[0-9].*$") + set(DST_RELPATH _${DST_RELPATH}) + endif() + set(DST_BUILDPATH ${PROJECT_BINARY_DIR}/lib/mrtrix3/${DST_RELPATH}) + add_custom_command( + TARGET LinkPythonFiles + COMMAND ${CMAKE_COMMAND} -E create_symlink ${PYTHON_SRC_PATH} ${DST_BUILDPATH} + DEPENDS 
${PYTHON_SRC_PATH} + ) + set(DST_INSTALLPATH ${CMAKE_INSTALL_LIBDIR}/mrtrix3/${DST_RELPATH}) + install(FILES ${PYTHON_SRC_PATH} DESTINATION ${DST_INSTALLPATH}) + list(APPEND PYTHON_BUILD_FILES ${DST_PATH}) +endforeach() + +set(PYTHON_BIN_FILES "") +foreach(CMDNAME ${PYTHON_COMMAND_LIST}) + # Strip .py extension + get_filename_component(BINNAME ${CMDNAME} NAME_WE) + set(MODULENAME ${BINNAME}) + if(MODULENAME MATCHES "^[0-9].*$") + set(MODULENAME _${MODULENAME}) + endif() + set(SPECPATH "'${MODULENAME}', '__init__.py'") + set(BINPATH "${PROJECT_BINARY_DIR}/temporary/python/${BINNAME}") + file(WRITE ${BINPATH} "#!${Python3_EXECUTABLE}\n") + file(APPEND ${BINPATH} "# -*- coding: utf-8 -*-\n") + file(APPEND ${BINPATH} "import importlib.util\n") + file(APPEND ${BINPATH} "import os\n") + file(APPEND ${BINPATH} "import sys\n") + file(APPEND ${BINPATH} "\n") + file(APPEND ${BINPATH} "api_location = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'lib', 'mrtrix3', '__init__.py'))\n") + file(APPEND ${BINPATH} "api_spec = importlib.util.spec_from_file_location('mrtrix3', api_location)\n") + file(APPEND ${BINPATH} "api_module = importlib.util.module_from_spec(api_spec)\n") + file(APPEND ${BINPATH} "sys.modules['mrtrix3'] = api_module\n") + file(APPEND ${BINPATH} "api_spec.loader.exec_module(api_module)\n") + file(APPEND ${BINPATH} "\n") + file(APPEND ${BINPATH} "src_spec = importlib.util.spec_from_file_location('${MODULENAME}',\n") + file(APPEND ${BINPATH} " os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'lib', 'mrtrix3', ${SPECPATH})))\n") + file(APPEND ${BINPATH} "src_module = importlib.util.module_from_spec(src_spec)\n") + file(APPEND ${BINPATH} "sys.modules[src_spec.name] = src_module\n") + file(APPEND ${BINPATH} "src_spec.loader.exec_module(src_module)\n") + file(APPEND ${BINPATH} "\n") + file(APPEND ${BINPATH} "from mrtrix3.app import _execute\n") + file(APPEND ${BINPATH} "import ${MODULENAME}\n") + file(APPEND ${BINPATH} "_execute(${MODULENAME})\n") + file(COPY ${BINPATH} DESTINATION ${PROJECT_BINARY_DIR}/bin + FILE_PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_EXECUTE GROUP_WRITE GROUP_READ WORLD_EXECUTE WORLD_READ) + list(APPEND PYTHON_BIN_FILES ${PROJECT_BINARY_DIR}/bin/${BINNAME}) +endforeach() + +set_target_properties(LinkPythonFiles + PROPERTIES ADDITIONAL_CLEAN_FILES + "${PYTHON_BUILD_FILES};${PROJECT_BINARY_DIR}/lib;${PROJECT_BINARY_DIR}/temporary/python" +) + +install(FILES ${PYTHON_BIN_FILES} + PERMISSIONS OWNER_EXECUTE GROUP_EXECUTE WORLD_EXECUTE + DESTINATION ${CMAKE_INSTALL_BINDIR} +) + +install(FILES ${PYTHON_LIB_FILES} + DESTINATION ${CMAKE_INSTALL_LIBDIR}/mrtrix3 +) + +install(FILES ${PYTHON_VERSION_FILE} + DESTINATION ${CMAKE_INSTALL_LIBDIR}/mrtrix3 +) diff --git a/python/__init__.py b/python/mrtrix3/__init__.py similarity index 100% rename from python/__init__.py rename to python/mrtrix3/__init__.py diff --git a/python/_version.py.in b/python/mrtrix3/_version.py.in similarity index 100% rename from python/_version.py.in rename to python/mrtrix3/_version.py.in diff --git a/python/algorithm.py b/python/mrtrix3/algorithm.py similarity index 100% rename from python/algorithm.py rename to python/mrtrix3/algorithm.py diff --git a/python/app.py b/python/mrtrix3/app.py similarity index 100% rename from python/app.py rename to python/mrtrix3/app.py diff --git a/python/scripts/blend.py b/python/mrtrix3/blend/__init__.py similarity index 100% rename from python/scripts/blend.py rename to 
python/mrtrix3/blend/__init__.py diff --git a/python/scripts/convert_bruker.py b/python/mrtrix3/convert_bruker/__init__.py similarity index 100% rename from python/scripts/convert_bruker.py rename to python/mrtrix3/convert_bruker/__init__.py diff --git a/python/scripts/dwi2mask/3dautomask.py b/python/mrtrix3/dwi2mask/3dautomask/__init__.py similarity index 100% rename from python/scripts/dwi2mask/3dautomask.py rename to python/mrtrix3/dwi2mask/3dautomask/__init__.py diff --git a/python/scripts/dwi2mask/__init__.py b/python/mrtrix3/dwi2mask/__init__.py similarity index 100% rename from python/scripts/dwi2mask/__init__.py rename to python/mrtrix3/dwi2mask/__init__.py diff --git a/python/scripts/dwi2mask/ants.py b/python/mrtrix3/dwi2mask/ants/__init__.py similarity index 100% rename from python/scripts/dwi2mask/ants.py rename to python/mrtrix3/dwi2mask/ants/__init__.py diff --git a/python/scripts/dwi2mask/b02template.py b/python/mrtrix3/dwi2mask/b02template/__init__.py similarity index 100% rename from python/scripts/dwi2mask/b02template.py rename to python/mrtrix3/dwi2mask/b02template/__init__.py diff --git a/python/scripts/dwi2mask/consensus.py b/python/mrtrix3/dwi2mask/consensus/__init__.py similarity index 100% rename from python/scripts/dwi2mask/consensus.py rename to python/mrtrix3/dwi2mask/consensus/__init__.py diff --git a/python/scripts/dwi2mask/fslbet.py b/python/mrtrix3/dwi2mask/fslbet/__init__.py similarity index 100% rename from python/scripts/dwi2mask/fslbet.py rename to python/mrtrix3/dwi2mask/fslbet/__init__.py diff --git a/python/scripts/dwi2mask/hdbet.py b/python/mrtrix3/dwi2mask/hdbet/__init__.py similarity index 100% rename from python/scripts/dwi2mask/hdbet.py rename to python/mrtrix3/dwi2mask/hdbet/__init__.py diff --git a/python/scripts/dwi2mask/legacy.py b/python/mrtrix3/dwi2mask/legacy/__init__.py similarity index 100% rename from python/scripts/dwi2mask/legacy.py rename to python/mrtrix3/dwi2mask/legacy/__init__.py diff --git a/python/scripts/dwi2mask/mean.py b/python/mrtrix3/dwi2mask/mean/__init__.py similarity index 100% rename from python/scripts/dwi2mask/mean.py rename to python/mrtrix3/dwi2mask/mean/__init__.py diff --git a/python/scripts/dwi2mask/mtnorm.py b/python/mrtrix3/dwi2mask/mtnorm/__init__.py similarity index 100% rename from python/scripts/dwi2mask/mtnorm.py rename to python/mrtrix3/dwi2mask/mtnorm/__init__.py diff --git a/python/scripts/dwi2mask/synthstrip.py b/python/mrtrix3/dwi2mask/synthstrip/__init__.py similarity index 100% rename from python/scripts/dwi2mask/synthstrip.py rename to python/mrtrix3/dwi2mask/synthstrip/__init__.py diff --git a/python/scripts/dwi2mask/trace.py b/python/mrtrix3/dwi2mask/trace/__init__.py similarity index 100% rename from python/scripts/dwi2mask/trace.py rename to python/mrtrix3/dwi2mask/trace/__init__.py diff --git a/python/scripts/dwi2response/__init__.py b/python/mrtrix3/dwi2response/__init__.py similarity index 100% rename from python/scripts/dwi2response/__init__.py rename to python/mrtrix3/dwi2response/__init__.py diff --git a/python/scripts/dwi2response/dhollander.py b/python/mrtrix3/dwi2response/dhollander/__init__.py similarity index 100% rename from python/scripts/dwi2response/dhollander.py rename to python/mrtrix3/dwi2response/dhollander/__init__.py diff --git a/python/scripts/dwi2response/fa.py b/python/mrtrix3/dwi2response/fa/__init__.py similarity index 100% rename from python/scripts/dwi2response/fa.py rename to python/mrtrix3/dwi2response/fa/__init__.py diff --git 
a/python/scripts/dwi2response/manual.py b/python/mrtrix3/dwi2response/manual/__init__.py similarity index 100% rename from python/scripts/dwi2response/manual.py rename to python/mrtrix3/dwi2response/manual/__init__.py diff --git a/python/scripts/dwi2response/msmt_5tt.py b/python/mrtrix3/dwi2response/msmt_5tt/__init__.py similarity index 100% rename from python/scripts/dwi2response/msmt_5tt.py rename to python/mrtrix3/dwi2response/msmt_5tt/__init__.py diff --git a/python/scripts/dwi2response/tax.py b/python/mrtrix3/dwi2response/tax/__init__.py similarity index 100% rename from python/scripts/dwi2response/tax.py rename to python/mrtrix3/dwi2response/tax/__init__.py diff --git a/python/scripts/dwi2response/tournier.py b/python/mrtrix3/dwi2response/tournier/__init__.py similarity index 100% rename from python/scripts/dwi2response/tournier.py rename to python/mrtrix3/dwi2response/tournier/__init__.py diff --git a/python/scripts/dwibiascorrect/__init__.py b/python/mrtrix3/dwibiascorrect/__init__.py similarity index 100% rename from python/scripts/dwibiascorrect/__init__.py rename to python/mrtrix3/dwibiascorrect/__init__.py diff --git a/python/scripts/dwibiascorrect/ants.py b/python/mrtrix3/dwibiascorrect/ants/__init__.py similarity index 100% rename from python/scripts/dwibiascorrect/ants.py rename to python/mrtrix3/dwibiascorrect/ants/__init__.py diff --git a/python/scripts/dwibiascorrect/fsl.py b/python/mrtrix3/dwibiascorrect/fsl/__init__.py similarity index 100% rename from python/scripts/dwibiascorrect/fsl.py rename to python/mrtrix3/dwibiascorrect/fsl/__init__.py diff --git a/python/scripts/dwibiascorrect/mtnorm.py b/python/mrtrix3/dwibiascorrect/mtnorm/__init__.py similarity index 100% rename from python/scripts/dwibiascorrect/mtnorm.py rename to python/mrtrix3/dwibiascorrect/mtnorm/__init__.py diff --git a/python/scripts/dwibiasnormmask.py b/python/mrtrix3/dwibiasnormmask/__init__.py similarity index 100% rename from python/scripts/dwibiasnormmask.py rename to python/mrtrix3/dwibiasnormmask/__init__.py diff --git a/python/scripts/dwicat.py b/python/mrtrix3/dwicat/__init__.py similarity index 100% rename from python/scripts/dwicat.py rename to python/mrtrix3/dwicat/__init__.py diff --git a/python/scripts/dwifslpreproc.py b/python/mrtrix3/dwifslpreproc/__init__.py similarity index 100% rename from python/scripts/dwifslpreproc.py rename to python/mrtrix3/dwifslpreproc/__init__.py diff --git a/python/scripts/dwigradcheck.py b/python/mrtrix3/dwigradcheck/__init__.py similarity index 100% rename from python/scripts/dwigradcheck.py rename to python/mrtrix3/dwigradcheck/__init__.py diff --git a/python/scripts/dwinormalise/__init__.py b/python/mrtrix3/dwinormalise/__init__.py similarity index 100% rename from python/scripts/dwinormalise/__init__.py rename to python/mrtrix3/dwinormalise/__init__.py diff --git a/python/scripts/dwinormalise/group.py b/python/mrtrix3/dwinormalise/group/__init__.py similarity index 100% rename from python/scripts/dwinormalise/group.py rename to python/mrtrix3/dwinormalise/group/__init__.py diff --git a/python/scripts/dwinormalise/manual.py b/python/mrtrix3/dwinormalise/manual/__init__.py similarity index 100% rename from python/scripts/dwinormalise/manual.py rename to python/mrtrix3/dwinormalise/manual/__init__.py diff --git a/python/scripts/dwinormalise/mtnorm.py b/python/mrtrix3/dwinormalise/mtnorm/__init__.py similarity index 100% rename from python/scripts/dwinormalise/mtnorm.py rename to python/mrtrix3/dwinormalise/mtnorm/__init__.py diff --git 
a/python/scripts/dwishellmath.py b/python/mrtrix3/dwishellmath/__init__.py similarity index 100% rename from python/scripts/dwishellmath.py rename to python/mrtrix3/dwishellmath/__init__.py diff --git a/python/scripts/for_each.py b/python/mrtrix3/for_each/__init__.py similarity index 100% rename from python/scripts/for_each.py rename to python/mrtrix3/for_each/__init__.py diff --git a/python/fsl.py b/python/mrtrix3/fsl.py similarity index 100% rename from python/fsl.py rename to python/mrtrix3/fsl.py diff --git a/python/scripts/gen_scheme.py b/python/mrtrix3/gen_scheme/__init__.py similarity index 100% rename from python/scripts/gen_scheme.py rename to python/mrtrix3/gen_scheme/__init__.py diff --git a/python/image.py b/python/mrtrix3/image.py similarity index 100% rename from python/image.py rename to python/mrtrix3/image.py diff --git a/python/scripts/labelsgmfix.py b/python/mrtrix3/labelsgmfix/__init__.py similarity index 100% rename from python/scripts/labelsgmfix.py rename to python/mrtrix3/labelsgmfix/__init__.py diff --git a/python/scripts/mask2glass.py b/python/mrtrix3/mask2glass/__init__.py similarity index 100% rename from python/scripts/mask2glass.py rename to python/mrtrix3/mask2glass/__init__.py diff --git a/python/matrix.py b/python/mrtrix3/matrix.py similarity index 100% rename from python/matrix.py rename to python/mrtrix3/matrix.py diff --git a/python/scripts/mrtrix_cleanup.py b/python/mrtrix3/mrtrix_cleanup/__init__.py similarity index 100% rename from python/scripts/mrtrix_cleanup.py rename to python/mrtrix3/mrtrix_cleanup/__init__.py diff --git a/python/scripts/notfound.py b/python/mrtrix3/notfound/__init__.py similarity index 100% rename from python/scripts/notfound.py rename to python/mrtrix3/notfound/__init__.py diff --git a/python/path.py b/python/mrtrix3/path.py similarity index 100% rename from python/path.py rename to python/mrtrix3/path.py diff --git a/python/phaseencoding.py b/python/mrtrix3/phaseencoding.py similarity index 100% rename from python/phaseencoding.py rename to python/mrtrix3/phaseencoding.py diff --git a/python/scripts/population_template.py b/python/mrtrix3/population_template/__init__.py similarity index 100% rename from python/scripts/population_template.py rename to python/mrtrix3/population_template/__init__.py diff --git a/python/scripts/responsemean.py b/python/mrtrix3/responsemean/__init__.py similarity index 100% rename from python/scripts/responsemean.py rename to python/mrtrix3/responsemean/__init__.py diff --git a/python/run.py b/python/mrtrix3/run.py similarity index 100% rename from python/run.py rename to python/mrtrix3/run.py diff --git a/python/sh.py b/python/mrtrix3/sh.py similarity index 100% rename from python/sh.py rename to python/mrtrix3/sh.py diff --git a/python/utils.py b/python/mrtrix3/utils.py similarity index 100% rename from python/utils.py rename to python/mrtrix3/utils.py From 7f9c631e563da901a5d4d392aa158adfd65df4c6 Mon Sep 17 00:00:00 2001 From: MRtrixBot Date: Wed, 6 Mar 2024 22:16:06 +1100 Subject: [PATCH 07/11] Python: Further filesystem re-arrangement - For each script (or algorithm thereof), split code across at least usage.py and execute.py files. - Remove Python scripts that are not based on the Python API. 
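As a rough illustration only (not itself part of this patch), the intended split for a hypothetical command "mycmd" would look something like the following, mirroring the usage()/execute() pattern that the 5ttgen changes below follow; the command name, author string and command body here are placeholders, and the mechanism by which the generated wrapper locates these callables is handled separately:

    # python/mrtrix3/mycmd/usage.py (hypothetical example)
    def usage(cmdline):
        cmdline.set_author('A. Contributor (hypothetical)')
        cmdline.set_synopsis('One-line summary of what mycmd does')
        cmdline.add_argument('input', help='The input image')
        cmdline.add_argument('output', help='The output image')

    # python/mrtrix3/mycmd/execute.py (hypothetical example)
    from mrtrix3 import app, path, run  # pylint: disable=no-name-in-module

    def execute():
        # Trivial body for illustration only: copy the input image to the output path
        run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' '
                    + path.from_user(app.ARGS.output), force=app.FORCE_OVERWRITE)
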
--- python/mrtrix3/5ttgen/__init__.py | 54 - python/mrtrix3/5ttgen/execute.py | 35 + python/mrtrix3/5ttgen/freesurfer/__init__.py | 82 - .../5ttgen/freesurfer/check_output_paths.py | 19 + python/mrtrix3/5ttgen/freesurfer/execute.py | 57 + .../mrtrix3/5ttgen/freesurfer/get_inputs.py | 22 + python/mrtrix3/5ttgen/freesurfer/usage.py | 23 + python/mrtrix3/5ttgen/fsl/__init__.py | 234 --- .../mrtrix3/5ttgen/fsl/check_output_paths.py | 19 + python/mrtrix3/5ttgen/fsl/execute.py | 197 +++ python/mrtrix3/5ttgen/fsl/get_inputs.py | 27 + python/mrtrix3/5ttgen/fsl/usage.py | 30 + python/mrtrix3/5ttgen/gif/__init__.py | 68 - .../mrtrix3/5ttgen/gif/check_output_paths.py | 19 + python/mrtrix3/5ttgen/gif/execute.py | 40 + python/mrtrix3/5ttgen/gif/get_inputs.py | 29 + python/mrtrix3/5ttgen/gif/usage.py | 21 + python/mrtrix3/5ttgen/hsvs/__init__.py | 797 +-------- .../mrtrix3/5ttgen/hsvs/check_output_paths.py | 19 + python/mrtrix3/5ttgen/hsvs/execute.py | 772 +++++++++ python/mrtrix3/5ttgen/hsvs/get_inputs.py | 24 + python/mrtrix3/5ttgen/hsvs/usage.py | 31 + python/mrtrix3/5ttgen/usage.py | 30 + python/mrtrix3/blend/__init__.py | 50 - python/mrtrix3/convert_bruker/__init__.py | 141 -- .../mrtrix3/dwi2mask/3dautomask/__init__.py | 81 +- python/mrtrix3/dwi2mask/3dautomask/execute.py | 62 + .../mrtrix3/dwi2mask/3dautomask/get_inputs.py | 17 + python/mrtrix3/dwi2mask/3dautomask/usage.py | 34 + python/mrtrix3/dwi2mask/__init__.py | 98 -- python/mrtrix3/dwi2mask/ants/__init__.py | 62 +- python/mrtrix3/dwi2mask/ants/execute.py | 41 + python/mrtrix3/dwi2mask/ants/get_inputs.py | 32 + python/mrtrix3/dwi2mask/ants/usage.py | 24 + .../mrtrix3/dwi2mask/b02template/__init__.py | 215 +-- .../mrtrix3/dwi2mask/b02template/execute.py | 150 ++ .../dwi2mask/b02template/get_inputs.py | 63 + python/mrtrix3/dwi2mask/b02template/usage.py | 44 + python/mrtrix3/dwi2mask/consensus/__init__.py | 115 +- python/mrtrix3/dwi2mask/consensus/execute.py | 93 ++ .../mrtrix3/dwi2mask/consensus/get_inputs.py | 32 + python/mrtrix3/dwi2mask/consensus/usage.py | 28 + python/mrtrix3/dwi2mask/execute.py | 78 + python/mrtrix3/dwi2mask/fslbet/__init__.py | 65 +- python/mrtrix3/dwi2mask/fslbet/execute.py | 51 + python/mrtrix3/dwi2mask/fslbet/get_inputs.py | 17 + python/mrtrix3/dwi2mask/fslbet/usage.py | 28 + python/mrtrix3/dwi2mask/hdbet/__init__.py | 58 +- python/mrtrix3/dwi2mask/hdbet/execute.py | 46 + python/mrtrix3/dwi2mask/hdbet/get_inputs.py | 17 + python/mrtrix3/dwi2mask/hdbet/usage.py | 24 + python/mrtrix3/dwi2mask/legacy/__init__.py | 43 +- python/mrtrix3/dwi2mask/legacy/execute.py | 30 + python/mrtrix3/dwi2mask/legacy/get_inputs.py | 17 + python/mrtrix3/dwi2mask/legacy/usage.py | 29 + python/mrtrix3/dwi2mask/mean/__init__.py | 42 +- python/mrtrix3/dwi2mask/mean/execute.py | 29 + python/mrtrix3/dwi2mask/mean/get_inputs.py | 17 + python/mrtrix3/dwi2mask/mean/usage.py | 31 + python/mrtrix3/dwi2mask/mtnorm/__init__.py | 142 +- python/mrtrix3/dwi2mask/mtnorm/execute.py | 104 ++ python/mrtrix3/dwi2mask/mtnorm/get_inputs.py | 20 + python/mrtrix3/dwi2mask/mtnorm/usage.py | 57 + .../mrtrix3/dwi2mask/synthstrip/__init__.py | 68 +- python/mrtrix3/dwi2mask/synthstrip/execute.py | 52 + .../mrtrix3/dwi2mask/synthstrip/get_inputs.py | 17 + python/mrtrix3/dwi2mask/synthstrip/usage.py | 30 + python/mrtrix3/dwi2mask/trace/__init__.py | 113 +- python/mrtrix3/dwi2mask/trace/execute.py | 95 ++ python/mrtrix3/dwi2mask/trace/get_inputs.py | 17 + python/mrtrix3/dwi2mask/trace/usage.py | 36 + python/mrtrix3/dwi2mask/usage.py | 32 + 
python/mrtrix3/dwi2response/__init__.py | 120 -- .../dwi2response/dhollander/__init__.py | 283 +--- .../dhollander/check_output_paths.py | 21 + .../dwi2response/dhollander/execute.py | 246 +++ .../dwi2response/dhollander/get_inputs.py | 17 + .../mrtrix3/dwi2response/dhollander/usage.py | 36 + python/mrtrix3/dwi2response/execute.py | 89 + python/mrtrix3/dwi2response/fa/__init__.py | 66 +- .../dwi2response/fa/check_output_paths.py | 19 + python/mrtrix3/dwi2response/fa/execute.py | 42 + python/mrtrix3/dwi2response/fa/get_inputs.py | 17 + python/mrtrix3/dwi2response/fa/usage.py | 27 + .../mrtrix3/dwi2response/manual/__init__.py | 75 +- .../dwi2response/manual/check_output_paths.py | 19 + python/mrtrix3/dwi2response/manual/execute.py | 48 + .../mrtrix3/dwi2response/manual/get_inputs.py | 26 + python/mrtrix3/dwi2response/manual/usage.py | 24 + .../mrtrix3/dwi2response/msmt_5tt/__init__.py | 145 +- .../msmt_5tt/check_output_paths.py | 21 + .../mrtrix3/dwi2response/msmt_5tt/execute.py | 110 ++ .../dwi2response/msmt_5tt/get_inputs.py | 21 + python/mrtrix3/dwi2response/msmt_5tt/usage.py | 33 + python/mrtrix3/dwi2response/tax/__init__.py | 142 +- .../dwi2response/tax/check_output_paths.py | 19 + python/mrtrix3/dwi2response/tax/execute.py | 119 ++ python/mrtrix3/dwi2response/tax/get_inputs.py | 17 + python/mrtrix3/dwi2response/tax/usage.py | 26 + .../mrtrix3/dwi2response/tournier/__init__.py | 136 +- .../tournier/check_output_paths.py | 19 + .../mrtrix3/dwi2response/tournier/execute.py | 112 ++ .../dwi2response/tournier/get_inputs.py | 17 + python/mrtrix3/dwi2response/tournier/usage.py | 27 + python/mrtrix3/dwi2response/usage.py | 40 + python/mrtrix3/dwibiascorrect/__init__.py | 75 - .../mrtrix3/dwibiascorrect/ants/__init__.py | 67 - .../dwibiascorrect/ants/check_output_paths.py | 17 + python/mrtrix3/dwibiascorrect/ants/execute.py | 55 + .../mrtrix3/dwibiascorrect/ants/get_inputs.py | 17 + python/mrtrix3/dwibiascorrect/ants/usage.py | 27 + python/mrtrix3/dwibiascorrect/execute.py | 56 + python/mrtrix3/dwibiascorrect/fsl/__init__.py | 70 - .../dwibiascorrect/fsl/check_output_paths.py | 17 + python/mrtrix3/dwibiascorrect/fsl/execute.py | 46 + .../mrtrix3/dwibiascorrect/fsl/get_inputs.py | 17 + python/mrtrix3/dwibiascorrect/fsl/usage.py | 24 + .../mrtrix3/dwibiascorrect/mtnorm/__init__.py | 118 -- .../mtnorm/check_output_paths.py | 17 + .../mrtrix3/dwibiascorrect/mtnorm/execute.py | 88 + .../dwibiascorrect/mtnorm/get_inputs.py | 17 + python/mrtrix3/dwibiascorrect/mtnorm/usage.py | 48 + python/mrtrix3/dwibiascorrect/usage.py | 31 + python/mrtrix3/dwibiasnormmask/__init__.py | 432 ----- python/mrtrix3/dwibiasnormmask/execute.py | 357 ++++ python/mrtrix3/dwibiasnormmask/usage.py | 111 ++ python/mrtrix3/dwicat/__init__.py | 150 -- python/mrtrix3/dwicat/execute.py | 132 ++ python/mrtrix3/dwicat/usage.py | 26 + python/mrtrix3/dwifslpreproc/__init__.py | 1405 ---------------- python/mrtrix3/dwifslpreproc/execute.py | 1331 +++++++++++++++ python/mrtrix3/dwifslpreproc/usage.py | 80 + python/mrtrix3/dwigradcheck/__init__.py | 204 --- python/mrtrix3/dwigradcheck/execute.py | 182 +++ python/mrtrix3/dwigradcheck/usage.py | 32 + python/mrtrix3/dwinormalise/__init__.py | 38 - python/mrtrix3/dwinormalise/execute.py | 25 + python/mrtrix3/dwinormalise/group/__init__.py | 122 -- .../dwinormalise/group/check_output_paths.py | 21 + python/mrtrix3/dwinormalise/group/execute.py | 98 ++ python/mrtrix3/dwinormalise/group/usage.py | 27 + .../mrtrix3/dwinormalise/manual/__init__.py | 60 +- 
.../dwinormalise/manual/check_output_paths.py | 19 + python/mrtrix3/dwinormalise/manual/execute.py | 51 + python/mrtrix3/dwinormalise/manual/usage.py | 28 + .../mrtrix3/dwinormalise/mtnorm/__init__.py | 157 -- .../dwinormalise/mtnorm/check_output_paths.py | 19 + python/mrtrix3/dwinormalise/mtnorm/execute.py | 116 ++ python/mrtrix3/dwinormalise/mtnorm/usage.py | 64 + python/mrtrix3/dwinormalise/usage.py | 26 + python/mrtrix3/dwishellmath/__init__.py | 47 - python/mrtrix3/dwishellmath/execute.py | 45 + python/mrtrix3/dwishellmath/usage.py | 30 + python/mrtrix3/for_each/__init__.py | 277 ---- python/mrtrix3/for_each/entry.py | 46 + python/mrtrix3/for_each/execute.py | 182 +++ python/mrtrix3/for_each/shared.py | 30 + python/mrtrix3/for_each/usage.py | 69 + python/mrtrix3/gen_scheme/__init__.py | 145 -- python/mrtrix3/labelsgmfix/__init__.py | 165 -- python/mrtrix3/labelsgmfix/execute.py | 135 ++ python/mrtrix3/labelsgmfix/usage.py | 27 + python/mrtrix3/mask2glass/__init__.py | 77 - python/mrtrix3/mask2glass/execute.py | 61 + python/mrtrix3/mask2glass/usage.py | 29 + python/mrtrix3/mrtrix_cleanup/__init__.py | 132 -- python/mrtrix3/mrtrix_cleanup/execute.py | 117 ++ python/mrtrix3/mrtrix_cleanup/usage.py | 25 + python/mrtrix3/notfound/__init__.py | 36 - .../mrtrix3/population_template/__init__.py | 1454 ----------------- .../mrtrix3/population_template/contrasts.py | 106 ++ python/mrtrix3/population_template/execute.py | 925 +++++++++++ python/mrtrix3/population_template/input.py | 136 ++ python/mrtrix3/population_template/usage.py | 70 + python/mrtrix3/population_template/utils.py | 295 ++++ python/mrtrix3/responsemean/__init__.py | 76 - python/mrtrix3/responsemean/execute.py | 61 + python/mrtrix3/responsemean/usage.py | 26 + 178 files changed, 9782 insertions(+), 8777 deletions(-) create mode 100644 python/mrtrix3/5ttgen/execute.py create mode 100644 python/mrtrix3/5ttgen/freesurfer/check_output_paths.py create mode 100644 python/mrtrix3/5ttgen/freesurfer/execute.py create mode 100644 python/mrtrix3/5ttgen/freesurfer/get_inputs.py create mode 100644 python/mrtrix3/5ttgen/freesurfer/usage.py create mode 100644 python/mrtrix3/5ttgen/fsl/check_output_paths.py create mode 100644 python/mrtrix3/5ttgen/fsl/execute.py create mode 100644 python/mrtrix3/5ttgen/fsl/get_inputs.py create mode 100644 python/mrtrix3/5ttgen/fsl/usage.py create mode 100644 python/mrtrix3/5ttgen/gif/check_output_paths.py create mode 100644 python/mrtrix3/5ttgen/gif/execute.py create mode 100644 python/mrtrix3/5ttgen/gif/get_inputs.py create mode 100644 python/mrtrix3/5ttgen/gif/usage.py create mode 100644 python/mrtrix3/5ttgen/hsvs/check_output_paths.py create mode 100644 python/mrtrix3/5ttgen/hsvs/execute.py create mode 100644 python/mrtrix3/5ttgen/hsvs/get_inputs.py create mode 100644 python/mrtrix3/5ttgen/hsvs/usage.py create mode 100644 python/mrtrix3/5ttgen/usage.py delete mode 100644 python/mrtrix3/blend/__init__.py delete mode 100644 python/mrtrix3/convert_bruker/__init__.py create mode 100644 python/mrtrix3/dwi2mask/3dautomask/execute.py create mode 100644 python/mrtrix3/dwi2mask/3dautomask/get_inputs.py create mode 100644 python/mrtrix3/dwi2mask/3dautomask/usage.py create mode 100644 python/mrtrix3/dwi2mask/ants/execute.py create mode 100644 python/mrtrix3/dwi2mask/ants/get_inputs.py create mode 100644 python/mrtrix3/dwi2mask/ants/usage.py create mode 100644 python/mrtrix3/dwi2mask/b02template/execute.py create mode 100644 python/mrtrix3/dwi2mask/b02template/get_inputs.py create mode 100644 
python/mrtrix3/dwi2mask/b02template/usage.py create mode 100644 python/mrtrix3/dwi2mask/consensus/execute.py create mode 100644 python/mrtrix3/dwi2mask/consensus/get_inputs.py create mode 100644 python/mrtrix3/dwi2mask/consensus/usage.py create mode 100644 python/mrtrix3/dwi2mask/execute.py create mode 100644 python/mrtrix3/dwi2mask/fslbet/execute.py create mode 100644 python/mrtrix3/dwi2mask/fslbet/get_inputs.py create mode 100644 python/mrtrix3/dwi2mask/fslbet/usage.py create mode 100644 python/mrtrix3/dwi2mask/hdbet/execute.py create mode 100644 python/mrtrix3/dwi2mask/hdbet/get_inputs.py create mode 100644 python/mrtrix3/dwi2mask/hdbet/usage.py create mode 100644 python/mrtrix3/dwi2mask/legacy/execute.py create mode 100644 python/mrtrix3/dwi2mask/legacy/get_inputs.py create mode 100644 python/mrtrix3/dwi2mask/legacy/usage.py create mode 100644 python/mrtrix3/dwi2mask/mean/execute.py create mode 100644 python/mrtrix3/dwi2mask/mean/get_inputs.py create mode 100644 python/mrtrix3/dwi2mask/mean/usage.py create mode 100644 python/mrtrix3/dwi2mask/mtnorm/execute.py create mode 100644 python/mrtrix3/dwi2mask/mtnorm/get_inputs.py create mode 100644 python/mrtrix3/dwi2mask/mtnorm/usage.py create mode 100644 python/mrtrix3/dwi2mask/synthstrip/execute.py create mode 100644 python/mrtrix3/dwi2mask/synthstrip/get_inputs.py create mode 100644 python/mrtrix3/dwi2mask/synthstrip/usage.py create mode 100644 python/mrtrix3/dwi2mask/trace/execute.py create mode 100644 python/mrtrix3/dwi2mask/trace/get_inputs.py create mode 100644 python/mrtrix3/dwi2mask/trace/usage.py create mode 100644 python/mrtrix3/dwi2mask/usage.py create mode 100644 python/mrtrix3/dwi2response/dhollander/check_output_paths.py create mode 100644 python/mrtrix3/dwi2response/dhollander/execute.py create mode 100644 python/mrtrix3/dwi2response/dhollander/get_inputs.py create mode 100644 python/mrtrix3/dwi2response/dhollander/usage.py create mode 100644 python/mrtrix3/dwi2response/execute.py create mode 100644 python/mrtrix3/dwi2response/fa/check_output_paths.py create mode 100644 python/mrtrix3/dwi2response/fa/execute.py create mode 100644 python/mrtrix3/dwi2response/fa/get_inputs.py create mode 100644 python/mrtrix3/dwi2response/fa/usage.py create mode 100644 python/mrtrix3/dwi2response/manual/check_output_paths.py create mode 100644 python/mrtrix3/dwi2response/manual/execute.py create mode 100644 python/mrtrix3/dwi2response/manual/get_inputs.py create mode 100644 python/mrtrix3/dwi2response/manual/usage.py create mode 100644 python/mrtrix3/dwi2response/msmt_5tt/check_output_paths.py create mode 100644 python/mrtrix3/dwi2response/msmt_5tt/execute.py create mode 100644 python/mrtrix3/dwi2response/msmt_5tt/get_inputs.py create mode 100644 python/mrtrix3/dwi2response/msmt_5tt/usage.py create mode 100644 python/mrtrix3/dwi2response/tax/check_output_paths.py create mode 100644 python/mrtrix3/dwi2response/tax/execute.py create mode 100644 python/mrtrix3/dwi2response/tax/get_inputs.py create mode 100644 python/mrtrix3/dwi2response/tax/usage.py create mode 100644 python/mrtrix3/dwi2response/tournier/check_output_paths.py create mode 100644 python/mrtrix3/dwi2response/tournier/execute.py create mode 100644 python/mrtrix3/dwi2response/tournier/get_inputs.py create mode 100644 python/mrtrix3/dwi2response/tournier/usage.py create mode 100644 python/mrtrix3/dwi2response/usage.py create mode 100644 python/mrtrix3/dwibiascorrect/ants/check_output_paths.py create mode 100644 python/mrtrix3/dwibiascorrect/ants/execute.py create mode 100644 
python/mrtrix3/dwibiascorrect/ants/get_inputs.py create mode 100644 python/mrtrix3/dwibiascorrect/ants/usage.py create mode 100644 python/mrtrix3/dwibiascorrect/execute.py create mode 100644 python/mrtrix3/dwibiascorrect/fsl/check_output_paths.py create mode 100644 python/mrtrix3/dwibiascorrect/fsl/execute.py create mode 100644 python/mrtrix3/dwibiascorrect/fsl/get_inputs.py create mode 100644 python/mrtrix3/dwibiascorrect/fsl/usage.py create mode 100644 python/mrtrix3/dwibiascorrect/mtnorm/check_output_paths.py create mode 100644 python/mrtrix3/dwibiascorrect/mtnorm/execute.py create mode 100644 python/mrtrix3/dwibiascorrect/mtnorm/get_inputs.py create mode 100644 python/mrtrix3/dwibiascorrect/mtnorm/usage.py create mode 100644 python/mrtrix3/dwibiascorrect/usage.py create mode 100644 python/mrtrix3/dwibiasnormmask/execute.py create mode 100644 python/mrtrix3/dwibiasnormmask/usage.py create mode 100644 python/mrtrix3/dwicat/execute.py create mode 100644 python/mrtrix3/dwicat/usage.py create mode 100644 python/mrtrix3/dwifslpreproc/execute.py create mode 100644 python/mrtrix3/dwifslpreproc/usage.py create mode 100644 python/mrtrix3/dwigradcheck/execute.py create mode 100644 python/mrtrix3/dwigradcheck/usage.py create mode 100644 python/mrtrix3/dwinormalise/execute.py create mode 100644 python/mrtrix3/dwinormalise/group/check_output_paths.py create mode 100644 python/mrtrix3/dwinormalise/group/execute.py create mode 100644 python/mrtrix3/dwinormalise/group/usage.py create mode 100644 python/mrtrix3/dwinormalise/manual/check_output_paths.py create mode 100644 python/mrtrix3/dwinormalise/manual/execute.py create mode 100644 python/mrtrix3/dwinormalise/manual/usage.py create mode 100644 python/mrtrix3/dwinormalise/mtnorm/check_output_paths.py create mode 100644 python/mrtrix3/dwinormalise/mtnorm/execute.py create mode 100644 python/mrtrix3/dwinormalise/mtnorm/usage.py create mode 100644 python/mrtrix3/dwinormalise/usage.py create mode 100644 python/mrtrix3/dwishellmath/execute.py create mode 100644 python/mrtrix3/dwishellmath/usage.py create mode 100644 python/mrtrix3/for_each/entry.py create mode 100644 python/mrtrix3/for_each/execute.py create mode 100644 python/mrtrix3/for_each/shared.py create mode 100644 python/mrtrix3/for_each/usage.py delete mode 100644 python/mrtrix3/gen_scheme/__init__.py create mode 100644 python/mrtrix3/labelsgmfix/execute.py create mode 100644 python/mrtrix3/labelsgmfix/usage.py create mode 100644 python/mrtrix3/mask2glass/execute.py create mode 100644 python/mrtrix3/mask2glass/usage.py create mode 100644 python/mrtrix3/mrtrix_cleanup/execute.py create mode 100644 python/mrtrix3/mrtrix_cleanup/usage.py delete mode 100644 python/mrtrix3/notfound/__init__.py create mode 100644 python/mrtrix3/population_template/contrasts.py create mode 100644 python/mrtrix3/population_template/execute.py create mode 100644 python/mrtrix3/population_template/input.py create mode 100644 python/mrtrix3/population_template/usage.py create mode 100644 python/mrtrix3/population_template/utils.py create mode 100644 python/mrtrix3/responsemean/execute.py create mode 100644 python/mrtrix3/responsemean/usage.py diff --git a/python/mrtrix3/5ttgen/__init__.py b/python/mrtrix3/5ttgen/__init__.py index 10938536b7..e69de29bb2 100644 --- a/python/mrtrix3/5ttgen/__init__.py +++ b/python/mrtrix3/5ttgen/__init__.py @@ -1,54 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. 
If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import algorithm #pylint: disable=no-name-in-module, import-outside-toplevel - - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Generate a 5TT image suitable for ACT') - cmdline.add_citation('Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. Anatomically-constrained tractography: Improved diffusion MRI streamlines tractography through effective use of anatomical information. NeuroImage, 2012, 62, 1924-1938') - cmdline.add_description('5ttgen acts as a \'master\' script for generating a five-tissue-type (5TT) segmented tissue image suitable for use in Anatomically-Constrained Tractography (ACT). A range of different algorithms are available for completing this task. When using this script, the name of the algorithm to be used must appear as the first argument on the command-line after \'5ttgen\'. The subsequent compulsory arguments and options available depend on the particular algorithm being invoked.') - cmdline.add_description('Each algorithm available also has its own help page, including necessary references; e.g. to see the help page of the \'fsl\' algorithm, type \'5ttgen fsl\'.') - - common_options = cmdline.add_argument_group('Options common to all 5ttgen algorithms') - common_options.add_argument('-nocrop', action='store_true', default=False, help='Do NOT crop the resulting 5TT image to reduce its size (keep the same dimensions as the input image)') - common_options.add_argument('-sgm_amyg_hipp', action='store_true', default=False, help='Represent the amygdalae and hippocampi as sub-cortical grey matter in the 5TT image') - - # Import the command-line settings for all algorithms found in the relevant directory - algorithm.usage(cmdline) - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import algorithm, app, run #pylint: disable=no-name-in-module, import-outside-toplevel - - # Find out which algorithm the user has requested - alg = algorithm.get(app.ARGS.algorithm) - - alg.check_output_paths() - - app.make_scratch_dir() - alg.get_inputs() - app.goto_scratch_dir() - - alg.execute() - - stderr = run.command('5ttcheck result.mif').stderr - if '[WARNING]' in stderr: - app.warn('Generated image does not perfectly conform to 5TT format:') - for line in stderr.splitlines(): - app.warn(line) diff --git a/python/mrtrix3/5ttgen/execute.py b/python/mrtrix3/5ttgen/execute.py new file mode 100644 index 0000000000..6d3040a25e --- /dev/null +++ b/python/mrtrix3/5ttgen/execute.py @@ -0,0 +1,35 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import algorithm, app, run #pylint: disable=no-name-in-module + +def execute(): #pylint: disable=unused-variable + + # Find out which algorithm the user has requested + alg = algorithm.get(app.ARGS.algorithm) + + alg.check_output_paths() + + app.make_scratch_dir() + alg.get_inputs() + app.goto_scratch_dir() + + alg.execute() + + stderr = run.command('5ttcheck result.mif').stderr + if '[WARNING]' in stderr: + app.warn('Generated image does not perfectly conform to 5TT format:') + for line in stderr.splitlines(): + app.warn(line) diff --git a/python/mrtrix3/5ttgen/freesurfer/__init__.py b/python/mrtrix3/5ttgen/freesurfer/__init__.py index cdfbed3717..e69de29bb2 100644 --- a/python/mrtrix3/5ttgen/freesurfer/__init__.py +++ b/python/mrtrix3/5ttgen/freesurfer/__init__.py @@ -1,82 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -import os.path, shutil -from mrtrix3 import MRtrixError -from mrtrix3 import app, path, run - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('freesurfer', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Generate the 5TT image based on a FreeSurfer parcellation image') - parser.add_argument('input', help='The input FreeSurfer parcellation image (any image containing \'aseg\' in its name)') - parser.add_argument('output', help='The output 5TT image') - options = parser.add_argument_group('Options specific to the \'freesurfer\' algorithm') - options.add_argument('-lut', help='Manually provide path to the lookup table on which the input parcellation image is based (e.g. 
FreeSurferColorLUT.txt)') - - - -def check_output_paths(): #pylint: disable=unused-variable - app.check_output_path(app.ARGS.output) - - - -def get_inputs(): #pylint: disable=unused-variable - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('input.mif')) - if app.ARGS.lut: - run.function(shutil.copyfile, path.from_user(app.ARGS.lut, False), path.to_scratch('LUT.txt', False)) - - - -def execute(): #pylint: disable=unused-variable - lut_input_path = 'LUT.txt' - if not os.path.exists('LUT.txt'): - freesurfer_home = os.environ.get('FREESURFER_HOME', '') - if not freesurfer_home: - raise MRtrixError('Environment variable FREESURFER_HOME is not set; please run appropriate FreeSurfer configuration script, set this variable manually, or provide script with path to file FreeSurferColorLUT.txt using -lut option') - lut_input_path = os.path.join(freesurfer_home, 'FreeSurferColorLUT.txt') - if not os.path.isfile(lut_input_path): - raise MRtrixError('Could not find FreeSurfer lookup table file (expected location: ' + lut_input_path + '), and none provided using -lut') - - if app.ARGS.sgm_amyg_hipp: - lut_output_file_name = 'FreeSurfer2ACT_sgm_amyg_hipp.txt' - else: - lut_output_file_name = 'FreeSurfer2ACT.txt' - lut_output_path = os.path.join(path.shared_data_path(), path.script_subdir_name(), lut_output_file_name) - if not os.path.isfile(lut_output_path): - raise MRtrixError('Could not find lookup table file for converting FreeSurfer parcellation output to tissues (expected location: ' + lut_output_path + ')') - - # Initial conversion from FreeSurfer parcellation to five principal tissue types - run.command('labelconvert input.mif ' + lut_input_path + ' ' + lut_output_path + ' indices.mif') - - # Crop to reduce file size - if app.ARGS.nocrop: - image = 'indices.mif' - else: - image = 'indices_cropped.mif' - run.command('mrthreshold indices.mif - -abs 0.5 | mrgrid indices.mif crop ' + image + ' -mask -') - - # Convert into the 5TT format for ACT - run.command('mrcalc ' + image + ' 1 -eq cgm.mif') - run.command('mrcalc ' + image + ' 2 -eq sgm.mif') - run.command('mrcalc ' + image + ' 3 -eq wm.mif') - run.command('mrcalc ' + image + ' 4 -eq csf.mif') - run.command('mrcalc ' + image + ' 5 -eq path.mif') - - run.command('mrcat cgm.mif sgm.mif wm.mif csf.mif path.mif - -axis 3 | mrconvert - result.mif -datatype float32') - - run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/5ttgen/freesurfer/check_output_paths.py b/python/mrtrix3/5ttgen/freesurfer/check_output_paths.py new file mode 100644 index 0000000000..bff0fa0f2b --- /dev/null +++ b/python/mrtrix3/5ttgen/freesurfer/check_output_paths.py @@ -0,0 +1,19 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
+ +from mrtrix3 import app + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.output) diff --git a/python/mrtrix3/5ttgen/freesurfer/execute.py b/python/mrtrix3/5ttgen/freesurfer/execute.py new file mode 100644 index 0000000000..83dec77dbb --- /dev/null +++ b/python/mrtrix3/5ttgen/freesurfer/execute.py @@ -0,0 +1,57 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import os.path +from mrtrix3 import MRtrixError +from mrtrix3 import app, path, run + +def execute(): #pylint: disable=unused-variable + lut_input_path = 'LUT.txt' + if not os.path.exists('LUT.txt'): + freesurfer_home = os.environ.get('FREESURFER_HOME', '') + if not freesurfer_home: + raise MRtrixError('Environment variable FREESURFER_HOME is not set; please run appropriate FreeSurfer configuration script, set this variable manually, or provide script with path to file FreeSurferColorLUT.txt using -lut option') + lut_input_path = os.path.join(freesurfer_home, 'FreeSurferColorLUT.txt') + if not os.path.isfile(lut_input_path): + raise MRtrixError('Could not find FreeSurfer lookup table file (expected location: ' + lut_input_path + '), and none provided using -lut') + + if app.ARGS.sgm_amyg_hipp: + lut_output_file_name = 'FreeSurfer2ACT_sgm_amyg_hipp.txt' + else: + lut_output_file_name = 'FreeSurfer2ACT.txt' + lut_output_path = os.path.join(path.shared_data_path(), path.script_subdir_name(), lut_output_file_name) + if not os.path.isfile(lut_output_path): + raise MRtrixError('Could not find lookup table file for converting FreeSurfer parcellation output to tissues (expected location: ' + lut_output_path + ')') + + # Initial conversion from FreeSurfer parcellation to five principal tissue types + run.command('labelconvert input.mif ' + lut_input_path + ' ' + lut_output_path + ' indices.mif') + + # Crop to reduce file size + if app.ARGS.nocrop: + image = 'indices.mif' + else: + image = 'indices_cropped.mif' + run.command('mrthreshold indices.mif - -abs 0.5 | mrgrid indices.mif crop ' + image + ' -mask -') + + # Convert into the 5TT format for ACT + run.command('mrcalc ' + image + ' 1 -eq cgm.mif') + run.command('mrcalc ' + image + ' 2 -eq sgm.mif') + run.command('mrcalc ' + image + ' 3 -eq wm.mif') + run.command('mrcalc ' + image + ' 4 -eq csf.mif') + run.command('mrcalc ' + image + ' 5 -eq path.mif') + + run.command('mrcat cgm.mif sgm.mif wm.mif csf.mif path.mif - -axis 3 | mrconvert - result.mif -datatype float32') + + run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/5ttgen/freesurfer/get_inputs.py b/python/mrtrix3/5ttgen/freesurfer/get_inputs.py new file mode 100644 index 0000000000..d25fbbf6db --- /dev/null +++ b/python/mrtrix3/5ttgen/freesurfer/get_inputs.py @@ -0,0 +1,22 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. 
+# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import shutil +from mrtrix3 import app, path, run + +def get_inputs(): #pylint: disable=unused-variable + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('input.mif')) + if app.ARGS.lut: + run.function(shutil.copyfile, path.from_user(app.ARGS.lut, False), path.to_scratch('LUT.txt', False)) diff --git a/python/mrtrix3/5ttgen/freesurfer/usage.py b/python/mrtrix3/5ttgen/freesurfer/usage.py new file mode 100644 index 0000000000..1b7de8e048 --- /dev/null +++ b/python/mrtrix3/5ttgen/freesurfer/usage.py @@ -0,0 +1,23 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('freesurfer', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Generate the 5TT image based on a FreeSurfer parcellation image') + parser.add_argument('input', help='The input FreeSurfer parcellation image (any image containing \'aseg\' in its name)') + parser.add_argument('output', help='The output 5TT image') + options = parser.add_argument_group('Options specific to the \'freesurfer\' algorithm') + options.add_argument('-lut', help='Manually provide path to the lookup table on which the input parcellation image is based (e.g. FreeSurferColorLUT.txt)') diff --git a/python/mrtrix3/5ttgen/fsl/__init__.py b/python/mrtrix3/5ttgen/fsl/__init__.py index e90e7399cb..e69de29bb2 100644 --- a/python/mrtrix3/5ttgen/fsl/__init__.py +++ b/python/mrtrix3/5ttgen/fsl/__init__.py @@ -1,234 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. 
- -import math, os, shutil -from mrtrix3 import MRtrixError -from mrtrix3 import app, fsl, image, path, run, utils - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('fsl', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Use FSL commands to generate the 5TT image based on a T1-weighted image') - parser.add_citation('Smith, S. M. Fast robust automated brain extraction. Human Brain Mapping, 2002, 17, 143-155', is_external=True) - parser.add_citation('Zhang, Y.; Brady, M. & Smith, S. Segmentation of brain MR images through a hidden Markov random field model and the expectation-maximization algorithm. IEEE Transactions on Medical Imaging, 2001, 20, 45-57', is_external=True) - parser.add_citation('Patenaude, B.; Smith, S. M.; Kennedy, D. N. & Jenkinson, M. A Bayesian model of shape and appearance for subcortical brain segmentation. NeuroImage, 2011, 56, 907-922', is_external=True) - parser.add_citation('Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', is_external=True) - parser.add_argument('input', help='The input T1-weighted image') - parser.add_argument('output', help='The output 5TT image') - options = parser.add_argument_group('Options specific to the \'fsl\' algorithm') - options.add_argument('-t2', metavar='', help='Provide a T2-weighted image in addition to the default T1-weighted image; this will be used as a second input to FSL FAST') - options.add_argument('-mask', help='Manually provide a brain mask, rather than deriving one in the script') - options.add_argument('-premasked', action='store_true', help='Indicate that brain masking has already been applied to the input image') - parser.flag_mutually_exclusive_options( [ 'mask', 'premasked' ] ) - - - -def check_output_paths(): #pylint: disable=unused-variable - app.check_output_path(app.ARGS.output) - - - -def get_inputs(): #pylint: disable=unused-variable - image.check_3d_nonunity(path.from_user(app.ARGS.input, False)) - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('input.mif')) - if app.ARGS.mask: - run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit -strides -1,+2,+3') - if app.ARGS.t2: - if not image.match(path.from_user(app.ARGS.input, False), path.from_user(app.ARGS.t2, False)): - raise MRtrixError('Provided T2 image does not match input T1 image') - run.command('mrconvert ' + path.from_user(app.ARGS.t2) + ' ' + path.to_scratch('T2.nii') + ' -strides -1,+2,+3') - - - -def execute(): #pylint: disable=unused-variable - if utils.is_windows(): - raise MRtrixError('\'fsl\' algorithm of 5ttgen script cannot be run on Windows: FSL not available on Windows') - - fsl_path = os.environ.get('FSLDIR', '') - if not fsl_path: - raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') - - bet_cmd = fsl.exe_name('bet') - fast_cmd = fsl.exe_name('fast') - first_cmd = fsl.exe_name('run_first_all') - ssroi_cmd = fsl.exe_name('standard_space_roi') - - first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin') - if not 
os.path.isdir(first_atlas_path): - raise MRtrixError('Atlases required for FSL\'s FIRST program not installed; please install fsl-first-data using your relevant package manager') - - fsl_suffix = fsl.suffix() - - if not app.ARGS.mask and not app.ARGS.premasked and not shutil.which('dc'): - app.warn('Unix command "dc" not found; FSL script "standard_space_roi" may fail') - - sgm_structures = [ 'L_Accu', 'R_Accu', 'L_Caud', 'R_Caud', 'L_Pall', 'R_Pall', 'L_Puta', 'R_Puta', 'L_Thal', 'R_Thal' ] - if app.ARGS.sgm_amyg_hipp: - sgm_structures.extend([ 'L_Amyg', 'R_Amyg', 'L_Hipp', 'R_Hipp' ]) - - t1_spacing = image.Header('input.mif').spacing() - upsample_for_first = False - # If voxel size is 1.25mm or larger, make a guess that the user has erroneously re-gridded their data - if math.pow(t1_spacing[0] * t1_spacing[1] * t1_spacing[2], 1.0/3.0) > 1.225: - app.warn('Voxel size larger than expected for T1-weighted images (' + str(t1_spacing) + '); ' - 'note that ACT does not require re-gridding of T1 image to DWI space, and indeed ' - 'retaining the original higher resolution of the T1 image is preferable') - upsample_for_first = True - - run.command('mrconvert input.mif T1.nii -strides -1,+2,+3') - - fast_t1_input = 'T1.nii' - fast_t2_input = '' - - # Decide whether or not we're going to do any brain masking - if app.ARGS.mask: - - fast_t1_input = 'T1_masked' + fsl_suffix - - # Check to see if the mask matches the T1 image - if image.match('T1.nii', 'mask.mif'): - run.command('mrcalc T1.nii mask.mif -mult ' + fast_t1_input) - mask_path = 'mask.mif' - else: - app.warn('Mask image does not match input image - re-gridding') - run.command('mrtransform mask.mif mask_regrid.mif -template T1.nii -datatype bit') - run.command('mrcalc T1.nii mask_regrid.mif -mult ' + fast_t1_input) - mask_path = 'mask_regrid.mif' - - if os.path.exists('T2.nii'): - fast_t2_input = 'T2_masked' + fsl_suffix - run.command('mrcalc T2.nii ' + mask_path + ' -mult ' + fast_t2_input) - - elif app.ARGS.premasked: - - fast_t1_input = 'T1.nii' - if os.path.exists('T2.nii'): - fast_t2_input = 'T2.nii' - - else: - - # Use FSL command standard_space_roi to do an initial masking of the image before BET - # Also reduce the FoV of the image - # Using MNI 1mm dilated brain mask rather than the -b option in standard_space_roi (which uses the 2mm mask); the latter looks 'buggy' to me... 
Unfortunately even with the 1mm 'dilated' mask, it can still cut into some brain areas, hence the explicit dilation - mni_mask_path = os.path.join(fsl_path, 'data', 'standard', 'MNI152_T1_1mm_brain_mask_dil.nii.gz') - mni_mask_dilation = 0 - if os.path.exists (mni_mask_path): - mni_mask_dilation = 4 - else: - mni_mask_path = os.path.join(fsl_path, 'data', 'standard', 'MNI152_T1_2mm_brain_mask_dil.nii.gz') - if os.path.exists (mni_mask_path): - mni_mask_dilation = 2 - try: - if mni_mask_dilation: - run.command('maskfilter ' + mni_mask_path + ' dilate mni_mask.nii -npass ' + str(mni_mask_dilation)) - if app.ARGS.nocrop: - ssroi_roi_option = ' -roiNONE' - else: - ssroi_roi_option = ' -roiFOV' - run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix + ' -maskMASK mni_mask.nii' + ssroi_roi_option) - else: - run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix + ' -b') - except run.MRtrixCmdError: - pass - try: - pre_bet_image = fsl.find_image('T1_preBET') - except MRtrixError: - app.warn('FSL script \'standard_space_roi\' did not complete successfully' + \ - ('' if shutil.which('dc') else ' (possibly due to program \'dc\' not being installed') + '; ' + \ - 'attempting to continue by providing un-cropped image to BET') - pre_bet_image = 'T1.nii' - - # BET - run.command(bet_cmd + ' ' + pre_bet_image + ' T1_BET' + fsl_suffix + ' -f 0.15 -R') - fast_t1_input = fsl.find_image('T1_BET' + fsl_suffix) - - if os.path.exists('T2.nii'): - if app.ARGS.nocrop: - fast_t2_input = 'T2.nii' - else: - # Just a reduction of FoV, no sub-voxel interpolation going on - run.command('mrtransform T2.nii T2_cropped.nii -template ' + fast_t1_input + ' -interp nearest') - fast_t2_input = 'T2_cropped.nii' - - # Finish branching based on brain masking - - # FAST - if fast_t2_input: - run.command(fast_cmd + ' -S 2 ' + fast_t2_input + ' ' + fast_t1_input) - else: - run.command(fast_cmd + ' ' + fast_t1_input) - - # FIRST - first_input = 'T1.nii' - if upsample_for_first: - app.warn('Generating 1mm isotropic T1 image for FIRST in hope of preventing failure, since input image is of lower resolution') - run.command('mrgrid T1.nii regrid T1_1mm.nii -voxel 1.0 -interp sinc') - first_input = 'T1_1mm.nii' - first_brain_extracted_option = '' - if app.ARGS.premasked: - first_brain_extracted_option = ' -b' - first_debug_option = '' - if not app.DO_CLEANUP: - first_debug_option = ' -d' - first_verbosity_option = '' - if app.VERBOSITY == 3: - first_verbosity_option = ' -v' - run.command(first_cmd + ' -m none -s ' + ','.join(sgm_structures) + ' -i ' + first_input + ' -o first' + first_brain_extracted_option + first_debug_option + first_verbosity_option) - fsl.check_first('first', sgm_structures) - - # Convert FIRST meshes to partial volume images - pve_image_list = [ ] - progress = app.ProgressBar('Generating partial volume images for SGM structures', len(sgm_structures)) - for struct in sgm_structures: - pve_image_path = 'mesh2voxel_' + struct + '.mif' - vtk_in_path = 'first-' + struct + '_first.vtk' - vtk_temp_path = struct + '.vtk' - run.command('meshconvert ' + vtk_in_path + ' ' + vtk_temp_path + ' -transform first2real ' + first_input) - run.command('mesh2voxel ' + vtk_temp_path + ' ' + fast_t1_input + ' ' + pve_image_path) - pve_image_list.append(pve_image_path) - progress.increment() - progress.done() - run.command(['mrmath', pve_image_list, 'sum', '-', '|', \ - 'mrcalc', '-', '1.0', '-min', 'all_sgms.mif']) - - # Combine the tissue images into the 5TT format within the script itself - fast_output_prefix = 
fast_t1_input.split('.')[0] - fast_csf_output = fsl.find_image(fast_output_prefix + '_pve_0') - fast_gm_output = fsl.find_image(fast_output_prefix + '_pve_1') - fast_wm_output = fsl.find_image(fast_output_prefix + '_pve_2') - # Step 1: Run LCC on the WM image - run.command('mrthreshold ' + fast_wm_output + ' - -abs 0.001 | maskfilter - connect - -connectivity | mrcalc 1 - 1 -gt -sub remove_unconnected_wm_mask.mif -datatype bit') - # Step 2: Generate the images in the same fashion as the old 5ttgen binary used to: - # - Preserve CSF as-is - # - Preserve SGM, unless it results in a sum of volume fractions greater than 1, in which case clamp - # - Multiply the FAST volume fractions of GM and CSF, so that the sum of CSF, SGM, CGM and WM is 1.0 - run.command('mrcalc ' + fast_csf_output + ' remove_unconnected_wm_mask.mif -mult csf.mif') - run.command('mrcalc 1.0 csf.mif -sub all_sgms.mif -min sgm.mif') - run.command('mrcalc 1.0 csf.mif sgm.mif -add -sub ' + fast_gm_output + ' ' + fast_wm_output + ' -add -div multiplier.mif') - run.command('mrcalc multiplier.mif -finite multiplier.mif 0.0 -if multiplier_noNAN.mif') - run.command('mrcalc ' + fast_gm_output + ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult cgm.mif') - run.command('mrcalc ' + fast_wm_output + ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult wm.mif') - run.command('mrcalc 0 wm.mif -min path.mif') - run.command('mrcat cgm.mif sgm.mif wm.mif csf.mif path.mif - -axis 3 | mrconvert - combined_precrop.mif -strides +2,+3,+4,+1') - - # Crop to reduce file size (improves caching of image data during tracking) - if app.ARGS.nocrop: - run.function(os.rename, 'combined_precrop.mif', 'result.mif') - else: - run.command('mrmath combined_precrop.mif sum - -axis 3 | mrthreshold - - -abs 0.5 | mrgrid combined_precrop.mif crop result.mif -mask -') - - run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/5ttgen/fsl/check_output_paths.py b/python/mrtrix3/5ttgen/fsl/check_output_paths.py new file mode 100644 index 0000000000..bff0fa0f2b --- /dev/null +++ b/python/mrtrix3/5ttgen/fsl/check_output_paths.py @@ -0,0 +1,19 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.output) diff --git a/python/mrtrix3/5ttgen/fsl/execute.py b/python/mrtrix3/5ttgen/fsl/execute.py new file mode 100644 index 0000000000..5df6bce358 --- /dev/null +++ b/python/mrtrix3/5ttgen/fsl/execute.py @@ -0,0 +1,197 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import math, os, shutil +from mrtrix3 import MRtrixError +from mrtrix3 import app, fsl, image, path, run, utils + +def execute(): #pylint: disable=unused-variable + if utils.is_windows(): + raise MRtrixError('\'fsl\' algorithm of 5ttgen script cannot be run on Windows: FSL not available on Windows') + + fsl_path = os.environ.get('FSLDIR', '') + if not fsl_path: + raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') + + bet_cmd = fsl.exe_name('bet') + fast_cmd = fsl.exe_name('fast') + first_cmd = fsl.exe_name('run_first_all') + ssroi_cmd = fsl.exe_name('standard_space_roi') + + first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin') + if not os.path.isdir(first_atlas_path): + raise MRtrixError('Atlases required for FSL\'s FIRST program not installed; please install fsl-first-data using your relevant package manager') + + fsl_suffix = fsl.suffix() + + if not app.ARGS.mask and not app.ARGS.premasked and not shutil.which('dc'): + app.warn('Unix command "dc" not found; FSL script "standard_space_roi" may fail') + + sgm_structures = [ 'L_Accu', 'R_Accu', 'L_Caud', 'R_Caud', 'L_Pall', 'R_Pall', 'L_Puta', 'R_Puta', 'L_Thal', 'R_Thal' ] + if app.ARGS.sgm_amyg_hipp: + sgm_structures.extend([ 'L_Amyg', 'R_Amyg', 'L_Hipp', 'R_Hipp' ]) + + t1_spacing = image.Header('input.mif').spacing() + upsample_for_first = False + # If voxel size is 1.25mm or larger, make a guess that the user has erroneously re-gridded their data + if math.pow(t1_spacing[0] * t1_spacing[1] * t1_spacing[2], 1.0/3.0) > 1.225: + app.warn('Voxel size larger than expected for T1-weighted images (' + str(t1_spacing) + '); ' + 'note that ACT does not require re-gridding of T1 image to DWI space, and indeed ' + 'retaining the original higher resolution of the T1 image is preferable') + upsample_for_first = True + + run.command('mrconvert input.mif T1.nii -strides -1,+2,+3') + + fast_t1_input = 'T1.nii' + fast_t2_input = '' + + # Decide whether or not we're going to do any brain masking + if app.ARGS.mask: + + fast_t1_input = 'T1_masked' + fsl_suffix + + # Check to see if the mask matches the T1 image + if image.match('T1.nii', 'mask.mif'): + run.command('mrcalc T1.nii mask.mif -mult ' + fast_t1_input) + mask_path = 'mask.mif' + else: + app.warn('Mask image does not match input image - re-gridding') + run.command('mrtransform mask.mif mask_regrid.mif -template T1.nii -datatype bit') + run.command('mrcalc T1.nii mask_regrid.mif -mult ' + fast_t1_input) + mask_path = 'mask_regrid.mif' + + if os.path.exists('T2.nii'): + fast_t2_input = 'T2_masked' + fsl_suffix + run.command('mrcalc T2.nii ' + mask_path + ' -mult ' + fast_t2_input) + + elif app.ARGS.premasked: + + fast_t1_input = 'T1.nii' + if os.path.exists('T2.nii'): + fast_t2_input = 'T2.nii' + + else: + + # Use FSL command standard_space_roi to do an initial masking of the image before BET + # Also reduce the FoV of the image + # Using MNI 1mm dilated brain mask rather than the -b option in standard_space_roi (which uses the 2mm mask); the latter looks 'buggy' to me... 
Unfortunately even with the 1mm 'dilated' mask, it can still cut into some brain areas, hence the explicit dilation + mni_mask_path = os.path.join(fsl_path, 'data', 'standard', 'MNI152_T1_1mm_brain_mask_dil.nii.gz') + mni_mask_dilation = 0 + if os.path.exists (mni_mask_path): + mni_mask_dilation = 4 + else: + mni_mask_path = os.path.join(fsl_path, 'data', 'standard', 'MNI152_T1_2mm_brain_mask_dil.nii.gz') + if os.path.exists (mni_mask_path): + mni_mask_dilation = 2 + try: + if mni_mask_dilation: + run.command('maskfilter ' + mni_mask_path + ' dilate mni_mask.nii -npass ' + str(mni_mask_dilation)) + if app.ARGS.nocrop: + ssroi_roi_option = ' -roiNONE' + else: + ssroi_roi_option = ' -roiFOV' + run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix + ' -maskMASK mni_mask.nii' + ssroi_roi_option) + else: + run.command(ssroi_cmd + ' T1.nii T1_preBET' + fsl_suffix + ' -b') + except run.MRtrixCmdError: + pass + try: + pre_bet_image = fsl.find_image('T1_preBET') + except MRtrixError: + app.warn('FSL script \'standard_space_roi\' did not complete successfully' + \ + ('' if shutil.which('dc') else ' (possibly due to program \'dc\' not being installed') + '; ' + \ + 'attempting to continue by providing un-cropped image to BET') + pre_bet_image = 'T1.nii' + + # BET + run.command(bet_cmd + ' ' + pre_bet_image + ' T1_BET' + fsl_suffix + ' -f 0.15 -R') + fast_t1_input = fsl.find_image('T1_BET' + fsl_suffix) + + if os.path.exists('T2.nii'): + if app.ARGS.nocrop: + fast_t2_input = 'T2.nii' + else: + # Just a reduction of FoV, no sub-voxel interpolation going on + run.command('mrtransform T2.nii T2_cropped.nii -template ' + fast_t1_input + ' -interp nearest') + fast_t2_input = 'T2_cropped.nii' + + # Finish branching based on brain masking + + # FAST + if fast_t2_input: + run.command(fast_cmd + ' -S 2 ' + fast_t2_input + ' ' + fast_t1_input) + else: + run.command(fast_cmd + ' ' + fast_t1_input) + + # FIRST + first_input = 'T1.nii' + if upsample_for_first: + app.warn('Generating 1mm isotropic T1 image for FIRST in hope of preventing failure, since input image is of lower resolution') + run.command('mrgrid T1.nii regrid T1_1mm.nii -voxel 1.0 -interp sinc') + first_input = 'T1_1mm.nii' + first_brain_extracted_option = '' + if app.ARGS.premasked: + first_brain_extracted_option = ' -b' + first_debug_option = '' + if not app.DO_CLEANUP: + first_debug_option = ' -d' + first_verbosity_option = '' + if app.VERBOSITY == 3: + first_verbosity_option = ' -v' + run.command(first_cmd + ' -m none -s ' + ','.join(sgm_structures) + ' -i ' + first_input + ' -o first' + first_brain_extracted_option + first_debug_option + first_verbosity_option) + fsl.check_first('first', sgm_structures) + + # Convert FIRST meshes to partial volume images + pve_image_list = [ ] + progress = app.ProgressBar('Generating partial volume images for SGM structures', len(sgm_structures)) + for struct in sgm_structures: + pve_image_path = 'mesh2voxel_' + struct + '.mif' + vtk_in_path = 'first-' + struct + '_first.vtk' + vtk_temp_path = struct + '.vtk' + run.command('meshconvert ' + vtk_in_path + ' ' + vtk_temp_path + ' -transform first2real ' + first_input) + run.command('mesh2voxel ' + vtk_temp_path + ' ' + fast_t1_input + ' ' + pve_image_path) + pve_image_list.append(pve_image_path) + progress.increment() + progress.done() + run.command(['mrmath', pve_image_list, 'sum', '-', '|', \ + 'mrcalc', '-', '1.0', '-min', 'all_sgms.mif']) + + # Combine the tissue images into the 5TT format within the script itself + fast_output_prefix = 
fast_t1_input.split('.')[0] + fast_csf_output = fsl.find_image(fast_output_prefix + '_pve_0') + fast_gm_output = fsl.find_image(fast_output_prefix + '_pve_1') + fast_wm_output = fsl.find_image(fast_output_prefix + '_pve_2') + # Step 1: Run LCC on the WM image + run.command('mrthreshold ' + fast_wm_output + ' - -abs 0.001 | maskfilter - connect - -connectivity | mrcalc 1 - 1 -gt -sub remove_unconnected_wm_mask.mif -datatype bit') + # Step 2: Generate the images in the same fashion as the old 5ttgen binary used to: + # - Preserve CSF as-is + # - Preserve SGM, unless it results in a sum of volume fractions greater than 1, in which case clamp + # - Multiply the FAST volume fractions of GM and CSF, so that the sum of CSF, SGM, CGM and WM is 1.0 + run.command('mrcalc ' + fast_csf_output + ' remove_unconnected_wm_mask.mif -mult csf.mif') + run.command('mrcalc 1.0 csf.mif -sub all_sgms.mif -min sgm.mif') + run.command('mrcalc 1.0 csf.mif sgm.mif -add -sub ' + fast_gm_output + ' ' + fast_wm_output + ' -add -div multiplier.mif') + run.command('mrcalc multiplier.mif -finite multiplier.mif 0.0 -if multiplier_noNAN.mif') + run.command('mrcalc ' + fast_gm_output + ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult cgm.mif') + run.command('mrcalc ' + fast_wm_output + ' multiplier_noNAN.mif -mult remove_unconnected_wm_mask.mif -mult wm.mif') + run.command('mrcalc 0 wm.mif -min path.mif') + run.command('mrcat cgm.mif sgm.mif wm.mif csf.mif path.mif - -axis 3 | mrconvert - combined_precrop.mif -strides +2,+3,+4,+1') + + # Crop to reduce file size (improves caching of image data during tracking) + if app.ARGS.nocrop: + run.function(os.rename, 'combined_precrop.mif', 'result.mif') + else: + run.command('mrmath combined_precrop.mif sum - -axis 3 | mrthreshold - - -abs 0.5 | mrgrid combined_precrop.mif crop result.mif -mask -') + + run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/5ttgen/fsl/get_inputs.py b/python/mrtrix3/5ttgen/fsl/get_inputs.py new file mode 100644 index 0000000000..ef572fc48a --- /dev/null +++ b/python/mrtrix3/5ttgen/fsl/get_inputs.py @@ -0,0 +1,27 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
+ +from mrtrix3 import MRtrixError +from mrtrix3 import app, image, path, run + +def get_inputs(): #pylint: disable=unused-variable + image.check_3d_nonunity(path.from_user(app.ARGS.input, False)) + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('input.mif')) + if app.ARGS.mask: + run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit -strides -1,+2,+3') + if app.ARGS.t2: + if not image.match(path.from_user(app.ARGS.input, False), path.from_user(app.ARGS.t2, False)): + raise MRtrixError('Provided T2 image does not match input T1 image') + run.command('mrconvert ' + path.from_user(app.ARGS.t2) + ' ' + path.to_scratch('T2.nii') + ' -strides -1,+2,+3') diff --git a/python/mrtrix3/5ttgen/fsl/usage.py b/python/mrtrix3/5ttgen/fsl/usage.py new file mode 100644 index 0000000000..012baa7f9d --- /dev/null +++ b/python/mrtrix3/5ttgen/fsl/usage.py @@ -0,0 +1,30 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('fsl', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Use FSL commands to generate the 5TT image based on a T1-weighted image') + parser.add_citation('Smith, S. M. Fast robust automated brain extraction. Human Brain Mapping, 2002, 17, 143-155', is_external=True) + parser.add_citation('Zhang, Y.; Brady, M. & Smith, S. Segmentation of brain MR images through a hidden Markov random field model and the expectation-maximization algorithm. IEEE Transactions on Medical Imaging, 2001, 20, 45-57', is_external=True) + parser.add_citation('Patenaude, B.; Smith, S. M.; Kennedy, D. N. & Jenkinson, M. A Bayesian model of shape and appearance for subcortical brain segmentation. NeuroImage, 2011, 56, 907-922', is_external=True) + parser.add_citation('Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. 
NeuroImage, 2004, 23, S208-S219', is_external=True) + parser.add_argument('input', help='The input T1-weighted image') + parser.add_argument('output', help='The output 5TT image') + options = parser.add_argument_group('Options specific to the \'fsl\' algorithm') + options.add_argument('-t2', metavar='', help='Provide a T2-weighted image in addition to the default T1-weighted image; this will be used as a second input to FSL FAST') + options.add_argument('-mask', help='Manually provide a brain mask, rather than deriving one in the script') + options.add_argument('-premasked', action='store_true', help='Indicate that brain masking has already been applied to the input image') + parser.flag_mutually_exclusive_options( [ 'mask', 'premasked' ] ) diff --git a/python/mrtrix3/5ttgen/gif/__init__.py b/python/mrtrix3/5ttgen/gif/__init__.py index 36b4faf699..e69de29bb2 100644 --- a/python/mrtrix3/5ttgen/gif/__init__.py +++ b/python/mrtrix3/5ttgen/gif/__init__.py @@ -1,68 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -import os -from mrtrix3 import MRtrixError -from mrtrix3 import app, image, path, run - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('gif', parents=[base_parser]) - parser.set_author('Matteo Mancini (m.mancini@ucl.ac.uk)') - parser.set_synopsis('Generate the 5TT image based on a Geodesic Information Flow (GIF) segmentation image') - parser.add_argument('input', help='The input Geodesic Information Flow (GIF) segmentation image') - parser.add_argument('output', help='The output 5TT image') - - -def check_output_paths(): #pylint: disable=unused-variable - app.check_output_path(app.ARGS.output) - - -def check_gif_input(image_path): - dim = image.Header(image_path).size() - if len(dim) < 4: - raise MRtrixError('Image \'' + image_path + '\' does not look like GIF segmentation (less than 4 spatial dimensions)') - if min(dim[:4]) == 1: - raise MRtrixError('Image \'' + image_path + '\' does not look like GIF segmentation (axis with size 1)') - - -def get_inputs(): #pylint: disable=unused-variable - check_gif_input(path.from_user(app.ARGS.input, False)) - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('input.mif')) - - -def execute(): #pylint: disable=unused-variable - # Generate the images related to each tissue - run.command('mrconvert input.mif -coord 3 1 CSF.mif') - run.command('mrconvert input.mif -coord 3 2 cGM.mif') - run.command('mrconvert input.mif -coord 3 3 cWM.mif') - run.command('mrconvert input.mif -coord 3 4 sGM.mif') - - # Combine WM and subcortical WM into a unique WM image - run.command('mrconvert input.mif - -coord 3 3,5 | mrmath - sum WM.mif -axis 3') - - # Create an empty lesion image - run.command('mrcalc WM.mif 0 -mul lsn.mif') - - # Convert into the 5tt format - run.command('mrcat cGM.mif sGM.mif WM.mif CSF.mif lsn.mif 5tt.mif -axis 3') - - if 
app.ARGS.nocrop: - run.function(os.rename, '5tt.mif', 'result.mif') - else: - run.command('mrmath 5tt.mif sum - -axis 3 | mrthreshold - - -abs 0.5 | mrgrid 5tt.mif crop result.mif -mask -') - - run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/5ttgen/gif/check_output_paths.py b/python/mrtrix3/5ttgen/gif/check_output_paths.py new file mode 100644 index 0000000000..bff0fa0f2b --- /dev/null +++ b/python/mrtrix3/5ttgen/gif/check_output_paths.py @@ -0,0 +1,19 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.output) diff --git a/python/mrtrix3/5ttgen/gif/execute.py b/python/mrtrix3/5ttgen/gif/execute.py new file mode 100644 index 0000000000..a221335697 --- /dev/null +++ b/python/mrtrix3/5ttgen/gif/execute.py @@ -0,0 +1,40 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
+ +import os +from mrtrix3 import app, path, run + +def execute(): #pylint: disable=unused-variable + # Generate the images related to each tissue + run.command('mrconvert input.mif -coord 3 1 CSF.mif') + run.command('mrconvert input.mif -coord 3 2 cGM.mif') + run.command('mrconvert input.mif -coord 3 3 cWM.mif') + run.command('mrconvert input.mif -coord 3 4 sGM.mif') + + # Combine WM and subcortical WM into a unique WM image + run.command('mrconvert input.mif - -coord 3 3,5 | mrmath - sum WM.mif -axis 3') + + # Create an empty lesion image + run.command('mrcalc WM.mif 0 -mul lsn.mif') + + # Convert into the 5tt format + run.command('mrcat cGM.mif sGM.mif WM.mif CSF.mif lsn.mif 5tt.mif -axis 3') + + if app.ARGS.nocrop: + run.function(os.rename, '5tt.mif', 'result.mif') + else: + run.command('mrmath 5tt.mif sum - -axis 3 | mrthreshold - - -abs 0.5 | mrgrid 5tt.mif crop result.mif -mask -') + + run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/5ttgen/gif/get_inputs.py b/python/mrtrix3/5ttgen/gif/get_inputs.py new file mode 100644 index 0000000000..733b185a47 --- /dev/null +++ b/python/mrtrix3/5ttgen/gif/get_inputs.py @@ -0,0 +1,29 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import MRtrixError +from mrtrix3 import app, image, path, run + +def check_gif_input(image_path): + dim = image.Header(image_path).size() + if len(dim) < 4: + raise MRtrixError('Image \'' + image_path + '\' does not look like GIF segmentation (less than 4 spatial dimensions)') + if min(dim[:4]) == 1: + raise MRtrixError('Image \'' + image_path + '\' does not look like GIF segmentation (axis with size 1)') + + +def get_inputs(): #pylint: disable=unused-variable + check_gif_input(path.from_user(app.ARGS.input, False)) + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('input.mif')) diff --git a/python/mrtrix3/5ttgen/gif/usage.py b/python/mrtrix3/5ttgen/gif/usage.py new file mode 100644 index 0000000000..586d321c58 --- /dev/null +++ b/python/mrtrix3/5ttgen/gif/usage.py @@ -0,0 +1,21 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
+ +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('gif', parents=[base_parser]) + parser.set_author('Matteo Mancini (m.mancini@ucl.ac.uk)') + parser.set_synopsis('Generate the 5TT image based on a Geodesic Information Flow (GIF) segmentation image') + parser.add_argument('input', help='The input Geodesic Information Flow (GIF) segmentation image') + parser.add_argument('output', help='The output 5TT image') diff --git a/python/mrtrix3/5ttgen/hsvs/__init__.py b/python/mrtrix3/5ttgen/hsvs/__init__.py index 1605d63b48..c892ffc712 100644 --- a/python/mrtrix3/5ttgen/hsvs/__init__.py +++ b/python/mrtrix3/5ttgen/hsvs/__init__.py @@ -13,13 +13,8 @@ # # For more details, see http://www.mrtrix.org/. - - -import glob, os, re, shutil +import os from mrtrix3 import MRtrixError -from mrtrix3 import app, fsl, image, path, run - - HIPPOCAMPI_CHOICES = [ 'subfields', 'first', 'aseg' ] THALAMI_CHOICES = [ 'nuclei', 'first', 'aseg' ] @@ -30,28 +25,6 @@ ATTEMPT_PC = False -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('hsvs', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Generate a 5TT image based on Hybrid Surface and Volume Segmentation (HSVS), using FreeSurfer and FSL tools') - parser.add_argument('input', help='The input FreeSurfer subject directory') - parser.add_argument('output', help='The output 5TT image') - parser.add_argument('-template', help='Provide an image that will form the template for the generated 5TT image') - parser.add_argument('-hippocampi', choices=HIPPOCAMPI_CHOICES, help='Select method to be used for hippocampi (& amygdalae) segmentation; options are: ' + ','.join(HIPPOCAMPI_CHOICES)) - parser.add_argument('-thalami', choices=THALAMI_CHOICES, help='Select method to be used for thalamic segmentation; options are: ' + ','.join(THALAMI_CHOICES)) - parser.add_argument('-white_stem', action='store_true', help='Classify the brainstem as white matter') - parser.add_citation('Smith, R.; Skoch, A.; Bajada, C.; Caspers, S.; Connelly, A. Hybrid Surface-Volume Segmentation for improved Anatomically-Constrained Tractography. In Proc OHBM 2020') - parser.add_citation('Fischl, B. Freesurfer. NeuroImage, 2012, 62(2), 774-781', is_external=True) - parser.add_citation('Iglesias, J.E.; Augustinack, J.C.; Nguyen, K.; Player, C.M.; Player, A.; Wright, M.; Roy, N.; Frosch, M.P.; Mc Kee, A.C.; Wald, L.L.; Fischl, B.; and Van Leemput, K. A computational atlas of the hippocampal formation using ex vivo, ultra-high resolution MRI: Application to adaptive segmentation of in vivo MRI. NeuroImage, 2015, 115, 117-137', condition='If FreeSurfer hippocampal subfields module is utilised', is_external=True) - parser.add_citation('Saygin, Z.M. & Kliemann, D.; Iglesias, J.E.; van der Kouwe, A.J.W.; Boyd, E.; Reuter, M.; Stevens, A.; Van Leemput, K.; Mc Kee, A.; Frosch, M.P.; Fischl, B.; Augustinack, J.C. High-resolution magnetic resonance imaging reveals nuclei of the human amygdala: manual segmentation to automatic atlas. NeuroImage, 2017, 155, 370-382', condition='If FreeSurfer hippocampal subfields module is utilised and includes amygdalae segmentation', is_external=True) - parser.add_citation('Iglesias, J.E.; Insausti, R.; Lerma-Usabiaga, G.; Bocchetta, M.; Van Leemput, K.; Greve, D.N.; van der Kouwe, A.; ADNI; Fischl, B.; Caballero-Gaudes, C.; Paz-Alonso, P.M. 
A probabilistic atlas of the human thalamic nuclei combining ex vivo MRI and histology. NeuroImage, 2018, 183, 314-326', condition='If -thalami nuclei is used', is_external=True) - parser.add_citation('Ardekani, B.; Bachman, A.H. Model-based automatic detection of the anterior and posterior commissures on MRI scans. NeuroImage, 2009, 46(3), 677-682', condition='If ACPCDetect is installed', is_external=True) - - - - - - ASEG_STRUCTURES = [ ( 5, 4, 'Left-Inf-Lat-Vent'), (14, 4, '3rd-Ventricle'), @@ -123,771 +96,3 @@ def usage(base_parser, subparsers): #pylint: disable=unused-variable 'L_Thal':'Left-Thalamus-Proper', 'R_Thal':'Right-Thalamus-Proper' } - - -def check_file(filepath): - if not os.path.isfile(filepath): - raise MRtrixError('Required input file missing (expected location: ' + filepath + ')') - -def check_dir(dirpath): - if not os.path.isdir(dirpath): - raise MRtrixError('Unable to find sub-directory \'' + dirpath + '\' within input directory') - - - - -def check_output_paths(): #pylint: disable=unused-variable - app.check_output_path(app.ARGS.output) - - - -def get_inputs(): #pylint: disable=unused-variable - # Most freeSurfer files will be accessed in-place; no need to pre-convert them into the temporary directory - # However convert aparc image so that it does not have to be repeatedly uncompressed - run.command('mrconvert ' + path.from_user(os.path.join(app.ARGS.input, 'mri', 'aparc+aseg.mgz'), True) + ' ' + path.to_scratch('aparc.mif', True)) - if app.ARGS.template: - run.command('mrconvert ' + path.from_user(app.ARGS.template, True) + ' ' + path.to_scratch('template.mif', True) + ' -axes 0,1,2') - - - -def execute(): #pylint: disable=unused-variable - - subject_dir = os.path.abspath(path.from_user(app.ARGS.input, False)) - if not os.path.isdir(subject_dir): - raise MRtrixError('Input to hsvs algorithm must be a directory') - surf_dir = os.path.join(subject_dir, 'surf') - mri_dir = os.path.join(subject_dir, 'mri') - check_dir(surf_dir) - check_dir(mri_dir) - #aparc_image = os.path.join(mri_dir, 'aparc+aseg.mgz') - aparc_image = 'aparc.mif' - mask_image = os.path.join(mri_dir, 'brainmask.mgz') - reg_file = os.path.join(mri_dir, 'transforms', 'talairach.xfm') - check_file(aparc_image) - check_file(mask_image) - check_file(reg_file) - template_image = 'template.mif' if app.ARGS.template else aparc_image - - have_first = False - have_fast = False - fsl_path = os.environ.get('FSLDIR', '') - if fsl_path: - # Use brain-extracted, bias-corrected image for FSL tools - norm_image = os.path.join(mri_dir, 'norm.mgz') - check_file(norm_image) - run.command('mrconvert ' + norm_image + ' T1.nii -stride -1,+2,+3') - # Verify FAST availability - try: - fast_cmd = fsl.exe_name('fast') - except MRtrixError: - fast_cmd = None - if fast_cmd: - have_fast = True - if fast_cmd == 'fast': - fast_suffix = fsl.suffix() - else: - fast_suffix = '.nii.gz' - else: - app.warn('Could not find FSL program fast; script will not use fast for cerebellar tissue segmentation') - # Verify FIRST availability - try: - first_cmd = fsl.exe_name('run_first_all') - except MRtrixError: - first_cmd = None - first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin') - have_first = first_cmd and os.path.isdir(first_atlas_path) - else: - app.warn('Environment variable FSLDIR is not set; script will run without FSL components') - - acpc_string = 'anterior ' + ('& posterior commissures' if ATTEMPT_PC else 'commissure') - have_acpcdetect = bool(shutil.which('acpcdetect')) and 'ARTHOME' in os.environ - if 
have_acpcdetect: - if have_fast: - app.console('ACPCdetect and FSL FAST will be used for explicit segmentation of ' + acpc_string) - else: - app.warn('ACPCdetect is installed, but FSL FAST not found; cannot segment ' + acpc_string) - have_acpcdetect = False - else: - app.warn('ACPCdetect not installed; cannot segment ' + acpc_string) - - # Need to perform a better search for hippocampal subfield output: names & version numbers may change - have_hipp_subfields = False - hipp_subfield_has_amyg = False - # Could result in multiple matches - hipp_subfield_regex = re.compile(r'^[lr]h\.hippo[a-zA-Z]*Labels-[a-zA-Z0-9]*\.v[0-9]+\.?[a-zA-Z0-9]*\.mg[hz]$') - hipp_subfield_all_images = sorted(list(filter(hipp_subfield_regex.match, os.listdir(mri_dir)))) - # Remove any images that provide segmentations in FreeSurfer voxel space; we want the high-resolution versions - hipp_subfield_all_images = [ item for item in hipp_subfield_all_images if 'FSvoxelSpace' not in item ] - # Arrange the images into lr pairs - hipp_subfield_paired_images = [ ] - for lh_filename in [ item for item in hipp_subfield_all_images if item[0] == 'l' ]: - if 'r' + lh_filename[1:] in hipp_subfield_all_images: - hipp_subfield_paired_images.append(lh_filename[1:]) - # Choose which of these image pairs we are going to use - for code in [ '.CA.', '.FS60.' ]: - if any(code in filename for filename in hipp_subfield_paired_images): - hipp_subfield_image_suffix = [ filename for filename in hipp_subfield_paired_images if code in filename ][0] - have_hipp_subfields = True - break - # Choose the pair with the shortest filename string if we have no other criteria - if not have_hipp_subfields and hipp_subfield_paired_images: - hipp_subfield_paired_images = sorted(hipp_subfield_paired_images, key=len) - if hipp_subfield_paired_images: - hipp_subfield_image_suffix = hipp_subfield_paired_images[0] - have_hipp_subfields = True - if have_hipp_subfields: - hipp_subfield_has_amyg = 'Amyg' in hipp_subfield_image_suffix - - # Perform a similar search for thalamic nuclei submodule output - thal_nuclei_image = None - thal_nuclei_regex = re.compile(r'^ThalamicNuclei\.v[0-9]+\.?[a-zA-Z0-9]*.mg[hz]$') - thal_nuclei_all_images = sorted(list(filter(thal_nuclei_regex.match, os.listdir(mri_dir)))) - thal_nuclei_all_images = [ item for item in thal_nuclei_all_images if 'FSvoxelSpace' not in item ] - if thal_nuclei_all_images: - if len(thal_nuclei_all_images) == 1: - thal_nuclei_image = thal_nuclei_all_images[0] - else: - # How to choose which version to use? 
- # Start with software version - thal_nuclei_versions = [ int(item.split('.')[1].lstrip('v')) for item in thal_nuclei_all_images ] - thal_nuclei_all_images = [ filepath for filepath, version_number in zip(thal_nuclei_all_images, thal_nuclei_versions) if version_number == max(thal_nuclei_versions) ] - if len(thal_nuclei_all_images) == 1: - thal_nuclei_image = thal_nuclei_all_images[0] - else: - # Revert to filename length - thal_nuclei_all_images = sorted(thal_nuclei_all_images, key=len) - thal_nuclei_image = thal_nuclei_all_images[0] - - # If particular hippocampal segmentation method is requested, make sure we can perform such; - # if not, decide how to segment hippocampus based on what's available - hippocampi_method = app.ARGS.hippocampi - if hippocampi_method: - if hippocampi_method == 'subfields': - if not have_hipp_subfields: - raise MRtrixError('Could not isolate hippocampal subfields module output (candidate images: ' + str(hipp_subfield_all_images) + ')') - elif hippocampi_method == 'first': - if not have_first: - raise MRtrixError('Cannot use "first" method for hippocampi segmentation; check FSL installation') - else: - if have_hipp_subfields: - hippocampi_method = 'subfields' - app.console('Hippocampal subfields module output detected; will utilise for hippocampi ' - + ('and amygdalae ' if hipp_subfield_has_amyg else '') - + 'segmentation') - elif have_first: - hippocampi_method = 'first' - app.console('No hippocampal subfields module output detected, but FSL FIRST is installed; ' - 'will utilise latter for hippocampi segmentation') - else: - hippocampi_method = 'aseg' - app.console('Neither hippocampal subfields module output nor FSL FIRST detected; ' - 'FreeSurfer aseg will be used for hippocampi segmentation') - - if hippocampi_method == 'subfields': - if 'FREESURFER_HOME' not in os.environ: - raise MRtrixError('FREESURFER_HOME environment variable not set; required for use of hippocampal subfields module') - freesurfer_lut_file = os.path.join(os.environ['FREESURFER_HOME'], 'FreeSurferColorLUT.txt') - check_file(freesurfer_lut_file) - hipp_lut_file = os.path.join(path.shared_data_path(), path.script_subdir_name(), 'hsvs', 'HippSubfields.txt') - check_file(hipp_lut_file) - if hipp_subfield_has_amyg: - amyg_lut_file = os.path.join(path.shared_data_path(), path.script_subdir_name(), 'hsvs', 'AmygSubfields.txt') - check_file(amyg_lut_file) - - if app.ARGS.sgm_amyg_hipp: - app.warn('Option -sgm_amyg_hipp ignored ' - '(hsvs algorithm always assigns hippocampi & ampygdalae as sub-cortical grey matter)') - - - # Similar logic for thalami - thalami_method = app.ARGS.thalami - if thalami_method: - if thalami_method == 'nuclei': - if not thal_nuclei_image: - raise MRtrixError('Could not find thalamic nuclei module output') - elif thalami_method == 'first': - if not have_first: - raise MRtrixError('Cannot use "first" method for thalami segmentation; check FSL installation') - else: - # Not happy with outputs of thalamic nuclei submodule; default to FIRST - if have_first: - thalami_method = 'first' - if thal_nuclei_image: - app.console('Thalamic nuclei submodule output ignored in favour of FSL FIRST ' - '(can override using -thalami option)') - else: - app.console('Will utilise FSL FIRST for thalami segmentation') - elif thal_nuclei_image: - thalami_method = 'nuclei' - app.console('Will utilise detected thalamic nuclei submodule output') - else: - thalami_method = 'aseg' - app.console('Neither thalamic nuclei module output nor FSL FIRST detected; ' - 'FreeSurfer aseg will be used for 
thalami segmentation') - - - ########################### - # Commencing segmentation # - ########################### - - tissue_images = [ [ 'lh.pial.mif', 'rh.pial.mif' ], - [], - [ 'lh.white.mif', 'rh.white.mif' ], - [], - [] ] - - # Get the main cerebrum segments; these are already smooth - progress = app.ProgressBar('Mapping FreeSurfer cortical reconstruction to partial volume images', 8) - for hemi in [ 'lh', 'rh' ]: - for basename in [ hemi+'.white', hemi+'.pial' ]: - filepath = os.path.join(surf_dir, basename) - check_file(filepath) - transformed_path = basename + '_realspace.obj' - run.command('meshconvert ' + filepath + ' ' + transformed_path + ' -binary -transform fs2real ' + aparc_image) - progress.increment() - run.command('mesh2voxel ' + transformed_path + ' ' + template_image + ' ' + basename + '.mif') - app.cleanup(transformed_path) - progress.increment() - progress.done() - - - - # Get other structures that need to be converted from the aseg voxel image - from_aseg = list(ASEG_STRUCTURES) - if hippocampi_method == 'subfields': - if not hipp_subfield_has_amyg and not have_first: - from_aseg.extend(AMYG_ASEG) - elif hippocampi_method == 'aseg': - from_aseg.extend(HIPP_ASEG) - from_aseg.extend(AMYG_ASEG) - if thalami_method == 'aseg': - from_aseg.extend(THAL_ASEG) - if not have_first: - from_aseg.extend(OTHER_SGM_ASEG) - progress = app.ProgressBar('Smoothing non-cortical structures segmented by FreeSurfer', len(from_aseg) + 2) - for (index, tissue, name) in from_aseg: - init_mesh_path = name + '_init.vtk' - smoothed_mesh_path = name + '.vtk' - run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq - | voxel2mesh - -threshold 0.5 ' + init_mesh_path) - run.command('meshfilter ' + init_mesh_path + ' smooth ' + smoothed_mesh_path) - app.cleanup(init_mesh_path) - run.command('mesh2voxel ' + smoothed_mesh_path + ' ' + template_image + ' ' + name + '.mif') - app.cleanup(smoothed_mesh_path) - tissue_images[tissue-1].append(name + '.mif') - progress.increment() - # Lateral ventricles are separate as we want to combine with choroid plexus prior to mesh conversion - for hemi_index, hemi_name in enumerate(['Left', 'Right']): - name = hemi_name + '_LatVent_ChorPlex' - init_mesh_path = name + '_init.vtk' - smoothed_mesh_path = name + '.vtk' - run.command('mrcalc ' + ' '.join(aparc_image + ' ' + str(index) + ' -eq' for index, tissue, name in VENTRICLE_CP_ASEG[hemi_index]) + ' -add - | ' - + 'voxel2mesh - -threshold 0.5 ' + init_mesh_path) - run.command('meshfilter ' + init_mesh_path + ' smooth ' + smoothed_mesh_path) - app.cleanup(init_mesh_path) - run.command('mesh2voxel ' + smoothed_mesh_path + ' ' + template_image + ' ' + name + '.mif') - app.cleanup(smoothed_mesh_path) - tissue_images[3].append(name + '.mif') - progress.increment() - progress.done() - - - - # Combine corpus callosum segments before smoothing - progress = app.ProgressBar('Combining and smoothing corpus callosum segmentation', len(CORPUS_CALLOSUM_ASEG) + 3) - for (index, name) in CORPUS_CALLOSUM_ASEG: - run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq ' + name + '.mif -datatype bit') - progress.increment() - cc_init_mesh_path = 'combined_corpus_callosum_init.vtk' - cc_smoothed_mesh_path = 'combined_corpus_callosum.vtk' - run.command('mrmath ' + ' '.join([ name + '.mif' for (index, name) in CORPUS_CALLOSUM_ASEG ]) + ' sum - | voxel2mesh - -threshold 0.5 ' + cc_init_mesh_path) - for name in [ n for _, n in CORPUS_CALLOSUM_ASEG ]: - app.cleanup(name + '.mif') - progress.increment() - 
run.command('meshfilter ' + cc_init_mesh_path + ' smooth ' + cc_smoothed_mesh_path) - app.cleanup(cc_init_mesh_path) - progress.increment() - run.command('mesh2voxel ' + cc_smoothed_mesh_path + ' ' + template_image + ' combined_corpus_callosum.mif') - app.cleanup(cc_smoothed_mesh_path) - progress.done() - tissue_images[2].append('combined_corpus_callosum.mif') - - - - # Deal with brain stem, including determining those voxels that should - # be erased from the 5TT image in order for streamlines traversing down - # the spinal column to be terminated & accepted - bs_fullmask_path = 'brain_stem_init.mif' - bs_cropmask_path = '' - progress = app.ProgressBar('Segmenting and cropping brain stem', 5) - run.command('mrcalc ' + aparc_image + ' ' + str(BRAIN_STEM_ASEG[0][0]) + ' -eq ' - + ' -add '.join([ aparc_image + ' ' + str(index) + ' -eq' for index, name in BRAIN_STEM_ASEG[1:] ]) + ' -add ' - + bs_fullmask_path + ' -datatype bit') - progress.increment() - bs_init_mesh_path = 'brain_stem_init.vtk' - run.command('voxel2mesh ' + bs_fullmask_path + ' ' + bs_init_mesh_path) - progress.increment() - bs_smoothed_mesh_path = 'brain_stem.vtk' - run.command('meshfilter ' + bs_init_mesh_path + ' smooth ' + bs_smoothed_mesh_path) - app.cleanup(bs_init_mesh_path) - progress.increment() - run.command('mesh2voxel ' + bs_smoothed_mesh_path + ' ' + template_image + ' brain_stem.mif') - app.cleanup(bs_smoothed_mesh_path) - progress.increment() - fourthventricle_zmin = min(int(line.split()[2]) for line in run.command('maskdump 4th-Ventricle.mif')[0].splitlines()) - if fourthventricle_zmin: - bs_cropmask_path = 'brain_stem_crop.mif' - run.command('mredit brain_stem.mif - ' + ' '.join([ '-plane 2 ' + str(index) + ' 0' for index in range(0, fourthventricle_zmin) ]) + ' | ' - 'mrcalc brain_stem.mif - -sub 1e-6 -gt ' + bs_cropmask_path + ' -datatype bit') - app.cleanup(bs_fullmask_path) - progress.done() - - - if hippocampi_method == 'subfields': - progress = app.ProgressBar('Using detected FreeSurfer hippocampal subfields module output', - 64 if hipp_subfield_has_amyg else 32) - - subfields = [ ( hipp_lut_file, 'hipp' ) ] - if hipp_subfield_has_amyg: - subfields.append(( amyg_lut_file, 'amyg' )) - - for subfields_lut_file, structure_name in subfields: - for hemi, filename in zip([ 'Left', 'Right'], [ prefix + hipp_subfield_image_suffix for prefix in [ 'l', 'r' ] ]): - # Extract individual components from image and assign to different tissues - subfields_all_tissues_image = hemi + '_' + structure_name + '_subfields.mif' - run.command('labelconvert ' + os.path.join(mri_dir, filename) + ' ' + freesurfer_lut_file + ' ' + subfields_lut_file + ' ' + subfields_all_tissues_image) - progress.increment() - for tissue in range(0, 5): - init_mesh_path = hemi + '_' + structure_name + '_subfield_' + str(tissue) + '_init.vtk' - smooth_mesh_path = hemi + '_' + structure_name + '_subfield_' + str(tissue) + '.vtk' - subfield_tissue_image = hemi + '_' + structure_name + '_subfield_' + str(tissue) + '.mif' - run.command('mrcalc ' + subfields_all_tissues_image + ' ' + str(tissue+1) + ' -eq - | ' + \ - 'voxel2mesh - ' + init_mesh_path) - progress.increment() - # Since the hippocampal subfields segmentation can include some fine structures, reduce the extent of smoothing - run.command('meshfilter ' + init_mesh_path + ' smooth ' + smooth_mesh_path + ' -smooth_spatial 2 -smooth_influence 2') - app.cleanup(init_mesh_path) - progress.increment() - run.command('mesh2voxel ' + smooth_mesh_path + ' ' + template_image + ' ' + 
subfield_tissue_image) - app.cleanup(smooth_mesh_path) - progress.increment() - tissue_images[tissue].append(subfield_tissue_image) - app.cleanup(subfields_all_tissues_image) - progress.done() - - - if thalami_method == 'nuclei': - progress = app.ProgressBar('Using detected FreeSurfer thalamic nuclei module output', 6) - for hemi in ['Left', 'Right']: - thal_mask_path = hemi + '_Thalamus_mask.mif' - init_mesh_path = hemi + '_Thalamus_init.vtk' - smooth_mesh_path = hemi + '_Thalamus.vtk' - thalamus_image = hemi + '_Thalamus.mif' - if hemi == 'Right': - run.command('mrthreshold ' + os.path.join(mri_dir, thal_nuclei_image) + ' -abs 8200 ' + thal_mask_path) - else: - run.command('mrcalc ' + os.path.join(mri_dir, thal_nuclei_image) + ' 0 -gt ' - + os.path.join(mri_dir, thal_nuclei_image) + ' 8200 -lt ' - + '-mult ' + thal_mask_path) - run.command('voxel2mesh ' + thal_mask_path + ' ' + init_mesh_path) - app.cleanup(thal_mask_path) - progress.increment() - run.command('meshfilter ' + init_mesh_path + ' smooth ' + smooth_mesh_path + ' -smooth_spatial 2 -smooth_influence 2') - app.cleanup(init_mesh_path) - progress.increment() - run.command('mesh2voxel ' + smooth_mesh_path + ' ' + template_image + ' ' + thalamus_image) - app.cleanup(smooth_mesh_path) - progress.increment() - tissue_images[1].append(thalamus_image) - progress.done() - - if have_first: - app.console('Running FSL FIRST to segment sub-cortical grey matter structures') - from_first = SGM_FIRST_MAP.copy() - if hippocampi_method == 'subfields': - from_first = { key: value for key, value in from_first.items() if 'Hippocampus' not in value } - if hipp_subfield_has_amyg: - from_first = { key: value for key, value in from_first.items() if 'Amygdala' not in value } - elif hippocampi_method == 'aseg': - from_first = { key: value for key, value in from_first.items() if 'Hippocampus' not in value and 'Amygdala' not in value } - if thalami_method != 'first': - from_first = { key: value for key, value in from_first.items() if 'Thalamus' not in value } - run.command(first_cmd + ' -s ' + ','.join(from_first.keys()) + ' -i T1.nii -b -o first') - fsl.check_first('first', from_first.keys()) - app.cleanup(glob.glob('T1_to_std_sub.*')) - progress = app.ProgressBar('Mapping FIRST segmentations to image', 2*len(from_first)) - for key, value in from_first.items(): - vtk_in_path = 'first-' + key + '_first.vtk' - vtk_converted_path = 'first-' + key + '_transformed.vtk' - run.command('meshconvert ' + vtk_in_path + ' ' + vtk_converted_path + ' -transform first2real T1.nii') - app.cleanup(vtk_in_path) - progress.increment() - run.command('mesh2voxel ' + vtk_converted_path + ' ' + template_image + ' ' + value + '.mif') - app.cleanup(vtk_converted_path) - tissue_images[1].append(value + '.mif') - progress.increment() - if not have_fast: - app.cleanup('T1.nii') - app.cleanup(glob.glob('first*')) - progress.done() - - # Run ACPCdetect, use results to draw spherical ROIs on T1 that will be fed to FSL FAST, - # the WM components of which will then be added to the 5TT - if have_acpcdetect: - progress = app.ProgressBar('Using ACPCdetect and FAST to segment ' + acpc_string, 5) - # ACPCdetect requires input image to be 16-bit - # We also want to realign to RAS beforehand so that we can interpret the output voxel locations properly - acpcdetect_input_image = 'T1RAS_16b.nii' - run.command('mrconvert ' + norm_image + ' -datatype uint16 -stride +1,+2,+3 ' + acpcdetect_input_image) - progress.increment() - run.command('acpcdetect -i ' + acpcdetect_input_image) - 
progress.increment() - # We need the header in order to go from voxel coordinates to scanner coordinates - acpcdetect_input_header = image.Header(acpcdetect_input_image) - acpcdetect_output_path = os.path.splitext(acpcdetect_input_image)[0] + '_ACPC.txt' - app.cleanup(acpcdetect_input_image) - with open(acpcdetect_output_path, 'r', encoding='utf-8') as acpc_file: - acpcdetect_output_data = acpc_file.read().splitlines() - app.cleanup(glob.glob(os.path.splitext(acpcdetect_input_image)[0] + "*")) - # Need to scan through the contents of this file, - # isolating the AC and PC locations - ac_voxel = pc_voxel = None - for index, line in enumerate(acpcdetect_output_data): - if 'AC' in line and 'voxel location' in line: - ac_voxel = [float(item) for item in acpcdetect_output_data[index+1].strip().split()] - elif 'PC' in line and 'voxel location' in line: - pc_voxel = [float(item) for item in acpcdetect_output_data[index+1].strip().split()] - if not ac_voxel or not pc_voxel: - raise MRtrixError('Error parsing text file from "acpcdetect"') - - def voxel2scanner(voxel, header): - return [ voxel[0]*header.spacing()[0]*header.transform()[axis][0] - + voxel[1]*header.spacing()[1]*header.transform()[axis][1] - + voxel[2]*header.spacing()[2]*header.transform()[axis][2] - + header.transform()[axis][3] - for axis in range(0,3) ] - - ac_scanner = voxel2scanner(ac_voxel, acpcdetect_input_header) - pc_scanner = voxel2scanner(pc_voxel, acpcdetect_input_header) - - # Generate the mask image within which FAST will be run - acpc_prefix = 'ACPC' if ATTEMPT_PC else 'AC' - acpc_mask_image = acpc_prefix + '_FAST_mask.mif' - run.command('mrcalc ' + template_image + ' nan -eq - | ' - 'mredit - ' + acpc_mask_image + ' -scanner ' - '-sphere ' + ','.join(str(value) for value in ac_scanner) + ' 8 1 ' - + ('-sphere ' + ','.join(str(value) for value in pc_scanner) + ' 5 1' if ATTEMPT_PC else '')) - progress.increment() - - acpc_t1_masked_image = acpc_prefix + '_T1.nii' - run.command('mrtransform ' + norm_image + ' -template ' + template_image + ' - | ' - 'mrcalc - ' + acpc_mask_image + ' -mult ' + acpc_t1_masked_image) - app.cleanup(acpc_mask_image) - progress.increment() - - run.command(fast_cmd + ' -N ' + acpc_t1_masked_image) - app.cleanup(acpc_t1_masked_image) - progress.increment() - - # Ideally don't want to have to add these manually; instead add all outputs from FAST - # to the 5TT (both cerebellum and AC / PC) in a single go - # This should involve grabbing just the WM component of these images - # Actually, in retrospect, it may be preferable to do the AC PC segmentation - # earlier on, and simply add them to the list of WM structures - acpc_wm_image = acpc_prefix + '.mif' - run.command('mrconvert ' + fsl.find_image(acpc_prefix + '_T1_pve_2') + ' ' + acpc_wm_image) - tissue_images[2].append(acpc_wm_image) - app.cleanup(glob.glob(os.path.splitext(acpc_t1_masked_image)[0] + '*')) - progress.done() - - - # If we don't have FAST, do cerebellar segmentation in a comparable way to the cortical GM / WM: - # Generate one 'pial-like' surface containing the GM and WM of the cerebellum, - # and another with just the WM - if not have_fast: - progress = app.ProgressBar('Adding FreeSurfer cerebellar segmentations directly', 6) - for hemi in [ 'Left-', 'Right-' ]: - wm_index = [ index for index, tissue, name in CEREBELLUM_ASEG if name.startswith(hemi) and 'White' in name ][0] - gm_index = [ index for index, tissue, name in CEREBELLUM_ASEG if name.startswith(hemi) and 'Cortex' in name ][0] - run.command('mrcalc ' + aparc_image + ' 
' + str(wm_index) + ' -eq ' + aparc_image + ' ' + str(gm_index) + ' -eq -add - | ' + \ - 'voxel2mesh - ' + hemi + 'cerebellum_all_init.vtk') - progress.increment() - run.command('mrcalc ' + aparc_image + ' ' + str(gm_index) + ' -eq - | ' + \ - 'voxel2mesh - ' + hemi + 'cerebellum_grey_init.vtk') - progress.increment() - for name, tissue in { 'all':2, 'grey':1 }.items(): - run.command('meshfilter ' + hemi + 'cerebellum_' + name + '_init.vtk smooth ' + hemi + 'cerebellum_' + name + '.vtk') - app.cleanup(hemi + 'cerebellum_' + name + '_init.vtk') - progress.increment() - run.command('mesh2voxel ' + hemi + 'cerebellum_' + name + '.vtk ' + template_image + ' ' + hemi + 'cerebellum_' + name + '.mif') - app.cleanup(hemi + 'cerebellum_' + name + '.vtk') - progress.increment() - tissue_images[tissue].append(hemi + 'cerebellum_' + name + '.mif') - progress.done() - - - # Construct images with the partial volume of each tissue - progress = app.ProgressBar('Combining segmentations of all structures corresponding to each tissue type', 5) - for tissue in range(0,5): - run.command('mrmath ' + ' '.join(tissue_images[tissue]) + (' brain_stem.mif' if tissue == 2 else '') + ' sum - | mrcalc - 1.0 -min tissue' + str(tissue) + '_init.mif') - app.cleanup(tissue_images[tissue]) - progress.increment() - progress.done() - - - # This can hopefully be done with a connected-component analysis: Take just the WM image, and - # fill in any gaps (i.e. select the inverse, select the largest connected component, invert again) - # Make sure that floating-point values are handled appropriately - # Combine these images together using the appropriate logic in order to form the 5TT image - progress = app.ProgressBar('Modulating segmentation images based on other tissues', 9) - tissue_images = [ 'tissue0.mif', 'tissue1.mif', 'tissue2.mif', 'tissue3.mif', 'tissue4.mif' ] - run.function(os.rename, 'tissue4_init.mif', 'tissue4.mif') - progress.increment() - run.command('mrcalc tissue3_init.mif tissue3_init.mif ' + tissue_images[4] + ' -add 1.0 -sub 0.0 -max -sub 0.0 -max ' + tissue_images[3]) - app.cleanup('tissue3_init.mif') - progress.increment() - run.command('mrmath ' + ' '.join(tissue_images[3:5]) + ' sum tissuesum_34.mif') - progress.increment() - run.command('mrcalc tissue1_init.mif tissue1_init.mif tissuesum_34.mif -add 1.0 -sub 0.0 -max -sub 0.0 -max ' + tissue_images[1]) - app.cleanup('tissue1_init.mif') - app.cleanup('tissuesum_34.mif') - progress.increment() - run.command('mrmath ' + tissue_images[1] + ' ' + ' '.join(tissue_images[3:5]) + ' sum tissuesum_134.mif') - progress.increment() - run.command('mrcalc tissue2_init.mif tissue2_init.mif tissuesum_134.mif -add 1.0 -sub 0.0 -max -sub 0.0 -max ' + tissue_images[2]) - app.cleanup('tissue2_init.mif') - app.cleanup('tissuesum_134.mif') - progress.increment() - run.command('mrmath ' + ' '.join(tissue_images[1:5]) + ' sum tissuesum_1234.mif') - progress.increment() - run.command('mrcalc tissue0_init.mif tissue0_init.mif tissuesum_1234.mif -add 1.0 -sub 0.0 -max -sub 0.0 -max ' + tissue_images[0]) - app.cleanup('tissue0_init.mif') - app.cleanup('tissuesum_1234.mif') - progress.increment() - tissue_sum_image = 'tissuesum_01234.mif' - run.command('mrmath ' + ' '.join(tissue_images) + ' sum ' + tissue_sum_image) - progress.done() - - - if app.ARGS.template: - run.command('mrtransform ' + mask_image + ' -template template.mif - | mrthreshold - brainmask.mif -abs 0.5') - mask_image = 'brainmask.mif' - - - # Branch depending on whether or not FSL fast will be used to re-segment 
the cerebellum - if have_fast: - - # How to support -template option? - # - Re-grid norm.mgz to template image before running FAST - # - Re-grid FAST output to template image - # Consider splitting, including initial mapping of cerebellar regions: - # - If we're not using a separate template image, just map cerebellar regions to voxels to - # produce a mask, and run FAST within that mask - # - If we have a template, combine cerebellar regions, convert to surfaces (one per hemisphere), - # map these to the template image, run FIRST on a binary mask from this, then - # re-combine this with the tissue maps from other sources based on the estimated PVF of - # cerebellum meshes - cerebellum_volume_image = 'Cerebellum_volume.mif' - cerebellum_mask_image = 'Cerebellum_mask.mif' - t1_cerebellum_masked = 'T1_cerebellum_precrop.mif' - if app.ARGS.template: - - # If this is the case, then we haven't yet performed any cerebellar segmentation / meshing - # What we want to do is: for each hemisphere, combine all three "cerebellar" segments from FreeSurfer, - # convert to a surface, map that surface to the template image - progress = app.ProgressBar('Preparing images of cerebellum for intensity-based segmentation', 9) - cerebellar_hemi_pvf_images = [ ] - for hemi in [ 'Left', 'Right' ]: - init_mesh_path = hemi + '-Cerebellum-All-Init.vtk' - smooth_mesh_path = hemi + '-Cerebellum-All-Smooth.vtk' - pvf_image_path = hemi + '-Cerebellum-PVF-Template.mif' - cerebellum_aseg_hemi = [ entry for entry in CEREBELLUM_ASEG if hemi in entry[2] ] - run.command('mrcalc ' + aparc_image + ' ' + str(cerebellum_aseg_hemi[0][0]) + ' -eq ' + \ - ' -add '.join([ aparc_image + ' ' + str(index) + ' -eq' for index, tissue, name in cerebellum_aseg_hemi[1:] ]) + ' -add - | ' + \ - 'voxel2mesh - ' + init_mesh_path) - progress.increment() - run.command('meshfilter ' + init_mesh_path + ' smooth ' + smooth_mesh_path) - app.cleanup(init_mesh_path) - progress.increment() - run.command('mesh2voxel ' + smooth_mesh_path + ' ' + template_image + ' ' + pvf_image_path) - app.cleanup(smooth_mesh_path) - cerebellar_hemi_pvf_images.append(pvf_image_path) - progress.increment() - - # Combine the two hemispheres together into: - # - An image in preparation for running FAST - # - A combined total partial volume fraction image that will be later used for tissue recombination - run.command('mrcalc ' + ' '.join(cerebellar_hemi_pvf_images) + ' -add 1.0 -min ' + cerebellum_volume_image) - app.cleanup(cerebellar_hemi_pvf_images) - progress.increment() - - run.command('mrthreshold ' + cerebellum_volume_image + ' ' + cerebellum_mask_image + ' -abs 1e-6') - progress.increment() - run.command('mrtransform ' + norm_image + ' -template ' + template_image + ' - | ' + \ - 'mrcalc - ' + cerebellum_mask_image + ' -mult ' + t1_cerebellum_masked) - progress.done() - - else: - app.console('Preparing images of cerebellum for intensity-based segmentation') - run.command('mrcalc ' + aparc_image + ' ' + str(CEREBELLUM_ASEG[0][0]) + ' -eq ' + \ - ' -add '.join([ aparc_image + ' ' + str(index) + ' -eq' for index, tissue, name in CEREBELLUM_ASEG[1:] ]) + ' -add ' + \ - cerebellum_volume_image) - cerebellum_mask_image = cerebellum_volume_image - run.command('mrcalc T1.nii ' + cerebellum_mask_image + ' -mult ' + t1_cerebellum_masked) - - app.cleanup('T1.nii') - - # Any code below here should be compatible with cerebellum_volume_image.mif containing partial volume fractions - # (in the case of no explicit template image, it's a mask, but the logic still applies) - - 
app.console('Running FSL fast to segment the cerebellum based on intensity information') - - # Run FSL FAST just within the cerebellum - # FAST memory usage can also be huge when using a high-resolution template image: - # Crop T1 image around the cerebellum before feeding to FAST, then re-sample to full template image FoV - fast_input_image = 'T1_cerebellum.nii' - run.command('mrgrid ' + t1_cerebellum_masked + ' crop -mask ' + cerebellum_mask_image + ' ' + fast_input_image) - app.cleanup(t1_cerebellum_masked) - # Cleanup of cerebellum_mask_image: - # May be same image as cerebellum_volume_image, which is required later - if cerebellum_mask_image != cerebellum_volume_image: - app.cleanup(cerebellum_mask_image) - run.command(fast_cmd + ' -N ' + fast_input_image) - app.cleanup(fast_input_image) - - # Use glob to clean up unwanted FAST outputs - fast_output_prefix = os.path.splitext(fast_input_image)[0] - fast_pve_output_prefix = fast_output_prefix + '_pve_' - app.cleanup([ entry for entry in glob.glob(fast_output_prefix + '*') if not fast_pve_output_prefix in entry ]) - - progress = app.ProgressBar('Introducing intensity-based cerebellar segmentation into the 5TT image', 10) - fast_outputs_cropped = [ fast_pve_output_prefix + str(n) + fast_suffix for n in range(0,3) ] - fast_outputs_template = [ 'FAST_' + str(n) + '.mif' for n in range(0,3) ] - for inpath, outpath in zip(fast_outputs_cropped, fast_outputs_template): - run.command('mrtransform ' + inpath + ' -interp nearest -template ' + template_image + ' ' + outpath) - app.cleanup(inpath) - progress.increment() - if app.ARGS.template: - app.cleanup(template_image) - - # Generate the revised tissue images, using output from FAST inside the cerebellum and - # output from previous processing everywhere else - # Note that the middle intensity (grey matter) in the FAST output here gets assigned - # to the sub-cortical grey matter component - - # Some of these voxels may have existing non-zero tissue components. 
- # In that case, let's find a multiplier to apply to cerebellum tissues such that the - # sum does not exceed 1.0 - new_tissue_images = [ 'tissue0_fast.mif', 'tissue1_fast.mif', 'tissue2_fast.mif', 'tissue3_fast.mif', 'tissue4_fast.mif' ] - new_tissue_sum_image = 'tissuesum_01234_fast.mif' - cerebellum_multiplier_image = 'Cerebellar_multiplier.mif' - run.command('mrcalc ' + cerebellum_volume_image + ' ' + tissue_sum_image + ' -add 0.5 -gt 1.0 ' + tissue_sum_image + ' -sub 0.0 -if ' + cerebellum_multiplier_image) - app.cleanup(cerebellum_volume_image) - progress.increment() - run.command('mrconvert ' + tissue_images[0] + ' ' + new_tissue_images[0]) - app.cleanup(tissue_images[0]) - progress.increment() - run.command('mrcalc ' + tissue_images[1] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[1] + ' -mult -add ' + new_tissue_images[1]) - app.cleanup(tissue_images[1]) - app.cleanup(fast_outputs_template[1]) - progress.increment() - run.command('mrcalc ' + tissue_images[2] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[2] + ' -mult -add ' + new_tissue_images[2]) - app.cleanup(tissue_images[2]) - app.cleanup(fast_outputs_template[2]) - progress.increment() - run.command('mrcalc ' + tissue_images[3] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[0] + ' -mult -add ' + new_tissue_images[3]) - app.cleanup(tissue_images[3]) - app.cleanup(fast_outputs_template[0]) - app.cleanup(cerebellum_multiplier_image) - progress.increment() - run.command('mrconvert ' + tissue_images[4] + ' ' + new_tissue_images[4]) - app.cleanup(tissue_images[4]) - progress.increment() - run.command('mrmath ' + ' '.join(new_tissue_images) + ' sum ' + new_tissue_sum_image) - app.cleanup(tissue_sum_image) - progress.done() - tissue_images = new_tissue_images - tissue_sum_image = new_tissue_sum_image - - - - # For all voxels within FreeSurfer's brain mask, add to the CSF image in order to make the sum 1.0 - progress = app.ProgressBar('Performing fill operations to preserve unity tissue volume', 2) - - # Some voxels may get a non-zero cortical GM fraction due to native use of the surface representation, yet - # these voxels are actually outside FreeSurfer's own provided brain mask. So what we need to do here is - # get the union of the tissue sum nonzero image and the mask image, and use that at the -mult step of the - # mrcalc call. 
- # Required image: (tissue_sum_image > 0.0) || mask_image - # tissue_sum_image 0.0 -gt mask_image -add 1.0 -min - - new_tissue_images = [ tissue_images[0], - tissue_images[1], - tissue_images[2], - os.path.splitext(tissue_images[3])[0] + '_filled.mif', - tissue_images[4] ] - csf_fill_image = 'csf_fill.mif' - run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + tissue_sum_image + ' 0.0 -gt ' + mask_image + ' -add 1.0 -min -mult 0.0 -max ' + csf_fill_image) - app.cleanup(tissue_sum_image) - # If no template is specified, this file is part of the FreeSurfer output; hence don't modify - if app.ARGS.template: - app.cleanup(mask_image) - progress.increment() - run.command('mrcalc ' + tissue_images[3] + ' ' + csf_fill_image + ' -add ' + new_tissue_images[3]) - app.cleanup(csf_fill_image) - app.cleanup(tissue_images[3]) - progress.done() - tissue_images = new_tissue_images - - - - # Move brain stem from white matter to pathology at final step: - # this prevents the brain stem segmentation from overwriting other - # structures that it otherwise wouldn't if it were written to WM - if not app.ARGS.white_stem: - progress = app.ProgressBar('Moving brain stem to volume index 4', 3) - new_tissue_images = [ tissue_images[0], - tissue_images[1], - os.path.splitext(tissue_images[2])[0] + '_no_brainstem.mif', - tissue_images[3], - os.path.splitext(tissue_images[4])[0] + '_with_brainstem.mif' ] - run.command('mrcalc ' + tissue_images[2] + ' brain_stem.mif -min brain_stem_white_overlap.mif') - app.cleanup('brain_stem.mif') - progress.increment() - run.command('mrcalc ' + tissue_images[2] + ' brain_stem_white_overlap.mif -sub ' + new_tissue_images[2]) - app.cleanup(tissue_images[2]) - progress.increment() - run.command('mrcalc ' + tissue_images[4] + ' brain_stem_white_overlap.mif -add ' + new_tissue_images[4]) - app.cleanup(tissue_images[4]) - app.cleanup('brain_stem_white_overlap.mif') - progress.done() - tissue_images = new_tissue_images - - - - # Finally, concatenate the volumes to produce the 5TT image - app.console('Concatenating tissue volumes into 5TT format') - precrop_result_image = '5TT.mif' - if bs_cropmask_path: - run.command('mrcat ' + ' '.join(tissue_images) + ' - -axis 3 | ' + \ - '5ttedit - ' + precrop_result_image + ' -none ' + bs_cropmask_path) - app.cleanup(bs_cropmask_path) - else: - run.command('mrcat ' + ' '.join(tissue_images) + ' ' + precrop_result_image + ' -axis 3') - app.cleanup(tissue_images) - - - # Maybe don't go off all tissues here, since FreeSurfer's mask can be fairly liberal; - # instead get just a voxel clearance from all other tissue types (maybe two) - if app.ARGS.nocrop: - run.function(os.rename, precrop_result_image, 'result.mif') - else: - app.console('Cropping final 5TT image') - crop_mask_image = 'crop_mask.mif' - run.command('mrconvert ' + precrop_result_image + ' -coord 3 0,1,2,4 - | mrmath - sum - -axis 3 | mrthreshold - - -abs 0.001 | maskfilter - dilate ' + crop_mask_image) - run.command('mrgrid ' + precrop_result_image + ' crop result.mif -mask ' + crop_mask_image) - app.cleanup(crop_mask_image) - app.cleanup(precrop_result_image) - - run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), - mrconvert_keyval=path.from_user(os.path.join(app.ARGS.input, 'mri', 'aparc+aseg.mgz'), True), - force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/5ttgen/hsvs/check_output_paths.py b/python/mrtrix3/5ttgen/hsvs/check_output_paths.py new file mode 100644 index 0000000000..bff0fa0f2b --- /dev/null +++ b/python/mrtrix3/5ttgen/hsvs/check_output_paths.py @@ 
-0,0 +1,19 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.output) diff --git a/python/mrtrix3/5ttgen/hsvs/execute.py b/python/mrtrix3/5ttgen/hsvs/execute.py new file mode 100644 index 0000000000..336499fd1b --- /dev/null +++ b/python/mrtrix3/5ttgen/hsvs/execute.py @@ -0,0 +1,772 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import glob, os, re, shutil +from mrtrix3 import MRtrixError +from mrtrix3 import app, fsl, image, path, run + + + +def check_file(filepath): + if not os.path.isfile(filepath): + raise MRtrixError('Required input file missing (expected location: ' + filepath + ')') + +def check_dir(dirpath): + if not os.path.isdir(dirpath): + raise MRtrixError('Unable to find sub-directory \'' + dirpath + '\' within input directory') + + + +def execute(): #pylint: disable=unused-variable + + subject_dir = os.path.abspath(path.from_user(app.ARGS.input, False)) + if not os.path.isdir(subject_dir): + raise MRtrixError('Input to hsvs algorithm must be a directory') + surf_dir = os.path.join(subject_dir, 'surf') + mri_dir = os.path.join(subject_dir, 'mri') + check_dir(surf_dir) + check_dir(mri_dir) + #aparc_image = os.path.join(mri_dir, 'aparc+aseg.mgz') + aparc_image = 'aparc.mif' + mask_image = os.path.join(mri_dir, 'brainmask.mgz') + reg_file = os.path.join(mri_dir, 'transforms', 'talairach.xfm') + check_file(aparc_image) + check_file(mask_image) + check_file(reg_file) + template_image = 'template.mif' if app.ARGS.template else aparc_image + + have_first = False + have_fast = False + fsl_path = os.environ.get('FSLDIR', '') + if fsl_path: + # Use brain-extracted, bias-corrected image for FSL tools + norm_image = os.path.join(mri_dir, 'norm.mgz') + check_file(norm_image) + run.command('mrconvert ' + norm_image + ' T1.nii -stride -1,+2,+3') + # Verify FAST availability + try: + fast_cmd = fsl.exe_name('fast') + except MRtrixError: + fast_cmd = None + if fast_cmd: + have_fast = True + if fast_cmd == 'fast': + fast_suffix = fsl.suffix() + else: + fast_suffix = '.nii.gz' + else: + app.warn('Could not find FSL program fast; script will not use fast for cerebellar tissue segmentation') + # Verify FIRST availability + try: + 
first_cmd = fsl.exe_name('run_first_all') + except MRtrixError: + first_cmd = None + first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin') + have_first = first_cmd and os.path.isdir(first_atlas_path) + else: + app.warn('Environment variable FSLDIR is not set; script will run without FSL components') + + acpc_string = 'anterior ' + ('& posterior commissures' if ATTEMPT_PC else 'commissure') + have_acpcdetect = bool(shutil.which('acpcdetect')) and 'ARTHOME' in os.environ + if have_acpcdetect: + if have_fast: + app.console('ACPCdetect and FSL FAST will be used for explicit segmentation of ' + acpc_string) + else: + app.warn('ACPCdetect is installed, but FSL FAST not found; cannot segment ' + acpc_string) + have_acpcdetect = False + else: + app.warn('ACPCdetect not installed; cannot segment ' + acpc_string) + + # Need to perform a better search for hippocampal subfield output: names & version numbers may change + have_hipp_subfields = False + hipp_subfield_has_amyg = False + # Could result in multiple matches + hipp_subfield_regex = re.compile(r'^[lr]h\.hippo[a-zA-Z]*Labels-[a-zA-Z0-9]*\.v[0-9]+\.?[a-zA-Z0-9]*\.mg[hz]$') + hipp_subfield_all_images = sorted(list(filter(hipp_subfield_regex.match, os.listdir(mri_dir)))) + # Remove any images that provide segmentations in FreeSurfer voxel space; we want the high-resolution versions + hipp_subfield_all_images = [ item for item in hipp_subfield_all_images if 'FSvoxelSpace' not in item ] + # Arrange the images into lr pairs + hipp_subfield_paired_images = [ ] + for lh_filename in [ item for item in hipp_subfield_all_images if item[0] == 'l' ]: + if 'r' + lh_filename[1:] in hipp_subfield_all_images: + hipp_subfield_paired_images.append(lh_filename[1:]) + # Choose which of these image pairs we are going to use + for code in [ '.CA.', '.FS60.' ]: + if any(code in filename for filename in hipp_subfield_paired_images): + hipp_subfield_image_suffix = [ filename for filename in hipp_subfield_paired_images if code in filename ][0] + have_hipp_subfields = True + break + # Choose the pair with the shortest filename string if we have no other criteria + if not have_hipp_subfields and hipp_subfield_paired_images: + hipp_subfield_paired_images = sorted(hipp_subfield_paired_images, key=len) + if hipp_subfield_paired_images: + hipp_subfield_image_suffix = hipp_subfield_paired_images[0] + have_hipp_subfields = True + if have_hipp_subfields: + hipp_subfield_has_amyg = 'Amyg' in hipp_subfield_image_suffix + + # Perform a similar search for thalamic nuclei submodule output + thal_nuclei_image = None + thal_nuclei_regex = re.compile(r'^ThalamicNuclei\.v[0-9]+\.?[a-zA-Z0-9]*.mg[hz]$') + thal_nuclei_all_images = sorted(list(filter(thal_nuclei_regex.match, os.listdir(mri_dir)))) + thal_nuclei_all_images = [ item for item in thal_nuclei_all_images if 'FSvoxelSpace' not in item ] + if thal_nuclei_all_images: + if len(thal_nuclei_all_images) == 1: + thal_nuclei_image = thal_nuclei_all_images[0] + else: + # How to choose which version to use? 
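+      # Descriptive note on the selection heuristic implemented below: prefer the output of the
+      # highest module software version (parsed from the ".vNN" component of the filename);
+      # if multiple candidates still remain, fall back to the one with the shortest filename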
+ # Start with software version + thal_nuclei_versions = [ int(item.split('.')[1].lstrip('v')) for item in thal_nuclei_all_images ] + thal_nuclei_all_images = [ filepath for filepath, version_number in zip(thal_nuclei_all_images, thal_nuclei_versions) if version_number == max(thal_nuclei_versions) ] + if len(thal_nuclei_all_images) == 1: + thal_nuclei_image = thal_nuclei_all_images[0] + else: + # Revert to filename length + thal_nuclei_all_images = sorted(thal_nuclei_all_images, key=len) + thal_nuclei_image = thal_nuclei_all_images[0] + + # If particular hippocampal segmentation method is requested, make sure we can perform such; + # if not, decide how to segment hippocampus based on what's available + hippocampi_method = app.ARGS.hippocampi + if hippocampi_method: + if hippocampi_method == 'subfields': + if not have_hipp_subfields: + raise MRtrixError('Could not isolate hippocampal subfields module output (candidate images: ' + str(hipp_subfield_all_images) + ')') + elif hippocampi_method == 'first': + if not have_first: + raise MRtrixError('Cannot use "first" method for hippocampi segmentation; check FSL installation') + else: + if have_hipp_subfields: + hippocampi_method = 'subfields' + app.console('Hippocampal subfields module output detected; will utilise for hippocampi ' + + ('and amygdalae ' if hipp_subfield_has_amyg else '') + + 'segmentation') + elif have_first: + hippocampi_method = 'first' + app.console('No hippocampal subfields module output detected, but FSL FIRST is installed; ' + 'will utilise latter for hippocampi segmentation') + else: + hippocampi_method = 'aseg' + app.console('Neither hippocampal subfields module output nor FSL FIRST detected; ' + 'FreeSurfer aseg will be used for hippocampi segmentation') + + if hippocampi_method == 'subfields': + if 'FREESURFER_HOME' not in os.environ: + raise MRtrixError('FREESURFER_HOME environment variable not set; required for use of hippocampal subfields module') + freesurfer_lut_file = os.path.join(os.environ['FREESURFER_HOME'], 'FreeSurferColorLUT.txt') + check_file(freesurfer_lut_file) + hipp_lut_file = os.path.join(path.shared_data_path(), path.script_subdir_name(), 'hsvs', 'HippSubfields.txt') + check_file(hipp_lut_file) + if hipp_subfield_has_amyg: + amyg_lut_file = os.path.join(path.shared_data_path(), path.script_subdir_name(), 'hsvs', 'AmygSubfields.txt') + check_file(amyg_lut_file) + + if app.ARGS.sgm_amyg_hipp: + app.warn('Option -sgm_amyg_hipp ignored ' + '(hsvs algorithm always assigns hippocampi & ampygdalae as sub-cortical grey matter)') + + + # Similar logic for thalami + thalami_method = app.ARGS.thalami + if thalami_method: + if thalami_method == 'nuclei': + if not thal_nuclei_image: + raise MRtrixError('Could not find thalamic nuclei module output') + elif thalami_method == 'first': + if not have_first: + raise MRtrixError('Cannot use "first" method for thalami segmentation; check FSL installation') + else: + # Not happy with outputs of thalamic nuclei submodule; default to FIRST + if have_first: + thalami_method = 'first' + if thal_nuclei_image: + app.console('Thalamic nuclei submodule output ignored in favour of FSL FIRST ' + '(can override using -thalami option)') + else: + app.console('Will utilise FSL FIRST for thalami segmentation') + elif thal_nuclei_image: + thalami_method = 'nuclei' + app.console('Will utilise detected thalamic nuclei submodule output') + else: + thalami_method = 'aseg' + app.console('Neither thalamic nuclei module output nor FSL FIRST detected; ' + 'FreeSurfer aseg will be used for 
thalami segmentation') + + + ########################### + # Commencing segmentation # + ########################### + + tissue_images = [ [ 'lh.pial.mif', 'rh.pial.mif' ], + [], + [ 'lh.white.mif', 'rh.white.mif' ], + [], + [] ] + + # Get the main cerebrum segments; these are already smooth + progress = app.ProgressBar('Mapping FreeSurfer cortical reconstruction to partial volume images', 8) + for hemi in [ 'lh', 'rh' ]: + for basename in [ hemi+'.white', hemi+'.pial' ]: + filepath = os.path.join(surf_dir, basename) + check_file(filepath) + transformed_path = basename + '_realspace.obj' + run.command('meshconvert ' + filepath + ' ' + transformed_path + ' -binary -transform fs2real ' + aparc_image) + progress.increment() + run.command('mesh2voxel ' + transformed_path + ' ' + template_image + ' ' + basename + '.mif') + app.cleanup(transformed_path) + progress.increment() + progress.done() + + + + # Get other structures that need to be converted from the aseg voxel image + from_aseg = list(ASEG_STRUCTURES) + if hippocampi_method == 'subfields': + if not hipp_subfield_has_amyg and not have_first: + from_aseg.extend(AMYG_ASEG) + elif hippocampi_method == 'aseg': + from_aseg.extend(HIPP_ASEG) + from_aseg.extend(AMYG_ASEG) + if thalami_method == 'aseg': + from_aseg.extend(THAL_ASEG) + if not have_first: + from_aseg.extend(OTHER_SGM_ASEG) + progress = app.ProgressBar('Smoothing non-cortical structures segmented by FreeSurfer', len(from_aseg) + 2) + for (index, tissue, name) in from_aseg: + init_mesh_path = name + '_init.vtk' + smoothed_mesh_path = name + '.vtk' + run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq - | voxel2mesh - -threshold 0.5 ' + init_mesh_path) + run.command('meshfilter ' + init_mesh_path + ' smooth ' + smoothed_mesh_path) + app.cleanup(init_mesh_path) + run.command('mesh2voxel ' + smoothed_mesh_path + ' ' + template_image + ' ' + name + '.mif') + app.cleanup(smoothed_mesh_path) + tissue_images[tissue-1].append(name + '.mif') + progress.increment() + # Lateral ventricles are separate as we want to combine with choroid plexus prior to mesh conversion + for hemi_index, hemi_name in enumerate(['Left', 'Right']): + name = hemi_name + '_LatVent_ChorPlex' + init_mesh_path = name + '_init.vtk' + smoothed_mesh_path = name + '.vtk' + run.command('mrcalc ' + ' '.join(aparc_image + ' ' + str(index) + ' -eq' for index, tissue, name in VENTRICLE_CP_ASEG[hemi_index]) + ' -add - | ' + + 'voxel2mesh - -threshold 0.5 ' + init_mesh_path) + run.command('meshfilter ' + init_mesh_path + ' smooth ' + smoothed_mesh_path) + app.cleanup(init_mesh_path) + run.command('mesh2voxel ' + smoothed_mesh_path + ' ' + template_image + ' ' + name + '.mif') + app.cleanup(smoothed_mesh_path) + tissue_images[3].append(name + '.mif') + progress.increment() + progress.done() + + + + # Combine corpus callosum segments before smoothing + progress = app.ProgressBar('Combining and smoothing corpus callosum segmentation', len(CORPUS_CALLOSUM_ASEG) + 3) + for (index, name) in CORPUS_CALLOSUM_ASEG: + run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq ' + name + '.mif -datatype bit') + progress.increment() + cc_init_mesh_path = 'combined_corpus_callosum_init.vtk' + cc_smoothed_mesh_path = 'combined_corpus_callosum.vtk' + run.command('mrmath ' + ' '.join([ name + '.mif' for (index, name) in CORPUS_CALLOSUM_ASEG ]) + ' sum - | voxel2mesh - -threshold 0.5 ' + cc_init_mesh_path) + for name in [ n for _, n in CORPUS_CALLOSUM_ASEG ]: + app.cleanup(name + '.mif') + progress.increment() + 
run.command('meshfilter ' + cc_init_mesh_path + ' smooth ' + cc_smoothed_mesh_path) + app.cleanup(cc_init_mesh_path) + progress.increment() + run.command('mesh2voxel ' + cc_smoothed_mesh_path + ' ' + template_image + ' combined_corpus_callosum.mif') + app.cleanup(cc_smoothed_mesh_path) + progress.done() + tissue_images[2].append('combined_corpus_callosum.mif') + + + + # Deal with brain stem, including determining those voxels that should + # be erased from the 5TT image in order for streamlines traversing down + # the spinal column to be terminated & accepted + bs_fullmask_path = 'brain_stem_init.mif' + bs_cropmask_path = '' + progress = app.ProgressBar('Segmenting and cropping brain stem', 5) + run.command('mrcalc ' + aparc_image + ' ' + str(BRAIN_STEM_ASEG[0][0]) + ' -eq ' + + ' -add '.join([ aparc_image + ' ' + str(index) + ' -eq' for index, name in BRAIN_STEM_ASEG[1:] ]) + ' -add ' + + bs_fullmask_path + ' -datatype bit') + progress.increment() + bs_init_mesh_path = 'brain_stem_init.vtk' + run.command('voxel2mesh ' + bs_fullmask_path + ' ' + bs_init_mesh_path) + progress.increment() + bs_smoothed_mesh_path = 'brain_stem.vtk' + run.command('meshfilter ' + bs_init_mesh_path + ' smooth ' + bs_smoothed_mesh_path) + app.cleanup(bs_init_mesh_path) + progress.increment() + run.command('mesh2voxel ' + bs_smoothed_mesh_path + ' ' + template_image + ' brain_stem.mif') + app.cleanup(bs_smoothed_mesh_path) + progress.increment() + fourthventricle_zmin = min(int(line.split()[2]) for line in run.command('maskdump 4th-Ventricle.mif')[0].splitlines()) + if fourthventricle_zmin: + bs_cropmask_path = 'brain_stem_crop.mif' + run.command('mredit brain_stem.mif - ' + ' '.join([ '-plane 2 ' + str(index) + ' 0' for index in range(0, fourthventricle_zmin) ]) + ' | ' + 'mrcalc brain_stem.mif - -sub 1e-6 -gt ' + bs_cropmask_path + ' -datatype bit') + app.cleanup(bs_fullmask_path) + progress.done() + + + if hippocampi_method == 'subfields': + progress = app.ProgressBar('Using detected FreeSurfer hippocampal subfields module output', + 64 if hipp_subfield_has_amyg else 32) + + subfields = [ ( hipp_lut_file, 'hipp' ) ] + if hipp_subfield_has_amyg: + subfields.append(( amyg_lut_file, 'amyg' )) + + for subfields_lut_file, structure_name in subfields: + for hemi, filename in zip([ 'Left', 'Right'], [ prefix + hipp_subfield_image_suffix for prefix in [ 'l', 'r' ] ]): + # Extract individual components from image and assign to different tissues + subfields_all_tissues_image = hemi + '_' + structure_name + '_subfields.mif' + run.command('labelconvert ' + os.path.join(mri_dir, filename) + ' ' + freesurfer_lut_file + ' ' + subfields_lut_file + ' ' + subfields_all_tissues_image) + progress.increment() + for tissue in range(0, 5): + init_mesh_path = hemi + '_' + structure_name + '_subfield_' + str(tissue) + '_init.vtk' + smooth_mesh_path = hemi + '_' + structure_name + '_subfield_' + str(tissue) + '.vtk' + subfield_tissue_image = hemi + '_' + structure_name + '_subfield_' + str(tissue) + '.mif' + run.command('mrcalc ' + subfields_all_tissues_image + ' ' + str(tissue+1) + ' -eq - | ' + \ + 'voxel2mesh - ' + init_mesh_path) + progress.increment() + # Since the hippocampal subfields segmentation can include some fine structures, reduce the extent of smoothing + run.command('meshfilter ' + init_mesh_path + ' smooth ' + smooth_mesh_path + ' -smooth_spatial 2 -smooth_influence 2') + app.cleanup(init_mesh_path) + progress.increment() + run.command('mesh2voxel ' + smooth_mesh_path + ' ' + template_image + ' ' + 
subfield_tissue_image) + app.cleanup(smooth_mesh_path) + progress.increment() + tissue_images[tissue].append(subfield_tissue_image) + app.cleanup(subfields_all_tissues_image) + progress.done() + + + if thalami_method == 'nuclei': + progress = app.ProgressBar('Using detected FreeSurfer thalamic nuclei module output', 6) + for hemi in ['Left', 'Right']: + thal_mask_path = hemi + '_Thalamus_mask.mif' + init_mesh_path = hemi + '_Thalamus_init.vtk' + smooth_mesh_path = hemi + '_Thalamus.vtk' + thalamus_image = hemi + '_Thalamus.mif' + if hemi == 'Right': + run.command('mrthreshold ' + os.path.join(mri_dir, thal_nuclei_image) + ' -abs 8200 ' + thal_mask_path) + else: + run.command('mrcalc ' + os.path.join(mri_dir, thal_nuclei_image) + ' 0 -gt ' + + os.path.join(mri_dir, thal_nuclei_image) + ' 8200 -lt ' + + '-mult ' + thal_mask_path) + run.command('voxel2mesh ' + thal_mask_path + ' ' + init_mesh_path) + app.cleanup(thal_mask_path) + progress.increment() + run.command('meshfilter ' + init_mesh_path + ' smooth ' + smooth_mesh_path + ' -smooth_spatial 2 -smooth_influence 2') + app.cleanup(init_mesh_path) + progress.increment() + run.command('mesh2voxel ' + smooth_mesh_path + ' ' + template_image + ' ' + thalamus_image) + app.cleanup(smooth_mesh_path) + progress.increment() + tissue_images[1].append(thalamus_image) + progress.done() + + if have_first: + app.console('Running FSL FIRST to segment sub-cortical grey matter structures') + from_first = SGM_FIRST_MAP.copy() + if hippocampi_method == 'subfields': + from_first = { key: value for key, value in from_first.items() if 'Hippocampus' not in value } + if hipp_subfield_has_amyg: + from_first = { key: value for key, value in from_first.items() if 'Amygdala' not in value } + elif hippocampi_method == 'aseg': + from_first = { key: value for key, value in from_first.items() if 'Hippocampus' not in value and 'Amygdala' not in value } + if thalami_method != 'first': + from_first = { key: value for key, value in from_first.items() if 'Thalamus' not in value } + run.command(first_cmd + ' -s ' + ','.join(from_first.keys()) + ' -i T1.nii -b -o first') + fsl.check_first('first', from_first.keys()) + app.cleanup(glob.glob('T1_to_std_sub.*')) + progress = app.ProgressBar('Mapping FIRST segmentations to image', 2*len(from_first)) + for key, value in from_first.items(): + vtk_in_path = 'first-' + key + '_first.vtk' + vtk_converted_path = 'first-' + key + '_transformed.vtk' + run.command('meshconvert ' + vtk_in_path + ' ' + vtk_converted_path + ' -transform first2real T1.nii') + app.cleanup(vtk_in_path) + progress.increment() + run.command('mesh2voxel ' + vtk_converted_path + ' ' + template_image + ' ' + value + '.mif') + app.cleanup(vtk_converted_path) + tissue_images[1].append(value + '.mif') + progress.increment() + if not have_fast: + app.cleanup('T1.nii') + app.cleanup(glob.glob('first*')) + progress.done() + + # Run ACPCdetect, use results to draw spherical ROIs on T1 that will be fed to FSL FAST, + # the WM components of which will then be added to the 5TT + if have_acpcdetect: + progress = app.ProgressBar('Using ACPCdetect and FAST to segment ' + acpc_string, 5) + # ACPCdetect requires input image to be 16-bit + # We also want to realign to RAS beforehand so that we can interpret the output voxel locations properly + acpcdetect_input_image = 'T1RAS_16b.nii' + run.command('mrconvert ' + norm_image + ' -datatype uint16 -stride +1,+2,+3 ' + acpcdetect_input_image) + progress.increment() + run.command('acpcdetect -i ' + acpcdetect_input_image) + 
progress.increment() + # We need the header in order to go from voxel coordinates to scanner coordinates + acpcdetect_input_header = image.Header(acpcdetect_input_image) + acpcdetect_output_path = os.path.splitext(acpcdetect_input_image)[0] + '_ACPC.txt' + app.cleanup(acpcdetect_input_image) + with open(acpcdetect_output_path, 'r', encoding='utf-8') as acpc_file: + acpcdetect_output_data = acpc_file.read().splitlines() + app.cleanup(glob.glob(os.path.splitext(acpcdetect_input_image)[0] + "*")) + # Need to scan through the contents of this file, + # isolating the AC and PC locations + ac_voxel = pc_voxel = None + for index, line in enumerate(acpcdetect_output_data): + if 'AC' in line and 'voxel location' in line: + ac_voxel = [float(item) for item in acpcdetect_output_data[index+1].strip().split()] + elif 'PC' in line and 'voxel location' in line: + pc_voxel = [float(item) for item in acpcdetect_output_data[index+1].strip().split()] + if not ac_voxel or not pc_voxel: + raise MRtrixError('Error parsing text file from "acpcdetect"') + + def voxel2scanner(voxel, header): + return [ voxel[0]*header.spacing()[0]*header.transform()[axis][0] + + voxel[1]*header.spacing()[1]*header.transform()[axis][1] + + voxel[2]*header.spacing()[2]*header.transform()[axis][2] + + header.transform()[axis][3] + for axis in range(0,3) ] + + ac_scanner = voxel2scanner(ac_voxel, acpcdetect_input_header) + pc_scanner = voxel2scanner(pc_voxel, acpcdetect_input_header) + + # Generate the mask image within which FAST will be run + acpc_prefix = 'ACPC' if ATTEMPT_PC else 'AC' + acpc_mask_image = acpc_prefix + '_FAST_mask.mif' + run.command('mrcalc ' + template_image + ' nan -eq - | ' + 'mredit - ' + acpc_mask_image + ' -scanner ' + '-sphere ' + ','.join(str(value) for value in ac_scanner) + ' 8 1 ' + + ('-sphere ' + ','.join(str(value) for value in pc_scanner) + ' 5 1' if ATTEMPT_PC else '')) + progress.increment() + + acpc_t1_masked_image = acpc_prefix + '_T1.nii' + run.command('mrtransform ' + norm_image + ' -template ' + template_image + ' - | ' + 'mrcalc - ' + acpc_mask_image + ' -mult ' + acpc_t1_masked_image) + app.cleanup(acpc_mask_image) + progress.increment() + + run.command(fast_cmd + ' -N ' + acpc_t1_masked_image) + app.cleanup(acpc_t1_masked_image) + progress.increment() + + # Ideally don't want to have to add these manually; instead add all outputs from FAST + # to the 5TT (both cerebellum and AC / PC) in a single go + # This should involve grabbing just the WM component of these images + # Actually, in retrospect, it may be preferable to do the AC PC segmentation + # earlier on, and simply add them to the list of WM structures + acpc_wm_image = acpc_prefix + '.mif' + run.command('mrconvert ' + fsl.find_image(acpc_prefix + '_T1_pve_2') + ' ' + acpc_wm_image) + tissue_images[2].append(acpc_wm_image) + app.cleanup(glob.glob(os.path.splitext(acpc_t1_masked_image)[0] + '*')) + progress.done() + + + # If we don't have FAST, do cerebellar segmentation in a comparable way to the cortical GM / WM: + # Generate one 'pial-like' surface containing the GM and WM of the cerebellum, + # and another with just the WM + if not have_fast: + progress = app.ProgressBar('Adding FreeSurfer cerebellar segmentations directly', 6) + for hemi in [ 'Left-', 'Right-' ]: + wm_index = [ index for index, tissue, name in CEREBELLUM_ASEG if name.startswith(hemi) and 'White' in name ][0] + gm_index = [ index for index, tissue, name in CEREBELLUM_ASEG if name.startswith(hemi) and 'Cortex' in name ][0] + run.command('mrcalc ' + aparc_image + ' 
' + str(wm_index) + ' -eq ' + aparc_image + ' ' + str(gm_index) + ' -eq -add - | ' + \ + 'voxel2mesh - ' + hemi + 'cerebellum_all_init.vtk') + progress.increment() + run.command('mrcalc ' + aparc_image + ' ' + str(gm_index) + ' -eq - | ' + \ + 'voxel2mesh - ' + hemi + 'cerebellum_grey_init.vtk') + progress.increment() + for name, tissue in { 'all':2, 'grey':1 }.items(): + run.command('meshfilter ' + hemi + 'cerebellum_' + name + '_init.vtk smooth ' + hemi + 'cerebellum_' + name + '.vtk') + app.cleanup(hemi + 'cerebellum_' + name + '_init.vtk') + progress.increment() + run.command('mesh2voxel ' + hemi + 'cerebellum_' + name + '.vtk ' + template_image + ' ' + hemi + 'cerebellum_' + name + '.mif') + app.cleanup(hemi + 'cerebellum_' + name + '.vtk') + progress.increment() + tissue_images[tissue].append(hemi + 'cerebellum_' + name + '.mif') + progress.done() + + + # Construct images with the partial volume of each tissue + progress = app.ProgressBar('Combining segmentations of all structures corresponding to each tissue type', 5) + for tissue in range(0,5): + run.command('mrmath ' + ' '.join(tissue_images[tissue]) + (' brain_stem.mif' if tissue == 2 else '') + ' sum - | mrcalc - 1.0 -min tissue' + str(tissue) + '_init.mif') + app.cleanup(tissue_images[tissue]) + progress.increment() + progress.done() + + + # This can hopefully be done with a connected-component analysis: Take just the WM image, and + # fill in any gaps (i.e. select the inverse, select the largest connected component, invert again) + # Make sure that floating-point values are handled appropriately + # Combine these images together using the appropriate logic in order to form the 5TT image + progress = app.ProgressBar('Modulating segmentation images based on other tissues', 9) + tissue_images = [ 'tissue0.mif', 'tissue1.mif', 'tissue2.mif', 'tissue3.mif', 'tissue4.mif' ] + run.function(os.rename, 'tissue4_init.mif', 'tissue4.mif') + progress.increment() + run.command('mrcalc tissue3_init.mif tissue3_init.mif ' + tissue_images[4] + ' -add 1.0 -sub 0.0 -max -sub 0.0 -max ' + tissue_images[3]) + app.cleanup('tissue3_init.mif') + progress.increment() + run.command('mrmath ' + ' '.join(tissue_images[3:5]) + ' sum tissuesum_34.mif') + progress.increment() + run.command('mrcalc tissue1_init.mif tissue1_init.mif tissuesum_34.mif -add 1.0 -sub 0.0 -max -sub 0.0 -max ' + tissue_images[1]) + app.cleanup('tissue1_init.mif') + app.cleanup('tissuesum_34.mif') + progress.increment() + run.command('mrmath ' + tissue_images[1] + ' ' + ' '.join(tissue_images[3:5]) + ' sum tissuesum_134.mif') + progress.increment() + run.command('mrcalc tissue2_init.mif tissue2_init.mif tissuesum_134.mif -add 1.0 -sub 0.0 -max -sub 0.0 -max ' + tissue_images[2]) + app.cleanup('tissue2_init.mif') + app.cleanup('tissuesum_134.mif') + progress.increment() + run.command('mrmath ' + ' '.join(tissue_images[1:5]) + ' sum tissuesum_1234.mif') + progress.increment() + run.command('mrcalc tissue0_init.mif tissue0_init.mif tissuesum_1234.mif -add 1.0 -sub 0.0 -max -sub 0.0 -max ' + tissue_images[0]) + app.cleanup('tissue0_init.mif') + app.cleanup('tissuesum_1234.mif') + progress.increment() + tissue_sum_image = 'tissuesum_01234.mif' + run.command('mrmath ' + ' '.join(tissue_images) + ' sum ' + tissue_sum_image) + progress.done() + + + if app.ARGS.template: + run.command('mrtransform ' + mask_image + ' -template template.mif - | mrthreshold - brainmask.mif -abs 0.5') + mask_image = 'brainmask.mif' + + + # Branch depending on whether or not FSL fast will be used to re-segment 
the cerebellum + if have_fast: + + # How to support -template option? + # - Re-grid norm.mgz to template image before running FAST + # - Re-grid FAST output to template image + # Consider splitting, including initial mapping of cerebellar regions: + # - If we're not using a separate template image, just map cerebellar regions to voxels to + # produce a mask, and run FAST within that mask + # - If we have a template, combine cerebellar regions, convert to surfaces (one per hemisphere), + # map these to the template image, run FIRST on a binary mask from this, then + # re-combine this with the tissue maps from other sources based on the estimated PVF of + # cerebellum meshes + cerebellum_volume_image = 'Cerebellum_volume.mif' + cerebellum_mask_image = 'Cerebellum_mask.mif' + t1_cerebellum_masked = 'T1_cerebellum_precrop.mif' + if app.ARGS.template: + + # If this is the case, then we haven't yet performed any cerebellar segmentation / meshing + # What we want to do is: for each hemisphere, combine all three "cerebellar" segments from FreeSurfer, + # convert to a surface, map that surface to the template image + progress = app.ProgressBar('Preparing images of cerebellum for intensity-based segmentation', 9) + cerebellar_hemi_pvf_images = [ ] + for hemi in [ 'Left', 'Right' ]: + init_mesh_path = hemi + '-Cerebellum-All-Init.vtk' + smooth_mesh_path = hemi + '-Cerebellum-All-Smooth.vtk' + pvf_image_path = hemi + '-Cerebellum-PVF-Template.mif' + cerebellum_aseg_hemi = [ entry for entry in CEREBELLUM_ASEG if hemi in entry[2] ] + run.command('mrcalc ' + aparc_image + ' ' + str(cerebellum_aseg_hemi[0][0]) + ' -eq ' + \ + ' -add '.join([ aparc_image + ' ' + str(index) + ' -eq' for index, tissue, name in cerebellum_aseg_hemi[1:] ]) + ' -add - | ' + \ + 'voxel2mesh - ' + init_mesh_path) + progress.increment() + run.command('meshfilter ' + init_mesh_path + ' smooth ' + smooth_mesh_path) + app.cleanup(init_mesh_path) + progress.increment() + run.command('mesh2voxel ' + smooth_mesh_path + ' ' + template_image + ' ' + pvf_image_path) + app.cleanup(smooth_mesh_path) + cerebellar_hemi_pvf_images.append(pvf_image_path) + progress.increment() + + # Combine the two hemispheres together into: + # - An image in preparation for running FAST + # - A combined total partial volume fraction image that will be later used for tissue recombination + run.command('mrcalc ' + ' '.join(cerebellar_hemi_pvf_images) + ' -add 1.0 -min ' + cerebellum_volume_image) + app.cleanup(cerebellar_hemi_pvf_images) + progress.increment() + + run.command('mrthreshold ' + cerebellum_volume_image + ' ' + cerebellum_mask_image + ' -abs 1e-6') + progress.increment() + run.command('mrtransform ' + norm_image + ' -template ' + template_image + ' - | ' + \ + 'mrcalc - ' + cerebellum_mask_image + ' -mult ' + t1_cerebellum_masked) + progress.done() + + else: + app.console('Preparing images of cerebellum for intensity-based segmentation') + run.command('mrcalc ' + aparc_image + ' ' + str(CEREBELLUM_ASEG[0][0]) + ' -eq ' + \ + ' -add '.join([ aparc_image + ' ' + str(index) + ' -eq' for index, tissue, name in CEREBELLUM_ASEG[1:] ]) + ' -add ' + \ + cerebellum_volume_image) + cerebellum_mask_image = cerebellum_volume_image + run.command('mrcalc T1.nii ' + cerebellum_mask_image + ' -mult ' + t1_cerebellum_masked) + + app.cleanup('T1.nii') + + # Any code below here should be compatible with cerebellum_volume_image.mif containing partial volume fractions + # (in the case of no explicit template image, it's a mask, but the logic still applies) + + 
app.console('Running FSL fast to segment the cerebellum based on intensity information') + + # Run FSL FAST just within the cerebellum + # FAST memory usage can also be huge when using a high-resolution template image: + # Crop T1 image around the cerebellum before feeding to FAST, then re-sample to full template image FoV + fast_input_image = 'T1_cerebellum.nii' + run.command('mrgrid ' + t1_cerebellum_masked + ' crop -mask ' + cerebellum_mask_image + ' ' + fast_input_image) + app.cleanup(t1_cerebellum_masked) + # Cleanup of cerebellum_mask_image: + # May be same image as cerebellum_volume_image, which is required later + if cerebellum_mask_image != cerebellum_volume_image: + app.cleanup(cerebellum_mask_image) + run.command(fast_cmd + ' -N ' + fast_input_image) + app.cleanup(fast_input_image) + + # Use glob to clean up unwanted FAST outputs + fast_output_prefix = os.path.splitext(fast_input_image)[0] + fast_pve_output_prefix = fast_output_prefix + '_pve_' + app.cleanup([ entry for entry in glob.glob(fast_output_prefix + '*') if not fast_pve_output_prefix in entry ]) + + progress = app.ProgressBar('Introducing intensity-based cerebellar segmentation into the 5TT image', 10) + fast_outputs_cropped = [ fast_pve_output_prefix + str(n) + fast_suffix for n in range(0,3) ] + fast_outputs_template = [ 'FAST_' + str(n) + '.mif' for n in range(0,3) ] + for inpath, outpath in zip(fast_outputs_cropped, fast_outputs_template): + run.command('mrtransform ' + inpath + ' -interp nearest -template ' + template_image + ' ' + outpath) + app.cleanup(inpath) + progress.increment() + if app.ARGS.template: + app.cleanup(template_image) + + # Generate the revised tissue images, using output from FAST inside the cerebellum and + # output from previous processing everywhere else + # Note that the middle intensity (grey matter) in the FAST output here gets assigned + # to the sub-cortical grey matter component + + # Some of these voxels may have existing non-zero tissue components. 
+ # In that case, let's find a multiplier to apply to cerebellum tissues such that the + # sum does not exceed 1.0 + new_tissue_images = [ 'tissue0_fast.mif', 'tissue1_fast.mif', 'tissue2_fast.mif', 'tissue3_fast.mif', 'tissue4_fast.mif' ] + new_tissue_sum_image = 'tissuesum_01234_fast.mif' + cerebellum_multiplier_image = 'Cerebellar_multiplier.mif' + run.command('mrcalc ' + cerebellum_volume_image + ' ' + tissue_sum_image + ' -add 0.5 -gt 1.0 ' + tissue_sum_image + ' -sub 0.0 -if ' + cerebellum_multiplier_image) + app.cleanup(cerebellum_volume_image) + progress.increment() + run.command('mrconvert ' + tissue_images[0] + ' ' + new_tissue_images[0]) + app.cleanup(tissue_images[0]) + progress.increment() + run.command('mrcalc ' + tissue_images[1] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[1] + ' -mult -add ' + new_tissue_images[1]) + app.cleanup(tissue_images[1]) + app.cleanup(fast_outputs_template[1]) + progress.increment() + run.command('mrcalc ' + tissue_images[2] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[2] + ' -mult -add ' + new_tissue_images[2]) + app.cleanup(tissue_images[2]) + app.cleanup(fast_outputs_template[2]) + progress.increment() + run.command('mrcalc ' + tissue_images[3] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[0] + ' -mult -add ' + new_tissue_images[3]) + app.cleanup(tissue_images[3]) + app.cleanup(fast_outputs_template[0]) + app.cleanup(cerebellum_multiplier_image) + progress.increment() + run.command('mrconvert ' + tissue_images[4] + ' ' + new_tissue_images[4]) + app.cleanup(tissue_images[4]) + progress.increment() + run.command('mrmath ' + ' '.join(new_tissue_images) + ' sum ' + new_tissue_sum_image) + app.cleanup(tissue_sum_image) + progress.done() + tissue_images = new_tissue_images + tissue_sum_image = new_tissue_sum_image + + + + # For all voxels within FreeSurfer's brain mask, add to the CSF image in order to make the sum 1.0 + progress = app.ProgressBar('Performing fill operations to preserve unity tissue volume', 2) + + # Some voxels may get a non-zero cortical GM fraction due to native use of the surface representation, yet + # these voxels are actually outside FreeSurfer's own provided brain mask. So what we need to do here is + # get the union of the tissue sum nonzero image and the mask image, and use that at the -mult step of the + # mrcalc call. 
+ # Required image: (tissue_sum_image > 0.0) || mask_image + # tissue_sum_image 0.0 -gt mask_image -add 1.0 -min + + new_tissue_images = [ tissue_images[0], + tissue_images[1], + tissue_images[2], + os.path.splitext(tissue_images[3])[0] + '_filled.mif', + tissue_images[4] ] + csf_fill_image = 'csf_fill.mif' + run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + tissue_sum_image + ' 0.0 -gt ' + mask_image + ' -add 1.0 -min -mult 0.0 -max ' + csf_fill_image) + app.cleanup(tissue_sum_image) + # If no template is specified, this file is part of the FreeSurfer output; hence don't modify + if app.ARGS.template: + app.cleanup(mask_image) + progress.increment() + run.command('mrcalc ' + tissue_images[3] + ' ' + csf_fill_image + ' -add ' + new_tissue_images[3]) + app.cleanup(csf_fill_image) + app.cleanup(tissue_images[3]) + progress.done() + tissue_images = new_tissue_images + + + + # Move brain stem from white matter to pathology at final step: + # this prevents the brain stem segmentation from overwriting other + # structures that it otherwise wouldn't if it were written to WM + if not app.ARGS.white_stem: + progress = app.ProgressBar('Moving brain stem to volume index 4', 3) + new_tissue_images = [ tissue_images[0], + tissue_images[1], + os.path.splitext(tissue_images[2])[0] + '_no_brainstem.mif', + tissue_images[3], + os.path.splitext(tissue_images[4])[0] + '_with_brainstem.mif' ] + run.command('mrcalc ' + tissue_images[2] + ' brain_stem.mif -min brain_stem_white_overlap.mif') + app.cleanup('brain_stem.mif') + progress.increment() + run.command('mrcalc ' + tissue_images[2] + ' brain_stem_white_overlap.mif -sub ' + new_tissue_images[2]) + app.cleanup(tissue_images[2]) + progress.increment() + run.command('mrcalc ' + tissue_images[4] + ' brain_stem_white_overlap.mif -add ' + new_tissue_images[4]) + app.cleanup(tissue_images[4]) + app.cleanup('brain_stem_white_overlap.mif') + progress.done() + tissue_images = new_tissue_images + + + + # Finally, concatenate the volumes to produce the 5TT image + app.console('Concatenating tissue volumes into 5TT format') + precrop_result_image = '5TT.mif' + if bs_cropmask_path: + run.command('mrcat ' + ' '.join(tissue_images) + ' - -axis 3 | ' + \ + '5ttedit - ' + precrop_result_image + ' -none ' + bs_cropmask_path) + app.cleanup(bs_cropmask_path) + else: + run.command('mrcat ' + ' '.join(tissue_images) + ' ' + precrop_result_image + ' -axis 3') + app.cleanup(tissue_images) + + + # Maybe don't go off all tissues here, since FreeSurfer's mask can be fairly liberal; + # instead get just a voxel clearance from all other tissue types (maybe two) + if app.ARGS.nocrop: + run.function(os.rename, precrop_result_image, 'result.mif') + else: + app.console('Cropping final 5TT image') + crop_mask_image = 'crop_mask.mif' + run.command('mrconvert ' + precrop_result_image + ' -coord 3 0,1,2,4 - | mrmath - sum - -axis 3 | mrthreshold - - -abs 0.001 | maskfilter - dilate ' + crop_mask_image) + run.command('mrgrid ' + precrop_result_image + ' crop result.mif -mask ' + crop_mask_image) + app.cleanup(crop_mask_image) + app.cleanup(precrop_result_image) + + run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), + mrconvert_keyval=path.from_user(os.path.join(app.ARGS.input, 'mri', 'aparc+aseg.mgz'), True), + force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/5ttgen/hsvs/get_inputs.py b/python/mrtrix3/5ttgen/hsvs/get_inputs.py new file mode 100644 index 0000000000..eeabe4b9fd --- /dev/null +++ b/python/mrtrix3/5ttgen/hsvs/get_inputs.py @@ -0,0 +1,24 @@ +# 
Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import os +from mrtrix3 import app, path, run + +def get_inputs(): #pylint: disable=unused-variable + # Most FreeSurfer files will be accessed in-place; no need to pre-convert them into the temporary directory + # However, convert the aparc image so that it does not have to be repeatedly uncompressed + run.command('mrconvert ' + path.from_user(os.path.join(app.ARGS.input, 'mri', 'aparc+aseg.mgz'), True) + ' ' + path.to_scratch('aparc.mif', True)) + if app.ARGS.template: + run.command('mrconvert ' + path.from_user(app.ARGS.template, True) + ' ' + path.to_scratch('template.mif', True) + ' -axes 0,1,2') diff --git a/python/mrtrix3/5ttgen/hsvs/usage.py b/python/mrtrix3/5ttgen/hsvs/usage.py new file mode 100644 index 0000000000..73077724cb --- /dev/null +++ b/python/mrtrix3/5ttgen/hsvs/usage.py @@ -0,0 +1,31 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('hsvs', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Generate a 5TT image based on Hybrid Surface and Volume Segmentation (HSVS), using FreeSurfer and FSL tools') + parser.add_argument('input', help='The input FreeSurfer subject directory') + parser.add_argument('output', help='The output 5TT image') + parser.add_argument('-template', help='Provide an image that will form the template for the generated 5TT image') + parser.add_argument('-hippocampi', choices=HIPPOCAMPI_CHOICES, help='Select method to be used for hippocampi (& amygdalae) segmentation; options are: ' + ','.join(HIPPOCAMPI_CHOICES)) + parser.add_argument('-thalami', choices=THALAMI_CHOICES, help='Select method to be used for thalamic segmentation; options are: ' + ','.join(THALAMI_CHOICES)) + parser.add_argument('-white_stem', action='store_true', help='Classify the brainstem as white matter') + parser.add_citation('Smith, R.; Skoch, A.; Bajada, C.; Caspers, S.; Connelly, A. Hybrid Surface-Volume Segmentation for improved Anatomically-Constrained Tractography. In Proc OHBM 2020') + parser.add_citation('Fischl, B. Freesurfer. 
NeuroImage, 2012, 62(2), 774-781', is_external=True) + parser.add_citation('Iglesias, J.E.; Augustinack, J.C.; Nguyen, K.; Player, C.M.; Player, A.; Wright, M.; Roy, N.; Frosch, M.P.; Mc Kee, A.C.; Wald, L.L.; Fischl, B.; and Van Leemput, K. A computational atlas of the hippocampal formation using ex vivo, ultra-high resolution MRI: Application to adaptive segmentation of in vivo MRI. NeuroImage, 2015, 115, 117-137', condition='If FreeSurfer hippocampal subfields module is utilised', is_external=True) + parser.add_citation('Saygin, Z.M. & Kliemann, D.; Iglesias, J.E.; van der Kouwe, A.J.W.; Boyd, E.; Reuter, M.; Stevens, A.; Van Leemput, K.; Mc Kee, A.; Frosch, M.P.; Fischl, B.; Augustinack, J.C. High-resolution magnetic resonance imaging reveals nuclei of the human amygdala: manual segmentation to automatic atlas. NeuroImage, 2017, 155, 370-382', condition='If FreeSurfer hippocampal subfields module is utilised and includes amygdalae segmentation', is_external=True) + parser.add_citation('Iglesias, J.E.; Insausti, R.; Lerma-Usabiaga, G.; Bocchetta, M.; Van Leemput, K.; Greve, D.N.; van der Kouwe, A.; ADNI; Fischl, B.; Caballero-Gaudes, C.; Paz-Alonso, P.M. A probabilistic atlas of the human thalamic nuclei combining ex vivo MRI and histology. NeuroImage, 2018, 183, 314-326', condition='If -thalami nuclei is used', is_external=True) + parser.add_citation('Ardekani, B.; Bachman, A.H. Model-based automatic detection of the anterior and posterior commissures on MRI scans. NeuroImage, 2009, 46(3), 677-682', condition='If ACPCDetect is installed', is_external=True) diff --git a/python/mrtrix3/5ttgen/usage.py b/python/mrtrix3/5ttgen/usage.py new file mode 100644 index 0000000000..e0c37a7d66 --- /dev/null +++ b/python/mrtrix3/5ttgen/usage.py @@ -0,0 +1,30 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import algorithm #pylint: disable=no-name-in-module +def usage(cmdline): #pylint: disable=unused-variable + + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Generate a 5TT image suitable for ACT') + cmdline.add_citation('Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. Anatomically-constrained tractography: Improved diffusion MRI streamlines tractography through effective use of anatomical information. NeuroImage, 2012, 62, 1924-1938') + cmdline.add_description('5ttgen acts as a \'master\' script for generating a five-tissue-type (5TT) segmented tissue image suitable for use in Anatomically-Constrained Tractography (ACT). A range of different algorithms are available for completing this task. When using this script, the name of the algorithm to be used must appear as the first argument on the command-line after \'5ttgen\'. 
The subsequent compulsory arguments and options available depend on the particular algorithm being invoked.') + cmdline.add_description('Each algorithm available also has its own help page, including necessary references; e.g. to see the help page of the \'fsl\' algorithm, type \'5ttgen fsl\'.') + + common_options = cmdline.add_argument_group('Options common to all 5ttgen algorithms') + common_options.add_argument('-nocrop', action='store_true', default=False, help='Do NOT crop the resulting 5TT image to reduce its size (keep the same dimensions as the input image)') + common_options.add_argument('-sgm_amyg_hipp', action='store_true', default=False, help='Represent the amygdalae and hippocampi as sub-cortical grey matter in the 5TT image') + + # Import the command-line settings for all algorithms found in the relevant directory + algorithm.usage(cmdline) diff --git a/python/mrtrix3/blend/__init__.py b/python/mrtrix3/blend/__init__.py deleted file mode 100644 index e722b2d24c..0000000000 --- a/python/mrtrix3/blend/__init__.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -import os -import sys - -if len(sys.argv) <= 1: - sys.stderr.write('A script to blend two sets of movie frames together with a desired overlap.\n') - sys.stderr.write('The input arguments are two folders containing the movie frames ' - '(eg. 
output from the MRview screenshot tool), and the desired number ' - 'of overlapping frames.\n') - sys.stderr.write('eg: blend folder1 folder2 20 output_folder\n') - sys.exit(1) - -INPUT_FOLDER_1 = sys.argv[1] -INPUT_FOLDER_2 = sys.argv[2] -FILE_LIST_1 = sorted(os.listdir(INPUT_FOLDER_1)) -FILE_LIST_2 = sorted(os.listdir(INPUT_FOLDER_2)) -NUM_OVERLAP = int(sys.argv[3]) -OUTPUT_FOLDER = sys.argv[4] - -if not os.path.exists(OUTPUT_FOLDER): - os.mkdir(OUTPUT_FOLDER) - -NUM_OUTPUT_FRAMES = len(FILE_LIST_1) + len(FILE_LIST_2) - NUM_OVERLAP -for i in range(NUM_OUTPUT_FRAMES): - file_name = 'frame' + '%0*d' % (5, i) + '.png' - if i <= len(FILE_LIST_1) - NUM_OVERLAP: - os.system('cp -L ' + INPUT_FOLDER_1 + '/' + FILE_LIST_1[i] + ' ' + OUTPUT_FOLDER + '/' + file_name) - if len(FILE_LIST_1) - NUM_OVERLAP < i < len(FILE_LIST_1): - i2 = i - (len(FILE_LIST_1) - NUM_OVERLAP) - 1 - blend_amount = 100 * float(i2 + 1) / float(NUM_OVERLAP) - os.system('convert ' + INPUT_FOLDER_1 + '/' + FILE_LIST_1[i] + ' ' + INPUT_FOLDER_2 \ - + '/' + FILE_LIST_2[i2] + ' -alpha on -compose blend -define compose:args=' \ - + str(blend_amount) + ' -gravity South -composite ' + OUTPUT_FOLDER + '/' + file_name) - if i >= (len(FILE_LIST_1)): - i2 = i - (len(FILE_LIST_1) - NUM_OVERLAP) - 1 - os.system('cp -L ' + INPUT_FOLDER_2 + '/' + FILE_LIST_2[i2] + ' ' + OUTPUT_FOLDER + '/' + file_name) diff --git a/python/mrtrix3/convert_bruker/__init__.py b/python/mrtrix3/convert_bruker/__init__.py deleted file mode 100644 index 624e94edae..0000000000 --- a/python/mrtrix3/convert_bruker/__init__.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. 
- -import sys, os.path - -if len (sys.argv) != 3: - sys.stderr.write("usage: convert_bruker 2dseq header.mih\n") - sys.exit (0) - - -#if os.path.basename (sys.argv[1]) != '2dseq': - #print ("expected '2dseq' file as first argument") - #sys.exit (1) - -if not sys.argv[2].endswith ('.mih'): - sys.stderr.write("expected .mih suffix as the second argument\n") - sys.exit (1) - - - -def main(): - - with open (os.path.join (os.path.dirname (sys.argv[1]), 'reco'), encoding='utf-8') as file_reco: - lines = file_reco.read().split ('##$') - - with open (os.path.join (os.path.dirname (sys.argv[1]), '../../acqp'), encoding='utf-8') as file_acqp: - lines += file_acqp.read().split ('##$') - - with open (os.path.join (os.path.dirname (sys.argv[1]), '../../method'), encoding='utf-8') as file_method: - lines += file_method.read().split ('##$') - - - for line in lines: - line = line.lower() - if line.startswith ('reco_size='): - mat_size = line.splitlines()[1].split() - print ('mat_size', mat_size) - elif line.startswith ('nslices='): - nslices = line.split('=')[1].split()[0] - print ('nslices', nslices) - elif line.startswith ('acq_time_points='): - nacq = len (line.split('\n',1)[1].split()) - print ('nacq', nacq) - elif line.startswith ('reco_wordtype='): - wtype = line.split('=')[1].split()[0] - print ('wtype', wtype) - elif line.startswith ('reco_byte_order='): - byteorder = line.split('=')[1].split()[0] - print ('byteorder', byteorder) - elif line.startswith ('pvm_spatresol='): - res = line.splitlines()[1].split() - print ('res', res) - elif line.startswith ('pvm_spackarrslicedistance='): - slicethick = line.splitlines()[1].split()[0] - print ('slicethick', slicethick) - elif line.startswith ('pvm_dweffbval='): - bval = line.split('\n',1)[1].split() - print ('bval', bval) - elif line.startswith ('pvm_dwgradvec='): - bvec = line.split('\n',1)[1].split() - print ('bvec', bvec) - - - with open (sys.argv[2], 'w', encoding='utf-8') as file_out: - file_out.write ('mrtrix image\ndim: ' + mat_size[0] + ',' + mat_size[1]) - if len(mat_size) > 2: - file_out.write (',' + str(mat_size[2])) - else: - try: - nslices #pylint: disable=pointless-statement - file_out.write (',' + str(nslices)) - except NameError: - pass - - try: - nacq #pylint: disable=pointless-statement - file_out.write (',' + str(nacq)) - except NameError: - pass - - file_out.write ('\nvox: ' + str(res[0]) + ',' + str(res[1])) - if len(res) > 2: - file_out.write (',' + str(res[2])) - else: - try: - slicethick #pylint: disable=pointless-statement - file_out.write (',' + str(slicethick)) - except NameError: - pass - try: - nacq #pylint: disable=pointless-statement - file_out.write (',') - except NameError: - pass - - file_out.write ('\ndatatype: ') - if wtype == '_16bit_sgn_int': - file_out.write ('int16') - elif wtype == '_32bit_sgn_int': - file_out.write ('int32') - - if byteorder=='littleendian': - file_out.write ('le') - else: - file_out.write ('be') - - file_out.write ('\nlayout: +0,+1') - try: - nslices #pylint: disable=pointless-statement - file_out.write (',+2') - except NameError: - pass - try: - nacq #pylint: disable=pointless-statement - file_out.write (',+3') - except NameError: - pass - - file_out.write ('\nfile: ' + sys.argv[1] + '\n') - - try: - assert len(bvec) == 3*len(bval) - bvec = [ bvec[n:n+3] for n in range(0,len(bvec),3) ] - for direction, value in zip(bvec, bval): - file_out.write ('dw_scheme: ' + direction[0] + ',' + direction[1] + ',' + str(-float(direction[2])) + ',' + value + '\n') - except AssertionError: - pass - -main() 
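The mrcalc expressions in the 5ttgen hsvs hunks earlier in this patch are written in reverse-Polish form and are easy to misread. The recurring idiom 'T T S -add 1.0 -sub 0.0 -max -sub 0.0 -max' shrinks a tissue fraction T wherever T plus the already-allocated sum S would exceed unity, and the later CSF fill step adds the remaining fraction only where some tissue already exists or the brain mask is set. The following NumPy sketch is purely illustrative of what those two expressions compute; the array names are placeholders and the code is not part of the patch:

import numpy as np

def modulate(tissue, other_sum):
  # mrcalc idiom: T T S -add 1.0 -sub 0.0 -max -sub 0.0 -max
  # reduce 'tissue' wherever tissue + other_sum would exceed 1.0, clamping at zero
  return np.maximum(tissue - np.maximum(tissue + other_sum - 1.0, 0.0), 0.0)

def csf_fill(tissue_sum, brain_mask):
  # mrcalc idiom: 1.0 SUM -sub SUM 0.0 -gt MASK -add 1.0 -min -mult 0.0 -max
  # fill the remainder up to unity, but only where any tissue is present or the mask is non-zero
  inside = np.minimum((tissue_sum > 0.0).astype(float) + brain_mask, 1.0)
  return np.maximum((1.0 - tissue_sum) * inside, 0.0)

The patch applies the modulation in a fixed precedence order (tissue 4 first, then 3, 1, 2, 0), so each tissue yields to those already finalised.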
diff --git a/python/mrtrix3/dwi2mask/3dautomask/__init__.py b/python/mrtrix3/dwi2mask/3dautomask/__init__.py index 59664bb732..f42b4ac94e 100644 --- a/python/mrtrix3/dwi2mask/3dautomask/__init__.py +++ b/python/mrtrix3/dwi2mask/3dautomask/__init__.py @@ -13,85 +13,6 @@ # # For more details, see http://www.mrtrix.org/. -import shutil -from mrtrix3 import MRtrixError -from mrtrix3 import app, run +NEEDS_MEAN_BZERO = True AFNI3DAUTOMASK_CMD = '3dAutomask' - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('3dautomask', parents=[base_parser]) - parser.set_author('Ricardo Rios (ricardo.rios@cimat.mx)') - parser.set_synopsis('Use AFNI 3dAutomask to derive a brain mask from the DWI mean b=0 image') - parser.add_citation('RW Cox. AFNI: Software for analysis and visualization of functional magnetic resonance neuroimages. Computers and Biomedical Research, 29:162-173, 1996.', is_external=True) - parser.add_argument('input', help='The input DWI series') - parser.add_argument('output', help='The output mask image') - options = parser.add_argument_group('Options specific to the \'afni_3dautomask\' algorithm') - options.add_argument('-clfrac', type=float, help='Set the \'clip level fraction\', must be a number between 0.1 and 0.9. A small value means to make the initial threshold for clipping smaller, which will tend to make the mask larger.') - options.add_argument('-nograd', action='store_true', help='The program uses a \'gradual\' clip level by default. Add this option to use a fixed clip level.') - options.add_argument('-peels', type=float, help='Peel (erode) the mask n times, then unpeel (dilate).') - options.add_argument('-nbhrs', type=int, help='Define the number of neighbors needed for a voxel NOT to be eroded. It should be between 6 and 26.') - options.add_argument('-eclip', action='store_true', help='After creating the mask, remove exterior voxels below the clip threshold.') - options.add_argument('-SI', type=float, help='After creating the mask, find the most superior voxel, then zero out everything more than SI millimeters inferior to that. 
130 seems to be decent (i.e., for Homo Sapiens brains).') - options.add_argument('-dilate', type=int, help='Dilate the mask outwards n times') - options.add_argument('-erode', type=int, help='Erode the mask outwards n times') - - options.add_argument('-NN1', action='store_true', help='Erode and dilate based on mask faces') - options.add_argument('-NN2', action='store_true', help='Erode and dilate based on mask edges') - options.add_argument('-NN3', action='store_true', help='Erode and dilate based on mask corners') - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def needs_mean_bzero(): #pylint: disable=unused-variable - return True - - - -def execute(): #pylint: disable=unused-variable - if not shutil.which(AFNI3DAUTOMASK_CMD): - raise MRtrixError('Unable to locate AFNI "' - + AFNI3DAUTOMASK_CMD - + '" executable; check installation') - - # main command to execute - mask_path = 'afni_mask.nii.gz' - cmd_string = AFNI3DAUTOMASK_CMD + ' -prefix ' + mask_path - - # Adding optional parameters - if app.ARGS.clfrac is not None: - cmd_string += ' -clfrac ' + str(app.ARGS.clfrac) - if app.ARGS.peels is not None: - cmd_string += ' -peels ' + str(app.ARGS.peels) - if app.ARGS.nbhrs is not None: - cmd_string += ' -nbhrs ' + str(app.ARGS.nbhrs) - if app.ARGS.dilate is not None: - cmd_string += ' -dilate ' + str(app.ARGS.dilate) - if app.ARGS.erode is not None: - cmd_string += ' -erode ' + str(app.ARGS.erode) - if app.ARGS.SI is not None: - cmd_string += ' -SI ' + str(app.ARGS.SI) - - if app.ARGS.nograd: - cmd_string += ' -nograd' - if app.ARGS.eclip: - cmd_string += ' -eclip' - if app.ARGS.NN1: - cmd_string += ' -NN1' - if app.ARGS.NN2: - cmd_string += ' -NN2' - if app.ARGS.NN3: - cmd_string += ' -NN3' - - # Adding input dataset to main command - cmd_string += ' bzero.nii' - - # Execute main command for afni 3dautomask - run.command(cmd_string) - - return mask_path diff --git a/python/mrtrix3/dwi2mask/3dautomask/execute.py b/python/mrtrix3/dwi2mask/3dautomask/execute.py new file mode 100644 index 0000000000..1ba0528c70 --- /dev/null +++ b/python/mrtrix3/dwi2mask/3dautomask/execute.py @@ -0,0 +1,62 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import shutil +from mrtrix3 import MRtrixError +from mrtrix3 import app, run +from . 
import AFNI3DAUTOMASK_CMD + +def execute(): #pylint: disable=unused-variable + if not shutil.which(AFNI3DAUTOMASK_CMD): + raise MRtrixError('Unable to locate AFNI "' + + AFNI3DAUTOMASK_CMD + + '" executable; check installation') + + # main command to execute + mask_path = 'afni_mask.nii.gz' + cmd_string = AFNI3DAUTOMASK_CMD + ' -prefix ' + mask_path + + # Adding optional parameters + if app.ARGS.clfrac is not None: + cmd_string += ' -clfrac ' + str(app.ARGS.clfrac) + if app.ARGS.peels is not None: + cmd_string += ' -peels ' + str(app.ARGS.peels) + if app.ARGS.nbhrs is not None: + cmd_string += ' -nbhrs ' + str(app.ARGS.nbhrs) + if app.ARGS.dilate is not None: + cmd_string += ' -dilate ' + str(app.ARGS.dilate) + if app.ARGS.erode is not None: + cmd_string += ' -erode ' + str(app.ARGS.erode) + if app.ARGS.SI is not None: + cmd_string += ' -SI ' + str(app.ARGS.SI) + + if app.ARGS.nograd: + cmd_string += ' -nograd' + if app.ARGS.eclip: + cmd_string += ' -eclip' + if app.ARGS.NN1: + cmd_string += ' -NN1' + if app.ARGS.NN2: + cmd_string += ' -NN2' + if app.ARGS.NN3: + cmd_string += ' -NN3' + + # Adding input dataset to main command + cmd_string += ' bzero.nii' + + # Execute main command for afni 3dautomask + run.command(cmd_string) + + return mask_path diff --git a/python/mrtrix3/dwi2mask/3dautomask/get_inputs.py b/python/mrtrix3/dwi2mask/3dautomask/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwi2mask/3dautomask/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwi2mask/3dautomask/usage.py b/python/mrtrix3/dwi2mask/3dautomask/usage.py new file mode 100644 index 0000000000..5ccd313a63 --- /dev/null +++ b/python/mrtrix3/dwi2mask/3dautomask/usage.py @@ -0,0 +1,34 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('3dautomask', parents=[base_parser]) + parser.set_author('Ricardo Rios (ricardo.rios@cimat.mx)') + parser.set_synopsis('Use AFNI 3dAutomask to derive a brain mask from the DWI mean b=0 image') + parser.add_citation('RW Cox. 
AFNI: Software for analysis and visualization of functional magnetic resonance neuroimages. Computers and Biomedical Research, 29:162-173, 1996.', is_external=True) + parser.add_argument('input', help='The input DWI series') + parser.add_argument('output', help='The output mask image') + options = parser.add_argument_group('Options specific to the \'afni_3dautomask\' algorithm') + options.add_argument('-clfrac', type=float, help='Set the \'clip level fraction\', must be a number between 0.1 and 0.9. A small value means to make the initial threshold for clipping smaller, which will tend to make the mask larger.') + options.add_argument('-nograd', action='store_true', help='The program uses a \'gradual\' clip level by default. Add this option to use a fixed clip level.') + options.add_argument('-peels', type=float, help='Peel (erode) the mask n times, then unpeel (dilate).') + options.add_argument('-nbhrs', type=int, help='Define the number of neighbors needed for a voxel NOT to be eroded. It should be between 6 and 26.') + options.add_argument('-eclip', action='store_true', help='After creating the mask, remove exterior voxels below the clip threshold.') + options.add_argument('-SI', type=float, help='After creating the mask, find the most superior voxel, then zero out everything more than SI millimeters inferior to that. 130 seems to be decent (i.e., for Homo Sapiens brains).') + options.add_argument('-dilate', type=int, help='Dilate the mask outwards n times') + options.add_argument('-erode', type=int, help='Erode the mask outwards n times') + options.add_argument('-NN1', action='store_true', help='Erode and dilate based on mask faces') + options.add_argument('-NN2', action='store_true', help='Erode and dilate based on mask edges') + options.add_argument('-NN3', action='store_true', help='Erode and dilate based on mask corners') diff --git a/python/mrtrix3/dwi2mask/__init__.py b/python/mrtrix3/dwi2mask/__init__.py index 9692f45dc7..e69de29bb2 100644 --- a/python/mrtrix3/dwi2mask/__init__.py +++ b/python/mrtrix3/dwi2mask/__init__.py @@ -1,98 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import algorithm, app, _version #pylint: disable=no-name-in-module, import-outside-toplevel - - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Warda Syeda (wtsyeda@unimelb.edu.au)') - cmdline.set_synopsis('Generate a binary mask from DWI data') - cmdline.add_description('This script serves as an interface for many different algorithms that generate a binary mask from DWI data in different ways. ' - 'Each algorithm available has its own help page, including necessary references; e.g. 
to see the help page of the \'fslbet\' algorithm, type \'dwi2mask fslbet\'.') - cmdline.add_description('More information on mask derivation from DWI data can be found at the following link: \n' - 'https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/dwi_preprocessing/masking.html') - - # General options - #common_options = cmdline.add_argument_group('General dwi2mask options') - app.add_dwgrad_import_options(cmdline) - - # Import the command-line settings for all algorithms found in the relevant directory - algorithm.usage(cmdline) - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - - # Find out which algorithm the user has requested - alg = algorithm.get(app.ARGS.algorithm) - - app.check_output_path(app.ARGS.output) - - input_header = image.Header(path.from_user(app.ARGS.input, False)) - image.check_3d_nonunity(input_header) - grad_import_option = app.read_dwgrad_import_options() - if not grad_import_option and 'dw_scheme' not in input_header.keyval(): - raise MRtrixError('Script requires diffusion gradient table: ' - 'either in image header, or using -grad / -fslgrad option') - - app.make_scratch_dir() - - # Get input data into the scratch directory - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('input.mif') - + ' -strides 0,0,0,1' + grad_import_option) - alg.get_inputs() - - app.goto_scratch_dir() - - # Generate a mean b=0 image (common task in many algorithms) - if alg.needs_mean_bzero(): - run.command('dwiextract input.mif -bzero - | ' - 'mrmath - mean - -axis 3 | ' - 'mrconvert - bzero.nii -strides +1,+2,+3') - - # Get a mask of voxels for which the DWI data are valid - # (want to ensure that no algorithm includes any voxels where - # there is no valid DWI data, regardless of how they operate) - run.command('mrmath input.mif max - -axis 3 | ' - 'mrthreshold - -abs 0 -comparison gt input_pos_mask.mif') - - # Make relative strides of three spatial axes of output mask equivalent - # to input DWI; this may involve decrementing magnitude of stride - # if the input DWI is volume-contiguous - strides = image.Header('input.mif').strides()[0:3] - strides = [(abs(value) + 1 - min(abs(v) for v in strides)) * (-1 if value < 0 else 1) for value in strides] - - # From here, the script splits depending on what algorithm is being used - # The return value of the execute() function should be the name of the - # image in the scratch directory that is to be exported - mask_path = alg.execute() - - # Before exporting the mask image, get a mask of voxels for which - # the DWI data are valid - # (want to ensure that no algorithm includes any voxels where - # there is no valid DWI data, regardless of how they operate) - run.command('mrcalc ' - + mask_path - + ' input_pos_mask.mif -mult -' - + ' |' - + ' mrconvert - ' - + path.from_user(app.ARGS.output) - + ' -strides ' + ','.join(str(value) for value in strides) - + ' -datatype bit', - mrconvert_keyval=path.from_user(app.ARGS.input, False), - force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwi2mask/ants/__init__.py b/python/mrtrix3/dwi2mask/ants/__init__.py index 48a25475ad..aa4e64bb3a 100644 --- a/python/mrtrix3/dwi2mask/ants/__init__.py +++ b/python/mrtrix3/dwi2mask/ants/__init__.py @@ -13,66 +13,6 @@ # # For more details, see http://www.mrtrix.org/. 
-import os, shutil -from mrtrix3 import CONFIG, MRtrixError -from mrtrix3 import app, path, run - +NEEDS_MEAN_BZERO = True ANTS_BRAIN_EXTRACTION_CMD = 'antsBrainExtraction.sh' - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('ants', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Use ANTs Brain Extraction to derive a DWI brain mask') - parser.add_citation('B. Avants, N.J. Tustison, G. Song, P.A. Cook, A. Klein, J.C. Jee. A reproducible evaluation of ANTs similarity metric performance in brain image registration. NeuroImage, 2011, 54, 2033-2044', is_external=True) - parser.add_argument('input', help='The input DWI series') - parser.add_argument('output', help='The output mask image') - options = parser.add_argument_group('Options specific to the "ants" algorithm') - options.add_argument('-template', metavar='TemplateImage MaskImage', nargs=2, help='Provide the template image and corresponding mask for antsBrainExtraction.sh to use; the template image should be T2-weighted.') - - - -def get_inputs(): #pylint: disable=unused-variable - if app.ARGS.template: - run.command('mrconvert ' + app.ARGS.template[0] + ' ' + path.to_scratch('template_image.nii') - + ' -strides +1,+2,+3') - run.command('mrconvert ' + app.ARGS.template[1] + ' ' + path.to_scratch('template_mask.nii') - + ' -strides +1,+2,+3 -datatype uint8') - elif all(item in CONFIG for item in ['Dwi2maskTemplateImage', 'Dwi2maskTemplateMask']): - run.command('mrconvert ' + CONFIG['Dwi2maskTemplateImage'] + ' ' + path.to_scratch('template_image.nii') - + ' -strides +1,+2,+3') - run.command('mrconvert ' + CONFIG['Dwi2maskTemplateMask'] + ' ' + path.to_scratch('template_mask.nii') - + ' -strides +1,+2,+3 -datatype uint8') - else: - raise MRtrixError('No template image information available from ' - 'either command-line or MRtrix configuration file(s)') - - - -def needs_mean_bzero(): #pylint: disable=unused-variable - return True - - - -def execute(): #pylint: disable=unused-variable - ants_path = os.environ.get('ANTSPATH', '') - if not ants_path: - raise MRtrixError('Environment variable ANTSPATH is not set; ' - 'please appropriately confirure ANTs software') - if not shutil.which(ANTS_BRAIN_EXTRACTION_CMD): - raise MRtrixError('Unable to find command "' - + ANTS_BRAIN_EXTRACTION_CMD - + '"; please check ANTs installation') - - run.command(ANTS_BRAIN_EXTRACTION_CMD - + ' -d 3' - + ' -c 3x3x2x1' - + ' -a bzero.nii' - + ' -e template_image.nii' - + ' -m template_mask.nii' - + ' -o out' - + ('' if app.DO_CLEANUP else ' -k 1') - + (' -z' if app.VERBOSITY >= 3 else '')) - - return 'outBrainExtractionMask.nii.gz' diff --git a/python/mrtrix3/dwi2mask/ants/execute.py b/python/mrtrix3/dwi2mask/ants/execute.py new file mode 100644 index 0000000000..64ff355a38 --- /dev/null +++ b/python/mrtrix3/dwi2mask/ants/execute.py @@ -0,0 +1,41 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 
2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import os, shutil +from mrtrix3 import MRtrixError +from mrtrix3 import app, run +from . import ANTS_BRAIN_EXTRACTION_CMD + +def execute(): #pylint: disable=unused-variable + ants_path = os.environ.get('ANTSPATH', '') + if not ants_path: + raise MRtrixError('Environment variable ANTSPATH is not set; ' + 'please appropriately configure ANTs software') + if not shutil.which(ANTS_BRAIN_EXTRACTION_CMD): + raise MRtrixError('Unable to find command "' + + ANTS_BRAIN_EXTRACTION_CMD + + '"; please check ANTs installation') + + run.command(ANTS_BRAIN_EXTRACTION_CMD + + ' -d 3' + + ' -c 3x3x2x1' + + ' -a bzero.nii' + + ' -e template_image.nii' + + ' -m template_mask.nii' + + ' -o out' + + ('' if app.DO_CLEANUP else ' -k 1') + + (' -z' if app.VERBOSITY >= 3 else '')) + + return 'outBrainExtractionMask.nii.gz' diff --git a/python/mrtrix3/dwi2mask/ants/get_inputs.py b/python/mrtrix3/dwi2mask/ants/get_inputs.py new file mode 100644 index 0000000000..fa6debcf14 --- /dev/null +++ b/python/mrtrix3/dwi2mask/ants/get_inputs.py @@ -0,0 +1,32 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import CONFIG, MRtrixError +from mrtrix3 import app, path, run + +def get_inputs(): #pylint: disable=unused-variable + if app.ARGS.template: + run.command('mrconvert ' + app.ARGS.template[0] + ' ' + path.to_scratch('template_image.nii') + + ' -strides +1,+2,+3') + run.command('mrconvert ' + app.ARGS.template[1] + ' ' + path.to_scratch('template_mask.nii') + + ' -strides +1,+2,+3 -datatype uint8') + elif all(item in CONFIG for item in ['Dwi2maskTemplateImage', 'Dwi2maskTemplateMask']): + run.command('mrconvert ' + CONFIG['Dwi2maskTemplateImage'] + ' ' + path.to_scratch('template_image.nii') + + ' -strides +1,+2,+3') + run.command('mrconvert ' + CONFIG['Dwi2maskTemplateMask'] + ' ' + path.to_scratch('template_mask.nii') + + ' -strides +1,+2,+3 -datatype uint8') + else: + raise MRtrixError('No template image information available from ' + 'either command-line or MRtrix configuration file(s)') diff --git a/python/mrtrix3/dwi2mask/ants/usage.py b/python/mrtrix3/dwi2mask/ants/usage.py new file mode 100644 index 0000000000..25e2ab0ec6 --- /dev/null +++ b/python/mrtrix3/dwi2mask/ants/usage.py @@ -0,0 +1,24 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. 
+# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('ants', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Use ANTs Brain Extraction to derive a DWI brain mask') + parser.add_citation('B. Avants, N.J. Tustison, G. Song, P.A. Cook, A. Klein, J.C. Jee. A reproducible evaluation of ANTs similarity metric performance in brain image registration. NeuroImage, 2011, 54, 2033-2044', is_external=True) + parser.add_argument('input', help='The input DWI series') + parser.add_argument('output', help='The output mask image') + options = parser.add_argument_group('Options specific to the "ants" algorithm') + options.add_argument('-template', metavar='TemplateImage MaskImage', nargs=2, help='Provide the template image and corresponding mask for antsBrainExtraction.sh to use; the template image should be T2-weighted.') diff --git a/python/mrtrix3/dwi2mask/b02template/__init__.py b/python/mrtrix3/dwi2mask/b02template/__init__.py index 4c3d1ed073..3feffb8446 100644 --- a/python/mrtrix3/dwi2mask/b02template/__init__.py +++ b/python/mrtrix3/dwi2mask/b02template/__init__.py @@ -13,10 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -import os, shutil -from mrtrix3 import CONFIG, MRtrixError -from mrtrix3 import app, fsl, path, run - +NEEDS_MEAN_BZERO = True SOFTWARES = ['antsfull', 'antsquick', 'fsl'] DEFAULT_SOFTWARE = 'antsquick' @@ -47,213 +44,3 @@ ' --shrink-factors 6x4x2x1' ANTS_REGISTERQUICK_OPTIONS = '-j 1' - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('b02template', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Register the mean b=0 image to a T2-weighted template to back-propagate a brain mask') - parser.add_description('This script currently assumes that the template image provided via the -template option ' - 'is T2-weighted, and can therefore be trivially registered to a mean b=0 image.') - parser.add_description('Command-line option -ants_options can be used with either the "antsquick" or "antsfull" software options. ' - 'In both cases, image dimensionality is assumed to be 3, and so this should be omitted from the user-specified options.' - 'The input can be either a string (encased in double-quotes if more than one option is specified), or a path to a text file containing the requested options. ' - 'In the case of the "antsfull" software option, one will require the names of the fixed and moving images that are provided to the antsRegistration command: these are "template_image.nii" and "bzero.nii" respectively.') - parser.add_citation('M. Jenkinson, C.F. Beckmann, T.E. Behrens, M.W. Woolrich, S.M. Smith. FSL. NeuroImage, 62:782-90, 2012', - condition='If FSL software is used for registration', - is_external=True) - parser.add_citation('B. Avants, N.J. Tustison, G. Song, P.A. Cook, A. Klein, J.C. Jee. A reproducible evaluation of ANTs similarity metric performance in brain image registration. 
NeuroImage, 2011, 54, 2033-2044', - condition='If ANTs software is used for registration', - is_external=True) - parser.add_argument('input', help='The input DWI series') - parser.add_argument('output', help='The output mask image') - options = parser.add_argument_group('Options specific to the "template" algorithm') - options.add_argument('-software', choices=SOFTWARES, help='The software to use for template registration; options are: ' + ','.join(SOFTWARES) + '; default is ' + DEFAULT_SOFTWARE) - options.add_argument('-template', metavar='TemplateImage MaskImage', nargs=2, help='Provide the template image to which the input data will be registered, and the mask to be projected to the input image. The template image should be T2-weighted.') - ants_options = parser.add_argument_group('Options applicable when using the ANTs software for registration') - ants_options.add_argument('-ants_options', help='Provide options to be passed to the ANTs registration command (see Description)') - fsl_options = parser.add_argument_group('Options applicable when using the FSL software for registration') - fsl_options.add_argument('-flirt_options', metavar='" FlirtOptions"', help='Command-line options to pass to the FSL flirt command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to flirt)') - fsl_options.add_argument('-fnirt_config', metavar='FILE', help='Specify a FNIRT configuration file for registration') - - - -def get_inputs(): #pylint: disable=unused-variable - - reg_software = app.ARGS.software if app.ARGS.software else CONFIG.get('Dwi2maskTemplateSoftware', DEFAULT_SOFTWARE) - if reg_software.startswith('ants'): - if not os.environ.get('ANTSPATH', ''): - raise MRtrixError('Environment variable ANTSPATH is not set; ' - 'please appropriately configure ANTs software') - if app.ARGS.ants_options: - if os.path.isfile(path.from_user(app.ARGS.ants_options, False)): - run.function(shutil.copyfile, path.from_user(app.ARGS.ants_options, False), path.to_scratch('ants_options.txt', False)) - elif reg_software == 'fsl': - fsl_path = os.environ.get('FSLDIR', '') - if not fsl_path: - raise MRtrixError('Environment variable FSLDIR is not set; ' - 'please run appropriate FSL configuration script') - if app.ARGS.fnirt_config: - fnirt_config = path.from_user(app.ARGS.fnirt_config, False) - if not os.path.isfile(fnirt_config): - raise MRtrixError('No file found at -fnirt_config location "' + fnirt_config + '"') - elif 'Dwi2maskTemplateFSLFnirtConfig' in CONFIG: - fnirt_config = CONFIG['Dwi2maskTemplateFSLFnirtConfig'] - if not os.path.isfile(fnirt_config): - raise MRtrixError('No file found at config file entry "Dwi2maskTemplateFSLFnirtConfig" location "' + fnirt_config + '"') - else: - fnirt_config = None - if fnirt_config: - run.function(shutil.copyfile, fnirt_config, path.to_scratch('fnirt_config.cnf', False)) - else: - assert False - - if app.ARGS.template: - run.command('mrconvert ' + app.ARGS.template[0] + ' ' + path.to_scratch('template_image.nii') - + ' -strides +1,+2,+3') - run.command('mrconvert ' + app.ARGS.template[1] + ' ' + path.to_scratch('template_mask.nii') - + ' -strides +1,+2,+3 -datatype uint8') - elif all(item in CONFIG for item in ['Dwi2maskTemplateImage', 'Dwi2maskTemplateMask']): - run.command('mrconvert ' + CONFIG['Dwi2maskTemplateImage'] + ' ' + path.to_scratch('template_image.nii') - + ' -strides +1,+2,+3') - run.command('mrconvert ' + CONFIG['Dwi2maskTemplateMask'] + ' ' + path.to_scratch('template_mask.nii') - + 
' -strides +1,+2,+3 -datatype uint8') - else: - raise MRtrixError('No template image information available from ' - 'either command-line or MRtrix configuration file(s)') - - - -def needs_mean_bzero(): #pylint: disable=unused-variable - return True - - - -def execute(): #pylint: disable=unused-variable - - reg_software = app.ARGS.software if app.ARGS.software else CONFIG.get('Dwi2maskTemplateSoftware', DEFAULT_SOFTWARE) - - if reg_software.startswith('ants'): - - def check_ants_executable(cmdname): - if not shutil.which(cmdname): - raise MRtrixError('Unable to find ANTs command "' + cmdname + '"; please check ANTs installation') - check_ants_executable(ANTS_REGISTERFULL_CMD if reg_software == 'antsfull' else ANTS_REGISTERQUICK_CMD) - check_ants_executable(ANTS_TRANSFORM_CMD) - - if app.ARGS.ants_options: - if os.path.isfile('ants_options.txt'): - with open('ants_options.txt', 'r', encoding='utf-8') as ants_options_file: - ants_options = ants_options_file.readlines() - ants_options = ' '.join(line.lstrip().rstrip('\n \\') for line in ants_options if line.strip() and not line.lstrip()[0] == '#') - else: - ants_options = app.ARGS.ants_options - else: - if reg_software == 'antsfull': - ants_options = CONFIG.get('Dwi2maskTemplateANTsFullOptions', ANTS_REGISTERFULL_OPTIONS) - elif reg_software == 'antsquick': - ants_options = CONFIG.get('Dwi2maskTemplateANTsQuickOptions', ANTS_REGISTERQUICK_OPTIONS) - - # Use ANTs SyN for registration to template - if reg_software == 'antsfull': - run.command(ANTS_REGISTERFULL_CMD - + ' --dimensionality 3' - + ' --output ANTS' - + ' ' - + ants_options) - ants_options_split = ants_options.split() - nonlinear = any(i for i in range(0, len(ants_options_split)-1) - if ants_options_split[i] == '--transform' - and not any(item in ants_options_split[i+1] for item in ['Rigid', 'Affine', 'Translation'])) - else: - # Use ANTs SyNQuick for registration to template - run.command(ANTS_REGISTERQUICK_CMD - + ' -d 3' - + ' -f template_image.nii' - + ' -m bzero.nii' - + ' -o ANTS' - + ' ' - + ants_options) - ants_options_split = ants_options.split() - nonlinear = not [i for i in range(0, len(ants_options_split)-1) - if ants_options_split[i] == '-t' - and ants_options_split[i+1] in ['t', 'r', 'a']] - - transformed_path = 'transformed.nii' - # Note: Don't use nearest-neighbour interpolation; - # allow "partial volume fractions" in output, and threshold later - run.command(ANTS_TRANSFORM_CMD - + ' -d 3' - + ' -i template_mask.nii' - + ' -o ' + transformed_path - + ' -r bzero.nii' - + ' -t [ANTS0GenericAffine.mat,1]' - + (' -t ANTS1InverseWarp.nii.gz' if nonlinear else '')) - - elif reg_software == 'fsl': - - flirt_cmd = fsl.exe_name('flirt') - fnirt_cmd = fsl.exe_name('fnirt') - invwarp_cmd = fsl.exe_name('invwarp') - applywarp_cmd = fsl.exe_name('applywarp') - - flirt_options = app.ARGS.flirt_options \ - if app.ARGS.flirt_options \ - else CONFIG.get('Dwi2maskTemplateFSLFlirtOptions', '-dof 12') - - # Initial affine registration to template - run.command(flirt_cmd - + ' -ref template_image.nii' - + ' -in bzero.nii' - + ' -omat bzero_to_template.mat' - + ' ' - + flirt_options - + (' -v' if app.VERBOSITY >= 3 else '')) - - # Produce dilated template mask image, so that registration is not - # too influenced by effects at the edge of the processing mask - run.command('maskfilter template_mask.nii dilate - -npass 3 | ' - 'mrconvert - template_mask_dilated.nii -datatype uint8') - - # Non-linear registration to template - if os.path.isfile('fnirt_config.cnf'): - fnirt_config_option = ' 
--config=fnirt_config' - else: - fnirt_config_option = '' - app.console('No config file provided for FSL fnirt; it will use its internal defaults') - run.command(fnirt_cmd - + fnirt_config_option - + ' --ref=template_image.nii' - + ' --refmask=template_mask_dilated.nii' - + ' --in=bzero.nii' - + ' --aff=bzero_to_template.mat' - + ' --cout=bzero_to_template.nii' - + (' --verbose' if app.VERBOSITY >= 3 else '')) - fnirt_output_path = fsl.find_image('bzero_to_template') - - # Invert non-linear warp from subject->template to template->subject - run.command(invwarp_cmd - + ' --ref=bzero.nii' - + ' --warp=' + fnirt_output_path - + ' --out=template_to_bzero.nii') - invwarp_output_path = fsl.find_image('template_to_bzero') - - # Transform mask image from template to subject - # Note: Don't use nearest-neighbour interpolation; - # allow "partial volume fractions" in output, and threshold later - run.command(applywarp_cmd - + ' --ref=bzero.nii' - + ' --in=template_mask.nii' - + ' --warp=' + invwarp_output_path - + ' --out=transformed.nii') - transformed_path = fsl.find_image('transformed.nii') - - else: - assert False - - # Instead of neaerest-neighbour interpolation during transformation, - # apply a threshold of 0.5 at this point - run.command('mrthreshold ' - + transformed_path - + ' mask.mif -abs 0.5') - return 'mask.mif' diff --git a/python/mrtrix3/dwi2mask/b02template/execute.py b/python/mrtrix3/dwi2mask/b02template/execute.py new file mode 100644 index 0000000000..e9a4ba1a46 --- /dev/null +++ b/python/mrtrix3/dwi2mask/b02template/execute.py @@ -0,0 +1,150 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import os, shutil +from mrtrix3 import CONFIG, MRtrixError +from mrtrix3 import app, fsl, run +from . import ANTS_REGISTERFULL_CMD, ANTS_REGISTERQUICK_CMD, ANTS_TRANSFORM_CMD +from . import ANTS_REGISTERFULL_OPTIONS, ANTS_REGISTERQUICK_OPTIONS +from . 
import DEFAULT_SOFTWARE + +def execute(): #pylint: disable=unused-variable + + reg_software = app.ARGS.software if app.ARGS.software else CONFIG.get('Dwi2maskTemplateSoftware', DEFAULT_SOFTWARE) + + if reg_software.startswith('ants'): + + def check_ants_executable(cmdname): + if not shutil.which(cmdname): + raise MRtrixError('Unable to find ANTs command "' + cmdname + '"; please check ANTs installation') + check_ants_executable(ANTS_REGISTERFULL_CMD if reg_software == 'antsfull' else ANTS_REGISTERQUICK_CMD) + check_ants_executable(ANTS_TRANSFORM_CMD) + + if app.ARGS.ants_options: + if os.path.isfile('ants_options.txt'): + with open('ants_options.txt', 'r', encoding='utf-8') as ants_options_file: + ants_options = ants_options_file.readlines() + ants_options = ' '.join(line.lstrip().rstrip('\n \\') for line in ants_options if line.strip() and not line.lstrip()[0] == '#') + else: + ants_options = app.ARGS.ants_options + else: + if reg_software == 'antsfull': + ants_options = CONFIG.get('Dwi2maskTemplateANTsFullOptions', ANTS_REGISTERFULL_OPTIONS) + elif reg_software == 'antsquick': + ants_options = CONFIG.get('Dwi2maskTemplateANTsQuickOptions', ANTS_REGISTERQUICK_OPTIONS) + + # Use ANTs SyN for registration to template + if reg_software == 'antsfull': + run.command(ANTS_REGISTERFULL_CMD + + ' --dimensionality 3' + + ' --output ANTS' + + ' ' + + ants_options) + ants_options_split = ants_options.split() + nonlinear = any(i for i in range(0, len(ants_options_split)-1) + if ants_options_split[i] == '--transform' + and not any(item in ants_options_split[i+1] for item in ['Rigid', 'Affine', 'Translation'])) + else: + # Use ANTs SyNQuick for registration to template + run.command(ANTS_REGISTERQUICK_CMD + + ' -d 3' + + ' -f template_image.nii' + + ' -m bzero.nii' + + ' -o ANTS' + + ' ' + + ants_options) + ants_options_split = ants_options.split() + nonlinear = not [i for i in range(0, len(ants_options_split)-1) + if ants_options_split[i] == '-t' + and ants_options_split[i+1] in ['t', 'r', 'a']] + + transformed_path = 'transformed.nii' + # Note: Don't use nearest-neighbour interpolation; + # allow "partial volume fractions" in output, and threshold later + run.command(ANTS_TRANSFORM_CMD + + ' -d 3' + + ' -i template_mask.nii' + + ' -o ' + transformed_path + + ' -r bzero.nii' + + ' -t [ANTS0GenericAffine.mat,1]' + + (' -t ANTS1InverseWarp.nii.gz' if nonlinear else '')) + + elif reg_software == 'fsl': + + flirt_cmd = fsl.exe_name('flirt') + fnirt_cmd = fsl.exe_name('fnirt') + invwarp_cmd = fsl.exe_name('invwarp') + applywarp_cmd = fsl.exe_name('applywarp') + + flirt_options = app.ARGS.flirt_options \ + if app.ARGS.flirt_options \ + else CONFIG.get('Dwi2maskTemplateFSLFlirtOptions', '-dof 12') + + # Initial affine registration to template + run.command(flirt_cmd + + ' -ref template_image.nii' + + ' -in bzero.nii' + + ' -omat bzero_to_template.mat' + + ' ' + + flirt_options + + (' -v' if app.VERBOSITY >= 3 else '')) + + # Produce dilated template mask image, so that registration is not + # too influenced by effects at the edge of the processing mask + run.command('maskfilter template_mask.nii dilate - -npass 3 | ' + 'mrconvert - template_mask_dilated.nii -datatype uint8') + + # Non-linear registration to template + if os.path.isfile('fnirt_config.cnf'): + fnirt_config_option = ' --config=fnirt_config' + else: + fnirt_config_option = '' + app.console('No config file provided for FSL fnirt; it will use its internal defaults') + run.command(fnirt_cmd + + fnirt_config_option + + ' --ref=template_image.nii' + 
+ ' --refmask=template_mask_dilated.nii' + + ' --in=bzero.nii' + + ' --aff=bzero_to_template.mat' + + ' --cout=bzero_to_template.nii' + + (' --verbose' if app.VERBOSITY >= 3 else '')) + fnirt_output_path = fsl.find_image('bzero_to_template') + + # Invert non-linear warp from subject->template to template->subject + run.command(invwarp_cmd + + ' --ref=bzero.nii' + + ' --warp=' + fnirt_output_path + + ' --out=template_to_bzero.nii') + invwarp_output_path = fsl.find_image('template_to_bzero') + + # Transform mask image from template to subject + # Note: Don't use nearest-neighbour interpolation; + # allow "partial volume fractions" in output, and threshold later + run.command(applywarp_cmd + + ' --ref=bzero.nii' + + ' --in=template_mask.nii' + + ' --warp=' + invwarp_output_path + + ' --out=transformed.nii') + transformed_path = fsl.find_image('transformed.nii') + + else: + assert False + + # Instead of nearest-neighbour interpolation during transformation, + # apply a threshold of 0.5 at this point + run.command('mrthreshold ' + + transformed_path + + ' mask.mif -abs 0.5') + return 'mask.mif' diff --git a/python/mrtrix3/dwi2mask/b02template/get_inputs.py b/python/mrtrix3/dwi2mask/b02template/get_inputs.py new file mode 100644 index 0000000000..8f916b4ade --- /dev/null +++ b/python/mrtrix3/dwi2mask/b02template/get_inputs.py @@ -0,0 +1,63 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import os, shutil +from mrtrix3 import CONFIG, MRtrixError +from mrtrix3 import app, path, run +from . 
import DEFAULT_SOFTWARE + +def get_inputs(): #pylint: disable=unused-variable + + reg_software = app.ARGS.software if app.ARGS.software else CONFIG.get('Dwi2maskTemplateSoftware', DEFAULT_SOFTWARE) + if reg_software.startswith('ants'): + if not os.environ.get('ANTSPATH', ''): + raise MRtrixError('Environment variable ANTSPATH is not set; ' + 'please appropriately configure ANTs software') + if app.ARGS.ants_options: + if os.path.isfile(path.from_user(app.ARGS.ants_options, False)): + run.function(shutil.copyfile, path.from_user(app.ARGS.ants_options, False), path.to_scratch('ants_options.txt', False)) + elif reg_software == 'fsl': + fsl_path = os.environ.get('FSLDIR', '') + if not fsl_path: + raise MRtrixError('Environment variable FSLDIR is not set; ' + 'please run appropriate FSL configuration script') + if app.ARGS.fnirt_config: + fnirt_config = path.from_user(app.ARGS.fnirt_config, False) + if not os.path.isfile(fnirt_config): + raise MRtrixError('No file found at -fnirt_config location "' + fnirt_config + '"') + elif 'Dwi2maskTemplateFSLFnirtConfig' in CONFIG: + fnirt_config = CONFIG['Dwi2maskTemplateFSLFnirtConfig'] + if not os.path.isfile(fnirt_config): + raise MRtrixError('No file found at config file entry "Dwi2maskTemplateFSLFnirtConfig" location "' + fnirt_config + '"') + else: + fnirt_config = None + if fnirt_config: + run.function(shutil.copyfile, fnirt_config, path.to_scratch('fnirt_config.cnf', False)) + else: + assert False + + if app.ARGS.template: + run.command('mrconvert ' + app.ARGS.template[0] + ' ' + path.to_scratch('template_image.nii') + + ' -strides +1,+2,+3') + run.command('mrconvert ' + app.ARGS.template[1] + ' ' + path.to_scratch('template_mask.nii') + + ' -strides +1,+2,+3 -datatype uint8') + elif all(item in CONFIG for item in ['Dwi2maskTemplateImage', 'Dwi2maskTemplateMask']): + run.command('mrconvert ' + CONFIG['Dwi2maskTemplateImage'] + ' ' + path.to_scratch('template_image.nii') + + ' -strides +1,+2,+3') + run.command('mrconvert ' + CONFIG['Dwi2maskTemplateMask'] + ' ' + path.to_scratch('template_mask.nii') + + ' -strides +1,+2,+3 -datatype uint8') + else: + raise MRtrixError('No template image information available from ' + 'either command-line or MRtrix configuration file(s)') diff --git a/python/mrtrix3/dwi2mask/b02template/usage.py b/python/mrtrix3/dwi2mask/b02template/usage.py new file mode 100644 index 0000000000..1b2318e54a --- /dev/null +++ b/python/mrtrix3/dwi2mask/b02template/usage.py @@ -0,0 +1,44 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from . import DEFAULT_SOFTWARE +from . import SOFTWARES + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('b02template', parents=[base_parser]) + parser.set_author('Robert E. 
Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Register the mean b=0 image to a T2-weighted template to back-propagate a brain mask') + parser.add_description('This script currently assumes that the template image provided via the -template option ' + 'is T2-weighted, and can therefore be trivially registered to a mean b=0 image.') + parser.add_description('Command-line option -ants_options can be used with either the "antsquick" or "antsfull" software options. ' + 'In both cases, image dimensionality is assumed to be 3, and so this should be omitted from the user-specified options. ' + 'The input can be either a string (encased in double-quotes if more than one option is specified), or a path to a text file containing the requested options. ' + 'In the case of the "antsfull" software option, one will require the names of the fixed and moving images that are provided to the antsRegistration command: these are "template_image.nii" and "bzero.nii" respectively.') + parser.add_citation('M. Jenkinson, C.F. Beckmann, T.E. Behrens, M.W. Woolrich, S.M. Smith. FSL. NeuroImage, 62:782-90, 2012', + condition='If FSL software is used for registration', + is_external=True) + parser.add_citation('B. Avants, N.J. Tustison, G. Song, P.A. Cook, A. Klein, J.C. Gee. A reproducible evaluation of ANTs similarity metric performance in brain image registration. NeuroImage, 2011, 54, 2033-2044', + condition='If ANTs software is used for registration', + is_external=True) + parser.add_argument('input', help='The input DWI series') + parser.add_argument('output', help='The output mask image') + options = parser.add_argument_group('Options specific to the "template" algorithm') + options.add_argument('-software', choices=SOFTWARES, help='The software to use for template registration; options are: ' + ','.join(SOFTWARES) + '; default is ' + DEFAULT_SOFTWARE) + options.add_argument('-template', metavar='TemplateImage MaskImage', nargs=2, help='Provide the template image to which the input data will be registered, and the mask to be projected to the input image. The template image should be T2-weighted.') + ants_options = parser.add_argument_group('Options applicable when using the ANTs software for registration') + ants_options.add_argument('-ants_options', help='Provide options to be passed to the ANTs registration command (see Description)') + fsl_options = parser.add_argument_group('Options applicable when using the FSL software for registration') + fsl_options.add_argument('-flirt_options', metavar='" FlirtOptions"', help='Command-line options to pass to the FSL flirt command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to flirt)') + fsl_options.add_argument('-fnirt_config', metavar='FILE', help='Specify a FNIRT configuration file for registration') diff --git a/python/mrtrix3/dwi2mask/consensus/__init__.py b/python/mrtrix3/dwi2mask/consensus/__init__.py index 1092fa0d44..aec280be9d 100644 --- a/python/mrtrix3/dwi2mask/consensus/__init__.py +++ b/python/mrtrix3/dwi2mask/consensus/__init__.py @@ -13,119 +13,6 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import CONFIG, MRtrixError -from mrtrix3 import algorithm, app, path, run +NEEDS_MEAN_BZERO = False DEFAULT_THRESHOLD = 0.501 - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('consensus', parents=[base_parser]) - parser.set_author('Robert E. 
Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Generate a brain mask based on the consensus of all dwi2mask algorithms') - parser.add_argument('input', help='The input DWI series') - parser.add_argument('output', help='The output mask image') - options = parser.add_argument_group('Options specific to the "consensus" algorithm') - options.add_argument('-algorithms', nargs='+', help='Provide a list of dwi2mask algorithms that are to be utilised') - options.add_argument('-masks', help='Export a 4D image containing the individual algorithm masks') - options.add_argument('-template', metavar='TemplateImage MaskImage', nargs=2, help='Provide a template image and corresponding mask for those algorithms requiring such') - options.add_argument('-threshold', type=float, default=DEFAULT_THRESHOLD, help='The fraction of algorithms that must include a voxel for that voxel to be present in the final mask (default: ' + str(DEFAULT_THRESHOLD) + ')') - - - -def get_inputs(): #pylint: disable=unused-variable - if app.ARGS.template: - run.command('mrconvert ' + app.ARGS.template[0] + ' ' + path.to_scratch('template_image.nii') - + ' -strides +1,+2,+3') - run.command('mrconvert ' + app.ARGS.template[1] + ' ' + path.to_scratch('template_mask.nii') - + ' -strides +1,+2,+3 -datatype uint8') - elif all(item in CONFIG for item in ['Dwi2maskTemplateImage', 'Dwi2maskTemplateMask']): - run.command('mrconvert ' + CONFIG['Dwi2maskTemplateImage'] + ' ' + path.to_scratch('template_image.nii') - + ' -strides +1,+2,+3') - run.command('mrconvert ' + CONFIG['Dwi2maskTemplateMask'] + ' ' + path.to_scratch('template_mask.nii') - + ' -strides +1,+2,+3 -datatype uint8') - else: - raise MRtrixError('No template image information available from ' - 'either command-line or MRtrix configuration file(s)') - - - -def needs_mean_bzero(): #pylint: disable=unused-variable - return False - - - -def execute(): #pylint: disable=unused-variable - - if app.ARGS.threshold <= 0.0 or app.ARGS.threshold > 1.0: - raise MRtrixError('-threshold parameter value must lie between 0.0 and 1.0') - - if app.ARGS.masks: - app.check_output_path(path.from_user(app.ARGS.masks, False)) - - algorithm_list = [item for item in algorithm.get_list() if item != 'consensus'] - app.debug(str(algorithm_list)) - - if app.ARGS.algorithms: - if 'consensus' in app.ARGS.algorithms: - raise MRtrixError('Cannot provide "consensus" in list of dwi2mask algorithms to utilise') - invalid_algs = [entry for entry in app.ARGS.algorithms if entry not in algorithm_list] - if invalid_algs: - raise MRtrixError('Requested dwi2mask algorithm' - + ('s' if len(invalid_algs) > 1 else '') - + ' not available: ' - + str(invalid_algs)) - algorithm_list = app.ARGS.algorithms - - # For "template" algorithm, can run twice with two different softwares - # Ideally this would be determined based on the help page, - # rather than pre-programmed - # Don't use "-software antsquick"; we're assuming that "antsfull" is superior - if 'b02template' in algorithm_list: - algorithm_list = [item for item in algorithm_list if item != 'b02template'] - algorithm_list.append('b02template -software antsfull') - algorithm_list.append('b02template -software fsl') - app.debug(str(algorithm_list)) - - mask_list = [] - for alg in algorithm_list: - alg_string = alg.replace(' -software ', '_') - mask_path = alg_string + '.mif' - cmd = 'dwi2mask ' + alg + ' input.mif ' + mask_path - # Ideally this would be determined based on the presence of this option - # in the command's help page - if any(item in alg for 
item in ['ants', 'b02template']): - cmd += ' -template template_image.nii template_mask.nii' - cmd += ' -scratch ' + app.SCRATCH_DIR - if not app.DO_CLEANUP: - cmd += ' -nocleanup' - try: - run.command(cmd) - mask_list.append(mask_path) - except run.MRtrixCmdError as e_dwi2mask: - app.warn('"dwi2mask ' + alg + '" failed; will be omitted from consensus') - app.debug(str(e_dwi2mask)) - with open('error_' + alg_string + '.txt', 'w', encoding='utf-8') as f_error: - f_error.write(str(e_dwi2mask)) - - app.debug(str(mask_list)) - if not mask_list: - raise MRtrixError('No dwi2mask algorithms were successful; cannot generate mask') - if len(mask_list) == 1: - app.warn('Only one dwi2mask algorithm was successful; output mask will be this result and not a consensus') - if app.ARGS.masks: - run.command('mrconvert ' + mask_list[0] + ' ' + path.from_user(app.ARGS.masks), - mrconvert_keyval=path.from_user(app.ARGS.input, False), - force=app.FORCE_OVERWRITE) - return mask_list[0] - final_mask = 'consensus.mif' - app.console('Computing consensus from ' + str(len(mask_list)) + ' of ' + str(len(algorithm_list)) + ' algorithms') - run.command(['mrcat', mask_list, '-axis', '3', 'all_masks.mif']) - run.command('mrmath all_masks.mif mean - -axis 3 | ' - 'mrthreshold - -abs ' + str(app.ARGS.threshold) + ' -comparison ge ' + final_mask) - - if app.ARGS.masks: - run.command('mrconvert all_masks.mif ' + path.from_user(app.ARGS.masks), - mrconvert_keyval=path.from_user(app.ARGS.input, False), - force=app.FORCE_OVERWRITE) - - return final_mask diff --git a/python/mrtrix3/dwi2mask/consensus/execute.py b/python/mrtrix3/dwi2mask/consensus/execute.py new file mode 100644 index 0000000000..9614d8e0ec --- /dev/null +++ b/python/mrtrix3/dwi2mask/consensus/execute.py @@ -0,0 +1,93 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
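The consensus computation performed by this algorithm (mrcat of the per-algorithm masks, mrmath mean along the fourth axis, then mrthreshold with -comparison ge) means that DEFAULT_THRESHOLD = 0.501 requires a strict majority of the successful algorithms to include a voxel before it enters the final mask. A minimal sketch of the voxel-wise arithmetic, using hypothetical NumPy arrays purely for illustration (the script itself operates on images via the MRtrix commands shown here):

import numpy as np

# Three hypothetical 2x2 binary masks from different dwi2mask algorithms
masks = np.array([[[1, 1], [0, 1]],
                  [[1, 0], [0, 1]],
                  [[1, 1], [0, 0]]], dtype=float)

# Fraction of algorithms including each voxel
# (the equivalent of 'mrcat ... -axis 3 | mrmath mean - -axis 3')
fraction = masks.mean(axis=0)

# A threshold of 0.501 with '-comparison ge' demands a strict majority:
# with three algorithms, a voxel must appear in at least two masks
consensus = fraction >= 0.501
print(consensus)   # [[ True  True]
                   #  [False  True]]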
+ +from mrtrix3 import MRtrixError +from mrtrix3 import algorithm, app, path, run + +def execute(): #pylint: disable=unused-variable + + if app.ARGS.threshold <= 0.0 or app.ARGS.threshold > 1.0: + raise MRtrixError('-threshold parameter value must lie between 0.0 and 1.0') + + if app.ARGS.masks: + app.check_output_path(path.from_user(app.ARGS.masks, False)) + + algorithm_list = [item for item in algorithm.get_list() if item != 'consensus'] + app.debug(str(algorithm_list)) + + if app.ARGS.algorithms: + if 'consensus' in app.ARGS.algorithms: + raise MRtrixError('Cannot provide "consensus" in list of dwi2mask algorithms to utilise') + invalid_algs = [entry for entry in app.ARGS.algorithms if entry not in algorithm_list] + if invalid_algs: + raise MRtrixError('Requested dwi2mask algorithm' + + ('s' if len(invalid_algs) > 1 else '') + + ' not available: ' + + str(invalid_algs)) + algorithm_list = app.ARGS.algorithms + + # For "template" algorithm, can run twice with two different softwares + # Ideally this would be determined based on the help page, + # rather than pre-programmed + # Don't use "-software antsquick"; we're assuming that "antsfull" is superior + if 'b02template' in algorithm_list: + algorithm_list = [item for item in algorithm_list if item != 'b02template'] + algorithm_list.append('b02template -software antsfull') + algorithm_list.append('b02template -software fsl') + app.debug(str(algorithm_list)) + + mask_list = [] + for alg in algorithm_list: + alg_string = alg.replace(' -software ', '_') + mask_path = alg_string + '.mif' + cmd = 'dwi2mask ' + alg + ' input.mif ' + mask_path + # Ideally this would be determined based on the presence of this option + # in the command's help page + if any(item in alg for item in ['ants', 'b02template']): + cmd += ' -template template_image.nii template_mask.nii' + cmd += ' -scratch ' + app.SCRATCH_DIR + if not app.DO_CLEANUP: + cmd += ' -nocleanup' + try: + run.command(cmd) + mask_list.append(mask_path) + except run.MRtrixCmdError as e_dwi2mask: + app.warn('"dwi2mask ' + alg + '" failed; will be omitted from consensus') + app.debug(str(e_dwi2mask)) + with open('error_' + alg_string + '.txt', 'w', encoding='utf-8') as f_error: + f_error.write(str(e_dwi2mask)) + + app.debug(str(mask_list)) + if not mask_list: + raise MRtrixError('No dwi2mask algorithms were successful; cannot generate mask') + if len(mask_list) == 1: + app.warn('Only one dwi2mask algorithm was successful; output mask will be this result and not a consensus') + if app.ARGS.masks: + run.command('mrconvert ' + mask_list[0] + ' ' + path.from_user(app.ARGS.masks), + mrconvert_keyval=path.from_user(app.ARGS.input, False), + force=app.FORCE_OVERWRITE) + return mask_list[0] + final_mask = 'consensus.mif' + app.console('Computing consensus from ' + str(len(mask_list)) + ' of ' + str(len(algorithm_list)) + ' algorithms') + run.command(['mrcat', mask_list, '-axis', '3', 'all_masks.mif']) + run.command('mrmath all_masks.mif mean - -axis 3 | ' + 'mrthreshold - -abs ' + str(app.ARGS.threshold) + ' -comparison ge ' + final_mask) + + if app.ARGS.masks: + run.command('mrconvert all_masks.mif ' + path.from_user(app.ARGS.masks), + mrconvert_keyval=path.from_user(app.ARGS.input, False), + force=app.FORCE_OVERWRITE) + + return final_mask diff --git a/python/mrtrix3/dwi2mask/consensus/get_inputs.py b/python/mrtrix3/dwi2mask/consensus/get_inputs.py new file mode 100644 index 0000000000..fa6debcf14 --- /dev/null +++ b/python/mrtrix3/dwi2mask/consensus/get_inputs.py @@ -0,0 +1,32 @@ +# Copyright (c) 
2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import CONFIG, MRtrixError +from mrtrix3 import app, path, run + +def get_inputs(): #pylint: disable=unused-variable + if app.ARGS.template: + run.command('mrconvert ' + app.ARGS.template[0] + ' ' + path.to_scratch('template_image.nii') + + ' -strides +1,+2,+3') + run.command('mrconvert ' + app.ARGS.template[1] + ' ' + path.to_scratch('template_mask.nii') + + ' -strides +1,+2,+3 -datatype uint8') + elif all(item in CONFIG for item in ['Dwi2maskTemplateImage', 'Dwi2maskTemplateMask']): + run.command('mrconvert ' + CONFIG['Dwi2maskTemplateImage'] + ' ' + path.to_scratch('template_image.nii') + + ' -strides +1,+2,+3') + run.command('mrconvert ' + CONFIG['Dwi2maskTemplateMask'] + ' ' + path.to_scratch('template_mask.nii') + + ' -strides +1,+2,+3 -datatype uint8') + else: + raise MRtrixError('No template image information available from ' + 'either command-line or MRtrix configuration file(s)') diff --git a/python/mrtrix3/dwi2mask/consensus/usage.py b/python/mrtrix3/dwi2mask/consensus/usage.py new file mode 100644 index 0000000000..85a4a57586 --- /dev/null +++ b/python/mrtrix3/dwi2mask/consensus/usage.py @@ -0,0 +1,28 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from . import DEFAULT_THRESHOLD + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('consensus', parents=[base_parser]) + parser.set_author('Robert E. 
Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Generate a brain mask based on the consensus of all dwi2mask algorithms') + parser.add_argument('input', help='The input DWI series') + parser.add_argument('output', help='The output mask image') + options = parser.add_argument_group('Options specific to the "consensus" algorithm') + options.add_argument('-algorithms', nargs='+', help='Provide a list of dwi2mask algorithms that are to be utilised') + options.add_argument('-masks', help='Export a 4D image containing the individual algorithm masks') + options.add_argument('-template', metavar='TemplateImage MaskImage', nargs=2, help='Provide a template image and corresponding mask for those algorithms requiring such') + options.add_argument('-threshold', type=float, default=DEFAULT_THRESHOLD, help='The fraction of algorithms that must include a voxel for that voxel to be present in the final mask (default: ' + str(DEFAULT_THRESHOLD) + ')') diff --git a/python/mrtrix3/dwi2mask/execute.py b/python/mrtrix3/dwi2mask/execute.py new file mode 100644 index 0000000000..51b303057a --- /dev/null +++ b/python/mrtrix3/dwi2mask/execute.py @@ -0,0 +1,78 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
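The top-level dwi2mask driver that follows resolves the user-selected algorithm through algorithm.get() and then delegates to that module's get_inputs(), needs_mean_bzero() and execute() functions. The implementation of algorithm.get() is not part of this hunk; the resolver below is only a sketch of the kind of name-to-module lookup it implies, with the package path and function name assumed for illustration:

import importlib
import types

def get_algorithm(name: str, package: str = 'mrtrix3.dwi2mask') -> types.ModuleType:
    # Hypothetical resolver: map an algorithm name such as 'fslbet' to the
    # sub-module expected to expose usage(), get_inputs() and execute()
    return importlib.import_module(f'{package}.{name}')

# Example (assumes the mrtrix3 package is importable):
#   alg = get_algorithm('fslbet')
#   alg.get_inputs()
#   mask_image = alg.execute()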
+ +from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module +from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module + +def execute(): #pylint: disable=unused-variable + + # Find out which algorithm the user has requested + alg = algorithm.get(app.ARGS.algorithm) + + app.check_output_path(app.ARGS.output) + + input_header = image.Header(path.from_user(app.ARGS.input, False)) + image.check_3d_nonunity(input_header) + grad_import_option = app.read_dwgrad_import_options() + if not grad_import_option and 'dw_scheme' not in input_header.keyval(): + raise MRtrixError('Script requires diffusion gradient table: ' + 'either in image header, or using -grad / -fslgrad option') + + app.make_scratch_dir() + + # Get input data into the scratch directory + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('input.mif') + + ' -strides 0,0,0,1' + grad_import_option) + alg.get_inputs() + + app.goto_scratch_dir() + + # Generate a mean b=0 image (common task in many algorithms) + if alg.needs_mean_bzero(): + run.command('dwiextract input.mif -bzero - | ' + 'mrmath - mean - -axis 3 | ' + 'mrconvert - bzero.nii -strides +1,+2,+3') + + # Get a mask of voxels for which the DWI data are valid + # (want to ensure that no algorithm includes any voxels where + # there is no valid DWI data, regardless of how they operate) + run.command('mrmath input.mif max - -axis 3 | ' + 'mrthreshold - -abs 0 -comparison gt input_pos_mask.mif') + + # Make relative strides of three spatial axes of output mask equivalent + # to input DWI; this may involve decrementing magnitude of stride + # if the input DWI is volume-contiguous + strides = image.Header('input.mif').strides()[0:3] + strides = [(abs(value) + 1 - min(abs(v) for v in strides)) * (-1 if value < 0 else 1) for value in strides] + + # From here, the script splits depending on what algorithm is being used + # The return value of the execute() function should be the name of the + # image in the scratch directory that is to be exported + mask_path = alg.execute() + + # Before exporting the mask image, get a mask of voxels for which + # the DWI data are valid + # (want to ensure that no algorithm includes any voxels where + # there is no valid DWI data, regardless of how they operate) + run.command('mrcalc ' + + mask_path + + ' input_pos_mask.mif -mult -' + + ' |' + + ' mrconvert - ' + + path.from_user(app.ARGS.output) + + ' -strides ' + ','.join(str(value) for value in strides) + + ' -datatype bit', + mrconvert_keyval=path.from_user(app.ARGS.input, False), + force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwi2mask/fslbet/__init__.py b/python/mrtrix3/dwi2mask/fslbet/__init__.py index 4c67cda5a1..cc15ae55ec 100644 --- a/python/mrtrix3/dwi2mask/fslbet/__init__.py +++ b/python/mrtrix3/dwi2mask/fslbet/__init__.py @@ -13,67 +13,4 @@ # # For more details, see http://www.mrtrix.org/. -import os -from mrtrix3 import MRtrixError -from mrtrix3 import app, fsl, image, run - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('fslbet', parents=[base_parser]) - parser.set_author('Warda Syeda (wtsyeda@unimelb.edu.au) and Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Use the FSL Brain Extraction Tool (bet) to generate a brain mask') - parser.add_citation('Smith, S. M. Fast robust automated brain extraction. 
Human Brain Mapping, 2002, 17, 143-155', is_external=True) - parser.add_argument('input', help='The input DWI series') - parser.add_argument('output', help='The output mask image') - options = parser.add_argument_group('Options specific to the \'fslbet\' algorithm') - options.add_argument('-bet_f', type=float, help='Fractional intensity threshold (0->1); smaller values give larger brain outline estimates') - options.add_argument('-bet_g', type=float, help='Vertical gradient in fractional intensity threshold (-1->1); positive values give larger brain outline at bottom, smaller at top') - options.add_argument('-bet_c', nargs=3, metavar='', help='Centre-of-gravity (voxels not mm) of initial mesh surface') - options.add_argument('-bet_r', type=float, help='Head radius (mm not voxels); initial surface sphere is set to half of this') - options.add_argument('-rescale', action='store_true', help='Rescale voxel size provided to BET to 1mm isotropic; can improve results for rodent data') - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def needs_mean_bzero(): #pylint: disable=unused-variable - return True - - - -def execute(): #pylint: disable=unused-variable - if not os.environ.get('FSLDIR', ''): - raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') - bet_cmd = fsl.exe_name('bet') - - # Starting brain masking using BET - if app.ARGS.rescale: - run.command('mrconvert bzero.nii bzero_rescaled.nii -vox 1,1,1') - vox = image.Header('bzero.nii').spacing() - b0_image = 'bzero_rescaled.nii' - else: - b0_image = 'bzero.nii' - - cmd_string = bet_cmd + ' ' + b0_image + ' DWI_BET -R -m' - - if app.ARGS.bet_f is not None: - cmd_string += ' -f ' + str(app.ARGS.bet_f) - if app.ARGS.bet_g is not None: - cmd_string += ' -g ' + str(app.ARGS.bet_g) - if app.ARGS.bet_r is not None: - cmd_string += ' -r ' + str(app.ARGS.bet_r) - if app.ARGS.bet_c is not None: - cmd_string += ' -c ' + ' '.join(app.ARGS.bet_c) - - # Running BET command - run.command(cmd_string) - mask = fsl.find_image('DWI_BET_mask') - - if app.ARGS.rescale: - run.command('mrconvert ' + mask + ' mask_rescaled.nii -vox ' + ','.join(str(value) for value in vox)) - return 'mask_rescaled.nii' - return mask +NEEDS_MEAN_BZERO = True diff --git a/python/mrtrix3/dwi2mask/fslbet/execute.py b/python/mrtrix3/dwi2mask/fslbet/execute.py new file mode 100644 index 0000000000..18b580c75e --- /dev/null +++ b/python/mrtrix3/dwi2mask/fslbet/execute.py @@ -0,0 +1,51 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
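The stride remapping in the new dwi2mask/execute.py shown above deserves a brief illustration: the list comprehension keeps the sign and the relative ordering of the three spatial strides of the input DWI, but renumbers their magnitudes to start at 1, since the output mask has no volume axis. A small self-contained example of the same expression (the function name is chosen here purely for demonstration):

def remap_strides(strides):
    # Same expression as used in dwi2mask/execute.py: preserve sign and
    # relative order, renumber magnitudes so the smallest becomes 1
    smallest = min(abs(v) for v in strides)
    return [(abs(value) + 1 - smallest) * (-1 if value < 0 else 1)
            for value in strides]

# A volume-contiguous DWI typically has spatial strides of magnitude 2,3,4;
# the corresponding 3D mask should then be written with strides 1,2,3:
print(remap_strides([2, 3, 4]))    # [1, 2, 3]
print(remap_strides([-3, 4, 2]))   # [-2, 3, 1]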
+ +import os +from mrtrix3 import MRtrixError +from mrtrix3 import app, fsl, image, run + +def execute(): #pylint: disable=unused-variable + if not os.environ.get('FSLDIR', ''): + raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') + bet_cmd = fsl.exe_name('bet') + + # Starting brain masking using BET + if app.ARGS.rescale: + run.command('mrconvert bzero.nii bzero_rescaled.nii -vox 1,1,1') + vox = image.Header('bzero.nii').spacing() + b0_image = 'bzero_rescaled.nii' + else: + b0_image = 'bzero.nii' + + cmd_string = bet_cmd + ' ' + b0_image + ' DWI_BET -R -m' + + if app.ARGS.bet_f is not None: + cmd_string += ' -f ' + str(app.ARGS.bet_f) + if app.ARGS.bet_g is not None: + cmd_string += ' -g ' + str(app.ARGS.bet_g) + if app.ARGS.bet_r is not None: + cmd_string += ' -r ' + str(app.ARGS.bet_r) + if app.ARGS.bet_c is not None: + cmd_string += ' -c ' + ' '.join(app.ARGS.bet_c) + + # Running BET command + run.command(cmd_string) + mask = fsl.find_image('DWI_BET_mask') + + if app.ARGS.rescale: + run.command('mrconvert ' + mask + ' mask_rescaled.nii -vox ' + ','.join(str(value) for value in vox)) + return 'mask_rescaled.nii' + return mask diff --git a/python/mrtrix3/dwi2mask/fslbet/get_inputs.py b/python/mrtrix3/dwi2mask/fslbet/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwi2mask/fslbet/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwi2mask/fslbet/usage.py b/python/mrtrix3/dwi2mask/fslbet/usage.py new file mode 100644 index 0000000000..943ff3c9c1 --- /dev/null +++ b/python/mrtrix3/dwi2mask/fslbet/usage.py @@ -0,0 +1,28 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('fslbet', parents=[base_parser]) + parser.set_author('Warda Syeda (wtsyeda@unimelb.edu.au) and Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Use the FSL Brain Extraction Tool (bet) to generate a brain mask') + parser.add_citation('Smith, S. M. Fast robust automated brain extraction. 
Human Brain Mapping, 2002, 17, 143-155', is_external=True) + parser.add_argument('input', help='The input DWI series') + parser.add_argument('output', help='The output mask image') + options = parser.add_argument_group('Options specific to the \'fslbet\' algorithm') + options.add_argument('-bet_f', type=float, help='Fractional intensity threshold (0->1); smaller values give larger brain outline estimates') + options.add_argument('-bet_g', type=float, help='Vertical gradient in fractional intensity threshold (-1->1); positive values give larger brain outline at bottom, smaller at top') + options.add_argument('-bet_c', nargs=3, metavar='', help='Centre-of-gravity (voxels not mm) of initial mesh surface') + options.add_argument('-bet_r', type=float, help='Head radius (mm not voxels); initial surface sphere is set to half of this') + options.add_argument('-rescale', action='store_true', help='Rescale voxel size provided to BET to 1mm isotropic; can improve results for rodent data') diff --git a/python/mrtrix3/dwi2mask/hdbet/__init__.py b/python/mrtrix3/dwi2mask/hdbet/__init__.py index 0ead11ddf7..cc15ae55ec 100644 --- a/python/mrtrix3/dwi2mask/hdbet/__init__.py +++ b/python/mrtrix3/dwi2mask/hdbet/__init__.py @@ -13,60 +13,4 @@ # # For more details, see http://www.mrtrix.org/. -import shutil -from mrtrix3 import MRtrixError -from mrtrix3 import app, run - - -OUTPUT_IMAGE_PATH = 'bzero_bet_mask.nii.gz' - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('hdbet', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Use HD-BET to derive a brain mask from the DWI mean b=0 image') - parser.add_citation('Isensee F, Schell M, Tursunova I, Brugnara G, Bonekamp D, Neuberger U, Wick A, Schlemmer HP, Heiland S, Wick W, Bendszus M, Maier-Hein KH, Kickingereder P. Automated brain extraction of multi-sequence MRI using artificial neural networks. Hum Brain Mapp. 2019; 1-13. 
https://doi.org/10.1002/hbm.24750', is_external=True) - parser.add_argument('input', help='The input DWI series') - parser.add_argument('output', help='The output mask image') - options = parser.add_argument_group('Options specific to the \'hdbet\' algorithm') - options.add_argument('-nogpu', action='store_true', help='Do not attempt to run on the GPU') - - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def needs_mean_bzero(): #pylint: disable=unused-variable - return True - - - -def execute(): #pylint: disable=unused-variable - hdbet_cmd = shutil.which('hd-bet') - if not hdbet_cmd: - raise MRtrixError('Unable to locate "hd-bet" executable; check installation') - - # GPU version is not guaranteed to work; - # attempt CPU version if that is the case - e_gpu = None #pylint: disable=unused-variable - if not app.ARGS.nogpu: - try: - run.command('hd-bet -i bzero.nii') - return OUTPUT_IMAGE_PATH - except run.MRtrixCmdError as e_gpu: #pylint: disable=unused-variable - app.warn('HD-BET failed when running on GPU; attempting on CPU') - try: - run.command('hd-bet -i bzero.nii -device cpu -mode fast -tta 0') - return OUTPUT_IMAGE_PATH - except run.MRtrixCmdError as e_cpu: - if app.ARGS.nogpu: - raise - gpu_header = ('===\nGPU\n===\n') - cpu_header = ('===\nCPU\n===\n') - exception_stdout = gpu_header + e_gpu.stdout + '\n\n' + cpu_header + e_cpu.stdout + '\n\n' - exception_stderr = gpu_header + e_gpu.stderr + '\n\n' + cpu_header + e_cpu.stderr + '\n\n' - raise run.MRtrixCmdError('hd-bet', 1, exception_stdout, exception_stderr) +NEEDS_MEAN_BZERO = True diff --git a/python/mrtrix3/dwi2mask/hdbet/execute.py b/python/mrtrix3/dwi2mask/hdbet/execute.py new file mode 100644 index 0000000000..9277ca5dcc --- /dev/null +++ b/python/mrtrix3/dwi2mask/hdbet/execute.py @@ -0,0 +1,46 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
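A pattern emerging across these per-algorithm packages (fslbet and hdbet above, and the remaining algorithms below) is that the needs_mean_bzero() function is replaced by a module-level NEEDS_MEAN_BZERO constant. The code that consumes this constant is not shown in this hunk; the helper below is only a sketch, under the assumption that a driver would prefer the constant and fall back to the legacy callable where it still exists:

import types

def algorithm_needs_mean_bzero(alg_module) -> bool:
    # Prefer the new module-level flag; fall back to the legacy callable
    # (the fallback behaviour is an assumption, not code from this patch)
    if hasattr(alg_module, 'NEEDS_MEAN_BZERO'):
        return bool(alg_module.NEEDS_MEAN_BZERO)
    needs = getattr(alg_module, 'needs_mean_bzero', None)
    return bool(needs()) if callable(needs) else False

# Stub module standing in for e.g. the hdbet algorithm package:
print(algorithm_needs_mean_bzero(types.SimpleNamespace(NEEDS_MEAN_BZERO=True)))  # True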
+ +import shutil +from mrtrix3 import MRtrixError +from mrtrix3 import app, run + +def execute(): #pylint: disable=unused-variable + hdbet_cmd = shutil.which('hd-bet') + if not hdbet_cmd: + raise MRtrixError('Unable to locate "hd-bet" executable; check installation') + + output_image_path = 'bzero_bet_mask.nii.gz' + + # GPU version is not guaranteed to work; + # attempt CPU version if that is the case + e_gpu = None #pylint: disable=unused-variable + if not app.ARGS.nogpu: + try: + run.command('hd-bet -i bzero.nii') + return output_image_path + except run.MRtrixCmdError as e_gpu: #pylint: disable=unused-variable + app.warn('HD-BET failed when running on GPU; attempting on CPU') + try: + run.command('hd-bet -i bzero.nii -device cpu -mode fast -tta 0') + return output_image_path + except run.MRtrixCmdError as e_cpu: + if app.ARGS.nogpu: + raise + gpu_header = ('===\nGPU\n===\n') + cpu_header = ('===\nCPU\n===\n') + exception_stdout = gpu_header + e_gpu.stdout + '\n\n' + cpu_header + e_cpu.stdout + '\n\n' + exception_stderr = gpu_header + e_gpu.stderr + '\n\n' + cpu_header + e_cpu.stderr + '\n\n' + raise run.MRtrixCmdError('hd-bet', 1, exception_stdout, exception_stderr) diff --git a/python/mrtrix3/dwi2mask/hdbet/get_inputs.py b/python/mrtrix3/dwi2mask/hdbet/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwi2mask/hdbet/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwi2mask/hdbet/usage.py b/python/mrtrix3/dwi2mask/hdbet/usage.py new file mode 100644 index 0000000000..a3ab8106fe --- /dev/null +++ b/python/mrtrix3/dwi2mask/hdbet/usage.py @@ -0,0 +1,24 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('hdbet', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Use HD-BET to derive a brain mask from the DWI mean b=0 image') + parser.add_citation('Isensee F, Schell M, Tursunova I, Brugnara G, Bonekamp D, Neuberger U, Wick A, Schlemmer HP, Heiland S, Wick W, Bendszus M, Maier-Hein KH, Kickingereder P. 
Automated brain extraction of multi-sequence MRI using artificial neural networks. Hum Brain Mapp. 2019; 1-13. https://doi.org/10.1002/hbm.24750', is_external=True) + parser.add_argument('input', help='The input DWI series') + parser.add_argument('output', help='The output mask image') + options = parser.add_argument_group('Options specific to the \'hdbet\' algorithm') + options.add_argument('-nogpu', action='store_true', help='Do not attempt to run on the GPU') diff --git a/python/mrtrix3/dwi2mask/legacy/__init__.py b/python/mrtrix3/dwi2mask/legacy/__init__.py index dcba64a179..bca64e8202 100644 --- a/python/mrtrix3/dwi2mask/legacy/__init__.py +++ b/python/mrtrix3/dwi2mask/legacy/__init__.py @@ -13,47 +13,6 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import app, run +NEEDS_MEAN_BZERO = False DEFAULT_CLEAN_SCALE = 2 - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('legacy', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Use the legacy MRtrix3 dwi2mask heuristic (based on thresholded trace images)') - parser.add_argument('input', help='The input DWI series') - parser.add_argument('output', help='The output mask image') - parser.add_argument('-clean_scale', - type=int, - default=DEFAULT_CLEAN_SCALE, - help='the maximum scale used to cut bridges. A certain maximum scale cuts ' - 'bridges up to a width (in voxels) of 2x the provided scale. Setting ' - 'this to 0 disables the mask cleaning step. (Default: ' + str(DEFAULT_CLEAN_SCALE) + ')') - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def needs_mean_bzero(): #pylint: disable=unused-variable - return False - - - -def execute(): #pylint: disable=unused-variable - - run.command('mrcalc input.mif 0 -max input_nonneg.mif') - run.command('dwishellmath input_nonneg.mif mean trace.mif') - app.cleanup('input_nonneg.mif') - run.command('mrthreshold trace.mif - -comparison gt | ' - 'mrmath - max -axis 3 - | ' - 'maskfilter - median - | ' - 'maskfilter - connect -largest - | ' - 'maskfilter - fill - | ' - 'maskfilter - clean -scale ' + str(app.ARGS.clean_scale) + ' mask.mif') - - return 'mask.mif' diff --git a/python/mrtrix3/dwi2mask/legacy/execute.py b/python/mrtrix3/dwi2mask/legacy/execute.py new file mode 100644 index 0000000000..bc3843ad8d --- /dev/null +++ b/python/mrtrix3/dwi2mask/legacy/execute.py @@ -0,0 +1,30 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
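The hdbet execute() relocated above retries on the CPU when the GPU invocation of hd-bet fails, and combines both stdout/stderr streams if the CPU attempt also fails. One caveat worth noting: Python 3 clears the name bound by 'except ... as e_gpu' once that handler exits, so the combined-error path appears to rely on a name that is no longer bound; copying the exception into a separate variable avoids this. A generic sketch of the fallback pattern (the callables and exception type here are placeholders, not MRtrix API):

def run_with_fallback(run_gpu, run_cpu):
    # Keep a reference to the GPU failure outside the except block,
    # since the 'as' name is unbound once the handler finishes
    gpu_error = None
    try:
        return run_gpu()
    except RuntimeError as exc:
        gpu_error = exc
    try:
        return run_cpu()
    except RuntimeError as cpu_error:
        raise RuntimeError(f'GPU attempt failed: {gpu_error}; '
                           f'CPU attempt failed: {cpu_error}') from cpu_error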
+ +from mrtrix3 import app, run + +def execute(): #pylint: disable=unused-variable + + run.command('mrcalc input.mif 0 -max input_nonneg.mif') + run.command('dwishellmath input_nonneg.mif mean trace.mif') + app.cleanup('input_nonneg.mif') + run.command('mrthreshold trace.mif - -comparison gt | ' + 'mrmath - max -axis 3 - | ' + 'maskfilter - median - | ' + 'maskfilter - connect -largest - | ' + 'maskfilter - fill - | ' + 'maskfilter - clean -scale ' + str(app.ARGS.clean_scale) + ' mask.mif') + + return 'mask.mif' diff --git a/python/mrtrix3/dwi2mask/legacy/get_inputs.py b/python/mrtrix3/dwi2mask/legacy/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwi2mask/legacy/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwi2mask/legacy/usage.py b/python/mrtrix3/dwi2mask/legacy/usage.py new file mode 100644 index 0000000000..cda8b7f5b6 --- /dev/null +++ b/python/mrtrix3/dwi2mask/legacy/usage.py @@ -0,0 +1,29 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from . import DEFAULT_CLEAN_SCALE + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('legacy', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Use the legacy MRtrix3 dwi2mask heuristic (based on thresholded trace images)') + parser.add_argument('input', help='The input DWI series') + parser.add_argument('output', help='The output mask image') + parser.add_argument('-clean_scale', + type=int, + default=DEFAULT_CLEAN_SCALE, + help='the maximum scale used to cut bridges. A certain maximum scale cuts ' + 'bridges up to a width (in voxels) of 2x the provided scale. Setting ' + 'this to 0 disables the mask cleaning step. (Default: ' + str(DEFAULT_CLEAN_SCALE) + ')') diff --git a/python/mrtrix3/dwi2mask/mean/__init__.py b/python/mrtrix3/dwi2mask/mean/__init__.py index 30060fdd43..bca64e8202 100644 --- a/python/mrtrix3/dwi2mask/mean/__init__.py +++ b/python/mrtrix3/dwi2mask/mean/__init__.py @@ -13,46 +13,6 @@ # # For more details, see http://www.mrtrix.org/. 
-from mrtrix3 import app, run +NEEDS_MEAN_BZERO = False DEFAULT_CLEAN_SCALE = 2 - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('mean', parents=[base_parser]) - parser.set_author('Warda Syeda (wtsyeda@unimelb.edu.au)') - parser.set_synopsis('Generate a mask based on simply averaging all volumes in the DWI series') - parser.add_argument('input', help='The input DWI series') - parser.add_argument('output', help='The output mask image') - options = parser.add_argument_group('Options specific to the \'mean\' algorithm') - options.add_argument('-shells', help='Comma separated list of shells to be included in the volume averaging') - options.add_argument('-clean_scale', - type=int, - default=DEFAULT_CLEAN_SCALE, - help='the maximum scale used to cut bridges. A certain maximum scale cuts ' - 'bridges up to a width (in voxels) of 2x the provided scale. Setting ' - 'this to 0 disables the mask cleaning step. (Default: ' + str(DEFAULT_CLEAN_SCALE) + ')') - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def needs_mean_bzero(): #pylint: disable=unused-variable - return False - - - -def execute(): #pylint: disable=unused-variable - - run.command(('dwiextract input.mif - -shells ' + app.ARGS.shells + ' | mrmath -' \ - if app.ARGS.shells \ - else 'mrmath input.mif') - + ' mean - -axis 3 |' - + ' mrthreshold - - |' - + ' maskfilter - connect -largest - |' - + ' maskfilter - fill - |' - + ' maskfilter - clean -scale ' + str(app.ARGS.clean_scale) + ' mask.mif') - - return 'mask.mif' diff --git a/python/mrtrix3/dwi2mask/mean/execute.py b/python/mrtrix3/dwi2mask/mean/execute.py new file mode 100644 index 0000000000..ecf1fb518a --- /dev/null +++ b/python/mrtrix3/dwi2mask/mean/execute.py @@ -0,0 +1,29 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app, run + +def execute(): #pylint: disable=unused-variable + + run.command(('dwiextract input.mif - -shells ' + app.ARGS.shells + ' | mrmath -' \ + if app.ARGS.shells \ + else 'mrmath input.mif') + + ' mean - -axis 3 |' + + ' mrthreshold - - |' + + ' maskfilter - connect -largest - |' + + ' maskfilter - fill - |' + + ' maskfilter - clean -scale ' + str(app.ARGS.clean_scale) + ' mask.mif') + + return 'mask.mif' diff --git a/python/mrtrix3/dwi2mask/mean/get_inputs.py b/python/mrtrix3/dwi2mask/mean/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwi2mask/mean/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwi2mask/mean/usage.py b/python/mrtrix3/dwi2mask/mean/usage.py new file mode 100644 index 0000000000..1fd7c7a47f --- /dev/null +++ b/python/mrtrix3/dwi2mask/mean/usage.py @@ -0,0 +1,31 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from . import DEFAULT_CLEAN_SCALE + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('mean', parents=[base_parser]) + parser.set_author('Warda Syeda (wtsyeda@unimelb.edu.au)') + parser.set_synopsis('Generate a mask based on simply averaging all volumes in the DWI series') + parser.add_argument('input', help='The input DWI series') + parser.add_argument('output', help='The output mask image') + options = parser.add_argument_group('Options specific to the \'mean\' algorithm') + options.add_argument('-shells', help='Comma separated list of shells to be included in the volume averaging') + options.add_argument('-clean_scale', + type=int, + default=DEFAULT_CLEAN_SCALE, + help='the maximum scale used to cut bridges. A certain maximum scale cuts ' + 'bridges up to a width (in voxels) of 2x the provided scale. Setting ' + 'this to 0 disables the mask cleaning step. (Default: ' + str(DEFAULT_CLEAN_SCALE) + ')') diff --git a/python/mrtrix3/dwi2mask/mtnorm/__init__.py b/python/mrtrix3/dwi2mask/mtnorm/__init__.py index 340e923723..04157bbead 100644 --- a/python/mrtrix3/dwi2mask/mtnorm/__init__.py +++ b/python/mrtrix3/dwi2mask/mtnorm/__init__.py @@ -13,148 +13,8 @@ # # For more details, see http://www.mrtrix.org/. -import math -from mrtrix3 import MRtrixError -from mrtrix3 import app, image, path, run - +NEEDS_MEAN_BZERO = False LMAXES_MULTI = [4, 0, 0] LMAXES_SINGLE = [4, 0] THRESHOLD_DEFAULT = 0.5 - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('mtnorm', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Arshiya Sangchooli (asangchooli@student.unimelb.edu.au)') - parser.set_synopsis('Derives a DWI brain mask by calculating and then thresholding a sum-of-tissue-densities image') - parser.add_description('This script attempts to identify brain voxels based on the total density of macroscopic ' - 'tissues as estimated through multi-tissue deconvolution. 
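The 'mtnorm' help text above describes thresholding a sum-of-tissue-densities image at a fixed value (THRESHOLD_DEFAULT, 0.5). As a rough per-voxel illustration of that criterion, with invented l=0 spherical harmonic coefficients for the three tissue ODFs, the sum is scaled by sqrt(4*pi) (mirroring the mrcalc step later in the algorithm) before comparison against the threshold:

import math

# Hypothetical l=0 SH coefficients of the WM, GM and CSF ODFs in one voxel
fod_l0 = {'WM': 0.20, 'GM': 0.05, 'CSF': 0.03}

# Apparent total tissue density: sum of l=0 terms scaled by sqrt(4*pi)
tissue_sum = sum(fod_l0.values()) * math.sqrt(4.0 * math.pi)   # ~0.99

in_mask = tissue_sum >= 0.5   # THRESHOLD_DEFAULT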
Following response function ' - 'estimation and multi-tissue deconvolution, the sum of tissue densities is thresholded at a ' - 'fixed value (default is ' + str(THRESHOLD_DEFAULT) + '), and subsequent mask image cleaning ' - 'operations are performed.') - parser.add_description('The operation of this script is a subset of that performed by the script "dwibiasnormmask". ' - 'Many users may find that comprehensive solution preferable; this dwi2mask algorithm is nevertheless ' - 'provided to demonstrate specifically the mask estimation portion of that command.') - parser.add_description('The ODFs estimated within this optimisation procedure are by default of lower maximal spherical harmonic ' - 'degree than what would be advised for analysis. This is done for computational efficiency. This ' - 'behaviour can be modified through the -lmax command-line option.') - parser.add_citation('Jeurissen, B; Tournier, J-D; Dhollander, T; Connelly, A & Sijbers, J. ' - 'Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data. ' - 'NeuroImage, 2014, 103, 411-426') - parser.add_citation('Dhollander, T.; Raffelt, D. & Connelly, A. ' - 'Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. ' - 'ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5') - parser.add_argument('input', help='The input DWI series') - parser.add_argument('output', help='The output mask image') - options = parser.add_argument_group('Options specific to the "mtnorm" algorithm') - options.add_argument('-init_mask', - metavar='image', - help='Provide an initial brain mask, which will constrain the response function estimation ' - '(if omitted, the default dwi2mask algorithm will be used)') - options.add_argument('-lmax', - metavar='values', - help='The maximum spherical harmonic degree for the estimated FODs (see Description); ' - 'defaults are "' + ','.join(str(item) for item in LMAXES_MULTI) + '" for multi-shell and "' + ','.join(str(item) for item in LMAXES_SINGLE) + '" for single-shell data)') - options.add_argument('-threshold', - type=float, - metavar='value', - default=THRESHOLD_DEFAULT, - help='the threshold on the total tissue density sum image used to derive the brain mask; default is ' + str(THRESHOLD_DEFAULT)) - options.add_argument('-tissuesum', metavar='image', help='Export the tissue sum image that was used to generate the mask') - - - -def get_inputs(): #pylint: disable=unused-variable - if app.ARGS.init_mask: - run.command(['mrconvert', path.from_user(app.ARGS.init_mask, False), path.to_scratch('init_mask.mif', False), '-datatype', 'bit']) - - -def needs_mean_bzero(): #pylint: disable=unused-variable - return False - - -def execute(): #pylint: disable=unused-variable - - # Verify user inputs - lmax = None - if app.ARGS.lmax: - try: - lmax = [int(i) for i in app.ARGS.lmax.split(',')] - except ValueError as exc: - raise MRtrixError('Values provided to -lmax option must be a comma-separated list of integers') from exc - if any(value < 0 or value % 2 for value in lmax): - raise MRtrixError('lmax values must be non-negative even integers') - if len(lmax) not in [2, 3]: - raise MRtrixError('Length of lmax vector expected to be either 2 or 3') - if app.ARGS.threshold <= 0.0 or app.ARGS.threshold >= 1.0: - raise MRtrixError('Tissue density sum threshold must lie within the range (0.0, 1.0)') - - # Determine whether we are working with single-shell or multi-shell data - bvalues = [ - 
int(round(float(value))) - for value in image.mrinfo('input.mif', 'shell_bvalues') \ - .strip().split()] - multishell = (len(bvalues) > 2) - if lmax is None: - lmax = LMAXES_MULTI if multishell else LMAXES_SINGLE - elif len(lmax) == 3 and not multishell: - raise MRtrixError('User specified 3 lmax values for three-tissue decomposition, but input DWI is not multi-shell') - - class Tissue(object): #pylint: disable=useless-object-inheritance - def __init__(self, name): - self.name = name - self.tissue_rf = 'response_' + name + '.txt' - self.fod = 'FOD_' + name + '.mif' - - dwi_image = 'input.mif' - tissues = [Tissue('WM'), Tissue('GM'), Tissue('CSF')] - - run.command('dwi2response dhollander ' - + dwi_image - + (' -mask init_mask.mif' if app.ARGS.init_mask else '') - + ' ' - + ' '.join(tissue.tissue_rf for tissue in tissues)) - - # Immediately remove GM if we can't deal with it - if not multishell: - app.cleanup(tissues[1].tissue_rf) - tissues = tissues[::2] - - run.command('dwi2fod msmt_csd ' - + dwi_image - + ' -lmax ' + ','.join(str(item) for item in lmax) - + ' ' - + ' '.join(tissue.tissue_rf + ' ' + tissue.fod - for tissue in tissues)) - - tissue_sum_image = 'tissuesum.mif' - run.command('mrconvert ' - + tissues[0].fod - + ' -coord 3 0 - |' - + ' mrmath - ' - + ' '.join(tissue.fod for tissue in tissues[1:]) - + ' sum - | ' - + 'mrcalc - ' + str(math.sqrt(4.0 * math.pi)) + ' -mult ' - + tissue_sum_image) - - mask_image = 'mask.mif' - run.command('mrthreshold ' - + tissue_sum_image - + ' -abs ' - + str(app.ARGS.threshold) - + ' - |' - + ' maskfilter - connect -largest - |' - + ' mrcalc 1 - -sub - -datatype bit |' - + ' maskfilter - connect -largest - |' - + ' mrcalc 1 - -sub - -datatype bit |' - + ' maskfilter - clean ' - + mask_image) - app.cleanup([tissue.fod for tissue in tissues]) - - if app.ARGS.tissuesum: - run.command(['mrconvert', tissue_sum_image, path.from_user(app.ARGS.tissuesum, False)], - mrconvert_keyval=path.from_user(app.ARGS.input, False), - force=app.FORCE_OVERWRITE) - - return mask_image diff --git a/python/mrtrix3/dwi2mask/mtnorm/execute.py b/python/mrtrix3/dwi2mask/mtnorm/execute.py new file mode 100644 index 0000000000..e8b2a5b8d6 --- /dev/null +++ b/python/mrtrix3/dwi2mask/mtnorm/execute.py @@ -0,0 +1,104 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import math +from mrtrix3 import MRtrixError +from mrtrix3 import app, image, path, run +from . 
import LMAXES_MULTI, LMAXES_SINGLE + +def execute(): #pylint: disable=unused-variable + + # Verify user inputs + lmax = None + if app.ARGS.lmax: + try: + lmax = [int(i) for i in app.ARGS.lmax.split(',')] + except ValueError as exc: + raise MRtrixError('Values provided to -lmax option must be a comma-separated list of integers') from exc + if any(value < 0 or value % 2 for value in lmax): + raise MRtrixError('lmax values must be non-negative even integers') + if len(lmax) not in [2, 3]: + raise MRtrixError('Length of lmax vector expected to be either 2 or 3') + if app.ARGS.threshold <= 0.0 or app.ARGS.threshold >= 1.0: + raise MRtrixError('Tissue density sum threshold must lie within the range (0.0, 1.0)') + + # Determine whether we are working with single-shell or multi-shell data + bvalues = [ + int(round(float(value))) + for value in image.mrinfo('input.mif', 'shell_bvalues') \ + .strip().split()] + multishell = (len(bvalues) > 2) + if lmax is None: + lmax = LMAXES_MULTI if multishell else LMAXES_SINGLE + elif len(lmax) == 3 and not multishell: + raise MRtrixError('User specified 3 lmax values for three-tissue decomposition, but input DWI is not multi-shell') + + class Tissue(object): #pylint: disable=useless-object-inheritance + def __init__(self, name): + self.name = name + self.tissue_rf = 'response_' + name + '.txt' + self.fod = 'FOD_' + name + '.mif' + + dwi_image = 'input.mif' + tissues = [Tissue('WM'), Tissue('GM'), Tissue('CSF')] + + run.command('dwi2response dhollander ' + + dwi_image + + (' -mask init_mask.mif' if app.ARGS.init_mask else '') + + ' ' + + ' '.join(tissue.tissue_rf for tissue in tissues)) + + # Immediately remove GM if we can't deal with it + if not multishell: + app.cleanup(tissues[1].tissue_rf) + tissues = tissues[::2] + + run.command('dwi2fod msmt_csd ' + + dwi_image + + ' -lmax ' + ','.join(str(item) for item in lmax) + + ' ' + + ' '.join(tissue.tissue_rf + ' ' + tissue.fod + for tissue in tissues)) + + tissue_sum_image = 'tissuesum.mif' + run.command('mrconvert ' + + tissues[0].fod + + ' -coord 3 0 - |' + + ' mrmath - ' + + ' '.join(tissue.fod for tissue in tissues[1:]) + + ' sum - | ' + + 'mrcalc - ' + str(math.sqrt(4.0 * math.pi)) + ' -mult ' + + tissue_sum_image) + + mask_image = 'mask.mif' + run.command('mrthreshold ' + + tissue_sum_image + + ' -abs ' + + str(app.ARGS.threshold) + + ' - |' + + ' maskfilter - connect -largest - |' + + ' mrcalc 1 - -sub - -datatype bit |' + + ' maskfilter - connect -largest - |' + + ' mrcalc 1 - -sub - -datatype bit |' + + ' maskfilter - clean ' + + mask_image) + app.cleanup([tissue.fod for tissue in tissues]) + + if app.ARGS.tissuesum: + run.command(['mrconvert', tissue_sum_image, path.from_user(app.ARGS.tissuesum, False)], + mrconvert_keyval=path.from_user(app.ARGS.input, False), + force=app.FORCE_OVERWRITE) + + return mask_image diff --git a/python/mrtrix3/dwi2mask/mtnorm/get_inputs.py b/python/mrtrix3/dwi2mask/mtnorm/get_inputs.py new file mode 100644 index 0000000000..95c61351e1 --- /dev/null +++ b/python/mrtrix3/dwi2mask/mtnorm/get_inputs.py @@ -0,0 +1,20 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app, path, run + +def get_inputs(): #pylint: disable=unused-variable + if app.ARGS.init_mask: + run.command(['mrconvert', path.from_user(app.ARGS.init_mask, False), path.to_scratch('init_mask.mif', False), '-datatype', 'bit']) diff --git a/python/mrtrix3/dwi2mask/mtnorm/usage.py b/python/mrtrix3/dwi2mask/mtnorm/usage.py new file mode 100644 index 0000000000..c4cea3d8c7 --- /dev/null +++ b/python/mrtrix3/dwi2mask/mtnorm/usage.py @@ -0,0 +1,57 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from . import LMAXES_MULTI +from . import LMAXES_SINGLE +from . import THRESHOLD_DEFAULT + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('mtnorm', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Arshiya Sangchooli (asangchooli@student.unimelb.edu.au)') + parser.set_synopsis('Derives a DWI brain mask by calculating and then thresholding a sum-of-tissue-densities image') + parser.add_description('This script attempts to identify brain voxels based on the total density of macroscopic ' + 'tissues as estimated through multi-tissue deconvolution. Following response function ' + 'estimation and multi-tissue deconvolution, the sum of tissue densities is thresholded at a ' + 'fixed value (default is ' + str(THRESHOLD_DEFAULT) + '), and subsequent mask image cleaning ' + 'operations are performed.') + parser.add_description('The operation of this script is a subset of that performed by the script "dwibiasnormmask". ' + 'Many users may find that comprehensive solution preferable; this dwi2mask algorithm is nevertheless ' + 'provided to demonstrate specifically the mask estimation portion of that command.') + parser.add_description('The ODFs estimated within this optimisation procedure are by default of lower maximal spherical harmonic ' + 'degree than what would be advised for analysis. This is done for computational efficiency. This ' + 'behaviour can be modified through the -lmax command-line option.') + parser.add_citation('Jeurissen, B; Tournier, J-D; Dhollander, T; Connelly, A & Sijbers, J. ' + 'Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data. ' + 'NeuroImage, 2014, 103, 411-426') + parser.add_citation('Dhollander, T.; Raffelt, D. & Connelly, A. ' + 'Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. 
' + 'ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5') + parser.add_argument('input', help='The input DWI series') + parser.add_argument('output', help='The output mask image') + options = parser.add_argument_group('Options specific to the "mtnorm" algorithm') + options.add_argument('-init_mask', + metavar='image', + help='Provide an initial brain mask, which will constrain the response function estimation ' + '(if omitted, the default dwi2mask algorithm will be used)') + options.add_argument('-lmax', + metavar='values', + help='The maximum spherical harmonic degree for the estimated FODs (see Description); ' + 'defaults are "' + ','.join(str(item) for item in LMAXES_MULTI) + '" for multi-shell and "' + ','.join(str(item) for item in LMAXES_SINGLE) + '" for single-shell data)') + options.add_argument('-threshold', + type=float, + metavar='value', + default=THRESHOLD_DEFAULT, + help='the threshold on the total tissue density sum image used to derive the brain mask; default is ' + str(THRESHOLD_DEFAULT)) + options.add_argument('-tissuesum', metavar='image', help='Export the tissue sum image that was used to generate the mask') diff --git a/python/mrtrix3/dwi2mask/synthstrip/__init__.py b/python/mrtrix3/dwi2mask/synthstrip/__init__.py index d02070ee94..d4a42fa04f 100644 --- a/python/mrtrix3/dwi2mask/synthstrip/__init__.py +++ b/python/mrtrix3/dwi2mask/synthstrip/__init__.py @@ -13,73 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -import shutil -from mrtrix3 import MRtrixError -from mrtrix3 import app, path, run - - +NEEDS_MEAN_BZERO = True SYNTHSTRIP_CMD='mri_synthstrip' SYNTHSTRIP_SINGULARITY='sythstrip-singularity' - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('synthstrip', parents=[base_parser]) - parser.set_author('Ruobing Chen (chrc@student.unimelb.edu.au)') - parser.set_synopsis('Use the FreeSurfer Synthstrip method on the mean b=0 image') - parser.add_description('This algorithm requires that the SynthStrip method be installed and available via PATH; ' - 'this could be via Freesufer 7.3.0 or later, or the dedicated Singularity container.') - parser.add_citation('A. Hoopes, J. S. Mora, A. V. Dalca, B. Fischl, M. Hoffmann. SynthStrip: Skull-Stripping for Any Brain Image. 
NeuroImage, 2022, 260, 119474', is_external=True) - parser.add_argument('input', help='The input DWI series') - parser.add_argument('output', help='The output mask image') - options=parser.add_argument_group('Options specific to the \'Synthstrip\' algorithm') - options.add_argument('-stripped', help='The output stripped image') - options.add_argument('-gpu', action='store_true', default=False, help='Use the GPU') - options.add_argument('-model', metavar='file', help='Alternative model weights') - options.add_argument('-nocsf', action='store_true', default=False, help='Compute the immediate boundary of brain matter excluding surrounding CSF') - options.add_argument('-border', type=int, help='Control the boundary distance from the brain') - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def needs_mean_bzero(): #pylint: disable=unused-variable - return True - - - -def execute(): #pylint: disable=unused-variable - - synthstrip_cmd = shutil.which(SYNTHSTRIP_CMD) - if not synthstrip_cmd: - synthstrip_cmd=shutil.which(SYNTHSTRIP_SINGULARITY) - if not synthstrip_cmd: - raise MRtrixError('Unable to locate "Synthstrip" executable; please check installation') - - output_file = 'synthstrip_mask.nii' - stripped_file = 'stripped.nii' - cmd_string = SYNTHSTRIP_CMD + ' -i bzero.nii -m ' + output_file - - if app.ARGS.stripped: - cmd_string += ' -o ' + stripped_file - if app.ARGS.gpu: - cmd_string += ' -g' - - if app.ARGS.nocsf: - cmd_string += ' --no-csf' - - if app.ARGS.border is not None: - cmd_string += ' -b' + ' ' + str(app.ARGS.border) - - if app.ARGS.model: - cmd_string += ' --model' + path.from_user(app.ARGS.model) - - run.command(cmd_string) - if app.ARGS.stripped: - run.command('mrconvert ' + stripped_file + ' ' + path.from_user(app.ARGS.stripped), - mrconvert_keyval=path.from_user(app.ARGS.input, False), - force=app.FORCE_OVERWRITE) - return output_file diff --git a/python/mrtrix3/dwi2mask/synthstrip/execute.py b/python/mrtrix3/dwi2mask/synthstrip/execute.py new file mode 100644 index 0000000000..90e5398f37 --- /dev/null +++ b/python/mrtrix3/dwi2mask/synthstrip/execute.py @@ -0,0 +1,52 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import shutil +from mrtrix3 import MRtrixError +from mrtrix3 import app, path, run +from . 
import SYNTHSTRIP_CMD, SYNTHSTRIP_SINGULARITY + +def execute(): #pylint: disable=unused-variable + + synthstrip_cmd = shutil.which(SYNTHSTRIP_CMD) + if not synthstrip_cmd: + synthstrip_cmd=shutil.which(SYNTHSTRIP_SINGULARITY) + if not synthstrip_cmd: + raise MRtrixError('Unable to locate "Synthstrip" executable; please check installation') + + output_file = 'synthstrip_mask.nii' + stripped_file = 'stripped.nii' + cmd_string = SYNTHSTRIP_CMD + ' -i bzero.nii -m ' + output_file + + if app.ARGS.stripped: + cmd_string += ' -o ' + stripped_file + if app.ARGS.gpu: + cmd_string += ' -g' + + if app.ARGS.nocsf: + cmd_string += ' --no-csf' + + if app.ARGS.border is not None: + cmd_string += ' -b' + ' ' + str(app.ARGS.border) + + if app.ARGS.model: + cmd_string += ' --model' + path.from_user(app.ARGS.model) + + run.command(cmd_string) + if app.ARGS.stripped: + run.command('mrconvert ' + stripped_file + ' ' + path.from_user(app.ARGS.stripped), + mrconvert_keyval=path.from_user(app.ARGS.input, False), + force=app.FORCE_OVERWRITE) + return output_file diff --git a/python/mrtrix3/dwi2mask/synthstrip/get_inputs.py b/python/mrtrix3/dwi2mask/synthstrip/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwi2mask/synthstrip/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwi2mask/synthstrip/usage.py b/python/mrtrix3/dwi2mask/synthstrip/usage.py new file mode 100644 index 0000000000..94de3853cd --- /dev/null +++ b/python/mrtrix3/dwi2mask/synthstrip/usage.py @@ -0,0 +1,30 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('synthstrip', parents=[base_parser]) + parser.set_author('Ruobing Chen (chrc@student.unimelb.edu.au)') + parser.set_synopsis('Use the FreeSurfer Synthstrip method on the mean b=0 image') + parser.add_description('This algorithm requires that the SynthStrip method be installed and available via PATH; ' + 'this could be via Freesufer 7.3.0 or later, or the dedicated Singularity container.') + parser.add_citation('A. Hoopes, J. S. Mora, A. V. Dalca, B. Fischl, M. Hoffmann. 
SynthStrip: Skull-Stripping for Any Brain Image. NeuroImage, 2022, 260, 119474', is_external=True) + parser.add_argument('input', help='The input DWI series') + parser.add_argument('output', help='The output mask image') + options=parser.add_argument_group('Options specific to the \'Synthstrip\' algorithm') + options.add_argument('-stripped', help='The output stripped image') + options.add_argument('-gpu', action='store_true', default=False, help='Use the GPU') + options.add_argument('-model', metavar='file', help='Alternative model weights') + options.add_argument('-nocsf', action='store_true', default=False, help='Compute the immediate boundary of brain matter excluding surrounding CSF') + options.add_argument('-border', type=int, help='Control the boundary distance from the brain') diff --git a/python/mrtrix3/dwi2mask/trace/__init__.py b/python/mrtrix3/dwi2mask/trace/__init__.py index 823d36f95e..3c08030957 100644 --- a/python/mrtrix3/dwi2mask/trace/__init__.py +++ b/python/mrtrix3/dwi2mask/trace/__init__.py @@ -13,118 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -import math, os -from mrtrix3 import app, image, run +NEEDS_MEAN_BZERO = False DEFAULT_CLEAN_SCALE = 2 DEFAULT_MAX_ITERS = 10 - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('trace', parents=[base_parser]) - parser.set_author('Warda Syeda (wtsyeda@unimelb.edu.au) and Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('A method to generate a brain mask from trace images of b-value shells') - parser.add_argument('input', help='The input DWI series') - parser.add_argument('output', help='The output mask image') - options = parser.add_argument_group('Options specific to the \'trace\' algorithm') - options.add_argument('-shells', help='Comma separated list of shells used to generate trace-weighted images for masking') - options.add_argument('-clean_scale', - type=int, - default=DEFAULT_CLEAN_SCALE, - help='the maximum scale used to cut bridges. A certain maximum scale cuts ' - 'bridges up to a width (in voxels) of 2x the provided scale. Setting ' - 'this to 0 disables the mask cleaning step. 
(Default: ' + str(DEFAULT_CLEAN_SCALE) + ')') - iter_options = parser.add_argument_group('Options for turning \'dwi2mask trace\' into an iterative algorithm') - iter_options.add_argument('-iterative', - action='store_true', - help='(EXPERIMENTAL) Iteratively refine the weights for combination of per-shell trace-weighted images prior to thresholding') - iter_options.add_argument('-max_iters', type=int, default=DEFAULT_MAX_ITERS, help='Set the maximum number of iterations for the algorithm (default: ' + str(DEFAULT_MAX_ITERS) + ')') - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def needs_mean_bzero(): #pylint: disable=unused-variable - return False - - - -def execute(): #pylint: disable=unused-variable - - if app.ARGS.shells: - run.command('dwiextract input.mif input_shells.mif -shells ' + app.ARGS.shells) - run.command('dwishellmath input_shells.mif mean shell_traces.mif') - else: - run.command('dwishellmath input.mif mean shell_traces.mif') - - run.command('mrconvert shell_traces.mif -coord 3 0 -axes 0,1,2 shell-00.mif') - - # run per-shell histogram matching - files = ['shell-00.mif'] - shell_count = image.Header('shell_traces.mif').size()[-1] - progress = app.ProgressBar('Performing per-shell histogram matching', shell_count-1) - for index in range(1, shell_count): - filename = 'shell-{:02d}.mif'.format(index) - run.command('mrconvert shell_traces.mif -coord 3 ' + str(index) + ' -axes 0,1,2 - | ' - 'mrhistmatch scale - shell-00.mif ' + filename) - files.append(filename) - progress.increment() - progress.done() - - # concatenate intensity-matched shells, and perform standard cleaning - run.command(['mrmath', files, 'mean', '-', '|', - 'mrthreshold', '-', '-', '|', - 'maskfilter', '-', 'connect', '-largest', '-', '|', - 'maskfilter', '-', 'fill', '-', '|', - 'maskfilter', '-', 'clean', '-scale', str(app.ARGS.clean_scale), 'init_mask.mif']) - - if not app.ARGS.iterative: - return 'init_mask.mif' - - # The per-shell histogram matching should be only the first pass - # Once an initial mask has been derived, the weights with which the different - # shells should be revised, based on how well each shell separates brain from - # non-brain - # Each shell trace image has a mean and standard deviation inside the mask, and - # a mean and standard deviation outside the mask - # A shell that provides a strong separation between within-mask and outside-mask - # intensities should have a greater contribution toward the combined image - # Cohen's d would be an appropriate per-shell weight - - current_mask = 'init_mask.mif' - iteration = 0 - while True: - current_mask_inv = os.path.splitext(current_mask)[0] + '_inv.mif' - run.command('mrcalc 1 ' + current_mask + ' -sub ' + current_mask_inv + ' -datatype bit') - shell_weights = [] - iteration += 1 - for index in range(0, shell_count): - stats_inside = image.statistics('shell-{:02d}.mif'.format(index), mask=current_mask) - stats_outside = image.statistics('shell-{:02d}.mif'.format(index), mask=current_mask_inv) - variance = (((stats_inside.count - 1) * stats_inside.std * stats_inside.std) \ - + ((stats_outside.count - 1) * stats_outside.std * stats_outside.std)) \ - / (stats_inside.count + stats_outside.count - 2) - cohen_d = (stats_inside.mean - stats_outside.mean) / math.sqrt(variance) - shell_weights.append(cohen_d) - mask_path = 'mask-{:02d}.mif'.format(iteration) - run.command('mrcalc shell-00.mif ' + str(shell_weights[0]) + ' -mult ' - + ' -add '.join(filepath + ' ' + str(weight) + ' -mult' for filepath, weight in 
zip(files[1:], shell_weights[1:])) - + ' -add - |' - + ' mrthreshold - - |' - + ' maskfilter - connect -largest - |' - + ' maskfilter - fill - |' - + ' maskfilter - clean -scale ' + str(app.ARGS.clean_scale) + ' - |' - + ' mrcalc input_pos_mask.mif - -mult ' + mask_path + ' -datatype bit') - mask_mismatch_path = 'mask_mismatch-{:02d}.mif'.format(iteration) - run.command('mrcalc ' + current_mask + ' ' + mask_path + ' -sub -abs ' + mask_mismatch_path) - if not image.statistics(mask_mismatch_path).mean: - app.console('Terminating optimisation due to convergence of masks between iterations') - return mask_path - if iteration == app.ARGS.max_iters: - app.console('Terminating optimisation due to maximum number of iterations') - return mask_path - current_mask = mask_path - - assert False diff --git a/python/mrtrix3/dwi2mask/trace/execute.py b/python/mrtrix3/dwi2mask/trace/execute.py new file mode 100644 index 0000000000..dc6da0ba9d --- /dev/null +++ b/python/mrtrix3/dwi2mask/trace/execute.py @@ -0,0 +1,95 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import math, os +from mrtrix3 import app, image, run + +def execute(): #pylint: disable=unused-variable + + if app.ARGS.shells: + run.command('dwiextract input.mif input_shells.mif -shells ' + app.ARGS.shells) + run.command('dwishellmath input_shells.mif mean shell_traces.mif') + else: + run.command('dwishellmath input.mif mean shell_traces.mif') + + run.command('mrconvert shell_traces.mif -coord 3 0 -axes 0,1,2 shell-00.mif') + + # run per-shell histogram matching + files = ['shell-00.mif'] + shell_count = image.Header('shell_traces.mif').size()[-1] + progress = app.ProgressBar('Performing per-shell histogram matching', shell_count-1) + for index in range(1, shell_count): + filename = 'shell-{:02d}.mif'.format(index) + run.command('mrconvert shell_traces.mif -coord 3 ' + str(index) + ' -axes 0,1,2 - | ' + 'mrhistmatch scale - shell-00.mif ' + filename) + files.append(filename) + progress.increment() + progress.done() + + # concatenate intensity-matched shells, and perform standard cleaning + run.command(['mrmath', files, 'mean', '-', '|', + 'mrthreshold', '-', '-', '|', + 'maskfilter', '-', 'connect', '-largest', '-', '|', + 'maskfilter', '-', 'fill', '-', '|', + 'maskfilter', '-', 'clean', '-scale', str(app.ARGS.clean_scale), 'init_mask.mif']) + + if not app.ARGS.iterative: + return 'init_mask.mif' + + # The per-shell histogram matching should be only the first pass + # Once an initial mask has been derived, the weights with which the different + # shells should be revised, based on how well each shell separates brain from + # non-brain + # Each shell trace image has a mean and standard deviation inside the mask, and + # a mean and standard deviation outside the mask + # A shell that provides a strong separation between within-mask and outside-mask + # intensities should have a greater contribution toward the combined 
image + # Cohen's d would be an appropriate per-shell weight + + current_mask = 'init_mask.mif' + iteration = 0 + while True: + current_mask_inv = os.path.splitext(current_mask)[0] + '_inv.mif' + run.command('mrcalc 1 ' + current_mask + ' -sub ' + current_mask_inv + ' -datatype bit') + shell_weights = [] + iteration += 1 + for index in range(0, shell_count): + stats_inside = image.statistics('shell-{:02d}.mif'.format(index), mask=current_mask) + stats_outside = image.statistics('shell-{:02d}.mif'.format(index), mask=current_mask_inv) + variance = (((stats_inside.count - 1) * stats_inside.std * stats_inside.std) \ + + ((stats_outside.count - 1) * stats_outside.std * stats_outside.std)) \ + / (stats_inside.count + stats_outside.count - 2) + cohen_d = (stats_inside.mean - stats_outside.mean) / math.sqrt(variance) + shell_weights.append(cohen_d) + mask_path = 'mask-{:02d}.mif'.format(iteration) + run.command('mrcalc shell-00.mif ' + str(shell_weights[0]) + ' -mult ' + + ' -add '.join(filepath + ' ' + str(weight) + ' -mult' for filepath, weight in zip(files[1:], shell_weights[1:])) + + ' -add - |' + + ' mrthreshold - - |' + + ' maskfilter - connect -largest - |' + + ' maskfilter - fill - |' + + ' maskfilter - clean -scale ' + str(app.ARGS.clean_scale) + ' - |' + + ' mrcalc input_pos_mask.mif - -mult ' + mask_path + ' -datatype bit') + mask_mismatch_path = 'mask_mismatch-{:02d}.mif'.format(iteration) + run.command('mrcalc ' + current_mask + ' ' + mask_path + ' -sub -abs ' + mask_mismatch_path) + if not image.statistics(mask_mismatch_path).mean: + app.console('Terminating optimisation due to convergence of masks between iterations') + return mask_path + if iteration == app.ARGS.max_iters: + app.console('Terminating optimisation due to maximum number of iterations') + return mask_path + current_mask = mask_path + + assert False diff --git a/python/mrtrix3/dwi2mask/trace/get_inputs.py b/python/mrtrix3/dwi2mask/trace/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwi2mask/trace/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwi2mask/trace/usage.py b/python/mrtrix3/dwi2mask/trace/usage.py new file mode 100644 index 0000000000..f59446a3a7 --- /dev/null +++ b/python/mrtrix3/dwi2mask/trace/usage.py @@ -0,0 +1,36 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
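The iterative branch of the 'trace' algorithm above weights each shell's trace image by Cohen's d between within-mask and outside-mask intensities before re-thresholding. A small numeric sketch of that weight, using invented summary statistics but the same pooled-variance formula as the script:

import math

# Hypothetical statistics for one shell's trace image,
# inside and outside the current mask estimate
inside_mean, inside_std, inside_n = 850.0, 120.0, 52000
outside_mean, outside_std, outside_n = 95.0, 60.0, 410000

# Pooled variance, weighted by (n - 1) as in the script
pooled_var = ((inside_n - 1) * inside_std ** 2
              + (outside_n - 1) * outside_std ** 2) / (inside_n + outside_n - 2)

# Cohen's d: standardised separation between the two distributions;
# a larger value gives this shell more weight in the combined image
cohen_d = (inside_mean - outside_mean) / math.sqrt(pooled_var)   # ~10.9 here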
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from . import DEFAULT_CLEAN_SCALE, DEFAULT_MAX_ITERS + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('trace', parents=[base_parser]) + parser.set_author('Warda Syeda (wtsyeda@unimelb.edu.au) and Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('A method to generate a brain mask from trace images of b-value shells') + parser.add_argument('input', help='The input DWI series') + parser.add_argument('output', help='The output mask image') + options = parser.add_argument_group('Options specific to the \'trace\' algorithm') + options.add_argument('-shells', help='Comma separated list of shells used to generate trace-weighted images for masking') + options.add_argument('-clean_scale', + type=int, + default=DEFAULT_CLEAN_SCALE, + help='the maximum scale used to cut bridges. A certain maximum scale cuts ' + 'bridges up to a width (in voxels) of 2x the provided scale. Setting ' + 'this to 0 disables the mask cleaning step. (Default: ' + str(DEFAULT_CLEAN_SCALE) + ')') + iter_options = parser.add_argument_group('Options for turning \'dwi2mask trace\' into an iterative algorithm') + iter_options.add_argument('-iterative', + action='store_true', + help='(EXPERIMENTAL) Iteratively refine the weights for combination of per-shell trace-weighted images prior to thresholding') + iter_options.add_argument('-max_iters', type=int, default=DEFAULT_MAX_ITERS, help='Set the maximum number of iterations for the algorithm (default: ' + str(DEFAULT_MAX_ITERS) + ')') diff --git a/python/mrtrix3/dwi2mask/usage.py b/python/mrtrix3/dwi2mask/usage.py new file mode 100644 index 0000000000..56f0782be7 --- /dev/null +++ b/python/mrtrix3/dwi2mask/usage.py @@ -0,0 +1,32 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import algorithm, app, _version #pylint: disable=no-name-in-module + +def usage(cmdline): #pylint: disable=unused-variable + + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Warda Syeda (wtsyeda@unimelb.edu.au)') + cmdline.set_synopsis('Generate a binary mask from DWI data') + cmdline.add_description('This script serves as an interface for many different algorithms that generate a binary mask from DWI data in different ways. ' + 'Each algorithm available has its own help page, including necessary references; e.g. 
to see the help page of the \'fslbet\' algorithm, type \'dwi2mask fslbet\'.') + cmdline.add_description('More information on mask derivation from DWI data can be found at the following link: \n' + 'https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/dwi_preprocessing/masking.html') + + # General options + #common_options = cmdline.add_argument_group('General dwi2mask options') + app.add_dwgrad_import_options(cmdline) + + # Import the command-line settings for all algorithms found in the relevant directory + algorithm.usage(cmdline) diff --git a/python/mrtrix3/dwi2response/__init__.py b/python/mrtrix3/dwi2response/__init__.py index 768914190d..e69de29bb2 100644 --- a/python/mrtrix3/dwi2response/__init__.py +++ b/python/mrtrix3/dwi2response/__init__.py @@ -1,120 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import algorithm, app, _version #pylint: disable=no-name-in-module, import-outside-toplevel - - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Thijs Dhollander (thijs.dhollander@gmail.com)') - cmdline.set_synopsis('Estimate response function(s) for spherical deconvolution') - cmdline.add_description('dwi2response offers different algorithms for performing various types of response function estimation. The name of the algorithm must appear as the first argument on the command-line after \'dwi2response\'. The subsequent arguments and options depend on the particular algorithm being invoked.') - cmdline.add_description('Each algorithm available has its own help page, including necessary references; e.g. to see the help page of the \'fa\' algorithm, type \'dwi2response fa\'.') - cmdline.add_description('More information on response function estimation for spherical deconvolution can be found at the following link: \n' - 'https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/constrained_spherical_deconvolution/response_function_estimation.html') - cmdline.add_description('Note that if the -mask command-line option is not specified, the MRtrix3 command dwi2mask will automatically be called to ' - 'derive an initial voxel exclusion mask. 
' - 'More information on mask derivation from DWI data can be found at: ' - 'https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/dwi_preprocessing/masking.html') - - # General options - common_options = cmdline.add_argument_group('General dwi2response options') - common_options.add_argument('-mask', help='Provide an initial mask for response voxel selection') - common_options.add_argument('-voxels', help='Output an image showing the final voxel selection(s)') - common_options.add_argument('-shells', help='The b-value(s) to use in response function estimation (comma-separated list in case of multiple b-values, b=0 must be included explicitly)') - common_options.add_argument('-lmax', help='The maximum harmonic degree(s) for response function estimation (comma-separated list in case of multiple b-values)') - app.add_dwgrad_import_options(cmdline) - - # Import the command-line settings for all algorithms found in the relevant directory - algorithm.usage(cmdline) - - - - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - - # Find out which algorithm the user has requested - alg = algorithm.get(app.ARGS.algorithm) - - # Check for prior existence of output files, and grab any input files, used by the particular algorithm - if app.ARGS.voxels: - app.check_output_path(app.ARGS.voxels) - alg.check_output_paths() - - # Sanitise some inputs, and get ready for data import - if app.ARGS.lmax: - try: - lmax = [ int(x) for x in app.ARGS.lmax.split(',') ] - if any(lmax_value%2 for lmax_value in lmax): - raise MRtrixError('Value of lmax must be even') - except ValueError as exception: - raise MRtrixError('Parameter lmax must be a number') from exception - if alg.needs_single_shell() and not len(lmax) == 1: - raise MRtrixError('Can only specify a single lmax value for single-shell algorithms') - shells_option = '' - if app.ARGS.shells: - try: - shells_values = [ int(round(float(x))) for x in app.ARGS.shells.split(',') ] - except ValueError as exception: - raise MRtrixError('-shells option should provide a comma-separated list of b-values') from exception - if alg.needs_single_shell() and not len(shells_values) == 1: - raise MRtrixError('Can only specify a single b-value shell for single-shell algorithms') - shells_option = ' -shells ' + app.ARGS.shells - singleshell_option = '' - if alg.needs_single_shell(): - singleshell_option = ' -singleshell -no_bzero' - - grad_import_option = app.read_dwgrad_import_options() - if not grad_import_option and 'dw_scheme' not in image.Header(path.from_user(app.ARGS.input, False)).keyval(): - raise MRtrixError('Script requires diffusion gradient table: either in image header, or using -grad / -fslgrad option') - - app.make_scratch_dir() - - # Get standard input data into the scratch directory - if alg.needs_single_shell() or shells_option: - app.console('Importing DWI data (' + path.from_user(app.ARGS.input) + ') and selecting b-values...') - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' - -strides 0,0,0,1' + grad_import_option + ' | dwiextract - ' + path.to_scratch('dwi.mif') + shells_option + singleshell_option, show=False) - else: # Don't discard b=0 in multi-shell algorithms - app.console('Importing DWI data (' + path.from_user(app.ARGS.input) + ')...') - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + 
path.to_scratch('dwi.mif') + ' -strides 0,0,0,1' + grad_import_option, show=False) - if app.ARGS.mask: - app.console('Importing mask (' + path.from_user(app.ARGS.mask) + ')...') - run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit', show=False) - - alg.get_inputs() - - app.goto_scratch_dir() - - if alg.supports_mask(): - if app.ARGS.mask: - # Check that the brain mask is appropriate - mask_header = image.Header('mask.mif') - if mask_header.size()[:3] != image.Header('dwi.mif').size()[:3]: - raise MRtrixError('Dimensions of provided mask image do not match DWI') - if not (len(mask_header.size()) == 3 or (len(mask_header.size()) == 4 and mask_header.size()[3] == 1)): - raise MRtrixError('Provided mask image needs to be a 3D image') - else: - app.console('Computing brain mask (dwi2mask)...') - run.command('dwi2mask ' + CONFIG['Dwi2maskAlgorithm'] + ' dwi.mif mask.mif', show=False) - - if not image.statistics('mask.mif', mask='mask.mif').count: - raise MRtrixError(('Provided' if app.ARGS.mask else 'Generated') + ' mask image does not contain any voxels') - - # From here, the script splits depending on what estimation algorithm is being used - alg.execute() diff --git a/python/mrtrix3/dwi2response/dhollander/__init__.py b/python/mrtrix3/dwi2response/dhollander/__init__.py index faaedecd68..edbcecf5cd 100644 --- a/python/mrtrix3/dwi2response/dhollander/__init__.py +++ b/python/mrtrix3/dwi2response/dhollander/__init__.py @@ -13,285 +13,6 @@ # # For more details, see http://www.mrtrix.org/. - -import math, shlex, shutil -from mrtrix3 import CONFIG, MRtrixError -from mrtrix3 import app, image, path, run - - +NEEDS_SINGLE_SHELL = False +SUPPORTS_MASK = True WM_ALGOS = [ 'fa', 'tax', 'tournier' ] - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('dhollander', parents=[base_parser]) - parser.set_author('Thijs Dhollander (thijs.dhollander@gmail.com)') - parser.set_synopsis('Unsupervised estimation of WM, GM and CSF response functions that does not require a T1 image (or segmentation thereof)') - parser.add_description('This is an improved version of the Dhollander et al. (2016) algorithm for unsupervised estimation of WM, GM and CSF response functions, which includes the Dhollander et al. (2019) improvements for single-fibre WM response function estimation (prior to this update, the "dwi2response tournier" algorithm had been utilised specifically for the single-fibre WM response function estimation step).') - parser.add_citation('Dhollander, T.; Raffelt, D. & Connelly, A. Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5') - parser.add_citation('Dhollander, T.; Mito, R.; Raffelt, D. & Connelly, A. Improved white matter response function estimation for 3-tissue constrained spherical deconvolution. 
Proc Intl Soc Mag Reson Med, 2019, 555', - condition='If -wm_algo option is not used') - parser.add_argument('input', help='Input DWI dataset') - parser.add_argument('out_sfwm', help='Output single-fibre WM response function text file') - parser.add_argument('out_gm', help='Output GM response function text file') - parser.add_argument('out_csf', help='Output CSF response function text file') - options = parser.add_argument_group('Options for the \'dhollander\' algorithm') - options.add_argument('-erode', type=int, default=3, help='Number of erosion passes to apply to initial (whole brain) mask. Set to 0 to not erode the brain mask. (default: 3)') - options.add_argument('-fa', type=float, default=0.2, help='FA threshold for crude WM versus GM-CSF separation. (default: 0.2)') - options.add_argument('-sfwm', type=float, default=0.5, help='Final number of single-fibre WM voxels to select, as a percentage of refined WM. (default: 0.5 per cent)') - options.add_argument('-gm', type=float, default=2.0, help='Final number of GM voxels to select, as a percentage of refined GM. (default: 2 per cent)') - options.add_argument('-csf', type=float, default=10.0, help='Final number of CSF voxels to select, as a percentage of refined CSF. (default: 10 per cent)') - options.add_argument('-wm_algo', metavar='algorithm', choices=WM_ALGOS, help='Use external dwi2response algorithm for WM single-fibre voxel selection (options: ' + ', '.join(WM_ALGOS) + ') (default: built-in Dhollander 2019)') - - - -def check_output_paths(): #pylint: disable=unused-variable - app.check_output_path(app.ARGS.out_sfwm) - app.check_output_path(app.ARGS.out_gm) - app.check_output_path(app.ARGS.out_csf) - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def needs_single_shell(): #pylint: disable=unused-variable - return False - - - -def supports_mask(): #pylint: disable=unused-variable - return True - - - -def execute(): #pylint: disable=unused-variable - bzero_threshold = float(CONFIG['BZeroThreshold']) if 'BZeroThreshold' in CONFIG else 10.0 - - - # CHECK INPUTS AND OPTIONS - app.console('-------') - - # Get b-values and number of volumes per b-value. - bvalues = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] - bvolumes = [ int(x) for x in image.mrinfo('dwi.mif', 'shell_sizes').split() ] - app.console(str(len(bvalues)) + ' unique b-value(s) detected: ' + ','.join(map(str,bvalues)) + ' with ' + ','.join(map(str,bvolumes)) + ' volumes') - if len(bvalues) < 2: - raise MRtrixError('Need at least 2 unique b-values (including b=0).') - bvalues_option = ' -shells ' + ','.join(map(str,bvalues)) - - # Get lmax information (if provided). - sfwm_lmax = [ ] - if app.ARGS.lmax: - sfwm_lmax = [ int(x.strip()) for x in app.ARGS.lmax.split(',') ] - if not len(sfwm_lmax) == len(bvalues): - raise MRtrixError('Number of lmax\'s (' + str(len(sfwm_lmax)) + ', as supplied to the -lmax option: ' + ','.join(map(str,sfwm_lmax)) + ') does not match number of unique b-values.') - for sfl in sfwm_lmax: - if sfl%2: - raise MRtrixError('Values supplied to the -lmax option must be even.') - if sfl<0: - raise MRtrixError('Values supplied to the -lmax option must be non-negative.') - sfwm_lmax_option = '' - if sfwm_lmax: - sfwm_lmax_option = ' -lmax ' + ','.join(map(str,sfwm_lmax)) - - - # PREPARATION - app.console('-------') - app.console('Preparation:') - - # Erode (brain) mask. 
- if app.ARGS.erode > 0: - app.console('* Eroding brain mask by ' + str(app.ARGS.erode) + ' pass(es)...') - run.command('maskfilter mask.mif erode eroded_mask.mif -npass ' + str(app.ARGS.erode), show=False) - else: - app.console('Not eroding brain mask.') - run.command('mrconvert mask.mif eroded_mask.mif -datatype bit', show=False) - statmaskcount = image.statistics('mask.mif', mask='mask.mif').count - statemaskcount = image.statistics('eroded_mask.mif', mask='eroded_mask.mif').count - app.console(' [ mask: ' + str(statmaskcount) + ' -> ' + str(statemaskcount) + ' ]') - - # Get volumes, compute mean signal and SDM per b-value; compute overall SDM; get rid of erroneous values. - app.console('* Computing signal decay metric (SDM):') - totvolumes = 0 - fullsdmcmd = 'mrcalc' - errcmd = 'mrcalc' - zeropath = 'mean_b' + str(bvalues[0]) + '.mif' - for ibv, bval in enumerate(bvalues): - app.console(' * b=' + str(bval) + '...') - meanpath = 'mean_b' + str(bval) + '.mif' - run.command('dwiextract dwi.mif -shells ' + str(bval) + ' - | mrcalc - 0 -max - | mrmath - mean ' + meanpath + ' -axis 3', show=False) - errpath = 'err_b' + str(bval) + '.mif' - run.command('mrcalc ' + meanpath + ' -finite ' + meanpath + ' 0 -if 0 -le ' + errpath + ' -datatype bit', show=False) - errcmd += ' ' + errpath - if ibv>0: - errcmd += ' -add' - sdmpath = 'sdm_b' + str(bval) + '.mif' - run.command('mrcalc ' + zeropath + ' ' + meanpath + ' -divide -log ' + sdmpath, show=False) - totvolumes += bvolumes[ibv] - fullsdmcmd += ' ' + sdmpath + ' ' + str(bvolumes[ibv]) + ' -mult' - if ibv>1: - fullsdmcmd += ' -add' - fullsdmcmd += ' ' + str(totvolumes) + ' -divide full_sdm.mif' - run.command(fullsdmcmd, show=False) - app.console('* Removing erroneous voxels from mask and correcting SDM...') - run.command('mrcalc full_sdm.mif -finite full_sdm.mif 0 -if 0 -le err_sdm.mif -datatype bit', show=False) - errcmd += ' err_sdm.mif -add 0 eroded_mask.mif -if safe_mask.mif -datatype bit' - run.command(errcmd, show=False) - run.command('mrcalc safe_mask.mif full_sdm.mif 0 -if 10 -min safe_sdm.mif', show=False) - statsmaskcount = image.statistics('safe_mask.mif', mask='safe_mask.mif').count - app.console(' [ mask: ' + str(statemaskcount) + ' -> ' + str(statsmaskcount) + ' ]') - - - # CRUDE SEGMENTATION - app.console('-------') - app.console('Crude segmentation:') - - # Compute FA and principal eigenvectors; crude WM versus GM-CSF separation based on FA. - app.console('* Crude WM versus GM-CSF separation (at FA=' + str(app.ARGS.fa) + ')...') - run.command('dwi2tensor dwi.mif - -mask safe_mask.mif | tensor2metric - -fa safe_fa.mif -vector safe_vecs.mif -modulate none -mask safe_mask.mif', show=False) - run.command('mrcalc safe_mask.mif safe_fa.mif 0 -if ' + str(app.ARGS.fa) + ' -gt crude_wm.mif -datatype bit', show=False) - run.command('mrcalc crude_wm.mif 0 safe_mask.mif -if _crudenonwm.mif -datatype bit', show=False) - statcrudewmcount = image.statistics('crude_wm.mif', mask='crude_wm.mif').count - statcrudenonwmcount = image.statistics('_crudenonwm.mif', mask='_crudenonwm.mif').count - app.console(' [ ' + str(statsmaskcount) + ' -> ' + str(statcrudewmcount) + ' (WM) & ' + str(statcrudenonwmcount) + ' (GM-CSF) ]') - - # Crude GM versus CSF separation based on SDM. 
- app.console('* Crude GM versus CSF separation...') - crudenonwmmedian = image.statistics('safe_sdm.mif', mask='_crudenonwm.mif').median - run.command('mrcalc _crudenonwm.mif safe_sdm.mif ' + str(crudenonwmmedian) + ' -subtract 0 -if - | mrthreshold - - -mask _crudenonwm.mif | mrcalc _crudenonwm.mif - 0 -if crude_csf.mif -datatype bit', show=False) - run.command('mrcalc crude_csf.mif 0 _crudenonwm.mif -if crude_gm.mif -datatype bit', show=False) - statcrudegmcount = image.statistics('crude_gm.mif', mask='crude_gm.mif').count - statcrudecsfcount = image.statistics('crude_csf.mif', mask='crude_csf.mif').count - app.console(' [ ' + str(statcrudenonwmcount) + ' -> ' + str(statcrudegmcount) + ' (GM) & ' + str(statcrudecsfcount) + ' (CSF) ]') - - - # REFINED SEGMENTATION - app.console('-------') - app.console('Refined segmentation:') - - # Refine WM: remove high SDM outliers. - app.console('* Refining WM...') - crudewmmedian = image.statistics('safe_sdm.mif', mask='crude_wm.mif').median - run.command('mrcalc crude_wm.mif safe_sdm.mif ' + str(crudewmmedian) + ' -subtract -abs 0 -if _crudewm_sdmad.mif', show=False) - crudewmmad = image.statistics('_crudewm_sdmad.mif', mask='crude_wm.mif').median - crudewmoutlthresh = crudewmmedian + (1.4826 * crudewmmad * 2.0) - run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' + str(crudewmoutlthresh) + ' -gt _crudewmoutliers.mif -datatype bit', show=False) - run.command('mrcalc _crudewmoutliers.mif 0 crude_wm.mif -if refined_wm.mif -datatype bit', show=False) - statrefwmcount = image.statistics('refined_wm.mif', mask='refined_wm.mif').count - app.console(' [ WM: ' + str(statcrudewmcount) + ' -> ' + str(statrefwmcount) + ' ]') - - # Refine GM: separate safer GM from partial volumed voxels. - app.console('* Refining GM...') - crudegmmedian = image.statistics('safe_sdm.mif', mask='crude_gm.mif').median - run.command('mrcalc crude_gm.mif safe_sdm.mif 0 -if ' + str(crudegmmedian) + ' -gt _crudegmhigh.mif -datatype bit', show=False) - run.command('mrcalc _crudegmhigh.mif 0 crude_gm.mif -if _crudegmlow.mif -datatype bit', show=False) - run.command('mrcalc _crudegmhigh.mif safe_sdm.mif ' + str(crudegmmedian) + ' -subtract 0 -if - | mrthreshold - - -mask _crudegmhigh.mif -invert | mrcalc _crudegmhigh.mif - 0 -if _crudegmhighselect.mif -datatype bit', show=False) - run.command('mrcalc _crudegmlow.mif safe_sdm.mif ' + str(crudegmmedian) + ' -subtract -neg 0 -if - | mrthreshold - - -mask _crudegmlow.mif -invert | mrcalc _crudegmlow.mif - 0 -if _crudegmlowselect.mif -datatype bit', show=False) - run.command('mrcalc _crudegmhighselect.mif 1 _crudegmlowselect.mif -if refined_gm.mif -datatype bit', show=False) - statrefgmcount = image.statistics('refined_gm.mif', mask='refined_gm.mif').count - app.console(' [ GM: ' + str(statcrudegmcount) + ' -> ' + str(statrefgmcount) + ' ]') - - # Refine CSF: recover lost CSF from crude WM SDM outliers, separate safer CSF from partial volumed voxels. 
- app.console('* Refining CSF...') - crudecsfmin = image.statistics('safe_sdm.mif', mask='crude_csf.mif').min - run.command('mrcalc _crudewmoutliers.mif safe_sdm.mif 0 -if ' + str(crudecsfmin) + ' -gt 1 crude_csf.mif -if _crudecsfextra.mif -datatype bit', show=False) - run.command('mrcalc _crudecsfextra.mif safe_sdm.mif ' + str(crudecsfmin) + ' -subtract 0 -if - | mrthreshold - - -mask _crudecsfextra.mif | mrcalc _crudecsfextra.mif - 0 -if refined_csf.mif -datatype bit', show=False) - statrefcsfcount = image.statistics('refined_csf.mif', mask='refined_csf.mif').count - app.console(' [ CSF: ' + str(statcrudecsfcount) + ' -> ' + str(statrefcsfcount) + ' ]') - - - # FINAL VOXEL SELECTION AND RESPONSE FUNCTION ESTIMATION - app.console('-------') - app.console('Final voxel selection and response function estimation:') - - # Get final voxels for CSF response function estimation from refined CSF. - app.console('* CSF:') - app.console(' * Selecting final voxels (' + str(app.ARGS.csf) + '% of refined CSF)...') - voxcsfcount = int(round(statrefcsfcount * app.ARGS.csf / 100.0)) - run.command('mrcalc refined_csf.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' + str(voxcsfcount) + ' -ignorezero | mrcalc refined_csf.mif - 0 -if - -datatype bit | mrconvert - voxels_csf.mif -axes 0,1,2', show=False) - statvoxcsfcount = image.statistics('voxels_csf.mif', mask='voxels_csf.mif').count - app.console(' [ CSF: ' + str(statrefcsfcount) + ' -> ' + str(statvoxcsfcount) + ' ]') - # Estimate CSF response function - app.console(' * Estimating response function...') - run.command('amp2response dwi.mif voxels_csf.mif safe_vecs.mif response_csf.txt' + bvalues_option + ' -isotropic', show=False) - - # Get final voxels for GM response function estimation from refined GM. - app.console('* GM:') - app.console(' * Selecting final voxels (' + str(app.ARGS.gm) + '% of refined GM)...') - voxgmcount = int(round(statrefgmcount * app.ARGS.gm / 100.0)) - refgmmedian = image.statistics('safe_sdm.mif', mask='refined_gm.mif').median - run.command('mrcalc refined_gm.mif safe_sdm.mif ' + str(refgmmedian) + ' -subtract -abs 1 -add 0 -if - | mrthreshold - - -bottom ' + str(voxgmcount) + ' -ignorezero | mrcalc refined_gm.mif - 0 -if - -datatype bit | mrconvert - voxels_gm.mif -axes 0,1,2', show=False) - statvoxgmcount = image.statistics('voxels_gm.mif', mask='voxels_gm.mif').count - app.console(' [ GM: ' + str(statrefgmcount) + ' -> ' + str(statvoxgmcount) + ' ]') - # Estimate GM response function - app.console(' * Estimating response function...') - run.command('amp2response dwi.mif voxels_gm.mif safe_vecs.mif response_gm.txt' + bvalues_option + ' -isotropic', show=False) - - # Get final voxels for single-fibre WM response function estimation from refined WM. 
- app.console('* Single-fibre WM:') - app.console(' * Selecting final voxels' - + ('' if app.ARGS.wm_algo == 'tax' else (' ('+ str(app.ARGS.sfwm) + '% of refined WM)')) - + '...') - voxsfwmcount = int(round(statrefwmcount * app.ARGS.sfwm / 100.0)) - - if app.ARGS.wm_algo: - recursive_cleanup_option='' - if not app.DO_CLEANUP: - recursive_cleanup_option = ' -nocleanup' - app.console(' Selecting WM single-fibre voxels using \'' + app.ARGS.wm_algo + '\' algorithm') - if app.ARGS.wm_algo == 'tax' and app.ARGS.sfwm != 0.5: - app.warn('Single-fibre WM response function selection algorithm "tax" will not honour requested WM voxel percentage') - run.command('dwi2response ' + app.ARGS.wm_algo + ' dwi.mif _respsfwmss.txt -mask refined_wm.mif -voxels voxels_sfwm.mif' - + ('' if app.ARGS.wm_algo == 'tax' else (' -number ' + str(voxsfwmcount))) - + ' -scratch ' + shlex.quote(app.SCRATCH_DIR) - + recursive_cleanup_option, - show=False) - else: - app.console(' Selecting WM single-fibre voxels using built-in (Dhollander et al., 2019) algorithm') - run.command('mrmath dwi.mif mean mean_sig.mif -axis 3', show=False) - refwmcoef = image.statistics('mean_sig.mif', mask='refined_wm.mif').median * math.sqrt(4.0 * math.pi) - if sfwm_lmax: - isiso = [ lm == 0 for lm in sfwm_lmax ] - else: - isiso = [ bv < bzero_threshold for bv in bvalues ] - with open('ewmrf.txt', 'w', encoding='utf-8') as ewr: - for iis in isiso: - if iis: - ewr.write("%s 0 0 0\n" % refwmcoef) - else: - ewr.write("%s -%s %s -%s\n" % (refwmcoef, refwmcoef, refwmcoef, refwmcoef)) - run.command('dwi2fod msmt_csd dwi.mif ewmrf.txt abs_ewm2.mif response_csf.txt abs_csf2.mif -mask refined_wm.mif -lmax 2,0' + bvalues_option, show=False) - run.command('mrconvert abs_ewm2.mif - -coord 3 0 | mrcalc - abs_csf2.mif -add abs_sum2.mif', show=False) - run.command('sh2peaks abs_ewm2.mif - -num 1 -mask refined_wm.mif | peaks2amp - - | mrcalc - abs_sum2.mif -divide - | mrconvert - metric_sfwm2.mif -coord 3 0 -axes 0,1,2', show=False) - run.command('mrcalc refined_wm.mif metric_sfwm2.mif 0 -if - | mrthreshold - - -top ' + str(voxsfwmcount * 2) + ' -ignorezero | mrcalc refined_wm.mif - 0 -if - -datatype bit | mrconvert - refined_sfwm.mif -axes 0,1,2', show=False) - run.command('dwi2fod msmt_csd dwi.mif ewmrf.txt abs_ewm6.mif response_csf.txt abs_csf6.mif -mask refined_sfwm.mif -lmax 6,0' + bvalues_option, show=False) - run.command('mrconvert abs_ewm6.mif - -coord 3 0 | mrcalc - abs_csf6.mif -add abs_sum6.mif', show=False) - run.command('sh2peaks abs_ewm6.mif - -num 1 -mask refined_sfwm.mif | peaks2amp - - | mrcalc - abs_sum6.mif -divide - | mrconvert - metric_sfwm6.mif -coord 3 0 -axes 0,1,2', show=False) - run.command('mrcalc refined_sfwm.mif metric_sfwm6.mif 0 -if - | mrthreshold - - -top ' + str(voxsfwmcount) + ' -ignorezero | mrcalc refined_sfwm.mif - 0 -if - -datatype bit | mrconvert - voxels_sfwm.mif -axes 0,1,2', show=False) - - statvoxsfwmcount = image.statistics('voxels_sfwm.mif', mask='voxels_sfwm.mif').count - app.console(' [ WM: ' + str(statrefwmcount) + ' -> ' + str(statvoxsfwmcount) + ' (single-fibre) ]') - # Estimate SF WM response function - app.console(' * Estimating response function...') - run.command('amp2response dwi.mif voxels_sfwm.mif safe_vecs.mif response_sfwm.txt' + bvalues_option + sfwm_lmax_option, show=False) - - - # OUTPUT AND SUMMARY - app.console('-------') - app.console('Generating outputs...') - - # Generate 4D binary images with voxel selections at major stages in algorithm (RGB: WM=blue, GM=green, CSF=red). 
- run.command('mrcat crude_csf.mif crude_gm.mif crude_wm.mif check_crude.mif -axis 3', show=False) - run.command('mrcat refined_csf.mif refined_gm.mif refined_wm.mif check_refined.mif -axis 3', show=False) - run.command('mrcat voxels_csf.mif voxels_gm.mif voxels_sfwm.mif check_voxels.mif -axis 3', show=False) - - # Copy results to output files - run.function(shutil.copyfile, 'response_sfwm.txt', path.from_user(app.ARGS.out_sfwm, False), show=False) - run.function(shutil.copyfile, 'response_gm.txt', path.from_user(app.ARGS.out_gm, False), show=False) - run.function(shutil.copyfile, 'response_csf.txt', path.from_user(app.ARGS.out_csf, False), show=False) - if app.ARGS.voxels: - run.command('mrconvert check_voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE, show=False) - app.console('-------') diff --git a/python/mrtrix3/dwi2response/dhollander/check_output_paths.py b/python/mrtrix3/dwi2response/dhollander/check_output_paths.py new file mode 100644 index 0000000000..08fcd6e484 --- /dev/null +++ b/python/mrtrix3/dwi2response/dhollander/check_output_paths.py @@ -0,0 +1,21 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.out_sfwm) + app.check_output_path(app.ARGS.out_gm) + app.check_output_path(app.ARGS.out_csf) diff --git a/python/mrtrix3/dwi2response/dhollander/execute.py b/python/mrtrix3/dwi2response/dhollander/execute.py new file mode 100644 index 0000000000..5e69f401a5 --- /dev/null +++ b/python/mrtrix3/dwi2response/dhollander/execute.py @@ -0,0 +1,246 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import math, shlex, shutil +from mrtrix3 import CONFIG, MRtrixError +from mrtrix3 import app, image, path, run + +def execute(): #pylint: disable=unused-variable + bzero_threshold = float(CONFIG['BZeroThreshold']) if 'BZeroThreshold' in CONFIG else 10.0 + + # CHECK INPUTS AND OPTIONS + app.console('-------') + + # Get b-values and number of volumes per b-value. 
+ bvalues = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] + bvolumes = [ int(x) for x in image.mrinfo('dwi.mif', 'shell_sizes').split() ] + app.console(str(len(bvalues)) + ' unique b-value(s) detected: ' + ','.join(map(str,bvalues)) + ' with ' + ','.join(map(str,bvolumes)) + ' volumes') + if len(bvalues) < 2: + raise MRtrixError('Need at least 2 unique b-values (including b=0).') + bvalues_option = ' -shells ' + ','.join(map(str,bvalues)) + + # Get lmax information (if provided). + sfwm_lmax = [ ] + if app.ARGS.lmax: + sfwm_lmax = [ int(x.strip()) for x in app.ARGS.lmax.split(',') ] + if not len(sfwm_lmax) == len(bvalues): + raise MRtrixError('Number of lmax\'s (' + str(len(sfwm_lmax)) + ', as supplied to the -lmax option: ' + ','.join(map(str,sfwm_lmax)) + ') does not match number of unique b-values.') + for sfl in sfwm_lmax: + if sfl%2: + raise MRtrixError('Values supplied to the -lmax option must be even.') + if sfl<0: + raise MRtrixError('Values supplied to the -lmax option must be non-negative.') + sfwm_lmax_option = '' + if sfwm_lmax: + sfwm_lmax_option = ' -lmax ' + ','.join(map(str,sfwm_lmax)) + + + # PREPARATION + app.console('-------') + app.console('Preparation:') + + # Erode (brain) mask. + if app.ARGS.erode > 0: + app.console('* Eroding brain mask by ' + str(app.ARGS.erode) + ' pass(es)...') + run.command('maskfilter mask.mif erode eroded_mask.mif -npass ' + str(app.ARGS.erode), show=False) + else: + app.console('Not eroding brain mask.') + run.command('mrconvert mask.mif eroded_mask.mif -datatype bit', show=False) + statmaskcount = image.statistics('mask.mif', mask='mask.mif').count + statemaskcount = image.statistics('eroded_mask.mif', mask='eroded_mask.mif').count + app.console(' [ mask: ' + str(statmaskcount) + ' -> ' + str(statemaskcount) + ' ]') + + # Get volumes, compute mean signal and SDM per b-value; compute overall SDM; get rid of erroneous values. 
+ app.console('* Computing signal decay metric (SDM):') + totvolumes = 0 + fullsdmcmd = 'mrcalc' + errcmd = 'mrcalc' + zeropath = 'mean_b' + str(bvalues[0]) + '.mif' + for ibv, bval in enumerate(bvalues): + app.console(' * b=' + str(bval) + '...') + meanpath = 'mean_b' + str(bval) + '.mif' + run.command('dwiextract dwi.mif -shells ' + str(bval) + ' - | mrcalc - 0 -max - | mrmath - mean ' + meanpath + ' -axis 3', show=False) + errpath = 'err_b' + str(bval) + '.mif' + run.command('mrcalc ' + meanpath + ' -finite ' + meanpath + ' 0 -if 0 -le ' + errpath + ' -datatype bit', show=False) + errcmd += ' ' + errpath + if ibv>0: + errcmd += ' -add' + sdmpath = 'sdm_b' + str(bval) + '.mif' + run.command('mrcalc ' + zeropath + ' ' + meanpath + ' -divide -log ' + sdmpath, show=False) + totvolumes += bvolumes[ibv] + fullsdmcmd += ' ' + sdmpath + ' ' + str(bvolumes[ibv]) + ' -mult' + if ibv>1: + fullsdmcmd += ' -add' + fullsdmcmd += ' ' + str(totvolumes) + ' -divide full_sdm.mif' + run.command(fullsdmcmd, show=False) + app.console('* Removing erroneous voxels from mask and correcting SDM...') + run.command('mrcalc full_sdm.mif -finite full_sdm.mif 0 -if 0 -le err_sdm.mif -datatype bit', show=False) + errcmd += ' err_sdm.mif -add 0 eroded_mask.mif -if safe_mask.mif -datatype bit' + run.command(errcmd, show=False) + run.command('mrcalc safe_mask.mif full_sdm.mif 0 -if 10 -min safe_sdm.mif', show=False) + statsmaskcount = image.statistics('safe_mask.mif', mask='safe_mask.mif').count + app.console(' [ mask: ' + str(statemaskcount) + ' -> ' + str(statsmaskcount) + ' ]') + + + # CRUDE SEGMENTATION + app.console('-------') + app.console('Crude segmentation:') + + # Compute FA and principal eigenvectors; crude WM versus GM-CSF separation based on FA. + app.console('* Crude WM versus GM-CSF separation (at FA=' + str(app.ARGS.fa) + ')...') + run.command('dwi2tensor dwi.mif - -mask safe_mask.mif | tensor2metric - -fa safe_fa.mif -vector safe_vecs.mif -modulate none -mask safe_mask.mif', show=False) + run.command('mrcalc safe_mask.mif safe_fa.mif 0 -if ' + str(app.ARGS.fa) + ' -gt crude_wm.mif -datatype bit', show=False) + run.command('mrcalc crude_wm.mif 0 safe_mask.mif -if _crudenonwm.mif -datatype bit', show=False) + statcrudewmcount = image.statistics('crude_wm.mif', mask='crude_wm.mif').count + statcrudenonwmcount = image.statistics('_crudenonwm.mif', mask='_crudenonwm.mif').count + app.console(' [ ' + str(statsmaskcount) + ' -> ' + str(statcrudewmcount) + ' (WM) & ' + str(statcrudenonwmcount) + ' (GM-CSF) ]') + + # Crude GM versus CSF separation based on SDM. + app.console('* Crude GM versus CSF separation...') + crudenonwmmedian = image.statistics('safe_sdm.mif', mask='_crudenonwm.mif').median + run.command('mrcalc _crudenonwm.mif safe_sdm.mif ' + str(crudenonwmmedian) + ' -subtract 0 -if - | mrthreshold - - -mask _crudenonwm.mif | mrcalc _crudenonwm.mif - 0 -if crude_csf.mif -datatype bit', show=False) + run.command('mrcalc crude_csf.mif 0 _crudenonwm.mif -if crude_gm.mif -datatype bit', show=False) + statcrudegmcount = image.statistics('crude_gm.mif', mask='crude_gm.mif').count + statcrudecsfcount = image.statistics('crude_csf.mif', mask='crude_csf.mif').count + app.console(' [ ' + str(statcrudenonwmcount) + ' -> ' + str(statcrudegmcount) + ' (GM) & ' + str(statcrudecsfcount) + ' (CSF) ]') + + + # REFINED SEGMENTATION + app.console('-------') + app.console('Refined segmentation:') + + # Refine WM: remove high SDM outliers. 
+ app.console('* Refining WM...') + crudewmmedian = image.statistics('safe_sdm.mif', mask='crude_wm.mif').median + run.command('mrcalc crude_wm.mif safe_sdm.mif ' + str(crudewmmedian) + ' -subtract -abs 0 -if _crudewm_sdmad.mif', show=False) + crudewmmad = image.statistics('_crudewm_sdmad.mif', mask='crude_wm.mif').median + crudewmoutlthresh = crudewmmedian + (1.4826 * crudewmmad * 2.0) + run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' + str(crudewmoutlthresh) + ' -gt _crudewmoutliers.mif -datatype bit', show=False) + run.command('mrcalc _crudewmoutliers.mif 0 crude_wm.mif -if refined_wm.mif -datatype bit', show=False) + statrefwmcount = image.statistics('refined_wm.mif', mask='refined_wm.mif').count + app.console(' [ WM: ' + str(statcrudewmcount) + ' -> ' + str(statrefwmcount) + ' ]') + + # Refine GM: separate safer GM from partial volumed voxels. + app.console('* Refining GM...') + crudegmmedian = image.statistics('safe_sdm.mif', mask='crude_gm.mif').median + run.command('mrcalc crude_gm.mif safe_sdm.mif 0 -if ' + str(crudegmmedian) + ' -gt _crudegmhigh.mif -datatype bit', show=False) + run.command('mrcalc _crudegmhigh.mif 0 crude_gm.mif -if _crudegmlow.mif -datatype bit', show=False) + run.command('mrcalc _crudegmhigh.mif safe_sdm.mif ' + str(crudegmmedian) + ' -subtract 0 -if - | mrthreshold - - -mask _crudegmhigh.mif -invert | mrcalc _crudegmhigh.mif - 0 -if _crudegmhighselect.mif -datatype bit', show=False) + run.command('mrcalc _crudegmlow.mif safe_sdm.mif ' + str(crudegmmedian) + ' -subtract -neg 0 -if - | mrthreshold - - -mask _crudegmlow.mif -invert | mrcalc _crudegmlow.mif - 0 -if _crudegmlowselect.mif -datatype bit', show=False) + run.command('mrcalc _crudegmhighselect.mif 1 _crudegmlowselect.mif -if refined_gm.mif -datatype bit', show=False) + statrefgmcount = image.statistics('refined_gm.mif', mask='refined_gm.mif').count + app.console(' [ GM: ' + str(statcrudegmcount) + ' -> ' + str(statrefgmcount) + ' ]') + + # Refine CSF: recover lost CSF from crude WM SDM outliers, separate safer CSF from partial volumed voxels. + app.console('* Refining CSF...') + crudecsfmin = image.statistics('safe_sdm.mif', mask='crude_csf.mif').min + run.command('mrcalc _crudewmoutliers.mif safe_sdm.mif 0 -if ' + str(crudecsfmin) + ' -gt 1 crude_csf.mif -if _crudecsfextra.mif -datatype bit', show=False) + run.command('mrcalc _crudecsfextra.mif safe_sdm.mif ' + str(crudecsfmin) + ' -subtract 0 -if - | mrthreshold - - -mask _crudecsfextra.mif | mrcalc _crudecsfextra.mif - 0 -if refined_csf.mif -datatype bit', show=False) + statrefcsfcount = image.statistics('refined_csf.mif', mask='refined_csf.mif').count + app.console(' [ CSF: ' + str(statcrudecsfcount) + ' -> ' + str(statrefcsfcount) + ' ]') + + + # FINAL VOXEL SELECTION AND RESPONSE FUNCTION ESTIMATION + app.console('-------') + app.console('Final voxel selection and response function estimation:') + + # Get final voxels for CSF response function estimation from refined CSF. 
+ app.console('* CSF:') + app.console(' * Selecting final voxels (' + str(app.ARGS.csf) + '% of refined CSF)...') + voxcsfcount = int(round(statrefcsfcount * app.ARGS.csf / 100.0)) + run.command('mrcalc refined_csf.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' + str(voxcsfcount) + ' -ignorezero | mrcalc refined_csf.mif - 0 -if - -datatype bit | mrconvert - voxels_csf.mif -axes 0,1,2', show=False) + statvoxcsfcount = image.statistics('voxels_csf.mif', mask='voxels_csf.mif').count + app.console(' [ CSF: ' + str(statrefcsfcount) + ' -> ' + str(statvoxcsfcount) + ' ]') + # Estimate CSF response function + app.console(' * Estimating response function...') + run.command('amp2response dwi.mif voxels_csf.mif safe_vecs.mif response_csf.txt' + bvalues_option + ' -isotropic', show=False) + + # Get final voxels for GM response function estimation from refined GM. + app.console('* GM:') + app.console(' * Selecting final voxels (' + str(app.ARGS.gm) + '% of refined GM)...') + voxgmcount = int(round(statrefgmcount * app.ARGS.gm / 100.0)) + refgmmedian = image.statistics('safe_sdm.mif', mask='refined_gm.mif').median + run.command('mrcalc refined_gm.mif safe_sdm.mif ' + str(refgmmedian) + ' -subtract -abs 1 -add 0 -if - | mrthreshold - - -bottom ' + str(voxgmcount) + ' -ignorezero | mrcalc refined_gm.mif - 0 -if - -datatype bit | mrconvert - voxels_gm.mif -axes 0,1,2', show=False) + statvoxgmcount = image.statistics('voxels_gm.mif', mask='voxels_gm.mif').count + app.console(' [ GM: ' + str(statrefgmcount) + ' -> ' + str(statvoxgmcount) + ' ]') + # Estimate GM response function + app.console(' * Estimating response function...') + run.command('amp2response dwi.mif voxels_gm.mif safe_vecs.mif response_gm.txt' + bvalues_option + ' -isotropic', show=False) + + # Get final voxels for single-fibre WM response function estimation from refined WM. 
+ app.console('* Single-fibre WM:') + app.console(' * Selecting final voxels' + + ('' if app.ARGS.wm_algo == 'tax' else (' ('+ str(app.ARGS.sfwm) + '% of refined WM)')) + + '...') + voxsfwmcount = int(round(statrefwmcount * app.ARGS.sfwm / 100.0)) + + if app.ARGS.wm_algo: + recursive_cleanup_option='' + if not app.DO_CLEANUP: + recursive_cleanup_option = ' -nocleanup' + app.console(' Selecting WM single-fibre voxels using \'' + app.ARGS.wm_algo + '\' algorithm') + if app.ARGS.wm_algo == 'tax' and app.ARGS.sfwm != 0.5: + app.warn('Single-fibre WM response function selection algorithm "tax" will not honour requested WM voxel percentage') + run.command('dwi2response ' + app.ARGS.wm_algo + ' dwi.mif _respsfwmss.txt -mask refined_wm.mif -voxels voxels_sfwm.mif' + + ('' if app.ARGS.wm_algo == 'tax' else (' -number ' + str(voxsfwmcount))) + + ' -scratch ' + shlex.quote(app.SCRATCH_DIR) + + recursive_cleanup_option, + show=False) + else: + app.console(' Selecting WM single-fibre voxels using built-in (Dhollander et al., 2019) algorithm') + run.command('mrmath dwi.mif mean mean_sig.mif -axis 3', show=False) + refwmcoef = image.statistics('mean_sig.mif', mask='refined_wm.mif').median * math.sqrt(4.0 * math.pi) + if sfwm_lmax: + isiso = [ lm == 0 for lm in sfwm_lmax ] + else: + isiso = [ bv < bzero_threshold for bv in bvalues ] + with open('ewmrf.txt', 'w', encoding='utf-8') as ewr: + for iis in isiso: + if iis: + ewr.write("%s 0 0 0\n" % refwmcoef) + else: + ewr.write("%s -%s %s -%s\n" % (refwmcoef, refwmcoef, refwmcoef, refwmcoef)) + run.command('dwi2fod msmt_csd dwi.mif ewmrf.txt abs_ewm2.mif response_csf.txt abs_csf2.mif -mask refined_wm.mif -lmax 2,0' + bvalues_option, show=False) + run.command('mrconvert abs_ewm2.mif - -coord 3 0 | mrcalc - abs_csf2.mif -add abs_sum2.mif', show=False) + run.command('sh2peaks abs_ewm2.mif - -num 1 -mask refined_wm.mif | peaks2amp - - | mrcalc - abs_sum2.mif -divide - | mrconvert - metric_sfwm2.mif -coord 3 0 -axes 0,1,2', show=False) + run.command('mrcalc refined_wm.mif metric_sfwm2.mif 0 -if - | mrthreshold - - -top ' + str(voxsfwmcount * 2) + ' -ignorezero | mrcalc refined_wm.mif - 0 -if - -datatype bit | mrconvert - refined_sfwm.mif -axes 0,1,2', show=False) + run.command('dwi2fod msmt_csd dwi.mif ewmrf.txt abs_ewm6.mif response_csf.txt abs_csf6.mif -mask refined_sfwm.mif -lmax 6,0' + bvalues_option, show=False) + run.command('mrconvert abs_ewm6.mif - -coord 3 0 | mrcalc - abs_csf6.mif -add abs_sum6.mif', show=False) + run.command('sh2peaks abs_ewm6.mif - -num 1 -mask refined_sfwm.mif | peaks2amp - - | mrcalc - abs_sum6.mif -divide - | mrconvert - metric_sfwm6.mif -coord 3 0 -axes 0,1,2', show=False) + run.command('mrcalc refined_sfwm.mif metric_sfwm6.mif 0 -if - | mrthreshold - - -top ' + str(voxsfwmcount) + ' -ignorezero | mrcalc refined_sfwm.mif - 0 -if - -datatype bit | mrconvert - voxels_sfwm.mif -axes 0,1,2', show=False) + + statvoxsfwmcount = image.statistics('voxels_sfwm.mif', mask='voxels_sfwm.mif').count + app.console(' [ WM: ' + str(statrefwmcount) + ' -> ' + str(statvoxsfwmcount) + ' (single-fibre) ]') + # Estimate SF WM response function + app.console(' * Estimating response function...') + run.command('amp2response dwi.mif voxels_sfwm.mif safe_vecs.mif response_sfwm.txt' + bvalues_option + sfwm_lmax_option, show=False) + + + # OUTPUT AND SUMMARY + app.console('-------') + app.console('Generating outputs...') + + # Generate 4D binary images with voxel selections at major stages in algorithm (RGB: WM=blue, GM=green, CSF=red). 
+ run.command('mrcat crude_csf.mif crude_gm.mif crude_wm.mif check_crude.mif -axis 3', show=False) + run.command('mrcat refined_csf.mif refined_gm.mif refined_wm.mif check_refined.mif -axis 3', show=False) + run.command('mrcat voxels_csf.mif voxels_gm.mif voxels_sfwm.mif check_voxels.mif -axis 3', show=False) + + # Copy results to output files + run.function(shutil.copyfile, 'response_sfwm.txt', path.from_user(app.ARGS.out_sfwm, False), show=False) + run.function(shutil.copyfile, 'response_gm.txt', path.from_user(app.ARGS.out_gm, False), show=False) + run.function(shutil.copyfile, 'response_csf.txt', path.from_user(app.ARGS.out_csf, False), show=False) + if app.ARGS.voxels: + run.command('mrconvert check_voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE, show=False) + app.console('-------') diff --git a/python/mrtrix3/dwi2response/dhollander/get_inputs.py b/python/mrtrix3/dwi2response/dhollander/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwi2response/dhollander/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwi2response/dhollander/usage.py b/python/mrtrix3/dwi2response/dhollander/usage.py new file mode 100644 index 0000000000..20e95f7fa5 --- /dev/null +++ b/python/mrtrix3/dwi2response/dhollander/usage.py @@ -0,0 +1,36 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from . import WM_ALGOS + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('dhollander', parents=[base_parser]) + parser.set_author('Thijs Dhollander (thijs.dhollander@gmail.com)') + parser.set_synopsis('Unsupervised estimation of WM, GM and CSF response functions that does not require a T1 image (or segmentation thereof)') + parser.add_description('This is an improved version of the Dhollander et al. (2016) algorithm for unsupervised estimation of WM, GM and CSF response functions, which includes the Dhollander et al. 
(2019) improvements for single-fibre WM response function estimation (prior to this update, the "dwi2response tournier" algorithm had been utilised specifically for the single-fibre WM response function estimation step).') + parser.add_citation('Dhollander, T.; Raffelt, D. & Connelly, A. Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5') + parser.add_citation('Dhollander, T.; Mito, R.; Raffelt, D. & Connelly, A. Improved white matter response function estimation for 3-tissue constrained spherical deconvolution. Proc Intl Soc Mag Reson Med, 2019, 555', + condition='If -wm_algo option is not used') + parser.add_argument('input', help='Input DWI dataset') + parser.add_argument('out_sfwm', help='Output single-fibre WM response function text file') + parser.add_argument('out_gm', help='Output GM response function text file') + parser.add_argument('out_csf', help='Output CSF response function text file') + options = parser.add_argument_group('Options for the \'dhollander\' algorithm') + options.add_argument('-erode', type=int, default=3, help='Number of erosion passes to apply to initial (whole brain) mask. Set to 0 to not erode the brain mask. (default: 3)') + options.add_argument('-fa', type=float, default=0.2, help='FA threshold for crude WM versus GM-CSF separation. (default: 0.2)') + options.add_argument('-sfwm', type=float, default=0.5, help='Final number of single-fibre WM voxels to select, as a percentage of refined WM. (default: 0.5 per cent)') + options.add_argument('-gm', type=float, default=2.0, help='Final number of GM voxels to select, as a percentage of refined GM. (default: 2 per cent)') + options.add_argument('-csf', type=float, default=10.0, help='Final number of CSF voxels to select, as a percentage of refined CSF. (default: 10 per cent)') + options.add_argument('-wm_algo', metavar='algorithm', choices=WM_ALGOS, help='Use external dwi2response algorithm for WM single-fibre voxel selection (options: ' + ', '.join(WM_ALGOS) + ') (default: built-in Dhollander 2019)') diff --git a/python/mrtrix3/dwi2response/execute.py b/python/mrtrix3/dwi2response/execute.py new file mode 100644 index 0000000000..f830e87fe3 --- /dev/null +++ b/python/mrtrix3/dwi2response/execute.py @@ -0,0 +1,89 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
+ +from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module +from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module + +def execute(): #pylint: disable=unused-variable + + # Find out which algorithm the user has requested + alg = algorithm.get(app.ARGS.algorithm) + + # Check for prior existence of output files, and grab any input files, used by the particular algorithm + if app.ARGS.voxels: + app.check_output_path(app.ARGS.voxels) + alg.check_output_paths() + + # Sanitise some inputs, and get ready for data import + if app.ARGS.lmax: + try: + lmax = [ int(x) for x in app.ARGS.lmax.split(',') ] + if any(lmax_value%2 for lmax_value in lmax): + raise MRtrixError('Value of lmax must be even') + except ValueError as exception: + raise MRtrixError('Parameter lmax must be a number') from exception + if alg.NEEDS_SINGLE_SHELL and not len(lmax) == 1: + raise MRtrixError('Can only specify a single lmax value for single-shell algorithms') + shells_option = '' + if app.ARGS.shells: + try: + shells_values = [ int(round(float(x))) for x in app.ARGS.shells.split(',') ] + except ValueError as exception: + raise MRtrixError('-shells option should provide a comma-separated list of b-values') from exception + if alg.NEEDS_SINGLE_SHELL and not len(shells_values) == 1: + raise MRtrixError('Can only specify a single b-value shell for single-shell algorithms') + shells_option = ' -shells ' + app.ARGS.shells + singleshell_option = '' + if alg.NEEDS_SINGLE_SHELL: + singleshell_option = ' -singleshell -no_bzero' + + grad_import_option = app.read_dwgrad_import_options() + if not grad_import_option and 'dw_scheme' not in image.Header(path.from_user(app.ARGS.input, False)).keyval(): + raise MRtrixError('Script requires diffusion gradient table: either in image header, or using -grad / -fslgrad option') + + app.make_scratch_dir() + + # Get standard input data into the scratch directory + if alg.NEEDS_SINGLE_SHELL or shells_option: + app.console('Importing DWI data (' + path.from_user(app.ARGS.input) + ') and selecting b-values...') + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' - -strides 0,0,0,1' + grad_import_option + ' | dwiextract - ' + path.to_scratch('dwi.mif') + shells_option + singleshell_option, show=False) + else: # Don't discard b=0 in multi-shell algorithms + app.console('Importing DWI data (' + path.from_user(app.ARGS.input) + ')...') + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('dwi.mif') + ' -strides 0,0,0,1' + grad_import_option, show=False) + if app.ARGS.mask: + app.console('Importing mask (' + path.from_user(app.ARGS.mask) + ')...') + run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit', show=False) + + alg.get_inputs() + + app.goto_scratch_dir() + + if alg.SUPPORTS_MASK: + if app.ARGS.mask: + # Check that the brain mask is appropriate + mask_header = image.Header('mask.mif') + if mask_header.size()[:3] != image.Header('dwi.mif').size()[:3]: + raise MRtrixError('Dimensions of provided mask image do not match DWI') + if not (len(mask_header.size()) == 3 or (len(mask_header.size()) == 4 and mask_header.size()[3] == 1)): + raise MRtrixError('Provided mask image needs to be a 3D image') + else: + app.console('Computing brain mask (dwi2mask)...') + run.command('dwi2mask ' + CONFIG['Dwi2maskAlgorithm'] + ' dwi.mif mask.mif', show=False) + + if not image.statistics('mask.mif', mask='mask.mif').count: + raise MRtrixError(('Provided' if 
app.ARGS.mask else 'Generated') + ' mask image does not contain any voxels') + + # From here, the script splits depending on what estimation algorithm is being used + alg.execute() diff --git a/python/mrtrix3/dwi2response/fa/__init__.py b/python/mrtrix3/dwi2response/fa/__init__.py index 5de851ac12..248c8e7c0a 100644 --- a/python/mrtrix3/dwi2response/fa/__init__.py +++ b/python/mrtrix3/dwi2response/fa/__init__.py @@ -13,67 +13,5 @@ # # For more details, see http://www.mrtrix.org/. -import shutil -from mrtrix3 import MRtrixError -from mrtrix3 import app, image, path, run - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('fa', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Use the old FA-threshold heuristic for single-fibre voxel selection and response function estimation') - parser.add_citation('Tournier, J.-D.; Calamante, F.; Gadian, D. G. & Connelly, A. Direct estimation of the fiber orientation density function from diffusion-weighted MRI data using spherical deconvolution. NeuroImage, 2004, 23, 1176-1185') - parser.add_argument('input', help='The input DWI') - parser.add_argument('output', help='The output response function text file') - options = parser.add_argument_group('Options specific to the \'fa\' algorithm') - options.add_argument('-erode', type=int, default=3, help='Number of brain mask erosion steps to apply prior to threshold (not used if mask is provided manually)') - options.add_argument('-number', type=int, default=300, help='The number of highest-FA voxels to use') - options.add_argument('-threshold', type=float, help='Apply a hard FA threshold, rather than selecting the top voxels') - parser.flag_mutually_exclusive_options( [ 'number', 'threshold' ] ) - - - -def check_output_paths(): #pylint: disable=unused-variable - app.check_output_path(app.ARGS.output) - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def needs_single_shell(): #pylint: disable=unused-variable - return False - - - -def supports_mask(): #pylint: disable=unused-variable - return True - - - -def execute(): #pylint: disable=unused-variable - bvalues = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] - if len(bvalues) < 2: - raise MRtrixError('Need at least 2 unique b-values (including b=0).') - lmax_option = '' - if app.ARGS.lmax: - lmax_option = ' -lmax ' + app.ARGS.lmax - if not app.ARGS.mask: - run.command('maskfilter mask.mif erode mask_eroded.mif -npass ' + str(app.ARGS.erode)) - mask_path = 'mask_eroded.mif' - else: - mask_path = 'mask.mif' - run.command('dwi2tensor dwi.mif -mask ' + mask_path + ' tensor.mif') - run.command('tensor2metric tensor.mif -fa fa.mif -vector vector.mif -mask ' + mask_path) - if app.ARGS.threshold: - run.command('mrthreshold fa.mif voxels.mif -abs ' + str(app.ARGS.threshold)) - else: - run.command('mrthreshold fa.mif voxels.mif -top ' + str(app.ARGS.number)) - run.command('dwiextract dwi.mif - -singleshell -no_bzero | amp2response - voxels.mif vector.mif response.txt' + lmax_option) - - run.function(shutil.copyfile, 'response.txt', path.from_user(app.ARGS.output, False)) - if app.ARGS.voxels: - run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) +NEEDS_SINGLE_SHELL = False +SUPPORTS_MASK = True diff --git a/python/mrtrix3/dwi2response/fa/check_output_paths.py 
b/python/mrtrix3/dwi2response/fa/check_output_paths.py new file mode 100644 index 0000000000..bff0fa0f2b --- /dev/null +++ b/python/mrtrix3/dwi2response/fa/check_output_paths.py @@ -0,0 +1,19 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.output) diff --git a/python/mrtrix3/dwi2response/fa/execute.py b/python/mrtrix3/dwi2response/fa/execute.py new file mode 100644 index 0000000000..91877b8e01 --- /dev/null +++ b/python/mrtrix3/dwi2response/fa/execute.py @@ -0,0 +1,42 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import shutil +from mrtrix3 import MRtrixError +from mrtrix3 import app, image, path, run + +def execute(): #pylint: disable=unused-variable + bvalues = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] + if len(bvalues) < 2: + raise MRtrixError('Need at least 2 unique b-values (including b=0).') + lmax_option = '' + if app.ARGS.lmax: + lmax_option = ' -lmax ' + app.ARGS.lmax + if not app.ARGS.mask: + run.command('maskfilter mask.mif erode mask_eroded.mif -npass ' + str(app.ARGS.erode)) + mask_path = 'mask_eroded.mif' + else: + mask_path = 'mask.mif' + run.command('dwi2tensor dwi.mif -mask ' + mask_path + ' tensor.mif') + run.command('tensor2metric tensor.mif -fa fa.mif -vector vector.mif -mask ' + mask_path) + if app.ARGS.threshold: + run.command('mrthreshold fa.mif voxels.mif -abs ' + str(app.ARGS.threshold)) + else: + run.command('mrthreshold fa.mif voxels.mif -top ' + str(app.ARGS.number)) + run.command('dwiextract dwi.mif - -singleshell -no_bzero | amp2response - voxels.mif vector.mif response.txt' + lmax_option) + + run.function(shutil.copyfile, 'response.txt', path.from_user(app.ARGS.output, False)) + if app.ARGS.voxels: + run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwi2response/fa/get_inputs.py b/python/mrtrix3/dwi2response/fa/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwi2response/fa/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. 
+# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwi2response/fa/usage.py b/python/mrtrix3/dwi2response/fa/usage.py new file mode 100644 index 0000000000..67c1de8fe4 --- /dev/null +++ b/python/mrtrix3/dwi2response/fa/usage.py @@ -0,0 +1,27 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('fa', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Use the old FA-threshold heuristic for single-fibre voxel selection and response function estimation') + parser.add_citation('Tournier, J.-D.; Calamante, F.; Gadian, D. G. & Connelly, A. Direct estimation of the fiber orientation density function from diffusion-weighted MRI data using spherical deconvolution. NeuroImage, 2004, 23, 1176-1185') + parser.add_argument('input', help='The input DWI') + parser.add_argument('output', help='The output response function text file') + options = parser.add_argument_group('Options specific to the \'fa\' algorithm') + options.add_argument('-erode', type=int, default=3, help='Number of brain mask erosion steps to apply prior to threshold (not used if mask is provided manually)') + options.add_argument('-number', type=int, default=300, help='The number of highest-FA voxels to use') + options.add_argument('-threshold', type=float, help='Apply a hard FA threshold, rather than selecting the top voxels') + parser.flag_mutually_exclusive_options( [ 'number', 'threshold' ] ) diff --git a/python/mrtrix3/dwi2response/manual/__init__.py b/python/mrtrix3/dwi2response/manual/__init__.py index 0d69b56f12..240fc1e4d7 100644 --- a/python/mrtrix3/dwi2response/manual/__init__.py +++ b/python/mrtrix3/dwi2response/manual/__init__.py @@ -13,76 +13,5 @@ # # For more details, see http://www.mrtrix.org/. -import os, shutil -from mrtrix3 import MRtrixError -from mrtrix3 import app, image, path, run - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('manual', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Derive a response function using an input mask image alone (i.e. 
pre-selected voxels)') - parser.add_argument('input', help='The input DWI') - parser.add_argument('in_voxels', help='Input voxel selection mask') - parser.add_argument('output', help='Output response function text file') - options = parser.add_argument_group('Options specific to the \'manual\' algorithm') - options.add_argument('-dirs', help='Manually provide the fibre direction in each voxel (a tensor fit will be used otherwise)') - - - -def check_output_paths(): #pylint: disable=unused-variable - app.check_output_path(app.ARGS.output) - - - -def get_inputs(): #pylint: disable=unused-variable - mask_path = path.to_scratch('mask.mif', False) - if os.path.exists(mask_path): - app.warn('-mask option is ignored by algorithm \'manual\'') - os.remove(mask_path) - run.command('mrconvert ' + path.from_user(app.ARGS.in_voxels) + ' ' + path.to_scratch('in_voxels.mif')) - if app.ARGS.dirs: - run.command('mrconvert ' + path.from_user(app.ARGS.dirs) + ' ' + path.to_scratch('dirs.mif') + ' -strides 0,0,0,1') - - - -def needs_single_shell(): #pylint: disable=unused-variable - return False - - - -def supports_mask(): #pylint: disable=unused-variable - return False - - - -def execute(): #pylint: disable=unused-variable - shells = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] - - # Get lmax information (if provided) - lmax = [ ] - if app.ARGS.lmax: - lmax = [ int(x.strip()) for x in app.ARGS.lmax.split(',') ] - if not len(lmax) == len(shells): - raise MRtrixError('Number of manually-defined lmax\'s (' + str(len(lmax)) + ') does not match number of b-value shells (' + str(len(shells)) + ')') - for shell_l in lmax: - if shell_l % 2: - raise MRtrixError('Values for lmax must be even') - if shell_l < 0: - raise MRtrixError('Values for lmax must be non-negative') - - # Do we have directions, or do we need to calculate them? - if not os.path.exists('dirs.mif'): - run.command('dwi2tensor dwi.mif - -mask in_voxels.mif | tensor2metric - -vector dirs.mif') - - # Get response function - bvalues_option = ' -shells ' + ','.join(map(str,shells)) - lmax_option = '' - if lmax: - lmax_option = ' -lmax ' + ','.join(map(str,lmax)) - run.command('amp2response dwi.mif in_voxels.mif dirs.mif response.txt' + bvalues_option + lmax_option) - - run.function(shutil.copyfile, 'response.txt', path.from_user(app.ARGS.output, False)) - if app.ARGS.voxels: - run.command('mrconvert in_voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) +NEEDS_SINGLE_SHELL = False +SUPPORTS_MASK = False diff --git a/python/mrtrix3/dwi2response/manual/check_output_paths.py b/python/mrtrix3/dwi2response/manual/check_output_paths.py new file mode 100644 index 0000000000..bff0fa0f2b --- /dev/null +++ b/python/mrtrix3/dwi2response/manual/check_output_paths.py @@ -0,0 +1,19 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
+ +from mrtrix3 import app + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.output) diff --git a/python/mrtrix3/dwi2response/manual/execute.py b/python/mrtrix3/dwi2response/manual/execute.py new file mode 100644 index 0000000000..dd66a78682 --- /dev/null +++ b/python/mrtrix3/dwi2response/manual/execute.py @@ -0,0 +1,48 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import os, shutil +from mrtrix3 import MRtrixError +from mrtrix3 import app, image, path, run + +def execute(): #pylint: disable=unused-variable + shells = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] + + # Get lmax information (if provided) + lmax = [ ] + if app.ARGS.lmax: + lmax = [ int(x.strip()) for x in app.ARGS.lmax.split(',') ] + if not len(lmax) == len(shells): + raise MRtrixError('Number of manually-defined lmax\'s (' + str(len(lmax)) + ') does not match number of b-value shells (' + str(len(shells)) + ')') + for shell_l in lmax: + if shell_l % 2: + raise MRtrixError('Values for lmax must be even') + if shell_l < 0: + raise MRtrixError('Values for lmax must be non-negative') + + # Do we have directions, or do we need to calculate them? + if not os.path.exists('dirs.mif'): + run.command('dwi2tensor dwi.mif - -mask in_voxels.mif | tensor2metric - -vector dirs.mif') + + # Get response function + bvalues_option = ' -shells ' + ','.join(map(str,shells)) + lmax_option = '' + if lmax: + lmax_option = ' -lmax ' + ','.join(map(str,lmax)) + run.command('amp2response dwi.mif in_voxels.mif dirs.mif response.txt' + bvalues_option + lmax_option) + + run.function(shutil.copyfile, 'response.txt', path.from_user(app.ARGS.output, False)) + if app.ARGS.voxels: + run.command('mrconvert in_voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwi2response/manual/get_inputs.py b/python/mrtrix3/dwi2response/manual/get_inputs.py new file mode 100644 index 0000000000..77bbb662bb --- /dev/null +++ b/python/mrtrix3/dwi2response/manual/get_inputs.py @@ -0,0 +1,26 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
+ +import os +from mrtrix3 import app, path, run + +def get_inputs(): #pylint: disable=unused-variable + mask_path = path.to_scratch('mask.mif', False) + if os.path.exists(mask_path): + app.warn('-mask option is ignored by algorithm \'manual\'') + os.remove(mask_path) + run.command('mrconvert ' + path.from_user(app.ARGS.in_voxels) + ' ' + path.to_scratch('in_voxels.mif')) + if app.ARGS.dirs: + run.command('mrconvert ' + path.from_user(app.ARGS.dirs) + ' ' + path.to_scratch('dirs.mif') + ' -strides 0,0,0,1') diff --git a/python/mrtrix3/dwi2response/manual/usage.py b/python/mrtrix3/dwi2response/manual/usage.py new file mode 100644 index 0000000000..706b152f72 --- /dev/null +++ b/python/mrtrix3/dwi2response/manual/usage.py @@ -0,0 +1,24 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('manual', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Derive a response function using an input mask image alone (i.e. pre-selected voxels)') + parser.add_argument('input', help='The input DWI') + parser.add_argument('in_voxels', help='Input voxel selection mask') + parser.add_argument('output', help='Output response function text file') + options = parser.add_argument_group('Options specific to the \'manual\' algorithm') + options.add_argument('-dirs', help='Manually provide the fibre direction in each voxel (a tensor fit will be used otherwise)') diff --git a/python/mrtrix3/dwi2response/msmt_5tt/__init__.py b/python/mrtrix3/dwi2response/msmt_5tt/__init__.py index 30fe810355..d2e5fe7cb7 100644 --- a/python/mrtrix3/dwi2response/msmt_5tt/__init__.py +++ b/python/mrtrix3/dwi2response/msmt_5tt/__init__.py @@ -13,147 +13,6 @@ # # For more details, see http://www.mrtrix.org/. -import os, shlex, shutil -from mrtrix3 import MRtrixError -from mrtrix3 import app, image, path, run - - - +NEEDS_SINGLE_SHELL = False +SUPPORTS_MASK = True WM_ALGOS = [ 'fa', 'tax', 'tournier' ] - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('msmt_5tt', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Derive MSMT-CSD tissue response functions based on a co-registered five-tissue-type (5TT) image') - parser.add_citation('Jeurissen, B.; Tournier, J.-D.; Dhollander, T.; Connelly, A. & Sijbers, J. Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data.
NeuroImage, 2014, 103, 411-426') - parser.add_argument('input', help='The input DWI') - parser.add_argument('in_5tt', help='Input co-registered 5TT image') - parser.add_argument('out_wm', help='Output WM response text file') - parser.add_argument('out_gm', help='Output GM response text file') - parser.add_argument('out_csf', help='Output CSF response text file') - options = parser.add_argument_group('Options specific to the \'msmt_5tt\' algorithm') - options.add_argument('-dirs', help='Manually provide the fibre direction in each voxel (a tensor fit will be used otherwise)') - options.add_argument('-fa', type=float, default=0.2, help='Upper fractional anisotropy threshold for GM and CSF voxel selection (default: 0.2)') - options.add_argument('-pvf', type=float, default=0.95, help='Partial volume fraction threshold for tissue voxel selection (default: 0.95)') - options.add_argument('-wm_algo', metavar='algorithm', choices=WM_ALGOS, default='tournier', help='dwi2response algorithm to use for WM single-fibre voxel selection (options: ' + ', '.join(WM_ALGOS) + '; default: tournier)') - options.add_argument('-sfwm_fa_threshold', type=float, help='Sets -wm_algo to fa and allows to specify a hard FA threshold for single-fibre WM voxels, which is passed to the -threshold option of the fa algorithm (warning: overrides -wm_algo option)') - - - -def check_output_paths(): #pylint: disable=unused-variable - app.check_output_path(app.ARGS.out_wm) - app.check_output_path(app.ARGS.out_gm) - app.check_output_path(app.ARGS.out_csf) - - - -def get_inputs(): #pylint: disable=unused-variable - run.command('mrconvert ' + path.from_user(app.ARGS.in_5tt) + ' ' + path.to_scratch('5tt.mif')) - if app.ARGS.dirs: - run.command('mrconvert ' + path.from_user(app.ARGS.dirs) + ' ' + path.to_scratch('dirs.mif') + ' -strides 0,0,0,1') - - - -def needs_single_shell(): #pylint: disable=unused-variable - return False - - - -def supports_mask(): #pylint: disable=unused-variable - return True - - - -def execute(): #pylint: disable=unused-variable - # Ideally want to use the oversampling-based regridding of the 5TT image from the SIFT model, not mrtransform - # May need to commit 5ttregrid... 
- - # Verify input 5tt image - verification_text = '' - try: - verification_text = run.command('5ttcheck 5tt.mif').stderr - except run.MRtrixCmdError as except_5ttcheck: - verification_text = except_5ttcheck.stderr - if 'WARNING' in verification_text or 'ERROR' in verification_text: - app.warn('Command 5ttcheck indicates problems with provided input 5TT image \'' + app.ARGS.in_5tt + '\':') - for line in verification_text.splitlines(): - app.warn(line) - app.warn('These may or may not interfere with the dwi2response msmt_5tt script') - - # Get shell information - shells = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] - if len(shells) < 3: - app.warn('Less than three b-values; response functions will not be applicable in resolving three tissues using MSMT-CSD algorithm') - - # Get lmax information (if provided) - wm_lmax = [ ] - if app.ARGS.lmax: - wm_lmax = [ int(x.strip()) for x in app.ARGS.lmax.split(',') ] - if not len(wm_lmax) == len(shells): - raise MRtrixError('Number of manually-defined lmax\'s (' + str(len(wm_lmax)) + ') does not match number of b-values (' + str(len(shells)) + ')') - for shell_l in wm_lmax: - if shell_l % 2: - raise MRtrixError('Values for lmax must be even') - if shell_l < 0: - raise MRtrixError('Values for lmax must be non-negative') - - run.command('dwi2tensor dwi.mif - -mask mask.mif | tensor2metric - -fa fa.mif -vector vector.mif') - if not os.path.exists('dirs.mif'): - run.function(shutil.copy, 'vector.mif', 'dirs.mif') - run.command('mrtransform 5tt.mif 5tt_regrid.mif -template fa.mif -interp linear') - - # Basic tissue masks - run.command('mrconvert 5tt_regrid.mif - -coord 3 2 -axes 0,1,2 | mrcalc - ' + str(app.ARGS.pvf) + ' -gt mask.mif -mult wm_mask.mif') - run.command('mrconvert 5tt_regrid.mif - -coord 3 0 -axes 0,1,2 | mrcalc - ' + str(app.ARGS.pvf) + ' -gt fa.mif ' + str(app.ARGS.fa) + ' -lt -mult mask.mif -mult gm_mask.mif') - run.command('mrconvert 5tt_regrid.mif - -coord 3 3 -axes 0,1,2 | mrcalc - ' + str(app.ARGS.pvf) + ' -gt fa.mif ' + str(app.ARGS.fa) + ' -lt -mult mask.mif -mult csf_mask.mif') - - # Revise WM mask to only include single-fibre voxels - recursive_cleanup_option='' - if not app.DO_CLEANUP: - recursive_cleanup_option = ' -nocleanup' - if not app.ARGS.sfwm_fa_threshold: - app.console('Selecting WM single-fibre voxels using \'' + app.ARGS.wm_algo + '\' algorithm') - run.command('dwi2response ' + app.ARGS.wm_algo + ' dwi.mif wm_ss_response.txt -mask wm_mask.mif -voxels wm_sf_mask.mif -scratch ' + shlex.quote(app.SCRATCH_DIR) + recursive_cleanup_option) - else: - app.console('Selecting WM single-fibre voxels using \'fa\' algorithm with a hard FA threshold of ' + str(app.ARGS.sfwm_fa_threshold)) - run.command('dwi2response fa dwi.mif wm_ss_response.txt -mask wm_mask.mif -threshold ' + str(app.ARGS.sfwm_fa_threshold) + ' -voxels wm_sf_mask.mif -scratch ' + shlex.quote(app.SCRATCH_DIR) + recursive_cleanup_option) - - # Check for empty masks - wm_voxels = image.statistics('wm_sf_mask.mif', mask='wm_sf_mask.mif').count - gm_voxels = image.statistics('gm_mask.mif', mask='gm_mask.mif').count - csf_voxels = image.statistics('csf_mask.mif', mask='csf_mask.mif').count - empty_masks = [ ] - if not wm_voxels: - empty_masks.append('WM') - if not gm_voxels: - empty_masks.append('GM') - if not csf_voxels: - empty_masks.append('CSF') - if empty_masks: - message = ','.join(empty_masks) - message += ' tissue mask' - if len(empty_masks) > 1: - message += 's' - message += ' empty; cannot estimate response function' - if 
len(empty_masks) > 1: - message += 's' - raise MRtrixError(message) - - # For each of the three tissues, generate a multi-shell response - bvalues_option = ' -shells ' + ','.join(map(str,shells)) - sfwm_lmax_option = '' - if wm_lmax: - sfwm_lmax_option = ' -lmax ' + ','.join(map(str,wm_lmax)) - run.command('amp2response dwi.mif wm_sf_mask.mif dirs.mif wm.txt' + bvalues_option + sfwm_lmax_option) - run.command('amp2response dwi.mif gm_mask.mif dirs.mif gm.txt' + bvalues_option + ' -isotropic') - run.command('amp2response dwi.mif csf_mask.mif dirs.mif csf.txt' + bvalues_option + ' -isotropic') - run.function(shutil.copyfile, 'wm.txt', path.from_user(app.ARGS.out_wm, False)) - run.function(shutil.copyfile, 'gm.txt', path.from_user(app.ARGS.out_gm, False)) - run.function(shutil.copyfile, 'csf.txt', path.from_user(app.ARGS.out_csf, False)) - - # Generate output 4D binary image with voxel selections; RGB as in MSMT-CSD paper - run.command('mrcat csf_mask.mif gm_mask.mif wm_sf_mask.mif voxels.mif -axis 3') - if app.ARGS.voxels: - run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwi2response/msmt_5tt/check_output_paths.py b/python/mrtrix3/dwi2response/msmt_5tt/check_output_paths.py new file mode 100644 index 0000000000..2f822719f5 --- /dev/null +++ b/python/mrtrix3/dwi2response/msmt_5tt/check_output_paths.py @@ -0,0 +1,21 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.out_wm) + app.check_output_path(app.ARGS.out_gm) + app.check_output_path(app.ARGS.out_csf) diff --git a/python/mrtrix3/dwi2response/msmt_5tt/execute.py b/python/mrtrix3/dwi2response/msmt_5tt/execute.py new file mode 100644 index 0000000000..a7aa85a1a0 --- /dev/null +++ b/python/mrtrix3/dwi2response/msmt_5tt/execute.py @@ -0,0 +1,110 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
+ +import os, shlex, shutil +from mrtrix3 import MRtrixError +from mrtrix3 import app, image, path, run + +def execute(): #pylint: disable=unused-variable + # Ideally want to use the oversampling-based regridding of the 5TT image from the SIFT model, not mrtransform + # May need to commit 5ttregrid... + + # Verify input 5tt image + verification_text = '' + try: + verification_text = run.command('5ttcheck 5tt.mif').stderr + except run.MRtrixCmdError as except_5ttcheck: + verification_text = except_5ttcheck.stderr + if 'WARNING' in verification_text or 'ERROR' in verification_text: + app.warn('Command 5ttcheck indicates problems with provided input 5TT image \'' + app.ARGS.in_5tt + '\':') + for line in verification_text.splitlines(): + app.warn(line) + app.warn('These may or may not interfere with the dwi2response msmt_5tt script') + + # Get shell information + shells = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] + if len(shells) < 3: + app.warn('Less than three b-values; response functions will not be applicable in resolving three tissues using MSMT-CSD algorithm') + + # Get lmax information (if provided) + wm_lmax = [ ] + if app.ARGS.lmax: + wm_lmax = [ int(x.strip()) for x in app.ARGS.lmax.split(',') ] + if not len(wm_lmax) == len(shells): + raise MRtrixError('Number of manually-defined lmax\'s (' + str(len(wm_lmax)) + ') does not match number of b-values (' + str(len(shells)) + ')') + for shell_l in wm_lmax: + if shell_l % 2: + raise MRtrixError('Values for lmax must be even') + if shell_l < 0: + raise MRtrixError('Values for lmax must be non-negative') + + run.command('dwi2tensor dwi.mif - -mask mask.mif | tensor2metric - -fa fa.mif -vector vector.mif') + if not os.path.exists('dirs.mif'): + run.function(shutil.copy, 'vector.mif', 'dirs.mif') + run.command('mrtransform 5tt.mif 5tt_regrid.mif -template fa.mif -interp linear') + + # Basic tissue masks + run.command('mrconvert 5tt_regrid.mif - -coord 3 2 -axes 0,1,2 | mrcalc - ' + str(app.ARGS.pvf) + ' -gt mask.mif -mult wm_mask.mif') + run.command('mrconvert 5tt_regrid.mif - -coord 3 0 -axes 0,1,2 | mrcalc - ' + str(app.ARGS.pvf) + ' -gt fa.mif ' + str(app.ARGS.fa) + ' -lt -mult mask.mif -mult gm_mask.mif') + run.command('mrconvert 5tt_regrid.mif - -coord 3 3 -axes 0,1,2 | mrcalc - ' + str(app.ARGS.pvf) + ' -gt fa.mif ' + str(app.ARGS.fa) + ' -lt -mult mask.mif -mult csf_mask.mif') + + # Revise WM mask to only include single-fibre voxels + recursive_cleanup_option='' + if not app.DO_CLEANUP: + recursive_cleanup_option = ' -nocleanup' + if not app.ARGS.sfwm_fa_threshold: + app.console('Selecting WM single-fibre voxels using \'' + app.ARGS.wm_algo + '\' algorithm') + run.command('dwi2response ' + app.ARGS.wm_algo + ' dwi.mif wm_ss_response.txt -mask wm_mask.mif -voxels wm_sf_mask.mif -scratch ' + shlex.quote(app.SCRATCH_DIR) + recursive_cleanup_option) + else: + app.console('Selecting WM single-fibre voxels using \'fa\' algorithm with a hard FA threshold of ' + str(app.ARGS.sfwm_fa_threshold)) + run.command('dwi2response fa dwi.mif wm_ss_response.txt -mask wm_mask.mif -threshold ' + str(app.ARGS.sfwm_fa_threshold) + ' -voxels wm_sf_mask.mif -scratch ' + shlex.quote(app.SCRATCH_DIR) + recursive_cleanup_option) + + # Check for empty masks + wm_voxels = image.statistics('wm_sf_mask.mif', mask='wm_sf_mask.mif').count + gm_voxels = image.statistics('gm_mask.mif', mask='gm_mask.mif').count + csf_voxels = image.statistics('csf_mask.mif', mask='csf_mask.mif').count + empty_masks = [ ] + if not wm_voxels: + 
empty_masks.append('WM') + if not gm_voxels: + empty_masks.append('GM') + if not csf_voxels: + empty_masks.append('CSF') + if empty_masks: + message = ','.join(empty_masks) + message += ' tissue mask' + if len(empty_masks) > 1: + message += 's' + message += ' empty; cannot estimate response function' + if len(empty_masks) > 1: + message += 's' + raise MRtrixError(message) + + # For each of the three tissues, generate a multi-shell response + bvalues_option = ' -shells ' + ','.join(map(str,shells)) + sfwm_lmax_option = '' + if wm_lmax: + sfwm_lmax_option = ' -lmax ' + ','.join(map(str,wm_lmax)) + run.command('amp2response dwi.mif wm_sf_mask.mif dirs.mif wm.txt' + bvalues_option + sfwm_lmax_option) + run.command('amp2response dwi.mif gm_mask.mif dirs.mif gm.txt' + bvalues_option + ' -isotropic') + run.command('amp2response dwi.mif csf_mask.mif dirs.mif csf.txt' + bvalues_option + ' -isotropic') + run.function(shutil.copyfile, 'wm.txt', path.from_user(app.ARGS.out_wm, False)) + run.function(shutil.copyfile, 'gm.txt', path.from_user(app.ARGS.out_gm, False)) + run.function(shutil.copyfile, 'csf.txt', path.from_user(app.ARGS.out_csf, False)) + + # Generate output 4D binary image with voxel selections; RGB as in MSMT-CSD paper + run.command('mrcat csf_mask.mif gm_mask.mif wm_sf_mask.mif voxels.mif -axis 3') + if app.ARGS.voxels: + run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwi2response/msmt_5tt/get_inputs.py b/python/mrtrix3/dwi2response/msmt_5tt/get_inputs.py new file mode 100644 index 0000000000..4bc251c78a --- /dev/null +++ b/python/mrtrix3/dwi2response/msmt_5tt/get_inputs.py @@ -0,0 +1,21 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app, path, run + +def get_inputs(): #pylint: disable=unused-variable + run.command('mrconvert ' + path.from_user(app.ARGS.in_5tt) + ' ' + path.to_scratch('5tt.mif')) + if app.ARGS.dirs: + run.command('mrconvert ' + path.from_user(app.ARGS.dirs) + ' ' + path.to_scratch('dirs.mif') + ' -strides 0,0,0,1') diff --git a/python/mrtrix3/dwi2response/msmt_5tt/usage.py b/python/mrtrix3/dwi2response/msmt_5tt/usage.py new file mode 100644 index 0000000000..04424e352b --- /dev/null +++ b/python/mrtrix3/dwi2response/msmt_5tt/usage.py @@ -0,0 +1,33 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from . import WM_ALGOS + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('msmt_5tt', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Derive MSMT-CSD tissue response functions based on a co-registered five-tissue-type (5TT) image') + parser.add_citation('Jeurissen, B.; Tournier, J.-D.; Dhollander, T.; Connelly, A. & Sijbers, J. Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data. NeuroImage, 2014, 103, 411-426') + parser.add_argument('input', help='The input DWI') + parser.add_argument('in_5tt', help='Input co-registered 5TT image') + parser.add_argument('out_wm', help='Output WM response text file') + parser.add_argument('out_gm', help='Output GM response text file') + parser.add_argument('out_csf', help='Output CSF response text file') + options = parser.add_argument_group('Options specific to the \'msmt_5tt\' algorithm') + options.add_argument('-dirs', help='Manually provide the fibre direction in each voxel (a tensor fit will be used otherwise)') + options.add_argument('-fa', type=float, default=0.2, help='Upper fractional anisotropy threshold for GM and CSF voxel selection (default: 0.2)') + options.add_argument('-pvf', type=float, default=0.95, help='Partial volume fraction threshold for tissue voxel selection (default: 0.95)') + options.add_argument('-wm_algo', metavar='algorithm', choices=WM_ALGOS, default='tournier', help='dwi2response algorithm to use for WM single-fibre voxel selection (options: ' + ', '.join(WM_ALGOS) + '; default: tournier)') + options.add_argument('-sfwm_fa_threshold', type=float, help='Sets -wm_algo to fa and allows to specify a hard FA threshold for single-fibre WM voxels, which is passed to the -threshold option of the fa algorithm (warning: overrides -wm_algo option)') diff --git a/python/mrtrix3/dwi2response/tax/__init__.py b/python/mrtrix3/dwi2response/tax/__init__.py index 33e46f0db7..8bb9ef9a4c 100644 --- a/python/mrtrix3/dwi2response/tax/__init__.py +++ b/python/mrtrix3/dwi2response/tax/__init__.py @@ -13,143 +13,5 @@ # # For more details, see http://www.mrtrix.org/. -import math, os, shutil -from mrtrix3 import MRtrixError -from mrtrix3 import app, image, matrix, path, run - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('tax', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Use the Tax et al. (2014) recursive calibration algorithm for single-fibre voxel selection and response function estimation') - parser.add_citation('Tax, C. M.; Jeurissen, B.; Vos, S. B.; Viergever, M. A. & Leemans, A. Recursive calibration of the fiber response function for spherical deconvolution of diffusion MRI data. 
NeuroImage, 2014, 86, 67-80') - parser.add_argument('input', help='The input DWI') - parser.add_argument('output', help='The output response function text file') - options = parser.add_argument_group('Options specific to the \'tax\' algorithm') - options.add_argument('-peak_ratio', type=float, default=0.1, help='Second-to-first-peak amplitude ratio threshold') - options.add_argument('-max_iters', type=int, default=20, help='Maximum number of iterations') - options.add_argument('-convergence', type=float, default=0.5, help='Percentile change in any RF coefficient required to continue iterating') - - - -def check_output_paths(): #pylint: disable=unused-variable - app.check_output_path(app.ARGS.output) - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def needs_single_shell(): #pylint: disable=unused-variable - return True - - - -def supports_mask(): #pylint: disable=unused-variable - return True - - - -def execute(): #pylint: disable=unused-variable - lmax_option = '' - if app.ARGS.lmax: - lmax_option = ' -lmax ' + app.ARGS.lmax - - convergence_change = 0.01 * app.ARGS.convergence - - progress = app.ProgressBar('Optimising') - - iteration = 0 - while iteration < app.ARGS.max_iters: - prefix = 'iter' + str(iteration) + '_' - - # How to initialise response function? - # old dwi2response command used mean & standard deviation of DWI data; however - # this may force the output FODs to lmax=2 at the first iteration - # Chantal used a tensor with low FA, but it'd be preferable to get the scaling right - # Other option is to do as before, but get the ratio between l=0 and l=2, and - # generate l=4,6,... using that amplitude ratio - if iteration == 0: - rf_in_path = 'init_RF.txt' - mask_in_path = 'mask.mif' - - # Grab the mean and standard deviation across all volumes in a single mrstats call - # Also scale them to reflect the fact that we're moving to the SH basis - image_stats = image.statistics('dwi.mif', mask='mask.mif', allvolumes=True) - mean = image_stats.mean * math.sqrt(4.0 * math.pi) - std = image_stats.std * math.sqrt(4.0 * math.pi) - - # Now produce the initial response function - # Let's only do it to lmax 4 - init_rf = [ str(mean), str(-0.5*std), str(0.25*std*std/mean) ] - with open('init_RF.txt', 'w', encoding='utf-8') as init_rf_file: - init_rf_file.write(' '.join(init_rf)) - else: - rf_in_path = 'iter' + str(iteration-1) + '_RF.txt' - mask_in_path = 'iter' + str(iteration-1) + '_SF.mif' - - # Run CSD - run.command('dwi2fod csd dwi.mif ' + rf_in_path + ' ' + prefix + 'FOD.mif -mask ' + mask_in_path) - # Get amplitudes of two largest peaks, and directions of largest - run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix + 'fixel -peak peaks.mif -mask ' + mask_in_path + ' -fmls_no_thresholds') - app.cleanup(prefix + 'FOD.mif') - run.command('fixel2voxel ' + prefix + 'fixel/peaks.mif none ' + prefix + 'amps.mif') - run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'first_peaks.mif -coord 3 0 -axes 0,1,2') - run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'second_peaks.mif -coord 3 1 -axes 0,1,2') - app.cleanup(prefix + 'amps.mif') - run.command('fixel2peaks ' + prefix + 'fixel/directions.mif ' + prefix + 'first_dir.mif -number 1') - app.cleanup(prefix + 'fixel') - # Revise single-fibre voxel selection based on ratio of tallest to second-tallest peak - run.command('mrcalc ' + prefix + 'second_peaks.mif ' + prefix + 'first_peaks.mif -div ' + prefix + 'peak_ratio.mif') - app.cleanup(prefix + 'first_peaks.mif') - app.cleanup(prefix + 
'second_peaks.mif') - run.command('mrcalc ' + prefix + 'peak_ratio.mif ' + str(app.ARGS.peak_ratio) + ' -lt ' + mask_in_path + ' -mult ' + prefix + 'SF.mif -datatype bit') - app.cleanup(prefix + 'peak_ratio.mif') - # Make sure image isn't empty - sf_voxel_count = image.statistics(prefix + 'SF.mif', mask=prefix+'SF.mif').count - if not sf_voxel_count: - raise MRtrixError('Aborting: All voxels have been excluded from single-fibre selection') - # Generate a new response function - run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix + 'first_dir.mif ' + prefix + 'RF.txt' + lmax_option) - app.cleanup(prefix + 'first_dir.mif') - - new_rf = matrix.load_vector(prefix + 'RF.txt') - progress.increment('Optimising (' + str(iteration+1) + ' iterations, ' + str(sf_voxel_count) + ' voxels, RF: [ ' + ', '.join('{:.3f}'.format(n) for n in new_rf) + '] )') - - # Detect convergence - # Look for a change > some percentage - don't bother looking at the masks - if iteration > 0: - old_rf = matrix.load_vector(rf_in_path) - reiterate = False - for old_value, new_value in zip(old_rf, new_rf): - mean = 0.5 * (old_value + new_value) - diff = math.fabs(0.5 * (old_value - new_value)) - ratio = diff / mean - if ratio > convergence_change: - reiterate = True - if not reiterate: - run.function(shutil.copyfile, prefix + 'RF.txt', 'response.txt') - run.function(shutil.copyfile, prefix + 'SF.mif', 'voxels.mif') - break - - app.cleanup(rf_in_path) - app.cleanup(mask_in_path) - - iteration += 1 - - progress.done() - - # If we've terminated due to hitting the iteration limiter, we still need to copy the output file(s) to the correct location - if os.path.exists('response.txt'): - app.console('Exited at iteration ' + str(iteration+1) + ' with ' + str(sf_voxel_count) + ' SF voxels due to unchanged RF coefficients') - else: - app.console('Exited after maximum ' + str(app.ARGS.max_iters) + ' iterations with ' + str(sf_voxel_count) + ' SF voxels') - run.function(shutil.copyfile, 'iter' + str(app.ARGS.max_iters-1) + '_RF.txt', 'response.txt') - run.function(shutil.copyfile, 'iter' + str(app.ARGS.max_iters-1) + '_SF.mif', 'voxels.mif') - - run.function(shutil.copyfile, 'response.txt', path.from_user(app.ARGS.output, False)) - if app.ARGS.voxels: - run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) +NEEDS_SINGLE_SHELL = True +SUPPORTS_MASK = True diff --git a/python/mrtrix3/dwi2response/tax/check_output_paths.py b/python/mrtrix3/dwi2response/tax/check_output_paths.py new file mode 100644 index 0000000000..bff0fa0f2b --- /dev/null +++ b/python/mrtrix3/dwi2response/tax/check_output_paths.py @@ -0,0 +1,19 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/.
+ +from mrtrix3 import app + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.output) diff --git a/python/mrtrix3/dwi2response/tax/execute.py b/python/mrtrix3/dwi2response/tax/execute.py new file mode 100644 index 0000000000..2060568dc2 --- /dev/null +++ b/python/mrtrix3/dwi2response/tax/execute.py @@ -0,0 +1,119 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import math, os, shutil +from mrtrix3 import MRtrixError +from mrtrix3 import app, image, matrix, path, run + +def execute(): #pylint: disable=unused-variable + lmax_option = '' + if app.ARGS.lmax: + lmax_option = ' -lmax ' + app.ARGS.lmax + + convergence_change = 0.01 * app.ARGS.convergence + + progress = app.ProgressBar('Optimising') + + iteration = 0 + while iteration < app.ARGS.max_iters: + prefix = 'iter' + str(iteration) + '_' + + # How to initialise response function? + # old dwi2response command used mean & standard deviation of DWI data; however + # this may force the output FODs to lmax=2 at the first iteration + # Chantal used a tensor with low FA, but it'd be preferable to get the scaling right + # Other option is to do as before, but get the ratio between l=0 and l=2, and + # generate l=4,6,... 
using that amplitude ratio + if iteration == 0: + rf_in_path = 'init_RF.txt' + mask_in_path = 'mask.mif' + + # Grab the mean and standard deviation across all volumes in a single mrstats call + # Also scale them to reflect the fact that we're moving to the SH basis + image_stats = image.statistics('dwi.mif', mask='mask.mif', allvolumes=True) + mean = image_stats.mean * math.sqrt(4.0 * math.pi) + std = image_stats.std * math.sqrt(4.0 * math.pi) + + # Now produce the initial response function + # Let's only do it to lmax 4 + init_rf = [ str(mean), str(-0.5*std), str(0.25*std*std/mean) ] + with open('init_RF.txt', 'w', encoding='utf-8') as init_rf_file: + init_rf_file.write(' '.join(init_rf)) + else: + rf_in_path = 'iter' + str(iteration-1) + '_RF.txt' + mask_in_path = 'iter' + str(iteration-1) + '_SF.mif' + + # Run CSD + run.command('dwi2fod csd dwi.mif ' + rf_in_path + ' ' + prefix + 'FOD.mif -mask ' + mask_in_path) + # Get amplitudes of two largest peaks, and directions of largest + run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix + 'fixel -peak peaks.mif -mask ' + mask_in_path + ' -fmls_no_thresholds') + app.cleanup(prefix + 'FOD.mif') + run.command('fixel2voxel ' + prefix + 'fixel/peaks.mif none ' + prefix + 'amps.mif') + run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'first_peaks.mif -coord 3 0 -axes 0,1,2') + run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'second_peaks.mif -coord 3 1 -axes 0,1,2') + app.cleanup(prefix + 'amps.mif') + run.command('fixel2peaks ' + prefix + 'fixel/directions.mif ' + prefix + 'first_dir.mif -number 1') + app.cleanup(prefix + 'fixel') + # Revise single-fibre voxel selection based on ratio of tallest to second-tallest peak + run.command('mrcalc ' + prefix + 'second_peaks.mif ' + prefix + 'first_peaks.mif -div ' + prefix + 'peak_ratio.mif') + app.cleanup(prefix + 'first_peaks.mif') + app.cleanup(prefix + 'second_peaks.mif') + run.command('mrcalc ' + prefix + 'peak_ratio.mif ' + str(app.ARGS.peak_ratio) + ' -lt ' + mask_in_path + ' -mult ' + prefix + 'SF.mif -datatype bit') + app.cleanup(prefix + 'peak_ratio.mif') + # Make sure image isn't empty + sf_voxel_count = image.statistics(prefix + 'SF.mif', mask=prefix+'SF.mif').count + if not sf_voxel_count: + raise MRtrixError('Aborting: All voxels have been excluded from single-fibre selection') + # Generate a new response function + run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix + 'first_dir.mif ' + prefix + 'RF.txt' + lmax_option) + app.cleanup(prefix + 'first_dir.mif') + + new_rf = matrix.load_vector(prefix + 'RF.txt') + progress.increment('Optimising (' + str(iteration+1) + ' iterations, ' + str(sf_voxel_count) + ' voxels, RF: [ ' + ', '.join('{:.3f}'.format(n) for n in new_rf) + '] )') + + # Detect convergence + # Look for a change > some percentage - don't bother looking at the masks + if iteration > 0: + old_rf = matrix.load_vector(rf_in_path) + reiterate = False + for old_value, new_value in zip(old_rf, new_rf): + mean = 0.5 * (old_value + new_value) + diff = math.fabs(0.5 * (old_value - new_value)) + ratio = diff / mean + if ratio > convergence_change: + reiterate = True + if not reiterate: + run.function(shutil.copyfile, prefix + 'RF.txt', 'response.txt') + run.function(shutil.copyfile, prefix + 'SF.mif', 'voxels.mif') + break + + app.cleanup(rf_in_path) + app.cleanup(mask_in_path) + + iteration += 1 + + progress.done() + + # If we've terminated due to hitting the iteration limiter, we still need to copy the output file(s) to the correct location + if 
os.path.exists('response.txt'): + app.console('Exited at iteration ' + str(iteration+1) + ' with ' + str(sf_voxel_count) + ' SF voxels due to unchanged RF coefficients') + else: + app.console('Exited after maximum ' + str(app.ARGS.max_iters) + ' iterations with ' + str(sf_voxel_count) + ' SF voxels') + run.function(shutil.copyfile, 'iter' + str(app.ARGS.max_iters-1) + '_RF.txt', 'response.txt') + run.function(shutil.copyfile, 'iter' + str(app.ARGS.max_iters-1) + '_SF.mif', 'voxels.mif') + + run.function(shutil.copyfile, 'response.txt', path.from_user(app.ARGS.output, False)) + if app.ARGS.voxels: + run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwi2response/tax/get_inputs.py b/python/mrtrix3/dwi2response/tax/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwi2response/tax/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwi2response/tax/usage.py b/python/mrtrix3/dwi2response/tax/usage.py new file mode 100644 index 0000000000..ffdad547bb --- /dev/null +++ b/python/mrtrix3/dwi2response/tax/usage.py @@ -0,0 +1,26 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('tax', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Use the Tax et al. (2014) recursive calibration algorithm for single-fibre voxel selection and response function estimation') + parser.add_citation('Tax, C. M.; Jeurissen, B.; Vos, S. B.; Viergever, M. A. & Leemans, A. Recursive calibration of the fiber response function for spherical deconvolution of diffusion MRI data. 
NeuroImage, 2014, 86, 67-80') + parser.add_argument('input', help='The input DWI') + parser.add_argument('output', help='The output response function text file') + options = parser.add_argument_group('Options specific to the \'tax\' algorithm') + options.add_argument('-peak_ratio', type=float, default=0.1, help='Second-to-first-peak amplitude ratio threshold') + options.add_argument('-max_iters', type=int, default=20, help='Maximum number of iterations') + options.add_argument('-convergence', type=float, default=0.5, help='Percentile change in any RF coefficient required to continue iterating') diff --git a/python/mrtrix3/dwi2response/tournier/__init__.py b/python/mrtrix3/dwi2response/tournier/__init__.py index 49c31bad0d..987950a25b 100644 --- a/python/mrtrix3/dwi2response/tournier/__init__.py +++ b/python/mrtrix3/dwi2response/tournier/__init__.py @@ -13,137 +13,5 @@ # # For more details, see http://www.mrtrix.org/. -import os, shutil -from mrtrix3 import MRtrixError -from mrtrix3 import app, image, matrix, path, run - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('tournier', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Use the Tournier et al. (2013) iterative algorithm for single-fibre voxel selection and response function estimation') - parser.add_citation('Tournier, J.-D.; Calamante, F. & Connelly, A. Determination of the appropriate b-value and number of gradient directions for high-angular-resolution diffusion-weighted imaging. NMR Biomedicine, 2013, 26, 1775-1786') - parser.add_argument('input', help='The input DWI') - parser.add_argument('output', help='The output response function text file') - options = parser.add_argument_group('Options specific to the \'tournier\' algorithm') - options.add_argument('-number', type=int, default=300, help='Number of single-fibre voxels to use when calculating response function') - options.add_argument('-iter_voxels', type=int, default=0, help='Number of single-fibre voxels to select when preparing for the next iteration (default = 10 x value given in -number)') - options.add_argument('-dilate', type=int, default=1, help='Number of mask dilation steps to apply when deriving voxel mask to test in the next iteration') - options.add_argument('-max_iters', type=int, default=10, help='Maximum number of iterations') - - - -def check_output_paths(): #pylint: disable=unused-variable - app.check_output_path(app.ARGS.output) - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def needs_single_shell(): #pylint: disable=unused-variable - return True - - - -def supports_mask(): #pylint: disable=unused-variable - return True - - - -def execute(): #pylint: disable=unused-variable - lmax_option = '' - if app.ARGS.lmax: - lmax_option = ' -lmax ' + app.ARGS.lmax - - if app.ARGS.max_iters < 2: - raise MRtrixError('Number of iterations must be at least 2') - - progress = app.ProgressBar('Optimising') - - iter_voxels = app.ARGS.iter_voxels - if iter_voxels == 0: - iter_voxels = 10*app.ARGS.number - elif iter_voxels < app.ARGS.number: - raise MRtrixError ('Number of selected voxels (-iter_voxels) must be greater than number of voxels desired (-number)') - - iteration = 0 - while iteration < app.ARGS.max_iters: - prefix = 'iter' + str(iteration) + '_' - - if iteration == 0: - rf_in_path = 'init_RF.txt' - mask_in_path = 'mask.mif' - init_rf = '1 -1 1' - with open(rf_in_path, 'w', encoding='utf-8') as init_rf_file: - 
init_rf_file.write(init_rf) - iter_lmax_option = ' -lmax 4' - else: - rf_in_path = 'iter' + str(iteration-1) + '_RF.txt' - mask_in_path = 'iter' + str(iteration-1) + '_SF_dilated.mif' - iter_lmax_option = lmax_option - - # Run CSD - run.command('dwi2fod csd dwi.mif ' + rf_in_path + ' ' + prefix + 'FOD.mif -mask ' + mask_in_path) - # Get amplitudes of two largest peaks, and direction of largest - run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix + 'fixel -peak_amp peak_amps.mif -mask ' + mask_in_path + ' -fmls_no_thresholds') - app.cleanup(prefix + 'FOD.mif') - if iteration: - app.cleanup(mask_in_path) - run.command('fixel2voxel ' + os.path.join(prefix + 'fixel', 'peak_amps.mif') + ' none ' + prefix + 'amps.mif -number 2') - run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'first_peaks.mif -coord 3 0 -axes 0,1,2') - run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'second_peaks.mif -coord 3 1 -axes 0,1,2') - app.cleanup(prefix + 'amps.mif') - run.command('fixel2peaks ' + os.path.join(prefix + 'fixel', 'directions.mif') + ' ' + prefix + 'first_dir.mif -number 1') - app.cleanup(prefix + 'fixel') - # Calculate the 'cost function' Donald derived for selecting single-fibre voxels - # https://github.com/MRtrix3/mrtrix3/pull/426 - # sqrt(|peak1|) * (1 - |peak2| / |peak1|)^2 - run.command('mrcalc ' + prefix + 'first_peaks.mif -sqrt 1 ' + prefix + 'second_peaks.mif ' + prefix + 'first_peaks.mif -div -sub 2 -pow -mult '+ prefix + 'CF.mif') - app.cleanup(prefix + 'first_peaks.mif') - app.cleanup(prefix + 'second_peaks.mif') - voxel_count = image.statistics(prefix + 'CF.mif').count - # Select the top-ranked voxels - run.command('mrthreshold ' + prefix + 'CF.mif -top ' + str(min([app.ARGS.number, voxel_count])) + ' ' + prefix + 'SF.mif') - # Generate a new response function based on this selection - run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix + 'first_dir.mif ' + prefix + 'RF.txt' + iter_lmax_option) - app.cleanup(prefix + 'first_dir.mif') - - new_rf = matrix.load_vector(prefix + 'RF.txt') - progress.increment('Optimising (' + str(iteration+1) + ' iterations, RF: [ ' + ', '.join('{:.3f}'.format(n) for n in new_rf) + '] )') - - # Should we terminate? 
- if iteration > 0: - run.command('mrcalc ' + prefix + 'SF.mif iter' + str(iteration-1) + '_SF.mif -sub ' + prefix + 'SF_diff.mif') - app.cleanup('iter' + str(iteration-1) + '_SF.mif') - max_diff = image.statistics(prefix + 'SF_diff.mif').max - app.cleanup(prefix + 'SF_diff.mif') - if not max_diff: - app.cleanup(prefix + 'CF.mif') - run.function(shutil.copyfile, prefix + 'RF.txt', 'response.txt') - run.function(shutil.move, prefix + 'SF.mif', 'voxels.mif') - break - - # Select a greater number of top single-fibre voxels, and dilate (within bounds of initial mask); - # these are the voxels that will be re-tested in the next iteration - run.command('mrthreshold ' + prefix + 'CF.mif -top ' + str(min([iter_voxels, voxel_count])) + ' - | maskfilter - dilate - -npass ' + str(app.ARGS.dilate) + ' | mrcalc mask.mif - -mult ' + prefix + 'SF_dilated.mif') - app.cleanup(prefix + 'CF.mif') - - iteration += 1 - - progress.done() - - # If terminating due to running out of iterations, still need to put the results in the appropriate location - if os.path.exists('response.txt'): - app.console('Convergence of SF voxel selection detected at iteration ' + str(iteration+1)) - else: - app.console('Exiting after maximum ' + str(app.ARGS.max_iters) + ' iterations') - run.function(shutil.copyfile, 'iter' + str(app.ARGS.max_iters-1) + '_RF.txt', 'response.txt') - run.function(shutil.move, 'iter' + str(app.ARGS.max_iters-1) + '_SF.mif', 'voxels.mif') - - run.function(shutil.copyfile, 'response.txt', path.from_user(app.ARGS.output, False)) - if app.ARGS.voxels: - run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) +NEEDS_SINGLE_SHELL = True +SUPPORTS_MASK = True diff --git a/python/mrtrix3/dwi2response/tournier/check_output_paths.py b/python/mrtrix3/dwi2response/tournier/check_output_paths.py new file mode 100644 index 0000000000..bff0fa0f2b --- /dev/null +++ b/python/mrtrix3/dwi2response/tournier/check_output_paths.py @@ -0,0 +1,19 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.output) diff --git a/python/mrtrix3/dwi2response/tournier/execute.py b/python/mrtrix3/dwi2response/tournier/execute.py new file mode 100644 index 0000000000..ebeb71855c --- /dev/null +++ b/python/mrtrix3/dwi2response/tournier/execute.py @@ -0,0 +1,112 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import os, shutil +from mrtrix3 import MRtrixError +from mrtrix3 import app, image, matrix, path, run + +def execute(): #pylint: disable=unused-variable + lmax_option = '' + if app.ARGS.lmax: + lmax_option = ' -lmax ' + app.ARGS.lmax + + if app.ARGS.max_iters < 2: + raise MRtrixError('Number of iterations must be at least 2') + + progress = app.ProgressBar('Optimising') + + iter_voxels = app.ARGS.iter_voxels + if iter_voxels == 0: + iter_voxels = 10*app.ARGS.number + elif iter_voxels < app.ARGS.number: + raise MRtrixError ('Number of selected voxels (-iter_voxels) must be greater than number of voxels desired (-number)') + + iteration = 0 + while iteration < app.ARGS.max_iters: + prefix = 'iter' + str(iteration) + '_' + + if iteration == 0: + rf_in_path = 'init_RF.txt' + mask_in_path = 'mask.mif' + init_rf = '1 -1 1' + with open(rf_in_path, 'w', encoding='utf-8') as init_rf_file: + init_rf_file.write(init_rf) + iter_lmax_option = ' -lmax 4' + else: + rf_in_path = 'iter' + str(iteration-1) + '_RF.txt' + mask_in_path = 'iter' + str(iteration-1) + '_SF_dilated.mif' + iter_lmax_option = lmax_option + + # Run CSD + run.command('dwi2fod csd dwi.mif ' + rf_in_path + ' ' + prefix + 'FOD.mif -mask ' + mask_in_path) + # Get amplitudes of two largest peaks, and direction of largest + run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix + 'fixel -peak_amp peak_amps.mif -mask ' + mask_in_path + ' -fmls_no_thresholds') + app.cleanup(prefix + 'FOD.mif') + if iteration: + app.cleanup(mask_in_path) + run.command('fixel2voxel ' + os.path.join(prefix + 'fixel', 'peak_amps.mif') + ' none ' + prefix + 'amps.mif -number 2') + run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'first_peaks.mif -coord 3 0 -axes 0,1,2') + run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'second_peaks.mif -coord 3 1 -axes 0,1,2') + app.cleanup(prefix + 'amps.mif') + run.command('fixel2peaks ' + os.path.join(prefix + 'fixel', 'directions.mif') + ' ' + prefix + 'first_dir.mif -number 1') + app.cleanup(prefix + 'fixel') + # Calculate the 'cost function' Donald derived for selecting single-fibre voxels + # https://github.com/MRtrix3/mrtrix3/pull/426 + # sqrt(|peak1|) * (1 - |peak2| / |peak1|)^2 + run.command('mrcalc ' + prefix + 'first_peaks.mif -sqrt 1 ' + prefix + 'second_peaks.mif ' + prefix + 'first_peaks.mif -div -sub 2 -pow -mult '+ prefix + 'CF.mif') + app.cleanup(prefix + 'first_peaks.mif') + app.cleanup(prefix + 'second_peaks.mif') + voxel_count = image.statistics(prefix + 'CF.mif').count + # Select the top-ranked voxels + run.command('mrthreshold ' + prefix + 'CF.mif -top ' + str(min([app.ARGS.number, voxel_count])) + ' ' + prefix + 'SF.mif') + # Generate a new response function based on this selection + run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix + 'first_dir.mif ' + prefix + 'RF.txt' + iter_lmax_option) + app.cleanup(prefix + 'first_dir.mif') + + new_rf = matrix.load_vector(prefix + 'RF.txt') + progress.increment('Optimising (' + str(iteration+1) + ' iterations, RF: [ ' + ', '.join('{:.3f}'.format(n) for n in new_rf) + '] )') + + # Should we terminate? 
+ if iteration > 0: + run.command('mrcalc ' + prefix + 'SF.mif iter' + str(iteration-1) + '_SF.mif -sub ' + prefix + 'SF_diff.mif') + app.cleanup('iter' + str(iteration-1) + '_SF.mif') + max_diff = image.statistics(prefix + 'SF_diff.mif').max + app.cleanup(prefix + 'SF_diff.mif') + if not max_diff: + app.cleanup(prefix + 'CF.mif') + run.function(shutil.copyfile, prefix + 'RF.txt', 'response.txt') + run.function(shutil.move, prefix + 'SF.mif', 'voxels.mif') + break + + # Select a greater number of top single-fibre voxels, and dilate (within bounds of initial mask); + # these are the voxels that will be re-tested in the next iteration + run.command('mrthreshold ' + prefix + 'CF.mif -top ' + str(min([iter_voxels, voxel_count])) + ' - | maskfilter - dilate - -npass ' + str(app.ARGS.dilate) + ' | mrcalc mask.mif - -mult ' + prefix + 'SF_dilated.mif') + app.cleanup(prefix + 'CF.mif') + + iteration += 1 + + progress.done() + + # If terminating due to running out of iterations, still need to put the results in the appropriate location + if os.path.exists('response.txt'): + app.console('Convergence of SF voxel selection detected at iteration ' + str(iteration+1)) + else: + app.console('Exiting after maximum ' + str(app.ARGS.max_iters) + ' iterations') + run.function(shutil.copyfile, 'iter' + str(app.ARGS.max_iters-1) + '_RF.txt', 'response.txt') + run.function(shutil.move, 'iter' + str(app.ARGS.max_iters-1) + '_SF.mif', 'voxels.mif') + + run.function(shutil.copyfile, 'response.txt', path.from_user(app.ARGS.output, False)) + if app.ARGS.voxels: + run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwi2response/tournier/get_inputs.py b/python/mrtrix3/dwi2response/tournier/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwi2response/tournier/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwi2response/tournier/usage.py b/python/mrtrix3/dwi2response/tournier/usage.py new file mode 100644 index 0000000000..60f9f034c4 --- /dev/null +++ b/python/mrtrix3/dwi2response/tournier/usage.py @@ -0,0 +1,27 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 
2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('tournier', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Use the Tournier et al. (2013) iterative algorithm for single-fibre voxel selection and response function estimation') + parser.add_citation('Tournier, J.-D.; Calamante, F. & Connelly, A. Determination of the appropriate b-value and number of gradient directions for high-angular-resolution diffusion-weighted imaging. NMR Biomedicine, 2013, 26, 1775-1786') + parser.add_argument('input', help='The input DWI') + parser.add_argument('output', help='The output response function text file') + options = parser.add_argument_group('Options specific to the \'tournier\' algorithm') + options.add_argument('-number', type=int, default=300, help='Number of single-fibre voxels to use when calculating response function') + options.add_argument('-iter_voxels', type=int, default=0, help='Number of single-fibre voxels to select when preparing for the next iteration (default = 10 x value given in -number)') + options.add_argument('-dilate', type=int, default=1, help='Number of mask dilation steps to apply when deriving voxel mask to test in the next iteration') + options.add_argument('-max_iters', type=int, default=10, help='Maximum number of iterations') diff --git a/python/mrtrix3/dwi2response/usage.py b/python/mrtrix3/dwi2response/usage.py new file mode 100644 index 0000000000..d556753015 --- /dev/null +++ b/python/mrtrix3/dwi2response/usage.py @@ -0,0 +1,40 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import algorithm, app, _version #pylint: disable=no-name-in-module + +def usage(cmdline): #pylint: disable=unused-variable + + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Thijs Dhollander (thijs.dhollander@gmail.com)') + cmdline.set_synopsis('Estimate response function(s) for spherical deconvolution') + cmdline.add_description('dwi2response offers different algorithms for performing various types of response function estimation. The name of the algorithm must appear as the first argument on the command-line after \'dwi2response\'. The subsequent arguments and options depend on the particular algorithm being invoked.') + cmdline.add_description('Each algorithm available has its own help page, including necessary references; e.g. 
to see the help page of the \'fa\' algorithm, type \'dwi2response fa\'.') + cmdline.add_description('More information on response function estimation for spherical deconvolution can be found at the following link: \n' + 'https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/constrained_spherical_deconvolution/response_function_estimation.html') + cmdline.add_description('Note that if the -mask command-line option is not specified, the MRtrix3 command dwi2mask will automatically be called to ' + 'derive an initial voxel exclusion mask. ' + 'More information on mask derivation from DWI data can be found at: ' + 'https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/dwi_preprocessing/masking.html') + + # General options + common_options = cmdline.add_argument_group('General dwi2response options') + common_options.add_argument('-mask', help='Provide an initial mask for response voxel selection') + common_options.add_argument('-voxels', help='Output an image showing the final voxel selection(s)') + common_options.add_argument('-shells', help='The b-value(s) to use in response function estimation (comma-separated list in case of multiple b-values, b=0 must be included explicitly)') + common_options.add_argument('-lmax', help='The maximum harmonic degree(s) for response function estimation (comma-separated list in case of multiple b-values)') + app.add_dwgrad_import_options(cmdline) + + # Import the command-line settings for all algorithms found in the relevant directory + algorithm.usage(cmdline) diff --git a/python/mrtrix3/dwibiascorrect/__init__.py b/python/mrtrix3/dwibiascorrect/__init__.py index aec62396a4..e69de29bb2 100644 --- a/python/mrtrix3/dwibiascorrect/__init__.py +++ b/python/mrtrix3/dwibiascorrect/__init__.py @@ -1,75 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import algorithm, app, _version #pylint: disable=no-name-in-module, import-outside-toplevel - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Perform B1 field inhomogeneity correction for a DWI volume series') - cmdline.add_description('Note that if the -mask command-line option is not specified, the MRtrix3 command dwi2mask will automatically be called to ' - 'derive a mask that will be passed to the relevant bias field estimation command. 
' - 'More information on mask derivation from DWI data can be found at the following link: \n' - 'https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/dwi_preprocessing/masking.html') - common_options = cmdline.add_argument_group('Options common to all dwibiascorrect algorithms') - common_options.add_argument('-mask', metavar='image', help='Manually provide a mask image for bias field estimation') - common_options.add_argument('-bias', metavar='image', help='Output the estimated bias field') - app.add_dwgrad_import_options(cmdline) - - # Import the command-line settings for all algorithms found in the relevant directory - algorithm.usage(cmdline) - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - - # Find out which algorithm the user has requested - alg = algorithm.get(app.ARGS.algorithm) - - app.check_output_path(app.ARGS.output) - app.check_output_path(app.ARGS.bias) - alg.check_output_paths() - - app.make_scratch_dir() - - grad_import_option = app.read_dwgrad_import_options() - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('in.mif') + grad_import_option) - if app.ARGS.mask: - run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') - - alg.get_inputs() - - app.goto_scratch_dir() - - # Make sure it's actually a DWI that's been passed - dwi_header = image.Header('in.mif') - if len(dwi_header.size()) != 4: - raise MRtrixError('Input image must be a 4D image') - if 'dw_scheme' not in dwi_header.keyval(): - raise MRtrixError('No valid DW gradient scheme provided or present in image header') - if len(dwi_header.keyval()['dw_scheme']) != dwi_header.size()[3]: - raise MRtrixError('DW gradient scheme contains different number of entries (' + str(len(dwi_header.keyval()['dw_scheme'])) + ' to number of volumes in DWIs (' + dwi_header.size()[3] + ')') - - # Generate a brain mask if required, or check the mask if provided by the user - if app.ARGS.mask: - if not image.match('in.mif', 'mask.mif', up_to_dim=3): - raise MRtrixError('Provided mask image does not match input DWI') - else: - run.command('dwi2mask ' + CONFIG['Dwi2maskAlgorithm'] + ' in.mif mask.mif') - - # From here, the script splits depending on what estimation algorithm is being used - alg.execute() diff --git a/python/mrtrix3/dwibiascorrect/ants/__init__.py b/python/mrtrix3/dwibiascorrect/ants/__init__.py index baf8c7c963..e55c2de481 100644 --- a/python/mrtrix3/dwibiascorrect/ants/__init__.py +++ b/python/mrtrix3/dwibiascorrect/ants/__init__.py @@ -13,74 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -import shutil -from mrtrix3 import MRtrixError -from mrtrix3 import app, path, run - - - OPT_N4_BIAS_FIELD_CORRECTION = { 's': ('4','shrink-factor applied to spatial dimensions'), 'b':('[100,3]','[initial mesh resolution in mm, spline order] This value is optimised for human adult data and needs to be adjusted for rodent data.'), 'c':('[1000,0.0]', '[numberOfIterations,convergenceThreshold]')} - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('ants', parents=[base_parser]) - parser.set_author('Robert E. 
Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Perform DWI bias field correction using the N4 algorithm as provided in ANTs') - parser.add_citation('Tustison, N.; Avants, B.; Cook, P.; Zheng, Y.; Egan, A.; Yushkevich, P. & Gee, J. N4ITK: Improved N3 Bias Correction. IEEE Transactions on Medical Imaging, 2010, 29, 1310-1320', is_external=True) - ants_options = parser.add_argument_group('Options for ANTs N4BiasFieldCorrection command') - for key in sorted(OPT_N4_BIAS_FIELD_CORRECTION): - ants_options.add_argument('-ants.'+key, metavar=OPT_N4_BIAS_FIELD_CORRECTION[key][0], help='N4BiasFieldCorrection option -%s. %s' % (key,OPT_N4_BIAS_FIELD_CORRECTION[key][1])) - parser.add_argument('input', help='The input image series to be corrected') - parser.add_argument('output', help='The output corrected image series') - - - -def check_output_paths(): #pylint: disable=unused-variable - pass - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def execute(): #pylint: disable=unused-variable - if not shutil.which('N4BiasFieldCorrection'): - raise MRtrixError('Could not find ANTS program N4BiasFieldCorrection; please check installation') - - for key in sorted(OPT_N4_BIAS_FIELD_CORRECTION): - if hasattr(app.ARGS, 'ants.' + key): - val = getattr(app.ARGS, 'ants.' + key) - if val is not None: - OPT_N4_BIAS_FIELD_CORRECTION[key] = (val, 'user defined') - ants_options = ' '.join(['-%s %s' %(k, v[0]) for k, v in OPT_N4_BIAS_FIELD_CORRECTION.items()]) - - # Generate a mean b=0 image - run.command('dwiextract in.mif - -bzero | mrmath - mean mean_bzero.mif -axis 3') - - # Use the brain mask as a weights image rather than a mask; means that voxels at the edge of the mask - # will have a smoothly-varying bias field correction applied, rather than multiplying by 1.0 outside the mask - run.command('mrconvert mean_bzero.mif mean_bzero.nii -strides +1,+2,+3') - run.command('mrconvert mask.mif mask.nii -strides +1,+2,+3') - init_bias_path = 'init_bias.nii' - corrected_path = 'corrected.nii' - run.command('N4BiasFieldCorrection -d 3 -i mean_bzero.nii -w mask.nii -o [' + corrected_path + ',' + init_bias_path + '] ' + ants_options) - - # N4 can introduce large differences between subjects via a global scaling of the bias field - # Estimate this scaling based on the total integral of the pre- and post-correction images within the brain mask - input_integral = float(run.command('mrcalc mean_bzero.mif mask.mif -mult - | mrmath - sum - -axis 0 | mrmath - sum - -axis 1 | mrmath - sum - -axis 2 | mrdump -').stdout) - output_integral = float(run.command('mrcalc ' + corrected_path + ' mask.mif -mult - | mrmath - sum - -axis 0 | mrmath - sum - -axis 1 | mrmath - sum - -axis 2 | mrdump -').stdout) - multiplier = output_integral / input_integral - app.debug('Integrals: Input = ' + str(input_integral) + '; Output = ' + str(output_integral) + '; resulting multiplier = ' + str(multiplier)) - run.command('mrcalc ' + init_bias_path + ' ' + str(multiplier) + ' -mult bias.mif') - - # Common final steps for all algorithms - run.command('mrcalc in.mif bias.mif -div result.mif') - run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) - if app.ARGS.bias: - run.command('mrconvert bias.mif ' + path.from_user(app.ARGS.bias), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwibiascorrect/ants/check_output_paths.py 
b/python/mrtrix3/dwibiascorrect/ants/check_output_paths.py new file mode 100644 index 0000000000..825492a82d --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/ants/check_output_paths.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def check_output_paths(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwibiascorrect/ants/execute.py b/python/mrtrix3/dwibiascorrect/ants/execute.py new file mode 100644 index 0000000000..d32ffd3170 --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/ants/execute.py @@ -0,0 +1,55 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import shutil +from mrtrix3 import MRtrixError +from mrtrix3 import app, path, run +from . import OPT_N4_BIAS_FIELD_CORRECTION + +def execute(): #pylint: disable=unused-variable + if not shutil.which('N4BiasFieldCorrection'): + raise MRtrixError('Could not find ANTS program N4BiasFieldCorrection; please check installation') + + for key in sorted(OPT_N4_BIAS_FIELD_CORRECTION): + if hasattr(app.ARGS, 'ants.' + key): + val = getattr(app.ARGS, 'ants.' 
+ key) + if val is not None: + OPT_N4_BIAS_FIELD_CORRECTION[key] = (val, 'user defined') + ants_options = ' '.join(['-%s %s' %(k, v[0]) for k, v in OPT_N4_BIAS_FIELD_CORRECTION.items()]) + + # Generate a mean b=0 image + run.command('dwiextract in.mif - -bzero | mrmath - mean mean_bzero.mif -axis 3') + + # Use the brain mask as a weights image rather than a mask; means that voxels at the edge of the mask + # will have a smoothly-varying bias field correction applied, rather than multiplying by 1.0 outside the mask + run.command('mrconvert mean_bzero.mif mean_bzero.nii -strides +1,+2,+3') + run.command('mrconvert mask.mif mask.nii -strides +1,+2,+3') + init_bias_path = 'init_bias.nii' + corrected_path = 'corrected.nii' + run.command('N4BiasFieldCorrection -d 3 -i mean_bzero.nii -w mask.nii -o [' + corrected_path + ',' + init_bias_path + '] ' + ants_options) + + # N4 can introduce large differences between subjects via a global scaling of the bias field + # Estimate this scaling based on the total integral of the pre- and post-correction images within the brain mask + input_integral = float(run.command('mrcalc mean_bzero.mif mask.mif -mult - | mrmath - sum - -axis 0 | mrmath - sum - -axis 1 | mrmath - sum - -axis 2 | mrdump -').stdout) + output_integral = float(run.command('mrcalc ' + corrected_path + ' mask.mif -mult - | mrmath - sum - -axis 0 | mrmath - sum - -axis 1 | mrmath - sum - -axis 2 | mrdump -').stdout) + multiplier = output_integral / input_integral + app.debug('Integrals: Input = ' + str(input_integral) + '; Output = ' + str(output_integral) + '; resulting multiplier = ' + str(multiplier)) + run.command('mrcalc ' + init_bias_path + ' ' + str(multiplier) + ' -mult bias.mif') + + # Common final steps for all algorithms + run.command('mrcalc in.mif bias.mif -div result.mif') + run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) + if app.ARGS.bias: + run.command('mrconvert bias.mif ' + path.from_user(app.ARGS.bias), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwibiascorrect/ants/get_inputs.py b/python/mrtrix3/dwibiascorrect/ants/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/ants/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwibiascorrect/ants/usage.py b/python/mrtrix3/dwibiascorrect/ants/usage.py new file mode 100644 index 0000000000..1d00b8c1d1 --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/ants/usage.py @@ -0,0 +1,27 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from . import OPT_N4_BIAS_FIELD_CORRECTION + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('ants', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Perform DWI bias field correction using the N4 algorithm as provided in ANTs') + parser.add_citation('Tustison, N.; Avants, B.; Cook, P.; Zheng, Y.; Egan, A.; Yushkevich, P. & Gee, J. N4ITK: Improved N3 Bias Correction. IEEE Transactions on Medical Imaging, 2010, 29, 1310-1320', is_external=True) + ants_options = parser.add_argument_group('Options for ANTs N4BiasFieldCorrection command') + for key in sorted(OPT_N4_BIAS_FIELD_CORRECTION): + ants_options.add_argument('-ants.'+key, metavar=OPT_N4_BIAS_FIELD_CORRECTION[key][0], help='N4BiasFieldCorrection option -%s. %s' % (key,OPT_N4_BIAS_FIELD_CORRECTION[key][1])) + parser.add_argument('input', help='The input image series to be corrected') + parser.add_argument('output', help='The output corrected image series') diff --git a/python/mrtrix3/dwibiascorrect/execute.py b/python/mrtrix3/dwibiascorrect/execute.py new file mode 100644 index 0000000000..81671a33c3 --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/execute.py @@ -0,0 +1,56 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
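For illustration of the option-handling mechanism in the ants files above: a minimal standalone sketch of how the OPT_N4_BIAS_FIELD_CORRECTION defaults collapse into the argument string handed to N4BiasFieldCorrection. The user_overrides dictionary below is a hypothetical stand-in for the getattr(app.ARGS, 'ants.' + key) lookups performed in ants/execute.py; everything else mirrors the expressions shown in the diff.

    # Defaults as declared in dwibiascorrect/ants/__init__.py above
    OPT_N4_BIAS_FIELD_CORRECTION = {'s': ('4', 'shrink-factor applied to spatial dimensions'),
                                    'b': ('[100,3]', '[initial mesh resolution in mm, spline order]'),
                                    'c': ('[1000,0.0]', '[numberOfIterations,convergenceThreshold]')}

    # Hypothetical user input, standing in for command-line options such as "-ants.s 2"
    user_overrides = {'s': '2'}
    for key, value in user_overrides.items():
        OPT_N4_BIAS_FIELD_CORRECTION[key] = (value, 'user defined')

    # Same join expression as in ants/execute.py: one "-<key> <value>" pair per entry
    ants_options = ' '.join('-%s %s' % (key, entry[0])
                            for key, entry in OPT_N4_BIAS_FIELD_CORRECTION.items())
    print(ants_options)  # -s 2 -b [100,3] -c [1000,0.0]  (dict insertion order, Python 3.7+)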
+ +from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module +from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module + +def execute(): #pylint: disable=unused-variable + + # Find out which algorithm the user has requested + alg = algorithm.get(app.ARGS.algorithm) + + app.check_output_path(app.ARGS.output) + app.check_output_path(app.ARGS.bias) + alg.check_output_paths() + + app.make_scratch_dir() + + grad_import_option = app.read_dwgrad_import_options() + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('in.mif') + grad_import_option) + if app.ARGS.mask: + run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') + + alg.get_inputs() + + app.goto_scratch_dir() + + # Make sure it's actually a DWI that's been passed + dwi_header = image.Header('in.mif') + if len(dwi_header.size()) != 4: + raise MRtrixError('Input image must be a 4D image') + if 'dw_scheme' not in dwi_header.keyval(): + raise MRtrixError('No valid DW gradient scheme provided or present in image header') + if len(dwi_header.keyval()['dw_scheme']) != dwi_header.size()[3]: + raise MRtrixError('DW gradient scheme contains different number of entries (' + str(len(dwi_header.keyval()['dw_scheme'])) + ' to number of volumes in DWIs (' + dwi_header.size()[3] + ')') + + # Generate a brain mask if required, or check the mask if provided by the user + if app.ARGS.mask: + if not image.match('in.mif', 'mask.mif', up_to_dim=3): + raise MRtrixError('Provided mask image does not match input DWI') + else: + run.command('dwi2mask ' + CONFIG['Dwi2maskAlgorithm'] + ' in.mif mask.mif') + + # From here, the script splits depending on what estimation algorithm is being used + alg.execute() diff --git a/python/mrtrix3/dwibiascorrect/fsl/__init__.py b/python/mrtrix3/dwibiascorrect/fsl/__init__.py index c82ce84707..e69de29bb2 100644 --- a/python/mrtrix3/dwibiascorrect/fsl/__init__.py +++ b/python/mrtrix3/dwibiascorrect/fsl/__init__.py @@ -1,70 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -import os -from mrtrix3 import MRtrixError -from mrtrix3 import app, fsl, path, run, utils - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('fsl', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - parser.set_synopsis('Perform DWI bias field correction using the \'fast\' command as provided in FSL') - parser.add_citation('Zhang, Y.; Brady, M. & Smith, S. Segmentation of brain MR images through a hidden Markov random field model and the expectation-maximization algorithm. IEEE Transactions on Medical Imaging, 2001, 20, 45-57', is_external=True) - parser.add_citation('Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. 
R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', is_external=True) - parser.add_description('The FSL \'fast\' command only estimates the bias field within a brain mask, and cannot extrapolate this smoothly-varying field beyond the defined mask. As such, this algorithm by necessity introduces a hard masking of the input DWI. Since this attribute may interfere with the purpose of using the command (e.g. correction of a bias field is commonly used to improve brain mask estimation), use of this particular algorithm is generally not recommended.') - parser.add_argument('input', help='The input image series to be corrected') - parser.add_argument('output', help='The output corrected image series') - - - -def check_output_paths(): #pylint: disable=unused-variable - pass - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def execute(): #pylint: disable=unused-variable - if utils.is_windows(): - raise MRtrixError('Script cannot run using FSL on Windows due to FSL dependency') - - if not os.environ.get('FSLDIR', ''): - raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') - - fast_cmd = fsl.exe_name('fast') - - app.warn('Use of fsl algorithm in dwibiascorrect script is discouraged due to its strong dependence ' + \ - 'on brain masking (specifically its inability to correct voxels outside of this mask).' + \ - 'Use of the ants algorithm is recommended for quantitative DWI analyses.') - - # Generate a mean b=0 image - run.command('dwiextract in.mif - -bzero | mrmath - mean mean_bzero.mif -axis 3') - - # FAST doesn't accept a mask input; therefore need to explicitly mask the input image - run.command('mrcalc mean_bzero.mif mask.mif -mult - | mrconvert - mean_bzero_masked.nii -strides -1,+2,+3') - run.command(fast_cmd + ' -t 2 -o fast -n 3 -b mean_bzero_masked.nii') - bias_path = fsl.find_image('fast_bias') - - # Rather than using a bias field estimate of 1.0 outside the brain mask, zero-fill the - # output image outside of this mask - run.command('mrcalc in.mif ' + bias_path + ' -div mask.mif -mult result.mif') - run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) - if app.ARGS.bias: - run.command('mrconvert ' + bias_path + ' ' + path.from_user(app.ARGS.bias), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwibiascorrect/fsl/check_output_paths.py b/python/mrtrix3/dwibiascorrect/fsl/check_output_paths.py new file mode 100644 index 0000000000..825492a82d --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/fsl/check_output_paths.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 
2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def check_output_paths(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwibiascorrect/fsl/execute.py b/python/mrtrix3/dwibiascorrect/fsl/execute.py new file mode 100644 index 0000000000..17abcd780a --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/fsl/execute.py @@ -0,0 +1,46 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import os +from mrtrix3 import MRtrixError +from mrtrix3 import app, fsl, path, run, utils + +def execute(): #pylint: disable=unused-variable + if utils.is_windows(): + raise MRtrixError('Script cannot run using FSL on Windows due to FSL dependency') + + if not os.environ.get('FSLDIR', ''): + raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') + + fast_cmd = fsl.exe_name('fast') + + app.warn('Use of fsl algorithm in dwibiascorrect script is discouraged due to its strong dependence ' + \ + 'on brain masking (specifically its inability to correct voxels outside of this mask).' + \ + 'Use of the ants algorithm is recommended for quantitative DWI analyses.') + + # Generate a mean b=0 image + run.command('dwiextract in.mif - -bzero | mrmath - mean mean_bzero.mif -axis 3') + + # FAST doesn't accept a mask input; therefore need to explicitly mask the input image + run.command('mrcalc mean_bzero.mif mask.mif -mult - | mrconvert - mean_bzero_masked.nii -strides -1,+2,+3') + run.command(fast_cmd + ' -t 2 -o fast -n 3 -b mean_bzero_masked.nii') + bias_path = fsl.find_image('fast_bias') + + # Rather than using a bias field estimate of 1.0 outside the brain mask, zero-fill the + # output image outside of this mask + run.command('mrcalc in.mif ' + bias_path + ' -div mask.mif -mult result.mif') + run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) + if app.ARGS.bias: + run.command('mrconvert ' + bias_path + ' ' + path.from_user(app.ARGS.bias), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwibiascorrect/fsl/get_inputs.py b/python/mrtrix3/dwibiascorrect/fsl/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/fsl/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
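The final mrcalc call in fsl/execute.py above is where the hard masking noted in the algorithm description becomes explicit: the input series is divided by the FAST bias estimate and then multiplied by the binary mask, so voxels outside the mask are zero-filled rather than left scaled by an assumed field of 1.0. A small numeric sketch of that voxel-wise arithmetic (numpy is used purely for illustration here; the script itself operates on images via mrcalc):

    import numpy as np

    dwi  = np.array([100.0, 80.0, 60.0])  # toy signal in three voxels
    bias = np.array([1.25, 1.0, 0.8])     # FAST bias estimate; only meaningful inside the mask
    mask = np.array([1.0, 1.0, 0.0])      # third voxel lies outside the brain mask

    # Equivalent of: mrcalc in.mif <fast_bias> -div mask.mif -mult result.mif
    result = dwi / bias * mask
    print(result)  # [80. 80.  0.] -> corrected inside the mask, zero-filled outside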
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwibiascorrect/fsl/usage.py b/python/mrtrix3/dwibiascorrect/fsl/usage.py new file mode 100644 index 0000000000..8d75d086c1 --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/fsl/usage.py @@ -0,0 +1,24 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('fsl', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + parser.set_synopsis('Perform DWI bias field correction using the \'fast\' command as provided in FSL') + parser.add_citation('Zhang, Y.; Brady, M. & Smith, S. Segmentation of brain MR images through a hidden Markov random field model and the expectation-maximization algorithm. IEEE Transactions on Medical Imaging, 2001, 20, 45-57', is_external=True) + parser.add_citation('Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', is_external=True) + parser.add_description('The FSL \'fast\' command only estimates the bias field within a brain mask, and cannot extrapolate this smoothly-varying field beyond the defined mask. As such, this algorithm by necessity introduces a hard masking of the input DWI. Since this attribute may interfere with the purpose of using the command (e.g. correction of a bias field is commonly used to improve brain mask estimation), use of this particular algorithm is generally not recommended.') + parser.add_argument('input', help='The input image series to be corrected') + parser.add_argument('output', help='The output corrected image series') diff --git a/python/mrtrix3/dwibiascorrect/mtnorm/__init__.py b/python/mrtrix3/dwibiascorrect/mtnorm/__init__.py index dbbcd5350c..cb68f40883 100644 --- a/python/mrtrix3/dwibiascorrect/mtnorm/__init__.py +++ b/python/mrtrix3/dwibiascorrect/mtnorm/__init__.py @@ -13,123 +13,5 @@ # # For more details, see http://www.mrtrix.org/. 
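With both the ants and fsl algorithms now split into usage.py, check_output_paths.py, get_inputs.py and execute.py, every dwibiascorrect algorithm exposes the same four-function interface, and the parent execute() shown earlier simply calls check_output_paths(), get_inputs() and execute() on whichever module algorithm.get(app.ARGS.algorithm) returns. A rough sketch of that dispatch pattern; importlib is used here only as a stand-in for the mrtrix3.algorithm.get() helper, whose implementation is not part of this hunk, and the names are illustrative:

    import importlib

    def run_algorithm(parent_package, algorithm_name):
        # e.g. parent_package='mrtrix3.dwibiascorrect', algorithm_name='ants' or 'fsl'
        module = importlib.import_module('%s.%s' % (parent_package, algorithm_name))
        module.check_output_paths()  # verify the requested outputs can be written
        module.get_inputs()          # stage any algorithm-specific inputs into the scratch directory
        module.execute()             # run the algorithm itself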
-from mrtrix3 import MRtrixError -from mrtrix3 import app, image, path, run - - LMAXES_MULTI = [4, 0, 0] LMAXES_SINGLE = [4, 0] - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('mtnorm', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Arshiya Sangchooli (asangchooli@student.unimelb.edu.au)') - parser.set_synopsis('Perform DWI bias field correction using the "mtnormalise" command') - parser.add_description('This algorithm bases its operation almost entirely on the utilisation of multi-tissue ' - 'decomposition information to estimate an underlying B1 receive field, as is implemented ' - 'in the MRtrix3 command "mtnormalise". Its typical usage is however slightly different, ' - 'in that the primary output of the command is not the bias-field-corrected FODs, but a ' - 'bias-field-corrected version of the DWI series.') - parser.add_description('The operation of this script is a subset of that performed by the script "dwibiasnormmask". ' - 'Many users may find that comprehensive solution preferable; this dwibiascorrect algorithm is ' - 'nevertheless provided to demonstrate specifically the bias field correction portion of that command.') - parser.add_description('The ODFs estimated within this optimisation procedure are by default of lower maximal spherical harmonic ' - 'degree than what would be advised for analysis. This is done for computational efficiency. This ' - 'behaviour can be modified through the -lmax command-line option.') - parser.add_citation('Jeurissen, B; Tournier, J-D; Dhollander, T; Connelly, A & Sijbers, J. ' - 'Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data. ' - 'NeuroImage, 2014, 103, 411-426') - parser.add_citation('Raffelt, D.; Dhollander, T.; Tournier, J.-D.; Tabbara, R.; Smith, R. E.; Pierre, E. & Connelly, A. ' - 'Bias Field Correction and Intensity Normalisation for Quantitative Analysis of Apparent Fibre Density. ' - 'In Proc. ISMRM, 2017, 26, 3541') - parser.add_citation('Dhollander, T.; Tabbara, R.; Rosnarho-Tornstrand, J.; Tournier, J.-D.; Raffelt, D. & Connelly, A. ' - 'Multi-tissue log-domain intensity and inhomogeneity normalisation for quantitative apparent fibre density. ' - 'In Proc. 
ISMRM, 2021, 29, 2472') - parser.add_argument('input', help='The input image series to be corrected') - parser.add_argument('output', help='The output corrected image series') - options = parser.add_argument_group('Options specific to the "mtnorm" algorithm') - options.add_argument('-lmax', - metavar='values', - help='The maximum spherical harmonic degree for the estimated FODs (see Description); ' - 'defaults are "' + ','.join(str(item) for item in LMAXES_MULTI) + '" for multi-shell and "' + ','.join(str(item) for item in LMAXES_SINGLE) + '" for single-shell data)') - - - -def check_output_paths(): #pylint: disable=unused-variable - pass - - - -def get_inputs(): #pylint: disable=unused-variable - pass - - - -def execute(): #pylint: disable=unused-variable - - # Verify user inputs - lmax = None - if app.ARGS.lmax: - try: - lmax = [int(i) for i in app.ARGS.lmax.split(',')] - except ValueError as exc: - raise MRtrixError('Values provided to -lmax option must be a comma-separated list of integers') from exc - if any(value < 0 or value % 2 for value in lmax): - raise MRtrixError('lmax values must be non-negative even integers') - if len(lmax) not in [2, 3]: - raise MRtrixError('Length of lmax vector expected to be either 2 or 3') - - # Determine whether we are working with single-shell or multi-shell data - bvalues = [ - int(round(float(value))) - for value in image.mrinfo('in.mif', 'shell_bvalues') \ - .strip().split()] - multishell = (len(bvalues) > 2) - if lmax is None: - lmax = LMAXES_MULTI if multishell else LMAXES_SINGLE - elif len(lmax) == 3 and not multishell: - raise MRtrixError('User specified 3 lmax values for three-tissue decomposition, but input DWI is not multi-shell') - - # RF estimation and multi-tissue CSD - class Tissue(object): #pylint: disable=useless-object-inheritance - def __init__(self, name): - self.name = name - self.tissue_rf = 'response_' + name + '.txt' - self.fod = 'FOD_' + name + '.mif' - self.fod_norm = 'FODnorm_' + name + '.mif' - - tissues = [Tissue('WM'), Tissue('GM'), Tissue('CSF')] - - run.command('dwi2response dhollander in.mif' - + (' -mask mask.mif' if app.ARGS.mask else '') - + ' ' - + ' '.join(tissue.tissue_rf for tissue in tissues)) - - # Immediately remove GM if we can't deal with it - if not multishell: - app.cleanup(tissues[1].tissue_rf) - tissues = tissues[::2] - - run.command('dwi2fod msmt_csd in.mif' - + ' -lmax ' + ','.join(str(item) for item in lmax) - + ' ' - + ' '.join(tissue.tissue_rf + ' ' + tissue.fod - for tissue in tissues)) - - run.command('maskfilter mask.mif erode - | ' - + 'mtnormalise -mask - -balanced' - + ' -check_norm field.mif ' - + ' '.join(tissue.fod + ' ' + tissue.fod_norm - for tissue in tissues)) - app.cleanup([tissue.fod for tissue in tissues]) - app.cleanup([tissue.fod_norm for tissue in tissues]) - app.cleanup([tissue.tissue_rf for tissue in tissues]) - - run.command('mrcalc in.mif field.mif -div - | ' - 'mrconvert - '+ path.from_user(app.ARGS.output), - mrconvert_keyval=path.from_user(app.ARGS.input, False), - force=app.FORCE_OVERWRITE) - - if app.ARGS.bias: - run.command('mrconvert field.mif ' + path.from_user(app.ARGS.bias), - mrconvert_keyval=path.from_user(app.ARGS.input, False), - force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwibiascorrect/mtnorm/check_output_paths.py b/python/mrtrix3/dwibiascorrect/mtnorm/check_output_paths.py new file mode 100644 index 0000000000..825492a82d --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/mtnorm/check_output_paths.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the 
MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def check_output_paths(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwibiascorrect/mtnorm/execute.py b/python/mrtrix3/dwibiascorrect/mtnorm/execute.py new file mode 100644 index 0000000000..f405d82b7a --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/mtnorm/execute.py @@ -0,0 +1,88 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import MRtrixError +from mrtrix3 import app, image, path, run +from . import LMAXES_MULTI, LMAXES_SINGLE + +def execute(): #pylint: disable=unused-variable + + # Verify user inputs + lmax = None + if app.ARGS.lmax: + try: + lmax = [int(i) for i in app.ARGS.lmax.split(',')] + except ValueError as exc: + raise MRtrixError('Values provided to -lmax option must be a comma-separated list of integers') from exc + if any(value < 0 or value % 2 for value in lmax): + raise MRtrixError('lmax values must be non-negative even integers') + if len(lmax) not in [2, 3]: + raise MRtrixError('Length of lmax vector expected to be either 2 or 3') + + # Determine whether we are working with single-shell or multi-shell data + bvalues = [ + int(round(float(value))) + for value in image.mrinfo('in.mif', 'shell_bvalues') \ + .strip().split()] + multishell = (len(bvalues) > 2) + if lmax is None: + lmax = LMAXES_MULTI if multishell else LMAXES_SINGLE + elif len(lmax) == 3 and not multishell: + raise MRtrixError('User specified 3 lmax values for three-tissue decomposition, but input DWI is not multi-shell') + + # RF estimation and multi-tissue CSD + class Tissue(object): #pylint: disable=useless-object-inheritance + def __init__(self, name): + self.name = name + self.tissue_rf = 'response_' + name + '.txt' + self.fod = 'FOD_' + name + '.mif' + self.fod_norm = 'FODnorm_' + name + '.mif' + + tissues = [Tissue('WM'), Tissue('GM'), Tissue('CSF')] + + run.command('dwi2response dhollander in.mif' + + (' -mask mask.mif' if app.ARGS.mask else '') + + ' ' + + ' '.join(tissue.tissue_rf for tissue in tissues)) + + # Immediately remove GM if we can't deal with it + if not multishell: + app.cleanup(tissues[1].tissue_rf) + tissues = tissues[::2] + + run.command('dwi2fod msmt_csd in.mif' + + ' -lmax ' + ','.join(str(item) for item in lmax) + + ' ' + + ' '.join(tissue.tissue_rf + ' ' + tissue.fod + for tissue 
in tissues)) + + run.command('maskfilter mask.mif erode - | ' + + 'mtnormalise -mask - -balanced' + + ' -check_norm field.mif ' + + ' '.join(tissue.fod + ' ' + tissue.fod_norm + for tissue in tissues)) + app.cleanup([tissue.fod for tissue in tissues]) + app.cleanup([tissue.fod_norm for tissue in tissues]) + app.cleanup([tissue.tissue_rf for tissue in tissues]) + + run.command('mrcalc in.mif field.mif -div - | ' + 'mrconvert - '+ path.from_user(app.ARGS.output), + mrconvert_keyval=path.from_user(app.ARGS.input, False), + force=app.FORCE_OVERWRITE) + + if app.ARGS.bias: + run.command('mrconvert field.mif ' + path.from_user(app.ARGS.bias), + mrconvert_keyval=path.from_user(app.ARGS.input, False), + force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwibiascorrect/mtnorm/get_inputs.py b/python/mrtrix3/dwibiascorrect/mtnorm/get_inputs.py new file mode 100644 index 0000000000..db679df3c8 --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/mtnorm/get_inputs.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def get_inputs(): #pylint: disable=unused-variable + pass diff --git a/python/mrtrix3/dwibiascorrect/mtnorm/usage.py b/python/mrtrix3/dwibiascorrect/mtnorm/usage.py new file mode 100644 index 0000000000..25ad71a0b0 --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/mtnorm/usage.py @@ -0,0 +1,48 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from . import LMAXES_MULTI, LMAXES_SINGLE + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('mtnorm', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Arshiya Sangchooli (asangchooli@student.unimelb.edu.au)') + parser.set_synopsis('Perform DWI bias field correction using the "mtnormalise" command') + parser.add_description('This algorithm bases its operation almost entirely on the utilisation of multi-tissue ' + 'decomposition information to estimate an underlying B1 receive field, as is implemented ' + 'in the MRtrix3 command "mtnormalise". 
Its typical usage is however slightly different, ' + 'in that the primary output of the command is not the bias-field-corrected FODs, but a ' + 'bias-field-corrected version of the DWI series.') + parser.add_description('The operation of this script is a subset of that performed by the script "dwibiasnormmask". ' + 'Many users may find that comprehensive solution preferable; this dwibiascorrect algorithm is ' + 'nevertheless provided to demonstrate specifically the bias field correction portion of that command.') + parser.add_description('The ODFs estimated within this optimisation procedure are by default of lower maximal spherical harmonic ' + 'degree than what would be advised for analysis. This is done for computational efficiency. This ' + 'behaviour can be modified through the -lmax command-line option.') + parser.add_citation('Jeurissen, B; Tournier, J-D; Dhollander, T; Connelly, A & Sijbers, J. ' + 'Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data. ' + 'NeuroImage, 2014, 103, 411-426') + parser.add_citation('Raffelt, D.; Dhollander, T.; Tournier, J.-D.; Tabbara, R.; Smith, R. E.; Pierre, E. & Connelly, A. ' + 'Bias Field Correction and Intensity Normalisation for Quantitative Analysis of Apparent Fibre Density. ' + 'In Proc. ISMRM, 2017, 26, 3541') + parser.add_citation('Dhollander, T.; Tabbara, R.; Rosnarho-Tornstrand, J.; Tournier, J.-D.; Raffelt, D. & Connelly, A. ' + 'Multi-tissue log-domain intensity and inhomogeneity normalisation for quantitative apparent fibre density. ' + 'In Proc. ISMRM, 2021, 29, 2472') + parser.add_argument('input', help='The input image series to be corrected') + parser.add_argument('output', help='The output corrected image series') + options = parser.add_argument_group('Options specific to the "mtnorm" algorithm') + options.add_argument('-lmax', + metavar='values', + help='The maximum spherical harmonic degree for the estimated FODs (see Description); ' + 'defaults are "' + ','.join(str(item) for item in LMAXES_MULTI) + '" for multi-shell and "' + ','.join(str(item) for item in LMAXES_SINGLE) + '" for single-shell data)') diff --git a/python/mrtrix3/dwibiascorrect/usage.py b/python/mrtrix3/dwibiascorrect/usage.py new file mode 100644 index 0000000000..bb614ac345 --- /dev/null +++ b/python/mrtrix3/dwibiascorrect/usage.py @@ -0,0 +1,31 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import algorithm, app, _version #pylint: disable=no-name-in-module + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Robert E. 
Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Perform B1 field inhomogeneity correction for a DWI volume series') + cmdline.add_description('Note that if the -mask command-line option is not specified, the MRtrix3 command dwi2mask will automatically be called to ' + 'derive a mask that will be passed to the relevant bias field estimation command. ' + 'More information on mask derivation from DWI data can be found at the following link: \n' + 'https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/dwi_preprocessing/masking.html') + common_options = cmdline.add_argument_group('Options common to all dwibiascorrect algorithms') + common_options.add_argument('-mask', metavar='image', help='Manually provide a mask image for bias field estimation') + common_options.add_argument('-bias', metavar='image', help='Output the estimated bias field') + app.add_dwgrad_import_options(cmdline) + + # Import the command-line settings for all algorithms found in the relevant directory + algorithm.usage(cmdline) diff --git a/python/mrtrix3/dwibiasnormmask/__init__.py b/python/mrtrix3/dwibiasnormmask/__init__.py index 5b559a067e..d94dcc7d90 100644 --- a/python/mrtrix3/dwibiasnormmask/__init__.py +++ b/python/mrtrix3/dwibiasnormmask/__init__.py @@ -13,8 +13,6 @@ # # For more details, see http://www.mrtrix.org/. -import math, os, shutil - DWIBIASCORRECT_MAX_ITERS = 2 LMAXES_MULTI = [4,0,0] LMAXES_SINGLE = [4,0] @@ -22,433 +20,3 @@ MASK_ALGOS = ['dwi2mask', 'fslbet', 'hdbet', 'mrthreshold', 'synthstrip', 'threshold'] MASK_ALGO_DEFAULT = 'threshold' DICE_COEFF_DEFAULT = 1.0 - 1e-3 - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Arshiya Sangchooli (asangchooli@student.unimelb.edu.au)') - cmdline.set_synopsis('Perform a combination of bias field correction, intensity normalisation, and mask derivation, for DWI data') - cmdline.add_description('DWI bias field correction, intensity normalisation and masking are inter-related steps, and errors ' - 'in each may influence other steps. This script is designed to perform all of these steps in an integrated ' - 'iterative fashion, with the intention of making all steps more robust.') - cmdline.add_description('The operation of the algorithm is as follows. An initial mask is defined, either using the default dwi2mask ' - 'algorithm or as provided by the user. Based on this mask, a sequence of response function estimation, ' - 'multi-shell multi-tissue CSD, bias field correction (using the mtnormalise command), and intensity ' - 'normalisation is performed. The default dwi2mask algorithm is then re-executed on the bias-field-corrected ' - 'DWI series. This sequence of steps is then repeated based on the revised mask, until either a convergence ' - 'criterion or some number of maximum iterations is reached.') - cmdline.add_description('The MRtrix3 mtnormalise command is used to estimate information relating to bias field and intensity ' - 'normalisation. However its usage in this context is different to its conventional usage. Firstly, ' - 'while the corrected ODF images are typically used directly following invocation of this command, ' - 'here the estimated bias field and scaling factors are instead used to apply the relevant corrections to ' - 'the originating DWI data. 
Secondly, the global intensity scaling that is calculated and applied is ' - 'typically based on achieving close to a unity sum of tissue signal fractions throughout the masked region. ' - 'Here, it is instead the b=0 signal in CSF that forms the reference for this global intensity scaling; ' - 'this is calculated based on the estimated CSF response function and the tissue-specific intensity ' - 'scaling (this is calculated internally by mtnormalise as part of its optimisation process, but typically ' - 'subsequently discarded in favour of a single scaling factor for all tissues)') - cmdline.add_description('The ODFs estimated within this optimisation procedure are by default of lower maximal spherical harmonic ' - 'degree than what would be advised for analysis. This is done for computational efficiency. This ' - 'behaviour can be modified through the -lmax command-line option.') - cmdline.add_description('By default, the optimisation procedure will terminate after only two iterations. This is done because ' - 'it has been observed for some data / configurations that additional iterations can lead to unstable ' - 'divergence and erroneous results for bias field estimation and masking. For other configurations, ' - 'it may be preferable to use a greater number of iterations, and allow the iterative algorithm to ' - 'converge to a stable solution. This can be controlled via the -max_iters command-line option.') - cmdline.add_description('Within the optimisation algorithm, derivation of the mask may potentially be performed differently to ' - 'a conventional mask derivation that is based on a DWI series (where, in many instances, it is actually ' - 'only the mean b=0 image that is used). Here, the image corresponding to the sum of tissue signal fractions ' - 'following spherical deconvolution / bias field correction / intensity normalisation is also available, ' - 'and this can potentially be used for mask derivation. Available options are as follows. ' - '"dwi2mask": Use the MRtrix3 command dwi2mask on the bias-field-corrected DWI series ' - '(ie. do not use the ODF tissue sum image for mask derivation); ' - 'the algorithm to be invoked can be controlled by the user via the MRtrix config file entry "Dwi2maskAlgorithm". ' - '"fslbet": Invoke the FSL command "bet" on the ODF tissue sum image. ' - '"hdbet": Invoke the HD-BET command on the ODF tissue sum image. ' - '"mrthreshold": Invoke the MRtrix3 command "mrthreshold" on the ODF tissue sum image, ' - 'where an appropriate threshold value will be determined automatically ' - '(and some heuristic cleanup of the resulting mask will be performed). ' - '"synthstrip": Invoke the FreeSurfer SynthStrip method on the ODF tissue sum image. ' - '"threshold": Apply a fixed partial volume threshold of 0.5 to the ODF tissue sum image ' - ' (and some heuristic cleanup of the resulting mask will be performed).') - cmdline.add_citation('Jeurissen, B; Tournier, J-D; Dhollander, T; Connelly, A & Sijbers, J. ' - 'Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data. ' - 'NeuroImage, 2014, 103, 411-426') - cmdline.add_citation('Raffelt, D.; Dhollander, T.; Tournier, J.-D.; Tabbara, R.; Smith, R. E.; Pierre, E. & Connelly, A. ' - 'Bias Field Correction and Intensity Normalisation for Quantitative Analysis of Apparent Fibre Density. ' - 'In Proc. ISMRM, 2017, 26, 3541') - cmdline.add_citation('Dhollander, T.; Raffelt, D. & Connelly, A. 
' - 'Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. ' - 'ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5') - cmdline.add_citation('Dhollander, T.; Tabbara, R.; Rosnarho-Tornstrand, J.; Tournier, J.-D.; Raffelt, D. & Connelly, A. ' - 'Multi-tissue log-domain intensity and inhomogeneity normalisation for quantitative apparent fibre density. ' - 'In Proc. ISMRM, 2021, 29, 2472') - cmdline.add_argument('input', help='The input DWI series to be corrected') - cmdline.add_argument('output_dwi', help='The output corrected DWI series') - cmdline.add_argument('output_mask', help='The output DWI mask') - output_options = cmdline.add_argument_group('Options that modulate the outputs of the script') - output_options.add_argument('-output_bias', metavar='image', - help='Export the final estimated bias field to an image') - output_options.add_argument('-output_scale', metavar='file', - help='Write the scaling factor applied to the DWI series to a text file') - output_options.add_argument('-output_tissuesum', metavar='image', - help='Export the tissue sum image that was used to generate the final mask') - output_options.add_argument('-reference', type=float, metavar='value', default=REFERENCE_INTENSITY, - help='Set the target CSF b=0 intensity in the output DWI series (default: ' + str(REFERENCE_INTENSITY) + ')') - internal_options = cmdline.add_argument_group('Options relevant to the internal optimisation procedure') - internal_options.add_argument('-dice', type=float, default=DICE_COEFF_DEFAULT, metavar='value', - help='Set the Dice coefficient threshold for similarity of masks between sequential iterations that will ' - 'result in termination due to convergence; default = ' + str(DICE_COEFF_DEFAULT)) - internal_options.add_argument('-init_mask', metavar='image', - help='Provide an initial mask for the first iteration of the algorithm ' - '(if not provided, the default dwi2mask algorithm will be used)') - internal_options.add_argument('-max_iters', type=int, default=DWIBIASCORRECT_MAX_ITERS, metavar='count', - help='The maximum number of iterations (see Description); default is ' + str(DWIBIASCORRECT_MAX_ITERS) + '; ' - 'set to 0 to proceed until convergence') - internal_options.add_argument('-mask_algo', choices=MASK_ALGOS, metavar='algorithm', - help='The algorithm to use for mask estimation, potentially based on the ODF sum image (see Description); default: ' + MASK_ALGO_DEFAULT) - internal_options.add_argument('-lmax', metavar='values', - help='The maximum spherical harmonic degree for the estimated FODs (see Description); ' - 'defaults are "' + ','.join(str(item) for item in LMAXES_MULTI) + '" for multi-shell and "' + ','.join(str(item) for item in LMAXES_SINGLE) + '" for single-shell data)') - app.add_dwgrad_import_options(cmdline) - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, fsl, image, matrix, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - - # Check user inputs - if app.ARGS.max_iters < 0: - raise MRtrixError('Maximum number of iterations must be a non-negative integer') - lmax = None - if app.ARGS.lmax: - try: - lmax = [int(i) for i in app.ARGS.lmax.split(',')] - except ValueError as exc: - raise MRtrixError('Values provided to -lmax option must be a comma-separated list of integers') from exc - if any(value < 0 or value % 2 for 
value in lmax): - raise MRtrixError('lmax values must be non-negative even integers') - if len(lmax) not in [2, 3]: - raise MRtrixError('Length of lmax vector expected to be either 2 or 3') - if app.ARGS.dice <= 0.0 or app.ARGS.dice > 1.0: - raise MRtrixError('Dice coefficient for convergence detection must lie in the range (0.0, 1.0]') - if app.ARGS.reference <= 0.0: - raise MRtrixError('Reference intensity must be positive') - - # Check what masking agorithm is going to be used - mask_algo = MASK_ALGO_DEFAULT - if app.ARGS.mask_algo: - mask_algo = app.ARGS.mask_algo - elif 'DwibiasnormmaskMaskAlgorithm' in CONFIG: - mask_algo = CONFIG['DwibiasnormmaskMaskAlgorithm'] - if not mask_algo in MASK_ALGOS: - raise MRtrixError('Invalid masking algorithm selection "%s" in MRtrix config file' % mask_algo) - app.console('"%s" algorithm will be used for brain masking during iteration as specified in config file' % mask_algo) - else: - app.console('Default "%s" algorithm will be used for brain masking during iteration' % MASK_ALGO_DEFAULT) - - # Check mask algorithm, including availability of external software if necessary - for mask_algo, software, command in [('fslbet', 'FSL', 'bet'), ('hdbet', 'HD-BET', 'hd-bet'), ('synthstrip', 'FreeSurfer', 'mri_synthstrip')]: - if app.ARGS.mask_algo == mask_algo and not shutil.which(command): - raise MRtrixError(software + ' command "' + command + '" not found; cannot use for internal mask calculations') - - app.check_output_path(app.ARGS.output_dwi) - app.check_output_path(app.ARGS.output_mask) - app.make_scratch_dir() - - grad_import_option = app.read_dwgrad_import_options() - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' - + path.to_scratch('input.mif') + grad_import_option) - - if app.ARGS.init_mask: - run.command('mrconvert ' + path.from_user(app.ARGS.init_mask) + ' ' - + path.to_scratch('dwi_mask_init.mif') + ' -datatype bit') - - app.goto_scratch_dir() - - - # Check inputs - # Make sure it's actually a DWI that's been passed - dwi_header = image.Header('input.mif') - if len(dwi_header.size()) != 4: - raise MRtrixError('Input image must be a 4D image') - if 'dw_scheme' not in dwi_header.keyval(): - raise MRtrixError('No valid DW gradient scheme provided or present in image header') - if len(dwi_header.keyval()['dw_scheme']) != dwi_header.size()[3]: - raise MRtrixError('DW gradient scheme contains different number of entries (' - + str(len(dwi_header.keyval()['dw_scheme'])) - + ' to number of volumes in DWIs (' + dwi_header.size()[3] + ')') - - # Determine whether we are working with single-shell or multi-shell data - bvalues = [ - int(round(float(value))) - for value in image.mrinfo('input.mif', 'shell_bvalues') \ - .strip().split()] - multishell = (len(bvalues) > 2) - if lmax is None: - lmax = LMAXES_MULTI if multishell else LMAXES_SINGLE - elif len(lmax) == 3 and not multishell: - raise MRtrixError('User specified 3 lmax values for three-tissue decomposition, but input DWI is not multi-shell') - - # Create a mask of voxels where the input data contain positive values; - # we want to make sure that these never end up included in the output mask - run.command('mrmath input.mif max - -axis 3 | ' - 'mrthreshold - -abs 0 -comparison gt input_pos_mask.mif') - - # Generate an initial brain mask if required, or check the initial mask if provided by the user - if app.ARGS.init_mask: - if not image.match('input.mif', 'dwi_mask_init.mif', up_to_dim=3): - raise MRtrixError('Provided mask image does not match input DWI') - else: - 
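
The -dice option sets the convergence threshold on the Dice coefficient between the masks of two successive iterations, defined as 2|A∩B| / (|A| + |B|). A minimal self-contained sketch of that calculation, operating on plain 0/1 voxel lists rather than images (illustrative only; the script itself derives the counts via mrcalc and image.statistics, as shown later in this hunk):

# Illustrative Dice coefficient between two binary masks,
# represented here as flat lists of 0/1 voxel values.
def dice_coefficient(mask_a, mask_b):
  overlap = sum(1 for a, b in zip(mask_a, mask_b) if a and b)
  return 2.0 * overlap / (sum(mask_a) + sum(mask_b))

# Example: 3 overlapping voxels, mask sizes 4 and 5,
# gives 2*3 / (4+5) = 0.666...
print(dice_coefficient([1, 1, 1, 1, 0, 0], [1, 1, 1, 0, 1, 1]))

Two such masks would only be treated as converged if the -dice threshold were set at or below 0.67.
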
app.debug('Performing intial DWI brain masking') - run.command('dwi2mask ' - + CONFIG['Dwi2maskAlgorithm'] - + ' input.mif dwi_mask_init.mif') - - # Combined RF estimation / CSD / mtnormalise / mask revision - class Tissue(object): #pylint: disable=useless-object-inheritance - def __init__(self, name, index): - self.name = name - iter_string = '_iter' + str(index) - self.tissue_rf = 'response_' + name + iter_string + '.txt' - self.fod_init = 'FODinit_' + name + iter_string + '.mif' - self.fod_norm = 'FODnorm_' + name + iter_string + '.mif' - - - app.debug('Commencing iterative DWI bias field correction and brain masking with ' - + ('a maximum of ' + str(app.ARGS.max_iters) if app.ARGS.max_iters else 'no limit on number of ') + ' iterations') - - dwi_image = 'input.mif' - dwi_mask_image = 'dwi_mask_init.mif' - bias_field_image = None - tissue_sum_image = None - iteration = 0 - step = 'initialisation' - prev_dice_coefficient = 0.0 - total_scaling_factor = 1.0 - - def msg(): - return 'Iteration {0}; {1} step; previous Dice coefficient {2}' \ - .format(iteration, step, prev_dice_coefficient) - progress = app.ProgressBar(msg) - - iteration = 1 - while True: - iter_string = '_iter' + str(iteration) - tissues = [Tissue('WM', iteration), - Tissue('GM', iteration), - Tissue('CSF', iteration)] - - step = 'dwi2response' - progress.increment() - run.command('dwi2response dhollander ' - + dwi_image - + ' -mask ' - + dwi_mask_image - + ' ' - + ' '.join(tissue.tissue_rf for tissue in tissues)) - - - # Immediately remove GM if we can't deal with it - if not multishell: - app.cleanup(tissues[1].tissue_rf) - tissues = tissues[::2] - - step = 'dwi2fod' - progress.increment() - app.debug('Performing CSD with lmax values: ' + ','.join(str(item) for item in lmax)) - run.command('dwi2fod msmt_csd ' - + dwi_image - + ' -lmax ' + ','.join(str(item) for item in lmax) - + ' ' - + ' '.join(tissue.tissue_rf + ' ' + tissue.fod_init - for tissue in tissues)) - - step = 'maskfilter' - progress.increment() - eroded_mask = os.path.splitext(dwi_mask_image)[0] + '_eroded.mif' - run.command('maskfilter ' + dwi_mask_image + ' erode ' + eroded_mask) - - step = 'mtnormalise' - progress.increment() - old_bias_field_image = bias_field_image - bias_field_image = 'field' + iter_string + '.mif' - factors_path = 'factors' + iter_string + '.txt' - - run.command('mtnormalise -balanced' - + ' -mask ' + eroded_mask - + ' -check_norm ' + bias_field_image - + ' -check_factors ' + factors_path - + ' ' - + ' '.join(tissue.fod_init + ' ' + tissue.fod_norm - for tissue in tissues)) - app.cleanup([tissue.fod_init for tissue in tissues]) - app.cleanup(eroded_mask) - - app.debug('Iteration ' + str(iteration) + ', ' - + 'applying estimated bias field and appropiate scaling factor...') - csf_rf = matrix.load_matrix(tissues[-1].tissue_rf) - csf_rf_bzero_lzero = csf_rf[0][0] - app.cleanup([tissue.tissue_rf for tissue in tissues]) - balance_factors = matrix.load_vector(factors_path) - csf_balance_factor = balance_factors[-1] - app.cleanup(factors_path) - scale_multiplier = (app.ARGS.reference * math.sqrt(4.0*math.pi)) / \ - (csf_rf_bzero_lzero / csf_balance_factor) - new_dwi_image = 'dwi' + iter_string + '.mif' - run.command('mrcalc ' + dwi_image + ' ' - + bias_field_image + ' -div ' - + str(scale_multiplier) + ' -mult ' - + new_dwi_image) - - old_dwi_image = dwi_image - dwi_image = new_dwi_image - - old_tissue_sum_image = tissue_sum_image - tissue_sum_image = 'tissue_sum' + iter_string + '.mif' - - app.debug('Iteration ' + str(iteration) + ', ' - + 
'revising brain mask...') - step = 'masking' - progress.increment() - - run.command('mrconvert ' - + tissues[0].fod_norm - + ' -coord 3 0 - |' - + ' mrmath - ' - + ' '.join(tissue.fod_norm for tissue in tissues[1:]) - + ' sum - | ' - + 'mrcalc - ' + str(math.sqrt(4.0 * math.pi)) + ' -mult ' - + tissue_sum_image) - app.cleanup([tissue.fod_norm for tissue in tissues]) - - new_dwi_mask_image = 'dwi_mask' + iter_string + '.mif' - tissue_sum_image_nii = None - new_dwi_mask_image_nii = None - if mask_algo in ['fslbet', 'hdbet', 'synthstrip']: - tissue_sum_image_nii = os.path.splitext(tissue_sum_image)[0] + '.nii' - run.command('mrconvert ' + tissue_sum_image + ' ' + tissue_sum_image_nii) - new_dwi_mask_image_nii = os.path.splitext(new_dwi_mask_image)[0] + '.nii' - if mask_algo == 'dwi2mask': - run.command('dwi2mask ' + CONFIG.get('Dwi2maskAlgorithm', 'legacy') + ' ' + new_dwi_image + ' ' + new_dwi_mask_image) - elif mask_algo == 'fslbet': - run.command('bet ' + tissue_sum_image_nii + ' ' + new_dwi_mask_image_nii + ' -R -m') - app.cleanup(fsl.find_image(os.path.splitext(new_dwi_mask_image_nii)[0])) - new_dwi_mask_image_nii = fsl.find_image(os.path.splitext(new_dwi_mask_image_nii)[0] + '_mask') - run.command('mrcalc ' + new_dwi_mask_image_nii + ' input_pos_mask.mif -mult ' + new_dwi_mask_image) - elif mask_algo == 'hdbet': - try: - run.command('hd-bet -i ' + tissue_sum_image_nii) - except run.MRtrixCmdError as e_gpu: - try: - run.command('hd-bet -i ' + tissue_sum_image_nii + ' -device cpu -mode fast -tta 0') - except run.MRtrixCmdError as e_cpu: - raise run.MRtrixCmdError('hd-bet', 1, e_gpu.stdout + e_cpu.stdout, e_gpu.stderr + e_cpu.stderr) - new_dwi_mask_image_nii = os.path.splitext(tissue_sum_image)[0] + '_bet_mask.nii.gz' - run.command('mrcalc ' + new_dwi_mask_image_nii + ' input_pos_mask.mif -mult ' + new_dwi_mask_image) - elif mask_algo in ['mrthreshold', 'threshold']: - mrthreshold_abs_option = ' -abs 0.5' if mask_algo == 'threshold' else '' - run.command('mrthreshold ' - + tissue_sum_image - + mrthreshold_abs_option - + ' - |' - + ' maskfilter - connect -largest - |' - + ' mrcalc 1 - -sub - -datatype bit |' - + ' maskfilter - connect -largest - |' - + ' mrcalc 1 - -sub - -datatype bit |' - + ' maskfilter - clean - |' - + ' mrcalc - input_pos_mask.mif -mult ' - + new_dwi_mask_image - + ' -datatype bit') - elif mask_algo == 'synthstrip': - run.command('mri_synthstrip -i ' + tissue_sum_image_nii + ' --mask ' + new_dwi_mask_image_nii) - run.command('mrcalc ' + new_dwi_mask_image_nii + ' input_pos_mask.mif -mult ' + new_dwi_mask_image) - else: - assert False - if tissue_sum_image_nii: - app.cleanup(tissue_sum_image_nii) - if new_dwi_mask_image_nii: - app.cleanup(new_dwi_mask_image_nii) - - step = 'mask comparison' - progress.increment() - dwi_old_mask_count = image.statistics(dwi_mask_image, - mask=dwi_mask_image).count - dwi_new_mask_count = image.statistics(new_dwi_mask_image, - mask=new_dwi_mask_image).count - app.debug('Old mask voxel count: ' + str(dwi_old_mask_count)) - app.debug('New mask voxel count: ' + str(dwi_new_mask_count)) - dwi_mask_overlap_image = 'dwi_mask_overlap' + iter_string + '.mif' - run.command(['mrcalc', dwi_mask_image, new_dwi_mask_image, '-mult', dwi_mask_overlap_image]) - - old_dwi_mask_image = dwi_mask_image - dwi_mask_image = new_dwi_mask_image - - mask_overlap_count = image.statistics(dwi_mask_overlap_image, - mask=dwi_mask_overlap_image).count - app.debug('Mask overlap voxel count: ' + str(mask_overlap_count)) - - new_dice_coefficient = 2.0 * mask_overlap_count / 
\ - (dwi_old_mask_count + dwi_new_mask_count) - - if iteration == app.ARGS.max_iters: - progress.done() - app.console('Terminating due to reaching maximum %d iterations; final Dice coefficient = %f' % (iteration, new_dice_coefficient)) - app.cleanup(old_dwi_image) - app.cleanup(old_dwi_mask_image) - app.cleanup(old_bias_field_image) - app.cleanup(old_tissue_sum_image) - total_scaling_factor *= scale_multiplier - break - - if new_dice_coefficient > app.ARGS.dice: - progress.done() - app.console('Exiting loop after %d iterations due to mask convergence (Dice coefficient = %f)' % (iteration, new_dice_coefficient)) - app.cleanup(old_dwi_image) - app.cleanup(old_dwi_mask_image) - app.cleanup(old_bias_field_image) - app.cleanup(old_tissue_sum_image) - total_scaling_factor *= scale_multiplier - break - - if new_dice_coefficient < prev_dice_coefficient: - progress.done() - app.warn('Mask divergence at iteration %d (Dice coefficient = %f); ' % (iteration, new_dice_coefficient) - + ' using mask from previous iteration') - app.cleanup(dwi_image) - app.cleanup(dwi_mask_image) - app.cleanup(bias_field_image) - app.cleanup(tissue_sum_image) - dwi_image = old_dwi_image - dwi_mask_image = old_dwi_mask_image - bias_field_image = old_bias_field_image - tissue_sum_image = old_tissue_sum_image - break - - iteration += 1 - prev_dice_coefficient = new_dice_coefficient - - - run.command(['mrconvert', dwi_image, path.from_user(app.ARGS.output_dwi, False)], - mrconvert_keyval=path.from_user(app.ARGS.input, False), - force=app.FORCE_OVERWRITE) - - if app.ARGS.output_bias: - run.command(['mrconvert', bias_field_image, path.from_user(app.ARGS.output_bias, False)], - mrconvert_keyval=path.from_user(app.ARGS.input, False), - force=app.FORCE_OVERWRITE) - - if app.ARGS.output_mask: - run.command(['mrconvert', dwi_mask_image, path.from_user(app.ARGS.output_mask, False)], - mrconvert_keyval=path.from_user(app.ARGS.input, False), - force=app.FORCE_OVERWRITE) - - if app.ARGS.output_scale: - matrix.save_vector(path.from_user(app.ARGS.output_scale, False), - [total_scaling_factor], - force=app.FORCE_OVERWRITE) - - if app.ARGS.output_tissuesum: - run.command(['mrconvert', tissue_sum_image, path.from_user(app.ARGS.output_tissuesum, False)], - mrconvert_keyval=path.from_user(app.ARGS.input, False), - force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwibiasnormmask/execute.py b/python/mrtrix3/dwibiasnormmask/execute.py new file mode 100644 index 0000000000..e3044a4c11 --- /dev/null +++ b/python/mrtrix3/dwibiasnormmask/execute.py @@ -0,0 +1,357 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import math, os, shutil +from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module +from mrtrix3 import app, fsl, image, matrix, path, run #pylint: disable=no-name-in-module +from . 
import LMAXES_MULTI, LMAXES_SINGLE, MASK_ALGO_DEFAULT, MASK_ALGOS + +def execute(): #pylint: disable=unused-variable + + # Check user inputs + if app.ARGS.max_iters < 0: + raise MRtrixError('Maximum number of iterations must be a non-negative integer') + lmax = None + if app.ARGS.lmax: + try: + lmax = [int(i) for i in app.ARGS.lmax.split(',')] + except ValueError as exc: + raise MRtrixError('Values provided to -lmax option must be a comma-separated list of integers') from exc + if any(value < 0 or value % 2 for value in lmax): + raise MRtrixError('lmax values must be non-negative even integers') + if len(lmax) not in [2, 3]: + raise MRtrixError('Length of lmax vector expected to be either 2 or 3') + if app.ARGS.dice <= 0.0 or app.ARGS.dice > 1.0: + raise MRtrixError('Dice coefficient for convergence detection must lie in the range (0.0, 1.0]') + if app.ARGS.reference <= 0.0: + raise MRtrixError('Reference intensity must be positive') + + # Check what masking agorithm is going to be used + mask_algo = MASK_ALGO_DEFAULT + if app.ARGS.mask_algo: + mask_algo = app.ARGS.mask_algo + elif 'DwibiasnormmaskMaskAlgorithm' in CONFIG: + mask_algo = CONFIG['DwibiasnormmaskMaskAlgorithm'] + if not mask_algo in MASK_ALGOS: + raise MRtrixError('Invalid masking algorithm selection "%s" in MRtrix config file' % mask_algo) + app.console('"%s" algorithm will be used for brain masking during iteration as specified in config file' % mask_algo) + else: + app.console('Default "%s" algorithm will be used for brain masking during iteration' % MASK_ALGO_DEFAULT) + + # Check mask algorithm, including availability of external software if necessary + for mask_algo, software, command in [('fslbet', 'FSL', 'bet'), ('hdbet', 'HD-BET', 'hd-bet'), ('synthstrip', 'FreeSurfer', 'mri_synthstrip')]: + if app.ARGS.mask_algo == mask_algo and not shutil.which(command): + raise MRtrixError(software + ' command "' + command + '" not found; cannot use for internal mask calculations') + + app.check_output_path(app.ARGS.output_dwi) + app.check_output_path(app.ARGS.output_mask) + app.make_scratch_dir() + + grad_import_option = app.read_dwgrad_import_options() + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + + path.to_scratch('input.mif') + grad_import_option) + + if app.ARGS.init_mask: + run.command('mrconvert ' + path.from_user(app.ARGS.init_mask) + ' ' + + path.to_scratch('dwi_mask_init.mif') + ' -datatype bit') + + app.goto_scratch_dir() + + + # Check inputs + # Make sure it's actually a DWI that's been passed + dwi_header = image.Header('input.mif') + if len(dwi_header.size()) != 4: + raise MRtrixError('Input image must be a 4D image') + if 'dw_scheme' not in dwi_header.keyval(): + raise MRtrixError('No valid DW gradient scheme provided or present in image header') + if len(dwi_header.keyval()['dw_scheme']) != dwi_header.size()[3]: + raise MRtrixError('DW gradient scheme contains different number of entries (' + + str(len(dwi_header.keyval()['dw_scheme'])) + + ' to number of volumes in DWIs (' + dwi_header.size()[3] + ')') + + # Determine whether we are working with single-shell or multi-shell data + bvalues = [ + int(round(float(value))) + for value in image.mrinfo('input.mif', 'shell_bvalues') \ + .strip().split()] + multishell = (len(bvalues) > 2) + if lmax is None: + lmax = LMAXES_MULTI if multishell else LMAXES_SINGLE + elif len(lmax) == 3 and not multishell: + raise MRtrixError('User specified 3 lmax values for three-tissue decomposition, but input DWI is not multi-shell') + + # Create a mask of voxels where 
the input data contain positive values; + # we want to make sure that these never end up included in the output mask + run.command('mrmath input.mif max - -axis 3 | ' + 'mrthreshold - -abs 0 -comparison gt input_pos_mask.mif') + + # Generate an initial brain mask if required, or check the initial mask if provided by the user + if app.ARGS.init_mask: + if not image.match('input.mif', 'dwi_mask_init.mif', up_to_dim=3): + raise MRtrixError('Provided mask image does not match input DWI') + else: + app.debug('Performing initial DWI brain masking') + run.command('dwi2mask ' + + CONFIG['Dwi2maskAlgorithm'] + + ' input.mif dwi_mask_init.mif') + + # Combined RF estimation / CSD / mtnormalise / mask revision + class Tissue(object): #pylint: disable=useless-object-inheritance + def __init__(self, name, index): + self.name = name + iter_string = '_iter' + str(index) + self.tissue_rf = 'response_' + name + iter_string + '.txt' + self.fod_init = 'FODinit_' + name + iter_string + '.mif' + self.fod_norm = 'FODnorm_' + name + iter_string + '.mif' + + + app.debug('Commencing iterative DWI bias field correction and brain masking with ' + + ('a maximum of ' + str(app.ARGS.max_iters) if app.ARGS.max_iters else 'no limit on number of ') + ' iterations') + + dwi_image = 'input.mif' + dwi_mask_image = 'dwi_mask_init.mif' + bias_field_image = None + tissue_sum_image = None + iteration = 0 + step = 'initialisation' + prev_dice_coefficient = 0.0 + total_scaling_factor = 1.0 + + def msg(): + return 'Iteration {0}; {1} step; previous Dice coefficient {2}' \ + .format(iteration, step, prev_dice_coefficient) + progress = app.ProgressBar(msg) + + iteration = 1 + while True: + iter_string = '_iter' + str(iteration) + tissues = [Tissue('WM', iteration), + Tissue('GM', iteration), + Tissue('CSF', iteration)] + + step = 'dwi2response' + progress.increment() + run.command('dwi2response dhollander ' + + dwi_image + + ' -mask ' + + dwi_mask_image + + ' ' + + ' '.join(tissue.tissue_rf for tissue in tissues)) + + + # Immediately remove GM if we can't deal with it + if not multishell: + app.cleanup(tissues[1].tissue_rf) + tissues = tissues[::2] + + step = 'dwi2fod' + progress.increment() + app.debug('Performing CSD with lmax values: ' + ','.join(str(item) for item in lmax)) + run.command('dwi2fod msmt_csd ' + + dwi_image + + ' -lmax ' + ','.join(str(item) for item in lmax) + + ' ' + + ' '.join(tissue.tissue_rf + ' ' + tissue.fod_init + for tissue in tissues)) + + step = 'maskfilter' + progress.increment() + eroded_mask = os.path.splitext(dwi_mask_image)[0] + '_eroded.mif' + run.command('maskfilter ' + dwi_mask_image + ' erode ' + eroded_mask) + + step = 'mtnormalise' + progress.increment() + old_bias_field_image = bias_field_image + bias_field_image = 'field' + iter_string + '.mif' + factors_path = 'factors' + iter_string + '.txt' + + run.command('mtnormalise -balanced' + + ' -mask ' + eroded_mask + + ' -check_norm ' + bias_field_image + + ' -check_factors ' + factors_path + + ' ' + + ' '.join(tissue.fod_init + ' ' + tissue.fod_norm + for tissue in tissues)) + app.cleanup([tissue.fod_init for tissue in tissues]) + app.cleanup(eroded_mask) + + app.debug('Iteration ' + str(iteration) + ', ' + + 'applying estimated bias field and appropriate scaling factor...') + csf_rf = matrix.load_matrix(tissues[-1].tissue_rf) + csf_rf_bzero_lzero = csf_rf[0][0] + app.cleanup([tissue.tissue_rf for tissue in tissues]) + balance_factors = matrix.load_vector(factors_path) + csf_balance_factor = balance_factors[-1] + app.cleanup(factors_path) + 
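
The global intensity scaling computed at this point is anchored to CSF: the l=0, b=0 term of the CSF response function, divided by the CSF balance factor reported by mtnormalise, gives the CSF b=0 signal amplitude in the bias-corrected data, and the DWI series is scaled so that this equals the requested -reference value; the sqrt(4*pi) factor converts between the l=0 spherical harmonic coefficient and the mean signal amplitude. A minimal numerical sketch of that arithmetic, using made-up example values in place of the quantities loaded just above:

import math

# Hypothetical example values:
reference = 1000.0             # target CSF b=0 intensity (-reference)
csf_rf_bzero_lzero = 12000.0   # l=0 term of the b=0 row of the CSF response function
csf_balance_factor = 1.2       # CSF entry of the mtnormalise -check_factors output

# Same expression as used by the script:
scale_multiplier = (reference * math.sqrt(4.0 * math.pi)) / \
                   (csf_rf_bzero_lzero / csf_balance_factor)
print(scale_multiplier)  # multiplicative factor applied to the bias-field-corrected DWI
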
scale_multiplier = (app.ARGS.reference * math.sqrt(4.0*math.pi)) / \ + (csf_rf_bzero_lzero / csf_balance_factor) + new_dwi_image = 'dwi' + iter_string + '.mif' + run.command('mrcalc ' + dwi_image + ' ' + + bias_field_image + ' -div ' + + str(scale_multiplier) + ' -mult ' + + new_dwi_image) + + old_dwi_image = dwi_image + dwi_image = new_dwi_image + + old_tissue_sum_image = tissue_sum_image + tissue_sum_image = 'tissue_sum' + iter_string + '.mif' + + app.debug('Iteration ' + str(iteration) + ', ' + + 'revising brain mask...') + step = 'masking' + progress.increment() + + run.command('mrconvert ' + + tissues[0].fod_norm + + ' -coord 3 0 - |' + + ' mrmath - ' + + ' '.join(tissue.fod_norm for tissue in tissues[1:]) + + ' sum - | ' + + 'mrcalc - ' + str(math.sqrt(4.0 * math.pi)) + ' -mult ' + + tissue_sum_image) + app.cleanup([tissue.fod_norm for tissue in tissues]) + + new_dwi_mask_image = 'dwi_mask' + iter_string + '.mif' + tissue_sum_image_nii = None + new_dwi_mask_image_nii = None + if mask_algo in ['fslbet', 'hdbet', 'synthstrip']: + tissue_sum_image_nii = os.path.splitext(tissue_sum_image)[0] + '.nii' + run.command('mrconvert ' + tissue_sum_image + ' ' + tissue_sum_image_nii) + new_dwi_mask_image_nii = os.path.splitext(new_dwi_mask_image)[0] + '.nii' + if mask_algo == 'dwi2mask': + run.command('dwi2mask ' + CONFIG.get('Dwi2maskAlgorithm', 'legacy') + ' ' + new_dwi_image + ' ' + new_dwi_mask_image) + elif mask_algo == 'fslbet': + run.command('bet ' + tissue_sum_image_nii + ' ' + new_dwi_mask_image_nii + ' -R -m') + app.cleanup(fsl.find_image(os.path.splitext(new_dwi_mask_image_nii)[0])) + new_dwi_mask_image_nii = fsl.find_image(os.path.splitext(new_dwi_mask_image_nii)[0] + '_mask') + run.command('mrcalc ' + new_dwi_mask_image_nii + ' input_pos_mask.mif -mult ' + new_dwi_mask_image) + elif mask_algo == 'hdbet': + try: + run.command('hd-bet -i ' + tissue_sum_image_nii) + except run.MRtrixCmdError as e_gpu: + try: + run.command('hd-bet -i ' + tissue_sum_image_nii + ' -device cpu -mode fast -tta 0') + except run.MRtrixCmdError as e_cpu: + raise run.MRtrixCmdError('hd-bet', 1, e_gpu.stdout + e_cpu.stdout, e_gpu.stderr + e_cpu.stderr) + new_dwi_mask_image_nii = os.path.splitext(tissue_sum_image)[0] + '_bet_mask.nii.gz' + run.command('mrcalc ' + new_dwi_mask_image_nii + ' input_pos_mask.mif -mult ' + new_dwi_mask_image) + elif mask_algo in ['mrthreshold', 'threshold']: + mrthreshold_abs_option = ' -abs 0.5' if mask_algo == 'threshold' else '' + run.command('mrthreshold ' + + tissue_sum_image + + mrthreshold_abs_option + + ' - |' + + ' maskfilter - connect -largest - |' + + ' mrcalc 1 - -sub - -datatype bit |' + + ' maskfilter - connect -largest - |' + + ' mrcalc 1 - -sub - -datatype bit |' + + ' maskfilter - clean - |' + + ' mrcalc - input_pos_mask.mif -mult ' + + new_dwi_mask_image + + ' -datatype bit') + elif mask_algo == 'synthstrip': + run.command('mri_synthstrip -i ' + tissue_sum_image_nii + ' --mask ' + new_dwi_mask_image_nii) + run.command('mrcalc ' + new_dwi_mask_image_nii + ' input_pos_mask.mif -mult ' + new_dwi_mask_image) + else: + assert False + if tissue_sum_image_nii: + app.cleanup(tissue_sum_image_nii) + if new_dwi_mask_image_nii: + app.cleanup(new_dwi_mask_image_nii) + + step = 'mask comparison' + progress.increment() + dwi_old_mask_count = image.statistics(dwi_mask_image, + mask=dwi_mask_image).count + dwi_new_mask_count = image.statistics(new_dwi_mask_image, + mask=new_dwi_mask_image).count + app.debug('Old mask voxel count: ' + str(dwi_old_mask_count)) + app.debug('New mask 
voxel count: ' + str(dwi_new_mask_count)) + dwi_mask_overlap_image = 'dwi_mask_overlap' + iter_string + '.mif' + run.command(['mrcalc', dwi_mask_image, new_dwi_mask_image, '-mult', dwi_mask_overlap_image]) + + old_dwi_mask_image = dwi_mask_image + dwi_mask_image = new_dwi_mask_image + + mask_overlap_count = image.statistics(dwi_mask_overlap_image, + mask=dwi_mask_overlap_image).count + app.debug('Mask overlap voxel count: ' + str(mask_overlap_count)) + + new_dice_coefficient = 2.0 * mask_overlap_count / \ + (dwi_old_mask_count + dwi_new_mask_count) + + if iteration == app.ARGS.max_iters: + progress.done() + app.console('Terminating due to reaching maximum %d iterations; final Dice coefficient = %f' % (iteration, new_dice_coefficient)) + app.cleanup(old_dwi_image) + app.cleanup(old_dwi_mask_image) + app.cleanup(old_bias_field_image) + app.cleanup(old_tissue_sum_image) + total_scaling_factor *= scale_multiplier + break + + if new_dice_coefficient > app.ARGS.dice: + progress.done() + app.console('Exiting loop after %d iterations due to mask convergence (Dice coefficient = %f)' % (iteration, new_dice_coefficient)) + app.cleanup(old_dwi_image) + app.cleanup(old_dwi_mask_image) + app.cleanup(old_bias_field_image) + app.cleanup(old_tissue_sum_image) + total_scaling_factor *= scale_multiplier + break + + if new_dice_coefficient < prev_dice_coefficient: + progress.done() + app.warn('Mask divergence at iteration %d (Dice coefficient = %f); ' % (iteration, new_dice_coefficient) + + ' using mask from previous iteration') + app.cleanup(dwi_image) + app.cleanup(dwi_mask_image) + app.cleanup(bias_field_image) + app.cleanup(tissue_sum_image) + dwi_image = old_dwi_image + dwi_mask_image = old_dwi_mask_image + bias_field_image = old_bias_field_image + tissue_sum_image = old_tissue_sum_image + break + + iteration += 1 + prev_dice_coefficient = new_dice_coefficient + + + run.command(['mrconvert', dwi_image, path.from_user(app.ARGS.output_dwi, False)], + mrconvert_keyval=path.from_user(app.ARGS.input, False), + force=app.FORCE_OVERWRITE) + + if app.ARGS.output_bias: + run.command(['mrconvert', bias_field_image, path.from_user(app.ARGS.output_bias, False)], + mrconvert_keyval=path.from_user(app.ARGS.input, False), + force=app.FORCE_OVERWRITE) + + if app.ARGS.output_mask: + run.command(['mrconvert', dwi_mask_image, path.from_user(app.ARGS.output_mask, False)], + mrconvert_keyval=path.from_user(app.ARGS.input, False), + force=app.FORCE_OVERWRITE) + + if app.ARGS.output_scale: + matrix.save_vector(path.from_user(app.ARGS.output_scale, False), + [total_scaling_factor], + force=app.FORCE_OVERWRITE) + + if app.ARGS.output_tissuesum: + run.command(['mrconvert', tissue_sum_image, path.from_user(app.ARGS.output_tissuesum, False)], + mrconvert_keyval=path.from_user(app.ARGS.input, False), + force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwibiasnormmask/usage.py b/python/mrtrix3/dwibiasnormmask/usage.py new file mode 100644 index 0000000000..75dc1d2e2e --- /dev/null +++ b/python/mrtrix3/dwibiasnormmask/usage.py @@ -0,0 +1,111 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
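
Stripped of the image processing, the control flow of the loop above reduces to: refine the mask, compare it against the previous mask via the Dice coefficient, and stop on convergence, divergence, or an iteration cap. A schematic sketch of that logic only (the function arguments here are placeholders, not part of the script):

def iterate_bias_and_mask(initial_mask, refine_mask, dice, dice_threshold, max_iters):
  # refine_mask: callable producing the next mask estimate from the current one
  # dice: callable returning the Dice coefficient of two masks (0..1)
  mask = initial_mask
  prev_dice = 0.0
  iteration = 1
  while True:
    new_mask = refine_mask(mask)
    coeff = dice(mask, new_mask)
    if iteration == max_iters:    # -max_iters cap reached (0 means no cap)
      return new_mask
    if coeff > dice_threshold:    # masks have converged (-dice)
      return new_mask
    if coeff < prev_dice:         # masks are diverging: keep the previous one
      return mask
    mask, prev_dice, iteration = new_mask, coeff, iteration + 1
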
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app #pylint: disable=no-name-in-module + +from . import DWIBIASCORRECT_MAX_ITERS +from . import LMAXES_MULTI +from . import LMAXES_SINGLE +from . import REFERENCE_INTENSITY +from . import MASK_ALGOS +from . import MASK_ALGO_DEFAULT +from . import DICE_COEFF_DEFAULT + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Arshiya Sangchooli (asangchooli@student.unimelb.edu.au)') + cmdline.set_synopsis('Perform a combination of bias field correction, intensity normalisation, and mask derivation, for DWI data') + cmdline.add_description('DWI bias field correction, intensity normalisation and masking are inter-related steps, and errors ' + 'in each may influence other steps. This script is designed to perform all of these steps in an integrated ' + 'iterative fashion, with the intention of making all steps more robust.') + cmdline.add_description('The operation of the algorithm is as follows. An initial mask is defined, either using the default dwi2mask ' + 'algorithm or as provided by the user. Based on this mask, a sequence of response function estimation, ' + 'multi-shell multi-tissue CSD, bias field correction (using the mtnormalise command), and intensity ' + 'normalisation is performed. The default dwi2mask algorithm is then re-executed on the bias-field-corrected ' + 'DWI series. This sequence of steps is then repeated based on the revised mask, until either a convergence ' + 'criterion or some number of maximum iterations is reached.') + cmdline.add_description('The MRtrix3 mtnormalise command is used to estimate information relating to bias field and intensity ' + 'normalisation. However its usage in this context is different to its conventional usage. Firstly, ' + 'while the corrected ODF images are typically used directly following invocation of this command, ' + 'here the estimated bias field and scaling factors are instead used to apply the relevant corrections to ' + 'the originating DWI data. Secondly, the global intensity scaling that is calculated and applied is ' + 'typically based on achieving close to a unity sum of tissue signal fractions throughout the masked region. ' + 'Here, it is instead the b=0 signal in CSF that forms the reference for this global intensity scaling; ' + 'this is calculated based on the estimated CSF response function and the tissue-specific intensity ' + 'scaling (this is calculated internally by mtnormalise as part of its optimisation process, but typically ' + 'subsequently discarded in favour of a single scaling factor for all tissues)') + cmdline.add_description('The ODFs estimated within this optimisation procedure are by default of lower maximal spherical harmonic ' + 'degree than what would be advised for analysis. This is done for computational efficiency. This ' + 'behaviour can be modified through the -lmax command-line option.') + cmdline.add_description('By default, the optimisation procedure will terminate after only two iterations. 
This is done because ' + 'it has been observed for some data / configurations that additional iterations can lead to unstable ' + 'divergence and erroneous results for bias field estimation and masking. For other configurations, ' + 'it may be preferable to use a greater number of iterations, and allow the iterative algorithm to ' + 'converge to a stable solution. This can be controlled via the -max_iters command-line option.') + cmdline.add_description('Within the optimisation algorithm, derivation of the mask may potentially be performed differently to ' + 'a conventional mask derivation that is based on a DWI series (where, in many instances, it is actually ' + 'only the mean b=0 image that is used). Here, the image corresponding to the sum of tissue signal fractions ' + 'following spherical deconvolution / bias field correction / intensity normalisation is also available, ' + 'and this can potentially be used for mask derivation. Available options are as follows. ' + '"dwi2mask": Use the MRtrix3 command dwi2mask on the bias-field-corrected DWI series ' + '(ie. do not use the ODF tissue sum image for mask derivation); ' + 'the algorithm to be invoked can be controlled by the user via the MRtrix config file entry "Dwi2maskAlgorithm". ' + '"fslbet": Invoke the FSL command "bet" on the ODF tissue sum image. ' + '"hdbet": Invoke the HD-BET command on the ODF tissue sum image. ' + '"mrthreshold": Invoke the MRtrix3 command "mrthreshold" on the ODF tissue sum image, ' + 'where an appropriate threshold value will be determined automatically ' + '(and some heuristic cleanup of the resulting mask will be performed). ' + '"synthstrip": Invoke the FreeSurfer SynthStrip method on the ODF tissue sum image. ' + '"threshold": Apply a fixed partial volume threshold of 0.5 to the ODF tissue sum image ' + ' (and some heuristic cleanup of the resulting mask will be performed).') + cmdline.add_citation('Jeurissen, B; Tournier, J-D; Dhollander, T; Connelly, A & Sijbers, J. ' + 'Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data. ' + 'NeuroImage, 2014, 103, 411-426') + cmdline.add_citation('Raffelt, D.; Dhollander, T.; Tournier, J.-D.; Tabbara, R.; Smith, R. E.; Pierre, E. & Connelly, A. ' + 'Bias Field Correction and Intensity Normalisation for Quantitative Analysis of Apparent Fibre Density. ' + 'In Proc. ISMRM, 2017, 26, 3541') + cmdline.add_citation('Dhollander, T.; Raffelt, D. & Connelly, A. ' + 'Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. ' + 'ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5') + cmdline.add_citation('Dhollander, T.; Tabbara, R.; Rosnarho-Tornstrand, J.; Tournier, J.-D.; Raffelt, D. & Connelly, A. ' + 'Multi-tissue log-domain intensity and inhomogeneity normalisation for quantitative apparent fibre density. ' + 'In Proc. 
ISMRM, 2021, 29, 2472') + cmdline.add_argument('input', help='The input DWI series to be corrected') + cmdline.add_argument('output_dwi', help='The output corrected DWI series') + cmdline.add_argument('output_mask', help='The output DWI mask') + output_options = cmdline.add_argument_group('Options that modulate the outputs of the script') + output_options.add_argument('-output_bias', metavar='image', + help='Export the final estimated bias field to an image') + output_options.add_argument('-output_scale', metavar='file', + help='Write the scaling factor applied to the DWI series to a text file') + output_options.add_argument('-output_tissuesum', metavar='image', + help='Export the tissue sum image that was used to generate the final mask') + output_options.add_argument('-reference', type=float, metavar='value', default=REFERENCE_INTENSITY, + help='Set the target CSF b=0 intensity in the output DWI series (default: ' + str(REFERENCE_INTENSITY) + ')') + internal_options = cmdline.add_argument_group('Options relevant to the internal optimisation procedure') + internal_options.add_argument('-dice', type=float, default=DICE_COEFF_DEFAULT, metavar='value', + help='Set the Dice coefficient threshold for similarity of masks between sequential iterations that will ' + 'result in termination due to convergence; default = ' + str(DICE_COEFF_DEFAULT)) + internal_options.add_argument('-init_mask', metavar='image', + help='Provide an initial mask for the first iteration of the algorithm ' + '(if not provided, the default dwi2mask algorithm will be used)') + internal_options.add_argument('-max_iters', type=int, default=DWIBIASCORRECT_MAX_ITERS, metavar='count', + help='The maximum number of iterations (see Description); default is ' + str(DWIBIASCORRECT_MAX_ITERS) + '; ' + 'set to 0 to proceed until convergence') + internal_options.add_argument('-mask_algo', choices=MASK_ALGOS, metavar='algorithm', + help='The algorithm to use for mask estimation, potentially based on the ODF sum image (see Description); default: ' + MASK_ALGO_DEFAULT) + internal_options.add_argument('-lmax', metavar='values', + help='The maximum spherical harmonic degree for the estimated FODs (see Description); ' + 'defaults are "' + ','.join(str(item) for item in LMAXES_MULTI) + '" for multi-shell and "' + ','.join(str(item) for item in LMAXES_SINGLE) + '" for single-shell data)') + app.add_dwgrad_import_options(cmdline) diff --git a/python/mrtrix3/dwicat/__init__.py b/python/mrtrix3/dwicat/__init__.py index 039a1d0bd2..e69de29bb2 100644 --- a/python/mrtrix3/dwicat/__init__.py +++ b/python/mrtrix3/dwicat/__init__.py @@ -1,150 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - - - -import json, shutil - - - -def usage(cmdline): #pylint: disable=unused-variable - cmdline.set_author('Lena Dorfschmidt (ld548@cam.ac.uk) and Jakub Vohryzek (jakub.vohryzek@queens.ox.ac.uk) and Robert E. 
Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Concatenating multiple DWI series accounting for differential intensity scaling') - cmdline.add_description('This script concatenates two or more 4D DWI series, accounting for the ' - 'fact that there may be differences in intensity scaling between those series. ' - 'This intensity scaling is corrected by determining scaling factors that will ' - 'make the overall image intensities in the b=0 volumes of each series approximately ' - 'equivalent.') - cmdline.add_argument('inputs', nargs='+', help='Multiple input diffusion MRI series') - cmdline.add_argument('output', help='The output image series (all DWIs concatenated)') - cmdline.add_argument('-mask', metavar='image', help='Provide a binary mask within which image intensities will be matched') - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - - num_inputs = len(app.ARGS.inputs) - if num_inputs < 2: - raise MRtrixError('Script requires at least two input image series') - - # check input data - def check_header(header): - if len(header.size()) > 4: - raise MRtrixError('Image "' + header.name() + '" contains more than 4 dimensions') - if not 'dw_scheme' in header.keyval(): - raise MRtrixError('Image "' + header.name() + '" does not contain a gradient table') - dw_scheme = header.keyval()['dw_scheme'] - try: - if isinstance(dw_scheme[0], list): - num_grad_lines = len(dw_scheme) - elif (isinstance(dw_scheme[0], ( int, float))) and len(dw_scheme) >= 4: - num_grad_lines = 1 - else: - raise MRtrixError - except (IndexError, MRtrixError): - raise MRtrixError('Image "' + header.name() + '" contains gradient table of unknown format') # pylint: disable=raise-missing-from - if len(header.size()) == 4: - num_volumes = header.size()[3] - if num_grad_lines != num_volumes: - raise MRtrixError('Number of lines in gradient table for image "' + header.name() + '" (' + str(num_grad_lines) + ') does not match number of volumes (' + str(num_volumes) + ')') - elif not (num_grad_lines == 1 and len(dw_scheme) >= 4 and dw_scheme[3] <= float(CONFIG.get('BZeroThreshold', 10.0))): - raise MRtrixError('Image "' + header.name() + '" is 3D, and cannot be validated as a b=0 volume') - - first_header = image.Header(path.from_user(app.ARGS.inputs[0], False)) - check_header(first_header) - warn_protocol_mismatch = False - for filename in app.ARGS.inputs[1:]: - this_header = image.Header(path.from_user(filename, False)) - check_header(this_header) - if this_header.size()[0:3] != first_header.size()[0:3]: - raise MRtrixError('Spatial dimensions of image "' + filename + '" do not match those of first image "' + first_header.name() + '"') - for field_name in [ 'EchoTime', 'RepetitionTime', 'FlipAngle' ]: - first_value = first_header.keyval().get(field_name) - this_value = this_header.keyval().get(field_name) - if first_value and this_value and first_value != this_value: - warn_protocol_mismatch = True - if warn_protocol_mismatch: - app.warn('Mismatched protocol acquisition parameters detected between input images; ' + \ - 'the assumption of equivalent intensities between b=0 volumes of different inputs underlying operation of this script may not be valid') - if app.ARGS.mask: - mask_header = image.Header(path.from_user(app.ARGS.mask, False)) - if mask_header.size()[0:3] != first_header.size()[0:3]: - raise 
MRtrixError('Spatial dimensions of mask image "' + app.ARGS.mask + '" do not match those of first image "' + first_header.name() + '"') - - # check output path - app.check_output_path(path.from_user(app.ARGS.output, False)) - - # import data to scratch directory - app.make_scratch_dir() - for index, filename in enumerate(app.ARGS.inputs): - run.command('mrconvert ' + path.from_user(filename) + ' ' + path.to_scratch(str(index) + 'in.mif')) - if app.ARGS.mask: - run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') - app.goto_scratch_dir() - - # extract b=0 volumes within each input series - for index in range(0, num_inputs): - infile = str(index) + 'in.mif' - outfile = str(index) + 'b0.mif' - if len(image.Header(infile).size()) > 3: - run.command('dwiextract ' + infile + ' ' + outfile + ' -bzero') - else: - run.function(shutil.copyfile, infile, outfile) - - mask_option = ' -mask_input mask.mif -mask_target mask.mif' if app.ARGS.mask else '' - - # for all but the first image series: - # - find multiplicative factor to match b=0 images to those of the first image - # - apply multiplicative factor to whole image series - # It would be better to not preferentially treat one of the inputs differently to any other: - # - compare all inputs to all other inputs - # - determine one single appropriate scaling factor for each image based on all results - # can't do a straight geometric average: e.g. if run for 2 images, each would map to - # the the input intensoty of the other image, and so the two corrected images would not match - # should be some mathematical theorem providing the optimal scaling factor for each image - # based on the resulting matrix of optimal scaling factors - filelist = [ '0in.mif' ] - for index in range(1, num_inputs): - stderr_text = run.command('mrhistmatch scale ' + str(index) + 'b0.mif 0b0.mif ' + str(index) + 'rescaledb0.mif' + mask_option).stderr - scaling_factor = None - for line in stderr_text.splitlines(): - if 'Estimated scale factor is' in line: - try: - scaling_factor = float(line.split()[-1]) - except ValueError as exception: - raise MRtrixError('Unable to convert scaling factor from mrhistmatch output to floating-point number') from exception - break - if scaling_factor is None: - raise MRtrixError('Unable to extract scaling factor from mrhistmatch output') - filename = str(index) + 'rescaled.mif' - run.command('mrcalc ' + str(index) + 'in.mif ' + str(scaling_factor) + ' -mult ' + filename) - filelist.append(filename) - - # concatenate all series together - run.command('mrcat ' + ' '.join(filelist) + ' - -axis 3 | ' + \ - 'mrconvert - result.mif -json_export result_init.json -strides 0,0,0,1') - - # remove current contents of command_history, since there's no sensible - # way to choose from which input image the contents should be taken; - # we do however want to keep other contents of keyval (e.g. 
gradient table) - with open('result_init.json', 'r', encoding='utf-8') as input_json_file: - keyval = json.load(input_json_file) - keyval.pop('command_history', None) - with open('result_final.json', 'w', encoding='utf-8') as output_json_file: - json.dump(keyval, output_json_file) - - run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval='result_final.json', force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwicat/execute.py b/python/mrtrix3/dwicat/execute.py new file mode 100644 index 0000000000..eb0d4aec3a --- /dev/null +++ b/python/mrtrix3/dwicat/execute.py @@ -0,0 +1,132 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import json, shutil +from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module +from mrtrix3 import app, image, path, run #pylint: disable=no-name-in-module + +def execute(): #pylint: disable=unused-variable + + num_inputs = len(app.ARGS.inputs) + if num_inputs < 2: + raise MRtrixError('Script requires at least two input image series') + + # check input data + def check_header(header): + if len(header.size()) > 4: + raise MRtrixError('Image "' + header.name() + '" contains more than 4 dimensions') + if not 'dw_scheme' in header.keyval(): + raise MRtrixError('Image "' + header.name() + '" does not contain a gradient table') + dw_scheme = header.keyval()['dw_scheme'] + try: + if isinstance(dw_scheme[0], list): + num_grad_lines = len(dw_scheme) + elif (isinstance(dw_scheme[0], ( int, float))) and len(dw_scheme) >= 4: + num_grad_lines = 1 + else: + raise MRtrixError + except (IndexError, MRtrixError): + raise MRtrixError('Image "' + header.name() + '" contains gradient table of unknown format') # pylint: disable=raise-missing-from + if len(header.size()) == 4: + num_volumes = header.size()[3] + if num_grad_lines != num_volumes: + raise MRtrixError('Number of lines in gradient table for image "' + header.name() + '" (' + str(num_grad_lines) + ') does not match number of volumes (' + str(num_volumes) + ')') + elif not (num_grad_lines == 1 and len(dw_scheme) >= 4 and dw_scheme[3] <= float(CONFIG.get('BZeroThreshold', 10.0))): + raise MRtrixError('Image "' + header.name() + '" is 3D, and cannot be validated as a b=0 volume') + + first_header = image.Header(path.from_user(app.ARGS.inputs[0], False)) + check_header(first_header) + warn_protocol_mismatch = False + for filename in app.ARGS.inputs[1:]: + this_header = image.Header(path.from_user(filename, False)) + check_header(this_header) + if this_header.size()[0:3] != first_header.size()[0:3]: + raise MRtrixError('Spatial dimensions of image "' + filename + '" do not match those of first image "' + first_header.name() + '"') + for field_name in [ 'EchoTime', 'RepetitionTime', 'FlipAngle' ]: + first_value = first_header.keyval().get(field_name) + this_value = this_header.keyval().get(field_name) + if first_value and this_value and first_value != 
this_value: + warn_protocol_mismatch = True + if warn_protocol_mismatch: + app.warn('Mismatched protocol acquisition parameters detected between input images; ' + \ + 'the assumption of equivalent intensities between b=0 volumes of different inputs underlying operation of this script may not be valid') + if app.ARGS.mask: + mask_header = image.Header(path.from_user(app.ARGS.mask, False)) + if mask_header.size()[0:3] != first_header.size()[0:3]: + raise MRtrixError('Spatial dimensions of mask image "' + app.ARGS.mask + '" do not match those of first image "' + first_header.name() + '"') + + # check output path + app.check_output_path(path.from_user(app.ARGS.output, False)) + + # import data to scratch directory + app.make_scratch_dir() + for index, filename in enumerate(app.ARGS.inputs): + run.command('mrconvert ' + path.from_user(filename) + ' ' + path.to_scratch(str(index) + 'in.mif')) + if app.ARGS.mask: + run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') + app.goto_scratch_dir() + + # extract b=0 volumes within each input series + for index in range(0, num_inputs): + infile = str(index) + 'in.mif' + outfile = str(index) + 'b0.mif' + if len(image.Header(infile).size()) > 3: + run.command('dwiextract ' + infile + ' ' + outfile + ' -bzero') + else: + run.function(shutil.copyfile, infile, outfile) + + mask_option = ' -mask_input mask.mif -mask_target mask.mif' if app.ARGS.mask else '' + + # for all but the first image series: + # - find multiplicative factor to match b=0 images to those of the first image + # - apply multiplicative factor to whole image series + # It would be better to not preferentially treat one of the inputs differently to any other: + # - compare all inputs to all other inputs + # - determine one single appropriate scaling factor for each image based on all results + # can't do a straight geometric average: e.g. if run for 2 images, each would map to + # the the input intensoty of the other image, and so the two corrected images would not match + # should be some mathematical theorem providing the optimal scaling factor for each image + # based on the resulting matrix of optimal scaling factors + filelist = [ '0in.mif' ] + for index in range(1, num_inputs): + stderr_text = run.command('mrhistmatch scale ' + str(index) + 'b0.mif 0b0.mif ' + str(index) + 'rescaledb0.mif' + mask_option).stderr + scaling_factor = None + for line in stderr_text.splitlines(): + if 'Estimated scale factor is' in line: + try: + scaling_factor = float(line.split()[-1]) + except ValueError as exception: + raise MRtrixError('Unable to convert scaling factor from mrhistmatch output to floating-point number') from exception + break + if scaling_factor is None: + raise MRtrixError('Unable to extract scaling factor from mrhistmatch output') + filename = str(index) + 'rescaled.mif' + run.command('mrcalc ' + str(index) + 'in.mif ' + str(scaling_factor) + ' -mult ' + filename) + filelist.append(filename) + + # concatenate all series together + run.command('mrcat ' + ' '.join(filelist) + ' - -axis 3 | ' + \ + 'mrconvert - result.mif -json_export result_init.json -strides 0,0,0,1') + + # remove current contents of command_history, since there's no sensible + # way to choose from which input image the contents should be taken; + # we do however want to keep other contents of keyval (e.g. 
gradient table) + with open('result_init.json', 'r', encoding='utf-8') as input_json_file: + keyval = json.load(input_json_file) + keyval.pop('command_history', None) + with open('result_final.json', 'w', encoding='utf-8') as output_json_file: + json.dump(keyval, output_json_file) + + run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval='result_final.json', force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwicat/usage.py b/python/mrtrix3/dwicat/usage.py new file mode 100644 index 0000000000..ad43e59194 --- /dev/null +++ b/python/mrtrix3/dwicat/usage.py @@ -0,0 +1,26 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Lena Dorfschmidt (ld548@cam.ac.uk) and Jakub Vohryzek (jakub.vohryzek@queens.ox.ac.uk) and Robert E. Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Concatenating multiple DWI series accounting for differential intensity scaling') + cmdline.add_description('This script concatenates two or more 4D DWI series, accounting for the ' + 'fact that there may be differences in intensity scaling between those series. ' + 'This intensity scaling is corrected by determining scaling factors that will ' + 'make the overall image intensities in the b=0 volumes of each series approximately ' + 'equivalent.') + cmdline.add_argument('inputs', nargs='+', help='Multiple input diffusion MRI series') + cmdline.add_argument('output', help='The output image series (all DWIs concatenated)') + cmdline.add_argument('-mask', metavar='image', help='Provide a binary mask within which image intensities will be matched') diff --git a/python/mrtrix3/dwifslpreproc/__init__.py b/python/mrtrix3/dwifslpreproc/__init__.py index cb944f7ea3..e69de29bb2 100644 --- a/python/mrtrix3/dwifslpreproc/__init__.py +++ b/python/mrtrix3/dwifslpreproc/__init__.py @@ -1,1405 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - - - -import glob, itertools, json, math, os, shutil, sys, shlex - - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import app, _version #pylint: disable=no-name-in-module, import-outside-toplevel - cmdline.set_author('Robert E. 
Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Perform diffusion image pre-processing using FSL\'s eddy tool; including inhomogeneity distortion correction using FSL\'s topup tool if possible') - cmdline.add_description('This script is intended to provide convenience of use of the FSL software tools topup and eddy for performing DWI pre-processing, by encapsulating some of the surrounding image data and metadata processing steps. It is intended to simply these processing steps for most commonly-used DWI acquisition strategies, whilst also providing support for some more exotic acquisitions. The "example usage" section demonstrates the ways in which the script can be used based on the (compulsory) -rpe_* command-line options.') - cmdline.add_description('More information on use of the dwifslpreproc command can be found at the following link: \nhttps://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/dwi_preprocessing/dwifslpreproc.html') - cmdline.add_description('Note that the MRtrix3 command dwi2mask will automatically be called to derive a processing mask for the FSL command "eddy", which determines which voxels contribute to the estimation of geometric distortion parameters and possibly also the classification of outlier slices. If FSL command "topup" is used to estimate a susceptibility field, then dwi2mask will be executed on the resuts of running FSL command "applytopup" to the input DWIs; otherwise it will be executed directly on the input DWIs. Alternatively, the -eddy_mask option can be specified in order to manually provide such a processing mask. More information on mask derivation from DWI data can be found at: https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/dwi_preprocessing/masking.html') - cmdline.add_description('The "-topup_options" and "-eddy_options" command-line options allow the user to pass desired command-line options directly to the FSL commands topup and eddy. The available options for those commands may vary between versions of FSL; users can interrogate such by querying the help pages of the installed software, and/or the FSL online documentation: (topup) https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/TopupUsersGuide ; (eddy) https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/UsersGuide') - cmdline.add_description('The script will attempt to run the CUDA version of eddy; if this does not succeed for any reason, or is not present on the system, the CPU version will be attempted instead. By default, the CUDA eddy binary found that indicates compilation against the most recent version of CUDA will be attempted; this can be over-ridden by providing a soft-link "eddy_cuda" within your path that links to the binary you wish to be executed.') - cmdline.add_description('Note that this script does not perform any explicit registration between images provided to topup via the -se_epi option, and the DWI volumes provided to eddy. In some instances (motion between acquisitions) this can result in erroneous application of the inhomogeneity field during distortion correction. Use of the -align_seepi option is advocated in this scenario, which ensures that the first volume in the series provided to topup is also the first volume in the series provided to eddy, guaranteeing alignment. 
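
The CUDA-first, CPU-fallback behaviour described here follows the same pattern used elsewhere in these scripts (e.g. for hd-bet): attempt the accelerated executable, and on failure re-run the CPU variant, combining both error streams if neither succeeds. A schematic sketch of that pattern, assuming the mrtrix3 Python API is importable as in the rest of these scripts; the command strings are placeholders rather than the exact binaries the script searches for:

from mrtrix3 import run  #pylint: disable=no-name-in-module

def run_with_cpu_fallback(gpu_cmd, cpu_cmd):
  # Try the GPU-accelerated command first; fall back to the CPU command on any failure.
  try:
    return run.command(gpu_cmd)
  except run.MRtrixCmdError as e_gpu:
    try:
      return run.command(cpu_cmd)
    except run.MRtrixCmdError as e_cpu:
      # Neither succeeded: report the combined output of both attempts
      raise run.MRtrixCmdError(cpu_cmd, 1,
                               e_gpu.stdout + e_cpu.stdout,
                               e_gpu.stderr + e_cpu.stderr)
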
But a prerequisite for this approach is that the image contrast within the images provided to the -se_epi option must match the b=0 volumes present within the input DWI series: this means equivalent TE, TR and flip angle (note that differences in multi-band factors between two acquisitions may lead to differences in TR).') - cmdline.add_example_usage('A basic DWI acquisition, where all image volumes are acquired in a single protocol with fixed phase encoding', - 'dwifslpreproc DWI_in.mif DWI_out.mif -rpe_none -pe_dir ap -readout_time 0.55', - 'Due to use of a single fixed phase encoding, no EPI distortion correction can be applied in this case.') - cmdline.add_example_usage('DWIs all acquired with a single fixed phase encoding; but additionally a pair of b=0 images with reversed phase encoding to estimate the inhomogeneity field', - 'mrcat b0_ap.mif b0_pa.mif b0_pair.mif -axis 3; dwifslpreproc DWI_in.mif DWI_out.mif -rpe_pair -se_epi b0_pair.mif -pe_dir ap -readout_time 0.72 -align_seepi', - 'Here the two individual b=0 volumes are concatenated into a single 4D image series, and this is provided to the script via the -se_epi option. Note that with the -rpe_pair option used here, which indicates that the SE-EPI image series contains one or more pairs of b=0 images with reversed phase encoding, the FIRST HALF of the volumes in the SE-EPI series must possess the same phase encoding as the input DWI series, while the second half are assumed to contain the opposite phase encoding direction but identical total readout time. Use of the -align_seepi option is advocated as long as its use is valid (more information in the Description section).') - cmdline.add_example_usage('All DWI directions & b-values are acquired twice, with the phase encoding direction of the second acquisition protocol being reversed with respect to the first', - 'mrcat DWI_lr.mif DWI_rl.mif DWI_all.mif -axis 3; dwifslpreproc DWI_all.mif DWI_out.mif -rpe_all -pe_dir lr -readout_time 0.66', - 'Here the two acquisition protocols are concatenated into a single DWI series containing all acquired volumes. The direction indicated via the -pe_dir option should be the direction of phase encoding used in acquisition of the FIRST HALF of volumes in the input DWI series; ie. the first of the two files that was provided to the mrcat command. In this usage scenario, the output DWI series will contain the same number of image volumes as ONE of the acquired DWI series (ie. half of the number in the concatenated series); this is because the script will identify pairs of volumes that possess the same diffusion sensitisation but reversed phase encoding, and perform explicit recombination of those volume pairs in such a way that image contrast in regions of inhomogeneity is determined from the stretched rather than the compressed image.') - cmdline.add_example_usage('Any acquisition scheme that does not fall into one of the example usages above', - 'mrcat DWI_*.mif DWI_all.mif -axis 3; mrcat b0_*.mif b0_all.mif -axis 3; dwifslpreproc DWI_all.mif DWI_out.mif -rpe_header -se_epi b0_all.mif -align_seepi', - 'With this usage, the relevant phase encoding information is determined entirely based on the contents of the relevant image headers, and dwifslpreproc prepares all metadata for the executed FSL commands accordingly. This can therefore be used if the particular DWI acquisition strategy used does not correspond to one of the simple examples as described in the prior examples. 
This usage is predicated on the headers of the input files containing appropriately-named key-value fields such that MRtrix3 tools identify them as such. In some cases, conversion from DICOM using MRtrix3 commands will automatically extract and embed this information; however this is not true for all scanner vendors and/or software versions. In the latter case it may be possible to manually provide these metadata; either using the -json_import command-line option of dwifslpreproc, or the -json_import or one of the -import_pe_* command-line options of MRtrix3\'s mrconvert command (and saving in .mif format) prior to running dwifslpreproc.') - cmdline.add_citation('Andersson, J. L. & Sotiropoulos, S. N. An integrated approach to correction for off-resonance effects and subject movement in diffusion MR imaging. NeuroImage, 2015, 125, 1063-1078', is_external=True) - cmdline.add_citation('Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', is_external=True) - cmdline.add_citation('Skare, S. & Bammer, R. Jacobian weighting of distortion corrected EPI data. Proceedings of the International Society for Magnetic Resonance in Medicine, 2010, 5063', condition='If performing recombination of diffusion-weighted volume pairs with opposing phase encoding directions', is_external=True) - cmdline.add_citation('Andersson, J. L.; Skare, S. & Ashburner, J. How to correct susceptibility distortions in spin-echo echo-planar images: application to diffusion tensor imaging. NeuroImage, 2003, 20, 870-888', condition='If performing EPI susceptibility distortion correction', is_external=True) - cmdline.add_citation('Andersson, J. L. R.; Graham, M. S.; Zsoldos, E. & Sotiropoulos, S. N. Incorporating outlier detection and replacement into a non-parametric framework for movement and distortion correction of diffusion MR images. NeuroImage, 2016, 141, 556-572', condition='If including "--repol" in -eddy_options input', is_external=True) - cmdline.add_citation('Andersson, J. L. R.; Graham, M. S.; Drobnjak, I.; Zhang, H.; Filippini, N. & Bastiani, M. Towards a comprehensive framework for movement and distortion correction of diffusion MR images: Within volume movement. NeuroImage, 2017, 152, 450-466', condition='If including "--mporder" in -eddy_options input', is_external=True) - cmdline.add_citation('Bastiani, M.; Cottaar, M.; Fitzgibbon, S.P.; Suri, S.; Alfaro-Almagro, F.; Sotiropoulos, S.N.; Jbabdi, S.; Andersson, J.L.R. Automated quality control for within and between studies diffusion MRI data using a non-parametric framework for movement and distortion correction. 
NeuroImage, 2019, 184, 801-812', condition='If using -eddyqc_text or -eddyqc_all option and eddy_quad is installed', is_external=True) - cmdline.add_argument('input', help='The input DWI series to be corrected') - cmdline.add_argument('output', help='The output corrected image series') - cmdline.add_argument('-json_import', metavar=('file'), help='Import image header information from an associated JSON file (may be necessary to determine phase encoding information)') - pe_options = cmdline.add_argument_group('Options for manually specifying the phase encoding of the input DWIs') - pe_options.add_argument('-pe_dir', metavar=('PE'), help='Manually specify the phase encoding direction of the input series; can be a signed axis number (e.g. -0, 1, +2), an axis designator (e.g. RL, PA, IS), or NIfTI axis codes (e.g. i-, j, k)') - pe_options.add_argument('-readout_time', metavar=('time'), type=float, help='Manually specify the total readout time of the input series (in seconds)') - distcorr_options = cmdline.add_argument_group('Options for achieving correction of susceptibility distortions') - distcorr_options.add_argument('-se_epi', metavar=('image'), help='Provide an additional image series consisting of spin-echo EPI images, which is to be used exclusively by topup for estimating the inhomogeneity field (i.e. it will not form part of the output image series)') - distcorr_options.add_argument('-align_seepi', action='store_true', help='Achieve alignment between the SE-EPI images used for inhomogeneity field estimation, and the DWIs (more information in Description section)') - distcorr_options.add_argument('-topup_options', metavar=('" TopupOptions"'), help='Manually provide additional command-line options to the topup command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to topup)') - distcorr_options.add_argument('-topup_files', metavar=('prefix'), help='Provide files generated by prior execution of the FSL "topup" command to be utilised by eddy') - cmdline.flag_mutually_exclusive_options( [ 'topup_files', 'se_epi' ], False ) - cmdline.flag_mutually_exclusive_options( [ 'topup_files', 'align_seepi' ], False ) - cmdline.flag_mutually_exclusive_options( [ 'topup_files', 'topup_options' ], False ) - eddy_options = cmdline.add_argument_group('Options for affecting the operation of the FSL "eddy" command') - eddy_options.add_argument('-eddy_mask', metavar=('image'), help='Provide a processing mask to use for eddy, instead of having dwifslpreproc generate one internally using dwi2mask') - eddy_options.add_argument('-eddy_slspec', metavar=('file'), help='Provide a file containing slice groupings for eddy\'s slice-to-volume registration') - eddy_options.add_argument('-eddy_options', metavar=('" EddyOptions"'), help='Manually provide additional command-line options to the eddy command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to eddy)') - eddyqc_options = cmdline.add_argument_group('Options for utilising EddyQC') - eddyqc_options.add_argument('-eddyqc_text', metavar=('directory'), help='Copy the various text-based statistical outputs generated by eddy, and the output of eddy_qc (if installed), into an output directory') - eddyqc_options.add_argument('-eddyqc_all', metavar=('directory'), help='Copy ALL outputs generated by eddy (including images), and the output of eddy_qc (if installed), into an output directory') - 
cmdline.flag_mutually_exclusive_options( [ 'eddyqc_text', 'eddyqc_all' ], False ) - app.add_dwgrad_export_options(cmdline) - app.add_dwgrad_import_options(cmdline) - rpe_options = cmdline.add_argument_group('Options for specifying the acquisition phase-encoding design; note that one of the -rpe_* options MUST be provided') - rpe_options.add_argument('-rpe_none', action='store_true', help='Specify that no reversed phase-encoding image data is being provided; eddy will perform eddy current and motion correction only') - rpe_options.add_argument('-rpe_pair', action='store_true', help='Specify that a set of images (typically b=0 volumes) will be provided for use in inhomogeneity field estimation only (using the -se_epi option)') - rpe_options.add_argument('-rpe_all', action='store_true', help='Specify that ALL DWIs have been acquired with opposing phase-encoding') - rpe_options.add_argument('-rpe_header', action='store_true', help='Specify that the phase-encoding information can be found in the image header(s), and that this is the information that the script should use') - cmdline.flag_mutually_exclusive_options( [ 'rpe_none', 'rpe_pair', 'rpe_all', 'rpe_header' ], True ) - cmdline.flag_mutually_exclusive_options( [ 'rpe_none', 'se_epi' ], False ) # May still technically provide -se_epi even with -rpe_all - cmdline.flag_mutually_exclusive_options( [ 'rpe_pair', 'topup_files'] ) # Would involve two separate sources of inhomogeneity field information - cmdline.flag_mutually_exclusive_options( [ 'se_epi', 'topup_files'] ) # Would involve two separate sources of inhomogeneity field information - cmdline.flag_mutually_exclusive_options( [ 'rpe_header', 'pe_dir' ], False ) # Can't manually provide phase-encoding direction if expecting it to be in the header - cmdline.flag_mutually_exclusive_options( [ 'rpe_header', 'readout_time' ], False ) # Can't manually provide readout time if expecting it to be in the header - - - - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, fsl, image, matrix, path, phaseencoding, run, utils #pylint: disable=no-name-in-module, import-outside-toplevel - - if utils.is_windows(): - raise MRtrixError('Script cannot run on Windows due to FSL dependency') - - image.check_3d_nonunity(path.from_user(app.ARGS.input, False)) - - pe_design = '' - if app.ARGS.rpe_none: - pe_design = 'None' - elif app.ARGS.rpe_pair: - pe_design = 'Pair' - if not app.ARGS.se_epi: - raise MRtrixError('If using the -rpe_pair option, the -se_epi option must be used to provide the spin-echo EPI data to be used by topup') - elif app.ARGS.rpe_all: - pe_design = 'All' - elif app.ARGS.rpe_header: - pe_design = 'Header' - else: - raise MRtrixError('Must explicitly specify phase-encoding acquisition design (even if none)') - - if app.ARGS.align_seepi and not app.ARGS.se_epi: - raise MRtrixError('-align_seepi option is only applicable when the -se_epi option is also used') - - fsl_path = os.environ.get('FSLDIR', '') - if not fsl_path: - raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') - - if not pe_design == 'None': - topup_config_path = os.path.join(fsl_path, 'etc', 'flirtsch', 'b02b0.cnf') - if not os.path.isfile(topup_config_path): - raise MRtrixError('Could not find necessary default config file for FSL topup command (expected location: ' + topup_config_path + ')') - topup_cmd = fsl.exe_name('topup') - - if not 
fsl.eddy_binary(True) and not fsl.eddy_binary(False): - raise MRtrixError('Could not find any version of FSL eddy command') - fsl_suffix = fsl.suffix() - app.check_output_path(app.ARGS.output) - - # Export the gradient table to the path requested by the user if necessary - grad_export_option = app.read_dwgrad_export_options() - - - eddyqc_path = None - eddyqc_files = [ 'eddy_parameters', 'eddy_movement_rms', 'eddy_restricted_movement_rms', \ - 'eddy_post_eddy_shell_alignment_parameters', 'eddy_post_eddy_shell_PE_translation_parameters', \ - 'eddy_outlier_report', 'eddy_outlier_map', 'eddy_outlier_n_stdev_map', 'eddy_outlier_n_sqr_stdev_map', \ - 'eddy_movement_over_time' ] - if app.ARGS.eddyqc_text: - eddyqc_path = path.from_user(app.ARGS.eddyqc_text, False) - elif app.ARGS.eddyqc_all: - eddyqc_path = path.from_user(app.ARGS.eddyqc_all, False) - eddyqc_files.extend([ 'eddy_outlier_free_data.nii.gz', 'eddy_cnr_maps.nii.gz', 'eddy_residuals.nii.gz' ]) - if eddyqc_path: - if os.path.exists(eddyqc_path): - if os.path.isdir(eddyqc_path): - if any(os.path.exists(os.path.join(eddyqc_path, filename)) for filename in eddyqc_files): - if app.FORCE_OVERWRITE: - app.warn('Output eddy QC directory already contains relevant files; these will be overwritten on completion') - else: - raise MRtrixError('Output eddy QC directory already contains relevant files (use -force to override)') - else: - if app.FORCE_OVERWRITE: - app.warn('Target for eddy QC output is not a directory; it will be overwritten on completion') - else: - raise MRtrixError('Target for eddy QC output exists, and is not a directory (use -force to override)') - - - eddy_manual_options = [] - topup_file_userpath = None - if app.ARGS.eddy_options: - # Initially process as a list; we'll convert back to a string later - eddy_manual_options = app.ARGS.eddy_options.strip().split() - # Check for erroneous usages before we perform any data importing - if any(entry.startswith('--mask=') for entry in eddy_manual_options): - raise MRtrixError('Cannot provide eddy processing mask via -eddy_options "--mask=..." as manipulations are required; use -eddy_mask option instead') - if any(entry.startswith('--slspec=') for entry in eddy_manual_options): - raise MRtrixError('Cannot provide eddy slice specification file via -eddy_options "--slspec=..." 
as manipulations are required; use -eddy_slspec option instead') - if '--resamp=lsr' in eddy_manual_options: - raise MRtrixError('dwifslpreproc does not currently support least-squares reconstruction; this cannot be simply passed via -eddy_options') - eddy_topup_entry = [entry for entry in eddy_manual_options if entry.startswith('--topup=')] - if len(eddy_topup_entry) > 1: - raise MRtrixError('Input to -eddy_options contains multiple "--topup=" entries') - if eddy_topup_entry: - # -topup_files and -se_epi are mutually exclusive, but need to check in case - # pre-calculated topup output files were provided this way instead - if app.ARGS.se_epi: - raise MRtrixError('Cannot use both -eddy_options "--topup=" and -se_epi') - topup_file_userpath = path.from_user(eddy_topup_entry[0][len('--topup='):], False) - eddy_manual_options = [entry for entry in eddy_manual_options if not entry.startswith('--topup=')] - - - # Don't import slspec file directly; just make sure it exists - if app.ARGS.eddy_slspec and not os.path.isfile(path.from_user(app.ARGS.eddy_slspec, False)): - raise MRtrixError('Unable to find file \"' + app.ARGS.eddy_slspec + '\" provided via -eddy_slspec option') - - - # Attempt to find pre-generated topup files before constructing the scratch directory - topup_input_movpar = None - topup_input_fieldcoef = None - if app.ARGS.topup_files: - if topup_file_userpath: - raise MRtrixError('Cannot use -topup_files option and also specify "... --topup= ..." within content of -eddy_options') - topup_file_userpath = path.from_user(app.ARGS.topup_files, False) - - execute_applytopup = pe_design != 'None' or topup_file_userpath - if execute_applytopup: - applytopup_cmd = fsl.exe_name('applytopup') - - if topup_file_userpath: - # Find files based on what the user may or may not have specified: - # - Path to the movement parameters text file - # - Path to the field coefficients image - # - Path prefix including the underscore - # - Path prefix omitting the underscore - - def check_movpar(): - if not os.path.isfile(topup_input_movpar): - raise MRtrixError('No topup movement parameter file found based on path "' + topup_file_userpath + '" (expected location: ' + topup_input_movpar + ')') - - def find_fieldcoef(fieldcoef_prefix): - fieldcoef_candidates = glob.glob(fieldcoef_prefix + '_fieldcoef.nii*') - if not fieldcoef_candidates: - raise MRtrixError('No topup field coefficient image found based on path "' + topup_file_userpath + '"') - if len(fieldcoef_candidates) > 1: - raise MRtrixError('Multiple topup field coefficient images found based on path "' + topup_file_userpath + '": ' + str(fieldcoef_candidates)) - return fieldcoef_candidates[0] - - if os.path.isfile(topup_file_userpath): - if topup_file_userpath.endswith('_movpar.txt'): - topup_input_movpar = topup_file_userpath - topup_input_fieldcoef = find_fieldcoef(topup_file_userpath[:-len('_movpar.txt')]) - elif topup_file_userpath.endswith('_fieldcoef.nii') or topup_file_userpath.endswith('_fieldcoef.nii.gz'): - topup_input_fieldcoef = topup_file_userpath - topup_input_movpar = topup_file_userpath - if topup_input_movpar.endswith('.gz'): - topup_input_movpar = topup_input_movpar[:-len('.gz')] - topup_input_movpar = topup_input_movpar[:-len('_fieldcoef.nii')] + '_movpar.txt' - check_movpar() - else: - raise MRtrixError('Unrecognised file "' + topup_file_userpath + '" specified as pre-calculated topup susceptibility field') - else: - topup_input_movpar = topup_file_userpath - if topup_input_movpar[-1] == '_': - topup_input_movpar = 
topup_input_movpar[:-1] - topup_input_movpar += '_movpar.txt' - check_movpar() - topup_input_fieldcoef = find_fieldcoef(topup_input_movpar[:-len('_movpar.txt')]) - - - # Convert all input images into MRtrix format and store in scratch directory first - app.make_scratch_dir() - - grad_import_option = app.read_dwgrad_import_options() - json_import_option = '' - if app.ARGS.json_import: - json_import_option = ' -json_import ' + path.from_user(app.ARGS.json_import) - json_export_option = ' -json_export ' + path.to_scratch('dwi.json', True) - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('dwi.mif') + grad_import_option + json_import_option + json_export_option) - if app.ARGS.se_epi: - image.check_3d_nonunity(path.from_user(app.ARGS.se_epi, False)) - run.command('mrconvert ' + path.from_user(app.ARGS.se_epi) + ' ' + path.to_scratch('se_epi.mif')) - if topup_file_userpath: - run.function(shutil.copyfile, topup_input_movpar, path.to_scratch('field_movpar.txt', False)) - # Can't run field spline coefficients image through mrconvert: - # topup encodes voxel sizes within the three NIfTI intent parameters, and - # applytopup requires that these be set, but mrconvert will wipe them - run.function(shutil.copyfile, topup_input_fieldcoef, path.to_scratch('field_fieldcoef.nii' + ('.gz' if topup_input_fieldcoef.endswith('.nii.gz') else ''), False)) - if app.ARGS.eddy_mask: - run.command('mrconvert ' + path.from_user(app.ARGS.eddy_mask) + ' ' + path.to_scratch('eddy_mask.mif') + ' -datatype bit') - - app.goto_scratch_dir() - - - # Get information on the input images, and check their validity - dwi_header = image.Header('dwi.mif') - if not len(dwi_header.size()) == 4: - raise MRtrixError('Input DWI must be a 4D image') - dwi_num_volumes = dwi_header.size()[3] - app.debug('Number of DWI volumes: ' + str(dwi_num_volumes)) - dwi_num_slices = dwi_header.size()[2] - app.debug('Number of DWI slices: ' + str(dwi_num_slices)) - dwi_pe_scheme = phaseencoding.get_scheme(dwi_header) - if app.ARGS.se_epi: - se_epi_header = image.Header('se_epi.mif') - # This doesn't necessarily apply any more: May be able to combine e.g. 
a P>>A from -se_epi with an A>>P b=0 image from the DWIs - # if not len(se_epi_header.size()) == 4: - # raise MRtrixError('File provided using -se_epi option must contain more than one image volume') - se_epi_pe_scheme = phaseencoding.get_scheme(se_epi_header) - if 'dw_scheme' not in dwi_header.keyval(): - raise MRtrixError('No diffusion gradient table found') - grad = dwi_header.keyval()['dw_scheme'] - if not len(grad) == dwi_num_volumes: - raise MRtrixError('Number of lines in gradient table (' + str(len(grad)) + ') does not match input image (' + str(dwi_num_volumes) + ' volumes); check your input data') - - - # Deal with slice timing information for eddy slice-to-volume correction - slice_encoding_axis = 2 - eddy_mporder = any(s.startswith('--mporder') for s in eddy_manual_options) - if eddy_mporder: - if 'SliceEncodingDirection' in dwi_header.keyval(): - slice_encoding_direction = dwi_header.keyval()['SliceEncodingDirection'] - app.debug('Slice encoding direction: ' + slice_encoding_direction) - if not slice_encoding_direction.startswith('k'): - raise MRtrixError('DWI header indicates that 3rd spatial axis is not the slice axis; this is not yet compatible with --mporder option in eddy, nor supported in dwifslpreproc') - slice_encoding_direction = image.axis2dir(slice_encoding_direction) - else: - app.console('No slice encoding direction information present; assuming third axis corresponds to slices') - slice_encoding_direction = [0,0,1] - slice_encoding_axis = [ index for index, value in enumerate(slice_encoding_direction) if value ][0] - slice_groups = [ ] - slice_timing = [ ] - # Since there's a chance that we may need to pad this info, we can't just copy this file - # to the scratch directory... - if app.ARGS.eddy_slspec: - try: - slice_groups = matrix.load_numeric(path.from_user(app.ARGS.eddy_slspec, False), dtype=int) - app.debug('Slice groups: ' + str(slice_groups)) - except ValueError: - try: - slice_timing = matrix.load_numeric(path.from_user(app.ARGS.eddy_slspec, False), dtype=float) - app.debug('Slice timing: ' + str(slice_timing)) - app.warn('\"slspec\" file provided to FSL eddy is supposed to contain slice indices for slice groups; ' - 'contents of file \"' + app.ARGS.eddy_slspec + '\" appear to instead be slice timings; ' - 'these data have been imported and will be converted to the appropriate format') - if len(slice_timing) != dwi_num_slices: - raise MRtrixError('Cannot use slice timing information from file \"' + app.ARGS.eddy_slspec + '\" for slice-to-volume correction: ' # pylint: disable=raise-missing-from - 'number of entries (' + str(len(slice_timing)) + ') does not match number of slices (' + str(dwi_num_slices) + ')') - except ValueError: - raise MRtrixError('Error parsing eddy \"slspec\" file \"' + app.ARGS.eddy_slspec + '\" ' # pylint: disable=raise-missing-from - '(please see FSL eddy help page, specifically the --slspec option)') - else: - if 'SliceTiming' not in dwi_header.keyval(): - raise MRtrixError('Cannot perform slice-to-volume correction in eddy: ' - '-eddy_slspec option not specified, and no slice timing information present in input DWI header') - slice_timing = dwi_header.keyval()['SliceTiming'] - app.debug('Initial slice timing contents from header: ' + str(slice_timing)) - if slice_timing in ['invalid', 'variable']: - raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' - 'data flagged as "' + slice_timing + '"') - # Fudges necessary to manipulate the nature of slice timing data in cases where - #
bad JSON formatting has led to the data not being simply a list of floats - # (whether from MRtrix3 DICOM conversion or from anything else) - if isinstance(slice_timing, str): - slice_timing = slice_timing.split() - if not isinstance(slice_timing, list): - raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' - 'data is not a list') - if len(slice_timing) == 1: - slice_timing = slice_timing[0] - if not isinstance(slice_timing, list): - raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' - 'unexpected data format') - if isinstance(slice_timing[0], list): - if not all(len(entry) == 1 for entry in slice_timing): - raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' - 'data do not appear to be 1D') - slice_timing = [ entry[0] for entry in slice_timing ] - if not all(isinstance(entry, float) for entry in slice_timing): - try: - slice_timing = [ float(entry) for entry in slice_timing ] - except ValueError as exception: - raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' - 'data are not numeric') from exception - app.debug('Re-formatted slice timing contents from header: ' + str(slice_timing)) - if len(slice_timing) != dwi_num_slices: - raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' - 'number of entries (' + str(len(slice_timing)) + ') does not match number of slices (' + str(dwi_header.size()[2]) + ')') - elif app.ARGS.eddy_slspec: - app.warn('-eddy_slspec option provided, but "--mporder=" not provided via -eddy_options; ' - 'slice specification file not imported as it would not be utilised by eddy') - - - # Use new features of dirstat to query the quality of the diffusion acquisition scheme - # Need to know the mean b-value in each shell, and the asymmetry value of each shell - # But don't bother testing / warning the user if they're already controlling for this - if not app.ARGS.eddy_options or not any(s.startswith('--slm=') for s in app.ARGS.eddy_options.split()): - shell_bvalues = [ int(round(float(value))) for value in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] - shell_asymmetries = [ float(value) for value in run.command('dirstat dwi.mif -output asym').stdout.splitlines() ] - # dirstat will skip any b=0 shell by default; therefore for correspondence between - # shell_bvalues and shell_symmetry, need to remove any b=0 from the former - if len(shell_bvalues) == len(shell_asymmetries) + 1: - shell_bvalues = shell_bvalues[1:] - elif len(shell_bvalues) != len(shell_asymmetries): - raise MRtrixError('Number of b-values reported by mrinfo (' + str(len(shell_bvalues)) + ') does not match number of outputs provided by dirstat (' + str(len(shell_asymmetries)) + ')') - for bvalue, asymmetry in zip(shell_bvalues, shell_asymmetries): - if asymmetry >= 0.1: - app.warn('sampling of b=' + str(bvalue) + ' shell is ' + ('strongly' if asymmetry >= 0.4 else 'moderately') + \ - ' asymmetric; distortion correction may benefit from use of: ' + \ - '-eddy_options " ... --slm=linear ... 
"') - - - # Since we want to access user-defined phase encoding information regardless of whether or not - # such information is present in the header, let's grab it here - manual_pe_dir = None - if app.ARGS.pe_dir: - manual_pe_dir = [ float(i) for i in phaseencoding.direction(app.ARGS.pe_dir) ] - app.debug('Manual PE direction: ' + str(manual_pe_dir)) - manual_trt = None - if app.ARGS.readout_time: - manual_trt = float(app.ARGS.readout_time) - app.debug('Manual readout time: ' + str(manual_trt)) - - - # Utilise the b-value clustering algorithm in src/dwi/shells.* - shell_indices = [ [ int(i) for i in entry.split(',') ] for entry in image.mrinfo('dwi.mif', 'shell_indices').split(' ') ] - shell_bvalues = [ float(f) for f in image.mrinfo('dwi.mif', 'shell_bvalues').split(' ')] - bzero_threshold = float(CONFIG.get('BZeroThreshold', 10.0)) - - # For each volume index, store the index of the shell to which it is attributed - # (this will make it much faster to determine whether or not two volumes belong to the same shell) - vol2shell = [ -1 ] * dwi_num_volumes - for index, volumes in enumerate(shell_indices): - for volume in volumes: - vol2shell[volume] = index - assert all(index >= 0 for index in vol2shell) - - - def grads_match(one, two): - # Are the two volumes assigned to different b-value shells? - if vol2shell[one] != vol2shell[two]: - return False - # Does this shell correspond to b=0? - if shell_bvalues[vol2shell[one]] <= bzero_threshold: - return True - # Dot product between gradient directions - # First, need to check for zero-norm vectors: - # - If both are zero, skip this check - # - If one is zero and the other is not, volumes don't match - # - If neither is zero, test the dot product - if any(grad[one][0:3]): - if not any(grad[two][0:3]): - return False - dot_product = grad[one][0]*grad[two][0] + grad[one][1]*grad[two][1] + grad[one][2]*grad[two][2] - if abs(dot_product) < 0.999: - return False - elif any(grad[two][0:3]): - return False - return True - - - # Manually generate a phase-encoding table for the input DWI based on user input - dwi_manual_pe_scheme = None - se_epi_manual_pe_scheme = None - auto_trt = 0.1 - dwi_auto_trt_warning = False - if manual_pe_dir: - - if manual_trt: - trt = manual_trt - else: - trt = auto_trt - dwi_auto_trt_warning = True - - # Still construct the manual PE scheme even with 'None' or 'Pair': - # there may be information in the header that we need to compare against - if pe_design == 'None': - line = list(manual_pe_dir) - line.append(trt) - dwi_manual_pe_scheme = [ line ] * dwi_num_volumes - app.debug('Manual DWI PE scheme for \'None\' PE design: ' + str(dwi_manual_pe_scheme)) - - # With 'Pair', also need to construct the manual scheme for SE EPIs - elif pe_design == 'Pair': - line = list(manual_pe_dir) - line.append(trt) - dwi_manual_pe_scheme = [ line ] * dwi_num_volumes - app.debug('Manual DWI PE scheme for \'Pair\' PE design: ' + str(dwi_manual_pe_scheme)) - if len(se_epi_header.size()) != 4: - raise MRtrixError('If using -rpe_pair option, image provided using -se_epi must be a 4D image') - se_epi_num_volumes = se_epi_header.size()[3] - if se_epi_num_volumes%2: - raise MRtrixError('If using -rpe_pair option, image provided using -se_epi must contain an even number of volumes') - # Assume that first half of volumes have same direction as series; - # second half have the opposite direction - se_epi_manual_pe_scheme = [ line ] * int(se_epi_num_volumes/2) - line = [ (-i if i else 0.0) for i in manual_pe_dir ] - line.append(trt) - 
se_epi_manual_pe_scheme.extend( [ line ] * int(se_epi_num_volumes/2) ) - app.debug('Manual SEEPI PE scheme for \'Pair\' PE design: ' + str(se_epi_manual_pe_scheme)) - - # If -rpe_all, need to scan through grad and figure out the pairings - # This will be required if relying on user-specified phase encode direction - # It will also be required at the end of the script for the manual recombination - # Update: The possible permutations of volume-matched acquisition is limited within the - # context of the -rpe_all option. In particular, the potential for having more - # than one b=0 volume within each half means that it is not possible to permit - # arbitrary ordering of those pairs, since b=0 volumes would then be matched - # despite having the same phase-encoding direction. Instead, explicitly enforce - # that volumes must be matched between the first and second halves of the DWI data. - elif pe_design == 'All': - if dwi_num_volumes%2: - raise MRtrixError('If using -rpe_all option, input image must contain an even number of volumes') - grads_matched = [ dwi_num_volumes ] * dwi_num_volumes - grad_pairs = [ ] - app.debug('Commencing gradient direction matching; ' + str(dwi_num_volumes) + ' volumes') - for index1 in range(int(dwi_num_volumes/2)): - if grads_matched[index1] == dwi_num_volumes: # As yet unpaired - for index2 in range(int(dwi_num_volumes/2), dwi_num_volumes): - if grads_matched[index2] == dwi_num_volumes: # Also as yet unpaired - if grads_match(index1, index2): - grads_matched[index1] = index2 - grads_matched[index2] = index1 - grad_pairs.append([index1, index2]) - app.debug('Matched volume ' + str(index1) + ' with ' + str(index2) + ': ' + str(grad[index1]) + ' ' + str(grad[index2])) - break - else: - raise MRtrixError('Unable to determine matching reversed phase-encode direction volume for DWI volume ' + str(index1)) - if not len(grad_pairs) == dwi_num_volumes/2: - raise MRtrixError('Unable to determine complete matching DWI volume pairs for reversed phase-encode combination') - # Construct manual PE scheme here: - # Regardless of whether or not there's a scheme in the header, need to have it: - # if there's one in the header, want to compare to the manually-generated one - dwi_manual_pe_scheme = [ ] - for index in range(0, dwi_num_volumes): - line = list(manual_pe_dir) - if index >= int(dwi_num_volumes/2): - line = [ (-i if i else 0.0) for i in line ] - line.append(trt) - dwi_manual_pe_scheme.append(line) - app.debug('Manual DWI PE scheme for \'All\' PE design: ' + str(dwi_manual_pe_scheme)) - - else: # No manual phase encode direction defined - - if not pe_design == 'Header': - raise MRtrixError('If not using -rpe_header, phase encoding direction must be provided using the -pe_dir option') - - - - def scheme_dirs_match(one, two): - for line_one, line_two in zip(one, two): - if not line_one[0:3] == line_two[0:3]: - return False - return True - - def scheme_times_match(one, two): - for line_one, line_two in zip(one, two): - if abs(line_one[3] - line_two[3]) > 5e-3: - return False - return True - - - - # Determine whether or not the phase encoding table generated manually should be used - # (possibly instead of a table present in the image header) - overwrite_dwi_pe_scheme = False - if dwi_pe_scheme: - if manual_pe_dir: - # Compare manual specification to that read from the header; - # overwrite & give warning to user if they differ - # Bear in mind that this could even be the case for -rpe_all; - # relying on earlier code having successfully generated the 'appropriate' - # PE 
scheme for the input volume based on the diffusion gradient table - if not scheme_dirs_match(dwi_pe_scheme, dwi_manual_pe_scheme): - app.warn('User-defined phase-encoding direction design does not match what is stored in DWI image header; proceeding with user specification') - overwrite_dwi_pe_scheme = True - if manual_trt: - # Compare manual specification to that read from the header - if not scheme_times_match(dwi_pe_scheme, dwi_manual_pe_scheme): - app.warn('User-defined total readout time does not match what is stored in DWI image header; proceeding with user specification') - overwrite_dwi_pe_scheme = True - if overwrite_dwi_pe_scheme: - dwi_pe_scheme = dwi_manual_pe_scheme # May be used later for triggering volume recombination - else: - dwi_manual_pe_scheme = None # To guarantee that these generated data are never used - else: - # Nothing in the header; rely entirely on user specification - if pe_design == 'Header': - raise MRtrixError('No phase encoding information found in DWI image header') - if not manual_pe_dir: - raise MRtrixError('No phase encoding information provided either in header or at command-line') - if dwi_auto_trt_warning: - app.console('Total readout time not provided at command-line; assuming sane default of ' + str(auto_trt)) - dwi_pe_scheme = dwi_manual_pe_scheme # May be needed later for triggering volume recombination - - # This may be required by -rpe_all for extracting b=0 volumes while retaining phase-encoding information - import_dwi_pe_table_option = '' - if dwi_manual_pe_scheme: - phaseencoding.save('dwi_manual_pe_scheme.txt', dwi_manual_pe_scheme) - import_dwi_pe_table_option = ' -import_pe_table dwi_manual_pe_scheme.txt' - - - # Find the index of the first DWI volume that is a b=0 volume - # This needs to occur at the outermost loop as it is pertinent information - # not only for the -align_seepi option, but also for when the -se_epi option - # is not provided at all, and the input to topup is extracted solely from the DWIs - dwi_first_bzero_index = 0 - for line in grad: - if line[3] <= bzero_threshold: - break - dwi_first_bzero_index += 1 - app.debug('Index of first b=0 image in DWIs is ' + str(dwi_first_bzero_index)) - - - # Deal with the phase-encoding of the images to be fed to topup (if applicable) - execute_topup = (not pe_design == 'None') and not topup_file_userpath - overwrite_se_epi_pe_scheme = False - se_epi_path = 'se_epi.mif' - dwi_permvols_preeddy_option = '' - dwi_permvols_posteddy_option = '' - dwi_bzero_added_to_se_epi = False - if app.ARGS.se_epi: - - # Newest version of eddy requires that topup field be on the same grid as the eddy input DWI - if not image.match(dwi_header, se_epi_header, up_to_dim=3): - app.console('DWIs and SE-EPI images used for inhomogeneity field estimation are defined on different image grids; ' - 'the latter will be automatically re-gridded to match the former') - new_se_epi_path = 'se_epi_regrid.mif' - run.command('mrtransform ' + se_epi_path + ' - -reorient_fod no -interp sinc -template dwi.mif | mrcalc - 0.0 -max ' + new_se_epi_path) - app.cleanup(se_epi_path) - se_epi_path = new_se_epi_path - se_epi_header = image.Header(se_epi_path) - - # 3 possible sources of PE information: DWI header, topup image header, command-line - # Any pair of these may conflict, and any one could be absent - - # Have to switch here based on phase-encoding acquisition design - if pe_design == 'Pair': - # Criteria: - # * If present in own header, ignore DWI header entirely - - # - If also provided at command-line, look for 
conflict & report - # - If not provided at command-line, nothing to do - # * If _not_ present in own header: - # - If provided at command-line, infer appropriately - # - If not provided at command-line, but the DWI header has that information, infer appropriately - if se_epi_pe_scheme: - if manual_pe_dir: - if not scheme_dirs_match(se_epi_pe_scheme, se_epi_manual_pe_scheme): - app.warn('User-defined phase-encoding direction design does not match what is stored in SE EPI image header; proceeding with user specification') - overwrite_se_epi_pe_scheme = True - if manual_trt: - if not scheme_times_match(se_epi_pe_scheme, se_epi_manual_pe_scheme): - app.warn('User-defined total readout time does not match what is stored in SE EPI image header; proceeding with user specification') - overwrite_se_epi_pe_scheme = True - if overwrite_se_epi_pe_scheme: - se_epi_pe_scheme = se_epi_manual_pe_scheme - else: - se_epi_manual_pe_scheme = None # To guarantee that these data are never used - else: - overwrite_se_epi_pe_scheme = True - se_epi_pe_scheme = se_epi_manual_pe_scheme - - elif pe_design == 'All': - # Criteria: - # * If present in own header: - # - Nothing to do - # * If _not_ present in own header: - # - Don't have enough information to proceed - # - Is this too harsh? (e.g. Have rules by which it may be inferred from the DWI header / command-line) - if not se_epi_pe_scheme: - raise MRtrixError('If explicitly including SE EPI images when using -rpe_all option, they must come with their own associated phase-encoding information in the image header') - - elif pe_design == 'Header': - # Criteria: - # * If present in own header: - # Nothing to do (-pe_dir option is mutually exclusive) - # * If _not_ present in own header: - # Cannot proceed - if not se_epi_pe_scheme: - raise MRtrixError('No phase-encoding information present in SE-EPI image header') - # If there is no phase encoding contrast within the SE-EPI series, - # try combining it with the DWI b=0 volumes, see if that produces some contrast - # However, this should probably only be permitted if the -align_seepi option is defined - se_epi_pe_scheme_has_contrast = 'pe_scheme' in se_epi_header.keyval() - if not se_epi_pe_scheme_has_contrast: - if app.ARGS.align_seepi: - app.console('No phase-encoding contrast present in SE-EPI images; will examine again after combining with DWI b=0 images') - new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_dwibzeros.mif' - # Don't worry about trying to produce a balanced scheme here - run.command('dwiextract dwi.mif - -bzero | mrcat - ' + se_epi_path + ' ' + new_se_epi_path + ' -axis 3') - se_epi_header = image.Header(new_se_epi_path) - se_epi_pe_scheme_has_contrast = 'pe_scheme' in se_epi_header.keyval() - if se_epi_pe_scheme_has_contrast: - app.cleanup(se_epi_path) - se_epi_path = new_se_epi_path - se_epi_pe_scheme = phaseencoding.get_scheme(se_epi_header) - dwi_bzero_added_to_se_epi = True - # Delay testing appropriateness of the concatenation of these images - # (i.e. 
differences in contrast) to later - else: - raise MRtrixError('No phase-encoding contrast present in SE-EPI images, even after concatenating with b=0 images due to -align_seepi option; ' - 'cannot perform inhomogeneity field estimation') - else: - raise MRtrixError('No phase-encoding contrast present in SE-EPI images; cannot perform inhomogeneity field estimation') - - if app.ARGS.align_seepi: - - for field_name, description in { 'EchoTime': 'echo time', - 'RepetitionTime': 'repetition time', - 'FlipAngle': 'flip angle' }.items(): - dwi_value = dwi_header.keyval().get(field_name) - se_epi_value = se_epi_header.keyval().get(field_name) - if dwi_value and se_epi_value and dwi_value != se_epi_value: - app.warn('It appears that the spin-echo EPI images used for inhomogeneity field estimation have a different ' + description + ' to the DWIs being corrected. ' - 'This may cause issues in estimation of the field, as the first DWI b=0 volume will be added to the input series to topup ' - 'due to use of the -align_seepi option.') - - # If we are using the -se_epi option, and hence the input images to topup have not come from the DWIs themselves, - # we need to insert the first b=0 DWI volume to the start of the topup input image. Otherwise, the field estimated - # by topup will not be correctly aligned with the volumes as they are processed by eddy. - # - # However, there's also a code path by which we may have already performed this addition. - # If we have already spliced the b=0 volumes from the DWI input with the SE-EPI image - # (due to the absence of phase-encoding contrast in the SE-EPI series), we don't want to - # re-attempt such a concatenation; the fact that the DWI b=0 images were inserted ahead of - # the SE-EPI images means the alignment issue should be dealt with.
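# Illustrative sketch (hypothetical values; not part of the original script): if the SE-EPI series
# provided via -se_epi were an AP/PA b=0 pair with phase-encoding rows [0,1,0,0.1] and [0,-1,0,0.1],
# the per-axis sum computed in the block below would be [0,0,0] (a balanced scheme); the SE-EPI
# volume whose direction matches that of the first DWI b=0 volume (row 0, for an AP-encoded b=0)
# would then be removed, and the first DWI b=0 volume placed at the start of the series in its
# stead, preserving the balanced scheme while ensuring that the first volume seen by topup matches
# the first volume seen by eddy.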
- - if dwi_first_bzero_index == len(grad) and not dwi_bzero_added_to_se_epi: - - app.warn('Unable to find b=0 volume in input DWIs to provide alignment between topup and eddy; script will proceed as though the -align_seepi option were not provided') - - # If b=0 volumes from the DWIs have already been added to the SE-EPI image due to an - # absence of phase-encoding contrast in the latter, we don't need to perform the following - elif not dwi_bzero_added_to_se_epi: - - run.command('mrconvert dwi.mif dwi_first_bzero.mif -coord 3 ' + str(dwi_first_bzero_index) + ' -axes 0,1,2') - dwi_first_bzero_pe = dwi_manual_pe_scheme[dwi_first_bzero_index] if overwrite_dwi_pe_scheme else dwi_pe_scheme[dwi_first_bzero_index] - - se_epi_pe_sum = [ 0, 0, 0 ] - se_epi_volume_to_remove = len(se_epi_pe_scheme) - for index, line in enumerate(se_epi_pe_scheme): - se_epi_pe_sum = [ i + j for i, j in zip(se_epi_pe_sum, line[0:3]) ] - if se_epi_volume_to_remove == len(se_epi_pe_scheme) and line[0:3] == dwi_first_bzero_pe[0:3]: - se_epi_volume_to_remove = index - new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_firstdwibzero.mif' - if (se_epi_pe_sum == [ 0, 0, 0 ]) and (se_epi_volume_to_remove < len(se_epi_pe_scheme)): - app.console('Balanced phase-encoding scheme detected in SE-EPI series; volume ' + str(se_epi_volume_to_remove) + ' will be removed and replaced with first b=0 from DWIs') - run.command('mrconvert ' + se_epi_path + ' - -coord 3 ' + ','.join([str(index) for index in range(len(se_epi_pe_scheme)) if not index == se_epi_volume_to_remove]) + ' | mrcat dwi_first_bzero.mif - ' + new_se_epi_path + ' -axis 3') - # Also need to update the phase-encoding scheme appropriately if it's being set manually - # (if embedded within the image headers, should be updated through the command calls) - if se_epi_manual_pe_scheme: - first_line = list(manual_pe_dir) - first_line.append(trt) - new_se_epi_manual_pe_scheme = [ ] - new_se_epi_manual_pe_scheme.append(first_line) - for index, entry in enumerate(se_epi_manual_pe_scheme): - if not index == se_epi_volume_to_remove: - new_se_epi_manual_pe_scheme.append(entry) - se_epi_manual_pe_scheme = new_se_epi_manual_pe_scheme - else: - if se_epi_pe_sum == [ 0, 0, 0 ] and se_epi_volume_to_remove == len(se_epi_pe_scheme): - app.console('Phase-encoding scheme of -se_epi image is balanced, but could not find appropriate volume with which to substitute first b=0 volume from DWIs; first b=0 DWI volume will be inserted to start of series, resulting in an unbalanced scheme') - else: - app.console('Unbalanced phase-encoding scheme detected in series provided via -se_epi option; first DWI b=0 volume will be inserted to start of series') - run.command('mrcat dwi_first_bzero.mif ' + se_epi_path + ' ' + new_se_epi_path + ' -axis 3') - # Also need to update the phase-encoding scheme appropriately - if se_epi_manual_pe_scheme: - first_line = list(manual_pe_dir) - first_line.append(trt) - se_epi_manual_pe_scheme = [ first_line, se_epi_manual_pe_scheme ] - - # Ended branching based on balanced-ness of PE acquisition scheme within SE-EPI volumes - app.cleanup(se_epi_path) - app.cleanup('dwi_first_bzero.mif') - se_epi_path = new_se_epi_path - - # Ended branching based on: - # - Detection of first b=0 volume in DWIs; or - # - Prior merge of SE-EPI and DWI b=0 volumes due to no phase-encoding contrast in SE-EPI - - # Completed checking for presence of -se_epi option - - elif not pe_design == 'None' and not topup_file_userpath: # No SE EPI images explicitly provided: In some cases, can extract 
appropriate b=0 images from DWI - - # If using 'All' or 'Header', and haven't been given any topup images, need to extract the b=0 volumes from the series, - # preserving phase-encoding information while doing so - # Preferably also make sure that there's some phase-encoding contrast in there... - # With -rpe_all, need to write inferred phase-encoding to file and import before using dwiextract so that the phase-encoding - # of the extracted b=0's is propagated to the generated b=0 series - run.command('mrconvert dwi.mif' + import_dwi_pe_table_option + ' - | dwiextract - ' + se_epi_path + ' -bzero') - se_epi_header = image.Header(se_epi_path) - - # If there's no contrast remaining in the phase-encoding scheme, it'll be written to - # PhaseEncodingDirection and TotalReadoutTime rather than pe_scheme - # In this scenario, we will be unable to run topup, or volume recombination - if 'pe_scheme' not in se_epi_header.keyval(): - if pe_design == 'All': - raise MRtrixError('DWI header indicates no phase encoding contrast between b=0 images; cannot proceed with volume recombination-based pre-processing') - app.warn('DWI header indicates no phase encoding contrast between b=0 images; proceeding without inhomogeneity field estimation') - execute_topup = False - run.function(os.remove, se_epi_path) - se_epi_path = None - se_epi_header = None - - - # If the first b=0 volume in the DWIs is in fact not the first volume (i.e. index zero), we're going to - # manually place it at the start of the DWI volumes when they are input to eddy, so that the - # first input volume to topup and the first input volume to eddy are one and the same. - # Note: If at a later date, the statistical outputs from eddy are considered (e.g. motion, outliers), - # then this volume permutation will need to be taken into account - if not topup_file_userpath: - if dwi_first_bzero_index == len(grad): - app.warn("No image volumes were classified as b=0 by MRtrix3; no permutation of order of DWI volumes can occur " + \ - "(do you need to adjust config file entry BZeroThreshold?)") - elif dwi_first_bzero_index: - app.console('First b=0 volume in input DWIs is volume index ' + str(dwi_first_bzero_index) + '; ' - 'this will be permuted to be the first volume (index 0) when eddy is run') - dwi_permvols_preeddy_option = ' -coord 3 ' + \ - str(dwi_first_bzero_index) + \ - ',0' + \ - (':' + str(dwi_first_bzero_index-1) if dwi_first_bzero_index > 1 else '') + \ - (',' + str(dwi_first_bzero_index+1) if dwi_first_bzero_index < dwi_num_volumes-1 else '') + \ - (':' + str(dwi_num_volumes-1) if dwi_first_bzero_index < dwi_num_volumes-2 else '') - dwi_permvols_posteddy_option = ' -coord 3 1' + \ - (':' + str(dwi_first_bzero_index) if dwi_first_bzero_index > 1 else '') + \ - ',0' + \ - (',' + str(dwi_first_bzero_index+1) if dwi_first_bzero_index < dwi_num_volumes-1 else '') + \ - (':' + str(dwi_num_volumes-1) if dwi_first_bzero_index < dwi_num_volumes-2 else '') - app.debug('mrconvert options for axis permutation:') - app.debug('Pre: ' + str(dwi_permvols_preeddy_option)) - app.debug('Post: ' + str(dwi_permvols_posteddy_option)) - - - - # This may be required when setting up the topup call - se_epi_manual_pe_table_option = '' - if se_epi_manual_pe_scheme: - phaseencoding.save('se_epi_manual_pe_scheme.txt', se_epi_manual_pe_scheme) - se_epi_manual_pe_table_option = ' -import_pe_table se_epi_manual_pe_scheme.txt' - - - # Need gradient table if running dwi2mask after applytopup to derive a brain mask for eddy - run.command('mrinfo dwi.mif 
-export_grad_mrtrix grad.b') - dwi2mask_algo = CONFIG['Dwi2maskAlgorithm'] - - eddy_in_topup_option = '' - dwi_post_eddy_crop_option = '' - slice_padded = False - dwi_path = 'dwi.mif' - if execute_topup: - - # topup will crash if its input image has a spatial dimension with a non-even size; - # presumably due to a downsampling by a factor of 2 in a multi-resolution scheme - # The newest eddy also requires the output from topup and the input DWIs to have the same size; - # therefore this restriction applies to the DWIs as well - # Rather than crop in this case (which would result in a cropped output image), - # duplicate the last slice on any problematic axis, and then crop that extra - # slice at the output step - # By this point, if the input SE-EPI images and DWIs are not on the same image grid, the - # SE-EPI images have already been re-gridded to DWI image space; - odd_axis_count = 0 - for axis_size in dwi_header.size()[:3]: - if int(axis_size%2): - odd_axis_count += 1 - if odd_axis_count: - app.console(str(odd_axis_count) + ' spatial ' + ('axes of DWIs have' if odd_axis_count > 1 else 'axis of DWIs has') + ' non-even size; ' - 'this will be automatically padded for compatibility with topup, and the extra slice' + ('s' if odd_axis_count > 1 else '') + ' erased afterwards') - for axis, axis_size in enumerate(dwi_header.size()[:3]): - if int(axis_size%2): - new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_pad' + str(axis) + '.mif' - run.command('mrconvert ' + se_epi_path + ' -coord ' + str(axis) + ' ' + str(axis_size-1) + ' - | mrcat ' + se_epi_path + ' - ' + new_se_epi_path + ' -axis ' + str(axis)) - app.cleanup(se_epi_path) - se_epi_path = new_se_epi_path - new_dwi_path = os.path.splitext(dwi_path)[0] + '_pad' + str(axis) + '.mif' - run.command('mrconvert ' + dwi_path + ' -coord ' + str(axis) + ' ' + str(axis_size-1) + ' -clear dw_scheme - | mrcat ' + dwi_path + ' - ' + new_dwi_path + ' -axis ' + str(axis)) - app.cleanup(dwi_path) - dwi_path = new_dwi_path - dwi_post_eddy_crop_option += ' -coord ' + str(axis) + ' 0:' + str(axis_size-1) - if axis == slice_encoding_axis: - slice_padded = True - dwi_num_slices += 1 - # If we are padding the slice axis, and performing slice-to-volume correction, - # then we need to perform the corresponding padding to the slice timing - if eddy_mporder: - # At this point in the script, this information may be encoded either within - # the slice timing vector (as imported from the image header), or as - # slice groups (i.e. in the format expected by eddy). How these data are - # stored affects how the padding is performed. 
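# Illustrative sketch (hypothetical sizes; not part of the original script): for a spatial axis of
# size 45, the padding above duplicates the final slice (index 44) to give an even size of 46 for
# topup/eddy compatibility, and ' -coord <axis> 0:44' is appended to dwi_post_eddy_crop_option so
# that the extra slice is cropped from the eddy output. If that axis is the slice axis and
# --mporder is in use, the corresponding padding below adds a copy of the last slice timing entry,
# or appends the new slice index 45 to whichever slice group contains slice 44.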
- if slice_timing: - slice_timing.append(slice_timing[-1]) - elif slice_groups: - # Can't edit in place when looping through the list - new_slice_groups = [ ] - for group in slice_groups: - if axis_size-1 in group: - group.append(axis_size) - new_slice_groups.append(group) - slice_groups = new_slice_groups - - - # Do the conversion in preparation for topup - run.command('mrconvert ' + se_epi_path + ' topup_in.nii' + se_epi_manual_pe_table_option + ' -strides -1,+2,+3,+4 -export_pe_table topup_datain.txt') - app.cleanup(se_epi_path) - - # Run topup - topup_manual_options = '' - if app.ARGS.topup_options: - topup_manual_options = ' ' + app.ARGS.topup_options.strip() - topup_output = run.command(topup_cmd + ' --imain=topup_in.nii --datain=topup_datain.txt --out=field --fout=field_map' + fsl_suffix + ' --config=' + topup_config_path + ' --verbose' + topup_manual_options) - with open('topup_output.txt', 'wb') as topup_output_file: - topup_output_file.write((topup_output.stdout + '\n' + topup_output.stderr + '\n').encode('utf-8', errors='replace')) - if app.VERBOSITY > 1: - app.console('Output of topup command:') - sys.stderr.write(topup_output.stdout + '\n' + topup_output.stderr + '\n') - - if execute_applytopup: - - # Apply the warp field to the input image series to get an initial corrected volume estimate - # applytopup can't receive the complete DWI input and correct it as a whole, because the phase-encoding - # details may vary between volumes - if dwi_manual_pe_scheme: - run.command('mrconvert ' + dwi_path + import_dwi_pe_table_option + ' - | mrinfo - -export_pe_eddy applytopup_config.txt applytopup_indices.txt') - else: - run.command('mrinfo ' + dwi_path + ' -export_pe_eddy applytopup_config.txt applytopup_indices.txt') - - # Call applytopup separately for each unique phase-encoding - # This should be the most compatible option with more complex phase-encoding acquisition designs, - # since we don't need to worry about applytopup performing volume recombination - # Plus, recombination doesn't need to be optimal; we're only using this to derive a brain mask - applytopup_image_list = [ ] - index = 1 - applytopup_config = matrix.load_matrix('applytopup_config.txt') - applytopup_indices = matrix.load_vector('applytopup_indices.txt', dtype=int) - applytopup_volumegroups = [ [ index for index, value in enumerate(applytopup_indices) if value == group ] for group in range(1, len(applytopup_config)+1) ] - app.debug('applytopup_config: ' + str(applytopup_config)) - app.debug('applytopup_indices: ' + str(applytopup_indices)) - app.debug('applytopup_volumegroups: ' + str(applytopup_volumegroups)) - for index, group in enumerate(applytopup_volumegroups): - prefix = os.path.splitext(dwi_path)[0] + '_pe_' + str(index) - input_path = prefix + '.nii' - json_path = prefix + '.json' - temp_path = prefix + '_applytopup.nii' - output_path = prefix + '_applytopup.mif' - run.command('mrconvert ' + dwi_path + ' ' + input_path + ' -coord 3 ' + ','.join(str(value) for value in group) + ' -strides -1,+2,+3,+4 -json_export ' + json_path) - run.command(applytopup_cmd + ' --imain=' + input_path + ' --datain=applytopup_config.txt --inindex=' + str(index+1) + ' --topup=field --out=' + temp_path + ' --method=jac') - app.cleanup(input_path) - temp_path = fsl.find_image(temp_path) - run.command('mrconvert ' + temp_path + ' ' + output_path + ' -json_import ' + json_path) - app.cleanup(json_path) - app.cleanup(temp_path) - applytopup_image_list.append(output_path) - index += 1 - - # Use the initial corrected volumes to 
derive a brain mask for eddy - if not app.ARGS.eddy_mask: - - dwi2mask_out_path = 'dwi2mask_out.mif' - if len(applytopup_image_list) == 1: - dwi2mask_in_path = applytopup_image_list[0] - else: - dwi2mask_in_path = 'dwi2mask_in.mif' - run.command('mrcat ' + ' '.join(applytopup_image_list) + ' ' + dwi2mask_in_path + ' -axis 3') - run.command('dwi2mask ' + dwi2mask_algo + ' ' + dwi2mask_in_path + ' ' + dwi2mask_out_path) - run.command('maskfilter ' + dwi2mask_out_path + ' dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') - if len(applytopup_image_list) > 1: - app.cleanup(dwi2mask_in_path) - app.cleanup(dwi2mask_out_path) - - app.cleanup(applytopup_image_list) - - eddy_in_topup_option = ' --topup=field' - - else: - - # Generate a processing mask for eddy based on the uncorrected input DWIs - if not app.ARGS.eddy_mask: - dwi2mask_out_path = 'dwi2mask_out.mif' - run.command('dwi2mask ' + dwi2mask_algo + ' ' + dwi_path + ' ' + dwi2mask_out_path) - run.command('maskfilter ' + dwi2mask_out_path + ' dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') - app.cleanup(dwi2mask_out_path) - - - # Use user supplied mask for eddy instead of one derived from the images using dwi2mask - if app.ARGS.eddy_mask: - if image.match('eddy_mask.mif', dwi_path, up_to_dim=3): - run.command('mrconvert eddy_mask.mif eddy_mask.nii -datatype float32 -stride -1,+2,+3') - else: - app.warn('User-provided processing mask for eddy does not match DWI voxel grid; resampling') - run.command('mrtransform eddy_mask.mif - -template ' + dwi_path + ' -interp linear | ' - + 'mrthreshold - -abs 0.5 - | ' - + 'mrconvert - eddy_mask.nii -datatype float32 -stride -1,+2,+3') - app.cleanup('eddy_mask.mif') - - # Generate the text file containing slice timing / grouping information if necessary - if eddy_mporder: - if slice_timing: - # This list contains, for each slice, the timing offset between acquisition of the - # first slice in the volume, and acquisition of that slice - # Eddy however requires a text file where each row contains those slices that were - # acquired with a single readout, in ordered rows from first slice (group) - # acquired to last slice (group) acquired - if sum(slice_encoding_direction) < 0: - slice_timing = reversed(slice_timing) - slice_groups = [ [ x[0] for x in g ] for _, g in itertools.groupby(sorted(enumerate(slice_timing), key=lambda x:x[1]), key=lambda x:x[1]) ] #pylint: disable=unused-variable - app.debug('Slice timing: ' + str(slice_timing)) - app.debug('Resulting slice groups: ' + str(slice_groups)) - # Variable slice_groups may have already been defined in the correct format. - # In that instance, there's nothing to do other than write it to file; - # UNLESS the slice encoding direction is known to be reversed, in which case - # we need to reverse the timings. Would think that this would however be - # rare, given it requires that the slspec text file be provided manually but - # SliceEncodingDirection to be present. 
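The slice-timing-to-slice-groups conversion above is compact; the following toy example (hypothetical timings, not taken from the patch) shows the same sorted/groupby idiom producing eddy-style slice groups, ordered from earliest to latest readout.

import itertools

slice_timing = [0.0, 0.5, 0.0, 0.5, 0.0, 0.5]   # toy multiband acquisition, 6 slices
slice_groups = [[index for index, _ in group]
                for _, group in itertools.groupby(
                    sorted(enumerate(slice_timing), key=lambda x: x[1]),
                    key=lambda x: x[1])]
print(slice_groups)   # [[0, 2, 4], [1, 3, 5]]: slices sharing a timing form one group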
- elif slice_groups and sum(slice_encoding_direction) < 0: - new_slice_groups = [ ] - for group in new_slice_groups: - new_slice_groups.append([ dwi_num_slices-index for index in group ]) - app.debug('Slice groups reversed due to negative slice encoding direction') - app.debug('Original: ' + str(slice_groups)) - app.debug('New: ' + str(new_slice_groups)) - slice_groups = new_slice_groups - - matrix.save_numeric('slspec.txt', slice_groups, add_to_command_history=False, fmt='%d') - eddy_manual_options.append('--slspec=slspec.txt') - - - # Revert eddy_manual_options from a list back to a single string - eddy_manual_options = (' ' + ' '.join(eddy_manual_options)) if eddy_manual_options else '' - - - # Prepare input data for eddy - run.command('mrconvert ' + dwi_path + import_dwi_pe_table_option + dwi_permvols_preeddy_option + ' eddy_in.nii -strides -1,+2,+3,+4 -export_grad_fsl bvecs bvals -export_pe_eddy eddy_config.txt eddy_indices.txt') - app.cleanup(dwi_path) - - # Run eddy - # If a CUDA version is in PATH, run that first; if it fails, re-try using the non-CUDA version - eddy_all_options = '--imain=eddy_in.nii --mask=eddy_mask.nii --acqp=eddy_config.txt --index=eddy_indices.txt --bvecs=bvecs --bvals=bvals' + eddy_in_topup_option + eddy_manual_options + ' --out=dwi_post_eddy --verbose' - eddy_cuda_cmd = fsl.eddy_binary(True) - eddy_openmp_cmd = fsl.eddy_binary(False) - if eddy_cuda_cmd: - # If running CUDA version, but OpenMP version is also available, don't stop the script if the CUDA version fails - try: - eddy_output = run.command(eddy_cuda_cmd + ' ' + eddy_all_options) - except run.MRtrixCmdError as exception_cuda: - if not eddy_openmp_cmd: - raise - with open('eddy_cuda_failure_output.txt', 'wb') as eddy_output_file: - eddy_output_file.write(str(exception_cuda).encode('utf-8', errors='replace')) - app.console('CUDA version of \'eddy\' was not successful; attempting OpenMP version') - try: - eddy_output = run.command(eddy_openmp_cmd + ' ' + eddy_all_options) - except run.MRtrixCmdError as exception_openmp: - with open('eddy_openmp_failure_output.txt', 'wb') as eddy_output_file: - eddy_output_file.write(str(exception_openmp).encode('utf-8', errors='replace')) - # Both have failed; want to combine error messages - eddy_cuda_header = ('=' * len(eddy_cuda_cmd)) \ - + '\n' \ - + eddy_cuda_cmd \ - + '\n' \ - + ('=' * len(eddy_cuda_cmd)) \ - + '\n' - eddy_openmp_header = ('=' * len(eddy_openmp_cmd)) \ - + '\n' \ - + eddy_openmp_cmd \ - + '\n' \ - + ('=' * len(eddy_openmp_cmd)) \ - + '\n' - exception_stdout = eddy_cuda_header \ - + exception_cuda.stdout \ - + '\n\n' \ - + eddy_openmp_header \ - + exception_openmp.stdout \ - + '\n\n' - exception_stderr = eddy_cuda_header \ - + exception_cuda.stderr \ - + '\n\n' \ - + eddy_openmp_header \ - + exception_openmp.stderr \ - + '\n\n' - raise run.MRtrixCmdError('eddy* ' + eddy_all_options, - 1, - exception_stdout, - exception_stderr) - - else: - eddy_output = run.command(eddy_openmp_cmd + ' ' + eddy_all_options) - with open('eddy_output.txt', 'wb') as eddy_output_file: - eddy_output_file.write((eddy_output.stdout + '\n' + eddy_output.stderr + '\n').encode('utf-8', errors='replace')) - if app.VERBOSITY > 1: - app.console('Output of eddy command:') - sys.stderr.write(eddy_output.stdout + '\n' + eddy_output.stderr + '\n') - app.cleanup('eddy_in.nii') - - eddy_output_image_path = fsl.find_image('dwi_post_eddy') - - - # Check to see whether or not eddy has provided a rotated bvecs file; - # if it has, import this into the output image - bvecs_path = 
'dwi_post_eddy.eddy_rotated_bvecs' - if not os.path.isfile(bvecs_path): - app.warn('eddy has not provided rotated bvecs file; using original gradient table. Recommend updating FSL eddy to version 5.0.9 or later.') - bvecs_path = 'bvecs' - - - # Run eddy qc tool QUAD if installed and one of -eddyqc_text or -eddyqc_all is specified - eddyqc_prefix = 'dwi_post_eddy' - if eddyqc_path: - if shutil.which('eddy_quad'): - - eddyqc_mask = 'eddy_mask.nii' - eddyqc_fieldmap = fsl.find_image('field_map') if execute_topup else None - eddyqc_slspec = 'slspec.txt' if eddy_mporder else None - - # If there was any relevant padding applied, then we want to provide - # the comprehensive set of files to EddyQC with that padding removed - if dwi_post_eddy_crop_option: - progress = app.ProgressBar('Removing image padding prior to running EddyQC', len(eddyqc_files) + 3) - - for eddy_filename in eddyqc_files: - if os.path.isfile('dwi_post_eddy.' + eddy_filename): - if slice_padded and eddy_filename in [ 'eddy_outlier_map', 'eddy_outlier_n_sqr_stdev_map', 'eddy_outlier_n_stdev_map' ]: - with open('dwi_post_eddy.' + eddy_filename, 'r', encoding='utf-8') as f_eddyfile: - eddy_data = f_eddyfile.readlines() - eddy_data_header = eddy_data[0] - eddy_data = eddy_data[1:] - for line in eddy_data: - line = ' '.join(line.strip().split(' ')[:-1]) - with open('dwi_post_eddy_unpad.' + eddy_filename, 'w', encoding='utf-8') as f_eddyfile: - f_eddyfile.write(eddy_data_header + '\n') - f_eddyfile.write('\n'.join(eddy_data) + '\n') - elif eddy_filename.endswith('.nii.gz'): - run.command('mrconvert dwi_post_eddy.' + eddy_filename + ' dwi_post_eddy_unpad.' + eddy_filename + dwi_post_eddy_crop_option) - else: - run.function(os.symlink, 'dwi_post_eddy.' + eddy_filename, 'dwi_post_eddy_unpad.' + eddy_filename) - app.cleanup('dwi_post_eddy.' 
+ eddy_filename) - progress.increment() - - if eddy_mporder and slice_padded: - app.debug('Current slice groups: ' + str(slice_groups)) - app.debug('Slice encoding direction: ' + str(slice_encoding_direction)) - # Remove padded slice from slice_groups, write new slspec - if sum(slice_encoding_direction) < 0: - slice_groups = [ [ index-1 for index in group if index ] for group in slice_groups ] - else: - slice_groups = [ [ index for index in group if index != dwi_num_slices-1 ] for group in slice_groups ] - eddyqc_slspec = 'slspec_unpad.txt' - app.debug('Slice groups after removal: ' + str(slice_groups)) - try: - # After this removal, slspec should now be a square matrix - assert all(len(group) == len(slice_groups[0]) for group in slice_groups[1:]) - matrix.save_matrix(eddyqc_slspec, slice_groups, add_to_command_history=False, fmt='%d') - except AssertionError: - matrix.save_numeric(eddyqc_slspec, slice_groups, add_to_command_history=False, fmt='%d') - raise - - run.command('mrconvert eddy_mask.nii eddy_mask_unpad.nii' + dwi_post_eddy_crop_option) - eddyqc_mask = 'eddy_mask_unpad.nii' - progress.increment() - run.command('mrconvert ' + fsl.find_image('field_map') + ' field_map_unpad.nii' + dwi_post_eddy_crop_option) - eddyqc_fieldmap = 'field_map_unpad.nii' - progress.increment() - run.command('mrconvert ' + eddy_output_image_path + ' dwi_post_eddy_unpad.nii.gz' + dwi_post_eddy_crop_option) - eddyqc_prefix = 'dwi_post_eddy_unpad' - progress.done() - - eddyqc_options = ' -idx eddy_indices.txt -par eddy_config.txt -b bvals -m ' + eddyqc_mask - if os.path.isfile(eddyqc_prefix + '.eddy_residuals.nii.gz'): - eddyqc_options += ' -g ' + bvecs_path - if execute_topup: - eddyqc_options += ' -f ' + eddyqc_fieldmap - if eddy_mporder: - eddyqc_options += ' -s ' + eddyqc_slspec - if app.VERBOSITY > 2: - eddyqc_options += ' -v' - try: - run.command('eddy_quad ' + eddyqc_prefix + eddyqc_options) - except run.MRtrixCmdError as exception: - with open('eddy_quad_failure_output.txt', 'wb') as eddy_quad_output_file: - eddy_quad_output_file.write(str(exception).encode('utf-8', errors='replace')) - app.debug(str(exception)) - app.warn('Error running automated EddyQC tool \'eddy_quad\'; QC data written to "' + eddyqc_path + '" will be files from "eddy" only') - # Delete the directory if the script only made it partway through - try: - shutil.rmtree(eddyqc_prefix + '.qc') - except OSError: - pass - else: - app.console('Command \'eddy_quad\' not found in PATH; skipping') - - - # Have to retain these images until after eddyQC is run - # If using -eddyqc_all, also write the mask provided to eddy to the output directory; - # therefore don't delete it yet here - if not app.ARGS.eddyqc_all: - app.cleanup('eddy_mask.nii') - if execute_topup: - app.cleanup(fsl.find_image('field_fieldcoef')) - - - # Get the axis strides from the input series, so the output image can be modified to match - stride_option = ' -strides ' + ','.join([str(i) for i in dwi_header.strides()]) - - - # Determine whether or not volume recombination should be performed - # This could be either due to use of -rpe_all option, or just due to the data provided with -rpe_header - # Rather than trying to re-use the code that was used in the case of -rpe_all, run fresh code - # The phase-encoding scheme needs to be checked also - volume_matchings = [ dwi_num_volumes ] * dwi_num_volumes - volume_pairs = [ ] - app.debug('Commencing gradient direction matching; ' + str(dwi_num_volumes) + ' volumes') - for index1 in range(dwi_num_volumes): - if 
volume_matchings[index1] == dwi_num_volumes: # As yet unpaired - for index2 in range(index1+1, dwi_num_volumes): - if volume_matchings[index2] == dwi_num_volumes: # Also as yet unpaired - # Here, need to check both gradient matching and reversed phase-encode direction - if not any(dwi_pe_scheme[index1][i] + dwi_pe_scheme[index2][i] for i in range(0,3)) and grads_match(index1, index2): - volume_matchings[index1] = index2 - volume_matchings[index2] = index1 - volume_pairs.append([index1, index2]) - app.debug('Matched volume ' + str(index1) + ' with ' + str(index2) + '\n' + - 'Phase encoding: ' + str(dwi_pe_scheme[index1]) + ' ' + str(dwi_pe_scheme[index2]) + '\n' + - 'Gradients: ' + str(grad[index1]) + ' ' + str(grad[index2])) - break - - - if len(volume_pairs) != int(dwi_num_volumes/2): - - if execute_topup: - app.cleanup('topup_in.nii') - app.cleanup(fsl.find_image('field_map')) - - # Convert the resulting volume to the output image, and re-insert the diffusion encoding - run.command('mrconvert ' + eddy_output_image_path + ' result.mif' + dwi_permvols_posteddy_option + dwi_post_eddy_crop_option + stride_option + ' -fslgrad ' + bvecs_path + ' bvals') - app.cleanup(eddy_output_image_path) - - else: - app.console('Detected matching DWI volumes with opposing phase encoding; performing explicit volume recombination') - - # Perform a manual combination of the volumes output by eddy, since LSR is disabled - - # Generate appropriate bvecs / bvals files - # Particularly if eddy has provided rotated bvecs, since we're combining two volumes into one that - # potentially have subject rotation between them (and therefore the sensitisation direction is - # not precisely equivalent), the best we can do is take the mean of the two vectors. - # Manual recombination of volumes needs to take into account the explicit volume matching - - bvecs = matrix.load_matrix(bvecs_path) - bvecs_combined_transpose = [ ] - bvals_combined = [ ] - - for pair in volume_pairs: - bvec_mean = [ 0.5*(bvecs[0][pair[0]] + bvecs[0][pair[1]]), - 0.5*(bvecs[1][pair[0]] + bvecs[1][pair[1]]), - 0.5*(bvecs[2][pair[0]] + bvecs[2][pair[1]]) ] - norm2 = matrix.dot(bvec_mean, bvec_mean) - - # If one diffusion sensitisation gradient direction is reversed with respect to - # the other, still want to enable their recombination; but need to explicitly - # account for this when averaging the two directions - if norm2 < 0.5: - bvec_mean = [ 0.5*(bvecs[0][pair[0]] - bvecs[0][pair[1]]), - 0.5*(bvecs[1][pair[0]] - bvecs[1][pair[1]]), - 0.5*(bvecs[2][pair[0]] - bvecs[2][pair[1]]) ] - norm2 = matrix.dot(bvec_mean, bvec_mean) - - # Occasionally a b=0 volume can have a zero vector - if norm2: - factor = 1.0 / math.sqrt(norm2) - new_vec = [ bvec_mean[0]*factor, bvec_mean[1]*factor, bvec_mean[2]*factor ] - else: - new_vec = [ 0.0, 0.0, 0.0 ] - bvecs_combined_transpose.append(new_vec) - bvals_combined.append(0.5 * (grad[pair[0]][3] + grad[pair[1]][3])) - - bvecs_combined = matrix.transpose(bvecs_combined_transpose) - matrix.save_matrix('bvecs_combined', bvecs_combined, add_to_command_history=False) - matrix.save_vector('bvals_combined', bvals_combined, add_to_command_history=False) - - # Prior to 5.0.8, a bug resulted in the output field map image from topup having an identity transform, - # regardless of the transform of the input image - # Detect this, and manually replace the transform if necessary - # (even if this doesn't cause an issue with the subsequent mrcalc command, it may in the future, it's better for - # visualising the script intermediate 
files, and it gives the user a warning about an out-of-date FSL) - field_map_image = fsl.find_image('field_map') - field_map_header = image.Header(field_map_image) - if not image.match('topup_in.nii', field_map_header, up_to_dim=3): - app.warn('topup output field image has erroneous header; recommend updating FSL to version 5.0.8 or later') - new_field_map_image = 'field_map_fix.mif' - run.command('mrtransform ' + field_map_image + ' -replace topup_in.nii ' + new_field_map_image) - app.cleanup(field_map_image) - field_map_image = new_field_map_image - # In FSL 6.0.0, field map image is erroneously constructed with the same number of volumes as the input image, - # with all but the first volume containing intensity-scaled duplicates of the uncorrected input images - # The first volume is however the expected field offset image - elif len(field_map_header.size()) == 4: - app.console('Correcting erroneous FSL 6.0.0 field map image output') - new_field_map_image = 'field_map_fix.mif' - run.command('mrconvert ' + field_map_image + ' -coord 3 0 -axes 0,1,2 ' + new_field_map_image) - app.cleanup(field_map_image) - field_map_image = new_field_map_image - app.cleanup('topup_in.nii') - - - # Derive the weight images - # Scaling term for field map is identical to the bandwidth provided in the topup config file - # (converts Hz to pixel count; that way a simple image gradient can be used to get the Jacobians) - # Let mrfilter apply the default 1 voxel size gaussian smoothing filter before calculating the field gradient - # - # The jacobian image may be different for any particular volume pair - # The appropriate PE directions and total readout times can be acquired from the eddy-style config/index files - # eddy_config.txt and eddy_indices.txt - eddy_config = matrix.load_matrix('eddy_config.txt') - eddy_indices = matrix.load_vector('eddy_indices.txt', dtype=int) - app.debug('EDDY config: ' + str(eddy_config)) - app.debug('EDDY indices: ' + str(eddy_indices)) - - # This section derives, for each phase encoding configuration present, the 'weight' to be applied - # to the image during volume recombination, which is based on the Jacobian of the field in the - # phase encoding direction - for index, config in enumerate(eddy_config): - pe_axis = [ i for i, e in enumerate(config[0:3]) if e != 0][0] - sign_multiplier = ' -1.0 -mult' if config[pe_axis] < 0 else '' - field_derivative_path = 'field_deriv_pe_' + str(index+1) + '.mif' - run.command('mrcalc ' + field_map_image + ' ' + str(config[3]) + ' -mult' + sign_multiplier + ' - | mrfilter - gradient - | mrconvert - ' + field_derivative_path + ' -coord 3 ' + str(pe_axis) + ' -axes 0,1,2') - jacobian_path = 'jacobian_' + str(index+1) + '.mif' - run.command('mrcalc 1.0 ' + field_derivative_path + ' -add 0.0 -max ' + jacobian_path) - app.cleanup(field_derivative_path) - run.command('mrcalc ' + jacobian_path + ' ' + jacobian_path + ' -mult weight' + str(index+1) + '.mif') - app.cleanup(jacobian_path) - app.cleanup(field_map_image) - - # If eddy provides its main image output in a compressed format, the code block below will need to - # uncompress that image independently for every volume pair. Instead, if this is the case, let's - # convert it to an uncompressed format before we do anything with it. 
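To make the weighting scheme above concrete, here is a toy sketch of the Skare and Bammer 2010 recombination that the subsequent mrcalc calls implement voxel-wise: each member of a reversed phase-encode pair is weighted by the square of the Jacobian of the field along its phase-encoding direction. All values below are invented for illustration.

field_gradient_pe = [-0.2, 0.0, 0.3]                             # toy dField/dPE values (in voxels)
jacobian_up = [max(1.0 + g, 0.0) for g in field_gradient_pe]     # volume with one PE polarity
jacobian_down = [max(1.0 - g, 0.0) for g in field_gradient_pe]   # its reversed-PE counterpart
weight_up = [j * j for j in jacobian_up]
weight_down = [j * j for j in jacobian_down]

volume_up = [100.0, 100.0, 100.0]                                # toy voxel intensities
volume_down = [120.0, 100.0, 80.0]
combined = [(v1 * w1 + v2 * w2) / (w1 + w2)
            for v1, w1, v2, w2 in zip(volume_up, weight_up, volume_down, weight_down)]
print(combined)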
- if eddy_output_image_path.endswith('.gz'): - new_eddy_output_image_path = 'dwi_post_eddy_uncompressed.mif' - run.command('mrconvert ' + eddy_output_image_path + ' ' + new_eddy_output_image_path) - app.cleanup(eddy_output_image_path) - eddy_output_image_path = new_eddy_output_image_path - - # If the DWI volumes were permuted prior to running eddy, then the simplest approach is to permute them - # back to their original positions; otherwise, the stored gradient vector directions / phase encode - # directions / matched volume pairs are no longer appropriate - if dwi_permvols_posteddy_option: - new_eddy_output_image_path = os.path.splitext(eddy_output_image_path)[0] + '_volpermuteundo.mif' - run.command('mrconvert ' + eddy_output_image_path + dwi_permvols_posteddy_option + ' ' + new_eddy_output_image_path) - app.cleanup(eddy_output_image_path) - eddy_output_image_path = new_eddy_output_image_path - - # This section extracts the two volumes corresponding to each reversed phase-encoded volume pair, and - # derives a single image volume based on the recombination equation - combined_image_list = [ ] - progress = app.ProgressBar('Performing explicit volume recombination', len(volume_pairs)) - for index, volumes in enumerate(volume_pairs): - pe_indices = [ eddy_indices[i] for i in volumes ] - run.command('mrconvert ' + eddy_output_image_path + ' volume0.mif -coord 3 ' + str(volumes[0])) - run.command('mrconvert ' + eddy_output_image_path + ' volume1.mif -coord 3 ' + str(volumes[1])) - # Volume recombination equation described in Skare and Bammer 2010 - combined_image_path = 'combined' + str(index) + '.mif' - run.command('mrcalc volume0.mif weight' + str(pe_indices[0]) + '.mif -mult volume1.mif weight' + str(pe_indices[1]) + '.mif -mult -add weight' + str(pe_indices[0]) + '.mif weight' + str(pe_indices[1]) + '.mif -add -divide 0.0 -max ' + combined_image_path) - combined_image_list.append(combined_image_path) - run.function(os.remove, 'volume0.mif') - run.function(os.remove, 'volume1.mif') - progress.increment() - progress.done() - - app.cleanup(eddy_output_image_path) - for index in range(0, len(eddy_config)): - app.cleanup('weight' + str(index+1) + '.mif') - - # Finally the recombined volumes must be concatenated to produce the resulting image series - combine_command = ['mrcat', combined_image_list, '-', '-axis', '3', '|', \ - 'mrconvert', '-', 'result.mif', '-fslgrad', 'bvecs_combined', 'bvals_combined'] - if dwi_post_eddy_crop_option: - combine_command.extend(dwi_post_eddy_crop_option.strip().split(' ')) - combine_command.extend(stride_option.strip().split(' ')) - run.command(combine_command) - app.cleanup(combined_image_list) - - - # Grab any relevant files that eddy has created, and copy them to the requested directory - if eddyqc_path: - if app.FORCE_OVERWRITE and os.path.exists(eddyqc_path) and not os.path.isdir(eddyqc_path): - run.function(os.remove, eddyqc_path) - if not os.path.exists(eddyqc_path): - run.function(os.makedirs, eddyqc_path) - for filename in eddyqc_files: - if os.path.exists(eddyqc_prefix + '.' + filename): - # If this is an image, and axis padding was applied, want to undo the padding - if filename.endswith('.nii.gz') and dwi_post_eddy_crop_option: - run.command('mrconvert ' + eddyqc_prefix + '.' + filename + ' ' + shlex.quote(os.path.join(eddyqc_path, filename)) + dwi_post_eddy_crop_option, force=app.FORCE_OVERWRITE) - else: - run.function(shutil.copy, eddyqc_prefix + '.' 
+ filename, os.path.join(eddyqc_path, filename)) - # Also grab any files generated by the eddy qc tool QUAD - if os.path.isdir(eddyqc_prefix + '.qc'): - if app.FORCE_OVERWRITE and os.path.exists(os.path.join(eddyqc_path, 'quad')): - run.function(shutil.rmtree, os.path.join(eddyqc_path, 'quad')) - run.function(shutil.copytree, eddyqc_prefix + '.qc', os.path.join(eddyqc_path, 'quad')) - # Also grab the brain mask that was provided to eddy if -eddyqc_all was specified - if app.ARGS.eddyqc_all: - if dwi_post_eddy_crop_option: - run.command('mrconvert eddy_mask.nii ' + shlex.quote(os.path.join(eddyqc_path, 'eddy_mask.nii')) + dwi_post_eddy_crop_option, force=app.FORCE_OVERWRITE) - else: - run.function(shutil.copy, 'eddy_mask.nii', os.path.join(eddyqc_path, 'eddy_mask.nii')) - app.cleanup('eddy_mask.nii') - - - - - keys_to_remove = [ 'MultibandAccelerationFactor', 'SliceEncodingDirection', 'SliceTiming' ] - # These keys are still relevant for the output data if no EPI distortion correction was performed - if execute_applytopup: - keys_to_remove.extend([ 'PhaseEncodingDirection', 'TotalReadoutTime', 'pe_scheme' ]) - # Get the header key-value entries from the input DWI, remove those we don't wish to keep, and - # export the result to a new JSON file so that they can be inserted into the output header - with open('dwi.json', 'r', encoding='utf-8') as input_json_file: - keyval = json.load(input_json_file) - for key in keys_to_remove: - keyval.pop(key, None) - # Make sure to use the revised diffusion gradient table rather than that of the input; - # incorporates motion correction, and possibly also the explicit volume recombination - keyval['dw_scheme'] = image.Header('result.mif').keyval()['dw_scheme'] - # 'Stash' the phase encoding scheme of the original uncorrected DWIs, since it still - # may be useful information at some point in the future but is no longer relevant - # for e.g. tracking for different volumes, or performing any geometric corrections - if execute_applytopup: - keyval['prior_pe_scheme'] = dwi_manual_pe_scheme if dwi_manual_pe_scheme else dwi_pe_scheme - with open('output.json', 'w', encoding='utf-8') as output_json_file: - json.dump(keyval, output_json_file) - - - # Finish! - run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output) + grad_export_option, mrconvert_keyval='output.json', force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwifslpreproc/execute.py b/python/mrtrix3/dwifslpreproc/execute.py new file mode 100644 index 0000000000..a1e816947a --- /dev/null +++ b/python/mrtrix3/dwifslpreproc/execute.py @@ -0,0 +1,1331 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
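As a footnote to the header key-value pruning performed just before the final mrconvert in the removed script above, this toy dictionary example shows the intent: slice-timing metadata is always dropped from the output header, while phase-encoding metadata is dropped only when distortion correction was actually applied. The flag name and header values are hypothetical.

keyval = {'SliceTiming': [0.0, 0.5], 'PhaseEncodingDirection': 'j-',
          'TotalReadoutTime': 0.05, 'EchoTime': 0.09}       # toy input header entries
keys_to_remove = ['MultibandAccelerationFactor', 'SliceEncodingDirection', 'SliceTiming']
distortion_corrected = True                                  # hypothetical flag
if distortion_corrected:
  keys_to_remove.extend(['PhaseEncodingDirection', 'TotalReadoutTime', 'pe_scheme'])
for key in keys_to_remove:
  keyval.pop(key, None)
print(keyval)                                                # only EchoTime remains in this toy case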
+ +import glob, itertools, json, math, os, shutil, sys, shlex +from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module +from mrtrix3 import app, fsl, image, matrix, path, phaseencoding, run, utils #pylint: disable=no-name-in-module + +def execute(): #pylint: disable=unused-variable + + if utils.is_windows(): + raise MRtrixError('Script cannot run on Windows due to FSL dependency') + + image.check_3d_nonunity(path.from_user(app.ARGS.input, False)) + + pe_design = '' + if app.ARGS.rpe_none: + pe_design = 'None' + elif app.ARGS.rpe_pair: + pe_design = 'Pair' + if not app.ARGS.se_epi: + raise MRtrixError('If using the -rpe_pair option, the -se_epi option must be used to provide the spin-echo EPI data to be used by topup') + elif app.ARGS.rpe_all: + pe_design = 'All' + elif app.ARGS.rpe_header: + pe_design = 'Header' + else: + raise MRtrixError('Must explicitly specify phase-encoding acquisition design (even if none)') + + if app.ARGS.align_seepi and not app.ARGS.se_epi: + raise MRtrixError('-align_seepi option is only applicable when the -se_epi option is also used') + + fsl_path = os.environ.get('FSLDIR', '') + if not fsl_path: + raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') + + if not pe_design == 'None': + topup_config_path = os.path.join(fsl_path, 'etc', 'flirtsch', 'b02b0.cnf') + if not os.path.isfile(topup_config_path): + raise MRtrixError('Could not find necessary default config file for FSL topup command (expected location: ' + topup_config_path + ')') + topup_cmd = fsl.exe_name('topup') + + if not fsl.eddy_binary(True) and not fsl.eddy_binary(False): + raise MRtrixError('Could not find any version of FSL eddy command') + fsl_suffix = fsl.suffix() + app.check_output_path(app.ARGS.output) + + # Export the gradient table to the path requested by the user if necessary + grad_export_option = app.read_dwgrad_export_options() + + + eddyqc_path = None + eddyqc_files = [ 'eddy_parameters', 'eddy_movement_rms', 'eddy_restricted_movement_rms', \ + 'eddy_post_eddy_shell_alignment_parameters', 'eddy_post_eddy_shell_PE_translation_parameters', \ + 'eddy_outlier_report', 'eddy_outlier_map', 'eddy_outlier_n_stdev_map', 'eddy_outlier_n_sqr_stdev_map', \ + 'eddy_movement_over_time' ] + if app.ARGS.eddyqc_text: + eddyqc_path = path.from_user(app.ARGS.eddyqc_text, False) + elif app.ARGS.eddyqc_all: + eddyqc_path = path.from_user(app.ARGS.eddyqc_all, False) + eddyqc_files.extend([ 'eddy_outlier_free_data.nii.gz', 'eddy_cnr_maps.nii.gz', 'eddy_residuals.nii.gz' ]) + if eddyqc_path: + if os.path.exists(eddyqc_path): + if os.path.isdir(eddyqc_path): + if any(os.path.exists(os.path.join(eddyqc_path, filename)) for filename in eddyqc_files): + if app.FORCE_OVERWRITE: + app.warn('Output eddy QC directory already contains relevant files; these will be overwritten on completion') + else: + raise MRtrixError('Output eddy QC directory already contains relevant files (use -force to override)') + else: + if app.FORCE_OVERWRITE: + app.warn('Target for eddy QC output is not a directory; it will be overwritten on completion') + else: + raise MRtrixError('Target for eddy QC output exists, and is not a directory (use -force to override)') + + + eddy_manual_options = [] + topup_file_userpath = None + if app.ARGS.eddy_options: + # Initially process as a list; we'll convert back to a string later + eddy_manual_options = app.ARGS.eddy_options.strip().split() + # Check for erroneous usages before we perform any data importing + if 
any(entry.startswith('--mask=') for entry in eddy_manual_options): + raise MRtrixError('Cannot provide eddy processing mask via -eddy_options "--mask=..." as manipulations are required; use -eddy_mask option instead') + if any(entry.startswith('--slspec=') for entry in eddy_manual_options): + raise MRtrixError('Cannot provide eddy slice specification file via -eddy_options "--slspec=..." as manipulations are required; use -eddy_slspec option instead') + if '--resamp=lsr' in eddy_manual_options: + raise MRtrixError('dwifslpreproc does not currently support least-squares reconstruction; this cannot be simply passed via -eddy_options') + eddy_topup_entry = [entry for entry in eddy_manual_options if entry.startswith('--topup=')] + if len(eddy_topup_entry) > 1: + raise MRtrixError('Input to -eddy_options contains multiple "--topup=" entries') + if eddy_topup_entry: + # -topup_files and -se_epi are mutually exclusive, but need to check in case + # pre-calculated topup output files were provided this way instead + if app.ARGS.se_epi: + raise MRtrixError('Cannot use both -eddy_options "--topup=" and -se_epi') + topup_file_userpath = path.from_user(eddy_topup_entry[0][len('--topup='):], False) + eddy_manual_options = [entry for entry in eddy_manual_options if not entry.startswith('--topup=')] + + + # Don't import slspec file directly; just make sure it exists + if app.ARGS.eddy_slspec and not os.path.isfile(path.from_user(app.ARGS.eddy_slspec, False)): + raise MRtrixError('Unable to find file \"' + app.ARGS.eddy_slspec + '\" provided via -eddy_slspec option') + + + # Attempt to find pre-generated topup files before constructing the scratch directory + topup_input_movpar = None + topup_input_fieldcoef = None + if app.ARGS.topup_files: + if topup_file_userpath: + raise MRtrixError('Cannot use -topup_files option and also specify "... --topup= ..." 
within content of -eddy_options') + topup_file_userpath = path.from_user(app.ARGS.topup_files, False) + + execute_applytopup = pe_design != 'None' or topup_file_userpath + if execute_applytopup: + applytopup_cmd = fsl.exe_name('applytopup') + + if topup_file_userpath: + # Find files based on what the user may or may not have specified: + # - Path to the movement parameters text file + # - Path to the field coefficients image + # - Path prefix including the underscore + # - Path prefix omitting the underscore + + def check_movpar(): + if not os.path.isfile(topup_input_movpar): + raise MRtrixError('No topup movement parameter file found based on path "' + topup_file_userpath + '" (expected location: ' + topup_input_movpar + ')') + + def find_fieldcoef(fieldcoef_prefix): + fieldcoef_candidates = glob.glob(fieldcoef_prefix + '_fieldcoef.nii*') + if not fieldcoef_candidates: + raise MRtrixError('No topup field coefficient image found based on path "' + topup_file_userpath + '"') + if len(fieldcoef_candidates) > 1: + raise MRtrixError('Multiple topup field coefficient images found based on path "' + topup_file_userpath + '": ' + str(fieldcoef_candidates)) + return fieldcoef_candidates[0] + + if os.path.isfile(topup_file_userpath): + if topup_file_userpath.endswith('_movpar.txt'): + topup_input_movpar = topup_file_userpath + topup_input_fieldcoef = find_fieldcoef(topup_file_userpath[:-len('_movpar.txt')]) + elif topup_file_userpath.endswith('_fieldcoef.nii') or topup_file_userpath.endswith('_fieldcoef.nii.gz'): + topup_input_fieldcoef = topup_file_userpath + topup_input_movpar = topup_file_userpath + if topup_input_movpar.endswith('.gz'): + topup_input_movpar = topup_input_movpar[:-len('.gz')] + topup_input_movpar = topup_input_movpar[:-len('_fieldcoef.nii')] + '_movpar.txt' + check_movpar() + else: + raise MRtrixError('Unrecognised file "' + topup_file_userpath + '" specified as pre-calculated topup susceptibility field') + else: + topup_input_movpar = topup_file_userpath + if topup_input_movpar[-1] == '_': + topup_input_movpar = topup_input_movpar[:-1] + topup_input_movpar += '_movpar.txt' + check_movpar() + topup_input_fieldcoef = find_fieldcoef(topup_input_movpar[:-len('_movpar.txt')]) + + + # Convert all input images into MRtrix format and store in scratch directory first + app.make_scratch_dir() + + grad_import_option = app.read_dwgrad_import_options() + json_import_option = '' + if app.ARGS.json_import: + json_import_option = ' -json_import ' + path.from_user(app.ARGS.json_import) + json_export_option = ' -json_export ' + path.to_scratch('dwi.json', True) + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('dwi.mif') + grad_import_option + json_import_option + json_export_option) + if app.ARGS.se_epi: + image.check_3d_nonunity(path.from_user(app.ARGS.se_epi, False)) + run.command('mrconvert ' + path.from_user(app.ARGS.se_epi) + ' ' + path.to_scratch('se_epi.mif')) + if topup_file_userpath: + run.function(shutil.copyfile, topup_input_movpar, path.to_scratch('field_movpar.txt', False)) + # Can't run field spline coefficients image through mrconvert: + # topup encodes voxel sizes within the three NIfTI intent parameters, and + # applytopup requires that these be set, but mrconvert will wipe them + run.function(shutil.copyfile, topup_input_fieldcoef, path.to_scratch('field_fieldcoef.nii' + ('.gz' if topup_input_fieldcoef.endswith('.nii.gz') else ''), False)) + if app.ARGS.eddy_mask: + run.command('mrconvert ' + path.from_user(app.ARGS.eddy_mask) + ' ' + 
path.to_scratch('eddy_mask.mif') + ' -datatype bit') + + app.goto_scratch_dir() + + + # Get information on the input images, and check their validity + dwi_header = image.Header('dwi.mif') + if not len(dwi_header.size()) == 4: + raise MRtrixError('Input DWI must be a 4D image') + dwi_num_volumes = dwi_header.size()[3] + app.debug('Number of DWI volumes: ' + str(dwi_num_volumes)) + dwi_num_slices = dwi_header.size()[2] + app.debug('Number of DWI slices: ' + str(dwi_num_slices)) + dwi_pe_scheme = phaseencoding.get_scheme(dwi_header) + if app.ARGS.se_epi: + se_epi_header = image.Header('se_epi.mif') + # This doesn't necessarily apply any more: May be able to combine e.g. a P>>A from -se_epi with an A>>P b=0 image from the DWIs + # if not len(se_epi_header.size()) == 4: + # raise MRtrixError('File provided using -se_epi option must contain more than one image volume') + se_epi_pe_scheme = phaseencoding.get_scheme(se_epi_header) + if 'dw_scheme' not in dwi_header.keyval(): + raise MRtrixError('No diffusion gradient table found') + grad = dwi_header.keyval()['dw_scheme'] + if not len(grad) == dwi_num_volumes: + raise MRtrixError('Number of lines in gradient table (' + str(len(grad)) + ') does not match input image (' + str(dwi_num_volumes) + ' volumes); check your input data') + + + # Deal with slice timing information for eddy slice-to-volume correction + slice_encoding_axis = 2 + eddy_mporder = any(s.startswith('--mporder') for s in eddy_manual_options) + if eddy_mporder: + if 'SliceEncodingDirection' in dwi_header.keyval(): + slice_encoding_direction = dwi_header.keyval()['SliceEncodingDirection'] + app.debug('Slice encoding direction: ' + slice_encoding_direction) + if not slice_encoding_direction.startswith('k'): + raise MRtrixError('DWI header indicates that 3rd spatial axis is not the slice axis; this is not yet compatible with --mporder option in eddy, nor supported in dwifslpreproc') + slice_encoding_direction = image.axis2dir(slice_encoding_direction) + else: + app.console('No slice encoding direction information present; assuming third axis corresponds to slices') + slice_encoding_direction = [0,0,1] + slice_encoding_axis = [ index for index, value in enumerate(slice_encoding_direction) if value ][0] + slice_groups = [ ] + slice_timing = [ ] + # Since there's a chance that we may need to pad this info, we can't just copy this file + # to the scratch directory... 
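The slice-encoding handling above relies on converting a BIDS-style axis code such as 'k' or 'k-' into a direction vector and then locating the slice axis. The script itself uses image.axis2dir for this; the function below is only a simplified illustrative stand-in.

def axis_code_to_direction(code):
  # Simplified stand-in: map 'i'/'j'/'k' (optionally suffixed with '-') to a direction vector
  vectors = {'i': [1, 0, 0], 'j': [0, 1, 0], 'k': [0, 0, 1]}
  direction = vectors[code[0]]
  return [-value for value in direction] if code.endswith('-') else direction

slice_encoding_direction = axis_code_to_direction('k-')
slice_encoding_axis = [index for index, value in enumerate(slice_encoding_direction) if value][0]
print(slice_encoding_direction, slice_encoding_axis)   # [0, 0, -1] 2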
+ if app.ARGS.eddy_slspec: + try: + slice_groups = matrix.load_numeric(path.from_user(app.ARGS.eddy_slspec, False), dtype=int) + app.debug('Slice groups: ' + str(slice_groups)) + except ValueError: + try: + slice_timing = matrix.load_numeric(path.from_user(app.ARGS.eddy_slspec, False), dtype=float) + app.debug('Slice timing: ' + str(slice_timing)) + app.warn('\"slspec\" file provided to FSL eddy is supposed to contain slice indices for slice groups; ' + 'contents of file \"' + app.ARGS.eddy_slspec + '\" appears to instead be slice timings; ' + 'these data have been imported and will be converted to the appropriate format') + if len(slice_timing) != dwi_num_slices: + raise MRtrixError('Cannot use slice timing information from file \"' + app.ARGS.eddy_slspec + '\" for slice-to-volume correction: ' # pylint: disable=raise-missing-from + 'number of entries (' + str(len(slice_timing)) + ') does not match number of slices (' + str(dwi_num_slices) + ')') + except ValueError: + raise MRtrixError('Error parsing eddy \"slspec\" file \"' + app.ARGS.eddy_slspec + '\" ' # pylint: disable=raise-missing-from + '(please see FSL eddy help page, specifically the --slspec option)') + else: + if 'SliceTiming' not in dwi_header.keyval(): + raise MRtrixError('Cannot perform slice-to-volume correction in eddy: ' + '-eddy_slspec option not specified, and no slice timing information present in input DWI header') + slice_timing = dwi_header.keyval()['SliceTiming'] + app.debug('Initial slice timing contents from header: ' + str(slice_timing)) + if slice_timing in ['invalid', 'variable']: + raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' + 'data flagged as "' + slice_timing + '"') + # Fudges necessary to maniupulate nature of slice timing data in cases where + # bad JSON formatting has led to the data not being simply a list of floats + # (whether from MRtrix3 DICOM conversion or from anything else) + if isinstance(slice_timing, str): + slice_timing = slice_timing.split() + if not isinstance(slice_timing, list): + raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' + 'data is not a list') + if len(slice_timing) == 1: + slice_timing = slice_timing[0] + if not isinstance(slice_timing, list): + raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' + 'unexpected data format') + if isinstance(slice_timing[0], list): + if not all(len(entry) == 1 for entry in slice_timing): + raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' + 'data do not appear to be 1D') + slice_timing = [ entry[0] for entry in slice_timing ] + if not all(isinstance(entry, float) for entry in slice_timing): + try: + slice_timing = [ float(entry) for entry in slice_timing ] + except ValueError as exception: + raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' + 'data are not numeric') from exception + app.debug('Re-formatted slice timing contents from header: ' + str(slice_timing)) + if len(slice_timing) != dwi_num_slices: + raise MRtrixError('Cannot use slice timing information in image header for slice-to-volume correction: ' + 'number of entries (' + str(len(slice_timing)) + ') does not match number of slices (' + str(dwi_header.size()[2]) + ')') + elif app.ARGS.eddy_slspec: + app.warn('-eddy_slspec option provided, but "--mporder=" not provided via -eddy_options; ' + 'slice 
specification file not imported as it would not be utilised by eddy') + + + # Use new features of dirstat to query the quality of the diffusion acquisition scheme + # Need to know the mean b-value in each shell, and the asymmetry value of each shell + # But don't bother testing / warning the user if they're already controlling for this + if not app.ARGS.eddy_options or not any(s.startswith('--slm=') for s in app.ARGS.eddy_options.split()): + shell_bvalues = [ int(round(float(value))) for value in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] + shell_asymmetries = [ float(value) for value in run.command('dirstat dwi.mif -output asym').stdout.splitlines() ] + # dirstat will skip any b=0 shell by default; therefore for correspondence between + # shell_bvalues and shell_symmetry, need to remove any b=0 from the former + if len(shell_bvalues) == len(shell_asymmetries) + 1: + shell_bvalues = shell_bvalues[1:] + elif len(shell_bvalues) != len(shell_asymmetries): + raise MRtrixError('Number of b-values reported by mrinfo (' + str(len(shell_bvalues)) + ') does not match number of outputs provided by dirstat (' + str(len(shell_asymmetries)) + ')') + for bvalue, asymmetry in zip(shell_bvalues, shell_asymmetries): + if asymmetry >= 0.1: + app.warn('sampling of b=' + str(bvalue) + ' shell is ' + ('strongly' if asymmetry >= 0.4 else 'moderately') + \ + ' asymmetric; distortion correction may benefit from use of: ' + \ + '-eddy_options " ... --slm=linear ... "') + + + # Since we want to access user-defined phase encoding information regardless of whether or not + # such information is present in the header, let's grab it here + manual_pe_dir = None + if app.ARGS.pe_dir: + manual_pe_dir = [ float(i) for i in phaseencoding.direction(app.ARGS.pe_dir) ] + app.debug('Manual PE direction: ' + str(manual_pe_dir)) + manual_trt = None + if app.ARGS.readout_time: + manual_trt = float(app.ARGS.readout_time) + app.debug('Manual readout time: ' + str(manual_trt)) + + + # Utilise the b-value clustering algorithm in src/dwi/shells.* + shell_indices = [ [ int(i) for i in entry.split(',') ] for entry in image.mrinfo('dwi.mif', 'shell_indices').split(' ') ] + shell_bvalues = [ float(f) for f in image.mrinfo('dwi.mif', 'shell_bvalues').split(' ')] + bzero_threshold = float(CONFIG.get('BZeroThreshold', 10.0)) + + # For each volume index, store the index of the shell to which it is attributed + # (this will make it much faster to determine whether or not two volumes belong to the same shell) + vol2shell = [ -1 ] * dwi_num_volumes + for index, volumes in enumerate(shell_indices): + for volume in volumes: + vol2shell[volume] = index + assert all(index >= 0 for index in vol2shell) + + + def grads_match(one, two): + # Are the two volumes assigned to different b-value shells? + if vol2shell[one] != vol2shell[two]: + return False + # Does this shell correspond to b=0? 
+ if shell_bvalues[vol2shell[one]] <= bzero_threshold: + return True + # Dot product between gradient directions + # First, need to check for zero-norm vectors: + # - If both are zero, skip this check + # - If one is zero and the other is not, volumes don't match + # - If neither is zero, test the dot product + if any(grad[one][0:3]): + if not any(grad[two][0:3]): + return False + dot_product = grad[one][0]*grad[two][0] + grad[one][1]*grad[two][1] + grad[one][2]*grad[two][2] + if abs(dot_product) < 0.999: + return False + elif any(grad[two][0:3]): + return False + return True + + + # Manually generate a phase-encoding table for the input DWI based on user input + dwi_manual_pe_scheme = None + se_epi_manual_pe_scheme = None + auto_trt = 0.1 + dwi_auto_trt_warning = False + if manual_pe_dir: + + if manual_trt: + trt = manual_trt + else: + trt = auto_trt + dwi_auto_trt_warning = True + + # Still construct the manual PE scheme even with 'None' or 'Pair': + # there may be information in the header that we need to compare against + if pe_design == 'None': + line = list(manual_pe_dir) + line.append(trt) + dwi_manual_pe_scheme = [ line ] * dwi_num_volumes + app.debug('Manual DWI PE scheme for \'None\' PE design: ' + str(dwi_manual_pe_scheme)) + + # With 'Pair', also need to construct the manual scheme for SE EPIs + elif pe_design == 'Pair': + line = list(manual_pe_dir) + line.append(trt) + dwi_manual_pe_scheme = [ line ] * dwi_num_volumes + app.debug('Manual DWI PE scheme for \'Pair\' PE design: ' + str(dwi_manual_pe_scheme)) + if len(se_epi_header.size()) != 4: + raise MRtrixError('If using -rpe_pair option, image provided using -se_epi must be a 4D image') + se_epi_num_volumes = se_epi_header.size()[3] + if se_epi_num_volumes%2: + raise MRtrixError('If using -rpe_pair option, image provided using -se_epi must contain an even number of volumes') + # Assume that first half of volumes have same direction as series; + # second half have the opposite direction + se_epi_manual_pe_scheme = [ line ] * int(se_epi_num_volumes/2) + line = [ (-i if i else 0.0) for i in manual_pe_dir ] + line.append(trt) + se_epi_manual_pe_scheme.extend( [ line ] * int(se_epi_num_volumes/2) ) + app.debug('Manual SEEPI PE scheme for \'Pair\' PE design: ' + str(se_epi_manual_pe_scheme)) + + # If -rpe_all, need to scan through grad and figure out the pairings + # This will be required if relying on user-specified phase encode direction + # It will also be required at the end of the script for the manual recombination + # Update: The possible permutations of volume-matched acquisition is limited within the + # context of the -rpe_all option. In particular, the potential for having more + # than one b=0 volume within each half means that it is not possible to permit + # arbitrary ordering of those pairs, since b=0 volumes would then be matched + # despite having the same phase-encoding direction. Instead, explicitly enforce + # that volumes must be matched between the first and second halves of the DWI data. 
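The pairing logic that follows depends on grads_match(); the direction half of that test is sketched below with toy gradient rows (the shell-membership half, handled via vol2shell above, is omitted). Reversed directions count as a match because the magnitude of the dot product is compared against the 0.999 threshold.

def directions_match(grad_one, grad_two, tol=0.999):
  # Zero-norm (b=0 style) direction vectors only match each other
  if not any(grad_one[:3]):
    return not any(grad_two[:3])
  if not any(grad_two[:3]):
    return False
  dot = sum(a * b for a, b in zip(grad_one[:3], grad_two[:3]))
  return abs(dot) >= tol

print(directions_match([1, 0, 0, 1000], [-1, 0, 0, 1000]))   # True: anti-parallel pair
print(directions_match([1, 0, 0, 1000], [0, 1, 0, 1000]))    # False: orthogonal directions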
+ elif pe_design == 'All': + if dwi_num_volumes%2: + raise MRtrixError('If using -rpe_all option, input image must contain an even number of volumes') + grads_matched = [ dwi_num_volumes ] * dwi_num_volumes + grad_pairs = [ ] + app.debug('Commencing gradient direction matching; ' + str(dwi_num_volumes) + ' volumes') + for index1 in range(int(dwi_num_volumes/2)): + if grads_matched[index1] == dwi_num_volumes: # As yet unpaired + for index2 in range(int(dwi_num_volumes/2), dwi_num_volumes): + if grads_matched[index2] == dwi_num_volumes: # Also as yet unpaired + if grads_match(index1, index2): + grads_matched[index1] = index2 + grads_matched[index2] = index1 + grad_pairs.append([index1, index2]) + app.debug('Matched volume ' + str(index1) + ' with ' + str(index2) + ': ' + str(grad[index1]) + ' ' + str(grad[index2])) + break + else: + raise MRtrixError('Unable to determine matching reversed phase-encode direction volume for DWI volume ' + str(index1)) + if not len(grad_pairs) == dwi_num_volumes/2: + raise MRtrixError('Unable to determine complete matching DWI volume pairs for reversed phase-encode combination') + # Construct manual PE scheme here: + # Regardless of whether or not there's a scheme in the header, need to have it: + # if there's one in the header, want to compare to the manually-generated one + dwi_manual_pe_scheme = [ ] + for index in range(0, dwi_num_volumes): + line = list(manual_pe_dir) + if index >= int(dwi_num_volumes/2): + line = [ (-i if i else 0.0) for i in line ] + line.append(trt) + dwi_manual_pe_scheme.append(line) + app.debug('Manual DWI PE scheme for \'All\' PE design: ' + str(dwi_manual_pe_scheme)) + + else: # No manual phase encode direction defined + + if not pe_design == 'Header': + raise MRtrixError('If not using -rpe_header, phase encoding direction must be provided using the -pe_dir option') + + + + def scheme_dirs_match(one, two): + for line_one, line_two in zip(one, two): + if not line_one[0:3] == line_two[0:3]: + return False + return True + + def scheme_times_match(one, two): + for line_one, line_two in zip(one, two): + if abs(line_one[3] - line_two[3]) > 5e-3: + return False + return True + + + + # Determine whether or not the phase encoding table generated manually should be used + # (possibly instead of a table present in the image header) + overwrite_dwi_pe_scheme = False + if dwi_pe_scheme: + if manual_pe_dir: + # Compare manual specification to that read from the header; + # overwrite & give warning to user if they differ + # Bear in mind that this could even be the case for -rpe_all; + # relying on earlier code having successfully generated the 'appropriate' + # PE scheme for the input volume based on the diffusion gradient table + if not scheme_dirs_match(dwi_pe_scheme, dwi_manual_pe_scheme): + app.warn('User-defined phase-encoding direction design does not match what is stored in DWI image header; proceeding with user specification') + overwrite_dwi_pe_scheme = True + if manual_trt: + # Compare manual specification to that read from the header + if not scheme_times_match(dwi_pe_scheme, dwi_manual_pe_scheme): + app.warn('User-defined total readout time does not match what is stored in DWI image header; proceeding with user specification') + overwrite_dwi_pe_scheme = True + if overwrite_dwi_pe_scheme: + dwi_pe_scheme = dwi_manual_pe_scheme # May be used later for triggering volume recombination + else: + dwi_manual_pe_scheme = None # To guarantee that these generated data are never used + else: + # Nothing in the header; rely entirely on user 
specification + if pe_design == 'Header': + raise MRtrixError('No phase encoding information found in DWI image header') + if not manual_pe_dir: + raise MRtrixError('No phase encoding information provided either in header or at command-line') + if dwi_auto_trt_warning: + app.console('Total readout time not provided at command-line; assuming sane default of ' + str(auto_trt)) + dwi_pe_scheme = dwi_manual_pe_scheme # May be needed later for triggering volume recombination + + # This may be required by -rpe_all for extracting b=0 volumes while retaining phase-encoding information + import_dwi_pe_table_option = '' + if dwi_manual_pe_scheme: + phaseencoding.save('dwi_manual_pe_scheme.txt', dwi_manual_pe_scheme) + import_dwi_pe_table_option = ' -import_pe_table dwi_manual_pe_scheme.txt' + + + # Find the index of the first DWI volume that is a b=0 volume + # This needs to occur at the outermost loop as it is pertinent information + # not only for the -align_seepi option, but also for when the -se_epi option + # is not provided at all, and the input to topup is extracted solely from the DWIs + dwi_first_bzero_index = 0 + for line in grad: + if line[3] <= bzero_threshold: + break + dwi_first_bzero_index += 1 + app.debug('Index of first b=0 image in DWIs is ' + str(dwi_first_bzero_index)) + + + # Deal with the phase-encoding of the images to be fed to topup (if applicable) + execute_topup = (not pe_design == 'None') and not topup_file_userpath + overwrite_se_epi_pe_scheme = False + se_epi_path = 'se_epi.mif' + dwi_permvols_preeddy_option = '' + dwi_permvols_posteddy_option = '' + dwi_bzero_added_to_se_epi = False + if app.ARGS.se_epi: + + # Newest version of eddy requires that topup field be on the same grid as the eddy input DWI + if not image.match(dwi_header, se_epi_header, up_to_dim=3): + app.console('DWIs and SE-EPI images used for inhomogeneity field estimation are defined on different image grids; ' + 'the latter will be automatically re-gridded to match the former') + new_se_epi_path = 'se_epi_regrid.mif' + run.command('mrtransform ' + se_epi_path + ' - -reorient_fod no -interp sinc -template dwi.mif | mrcalc - 0.0 -max ' + new_se_epi_path) + app.cleanup(se_epi_path) + se_epi_path = new_se_epi_path + se_epi_header = image.Header(se_epi_path) + + # 3 possible sources of PE information: DWI header, topup image header, command-line + # Any pair of these may conflict, and any one could be absent + + # Have to switch here based on phase-encoding acquisition design + if pe_design == 'Pair': + # Criteria: + # * If present in own header, ignore DWI header entirely - + # - If also provided at command-line, look for conflict & report + # - If not provided at command-line, nothing to do + # * If _not_ present in own header: + # - If provided at command-line, infer appropriately + # - If not provided at command-line, but the DWI header has that information, infer appropriately + if se_epi_pe_scheme: + if manual_pe_dir: + if not scheme_dirs_match(se_epi_pe_scheme, se_epi_manual_pe_scheme): + app.warn('User-defined phase-encoding direction design does not match what is stored in SE EPI image header; proceeding with user specification') + overwrite_se_epi_pe_scheme = True + if manual_trt: + if not scheme_times_match(se_epi_pe_scheme, se_epi_manual_pe_scheme): + app.warn('User-defined total readout time does not match what is stored in SE EPI image header; proceeding with user specification') + overwrite_se_epi_pe_scheme = True + if overwrite_se_epi_pe_scheme: + se_epi_pe_scheme = se_epi_manual_pe_scheme + 
else: + se_epi_manual_pe_scheme = None # To guarantee that these data are never used + else: + overwrite_se_epi_pe_scheme = True + se_epi_pe_scheme = se_epi_manual_pe_scheme + + elif pe_design == 'All': + # Criteria: + # * If present in own header: + # - Nothing to do + # * If _not_ present in own header: + # - Don't have enough information to proceed + # - Is this too harsh? (e.g. Have rules by which it may be inferred from the DWI header / command-line) + if not se_epi_pe_scheme: + raise MRtrixError('If explicitly including SE EPI images when using -rpe_all option, they must come with their own associated phase-encoding information in the image header') + + elif pe_design == 'Header': + # Criteria: + # * If present in own header: + # Nothing to do (-pe_dir option is mutually exclusive) + # * If _not_ present in own header: + # Cannot proceed + if not se_epi_pe_scheme: + raise MRtrixError('No phase-encoding information present in SE-EPI image header') + # If there is no phase encoding contrast within the SE-EPI series, + # try combining it with the DWI b=0 volumes, see if that produces some contrast + # However, this should probably only be permitted if the -align_seepi option is defined + se_epi_pe_scheme_has_contrast = 'pe_scheme' in se_epi_header.keyval() + if not se_epi_pe_scheme_has_contrast: + if app.ARGS.align_seepi: + app.console('No phase-encoding contrast present in SE-EPI images; will examine again after combining with DWI b=0 images') + new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_dwibzeros.mif' + # Don't worry about trying to produce a balanced scheme here + run.command('dwiextract dwi.mif - -bzero | mrcat - ' + se_epi_path + ' ' + new_se_epi_path + ' -axis 3') + se_epi_header = image.Header(new_se_epi_path) + se_epi_pe_scheme_has_contrast = 'pe_scheme' in se_epi_header.keyval() + if se_epi_pe_scheme_has_contrast: + app.cleanup(se_epi_path) + se_epi_path = new_se_epi_path + se_epi_pe_scheme = phaseencoding.get_scheme(se_epi_header) + dwi_bzero_added_to_se_epi = True + # Delay testing appropriateness of the concatenation of these images + # (i.e. differences in contrast) to later + else: + raise MRtrixError('No phase-encoding contrast present in SE-EPI images, even after concatenating with b=0 images due to -align_seepi option; ' + 'cannot perform inhomogeneity field estimation') + else: + raise MRtrixError('No phase-encoding contrast present in SE-EPI images; cannot perform inhomogeneity field estimation') + + if app.ARGS.align_seepi: + + for field_name, description in { 'EchoTime': 'echo time', + 'RepetitionTime': 'repetition time', + 'FlipAngle': 'flip angle' }.items(): + dwi_value = dwi_header.keyval().get(field_name) + se_epi_value = se_epi_header.keyval().get(field_name) + if dwi_value and se_epi_value and dwi_value != se_epi_value: + app.warn('It appears that the spin-echo EPI images used for inhomogeneity field estimation have a different ' + description + ' to the DWIs being corrected. ' + 'This may cause issues in estimation of the field, as the first DWI b=0 volume will be added to the input series to topup ' + 'due to use of the -align_seepi option.') + + # If we are using the -se_epi option, and hence the input images to topup have not come from the DWIs themselves, + # we need to insert the first b=0 DWI volume to the start of the topup input image. Otherwise, the field estimated + # by topup will not be correctly aligned with the volumes as they are processed by eddy. 
+ # + # However, there's also a code path by which we may have already performed this addition. + # If we have already apliced the b=0 volumes from the DWI input with the SE-EPI image + # (due to the absence of phase-encoding contrast in the SE-EPI series), we don't want to + # re-attempt such a concatenation; the fact that the DWI b=0 images were inserted ahead of + # the SE-EPI images means the alignment issue should be dealt with. + + if dwi_first_bzero_index == len(grad) and not dwi_bzero_added_to_se_epi: + + app.warn('Unable to find b=0 volume in input DWIs to provide alignment between topup and eddy; script will proceed as though the -align_seepi option were not provided') + + # If b=0 volumes from the DWIs have already been added to the SE-EPI image due to an + # absence of phase-encoding contrast in the latter, we don't need to perform the following + elif not dwi_bzero_added_to_se_epi: + + run.command('mrconvert dwi.mif dwi_first_bzero.mif -coord 3 ' + str(dwi_first_bzero_index) + ' -axes 0,1,2') + dwi_first_bzero_pe = dwi_manual_pe_scheme[dwi_first_bzero_index] if overwrite_dwi_pe_scheme else dwi_pe_scheme[dwi_first_bzero_index] + + se_epi_pe_sum = [ 0, 0, 0 ] + se_epi_volume_to_remove = len(se_epi_pe_scheme) + for index, line in enumerate(se_epi_pe_scheme): + se_epi_pe_sum = [ i + j for i, j in zip(se_epi_pe_sum, line[0:3]) ] + if se_epi_volume_to_remove == len(se_epi_pe_scheme) and line[0:3] == dwi_first_bzero_pe[0:3]: + se_epi_volume_to_remove = index + new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_firstdwibzero.mif' + if (se_epi_pe_sum == [ 0, 0, 0 ]) and (se_epi_volume_to_remove < len(se_epi_pe_scheme)): + app.console('Balanced phase-encoding scheme detected in SE-EPI series; volume ' + str(se_epi_volume_to_remove) + ' will be removed and replaced with first b=0 from DWIs') + run.command('mrconvert ' + se_epi_path + ' - -coord 3 ' + ','.join([str(index) for index in range(len(se_epi_pe_scheme)) if not index == se_epi_volume_to_remove]) + ' | mrcat dwi_first_bzero.mif - ' + new_se_epi_path + ' -axis 3') + # Also need to update the phase-encoding scheme appropriately if it's being set manually + # (if embedded within the image headers, should be updated through the command calls) + if se_epi_manual_pe_scheme: + first_line = list(manual_pe_dir) + first_line.append(trt) + new_se_epi_manual_pe_scheme = [ ] + new_se_epi_manual_pe_scheme.append(first_line) + for index, entry in enumerate(se_epi_manual_pe_scheme): + if not index == se_epi_volume_to_remove: + new_se_epi_manual_pe_scheme.append(entry) + se_epi_manual_pe_scheme = new_se_epi_manual_pe_scheme + else: + if se_epi_pe_sum == [ 0, 0, 0 ] and se_epi_volume_to_remove == len(se_epi_pe_scheme): + app.console('Phase-encoding scheme of -se_epi image is balanced, but could not find appropriate volume with which to substitute first b=0 volume from DWIs; first b=0 DWI volume will be inserted to start of series, resulting in an unbalanced scheme') + else: + app.console('Unbalanced phase-encoding scheme detected in series provided via -se_epi option; first DWI b=0 volume will be inserted to start of series') + run.command('mrcat dwi_first_bzero.mif ' + se_epi_path + ' ' + new_se_epi_path + ' -axis 3') + # Also need to update the phase-encoding scheme appropriately + if se_epi_manual_pe_scheme: + first_line = list(manual_pe_dir) + first_line.append(trt) + se_epi_manual_pe_scheme = [ first_line, se_epi_manual_pe_scheme ] + + # Ended branching based on balanced-ness of PE acquisition scheme within SE-EPI volumes + 
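A compact illustration of the balanced-scheme test used in the -align_seepi branch above: if the SE-EPI phase-encoding vectors sum to zero and one volume shares the phase-encoding direction of the first DWI b=0 volume, that volume can be replaced rather than prepending an extra one. The two schemes below are invented purely for the example.

se_epi_pe_scheme = [[0, 1, 0, 0.05], [0, -1, 0, 0.05]]   # toy A>>P / P>>A pair
dwi_first_bzero_pe = [0, 1, 0, 0.05]                     # toy: first DWI b=0 is A>>P

pe_sum = [0, 0, 0]
volume_to_replace = len(se_epi_pe_scheme)
for index, line in enumerate(se_epi_pe_scheme):
  pe_sum = [i + j for i, j in zip(pe_sum, line[:3])]
  if volume_to_replace == len(se_epi_pe_scheme) and line[:3] == dwi_first_bzero_pe[:3]:
    volume_to_replace = index

is_balanced = pe_sum == [0, 0, 0] and volume_to_replace < len(se_epi_pe_scheme)
print(is_balanced, volume_to_replace)   # True 0: replace SE-EPI volume 0 with the DWI b=0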
app.cleanup(se_epi_path) + app.cleanup('dwi_first_bzero.mif') + se_epi_path = new_se_epi_path + + # Ended branching based on: + # - Detection of first b=0 volume in DWIs; or + # - Prior merge of SE-EPI and DWI b=0 volumes due to no phase-encoding contrast in SE-EPI + + # Completed checking for presence of -se_epi option + + elif not pe_design == 'None' and not topup_file_userpath: # No SE EPI images explicitly provided: In some cases, can extract appropriate b=0 images from DWI + + # If using 'All' or 'Header', and haven't been given any topup images, need to extract the b=0 volumes from the series, + # preserving phase-encoding information while doing so + # Preferably also make sure that there's some phase-encoding contrast in there... + # With -rpe_all, need to write inferred phase-encoding to file and import before using dwiextract so that the phase-encoding + # of the extracted b=0's is propagated to the generated b=0 series + run.command('mrconvert dwi.mif' + import_dwi_pe_table_option + ' - | dwiextract - ' + se_epi_path + ' -bzero') + se_epi_header = image.Header(se_epi_path) + + # If there's no contrast remaining in the phase-encoding scheme, it'll be written to + # PhaseEncodingDirection and TotalReadoutTime rather than pe_scheme + # In this scenario, we will be unable to run topup, or volume recombination + if 'pe_scheme' not in se_epi_header.keyval(): + if pe_design == 'All': + raise MRtrixError('DWI header indicates no phase encoding contrast between b=0 images; cannot proceed with volume recombination-based pre-processing') + app.warn('DWI header indicates no phase encoding contrast between b=0 images; proceeding without inhomogeneity field estimation') + execute_topup = False + run.function(os.remove, se_epi_path) + se_epi_path = None + se_epi_header = None + + + # If the first b=0 volume in the DWIs is in fact not the first volume (i.e. index zero), we're going to + # manually place it at the start of the DWI volumes when they are input to eddy, so that the + # first input volume to topup and the first input volume to eddy are one and the same. + # Note: If at a later date, the statistical outputs from eddy are considered (e.g. 
motion, outliers), + # then this volume permutation will need to be taken into account + if not topup_file_userpath: + if dwi_first_bzero_index == len(grad): + app.warn("No image volumes were classified as b=0 by MRtrix3; no permutation of order of DWI volumes can occur " + \ + "(do you need to adjust config file entry BZeroThreshold?)") + elif dwi_first_bzero_index: + app.console('First b=0 volume in input DWIs is volume index ' + str(dwi_first_bzero_index) + '; ' + 'this will be permuted to be the first volume (index 0) when eddy is run') + dwi_permvols_preeddy_option = ' -coord 3 ' + \ + str(dwi_first_bzero_index) + \ + ',0' + \ + (':' + str(dwi_first_bzero_index-1) if dwi_first_bzero_index > 1 else '') + \ + (',' + str(dwi_first_bzero_index+1) if dwi_first_bzero_index < dwi_num_volumes-1 else '') + \ + (':' + str(dwi_num_volumes-1) if dwi_first_bzero_index < dwi_num_volumes-2 else '') + dwi_permvols_posteddy_option = ' -coord 3 1' + \ + (':' + str(dwi_first_bzero_index) if dwi_first_bzero_index > 1 else '') + \ + ',0' + \ + (',' + str(dwi_first_bzero_index+1) if dwi_first_bzero_index < dwi_num_volumes-1 else '') + \ + (':' + str(dwi_num_volumes-1) if dwi_first_bzero_index < dwi_num_volumes-2 else '') + app.debug('mrconvert options for axis permutation:') + app.debug('Pre: ' + str(dwi_permvols_preeddy_option)) + app.debug('Post: ' + str(dwi_permvols_posteddy_option)) + + + + # This may be required when setting up the topup call + se_epi_manual_pe_table_option = '' + if se_epi_manual_pe_scheme: + phaseencoding.save('se_epi_manual_pe_scheme.txt', se_epi_manual_pe_scheme) + se_epi_manual_pe_table_option = ' -import_pe_table se_epi_manual_pe_scheme.txt' + + + # Need gradient table if running dwi2mask after applytopup to derive a brain mask for eddy + run.command('mrinfo dwi.mif -export_grad_mrtrix grad.b') + dwi2mask_algo = CONFIG['Dwi2maskAlgorithm'] + + eddy_in_topup_option = '' + dwi_post_eddy_crop_option = '' + slice_padded = False + dwi_path = 'dwi.mif' + if execute_topup: + + # topup will crash if its input image has a spatial dimension with a non-even size; + # presumably due to a downsampling by a factor of 2 in a multi-resolution scheme + # The newest eddy also requires the output from topup and the input DWIs to have the same size; + # therefore this restriction applies to the DWIs as well + # Rather than crop in this case (which would result in a cropped output image), + # duplicate the last slice on any problematic axis, and then crop that extra + # slice at the output step + # By this point, if the input SE-EPI images and DWIs are not on the same image grid, the + # SE-EPI images have already been re-gridded to DWI image space; + odd_axis_count = 0 + for axis_size in dwi_header.size()[:3]: + if int(axis_size%2): + odd_axis_count += 1 + if odd_axis_count: + app.console(str(odd_axis_count) + ' spatial ' + ('axes of DWIs have' if odd_axis_count > 1 else 'axis of DWIs has') + ' non-even size; ' + 'this will be automatically padded for compatibility with topup, and the extra slice' + ('s' if odd_axis_count > 1 else '') + ' erased afterwards') + for axis, axis_size in enumerate(dwi_header.size()[:3]): + if int(axis_size%2): + new_se_epi_path = os.path.splitext(se_epi_path)[0] + '_pad' + str(axis) + '.mif' + run.command('mrconvert ' + se_epi_path + ' -coord ' + str(axis) + ' ' + str(axis_size-1) + ' - | mrcat ' + se_epi_path + ' - ' + new_se_epi_path + ' -axis ' + str(axis)) + app.cleanup(se_epi_path) + se_epi_path = new_se_epi_path + new_dwi_path = os.path.splitext(dwi_path)[0] + 
'_pad' + str(axis) + '.mif' + run.command('mrconvert ' + dwi_path + ' -coord ' + str(axis) + ' ' + str(axis_size-1) + ' -clear dw_scheme - | mrcat ' + dwi_path + ' - ' + new_dwi_path + ' -axis ' + str(axis)) + app.cleanup(dwi_path) + dwi_path = new_dwi_path + dwi_post_eddy_crop_option += ' -coord ' + str(axis) + ' 0:' + str(axis_size-1) + if axis == slice_encoding_axis: + slice_padded = True + dwi_num_slices += 1 + # If we are padding the slice axis, and performing slice-to-volume correction, + # then we need to perform the corresponding padding to the slice timing + if eddy_mporder: + # At this point in the script, this information may be encoded either within + # the slice timing vector (as imported from the image header), or as + # slice groups (i.e. in the format expected by eddy). How these data are + # stored affects how the padding is performed. + if slice_timing: + slice_timing.append(slice_timing[-1]) + elif slice_groups: + # Can't edit in place when looping through the list + new_slice_groups = [ ] + for group in slice_groups: + if axis_size-1 in group: + group.append(axis_size) + new_slice_groups.append(group) + slice_groups = new_slice_groups + + + # Do the conversion in preparation for topup + run.command('mrconvert ' + se_epi_path + ' topup_in.nii' + se_epi_manual_pe_table_option + ' -strides -1,+2,+3,+4 -export_pe_table topup_datain.txt') + app.cleanup(se_epi_path) + + # Run topup + topup_manual_options = '' + if app.ARGS.topup_options: + topup_manual_options = ' ' + app.ARGS.topup_options.strip() + topup_output = run.command(topup_cmd + ' --imain=topup_in.nii --datain=topup_datain.txt --out=field --fout=field_map' + fsl_suffix + ' --config=' + topup_config_path + ' --verbose' + topup_manual_options) + with open('topup_output.txt', 'wb') as topup_output_file: + topup_output_file.write((topup_output.stdout + '\n' + topup_output.stderr + '\n').encode('utf-8', errors='replace')) + if app.VERBOSITY > 1: + app.console('Output of topup command:') + sys.stderr.write(topup_output.stdout + '\n' + topup_output.stderr + '\n') + + if execute_applytopup: + + # Apply the warp field to the input image series to get an initial corrected volume estimate + # applytopup can't receive the complete DWI input and correct it as a whole, because the phase-encoding + # details may vary between volumes + if dwi_manual_pe_scheme: + run.command('mrconvert ' + dwi_path + import_dwi_pe_table_option + ' - | mrinfo - -export_pe_eddy applytopup_config.txt applytopup_indices.txt') + else: + run.command('mrinfo ' + dwi_path + ' -export_pe_eddy applytopup_config.txt applytopup_indices.txt') + + # Call applytopup separately for each unique phase-encoding + # This should be the most compatible option with more complex phase-encoding acquisition designs, + # since we don't need to worry about applytopup performing volume recombination + # Plus, recombination doesn't need to be optimal; we're only using this to derive a brain mask + applytopup_image_list = [ ] + index = 1 + applytopup_config = matrix.load_matrix('applytopup_config.txt') + applytopup_indices = matrix.load_vector('applytopup_indices.txt', dtype=int) + applytopup_volumegroups = [ [ index for index, value in enumerate(applytopup_indices) if value == group ] for group in range(1, len(applytopup_config)+1) ] + app.debug('applytopup_config: ' + str(applytopup_config)) + app.debug('applytopup_indices: ' + str(applytopup_indices)) + app.debug('applytopup_volumegroups: ' + str(applytopup_volumegroups)) + for index, group in 
enumerate(applytopup_volumegroups): + prefix = os.path.splitext(dwi_path)[0] + '_pe_' + str(index) + input_path = prefix + '.nii' + json_path = prefix + '.json' + temp_path = prefix + '_applytopup.nii' + output_path = prefix + '_applytopup.mif' + run.command('mrconvert ' + dwi_path + ' ' + input_path + ' -coord 3 ' + ','.join(str(value) for value in group) + ' -strides -1,+2,+3,+4 -json_export ' + json_path) + run.command(applytopup_cmd + ' --imain=' + input_path + ' --datain=applytopup_config.txt --inindex=' + str(index+1) + ' --topup=field --out=' + temp_path + ' --method=jac') + app.cleanup(input_path) + temp_path = fsl.find_image(temp_path) + run.command('mrconvert ' + temp_path + ' ' + output_path + ' -json_import ' + json_path) + app.cleanup(json_path) + app.cleanup(temp_path) + applytopup_image_list.append(output_path) + index += 1 + + # Use the initial corrected volumes to derive a brain mask for eddy + if not app.ARGS.eddy_mask: + + dwi2mask_out_path = 'dwi2mask_out.mif' + if len(applytopup_image_list) == 1: + dwi2mask_in_path = applytopup_image_list[0] + else: + dwi2mask_in_path = 'dwi2mask_in.mif' + run.command('mrcat ' + ' '.join(applytopup_image_list) + ' ' + dwi2mask_in_path + ' -axis 3') + run.command('dwi2mask ' + dwi2mask_algo + ' ' + dwi2mask_in_path + ' ' + dwi2mask_out_path) + run.command('maskfilter ' + dwi2mask_out_path + ' dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') + if len(applytopup_image_list) > 1: + app.cleanup(dwi2mask_in_path) + app.cleanup(dwi2mask_out_path) + + app.cleanup(applytopup_image_list) + + eddy_in_topup_option = ' --topup=field' + + else: + + # Generate a processing mask for eddy based on the uncorrected input DWIs + if not app.ARGS.eddy_mask: + dwi2mask_out_path = 'dwi2mask_out.mif' + run.command('dwi2mask ' + dwi2mask_algo + ' ' + dwi_path + ' ' + dwi2mask_out_path) + run.command('maskfilter ' + dwi2mask_out_path + ' dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') + app.cleanup(dwi2mask_out_path) + + + # Use user supplied mask for eddy instead of one derived from the images using dwi2mask + if app.ARGS.eddy_mask: + if image.match('eddy_mask.mif', dwi_path, up_to_dim=3): + run.command('mrconvert eddy_mask.mif eddy_mask.nii -datatype float32 -stride -1,+2,+3') + else: + app.warn('User-provided processing mask for eddy does not match DWI voxel grid; resampling') + run.command('mrtransform eddy_mask.mif - -template ' + dwi_path + ' -interp linear | ' + + 'mrthreshold - -abs 0.5 - | ' + + 'mrconvert - eddy_mask.nii -datatype float32 -stride -1,+2,+3') + app.cleanup('eddy_mask.mif') + + # Generate the text file containing slice timing / grouping information if necessary + if eddy_mporder: + if slice_timing: + # This list contains, for each slice, the timing offset between acquisition of the + # first slice in the volume, and acquisition of that slice + # Eddy however requires a text file where each row contains those slices that were + # acquired with a single readout, in ordered rows from first slice (group) + # acquired to last slice (group) acquired + if sum(slice_encoding_direction) < 0: + slice_timing = reversed(slice_timing) + slice_groups = [ [ x[0] for x in g ] for _, g in itertools.groupby(sorted(enumerate(slice_timing), key=lambda x:x[1]), key=lambda x:x[1]) ] #pylint: disable=unused-variable + app.debug('Slice timing: ' + str(slice_timing)) + app.debug('Resulting slice groups: ' + str(slice_groups)) + # Variable slice_groups may have already been defined in the correct format. 
+ # In that instance, there's nothing to do other than write it to file; + # UNLESS the slice encoding direction is known to be reversed, in which case + # we need to reverse the slice order within the groups. This would however be + # expected to be rare, given that it requires the slspec text file to be provided + # manually while SliceEncodingDirection is nevertheless present. + elif slice_groups and sum(slice_encoding_direction) < 0: + new_slice_groups = [ ] + for group in slice_groups: + new_slice_groups.append([ dwi_num_slices-index for index in group ]) + app.debug('Slice groups reversed due to negative slice encoding direction') + app.debug('Original: ' + str(slice_groups)) + app.debug('New: ' + str(new_slice_groups)) + slice_groups = new_slice_groups + + matrix.save_numeric('slspec.txt', slice_groups, add_to_command_history=False, fmt='%d') + eddy_manual_options.append('--slspec=slspec.txt') + + + # Revert eddy_manual_options from a list back to a single string + eddy_manual_options = (' ' + ' '.join(eddy_manual_options)) if eddy_manual_options else '' + + + # Prepare input data for eddy + run.command('mrconvert ' + dwi_path + import_dwi_pe_table_option + dwi_permvols_preeddy_option + ' eddy_in.nii -strides -1,+2,+3,+4 -export_grad_fsl bvecs bvals -export_pe_eddy eddy_config.txt eddy_indices.txt') + app.cleanup(dwi_path) + + # Run eddy + # If a CUDA version is in PATH, run that first; if it fails, re-try using the non-CUDA version + eddy_all_options = '--imain=eddy_in.nii --mask=eddy_mask.nii --acqp=eddy_config.txt --index=eddy_indices.txt --bvecs=bvecs --bvals=bvals' + eddy_in_topup_option + eddy_manual_options + ' --out=dwi_post_eddy --verbose' + eddy_cuda_cmd = fsl.eddy_binary(True) + eddy_openmp_cmd = fsl.eddy_binary(False) + if eddy_cuda_cmd: + # If running CUDA version, but OpenMP version is also available, don't stop the script if the CUDA version fails + try: + eddy_output = run.command(eddy_cuda_cmd + ' ' + eddy_all_options) + except run.MRtrixCmdError as exception_cuda: + if not eddy_openmp_cmd: + raise + with open('eddy_cuda_failure_output.txt', 'wb') as eddy_output_file: + eddy_output_file.write(str(exception_cuda).encode('utf-8', errors='replace')) + app.console('CUDA version of \'eddy\' was not successful; attempting OpenMP version') + try: + eddy_output = run.command(eddy_openmp_cmd + ' ' + eddy_all_options) + except run.MRtrixCmdError as exception_openmp: + with open('eddy_openmp_failure_output.txt', 'wb') as eddy_output_file: + eddy_output_file.write(str(exception_openmp).encode('utf-8', errors='replace')) + # Both have failed; want to combine error messages + eddy_cuda_header = ('=' * len(eddy_cuda_cmd)) \ + + '\n' \ + + eddy_cuda_cmd \ + + '\n' \ + + ('=' * len(eddy_cuda_cmd)) \ + + '\n' + eddy_openmp_header = ('=' * len(eddy_openmp_cmd)) \ + + '\n' \ + + eddy_openmp_cmd \ + + '\n' \ + + ('=' * len(eddy_openmp_cmd)) \ + + '\n' + exception_stdout = eddy_cuda_header \ + + exception_cuda.stdout \ + + '\n\n' \ + + eddy_openmp_header \ + + exception_openmp.stdout \ + + '\n\n' + exception_stderr = eddy_cuda_header \ + + exception_cuda.stderr \ + + '\n\n' \ + + eddy_openmp_header \ + + exception_openmp.stderr \ + + '\n\n' + raise run.MRtrixCmdError('eddy* ' + eddy_all_options, + 1, + exception_stdout, + exception_stderr) + + else: + eddy_output = run.command(eddy_openmp_cmd + ' ' + eddy_all_options) + with open('eddy_output.txt', 'wb') as eddy_output_file: + eddy_output_file.write((eddy_output.stdout + '\n' + eddy_output.stderr + '\n').encode('utf-8', errors='replace')) + if app.VERBOSITY > 1: + 
app.console('Output of eddy command:') + sys.stderr.write(eddy_output.stdout + '\n' + eddy_output.stderr + '\n') + app.cleanup('eddy_in.nii') + + eddy_output_image_path = fsl.find_image('dwi_post_eddy') + + + # Check to see whether or not eddy has provided a rotated bvecs file; + # if it has, import this into the output image + bvecs_path = 'dwi_post_eddy.eddy_rotated_bvecs' + if not os.path.isfile(bvecs_path): + app.warn('eddy has not provided rotated bvecs file; using original gradient table. Recommend updating FSL eddy to version 5.0.9 or later.') + bvecs_path = 'bvecs' + + + # Run eddy qc tool QUAD if installed and one of -eddyqc_text or -eddyqc_all is specified + eddyqc_prefix = 'dwi_post_eddy' + if eddyqc_path: + if shutil.which('eddy_quad'): + + eddyqc_mask = 'eddy_mask.nii' + eddyqc_fieldmap = fsl.find_image('field_map') if execute_topup else None + eddyqc_slspec = 'slspec.txt' if eddy_mporder else None + + # If there was any relevant padding applied, then we want to provide + # the comprehensive set of files to EddyQC with that padding removed + if dwi_post_eddy_crop_option: + progress = app.ProgressBar('Removing image padding prior to running EddyQC', len(eddyqc_files) + 3) + + for eddy_filename in eddyqc_files: + if os.path.isfile('dwi_post_eddy.' + eddy_filename): + if slice_padded and eddy_filename in [ 'eddy_outlier_map', 'eddy_outlier_n_sqr_stdev_map', 'eddy_outlier_n_stdev_map' ]: + with open('dwi_post_eddy.' + eddy_filename, 'r', encoding='utf-8') as f_eddyfile: + eddy_data = f_eddyfile.readlines() + eddy_data_header = eddy_data[0] + eddy_data = [ ' '.join(line.strip().split(' ')[:-1]) for line in eddy_data[1:] ] + with open('dwi_post_eddy_unpad.' + eddy_filename, 'w', encoding='utf-8') as f_eddyfile: + f_eddyfile.write(eddy_data_header + '\n') + f_eddyfile.write('\n'.join(eddy_data) + '\n') + elif eddy_filename.endswith('.nii.gz'): + run.command('mrconvert dwi_post_eddy.' + eddy_filename + ' dwi_post_eddy_unpad.' + eddy_filename + dwi_post_eddy_crop_option) + else: + run.function(os.symlink, 'dwi_post_eddy.' + eddy_filename, 'dwi_post_eddy_unpad.' + eddy_filename) + app.cleanup('dwi_post_eddy.' 
+ eddy_filename) + progress.increment() + + if eddy_mporder and slice_padded: + app.debug('Current slice groups: ' + str(slice_groups)) + app.debug('Slice encoding direction: ' + str(slice_encoding_direction)) + # Remove padded slice from slice_groups, write new slspec + if sum(slice_encoding_direction) < 0: + slice_groups = [ [ index-1 for index in group if index ] for group in slice_groups ] + else: + slice_groups = [ [ index for index in group if index != dwi_num_slices-1 ] for group in slice_groups ] + eddyqc_slspec = 'slspec_unpad.txt' + app.debug('Slice groups after removal: ' + str(slice_groups)) + try: + # After this removal, slspec should now be a square matrix + assert all(len(group) == len(slice_groups[0]) for group in slice_groups[1:]) + matrix.save_matrix(eddyqc_slspec, slice_groups, add_to_command_history=False, fmt='%d') + except AssertionError: + matrix.save_numeric(eddyqc_slspec, slice_groups, add_to_command_history=False, fmt='%d') + raise + + run.command('mrconvert eddy_mask.nii eddy_mask_unpad.nii' + dwi_post_eddy_crop_option) + eddyqc_mask = 'eddy_mask_unpad.nii' + progress.increment() + run.command('mrconvert ' + fsl.find_image('field_map') + ' field_map_unpad.nii' + dwi_post_eddy_crop_option) + eddyqc_fieldmap = 'field_map_unpad.nii' + progress.increment() + run.command('mrconvert ' + eddy_output_image_path + ' dwi_post_eddy_unpad.nii.gz' + dwi_post_eddy_crop_option) + eddyqc_prefix = 'dwi_post_eddy_unpad' + progress.done() + + eddyqc_options = ' -idx eddy_indices.txt -par eddy_config.txt -b bvals -m ' + eddyqc_mask + if os.path.isfile(eddyqc_prefix + '.eddy_residuals.nii.gz'): + eddyqc_options += ' -g ' + bvecs_path + if execute_topup: + eddyqc_options += ' -f ' + eddyqc_fieldmap + if eddy_mporder: + eddyqc_options += ' -s ' + eddyqc_slspec + if app.VERBOSITY > 2: + eddyqc_options += ' -v' + try: + run.command('eddy_quad ' + eddyqc_prefix + eddyqc_options) + except run.MRtrixCmdError as exception: + with open('eddy_quad_failure_output.txt', 'wb') as eddy_quad_output_file: + eddy_quad_output_file.write(str(exception).encode('utf-8', errors='replace')) + app.debug(str(exception)) + app.warn('Error running automated EddyQC tool \'eddy_quad\'; QC data written to "' + eddyqc_path + '" will be files from "eddy" only') + # Delete the directory if the script only made it partway through + try: + shutil.rmtree(eddyqc_prefix + '.qc') + except OSError: + pass + else: + app.console('Command \'eddy_quad\' not found in PATH; skipping') + + + # Have to retain these images until after eddyQC is run + # If using -eddyqc_all, also write the mask provided to eddy to the output directory; + # therefore don't delete it yet here + if not app.ARGS.eddyqc_all: + app.cleanup('eddy_mask.nii') + if execute_topup: + app.cleanup(fsl.find_image('field_fieldcoef')) + + + # Get the axis strides from the input series, so the output image can be modified to match + stride_option = ' -strides ' + ','.join([str(i) for i in dwi_header.strides()]) + + + # Determine whether or not volume recombination should be performed + # This could be either due to use of -rpe_all option, or just due to the data provided with -rpe_header + # Rather than trying to re-use the code that was used in the case of -rpe_all, run fresh code + # The phase-encoding scheme needs to be checked also + volume_matchings = [ dwi_num_volumes ] * dwi_num_volumes + volume_pairs = [ ] + app.debug('Commencing gradient direction matching; ' + str(dwi_num_volumes) + ' volumes') + for index1 in range(dwi_num_volumes): + if 
volume_matchings[index1] == dwi_num_volumes: # As yet unpaired + for index2 in range(index1+1, dwi_num_volumes): + if volume_matchings[index2] == dwi_num_volumes: # Also as yet unpaired + # Here, need to check both gradient matching and reversed phase-encode direction + if not any(dwi_pe_scheme[index1][i] + dwi_pe_scheme[index2][i] for i in range(0,3)) and grads_match(index1, index2): + volume_matchings[index1] = index2 + volume_matchings[index2] = index1 + volume_pairs.append([index1, index2]) + app.debug('Matched volume ' + str(index1) + ' with ' + str(index2) + '\n' + + 'Phase encoding: ' + str(dwi_pe_scheme[index1]) + ' ' + str(dwi_pe_scheme[index2]) + '\n' + + 'Gradients: ' + str(grad[index1]) + ' ' + str(grad[index2])) + break + + + if len(volume_pairs) != int(dwi_num_volumes/2): + + if execute_topup: + app.cleanup('topup_in.nii') + app.cleanup(fsl.find_image('field_map')) + + # Convert the resulting volume to the output image, and re-insert the diffusion encoding + run.command('mrconvert ' + eddy_output_image_path + ' result.mif' + dwi_permvols_posteddy_option + dwi_post_eddy_crop_option + stride_option + ' -fslgrad ' + bvecs_path + ' bvals') + app.cleanup(eddy_output_image_path) + + else: + app.console('Detected matching DWI volumes with opposing phase encoding; performing explicit volume recombination') + + # Perform a manual combination of the volumes output by eddy, since LSR is disabled + + # Generate appropriate bvecs / bvals files + # Particularly if eddy has provided rotated bvecs, since we're combining two volumes into one that + # potentially have subject rotation between them (and therefore the sensitisation direction is + # not precisely equivalent), the best we can do is take the mean of the two vectors. + # Manual recombination of volumes needs to take into account the explicit volume matching + + bvecs = matrix.load_matrix(bvecs_path) + bvecs_combined_transpose = [ ] + bvals_combined = [ ] + + for pair in volume_pairs: + bvec_mean = [ 0.5*(bvecs[0][pair[0]] + bvecs[0][pair[1]]), + 0.5*(bvecs[1][pair[0]] + bvecs[1][pair[1]]), + 0.5*(bvecs[2][pair[0]] + bvecs[2][pair[1]]) ] + norm2 = matrix.dot(bvec_mean, bvec_mean) + + # If one diffusion sensitisation gradient direction is reversed with respect to + # the other, still want to enable their recombination; but need to explicitly + # account for this when averaging the two directions + if norm2 < 0.5: + bvec_mean = [ 0.5*(bvecs[0][pair[0]] - bvecs[0][pair[1]]), + 0.5*(bvecs[1][pair[0]] - bvecs[1][pair[1]]), + 0.5*(bvecs[2][pair[0]] - bvecs[2][pair[1]]) ] + norm2 = matrix.dot(bvec_mean, bvec_mean) + + # Occasionally a b=0 volume can have a zero vector + if norm2: + factor = 1.0 / math.sqrt(norm2) + new_vec = [ bvec_mean[0]*factor, bvec_mean[1]*factor, bvec_mean[2]*factor ] + else: + new_vec = [ 0.0, 0.0, 0.0 ] + bvecs_combined_transpose.append(new_vec) + bvals_combined.append(0.5 * (grad[pair[0]][3] + grad[pair[1]][3])) + + bvecs_combined = matrix.transpose(bvecs_combined_transpose) + matrix.save_matrix('bvecs_combined', bvecs_combined, add_to_command_history=False) + matrix.save_vector('bvals_combined', bvals_combined, add_to_command_history=False) + + # Prior to 5.0.8, a bug resulted in the output field map image from topup having an identity transform, + # regardless of the transform of the input image + # Detect this, and manually replace the transform if necessary + # (even if this doesn't cause an issue with the subsequent mrcalc command, it may in the future, it's better for + # visualising the script intermediate 
files, and it gives the user a warning about an out-of-date FSL) + field_map_image = fsl.find_image('field_map') + field_map_header = image.Header(field_map_image) + if not image.match('topup_in.nii', field_map_header, up_to_dim=3): + app.warn('topup output field image has erroneous header; recommend updating FSL to version 5.0.8 or later') + new_field_map_image = 'field_map_fix.mif' + run.command('mrtransform ' + field_map_image + ' -replace topup_in.nii ' + new_field_map_image) + app.cleanup(field_map_image) + field_map_image = new_field_map_image + # In FSL 6.0.0, field map image is erroneously constructed with the same number of volumes as the input image, + # with all but the first volume containing intensity-scaled duplicates of the uncorrected input images + # The first volume is however the expected field offset image + elif len(field_map_header.size()) == 4: + app.console('Correcting erroneous FSL 6.0.0 field map image output') + new_field_map_image = 'field_map_fix.mif' + run.command('mrconvert ' + field_map_image + ' -coord 3 0 -axes 0,1,2 ' + new_field_map_image) + app.cleanup(field_map_image) + field_map_image = new_field_map_image + app.cleanup('topup_in.nii') + + + # Derive the weight images + # Scaling term for field map is identical to the bandwidth provided in the topup config file + # (converts Hz to pixel count; that way a simple image gradient can be used to get the Jacobians) + # Let mrfilter apply the default 1 voxel size gaussian smoothing filter before calculating the field gradient + # + # The jacobian image may be different for any particular volume pair + # The appropriate PE directions and total readout times can be acquired from the eddy-style config/index files + # eddy_config.txt and eddy_indices.txt + eddy_config = matrix.load_matrix('eddy_config.txt') + eddy_indices = matrix.load_vector('eddy_indices.txt', dtype=int) + app.debug('EDDY config: ' + str(eddy_config)) + app.debug('EDDY indices: ' + str(eddy_indices)) + + # This section derives, for each phase encoding configuration present, the 'weight' to be applied + # to the image during volume recombination, which is based on the Jacobian of the field in the + # phase encoding direction + for index, config in enumerate(eddy_config): + pe_axis = [ i for i, e in enumerate(config[0:3]) if e != 0][0] + sign_multiplier = ' -1.0 -mult' if config[pe_axis] < 0 else '' + field_derivative_path = 'field_deriv_pe_' + str(index+1) + '.mif' + run.command('mrcalc ' + field_map_image + ' ' + str(config[3]) + ' -mult' + sign_multiplier + ' - | mrfilter - gradient - | mrconvert - ' + field_derivative_path + ' -coord 3 ' + str(pe_axis) + ' -axes 0,1,2') + jacobian_path = 'jacobian_' + str(index+1) + '.mif' + run.command('mrcalc 1.0 ' + field_derivative_path + ' -add 0.0 -max ' + jacobian_path) + app.cleanup(field_derivative_path) + run.command('mrcalc ' + jacobian_path + ' ' + jacobian_path + ' -mult weight' + str(index+1) + '.mif') + app.cleanup(jacobian_path) + app.cleanup(field_map_image) + + # If eddy provides its main image output in a compressed format, the code block below will need to + # uncompress that image independently for every volume pair. Instead, if this is the case, let's + # convert it to an uncompressed format before we do anything with it. 
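# Editor's note: the sketch below is not part of the patch; it is a hypothetical NumPy
# rendering of the Jacobian-based weighting derived above and of the Skare & Bammer (2010)
# recombination applied below, as performed per-voxel by the mrfilter/mrcalc calls.
# It omits the smoothing applied by 'mrfilter - gradient', and all names are made up.
import numpy as np

def recombine_pair(vol_up, vol_down, field_hz, total_readout_time, pe_axis):
    # Field (Hz) scaled by total readout time gives the per-voxel shift (in pixels) along PE
    shift = field_hz * total_readout_time
    # Jacobian of the distortion along the phase-encoding axis, floored at zero
    derivative = np.gradient(shift, axis=pe_axis)
    jacobian_up = np.clip(1.0 + derivative, 0.0, None)    # one phase-encoding polarity
    jacobian_down = np.clip(1.0 - derivative, 0.0, None)  # the reversed polarity
    weight_up, weight_down = jacobian_up ** 2, jacobian_down ** 2
    # Weighted average: contrast is drawn preferentially from the stretched image
    return (vol_up * weight_up + vol_down * weight_down) / np.maximum(weight_up + weight_down, np.finfo(float).eps)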
+ if eddy_output_image_path.endswith('.gz'): + new_eddy_output_image_path = 'dwi_post_eddy_uncompressed.mif' + run.command('mrconvert ' + eddy_output_image_path + ' ' + new_eddy_output_image_path) + app.cleanup(eddy_output_image_path) + eddy_output_image_path = new_eddy_output_image_path + + # If the DWI volumes were permuted prior to running eddy, then the simplest approach is to permute them + # back to their original positions; otherwise, the stored gradient vector directions / phase encode + # directions / matched volume pairs are no longer appropriate + if dwi_permvols_posteddy_option: + new_eddy_output_image_path = os.path.splitext(eddy_output_image_path)[0] + '_volpermuteundo.mif' + run.command('mrconvert ' + eddy_output_image_path + dwi_permvols_posteddy_option + ' ' + new_eddy_output_image_path) + app.cleanup(eddy_output_image_path) + eddy_output_image_path = new_eddy_output_image_path + + # This section extracts the two volumes corresponding to each reversed phase-encoded volume pair, and + # derives a single image volume based on the recombination equation + combined_image_list = [ ] + progress = app.ProgressBar('Performing explicit volume recombination', len(volume_pairs)) + for index, volumes in enumerate(volume_pairs): + pe_indices = [ eddy_indices[i] for i in volumes ] + run.command('mrconvert ' + eddy_output_image_path + ' volume0.mif -coord 3 ' + str(volumes[0])) + run.command('mrconvert ' + eddy_output_image_path + ' volume1.mif -coord 3 ' + str(volumes[1])) + # Volume recombination equation described in Skare and Bammer 2010 + combined_image_path = 'combined' + str(index) + '.mif' + run.command('mrcalc volume0.mif weight' + str(pe_indices[0]) + '.mif -mult volume1.mif weight' + str(pe_indices[1]) + '.mif -mult -add weight' + str(pe_indices[0]) + '.mif weight' + str(pe_indices[1]) + '.mif -add -divide 0.0 -max ' + combined_image_path) + combined_image_list.append(combined_image_path) + run.function(os.remove, 'volume0.mif') + run.function(os.remove, 'volume1.mif') + progress.increment() + progress.done() + + app.cleanup(eddy_output_image_path) + for index in range(0, len(eddy_config)): + app.cleanup('weight' + str(index+1) + '.mif') + + # Finally the recombined volumes must be concatenated to produce the resulting image series + combine_command = ['mrcat', combined_image_list, '-', '-axis', '3', '|', \ + 'mrconvert', '-', 'result.mif', '-fslgrad', 'bvecs_combined', 'bvals_combined'] + if dwi_post_eddy_crop_option: + combine_command.extend(dwi_post_eddy_crop_option.strip().split(' ')) + combine_command.extend(stride_option.strip().split(' ')) + run.command(combine_command) + app.cleanup(combined_image_list) + + + # Grab any relevant files that eddy has created, and copy them to the requested directory + if eddyqc_path: + if app.FORCE_OVERWRITE and os.path.exists(eddyqc_path) and not os.path.isdir(eddyqc_path): + run.function(os.remove, eddyqc_path) + if not os.path.exists(eddyqc_path): + run.function(os.makedirs, eddyqc_path) + for filename in eddyqc_files: + if os.path.exists(eddyqc_prefix + '.' + filename): + # If this is an image, and axis padding was applied, want to undo the padding + if filename.endswith('.nii.gz') and dwi_post_eddy_crop_option: + run.command('mrconvert ' + eddyqc_prefix + '.' + filename + ' ' + shlex.quote(os.path.join(eddyqc_path, filename)) + dwi_post_eddy_crop_option, force=app.FORCE_OVERWRITE) + else: + run.function(shutil.copy, eddyqc_prefix + '.' 
+ filename, os.path.join(eddyqc_path, filename)) + # Also grab any files generated by the eddy qc tool QUAD + if os.path.isdir(eddyqc_prefix + '.qc'): + if app.FORCE_OVERWRITE and os.path.exists(os.path.join(eddyqc_path, 'quad')): + run.function(shutil.rmtree, os.path.join(eddyqc_path, 'quad')) + run.function(shutil.copytree, eddyqc_prefix + '.qc', os.path.join(eddyqc_path, 'quad')) + # Also grab the brain mask that was provided to eddy if -eddyqc_all was specified + if app.ARGS.eddyqc_all: + if dwi_post_eddy_crop_option: + run.command('mrconvert eddy_mask.nii ' + shlex.quote(os.path.join(eddyqc_path, 'eddy_mask.nii')) + dwi_post_eddy_crop_option, force=app.FORCE_OVERWRITE) + else: + run.function(shutil.copy, 'eddy_mask.nii', os.path.join(eddyqc_path, 'eddy_mask.nii')) + app.cleanup('eddy_mask.nii') + + + + + keys_to_remove = [ 'MultibandAccelerationFactor', 'SliceEncodingDirection', 'SliceTiming' ] + # These keys are still relevant for the output data if no EPI distortion correction was performed + if execute_applytopup: + keys_to_remove.extend([ 'PhaseEncodingDirection', 'TotalReadoutTime', 'pe_scheme' ]) + # Get the header key-value entries from the input DWI, remove those we don't wish to keep, and + # export the result to a new JSON file so that they can be inserted into the output header + with open('dwi.json', 'r', encoding='utf-8') as input_json_file: + keyval = json.load(input_json_file) + for key in keys_to_remove: + keyval.pop(key, None) + # Make sure to use the revised diffusion gradient table rather than that of the input; + # incorporates motion correction, and possibly also the explicit volume recombination + keyval['dw_scheme'] = image.Header('result.mif').keyval()['dw_scheme'] + # 'Stash' the phase encoding scheme of the original uncorrected DWIs, since it still + # may be useful information at some point in the future but is no longer relevant + # for e.g. tracking for different volumes, or performing any geometric corrections + if execute_applytopup: + keyval['prior_pe_scheme'] = dwi_manual_pe_scheme if dwi_manual_pe_scheme else dwi_pe_scheme + with open('output.json', 'w', encoding='utf-8') as output_json_file: + json.dump(keyval, output_json_file) + + + # Finish! + run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output) + grad_export_option, mrconvert_keyval='output.json', force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwifslpreproc/usage.py b/python/mrtrix3/dwifslpreproc/usage.py new file mode 100644 index 0000000000..0eecd1e10d --- /dev/null +++ b/python/mrtrix3/dwifslpreproc/usage.py @@ -0,0 +1,80 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app, _version #pylint: disable=no-name-in-module + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Robert E. 
Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Perform diffusion image pre-processing using FSL\'s eddy tool; including inhomogeneity distortion correction using FSL\'s topup tool if possible') + cmdline.add_description('This script is intended to provide convenience of use of the FSL software tools topup and eddy for performing DWI pre-processing, by encapsulating some of the surrounding image data and metadata processing steps. It is intended to simplify these processing steps for most commonly-used DWI acquisition strategies, whilst also providing support for some more exotic acquisitions. The "example usage" section demonstrates the ways in which the script can be used based on the (compulsory) -rpe_* command-line options.') + cmdline.add_description('More information on use of the dwifslpreproc command can be found at the following link: \nhttps://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/dwi_preprocessing/dwifslpreproc.html') + cmdline.add_description('Note that the MRtrix3 command dwi2mask will automatically be called to derive a processing mask for the FSL command "eddy", which determines which voxels contribute to the estimation of geometric distortion parameters and possibly also the classification of outlier slices. If FSL command "topup" is used to estimate a susceptibility field, then dwi2mask will be executed on the results of running FSL command "applytopup" to the input DWIs; otherwise it will be executed directly on the input DWIs. Alternatively, the -eddy_mask option can be specified in order to manually provide such a processing mask. More information on mask derivation from DWI data can be found at: https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/dwi_preprocessing/masking.html') + cmdline.add_description('The "-topup_options" and "-eddy_options" command-line options allow the user to pass desired command-line options directly to the FSL commands topup and eddy. The available options for those commands may vary between versions of FSL; users can interrogate these by querying the help pages of the installed software, and/or the FSL online documentation: (topup) https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/TopupUsersGuide ; (eddy) https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/UsersGuide') + cmdline.add_description('The script will attempt to run the CUDA version of eddy; if this does not succeed for any reason, or is not present on the system, the CPU version will be attempted instead. By default, the CUDA eddy binary found that indicates compilation against the most recent version of CUDA will be attempted; this can be over-ridden by providing a soft-link "eddy_cuda" within your path that links to the binary you wish to be executed.') + cmdline.add_description('Note that this script does not perform any explicit registration between images provided to topup via the -se_epi option, and the DWI volumes provided to eddy. In some instances (motion between acquisitions) this can result in erroneous application of the inhomogeneity field during distortion correction. Use of the -align_seepi option is advocated in this scenario, which ensures that the first volume in the series provided to topup is also the first volume in the series provided to eddy, guaranteeing alignment. 
But a prerequisite for this approach is that the image contrast within the images provided to the -se_epi option must match the b=0 volumes present within the input DWI series: this means equivalent TE, TR and flip angle (note that differences in multi-band factors between two acquisitions may lead to differences in TR).') + cmdline.add_example_usage('A basic DWI acquisition, where all image volumes are acquired in a single protocol with fixed phase encoding', + 'dwifslpreproc DWI_in.mif DWI_out.mif -rpe_none -pe_dir ap -readout_time 0.55', + 'Due to use of a single fixed phase encoding, no EPI distortion correction can be applied in this case.') + cmdline.add_example_usage('DWIs all acquired with a single fixed phase encoding; but additionally a pair of b=0 images with reversed phase encoding to estimate the inhomogeneity field', + 'mrcat b0_ap.mif b0_pa.mif b0_pair.mif -axis 3; dwifslpreproc DWI_in.mif DWI_out.mif -rpe_pair -se_epi b0_pair.mif -pe_dir ap -readout_time 0.72 -align_seepi', + 'Here the two individual b=0 volumes are concatenated into a single 4D image series, and this is provided to the script via the -se_epi option. Note that with the -rpe_pair option used here, which indicates that the SE-EPI image series contains one or more pairs of b=0 images with reversed phase encoding, the FIRST HALF of the volumes in the SE-EPI series must possess the same phase encoding as the input DWI series, while the second half are assumed to contain the opposite phase encoding direction but identical total readout time. Use of the -align_seepi option is advocated as long as its use is valid (more information in the Description section).') + cmdline.add_example_usage('All DWI directions & b-values are acquired twice, with the phase encoding direction of the second acquisition protocol being reversed with respect to the first', + 'mrcat DWI_lr.mif DWI_rl.mif DWI_all.mif -axis 3; dwifslpreproc DWI_all.mif DWI_out.mif -rpe_all -pe_dir lr -readout_time 0.66', + 'Here the two acquisition protocols are concatenated into a single DWI series containing all acquired volumes. The direction indicated via the -pe_dir option should be the direction of phase encoding used in acquisition of the FIRST HALF of volumes in the input DWI series; ie. the first of the two files that was provided to the mrcat command. In this usage scenario, the output DWI series will contain the same number of image volumes as ONE of the acquired DWI series (ie. half of the number in the concatenated series); this is because the script will identify pairs of volumes that possess the same diffusion sensitisation but reversed phase encoding, and perform explicit recombination of those volume pairs in such a way that image contrast in regions of inhomogeneity is determined from the stretched rather than the compressed image.') + cmdline.add_example_usage('Any acquisition scheme that does not fall into one of the example usages above', + 'mrcat DWI_*.mif DWI_all.mif -axis 3; mrcat b0_*.mif b0_all.mif -axis 3; dwifslpreproc DWI_all.mif DWI_out.mif -rpe_header -se_epi b0_all.mif -align_seepi', + 'With this usage, the relevant phase encoding information is determined entirely based on the contents of the relevant image headers, and dwifslpreproc prepares all metadata for the executed FSL commands accordingly. This can therefore be used if the particular DWI acquisition strategy used does not correspond to one of the simple examples as described in the prior examples. 
This usage is predicated on the headers of the input files containing appropriately-named key-value fields such that MRtrix3 tools identify them as such. In some cases, conversion from DICOM using MRtrix3 commands will automatically extract and embed this information; however this is not true for all scanner vendors and/or software versions. In the latter case it may be possible to manually provide these metadata; either using the -json_import command-line option of dwifslpreproc, or the -json_import or one of the -import_pe_* command-line options of MRtrix3\'s mrconvert command (and saving in .mif format) prior to running dwifslpreproc.') + cmdline.add_citation('Andersson, J. L. & Sotiropoulos, S. N. An integrated approach to correction for off-resonance effects and subject movement in diffusion MR imaging. NeuroImage, 2015, 125, 1063-1078', is_external=True) + cmdline.add_citation('Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', is_external=True) + cmdline.add_citation('Skare, S. & Bammer, R. Jacobian weighting of distortion corrected EPI data. Proceedings of the International Society for Magnetic Resonance in Medicine, 2010, 5063', condition='If performing recombination of diffusion-weighted volume pairs with opposing phase encoding directions', is_external=True) + cmdline.add_citation('Andersson, J. L.; Skare, S. & Ashburner, J. How to correct susceptibility distortions in spin-echo echo-planar images: application to diffusion tensor imaging. NeuroImage, 2003, 20, 870-888', condition='If performing EPI susceptibility distortion correction', is_external=True) + cmdline.add_citation('Andersson, J. L. R.; Graham, M. S.; Zsoldos, E. & Sotiropoulos, S. N. Incorporating outlier detection and replacement into a non-parametric framework for movement and distortion correction of diffusion MR images. NeuroImage, 2016, 141, 556-572', condition='If including "--repol" in -eddy_options input', is_external=True) + cmdline.add_citation('Andersson, J. L. R.; Graham, M. S.; Drobnjak, I.; Zhang, H.; Filippini, N. & Bastiani, M. Towards a comprehensive framework for movement and distortion correction of diffusion MR images: Within volume movement. NeuroImage, 2017, 152, 450-466', condition='If including "--mporder" in -eddy_options input', is_external=True) + cmdline.add_citation('Bastiani, M.; Cottaar, M.; Fitzgibbon, S.P.; Suri, S.; Alfaro-Almagro, F.; Sotiropoulos, S.N.; Jbabdi, S.; Andersson, J.L.R. Automated quality control for within and between studies diffusion MRI data using a non-parametric framework for movement and distortion correction. 
NeuroImage, 2019, 184, 801-812', condition='If using -eddyqc_text or -eddyqc_all option and eddy_quad is installed', is_external=True) + cmdline.add_argument('input', help='The input DWI series to be corrected') + cmdline.add_argument('output', help='The output corrected image series') + cmdline.add_argument('-json_import', metavar=('file'), help='Import image header information from an associated JSON file (may be necessary to determine phase encoding information)') + pe_options = cmdline.add_argument_group('Options for manually specifying the phase encoding of the input DWIs') + pe_options.add_argument('-pe_dir', metavar=('PE'), help='Manually specify the phase encoding direction of the input series; can be a signed axis number (e.g. -0, 1, +2), an axis designator (e.g. RL, PA, IS), or NIfTI axis codes (e.g. i-, j, k)') + pe_options.add_argument('-readout_time', metavar=('time'), type=float, help='Manually specify the total readout time of the input series (in seconds)') + distcorr_options = cmdline.add_argument_group('Options for achieving correction of susceptibility distortions') + distcorr_options.add_argument('-se_epi', metavar=('image'), help='Provide an additional image series consisting of spin-echo EPI images, which is to be used exclusively by topup for estimating the inhomogeneity field (i.e. it will not form part of the output image series)') + distcorr_options.add_argument('-align_seepi', action='store_true', help='Achieve alignment between the SE-EPI images used for inhomogeneity field estimation, and the DWIs (more information in Description section)') + distcorr_options.add_argument('-topup_options', metavar=('" TopupOptions"'), help='Manually provide additional command-line options to the topup command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to topup)') + distcorr_options.add_argument('-topup_files', metavar=('prefix'), help='Provide files generated by prior execution of the FSL "topup" command to be utilised by eddy') + cmdline.flag_mutually_exclusive_options( [ 'topup_files', 'se_epi' ], False ) + cmdline.flag_mutually_exclusive_options( [ 'topup_files', 'align_seepi' ], False ) + cmdline.flag_mutually_exclusive_options( [ 'topup_files', 'topup_options' ], False ) + eddy_options = cmdline.add_argument_group('Options for affecting the operation of the FSL "eddy" command') + eddy_options.add_argument('-eddy_mask', metavar=('image'), help='Provide a processing mask to use for eddy, instead of having dwifslpreproc generate one internally using dwi2mask') + eddy_options.add_argument('-eddy_slspec', metavar=('file'), help='Provide a file containing slice groupings for eddy\'s slice-to-volume registration') + eddy_options.add_argument('-eddy_options', metavar=('" EddyOptions"'), help='Manually provide additional command-line options to the eddy command (provide a string within quotation marks that contains at least one space, even if only passing a single command-line option to eddy)') + eddyqc_options = cmdline.add_argument_group('Options for utilising EddyQC') + eddyqc_options.add_argument('-eddyqc_text', metavar=('directory'), help='Copy the various text-based statistical outputs generated by eddy, and the output of eddy_qc (if installed), into an output directory') + eddyqc_options.add_argument('-eddyqc_all', metavar=('directory'), help='Copy ALL outputs generated by eddy (including images), and the output of eddy_qc (if installed), into an output directory') + 
cmdline.flag_mutually_exclusive_options( [ 'eddyqc_text', 'eddyqc_all' ], False ) + app.add_dwgrad_export_options(cmdline) + app.add_dwgrad_import_options(cmdline) + rpe_options = cmdline.add_argument_group('Options for specifying the acquisition phase-encoding design; note that one of the -rpe_* options MUST be provided') + rpe_options.add_argument('-rpe_none', action='store_true', help='Specify that no reversed phase-encoding image data is being provided; eddy will perform eddy current and motion correction only') + rpe_options.add_argument('-rpe_pair', action='store_true', help='Specify that a set of images (typically b=0 volumes) will be provided for use in inhomogeneity field estimation only (using the -se_epi option)') + rpe_options.add_argument('-rpe_all', action='store_true', help='Specify that ALL DWIs have been acquired with opposing phase-encoding') + rpe_options.add_argument('-rpe_header', action='store_true', help='Specify that the phase-encoding information can be found in the image header(s), and that this is the information that the script should use') + cmdline.flag_mutually_exclusive_options( [ 'rpe_none', 'rpe_pair', 'rpe_all', 'rpe_header' ], True ) + cmdline.flag_mutually_exclusive_options( [ 'rpe_none', 'se_epi' ], False ) # May still technically provide -se_epi even with -rpe_all + cmdline.flag_mutually_exclusive_options( [ 'rpe_pair', 'topup_files'] ) # Would involve two separate sources of inhomogeneity field information + cmdline.flag_mutually_exclusive_options( [ 'se_epi', 'topup_files'] ) # Would involve two separate sources of inhomogeneity field information + cmdline.flag_mutually_exclusive_options( [ 'rpe_header', 'pe_dir' ], False ) # Can't manually provide phase-encoding direction if expecting it to be in the header + cmdline.flag_mutually_exclusive_options( [ 'rpe_header', 'readout_time' ], False ) # Can't manually provide readout time if expecting it to be in the header diff --git a/python/mrtrix3/dwigradcheck/__init__.py b/python/mrtrix3/dwigradcheck/__init__.py index 0deacf64e6..e69de29bb2 100644 --- a/python/mrtrix3/dwigradcheck/__init__.py +++ b/python/mrtrix3/dwigradcheck/__init__.py @@ -1,204 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -import copy, numbers, os, shutil, sys - - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import app, _version #pylint: disable=no-name-in-module, import-outside-toplevel - cmdline.set_author('Robert E. 
Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Check the orientation of the diffusion gradient table') - cmdline.add_description('Note that the corrected gradient table can be output using the -export_grad_{mrtrix,fsl} option.') - cmdline.add_description('Note that if the -mask command-line option is not specified, the MRtrix3 command dwi2mask will automatically be called to ' - 'derive a binary mask image to be used for streamline seeding and to constrain streamline propagation. ' - 'More information on mask derivation from DWI data can be found at the following link: \n' - 'https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/dwi_preprocessing/masking.html') - cmdline.add_citation('Jeurissen, B.; Leemans, A.; Sijbers, J. Automated correction of improperly rotated diffusion gradient orientations in diffusion weighted MRI. Medical Image Analysis, 2014, 18(7), 953-962') - cmdline.add_argument('input', help='The input DWI series to be checked') - cmdline.add_argument('-mask', metavar='image', help='Provide a mask image within which to seed & constrain tracking') - cmdline.add_argument('-number', type=int, default=10000, help='Set the number of tracks to generate for each test') - - app.add_dwgrad_export_options(cmdline) - app.add_dwgrad_import_options(cmdline) - - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, image, matrix, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - - image_dimensions = image.Header(path.from_user(app.ARGS.input, False)).size() - if len(image_dimensions) != 4: - raise MRtrixError('Input image must be a 4D image') - if min(image_dimensions) == 1: - raise MRtrixError('Cannot perform tractography on an image with a unity dimension') - num_volumes = image_dimensions[3] - - app.make_scratch_dir() - - # Make sure the image data can be memory-mapped - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('data.mif') + ' -strides 0,0,0,1 -datatype float32') - - if app.ARGS.grad: - shutil.copy(path.from_user(app.ARGS.grad, False), path.to_scratch('grad.b', False)) - elif app.ARGS.fslgrad: - shutil.copy(path.from_user(app.ARGS.fslgrad[0], False), path.to_scratch('bvecs', False)) - shutil.copy(path.from_user(app.ARGS.fslgrad[1], False), path.to_scratch('bvals', False)) - if app.ARGS.mask: - run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') - - app.goto_scratch_dir() - - # Make sure we have gradient table stored externally to header in both MRtrix and FSL formats - if not os.path.isfile('grad.b'): - if os.path.isfile('bvecs'): - run.command('mrinfo data.mif -fslgrad bvecs bvals -export_grad_mrtrix grad.b') - else: - run.command('mrinfo data.mif -export_grad_mrtrix grad.b') - - if not os.path.isfile('bvecs'): - if os.path.isfile('grad.b'): - run.command('mrinfo data.mif -grad grad.b -export_grad_fsl bvecs bvals') - else: - run.command('mrinfo data.mif -export_grad_fsl bvecs bvals') - - # Import both of these into local memory - grad_mrtrix = matrix.load_matrix('grad.b') - grad_fsl = matrix.load_matrix('bvecs') - # Is our gradient table of the correct length? 
- if not len(grad_mrtrix) == num_volumes: - raise MRtrixError('Number of entries in gradient table does not match number of DWI volumes') - if not len(grad_fsl) == 3 or not len(grad_fsl[0]) == num_volumes: - raise MRtrixError('Internal error (inconsistent gradient table storage)') - - - # Generate a brain mask if we weren't provided with one - # Note that gradient table must be explicitly loaded, since there may not - # be one in the image header (user may be relying on -grad or -fslgrad input options) - if not os.path.exists('mask.mif'): - run.command('dwi2mask ' + CONFIG['Dwi2maskAlgorithm'] + ' data.mif mask.mif -grad grad.b') - - # How many tracks are we going to generate? - number_option = ' -select ' + str(app.ARGS.number) - - - # What variations of gradient errors can we conceive? - - # Done: - # * Has an axis been flipped? (none, 0, 1, 2) - # * Have axes been swapped? (012 021 102 120 201 210) - # * For both flips & swaps, it could occur in either scanner or image space... - - # To do: - # * Have the gradients been defined with respect to image space rather than scanner space? - # * After conversion to gradients in image space, are they _then_ defined with respect to scanner space? - # (should the above two be tested independently from the axis flips / permutations?) - - - axis_flips = [ 'none', 0, 1, 2 ] - axis_permutations = [ ( 0, 1, 2 ), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0) ] - grad_basis = [ 'scanner', 'image' ] - total_tests = len(axis_flips) * len(axis_permutations) * len(grad_basis) - - - # List where the first element is the mean length - lengths = [ ] - - progress = app.ProgressBar('Testing gradient table alterations (0 of ' + str(total_tests) + ')', total_tests) - - for flip in axis_flips: - for permutation in axis_permutations: - for basis in grad_basis: - - suffix = '_flip' + str(flip) + '_perm' + ''.join(str(item) for item in permutation) + '_' + basis - - if basis == 'scanner': - - grad = copy.copy(grad_mrtrix) - - # Don't do anything if there aren't any axis flips occurring (flip == 'none') - if isinstance(flip, numbers.Number): - multiplier = [ 1.0, 1.0, 1.0, 1.0 ] - multiplier[flip] = -1.0 - grad = [ [ r*m for r,m in zip(row, multiplier) ] for row in grad ] - - grad = [ [ row[permutation[0]], row[permutation[1]], row[permutation[2]], row[3] ] for row in grad ] - - # Create the gradient table file - grad_path = 'grad' + suffix + '.b' - with open(grad_path, 'w', encoding='utf-8') as grad_file: - for line in grad: - grad_file.write (','.join([str(v) for v in line]) + '\n') - - grad_option = ' -grad ' + grad_path - - elif basis == 'image': - - grad = copy.copy(grad_fsl) - - if isinstance(flip, numbers.Number): - grad[flip] = [ -v for v in grad[flip] ] - - grad = [ grad[permutation[0]], grad[permutation[1]], grad[permutation[2]] ] - - grad_path = 'bvecs' + suffix - with open(grad_path, 'w', encoding='utf-8') as bvecs_file: - for line in grad: - bvecs_file.write (' '.join([str(v) for v in line]) + '\n') - - grad_option = ' -fslgrad ' + grad_path + ' bvals' - - # Run the tracking experiment - run.command('tckgen -algorithm tensor_det data.mif' + grad_option + ' -seed_image mask.mif -mask mask.mif' + number_option + ' -minlength 0 -downsample 5 tracks' + suffix + '.tck') - - # Get the mean track length - meanlength=float(run.command('tckstats tracks' + suffix + '.tck -output mean -ignorezero').stdout) - - # Add to the database - lengths.append([meanlength,flip,permutation,basis]) - - # Increament the progress bar - progress.increment('Testing gradient 
table alterations (' + str(len(lengths)) + ' of ' + str(total_tests) + ')') - - progress.done() - - # Sort the list to find the best gradient configuration(s) - lengths.sort() - lengths.reverse() - - - # Provide a printout of the mean streamline length of each gradient table manipulation - sys.stderr.write('Mean length Axis flipped Axis permutations Axis basis\n') - for line in lengths: - if isinstance(line[1], numbers.Number): - flip_str = "{:4d}".format(line[1]) - else: - flip_str = line[1] - sys.stderr.write("{:5.2f}".format(line[0]) + ' ' + flip_str + ' ' + str(line[2]) + ' ' + line[3] + '\n') - - - # If requested, extract what has been detected as the best gradient table, and - # export it in the format requested by the user - grad_export_option = app.read_dwgrad_export_options() - if grad_export_option: - best = lengths[0] - suffix = '_flip' + str(best[1]) + '_perm' + ''.join(str(item) for item in best[2]) + '_' + best[3] - if best[3] == 'scanner': - grad_import_option = ' -grad grad' + suffix + '.b' - elif best[3] == 'image': - grad_import_option = ' -fslgrad bvecs' + suffix + ' bvals' - run.command('mrinfo data.mif' + grad_import_option + grad_export_option, force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwigradcheck/execute.py b/python/mrtrix3/dwigradcheck/execute.py new file mode 100644 index 0000000000..17db3b6350 --- /dev/null +++ b/python/mrtrix3/dwigradcheck/execute.py @@ -0,0 +1,182 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
+ +import copy, numbers, os, shutil, sys +from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module +from mrtrix3 import app, image, matrix, path, run #pylint: disable=no-name-in-module + +def execute(): #pylint: disable=unused-variable + + image_dimensions = image.Header(path.from_user(app.ARGS.input, False)).size() + if len(image_dimensions) != 4: + raise MRtrixError('Input image must be a 4D image') + if min(image_dimensions) == 1: + raise MRtrixError('Cannot perform tractography on an image with a unity dimension') + num_volumes = image_dimensions[3] + + app.make_scratch_dir() + + # Make sure the image data can be memory-mapped + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('data.mif') + ' -strides 0,0,0,1 -datatype float32') + + if app.ARGS.grad: + shutil.copy(path.from_user(app.ARGS.grad, False), path.to_scratch('grad.b', False)) + elif app.ARGS.fslgrad: + shutil.copy(path.from_user(app.ARGS.fslgrad[0], False), path.to_scratch('bvecs', False)) + shutil.copy(path.from_user(app.ARGS.fslgrad[1], False), path.to_scratch('bvals', False)) + if app.ARGS.mask: + run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') + + app.goto_scratch_dir() + + # Make sure we have gradient table stored externally to header in both MRtrix and FSL formats + if not os.path.isfile('grad.b'): + if os.path.isfile('bvecs'): + run.command('mrinfo data.mif -fslgrad bvecs bvals -export_grad_mrtrix grad.b') + else: + run.command('mrinfo data.mif -export_grad_mrtrix grad.b') + + if not os.path.isfile('bvecs'): + if os.path.isfile('grad.b'): + run.command('mrinfo data.mif -grad grad.b -export_grad_fsl bvecs bvals') + else: + run.command('mrinfo data.mif -export_grad_fsl bvecs bvals') + + # Import both of these into local memory + grad_mrtrix = matrix.load_matrix('grad.b') + grad_fsl = matrix.load_matrix('bvecs') + # Is our gradient table of the correct length? + if not len(grad_mrtrix) == num_volumes: + raise MRtrixError('Number of entries in gradient table does not match number of DWI volumes') + if not len(grad_fsl) == 3 or not len(grad_fsl[0]) == num_volumes: + raise MRtrixError('Internal error (inconsistent gradient table storage)') + + + # Generate a brain mask if we weren't provided with one + # Note that gradient table must be explicitly loaded, since there may not + # be one in the image header (user may be relying on -grad or -fslgrad input options) + if not os.path.exists('mask.mif'): + run.command('dwi2mask ' + CONFIG['Dwi2maskAlgorithm'] + ' data.mif mask.mif -grad grad.b') + + # How many tracks are we going to generate? + number_option = ' -select ' + str(app.ARGS.number) + + + # What variations of gradient errors can we conceive? + + # Done: + # * Has an axis been flipped? (none, 0, 1, 2) + # * Have axes been swapped? (012 021 102 120 201 210) + # * For both flips & swaps, it could occur in either scanner or image space... + + # To do: + # * Have the gradients been defined with respect to image space rather than scanner space? + # * After conversion to gradients in image space, are they _then_ defined with respect to scanner space? + # (should the above two be tested independently from the axis flips / permutations?) 
+ + + axis_flips = [ 'none', 0, 1, 2 ] + axis_permutations = [ ( 0, 1, 2 ), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0) ] + grad_basis = [ 'scanner', 'image' ] + total_tests = len(axis_flips) * len(axis_permutations) * len(grad_basis) + + + # List where the first element is the mean length + lengths = [ ] + + progress = app.ProgressBar('Testing gradient table alterations (0 of ' + str(total_tests) + ')', total_tests) + + for flip in axis_flips: + for permutation in axis_permutations: + for basis in grad_basis: + + suffix = '_flip' + str(flip) + '_perm' + ''.join(str(item) for item in permutation) + '_' + basis + + if basis == 'scanner': + + grad = copy.copy(grad_mrtrix) + + # Don't do anything if there aren't any axis flips occurring (flip == 'none') + if isinstance(flip, numbers.Number): + multiplier = [ 1.0, 1.0, 1.0, 1.0 ] + multiplier[flip] = -1.0 + grad = [ [ r*m for r,m in zip(row, multiplier) ] for row in grad ] + + grad = [ [ row[permutation[0]], row[permutation[1]], row[permutation[2]], row[3] ] for row in grad ] + + # Create the gradient table file + grad_path = 'grad' + suffix + '.b' + with open(grad_path, 'w', encoding='utf-8') as grad_file: + for line in grad: + grad_file.write (','.join([str(v) for v in line]) + '\n') + + grad_option = ' -grad ' + grad_path + + elif basis == 'image': + + grad = copy.copy(grad_fsl) + + if isinstance(flip, numbers.Number): + grad[flip] = [ -v for v in grad[flip] ] + + grad = [ grad[permutation[0]], grad[permutation[1]], grad[permutation[2]] ] + + grad_path = 'bvecs' + suffix + with open(grad_path, 'w', encoding='utf-8') as bvecs_file: + for line in grad: + bvecs_file.write (' '.join([str(v) for v in line]) + '\n') + + grad_option = ' -fslgrad ' + grad_path + ' bvals' + + # Run the tracking experiment + run.command('tckgen -algorithm tensor_det data.mif' + grad_option + ' -seed_image mask.mif -mask mask.mif' + number_option + ' -minlength 0 -downsample 5 tracks' + suffix + '.tck') + + # Get the mean track length + meanlength=float(run.command('tckstats tracks' + suffix + '.tck -output mean -ignorezero').stdout) + + # Add to the database + lengths.append([meanlength,flip,permutation,basis]) + + # Increment the progress bar + progress.increment('Testing gradient table alterations (' + str(len(lengths)) + ' of ' + str(total_tests) + ')') + + progress.done() + + # Sort the list to find the best gradient configuration(s) + lengths.sort() + lengths.reverse() + + + # Provide a printout of the mean streamline length of each gradient table manipulation + sys.stderr.write('Mean length Axis flipped Axis permutations Axis basis\n') + for line in lengths: + if isinstance(line[1], numbers.Number): + flip_str = "{:4d}".format(line[1]) + else: + flip_str = line[1] + sys.stderr.write("{:5.2f}".format(line[0]) + ' ' + flip_str + ' ' + str(line[2]) + ' ' + line[3] + '\n') + + + # If requested, extract what has been detected as the best gradient table, and + # export it in the format requested by the user + grad_export_option = app.read_dwgrad_export_options() + if grad_export_option: + best = lengths[0] + suffix = '_flip' + str(best[1]) + '_perm' + ''.join(str(item) for item in best[2]) + '_' + best[3] + if best[3] == 'scanner': + grad_import_option = ' -grad grad' + suffix + '.b' + elif best[3] == 'image': + grad_import_option = ' -fslgrad bvecs' + suffix + ' bvals' + run.command('mrinfo data.mif' + grad_import_option + grad_export_option, force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwigradcheck/usage.py 
b/python/mrtrix3/dwigradcheck/usage.py new file mode 100644 index 0000000000..33cbbfe095 --- /dev/null +++ b/python/mrtrix3/dwigradcheck/usage.py @@ -0,0 +1,32 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app, _version #pylint: disable=no-name-in-module + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Check the orientation of the diffusion gradient table') + cmdline.add_description('Note that the corrected gradient table can be output using the -export_grad_{mrtrix,fsl} option.') + cmdline.add_description('Note that if the -mask command-line option is not specified, the MRtrix3 command dwi2mask will automatically be called to ' + 'derive a binary mask image to be used for streamline seeding and to constrain streamline propagation. ' + 'More information on mask derivation from DWI data can be found at the following link: \n' + 'https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/dwi_preprocessing/masking.html') + cmdline.add_citation('Jeurissen, B.; Leemans, A.; Sijbers, J. Automated correction of improperly rotated diffusion gradient orientations in diffusion weighted MRI. Medical Image Analysis, 2014, 18(7), 953-962') + cmdline.add_argument('input', help='The input DWI series to be checked') + cmdline.add_argument('-mask', metavar='image', help='Provide a mask image within which to seed & constrain tracking') + cmdline.add_argument('-number', type=int, default=10000, help='Set the number of tracks to generate for each test') + + app.add_dwgrad_export_options(cmdline) + app.add_dwgrad_import_options(cmdline) diff --git a/python/mrtrix3/dwinormalise/__init__.py b/python/mrtrix3/dwinormalise/__init__.py index 0225cdde37..e69de29bb2 100644 --- a/python/mrtrix3/dwinormalise/__init__.py +++ b/python/mrtrix3/dwinormalise/__init__.py @@ -1,38 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import algorithm #pylint: disable=no-name-in-module, import-outside-toplevel - cmdline.set_author('Robert E. 
Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Perform various forms of intensity normalisation of DWIs') - cmdline.add_description('This script provides access to different techniques for globally scaling the intensity of diffusion-weighted images. ' - 'The different algorithms have different purposes, and different requirements with respect to the data with which they must be provided & will produce as output. ' - 'Further information on the individual algorithms available can be accessed via their individual help pages; eg. "dwinormalise group -help".') - - # Import the command-line settings for all algorithms found in the relevant directory - algorithm.usage(cmdline) - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import algorithm, app #pylint: disable=no-name-in-module, import-outside-toplevel - - # Find out which algorithm the user has requested - alg = algorithm.get(app.ARGS.algorithm) - alg.check_output_paths() - - # From here, the script splits depending on what algorithm is being used - alg.execute() diff --git a/python/mrtrix3/dwinormalise/execute.py b/python/mrtrix3/dwinormalise/execute.py new file mode 100644 index 0000000000..7a7326144f --- /dev/null +++ b/python/mrtrix3/dwinormalise/execute.py @@ -0,0 +1,25 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import algorithm, app #pylint: disable=no-name-in-module + +def execute(): #pylint: disable=unused-variable + + # Find out which algorithm the user has requested + alg = algorithm.get(app.ARGS.algorithm) + alg.check_output_paths() + + # From here, the script splits depending on what algorithm is being used + alg.execute() diff --git a/python/mrtrix3/dwinormalise/group/__init__.py b/python/mrtrix3/dwinormalise/group/__init__.py index 77260dfbc2..e69de29bb2 100644 --- a/python/mrtrix3/dwinormalise/group/__init__.py +++ b/python/mrtrix3/dwinormalise/group/__init__.py @@ -1,122 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. 
- -import os, shlex -from mrtrix3 import MRtrixError -from mrtrix3 import app, image, path, run - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('group', parents=[base_parser]) - parser.set_author('David Raffelt (david.raffelt@florey.edu.au)') - parser.set_synopsis('Performs a global DWI intensity normalisation on a group of subjects using the median b=0 white matter value as the reference') - parser.add_description('The white matter mask is estimated from a population average FA template then warped back to each subject to perform the intensity normalisation. Note that bias field correction should be performed prior to this step.') - parser.add_description('All input DWI files must contain an embedded diffusion gradient table; for this reason, these images must all be in either .mif or .mif.gz format.') - parser.add_argument('input_dir', help='The input directory containing all DWI images') - parser.add_argument('mask_dir', help='Input directory containing brain masks, corresponding to one per input image (with the same file name prefix)') - parser.add_argument('output_dir', help='The output directory containing all of the intensity normalised DWI images') - parser.add_argument('fa_template', help='The output population specific FA template, which is threshold to estimate a white matter mask') - parser.add_argument('wm_mask', help='The output white matter mask (in template space), used to estimate the median b=0 white matter value for normalisation') - parser.add_argument('-fa_threshold', default='0.4', help='The threshold applied to the Fractional Anisotropy group template used to derive an approximate white matter mask (default: 0.4)') - - - -def check_output_paths(): #pylint: disable=unused-variable - app.check_output_path(app.ARGS.output_dir) - app.check_output_path(app.ARGS.fa_template) - app.check_output_path(app.ARGS.wm_mask) - - - -def execute(): #pylint: disable=unused-variable - - class Input: - def __init__(self, filename, prefix, mask_filename = ''): - self.filename = filename - self.prefix = prefix - self.mask_filename = mask_filename - - - input_dir = path.from_user(app.ARGS.input_dir, False) - if not os.path.exists(input_dir): - raise MRtrixError('input directory not found') - in_files = path.all_in_dir(input_dir, dir_path=False) - if len(in_files) <= 1: - raise MRtrixError('not enough images found in input directory: more than one image is needed to perform a group-wise intensity normalisation') - - app.console('performing global intensity normalisation on ' + str(len(in_files)) + ' input images') - - mask_dir = path.from_user(app.ARGS.mask_dir, False) - if not os.path.exists(mask_dir): - raise MRtrixError('mask directory not found') - mask_files = path.all_in_dir(mask_dir, dir_path=False) - if len(mask_files) != len(in_files): - raise MRtrixError('the number of images in the mask directory does not equal the number of images in the input directory') - mask_common_postfix = os.path.commonprefix([i[::-1] for i in mask_files])[::-1] - mask_prefixes = [] - for mask_file in mask_files: - mask_prefixes.append(mask_file.split(mask_common_postfix)[0]) - - common_postfix = os.path.commonprefix([i[::-1] for i in in_files])[::-1] - input_list = [] - for i in in_files: - subj_prefix = i.split(common_postfix)[0] - if subj_prefix not in mask_prefixes: - raise MRtrixError ('no matching mask image was found for input image ' + i) - image.check_3d_nonunity(os.path.join(input_dir, i)) - index = mask_prefixes.index(subj_prefix) - 
input_list.append(Input(i, subj_prefix, mask_files[index])) - - app.make_scratch_dir() - app.goto_scratch_dir() - - path.make_dir('fa') - progress = app.ProgressBar('Computing FA images', len(input_list)) - for i in input_list: - run.command('dwi2tensor ' + shlex.quote(os.path.join(input_dir, i.filename)) + ' -mask ' + shlex.quote(os.path.join(mask_dir, i.mask_filename)) + ' - | tensor2metric - -fa ' + os.path.join('fa', i.prefix + '.mif')) - progress.increment() - progress.done() - - app.console('Generating FA population template') - run.command('population_template fa fa_template.mif' - + ' -mask_dir ' + mask_dir - + ' -type rigid_affine_nonlinear' - + ' -rigid_scale 0.25,0.5,0.8,1.0' - + ' -affine_scale 0.7,0.8,1.0,1.0' - + ' -nl_scale 0.5,0.75,1.0,1.0,1.0' - + ' -nl_niter 5,5,5,5,5' - + ' -warp_dir warps' - + ' -linear_no_pause' - + ' -scratch population_template' - + ('' if app.DO_CLEANUP else ' -nocleanup')) - - app.console('Generating WM mask in template space') - run.command('mrthreshold fa_template.mif -abs ' + app.ARGS.fa_threshold + ' template_wm_mask.mif') - - progress = app.ProgressBar('Intensity normalising subject images', len(input_list)) - path.make_dir(path.from_user(app.ARGS.output_dir, False)) - path.make_dir('wm_mask_warped') - for i in input_list: - run.command('mrtransform template_wm_mask.mif -interp nearest -warp_full ' + os.path.join('warps', i.prefix + '.mif') + ' ' + os.path.join('wm_mask_warped', i.prefix + '.mif') + ' -from 2 -template ' + os.path.join('fa', i.prefix + '.mif')) - run.command('dwinormalise manual ' + shlex.quote(os.path.join(input_dir, i.filename)) + ' ' + os.path.join('wm_mask_warped', i.prefix + '.mif') + ' temp.mif') - run.command('mrconvert temp.mif ' + path.from_user(os.path.join(app.ARGS.output_dir, i.filename)), mrconvert_keyval=path.from_user(os.path.join(input_dir, i.filename), False), force=app.FORCE_OVERWRITE) - os.remove('temp.mif') - progress.increment() - progress.done() - - app.console('Exporting template images to user locations') - run.command('mrconvert template_wm_mask.mif ' + path.from_user(app.ARGS.wm_mask), mrconvert_keyval='NULL', force=app.FORCE_OVERWRITE) - run.command('mrconvert fa_template.mif ' + path.from_user(app.ARGS.fa_template), mrconvert_keyval='NULL', force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwinormalise/group/check_output_paths.py b/python/mrtrix3/dwinormalise/group/check_output_paths.py new file mode 100644 index 0000000000..cb280f6e6e --- /dev/null +++ b/python/mrtrix3/dwinormalise/group/check_output_paths.py @@ -0,0 +1,21 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
+ +from mrtrix3 import app + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.output_dir) + app.check_output_path(app.ARGS.fa_template) + app.check_output_path(app.ARGS.wm_mask) diff --git a/python/mrtrix3/dwinormalise/group/execute.py b/python/mrtrix3/dwinormalise/group/execute.py new file mode 100644 index 0000000000..9fd6b7ee5c --- /dev/null +++ b/python/mrtrix3/dwinormalise/group/execute.py @@ -0,0 +1,98 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import os, shlex +from mrtrix3 import MRtrixError +from mrtrix3 import app, image, path, run + +def execute(): #pylint: disable=unused-variable + + class Input: + def __init__(self, filename, prefix, mask_filename = ''): + self.filename = filename + self.prefix = prefix + self.mask_filename = mask_filename + + + input_dir = path.from_user(app.ARGS.input_dir, False) + if not os.path.exists(input_dir): + raise MRtrixError('input directory not found') + in_files = path.all_in_dir(input_dir, dir_path=False) + if len(in_files) <= 1: + raise MRtrixError('not enough images found in input directory: more than one image is needed to perform a group-wise intensity normalisation') + + app.console('performing global intensity normalisation on ' + str(len(in_files)) + ' input images') + + mask_dir = path.from_user(app.ARGS.mask_dir, False) + if not os.path.exists(mask_dir): + raise MRtrixError('mask directory not found') + mask_files = path.all_in_dir(mask_dir, dir_path=False) + if len(mask_files) != len(in_files): + raise MRtrixError('the number of images in the mask directory does not equal the number of images in the input directory') + mask_common_postfix = os.path.commonprefix([i[::-1] for i in mask_files])[::-1] + mask_prefixes = [] + for mask_file in mask_files: + mask_prefixes.append(mask_file.split(mask_common_postfix)[0]) + + common_postfix = os.path.commonprefix([i[::-1] for i in in_files])[::-1] + input_list = [] + for i in in_files: + subj_prefix = i.split(common_postfix)[0] + if subj_prefix not in mask_prefixes: + raise MRtrixError ('no matching mask image was found for input image ' + i) + image.check_3d_nonunity(os.path.join(input_dir, i)) + index = mask_prefixes.index(subj_prefix) + input_list.append(Input(i, subj_prefix, mask_files[index])) + + app.make_scratch_dir() + app.goto_scratch_dir() + + path.make_dir('fa') + progress = app.ProgressBar('Computing FA images', len(input_list)) + for i in input_list: + run.command('dwi2tensor ' + shlex.quote(os.path.join(input_dir, i.filename)) + ' -mask ' + shlex.quote(os.path.join(mask_dir, i.mask_filename)) + ' - | tensor2metric - -fa ' + os.path.join('fa', i.prefix + '.mif')) + progress.increment() + progress.done() + + app.console('Generating FA population template') + run.command('population_template fa fa_template.mif' + + ' -mask_dir ' + mask_dir + + ' -type rigid_affine_nonlinear' + + ' -rigid_scale 0.25,0.5,0.8,1.0' + + 
' -affine_scale 0.7,0.8,1.0,1.0' + + ' -nl_scale 0.5,0.75,1.0,1.0,1.0' + + ' -nl_niter 5,5,5,5,5' + + ' -warp_dir warps' + + ' -linear_no_pause' + + ' -scratch population_template' + + ('' if app.DO_CLEANUP else ' -nocleanup')) + + app.console('Generating WM mask in template space') + run.command('mrthreshold fa_template.mif -abs ' + app.ARGS.fa_threshold + ' template_wm_mask.mif') + + progress = app.ProgressBar('Intensity normalising subject images', len(input_list)) + path.make_dir(path.from_user(app.ARGS.output_dir, False)) + path.make_dir('wm_mask_warped') + for i in input_list: + run.command('mrtransform template_wm_mask.mif -interp nearest -warp_full ' + os.path.join('warps', i.prefix + '.mif') + ' ' + os.path.join('wm_mask_warped', i.prefix + '.mif') + ' -from 2 -template ' + os.path.join('fa', i.prefix + '.mif')) + run.command('dwinormalise manual ' + shlex.quote(os.path.join(input_dir, i.filename)) + ' ' + os.path.join('wm_mask_warped', i.prefix + '.mif') + ' temp.mif') + run.command('mrconvert temp.mif ' + path.from_user(os.path.join(app.ARGS.output_dir, i.filename)), mrconvert_keyval=path.from_user(os.path.join(input_dir, i.filename), False), force=app.FORCE_OVERWRITE) + os.remove('temp.mif') + progress.increment() + progress.done() + + app.console('Exporting template images to user locations') + run.command('mrconvert template_wm_mask.mif ' + path.from_user(app.ARGS.wm_mask), mrconvert_keyval='NULL', force=app.FORCE_OVERWRITE) + run.command('mrconvert fa_template.mif ' + path.from_user(app.ARGS.fa_template), mrconvert_keyval='NULL', force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwinormalise/group/usage.py b/python/mrtrix3/dwinormalise/group/usage.py new file mode 100644 index 0000000000..99fcb096ad --- /dev/null +++ b/python/mrtrix3/dwinormalise/group/usage.py @@ -0,0 +1,27 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('group', parents=[base_parser]) + parser.set_author('David Raffelt (david.raffelt@florey.edu.au)') + parser.set_synopsis('Performs a global DWI intensity normalisation on a group of subjects using the median b=0 white matter value as the reference') + parser.add_description('The white matter mask is estimated from a population average FA template then warped back to each subject to perform the intensity normalisation. 
Note that bias field correction should be performed prior to this step.') + parser.add_description('All input DWI files must contain an embedded diffusion gradient table; for this reason, these images must all be in either .mif or .mif.gz format.') + parser.add_argument('input_dir', help='The input directory containing all DWI images') + parser.add_argument('mask_dir', help='Input directory containing brain masks, corresponding to one per input image (with the same file name prefix)') + parser.add_argument('output_dir', help='The output directory containing all of the intensity normalised DWI images') + parser.add_argument('fa_template', help='The output population specific FA template, which is thresholded to estimate a white matter mask') + parser.add_argument('wm_mask', help='The output white matter mask (in template space), used to estimate the median b=0 white matter value for normalisation') + parser.add_argument('-fa_threshold', default='0.4', help='The threshold applied to the Fractional Anisotropy group template used to derive an approximate white matter mask (default: 0.4)') diff --git a/python/mrtrix3/dwinormalise/manual/__init__.py b/python/mrtrix3/dwinormalise/manual/__init__.py index 14b290d56b..46406e7fdf 100644 --- a/python/mrtrix3/dwinormalise/manual/__init__.py +++ b/python/mrtrix3/dwinormalise/manual/__init__.py @@ -13,62 +13,4 @@ # # For more details, see http://www.mrtrix.org/. -import math -from mrtrix3 import MRtrixError -from mrtrix3 import app, path, run - - -DEFAULT_TARGET_INTENSITY=1000 - - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('manual', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au) and David Raffelt (david.raffelt@florey.edu.au)') - parser.set_synopsis('Intensity normalise a DWI series based on the b=0 signal within a manually-supplied supplied mask') - parser.add_argument('input_dwi', help='The input DWI series') - parser.add_argument('input_mask', help='The mask within which a reference b=0 intensity will be sampled') - parser.add_argument('output_dwi', help='The output intensity-normalised DWI series') - parser.add_argument('-intensity', type=float, default=DEFAULT_TARGET_INTENSITY, help='Normalise the b=0 signal to a specified value (Default: ' + str(DEFAULT_TARGET_INTENSITY) + ')') - parser.add_argument('-percentile', type=int, help='Define the percentile of the b=0 image intensties within the mask used for normalisation; if this option is not supplied then the median value (50th percentile) will be normalised to the desired intensity value') - app.add_dwgrad_import_options(parser) - - - -def check_output_paths(): #pylint: disable=unused-variable - app.check_output_path(app.ARGS.output_dwi) - - - -def execute(): #pylint: disable=unused-variable - - grad_option = '' - if app.ARGS.grad: - grad_option = ' -grad ' + path.from_user(app.ARGS.grad) - elif app.ARGS.fslgrad: - grad_option = ' -fslgrad ' + path.from_user(app.ARGS.fslgrad[0]) + ' ' + path.from_user(app.ARGS.fslgrad[1]) - - if app.ARGS.percentile: - if app.ARGS.percentile < 0.0 or app.ARGS.percentile > 100.0: - raise MRtrixError('-percentile value must be between 0 and 100') - intensities = [float(value) for value in run.command('dwiextract ' + path.from_user(app.ARGS.input_dwi) + grad_option + ' -bzero - | ' + \ - 'mrmath - mean - -axis 3 | ' + \ - 'mrdump - -mask ' + path.from_user(app.ARGS.input_mask)).stdout.splitlines()] - intensities = sorted(intensities) - float_index = 0.01 * 
app.ARGS.percentile * len(intensities) - lower_index = int(math.floor(float_index)) - if app.ARGS.percentile == 100.0: - reference_value = intensities[-1] - else: - interp_mu = float_index - float(lower_index) - reference_value = (1.0-interp_mu)*intensities[lower_index] + interp_mu*intensities[lower_index+1] - else: - reference_value = float(run.command('dwiextract ' + path.from_user(app.ARGS.input_dwi) + grad_option + ' -bzero - | ' + \ - 'mrmath - mean - -axis 3 | ' + \ - 'mrstats - -mask ' + path.from_user(app.ARGS.input_mask) + ' -output median').stdout) - multiplier = app.ARGS.intensity / reference_value - - run.command('mrcalc ' + path.from_user(app.ARGS.input_dwi) + ' ' + str(multiplier) + ' -mult - | ' + \ - 'mrconvert - ' + path.from_user(app.ARGS.output_dwi) + grad_option, \ - mrconvert_keyval=path.from_user(app.ARGS.input_dwi, False), \ - force=app.FORCE_OVERWRITE) +DEFAULT_TARGET_INTENSITY = 1000 diff --git a/python/mrtrix3/dwinormalise/manual/check_output_paths.py b/python/mrtrix3/dwinormalise/manual/check_output_paths.py new file mode 100644 index 0000000000..d8a47e60c1 --- /dev/null +++ b/python/mrtrix3/dwinormalise/manual/check_output_paths.py @@ -0,0 +1,19 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.output_dwi) diff --git a/python/mrtrix3/dwinormalise/manual/execute.py b/python/mrtrix3/dwinormalise/manual/execute.py new file mode 100644 index 0000000000..0ef2e36248 --- /dev/null +++ b/python/mrtrix3/dwinormalise/manual/execute.py @@ -0,0 +1,51 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
+ +import math +from mrtrix3 import MRtrixError +from mrtrix3 import app, path, run + +def execute(): #pylint: disable=unused-variable + + grad_option = '' + if app.ARGS.grad: + grad_option = ' -grad ' + path.from_user(app.ARGS.grad) + elif app.ARGS.fslgrad: + grad_option = ' -fslgrad ' + path.from_user(app.ARGS.fslgrad[0]) + ' ' + path.from_user(app.ARGS.fslgrad[1]) + + if app.ARGS.percentile: + if app.ARGS.percentile < 0.0 or app.ARGS.percentile > 100.0: + raise MRtrixError('-percentile value must be between 0 and 100') + intensities = [float(value) for value in run.command('dwiextract ' + path.from_user(app.ARGS.input_dwi) + grad_option + ' -bzero - | ' + \ + 'mrmath - mean - -axis 3 | ' + \ + 'mrdump - -mask ' + path.from_user(app.ARGS.input_mask)).stdout.splitlines()] + intensities = sorted(intensities) + float_index = 0.01 * app.ARGS.percentile * len(intensities) + lower_index = int(math.floor(float_index)) + if app.ARGS.percentile == 100.0: + reference_value = intensities[-1] + else: + interp_mu = float_index - float(lower_index) + reference_value = (1.0-interp_mu)*intensities[lower_index] + interp_mu*intensities[lower_index+1] + else: + reference_value = float(run.command('dwiextract ' + path.from_user(app.ARGS.input_dwi) + grad_option + ' -bzero - | ' + \ + 'mrmath - mean - -axis 3 | ' + \ + 'mrstats - -mask ' + path.from_user(app.ARGS.input_mask) + ' -output median').stdout) + multiplier = app.ARGS.intensity / reference_value + + run.command('mrcalc ' + path.from_user(app.ARGS.input_dwi) + ' ' + str(multiplier) + ' -mult - | ' + \ + 'mrconvert - ' + path.from_user(app.ARGS.output_dwi) + grad_option, \ + mrconvert_keyval=path.from_user(app.ARGS.input_dwi, False), \ + force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwinormalise/manual/usage.py b/python/mrtrix3/dwinormalise/manual/usage.py new file mode 100644 index 0000000000..7e688c7fbc --- /dev/null +++ b/python/mrtrix3/dwinormalise/manual/usage.py @@ -0,0 +1,28 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app +from . import DEFAULT_TARGET_INTENSITY + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('manual', parents=[base_parser]) + parser.set_author('Robert E. 
Smith (robert.smith@florey.edu.au) and David Raffelt (david.raffelt@florey.edu.au)') + parser.set_synopsis('Intensity normalise a DWI series based on the b=0 signal within a manually-supplied mask') + parser.add_argument('input_dwi', help='The input DWI series') + parser.add_argument('input_mask', help='The mask within which a reference b=0 intensity will be sampled') + parser.add_argument('output_dwi', help='The output intensity-normalised DWI series') + parser.add_argument('-intensity', type=float, default=DEFAULT_TARGET_INTENSITY, help='Normalise the b=0 signal to a specified value (Default: ' + str(DEFAULT_TARGET_INTENSITY) + ')') + parser.add_argument('-percentile', type=int, help='Define the percentile of the b=0 image intensities within the mask used for normalisation; if this option is not supplied then the median value (50th percentile) will be normalised to the desired intensity value') + app.add_dwgrad_import_options(parser) diff --git a/python/mrtrix3/dwinormalise/mtnorm/__init__.py b/python/mrtrix3/dwinormalise/mtnorm/__init__.py index 7f9c5a7589..dc4d29ea41 100644 --- a/python/mrtrix3/dwinormalise/mtnorm/__init__.py +++ b/python/mrtrix3/dwinormalise/mtnorm/__init__.py @@ -13,163 +13,6 @@ # # For more details, see http://www.mrtrix.org/. -import math -from mrtrix3 import CONFIG, MRtrixError -from mrtrix3 import app, image, matrix, path, run - - REFERENCE_INTENSITY = 1000 - LMAXES_MULTI = [4, 0, 0] LMAXES_SINGLE = [4, 0] - - -def usage(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('mtnorm', parents=[base_parser]) - parser.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Arshiya Sangchooli (asangchooli@student.unimelb.edu.au)') - parser.set_synopsis('Normalise a DWI series to the estimated b=0 CSF intensity') - parser.add_description('This algorithm determines an appropriate global scaling factor to apply to a DWI series ' - 'such that after the scaling is applied, the b=0 CSF intensity corresponds to some ' - 'reference value (' + str(REFERENCE_INTENSITY) + ' by default).') - parser.add_description('The operation of this script is a subset of that performed by the script "dwibiasnormmask". ' - 'Many users may find that comprehensive solution preferable; this dwinormalise algorithm is ' - 'nevertheless provided to demonstrate specifically the global intensituy normalisation portion of that command.') - parser.add_description('The ODFs estimated within this optimisation procedure are by default of lower maximal spherical harmonic ' - 'degree than what would be advised for analysis. This is done for computational efficiency. This ' - 'behaviour can be modified through the -lmax command-line option.') - parser.add_citation('Jeurissen, B; Tournier, J-D; Dhollander, T; Connelly, A & Sijbers, J. ' - 'Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data. ' - 'NeuroImage, 2014, 103, 411-426') - parser.add_citation('Raffelt, D.; Dhollander, T.; Tournier, J.-D.; Tabbara, R.; Smith, R. E.; Pierre, E. & Connelly, A. ' - 'Bias Field Correction and Intensity Normalisation for Quantitative Analysis of Apparent Fibre Density. ' - 'In Proc. ISMRM, 2017, 26, 3541') - parser.add_citation('Dhollander, T.; Tabbara, R.; Rosnarho-Tornstrand, J.; Tournier, J.-D.; Raffelt, D. & Connelly, A. ' - 'Multi-tissue log-domain intensity and inhomogeneity normalisation for quantitative apparent fibre density. ' - 'In Proc. ISMRM, 2021, 29, 2472') - parser.add_citation('Dhollander, T.; Raffelt, D. 
& Connelly, A. ' - 'Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. ' - 'ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5') - parser.add_argument('input', help='The input DWI series') - parser.add_argument('output', help='The normalised DWI series') - options = parser.add_argument_group('Options specific to the "mtnorm" algorithm') - options.add_argument('-lmax', - metavar='values', - help='The maximum spherical harmonic degree for the estimated FODs (see Description); ' - 'defaults are "' + ','.join(str(item) for item in LMAXES_MULTI) + '" for multi-shell and "' + ','.join(str(item) for item in LMAXES_SINGLE) + '" for single-shell data)') - options.add_argument('-mask', - metavar='image', - help='Provide a mask image for relevant calculations ' - '(if not provided, the default dwi2mask algorithm will be used)') - options.add_argument('-reference', - type=float, - metavar='value', - default=REFERENCE_INTENSITY, - help='Set the target CSF b=0 intensity in the output DWI series ' - '(default: ' + str(REFERENCE_INTENSITY) + ')') - options.add_argument('-scale', - metavar='file', - help='Write the scaling factor applied to the DWI series to a text file') - app.add_dwgrad_import_options(parser) - - - -def check_output_paths(): #pylint: disable=unused-variable - app.check_output_path(app.ARGS.output) - - - -def execute(): #pylint: disable=unused-variable - - # Verify user inputs - lmax = None - if app.ARGS.lmax: - try: - lmax = [int(i) for i in app.ARGS.lmax.split(',')] - except ValueError as exc: - raise MRtrixError('Values provided to -lmax option must be a comma-separated list of integers') from exc - if any(value < 0 or value % 2 for value in lmax): - raise MRtrixError('lmax values must be non-negative even integers') - if len(lmax) not in [2, 3]: - raise MRtrixError('Length of lmax vector expected to be either 2 or 3') - if app.ARGS.reference <= 0.0: - raise MRtrixError('Reference intensity must be positive') - - grad_option = app.read_dwgrad_import_options() - - # Get input data into the scratch directory - app.make_scratch_dir() - run.command('mrconvert ' - + path.from_user(app.ARGS.input) - + ' ' - + path.to_scratch('input.mif') - + grad_option) - if app.ARGS.mask: - run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') - app.goto_scratch_dir() - - # Make sure we have a valid mask available - if app.ARGS.mask: - if not image.match('input.mif', 'mask.mif', up_to_dim=3): - raise MRtrixError('Provided mask image does not match input DWI') - else: - run.command('dwi2mask ' + CONFIG['Dwi2maskAlgorithm'] + ' input.mif mask.mif') - - # Determine whether we are working with single-shell or multi-shell data - bvalues = [ - int(round(float(value))) - for value in image.mrinfo('input.mif', 'shell_bvalues') \ - .strip().split()] - multishell = (len(bvalues) > 2) - if lmax is None: - lmax = LMAXES_MULTI if multishell else LMAXES_SINGLE - elif len(lmax) == 3 and not multishell: - raise MRtrixError('User specified 3 lmax values for three-tissue decomposition, but input DWI is not multi-shell') - - # RF estimation and multi-tissue CSD - class Tissue(object): #pylint: disable=useless-object-inheritance - def __init__(self, name): - self.name = name - self.tissue_rf = 'response_' + name + '.txt' - self.fod = 'FOD_' + name + '.mif' - self.fod_norm = 'FODnorm_' + name + '.mif' - - tissues = [Tissue('WM'), Tissue('GM'), Tissue('CSF')] - - 
run.command('dwi2response dhollander input.mif -mask mask.mif ' - + ' '.join(tissue.tissue_rf for tissue in tissues)) - - # Immediately remove GM if we can't deal with it - if not multishell: - app.cleanup(tissues[1].tissue_rf) - tissues = tissues[::2] - - run.command('dwi2fod msmt_csd input.mif' - + ' -lmax ' + ','.join(str(item) for item in lmax) - + ' ' - + ' '.join(tissue.tissue_rf + ' ' + tissue.fod - for tissue in tissues)) - - # Normalisation in brain mask - run.command('maskfilter mask.mif erode - |' - + ' mtnormalise -mask - -balanced' - + ' -check_factors factors.txt ' - + ' '.join(tissue.fod + ' ' + tissue.fod_norm - for tissue in tissues)) - app.cleanup([tissue.fod for tissue in tissues]) - app.cleanup([tissue.fod_norm for tissue in tissues]) - - csf_rf = matrix.load_matrix(tissues[-1].tissue_rf) - app.cleanup([tissue.tissue_rf for tissue in tissues]) - csf_rf_bzero_lzero = csf_rf[0][0] - balance_factors = matrix.load_vector('factors.txt') - app.cleanup('factors.txt') - csf_balance_factor = balance_factors[-1] - scale_multiplier = (app.ARGS.reference * math.sqrt(4.0*math.pi)) / (csf_rf_bzero_lzero / csf_balance_factor) - - run.command('mrcalc input.mif ' + str(scale_multiplier) + ' -mult - | ' - + 'mrconvert - ' + path.from_user(app.ARGS.output), - mrconvert_keyval=path.from_user(app.ARGS.input, False), - force=app.FORCE_OVERWRITE) - - if app.ARGS.scale: - matrix.save_vector(path.from_user(app.ARGS.scale, False), [scale_multiplier]) diff --git a/python/mrtrix3/dwinormalise/mtnorm/check_output_paths.py b/python/mrtrix3/dwinormalise/mtnorm/check_output_paths.py new file mode 100644 index 0000000000..bff0fa0f2b --- /dev/null +++ b/python/mrtrix3/dwinormalise/mtnorm/check_output_paths.py @@ -0,0 +1,19 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app + +def check_output_paths(): #pylint: disable=unused-variable + app.check_output_path(app.ARGS.output) diff --git a/python/mrtrix3/dwinormalise/mtnorm/execute.py b/python/mrtrix3/dwinormalise/mtnorm/execute.py new file mode 100644 index 0000000000..3c7580ce41 --- /dev/null +++ b/python/mrtrix3/dwinormalise/mtnorm/execute.py @@ -0,0 +1,116 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
+ +import math +from mrtrix3 import CONFIG, MRtrixError +from mrtrix3 import app, image, matrix, path, run + +from . import LMAXES_MULTI, LMAXES_SINGLE + +def execute(): #pylint: disable=unused-variable + + # Verify user inputs + lmax = None + if app.ARGS.lmax: + try: + lmax = [int(i) for i in app.ARGS.lmax.split(',')] + except ValueError as exc: + raise MRtrixError('Values provided to -lmax option must be a comma-separated list of integers') from exc + if any(value < 0 or value % 2 for value in lmax): + raise MRtrixError('lmax values must be non-negative even integers') + if len(lmax) not in [2, 3]: + raise MRtrixError('Length of lmax vector expected to be either 2 or 3') + if app.ARGS.reference <= 0.0: + raise MRtrixError('Reference intensity must be positive') + + grad_option = app.read_dwgrad_import_options() + + # Get input data into the scratch directory + app.make_scratch_dir() + run.command('mrconvert ' + + path.from_user(app.ARGS.input) + + ' ' + + path.to_scratch('input.mif') + + grad_option) + if app.ARGS.mask: + run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit') + app.goto_scratch_dir() + + # Make sure we have a valid mask available + if app.ARGS.mask: + if not image.match('input.mif', 'mask.mif', up_to_dim=3): + raise MRtrixError('Provided mask image does not match input DWI') + else: + run.command('dwi2mask ' + CONFIG['Dwi2maskAlgorithm'] + ' input.mif mask.mif') + + # Determine whether we are working with single-shell or multi-shell data + bvalues = [ + int(round(float(value))) + for value in image.mrinfo('input.mif', 'shell_bvalues') \ + .strip().split()] + multishell = (len(bvalues) > 2) + if lmax is None: + lmax = LMAXES_MULTI if multishell else LMAXES_SINGLE + elif len(lmax) == 3 and not multishell: + raise MRtrixError('User specified 3 lmax values for three-tissue decomposition, but input DWI is not multi-shell') + + # RF estimation and multi-tissue CSD + class Tissue(object): #pylint: disable=useless-object-inheritance + def __init__(self, name): + self.name = name + self.tissue_rf = 'response_' + name + '.txt' + self.fod = 'FOD_' + name + '.mif' + self.fod_norm = 'FODnorm_' + name + '.mif' + + tissues = [Tissue('WM'), Tissue('GM'), Tissue('CSF')] + + run.command('dwi2response dhollander input.mif -mask mask.mif ' + + ' '.join(tissue.tissue_rf for tissue in tissues)) + + # Immediately remove GM if we can't deal with it + if not multishell: + app.cleanup(tissues[1].tissue_rf) + tissues = tissues[::2] + + run.command('dwi2fod msmt_csd input.mif' + + ' -lmax ' + ','.join(str(item) for item in lmax) + + ' ' + + ' '.join(tissue.tissue_rf + ' ' + tissue.fod + for tissue in tissues)) + + # Normalisation in brain mask + run.command('maskfilter mask.mif erode - |' + + ' mtnormalise -mask - -balanced' + + ' -check_factors factors.txt ' + + ' '.join(tissue.fod + ' ' + tissue.fod_norm + for tissue in tissues)) + app.cleanup([tissue.fod for tissue in tissues]) + app.cleanup([tissue.fod_norm for tissue in tissues]) + + csf_rf = matrix.load_matrix(tissues[-1].tissue_rf) + app.cleanup([tissue.tissue_rf for tissue in tissues]) + csf_rf_bzero_lzero = csf_rf[0][0] + balance_factors = matrix.load_vector('factors.txt') + app.cleanup('factors.txt') + csf_balance_factor = balance_factors[-1] + scale_multiplier = (app.ARGS.reference * math.sqrt(4.0*math.pi)) / (csf_rf_bzero_lzero / csf_balance_factor) + + run.command('mrcalc input.mif ' + str(scale_multiplier) + ' -mult - | ' + + 'mrconvert - ' + path.from_user(app.ARGS.output), + 
mrconvert_keyval=path.from_user(app.ARGS.input, False), + force=app.FORCE_OVERWRITE) + + if app.ARGS.scale: + matrix.save_vector(path.from_user(app.ARGS.scale, False), [scale_multiplier]) diff --git a/python/mrtrix3/dwinormalise/mtnorm/usage.py b/python/mrtrix3/dwinormalise/mtnorm/usage.py new file mode 100644 index 0000000000..80d0484c34 --- /dev/null +++ b/python/mrtrix3/dwinormalise/mtnorm/usage.py @@ -0,0 +1,64 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app +from . import REFERENCE_INTENSITY, LMAXES_MULTI, LMAXES_SINGLE + +def usage(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('mtnorm', parents=[base_parser]) + parser.set_author('Robert E. Smith (robert.smith@florey.edu.au) and Arshiya Sangchooli (asangchooli@student.unimelb.edu.au)') + parser.set_synopsis('Normalise a DWI series to the estimated b=0 CSF intensity') + parser.add_description('This algorithm determines an appropriate global scaling factor to apply to a DWI series ' + 'such that after the scaling is applied, the b=0 CSF intensity corresponds to some ' + 'reference value (' + str(REFERENCE_INTENSITY) + ' by default).') + parser.add_description('The operation of this script is a subset of that performed by the script "dwibiasnormmask". ' + 'Many users may find that comprehensive solution preferable; this dwinormalise algorithm is ' + 'nevertheless provided to demonstrate specifically the global intensity normalisation portion of that command.') + parser.add_description('The ODFs estimated within this optimisation procedure are by default of lower maximal spherical harmonic ' + 'degree than what would be advised for analysis. This is done for computational efficiency. This ' + 'behaviour can be modified through the -lmax command-line option.') + parser.add_citation('Jeurissen, B; Tournier, J-D; Dhollander, T; Connelly, A & Sijbers, J. ' + 'Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data. ' + 'NeuroImage, 2014, 103, 411-426') + parser.add_citation('Raffelt, D.; Dhollander, T.; Tournier, J.-D.; Tabbara, R.; Smith, R. E.; Pierre, E. & Connelly, A. ' + 'Bias Field Correction and Intensity Normalisation for Quantitative Analysis of Apparent Fibre Density. ' + 'In Proc. ISMRM, 2017, 26, 3541') + parser.add_citation('Dhollander, T.; Tabbara, R.; Rosnarho-Tornstrand, J.; Tournier, J.-D.; Raffelt, D. & Connelly, A. ' + 'Multi-tissue log-domain intensity and inhomogeneity normalisation for quantitative apparent fibre density. ' + 'In Proc. ISMRM, 2021, 29, 2472') + parser.add_citation('Dhollander, T.; Raffelt, D. & Connelly, A. ' + 'Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. 
' + 'ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5') + parser.add_argument('input', help='The input DWI series') + parser.add_argument('output', help='The normalised DWI series') + options = parser.add_argument_group('Options specific to the "mtnorm" algorithm') + options.add_argument('-lmax', + metavar='values', + help='The maximum spherical harmonic degree for the estimated FODs (see Description); ' + 'defaults are "' + ','.join(str(item) for item in LMAXES_MULTI) + '" for multi-shell and "' + ','.join(str(item) for item in LMAXES_SINGLE) + '" for single-shell data)') + options.add_argument('-mask', + metavar='image', + help='Provide a mask image for relevant calculations ' + '(if not provided, the default dwi2mask algorithm will be used)') + options.add_argument('-reference', + type=float, + metavar='value', + default=REFERENCE_INTENSITY, + help='Set the target CSF b=0 intensity in the output DWI series ' + '(default: ' + str(REFERENCE_INTENSITY) + ')') + options.add_argument('-scale', + metavar='file', + help='Write the scaling factor applied to the DWI series to a text file') + app.add_dwgrad_import_options(parser) diff --git a/python/mrtrix3/dwinormalise/usage.py b/python/mrtrix3/dwinormalise/usage.py new file mode 100644 index 0000000000..0ec5868778 --- /dev/null +++ b/python/mrtrix3/dwinormalise/usage.py @@ -0,0 +1,26 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import algorithm #pylint: disable=no-name-in-module + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Perform various forms of intensity normalisation of DWIs') + cmdline.add_description('This script provides access to different techniques for globally scaling the intensity of diffusion-weighted images. ' + 'The different algorithms have different purposes, and different requirements with respect to the data with which they must be provided & will produce as output. ' + 'Further information on the individual algorithms available can be accessed via their individual help pages; eg. "dwinormalise group -help".') + + # Import the command-line settings for all algorithms found in the relevant directory + algorithm.usage(cmdline) diff --git a/python/mrtrix3/dwishellmath/__init__.py b/python/mrtrix3/dwishellmath/__init__.py index 49d0922ebf..68b1ddd512 100644 --- a/python/mrtrix3/dwishellmath/__init__.py +++ b/python/mrtrix3/dwishellmath/__init__.py @@ -13,51 +13,4 @@ # # For more details, see http://www.mrtrix.org/. 
- SUPPORTED_OPS = ['mean', 'median', 'sum', 'product', 'rms', 'norm', 'var', 'std', 'min', 'max', 'absmax', 'magmax'] - - -def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel - cmdline.set_author('Daan Christiaens (daan.christiaens@kcl.ac.uk)') - cmdline.set_synopsis('Apply an mrmath operation to each b-value shell in a DWI series') - cmdline.add_description('The output of this command is a 4D image, where ' - 'each volume corresponds to a b-value shell (in order of increasing b-value), and ' - 'the intensities within each volume correspond to the chosen statistic having been computed from across the DWI volumes belonging to that b-value shell.') - cmdline.add_argument('input', help='The input diffusion MRI series') - cmdline.add_argument('operation', choices=SUPPORTED_OPS, help='The operation to be applied to each shell; this must be one of the following: ' + ', '.join(SUPPORTED_OPS)) - cmdline.add_argument('output', help='The output image series') - cmdline.add_example_usage('To compute the mean diffusion-weighted signal in each b-value shell', - 'dwishellmath dwi.mif mean shellmeans.mif') - app.add_dwgrad_import_options(cmdline) - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - # check inputs and outputs - dwi_header = image.Header(path.from_user(app.ARGS.input, False)) - if len(dwi_header.size()) != 4: - raise MRtrixError('Input image must be a 4D image') - gradimport = app.read_dwgrad_import_options() - if not gradimport and 'dw_scheme' not in dwi_header.keyval(): - raise MRtrixError('No diffusion gradient table provided, and none present in image header') - app.check_output_path(app.ARGS.output) - # import data and gradient table - app.make_scratch_dir() - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('in.mif') + gradimport + ' -strides 0,0,0,1') - app.goto_scratch_dir() - # run per-shell operations - files = [] - for index, bvalue in enumerate(image.mrinfo('in.mif', 'shell_bvalues').split()): - filename = 'shell-{:02d}.mif'.format(index) - run.command('dwiextract -shells ' + bvalue + ' in.mif - | mrmath -axis 3 - ' + app.ARGS.operation + ' ' + filename) - files.append(filename) - if len(files) > 1: - # concatenate to output file - run.command('mrcat -axis 3 ' + ' '.join(files) + ' out.mif') - run.command('mrconvert out.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) - else: - # make a 4D image with one volume - app.warn('Only one unique b-value present in DWI data; command mrmath with -axis 3 option may be preferable') - run.command('mrconvert ' + files[0] + ' ' + path.from_user(app.ARGS.output) + ' -axes 0,1,2,-1', mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwishellmath/execute.py b/python/mrtrix3/dwishellmath/execute.py new file mode 100644 index 0000000000..c217d46752 --- /dev/null +++ b/python/mrtrix3/dwishellmath/execute.py @@ -0,0 +1,45 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module +from mrtrix3 import app, image, path, run #pylint: disable=no-name-in-module + +def execute(): #pylint: disable=unused-variable + # check inputs and outputs + dwi_header = image.Header(path.from_user(app.ARGS.input, False)) + if len(dwi_header.size()) != 4: + raise MRtrixError('Input image must be a 4D image') + gradimport = app.read_dwgrad_import_options() + if not gradimport and 'dw_scheme' not in dwi_header.keyval(): + raise MRtrixError('No diffusion gradient table provided, and none present in image header') + app.check_output_path(app.ARGS.output) + # import data and gradient table + app.make_scratch_dir() + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('in.mif') + gradimport + ' -strides 0,0,0,1') + app.goto_scratch_dir() + # run per-shell operations + files = [] + for index, bvalue in enumerate(image.mrinfo('in.mif', 'shell_bvalues').split()): + filename = 'shell-{:02d}.mif'.format(index) + run.command('dwiextract -shells ' + bvalue + ' in.mif - | mrmath -axis 3 - ' + app.ARGS.operation + ' ' + filename) + files.append(filename) + if len(files) > 1: + # concatenate to output file + run.command('mrcat -axis 3 ' + ' '.join(files) + ' out.mif') + run.command('mrconvert out.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) + else: + # make a 4D image with one volume + app.warn('Only one unique b-value present in DWI data; command mrmath with -axis 3 option may be preferable') + run.command('mrconvert ' + files[0] + ' ' + path.from_user(app.ARGS.output) + ' -axes 0,1,2,-1', mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/dwishellmath/usage.py b/python/mrtrix3/dwishellmath/usage.py new file mode 100644 index 0000000000..dae291576f --- /dev/null +++ b/python/mrtrix3/dwishellmath/usage.py @@ -0,0 +1,30 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel +from . 
import SUPPORTED_OPS + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Daan Christiaens (daan.christiaens@kcl.ac.uk)') + cmdline.set_synopsis('Apply an mrmath operation to each b-value shell in a DWI series') + cmdline.add_description('The output of this command is a 4D image, where ' + 'each volume corresponds to a b-value shell (in order of increasing b-value), and ' + 'the intensities within each volume correspond to the chosen statistic having been computed from across the DWI volumes belonging to that b-value shell.') + cmdline.add_argument('input', help='The input diffusion MRI series') + cmdline.add_argument('operation', choices=SUPPORTED_OPS, help='The operation to be applied to each shell; this must be one of the following: ' + ', '.join(SUPPORTED_OPS)) + cmdline.add_argument('output', help='The output image series') + cmdline.add_example_usage('To compute the mean diffusion-weighted signal in each b-value shell', + 'dwishellmath dwi.mif mean shellmeans.mif') + app.add_dwgrad_import_options(cmdline) diff --git a/python/mrtrix3/for_each/__init__.py b/python/mrtrix3/for_each/__init__.py index 294b70aae5..588df56506 100644 --- a/python/mrtrix3/for_each/__init__.py +++ b/python/mrtrix3/for_each/__init__.py @@ -13,285 +13,8 @@ # # For more details, see http://www.mrtrix.org/. - -import os, re, sys, threading - - - # Since we're going to capture everything after the colon character and "hide" it from argparse, # we need to store the contents from there in a global so as for it to be accessible from execute() CMDSPLIT = [ ] - - -def usage(cmdline): #pylint: disable=unused-variable - global CMDSPLIT - from mrtrix3 import _version #pylint: disable=no-name-in-module, import-outside-toplevel - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and David Raffelt (david.raffelt@florey.edu.au)') - cmdline.set_synopsis('Perform some arbitrary processing step for each of a set of inputs') - cmdline.add_description('This script greatly simplifies various forms of batch processing by enabling the execution of a command (or set of commands) independently for each of a set of inputs.') - cmdline.add_description('More information on use of the for_each command can be found at the following link: \n' - 'https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/tips_and_tricks/batch_processing_with_foreach.html') - cmdline.add_description('The way that this batch processing capability is achieved is by providing basic text substitutions, which simplify the formation of valid command strings based on the unique components of the input strings on which the script is instructed to execute. This does however mean that the items to be passed as inputs to the for_each command (e.g. file / directory names) MUST NOT contain any instances of these substitution strings, as otherwise those paths will be corrupted during the course of the substitution.') - cmdline.add_description('The available substitutions are listed below (note that the -test command-line option can be used to ensure correct command string formation prior to actually executing the commands):') - cmdline.add_description(' - IN: The full matching pattern, including leading folders. For example, if the target list contains a file "folder/image.mif", any occurrence of "IN" will be substituted with "folder/image.mif".') - cmdline.add_description(' - NAME: The basename of the matching pattern. 
For example, if the target list contains a file "folder/image.mif", any occurrence of "NAME" will be substituted with "image.mif".') - cmdline.add_description(' - PRE: The prefix of the input pattern (the basename stripped of its extension). For example, if the target list contains a file "folder/my.image.mif.gz", any occurrence of "PRE" will be substituted with "my.image".') - cmdline.add_description(' - UNI: The unique part of the input after removing any common prefix and common suffix. For example, if the target list contains files: "folder/001dwi.mif", "folder/002dwi.mif", "folder/003dwi.mif", any occurrence of "UNI" will be substituted with "001", "002", "003".') - cmdline.add_description('Note that due to a limitation of the Python "argparse" module, any command-line OPTIONS that the user intends to provide specifically to the for_each script must appear BEFORE providing the list of inputs on which for_each is intended to operate. While command-line options provided as such will be interpreted specifically by the for_each script, any command-line options that are provided AFTER the COLON separator will form part of the executed COMMAND, and will therefore be interpreted as command-line options having been provided to that underlying command.') - cmdline.add_example_usage('Demonstration of basic usage syntax', - 'for_each folder/*.mif : mrinfo IN', - 'This will run the "mrinfo" command for every .mif file present in "folder/". Note that the compulsory colon symbol is used to separate the list of items on which for_each is being instructed to operate, from the command that is intended to be run for each input.') - cmdline.add_example_usage('Multi-threaded use of for_each', - 'for_each -nthreads 4 freesurfer/subjects/* : recon-all -subjid NAME -all', - 'In this example, for_each is instructed to run the FreeSurfer command \'recon-all\' for all subjects within the \'subjects\' directory, with four subjects being processed in parallel at any one time. Whenever processing of one subject is completed, processing for a new unprocessed subject will commence. This technique is useful for improving the efficiency of running single-threaded commands on multi-core systems, as long as the system possesses enough memory to support such parallel processing. Note that in the case of multi-threaded commands (which includes many MRtrix3 commands), it is generally preferable to permit multi-threaded execution of the command on a single input at a time, rather than processing multiple inputs in parallel.') - cmdline.add_example_usage('Excluding specific inputs from execution', - 'for_each *.nii -exclude 001.nii : mrconvert IN PRE.mif', - 'Particularly when a wildcard is used to define the list of inputs for for_each, it is possible in some instances that this list will include one or more strings for which execution should in fact not be performed; for instance, if a command has already been executed for one or more files, and then for_each is being used to execute the same command for all other files. In this case, the -exclude option can be used to effectively remove an item from the list of inputs that would otherwise be included due to the use of a wildcard (and can be used more than once to exclude more than one string). In this particular example, mrconvert is instructed to perform conversions from NIfTI to MRtrix image formats, for all except the first image in the directory. Note that any usages of this option must appear AFTER the list of inputs. 
Note also that the argument following the -exclude option can alternatively be a regular expression, in which case any inputs for which a match to the expression is found will be excluded from processing.') - cmdline.add_example_usage('Testing the command string substitution', - 'for_each -test * : mrconvert IN PRE.mif', - 'By specifying the -test option, the script will print to the terminal the results of text substitutions for all of the specified inputs, but will not actually execute those commands. It can therefore be used to verify that the script is receiving the intended set of inputs, and that the text substitutions on those inputs lead to the intended command strings.') - cmdline.add_argument('inputs', help='Each of the inputs for which processing should be run', nargs='+') - cmdline.add_argument('colon', help='Colon symbol (":") delimiting the for_each inputs & command-line options from the actual command to be executed', type=str, choices=[':']) - cmdline.add_argument('command', help='The command string to run for each input, containing any number of substitutions listed in the Description section', type=str) - cmdline.add_argument('-exclude', help='Exclude one specific input string / all strings matching a regular expression from being processed (see Example Usage)', action='append', metavar='"regex"', nargs=1) - cmdline.add_argument('-test', help='Test the operation of the for_each script, by printing the command strings following string substitution but not actually executing them', action='store_true', default=False) - - # Usage of for_each needs to be handled slightly differently here: - # We want argparse to parse only the contents of the command-line before the colon symbol, - # as these are the items that pertain to the invocation of the for_each script; - # anything after the colon should instead form a part of the command that - # for_each is responsible for executing - try: - index = next(i for i,s in enumerate(sys.argv) if s == ':') - try: - CMDSPLIT = sys.argv[index+1:] - sys.argv = sys.argv[:index+1] - sys.argv.append(' '.join(CMDSPLIT)) - except IndexError: - sys.stderr.write('Erroneous usage: No command specified (colon separator cannot be the last entry provided)\n') - sys.exit(0) - except StopIteration: - if len(sys.argv) > 2: - sys.stderr.write('Erroneous usage: A colon must be used to separate for_each inputs from the command to be executed\n') - sys.exit(0) - - - - - - - - -# These need to be globals in order to be accessible from execute_parallel() -class Shared: - def __init__(self): - self._job_index = 0 - self.lock = threading.Lock() - self.stop = False - def next(self, jobs): - job = None - with self.lock: - if self._job_index < len(jobs): - job = jobs[self._job_index] - self._job_index += 1 - self.stop = self._job_index == len(jobs) - return job - -shared = Shared() #pylint: disable=invalid-name - - - KEYLIST = [ 'IN', 'NAME', 'PRE', 'UNI' ] - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import ANSI, MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, run #pylint: disable=no-name-in-module, import-outside-toplevel - - inputs = app.ARGS.inputs - app.debug('All inputs: ' + str(inputs)) - app.debug('Command: ' + str(app.ARGS.command)) - app.debug('CMDSPLIT: ' + str(CMDSPLIT)) - - if app.ARGS.exclude: - app.ARGS.exclude = [ exclude[0] for exclude in app.ARGS.exclude ] # To deal with argparse's action=append. 
Always guaranteed to be only one argument since nargs=1 - app.debug('To exclude: ' + str(app.ARGS.exclude)) - exclude_unmatched = [ ] - to_exclude = [ ] - for exclude in app.ARGS.exclude: - if exclude in inputs: - to_exclude.append(exclude) - else: - try: - re_object = re.compile(exclude) - regex_hits = [ ] - for arg in inputs: - search_result = re_object.search(arg) - if search_result and search_result.group(): - regex_hits.append(arg) - if regex_hits: - app.debug('Inputs excluded via regex "' + exclude + '": ' + str(regex_hits)) - to_exclude.extend(regex_hits) - else: - app.debug('Compiled exclude regex "' + exclude + '" had no hits') - exclude_unmatched.append(exclude) - except re.error: - app.debug('Exclude string "' + exclude + '" did not compile as regex') - exclude_unmatched.append(exclude) - if exclude_unmatched: - app.warn('Item' + ('s' if len(exclude_unmatched) > 1 else '') + ' specified via -exclude did not result in item exclusion, whether by direct match or compilation as regex: ' + str('\'' + exclude_unmatched[0] + '\'' if len(exclude_unmatched) == 1 else exclude_unmatched)) - inputs = [ arg for arg in inputs if arg not in to_exclude ] - if not inputs: - raise MRtrixError('No inputs remaining after application of exclusion criteri' + ('on' if len(app.ARGS.exclude) == 1 else 'a')) - app.debug('Inputs after exclusion: ' + str(inputs)) - - common_prefix = os.path.commonprefix(inputs) - common_suffix = os.path.commonprefix([i[::-1] for i in inputs])[::-1] - app.debug('Common prefix: ' + common_prefix if common_prefix else 'No common prefix') - app.debug('Common suffix: ' + common_suffix if common_suffix else 'No common suffix') - - for entry in CMDSPLIT: - if os.path.exists(entry): - keys_present = [ key for key in KEYLIST if key in entry ] - if keys_present: - app.warn('Performing text substitution of ' + str(keys_present) + ' within command: "' + entry + '"; but the original text exists as a path on the file system... 
is this a problematic filesystem path?') - - try: - next(entry for entry in CMDSPLIT if any(key for key in KEYLIST if key in entry)) - except StopIteration as exception: - raise MRtrixError('None of the unique for_each keys ' + str(KEYLIST) + ' appear in command string "' + app.ARGS.command + '"; no substitution can occur') from exception - - class Entry: - def __init__(self, input_text): - self.input_text = input_text - self.sub_in = input_text - self.sub_name = os.path.basename(input_text.rstrip('/')) - self.sub_pre = os.path.splitext(self.sub_name.rstrip('.gz'))[0] - if common_suffix: - self.sub_uni = input_text[len(common_prefix):-len(common_suffix)] - else: - self.sub_uni = input_text[len(common_prefix):] - - self.substitutions = { 'IN': self.sub_in, 'NAME': self.sub_name, 'PRE': self.sub_pre, 'UNI': self.sub_uni } - app.debug('Input text: ' + input_text) - app.debug('Substitutions: ' + str(self.substitutions)) - - self.cmd = [ ] - for entry in CMDSPLIT: - for (key, value) in self.substitutions.items(): - entry = entry.replace(key, value) - if ' ' in entry: - entry = '"' + entry + '"' - self.cmd.append(entry) - app.debug('Resulting command: ' + str(self.cmd)) - - self.outputtext = None - self.returncode = None - - jobs = [ ] - for i in inputs: - jobs.append(Entry(i)) - - if app.ARGS.test: - app.console('Command strings for ' + str(len(jobs)) + ' jobs:') - for job in jobs: - sys.stderr.write(ANSI.execute + 'Input:' + ANSI.clear + ' "' + job.input_text + '"\n') - sys.stderr.write(ANSI.execute + 'Command:' + ANSI.clear + ' ' + ' '.join(job.cmd) + '\n') - return - - parallel = app.NUM_THREADS is not None and app.NUM_THREADS > 1 - - def progress_string(): - text = str(sum(1 if job.returncode is not None else 0 for job in jobs)) + \ - '/' + \ - str(len(jobs)) + \ - ' jobs completed ' + \ - ('across ' + str(app.NUM_THREADS) + ' threads' if parallel else 'sequentially') - fail_count = sum(1 if job.returncode else 0 for job in jobs) - if fail_count: - text += ' (' + str(fail_count) + ' errors)' - return text - - progress = app.ProgressBar(progress_string(), len(jobs)) - - def execute_parallel(): - while not shared.stop: - my_job = shared.next(jobs) - if not my_job: - return - try: - result = run.command(' '.join(my_job.cmd), shell=True) - my_job.outputtext = result.stdout + result.stderr - my_job.returncode = 0 - except run.MRtrixCmdError as exception: - my_job.outputtext = str(exception) - my_job.returncode = exception.returncode - except Exception as exception: # pylint: disable=broad-except - my_job.outputtext = str(exception) - my_job.returncode = 1 - with shared.lock: - progress.increment(progress_string()) - - if parallel: - threads = [ ] - for i in range (1, app.NUM_THREADS): - thread = threading.Thread(target=execute_parallel) - thread.start() - threads.append(thread) - execute_parallel() - for thread in threads: - thread.join() - else: - for job in jobs: - try: - result = run.command(' '.join(job.cmd), shell=True) - job.outputtext = result.stdout + result.stderr - job.returncode = 0 - except run.MRtrixCmdError as exception: - job.outputtext = str(exception) - job.returncode = exception.returncode - except Exception as exception: # pylint: disable=broad-except - job.outputtext = str(exception) - job.returncode = 1 - progress.increment(progress_string()) - - progress.done() - - assert all(job.returncode is not None for job in jobs) - fail_count = sum(1 if job.returncode else 0 for job in jobs) - if fail_count: - app.warn(str(fail_count) + ' of ' + str(len(jobs)) + ' jobs did not 
complete successfully') - if fail_count > 1: - app.warn('Outputs from failed commands:') - sys.stderr.write(app.EXEC_NAME + ':\n') - else: - app.warn('Output from failed command:') - for job in jobs: - if job.returncode: - if job.outputtext: - app.warn('For input "' + job.sub_in + '" (returncode = ' + str(job.returncode) + '):') - for line in job.outputtext.splitlines(): - sys.stderr.write(' ' * (len(app.EXEC_NAME)+2) + line + '\n') - else: - app.warn('No output from command for input "' + job.sub_in + '" (return code = ' + str(job.returncode) + ')') - if fail_count > 1: - sys.stderr.write(app.EXEC_NAME + ':\n') - raise MRtrixError(str(fail_count) + ' of ' + str(len(jobs)) + ' jobs did not complete successfully: ' + str([job.input_text for job in jobs if job.returncode])) - - if app.VERBOSITY > 1: - if any(job.outputtext for job in jobs): - sys.stderr.write(app.EXEC_NAME + ':\n') - for job in jobs: - if job.outputtext: - app.console('Output of command for input "' + job.sub_in + '":') - for line in job.outputtext.splitlines(): - sys.stderr.write(' ' * (len(app.EXEC_NAME)+2) + line + '\n') - else: - app.console('No output from command for input "' + job.sub_in + '"') - sys.stderr.write(app.EXEC_NAME + ':\n') - else: - app.console('No output from command for any inputs') - - app.console('Script reported successful completion for all inputs') diff --git a/python/mrtrix3/for_each/entry.py b/python/mrtrix3/for_each/entry.py new file mode 100644 index 0000000000..a382d6c2ae --- /dev/null +++ b/python/mrtrix3/for_each/entry.py @@ -0,0 +1,46 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + + +import os +from mrtrix3 import app +from . import CMDSPLIT + +class Entry: + def __init__(self, input_text, common_prefix, common_suffix): + self.input_text = input_text + self.sub_in = input_text + self.sub_name = os.path.basename(input_text.rstrip('/')) + self.sub_pre = os.path.splitext(self.sub_name.rstrip('.gz'))[0] + if common_suffix: + self.sub_uni = input_text[len(common_prefix):-len(common_suffix)] + else: + self.sub_uni = input_text[len(common_prefix):] + + self.substitutions = { 'IN': self.sub_in, 'NAME': self.sub_name, 'PRE': self.sub_pre, 'UNI': self.sub_uni } + app.debug('Input text: ' + input_text) + app.debug('Substitutions: ' + str(self.substitutions)) + + self.cmd = [ ] + for entry in CMDSPLIT: + for (key, value) in self.substitutions.items(): + entry = entry.replace(key, value) + if ' ' in entry: + entry = '"' + entry + '"' + self.cmd.append(entry) + app.debug('Resulting command: ' + str(self.cmd)) + + self.outputtext = None + self.returncode = None diff --git a/python/mrtrix3/for_each/execute.py b/python/mrtrix3/for_each/execute.py new file mode 100644 index 0000000000..d129981111 --- /dev/null +++ b/python/mrtrix3/for_each/execute.py @@ -0,0 +1,182 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. 
+# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import os, re, sys, threading +from mrtrix3 import ANSI, MRtrixError #pylint: disable=no-name-in-module +from mrtrix3 import app, run #pylint: disable=no-name-in-module +from . import CMDSPLIT, KEYLIST +from . import Entry, Shared + +def execute(): #pylint: disable=unused-variable + + inputs = app.ARGS.inputs + app.debug('All inputs: ' + str(inputs)) + app.debug('Command: ' + str(app.ARGS.command)) + app.debug('CMDSPLIT: ' + str(CMDSPLIT)) + + if app.ARGS.exclude: + app.ARGS.exclude = [ exclude[0] for exclude in app.ARGS.exclude ] # To deal with argparse's action=append. Always guaranteed to be only one argument since nargs=1 + app.debug('To exclude: ' + str(app.ARGS.exclude)) + exclude_unmatched = [ ] + to_exclude = [ ] + for exclude in app.ARGS.exclude: + if exclude in inputs: + to_exclude.append(exclude) + else: + try: + re_object = re.compile(exclude) + regex_hits = [ ] + for arg in inputs: + search_result = re_object.search(arg) + if search_result and search_result.group(): + regex_hits.append(arg) + if regex_hits: + app.debug('Inputs excluded via regex "' + exclude + '": ' + str(regex_hits)) + to_exclude.extend(regex_hits) + else: + app.debug('Compiled exclude regex "' + exclude + '" had no hits') + exclude_unmatched.append(exclude) + except re.error: + app.debug('Exclude string "' + exclude + '" did not compile as regex') + exclude_unmatched.append(exclude) + if exclude_unmatched: + app.warn('Item' + ('s' if len(exclude_unmatched) > 1 else '') + ' specified via -exclude did not result in item exclusion, whether by direct match or compilation as regex: ' + str('\'' + exclude_unmatched[0] + '\'' if len(exclude_unmatched) == 1 else exclude_unmatched)) + inputs = [ arg for arg in inputs if arg not in to_exclude ] + if not inputs: + raise MRtrixError('No inputs remaining after application of exclusion criteri' + ('on' if len(app.ARGS.exclude) == 1 else 'a')) + app.debug('Inputs after exclusion: ' + str(inputs)) + + common_prefix = os.path.commonprefix(inputs) + common_suffix = os.path.commonprefix([i[::-1] for i in inputs])[::-1] + app.debug('Common prefix: ' + common_prefix if common_prefix else 'No common prefix') + app.debug('Common suffix: ' + common_suffix if common_suffix else 'No common suffix') + + for entry in CMDSPLIT: + if os.path.exists(entry): + keys_present = [ key for key in KEYLIST if key in entry ] + if keys_present: + app.warn('Performing text substitution of ' + str(keys_present) + ' within command: "' + entry + '"; but the original text exists as a path on the file system... 
is this a problematic filesystem path?') + + try: + next(entry for entry in CMDSPLIT if any(key for key in KEYLIST if key in entry)) + except StopIteration as exception: + raise MRtrixError('None of the unique for_each keys ' + str(KEYLIST) + ' appear in command string "' + app.ARGS.command + '"; no substitution can occur') from exception + + jobs = [ ] + for i in inputs: + jobs.append(Entry(i, common_prefix, common_suffix)) + + if app.ARGS.test: + app.console('Command strings for ' + str(len(jobs)) + ' jobs:') + for job in jobs: + sys.stderr.write(ANSI.execute + 'Input:' + ANSI.clear + ' "' + job.input_text + '"\n') + sys.stderr.write(ANSI.execute + 'Command:' + ANSI.clear + ' ' + ' '.join(job.cmd) + '\n') + return + + parallel = app.NUM_THREADS is not None and app.NUM_THREADS > 1 + + def progress_string(): + text = str(sum(1 if job.returncode is not None else 0 for job in jobs)) + \ + '/' + \ + str(len(jobs)) + \ + ' jobs completed ' + \ + ('across ' + str(app.NUM_THREADS) + ' threads' if parallel else 'sequentially') + fail_count = sum(1 if job.returncode else 0 for job in jobs) + if fail_count: + text += ' (' + str(fail_count) + ' errors)' + return text + + progress = app.ProgressBar(progress_string(), len(jobs)) + shared = Shared() + + def execute_parallel(): + while not shared.stop: + my_job = shared.next(jobs) + if not my_job: + return + try: + result = run.command(' '.join(my_job.cmd), shell=True) + my_job.outputtext = result.stdout + result.stderr + my_job.returncode = 0 + except run.MRtrixCmdError as exception: + my_job.outputtext = str(exception) + my_job.returncode = exception.returncode + except Exception as exception: # pylint: disable=broad-except + my_job.outputtext = str(exception) + my_job.returncode = 1 + with shared.lock: + progress.increment(progress_string()) + + if parallel: + threads = [ ] + for i in range (1, app.NUM_THREADS): + thread = threading.Thread(target=execute_parallel) + thread.start() + threads.append(thread) + execute_parallel() + for thread in threads: + thread.join() + else: + for job in jobs: + try: + result = run.command(' '.join(job.cmd), shell=True) + job.outputtext = result.stdout + result.stderr + job.returncode = 0 + except run.MRtrixCmdError as exception: + job.outputtext = str(exception) + job.returncode = exception.returncode + except Exception as exception: # pylint: disable=broad-except + job.outputtext = str(exception) + job.returncode = 1 + progress.increment(progress_string()) + + progress.done() + + assert all(job.returncode is not None for job in jobs) + fail_count = sum(1 if job.returncode else 0 for job in jobs) + if fail_count: + app.warn(str(fail_count) + ' of ' + str(len(jobs)) + ' jobs did not complete successfully') + if fail_count > 1: + app.warn('Outputs from failed commands:') + sys.stderr.write(app.EXEC_NAME + ':\n') + else: + app.warn('Output from failed command:') + for job in jobs: + if job.returncode: + if job.outputtext: + app.warn('For input "' + job.sub_in + '" (returncode = ' + str(job.returncode) + '):') + for line in job.outputtext.splitlines(): + sys.stderr.write(' ' * (len(app.EXEC_NAME)+2) + line + '\n') + else: + app.warn('No output from command for input "' + job.sub_in + '" (return code = ' + str(job.returncode) + ')') + if fail_count > 1: + sys.stderr.write(app.EXEC_NAME + ':\n') + raise MRtrixError(str(fail_count) + ' of ' + str(len(jobs)) + ' jobs did not complete successfully: ' + str([job.input_text for job in jobs if job.returncode])) + + if app.VERBOSITY > 1: + if any(job.outputtext for job in jobs): 
+ sys.stderr.write(app.EXEC_NAME + ':\n') + for job in jobs: + if job.outputtext: + app.console('Output of command for input "' + job.sub_in + '":') + for line in job.outputtext.splitlines(): + sys.stderr.write(' ' * (len(app.EXEC_NAME)+2) + line + '\n') + else: + app.console('No output from command for input "' + job.sub_in + '"') + sys.stderr.write(app.EXEC_NAME + ':\n') + else: + app.console('No output from command for any inputs') + + app.console('Script reported successful completion for all inputs') diff --git a/python/mrtrix3/for_each/shared.py b/python/mrtrix3/for_each/shared.py new file mode 100644 index 0000000000..6c56f67ca6 --- /dev/null +++ b/python/mrtrix3/for_each/shared.py @@ -0,0 +1,30 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import threading + +class Shared: + def __init__(self): + self._job_index = 0 + self.lock = threading.Lock() + self.stop = False + def next(self, jobs): + job = None + with self.lock: + if self._job_index < len(jobs): + job = jobs[self._job_index] + self._job_index += 1 + self.stop = self._job_index == len(jobs) + return job diff --git a/python/mrtrix3/for_each/usage.py b/python/mrtrix3/for_each/usage.py new file mode 100644 index 0000000000..3017bfe1da --- /dev/null +++ b/python/mrtrix3/for_each/usage.py @@ -0,0 +1,69 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import sys +from . import CMDSPLIT + +def usage(cmdline): #pylint: disable=unused-variable + global CMDSPLIT + from mrtrix3 import _version #pylint: disable=no-name-in-module, import-outside-toplevel + cmdline.set_author('Robert E. 
Smith (robert.smith@florey.edu.au) and David Raffelt (david.raffelt@florey.edu.au)') + cmdline.set_synopsis('Perform some arbitrary processing step for each of a set of inputs') + cmdline.add_description('This script greatly simplifies various forms of batch processing by enabling the execution of a command (or set of commands) independently for each of a set of inputs.') + cmdline.add_description('More information on use of the for_each command can be found at the following link: \n' + 'https://mrtrix.readthedocs.io/en/' + _version.__tag__ + '/tips_and_tricks/batch_processing_with_foreach.html') + cmdline.add_description('The way that this batch processing capability is achieved is by providing basic text substitutions, which simplify the formation of valid command strings based on the unique components of the input strings on which the script is instructed to execute. This does however mean that the items to be passed as inputs to the for_each command (e.g. file / directory names) MUST NOT contain any instances of these substitution strings, as otherwise those paths will be corrupted during the course of the substitution.') + cmdline.add_description('The available substitutions are listed below (note that the -test command-line option can be used to ensure correct command string formation prior to actually executing the commands):') + cmdline.add_description(' - IN: The full matching pattern, including leading folders. For example, if the target list contains a file "folder/image.mif", any occurrence of "IN" will be substituted with "folder/image.mif".') + cmdline.add_description(' - NAME: The basename of the matching pattern. For example, if the target list contains a file "folder/image.mif", any occurrence of "NAME" will be substituted with "image.mif".') + cmdline.add_description(' - PRE: The prefix of the input pattern (the basename stripped of its extension). For example, if the target list contains a file "folder/my.image.mif.gz", any occurrence of "PRE" will be substituted with "my.image".') + cmdline.add_description(' - UNI: The unique part of the input after removing any common prefix and common suffix. For example, if the target list contains files: "folder/001dwi.mif", "folder/002dwi.mif", "folder/003dwi.mif", any occurrence of "UNI" will be substituted with "001", "002", "003".') + cmdline.add_description('Note that due to a limitation of the Python "argparse" module, any command-line OPTIONS that the user intends to provide specifically to the for_each script must appear BEFORE providing the list of inputs on which for_each is intended to operate. While command-line options provided as such will be interpreted specifically by the for_each script, any command-line options that are provided AFTER the COLON separator will form part of the executed COMMAND, and will therefore be interpreted as command-line options having been provided to that underlying command.') + cmdline.add_example_usage('Demonstration of basic usage syntax', + 'for_each folder/*.mif : mrinfo IN', + 'This will run the "mrinfo" command for every .mif file present in "folder/". 
Note that the compulsory colon symbol is used to separate the list of items on which for_each is being instructed to operate, from the command that is intended to be run for each input.') + cmdline.add_example_usage('Multi-threaded use of for_each', + 'for_each -nthreads 4 freesurfer/subjects/* : recon-all -subjid NAME -all', + 'In this example, for_each is instructed to run the FreeSurfer command \'recon-all\' for all subjects within the \'subjects\' directory, with four subjects being processed in parallel at any one time. Whenever processing of one subject is completed, processing for a new unprocessed subject will commence. This technique is useful for improving the efficiency of running single-threaded commands on multi-core systems, as long as the system possesses enough memory to support such parallel processing. Note that in the case of multi-threaded commands (which includes many MRtrix3 commands), it is generally preferable to permit multi-threaded execution of the command on a single input at a time, rather than processing multiple inputs in parallel.') + cmdline.add_example_usage('Excluding specific inputs from execution', + 'for_each *.nii -exclude 001.nii : mrconvert IN PRE.mif', + 'Particularly when a wildcard is used to define the list of inputs for for_each, it is possible in some instances that this list will include one or more strings for which execution should in fact not be performed; for instance, if a command has already been executed for one or more files, and then for_each is being used to execute the same command for all other files. In this case, the -exclude option can be used to effectively remove an item from the list of inputs that would otherwise be included due to the use of a wildcard (and can be used more than once to exclude more than one string). In this particular example, mrconvert is instructed to perform conversions from NIfTI to MRtrix image formats, for all except the first image in the directory. Note that any usages of this option must appear AFTER the list of inputs. Note also that the argument following the -exclude option can alternatively be a regular expression, in which case any inputs for which a match to the expression is found will be excluded from processing.') + cmdline.add_example_usage('Testing the command string substitution', + 'for_each -test * : mrconvert IN PRE.mif', + 'By specifying the -test option, the script will print to the terminal the results of text substitutions for all of the specified inputs, but will not actually execute those commands. 
It can therefore be used to verify that the script is receiving the intended set of inputs, and that the text substitutions on those inputs lead to the intended command strings.') + cmdline.add_argument('inputs', help='Each of the inputs for which processing should be run', nargs='+') + cmdline.add_argument('colon', help='Colon symbol (":") delimiting the for_each inputs & command-line options from the actual command to be executed', type=str, choices=[':']) + cmdline.add_argument('command', help='The command string to run for each input, containing any number of substitutions listed in the Description section', type=str) + cmdline.add_argument('-exclude', help='Exclude one specific input string / all strings matching a regular expression from being processed (see Example Usage)', action='append', metavar='"regex"', nargs=1) + cmdline.add_argument('-test', help='Test the operation of the for_each script, by printing the command strings following string substitution but not actually executing them', action='store_true', default=False) + + # Usage of for_each needs to be handled slightly differently here: + # We want argparse to parse only the contents of the command-line before the colon symbol, + # as these are the items that pertain to the invocation of the for_each script; + # anything after the colon should instead form a part of the command that + # for_each is responsible for executing + try: + index = next(i for i,s in enumerate(sys.argv) if s == ':') + try: + CMDSPLIT = sys.argv[index+1:] + sys.argv = sys.argv[:index+1] + sys.argv.append(' '.join(CMDSPLIT)) + except IndexError: + sys.stderr.write('Erroneous usage: No command specified (colon separator cannot be the last entry provided)\n') + sys.exit(0) + except StopIteration: + if len(sys.argv) > 2: + sys.stderr.write('Erroneous usage: A colon must be used to separate for_each inputs from the command to be executed\n') + sys.exit(0) diff --git a/python/mrtrix3/gen_scheme/__init__.py b/python/mrtrix3/gen_scheme/__init__.py deleted file mode 100644 index 0ffe18cfd5..0000000000 --- a/python/mrtrix3/gen_scheme/__init__.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -set -e - -if [ "$#" -eq 0 ]; then - echo " - gen_scheme: part of the MRtrix package - -SYNOPSIS - - gen_scheme numPE [ bvalue ndir ]... - - numPE the number of phase-encoding directions to be included in - the scheme (most scanners will only support a single PE - direction per sequence, so this will typically be 1). - - bvalue the b-value of the shell - - ndir the number of directions to include in the shell - - -DESCRIPTION - - This script generates a diffusion gradient table according to the - parameters specified. 
For most users, something like the following would be - appropriate: - - gen_scheme 1 0 5 750 20 3000 60 - - which will geneate a multi-shell diffusion gradient table with a single - phase-encode direction, consisting of 5 b=0, 20 b=750, and 60 b=3000 - volumes. - - The gradient table is generated using the following procedure: - - - The directions for each shell are optimally distributed using a bipolar - electrostatic repulsion model (using the command 'dirgen'). - - - These are then split into numPE sets (if numPE != 1) using a brute-force - random search for the most optimally-distributed subsets (using the command - 'dirsplit'). - - - Each of the resulting sets is then rearranged by inversion of individual - directions through the origin (i.e. direction vector x => -x) using a - brute-force random search to find the most optimal combination in terms - of unipolar repulsion: this ensures near-uniform distribution over the - sphere to avoid biases in terms of eddy-current distortions, as - recommended for FSL's EDDY command (this step uses the 'dirflip' command). - - - Finally, all the individual subsets are merged (using the 'dirmerge' - command) into a single gradient table, in such a way as to maintain - near-uniformity upon truncation (in as much as is possible), in both - b-value and directional domains. In other words, the approach aims to - ensure that if the acquisition is cut short, the set of volumes acquired - nonetheless contains the same relative proportions of b-values as - specified, with directions that are near-uniformly distributed. - - The primary output of this command is a file called 'dw_scheme.txt', - consisting of a 5-column table, with one line per volume. Each column - consists of [ x y z b PE ], where [ x y z ] is the unit direction vector, b - is the b-value in unit of s/mm², and PE is a integer ID from 1 to numPE. - - The command also retains all of the subsets generated along the way, which - you can safely delete once the command has completed. Since this can - consist of quite a few files, it is recommended to run this command within - its own temporary folder. - - See also the 'dirstat' command to obtain simple metrics of quality for the - set produced. -" - exit 1 -else - - nPE=$1 - if [ $nPE -ne 1 ] && [ $nPE -ne 2 ] && [ $nPE -ne 4 ]; then - echo "ERROR: numPE should be one of 1, 2, 4" - exit 1 - fi - - shift - # store args for re-use: - ARGS=( "$@" ) - - # print parsed info for sanity-checking: - echo "generating scheme with $nPE phase-encode directions, with:" - while [ ! -z "$1" ]; do - echo " b = $1: $2 directions" - shift 2 - done - - perm="" #"-perm 1000" - - # reset args: - set -- "${ARGS[@]}" - merge="" - - while [ ! -z "$1" ]; do - echo "=====================================" - echo "generating directions for b = $1..." - echo "=====================================" - - merge=$merge" "$1 - - dirgen $2 dirs-b$1-$2.txt -force - if [ $nPE -gt 1 ]; then - dirsplit dirs-b$1-$2.txt dirs-b$1-$2-{1..2}.txt -force $perm - if [ $nPE -gt 2 ]; then - dirsplit dirs-b$1-$2-1.txt dirs-b$1-$2-1{1..2}.txt -force $perm - dirsplit dirs-b$1-$2-2.txt dirs-b$1-$2-2{1..2}.txt -force $perm - # TODO: the rest... 
- for n in dirs-b$1-$2-{1,2}{1,2}.txt; do - dirflip $n ${n%.txt}-flip.txt -force $perm - merge=$merge" "${n%.txt}-flip.txt - done - else - for n in dirs-b$1-$2-{1,2}.txt; do - dirflip $n ${n%.txt}-flip.txt -force $perm - merge=$merge" "${n%.txt}-flip.txt - done - fi - else - dirflip dirs-b$1-$2.txt dirs-b$1-$2-flip.txt -force $perm - merge=$merge" "dirs-b$1-$2-flip.txt - fi - - shift 2 - done - - echo $merge - dirmerge $nPE $merge dw_scheme.txt -force -fi - diff --git a/python/mrtrix3/labelsgmfix/__init__.py b/python/mrtrix3/labelsgmfix/__init__.py index f2d1b63dc0..e69de29bb2 100644 --- a/python/mrtrix3/labelsgmfix/__init__.py +++ b/python/mrtrix3/labelsgmfix/__init__.py @@ -1,165 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Script for 'repairing' a FreeSurfer parcellation image -# FreeSurfer's sub-cortical structure segmentation has been observed to be highly variable -# under scan-rescan conditions. This introduces unwanted variability into the connectome, -# as the parcellations don't overlap with the sub-cortical segmentations provided by -# FIRST for the sake of Anatomically-Constrained Tractography. This script determines the -# node indices that correspond to these structures, and replaces them with estimates -# derived from FIRST. - - - -import math, os - - - -def usage(cmdline): #pylint: disable=unused-variable - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('In a FreeSurfer parcellation image, replace the sub-cortical grey matter structure delineations using FSL FIRST') - cmdline.add_citation('Patenaude, B.; Smith, S. M.; Kennedy, D. N. & Jenkinson, M. A Bayesian model of shape and appearance for subcortical brain segmentation. NeuroImage, 2011, 56, 907-922', is_external=True) - cmdline.add_citation('Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', is_external=True) - cmdline.add_citation('Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. The effects of SIFT on the reproducibility and biological accuracy of the structural connectome. 
NeuroImage, 2015, 104, 253-265') - cmdline.add_argument('parc', help='The input FreeSurfer parcellation image') - cmdline.add_argument('t1', help='The T1 image to be provided to FIRST') - cmdline.add_argument('lut', help='The lookup table file that the parcellated image is based on') - cmdline.add_argument('output', help='The output parcellation image') - cmdline.add_argument('-premasked', action='store_true', default=False, help='Indicate that brain masking has been applied to the T1 input image') - cmdline.add_argument('-sgm_amyg_hipp', action='store_true', default=False, help='Consider the amygdalae and hippocampi as sub-cortical grey matter structures, and also replace their estimates with those from FIRST') - - - - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, fsl, image, path, run, utils #pylint: disable=no-name-in-module, import-outside-toplevel - - if utils.is_windows(): - raise MRtrixError('Script cannot run on Windows due to FSL dependency') - - app.check_output_path(path.from_user(app.ARGS.output, False)) - image.check_3d_nonunity(path.from_user(app.ARGS.t1, False)) - - fsl_path = os.environ.get('FSLDIR', '') - if not fsl_path: - raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') - - first_cmd = fsl.exe_name('run_first_all') - - first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin') - if not os.path.isdir(first_atlas_path): - raise MRtrixError('Atlases required for FSL\'s FIRST program not installed;\nPlease install fsl-first-data using your relevant package manager') - - # Want a mapping between FreeSurfer node names and FIRST structure names - # Just deal with the 5 that are used in ACT; FreeSurfer's hippocampus / amygdala segmentations look good enough. 
- structure_map = { 'L_Accu':'Left-Accumbens-area', 'R_Accu':'Right-Accumbens-area', - 'L_Caud':'Left-Caudate', 'R_Caud':'Right-Caudate', - 'L_Pall':'Left-Pallidum', 'R_Pall':'Right-Pallidum', - 'L_Puta':'Left-Putamen', 'R_Puta':'Right-Putamen', - 'L_Thal':'Left-Thalamus-Proper', 'R_Thal':'Right-Thalamus-Proper' } - if app.ARGS.sgm_amyg_hipp: - structure_map.update({ 'L_Amyg':'Left-Amygdala', 'R_Amyg':'Right-Amygdala', - 'L_Hipp':'Left-Hippocampus', 'R_Hipp':'Right-Hippocampus' }) - - t1_spacing = image.Header(path.from_user(app.ARGS.t1, False)).spacing() - upsample_for_first = False - # If voxel size is 1.25mm or larger, make a guess that the user has erroneously re-gridded their data - if math.pow(t1_spacing[0] * t1_spacing[1] * t1_spacing[2], 1.0/3.0) > 1.225: - app.warn('Voxel size of input T1 image larger than expected for T1-weighted images (' + str(t1_spacing) + '); ' - 'image will be resampled to 1mm isotropic in order to maximise chance of ' - 'FSL FIRST script succeeding') - upsample_for_first = True - - app.make_scratch_dir() - - # Get the parcellation and T1 images into the scratch directory, with conversion of the T1 into the correct format for FSL - run.command('mrconvert ' + path.from_user(app.ARGS.parc) + ' ' + path.to_scratch('parc.mif')) - if upsample_for_first: - run.command('mrgrid ' + path.from_user(app.ARGS.t1) + ' regrid - -voxel 1.0 -interp sinc | mrcalc - 0.0 -max - | mrconvert - ' + path.to_scratch('T1.nii') + ' -strides -1,+2,+3') - else: - run.command('mrconvert ' + path.from_user(app.ARGS.t1) + ' ' + path.to_scratch('T1.nii') + ' -strides -1,+2,+3') - - app.goto_scratch_dir() - - # Run FIRST - first_input_is_brain_extracted = '' - if app.ARGS.premasked: - first_input_is_brain_extracted = ' -b' - run.command(first_cmd + ' -m none -s ' + ','.join(structure_map.keys()) + ' -i T1.nii' + first_input_is_brain_extracted + ' -o first') - fsl.check_first('first', structure_map.keys()) - - # Generate an empty image that will be used to construct the new SGM nodes - run.command('mrcalc parc.mif 0 -min sgm.mif') - - # Read the local connectome LUT file - # This will map a structure name to an index - sgm_lut = {} - sgm_lut_file_name = 'FreeSurferSGM.txt' - sgm_lut_file_path = os.path.join(path.shared_data_path(), path.script_subdir_name(), sgm_lut_file_name) - with open(sgm_lut_file_path, encoding='utf-8') as sgm_lut_file: - for line in sgm_lut_file: - line = line.rstrip() - if line and line[0]!='#': - line = line.split() - sgm_lut[line[1]] = line[0] # This can remain as a string - - # Convert FIRST meshes to node masks - # In this use case, don't want the PVE images; want to threshold at 0.5 - mask_list = [ ] - progress = app.ProgressBar('Generating mask images for SGM structures', len(structure_map)) - for key, value in structure_map.items(): - image_path = key + '_mask.mif' - mask_list.append(image_path) - vtk_in_path = 'first-' + key + '_first.vtk' - run.command('meshconvert ' + vtk_in_path + ' first-' + key + '_transformed.vtk -transform first2real T1.nii') - run.command('mesh2voxel first-' + key + '_transformed.vtk parc.mif - | mrthreshold - ' + image_path + ' -abs 0.5') - # Add to the SGM image; don't worry about overlap for now - node_index = sgm_lut[value] - run.command('mrcalc ' + image_path + ' ' + node_index + ' sgm.mif -if sgm_new.mif') - if not app.CONTINUE_OPTION: - run.function(os.remove, 'sgm.mif') - run.function(os.rename, 'sgm_new.mif', 'sgm.mif') - progress.increment() - progress.done() - - # Detect any overlapping voxels between the SGM masks, and set 
to zero - run.command(['mrmath', mask_list, 'sum', '-', '|', \ - 'mrcalc', '-', '1', '-gt', 'sgm_overlap_mask.mif']) - run.command('mrcalc sgm_overlap_mask.mif 0 sgm.mif -if sgm_masked.mif') - - # Convert the SGM label image to the indices that are required based on the user-provided LUT file - run.command('labelconvert sgm_masked.mif ' + sgm_lut_file_path + ' ' + path.from_user(app.ARGS.lut) + ' sgm_new_labels.mif') - - # For each SGM structure: - # * Figure out what index the structure has been mapped to; this can only be done using mrstats - # * Strip that index from the parcellation image - # * Insert the new delineation of that structure - progress = app.ProgressBar('Replacing SGM parcellations', len(structure_map)) - for struct in structure_map: - image_path = struct + '_mask.mif' - index = int(image.statistics('sgm_new_labels.mif', mask=image_path).median) - run.command('mrcalc parc.mif ' + str(index) + ' -eq 0 parc.mif -if parc_removed.mif') - run.function(os.remove, 'parc.mif') - run.function(os.rename, 'parc_removed.mif', 'parc.mif') - progress.increment() - progress.done() - - # Insert the new delineations of all SGM structures in a single call - # Enforce unsigned integer datatype of output image - run.command('mrcalc sgm_new_labels.mif 0.5 -gt sgm_new_labels.mif parc.mif -if result.mif -datatype uint32') - run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.parc, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/labelsgmfix/execute.py b/python/mrtrix3/labelsgmfix/execute.py new file mode 100644 index 0000000000..1572f2f9e9 --- /dev/null +++ b/python/mrtrix3/labelsgmfix/execute.py @@ -0,0 +1,135 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import math, os +from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module +from mrtrix3 import app, fsl, image, path, run, utils #pylint: disable=no-name-in-module + +def execute(): #pylint: disable=unused-variable + + if utils.is_windows(): + raise MRtrixError('Script cannot run on Windows due to FSL dependency') + + app.check_output_path(path.from_user(app.ARGS.output, False)) + image.check_3d_nonunity(path.from_user(app.ARGS.t1, False)) + + fsl_path = os.environ.get('FSLDIR', '') + if not fsl_path: + raise MRtrixError('Environment variable FSLDIR is not set; please run appropriate FSL configuration script') + + first_cmd = fsl.exe_name('run_first_all') + + first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin') + if not os.path.isdir(first_atlas_path): + raise MRtrixError('Atlases required for FSL\'s FIRST program not installed;\nPlease install fsl-first-data using your relevant package manager') + + # Want a mapping between FreeSurfer node names and FIRST structure names + # Just deal with the 5 that are used in ACT; FreeSurfer's hippocampus / amygdala segmentations look good enough. 
+ structure_map = { 'L_Accu':'Left-Accumbens-area', 'R_Accu':'Right-Accumbens-area', + 'L_Caud':'Left-Caudate', 'R_Caud':'Right-Caudate', + 'L_Pall':'Left-Pallidum', 'R_Pall':'Right-Pallidum', + 'L_Puta':'Left-Putamen', 'R_Puta':'Right-Putamen', + 'L_Thal':'Left-Thalamus-Proper', 'R_Thal':'Right-Thalamus-Proper' } + if app.ARGS.sgm_amyg_hipp: + structure_map.update({ 'L_Amyg':'Left-Amygdala', 'R_Amyg':'Right-Amygdala', + 'L_Hipp':'Left-Hippocampus', 'R_Hipp':'Right-Hippocampus' }) + + t1_spacing = image.Header(path.from_user(app.ARGS.t1, False)).spacing() + upsample_for_first = False + # If voxel size is 1.25mm or larger, make a guess that the user has erroneously re-gridded their data + if math.pow(t1_spacing[0] * t1_spacing[1] * t1_spacing[2], 1.0/3.0) > 1.225: + app.warn('Voxel size of input T1 image larger than expected for T1-weighted images (' + str(t1_spacing) + '); ' + 'image will be resampled to 1mm isotropic in order to maximise chance of ' + 'FSL FIRST script succeeding') + upsample_for_first = True + + app.make_scratch_dir() + + # Get the parcellation and T1 images into the scratch directory, with conversion of the T1 into the correct format for FSL + run.command('mrconvert ' + path.from_user(app.ARGS.parc) + ' ' + path.to_scratch('parc.mif')) + if upsample_for_first: + run.command('mrgrid ' + path.from_user(app.ARGS.t1) + ' regrid - -voxel 1.0 -interp sinc | mrcalc - 0.0 -max - | mrconvert - ' + path.to_scratch('T1.nii') + ' -strides -1,+2,+3') + else: + run.command('mrconvert ' + path.from_user(app.ARGS.t1) + ' ' + path.to_scratch('T1.nii') + ' -strides -1,+2,+3') + + app.goto_scratch_dir() + + # Run FIRST + first_input_is_brain_extracted = '' + if app.ARGS.premasked: + first_input_is_brain_extracted = ' -b' + run.command(first_cmd + ' -m none -s ' + ','.join(structure_map.keys()) + ' -i T1.nii' + first_input_is_brain_extracted + ' -o first') + fsl.check_first('first', structure_map.keys()) + + # Generate an empty image that will be used to construct the new SGM nodes + run.command('mrcalc parc.mif 0 -min sgm.mif') + + # Read the local connectome LUT file + # This will map a structure name to an index + sgm_lut = {} + sgm_lut_file_name = 'FreeSurferSGM.txt' + sgm_lut_file_path = os.path.join(path.shared_data_path(), path.script_subdir_name(), sgm_lut_file_name) + with open(sgm_lut_file_path, encoding='utf-8') as sgm_lut_file: + for line in sgm_lut_file: + line = line.rstrip() + if line and line[0]!='#': + line = line.split() + sgm_lut[line[1]] = line[0] # This can remain as a string + + # Convert FIRST meshes to node masks + # In this use case, don't want the PVE images; want to threshold at 0.5 + mask_list = [ ] + progress = app.ProgressBar('Generating mask images for SGM structures', len(structure_map)) + for key, value in structure_map.items(): + image_path = key + '_mask.mif' + mask_list.append(image_path) + vtk_in_path = 'first-' + key + '_first.vtk' + run.command('meshconvert ' + vtk_in_path + ' first-' + key + '_transformed.vtk -transform first2real T1.nii') + run.command('mesh2voxel first-' + key + '_transformed.vtk parc.mif - | mrthreshold - ' + image_path + ' -abs 0.5') + # Add to the SGM image; don't worry about overlap for now + node_index = sgm_lut[value] + run.command('mrcalc ' + image_path + ' ' + node_index + ' sgm.mif -if sgm_new.mif') + if not app.CONTINUE_OPTION: + run.function(os.remove, 'sgm.mif') + run.function(os.rename, 'sgm_new.mif', 'sgm.mif') + progress.increment() + progress.done() + + # Detect any overlapping voxels between the SGM masks, and set 
to zero + run.command(['mrmath', mask_list, 'sum', '-', '|', \ + 'mrcalc', '-', '1', '-gt', 'sgm_overlap_mask.mif']) + run.command('mrcalc sgm_overlap_mask.mif 0 sgm.mif -if sgm_masked.mif') + + # Convert the SGM label image to the indices that are required based on the user-provided LUT file + run.command('labelconvert sgm_masked.mif ' + sgm_lut_file_path + ' ' + path.from_user(app.ARGS.lut) + ' sgm_new_labels.mif') + + # For each SGM structure: + # * Figure out what index the structure has been mapped to; this can only be done using mrstats + # * Strip that index from the parcellation image + # * Insert the new delineation of that structure + progress = app.ProgressBar('Replacing SGM parcellations', len(structure_map)) + for struct in structure_map: + image_path = struct + '_mask.mif' + index = int(image.statistics('sgm_new_labels.mif', mask=image_path).median) + run.command('mrcalc parc.mif ' + str(index) + ' -eq 0 parc.mif -if parc_removed.mif') + run.function(os.remove, 'parc.mif') + run.function(os.rename, 'parc_removed.mif', 'parc.mif') + progress.increment() + progress.done() + + # Insert the new delineations of all SGM structures in a single call + # Enforce unsigned integer datatype of output image + run.command('mrcalc sgm_new_labels.mif 0.5 -gt sgm_new_labels.mif parc.mif -if result.mif -datatype uint32') + run.command('mrconvert result.mif ' + path.from_user(app.ARGS.output), mrconvert_keyval=path.from_user(app.ARGS.parc, False), force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/labelsgmfix/usage.py b/python/mrtrix3/labelsgmfix/usage.py new file mode 100644 index 0000000000..4c3b63f64f --- /dev/null +++ b/python/mrtrix3/labelsgmfix/usage.py @@ -0,0 +1,27 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('In a FreeSurfer parcellation image, replace the sub-cortical grey matter structure delineations using FSL FIRST') + cmdline.add_citation('Patenaude, B.; Smith, S. M.; Kennedy, D. N. & Jenkinson, M. A Bayesian model of shape and appearance for subcortical brain segmentation. NeuroImage, 2011, 56, 907-922', is_external=True) + cmdline.add_citation('Smith, S. M.; Jenkinson, M.; Woolrich, M. W.; Beckmann, C. F.; Behrens, T. E.; Johansen-Berg, H.; Bannister, P. R.; De Luca, M.; Drobnjak, I.; Flitney, D. E.; Niazy, R. K.; Saunders, J.; Vickers, J.; Zhang, Y.; De Stefano, N.; Brady, J. M. & Matthews, P. M. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 2004, 23, S208-S219', is_external=True) + cmdline.add_citation('Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. The effects of SIFT on the reproducibility and biological accuracy of the structural connectome. 
NeuroImage, 2015, 104, 253-265') + cmdline.add_argument('parc', help='The input FreeSurfer parcellation image') + cmdline.add_argument('t1', help='The T1 image to be provided to FIRST') + cmdline.add_argument('lut', help='The lookup table file that the parcellated image is based on') + cmdline.add_argument('output', help='The output parcellation image') + cmdline.add_argument('-premasked', action='store_true', default=False, help='Indicate that brain masking has been applied to the T1 input image') + cmdline.add_argument('-sgm_amyg_hipp', action='store_true', default=False, help='Consider the amygdalae and hippocampi as sub-cortical grey matter structures, and also replace their estimates with those from FIRST') diff --git a/python/mrtrix3/mask2glass/__init__.py b/python/mrtrix3/mask2glass/__init__.py index 13ddbae8fd..e69de29bb2 100644 --- a/python/mrtrix3/mask2glass/__init__.py +++ b/python/mrtrix3/mask2glass/__init__.py @@ -1,77 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -def usage(cmdline): #pylint: disable=unused-variable - cmdline.set_author('Remika Mito (remika.mito@florey.edu.au) and Robert E. Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Create a glass brain from mask input') - cmdline.add_description('The output of this command is a glass brain image, which can be viewed ' - 'using the volume render option in mrview, and used for visualisation purposes to view results in 3D.') - cmdline.add_description('While the name of this script indicates that a binary mask image is required as input, it can ' - 'also operate on a floating-point image. 
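For reference against the argument list declared above, a labelsgmfix call might look like the following sketch; all file names are hypothetical, and it assumes the wrapped command is on PATH with FSL configured:

import subprocess

subprocess.run(['labelsgmfix',
                'nodes.mif',        # parc: the FreeSurfer-based parcellation image
                'T1.mif',           # t1: image handed to FSL FIRST
                'fs_default.txt',   # lut: lookup table the parcellation is based on
                'nodes_fixed.mif',  # output parcellation
                '-premasked'],      # T1 input has already been brain-masked
               check=True)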
One way in which this can be exploited is to compute the mean ' - 'of all subject masks within template space, in which case this script will produce a smoother result ' - 'than if a binary template mask were to be used as input.') - cmdline.add_argument('input', help='The input mask image') - cmdline.add_argument('output', help='The output glass brain image') - cmdline.add_argument('-dilate', type=int, default=2, help='Provide number of passes for dilation step; default = 2') - cmdline.add_argument('-scale', type=float, default=2.0, help='Provide resolution upscaling value; default = 2.0') - cmdline.add_argument('-smooth', type=float, default=1.0, help='Provide standard deviation of smoothing (in mm); default = 1.0') - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel - - app.check_output_path(app.ARGS.output) - - # import data to scratch directory - app.make_scratch_dir() - run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('in.mif')) - app.goto_scratch_dir() - - dilate_option = ' -npass ' + str(app.ARGS.dilate) - scale_option = ' -scale ' + str(app.ARGS.scale) - smooth_option = ' -stdev ' + str(app.ARGS.smooth) - threshold_option = ' -abs 0.5' - - # check whether threshold should be fixed at 0.5 or computed automatically from the data - if image.Header('in.mif').datatype() == 'Bit': - app.debug('Input image is bitwise; no need to check image intensities') - else: - app.debug('Input image is not bitwise; checking distribution of image intensities') - result_stat = image.statistics('in.mif') - if not (result_stat.min == 0.0 and result_stat.max == 1.0): - app.warn('Input image contains values outside of range [0.0, 1.0]; threshold will not be 0.5, but will instead be determined from the image data') - threshold_option = '' - else: - app.debug('Input image values reside within [0.0, 1.0] range; fixed threshold of 0.5 will be used') - - # run upscaling step - run.command('mrgrid in.mif regrid upsampled.mif' + scale_option) - - # run smoothing step - run.command('mrfilter upsampled.mif smooth upsampled_smooth.mif' + smooth_option) - - # threshold image - run.command('mrthreshold upsampled_smooth.mif upsampled_smooth_thresh.mif' + threshold_option) - - # dilate image for subtraction - run.command('maskfilter upsampled_smooth_thresh.mif dilate upsampled_smooth_thresh_dilate.mif' + dilate_option) - - # create border - run.command('mrcalc upsampled_smooth_thresh_dilate.mif upsampled_smooth_thresh.mif -xor out.mif -datatype bit') - - # create output image - run.command('mrconvert out.mif ' + path.from_user(app.ARGS.output), - mrconvert_keyval=path.from_user(app.ARGS.input, False), - force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/mask2glass/execute.py b/python/mrtrix3/mask2glass/execute.py new file mode 100644 index 0000000000..902ef3b759 --- /dev/null +++ b/python/mrtrix3/mask2glass/execute.py @@ -0,0 +1,61 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
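Stripped of the scratch-directory handling, the execute() body above (and its copy in the new execute.py below) boils down to five image operations. A sketch of the same chain with the default option values substituted; file names are hypothetical and the MRtrix3 commands are assumed to be on PATH:

import subprocess

commands = [
    # upscale by the default factor of 2.0
    'mrgrid mask.mif regrid upsampled.mif -scale 2.0',
    # smooth with the default 1.0 mm standard deviation
    'mrfilter upsampled.mif smooth upsampled_smooth.mif -stdev 1.0',
    # fixed 0.5 threshold (used when the input values lie within [0.0, 1.0])
    'mrthreshold upsampled_smooth.mif upsampled_smooth_thresh.mif -abs 0.5',
    # dilate with the default 2 passes
    'maskfilter upsampled_smooth_thresh.mif dilate upsampled_smooth_thresh_dilate.mif -npass 2',
    # XOR of the dilated and original masks keeps only the border shell: the glass brain
    'mrcalc upsampled_smooth_thresh_dilate.mif upsampled_smooth_thresh.mif -xor glass.mif -datatype bit',
]
for command in commands:
    subprocess.run(command.split(), check=True)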
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def execute(): #pylint: disable=unused-variable + from mrtrix3 import app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel + + app.check_output_path(app.ARGS.output) + + # import data to scratch directory + app.make_scratch_dir() + run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('in.mif')) + app.goto_scratch_dir() + + dilate_option = ' -npass ' + str(app.ARGS.dilate) + scale_option = ' -scale ' + str(app.ARGS.scale) + smooth_option = ' -stdev ' + str(app.ARGS.smooth) + threshold_option = ' -abs 0.5' + + # check whether threshold should be fixed at 0.5 or computed automatically from the data + if image.Header('in.mif').datatype() == 'Bit': + app.debug('Input image is bitwise; no need to check image intensities') + else: + app.debug('Input image is not bitwise; checking distribution of image intensities') + result_stat = image.statistics('in.mif') + if not (result_stat.min == 0.0 and result_stat.max == 1.0): + app.warn('Input image contains values outside of range [0.0, 1.0]; threshold will not be 0.5, but will instead be determined from the image data') + threshold_option = '' + else: + app.debug('Input image values reside within [0.0, 1.0] range; fixed threshold of 0.5 will be used') + + # run upscaling step + run.command('mrgrid in.mif regrid upsampled.mif' + scale_option) + + # run smoothing step + run.command('mrfilter upsampled.mif smooth upsampled_smooth.mif' + smooth_option) + + # threshold image + run.command('mrthreshold upsampled_smooth.mif upsampled_smooth_thresh.mif' + threshold_option) + + # dilate image for subtraction + run.command('maskfilter upsampled_smooth_thresh.mif dilate upsampled_smooth_thresh_dilate.mif' + dilate_option) + + # create border + run.command('mrcalc upsampled_smooth_thresh_dilate.mif upsampled_smooth_thresh.mif -xor out.mif -datatype bit') + + # create output image + run.command('mrconvert out.mif ' + path.from_user(app.ARGS.output), + mrconvert_keyval=path.from_user(app.ARGS.input, False), + force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/mask2glass/usage.py b/python/mrtrix3/mask2glass/usage.py new file mode 100644 index 0000000000..12ee80513c --- /dev/null +++ b/python/mrtrix3/mask2glass/usage.py @@ -0,0 +1,29 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Remika Mito (remika.mito@florey.edu.au) and Robert E. 
Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Create a glass brain from mask input') + cmdline.add_description('The output of this command is a glass brain image, which can be viewed ' + 'using the volume render option in mrview, and used for visualisation purposes to view results in 3D.') + cmdline.add_description('While the name of this script indicates that a binary mask image is required as input, it can ' + 'also operate on a floating-point image. One way in which this can be exploited is to compute the mean ' + 'of all subject masks within template space, in which case this script will produce a smoother result ' + 'than if a binary template mask were to be used as input.') + cmdline.add_argument('input', help='The input mask image') + cmdline.add_argument('output', help='The output glass brain image') + cmdline.add_argument('-dilate', type=int, default=2, help='Provide number of passes for dilation step; default = 2') + cmdline.add_argument('-scale', type=float, default=2.0, help='Provide resolution upscaling value; default = 2.0') + cmdline.add_argument('-smooth', type=float, default=1.0, help='Provide standard deviation of smoothing (in mm); default = 1.0') diff --git a/python/mrtrix3/mrtrix_cleanup/__init__.py b/python/mrtrix3/mrtrix_cleanup/__init__.py index 0f8282f1a0..e69de29bb2 100644 --- a/python/mrtrix3/mrtrix_cleanup/__init__.py +++ b/python/mrtrix3/mrtrix_cleanup/__init__.py @@ -1,132 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - - -import math, os, re, shutil - - -POSTFIXES = [ 'B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB' ] - - -def usage(cmdline): #pylint: disable=unused-variable - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') - cmdline.set_synopsis('Clean up residual temporary files & scratch directories from MRtrix3 commands') - cmdline.add_description('This script will search the file system at the specified location (and in sub-directories thereof) for any temporary files or directories that have been left behind by failed or terminated MRtrix3 commands, and attempt to delete them.') - cmdline.add_description('Note that the script\'s search for temporary items will not extend beyond the user-specified filesystem location. This means that any built-in or user-specified default location for MRtrix3 piped data and scripts will not be automatically searched. Cleanup of such locations should instead be performed explicitly: e.g. 
"mrtrix_cleanup /tmp/" to remove residual piped images from /tmp/.') - cmdline.add_description('This script should not be run while other MRtrix3 commands are being executed: it may delete temporary items during operation that may lead to unexpected behaviour.') - cmdline.add_argument('path', help='Path from which to commence filesystem search') - cmdline.add_argument('-test', action='store_true', help='Run script in test mode: will list identified files / directories, but not attempt to delete them') - cmdline.add_argument('-failed', metavar='file', nargs=1, help='Write list of items that the script failed to delete to a text file') - cmdline.flag_mutually_exclusive_options([ 'test', 'failed' ]) - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import CONFIG #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel - - file_regex = re.compile(r"^mrtrix-tmp-[a-zA-Z0-9]{6}\..*$") - file_config_regex = re.compile(r"^" + CONFIG['TmpFilePrefix'] + r"[a-zA-Z0-9]{6}\..*$") \ - if 'TmpFilePrefix' in CONFIG and CONFIG['TmpFilePrefix'] != 'mrtrix-tmp-' \ - else None - dir_regex = re.compile(r"^\w+-tmp-[a-zA-Z0-9]{6}$") - dir_config_regex = re.compile(r"^" + CONFIG['ScriptScratchPrefix'] + r"[a-zA-Z0-9]{6}$") \ - if 'ScriptScratchPrefix' in CONFIG \ - else None - - files_to_delete = [ ] - dirs_to_delete = [ ] - root_dir = os.path.abspath(app.ARGS.path) - print_search_dir = ('' if os.path.abspath(os.getcwd()) == root_dir else ' from ' + root_dir) - def file_search(regex): - files_to_delete.extend([ os.path.join(dirname, filename) for filename in filter(regex.search, filelist) ]) - def dir_search(regex): - items = set(filter(regex.search, subdirlist)) - if items: - dirs_to_delete.extend([os.path.join(dirname, subdirname) for subdirname in items]) - subdirlist[:] = list(set(subdirlist)-items) - def print_msg(): - return 'Searching' + print_search_dir + ' (found ' + str(len(files_to_delete)) + ' files, ' + str(len(dirs_to_delete)) + ' directories)' - progress = app.ProgressBar(print_msg) - for dirname, subdirlist, filelist in os.walk(root_dir): - file_search(file_regex) - if file_config_regex: - file_search(file_config_regex) - dir_search(dir_regex) - if dir_config_regex: - dir_search(dir_config_regex) - progress.increment() - progress.done() - - if app.ARGS.test: - if files_to_delete: - app.console('Files identified (' + str(len(files_to_delete)) + '):') - for filepath in files_to_delete: - app.console(' ' + filepath) - else: - app.console('No files' + ('' if dirs_to_delete else ' or directories') + ' found') - if dirs_to_delete: - app.console('Directories identified (' + str(len(dirs_to_delete)) + '):') - for dirpath in dirs_to_delete: - app.console(' ' + dirpath) - elif files_to_delete: - app.console('No directories identified') - elif files_to_delete or dirs_to_delete: - progress = app.ProgressBar('Deleting temporaries (' + str(len(files_to_delete)) + ' files, ' + str(len(dirs_to_delete)) + ' directories)', len(files_to_delete) + len(dirs_to_delete)) - except_list = [ ] - size_deleted = 0 - for filepath in files_to_delete: - filesize = 0 - try: - filesize = os.path.getsize(filepath) - os.remove(filepath) - size_deleted += filesize - except OSError: - except_list.append(filepath) - progress.increment() - for dirpath in dirs_to_delete: - dirsize = 0 - try: - for dirname, subdirlist, filelist in os.walk(dirpath): - dirsize += sum(os.path.getsize(filename) for filename in filelist) - except 
OSError: - pass - try: - shutil.rmtree(dirpath) - size_deleted += dirsize - except OSError: - except_list.append(dirpath) - progress.increment() - progress.done() - postfix_index = int(math.floor(math.log(size_deleted, 1024))) if size_deleted else 0 - if postfix_index: - size_deleted = round(size_deleted / math.pow(1024, postfix_index), 2) - def print_freed(): - return ' (' + str(size_deleted) + POSTFIXES[postfix_index] + ' freed)' if size_deleted else '' - if except_list: - app.console(str(len(files_to_delete) + len(dirs_to_delete) - len(except_list)) + ' of ' + str(len(files_to_delete) + len(dirs_to_delete)) + ' items erased' + print_freed()) - if app.ARGS.failed: - with open(app.ARGS.failed, 'w', encoding='utf-8') as outfile: - for item in except_list: - outfile.write(item + '\n') - app.console('List of items script failed to erase written to file "' + app.ARGS.failed + '"') - else: - app.console('Items that could not be erased:') - for item in except_list: - app.console(' ' + item) - else: - app.console('All items deleted successfully' + print_freed()) - else: - app.console('No files or directories found') diff --git a/python/mrtrix3/mrtrix_cleanup/execute.py b/python/mrtrix3/mrtrix_cleanup/execute.py new file mode 100644 index 0000000000..9d7e798de3 --- /dev/null +++ b/python/mrtrix3/mrtrix_cleanup/execute.py @@ -0,0 +1,117 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. 
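Both the deleted execute() above and its replacement below report the amount of space freed by stepping through the POSTFIXES table with a base-1024 logarithm. A minimal standalone sketch of that conversion (the helper name is hypothetical; the script itself performs the computation inline):

import math

POSTFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']

def human_size(size_in_bytes):
    # Largest power of 1024 that does not exceed the size
    index = int(math.floor(math.log(size_in_bytes, 1024))) if size_in_bytes else 0
    value = round(size_in_bytes / math.pow(1024, index), 2) if index else size_in_bytes
    return str(value) + POSTFIXES[index]

assert human_size(0) == '0B'
assert human_size(2048) == '2.0KB'
assert human_size(3 * 1024 ** 2) == '3.0MB'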
+ + +import math, os, re, shutil +from mrtrix3 import CONFIG #pylint: disable=no-name-in-module +from mrtrix3 import app #pylint: disable=no-name-in-module + +POSTFIXES = [ 'B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB' ] + +def execute(): #pylint: disable=unused-variable + + file_regex = re.compile(r"^mrtrix-tmp-[a-zA-Z0-9]{6}\..*$") + file_config_regex = re.compile(r"^" + CONFIG['TmpFilePrefix'] + r"[a-zA-Z0-9]{6}\..*$") \ + if 'TmpFilePrefix' in CONFIG and CONFIG['TmpFilePrefix'] != 'mrtrix-tmp-' \ + else None + dir_regex = re.compile(r"^\w+-tmp-[a-zA-Z0-9]{6}$") + dir_config_regex = re.compile(r"^" + CONFIG['ScriptScratchPrefix'] + r"[a-zA-Z0-9]{6}$") \ + if 'ScriptScratchPrefix' in CONFIG \ + else None + + files_to_delete = [ ] + dirs_to_delete = [ ] + root_dir = os.path.abspath(app.ARGS.path) + print_search_dir = ('' if os.path.abspath(os.getcwd()) == root_dir else ' from ' + root_dir) + def file_search(regex): + files_to_delete.extend([ os.path.join(dirname, filename) for filename in filter(regex.search, filelist) ]) + def dir_search(regex): + items = set(filter(regex.search, subdirlist)) + if items: + dirs_to_delete.extend([os.path.join(dirname, subdirname) for subdirname in items]) + subdirlist[:] = list(set(subdirlist)-items) + def print_msg(): + return 'Searching' + print_search_dir + ' (found ' + str(len(files_to_delete)) + ' files, ' + str(len(dirs_to_delete)) + ' directories)' + progress = app.ProgressBar(print_msg) + for dirname, subdirlist, filelist in os.walk(root_dir): + file_search(file_regex) + if file_config_regex: + file_search(file_config_regex) + dir_search(dir_regex) + if dir_config_regex: + dir_search(dir_config_regex) + progress.increment() + progress.done() + + if app.ARGS.test: + if files_to_delete: + app.console('Files identified (' + str(len(files_to_delete)) + '):') + for filepath in files_to_delete: + app.console(' ' + filepath) + else: + app.console('No files' + ('' if dirs_to_delete else ' or directories') + ' found') + if dirs_to_delete: + app.console('Directories identified (' + str(len(dirs_to_delete)) + '):') + for dirpath in dirs_to_delete: + app.console(' ' + dirpath) + elif files_to_delete: + app.console('No directories identified') + elif files_to_delete or dirs_to_delete: + progress = app.ProgressBar('Deleting temporaries (' + str(len(files_to_delete)) + ' files, ' + str(len(dirs_to_delete)) + ' directories)', len(files_to_delete) + len(dirs_to_delete)) + except_list = [ ] + size_deleted = 0 + for filepath in files_to_delete: + filesize = 0 + try: + filesize = os.path.getsize(filepath) + os.remove(filepath) + size_deleted += filesize + except OSError: + except_list.append(filepath) + progress.increment() + for dirpath in dirs_to_delete: + dirsize = 0 + try: + for dirname, subdirlist, filelist in os.walk(dirpath): + dirsize += sum(os.path.getsize(filename) for filename in filelist) + except OSError: + pass + try: + shutil.rmtree(dirpath) + size_deleted += dirsize + except OSError: + except_list.append(dirpath) + progress.increment() + progress.done() + postfix_index = int(math.floor(math.log(size_deleted, 1024))) if size_deleted else 0 + if postfix_index: + size_deleted = round(size_deleted / math.pow(1024, postfix_index), 2) + def print_freed(): + return ' (' + str(size_deleted) + POSTFIXES[postfix_index] + ' freed)' if size_deleted else '' + if except_list: + app.console(str(len(files_to_delete) + len(dirs_to_delete) - len(except_list)) + ' of ' + str(len(files_to_delete) + len(dirs_to_delete)) + ' items erased' + print_freed()) + if 
app.ARGS.failed: + with open(app.ARGS.failed, 'w', encoding='utf-8') as outfile: + for item in except_list: + outfile.write(item + '\n') + app.console('List of items script failed to erase written to file "' + app.ARGS.failed + '"') + else: + app.console('Items that could not be erased:') + for item in except_list: + app.console(' ' + item) + else: + app.console('All items deleted successfully' + print_freed()) + else: + app.console('No files or directories found') diff --git a/python/mrtrix3/mrtrix_cleanup/usage.py b/python/mrtrix3/mrtrix_cleanup/usage.py new file mode 100644 index 0000000000..ae35dcf91e --- /dev/null +++ b/python/mrtrix3/mrtrix_cleanup/usage.py @@ -0,0 +1,25 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') + cmdline.set_synopsis('Clean up residual temporary files & scratch directories from MRtrix3 commands') + cmdline.add_description('This script will search the file system at the specified location (and in sub-directories thereof) for any temporary files or directories that have been left behind by failed or terminated MRtrix3 commands, and attempt to delete them.') + cmdline.add_description('Note that the script\'s search for temporary items will not extend beyond the user-specified filesystem location. This means that any built-in or user-specified default location for MRtrix3 piped data and scripts will not be automatically searched. Cleanup of such locations should instead be performed explicitly: e.g. "mrtrix_cleanup /tmp/" to remove residual piped images from /tmp/.') + cmdline.add_description('This script should not be run while other MRtrix3 commands are being executed: it may delete temporary items during operation that may lead to unexpected behaviour.') + cmdline.add_argument('path', help='Path from which to commence filesystem search') + cmdline.add_argument('-test', action='store_true', help='Run script in test mode: will list identified files / directories, but not attempt to delete them') + cmdline.add_argument('-failed', metavar='file', nargs=1, help='Write list of items that the script failed to delete to a text file') + cmdline.flag_mutually_exclusive_options([ 'test', 'failed' ]) diff --git a/python/mrtrix3/notfound/__init__.py b/python/mrtrix3/notfound/__init__.py deleted file mode 100644 index 828163e110..0000000000 --- a/python/mrtrix3/notfound/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
-# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -if [ $# -eq 0 ]; then - cat << 'HELP_PAGE' -USAGE: - $ notfound base_directory search_string - - This is a simple script designed to help identify subjects that do not yet have a specific file generated. For example when adding new patients to a study. It is designed to be used when each patient has a folder containing their images. - - For example: - $ notfound study_folder fod.mif - will identify all subject folders (e.g. study_folder/subject001, study_folder/subject002, ...) that do NOT contain a file fod.mif - - Note that this can be used in combination with the foreach script. For example: - $ foreach $(notfound study_folder fod.mif) : dwi2fod IN/dwi.mif IN/response.txt IN/fod.mif -HELP_PAGE - -exit 1 - -fi - -find ${1} -mindepth 1 -maxdepth 1 \( -type l -o -type d \) '!' -exec test -e "{}/${2}" ';' -print - diff --git a/python/mrtrix3/population_template/__init__.py b/python/mrtrix3/population_template/__init__.py index 5d35ec27b6..9cdb86ccd2 100644 --- a/python/mrtrix3/population_template/__init__.py +++ b/python/mrtrix3/population_template/__init__.py @@ -13,10 +13,6 @@ # # For more details, see http://www.mrtrix.org/. -# Generates an unbiased group-average template via image registration of images to a midway space. - -import json, math, os, re, shlex, shutil, sys - DEFAULT_RIGID_SCALES = [0.3,0.4,0.6,0.8,1.0,1.0] DEFAULT_RIGID_LMAX = [2,2,2,4,4,4] DEFAULT_AFFINE_SCALES = [0.3,0.4,0.6,0.8,1.0,1.0] @@ -31,1453 +27,3 @@ AGGREGATION_MODES = ['mean', 'median'] IMAGEEXT = ['mif', 'nii', 'mih', 'mgh', 'mgz', 'img', 'hdr'] - -def usage(cmdline): #pylint: disable=unused-variable - cmdline.set_author('David Raffelt (david.raffelt@florey.edu.au) & Max Pietsch (maximilian.pietsch@kcl.ac.uk) & Thijs Dhollander (thijs.dhollander@gmail.com)') - - cmdline.set_synopsis('Generates an unbiased group-average template from a series of images') - cmdline.add_description('First a template is optimised with linear registration (rigid and/or affine, both by default), then non-linear registration is used to optimise the template further.') - cmdline.add_argument('input_dir', nargs='+', help='Directory containing all input images of a given contrast') - cmdline.add_argument('template', help='Output template image') - - cmdline.add_example_usage('Multi-contrast registration', - 'population_template input_WM_ODFs/ output_WM_template.mif input_GM_ODFs/ output_GM_template.mif', - 'When performing multi-contrast registration, the input directory and corresponding output template ' - 'image for a given contrast are to be provided as a pair, ' - 'with the pairs corresponding to different contrasts provided sequentially.') - - options = cmdline.add_argument_group('Multi-contrast options') - options.add_argument('-mc_weight_initial_alignment', help='Weight contribution of each contrast to the initial alignment. Comma separated, default: 1.0') - options.add_argument('-mc_weight_rigid', help='Weight contribution of each contrast to the objective of rigid registration. 
Comma separated, default: 1.0') - options.add_argument('-mc_weight_affine', help='Weight contribution of each contrast to the objective of affine registration. Comma separated, default: 1.0') - options.add_argument('-mc_weight_nl', help='Weight contribution of each contrast to the objective of nonlinear registration. Comma separated, default: 1.0') - - linoptions = cmdline.add_argument_group('Options for the linear registration') - linoptions.add_argument('-linear_no_pause', action='store_true', help='Do not pause the script if a linear registration seems implausible') - linoptions.add_argument('-linear_no_drift_correction', action='store_true', help='Deactivate correction of template appearance (scale and shear) over iterations') - linoptions.add_argument('-linear_estimator', help='Specify estimator for intensity difference metric. Valid choices are: l1 (least absolute: |x|), l2 (ordinary least squares), lp (least powers: |x|^1.2), Default: None (no robust estimator used)') - linoptions.add_argument('-rigid_scale', help='Specify the multi-resolution pyramid used to build the rigid template, in the form of a list of scale factors (default: %s). This and affine_scale implicitly define the number of template levels' % ','.join([str(x) for x in DEFAULT_RIGID_SCALES])) - linoptions.add_argument('-rigid_lmax', help='Specify the lmax used for rigid registration for each scale factor, in the form of a list of integers (default: %s). The list must be the same length as the linear_scale factor list' % ','.join([str(x) for x in DEFAULT_RIGID_LMAX])) - linoptions.add_argument('-rigid_niter', help='Specify the number of registration iterations used within each level before updating the template, in the form of a list of integers (default:50 for each scale). This must be a single number or a list of same length as the linear_scale factor list') - linoptions.add_argument('-affine_scale', help='Specify the multi-resolution pyramid used to build the affine template, in the form of a list of scale factors (default: %s). This and rigid_scale implicitly define the number of template levels' % ','.join([str(x) for x in DEFAULT_AFFINE_SCALES])) - linoptions.add_argument('-affine_lmax', help='Specify the lmax used for affine registration for each scale factor, in the form of a list of integers (default: %s). The list must be the same length as the linear_scale factor list' % ','.join([str(x) for x in DEFAULT_AFFINE_LMAX])) - linoptions.add_argument('-affine_niter', help='Specify the number of registration iterations used within each level before updating the template, in the form of a list of integers (default:500 for each scale). This must be a single number or a list of same length as the linear_scale factor list') - - nloptions = cmdline.add_argument_group('Options for the non-linear registration') - nloptions.add_argument('-nl_scale', help='Specify the multi-resolution pyramid used to build the non-linear template, in the form of a list of scale factors (default: %s). This implicitly defines the number of template levels' % ','.join([str(x) for x in DEFAULT_NL_SCALES])) - nloptions.add_argument('-nl_lmax', help='Specify the lmax used for non-linear registration for each scale factor, in the form of a list of integers (default: %s). 
The list must be the same length as the nl_scale factor list' % ','.join([str(x) for x in DEFAULT_NL_LMAX])) - nloptions.add_argument('-nl_niter', help='Specify the number of registration iterations used within each level before updating the template, in the form of a list of integers (default: %s). The list must be the same length as the nl_scale factor list' % ','.join([str(x) for x in DEFAULT_NL_NITER])) - nloptions.add_argument('-nl_update_smooth', default='2.0', help='Regularise the gradient update field with Gaussian smoothing (standard deviation in voxel units, Default 2.0 x voxel_size)') - nloptions.add_argument('-nl_disp_smooth', default='1.0', help='Regularise the displacement field with Gaussian smoothing (standard deviation in voxel units, Default 1.0 x voxel_size)') - nloptions.add_argument('-nl_grad_step', default='0.5', help='The gradient step size for non-linear registration (Default: 0.5)') - - options = cmdline.add_argument_group('Input, output and general options') - options.add_argument('-type', help='Specify the types of registration stages to perform. Options are "rigid" (perform rigid registration only which might be useful for intra-subject registration in longitudinal analysis), "affine" (perform affine registration) and "nonlinear" as well as cominations of registration types: %s. Default: rigid_affine_nonlinear' % ', '.join('"' + x + '"' for x in REGISTRATION_MODES if "_" in x), default='rigid_affine_nonlinear') - options.add_argument('-voxel_size', help='Define the template voxel size in mm. Use either a single value for isotropic voxels or 3 comma separated values.') - options.add_argument('-initial_alignment', default='mass', help='Method of alignment to form the initial template. Options are "mass" (default), "robust_mass" (requires masks), "geometric" and "none".') - options.add_argument('-mask_dir', help='Optionally input a set of masks inside a single directory, one per input image (with the same file name prefix). Using masks will speed up registration significantly. Note that masks are used for registration, not for aggregation. To exclude areas from aggregation, NaN-mask your input images.') - options.add_argument('-warp_dir', help='Output a directory containing warps from each input to the template. If the folder does not exist it will be created') - options.add_argument('-transformed_dir', help='Output a directory containing the input images transformed to the template. If the folder does not exist it will be created. For multi-contrast registration, provide comma separated list of directories.') - options.add_argument('-linear_transformations_dir', help='Output a directory containing the linear transformations used to generate the template. If the folder does not exist it will be created') - options.add_argument('-template_mask', help='Output a template mask. Only works if -mask_dir has been input. The template mask is computed as the intersection of all subject masks in template space.') - options.add_argument('-noreorientation', action='store_true', help='Turn off FOD reorientation in mrregister. Reorientation is on by default if the number of volumes in the 4th dimension corresponds to the number of coefficients in an antipodally symmetric spherical harmonic series (i.e. 6, 15, 28, 45, 66 etc)') - options.add_argument('-leave_one_out', help='Register each input image to a template that does not contain that image. Valid choices: 0, 1, auto. 
(Default: auto (true if n_subjects larger than 2 and smaller than 15)) ') - options.add_argument('-aggregate', help='Measure used to aggregate information from transformed images to the template image. Valid choices: %s. Default: mean' % ', '.join(AGGREGATION_MODES)) - options.add_argument('-aggregation_weights', help='Comma separated file containing weights used for weighted image aggregation. Each row must contain the identifiers of the input image and its weight. Note that this weighs intensity values not transformations (shape).') - options.add_argument('-nanmask', action='store_true', help='Optionally apply masks to (transformed) input images using NaN values to specify include areas for registration and aggregation. Only works if -mask_dir has been input.') - options.add_argument('-copy_input', action='store_true', help='Copy input images and masks into local scratch directory.') - options.add_argument('-delete_temporary_files', action='store_true', help='Delete temporary files from scratch directory during template creation.') - -# ENH: add option to initialise warps / transformations - - - -def abspath(arg, *args): - return os.path.abspath(os.path.join(arg, *args)) - - -def relpath(arg, *args): - from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel - return os.path.relpath(os.path.join(arg, *args), app.WORKING_DIR) - - -def copy(src, dst, follow_symlinks=True): - """Copy data but do not set mode bits. Return the file's destination. - - mimics shutil.copy but without setting mode bits as shutil.copymode can fail on exotic mounts - (observed on cifs with file_mode=0777). - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - if sys.version_info[0] > 2: - shutil.copyfile(src, dst, follow_symlinks=follow_symlinks) # pylint: disable=unexpected-keyword-arg - else: - shutil.copyfile(src, dst) - return dst - - -def check_linear_transformation(transformation, cmd, max_scaling=0.5, max_shear=0.2, max_rot=None, pause_on_warn=True): - from mrtrix3 import app, run, utils #pylint: disable=no-name-in-module, import-outside-toplevel - if max_rot is None: - max_rot = 2 * math.pi - - good = True - run.command('transformcalc ' + transformation + ' decompose ' + transformation + 'decomp') - if not os.path.isfile(transformation + 'decomp'): # does not exist if run with -continue option - app.console(transformation + 'decomp not found. 
skipping check') - return True - data = utils.load_keyval(transformation + 'decomp') - run.function(os.remove, transformation + 'decomp') - scaling = [float(value) for value in data['scaling']] - if any(a < 0 for a in scaling) or any(a > (1 + max_scaling) for a in scaling) or any( - a < (1 - max_scaling) for a in scaling): - app.warn("large scaling (" + str(scaling) + ") in " + transformation) - good = False - shear = [float(value) for value in data['shear']] - if any(abs(a) > max_shear for a in shear): - app.warn("large shear (" + str(shear) + ") in " + transformation) - good = False - rot_angle = float(data['angle_axis'][0]) - if abs(rot_angle) > max_rot: - app.warn("large rotation (" + str(rot_angle) + ") in " + transformation) - good = False - - if not good: - newcmd = [] - what = '' - init_rotation_found = False - skip = 0 - for element in cmd.split(): - if skip: - skip -= 1 - continue - if '_init_rotation' in element: - init_rotation_found = True - if '_init_matrix' in element: - skip = 1 - continue - if 'affine_scale' in element: - assert what != 'rigid' - what = 'affine' - elif 'rigid_scale' in element: - assert what != 'affine' - what = 'rigid' - newcmd.append(element) - newcmd = " ".join(newcmd) - if not init_rotation_found: - app.console("replacing the transformation obtained with:") - app.console(cmd) - if what: - newcmd += ' -' + what + '_init_translation mass -' + what + '_init_rotation search' - app.console("by the one obtained with:") - app.console(newcmd) - run.command(newcmd, force=True) - return check_linear_transformation(transformation, newcmd, max_scaling, max_shear, max_rot, pause_on_warn=pause_on_warn) - if pause_on_warn: - app.warn("you might want to manually repeat mrregister with different parameters and overwrite the transformation file: \n%s" % transformation) - app.console('The command that failed the test was: \n' + cmd) - app.console('Working directory: \n' + os.getcwd()) - input("press enter to continue population_template") - return good - - -def aggregate(inputs, output, contrast_idx, mode, force=True): - from mrtrix3 import MRtrixError, run # pylint: disable=no-name-in-module, import-outside-toplevel - - images = [inp.ims_transformed[contrast_idx] for inp in inputs] - if mode == 'mean': - run.command(['mrmath', images, 'mean', '-keep_unary_axes', output], force=force) - elif mode == 'median': - run.command(['mrmath', images, 'median', '-keep_unary_axes', output], force=force) - elif mode == 'weighted_mean': - weights = [inp.aggregation_weight for inp in inputs] - assert not any(w is None for w in weights), weights - wsum = sum(float(w) for w in weights) - cmd = ['mrcalc'] - if wsum <= 0: - raise MRtrixError("the sum of aggregetion weights has to be positive") - for weight, image in zip(weights, images): - if float(weight) != 0: - cmd += [image, weight, '-mult'] + (['-add'] if len(cmd) > 1 else []) - cmd += ['%.16f' % wsum, '-div', output] - run.command(cmd, force=force) - else: - raise MRtrixError("aggregation mode %s not understood" % mode) - - -def inplace_nan_mask(images, masks): - from mrtrix3 import run # pylint: disable=no-name-in-module, import-outside-toplevel - assert len(images) == len(masks), (len(images), len(masks)) - for image, mask in zip(images, masks): - target_dir = os.path.split(image)[0] - masked = os.path.join(target_dir, '__' + os.path.split(image)[1]) - run.command("mrcalc " + mask + " " + image + " nan -if " + masked, force=True) - run.function(shutil.move, masked, image) - - -def calculate_isfinite(inputs, contrasts): - from 
mrtrix3 import run, path # pylint: disable=no-name-in-module, import-outside-toplevel - agg_weights = [float(inp.aggregation_weight) for inp in inputs if inp.aggregation_weight is not None] - for cid in range(contrasts.n_contrasts): - for inp in inputs: - if contrasts.n_volumes[cid] > 0: - cmd = 'mrconvert ' + inp.ims_transformed[cid] + ' -coord 3 0 - | mrcalc - -finite' - else: - cmd = 'mrcalc ' + inp.ims_transformed[cid] + ' -finite' - if inp.aggregation_weight: - cmd += ' %s -mult ' % inp.aggregation_weight - cmd += ' isfinite%s/%s.mif' % (contrasts.suff[cid], inp.uid) - run.command(cmd, force=True) - for cid in range(contrasts.n_contrasts): - cmd = ['mrmath', path.all_in_dir('isfinite%s' % contrasts.suff[cid]), 'sum'] - if agg_weights: - agg_weight_norm = str(float(len(agg_weights)) / sum(agg_weights)) - cmd += ['-', '|', 'mrcalc', '-', agg_weight_norm, '-mult'] - run.command(cmd + [contrasts.isfinite_count[cid]], force=True) - - -def get_common_postfix(file_list): - return os.path.commonprefix([i[::-1] for i in file_list])[::-1] - - -def get_common_prefix(file_list): - return os.path.commonprefix(file_list) - - -class Contrasts: - """ - Class that parses arguments and holds information specific to each image contrast - - Attributes - ---------- - suff: list of str - identifiers used for contrast-specific filenames and folders ['_c0', '_c1', ...] - - names: list of str - derived from constrast-specific input folder - - templates_out: list of str - full path to output templates - - templates: list of str - holds current template names during registration - - n_volumes: list of int - number of volumes in each contrast - - fod_reorientation: list of bool - whether to perform FOD reorientation with mrtransform - - isfinite_count: list of str - filenames of images holding (weighted) number of finite-valued voxels across all images - - mc_weight_: list of str - contrast-specific weight used during initialisation / registration - - _weight_option: list of str - weight option to be passed to mrregister, = {'initial_alignment', 'rigid', 'affine', 'nl'} - - n_contrasts: int - - """ - - def __init__(self): - from mrtrix3 import MRtrixError, path, app # pylint: disable=no-name-in-module, import-outside-toplevel - - n_contrasts = len(app.ARGS.input_dir) - - self.suff = ["_c" + c for c in map(str, range(n_contrasts))] - self.names = [os.path.relpath(f, os.path.commonprefix(app.ARGS.input_dir)) for f in app.ARGS.input_dir] - - self.templates_out = [path.from_user(t, True) for t in app.ARGS.template] - - self.mc_weight_initial_alignment = [None for _ in range(self.n_contrasts)] - self.mc_weight_rigid = [None for _ in range(self.n_contrasts)] - self.mc_weight_affine = [None for _ in range(self.n_contrasts)] - self.mc_weight_nl = [None for _ in range(self.n_contrasts)] - self.initial_alignment_weight_option = [None for _ in range(self.n_contrasts)] - self.rigid_weight_option = [None for _ in range(self.n_contrasts)] - self.affine_weight_option = [None for _ in range(self.n_contrasts)] - self.nl_weight_option = [None for _ in range(self.n_contrasts)] - - self.isfinite_count = ['isfinite' + c + '.mif' for c in self.suff] - self.templates = [None for _ in range(self.n_contrasts)] - self.n_volumes = [None for _ in range(self.n_contrasts)] - self.fod_reorientation = [None for _ in range(self.n_contrasts)] - - - for mode in ['initial_alignment', 'rigid', 'affine', 'nl']: - opt = app.ARGS.__dict__.get('mc_weight_' + mode, None) - if opt: - if n_contrasts == 1: - raise MRtrixError('mc_weight_' + mode+' requires 
multiple input contrasts') - opt = opt.split(',') - if len(opt) != n_contrasts: - raise MRtrixError('mc_weight_' + mode+' needs to be defined for each contrast') - else: - opt = ["1"] * n_contrasts - self.__dict__['mc_weight_%s' % mode] = opt - self.__dict__['%s_weight_option' % mode] = ' -mc_weights '+','.join(opt)+' ' if n_contrasts > 1 else '' - - if len(self.templates_out) != n_contrasts: - raise MRtrixError('number of templates (%i) does not match number of input directories (%i)' % - (len(self.templates_out), n_contrasts)) - - @property - def n_contrasts(self): - return len(self.suff) - - def __repr__(self, *args, **kwargs): - text = '' - for cid in range(self.n_contrasts): - text += '\tcontrast: %s, template: %s, suffix: %s\n' % (self.names[cid], self.templates_out[cid], self.suff[cid]) - return text - - -class Input: - """ - Class that holds input information specific to a single image (multiple contrasts) - - Attributes - ---------- - uid: str - unique identifier for these input image(s), does not contain spaces - - ims_path: list of str - full path to input images, shell quoted OR paths to cached file if cache_local was called - - msk_path: str - full path to input mask, shell quoted OR path to cached file if cache_local was called - - ims_filenames : list of str - for each contrast the input file paths stripped of their respective directories. Used for final output only. - - msk_filename: str - as ims_filenames - - ims_transformed: list of str - input_transformed/.mif - - msk_transformed: list of str - mask_transformed/.mif - - aggregation_weight: float - weights used in image aggregation that forms the template. Has to be normalised across inputs. - - _im_directories : list of str - full path to user-provided input directories containing the input images, one for each contrast - - _msk_directory: str - full path to user-provided mask directory - - _local_ims: list of str - path to cached input images - - _local_msk: str - path to cached input mask - - Methods - ------- - cache_local() - copy files into folders in current working directory. 
modifies _local_ims and _local_msk - - """ - def __init__(self, uid, filenames, directories, contrasts, mask_filename='', mask_directory=''): - self.contrasts = contrasts - - self.uid = uid - assert self.uid, "UID empty" - assert self.uid.count(' ') == 0, 'UID "%s" contains whitespace' % self.uid - - assert len(directories) == len(filenames) - self.ims_filenames = filenames - self._im_directories = directories - - self.msk_filename = mask_filename - self._msk_directory = mask_directory - - n_contrasts = len(contrasts) - - self.ims_transformed = [os.path.join('input_transformed'+contrasts[cid], uid + '.mif') for cid in range(n_contrasts)] - self.msk_transformed = os.path.join('mask_transformed', uid + '.mif') - - self.aggregation_weight = None - - self._local_ims = [] - self._local_msk = None - - def __repr__(self, *args, **kwargs): - text = '\nInput [' - for key in sorted([k for k in self.__dict__ if not k.startswith('_')]): - text += '\n\t' + str(key) + ': ' + str(self.__dict__[key]) - text += '\n]' - return text - - def info(self): - message = ['input: ' + self.uid] - if self.aggregation_weight: - message += ['agg weight: ' + self.aggregation_weight] - for csuff, fname in zip(self.contrasts, self.ims_filenames): - message += [((csuff + ': ') if csuff else '') + '"' + fname + '"'] - if self.msk_filename: - message += ['mask: ' + self.msk_filename] - return ', '.join(message) - - def cache_local(self): - from mrtrix3 import run, path # pylint: disable=no-name-in-module, import-outside-toplevel - contrasts = self.contrasts - for cid, csuff in enumerate(contrasts): - if not os.path.isdir('input' + csuff): - path.make_dir('input' + csuff) - run.command('mrconvert ' + self.ims_path[cid] + ' ' + os.path.join('input' + csuff, self.uid + '.mif')) - self._local_ims = [os.path.join('input' + csuff, self.uid + '.mif') for csuff in contrasts] - if self.msk_filename: - if not os.path.isdir('mask'): - path.make_dir('mask') - run.command('mrconvert ' + self.msk_path + ' ' + os.path.join('mask', self.uid + '.mif')) - self._local_msk = os.path.join('mask', self.uid + '.mif') - - def get_ims_path(self, quoted=True): - """ return path to input images """ - from mrtrix3 import path # pylint: disable=no-name-in-module, import-outside-toplevel - if self._local_ims: - return self._local_ims - return [path.from_user(abspath(d, f), quoted) for d, f in zip(self._im_directories, self.ims_filenames)] - ims_path = property(get_ims_path) - - def get_msk_path(self, quoted=True): - """ return path to input mask """ - from mrtrix3 import path # pylint: disable=no-name-in-module, import-outside-toplevel - if self._local_msk: - return self._local_msk - return path.from_user(os.path.join(self._msk_directory, self.msk_filename), quoted) if self.msk_filename else None - msk_path = property(get_msk_path) - - -def parse_input_files(in_files, mask_files, contrasts, f_agg_weight=None, whitespace_repl='_'): - """ - matches input images across contrasts and pair them with masks. - extracts unique identifiers from mask and image filenames by stripping common pre and postfix (per contrast and for masks) - unique identifiers contain ASCII letters, numbers and '_' but no whitespace which is replaced by whitespace_repl - - in_files: list of lists - the inner list holds filenames specific to a contrast - - mask_files: - can be empty - - returns list of Input - - checks: 3d_nonunity - TODO check if no common grid & trafo across contrasts (only relevant for robust init?) 
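The UID matching described here rests on two one-line helpers shown above: get_common_prefix() is a direct os.path.commonprefix, and get_common_postfix() applies the same function to the reversed names. A self-contained sketch of the resulting identifier extraction, with hypothetical file names:

import os
import re

def get_common_prefix(file_list):
    return os.path.commonprefix(file_list)

def get_common_postfix(file_list):
    # Reverse each name, take the common prefix, then reverse the result back
    return os.path.commonprefix([name[::-1] for name in file_list])[::-1]

names = ['sub-01_fod.mif', 'sub-02_fod.mif', 'sub-03_fod.mif']
prefix = get_common_prefix(names)    # 'sub-0'
postfix = get_common_postfix(names)  # '_fod.mif'
uids = [re.sub(re.escape(postfix) + '$', '',
               re.sub('^' + re.escape(prefix), '', name))
        for name in names]
assert uids == ['1', '2', '3']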
- - """ - from mrtrix3 import MRtrixError, app, path, image # pylint: disable=no-name-in-module, import-outside-toplevel - contrasts = contrasts.suff - inputs = [] - def paths_to_file_uids(paths, prefix, postfix): - """ strip pre and postfix from filename, replace whitespace characters """ - uid_path = {} - uids = [] - for path in paths: - uid = re.sub(re.escape(postfix)+'$', '', re.sub('^'+re.escape(prefix), '', os.path.split(path)[1])) - uid = re.sub(r'\s+', whitespace_repl, uid) - if not uid: - raise MRtrixError('No uniquely identifiable part of filename "' + path + '" ' - 'after prefix and postfix substitution ' - 'with prefix "' + prefix + '" and postfix "' + postfix + '"') - app.debug('UID mapping: "' + path + '" --> "' + uid + '"') - if uid in uid_path: - raise MRtrixError('unique file identifier is not unique: "' + uid + '" mapped to "' + path + '" and "' + uid_path[uid] +'"') - uid_path[uid] = path - uids.append(uid) - return uids - - # mask uids - mask_uids = [] - if mask_files: - mask_common_postfix = get_common_postfix(mask_files) - if not mask_common_postfix: - raise MRtrixError('mask filenames do not have a common postfix') - mask_common_prefix = get_common_prefix([os.path.split(m)[1] for m in mask_files]) - mask_uids = paths_to_file_uids(mask_files, mask_common_prefix, mask_common_postfix) - if app.VERBOSITY > 1: - app.console('mask uids:' + str(mask_uids)) - - # images uids - common_postfix = [get_common_postfix(files) for files in in_files] - common_prefix = [get_common_prefix(files) for files in in_files] - # xcontrast_xsubject_pre_postfix: prefix and postfix of the common part across contrasts and subjects, - # without image extensions and leading or trailing '_' or '-' - xcontrast_xsubject_pre_postfix = [get_common_postfix(common_prefix).lstrip('_-'), - get_common_prefix([re.sub('.('+'|'.join(IMAGEEXT)+')(.gz)?$', '', pfix).rstrip('_-') for pfix in common_postfix])] - if app.VERBOSITY > 1: - app.console("common_postfix: " + str(common_postfix)) - app.console("common_prefix: " + str(common_prefix)) - app.console("xcontrast_xsubject_pre_postfix: " + str(xcontrast_xsubject_pre_postfix)) - for ipostfix, postfix in enumerate(common_postfix): - if not postfix: - raise MRtrixError('image filenames do not have a common postfix:\n' + '\n'.join(in_files[ipostfix])) - - c_uids = [] - for cid, files in enumerate(in_files): - c_uids.append(paths_to_file_uids(files, common_prefix[cid], common_postfix[cid])) - - if app.VERBOSITY > 1: - app.console('uids by contrast:' + str(c_uids)) - - # join images and masks - for ifile, fname in enumerate(in_files[0]): - uid = c_uids[0][ifile] - fnames = [fname] - dirs = [abspath(path.from_user(app.ARGS.input_dir[0], False))] - if len(contrasts) > 1: - for cid in range(1, len(contrasts)): - dirs.append(abspath(path.from_user(app.ARGS.input_dir[cid], False))) - image.check_3d_nonunity(os.path.join(dirs[cid], in_files[cid][ifile])) - if uid != c_uids[cid][ifile]: - raise MRtrixError('no matching image was found for image %s and contrasts %s and %s.' % (fname, dirs[0], dirs[cid])) - fnames.append(in_files[cid][ifile]) - - if mask_files: - if uid not in mask_uids: - raise MRtrixError('no matching mask image was found for input image ' + fname + ' with uid "'+uid+'". 
' - 'Mask uid candidates: ' + ', '.join(['"%s"' % m for m in mask_uids])) - index = mask_uids.index(uid) - # uid, filenames, directories, contrasts, mask_filename = '', mask_directory = '', agg_weight = None - inputs.append(Input(uid, fnames, dirs, contrasts, - mask_filename=mask_files[index], mask_directory=abspath(path.from_user(app.ARGS.mask_dir, False)))) - else: - inputs.append(Input(uid, fnames, dirs, contrasts)) - - # parse aggregation weights and match to inputs - if f_agg_weight: - import csv # pylint: disable=import-outside-toplevel - try: - with open(f_agg_weight, 'r', encoding='utf-8') as fweights: - agg_weights = dict((row[0].lstrip().rstrip(), row[1]) for row in csv.reader(fweights, delimiter=',', quotechar='#')) - except UnicodeDecodeError: - with open(f_agg_weight, 'r', encoding='utf-8') as fweights: - reader = csv.reader(fweights.read().decode('utf-8', errors='replace'), delimiter=',', quotechar='#') - agg_weights = dict((row[0].lstrip().rstrip(), row[1]) for row in reader) - pref = '^' + re.escape(get_common_prefix(list(agg_weights.keys()))) - suff = re.escape(get_common_postfix(list(agg_weights.keys()))) + '$' - for key in agg_weights.keys(): - agg_weights[re.sub(suff, '', re.sub(pref, '', key))] = agg_weights.pop(key).strip() - - for inp in inputs: - if inp.uid not in agg_weights: - raise MRtrixError('aggregation weight not found for %s' % inp.uid) - inp.aggregation_weight = agg_weights[inp.uid] - app.console('Using aggregation weights ' + f_agg_weight) - weights = [float(inp.aggregation_weight) for inp in inputs if inp.aggregation_weight is not None] - if sum(weights) <= 0: - raise MRtrixError('Sum of aggregation weights is not positive: ' + str(weights)) - if any(w < 0 for w in weights): - app.warn('Negative aggregation weights: ' + str(weights)) - - return inputs, xcontrast_xsubject_pre_postfix - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import MRtrixError, app, image, matrix, path, run, EXE_LIST #pylint: disable=no-name-in-module, import-outside-toplevel - - expected_commands = ['mrgrid', 'mrregister', 'mrtransform', 'mraverageheader', 'mrconvert', 'mrmath', 'transformcalc', 'mrfilter'] - for cmd in expected_commands: - if cmd not in EXE_LIST : - raise MRtrixError("Could not find " + cmd + " in bin/. Binary commands not compiled?") - - if not app.ARGS.type in REGISTRATION_MODES: - raise MRtrixError("registration type must be one of %s. 
provided: %s" % (str(REGISTRATION_MODES), app.ARGS.type)) - dorigid = "rigid" in app.ARGS.type - doaffine = "affine" in app.ARGS.type - dolinear = dorigid or doaffine - dononlinear = "nonlinear" in app.ARGS.type - assert (dorigid + doaffine + dononlinear >= 1), "FIXME: registration type not valid" - - - input_output = app.ARGS.input_dir + [app.ARGS.template] - n_contrasts = len(input_output) // 2 - if len(input_output) != 2 * n_contrasts: - raise MRtrixError('expected two arguments per contrast, received %i: %s' % (len(input_output), ', '.join(input_output))) - if n_contrasts > 1: - app.console('Generating population template using multi-contrast registration') - - # reorder arguments for multi-contrast registration as after command line parsing app.ARGS.input_dir holds all but one argument - app.ARGS.input_dir = [] - app.ARGS.template = [] - for i_contrast in range(n_contrasts): - inargs = (input_output[i_contrast*2], input_output[i_contrast*2+1]) - if not os.path.isdir(inargs[0]): - raise MRtrixError('input directory %s not found' % inargs[0]) - app.ARGS.input_dir.append(relpath(inargs[0])) - app.ARGS.template.append(relpath(inargs[1])) - - cns = Contrasts() - app.debug(str(cns)) - - in_files = [sorted(path.all_in_dir(input_dir, dir_path=False)) for input_dir in app.ARGS.input_dir] - if len(in_files[0]) <= 1: - raise MRtrixError('Not enough images found in input directory ' + app.ARGS.input_dir[0] + - '. More than one image is needed to generate a population template') - if n_contrasts > 1: - for cid in range(1, n_contrasts): - if len(in_files[cid]) != len(in_files[0]): - raise MRtrixError('Found %i images in input directory %s ' % (len(app.ARGS.input_dir[0]), app.ARGS.input_dir[0]) + - 'but %i input images in %s.' % (len(app.ARGS.input_dir[cid]), app.ARGS.input_dir[cid])) - else: - app.console('Generating a population-average template from ' + str(len(in_files[0])) + ' input images') - if n_contrasts > 1: - app.console('using ' + str(len(in_files)) + ' contrasts for each input image') - - voxel_size = None - if app.ARGS.voxel_size: - voxel_size = app.ARGS.voxel_size.split(',') - if len(voxel_size) == 1: - voxel_size = voxel_size * 3 - try: - if len(voxel_size) != 3: - raise ValueError - [float(v) for v in voxel_size] #pylint: disable=expression-not-assigned - except ValueError as exception: - raise MRtrixError('voxel size needs to be a single or three comma-separated floating point numbers; received: ' + str(app.ARGS.voxel_size)) from exception - - agg_measure = 'mean' - if app.ARGS.aggregate is not None: - if not app.ARGS.aggregate in AGGREGATION_MODES: - app.error("aggregation type must be one of %s. provided: %s" % (str(AGGREGATION_MODES), app.ARGS.aggregate)) - agg_measure = app.ARGS.aggregate - - agg_weights = app.ARGS.aggregation_weights - if agg_weights is not None: - agg_measure = "weighted_" + agg_measure - if agg_measure != 'weighted_mean': - app.error("aggregation weights require '-aggregate mean' option. 
provided: %s" % (app.ARGS.aggregate)) - if not os.path.isfile(app.ARGS.aggregation_weights): - app.error("aggregation weights file not found: %s" % app.ARGS.aggregation_weights) - - initial_alignment = app.ARGS.initial_alignment - if initial_alignment not in ["mass", "robust_mass", "geometric", "none"]: - raise MRtrixError('initial_alignment must be one of ' + " ".join(["mass", "robust_mass", "geometric", "none"]) + " provided: " + str(initial_alignment)) - - linear_estimator = app.ARGS.linear_estimator - if linear_estimator and not linear_estimator.lower() == 'none': - if not dolinear: - raise MRtrixError('linear_estimator specified when no linear registration is requested') - if linear_estimator not in ["l1", "l2", "lp"]: - raise MRtrixError('linear_estimator must be one of ' + " ".join(["l1", "l2", "lp"]) + " provided: " + str(linear_estimator)) - - use_masks = False - mask_files = [] - if app.ARGS.mask_dir: - use_masks = True - app.ARGS.mask_dir = relpath(app.ARGS.mask_dir) - if not os.path.isdir(app.ARGS.mask_dir): - raise MRtrixError('mask directory not found') - mask_files = sorted(path.all_in_dir(app.ARGS.mask_dir, dir_path=False)) - if len(mask_files) < len(in_files[0]): - raise MRtrixError('there are not enough mask images for the number of images in the input directory') - - if not use_masks: - app.warn('no masks input. Use input masks to reduce computation time and improve robustness') - - if app.ARGS.template_mask and not use_masks: - raise MRtrixError('you cannot output a template mask because no subject masks were input using -mask_dir') - - nanmask_input = app.ARGS.nanmask - if nanmask_input and not use_masks: - raise MRtrixError('you cannot use NaN masking when no subject masks were input using -mask_dir') - - ins, xcontrast_xsubject_pre_postfix = parse_input_files(in_files, mask_files, cns, agg_weights) - - leave_one_out = 'auto' - if app.ARGS.leave_one_out is not None: - leave_one_out = app.ARGS.leave_one_out - if not leave_one_out in ['0', '1', 'auto']: - raise MRtrixError('leave_one_out not understood: ' + str(leave_one_out)) - if leave_one_out == 'auto': - leave_one_out = 2 < len(ins) < 15 - else: - leave_one_out = bool(int(leave_one_out)) - if leave_one_out: - app.console('performing leave-one-out registration') - # check that at sum of weights is positive for any grouping if weighted aggregation is used - weights = [float(inp.aggregation_weight) for inp in ins if inp.aggregation_weight is not None] - if weights and sum(weights) - max(weights) <= 0: - raise MRtrixError('leave-one-out registration requires positive aggregation weights in all groupings') - - noreorientation = app.ARGS.noreorientation - - do_pause_on_warn = True - if app.ARGS.linear_no_pause: - do_pause_on_warn = False - if not dolinear: - raise MRtrixError("linear option set when no linear registration is performed") - - if len(app.ARGS.template) != n_contrasts: - raise MRtrixError('mismatch between number of output templates (%i) ' % len(app.ARGS.template) + - 'and number of contrasts (%i)' % n_contrasts) - for templ in app.ARGS.template: - app.check_output_path(templ) - - if app.ARGS.warp_dir: - app.ARGS.warp_dir = relpath(app.ARGS.warp_dir) - app.check_output_path(app.ARGS.warp_dir) - - if app.ARGS.transformed_dir: - app.ARGS.transformed_dir = [relpath(d) for d in app.ARGS.transformed_dir.split(',')] - if len(app.ARGS.transformed_dir) != n_contrasts: - raise MRtrixError('require multiple comma separated transformed directories if multi-contrast registration is used') - for tdir in 
app.ARGS.transformed_dir: - app.check_output_path(tdir) - - if app.ARGS.linear_transformations_dir: - if not dolinear: - raise MRtrixError("linear option set when no linear registration is performed") - app.ARGS.linear_transformations_dir = relpath(app.ARGS.linear_transformations_dir) - app.check_output_path(app.ARGS.linear_transformations_dir) - - # automatically detect SH series in each contrast - do_fod_registration = False # in any contrast - cns.n_volumes = [] - cns.fod_reorientation = [] - for cid in range(n_contrasts): - header = image.Header(ins[0].get_ims_path(False)[cid]) - image_size = header.size() - if len(image_size) < 3 or len(image_size) > 4: - raise MRtrixError('only 3 and 4 dimensional images can be used to build a template') - if len(image_size) == 4: - cns.fod_reorientation.append(header.is_sh() and not noreorientation) - cns.n_volumes.append(image_size[3]) - do_fod_registration = do_fod_registration or cns.fod_reorientation[-1] - else: - cns.fod_reorientation.append(False) - cns.n_volumes.append(0) - if do_fod_registration: - app.console("SH Series detected, performing FOD registration in contrast: " + - ', '.join(app.ARGS.input_dir[cid] for cid in range(n_contrasts) if cns.fod_reorientation[cid])) - c_mrtransform_reorientation = [' -reorient_fod ' + ('yes' if cns.fod_reorientation[cid] else 'no') + ' ' - for cid in range(n_contrasts)] - - if nanmask_input: - app.console("NaN masking transformed images") - - # rigid options - if app.ARGS.rigid_scale: - rigid_scales = [float(x) for x in app.ARGS.rigid_scale.split(',')] - if not dorigid: - raise MRtrixError("rigid_scales option set when no rigid registration is performed") - else: - rigid_scales = DEFAULT_RIGID_SCALES - if app.ARGS.rigid_lmax: - if not dorigid: - raise MRtrixError("rigid_lmax option set when no rigid registration is performed") - rigid_lmax = [int(x) for x in app.ARGS.rigid_lmax.split(',')] - if do_fod_registration and len(rigid_scales) != len(rigid_lmax): - raise MRtrixError('rigid_scales and rigid_lmax schedules are not equal in length: scales stages: %s, lmax stages: %s' % (len(rigid_scales), len(rigid_lmax))) - else: - rigid_lmax = DEFAULT_RIGID_LMAX - - rigid_niter = [100] * len(rigid_scales) - if app.ARGS.rigid_niter: - if not dorigid: - raise MRtrixError("rigid_niter specified when no rigid registration is performed") - rigid_niter = [int(x) for x in app.ARGS.rigid_niter.split(',')] - if len(rigid_niter) == 1: - rigid_niter = rigid_niter * len(rigid_scales) - elif len(rigid_scales) != len(rigid_niter): - raise MRtrixError('rigid_scales and rigid_niter schedules are not equal in length: scales stages: %s, niter stages: %s' % (len(rigid_scales), len(rigid_niter))) - - # affine options - if app.ARGS.affine_scale: - affine_scales = [float(x) for x in app.ARGS.affine_scale.split(',')] - if not doaffine: - raise MRtrixError("affine_scale option set when no affine registration is performed") - else: - affine_scales = DEFAULT_AFFINE_SCALES - if app.ARGS.affine_lmax: - if not doaffine: - raise MRtrixError("affine_lmax option set when no affine registration is performed") - affine_lmax = [int(x) for x in app.ARGS.affine_lmax.split(',')] - if do_fod_registration and len(affine_scales) != len(affine_lmax): - raise MRtrixError('affine_scales and affine_lmax schedules are not equal in length: scales stages: %s, lmax stages: %s' % (len(affine_scales), len(affine_lmax))) - else: - affine_lmax = DEFAULT_AFFINE_LMAX - - affine_niter = [500] * len(affine_scales) - if app.ARGS.affine_niter: - if not doaffine: - raise 
MRtrixError("affine_niter specified when no affine registration is performed") - affine_niter = [int(x) for x in app.ARGS.affine_niter.split(',')] - if len(affine_niter) == 1: - affine_niter = affine_niter * len(affine_scales) - elif len(affine_scales) != len(affine_niter): - raise MRtrixError('affine_scales and affine_niter schedules are not equal in length: scales stages: %s, niter stages: %s' % (len(affine_scales), len(affine_niter))) - - linear_scales = [] - linear_lmax = [] - linear_niter = [] - linear_type = [] - if dorigid: - linear_scales += rigid_scales - linear_lmax += rigid_lmax - linear_niter += rigid_niter - linear_type += ['rigid'] * len(rigid_scales) - - if doaffine: - linear_scales += affine_scales - linear_lmax += affine_lmax - linear_niter += affine_niter - linear_type += ['affine'] * len(affine_scales) - - assert len(linear_type) == len(linear_scales) - assert len(linear_scales) == len(linear_niter) - if do_fod_registration: - if len(linear_lmax) != len(linear_niter): - mismatch = [] - if len(rigid_lmax) != len(rigid_niter): - mismatch += ['rigid: lmax stages: %s, niter stages: %s' % (len(rigid_lmax), len(rigid_niter))] - if len(affine_lmax) != len(affine_niter): - mismatch += ['affine: lmax stages: %s, niter stages: %s' % (len(affine_lmax), len(affine_niter))] - raise MRtrixError('linear registration: lmax and niter schedules are not equal in length: %s' % (', '.join(mismatch))) - app.console('-' * 60) - app.console('initial alignment of images: %s' % initial_alignment) - app.console('-' * 60) - if n_contrasts > 1: - for cid in range(n_contrasts): - app.console('\tcontrast "%s": %s, ' % (cns.suff[cid], cns.names[cid]) + - 'objective weight: %s' % cns.mc_weight_initial_alignment[cid]) - - if dolinear: - app.console('-' * 60) - app.console('linear registration stages:') - app.console('-' * 60) - if n_contrasts > 1: - for cid in range(n_contrasts): - msg = '\tcontrast "%s": %s' % (cns.suff[cid], cns.names[cid]) - if 'rigid' in linear_type: - msg += ', objective weight rigid: %s' % cns.mc_weight_rigid[cid] - if 'affine' in linear_type: - msg += ', objective weight affine: %s' % cns.mc_weight_affine[cid] - app.console(msg) - - if do_fod_registration: - for istage, [tpe, scale, lmax, niter] in enumerate(zip(linear_type, linear_scales, linear_lmax, linear_niter)): - app.console('(%02i) %s scale: %.4f, niter: %i, lmax: %i' % (istage, tpe.ljust(9), scale, niter, lmax)) - else: - for istage, [tpe, scale, niter] in enumerate(zip(linear_type, linear_scales, linear_niter)): - app.console('(%02i) %s scale: %.4f, niter: %i, no reorientation' % (istage, tpe.ljust(9), scale, niter)) - - datatype_option = ' -datatype float32' - outofbounds_option = ' -nan' - - if not dononlinear: - nl_scales = [] - nl_lmax = [] - nl_niter = [] - if app.ARGS.warp_dir: - raise MRtrixError('warp_dir specified when no nonlinear registration is performed') - else: - nl_scales = [float(x) for x in app.ARGS.nl_scale.split(',')] if app.ARGS.nl_scale else DEFAULT_NL_SCALES - nl_niter = [int(x) for x in app.ARGS.nl_niter.split(',')] if app.ARGS.nl_niter else DEFAULT_NL_NITER - nl_lmax = [int(x) for x in app.ARGS.nl_lmax.split(',')] if app.ARGS.nl_lmax else DEFAULT_NL_LMAX - - if len(nl_scales) != len(nl_niter): - raise MRtrixError('nl_scales and nl_niter schedules are not equal in length: scales stages: %s, niter stages: %s' % (len(nl_scales), len(nl_niter))) - - app.console('-' * 60) - app.console('nonlinear registration stages:') - app.console('-' * 60) - if n_contrasts > 1: - for cid in range(n_contrasts): - 
app.console('\tcontrast "%s": %s, objective weight: %s' % (cns.suff[cid], cns.names[cid], cns.mc_weight_nl[cid])) - - if do_fod_registration: - if len(nl_scales) != len(nl_lmax): - raise MRtrixError('nl_scales and nl_lmax schedules are not equal in length: scales stages: %s, lmax stages: %s' % (len(nl_scales), len(nl_lmax))) - - if do_fod_registration: - for istage, [scale, lmax, niter] in enumerate(zip(nl_scales, nl_lmax, nl_niter)): - app.console('(%02i) nonlinear scale: %.4f, niter: %i, lmax: %i' % (istage, scale, niter, lmax)) - else: - for istage, [scale, niter] in enumerate(zip(nl_scales, nl_niter)): - app.console('(%02i) nonlinear scale: %.4f, niter: %i, no reorientation' % (istage, scale, niter)) - - app.console('-' * 60) - app.console('input images:') - app.console('-' * 60) - for inp in ins: - app.console('\t' + inp.info()) - - app.make_scratch_dir() - app.goto_scratch_dir() - - for contrast in cns.suff: - path.make_dir('input_transformed' + contrast) - - for contrast in cns.suff: - path.make_dir('isfinite' + contrast) - - path.make_dir('linear_transforms_initial') - path.make_dir('linear_transforms') - for level in range(0, len(linear_scales)): - path.make_dir('linear_transforms_%02i' % level) - for level in range(0, len(nl_scales)): - path.make_dir('warps_%02i' % level) - - if use_masks: - path.make_dir('mask_transformed') - write_log = (app.VERBOSITY >= 2) - if write_log: - path.make_dir('log') - - if initial_alignment == 'robust_mass': - if not use_masks: - raise MRtrixError('robust_mass initial alignment requires masks') - path.make_dir('robust') - - if app.ARGS.copy_input: - app.console('Copying images into scratch directory') - for inp in ins: - inp.cache_local() - - # Make initial template in average space using first contrast - app.console('Generating initial template') - input_filenames = [inp.get_ims_path(False)[0] for inp in ins] - if voxel_size is None: - run.command(['mraverageheader', input_filenames, 'average_header.mif', '-fill']) - else: - run.command(['mraverageheader', '-fill', input_filenames, '-', '|', - 'mrgrid', '-', 'regrid', '-voxel', ','.join(map(str, voxel_size)), 'average_header.mif']) - - # crop average space to extent defined by original masks - if use_masks: - progress = app.ProgressBar('Importing input masks to average space for template cropping', len(ins)) - for inp in ins: - run.command('mrtransform ' + inp.msk_path + ' -interp nearest -template average_header.mif ' + inp.msk_transformed) - progress.increment() - progress.done() - run.command(['mrmath', [inp.msk_transformed for inp in ins], 'max', 'mask_initial.mif']) - run.command('mrgrid average_header.mif crop -mask mask_initial.mif average_header_cropped.mif') - run.function(os.remove, 'mask_initial.mif') - run.function(os.remove, 'average_header.mif') - run.function(shutil.move, 'average_header_cropped.mif', 'average_header.mif') - progress = app.ProgressBar('Erasing temporary mask images', len(ins)) - for inp in ins: - run.function(os.remove, inp.msk_transformed) - progress.increment() - progress.done() - - # create average space headers for other contrasts - if n_contrasts > 1: - avh3d = 'average_header3d.mif' - avh4d = 'average_header4d.mif' - if len(image.Header('average_header.mif').size()) == 3: - run.command('mrconvert average_header.mif ' + avh3d) - else: - run.command('mrconvert average_header.mif -coord 3 0 -axes 0,1,2 ' + avh3d) - run.command('mrconvert ' + avh3d + ' -axes 0,1,2,-1 ' + avh4d) - for cid in range(n_contrasts): - if cns.n_volumes[cid] == 0: - run.function(copy, 
avh3d, 'average_header' + cns.suff[cid] + '.mif') - elif cns.n_volumes[cid] == 1: - run.function(copy, avh4d, 'average_header' + cns.suff[cid] + '.mif') - else: - run.command(['mrcat', [avh3d] * cns.n_volumes[cid], '-axis', '3', 'average_header' + cns.suff[cid] + '.mif']) - run.function(os.remove, avh3d) - run.function(os.remove, avh4d) - else: - run.function(shutil.move, 'average_header.mif', 'average_header' + cns.suff[0] + '.mif') - - cns.templates = ['average_header' + csuff + '.mif' for csuff in cns.suff] - - if initial_alignment == 'none': - progress = app.ProgressBar('Resampling input images to template space with no initial alignment', len(ins) * n_contrasts) - for inp in ins: - for cid in range(n_contrasts): - run.command('mrtransform ' + inp.ims_path[cid] + c_mrtransform_reorientation[cid] + ' -interp linear ' + - '-template ' + cns.templates[cid] + ' ' + inp.ims_transformed[cid] + - outofbounds_option + - datatype_option) - progress.increment() - progress.done() - - if use_masks: - progress = app.ProgressBar('Reslicing input masks to average header', len(ins)) - for inp in ins: - run.command('mrtransform ' + inp.msk_path + ' ' + inp.msk_transformed + ' ' + - '-interp nearest -template ' + cns.templates[0] + ' ' + - datatype_option) - progress.increment() - progress.done() - - if nanmask_input: - inplace_nan_mask([inp.ims_transformed[cid] for inp in ins for cid in range(n_contrasts)], - [inp.msk_transformed for inp in ins for cid in range(n_contrasts)]) - - if leave_one_out: - calculate_isfinite(ins, cns) - - if not dolinear: - for inp in ins: - with open(os.path.join('linear_transforms_initial', inp.uid + '.txt'), 'w', encoding='utf-8') as fout: - fout.write('1 0 0 0\n0 1 0 0\n0 0 1 0\n0 0 0 1\n') - - run.function(copy, 'average_header' + cns.suff[0] + '.mif', 'average_header.mif') - - else: - progress = app.ProgressBar('Performing initial rigid registration to template', len(ins)) - mask_option = '' - cid = 0 - lmax_option = ' -rigid_lmax 0 ' if cns.fod_reorientation[cid] else ' -noreorientation ' - contrast_weight_option = cns.initial_alignment_weight_option - for inp in ins: - output_option = ' -rigid ' + os.path.join('linear_transforms_initial', inp.uid + '.txt') - images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, cns.templates)]) - if use_masks: - mask_option = ' -mask1 ' + inp.msk_path - if initial_alignment == 'robust_mass': - if not os.path.isfile('robust/template.mif'): - if cns.n_volumes[cid] > 0: - run.command('mrconvert ' + cns.templates[cid] + ' -coord 3 0 - | mrconvert - -axes 0,1,2 robust/template.mif') - else: - run.command('mrconvert ' + cns.templates[cid] + ' robust/template.mif') - if n_contrasts > 1: - cmd = ['mrcalc', inp.ims_path[cid], cns.mc_weight_initial_alignment[cid], '-mult'] - for cid in range(1, n_contrasts): - cmd += [inp.ims_path[cid], cns.mc_weight_initial_alignment[cid], '-mult', '-add'] - contrast_weight_option = '' - run.command(' '.join(cmd) + - ' - | mrfilter - zclean -zlower 3 -zupper 3 robust/image_' + inp.uid + '.mif' - ' -maskin ' + inp.msk_path + ' -maskout robust/mask_' + inp.uid + '.mif') - else: - run.command('mrfilter ' + inp.ims_path[0] + ' zclean -zlower 3 -zupper 3 robust/image_' + inp.uid + '.mif' + - ' -maskin ' + inp.msk_path + ' -maskout robust/mask_' + inp.uid + '.mif') - images = 'robust/image_' + inp.uid + '.mif robust/template.mif' - mask_option = ' -mask1 ' + 'robust/mask_' + inp.uid + '.mif' - lmax_option = '' - - run.command('mrregister ' + images + - mask_option + - ' -rigid_scale 1 ' + - ' -rigid_niter 0 ' 
+ - ' -type rigid ' + - lmax_option + - contrast_weight_option + - ' -rigid_init_translation ' + initial_alignment.replace('robust_', '') + ' ' + - datatype_option + - output_option) - # translate input images to centre of mass without interpolation - for cid in range(n_contrasts): - run.command('mrtransform ' + inp.ims_path[cid] + c_mrtransform_reorientation[cid] + - ' -linear ' + os.path.join('linear_transforms_initial', inp.uid + '.txt') + - ' ' + inp.ims_transformed[cid] + "_translated.mif" + datatype_option) - if use_masks: - run.command('mrtransform ' + inp.msk_path + - ' -linear ' + os.path.join('linear_transforms_initial', inp.uid + '.txt') + - ' ' + inp.msk_transformed + "_translated.mif" + - datatype_option) - progress.increment() - # update average space of first contrast to new extent, delete other average space images - run.command(['mraverageheader', [inp.ims_transformed[cid] + '_translated.mif' for inp in ins], 'average_header_tight.mif']) - progress.done() - - if voxel_size is None: - run.command('mrgrid average_header_tight.mif pad -uniform 10 average_header.mif', force=True) - else: - run.command('mrgrid average_header_tight.mif pad -uniform 10 - | ' - 'mrgrid - regrid -voxel ' + ','.join(map(str, voxel_size)) + ' average_header.mif', force=True) - run.function(os.remove, 'average_header_tight.mif') - for cid in range(1, n_contrasts): - run.function(os.remove, 'average_header' + cns.suff[cid] + '.mif') - - if use_masks: - # reslice masks - progress = app.ProgressBar('Reslicing input masks to average header', len(ins)) - for inp in ins: - run.command('mrtransform ' + inp.msk_transformed + '_translated.mif' + ' ' + inp.msk_transformed + ' ' + - '-interp nearest -template average_header.mif' + datatype_option) - progress.increment() - progress.done() - # crop average space to extent defined by translated masks - run.command(['mrmath', [inp.msk_transformed for inp in ins], 'max', 'mask_translated.mif']) - run.command('mrgrid average_header.mif crop -mask mask_translated.mif average_header_cropped.mif') - # pad average space to allow for deviation from initial alignment - run.command('mrgrid average_header_cropped.mif pad -uniform 10 average_header.mif', force=True) - run.function(os.remove, 'average_header_cropped.mif') - # reslice masks - progress = app.ProgressBar('Reslicing masks to new padded average header', len(ins)) - for inp in ins: - run.command('mrtransform ' + inp.msk_transformed + '_translated.mif ' + inp.msk_transformed + ' ' + - '-interp nearest -template average_header.mif' + datatype_option, force=True) - run.function(os.remove, inp.msk_transformed + '_translated.mif') - progress.increment() - progress.done() - run.function(os.remove, 'mask_translated.mif') - - # reslice images - progress = app.ProgressBar('Reslicing input images to average header', len(ins) * n_contrasts) - for cid in range(n_contrasts): - for inp in ins: - run.command('mrtransform ' + c_mrtransform_reorientation[cid] + inp.ims_transformed[cid] + '_translated.mif ' + - inp.ims_transformed[cid] + ' ' + - ' -interp linear -template average_header.mif' + - outofbounds_option + - datatype_option) - run.function(os.remove, inp.ims_transformed[cid] + '_translated.mif') - progress.increment() - progress.done() - - if nanmask_input: - inplace_nan_mask([inp.ims_transformed[cid] for inp in ins for cid in range(n_contrasts)], - [inp.msk_transformed for inp in ins for cid in range(n_contrasts)]) - - if leave_one_out: - calculate_isfinite(ins, cns) - - cns.templates = ['initial_template' + contrast + 
'.mif' for contrast in cns.suff] - for cid in range(n_contrasts): - aggregate(ins, 'initial_template' + cns.suff[cid] + '.mif', cid, agg_measure) - if cns.n_volumes[cid] == 1: - run.function(shutil.move, 'initial_template' + cns.suff[cid] + '.mif', 'tmp.mif') - run.command('mrconvert tmp.mif initial_template' + cns.suff[cid] + '.mif -axes 0,1,2,-1') - - # Optimise template with linear registration - if not dolinear: - for inp in ins: - run.function(copy, os.path.join('linear_transforms_initial', inp.uid+'.txt'), - os.path.join('linear_transforms', inp.uid+'.txt')) - else: - level = 0 - regtype = linear_type[0] - def linear_msg(): - return 'Optimising template with linear registration (stage {0} of {1}; {2})'.format(level + 1, len(linear_scales), regtype) - progress = app.ProgressBar(linear_msg, len(linear_scales) * len(ins) * (1 + n_contrasts + int(use_masks))) - for level, (regtype, scale, niter, lmax) in enumerate(zip(linear_type, linear_scales, linear_niter, linear_lmax)): - for inp in ins: - initialise_option = '' - if use_masks: - mask_option = ' -mask1 ' + inp.msk_path - else: - mask_option = '' - lmax_option = ' -noreorientation' - metric_option = '' - mrregister_log_option = '' - if regtype == 'rigid': - scale_option = ' -rigid_scale ' + str(scale) - niter_option = ' -rigid_niter ' + str(niter) - regtype_option = ' -type rigid' - output_option = ' -rigid ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') - contrast_weight_option = cns.rigid_weight_option - initialise_option = (' -rigid_init_matrix ' + - os.path.join('linear_transforms_%02i' % (level - 1) if level > 0 else 'linear_transforms_initial', inp.uid + '.txt')) - if do_fod_registration: - lmax_option = ' -rigid_lmax ' + str(lmax) - if linear_estimator: - metric_option = ' -rigid_metric.diff.estimator ' + linear_estimator - if app.VERBOSITY >= 2: - mrregister_log_option = ' -info -rigid_log ' + os.path.join('log', inp.uid + contrast[cid] + "_" + str(level) + '.log') - else: - scale_option = ' -affine_scale ' + str(scale) - niter_option = ' -affine_niter ' + str(niter) - regtype_option = ' -type affine' - output_option = ' -affine ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') - contrast_weight_option = cns.affine_weight_option - initialise_option = (' -affine_init_matrix ' + - os.path.join('linear_transforms_%02i' % (level - 1) if level > 0 else 'linear_transforms_initial', inp.uid + '.txt')) - if do_fod_registration: - lmax_option = ' -affine_lmax ' + str(lmax) - if linear_estimator: - metric_option = ' -affine_metric.diff.estimator ' + linear_estimator - if write_log: - mrregister_log_option = ' -info -affine_log ' + os.path.join('log', inp.uid + contrast[cid] + "_" + str(level) + '.log') - - if leave_one_out: - tmpl = [] - for cid in range(n_contrasts): - isfinite = 'isfinite%s/%s.mif' % (cns.suff[cid], inp.uid) - weight = inp.aggregation_weight if inp.aggregation_weight is not None else '1' - # loo = (template * weighted sum - weight * this) / (weighted sum - weight) - run.command('mrcalc ' + cns.isfinite_count[cid] + ' ' + isfinite + ' -sub - | mrcalc ' + cns.templates[cid] + - ' ' + cns.isfinite_count[cid] + ' -mult ' + inp.ims_transformed[cid] + ' ' + weight + ' -mult ' + - ' -sub - -div loo_%s' % cns.templates[cid], force=True) - tmpl.append('loo_%s' % cns.templates[cid]) - images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, tmpl)]) - else: - images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, cns.templates)]) - command = 'mrregister ' + images + \ - 
initialise_option + \ - mask_option + \ - scale_option + \ - niter_option + \ - lmax_option + \ - regtype_option + \ - metric_option + \ - datatype_option + \ - contrast_weight_option + \ - output_option + \ - mrregister_log_option - run.command(command, force=True) - check_linear_transformation(os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt'), command, - pause_on_warn=do_pause_on_warn) - if leave_one_out: - for im_temp in tmpl: - run.function(os.remove, im_temp) - progress.increment() - - # Here we ensure the overall template properties don't change (too much) over levels - # The reference is the initialisation as that's used to construct the average space. - # T_i: linear trafo for case i, i.e. template(x) = E [ image_i(T_i x) ] - # R_i: inital trafo for case i (identity if initial alignment is none) - # A = E[ T_i ]: average of current trafos - # B = E[ R_i ]: average of initial trafos - # C_i' = T_i B A^{-1}: "drift" corrected T_i - # T_i <- C_i - # Notes: - # - This approximately stabilises E[ T_i ], its' relatively close to B - # - Not sure whether it's preferable to stabilise E[ T_i^{-1} ] - # - If one subject's registration fails, this will affect the average and therefore the template which could result in instable behaviour. - # - The template appearance changes slightly over levels, but the template and trafos are affected in the same way so should not affect template convergence. - if not app.ARGS.linear_no_drift_correction: - run.command(['transformcalc', [os.path.join('linear_transforms_initial', inp.uid + '.txt') for _inp in ins], - 'average', 'linear_transform_average_init.txt', '-quiet'], force=True) - run.command(['transformcalc', [os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') for _inp in ins], - 'average', 'linear_transform_average_%02i_uncorrected.txt' % level, '-quiet'], force=True) - run.command(['transformcalc', 'linear_transform_average_%02i_uncorrected.txt' % level, - 'invert', 'linear_transform_average_%02i_uncorrected_inv.txt' % level, '-quiet'], force=True) - - transform_average_init = matrix.load_transform('linear_transform_average_init.txt') - transform_average_current_inv = matrix.load_transform('linear_transform_average_%02i_uncorrected_inv.txt' % level) - - transform_update = matrix.dot(transform_average_init, transform_average_current_inv) - matrix.save_transform(os.path.join('linear_transforms_%02i_drift_correction.txt' % level), transform_update, force=True) - if regtype == 'rigid': - run.command('transformcalc ' + os.path.join('linear_transforms_%02i_drift_correction.txt' % level) + - ' rigid ' + os.path.join('linear_transforms_%02i_drift_correction.txt' % level) + ' -quiet', force=True) - transform_update = matrix.load_transform(os.path.join('linear_transforms_%02i_drift_correction.txt' % level)) - - for inp in ins: - transform = matrix.load_transform('linear_transforms_%02i/' % level + inp.uid + '.txt') - transform_updated = matrix.dot(transform, transform_update) - run.function(copy, 'linear_transforms_%02i/' % level + inp.uid + '.txt', 'linear_transforms_%02i/' % level + inp.uid + '.precorrection') - matrix.save_transform(os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt'), transform_updated, force=True) - - # compute average trafos and its properties for easier debugging - run.command(['transformcalc', [os.path.join('linear_transforms_%02i' % level, _inp.uid + '.txt') for _inp in ins], - 'average', 'linear_transform_average_%02i.txt' % level, '-quiet'], force=True) - run.command('transformcalc 
linear_transform_average_%02i.txt decompose linear_transform_average_%02i.dec' % (level, level), force=True) - - - for cid in range(n_contrasts): - for inp in ins: - run.command('mrtransform ' + c_mrtransform_reorientation[cid] + inp.ims_path[cid] + - ' -template ' + cns.templates[cid] + - ' -linear ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') + - ' ' + inp.ims_transformed[cid] + - outofbounds_option + - datatype_option, - force=True) - progress.increment() - - if use_masks: - for inp in ins: - run.command('mrtransform ' + inp.msk_path + - ' -template ' + cns.templates[0] + - ' -interp nearest' + - ' -linear ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') + - ' ' + inp.msk_transformed, - force=True) - progress.increment() - - if nanmask_input: - inplace_nan_mask([inp.ims_transformed[cid] for inp in ins for cid in range(n_contrasts)], - [inp.msk_transformed for inp in ins for cid in range(n_contrasts)]) - - if leave_one_out: - calculate_isfinite(ins, cns) - - for cid in range(n_contrasts): - if level > 0 and app.ARGS.delete_temporary_files: - os.remove(cns.templates[cid]) - cns.templates[cid] = 'linear_template%02i%s.mif' % (level, cns.suff[cid]) - aggregate(ins, cns.templates[cid], cid, agg_measure) - if cns.n_volumes[cid] == 1: - run.function(shutil.move, cns.templates[cid], 'tmp.mif') - run.command('mrconvert tmp.mif ' + cns.templates[cid] + ' -axes 0,1,2,-1') - run.function(os.remove, 'tmp.mif') - - for entry in os.listdir('linear_transforms_%02i' % level): - run.function(copy, os.path.join('linear_transforms_%02i' % level, entry), os.path.join('linear_transforms', entry)) - progress.done() - - # Create a template mask for nl registration by taking the intersection of all transformed input masks and dilating - if use_masks and (dononlinear or app.ARGS.template_mask): - run.command(['mrmath', path.all_in_dir('mask_transformed')] + - 'min - | maskfilter - median - | maskfilter - dilate -npass 5 init_nl_template_mask.mif'.split(), force=True) - current_template_mask = 'init_nl_template_mask.mif' - - if dononlinear: - path.make_dir('warps') - level = 0 - def nonlinear_msg(): - return 'Optimising template with non-linear registration (stage {0} of {1})'.format(level + 1, len(nl_scales)) - progress = app.ProgressBar(nonlinear_msg, len(nl_scales) * len(ins)) - for level, (scale, niter, lmax) in enumerate(zip(nl_scales, nl_niter, nl_lmax)): - for inp in ins: - if level > 0: - initialise_option = ' -nl_init ' + os.path.join('warps_%02i' % (level - 1), inp.uid + '.mif') - scale_option = '' - else: - scale_option = ' -nl_scale ' + str(scale) - if not doaffine: # rigid or no previous linear stage - initialise_option = ' -rigid_init_matrix ' + os.path.join('linear_transforms', inp.uid + '.txt') - else: - initialise_option = ' -affine_init_matrix ' + os.path.join('linear_transforms', inp.uid + '.txt') - - if use_masks: - mask_option = ' -mask1 ' + inp.msk_path + ' -mask2 ' + current_template_mask - else: - mask_option = '' - - if do_fod_registration: - lmax_option = ' -nl_lmax ' + str(lmax) - else: - lmax_option = ' -noreorientation' - - contrast_weight_option = cns.nl_weight_option - - if leave_one_out: - tmpl = [] - for cid in range(n_contrasts): - isfinite = 'isfinite%s/%s.mif' % (cns.suff[cid], inp.uid) - weight = inp.aggregation_weight if inp.aggregation_weight is not None else '1' - # loo = (template * weighted sum - weight * this) / (weighted sum - weight) - run.command('mrcalc ' + cns.isfinite_count[cid] + ' ' + isfinite + ' -sub - | mrcalc ' + 
cns.templates[cid] + - ' ' + cns.isfinite_count[cid] + ' -mult ' + inp.ims_transformed[cid] + ' ' + weight + ' -mult ' + - ' -sub - -div loo_%s' % cns.templates[cid], force=True) - tmpl.append('loo_%s' % cns.templates[cid]) - images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, tmpl)]) - else: - images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, cns.templates)]) - run.command('mrregister ' + images + - ' -type nonlinear' + - ' -nl_niter ' + str(nl_niter[level]) + - ' -nl_warp_full ' + os.path.join('warps_%02i' % level, inp.uid + '.mif') + - ' -transformed ' + - ' -transformed '.join([inp.ims_transformed[cid] for cid in range(n_contrasts)]) + ' ' + - ' -nl_update_smooth ' + app.ARGS.nl_update_smooth + - ' -nl_disp_smooth ' + app.ARGS.nl_disp_smooth + - ' -nl_grad_step ' + app.ARGS.nl_grad_step + - initialise_option + - contrast_weight_option + - scale_option + - mask_option + - datatype_option + - outofbounds_option + - lmax_option, - force=True) - - if use_masks: - run.command('mrtransform ' + inp.msk_path + - ' -template ' + cns.templates[0] + - ' -warp_full ' + os.path.join('warps_%02i' % level, inp.uid + '.mif') + - ' ' + inp.msk_transformed + - ' -interp nearest ', - force=True) - - if leave_one_out: - for im_temp in tmpl: - run.function(os.remove, im_temp) - - if level > 0: - run.function(os.remove, os.path.join('warps_%02i' % (level - 1), inp.uid + '.mif')) - - progress.increment(nonlinear_msg()) - - if nanmask_input: - inplace_nan_mask([_inp.ims_transformed[cid] for _inp in ins for cid in range(n_contrasts)], - [_inp.msk_transformed for _inp in ins for cid in range(n_contrasts)]) - - if leave_one_out: - calculate_isfinite(ins, cns) - - for cid in range(n_contrasts): - if level > 0 and app.ARGS.delete_temporary_files: - os.remove(cns.templates[cid]) - cns.templates[cid] = 'nl_template%02i%s.mif' % (level, cns.suff[cid]) - aggregate(ins, cns.templates[cid], cid, agg_measure) - if cns.n_volumes[cid] == 1: - run.function(shutil.move, cns.templates[cid], 'tmp.mif') - run.command('mrconvert tmp.mif ' + cns.templates[cid] + ' -axes 0,1,2,-1') - run.function(os.remove, 'tmp.mif') - - if use_masks: - run.command(['mrmath', path.all_in_dir('mask_transformed')] + - 'min - | maskfilter - median - | '.split() + - ('maskfilter - dilate -npass 5 nl_template_mask' + str(level) + '.mif').split()) - current_template_mask = 'nl_template_mask' + str(level) + '.mif' - - if level < len(nl_scales) - 1: - if scale < nl_scales[level + 1]: - upsample_factor = nl_scales[level + 1] / scale - for inp in ins: - run.command('mrgrid ' + os.path.join('warps_%02i' % level, inp.uid + '.mif') + - ' regrid -scale %f tmp.mif' % upsample_factor, force=True) - run.function(shutil.move, 'tmp.mif', os.path.join('warps_%02i' % level, inp.uid + '.mif')) - else: - for inp in ins: - run.function(shutil.move, os.path.join('warps_%02i' % level, inp.uid + '.mif'), 'warps') - progress.done() - - for cid in range(n_contrasts): - run.command('mrconvert ' + cns.templates[cid] + ' ' + cns.templates_out[cid], - mrconvert_keyval='NULL', force=app.FORCE_OVERWRITE) - - if app.ARGS.warp_dir: - warp_path = path.from_user(app.ARGS.warp_dir, False) - if os.path.exists(warp_path): - run.function(shutil.rmtree, warp_path) - os.makedirs(warp_path) - progress = app.ProgressBar('Copying non-linear warps to output directory "' + warp_path + '"', len(ins)) - for inp in ins: - keyval = image.Header(os.path.join('warps', inp.uid + '.mif')).keyval() - keyval = dict((k, keyval[k]) for k in ('linear1', 'linear2')) - json_path = 
os.path.join('warps', inp.uid + '.json') - with open(json_path, 'w', encoding='utf-8') as json_file: - json.dump(keyval, json_file) - run.command('mrconvert ' + os.path.join('warps', inp.uid + '.mif') + ' ' + - shlex.quote(os.path.join(warp_path, xcontrast_xsubject_pre_postfix[0] + - inp.uid + xcontrast_xsubject_pre_postfix[1] + '.mif')), - mrconvert_keyval=json_path, force=app.FORCE_OVERWRITE) - progress.increment() - progress.done() - - if app.ARGS.linear_transformations_dir: - linear_transformations_path = path.from_user(app.ARGS.linear_transformations_dir, False) - if os.path.exists(linear_transformations_path): - run.function(shutil.rmtree, linear_transformations_path) - os.makedirs(linear_transformations_path) - for inp in ins: - trafo = matrix.load_transform(os.path.join('linear_transforms', inp.uid + '.txt')) - matrix.save_transform(os.path.join(linear_transformations_path, - xcontrast_xsubject_pre_postfix[0] + inp.uid - + xcontrast_xsubject_pre_postfix[1] + '.txt'), - trafo, - force=app.FORCE_OVERWRITE) - - if app.ARGS.transformed_dir: - for cid, trdir in enumerate(app.ARGS.transformed_dir): - transformed_path = path.from_user(trdir, False) - if os.path.exists(transformed_path): - run.function(shutil.rmtree, transformed_path) - os.makedirs(transformed_path) - progress = app.ProgressBar('Copying transformed images to output directory "' + transformed_path + '"', len(ins)) - for inp in ins: - run.command(['mrconvert', inp.ims_transformed[cid], os.path.join(transformed_path, inp.ims_filenames[cid])], - mrconvert_keyval=inp.get_ims_path(False)[cid], force=app.FORCE_OVERWRITE) - progress.increment() - progress.done() - - if app.ARGS.template_mask: - run.command('mrconvert ' + current_template_mask + ' ' + path.from_user(app.ARGS.template_mask, True), - mrconvert_keyval='NULL', force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/population_template/contrasts.py b/python/mrtrix3/population_template/contrasts.py new file mode 100644 index 0000000000..6e54755921 --- /dev/null +++ b/python/mrtrix3/population_template/contrasts.py @@ -0,0 +1,106 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import os +from mrtrix3 import MRtrixError +from mrtrix3 import app, path # pylint: disable=no-name-in-module, import-outside-toplevel + +class Contrasts: + """ + Class that parses arguments and holds information specific to each image contrast + + Attributes + ---------- + suff: list of str + identifiers used for contrast-specific filenames and folders ['_c0', '_c1', ...] 
+ + names: list of str + derived from constrast-specific input folder + + templates_out: list of str + full path to output templates + + templates: list of str + holds current template names during registration + + n_volumes: list of int + number of volumes in each contrast + + fod_reorientation: list of bool + whether to perform FOD reorientation with mrtransform + + isfinite_count: list of str + filenames of images holding (weighted) number of finite-valued voxels across all images + + mc_weight_: list of str + contrast-specific weight used during initialisation / registration + + _weight_option: list of str + weight option to be passed to mrregister, = {'initial_alignment', 'rigid', 'affine', 'nl'} + + n_contrasts: int + + """ + + def __init__(self): + + n_contrasts = len(app.ARGS.input_dir) + + self.suff = ["_c" + c for c in map(str, range(n_contrasts))] + self.names = [os.path.relpath(f, os.path.commonprefix(app.ARGS.input_dir)) for f in app.ARGS.input_dir] + + self.templates_out = [path.from_user(t, True) for t in app.ARGS.template] + + self.mc_weight_initial_alignment = [None for _ in range(self.n_contrasts)] + self.mc_weight_rigid = [None for _ in range(self.n_contrasts)] + self.mc_weight_affine = [None for _ in range(self.n_contrasts)] + self.mc_weight_nl = [None for _ in range(self.n_contrasts)] + self.initial_alignment_weight_option = [None for _ in range(self.n_contrasts)] + self.rigid_weight_option = [None for _ in range(self.n_contrasts)] + self.affine_weight_option = [None for _ in range(self.n_contrasts)] + self.nl_weight_option = [None for _ in range(self.n_contrasts)] + + self.isfinite_count = ['isfinite' + c + '.mif' for c in self.suff] + self.templates = [None for _ in range(self.n_contrasts)] + self.n_volumes = [None for _ in range(self.n_contrasts)] + self.fod_reorientation = [None for _ in range(self.n_contrasts)] + + + for mode in ['initial_alignment', 'rigid', 'affine', 'nl']: + opt = app.ARGS.__dict__.get('mc_weight_' + mode, None) + if opt: + if n_contrasts == 1: + raise MRtrixError('mc_weight_' + mode+' requires multiple input contrasts') + opt = opt.split(',') + if len(opt) != n_contrasts: + raise MRtrixError('mc_weight_' + mode+' needs to be defined for each contrast') + else: + opt = ["1"] * n_contrasts + self.__dict__['mc_weight_%s' % mode] = opt + self.__dict__['%s_weight_option' % mode] = ' -mc_weights '+','.join(opt)+' ' if n_contrasts > 1 else '' + + if len(self.templates_out) != n_contrasts: + raise MRtrixError('number of templates (%i) does not match number of input directories (%i)' % + (len(self.templates_out), n_contrasts)) + + @property + def n_contrasts(self): + return len(self.suff) + + def __repr__(self, *args, **kwargs): + text = '' + for cid in range(self.n_contrasts): + text += '\tcontrast: %s, template: %s, suffix: %s\n' % (self.names[cid], self.templates_out[cid], self.suff[cid]) + return text diff --git a/python/mrtrix3/population_template/execute.py b/python/mrtrix3/population_template/execute.py new file mode 100644 index 0000000000..f2639eaf02 --- /dev/null +++ b/python/mrtrix3/population_template/execute.py @@ -0,0 +1,925 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import json, os, shlex, shutil +from mrtrix3 import EXE_LIST, MRtrixError +from mrtrix3 import app, image, matrix, path, run #pylint: disable=no-name-in-module +from . import AGGREGATION_MODES, REGISTRATION_MODES +from . import DEFAULT_AFFINE_LMAX, DEFAULT_AFFINE_SCALES +from . import DEFAULT_RIGID_LMAX, DEFAULT_RIGID_SCALES +from . import DEFAULT_NL_LMAX, DEFAULT_NL_NITER, DEFAULT_NL_SCALES +from . import Contrasts +from .utils import aggregate, calculate_isfinite, check_linear_transformation, copy, inplace_nan_mask, parse_input_files, relpath + +def execute(): #pylint: disable=unused-variable + + expected_commands = ['mrgrid', 'mrregister', 'mrtransform', 'mraverageheader', 'mrconvert', 'mrmath', 'transformcalc', 'mrfilter'] + for cmd in expected_commands: + if cmd not in EXE_LIST : + raise MRtrixError("Could not find " + cmd + " in bin/. Binary commands not compiled?") + + if not app.ARGS.type in REGISTRATION_MODES: + raise MRtrixError("registration type must be one of %s. provided: %s" % (str(REGISTRATION_MODES), app.ARGS.type)) + dorigid = "rigid" in app.ARGS.type + doaffine = "affine" in app.ARGS.type + dolinear = dorigid or doaffine + dononlinear = "nonlinear" in app.ARGS.type + assert (dorigid + doaffine + dononlinear >= 1), "FIXME: registration type not valid" + + input_output = app.ARGS.input_dir + [app.ARGS.template] + n_contrasts = len(input_output) // 2 + if len(input_output) != 2 * n_contrasts: + raise MRtrixError('expected two arguments per contrast, received %i: %s' % (len(input_output), ', '.join(input_output))) + if n_contrasts > 1: + app.console('Generating population template using multi-contrast registration') + + # reorder arguments for multi-contrast registration as after command line parsing app.ARGS.input_dir holds all but one argument + app.ARGS.input_dir = [] + app.ARGS.template = [] + for i_contrast in range(n_contrasts): + inargs = (input_output[i_contrast*2], input_output[i_contrast*2+1]) + if not os.path.isdir(inargs[0]): + raise MRtrixError('input directory %s not found' % inargs[0]) + app.ARGS.input_dir.append(relpath(inargs[0])) + app.ARGS.template.append(relpath(inargs[1])) + + cns = Contrasts() + app.debug(str(cns)) + + in_files = [sorted(path.all_in_dir(input_dir, dir_path=False)) for input_dir in app.ARGS.input_dir] + if len(in_files[0]) <= 1: + raise MRtrixError('Not enough images found in input directory ' + app.ARGS.input_dir[0] + + '. More than one image is needed to generate a population template') + if n_contrasts > 1: + for cid in range(1, n_contrasts): + if len(in_files[cid]) != len(in_files[0]): + raise MRtrixError('Found %i images in input directory %s ' % (len(app.ARGS.input_dir[0]), app.ARGS.input_dir[0]) + + 'but %i input images in %s.' 
% (len(app.ARGS.input_dir[cid]), app.ARGS.input_dir[cid])) + else: + app.console('Generating a population-average template from ' + str(len(in_files[0])) + ' input images') + if n_contrasts > 1: + app.console('using ' + str(len(in_files)) + ' contrasts for each input image') + + voxel_size = None + if app.ARGS.voxel_size: + voxel_size = app.ARGS.voxel_size.split(',') + if len(voxel_size) == 1: + voxel_size = voxel_size * 3 + try: + if len(voxel_size) != 3: + raise ValueError + [float(v) for v in voxel_size] #pylint: disable=expression-not-assigned + except ValueError as exception: + raise MRtrixError('voxel size needs to be a single or three comma-separated floating point numbers; received: ' + str(app.ARGS.voxel_size)) from exception + + agg_measure = 'mean' + if app.ARGS.aggregate is not None: + if not app.ARGS.aggregate in AGGREGATION_MODES: + app.error("aggregation type must be one of %s. provided: %s" % (str(AGGREGATION_MODES), app.ARGS.aggregate)) + agg_measure = app.ARGS.aggregate + + agg_weights = app.ARGS.aggregation_weights + if agg_weights is not None: + agg_measure = "weighted_" + agg_measure + if agg_measure != 'weighted_mean': + app.error("aggregation weights require '-aggregate mean' option. provided: %s" % (app.ARGS.aggregate)) + if not os.path.isfile(app.ARGS.aggregation_weights): + app.error("aggregation weights file not found: %s" % app.ARGS.aggregation_weights) + + initial_alignment = app.ARGS.initial_alignment + if initial_alignment not in ["mass", "robust_mass", "geometric", "none"]: + raise MRtrixError('initial_alignment must be one of ' + " ".join(["mass", "robust_mass", "geometric", "none"]) + " provided: " + str(initial_alignment)) + + linear_estimator = app.ARGS.linear_estimator + if linear_estimator and not linear_estimator.lower() == 'none': + if not dolinear: + raise MRtrixError('linear_estimator specified when no linear registration is requested') + if linear_estimator not in ["l1", "l2", "lp"]: + raise MRtrixError('linear_estimator must be one of ' + " ".join(["l1", "l2", "lp"]) + " provided: " + str(linear_estimator)) + + use_masks = False + mask_files = [] + if app.ARGS.mask_dir: + use_masks = True + app.ARGS.mask_dir = relpath(app.ARGS.mask_dir) + if not os.path.isdir(app.ARGS.mask_dir): + raise MRtrixError('mask directory not found') + mask_files = sorted(path.all_in_dir(app.ARGS.mask_dir, dir_path=False)) + if len(mask_files) < len(in_files[0]): + raise MRtrixError('there are not enough mask images for the number of images in the input directory') + + if not use_masks: + app.warn('no masks input. 
Use input masks to reduce computation time and improve robustness') + + if app.ARGS.template_mask and not use_masks: + raise MRtrixError('you cannot output a template mask because no subject masks were input using -mask_dir') + + nanmask_input = app.ARGS.nanmask + if nanmask_input and not use_masks: + raise MRtrixError('you cannot use NaN masking when no subject masks were input using -mask_dir') + + ins, xcontrast_xsubject_pre_postfix = parse_input_files(in_files, mask_files, cns, agg_weights) + + leave_one_out = 'auto' + if app.ARGS.leave_one_out is not None: + leave_one_out = app.ARGS.leave_one_out + if not leave_one_out in ['0', '1', 'auto']: + raise MRtrixError('leave_one_out not understood: ' + str(leave_one_out)) + if leave_one_out == 'auto': + leave_one_out = 2 < len(ins) < 15 + else: + leave_one_out = bool(int(leave_one_out)) + if leave_one_out: + app.console('performing leave-one-out registration') + # check that at sum of weights is positive for any grouping if weighted aggregation is used + weights = [float(inp.aggregation_weight) for inp in ins if inp.aggregation_weight is not None] + if weights and sum(weights) - max(weights) <= 0: + raise MRtrixError('leave-one-out registration requires positive aggregation weights in all groupings') + + noreorientation = app.ARGS.noreorientation + + do_pause_on_warn = True + if app.ARGS.linear_no_pause: + do_pause_on_warn = False + if not dolinear: + raise MRtrixError("linear option set when no linear registration is performed") + + if len(app.ARGS.template) != n_contrasts: + raise MRtrixError('mismatch between number of output templates (%i) ' % len(app.ARGS.template) + + 'and number of contrasts (%i)' % n_contrasts) + for templ in app.ARGS.template: + app.check_output_path(templ) + + if app.ARGS.warp_dir: + app.ARGS.warp_dir = relpath(app.ARGS.warp_dir) + app.check_output_path(app.ARGS.warp_dir) + + if app.ARGS.transformed_dir: + app.ARGS.transformed_dir = [relpath(d) for d in app.ARGS.transformed_dir.split(',')] + if len(app.ARGS.transformed_dir) != n_contrasts: + raise MRtrixError('require multiple comma separated transformed directories if multi-contrast registration is used') + for tdir in app.ARGS.transformed_dir: + app.check_output_path(tdir) + + if app.ARGS.linear_transformations_dir: + if not dolinear: + raise MRtrixError("linear option set when no linear registration is performed") + app.ARGS.linear_transformations_dir = relpath(app.ARGS.linear_transformations_dir) + app.check_output_path(app.ARGS.linear_transformations_dir) + + # automatically detect SH series in each contrast + do_fod_registration = False # in any contrast + cns.n_volumes = [] + cns.fod_reorientation = [] + for cid in range(n_contrasts): + header = image.Header(ins[0].get_ims_path(False)[cid]) + image_size = header.size() + if len(image_size) < 3 or len(image_size) > 4: + raise MRtrixError('only 3 and 4 dimensional images can be used to build a template') + if len(image_size) == 4: + cns.fod_reorientation.append(header.is_sh() and not noreorientation) + cns.n_volumes.append(image_size[3]) + do_fod_registration = do_fod_registration or cns.fod_reorientation[-1] + else: + cns.fod_reorientation.append(False) + cns.n_volumes.append(0) + if do_fod_registration: + app.console("SH Series detected, performing FOD registration in contrast: " + + ', '.join(app.ARGS.input_dir[cid] for cid in range(n_contrasts) if cns.fod_reorientation[cid])) + c_mrtransform_reorientation = [' -reorient_fod ' + ('yes' if cns.fod_reorientation[cid] else 'no') + ' ' + for cid in 
range(n_contrasts)] + + if nanmask_input: + app.console("NaN masking transformed images") + + # rigid options + if app.ARGS.rigid_scale: + rigid_scales = [float(x) for x in app.ARGS.rigid_scale.split(',')] + if not dorigid: + raise MRtrixError("rigid_scales option set when no rigid registration is performed") + else: + rigid_scales = DEFAULT_RIGID_SCALES + if app.ARGS.rigid_lmax: + if not dorigid: + raise MRtrixError("rigid_lmax option set when no rigid registration is performed") + rigid_lmax = [int(x) for x in app.ARGS.rigid_lmax.split(',')] + if do_fod_registration and len(rigid_scales) != len(rigid_lmax): + raise MRtrixError('rigid_scales and rigid_lmax schedules are not equal in length: scales stages: %s, lmax stages: %s' % (len(rigid_scales), len(rigid_lmax))) + else: + rigid_lmax = DEFAULT_RIGID_LMAX + + rigid_niter = [100] * len(rigid_scales) + if app.ARGS.rigid_niter: + if not dorigid: + raise MRtrixError("rigid_niter specified when no rigid registration is performed") + rigid_niter = [int(x) for x in app.ARGS.rigid_niter.split(',')] + if len(rigid_niter) == 1: + rigid_niter = rigid_niter * len(rigid_scales) + elif len(rigid_scales) != len(rigid_niter): + raise MRtrixError('rigid_scales and rigid_niter schedules are not equal in length: scales stages: %s, niter stages: %s' % (len(rigid_scales), len(rigid_niter))) + + # affine options + if app.ARGS.affine_scale: + affine_scales = [float(x) for x in app.ARGS.affine_scale.split(',')] + if not doaffine: + raise MRtrixError("affine_scale option set when no affine registration is performed") + else: + affine_scales = DEFAULT_AFFINE_SCALES + if app.ARGS.affine_lmax: + if not doaffine: + raise MRtrixError("affine_lmax option set when no affine registration is performed") + affine_lmax = [int(x) for x in app.ARGS.affine_lmax.split(',')] + if do_fod_registration and len(affine_scales) != len(affine_lmax): + raise MRtrixError('affine_scales and affine_lmax schedules are not equal in length: scales stages: %s, lmax stages: %s' % (len(affine_scales), len(affine_lmax))) + else: + affine_lmax = DEFAULT_AFFINE_LMAX + + affine_niter = [500] * len(affine_scales) + if app.ARGS.affine_niter: + if not doaffine: + raise MRtrixError("affine_niter specified when no affine registration is performed") + affine_niter = [int(x) for x in app.ARGS.affine_niter.split(',')] + if len(affine_niter) == 1: + affine_niter = affine_niter * len(affine_scales) + elif len(affine_scales) != len(affine_niter): + raise MRtrixError('affine_scales and affine_niter schedules are not equal in length: scales stages: %s, niter stages: %s' % (len(affine_scales), len(affine_niter))) + + linear_scales = [] + linear_lmax = [] + linear_niter = [] + linear_type = [] + if dorigid: + linear_scales += rigid_scales + linear_lmax += rigid_lmax + linear_niter += rigid_niter + linear_type += ['rigid'] * len(rigid_scales) + + if doaffine: + linear_scales += affine_scales + linear_lmax += affine_lmax + linear_niter += affine_niter + linear_type += ['affine'] * len(affine_scales) + + assert len(linear_type) == len(linear_scales) + assert len(linear_scales) == len(linear_niter) + if do_fod_registration: + if len(linear_lmax) != len(linear_niter): + mismatch = [] + if len(rigid_lmax) != len(rigid_niter): + mismatch += ['rigid: lmax stages: %s, niter stages: %s' % (len(rigid_lmax), len(rigid_niter))] + if len(affine_lmax) != len(affine_niter): + mismatch += ['affine: lmax stages: %s, niter stages: %s' % (len(affine_lmax), len(affine_niter))] + raise MRtrixError('linear registration: lmax and niter 
schedules are not equal in length: %s' % (', '.join(mismatch))) + app.console('-' * 60) + app.console('initial alignment of images: %s' % initial_alignment) + app.console('-' * 60) + if n_contrasts > 1: + for cid in range(n_contrasts): + app.console('\tcontrast "%s": %s, ' % (cns.suff[cid], cns.names[cid]) + + 'objective weight: %s' % cns.mc_weight_initial_alignment[cid]) + + if dolinear: + app.console('-' * 60) + app.console('linear registration stages:') + app.console('-' * 60) + if n_contrasts > 1: + for cid in range(n_contrasts): + msg = '\tcontrast "%s": %s' % (cns.suff[cid], cns.names[cid]) + if 'rigid' in linear_type: + msg += ', objective weight rigid: %s' % cns.mc_weight_rigid[cid] + if 'affine' in linear_type: + msg += ', objective weight affine: %s' % cns.mc_weight_affine[cid] + app.console(msg) + + if do_fod_registration: + for istage, [tpe, scale, lmax, niter] in enumerate(zip(linear_type, linear_scales, linear_lmax, linear_niter)): + app.console('(%02i) %s scale: %.4f, niter: %i, lmax: %i' % (istage, tpe.ljust(9), scale, niter, lmax)) + else: + for istage, [tpe, scale, niter] in enumerate(zip(linear_type, linear_scales, linear_niter)): + app.console('(%02i) %s scale: %.4f, niter: %i, no reorientation' % (istage, tpe.ljust(9), scale, niter)) + + datatype_option = ' -datatype float32' + outofbounds_option = ' -nan' + + if not dononlinear: + nl_scales = [] + nl_lmax = [] + nl_niter = [] + if app.ARGS.warp_dir: + raise MRtrixError('warp_dir specified when no nonlinear registration is performed') + else: + nl_scales = [float(x) for x in app.ARGS.nl_scale.split(',')] if app.ARGS.nl_scale else DEFAULT_NL_SCALES + nl_niter = [int(x) for x in app.ARGS.nl_niter.split(',')] if app.ARGS.nl_niter else DEFAULT_NL_NITER + nl_lmax = [int(x) for x in app.ARGS.nl_lmax.split(',')] if app.ARGS.nl_lmax else DEFAULT_NL_LMAX + + if len(nl_scales) != len(nl_niter): + raise MRtrixError('nl_scales and nl_niter schedules are not equal in length: scales stages: %s, niter stages: %s' % (len(nl_scales), len(nl_niter))) + + app.console('-' * 60) + app.console('nonlinear registration stages:') + app.console('-' * 60) + if n_contrasts > 1: + for cid in range(n_contrasts): + app.console('\tcontrast "%s": %s, objective weight: %s' % (cns.suff[cid], cns.names[cid], cns.mc_weight_nl[cid])) + + if do_fod_registration: + if len(nl_scales) != len(nl_lmax): + raise MRtrixError('nl_scales and nl_lmax schedules are not equal in length: scales stages: %s, lmax stages: %s' % (len(nl_scales), len(nl_lmax))) + + if do_fod_registration: + for istage, [scale, lmax, niter] in enumerate(zip(nl_scales, nl_lmax, nl_niter)): + app.console('(%02i) nonlinear scale: %.4f, niter: %i, lmax: %i' % (istage, scale, niter, lmax)) + else: + for istage, [scale, niter] in enumerate(zip(nl_scales, nl_niter)): + app.console('(%02i) nonlinear scale: %.4f, niter: %i, no reorientation' % (istage, scale, niter)) + + app.console('-' * 60) + app.console('input images:') + app.console('-' * 60) + for inp in ins: + app.console('\t' + inp.info()) + + app.make_scratch_dir() + app.goto_scratch_dir() + + for contrast in cns.suff: + path.make_dir('input_transformed' + contrast) + + for contrast in cns.suff: + path.make_dir('isfinite' + contrast) + + path.make_dir('linear_transforms_initial') + path.make_dir('linear_transforms') + for level in range(0, len(linear_scales)): + path.make_dir('linear_transforms_%02i' % level) + for level in range(0, len(nl_scales)): + path.make_dir('warps_%02i' % level) + + if use_masks: + path.make_dir('mask_transformed') + 
write_log = (app.VERBOSITY >= 2) + if write_log: + path.make_dir('log') + + if initial_alignment == 'robust_mass': + if not use_masks: + raise MRtrixError('robust_mass initial alignment requires masks') + path.make_dir('robust') + + if app.ARGS.copy_input: + app.console('Copying images into scratch directory') + for inp in ins: + inp.cache_local() + + # Make initial template in average space using first contrast + app.console('Generating initial template') + input_filenames = [inp.get_ims_path(False)[0] for inp in ins] + if voxel_size is None: + run.command(['mraverageheader', input_filenames, 'average_header.mif', '-fill']) + else: + run.command(['mraverageheader', '-fill', input_filenames, '-', '|', + 'mrgrid', '-', 'regrid', '-voxel', ','.join(map(str, voxel_size)), 'average_header.mif']) + + # crop average space to extent defined by original masks + if use_masks: + progress = app.ProgressBar('Importing input masks to average space for template cropping', len(ins)) + for inp in ins: + run.command('mrtransform ' + inp.msk_path + ' -interp nearest -template average_header.mif ' + inp.msk_transformed) + progress.increment() + progress.done() + run.command(['mrmath', [inp.msk_transformed for inp in ins], 'max', 'mask_initial.mif']) + run.command('mrgrid average_header.mif crop -mask mask_initial.mif average_header_cropped.mif') + run.function(os.remove, 'mask_initial.mif') + run.function(os.remove, 'average_header.mif') + run.function(shutil.move, 'average_header_cropped.mif', 'average_header.mif') + progress = app.ProgressBar('Erasing temporary mask images', len(ins)) + for inp in ins: + run.function(os.remove, inp.msk_transformed) + progress.increment() + progress.done() + + # create average space headers for other contrasts + if n_contrasts > 1: + avh3d = 'average_header3d.mif' + avh4d = 'average_header4d.mif' + if len(image.Header('average_header.mif').size()) == 3: + run.command('mrconvert average_header.mif ' + avh3d) + else: + run.command('mrconvert average_header.mif -coord 3 0 -axes 0,1,2 ' + avh3d) + run.command('mrconvert ' + avh3d + ' -axes 0,1,2,-1 ' + avh4d) + for cid in range(n_contrasts): + if cns.n_volumes[cid] == 0: + run.function(copy, avh3d, 'average_header' + cns.suff[cid] + '.mif') + elif cns.n_volumes[cid] == 1: + run.function(copy, avh4d, 'average_header' + cns.suff[cid] + '.mif') + else: + run.command(['mrcat', [avh3d] * cns.n_volumes[cid], '-axis', '3', 'average_header' + cns.suff[cid] + '.mif']) + run.function(os.remove, avh3d) + run.function(os.remove, avh4d) + else: + run.function(shutil.move, 'average_header.mif', 'average_header' + cns.suff[0] + '.mif') + + cns.templates = ['average_header' + csuff + '.mif' for csuff in cns.suff] + + if initial_alignment == 'none': + progress = app.ProgressBar('Resampling input images to template space with no initial alignment', len(ins) * n_contrasts) + for inp in ins: + for cid in range(n_contrasts): + run.command('mrtransform ' + inp.ims_path[cid] + c_mrtransform_reorientation[cid] + ' -interp linear ' + + '-template ' + cns.templates[cid] + ' ' + inp.ims_transformed[cid] + + outofbounds_option + + datatype_option) + progress.increment() + progress.done() + + if use_masks: + progress = app.ProgressBar('Reslicing input masks to average header', len(ins)) + for inp in ins: + run.command('mrtransform ' + inp.msk_path + ' ' + inp.msk_transformed + ' ' + + '-interp nearest -template ' + cns.templates[0] + ' ' + + datatype_option) + progress.increment() + progress.done() + + if nanmask_input: + 
inplace_nan_mask([inp.ims_transformed[cid] for inp in ins for cid in range(n_contrasts)], + [inp.msk_transformed for inp in ins for cid in range(n_contrasts)]) + + if leave_one_out: + calculate_isfinite(ins, cns) + + if not dolinear: + for inp in ins: + with open(os.path.join('linear_transforms_initial', inp.uid + '.txt'), 'w', encoding='utf-8') as fout: + fout.write('1 0 0 0\n0 1 0 0\n0 0 1 0\n0 0 0 1\n') + + run.function(copy, 'average_header' + cns.suff[0] + '.mif', 'average_header.mif') + + else: + progress = app.ProgressBar('Performing initial rigid registration to template', len(ins)) + mask_option = '' + cid = 0 + lmax_option = ' -rigid_lmax 0 ' if cns.fod_reorientation[cid] else ' -noreorientation ' + contrast_weight_option = cns.initial_alignment_weight_option + for inp in ins: + output_option = ' -rigid ' + os.path.join('linear_transforms_initial', inp.uid + '.txt') + images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, cns.templates)]) + if use_masks: + mask_option = ' -mask1 ' + inp.msk_path + if initial_alignment == 'robust_mass': + if not os.path.isfile('robust/template.mif'): + if cns.n_volumes[cid] > 0: + run.command('mrconvert ' + cns.templates[cid] + ' -coord 3 0 - | mrconvert - -axes 0,1,2 robust/template.mif') + else: + run.command('mrconvert ' + cns.templates[cid] + ' robust/template.mif') + if n_contrasts > 1: + cmd = ['mrcalc', inp.ims_path[cid], cns.mc_weight_initial_alignment[cid], '-mult'] + for cid in range(1, n_contrasts): + cmd += [inp.ims_path[cid], cns.mc_weight_initial_alignment[cid], '-mult', '-add'] + contrast_weight_option = '' + run.command(' '.join(cmd) + + ' - | mrfilter - zclean -zlower 3 -zupper 3 robust/image_' + inp.uid + '.mif' + ' -maskin ' + inp.msk_path + ' -maskout robust/mask_' + inp.uid + '.mif') + else: + run.command('mrfilter ' + inp.ims_path[0] + ' zclean -zlower 3 -zupper 3 robust/image_' + inp.uid + '.mif' + + ' -maskin ' + inp.msk_path + ' -maskout robust/mask_' + inp.uid + '.mif') + images = 'robust/image_' + inp.uid + '.mif robust/template.mif' + mask_option = ' -mask1 ' + 'robust/mask_' + inp.uid + '.mif' + lmax_option = '' + + run.command('mrregister ' + images + + mask_option + + ' -rigid_scale 1 ' + + ' -rigid_niter 0 ' + + ' -type rigid ' + + lmax_option + + contrast_weight_option + + ' -rigid_init_translation ' + initial_alignment.replace('robust_', '') + ' ' + + datatype_option + + output_option) + # translate input images to centre of mass without interpolation + for cid in range(n_contrasts): + run.command('mrtransform ' + inp.ims_path[cid] + c_mrtransform_reorientation[cid] + + ' -linear ' + os.path.join('linear_transforms_initial', inp.uid + '.txt') + + ' ' + inp.ims_transformed[cid] + "_translated.mif" + datatype_option) + if use_masks: + run.command('mrtransform ' + inp.msk_path + + ' -linear ' + os.path.join('linear_transforms_initial', inp.uid + '.txt') + + ' ' + inp.msk_transformed + "_translated.mif" + + datatype_option) + progress.increment() + # update average space of first contrast to new extent, delete other average space images + run.command(['mraverageheader', [inp.ims_transformed[cid] + '_translated.mif' for inp in ins], 'average_header_tight.mif']) + progress.done() + + if voxel_size is None: + run.command('mrgrid average_header_tight.mif pad -uniform 10 average_header.mif', force=True) + else: + run.command('mrgrid average_header_tight.mif pad -uniform 10 - | ' + 'mrgrid - regrid -voxel ' + ','.join(map(str, voxel_size)) + ' average_header.mif', force=True) + run.function(os.remove, 
'average_header_tight.mif') + for cid in range(1, n_contrasts): + run.function(os.remove, 'average_header' + cns.suff[cid] + '.mif') + + if use_masks: + # reslice masks + progress = app.ProgressBar('Reslicing input masks to average header', len(ins)) + for inp in ins: + run.command('mrtransform ' + inp.msk_transformed + '_translated.mif' + ' ' + inp.msk_transformed + ' ' + + '-interp nearest -template average_header.mif' + datatype_option) + progress.increment() + progress.done() + # crop average space to extent defined by translated masks + run.command(['mrmath', [inp.msk_transformed for inp in ins], 'max', 'mask_translated.mif']) + run.command('mrgrid average_header.mif crop -mask mask_translated.mif average_header_cropped.mif') + # pad average space to allow for deviation from initial alignment + run.command('mrgrid average_header_cropped.mif pad -uniform 10 average_header.mif', force=True) + run.function(os.remove, 'average_header_cropped.mif') + # reslice masks + progress = app.ProgressBar('Reslicing masks to new padded average header', len(ins)) + for inp in ins: + run.command('mrtransform ' + inp.msk_transformed + '_translated.mif ' + inp.msk_transformed + ' ' + + '-interp nearest -template average_header.mif' + datatype_option, force=True) + run.function(os.remove, inp.msk_transformed + '_translated.mif') + progress.increment() + progress.done() + run.function(os.remove, 'mask_translated.mif') + + # reslice images + progress = app.ProgressBar('Reslicing input images to average header', len(ins) * n_contrasts) + for cid in range(n_contrasts): + for inp in ins: + run.command('mrtransform ' + c_mrtransform_reorientation[cid] + inp.ims_transformed[cid] + '_translated.mif ' + + inp.ims_transformed[cid] + ' ' + + ' -interp linear -template average_header.mif' + + outofbounds_option + + datatype_option) + run.function(os.remove, inp.ims_transformed[cid] + '_translated.mif') + progress.increment() + progress.done() + + if nanmask_input: + inplace_nan_mask([inp.ims_transformed[cid] for inp in ins for cid in range(n_contrasts)], + [inp.msk_transformed for inp in ins for cid in range(n_contrasts)]) + + if leave_one_out: + calculate_isfinite(ins, cns) + + cns.templates = ['initial_template' + contrast + '.mif' for contrast in cns.suff] + for cid in range(n_contrasts): + aggregate(ins, 'initial_template' + cns.suff[cid] + '.mif', cid, agg_measure) + if cns.n_volumes[cid] == 1: + run.function(shutil.move, 'initial_template' + cns.suff[cid] + '.mif', 'tmp.mif') + run.command('mrconvert tmp.mif initial_template' + cns.suff[cid] + '.mif -axes 0,1,2,-1') + + # Optimise template with linear registration + if not dolinear: + for inp in ins: + run.function(copy, os.path.join('linear_transforms_initial', inp.uid+'.txt'), + os.path.join('linear_transforms', inp.uid+'.txt')) + else: + level = 0 + regtype = linear_type[0] + def linear_msg(): + return 'Optimising template with linear registration (stage {0} of {1}; {2})'.format(level + 1, len(linear_scales), regtype) + progress = app.ProgressBar(linear_msg, len(linear_scales) * len(ins) * (1 + n_contrasts + int(use_masks))) + for level, (regtype, scale, niter, lmax) in enumerate(zip(linear_type, linear_scales, linear_niter, linear_lmax)): + for inp in ins: + initialise_option = '' + if use_masks: + mask_option = ' -mask1 ' + inp.msk_path + else: + mask_option = '' + lmax_option = ' -noreorientation' + metric_option = '' + mrregister_log_option = '' + if regtype == 'rigid': + scale_option = ' -rigid_scale ' + str(scale) + niter_option = ' -rigid_niter ' + 
str(niter) + regtype_option = ' -type rigid' + output_option = ' -rigid ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') + contrast_weight_option = cns.rigid_weight_option + initialise_option = (' -rigid_init_matrix ' + + os.path.join('linear_transforms_%02i' % (level - 1) if level > 0 else 'linear_transforms_initial', inp.uid + '.txt')) + if do_fod_registration: + lmax_option = ' -rigid_lmax ' + str(lmax) + if linear_estimator: + metric_option = ' -rigid_metric.diff.estimator ' + linear_estimator + if app.VERBOSITY >= 2: + mrregister_log_option = ' -info -rigid_log ' + os.path.join('log', inp.uid + contrast[cid] + "_" + str(level) + '.log') + else: + scale_option = ' -affine_scale ' + str(scale) + niter_option = ' -affine_niter ' + str(niter) + regtype_option = ' -type affine' + output_option = ' -affine ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') + contrast_weight_option = cns.affine_weight_option + initialise_option = (' -affine_init_matrix ' + + os.path.join('linear_transforms_%02i' % (level - 1) if level > 0 else 'linear_transforms_initial', inp.uid + '.txt')) + if do_fod_registration: + lmax_option = ' -affine_lmax ' + str(lmax) + if linear_estimator: + metric_option = ' -affine_metric.diff.estimator ' + linear_estimator + if write_log: + mrregister_log_option = ' -info -affine_log ' + os.path.join('log', inp.uid + contrast[cid] + "_" + str(level) + '.log') + + if leave_one_out: + tmpl = [] + for cid in range(n_contrasts): + isfinite = 'isfinite%s/%s.mif' % (cns.suff[cid], inp.uid) + weight = inp.aggregation_weight if inp.aggregation_weight is not None else '1' + # loo = (template * weighted sum - weight * this) / (weighted sum - weight) + run.command('mrcalc ' + cns.isfinite_count[cid] + ' ' + isfinite + ' -sub - | mrcalc ' + cns.templates[cid] + + ' ' + cns.isfinite_count[cid] + ' -mult ' + inp.ims_transformed[cid] + ' ' + weight + ' -mult ' + + ' -sub - -div loo_%s' % cns.templates[cid], force=True) + tmpl.append('loo_%s' % cns.templates[cid]) + images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, tmpl)]) + else: + images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, cns.templates)]) + command = 'mrregister ' + images + \ + initialise_option + \ + mask_option + \ + scale_option + \ + niter_option + \ + lmax_option + \ + regtype_option + \ + metric_option + \ + datatype_option + \ + contrast_weight_option + \ + output_option + \ + mrregister_log_option + run.command(command, force=True) + check_linear_transformation(os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt'), command, + pause_on_warn=do_pause_on_warn) + if leave_one_out: + for im_temp in tmpl: + run.function(os.remove, im_temp) + progress.increment() + + # Here we ensure the overall template properties don't change (too much) over levels + # The reference is the initialisation as that's used to construct the average space. + # T_i: linear trafo for case i, i.e. template(x) = E [ image_i(T_i x) ] + # R_i: initial trafo for case i (identity if initial alignment is none) + # A = E[ T_i ]: average of current trafos + # B = E[ R_i ]: average of initial trafos + # C_i' = T_i B A^{-1}: "drift" corrected T_i + # T_i <- C_i' + # Notes: + # - This approximately stabilises E[ T_i ], it's relatively close to B + # - Not sure whether it's preferable to stabilise E[ T_i^{-1} ] + # - If one subject's registration fails, this will affect the average and therefore the template which could result in unstable behaviour. + # - The template appearance changes slightly over levels, but the template and trafos are affected in the same way so should not affect template convergence.
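+ # Illustrative sketch of the correction (hedged example; the identity-initialisation and arithmetic-averaging assumptions are for exposition only, not part of the algorithm):
+ # if all initial transforms are the identity, B = E[ R_i ] = I and the update reduces to C_i = T_i A^{-1}.
+ # Every T_i is right-multiplied by the same matrix, so the relative transforms T_i T_j^{-1} between subjects are unchanged,
+ # while the (arithmetic) average of the corrected transforms becomes E[ T_i ] A^{-1} = A A^{-1} = I = B,
+ # i.e. the drift common to all subjects is removed. With non-identity R_i the same reasoning gives E[ C_i ] ~ B.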
+ if not app.ARGS.linear_no_drift_correction: + run.command(['transformcalc', [os.path.join('linear_transforms_initial', _inp.uid + '.txt') for _inp in ins], + 'average', 'linear_transform_average_init.txt', '-quiet'], force=True) + run.command(['transformcalc', [os.path.join('linear_transforms_%02i' % level, _inp.uid + '.txt') for _inp in ins], + 'average', 'linear_transform_average_%02i_uncorrected.txt' % level, '-quiet'], force=True) + run.command(['transformcalc', 'linear_transform_average_%02i_uncorrected.txt' % level, + 'invert', 'linear_transform_average_%02i_uncorrected_inv.txt' % level, '-quiet'], force=True) + + transform_average_init = matrix.load_transform('linear_transform_average_init.txt') + transform_average_current_inv = matrix.load_transform('linear_transform_average_%02i_uncorrected_inv.txt' % level) + + transform_update = matrix.dot(transform_average_init, transform_average_current_inv) + matrix.save_transform(os.path.join('linear_transforms_%02i_drift_correction.txt' % level), transform_update, force=True) + if regtype == 'rigid': + run.command('transformcalc ' + os.path.join('linear_transforms_%02i_drift_correction.txt' % level) + + ' rigid ' + os.path.join('linear_transforms_%02i_drift_correction.txt' % level) + ' -quiet', force=True) + transform_update = matrix.load_transform(os.path.join('linear_transforms_%02i_drift_correction.txt' % level)) + + for inp in ins: + transform = matrix.load_transform('linear_transforms_%02i/' % level + inp.uid + '.txt') + transform_updated = matrix.dot(transform, transform_update) + run.function(copy, 'linear_transforms_%02i/' % level + inp.uid + '.txt', 'linear_transforms_%02i/' % level + inp.uid + '.precorrection') + matrix.save_transform(os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt'), transform_updated, force=True) + + # compute average trafos and their properties for easier debugging + run.command(['transformcalc', [os.path.join('linear_transforms_%02i' % level, _inp.uid + '.txt') for _inp in ins], + 'average', 'linear_transform_average_%02i.txt' % level, '-quiet'], force=True) + run.command('transformcalc linear_transform_average_%02i.txt decompose linear_transform_average_%02i.dec' % (level, level), force=True) + + + for cid in range(n_contrasts): + for inp in ins: + run.command('mrtransform ' + c_mrtransform_reorientation[cid] + inp.ims_path[cid] + + ' -template ' + cns.templates[cid] + + ' -linear ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') + + ' ' + inp.ims_transformed[cid] + + outofbounds_option + + datatype_option, + force=True) + progress.increment() + + if use_masks: + for inp in ins: + run.command('mrtransform ' + inp.msk_path + + ' -template ' + cns.templates[0] + + ' -interp nearest' + + ' -linear ' + os.path.join('linear_transforms_%02i' % level, inp.uid + '.txt') + + ' ' + inp.msk_transformed, + force=True) + progress.increment() + + if nanmask_input: + inplace_nan_mask([inp.ims_transformed[cid] for inp in ins for cid in range(n_contrasts)], + [inp.msk_transformed for inp in ins for cid in range(n_contrasts)]) + + if leave_one_out: + calculate_isfinite(ins, cns) + + for cid in range(n_contrasts): + if level > 0 and app.ARGS.delete_temporary_files: + os.remove(cns.templates[cid]) + cns.templates[cid] = 'linear_template%02i%s.mif' % (level, cns.suff[cid]) + aggregate(ins, cns.templates[cid], cid, agg_measure) + if 
cns.n_volumes[cid] == 1: + run.function(shutil.move, cns.templates[cid], 'tmp.mif') + run.command('mrconvert tmp.mif ' + cns.templates[cid] + ' -axes 0,1,2,-1') + run.function(os.remove, 'tmp.mif') + + for entry in os.listdir('linear_transforms_%02i' % level): + run.function(copy, os.path.join('linear_transforms_%02i' % level, entry), os.path.join('linear_transforms', entry)) + progress.done() + + # Create a template mask for nl registration by taking the intersection of all transformed input masks and dilating + if use_masks and (dononlinear or app.ARGS.template_mask): + run.command(['mrmath', path.all_in_dir('mask_transformed')] + + 'min - | maskfilter - median - | maskfilter - dilate -npass 5 init_nl_template_mask.mif'.split(), force=True) + current_template_mask = 'init_nl_template_mask.mif' + + if dononlinear: + path.make_dir('warps') + level = 0 + def nonlinear_msg(): + return 'Optimising template with non-linear registration (stage {0} of {1})'.format(level + 1, len(nl_scales)) + progress = app.ProgressBar(nonlinear_msg, len(nl_scales) * len(ins)) + for level, (scale, niter, lmax) in enumerate(zip(nl_scales, nl_niter, nl_lmax)): + for inp in ins: + if level > 0: + initialise_option = ' -nl_init ' + os.path.join('warps_%02i' % (level - 1), inp.uid + '.mif') + scale_option = '' + else: + scale_option = ' -nl_scale ' + str(scale) + if not doaffine: # rigid or no previous linear stage + initialise_option = ' -rigid_init_matrix ' + os.path.join('linear_transforms', inp.uid + '.txt') + else: + initialise_option = ' -affine_init_matrix ' + os.path.join('linear_transforms', inp.uid + '.txt') + + if use_masks: + mask_option = ' -mask1 ' + inp.msk_path + ' -mask2 ' + current_template_mask + else: + mask_option = '' + + if do_fod_registration: + lmax_option = ' -nl_lmax ' + str(lmax) + else: + lmax_option = ' -noreorientation' + + contrast_weight_option = cns.nl_weight_option + + if leave_one_out: + tmpl = [] + for cid in range(n_contrasts): + isfinite = 'isfinite%s/%s.mif' % (cns.suff[cid], inp.uid) + weight = inp.aggregation_weight if inp.aggregation_weight is not None else '1' + # loo = (template * weighted sum - weight * this) / (weighted sum - weight) + run.command('mrcalc ' + cns.isfinite_count[cid] + ' ' + isfinite + ' -sub - | mrcalc ' + cns.templates[cid] + + ' ' + cns.isfinite_count[cid] + ' -mult ' + inp.ims_transformed[cid] + ' ' + weight + ' -mult ' + + ' -sub - -div loo_%s' % cns.templates[cid], force=True) + tmpl.append('loo_%s' % cns.templates[cid]) + images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, tmpl)]) + else: + images = ' '.join([p + ' ' + t for p, t in zip(inp.ims_path, cns.templates)]) + run.command('mrregister ' + images + + ' -type nonlinear' + + ' -nl_niter ' + str(nl_niter[level]) + + ' -nl_warp_full ' + os.path.join('warps_%02i' % level, inp.uid + '.mif') + + ' -transformed ' + + ' -transformed '.join([inp.ims_transformed[cid] for cid in range(n_contrasts)]) + ' ' + + ' -nl_update_smooth ' + app.ARGS.nl_update_smooth + + ' -nl_disp_smooth ' + app.ARGS.nl_disp_smooth + + ' -nl_grad_step ' + app.ARGS.nl_grad_step + + initialise_option + + contrast_weight_option + + scale_option + + mask_option + + datatype_option + + outofbounds_option + + lmax_option, + force=True) + + if use_masks: + run.command('mrtransform ' + inp.msk_path + + ' -template ' + cns.templates[0] + + ' -warp_full ' + os.path.join('warps_%02i' % level, inp.uid + '.mif') + + ' ' + inp.msk_transformed + + ' -interp nearest ', + force=True) + + if leave_one_out: + for im_temp in tmpl: + 
run.function(os.remove, im_temp) + + if level > 0: + run.function(os.remove, os.path.join('warps_%02i' % (level - 1), inp.uid + '.mif')) + + progress.increment(nonlinear_msg()) + + if nanmask_input: + inplace_nan_mask([_inp.ims_transformed[cid] for _inp in ins for cid in range(n_contrasts)], + [_inp.msk_transformed for _inp in ins for cid in range(n_contrasts)]) + + if leave_one_out: + calculate_isfinite(ins, cns) + + for cid in range(n_contrasts): + if level > 0 and app.ARGS.delete_temporary_files: + os.remove(cns.templates[cid]) + cns.templates[cid] = 'nl_template%02i%s.mif' % (level, cns.suff[cid]) + aggregate(ins, cns.templates[cid], cid, agg_measure) + if cns.n_volumes[cid] == 1: + run.function(shutil.move, cns.templates[cid], 'tmp.mif') + run.command('mrconvert tmp.mif ' + cns.templates[cid] + ' -axes 0,1,2,-1') + run.function(os.remove, 'tmp.mif') + + if use_masks: + run.command(['mrmath', path.all_in_dir('mask_transformed')] + + 'min - | maskfilter - median - | '.split() + + ('maskfilter - dilate -npass 5 nl_template_mask' + str(level) + '.mif').split()) + current_template_mask = 'nl_template_mask' + str(level) + '.mif' + + if level < len(nl_scales) - 1: + if scale < nl_scales[level + 1]: + upsample_factor = nl_scales[level + 1] / scale + for inp in ins: + run.command('mrgrid ' + os.path.join('warps_%02i' % level, inp.uid + '.mif') + + ' regrid -scale %f tmp.mif' % upsample_factor, force=True) + run.function(shutil.move, 'tmp.mif', os.path.join('warps_%02i' % level, inp.uid + '.mif')) + else: + for inp in ins: + run.function(shutil.move, os.path.join('warps_%02i' % level, inp.uid + '.mif'), 'warps') + progress.done() + + for cid in range(n_contrasts): + run.command('mrconvert ' + cns.templates[cid] + ' ' + cns.templates_out[cid], + mrconvert_keyval='NULL', force=app.FORCE_OVERWRITE) + + if app.ARGS.warp_dir: + warp_path = path.from_user(app.ARGS.warp_dir, False) + if os.path.exists(warp_path): + run.function(shutil.rmtree, warp_path) + os.makedirs(warp_path) + progress = app.ProgressBar('Copying non-linear warps to output directory "' + warp_path + '"', len(ins)) + for inp in ins: + keyval = image.Header(os.path.join('warps', inp.uid + '.mif')).keyval() + keyval = dict((k, keyval[k]) for k in ('linear1', 'linear2')) + json_path = os.path.join('warps', inp.uid + '.json') + with open(json_path, 'w', encoding='utf-8') as json_file: + json.dump(keyval, json_file) + run.command('mrconvert ' + os.path.join('warps', inp.uid + '.mif') + ' ' + + shlex.quote(os.path.join(warp_path, xcontrast_xsubject_pre_postfix[0] + + inp.uid + xcontrast_xsubject_pre_postfix[1] + '.mif')), + mrconvert_keyval=json_path, force=app.FORCE_OVERWRITE) + progress.increment() + progress.done() + + if app.ARGS.linear_transformations_dir: + linear_transformations_path = path.from_user(app.ARGS.linear_transformations_dir, False) + if os.path.exists(linear_transformations_path): + run.function(shutil.rmtree, linear_transformations_path) + os.makedirs(linear_transformations_path) + for inp in ins: + trafo = matrix.load_transform(os.path.join('linear_transforms', inp.uid + '.txt')) + matrix.save_transform(os.path.join(linear_transformations_path, + xcontrast_xsubject_pre_postfix[0] + inp.uid + + xcontrast_xsubject_pre_postfix[1] + '.txt'), + trafo, + force=app.FORCE_OVERWRITE) + + if app.ARGS.transformed_dir: + for cid, trdir in enumerate(app.ARGS.transformed_dir): + transformed_path = path.from_user(trdir, False) + if os.path.exists(transformed_path): + run.function(shutil.rmtree, transformed_path) + 
os.makedirs(transformed_path) + progress = app.ProgressBar('Copying transformed images to output directory "' + transformed_path + '"', len(ins)) + for inp in ins: + run.command(['mrconvert', inp.ims_transformed[cid], os.path.join(transformed_path, inp.ims_filenames[cid])], + mrconvert_keyval=inp.get_ims_path(False)[cid], force=app.FORCE_OVERWRITE) + progress.increment() + progress.done() + + if app.ARGS.template_mask: + run.command('mrconvert ' + current_template_mask + ' ' + path.from_user(app.ARGS.template_mask, True), + mrconvert_keyval='NULL', force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/population_template/input.py b/python/mrtrix3/population_template/input.py new file mode 100644 index 0000000000..c2522b03a4 --- /dev/null +++ b/python/mrtrix3/population_template/input.py @@ -0,0 +1,136 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import os +from .utils import abspath + +class Input: + """ + Class that holds input information specific to a single image (multiple contrasts) + + Attributes + ---------- + uid: str + unique identifier for these input image(s), does not contain spaces + + ims_path: list of str + full path to input images, shell quoted OR paths to cached file if cache_local was called + + msk_path: str + full path to input mask, shell quoted OR path to cached file if cache_local was called + + ims_filenames : list of str + for each contrast the input file paths stripped of their respective directories. Used for final output only. + + msk_filename: str + as ims_filenames + + ims_transformed: list of str + input_transformed/.mif + + msk_transformed: list of str + mask_transformed/.mif + + aggregation_weight: float + weights used in image aggregation that forms the template. Has to be normalised across inputs. + + _im_directories : list of str + full path to user-provided input directories containing the input images, one for each contrast + + _msk_directory: str + full path to user-provided mask directory + + _local_ims: list of str + path to cached input images + + _local_msk: str + path to cached input mask + + Methods + ------- + cache_local() + copy files into folders in current working directory. 
modifies _local_ims and _local_msk + + """ + def __init__(self, uid, filenames, directories, contrasts, mask_filename='', mask_directory=''): + self.contrasts = contrasts + + self.uid = uid + assert self.uid, "UID empty" + assert self.uid.count(' ') == 0, 'UID "%s" contains whitespace' % self.uid + + assert len(directories) == len(filenames) + self.ims_filenames = filenames + self._im_directories = directories + + self.msk_filename = mask_filename + self._msk_directory = mask_directory + + n_contrasts = len(contrasts) + + self.ims_transformed = [os.path.join('input_transformed'+contrasts[cid], uid + '.mif') for cid in range(n_contrasts)] + self.msk_transformed = os.path.join('mask_transformed', uid + '.mif') + + self.aggregation_weight = None + + self._local_ims = [] + self._local_msk = None + + def __repr__(self, *args, **kwargs): + text = '\nInput [' + for key in sorted([k for k in self.__dict__ if not k.startswith('_')]): + text += '\n\t' + str(key) + ': ' + str(self.__dict__[key]) + text += '\n]' + return text + + def info(self): + message = ['input: ' + self.uid] + if self.aggregation_weight: + message += ['agg weight: ' + self.aggregation_weight] + for csuff, fname in zip(self.contrasts, self.ims_filenames): + message += [((csuff + ': ') if csuff else '') + '"' + fname + '"'] + if self.msk_filename: + message += ['mask: ' + self.msk_filename] + return ', '.join(message) + + def cache_local(self): + from mrtrix3 import run, path # pylint: disable=no-name-in-module, import-outside-toplevel + contrasts = self.contrasts + for cid, csuff in enumerate(contrasts): + if not os.path.isdir('input' + csuff): + path.make_dir('input' + csuff) + run.command('mrconvert ' + self.ims_path[cid] + ' ' + os.path.join('input' + csuff, self.uid + '.mif')) + self._local_ims = [os.path.join('input' + csuff, self.uid + '.mif') for csuff in contrasts] + if self.msk_filename: + if not os.path.isdir('mask'): + path.make_dir('mask') + run.command('mrconvert ' + self.msk_path + ' ' + os.path.join('mask', self.uid + '.mif')) + self._local_msk = os.path.join('mask', self.uid + '.mif') + + def get_ims_path(self, quoted=True): + """ return path to input images """ + from mrtrix3 import path # pylint: disable=no-name-in-module, import-outside-toplevel + if self._local_ims: + return self._local_ims + return [path.from_user(abspath(d, f), quoted) for d, f in zip(self._im_directories, self.ims_filenames)] + ims_path = property(get_ims_path) + + def get_msk_path(self, quoted=True): + """ return path to input mask """ + from mrtrix3 import path # pylint: disable=no-name-in-module, import-outside-toplevel + if self._local_msk: + return self._local_msk + return path.from_user(os.path.join(self._msk_directory, self.msk_filename), quoted) if self.msk_filename else None + msk_path = property(get_msk_path) diff --git a/python/mrtrix3/population_template/usage.py b/python/mrtrix3/population_template/usage.py new file mode 100644 index 0000000000..b7989b7151 --- /dev/null +++ b/python/mrtrix3/population_template/usage.py @@ -0,0 +1,70 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('David Raffelt (david.raffelt@florey.edu.au) & Max Pietsch (maximilian.pietsch@kcl.ac.uk) & Thijs Dhollander (thijs.dhollander@gmail.com)') + + cmdline.set_synopsis('Generates an unbiased group-average template from a series of images') + cmdline.add_description('First a template is optimised with linear registration (rigid and/or affine, both by default), then non-linear registration is used to optimise the template further.') + cmdline.add_argument('input_dir', nargs='+', help='Directory containing all input images of a given contrast') + cmdline.add_argument('template', help='Output template image') + + cmdline.add_example_usage('Multi-contrast registration', + 'population_template input_WM_ODFs/ output_WM_template.mif input_GM_ODFs/ output_GM_template.mif', + 'When performing multi-contrast registration, the input directory and corresponding output template ' + 'image for a given contrast are to be provided as a pair, ' + 'with the pairs corresponding to different contrasts provided sequentially.') + + options = cmdline.add_argument_group('Multi-contrast options') + options.add_argument('-mc_weight_initial_alignment', help='Weight contribution of each contrast to the initial alignment. Comma separated, default: 1.0') + options.add_argument('-mc_weight_rigid', help='Weight contribution of each contrast to the objective of rigid registration. Comma separated, default: 1.0') + options.add_argument('-mc_weight_affine', help='Weight contribution of each contrast to the objective of affine registration. Comma separated, default: 1.0') + options.add_argument('-mc_weight_nl', help='Weight contribution of each contrast to the objective of nonlinear registration. Comma separated, default: 1.0') + + linoptions = cmdline.add_argument_group('Options for the linear registration') + linoptions.add_argument('-linear_no_pause', action='store_true', help='Do not pause the script if a linear registration seems implausible') + linoptions.add_argument('-linear_no_drift_correction', action='store_true', help='Deactivate correction of template appearance (scale and shear) over iterations') + linoptions.add_argument('-linear_estimator', help='Specify estimator for intensity difference metric. Valid choices are: l1 (least absolute: |x|), l2 (ordinary least squares), lp (least powers: |x|^1.2), Default: None (no robust estimator used)') + linoptions.add_argument('-rigid_scale', help='Specify the multi-resolution pyramid used to build the rigid template, in the form of a list of scale factors (default: %s). This and affine_scale implicitly define the number of template levels' % ','.join([str(x) for x in DEFAULT_RIGID_SCALES])) + linoptions.add_argument('-rigid_lmax', help='Specify the lmax used for rigid registration for each scale factor, in the form of a list of integers (default: %s). 
The list must be the same length as the rigid_scale factor list' % ','.join([str(x) for x in DEFAULT_RIGID_LMAX])) + linoptions.add_argument('-rigid_niter', help='Specify the number of registration iterations used within each level before updating the template, in the form of a list of integers (default: 100 for each scale). This must be a single number or a list of the same length as the rigid_scale factor list') + linoptions.add_argument('-affine_scale', help='Specify the multi-resolution pyramid used to build the affine template, in the form of a list of scale factors (default: %s). This and rigid_scale implicitly define the number of template levels' % ','.join([str(x) for x in DEFAULT_AFFINE_SCALES])) + linoptions.add_argument('-affine_lmax', help='Specify the lmax used for affine registration for each scale factor, in the form of a list of integers (default: %s). The list must be the same length as the affine_scale factor list' % ','.join([str(x) for x in DEFAULT_AFFINE_LMAX])) + linoptions.add_argument('-affine_niter', help='Specify the number of registration iterations used within each level before updating the template, in the form of a list of integers (default: 500 for each scale). This must be a single number or a list of the same length as the affine_scale factor list') + + nloptions = cmdline.add_argument_group('Options for the non-linear registration') + nloptions.add_argument('-nl_scale', help='Specify the multi-resolution pyramid used to build the non-linear template, in the form of a list of scale factors (default: %s). This implicitly defines the number of template levels' % ','.join([str(x) for x in DEFAULT_NL_SCALES])) + nloptions.add_argument('-nl_lmax', help='Specify the lmax used for non-linear registration for each scale factor, in the form of a list of integers (default: %s). The list must be the same length as the nl_scale factor list' % ','.join([str(x) for x in DEFAULT_NL_LMAX])) + nloptions.add_argument('-nl_niter', help='Specify the number of registration iterations used within each level before updating the template, in the form of a list of integers (default: %s). The list must be the same length as the nl_scale factor list' % ','.join([str(x) for x in DEFAULT_NL_NITER])) + nloptions.add_argument('-nl_update_smooth', default='2.0', help='Regularise the gradient update field with Gaussian smoothing (standard deviation in voxel units, Default 2.0 x voxel_size)') + nloptions.add_argument('-nl_disp_smooth', default='1.0', help='Regularise the displacement field with Gaussian smoothing (standard deviation in voxel units, Default 1.0 x voxel_size)') + nloptions.add_argument('-nl_grad_step', default='0.5', help='The gradient step size for non-linear registration (Default: 0.5)') + + options = cmdline.add_argument_group('Input, output and general options') + options.add_argument('-type', help='Specify the types of registration stages to perform. Options are "rigid" (perform rigid registration only which might be useful for intra-subject registration in longitudinal analysis), "affine" (perform affine registration) and "nonlinear" as well as combinations of registration types: %s. Default: rigid_affine_nonlinear' % ', '.join('"' + x + '"' for x in REGISTRATION_MODES if "_" in x), default='rigid_affine_nonlinear') + options.add_argument('-voxel_size', help='Define the template voxel size in mm. 
Use either a single value for isotropic voxels or 3 comma separated values.') + options.add_argument('-initial_alignment', default='mass', help='Method of alignment to form the initial template. Options are "mass" (default), "robust_mass" (requires masks), "geometric" and "none".') + options.add_argument('-mask_dir', help='Optionally input a set of masks inside a single directory, one per input image (with the same file name prefix). Using masks will speed up registration significantly. Note that masks are used for registration, not for aggregation. To exclude areas from aggregation, NaN-mask your input images.') + options.add_argument('-warp_dir', help='Output a directory containing warps from each input to the template. If the folder does not exist it will be created') + options.add_argument('-transformed_dir', help='Output a directory containing the input images transformed to the template. If the folder does not exist it will be created. For multi-contrast registration, provide comma separated list of directories.') + options.add_argument('-linear_transformations_dir', help='Output a directory containing the linear transformations used to generate the template. If the folder does not exist it will be created') + options.add_argument('-template_mask', help='Output a template mask. Only works if -mask_dir has been input. The template mask is computed as the intersection of all subject masks in template space.') + options.add_argument('-noreorientation', action='store_true', help='Turn off FOD reorientation in mrregister. Reorientation is on by default if the number of volumes in the 4th dimension corresponds to the number of coefficients in an antipodally symmetric spherical harmonic series (i.e. 6, 15, 28, 45, 66 etc)') + options.add_argument('-leave_one_out', help='Register each input image to a template that does not contain that image. Valid choices: 0, 1, auto. (Default: auto (true if n_subjects larger than 2 and smaller than 15)) ') + options.add_argument('-aggregate', help='Measure used to aggregate information from transformed images to the template image. Valid choices: %s. Default: mean' % ', '.join(AGGREGATION_MODES)) + options.add_argument('-aggregation_weights', help='Comma separated file containing weights used for weighted image aggregation. Each row must contain the identifiers of the input image and its weight. Note that this weighs intensity values not transformations (shape).') + options.add_argument('-nanmask', action='store_true', help='Optionally apply masks to (transformed) input images using NaN values to specify include areas for registration and aggregation. Only works if -mask_dir has been input.') + options.add_argument('-copy_input', action='store_true', help='Copy input images and masks into local scratch directory.') + options.add_argument('-delete_temporary_files', action='store_true', help='Delete temporary files from scratch directory during template creation.') diff --git a/python/mrtrix3/population_template/utils.py b/python/mrtrix3/population_template/utils.py new file mode 100644 index 0000000000..309be8d29e --- /dev/null +++ b/python/mrtrix3/population_template/utils.py @@ -0,0 +1,295 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import math, os, re, shutil, sys +from . import IMAGEEXT +from . import Input + +def abspath(arg, *args): + return os.path.abspath(os.path.join(arg, *args)) + + +def relpath(arg, *args): + from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel + return os.path.relpath(os.path.join(arg, *args), app.WORKING_DIR) + + +def copy(src, dst, follow_symlinks=True): + """Copy data but do not set mode bits. Return the file's destination. + + mimics shutil.copy but without setting mode bits as shutil.copymode can fail on exotic mounts + (observed on cifs with file_mode=0777). + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + if sys.version_info[0] > 2: + shutil.copyfile(src, dst, follow_symlinks=follow_symlinks) # pylint: disable=unexpected-keyword-arg + else: + shutil.copyfile(src, dst) + return dst + + +def check_linear_transformation(transformation, cmd, max_scaling=0.5, max_shear=0.2, max_rot=None, pause_on_warn=True): + from mrtrix3 import app, run, utils #pylint: disable=no-name-in-module, import-outside-toplevel + if max_rot is None: + max_rot = 2 * math.pi + + good = True + run.command('transformcalc ' + transformation + ' decompose ' + transformation + 'decomp') + if not os.path.isfile(transformation + 'decomp'): # does not exist if run with -continue option + app.console(transformation + 'decomp not found. 
Skipping check') + return True + data = utils.load_keyval(transformation + 'decomp') + run.function(os.remove, transformation + 'decomp') + scaling = [float(value) for value in data['scaling']] + if any(a < 0 for a in scaling) or any(a > (1 + max_scaling) for a in scaling) or any( + a < (1 - max_scaling) for a in scaling): + app.warn("large scaling (" + str(scaling) + ") in " + transformation) + good = False + shear = [float(value) for value in data['shear']] + if any(abs(a) > max_shear for a in shear): + app.warn("large shear (" + str(shear) + ") in " + transformation) + good = False + rot_angle = float(data['angle_axis'][0]) + if abs(rot_angle) > max_rot: + app.warn("large rotation (" + str(rot_angle) + ") in " + transformation) + good = False + + if not good: + newcmd = [] + what = '' + init_rotation_found = False + skip = 0 + for element in cmd.split(): + if skip: + skip -= 1 + continue + if '_init_rotation' in element: + init_rotation_found = True + if '_init_matrix' in element: + skip = 1 + continue + if 'affine_scale' in element: + assert what != 'rigid' + what = 'affine' + elif 'rigid_scale' in element: + assert what != 'affine' + what = 'rigid' + newcmd.append(element) + newcmd = " ".join(newcmd) + if not init_rotation_found: + app.console("replacing the transformation obtained with:") + app.console(cmd) + if what: + newcmd += ' -' + what + '_init_translation mass -' + what + '_init_rotation search' + app.console("by the one obtained with:") + app.console(newcmd) + run.command(newcmd, force=True) + return check_linear_transformation(transformation, newcmd, max_scaling, max_shear, max_rot, pause_on_warn=pause_on_warn) + if pause_on_warn: + app.warn("you might want to manually repeat mrregister with different parameters and overwrite the transformation file: \n%s" % transformation) + app.console('The command that failed the test was: \n' + cmd) + app.console('Working directory: \n' + os.getcwd()) + input("press enter to continue population_template") + return good + + +def aggregate(inputs, output, contrast_idx, mode, force=True): + from mrtrix3 import MRtrixError, run # pylint: disable=no-name-in-module, import-outside-toplevel + + images = [inp.ims_transformed[contrast_idx] for inp in inputs] + if mode == 'mean': + run.command(['mrmath', images, 'mean', '-keep_unary_axes', output], force=force) + elif mode == 'median': + run.command(['mrmath', images, 'median', '-keep_unary_axes', output], force=force) + elif mode == 'weighted_mean': + weights = [inp.aggregation_weight for inp in inputs] + assert not any(w is None for w in weights), weights + wsum = sum(float(w) for w in weights) + cmd = ['mrcalc'] + if wsum <= 0: + raise MRtrixError("the sum of aggregation weights has to be positive") + for weight, image in zip(weights, images): + if float(weight) != 0: + cmd += [image, weight, '-mult'] + (['-add'] if len(cmd) > 1 else []) + cmd += ['%.16f' % wsum, '-div', output] + run.command(cmd, force=force) + else: + raise MRtrixError("aggregation mode %s not understood" % mode) + + +def inplace_nan_mask(images, masks): + from mrtrix3 import run # pylint: disable=no-name-in-module, import-outside-toplevel + assert len(images) == len(masks), (len(images), len(masks)) + for image, mask in zip(images, masks): + target_dir = os.path.split(image)[0] + masked = os.path.join(target_dir, '__' + os.path.split(image)[1]) + run.command("mrcalc " + mask + " " + image + " nan -if " + masked, force=True) + run.function(shutil.move, masked, image) + + +def calculate_isfinite(inputs, contrasts): + from 
mrtrix3 import run, path # pylint: disable=no-name-in-module, import-outside-toplevel + agg_weights = [float(inp.aggregation_weight) for inp in inputs if inp.aggregation_weight is not None] + for cid in range(contrasts.n_contrasts): + for inp in inputs: + if contrasts.n_volumes[cid] > 0: + cmd = 'mrconvert ' + inp.ims_transformed[cid] + ' -coord 3 0 - | mrcalc - -finite' + else: + cmd = 'mrcalc ' + inp.ims_transformed[cid] + ' -finite' + if inp.aggregation_weight: + cmd += ' %s -mult ' % inp.aggregation_weight + cmd += ' isfinite%s/%s.mif' % (contrasts.suff[cid], inp.uid) + run.command(cmd, force=True) + for cid in range(contrasts.n_contrasts): + cmd = ['mrmath', path.all_in_dir('isfinite%s' % contrasts.suff[cid]), 'sum'] + if agg_weights: + agg_weight_norm = str(float(len(agg_weights)) / sum(agg_weights)) + cmd += ['-', '|', 'mrcalc', '-', agg_weight_norm, '-mult'] + run.command(cmd + [contrasts.isfinite_count[cid]], force=True) + + +def get_common_postfix(file_list): + return os.path.commonprefix([i[::-1] for i in file_list])[::-1] + + +def get_common_prefix(file_list): + return os.path.commonprefix(file_list) + + + +def parse_input_files(in_files, mask_files, contrasts, f_agg_weight=None, whitespace_repl='_'): + """ + matches input images across contrasts and pair them with masks. + extracts unique identifiers from mask and image filenames by stripping common pre and postfix (per contrast and for masks) + unique identifiers contain ASCII letters, numbers and '_' but no whitespace which is replaced by whitespace_repl + + in_files: list of lists + the inner list holds filenames specific to a contrast + + mask_files: + can be empty + + returns list of Input + + checks: 3d_nonunity + TODO check if no common grid & trafo across contrasts (only relevant for robust init?) 
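+
+ Example (illustrative only; filenames are hypothetical)::
+
+ >>> get_common_prefix(['sub-01_wm.mif', 'sub-02_wm.mif'])
+ 'sub-0'
+ >>> get_common_postfix(['sub-01_wm.mif', 'sub-02_wm.mif'])
+ '_wm.mif'
+
+ so the extracted UIDs are '1' and '2'; mask files 'sub-01_mask.mif' and 'sub-02_mask.mif'
+ reduce to the same UIDs and are therefore paired with the corresponding images.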
+ + """ + from mrtrix3 import MRtrixError, app, path, image # pylint: disable=no-name-in-module, import-outside-toplevel + contrasts = contrasts.suff + inputs = [] + def paths_to_file_uids(paths, prefix, postfix): + """ strip pre and postfix from filename, replace whitespace characters """ + uid_path = {} + uids = [] + for path in paths: + uid = re.sub(re.escape(postfix)+'$', '', re.sub('^'+re.escape(prefix), '', os.path.split(path)[1])) + uid = re.sub(r'\s+', whitespace_repl, uid) + if not uid: + raise MRtrixError('No uniquely identifiable part of filename "' + path + '" ' + 'after prefix and postfix substitution ' + 'with prefix "' + prefix + '" and postfix "' + postfix + '"') + app.debug('UID mapping: "' + path + '" --> "' + uid + '"') + if uid in uid_path: + raise MRtrixError('unique file identifier is not unique: "' + uid + '" mapped to "' + path + '" and "' + uid_path[uid] +'"') + uid_path[uid] = path + uids.append(uid) + return uids + + # mask uids + mask_uids = [] + if mask_files: + mask_common_postfix = get_common_postfix(mask_files) + if not mask_common_postfix: + raise MRtrixError('mask filenames do not have a common postfix') + mask_common_prefix = get_common_prefix([os.path.split(m)[1] for m in mask_files]) + mask_uids = paths_to_file_uids(mask_files, mask_common_prefix, mask_common_postfix) + if app.VERBOSITY > 1: + app.console('mask uids:' + str(mask_uids)) + + # images uids + common_postfix = [get_common_postfix(files) for files in in_files] + common_prefix = [get_common_prefix(files) for files in in_files] + # xcontrast_xsubject_pre_postfix: prefix and postfix of the common part across contrasts and subjects, + # without image extensions and leading or trailing '_' or '-' + xcontrast_xsubject_pre_postfix = [get_common_postfix(common_prefix).lstrip('_-'), + get_common_prefix([re.sub('.('+'|'.join(IMAGEEXT)+')(.gz)?$', '', pfix).rstrip('_-') for pfix in common_postfix])] + if app.VERBOSITY > 1: + app.console("common_postfix: " + str(common_postfix)) + app.console("common_prefix: " + str(common_prefix)) + app.console("xcontrast_xsubject_pre_postfix: " + str(xcontrast_xsubject_pre_postfix)) + for ipostfix, postfix in enumerate(common_postfix): + if not postfix: + raise MRtrixError('image filenames do not have a common postfix:\n' + '\n'.join(in_files[ipostfix])) + + c_uids = [] + for cid, files in enumerate(in_files): + c_uids.append(paths_to_file_uids(files, common_prefix[cid], common_postfix[cid])) + + if app.VERBOSITY > 1: + app.console('uids by contrast:' + str(c_uids)) + + # join images and masks + for ifile, fname in enumerate(in_files[0]): + uid = c_uids[0][ifile] + fnames = [fname] + dirs = [abspath(path.from_user(app.ARGS.input_dir[0], False))] + if len(contrasts) > 1: + for cid in range(1, len(contrasts)): + dirs.append(abspath(path.from_user(app.ARGS.input_dir[cid], False))) + image.check_3d_nonunity(os.path.join(dirs[cid], in_files[cid][ifile])) + if uid != c_uids[cid][ifile]: + raise MRtrixError('no matching image was found for image %s and contrasts %s and %s.' % (fname, dirs[0], dirs[cid])) + fnames.append(in_files[cid][ifile]) + + if mask_files: + if uid not in mask_uids: + raise MRtrixError('no matching mask image was found for input image ' + fname + ' with uid "'+uid+'". 
' + 'Mask uid candidates: ' + ', '.join(['"%s"' % m for m in mask_uids])) + index = mask_uids.index(uid) + # uid, filenames, directories, contrasts, mask_filename = '', mask_directory = '', agg_weight = None + inputs.append(Input(uid, fnames, dirs, contrasts, + mask_filename=mask_files[index], mask_directory=abspath(path.from_user(app.ARGS.mask_dir, False)))) + else: + inputs.append(Input(uid, fnames, dirs, contrasts)) + + # parse aggregation weights and match to inputs + if f_agg_weight: + import csv # pylint: disable=import-outside-toplevel + try: + with open(f_agg_weight, 'r', encoding='utf-8') as fweights: + agg_weights = dict((row[0].lstrip().rstrip(), row[1]) for row in csv.reader(fweights, delimiter=',', quotechar='#')) + except UnicodeDecodeError: + with open(f_agg_weight, 'r', encoding='utf-8') as fweights: + reader = csv.reader(fweights.read().decode('utf-8', errors='replace'), delimiter=',', quotechar='#') + agg_weights = dict((row[0].lstrip().rstrip(), row[1]) for row in reader) + pref = '^' + re.escape(get_common_prefix(list(agg_weights.keys()))) + suff = re.escape(get_common_postfix(list(agg_weights.keys()))) + '$' + for key in agg_weights.keys(): + agg_weights[re.sub(suff, '', re.sub(pref, '', key))] = agg_weights.pop(key).strip() + + for inp in inputs: + if inp.uid not in agg_weights: + raise MRtrixError('aggregation weight not found for %s' % inp.uid) + inp.aggregation_weight = agg_weights[inp.uid] + app.console('Using aggregation weights ' + f_agg_weight) + weights = [float(inp.aggregation_weight) for inp in inputs if inp.aggregation_weight is not None] + if sum(weights) <= 0: + raise MRtrixError('Sum of aggregation weights is not positive: ' + str(weights)) + if any(w < 0 for w in weights): + app.warn('Negative aggregation weights: ' + str(weights)) + + return inputs, xcontrast_xsubject_pre_postfix \ No newline at end of file diff --git a/python/mrtrix3/responsemean/__init__.py b/python/mrtrix3/responsemean/__init__.py index a3fb17e43d..e69de29bb2 100644 --- a/python/mrtrix3/responsemean/__init__.py +++ b/python/mrtrix3/responsemean/__init__.py @@ -1,76 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - - -import math, os, sys - - - -def usage(cmdline): #pylint: disable=unused-variable - cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and David Raffelt (david.raffelt@florey.edu.au)') - cmdline.set_synopsis('Calculate the mean response function from a set of text files') - cmdline.add_description('Example usage: ' + os.path.basename(sys.argv[0]) + ' input_response1.txt input_response2.txt input_response3.txt ... 
output_average_response.txt') - cmdline.add_description('All response function files provided must contain the same number of unique b-values (lines), as well as the same number of coefficients per line.') - cmdline.add_description('As long as the number of unique b-values is identical across all input files, the coefficients will be averaged. This is performed on the assumption that the actual acquired b-values are identical. This is however impossible for the ' + os.path.basename(sys.argv[0]) + ' command to determine based on the data provided; it is therefore up to the user to ensure that this requirement is satisfied.') - cmdline.add_argument('inputs', help='The input response functions', nargs='+') - cmdline.add_argument('output', help='The output mean response function') - cmdline.add_argument('-legacy', action='store_true', help='Use the legacy behaviour of former command \'average_response\': average response function coefficients directly, without compensating for global magnitude differences between input files') - - - -def execute(): #pylint: disable=unused-variable - from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module, import-outside-toplevel - from mrtrix3 import app, matrix #pylint: disable=no-name-in-module, import-outside-toplevel - - app.check_output_path(app.ARGS.output) - - data = [ ] # 3D matrix: Subject, b-value, ZSH coefficient - for filepath in app.ARGS.inputs: - subject = matrix.load_matrix(filepath) - if any(len(line) != len(subject[0]) for line in subject[1:]): - raise MRtrixError('File \'' + filepath + '\' does not contain the same number of entries per line (multi-shell response functions must have the same number of coefficients per b-value; pad the data with zeroes if necessary)') - if data: - if len(subject) != len(data[0]): - raise MRtrixError('File \'' + filepath + '\' contains ' + str(len(subject)) + ' b-value' + ('s' if len(subject) > 1 else '') + ' (line' + ('s' if len(subject) > 1 else '') + '); this differs from the first file read (' + sys.argv[1] + '), which contains ' + str(len(data[0])) + ' line' + ('s' if len(data[0]) > 1 else '')) - if len(subject[0]) != len(data[0][0]): - raise MRtrixError('File \'' + filepath + '\' contains ' + str(len(subject[0])) + ' coefficient' + ('s' if len(subject[0]) > 1 else '') + ' per b-value (line); this differs from the first file read (' + sys.argv[1] + '), which contains ' + str(len(data[0][0])) + ' coefficient' + ('s' if len(data[0][0]) > 1 else '') + ' per line') - data.append(subject) - - app.console('Calculating mean RF across ' + str(len(data)) + ' inputs, with ' + str(len(data[0])) + ' b-value' + ('s' if len(data[0])>1 else '') + ' and lmax=' + str(2*(len(data[0][0])-1))) - - # Old approach: Just take the average across all subjects - # New approach: Calculate a multiplier to use for each subject, based on the geometric mean - # scaling factor required to bring the subject toward the group mean l=0 terms (across shells) - - mean_lzero_terms = [ sum(subject[row][0] for subject in data)/len(data) for row in range(len(data[0])) ] - app.debug('Mean l=0 terms: ' + str(mean_lzero_terms)) - - weighted_sum_coeffs = [[0.0] * len(data[0][0]) for _ in range(len(data[0]))] #pylint: disable=unused-variable - for subject in data: - if app.ARGS.legacy: - multiplier = 1.0 - else: - subj_lzero_terms = [line[0] for line in subject] - log_multiplier = 0.0 - for subj_lzero, mean_lzero in zip(subj_lzero_terms, mean_lzero_terms): - log_multiplier += math.log(mean_lzero / subj_lzero) - log_multiplier /= len(data[0]) 
- multiplier = math.exp(log_multiplier) - app.debug('Subject l=0 terms: ' + str(subj_lzero_terms)) - app.debug('Resulting multipler: ' + str(multiplier)) - weighted_sum_coeffs = [ [ a + multiplier*b for a, b in zip(linea, lineb) ] for linea, lineb in zip(weighted_sum_coeffs, subject) ] - - mean_coeffs = [ [ f/len(data) for f in line ] for line in weighted_sum_coeffs ] - matrix.save_matrix(app.ARGS.output, mean_coeffs, force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/responsemean/execute.py b/python/mrtrix3/responsemean/execute.py new file mode 100644 index 0000000000..5a7cd251ba --- /dev/null +++ b/python/mrtrix3/responsemean/execute.py @@ -0,0 +1,61 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import math, sys +from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module +from mrtrix3 import app, matrix #pylint: disable=no-name-in-module + +def execute(): #pylint: disable=unused-variable + + app.check_output_path(app.ARGS.output) + + data = [ ] # 3D matrix: Subject, b-value, ZSH coefficient + for filepath in app.ARGS.inputs: + subject = matrix.load_matrix(filepath) + if any(len(line) != len(subject[0]) for line in subject[1:]): + raise MRtrixError('File \'' + filepath + '\' does not contain the same number of entries per line (multi-shell response functions must have the same number of coefficients per b-value; pad the data with zeroes if necessary)') + if data: + if len(subject) != len(data[0]): + raise MRtrixError('File \'' + filepath + '\' contains ' + str(len(subject)) + ' b-value' + ('s' if len(subject) > 1 else '') + ' (line' + ('s' if len(subject) > 1 else '') + '); this differs from the first file read (' + sys.argv[1] + '), which contains ' + str(len(data[0])) + ' line' + ('s' if len(data[0]) > 1 else '')) + if len(subject[0]) != len(data[0][0]): + raise MRtrixError('File \'' + filepath + '\' contains ' + str(len(subject[0])) + ' coefficient' + ('s' if len(subject[0]) > 1 else '') + ' per b-value (line); this differs from the first file read (' + sys.argv[1] + '), which contains ' + str(len(data[0][0])) + ' coefficient' + ('s' if len(data[0][0]) > 1 else '') + ' per line') + data.append(subject) + + app.console('Calculating mean RF across ' + str(len(data)) + ' inputs, with ' + str(len(data[0])) + ' b-value' + ('s' if len(data[0])>1 else '') + ' and lmax=' + str(2*(len(data[0][0])-1))) + + # Old approach: Just take the average across all subjects + # New approach: Calculate a multiplier to use for each subject, based on the geometric mean + # scaling factor required to bring the subject toward the group mean l=0 terms (across shells) + + mean_lzero_terms = [ sum(subject[row][0] for subject in data)/len(data) for row in range(len(data[0])) ] + app.debug('Mean l=0 terms: ' + str(mean_lzero_terms)) + + weighted_sum_coeffs = [[0.0] * len(data[0][0]) for _ in range(len(data[0]))] #pylint: disable=unused-variable + for subject in data: + if 
app.ARGS.legacy: + multiplier = 1.0 + else: + subj_lzero_terms = [line[0] for line in subject] + log_multiplier = 0.0 + for subj_lzero, mean_lzero in zip(subj_lzero_terms, mean_lzero_terms): + log_multiplier += math.log(mean_lzero / subj_lzero) + log_multiplier /= len(data[0]) + multiplier = math.exp(log_multiplier) + app.debug('Subject l=0 terms: ' + str(subj_lzero_terms)) + app.debug('Resulting multipler: ' + str(multiplier)) + weighted_sum_coeffs = [ [ a + multiplier*b for a, b in zip(linea, lineb) ] for linea, lineb in zip(weighted_sum_coeffs, subject) ] + + mean_coeffs = [ [ f/len(data) for f in line ] for line in weighted_sum_coeffs ] + matrix.save_matrix(app.ARGS.output, mean_coeffs, force=app.FORCE_OVERWRITE) diff --git a/python/mrtrix3/responsemean/usage.py b/python/mrtrix3/responsemean/usage.py new file mode 100644 index 0000000000..0ea18d0706 --- /dev/null +++ b/python/mrtrix3/responsemean/usage.py @@ -0,0 +1,26 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +import os, sys + +def usage(cmdline): #pylint: disable=unused-variable + cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and David Raffelt (david.raffelt@florey.edu.au)') + cmdline.set_synopsis('Calculate the mean response function from a set of text files') + cmdline.add_description('Example usage: ' + os.path.basename(sys.argv[0]) + ' input_response1.txt input_response2.txt input_response3.txt ... output_average_response.txt') + cmdline.add_description('All response function files provided must contain the same number of unique b-values (lines), as well as the same number of coefficients per line.') + cmdline.add_description('As long as the number of unique b-values is identical across all input files, the coefficients will be averaged. This is performed on the assumption that the actual acquired b-values are identical. 
This is however impossible for the ' + os.path.basename(sys.argv[0]) + ' command to determine based on the data provided; it is therefore up to the user to ensure that this requirement is satisfied.') + cmdline.add_argument('inputs', help='The input response functions', nargs='+') + cmdline.add_argument('output', help='The output mean response function') + cmdline.add_argument('-legacy', action='store_true', help='Use the legacy behaviour of former command \'average_response\': average response function coefficients directly, without compensating for global magnitude differences between input files') From ed2462b5a04e641f8f6753c74fd56aeee98f4c60 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 7 Mar 2024 00:49:15 +1100 Subject: [PATCH 08/11] Python: Initial pylint fixes after filesystem rearrangement --- python/mrtrix3/5ttgen/execute.py | 2 +- python/mrtrix3/5ttgen/freesurfer/execute.py | 2 +- python/mrtrix3/5ttgen/hsvs/__init__.py | 3 -- python/mrtrix3/5ttgen/hsvs/execute.py | 9 ++-- python/mrtrix3/5ttgen/hsvs/usage.py | 2 + python/mrtrix3/5ttgen/usage.py | 3 +- python/mrtrix3/__init__.py | 2 +- python/mrtrix3/algorithm.py | 3 +- python/mrtrix3/dwi2mask/execute.py | 4 +- python/mrtrix3/dwi2mask/usage.py | 2 +- python/mrtrix3/dwi2response/execute.py | 4 +- python/mrtrix3/dwi2response/usage.py | 2 +- python/mrtrix3/dwibiascorrect/execute.py | 4 +- python/mrtrix3/dwibiascorrect/usage.py | 2 +- python/mrtrix3/dwibiasnormmask/__init__.py | 1 + python/mrtrix3/dwibiasnormmask/execute.py | 4 +- python/mrtrix3/dwibiasnormmask/usage.py | 2 +- python/mrtrix3/dwicat/execute.py | 4 +- python/mrtrix3/dwifslpreproc/execute.py | 4 +- python/mrtrix3/dwifslpreproc/usage.py | 2 +- python/mrtrix3/dwigradcheck/execute.py | 4 +- python/mrtrix3/dwigradcheck/usage.py | 2 +- python/mrtrix3/dwinormalise/execute.py | 2 +- python/mrtrix3/dwinormalise/usage.py | 2 +- python/mrtrix3/dwishellmath/__init__.py | 1 + python/mrtrix3/dwishellmath/execute.py | 4 +- python/mrtrix3/dwishellmath/usage.py | 2 +- python/mrtrix3/for_each/__init__.py | 2 + python/mrtrix3/for_each/entry.py | 2 +- python/mrtrix3/for_each/execute.py | 7 +-- python/mrtrix3/for_each/shared.py | 2 +- python/mrtrix3/for_each/usage.py | 2 +- python/mrtrix3/labelsgmfix/execute.py | 6 +-- python/mrtrix3/mask2glass/execute.py | 3 +- python/mrtrix3/mrtrix_cleanup/execute.py | 4 +- python/mrtrix3/path.py | 2 +- .../mrtrix3/population_template/__init__.py | 2 + .../mrtrix3/population_template/contrasts.py | 4 +- python/mrtrix3/population_template/execute.py | 12 ++--- python/mrtrix3/population_template/input.py | 2 +- python/mrtrix3/population_template/usage.py | 5 ++ python/mrtrix3/population_template/utils.py | 50 +++++++++---------- python/mrtrix3/responsemean/execute.py | 4 +- run_pylint | 2 +- 44 files changed, 100 insertions(+), 89 deletions(-) diff --git a/python/mrtrix3/5ttgen/execute.py b/python/mrtrix3/5ttgen/execute.py index 6d3040a25e..bed0339cf2 100644 --- a/python/mrtrix3/5ttgen/execute.py +++ b/python/mrtrix3/5ttgen/execute.py @@ -13,7 +13,7 @@ # # For more details, see http://www.mrtrix.org/. 
-from mrtrix3 import algorithm, app, run #pylint: disable=no-name-in-module +from mrtrix3 import algorithm, app, run def execute(): #pylint: disable=unused-variable diff --git a/python/mrtrix3/5ttgen/freesurfer/execute.py b/python/mrtrix3/5ttgen/freesurfer/execute.py index 83dec77dbb..b58e6fd45b 100644 --- a/python/mrtrix3/5ttgen/freesurfer/execute.py +++ b/python/mrtrix3/5ttgen/freesurfer/execute.py @@ -31,7 +31,7 @@ def execute(): #pylint: disable=unused-variable lut_output_file_name = 'FreeSurfer2ACT_sgm_amyg_hipp.txt' else: lut_output_file_name = 'FreeSurfer2ACT.txt' - lut_output_path = os.path.join(path.shared_data_path(), path.script_subdir_name(), lut_output_file_name) + lut_output_path = os.path.join(path.shared_data_path(), '5ttgen', lut_output_file_name) if not os.path.isfile(lut_output_path): raise MRtrixError('Could not find lookup table file for converting FreeSurfer parcellation output to tissues (expected location: ' + lut_output_path + ')') diff --git a/python/mrtrix3/5ttgen/hsvs/__init__.py b/python/mrtrix3/5ttgen/hsvs/__init__.py index c892ffc712..35392e0ed5 100644 --- a/python/mrtrix3/5ttgen/hsvs/__init__.py +++ b/python/mrtrix3/5ttgen/hsvs/__init__.py @@ -13,9 +13,6 @@ # # For more details, see http://www.mrtrix.org/. -import os -from mrtrix3 import MRtrixError - HIPPOCAMPI_CHOICES = [ 'subfields', 'first', 'aseg' ] THALAMI_CHOICES = [ 'nuclei', 'first', 'aseg' ] diff --git a/python/mrtrix3/5ttgen/hsvs/execute.py b/python/mrtrix3/5ttgen/hsvs/execute.py index 336499fd1b..84a60a56dc 100644 --- a/python/mrtrix3/5ttgen/hsvs/execute.py +++ b/python/mrtrix3/5ttgen/hsvs/execute.py @@ -16,7 +16,10 @@ import glob, os, re, shutil from mrtrix3 import MRtrixError from mrtrix3 import app, fsl, image, path, run - +from . import ATTEMPT_PC +from . import ASEG_STRUCTURES, AMYG_ASEG, HIPP_ASEG, THAL_ASEG, OTHER_SGM_ASEG +from . import BRAIN_STEM_ASEG, CEREBELLUM_ASEG, CORPUS_CALLOSUM_ASEG, VENTRICLE_CP_ASEG +from . import SGM_FIRST_MAP def check_file(filepath): @@ -167,10 +170,10 @@ def execute(): #pylint: disable=unused-variable raise MRtrixError('FREESURFER_HOME environment variable not set; required for use of hippocampal subfields module') freesurfer_lut_file = os.path.join(os.environ['FREESURFER_HOME'], 'FreeSurferColorLUT.txt') check_file(freesurfer_lut_file) - hipp_lut_file = os.path.join(path.shared_data_path(), path.script_subdir_name(), 'hsvs', 'HippSubfields.txt') + hipp_lut_file = os.path.join(path.shared_data_path(), '5ttgen', 'hsvs', 'HippSubfields.txt') check_file(hipp_lut_file) if hipp_subfield_has_amyg: - amyg_lut_file = os.path.join(path.shared_data_path(), path.script_subdir_name(), 'hsvs', 'AmygSubfields.txt') + amyg_lut_file = os.path.join(path.shared_data_path(), '5ttgen', 'hsvs', 'AmygSubfields.txt') check_file(amyg_lut_file) if app.ARGS.sgm_amyg_hipp: diff --git a/python/mrtrix3/5ttgen/hsvs/usage.py b/python/mrtrix3/5ttgen/hsvs/usage.py index 73077724cb..b1a5b00113 100644 --- a/python/mrtrix3/5ttgen/hsvs/usage.py +++ b/python/mrtrix3/5ttgen/hsvs/usage.py @@ -13,6 +13,8 @@ # # For more details, see http://www.mrtrix.org/. +from . import HIPPOCAMPI_CHOICES, THALAMI_CHOICES + def usage(base_parser, subparsers): #pylint: disable=unused-variable parser = subparsers.add_parser('hsvs', parents=[base_parser]) parser.set_author('Robert E. 
Smith (robert.smith@florey.edu.au)') diff --git a/python/mrtrix3/5ttgen/usage.py b/python/mrtrix3/5ttgen/usage.py index e0c37a7d66..66c8988790 100644 --- a/python/mrtrix3/5ttgen/usage.py +++ b/python/mrtrix3/5ttgen/usage.py @@ -13,7 +13,8 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import algorithm #pylint: disable=no-name-in-module +from mrtrix3 import algorithm + def usage(cmdline): #pylint: disable=unused-variable cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') diff --git a/python/mrtrix3/__init__.py b/python/mrtrix3/__init__.py index 12dc5ab0b8..2f997be509 100644 --- a/python/mrtrix3/__init__.py +++ b/python/mrtrix3/__init__.py @@ -13,7 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -import inspect, os, shlex, sys +import os, shlex, sys from collections import namedtuple from mrtrix3._version import __version__ diff --git a/python/mrtrix3/algorithm.py b/python/mrtrix3/algorithm.py index c21623b5fb..b671b513b2 100644 --- a/python/mrtrix3/algorithm.py +++ b/python/mrtrix3/algorithm.py @@ -19,6 +19,7 @@ import importlib, inspect, pkgutil, sys +from mrtrix3 import app @@ -26,7 +27,6 @@ # options common to all algorithms of a particular script to be applicable once any particular sub-parser # is invoked. Therefore this function must be called _after_ all such options are set up. def usage(cmdline): #pylint: disable=unused-variable - from mrtrix3 import app #pylint: disable=import-outside-toplevel module_name = inspect.currentframe().f_back.f_globals["__name__"] submodules = [submodule_info.name for submodule_info in pkgutil.walk_packages(sys.modules[module_name].__spec__.submodule_search_locations)] base_parser = app.Parser(description='Base parser for construction of subparsers', parents=[cmdline]) @@ -37,7 +37,6 @@ def usage(cmdline): #pylint: disable=unused-variable for submodule in submodules: module = importlib.import_module(module_name + '.' + submodule) module.usage(base_parser, subparsers) - return diff --git a/python/mrtrix3/dwi2mask/execute.py b/python/mrtrix3/dwi2mask/execute.py index 51b303057a..32ace93b99 100644 --- a/python/mrtrix3/dwi2mask/execute.py +++ b/python/mrtrix3/dwi2mask/execute.py @@ -13,8 +13,8 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module -from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module +from mrtrix3 import MRtrixError +from mrtrix3 import algorithm, app, image, path, run def execute(): #pylint: disable=unused-variable diff --git a/python/mrtrix3/dwi2mask/usage.py b/python/mrtrix3/dwi2mask/usage.py index 56f0782be7..e728138e30 100644 --- a/python/mrtrix3/dwi2mask/usage.py +++ b/python/mrtrix3/dwi2mask/usage.py @@ -13,7 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import algorithm, app, _version #pylint: disable=no-name-in-module +from mrtrix3 import algorithm, app, _version def usage(cmdline): #pylint: disable=unused-variable diff --git a/python/mrtrix3/dwi2response/execute.py b/python/mrtrix3/dwi2response/execute.py index f830e87fe3..76bd4d6ea6 100644 --- a/python/mrtrix3/dwi2response/execute.py +++ b/python/mrtrix3/dwi2response/execute.py @@ -13,8 +13,8 @@ # # For more details, see http://www.mrtrix.org/. 
-from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module -from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module +from mrtrix3 import CONFIG, MRtrixError +from mrtrix3 import algorithm, app, image, path, run def execute(): #pylint: disable=unused-variable diff --git a/python/mrtrix3/dwi2response/usage.py b/python/mrtrix3/dwi2response/usage.py index d556753015..d0d9d9e4e2 100644 --- a/python/mrtrix3/dwi2response/usage.py +++ b/python/mrtrix3/dwi2response/usage.py @@ -13,7 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import algorithm, app, _version #pylint: disable=no-name-in-module +from mrtrix3 import algorithm, app, _version def usage(cmdline): #pylint: disable=unused-variable diff --git a/python/mrtrix3/dwibiascorrect/execute.py b/python/mrtrix3/dwibiascorrect/execute.py index 81671a33c3..ecdd4b17d5 100644 --- a/python/mrtrix3/dwibiascorrect/execute.py +++ b/python/mrtrix3/dwibiascorrect/execute.py @@ -13,8 +13,8 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module -from mrtrix3 import algorithm, app, image, path, run #pylint: disable=no-name-in-module +from mrtrix3 import CONFIG, MRtrixError +from mrtrix3 import algorithm, app, image, path, run def execute(): #pylint: disable=unused-variable diff --git a/python/mrtrix3/dwibiascorrect/usage.py b/python/mrtrix3/dwibiascorrect/usage.py index bb614ac345..30fc6802f2 100644 --- a/python/mrtrix3/dwibiascorrect/usage.py +++ b/python/mrtrix3/dwibiascorrect/usage.py @@ -13,7 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import algorithm, app, _version #pylint: disable=no-name-in-module +from mrtrix3 import algorithm, app, _version def usage(cmdline): #pylint: disable=unused-variable cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') diff --git a/python/mrtrix3/dwibiasnormmask/__init__.py b/python/mrtrix3/dwibiasnormmask/__init__.py index d94dcc7d90..34ae865d40 100644 --- a/python/mrtrix3/dwibiasnormmask/__init__.py +++ b/python/mrtrix3/dwibiasnormmask/__init__.py @@ -13,6 +13,7 @@ # # For more details, see http://www.mrtrix.org/. +# pylint: disable=unused-variable DWIBIASCORRECT_MAX_ITERS = 2 LMAXES_MULTI = [4,0,0] LMAXES_SINGLE = [4,0] diff --git a/python/mrtrix3/dwibiasnormmask/execute.py b/python/mrtrix3/dwibiasnormmask/execute.py index e3044a4c11..796b9d4060 100644 --- a/python/mrtrix3/dwibiasnormmask/execute.py +++ b/python/mrtrix3/dwibiasnormmask/execute.py @@ -14,8 +14,8 @@ # For more details, see http://www.mrtrix.org/. import math, os, shutil -from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module -from mrtrix3 import app, fsl, image, matrix, path, run #pylint: disable=no-name-in-module +from mrtrix3 import CONFIG, MRtrixError +from mrtrix3 import app, fsl, image, matrix, path, run from . import LMAXES_MULTI, LMAXES_SINGLE, MASK_ALGO_DEFAULT, MASK_ALGOS def execute(): #pylint: disable=unused-variable diff --git a/python/mrtrix3/dwibiasnormmask/usage.py b/python/mrtrix3/dwibiasnormmask/usage.py index 75dc1d2e2e..9dd62fbfec 100644 --- a/python/mrtrix3/dwibiasnormmask/usage.py +++ b/python/mrtrix3/dwibiasnormmask/usage.py @@ -13,7 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import app #pylint: disable=no-name-in-module +from mrtrix3 import app from . import DWIBIASCORRECT_MAX_ITERS from . 
import LMAXES_MULTI diff --git a/python/mrtrix3/dwicat/execute.py b/python/mrtrix3/dwicat/execute.py index eb0d4aec3a..8c0875e7a3 100644 --- a/python/mrtrix3/dwicat/execute.py +++ b/python/mrtrix3/dwicat/execute.py @@ -14,8 +14,8 @@ # For more details, see http://www.mrtrix.org/. import json, shutil -from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module -from mrtrix3 import app, image, path, run #pylint: disable=no-name-in-module +from mrtrix3 import CONFIG, MRtrixError +from mrtrix3 import app, image, path, run def execute(): #pylint: disable=unused-variable diff --git a/python/mrtrix3/dwifslpreproc/execute.py b/python/mrtrix3/dwifslpreproc/execute.py index a1e816947a..af045629cb 100644 --- a/python/mrtrix3/dwifslpreproc/execute.py +++ b/python/mrtrix3/dwifslpreproc/execute.py @@ -14,8 +14,8 @@ # For more details, see http://www.mrtrix.org/. import glob, itertools, json, math, os, shutil, sys, shlex -from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module -from mrtrix3 import app, fsl, image, matrix, path, phaseencoding, run, utils #pylint: disable=no-name-in-module +from mrtrix3 import CONFIG, MRtrixError +from mrtrix3 import app, fsl, image, matrix, path, phaseencoding, run, utils def execute(): #pylint: disable=unused-variable diff --git a/python/mrtrix3/dwifslpreproc/usage.py b/python/mrtrix3/dwifslpreproc/usage.py index 0eecd1e10d..7c95546168 100644 --- a/python/mrtrix3/dwifslpreproc/usage.py +++ b/python/mrtrix3/dwifslpreproc/usage.py @@ -13,7 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import app, _version #pylint: disable=no-name-in-module +from mrtrix3 import app, _version def usage(cmdline): #pylint: disable=unused-variable cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') diff --git a/python/mrtrix3/dwigradcheck/execute.py b/python/mrtrix3/dwigradcheck/execute.py index 17db3b6350..163063b9e1 100644 --- a/python/mrtrix3/dwigradcheck/execute.py +++ b/python/mrtrix3/dwigradcheck/execute.py @@ -14,8 +14,8 @@ # For more details, see http://www.mrtrix.org/. import copy, numbers, os, shutil, sys -from mrtrix3 import CONFIG, MRtrixError #pylint: disable=no-name-in-module -from mrtrix3 import app, image, matrix, path, run #pylint: disable=no-name-in-module +from mrtrix3 import CONFIG, MRtrixError +from mrtrix3 import app, image, matrix, path, run def execute(): #pylint: disable=unused-variable diff --git a/python/mrtrix3/dwigradcheck/usage.py b/python/mrtrix3/dwigradcheck/usage.py index 33cbbfe095..d617248a9e 100644 --- a/python/mrtrix3/dwigradcheck/usage.py +++ b/python/mrtrix3/dwigradcheck/usage.py @@ -13,7 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import app, _version #pylint: disable=no-name-in-module +from mrtrix3 import app, _version def usage(cmdline): #pylint: disable=unused-variable cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') diff --git a/python/mrtrix3/dwinormalise/execute.py b/python/mrtrix3/dwinormalise/execute.py index 7a7326144f..0c346d601d 100644 --- a/python/mrtrix3/dwinormalise/execute.py +++ b/python/mrtrix3/dwinormalise/execute.py @@ -13,7 +13,7 @@ # # For more details, see http://www.mrtrix.org/. 
-from mrtrix3 import algorithm, app #pylint: disable=no-name-in-module +from mrtrix3 import algorithm, app def execute(): #pylint: disable=unused-variable diff --git a/python/mrtrix3/dwinormalise/usage.py b/python/mrtrix3/dwinormalise/usage.py index 0ec5868778..075db4236b 100644 --- a/python/mrtrix3/dwinormalise/usage.py +++ b/python/mrtrix3/dwinormalise/usage.py @@ -13,7 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import algorithm #pylint: disable=no-name-in-module +from mrtrix3 import algorithm def usage(cmdline): #pylint: disable=unused-variable cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') diff --git a/python/mrtrix3/dwishellmath/__init__.py b/python/mrtrix3/dwishellmath/__init__.py index 68b1ddd512..d52b3ac1df 100644 --- a/python/mrtrix3/dwishellmath/__init__.py +++ b/python/mrtrix3/dwishellmath/__init__.py @@ -13,4 +13,5 @@ # # For more details, see http://www.mrtrix.org/. +# pylint: disable=unused-variable SUPPORTED_OPS = ['mean', 'median', 'sum', 'product', 'rms', 'norm', 'var', 'std', 'min', 'max', 'absmax', 'magmax'] diff --git a/python/mrtrix3/dwishellmath/execute.py b/python/mrtrix3/dwishellmath/execute.py index c217d46752..23151b2e23 100644 --- a/python/mrtrix3/dwishellmath/execute.py +++ b/python/mrtrix3/dwishellmath/execute.py @@ -13,8 +13,8 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module -from mrtrix3 import app, image, path, run #pylint: disable=no-name-in-module +from mrtrix3 import MRtrixError +from mrtrix3 import app, image, path, run def execute(): #pylint: disable=unused-variable # check inputs and outputs diff --git a/python/mrtrix3/dwishellmath/usage.py b/python/mrtrix3/dwishellmath/usage.py index dae291576f..6374856846 100644 --- a/python/mrtrix3/dwishellmath/usage.py +++ b/python/mrtrix3/dwishellmath/usage.py @@ -13,7 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel +from mrtrix3 import app from . import SUPPORTED_OPS def usage(cmdline): #pylint: disable=unused-variable diff --git a/python/mrtrix3/for_each/__init__.py b/python/mrtrix3/for_each/__init__.py index 588df56506..71881250b8 100644 --- a/python/mrtrix3/for_each/__init__.py +++ b/python/mrtrix3/for_each/__init__.py @@ -13,6 +13,8 @@ # # For more details, see http://www.mrtrix.org/. +# pylint: disable=unused-variable + # Since we're going to capture everything after the colon character and "hide" it from argparse, # we need to store the contents from there in a global so as for it to be accessible from execute() CMDSPLIT = [ ] diff --git a/python/mrtrix3/for_each/entry.py b/python/mrtrix3/for_each/entry.py index a382d6c2ae..0f2f615c09 100644 --- a/python/mrtrix3/for_each/entry.py +++ b/python/mrtrix3/for_each/entry.py @@ -18,7 +18,7 @@ from mrtrix3 import app from . import CMDSPLIT -class Entry: +class Entry: # pylint: disable=unused-variable def __init__(self, input_text, common_prefix, common_suffix): self.input_text = input_text self.sub_in = input_text diff --git a/python/mrtrix3/for_each/execute.py b/python/mrtrix3/for_each/execute.py index d129981111..8a3baf679e 100644 --- a/python/mrtrix3/for_each/execute.py +++ b/python/mrtrix3/for_each/execute.py @@ -14,10 +14,11 @@ # For more details, see http://www.mrtrix.org/. 
import os, re, sys, threading -from mrtrix3 import ANSI, MRtrixError #pylint: disable=no-name-in-module -from mrtrix3 import app, run #pylint: disable=no-name-in-module +from mrtrix3 import ANSI, MRtrixError +from mrtrix3 import app, run from . import CMDSPLIT, KEYLIST -from . import Entry, Shared +from .entry import Entry +from .shared import Shared def execute(): #pylint: disable=unused-variable diff --git a/python/mrtrix3/for_each/shared.py b/python/mrtrix3/for_each/shared.py index 6c56f67ca6..3d637b61c3 100644 --- a/python/mrtrix3/for_each/shared.py +++ b/python/mrtrix3/for_each/shared.py @@ -15,7 +15,7 @@ import threading -class Shared: +class Shared: # pylint: disable=unused-variable def __init__(self): self._job_index = 0 self.lock = threading.Lock() diff --git a/python/mrtrix3/for_each/usage.py b/python/mrtrix3/for_each/usage.py index 3017bfe1da..c779dd14b3 100644 --- a/python/mrtrix3/for_each/usage.py +++ b/python/mrtrix3/for_each/usage.py @@ -14,11 +14,11 @@ # For more details, see http://www.mrtrix.org/. import sys +from mrtrix3 import _version from . import CMDSPLIT def usage(cmdline): #pylint: disable=unused-variable global CMDSPLIT - from mrtrix3 import _version #pylint: disable=no-name-in-module, import-outside-toplevel cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au) and David Raffelt (david.raffelt@florey.edu.au)') cmdline.set_synopsis('Perform some arbitrary processing step for each of a set of inputs') cmdline.add_description('This script greatly simplifies various forms of batch processing by enabling the execution of a command (or set of commands) independently for each of a set of inputs.') diff --git a/python/mrtrix3/labelsgmfix/execute.py b/python/mrtrix3/labelsgmfix/execute.py index 1572f2f9e9..410e6c67df 100644 --- a/python/mrtrix3/labelsgmfix/execute.py +++ b/python/mrtrix3/labelsgmfix/execute.py @@ -14,8 +14,8 @@ # For more details, see http://www.mrtrix.org/. import math, os -from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module -from mrtrix3 import app, fsl, image, path, run, utils #pylint: disable=no-name-in-module +from mrtrix3 import MRtrixError +from mrtrix3 import app, fsl, image, path, run, utils def execute(): #pylint: disable=unused-variable @@ -80,7 +80,7 @@ def execute(): #pylint: disable=unused-variable # This will map a structure name to an index sgm_lut = {} sgm_lut_file_name = 'FreeSurferSGM.txt' - sgm_lut_file_path = os.path.join(path.shared_data_path(), path.script_subdir_name(), sgm_lut_file_name) + sgm_lut_file_path = os.path.join(path.shared_data_path(), 'labelsgmfix', sgm_lut_file_name) with open(sgm_lut_file_path, encoding='utf-8') as sgm_lut_file: for line in sgm_lut_file: line = line.rstrip() diff --git a/python/mrtrix3/mask2glass/execute.py b/python/mrtrix3/mask2glass/execute.py index 902ef3b759..c039edc225 100644 --- a/python/mrtrix3/mask2glass/execute.py +++ b/python/mrtrix3/mask2glass/execute.py @@ -13,8 +13,9 @@ # # For more details, see http://www.mrtrix.org/. 
+from mrtrix3 import app, image, path, run + def execute(): #pylint: disable=unused-variable - from mrtrix3 import app, image, path, run #pylint: disable=no-name-in-module, import-outside-toplevel app.check_output_path(app.ARGS.output) diff --git a/python/mrtrix3/mrtrix_cleanup/execute.py b/python/mrtrix3/mrtrix_cleanup/execute.py index 9d7e798de3..43248a63ce 100644 --- a/python/mrtrix3/mrtrix_cleanup/execute.py +++ b/python/mrtrix3/mrtrix_cleanup/execute.py @@ -15,8 +15,8 @@ import math, os, re, shutil -from mrtrix3 import CONFIG #pylint: disable=no-name-in-module -from mrtrix3 import app #pylint: disable=no-name-in-module +from mrtrix3 import CONFIG +from mrtrix3 import app POSTFIXES = [ 'B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB' ] diff --git a/python/mrtrix3/path.py b/python/mrtrix3/path.py index 96b73e36e0..89a2498bac 100644 --- a/python/mrtrix3/path.py +++ b/python/mrtrix3/path.py @@ -17,7 +17,7 @@ -import ctypes, errno, inspect, os, random, shlex, shutil, string, subprocess, time +import ctypes, errno, os, random, shlex, shutil, string, subprocess, time from mrtrix3 import CONFIG diff --git a/python/mrtrix3/population_template/__init__.py b/python/mrtrix3/population_template/__init__.py index 9cdb86ccd2..6bbcf234da 100644 --- a/python/mrtrix3/population_template/__init__.py +++ b/python/mrtrix3/population_template/__init__.py @@ -13,6 +13,8 @@ # # For more details, see http://www.mrtrix.org/. +# pylint: disable=unused-variable + DEFAULT_RIGID_SCALES = [0.3,0.4,0.6,0.8,1.0,1.0] DEFAULT_RIGID_LMAX = [2,2,2,4,4,4] DEFAULT_AFFINE_SCALES = [0.3,0.4,0.6,0.8,1.0,1.0] diff --git a/python/mrtrix3/population_template/contrasts.py b/python/mrtrix3/population_template/contrasts.py index 6e54755921..48cbbd3338 100644 --- a/python/mrtrix3/population_template/contrasts.py +++ b/python/mrtrix3/population_template/contrasts.py @@ -15,9 +15,9 @@ import os from mrtrix3 import MRtrixError -from mrtrix3 import app, path # pylint: disable=no-name-in-module, import-outside-toplevel +from mrtrix3 import app, path -class Contrasts: +class Contrasts: # pylint: disable=unused-variable """ Class that parses arguments and holds information specific to each image contrast diff --git a/python/mrtrix3/population_template/execute.py b/python/mrtrix3/population_template/execute.py index f2639eaf02..40b4ce5a1d 100644 --- a/python/mrtrix3/population_template/execute.py +++ b/python/mrtrix3/population_template/execute.py @@ -15,12 +15,12 @@ import json, os, shlex, shutil from mrtrix3 import EXE_LIST, MRtrixError -from mrtrix3 import app, image, matrix, path, run #pylint: disable=no-name-in-module +from mrtrix3 import app, image, matrix, path, run from . import AGGREGATION_MODES, REGISTRATION_MODES from . import DEFAULT_AFFINE_LMAX, DEFAULT_AFFINE_SCALES from . import DEFAULT_RIGID_LMAX, DEFAULT_RIGID_SCALES from . import DEFAULT_NL_LMAX, DEFAULT_NL_NITER, DEFAULT_NL_SCALES -from . import Contrasts +from .contrasts import Contrasts from .utils import aggregate, calculate_isfinite, check_linear_transformation, copy, inplace_nan_mask, parse_input_files, relpath def execute(): #pylint: disable=unused-variable @@ -87,16 +87,16 @@ def execute(): #pylint: disable=unused-variable agg_measure = 'mean' if app.ARGS.aggregate is not None: if not app.ARGS.aggregate in AGGREGATION_MODES: - app.error("aggregation type must be one of %s. provided: %s" % (str(AGGREGATION_MODES), app.ARGS.aggregate)) + raise MRtrixError("aggregation type must be one of %s. 
provided: %s" % (str(AGGREGATION_MODES), app.ARGS.aggregate)) agg_measure = app.ARGS.aggregate agg_weights = app.ARGS.aggregation_weights if agg_weights is not None: agg_measure = "weighted_" + agg_measure if agg_measure != 'weighted_mean': - app.error("aggregation weights require '-aggregate mean' option. provided: %s" % (app.ARGS.aggregate)) - if not os.path.isfile(app.ARGS.aggregation_weights): - app.error("aggregation weights file not found: %s" % app.ARGS.aggregation_weights) + raise MRtrixError ("aggregation weights require '-aggregate mean' option. provided: %s" % (app.ARGS.aggregate)) + if not os.path.isfile(app.ARGS.aggregation_weights): + raise MRtrixError("aggregation weights file not found: %s" % app.ARGS.aggregation_weights) initial_alignment = app.ARGS.initial_alignment if initial_alignment not in ["mass", "robust_mass", "geometric", "none"]: diff --git a/python/mrtrix3/population_template/input.py b/python/mrtrix3/population_template/input.py index c2522b03a4..0ebe7c24cc 100644 --- a/python/mrtrix3/population_template/input.py +++ b/python/mrtrix3/population_template/input.py @@ -16,7 +16,7 @@ import os from .utils import abspath -class Input: +class Input: # pylint: disable=unused-variable """ Class that holds input information specific to a single image (multiple contrasts) diff --git a/python/mrtrix3/population_template/usage.py b/python/mrtrix3/population_template/usage.py index b7989b7151..1d9a7fe9e1 100644 --- a/python/mrtrix3/population_template/usage.py +++ b/python/mrtrix3/population_template/usage.py @@ -13,6 +13,11 @@ # # For more details, see http://www.mrtrix.org/. +from . import AGGREGATION_MODES, REGISTRATION_MODES +from . import DEFAULT_RIGID_LMAX, DEFAULT_RIGID_SCALES +from . import DEFAULT_AFFINE_LMAX, DEFAULT_AFFINE_SCALES +from . import DEFAULT_NL_LMAX, DEFAULT_NL_NITER, DEFAULT_NL_SCALES + def usage(cmdline): #pylint: disable=unused-variable cmdline.set_author('David Raffelt (david.raffelt@florey.edu.au) & Max Pietsch (maximilian.pietsch@kcl.ac.uk) & Thijs Dhollander (thijs.dhollander@gmail.com)') diff --git a/python/mrtrix3/population_template/utils.py b/python/mrtrix3/population_template/utils.py index 309be8d29e..1e3029556c 100644 --- a/python/mrtrix3/population_template/utils.py +++ b/python/mrtrix3/population_template/utils.py @@ -14,19 +14,20 @@ # For more details, see http://www.mrtrix.org/. import math, os, re, shutil, sys +from mrtrix3 import MRtrixError +from mrtrix3 import app, image, path, run, utils from . import IMAGEEXT -from . import Input +from .input import Input -def abspath(arg, *args): +def abspath(arg, *args): # pylint: disable=unused-variable return os.path.abspath(os.path.join(arg, *args)) -def relpath(arg, *args): - from mrtrix3 import app #pylint: disable=no-name-in-module, import-outside-toplevel +def relpath(arg, *args): # pylint: disable=unused-variable return os.path.relpath(os.path.join(arg, *args), app.WORKING_DIR) -def copy(src, dst, follow_symlinks=True): +def copy(src, dst, follow_symlinks=True): # pylint: disable=unused-variable """Copy data but do not set mode bits. Return the file's destination. 
mimics shutil.copy but without setting mode bits as shutil.copymode can fail on exotic mounts @@ -41,8 +42,7 @@ def copy(src, dst, follow_symlinks=True): return dst -def check_linear_transformation(transformation, cmd, max_scaling=0.5, max_shear=0.2, max_rot=None, pause_on_warn=True): - from mrtrix3 import app, run, utils #pylint: disable=no-name-in-module, import-outside-toplevel +def check_linear_transformation(transformation, cmd, max_scaling=0.5, max_shear=0.2, max_rot=None, pause_on_warn=True): # pylint: disable=unused-variable if max_rot is None: max_rot = 2 * math.pi @@ -106,8 +106,7 @@ def check_linear_transformation(transformation, cmd, max_scaling=0.5, max_shear= return good -def aggregate(inputs, output, contrast_idx, mode, force=True): - from mrtrix3 import MRtrixError, run # pylint: disable=no-name-in-module, import-outside-toplevel +def aggregate(inputs, output, contrast_idx, mode, force=True): # pylint: disable=unused-variable images = [inp.ims_transformed[contrast_idx] for inp in inputs] if mode == 'mean': @@ -121,27 +120,25 @@ def aggregate(inputs, output, contrast_idx, mode, force=True): cmd = ['mrcalc'] if wsum <= 0: raise MRtrixError("the sum of aggregetion weights has to be positive") - for weight, image in zip(weights, images): + for weight, imagepath in zip(weights, images): if float(weight) != 0: - cmd += [image, weight, '-mult'] + (['-add'] if len(cmd) > 1 else []) + cmd += [imagepath, weight, '-mult'] + (['-add'] if len(cmd) > 1 else []) cmd += ['%.16f' % wsum, '-div', output] run.command(cmd, force=force) else: raise MRtrixError("aggregation mode %s not understood" % mode) -def inplace_nan_mask(images, masks): - from mrtrix3 import run # pylint: disable=no-name-in-module, import-outside-toplevel +def inplace_nan_mask(images, masks): # pylint: disable=unused-variable assert len(images) == len(masks), (len(images), len(masks)) - for image, mask in zip(images, masks): + for imagepath, mask in zip(images, masks): target_dir = os.path.split(image)[0] - masked = os.path.join(target_dir, '__' + os.path.split(image)[1]) - run.command("mrcalc " + mask + " " + image + " nan -if " + masked, force=True) - run.function(shutil.move, masked, image) + masked = os.path.join(target_dir, '__' + os.path.split(imagepath)[1]) + run.command("mrcalc " + mask + " " + imagepath + " nan -if " + masked, force=True) + run.function(shutil.move, masked, imagepath) -def calculate_isfinite(inputs, contrasts): - from mrtrix3 import run, path # pylint: disable=no-name-in-module, import-outside-toplevel +def calculate_isfinite(inputs, contrasts): # pylint: disable=unused-variable agg_weights = [float(inp.aggregation_weight) for inp in inputs if inp.aggregation_weight is not None] for cid in range(contrasts.n_contrasts): for inp in inputs: @@ -161,16 +158,16 @@ def calculate_isfinite(inputs, contrasts): run.command(cmd + [contrasts.isfinite_count[cid]], force=True) -def get_common_postfix(file_list): +def get_common_postfix(file_list): # pylint: disable=unused-variable return os.path.commonprefix([i[::-1] for i in file_list])[::-1] -def get_common_prefix(file_list): +def get_common_prefix(file_list): # pylint: disable=unused-variable return os.path.commonprefix(file_list) -def parse_input_files(in_files, mask_files, contrasts, f_agg_weight=None, whitespace_repl='_'): +def parse_input_files(in_files, mask_files, contrasts, f_agg_weight=None, whitespace_repl='_'): # pylint: disable=unused-variable """ matches input images across contrasts and pair them with masks. 
extracts unique identifiers from mask and image filenames by stripping common pre and postfix (per contrast and for masks) @@ -188,24 +185,23 @@ def parse_input_files(in_files, mask_files, contrasts, f_agg_weight=None, whites TODO check if no common grid & trafo across contrasts (only relevant for robust init?) """ - from mrtrix3 import MRtrixError, app, path, image # pylint: disable=no-name-in-module, import-outside-toplevel contrasts = contrasts.suff inputs = [] def paths_to_file_uids(paths, prefix, postfix): """ strip pre and postfix from filename, replace whitespace characters """ uid_path = {} uids = [] - for path in paths: + for filepath in paths: uid = re.sub(re.escape(postfix)+'$', '', re.sub('^'+re.escape(prefix), '', os.path.split(path)[1])) uid = re.sub(r'\s+', whitespace_repl, uid) if not uid: raise MRtrixError('No uniquely identifiable part of filename "' + path + '" ' 'after prefix and postfix substitution ' 'with prefix "' + prefix + '" and postfix "' + postfix + '"') - app.debug('UID mapping: "' + path + '" --> "' + uid + '"') + app.debug('UID mapping: "' + filepath + '" --> "' + uid + '"') if uid in uid_path: raise MRtrixError('unique file identifier is not unique: "' + uid + '" mapped to "' + path + '" and "' + uid_path[uid] +'"') - uid_path[uid] = path + uid_path[uid] = filepath uids.append(uid) return uids @@ -292,4 +288,4 @@ def paths_to_file_uids(paths, prefix, postfix): if any(w < 0 for w in weights): app.warn('Negative aggregation weights: ' + str(weights)) - return inputs, xcontrast_xsubject_pre_postfix \ No newline at end of file + return inputs, xcontrast_xsubject_pre_postfix diff --git a/python/mrtrix3/responsemean/execute.py b/python/mrtrix3/responsemean/execute.py index 5a7cd251ba..df474a4b40 100644 --- a/python/mrtrix3/responsemean/execute.py +++ b/python/mrtrix3/responsemean/execute.py @@ -14,8 +14,8 @@ # For more details, see http://www.mrtrix.org/. import math, sys -from mrtrix3 import MRtrixError #pylint: disable=no-name-in-module -from mrtrix3 import app, matrix #pylint: disable=no-name-in-module +from mrtrix3 import MRtrixError +from mrtrix3 import app, matrix def execute(): #pylint: disable=unused-variable diff --git a/run_pylint b/run_pylint index a971644bab..2364c76c7b 100755 --- a/run_pylint +++ b/run_pylint @@ -21,7 +21,7 @@ echo logging to \""$LOGFILE"\" # generate list of tests to run: tests="update_copyright" if [ $# == 0 ]; then - for lib_path in python/lib/mrtrix3/*; do + for lib_path in python/mrtrix3/*; do if [[ -d ${lib_path} && ! ( ${lib_path} == *"__pycache__"* ) ]]; then for src_file in ${lib_path}/*.py; do if [[ -f ${src_file} ]]; then From 08db1d64f29117567d5beed10d69a401571db2ab Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 8 Mar 2024 00:00:29 +1100 Subject: [PATCH 09/11] Python API: Further changes following filesystem restructuring - Remove mrtrix3.algorithm module. Its operation was incommensurate with the transition to cmake, where dependencies are expected to be known prior to execution, as it scanned the filesystem at execution time for the sake of discovering any newly added algorithms. Most of the prior functionality is replaced with overloading function app.Parser.add_subparsers(). - Greatly simplify the content of cmake-generated executables for Python commands. 
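
To make the shape of that change concrete, the following is a rough standalone sketch of the pattern being adopted; it is not part of the patch itself. The algorithm list becomes a static constant that a build system can read from source, and each algorithm registers its own subparser explicitly instead of being discovered by walking the filesystem at run time. Only the names ALGORITHMS, 5ttgen, fsl, hsvs and the use of add_subparsers() are taken from the patch; register_fsl, register_hsvs, build_parser and the -premasked / -template options are hypothetical placeholders used purely for illustration.

import argparse

def register_fsl(subparsers):
    # Hypothetical stand-in for an algorithm's usage() hook
    parser = subparsers.add_parser('fsl')
    parser.add_argument('-premasked', action='store_true')

def register_hsvs(subparsers):
    # Hypothetical stand-in for another algorithm's usage() hook
    parser = subparsers.add_parser('hsvs')
    parser.add_argument('-template')

# The algorithm list is a static constant written in source, so dependencies are
# known before execution (and visible to a build system such as CMake), rather
# than being discovered by scanning the filesystem with pkgutil at run time.
ALGORITHMS = {'fsl': register_fsl, 'hsvs': register_hsvs}

def build_parser():
    parser = argparse.ArgumentParser(prog='5ttgen')
    subparsers = parser.add_subparsers(dest='algorithm',
                                       help='Options are: ' + ', '.join(ALGORITHMS))
    for register in ALGORITHMS.values():
        register(subparsers)
    return parser

if __name__ == '__main__':
    # e.g. "5ttgen hsvs -template T1.mif" -> Namespace(algorithm='hsvs', template='T1.mif')
    print(build_parser().parse_args(['hsvs', '-template', 'T1.mif']))

The actual mechanism in this patch is the add_subparsers() override added to app.Parser below, which reads the ALGORITHMS constant of the calling command's package and imports each algorithm's usage module explicitly via importlib.
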
--- python/mrtrix3/5ttgen/__init__.py | 17 ++++++++ python/mrtrix3/5ttgen/execute.py | 15 ++++--- python/mrtrix3/5ttgen/hsvs/__init__.py | 1 + python/mrtrix3/5ttgen/usage.py | 5 +-- python/mrtrix3/CMakeLists.txt | 32 +++----------- python/mrtrix3/algorithm.py | 44 -------------------- python/mrtrix3/app.py | 26 ++++++++++-- python/mrtrix3/dwi2mask/__init__.py | 27 ++++++++++++ python/mrtrix3/dwi2mask/b02template/usage.py | 2 +- python/mrtrix3/dwi2mask/execute.py | 14 ++++--- python/mrtrix3/dwi2mask/trace/__init__.py | 1 + python/mrtrix3/dwi2mask/usage.py | 5 +-- python/mrtrix3/dwi2response/__init__.py | 17 ++++++++ python/mrtrix3/dwi2response/execute.py | 15 ++++--- python/mrtrix3/dwi2response/usage.py | 5 +-- python/mrtrix3/dwibiascorrect/__init__.py | 17 ++++++++ python/mrtrix3/dwibiascorrect/execute.py | 15 ++++--- python/mrtrix3/dwibiascorrect/usage.py | 6 +-- python/mrtrix3/dwinormalise/__init__.py | 17 ++++++++ python/mrtrix3/dwinormalise/execute.py | 12 ++++-- python/mrtrix3/dwinormalise/usage.py | 6 +-- python/mrtrix3/population_template/input.py | 7 ++-- python/mrtrix3/population_template/utils.py | 6 +-- 23 files changed, 187 insertions(+), 125 deletions(-) delete mode 100644 python/mrtrix3/algorithm.py diff --git a/python/mrtrix3/5ttgen/__init__.py b/python/mrtrix3/5ttgen/__init__.py index e69de29bb2..b00ef0cac2 100644 --- a/python/mrtrix3/5ttgen/__init__.py +++ b/python/mrtrix3/5ttgen/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2008-2023 the MRtrix3 contributors. +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# +# Covered Software is provided under this License on an "as is" +# basis, without warranty of any kind, either expressed, implied, or +# statutory, including, without limitation, warranties that the +# Covered Software is free of defects, merchantable, fit for a +# particular purpose or non-infringing. +# See the Mozilla Public License v. 2.0 for more details. +# +# For more details, see http://www.mrtrix.org/. + +# pylint: disable=unused-variable +ALGORITHMS = ['freesurfer', 'fsl', 'gif', 'hsvs'] diff --git a/python/mrtrix3/5ttgen/execute.py b/python/mrtrix3/5ttgen/execute.py index bed0339cf2..90489ea9ea 100644 --- a/python/mrtrix3/5ttgen/execute.py +++ b/python/mrtrix3/5ttgen/execute.py @@ -13,20 +13,25 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import algorithm, app, run +import importlib, sys +from mrtrix3 import app, run def execute(): #pylint: disable=unused-variable # Find out which algorithm the user has requested - alg = algorithm.get(app.ARGS.algorithm) + algorithm_module_name = 'mrtrix3._5ttgen.' 
+ app.ARGS.algorithm + alg = sys.modules[algorithm_module_name] - alg.check_output_paths() + importlib.import_module('.check_output_paths', algorithm_module_name) + alg.check_output_paths.check_output_paths() app.make_scratch_dir() - alg.get_inputs() + importlib.import_module('.get_inputs', algorithm_module_name) + alg.get_inputs.get_inputs() app.goto_scratch_dir() - alg.execute() + importlib.import_module('.execute', algorithm_module_name) + alg.execute.execute() stderr = run.command('5ttcheck result.mif').stderr if '[WARNING]' in stderr: diff --git a/python/mrtrix3/5ttgen/hsvs/__init__.py b/python/mrtrix3/5ttgen/hsvs/__init__.py index 35392e0ed5..47d340f200 100644 --- a/python/mrtrix3/5ttgen/hsvs/__init__.py +++ b/python/mrtrix3/5ttgen/hsvs/__init__.py @@ -13,6 +13,7 @@ # # For more details, see http://www.mrtrix.org/. + HIPPOCAMPI_CHOICES = [ 'subfields', 'first', 'aseg' ] THALAMI_CHOICES = [ 'nuclei', 'first', 'aseg' ] diff --git a/python/mrtrix3/5ttgen/usage.py b/python/mrtrix3/5ttgen/usage.py index 66c8988790..d32fab9c4b 100644 --- a/python/mrtrix3/5ttgen/usage.py +++ b/python/mrtrix3/5ttgen/usage.py @@ -13,8 +13,6 @@ # # For more details, see http://www.mrtrix.org/. -from mrtrix3 import algorithm - def usage(cmdline): #pylint: disable=unused-variable cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)') @@ -27,5 +25,4 @@ def usage(cmdline): #pylint: disable=unused-variable common_options.add_argument('-nocrop', action='store_true', default=False, help='Do NOT crop the resulting 5TT image to reduce its size (keep the same dimensions as the input image)') common_options.add_argument('-sgm_amyg_hipp', action='store_true', default=False, help='Represent the amygdalae and hippocampi as sub-cortical grey matter in the 5TT image') - # Import the command-line settings for all algorithms found in the relevant directory - algorithm.usage(cmdline) + cmdline.add_subparsers() diff --git a/python/mrtrix3/CMakeLists.txt b/python/mrtrix3/CMakeLists.txt index 809738a8d9..1020b218d0 100644 --- a/python/mrtrix3/CMakeLists.txt +++ b/python/mrtrix3/CMakeLists.txt @@ -15,10 +15,6 @@ file(GLOB_RECURSE PYTHON_SRC_PATHS ${CMAKE_CURRENT_SOURCE_DIR}/*/* ) -message(STATUS "PYTHON_LIB_FILES=${PYTHON_LIB_FILES}") -message(STATUS "PYTHON_SRC_PATHS=${PYTHON_SRC_PATHS}") - -# TODO Test to see if this can be done with glob exclusion set(PYTHON_COMMAND_LIST "") foreach(PYTHON_PATH ${PYTHON_ROOT_ENTRIES}) if(IS_DIRECTORY ${PYTHON_PATH}) @@ -27,8 +23,6 @@ foreach(PYTHON_PATH ${PYTHON_ROOT_ENTRIES}) endif() endforeach() -message(STATUS "PYTHON_COMMAND_LIST=${PYTHON_COMMAND_LIST}") - # We generate the version file at configure time, # so tools like Pylint can run without building the project execute_process( @@ -77,8 +71,6 @@ foreach(PYTHON_SRC_PATH ${PYTHON_SRC_PATHS}) endif() endforeach() -message(STATUS "PYTHON_DST_SUBDIRS=${PYTHON_DST_SUBDIRS}") - foreach(PYTHON_DST_SUBDIR ${PYTHON_DST_SUBDIRS}) add_custom_command( TARGET LinkPythonFiles @@ -105,38 +97,24 @@ endforeach() set(PYTHON_BIN_FILES "") foreach(CMDNAME ${PYTHON_COMMAND_LIST}) - # Strip .py extension - get_filename_component(BINNAME ${CMDNAME} NAME_WE) - set(MODULENAME ${BINNAME}) + set(MODULENAME ${CMDNAME}) if(MODULENAME MATCHES "^[0-9].*$") set(MODULENAME _${MODULENAME}) endif() - set(SPECPATH "'${MODULENAME}', '__init__.py'") - set(BINPATH "${PROJECT_BINARY_DIR}/temporary/python/${BINNAME}") + set(BINPATH "${PROJECT_BINARY_DIR}/temporary/python/${CMDNAME}") file(WRITE ${BINPATH} "#!${Python3_EXECUTABLE}\n") file(APPEND ${BINPATH} "# -*- 
coding: utf-8 -*-\n") - file(APPEND ${BINPATH} "import importlib.util\n") file(APPEND ${BINPATH} "import os\n") file(APPEND ${BINPATH} "import sys\n") file(APPEND ${BINPATH} "\n") - file(APPEND ${BINPATH} "api_location = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'lib', 'mrtrix3', '__init__.py'))\n") - file(APPEND ${BINPATH} "api_spec = importlib.util.spec_from_file_location('mrtrix3', api_location)\n") - file(APPEND ${BINPATH} "api_module = importlib.util.module_from_spec(api_spec)\n") - file(APPEND ${BINPATH} "sys.modules['mrtrix3'] = api_module\n") - file(APPEND ${BINPATH} "api_spec.loader.exec_module(api_module)\n") - file(APPEND ${BINPATH} "\n") - file(APPEND ${BINPATH} "src_spec = importlib.util.spec_from_file_location('${MODULENAME}',\n") - file(APPEND ${BINPATH} " os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'lib', 'mrtrix3', ${SPECPATH})))\n") - file(APPEND ${BINPATH} "src_module = importlib.util.module_from_spec(src_spec)\n") - file(APPEND ${BINPATH} "sys.modules[src_spec.name] = src_module\n") - file(APPEND ${BINPATH} "src_spec.loader.exec_module(src_module)\n") + file(APPEND ${BINPATH} "sys.path.insert(0, os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib')))\n") file(APPEND ${BINPATH} "\n") file(APPEND ${BINPATH} "from mrtrix3.app import _execute\n") - file(APPEND ${BINPATH} "import ${MODULENAME}\n") + file(APPEND ${BINPATH} "from mrtrix3 import ${MODULENAME}\n") file(APPEND ${BINPATH} "_execute(${MODULENAME})\n") file(COPY ${BINPATH} DESTINATION ${PROJECT_BINARY_DIR}/bin FILE_PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_EXECUTE GROUP_WRITE GROUP_READ WORLD_EXECUTE WORLD_READ) - list(APPEND PYTHON_BIN_FILES ${PROJECT_BINARY_DIR}/bin/${BINNAME}) + list(APPEND PYTHON_BIN_FILES ${PROJECT_BINARY_DIR}/bin/${CMDNAME}) endforeach() set_target_properties(LinkPythonFiles diff --git a/python/mrtrix3/algorithm.py b/python/mrtrix3/algorithm.py deleted file mode 100644 index b671b513b2..0000000000 --- a/python/mrtrix3/algorithm.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2008-2023 the MRtrix3 contributors. -# -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -# -# Covered Software is provided under this License on an "as is" -# basis, without warranty of any kind, either expressed, implied, or -# statutory, including, without limitation, warranties that the -# Covered Software is free of defects, merchantable, fit for a -# particular purpose or non-infringing. -# See the Mozilla Public License v. 2.0 for more details. -# -# For more details, see http://www.mrtrix.org/. - -# Set of functionalities for when a single script has many 'algorithms' that may be invoked, -# i.e. the script deals with generating a particular output, but there are a number of -# processes to select from, each of which is capable of generating that output. - - -import importlib, inspect, pkgutil, sys -from mrtrix3 import app - - - -# Note: This function essentially duplicates the current state of app.cmdline in order for command-line -# options common to all algorithms of a particular script to be applicable once any particular sub-parser -# is invoked. Therefore this function must be called _after_ all such options are set up. 
-def usage(cmdline): #pylint: disable=unused-variable
-  module_name = inspect.currentframe().f_back.f_globals["__name__"]
-  submodules = [submodule_info.name for submodule_info in pkgutil.walk_packages(sys.modules[module_name].__spec__.submodule_search_locations)]
-  base_parser = app.Parser(description='Base parser for construction of subparsers', parents=[cmdline])
-  subparsers = cmdline.add_subparsers(title='Algorithm choices',
-                                      help='Select the algorithm to be used to complete the script operation; '
-                                           'additional details and options become available once an algorithm is nominated. '
-                                           'Options are: ' + ', '.join(submodules), dest='algorithm')
-  for submodule in submodules:
-    module = importlib.import_module(module_name + '.' + submodule)
-    module.usage(base_parser, subparsers)
-
-
-
-def get(name): #pylint: disable=unused-variable
-  return sys.modules[inspect.currentframe().f_back.f_globals["__name__"] + '.' + name]
diff --git a/python/mrtrix3/app.py b/python/mrtrix3/app.py
index 6d285abb03..95f017870d 100644
--- a/python/mrtrix3/app.py
+++ b/python/mrtrix3/app.py
@@ -13,7 +13,7 @@
 #
 # For more details, see http://www.mrtrix.org/.
 
-import argparse, inspect, math, os, random, shlex, shutil, signal, string, subprocess, sys, textwrap, time
+import argparse, importlib, inspect, math, os, random, shlex, shutil, signal, string, subprocess, sys, textwrap, time
 from mrtrix3 import ANSI, CONFIG, MRtrixError, setup_ansi
 from mrtrix3 import utils # Needed at global level
 from ._version import __version__
@@ -105,6 +105,8 @@ def _execute(module): #pylint: disable=unused-variable
   from mrtrix3 import run #pylint: disable=import-outside-toplevel
   global ARGS, CMDLINE, CONTINUE_OPTION, DO_CLEANUP, FORCE_OVERWRITE, NUM_THREADS, SCRATCH_DIR, VERBOSITY
+  usage_module = importlib.import_module('.usage', module.__name__)
+  execute_module = importlib.import_module('.execute', module.__name__)
 
   # Set up signal handlers
   for sig in _SIGNALS:
@@ -115,7 +117,7 @@ def _execute(module): #pylint: disable=unused-variable
   CMDLINE = Parser()
   try:
-    module.usage(CMDLINE)
+    usage_module.usage(CMDLINE)
   except AttributeError:
     CMDLINE = None
     raise
@@ -204,7 +206,7 @@ def _execute(module): #pylint: disable=unused-variable
     sys.exit(return_code)
   try:
-    module.execute()
+    execute_module.execute()
   except (run.MRtrixCmdError, run.MRtrixFnError) as exception:
     is_cmd = isinstance(exception, run.MRtrixCmdError)
     return_code = exception.returncode if is_cmd else 1
@@ -604,7 +606,7 @@ def __init__(self, *args_in, **kwargs_in):
     script_options.add_argument('-nocleanup', action='store_true', help='do not delete intermediate files during script execution, and do not delete scratch directory at script completion.')
     script_options.add_argument('-scratch', metavar='/path/to/scratch/', help='manually specify the path in which to generate the scratch directory.')
     script_options.add_argument('-continue', nargs=2, dest='cont', metavar=('', ''), help='continue the script from a previous execution; must provide the scratch directory path, and the name of the last successfully-generated file.')
-    module_file = os.path.realpath (inspect.getsourcefile(inspect.stack()[-1][0]))
+    module_file = os.path.realpath(inspect.getsourcefile(inspect.stack()[-1][0]))
     self._is_project = os.path.abspath(os.path.join(os.path.dirname(module_file), os.pardir, 'lib', 'mrtrix3', 'app.py')) != os.path.abspath(__file__)
     try:
       with subprocess.Popen ([ 'git', 'describe', '--abbrev=8', '--dirty', '--always' ], cwd=os.path.abspath(os.path.join(os.path.dirname(module_file), os.pardir)), stdout=subprocess.PIPE, stderr=subprocess.PIPE) as process:
@@ -644,6 +646,22 @@ def flag_mutually_exclusive_options(self, options, required=False): #pylint: dis
       raise Exception('Parser.flagMutuallyExclusiveOptions() only accepts a list of strings')
     self._mutually_exclusive_option_groups.append( (options, required) )
 
+  def add_subparsers(self): # pylint: disable=arguments-differ
+    # Import the command-line settings for all algorithms in the relevant sub-directories
+    # This is expected to be being called from the 'usage' module of the relevant command
+    module_name = os.path.dirname(inspect.getouterframes(inspect.currentframe())[1].filename).split(os.sep)[-1]
+    module = sys.modules['mrtrix3.' + module_name]
+    base_parser = Parser(description='Base parser for construction of subparsers', parents=[self])
+    subparsers = super().add_subparsers(title='Algorithm choices',
+                                        help='Select the algorithm to be used; '
+                                             'additional details and options become available once an algorithm is nominated. '
+                                             'Options are: ' + ', '.join(module.ALGORITHMS),
+                                        dest='algorithm')
+    for algorithm in module.ALGORITHMS:
+      algorithm_usage_module = importlib.import_module('.' + algorithm + '.usage', 'mrtrix3.' + module_name)
+      algorithm_usage_module.usage(base_parser, subparsers)
+
+
   def parse_args(self, args=None, namespace=None):
     if not self._author:
       raise Exception('Script author MUST be set in script\'s usage() function')
diff --git a/python/mrtrix3/dwi2mask/__init__.py b/python/mrtrix3/dwi2mask/__init__.py
index e69de29bb2..e2e3ca39e0 100644
--- a/python/mrtrix3/dwi2mask/__init__.py
+++ b/python/mrtrix3/dwi2mask/__init__.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2008-2023 the MRtrix3 contributors.
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#
+# Covered Software is provided under this License on an "as is"
+# basis, without warranty of any kind, either expressed, implied, or
+# statutory, including, without limitation, warranties that the
+# Covered Software is free of defects, merchantable, fit for a
+# particular purpose or non-infringing.
+# See the Mozilla Public License v. 2.0 for more details.
+#
+# For more details, see http://www.mrtrix.org/.
+
+# pylint: disable=unused-variable
+ALGORITHMS = ['3dautomask',
+              'ants',
+              'b02template',
+              'consensus',
+              'fslbet',
+              'hdbet',
+              'legacy',
+              'mean',
+              'mtnorm',
+              'synthstrip',
+              'trace']
diff --git a/python/mrtrix3/dwi2mask/b02template/usage.py b/python/mrtrix3/dwi2mask/b02template/usage.py
index 1b2318e54a..1f29898080 100644
--- a/python/mrtrix3/dwi2mask/b02template/usage.py
+++ b/python/mrtrix3/dwi2mask/b02template/usage.py
@@ -14,7 +14,7 @@
 # For more details, see http://www.mrtrix.org/.
 
 from . import DEFAULT_SOFTWARE
-from . import SOFTWARE
+from . import SOFTWARES
 
 def usage(base_parser, subparsers): #pylint: disable=unused-variable
   parser = subparsers.add_parser('b02template', parents=[base_parser])
diff --git a/python/mrtrix3/dwi2mask/execute.py b/python/mrtrix3/dwi2mask/execute.py
index 32ace93b99..7d83bf6ae5 100644
--- a/python/mrtrix3/dwi2mask/execute.py
+++ b/python/mrtrix3/dwi2mask/execute.py
@@ -13,13 +13,15 @@
 #
 # For more details, see http://www.mrtrix.org/.
 
+import importlib, sys
 from mrtrix3 import MRtrixError
-from mrtrix3 import algorithm, app, image, path, run
+from mrtrix3 import app, image, path, run
 
 
 def execute(): #pylint: disable=unused-variable
   # Find out which algorithm the user has requested
-  alg = algorithm.get(app.ARGS.algorithm)
+  algorithm_module_name = 'mrtrix3.dwi2mask.' + app.ARGS.algorithm
+  alg = sys.modules[algorithm_module_name]
 
   app.check_output_path(app.ARGS.output)
@@ -35,12 +37,13 @@ def execute(): #pylint: disable=unused-variable
   # Get input data into the scratch directory
   run.command('mrconvert ' + path.from_user(app.ARGS.input) + ' ' + path.to_scratch('input.mif') + ' -strides 0,0,0,1' + grad_import_option)
 
-  alg.get_inputs()
+  importlib.import_module('.get_inputs', algorithm_module_name)
+  alg.get_inputs.get_inputs()
 
   app.goto_scratch_dir()
 
   # Generate a mean b=0 image (common task in many algorithms)
-  if alg.needs_mean_bzero():
+  if alg.NEEDS_MEAN_BZERO:
     run.command('dwiextract input.mif -bzero - | '
                 'mrmath - mean - -axis 3 | '
                 'mrconvert - bzero.nii -strides +1,+2,+3')
@@ -60,7 +63,8 @@ def execute(): #pylint: disable=unused-variable
   # From here, the script splits depending on what algorithm is being used
   # The return value of the execute() function should be the name of the
   # image in the scratch directory that is to be exported
-  mask_path = alg.execute()
+  importlib.import_module('.execute', algorithm_module_name)
+  mask_path = alg.execute.execute()
 
   # Before exporting the mask image, get a mask of voxels for which
   # the DWI data are valid
diff --git a/python/mrtrix3/dwi2mask/trace/__init__.py b/python/mrtrix3/dwi2mask/trace/__init__.py
index 3c08030957..030fee1c66 100644
--- a/python/mrtrix3/dwi2mask/trace/__init__.py
+++ b/python/mrtrix3/dwi2mask/trace/__init__.py
@@ -13,6 +13,7 @@
 #
 # For more details, see http://www.mrtrix.org/.
 
+# pylint: disable=unused-variable
 NEEDS_MEAN_BZERO = False
 DEFAULT_CLEAN_SCALE = 2
diff --git a/python/mrtrix3/dwi2mask/usage.py b/python/mrtrix3/dwi2mask/usage.py
index e728138e30..6e0f2dea0d 100644
--- a/python/mrtrix3/dwi2mask/usage.py
+++ b/python/mrtrix3/dwi2mask/usage.py
@@ -13,7 +13,7 @@
 #
 # For more details, see http://www.mrtrix.org/.
 
-from mrtrix3 import algorithm, app, _version
+from mrtrix3 import app, _version
 
 
 def usage(cmdline): #pylint: disable=unused-variable
@@ -28,5 +28,4 @@ def usage(cmdline): #pylint: disable=unused-variable
   #common_options = cmdline.add_argument_group('General dwi2mask options')
   app.add_dwgrad_import_options(cmdline)
 
-  # Import the command-line settings for all algorithms found in the relevant directory
-  algorithm.usage(cmdline)
+  cmdline.add_subparsers()
diff --git a/python/mrtrix3/dwi2response/__init__.py b/python/mrtrix3/dwi2response/__init__.py
index e69de29bb2..445d48d800 100644
--- a/python/mrtrix3/dwi2response/__init__.py
+++ b/python/mrtrix3/dwi2response/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2008-2023 the MRtrix3 contributors.
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#
+# Covered Software is provided under this License on an "as is"
+# basis, without warranty of any kind, either expressed, implied, or
+# statutory, including, without limitation, warranties that the
+# Covered Software is free of defects, merchantable, fit for a
+# particular purpose or non-infringing.
+# See the Mozilla Public License v. 2.0 for more details.
+#
+# For more details, see http://www.mrtrix.org/.
+
+# pylint: disable=unused-variable
+ALGORITHMS = [ 'dhollander', 'fa', 'manual', 'msmt_5tt', 'tax', 'tournier' ]
diff --git a/python/mrtrix3/dwi2response/execute.py b/python/mrtrix3/dwi2response/execute.py
index 76bd4d6ea6..d3aabadf6d 100644
--- a/python/mrtrix3/dwi2response/execute.py
+++ b/python/mrtrix3/dwi2response/execute.py
@@ -13,18 +13,21 @@
 #
 # For more details, see http://www.mrtrix.org/.
 
+import importlib, sys
 from mrtrix3 import CONFIG, MRtrixError
-from mrtrix3 import algorithm, app, image, path, run
+from mrtrix3 import app, image, path, run
 
 
 def execute(): #pylint: disable=unused-variable
   # Find out which algorithm the user has requested
-  alg = algorithm.get(app.ARGS.algorithm)
+  algorithm_module_name = 'mrtrix3.dwi2response.' + app.ARGS.algorithm
+  alg = sys.modules[algorithm_module_name]
 
   # Check for prior existence of output files, and grab any input files, used by the particular algorithm
   if app.ARGS.voxels:
     app.check_output_path(app.ARGS.voxels)
-  alg.check_output_paths()
+  importlib.import_module('.check_output_paths', algorithm_module_name)
+  alg.check_output_paths.check_output_paths()
 
   # Sanitise some inputs, and get ready for data import
   if app.ARGS.lmax:
@@ -66,7 +69,8 @@ def execute(): #pylint: disable=unused-variable
       app.console('Importing mask (' + path.from_user(app.ARGS.mask) + ')...')
       run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit', show=False)
 
-  alg.get_inputs()
+  importlib.import_module('.get_inputs', algorithm_module_name)
+  alg.get_inputs.get_inputs()
 
   app.goto_scratch_dir()
 
@@ -86,4 +90,5 @@ def execute(): #pylint: disable=unused-variable
     raise MRtrixError(('Provided' if app.ARGS.mask else 'Generated') + ' mask image does not contain any voxels')
 
   # From here, the script splits depending on what estimation algorithm is being used
-  alg.execute()
+  importlib.import_module('.execute', algorithm_module_name)
+  alg.execute.execute()
diff --git a/python/mrtrix3/dwi2response/usage.py b/python/mrtrix3/dwi2response/usage.py
index d0d9d9e4e2..3a88650b1a 100644
--- a/python/mrtrix3/dwi2response/usage.py
+++ b/python/mrtrix3/dwi2response/usage.py
@@ -13,7 +13,7 @@
 #
 # For more details, see http://www.mrtrix.org/.
 
-from mrtrix3 import algorithm, app, _version
+from mrtrix3 import app, _version
 
 
 def usage(cmdline): #pylint: disable=unused-variable
@@ -36,5 +36,4 @@ def usage(cmdline): #pylint: disable=unused-variable
   common_options.add_argument('-lmax', help='The maximum harmonic degree(s) for response function estimation (comma-separated list in case of multiple b-values)')
   app.add_dwgrad_import_options(cmdline)
 
-  # Import the command-line settings for all algorithms found in the relevant directory
-  algorithm.usage(cmdline)
+  cmdline.add_subparsers()
diff --git a/python/mrtrix3/dwibiascorrect/__init__.py b/python/mrtrix3/dwibiascorrect/__init__.py
index e69de29bb2..5a2de2752b 100644
--- a/python/mrtrix3/dwibiascorrect/__init__.py
+++ b/python/mrtrix3/dwibiascorrect/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2008-2023 the MRtrix3 contributors.
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#
+# Covered Software is provided under this License on an "as is"
+# basis, without warranty of any kind, either expressed, implied, or
+# statutory, including, without limitation, warranties that the
+# Covered Software is free of defects, merchantable, fit for a
+# particular purpose or non-infringing.
+# See the Mozilla Public License v. 2.0 for more details.
+#
+# For more details, see http://www.mrtrix.org/.
+
+# pylint: disable=unused-variable
+ALGORITHMS = [ 'ants', 'fsl', 'mtnorm' ]
diff --git a/python/mrtrix3/dwibiascorrect/execute.py b/python/mrtrix3/dwibiascorrect/execute.py
index ecdd4b17d5..8cd60bc4d4 100644
--- a/python/mrtrix3/dwibiascorrect/execute.py
+++ b/python/mrtrix3/dwibiascorrect/execute.py
@@ -13,17 +13,20 @@
 #
 # For more details, see http://www.mrtrix.org/.
 
+import importlib, sys
 from mrtrix3 import CONFIG, MRtrixError
-from mrtrix3 import algorithm, app, image, path, run
+from mrtrix3 import app, image, path, run
 
 
 def execute(): #pylint: disable=unused-variable
   # Find out which algorithm the user has requested
-  alg = algorithm.get(app.ARGS.algorithm)
+  algorithm_module_name = 'mrtrix3.dwibiascorrect.' + app.ARGS.algorithm
+  alg = sys.modules[algorithm_module_name]
 
   app.check_output_path(app.ARGS.output)
   app.check_output_path(app.ARGS.bias)
-  alg.check_output_paths()
+  importlib.import_module('.check_output_paths', algorithm_module_name)
+  alg.check_output_paths.check_output_paths()
 
   app.make_scratch_dir()
@@ -32,7 +35,8 @@ def execute(): #pylint: disable=unused-variable
   if app.ARGS.mask:
     run.command('mrconvert ' + path.from_user(app.ARGS.mask) + ' ' + path.to_scratch('mask.mif') + ' -datatype bit')
 
-  alg.get_inputs()
+  importlib.import_module('.get_inputs', algorithm_module_name)
+  alg.get_inputs.get_inputs()
 
   app.goto_scratch_dir()
 
@@ -53,4 +57,5 @@ def execute(): #pylint: disable=unused-variable
     run.command('dwi2mask ' + CONFIG['Dwi2maskAlgorithm'] + ' in.mif mask.mif')
 
   # From here, the script splits depending on what estimation algorithm is being used
-  alg.execute()
+  importlib.import_module('.execute', algorithm_module_name)
+  alg.execute.execute()
diff --git a/python/mrtrix3/dwibiascorrect/usage.py b/python/mrtrix3/dwibiascorrect/usage.py
index 30fc6802f2..63f5425049 100644
--- a/python/mrtrix3/dwibiascorrect/usage.py
+++ b/python/mrtrix3/dwibiascorrect/usage.py
@@ -13,7 +13,7 @@
 #
 # For more details, see http://www.mrtrix.org/.
 
-from mrtrix3 import algorithm, app, _version
+from mrtrix3 import app, _version
 
 def usage(cmdline): #pylint: disable=unused-variable
   cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)')
@@ -26,6 +26,4 @@ def usage(cmdline): #pylint: disable=unused-variable
   common_options.add_argument('-mask', metavar='image', help='Manually provide a mask image for bias field estimation')
   common_options.add_argument('-bias', metavar='image', help='Output the estimated bias field')
   app.add_dwgrad_import_options(cmdline)
-
-  # Import the command-line settings for all algorithms found in the relevant directory
-  algorithm.usage(cmdline)
+  cmdline.add_subparsers()
diff --git a/python/mrtrix3/dwinormalise/__init__.py b/python/mrtrix3/dwinormalise/__init__.py
index e69de29bb2..e53a18c534 100644
--- a/python/mrtrix3/dwinormalise/__init__.py
+++ b/python/mrtrix3/dwinormalise/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2008-2023 the MRtrix3 contributors.
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#
+# Covered Software is provided under this License on an "as is"
+# basis, without warranty of any kind, either expressed, implied, or
+# statutory, including, without limitation, warranties that the
+# Covered Software is free of defects, merchantable, fit for a
+# particular purpose or non-infringing.
+# See the Mozilla Public License v. 2.0 for more details.
+#
+# For more details, see http://www.mrtrix.org/.
+
+# pylint: disable=unused-variable
+ALGORITHMS = [ 'group', 'manual', 'mtnorm' ]
diff --git a/python/mrtrix3/dwinormalise/execute.py b/python/mrtrix3/dwinormalise/execute.py
index 0c346d601d..ddd243fc3a 100644
--- a/python/mrtrix3/dwinormalise/execute.py
+++ b/python/mrtrix3/dwinormalise/execute.py
@@ -13,13 +13,17 @@
 #
 # For more details, see http://www.mrtrix.org/.
 
-from mrtrix3 import algorithm, app
+import importlib, sys
+from mrtrix3 import app
 
 
 def execute(): #pylint: disable=unused-variable
   # Find out which algorithm the user has requested
-  alg = algorithm.get(app.ARGS.algorithm)
-  alg.check_output_paths()
+  algorithm_module_name = 'mrtrix3.dwinormalise.' + app.ARGS.algorithm
+  alg = sys.modules[algorithm_module_name]
+  importlib.import_module('.check_output_paths', algorithm_module_name)
+  alg.check_output_paths.check_output_paths()
 
   # From here, the script splits depending on what algorithm is being used
-  alg.execute()
+  importlib.import_module('.execute', algorithm_module_name)
+  alg.execute.execute()
diff --git a/python/mrtrix3/dwinormalise/usage.py b/python/mrtrix3/dwinormalise/usage.py
index 075db4236b..52f3974b50 100644
--- a/python/mrtrix3/dwinormalise/usage.py
+++ b/python/mrtrix3/dwinormalise/usage.py
@@ -13,14 +13,10 @@
 #
 # For more details, see http://www.mrtrix.org/.
 
-from mrtrix3 import algorithm
-
 def usage(cmdline): #pylint: disable=unused-variable
   cmdline.set_author('Robert E. Smith (robert.smith@florey.edu.au)')
   cmdline.set_synopsis('Perform various forms of intensity normalisation of DWIs')
   cmdline.add_description('This script provides access to different techniques for globally scaling the intensity of diffusion-weighted images. '
                           'The different algorithms have different purposes, and different requirements with respect to the data with which they must be provided & will produce as output. '
                           'Further information on the individual algorithms available can be accessed via their individual help pages; eg. "dwinormalise group -help".')
-
-  # Import the command-line settings for all algorithms found in the relevant directory
-  algorithm.usage(cmdline)
+  cmdline.add_subparsers()
diff --git a/python/mrtrix3/population_template/input.py b/python/mrtrix3/population_template/input.py
index 0ebe7c24cc..e3776fee72 100644
--- a/python/mrtrix3/population_template/input.py
+++ b/python/mrtrix3/population_template/input.py
@@ -14,7 +14,6 @@
 # For more details, see http://www.mrtrix.org/.
 
 import os
-from .utils import abspath
 
 class Input: # pylint: disable=unused-variable
   """
@@ -121,7 +120,9 @@ def cache_local(self):
   def get_ims_path(self, quoted=True):
     """ return path to input images """
-    from mrtrix3 import path # pylint: disable=no-name-in-module, import-outside-toplevel
+    from mrtrix3 import path # pylint: disable=no-name-in-module, import-outside-toplevel
+    # Have to import locally to avoid circular dependency
+    from .utils import abspath # pylint: disable=import-outside-toplevel
     if self._local_ims:
       return self._local_ims
     return [path.from_user(abspath(d, f), quoted) for d, f in zip(self._im_directories, self.ims_filenames)]
@@ -129,7 +130,7 @@ def get_ims_path(self, quoted=True):
   def get_msk_path(self, quoted=True):
     """ return path to input mask """
-    from mrtrix3 import path # pylint: disable=no-name-in-module, import-outside-toplevel
+    from mrtrix3 import path # pylint: disable=no-name-in-module, import-outside-toplevel
     if self._local_msk:
       return self._local_msk
     return path.from_user(os.path.join(self._msk_directory, self.msk_filename), quoted) if self.msk_filename else None
diff --git a/python/mrtrix3/population_template/utils.py b/python/mrtrix3/population_template/utils.py
index 1e3029556c..d6475bbb73 100644
--- a/python/mrtrix3/population_template/utils.py
+++ b/python/mrtrix3/population_template/utils.py
@@ -192,15 +192,15 @@ def paths_to_file_uids(paths, prefix, postfix):
   uid_path = {}
   uids = []
   for filepath in paths:
-    uid = re.sub(re.escape(postfix)+'$', '', re.sub('^'+re.escape(prefix), '', os.path.split(path)[1]))
+    uid = re.sub(re.escape(postfix)+'$', '', re.sub('^'+re.escape(prefix), '', os.path.split(filepath)[1]))
     uid = re.sub(r'\s+', whitespace_repl, uid)
     if not uid:
-      raise MRtrixError('No uniquely identifiable part of filename "' + path + '" '
+      raise MRtrixError('No uniquely identifiable part of filename "' + filepath + '" '
                         'after prefix and postfix substitution '
                         'with prefix "' + prefix + '" and postfix "' + postfix + '"')
     app.debug('UID mapping: "' + filepath + '" --> "' + uid + '"')
     if uid in uid_path:
-      raise MRtrixError('unique file identifier is not unique: "' + uid + '" mapped to "' + path + '" and "' + uid_path[uid] +'"')
+      raise MRtrixError('unique file identifier is not unique: "' + uid + '" mapped to "' + filepath + '" and "' + uid_path[uid] +'"')
     uid_path[uid] = filepath
     uids.append(uid)
   return uids

From 02b0a7e34e7e1bdeae353da55e0aa2f4d8825137 Mon Sep 17 00:00:00 2001
From: Robert Smith
Date: Fri, 8 Mar 2024 14:12:57 +1100
Subject: [PATCH 10/11] Python API: Store list of MRtrix3 commands using cmake

---
 cmake/FindVersion.cmake        |  3 +--
 python/mrtrix3/CMakeLists.txt  | 11 +++++++++++
 python/mrtrix3/__init__.py     |  5 -----
 python/mrtrix3/_commands.py.in |  3 +++
 python/mrtrix3/_version.py.in  |  2 +-
 python/mrtrix3/run.py          | 15 ++++++++-------
 6 files changed, 24 insertions(+), 15 deletions(-)
 create mode 100644 python/mrtrix3/_commands.py.in

diff --git a/cmake/FindVersion.cmake b/cmake/FindVersion.cmake
index 579484355a..ac39d45ed0 100644
--- a/cmake/FindVersion.cmake
+++ b/cmake/FindVersion.cmake
@@ -34,9 +34,8 @@ if(NOT MRTRIX_VERSION)
   message(STATUS "Failed to determine version from Git, using default base version: ${MRTRIX_BASE_VERSION}")
 endif()
-
 configure_file(
   ${SRC}
   ${DST}
   @ONLY
-)
\ No newline at end of file
+)
diff --git a/python/mrtrix3/CMakeLists.txt b/python/mrtrix3/CMakeLists.txt
index 1020b218d0..5eda61b4f5 100644
--- a/python/mrtrix3/CMakeLists.txt
+++ b/python/mrtrix3/CMakeLists.txt
@@ -1,4 +1,5 @@
 set(PYTHON_VERSION_FILE ${CMAKE_CURRENT_SOURCE_DIR}/_version.py)
+set(PYTHON_CMDLIST_FILE ${CMAKE_CURRENT_SOURCE_DIR}/_commands.py)
 
 find_package(Git QUIET)
@@ -35,6 +36,16 @@ execute_process(
   WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
 )
 
+# TODO We need to generate a list of MRtrix3 commands;
+# function run.command() does different things if it is executing an MRtrix3 command vs. an external command
+execute_process(
+  COMMAND ${CMAKE_COMMAND}
+    -D DST=${PYTHON_CMDLIST_FILE}
+    -D SRC=${CMAKE_CURRENT_SOURCE_DIR}/_commands.py.in
+    -P ${PROJECT_SOURCE_DIR}/cmake/FindCommands.cmake
+  WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
+)
+
 add_custom_target(LinkPythonFiles ALL)
 add_custom_command(
   TARGET LinkPythonFiles
diff --git a/python/mrtrix3/__init__.py b/python/mrtrix3/__init__.py
index 2f997be509..04b3689bbb 100644
--- a/python/mrtrix3/__init__.py
+++ b/python/mrtrix3/__init__.py
@@ -36,11 +36,6 @@ class MRtrixError(MRtrixBaseError): #pylint: disable=unused-variable
   COMMAND_HISTORY_STRING += ' (version=' + __version__ + ')'
 
-
-# Location of binaries that belong to the same MRtrix3 installation as the Python library being invoked
-BIN_PATH = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.abspath(__file__))), os.pardir, os.pardir, 'bin'))
-# Must remove the '.exe' from Windows binary executables
-EXE_LIST = [ os.path.splitext(name)[0] for name in os.listdir(BIN_PATH) ] #pylint: disable=unused-variable
 
 # 'CONFIG' is a dictionary containing those entries present in the MRtrix config files
 # Can add default values here that would otherwise appear in multiple locations
diff --git a/python/mrtrix3/_commands.py.in b/python/mrtrix3/_commands.py.in
new file mode 100644
index 0000000000..df58747212
--- /dev/null
+++ b/python/mrtrix3/_commands.py.in
@@ -0,0 +1,3 @@
+#pylint: disable=unused-variable
+COMMAND_PATH = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir, 'bin'))
+COMMAND_LIST = [@MRTRIX_COMMAND_LIST@]
diff --git a/python/mrtrix3/_version.py.in b/python/mrtrix3/_version.py.in
index b7182e8f6f..ae97b8f57b 100644
--- a/python/mrtrix3/_version.py.in
+++ b/python/mrtrix3/_version.py.in
@@ -1,2 +1,2 @@
 __version__ = "@MRTRIX_VERSION@" #pylint: disable=unused-variable
-__tag__ = "@MRTRIX_GIT_TAG@" #pylint: disable=unused-variable
\ No newline at end of file
+__tag__ = "@MRTRIX_GIT_TAG@" #pylint: disable=unused-variable
diff --git a/python/mrtrix3/run.py b/python/mrtrix3/run.py
index e4d22ad853..4d5f125b31 100644
--- a/python/mrtrix3/run.py
+++ b/python/mrtrix3/run.py
@@ -14,7 +14,8 @@
 # For more details, see http://www.mrtrix.org/.
 
 import collections, itertools, os, shlex, shutil, signal, string, subprocess, sys, tempfile, threading
-from mrtrix3 import ANSI, BIN_PATH, COMMAND_HISTORY_STRING, EXE_LIST, MRtrixBaseError, MRtrixError
+from mrtrix3 import ANSI, COMMAND_HISTORY_STRING, MRtrixBaseError, MRtrixError
+from mrtrix3._commands import COMMAND_PATH, COMMAND_LIST
 
 IOStream = collections.namedtuple('IOStream', 'handle filename')
@@ -338,7 +339,7 @@ def quote_nonpipe(item):
     cmdstack[-1].extend([ '-append_property', 'command_history', COMMAND_HISTORY_STRING ])
 
   for line in cmdstack:
-    is_mrtrix_exe = line[0] in EXE_LIST
+    is_mrtrix_exe = line[0] in COMMAND_LIST
     if is_mrtrix_exe:
       line[0] = version_match(line[0])
       if shared.get_num_threads() is not None:
@@ -526,9 +527,9 @@ def exe_name(item):
     path = item
   elif item.endswith('.exe'):
     path = item
-  elif os.path.isfile(os.path.join(BIN_PATH, item)):
+  elif os.path.isfile(os.path.join(COMMAND_PATH, item)):
    path = item
-  elif os.path.isfile(os.path.join(BIN_PATH, item + '.exe')):
+  elif os.path.isfile(os.path.join(COMMAND_PATH, item + '.exe')):
    path = item + '.exe'
  elif shutil.which(item) is not None:
    path = item
@@ -549,10 +550,10 @@ def exe_name(item):
 # which checks system32\ before PATH)
 def version_match(item):
   from mrtrix3 import app #pylint: disable=import-outside-toplevel
-  if not item in EXE_LIST:
-    app.debug('Command ' + item + ' not found in MRtrix3 bin/ directory')
+  if not item in COMMAND_LIST:
+    app.debug('Command ' + item + ' not a part of MRtrix3')
     return item
-  exe_path_manual = os.path.join(BIN_PATH, exe_name(item))
+  exe_path_manual = os.path.join(COMMAND_PATH, exe_name(item))
   if os.path.isfile(exe_path_manual):
     app.debug('Version-matched executable for ' + item + ': ' + exe_path_manual)
     return exe_path_manual

From 9135a170c7aa8607c2d18a81ca5ae1d605387382 Mon Sep 17 00:00:00 2001
From: Robert Smith
Date: Mon, 18 Mar 2024 11:10:49 +1100
Subject: [PATCH 11/11] Add file cmake/FindCommands.cmake

File missing from 02b0a7e34e7e1bdeae353da55e0aa2f4d8825137.
---
 cmake/FindCommands.cmake | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)
 create mode 100644 cmake/FindCommands.cmake

diff --git a/cmake/FindCommands.cmake b/cmake/FindCommands.cmake
new file mode 100644
index 0000000000..76bb28549d
--- /dev/null
+++ b/cmake/FindCommands.cmake
@@ -0,0 +1,35 @@
+file(
+  GLOB CPP_COMMAND_FILES
+  ${CMAKE_SOURCE_DIR}/cmd/*.cpp
+)
+
+file(
+  GLOB PYTHON_ROOT_ENTRIES
+  ${CMAKE_SOURCE_DIR}/python/mrtrix3/*/
+)
+
+set(MRTRIX_COMMAND_LIST "")
+foreach(CPP_COMMAND_FILE ${CPP_COMMAND_FILES})
+  get_filename_component(CPP_COMMAND_NAME ${CPP_COMMAND_FILE} NAME_WE)
+  if(MRTRIX_COMMAND_LIST STREQUAL "")
+    set(MRTRIX_COMMAND_LIST "\"${CPP_COMMAND_NAME}\"")
+  else()
+    set(MRTRIX_COMMAND_LIST "${MRTRIX_COMMAND_LIST},\n \"${CPP_COMMAND_NAME}\"")
+  endif()
+endforeach()
+foreach(PYTHON_ROOT_ENTRY ${PYTHON_ROOT_ENTRIES})
+  if(IS_DIRECTORY ${PYTHON_ROOT_ENTRY})
+    get_filename_component(PYTHON_COMMAND_NAME ${PYTHON_ROOT_ENTRY} NAME)
+    set(MRTRIX_COMMAND_LIST "${MRTRIX_COMMAND_LIST},\n \"${PYTHON_COMMAND_NAME}\"")
+  endif()
+endforeach()
+set(MRTRIX_COMMAND_PATH ${CMAKE_CURRENT_BUILD_DIR}/bin)
+message(VERBOSE "Completed FindCommands() function")
+message(VERBOSE "MRtrix3 executables location: ${MRTRIX_COMMAND_PATH}")
+message(VERBOSE "List of MRtrix3 commands: ${MRTRIX_COMMAND_LIST}")
+
+configure_file(
+  ${SRC}
+  ${DST}
+  @ONLY
+)
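
Illustration of what patches 10 and 11 combine to produce: FindCommands.cmake globs the C++ and Python command names at configure time, and configure_file() then expands _commands.py.in into the module that run.py imports. Below is a minimal sketch of what the generated python/mrtrix3/_commands.py might look like; the command names are placeholders only (the real list is discovered from cmd/*.cpp and the python/mrtrix3/ package directories), and an explicit "import os" is included here as an assumption, since COMMAND_PATH is built via os.path even though the .in template shown above does not itself import it.

# Hypothetical contents of the generated python/mrtrix3/_commands.py
# (command names are illustrative placeholders, not the actual configured list)
import os  # assumed: COMMAND_PATH below relies on os.path

#pylint: disable=unused-variable
# Location of the bin/ directory belonging to this MRtrix3 installation
COMMAND_PATH = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir, 'bin'))
# Names substituted in from @MRTRIX_COMMAND_LIST@ at configure time
COMMAND_LIST = ["mrconvert",
                "mrmath",
                "dwi2mask",
                "dwi2response"]

With this in place, run.command() can test membership of an executable name in COMMAND_LIST to decide whether it is an MRtrix3 command, and version_match() resolves such names against COMMAND_PATH so that the executable from the same installation is invoked.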