diff --git a/tedana/decomp/__init__.py b/tedana/decomposition/__init__.py similarity index 73% rename from tedana/decomp/__init__.py rename to tedana/decomposition/__init__.py index c32a35167..72625ca0f 100644 --- a/tedana/decomp/__init__.py +++ b/tedana/decomposition/__init__.py @@ -2,10 +2,10 @@ # ex: set sts=4 ts=4 sw=4 et: from .eigendecomp import ( - tedpca, tedica, eimask, + tedpca, tedica, ) __all__ = [ - 'tedpca', 'tedica', 'eimask' + 'tedpca', 'tedica', ] diff --git a/tedana/decomposition/_utils.py b/tedana/decomposition/_utils.py new file mode 100644 index 000000000..f4d1192e0 --- /dev/null +++ b/tedana/decomposition/_utils.py @@ -0,0 +1,46 @@ +""" +Utility functions for tedana decomposition +""" +import logging + +import numpy as np +from scipy import stats + +logging.basicConfig(format='[%(levelname)s]: %(message)s', level=logging.INFO) +LGR = logging.getLogger(__name__) + +F_MAX = 500 +Z_MAX = 8 + + +def eimask(dd, ees=None): + """ + Returns mask for data between [0.001, 5] * 98th percentile of dd + + Parameters + ---------- + dd : (S x E x T) array_like + Input data, where `S` is samples, `E` is echos, and `T` is time + ees : (N,) list + Indices of echos to assess from `dd` in calculating output + + Returns + ------- + imask : (S x N) np.ndarray + Boolean array denoting + """ + + if ees is None: + ees = range(dd.shape[1]) + imask = np.zeros([dd.shape[0], len(ees)], dtype=bool) + for ee in ees: + LGR.info('++ Creating eimask for echo {}'.format(ee)) + perc98 = stats.scoreatpercentile(dd[:, ee, :].flatten(), 98, + interpolation_method='lower') + lthr, hthr = 0.001 * perc98, 5 * perc98 + LGR.info('++ Eimask threshold boundaries: ' + '{:.03f} {:.03f}'.format(lthr, hthr)) + m = dd[:, ee, :].mean(axis=1) + imask[np.logical_and(m > lthr, m < hthr), ee] = True + + return imask diff --git a/tedana/decomp/eigendecomp.py b/tedana/decomposition/eigendecomp.py similarity index 81% rename from tedana/decomp/eigendecomp.py rename to tedana/decomposition/eigendecomp.py index 9c31d8faa..d890b7ea0 100644 --- a/tedana/decomp/eigendecomp.py +++ b/tedana/decomposition/eigendecomp.py @@ -1,50 +1,24 @@ +""" +Signal decomposition methods for tedana +""" import pickle -import numpy as np +import logging import os.path as op + +import numpy as np from scipy import stats + from tedana import model, utils +from tedana.decomposition._utils import eimask +from tedana.selection._utils import (getelbow_cons, getelbow_mod) -import logging logging.basicConfig(format='[%(levelname)s]: %(message)s', level=logging.INFO) -lgr = logging.getLogger(__name__) +LGR = logging.getLogger(__name__) F_MAX = 500 Z_MAX = 8 -def eimask(dd, ees=None): - """ - Returns mask for data between [0.001, 5] * 98th percentile of dd - - Parameters - ---------- - dd : (S x E x T) array_like - Input data, where `S` is samples, `E` is echos, and `T` is time - ees : (N,) list - Indices of echos to assess from `dd` in calculating output - - Returns - ------- - imask : (S x N) np.ndarray - Boolean array denoting - """ - - if ees is None: - ees = range(dd.shape[1]) - imask = np.zeros([dd.shape[0], len(ees)], dtype=bool) - for ee in ees: - lgr.info('++ Creating eimask for echo {}'.format(ee)) - perc98 = stats.scoreatpercentile(dd[:, ee, :].flatten(), 98, - interpolation_method='lower') - lthr, hthr = 0.001 * perc98, 5 * perc98 - lgr.info('++ Eimask threshold boundaries: ' - '{:.03f} {:.03f}'.format(lthr, hthr)) - m = dd[:, ee, :].mean(axis=1) - imask[np.logical_and(m > lthr, m < hthr), ee] = True - - return imask - - def tedpca(catd, OCcatd, 
combmode, mask, t2s, t2sG, stabilize, ref_img, tes, kdaw, rdaw, ste=0, mlepca=True): """ @@ -94,13 +68,13 @@ def tedpca(catd, OCcatd, combmode, mask, t2s, t2sG, stabilize, ste = np.array([int(ee) for ee in str(ste).split(',')]) if len(ste) == 1 and ste[0] == -1: - lgr.info('++ Computing PCA of optimally combined multi-echo data') + LGR.info('++ Computing PCA of optimally combined multi-echo data') d = OCcatd[utils.make_min_mask(OCcatd[:, np.newaxis, :])][:, np.newaxis, :] elif len(ste) == 1 and ste[0] == 0: - lgr.info('++ Computing PCA of spatially concatenated multi-echo data') + LGR.info('++ Computing PCA of spatially concatenated multi-echo data') d = catd[mask].astype('float64') else: - lgr.info('++ Computing PCA of echo #%s' % ','.join([str(ee) for ee in ste])) + LGR.info('++ Computing PCA of echo #%s' % ','.join([str(ee) for ee in ste])) d = np.stack([catd[mask, ee] for ee in ste - 1], axis=1).astype('float64') eim = np.squeeze(eimask(d)) @@ -123,7 +97,7 @@ def tedpca(catd, OCcatd, combmode, mask, t2s, t2sG, stabilize, # actual variance explained (normalized) sp = s / s.sum() - eigelb = model.getelbow_mod(sp, val=True) + eigelb = getelbow_mod(sp, val=True) spdif = np.abs(np.diff(sp)) spdifh = spdif[(len(spdif)//2):] @@ -148,17 +122,17 @@ def tedpca(catd, OCcatd, combmode, mask, t2s, t2sG, stabilize, ctb = np.vstack([ctb.T[:3], sp]).T # Save state - lgr.info('++ Saving PCA') + LGR.info('++ Saving PCA') pcastate = {'u': u, 's': s, 'v': v, 'ctb': ctb, 'eigelb': eigelb, 'spmin': spmin, 'spcum': spcum} try: with open('pcastate.pkl', 'wb') as handle: pickle.dump(pcastate, handle) except TypeError: - lgr.warning('++ Could not save PCA solution.') + LGR.warning('++ Could not save PCA solution.') else: # if loading existing state - lgr.info('++ Loading PCA') + LGR.info('++ Loading PCA') with open('pcastate.pkl', 'rb') as handle: pcastate = pickle.load(handle) u, s, v = pcastate['u'], pcastate['s'], pcastate['v'] @@ -171,19 +145,19 @@ def tedpca(catd, OCcatd, combmode, mask, t2s, t2sG, stabilize, kappas = ctb[ctb[:, 1].argsort(), 1] rhos = ctb[ctb[:, 2].argsort(), 2] fmin, fmid, fmax = utils.getfbounds(n_echos) - kappa_thr = np.average(sorted([fmin, model.getelbow_mod(kappas, val=True)/2, fmid]), + kappa_thr = np.average(sorted([fmin, getelbow_mod(kappas, val=True)/2, fmid]), weights=[kdaw, 1, 1]) - rho_thr = np.average(sorted([fmin, model.getelbow_cons(rhos, val=True)/2, fmid]), + rho_thr = np.average(sorted([fmin, getelbow_cons(rhos, val=True)/2, fmid]), weights=[rdaw, 1, 1]) if int(kdaw) == -1: kappas_lim = kappas[utils.andb([kappas < fmid, kappas > fmin]) == 2] - kappa_thr = kappas_lim[model.getelbow_mod(kappas_lim)] + kappa_thr = kappas_lim[getelbow_mod(kappas_lim)] rhos_lim = rhos[utils.andb([rhos < fmid, rhos > fmin]) == 2] - rho_thr = rhos_lim[model.getelbow_mod(rhos_lim)] + rho_thr = rhos_lim[getelbow_mod(rhos_lim)] stabilize = True if int(kdaw) != -1 and int(rdaw) == -1: rhos_lim = rhos[utils.andb([rhos < fmid, rhos > fmin]) == 2] - rho_thr = rhos_lim[model.getelbow_mod(rhos_lim)] + rho_thr = rhos_lim[getelbow_mod(rhos_lim)] is_hik = np.array(ctb[:, 1] > kappa_thr, dtype=np.int) is_hir = np.array(ctb[:, 2] > rho_thr, dtype=np.int) @@ -202,7 +176,7 @@ def tedpca(catd, OCcatd, combmode, mask, t2s, t2sG, stabilize, dd = u.dot(np.diag(s*np.array(pcsel, dtype=np.int))).dot(v) n_components = s[pcsel].shape[0] - lgr.info('++ Selected {0} components. Kappa threshold: {1:.02f}, ' + LGR.info('++ Selected {0} components. 
Kappa threshold: {1:.02f}, ' 'Rho threshold: {2:.02f}'.format(n_components, kappa_thr, rho_thr)) dd = stats.zscore(dd.T, axis=0).T # variance normalize timeseries diff --git a/tedana/io/__init__.py b/tedana/io/__init__.py deleted file mode 100644 index 2e47ec6b7..000000000 --- a/tedana/io/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- -# ex: set sts=4 ts=4 sw=4 et: - -from .input_data import ( - ctabsel, -) - - -from .output_data import ( - gscontrol_mmix, split_ts, write_split_ts, writefeats, - writect, writeresults, writeresults_echoes, -) - - -__all__ = [ - 'ctabsel', 'gscontrol_mmix', - 'split_ts', 'write_split_ts', - 'writefeats', 'writect', 'writeresults', - 'writeresults_echoes'] diff --git a/tedana/io/input_data.py b/tedana/io/input_data.py deleted file mode 100644 index f58773234..000000000 --- a/tedana/io/input_data.py +++ /dev/null @@ -1,28 +0,0 @@ -import numpy as np - - -def ctabsel(ctabfile): - """ - Loads a pre-existing component table file - - Parameters - ---------- - ctabfile : str - Filepath to existing component table - - Returns - ------- - ctab : (4,) tuple-of-arrays - Tuple containing arrays of (1) accepted, (2) rejected, (3) mid, and (4) - ignored components - """ - - with open(ctabfile, 'r') as src: - ctlines = src.readlines() - class_tags = ['#ACC', '#REJ', '#MID', '#IGN'] - class_dict = {} - for ii, ll in enumerate(ctlines): - for kk in class_tags: - if ll[:4] is kk and ll[4:].strip() is not '': - class_dict[kk] = ll[4:].split('#')[0].split(',') - return tuple([np.array(class_dict[kk], dtype=int) for kk in class_tags]) diff --git a/tedana/model/__init__.py b/tedana/model/__init__.py index 68ce5e7f5..63d0955de 100644 --- a/tedana/model/__init__.py +++ b/tedana/model/__init__.py @@ -4,19 +4,19 @@ from .fit import ( computefeats2, fitmodels_direct, get_coeffs, - getelbow_cons, getelbow_mod, - getelbow_aggr, gscontrol_raw, - spatclust, + spatclust, gscontrol_raw, ) +from .combine import ( + make_optcom +) -from .t2smap import ( - fit, make_optcom, t2sadmap, +from .monoexponential import ( + fit_decay, fit_decay_ts ) __all__ = [ 'computefeats2', 'fit', 'fitmodels_direct', - 'get_coeffs', 'getelbow_cons', 'getelbow_mod', - 'getelbow_aggr', 'gscontrol_raw', - 'make_optcom', 'spatclust', 't2sadmap'] + 'get_coeffs', 'make_optcom', 'spatclust', + 'fit_decay', 'fit_decay_ts'] diff --git a/tedana/model/combine.py b/tedana/model/combine.py new file mode 100644 index 000000000..88d357757 --- /dev/null +++ b/tedana/model/combine.py @@ -0,0 +1,66 @@ +""" +Functions to optimally combine data across echoes. +""" +import logging + +import numpy as np + +from tedana import utils + +logging.basicConfig(format='[%(levelname)s]: %(message)s', level=logging.INFO) +LGR = logging.getLogger(__name__) + + +def make_optcom(data, t2s, tes, mask, combmode): + """ + Optimally combine BOLD data across TEs. + + Parameters + ---------- + data : (S x E x T) :obj:`numpy.ndarray` + Concatenated BOLD data. + t2 : (S,) :obj:`numpy.ndarray` + Estimated T2* values. + tes : :obj:`numpy.ndarray` + Array of TEs, in seconds. + mask : (S,) :obj:`numpy.ndarray` + Brain mask in 3D array. + combmode : :obj:`str` + How to combine data. Either 'ste' or 't2s'. + useG : :obj:`bool`, optional + Use G. Default is False. + + Returns + ------- + combined : (S x T) :obj:`numpy.ndarray` + Optimally combined data. 
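Review note on the `eimask` helper moved into `decomposition/_utils.py` above: it carries over unchanged apart from the `LGR` rename, and its Returns entry is still truncated at "Boolean array denoting" (worth completing in a follow-up). A minimal sketch of its thresholding for a single echo, using random stand-in data:

```python
import numpy as np
from scipy import stats

# Sketch of the eimask bounds: for one echo, keep voxels whose temporal mean
# falls inside (0.001, 5) * that echo's 98th percentile. Random stand-in data.
echo_data = np.abs(np.random.default_rng(2).standard_normal((500, 50))) * 100  # (S x T)
perc98 = stats.scoreatpercentile(echo_data.flatten(), 98,
                                 interpolation_method='lower')
lthr, hthr = 0.001 * perc98, 5 * perc98
mean_sig = echo_data.mean(axis=1)
keep = np.logical_and(mean_sig > lthr, mean_sig < hthr)  # boolean (S,) mask
```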
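In the `tedpca` hunk above, the Kappa/Rho thresholds are weighted blends of the F-statistic bounds with half the elbow value: the decision-augmentation weight (`kdaw`/`rdaw`) up-weights the smallest of the sorted triple, so larger weights pull the threshold down and retain more components. A toy illustration with assumed numbers:

```python
import numpy as np

# How the Kappa threshold blends (fmin, elbow/2, fmid); values illustrative.
fmin, fmid = 3.2, 9.5        # stand-ins for utils.getfbounds(n_echos) output
elbow_half = 25.0 / 2        # stand-in for getelbow_mod(kappas, val=True) / 2

for kdaw in (1, 10, 100):
    # weight kdaw applies to the smallest element after sorting
    kappa_thr = np.average(sorted([fmin, elbow_half, fmid]), weights=[kdaw, 1, 1])
    print(kdaw, kappa_thr)   # threshold drops toward fmin as kdaw grows
```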
+ """ + + _, _, n_vols = data.shape + mdata = data[mask] + tes = np.array(tes)[np.newaxis] # (1 x E) array_like + + if t2s.ndim == 1: + LGR.info('++ Optimally combining data with voxel-wise T2 estimates') + ft2s = t2s[mask, np.newaxis] + else: + LGR.info('++ Optimally combining data with voxel- and volume-wise T2 ' + 'estimates') + ft2s = t2s[mask, :, np.newaxis] + + if combmode == 'ste': + alpha = mdata.mean(axis=-1) * tes + else: + alpha = tes * np.exp(-tes / ft2s) + + if t2s.ndim == 1: + alpha = np.tile(alpha[:, :, np.newaxis], (1, 1, n_vols)) + else: + alpha = np.swapaxes(alpha, 1, 2) + ax0_idx, ax2_idx = np.where(np.all(alpha == 0, axis=1)) + alpha[ax0_idx, :, ax2_idx] = 1. + + combined = np.average(mdata, axis=1, weights=alpha) + combined = utils.unmask(combined, mask) + + return combined diff --git a/tedana/model/fit.py b/tedana/model/fit.py index d19f8b152..73a1d10dd 100644 --- a/tedana/model/fit.py +++ b/tedana/model/fit.py @@ -1,62 +1,24 @@ -import nilearn.image as niimg -from nilearn.regions import connected_regions -from nilearn._utils import check_niimg +""" +Fit models. +""" +import logging + import numpy as np from scipy import stats -from tedana import model, utils from scipy.special import lpmv +import nilearn.image as niimg +from nilearn._utils import check_niimg +from nilearn.regions import connected_regions + +from tedana import model, utils -import logging logging.basicConfig(format='[%(levelname)s]: %(message)s', level=logging.INFO) -lgr = logging.getLogger(__name__) +LGR = logging.getLogger(__name__) F_MAX = 500 Z_MAX = 8 -def computefeats2(data, mmix, mask, normalize=True): - """ - Converts `data` to component space using `mmix` - - Parameters - ---------- - data : (S x T) array_like - Input data - mmix : (T x C) array_like - Mixing matrix for converting input data to component space, where `C` - is components and `T` is the same as in `data` - mask : (S,) array-like - Boolean mask array - normalize : bool, optional - Whether to z-score output. 
Default: True - - Returns - ------- - data_Z : (S x C) np.ndarray - Data in component space - """ - - # demean masked data - data_vn = stats.zscore(data[mask], axis=-1) - - # get betas of `data`~`mmix` and limit to range [-0.999, 0.999] - data_R = get_coeffs(utils.unmask(data_vn, mask), mask, mmix)[mask] - data_R[data_R < -0.999] = -0.999 - data_R[data_R > 0.999] = 0.999 - - # R-to-Z transform - data_Z = np.arctanh(data_R) - if data_Z.ndim == 1: - data_Z = np.atleast_2d(data_Z).T - - # normalize data - if normalize: - data_Zm = stats.zscore(data_Z, axis=0) - data_Z = data_Zm + (data_Z.mean(axis=0, keepdims=True) / - data_Z.std(axis=0, keepdims=True)) - return data_Z - - def fitmodels_direct(catd, mmix, mask, t2s, t2sG, tes, combmode, ref_img, fout=None, reindex=False, mmixN=None, full_sel=True): """ @@ -217,7 +179,7 @@ def fitmodels_direct(catd, mmix, mask, t2s, t2sG, tes, combmode, ref_img, # full selection including clustering criteria seldict = None if full_sel: - lgr.info('++ Performing spatial clustering of components') + LGR.info('++ Performing spatial clustering of components') for i in range(n_components): # save out files out = np.zeros((n_samp, 4)) @@ -267,6 +229,49 @@ def fitmodels_direct(catd, mmix, mask, t2s, t2sG, tes, combmode, ref_img, return seldict, comptab, betas, mmix_new +def computefeats2(data, mmix, mask, normalize=True): + """ + Converts `data` to component space using `mmix` + + Parameters + ---------- + data : (S x T) array_like + Input data + mmix : (T x C) array_like + Mixing matrix for converting input data to component space, where `C` + is components and `T` is the same as in `data` + mask : (S,) array-like + Boolean mask array + normalize : bool, optional + Whether to z-score output. Default: True + + Returns + ------- + data_Z : (S x C) np.ndarray + Data in component space + """ + + # demean masked data + data_vn = stats.zscore(data[mask], axis=-1) + + # get betas of `data`~`mmix` and limit to range [-0.999, 0.999] + data_R = get_coeffs(utils.unmask(data_vn, mask), mask, mmix)[mask] + data_R[data_R < -0.999] = -0.999 + data_R[data_R > 0.999] = 0.999 + + # R-to-Z transform + data_Z = np.arctanh(data_R) + if data_Z.ndim == 1: + data_Z = np.atleast_2d(data_Z).T + + # normalize data + if normalize: + data_Zm = stats.zscore(data_Z, axis=0) + data_Z = data_Zm + (data_Z.mean(axis=0, keepdims=True) / + data_Z.std(axis=0, keepdims=True)) + return data_Z + + def get_coeffs(data, mask, X, add_const=False): """ Performs least-squares fit of `X` against `data` @@ -307,105 +312,6 @@ def get_coeffs(data, mask, X, add_const=False): return betas -def getelbow_cons(ks, val=False): - """ - Elbow using mean/variance method - conservative - - Parameters - ---------- - ks : array_like - val : bool, optional - Return the value of the elbow instead of the index. 
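The clipping step in `computefeats2` above is what keeps the Fisher transform finite: `arctanh(+/-1)` is infinite, so correlation-scale betas are bounded at +/-0.999 first. A compact equivalent of that step:

```python
import numpy as np

# R-to-Z step from computefeats2: bound, then Fisher-transform.
data_R = np.array([0.25, -0.97, 0.9995, -1.0])  # illustrative correlation values
data_R = np.clip(data_R, -0.999, 0.999)         # same effect as the two masked assignments
data_Z = np.arctanh(data_R)                     # max magnitude is arctanh(0.999), about 3.8
```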
Default: False - - Returns - ------- - int or float - Either the elbow index (if val is True) or the values at the elbow - index (if val is False) - """ - - ks = np.sort(ks)[::-1] - nk = len(ks) - temp1 = [(ks[nk - 5 - ii - 1] > ks[nk - 5 - ii:nk].mean() + 2 * ks[nk - 5 - ii:nk].std()) - for ii in range(nk - 5)] - ds = np.array(temp1[::-1], dtype=np.int) - dsum = [] - c_ = 0 - for d_ in ds: - c_ = (c_ + d_) * d_ - dsum.append(c_) - e2 = np.argmax(np.array(dsum)) - elind = np.max([getelbow_mod(ks), e2]) - - if val: - return ks[elind] - else: - return elind - - -def getelbow_mod(ks, val=False): - """ - Elbow using linear projection method - moderate - - Parameters - ---------- - ks : array_like - val : bool, optional - Return the value of the elbow instead of the index. Default: False - - Returns - ------- - int or float - Either the elbow index (if val is True) or the values at the elbow - index (if val is False) - """ - - ks = np.sort(ks)[::-1] - n_components = ks.shape[0] - coords = np.array([np.arange(n_components), ks]) - p = coords - coords[:, 0].reshape(2, 1) - b = p[:, -1] - b_hat = np.reshape(b / np.sqrt((b ** 2).sum()), (2, 1)) - proj_p_b = p - np.dot(b_hat.T, p) * np.tile(b_hat, (1, n_components)) - d = np.sqrt((proj_p_b ** 2).sum(axis=0)) - k_min_ind = d.argmax() - - if val: - return ks[k_min_ind] - else: - return k_min_ind - - -def getelbow_aggr(ks, val=False): - """ - Elbow using curvature - aggressive - - Parameters - ---------- - ks : array_like - val : bool, optional - Default is False - - Returns - ------- - int or float - Either the elbow index (if val is True) or the values at the elbow - index (if val is False) - """ - - ks = np.sort(ks)[::-1] - dKdt = ks[:-1] - ks[1:] - dKdt2 = dKdt[:-1] - dKdt[1:] - curv = np.abs((dKdt2 / (1 + dKdt[:-1]**2.) ** (3. / 2.))) - curv[np.isnan(curv)] = -1 * 10**6 - maxcurv = np.argmax(curv) + 2 - - if val: - return(ks[maxcurv]) - else: - return maxcurv - - def gscontrol_raw(catd, optcom, n_echos, ref_img, dtrank=4): """ Removes global signal from individual echo `catd` and `optcom` time series @@ -438,7 +344,7 @@ def gscontrol_raw(catd, optcom, n_echos, ref_img, dtrank=4): Input `optcom` with global signal removed from time series """ - lgr.info('++ Applying amplitude-based T1 equilibration correction') + LGR.info('++ Applying amplitude-based T1 equilibration correction') # Legendre polynomial basis for denoising bounds = np.linspace(-1, 1, optcom.shape[-1]) diff --git a/tedana/model/t2smap.py b/tedana/model/monoexponential.py similarity index 74% rename from tedana/model/t2smap.py rename to tedana/model/monoexponential.py index ced02fe2d..6a52d2a1e 100644 --- a/tedana/model/t2smap.py +++ b/tedana/model/monoexponential.py @@ -1,129 +1,20 @@ -import numpy as np -from tedana import utils - +""" +Functions to estimate S0 and T2* from multi-echo data. +""" import logging -logging.basicConfig(format='[%(levelname)s]: %(message)s', level=logging.INFO) -lgr = logging.getLogger(__name__) - - -def fit(data, mask, tes, masksum, start_echo): - """ - Fit voxel- and timepoint-wise monoexponential decay models to estimate - T2* and S0 timeseries. 
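`gscontrol_raw` above opens by building detrending `bounds` on [-1, 1], and `fit.py` imports `scipy.special.lpmv`; the basis construction itself is not shown in these hunks, so the following is only a plausible sketch (the degree range and column stacking are assumptions) of a Legendre detrending basis built that way:

```python
import numpy as np
from scipy.special import lpmv

# Assumed continuation: Legendre polynomial basis on [-1, 1], degrees 0..dtrank-1.
# lpmv(0, v, x) evaluates the ordinary Legendre polynomial P_v(x).
n_vols, dtrank = 200, 4
bounds = np.linspace(-1, 1, n_vols)
Lmix = np.column_stack([lpmv(0, deg, bounds) for deg in range(dtrank)])  # (T x dtrank)
# Detrending then amounts to regressing Lmix out of each time series.
```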
- """ - nx, ny, nz, n_echos, n_trs = data.shape - echodata = data[mask] - tes = np.array(tes) - - t2sa_ts = np.zeros([nx, ny, nz, n_trs]) - s0va_ts = np.zeros([nx, ny, nz, n_trs]) - t2saf_ts = np.zeros([nx, ny, nz, n_trs]) - s0vaf_ts = np.zeros([nx, ny, nz, n_trs]) - for vol in range(echodata.shape[-1]): - t2ss = np.zeros([nx, ny, nz, n_echos - 1]) - s0vs = t2ss.copy() - # Fit monoexponential decay first for first echo only, - # then first two echoes, etc. - for i_echo in range(start_echo, n_echos + 1): - B = np.abs(echodata[:, :i_echo, vol]) + 1 - B = np.log(B).transpose() - neg_tes = -1 * tes[:i_echo] - - # First row is constant, second is TEs for decay curve - # Independent variables for least-squares model - x = np.array([np.ones(i_echo), neg_tes]) - X = np.sort(x)[:, ::-1].transpose() - - beta, _, _, _ = np.linalg.lstsq(X, B) - t2s = 1. / beta[1, :].transpose() - s0 = np.exp(beta[0, :]).transpose() - - t2s[np.isinf(t2s)] = 500. - s0[np.isnan(s0)] = 0. - - t2ss[:, :, :, i_echo-2] = np.squeeze(utils.unmask(t2s, mask)) - s0vs[:, :, :, i_echo-2] = np.squeeze(utils.unmask(s0, mask)) - - # Limited T2* and S0 maps - fl = np.zeros([nx, ny, nz, len(tes)-1], bool) - for i_echo in range(n_echos - 1): - fl_ = np.squeeze(fl[:, :, :, i_echo]) - fl_[masksum == i_echo + 2] = True - fl[:, :, :, i_echo] = fl_ - t2sa = np.squeeze(utils.unmask(t2ss[fl], masksum > 1)) - s0va = np.squeeze(utils.unmask(s0vs[fl], masksum > 1)) - - # Full T2* maps with S0 estimation errors - t2saf = t2sa.copy() - s0vaf = s0va.copy() - t2saf[masksum == 1] = t2ss[masksum == 1, 0] - s0vaf[masksum == 1] = s0vs[masksum == 1, 0] - - t2sa_ts[:, :, :, vol] = t2sa - s0va_ts[:, :, :, vol] = s0va - t2saf_ts[:, :, :, vol] = t2saf - s0vaf_ts[:, :, :, vol] = s0vaf - - return t2sa_ts, s0va_ts, t2saf_ts, s0vaf_ts +import numpy as np +from tedana import utils -def make_optcom(data, t2s, tes, mask, combmode): - """ - Optimally combine BOLD data across TEs. +logging.basicConfig(format='[%(levelname)s]: %(message)s', level=logging.INFO) +LGR = logging.getLogger(__name__) - Parameters - ---------- - data : (S x E x T) :obj:`numpy.ndarray` - Concatenated BOLD data. - t2 : (S,) :obj:`numpy.ndarray` - Estimated T2* values. - tes : :obj:`numpy.ndarray` - Array of TEs, in seconds. - mask : (S,) :obj:`numpy.ndarray` - Brain mask in 3D array. - combmode : :obj:`str` - How to combine data. Either 'ste' or 't2s'. - useG : :obj:`bool`, optional - Use G. Default is False. - Returns - ------- - fout : (S x T) :obj:`numpy.ndarray` - Optimally combined data. +def fit_decay(data, tes, mask, masksum, start_echo): """ + Fit voxel-wise monoexponential decay models to estimate T2* and S0 maps. - n_samp, n_echos, n_vols = data.shape - mdata = data[mask] - tes = np.array(tes)[np.newaxis] # (1 x E) array_like - - if t2s.ndim == 1: - lgr.info('++ Optimally combining data with voxel-wise T2 estimates') - ft2s = t2s[mask, np.newaxis] - else: - lgr.info('++ Optimally combining data with voxel- and volume-wise T2 estimates') - ft2s = t2s[mask, :, np.newaxis] - - if combmode == 'ste': - alpha = mdata.mean(axis=-1) * tes - else: - alpha = tes * np.exp(-tes / ft2s) - - if t2s.ndim == 1: - alpha = np.tile(alpha[:, :, np.newaxis], (1, 1, n_vols)) - else: - alpha = np.swapaxes(alpha, 1, 2) - ax0_idx, ax2_idx = np.where(np.all(alpha == 0, axis=1)) - alpha[ax0_idx, :, ax2_idx] = 1. 
- - fout = np.average(mdata, axis=1, weights=alpha) - fout = utils.unmask(fout, mask) - - return fout - - -def t2sadmap(data, tes, mask, masksum, start_echo): - """ Parameters ---------- data : (S x E x T) array_like @@ -193,3 +84,65 @@ def t2sadmap(data, tes, mask, masksum, start_echo): s0vaf[masksum == 1] = s0vs[masksum == 1, 0] return t2sa, s0va, t2ss, s0vs, t2saf, s0vaf + + +def fit_decay_ts(data, mask, tes, masksum, start_echo): + """ + Fit voxel- and timepoint-wise monoexponential decay models to estimate + T2* and S0 timeseries. + """ + nx, ny, nz, n_echos, n_trs = data.shape + echodata = data[mask] + tes = np.array(tes) + + t2sa_ts = np.zeros([nx, ny, nz, n_trs]) + s0va_ts = np.zeros([nx, ny, nz, n_trs]) + t2saf_ts = np.zeros([nx, ny, nz, n_trs]) + s0vaf_ts = np.zeros([nx, ny, nz, n_trs]) + + for vol in range(echodata.shape[-1]): + t2ss = np.zeros([nx, ny, nz, n_echos - 1]) + s0vs = t2ss.copy() + # Fit monoexponential decay first for first echo only, + # then first two echoes, etc. + for i_echo in range(start_echo, n_echos + 1): + B = np.abs(echodata[:, :i_echo, vol]) + 1 + B = np.log(B).transpose() + neg_tes = -1 * tes[:i_echo] + + # First row is constant, second is TEs for decay curve + # Independent variables for least-squares model + x = np.array([np.ones(i_echo), neg_tes]) + X = np.sort(x)[:, ::-1].transpose() + + beta, _, _, _ = np.linalg.lstsq(X, B) + t2s = 1. / beta[1, :].transpose() + s0 = np.exp(beta[0, :]).transpose() + + t2s[np.isinf(t2s)] = 500. + s0[np.isnan(s0)] = 0. + + t2ss[:, :, :, i_echo-2] = np.squeeze(utils.unmask(t2s, mask)) + s0vs[:, :, :, i_echo-2] = np.squeeze(utils.unmask(s0, mask)) + + # Limited T2* and S0 maps + fl = np.zeros([nx, ny, nz, len(tes)-1], bool) + for i_echo in range(n_echos - 1): + fl_ = np.squeeze(fl[:, :, :, i_echo]) + fl_[masksum == i_echo + 2] = True + fl[:, :, :, i_echo] = fl_ + t2sa = np.squeeze(utils.unmask(t2ss[fl], masksum > 1)) + s0va = np.squeeze(utils.unmask(s0vs[fl], masksum > 1)) + + # Full T2* maps with S0 estimation errors + t2saf = t2sa.copy() + s0vaf = s0va.copy() + t2saf[masksum == 1] = t2ss[masksum == 1, 0] + s0vaf[masksum == 1] = s0vs[masksum == 1, 0] + + t2sa_ts[:, :, :, vol] = t2sa + s0va_ts[:, :, :, vol] = s0va + t2saf_ts[:, :, :, vol] = t2saf + s0vaf_ts[:, :, :, vol] = s0vaf + + return t2sa_ts, s0va_ts, t2saf_ts, s0vaf_ts diff --git a/tedana/select/__init__.py b/tedana/selection/__init__.py similarity index 100% rename from tedana/select/__init__.py rename to tedana/selection/__init__.py diff --git a/tedana/selection/_utils.py b/tedana/selection/_utils.py new file mode 100644 index 000000000..f86473c11 --- /dev/null +++ b/tedana/selection/_utils.py @@ -0,0 +1,153 @@ +""" +Utility functions for tedana.select +""" +import logging + +import numpy as np +from sklearn import svm + +logging.basicConfig(format='[%(levelname)s]: %(message)s', level=logging.INFO) +LGR = logging.getLogger(__name__) + +F_MAX = 500 +Z_MAX = 8 + + +def do_svm(X_train, y_train, X_test, svmtype=0): + """ + Implements Support Vector Classification on provided data + + Parameters + ---------- + X_train : (N1 x F) array_like + Training vectors, where n_samples is the number of samples in the + training dataset and n_features is the number of features. + y_train : (N1,) array_like + Target values (class labels in classification, real numbers in + regression) + X_test : (N2 x F) array_like + Test vectors, where n_samples is the number of samples in the test + dataset and n_features is the number of features. 
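A consistency nit in the new `monoexponential.py`: the two public fits expose different argument orders, `fit_decay(data, tes, mask, ...)` versus `fit_decay_ts(data, mask, tes, ...)`, which is easy to trip over at call sites. Hypothetical call sites (inputs not defined here) showing how keywords sidestep the swap:

```python
# Hypothetical call sites; passing by keyword guards against the swapped
# (tes, mask) positions between the two functions.
from tedana.model import fit_decay, fit_decay_ts

t2s, s0, t2ss, s0vs, t2saf, s0vaf = fit_decay(
    data, tes=tes, mask=mask, masksum=masksum, start_echo=1)
t2s_ts, s0_ts, t2sf_ts, s0f_ts = fit_decay_ts(
    data, mask=mask, tes=tes, masksum=masksum, start_echo=1)
```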
+ svmtype : int, optional + Desired support vector machine type. Must be in [0, 1, 2]. Default: 0 + + Returns + ------- + y_pred : (N2,) np.ndarray + Predicted class labels for samples in `X_test` + clf : {:obj:`sklearn.svm.classes.SVC`, :obj:`sklearn.svm.classes.LinearSVC`} + Trained sklearn model instance + """ + + if svmtype == 0: + clf = svm.SVC(kernel='linear') + elif svmtype == 1: + clf = svm.LinearSVC(loss='squared_hinge', penalty='l1', dual=False) + elif svmtype == 2: + clf = svm.SVC(kernel='linear', probability=True) + else: + raise ValueError('Input svmtype not in [0, 1, 2]: {}'.format(svmtype)) + + clf.fit(X_train, y_train) + y_pred = clf.predict(X_test) + + return y_pred, clf + + +def getelbow_cons(ks, val=False): + """ + Elbow using mean/variance method - conservative + + Parameters + ---------- + ks : array_like + val : bool, optional + Return the value of the elbow instead of the index. Default: False + + Returns + ------- + int or float + Either the elbow index (if val is True) or the values at the elbow + index (if val is False) + """ + + ks = np.sort(ks)[::-1] + nk = len(ks) + temp1 = [(ks[nk - 5 - ii - 1] > ks[nk - 5 - ii:nk].mean() + 2 * ks[nk - 5 - ii:nk].std()) + for ii in range(nk - 5)] + ds = np.array(temp1[::-1], dtype=np.int) + dsum = [] + c_ = 0 + for d_ in ds: + c_ = (c_ + d_) * d_ + dsum.append(c_) + e2 = np.argmax(np.array(dsum)) + elind = np.max([getelbow_mod(ks), e2]) + + if val: + return ks[elind] + else: + return elind + + +def getelbow_mod(ks, val=False): + """ + Elbow using linear projection method - moderate + + Parameters + ---------- + ks : array_like + val : bool, optional + Return the value of the elbow instead of the index. Default: False + + Returns + ------- + int or float + Either the elbow index (if val is True) or the values at the elbow + index (if val is False) + """ + + ks = np.sort(ks)[::-1] + n_components = ks.shape[0] + coords = np.array([np.arange(n_components), ks]) + p = coords - coords[:, 0].reshape(2, 1) + b = p[:, -1] + b_hat = np.reshape(b / np.sqrt((b ** 2).sum()), (2, 1)) + proj_p_b = p - np.dot(b_hat.T, p) * np.tile(b_hat, (1, n_components)) + d = np.sqrt((proj_p_b ** 2).sum(axis=0)) + k_min_ind = d.argmax() + + if val: + return ks[k_min_ind] + else: + return k_min_ind + + +def getelbow_aggr(ks, val=False): + """ + Elbow using curvature - aggressive + + Parameters + ---------- + ks : array_like + val : bool, optional + Default is False + + Returns + ------- + int or float + Either the elbow index (if val is True) or the values at the elbow + index (if val is False) + """ + + ks = np.sort(ks)[::-1] + dKdt = ks[:-1] - ks[1:] + dKdt2 = dKdt[:-1] - dKdt[1:] + curv = np.abs((dKdt2 / (1 + dKdt[:-1]**2.) ** (3. / 2.))) + curv[np.isnan(curv)] = -1 * 10**6 + maxcurv = np.argmax(curv) + 2 + + if val: + return(ks[maxcurv]) + else: + return maxcurv diff --git a/tedana/select/select_comps.py b/tedana/selection/select_comps.py similarity index 87% rename from tedana/select/select_comps.py rename to tedana/selection/select_comps.py index 9c8a0bd86..65c9836ec 100644 --- a/tedana/select/select_comps.py +++ b/tedana/selection/select_comps.py @@ -1,59 +1,24 @@ +""" +Functions to identify TE-dependent and TE-independent components. 
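`do_svm` moves into `selection/_utils.py` verbatim; for reference, a quick usage sketch of the `svmtype=1` branch it wraps (an L1-penalized linear SVC, with random stand-in data):

```python
import numpy as np
from sklearn import svm

# The classifier do_svm builds when svmtype == 1.
rng = np.random.default_rng(1)
X_train = rng.standard_normal((40, 5))
y_train = rng.integers(0, 2, 40)
X_test = rng.standard_normal((10, 5))

clf = svm.LinearSVC(loss='squared_hinge', penalty='l1', dual=False)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
```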
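All three elbow finders now live in `selection._utils`. The "moderate" variant picks the point of the descending curve farthest, perpendicularly, from the chord joining its first and last points; the same geometry in compact form:

```python
import numpy as np

# getelbow_mod's linear-projection elbow on an illustrative descending curve.
ks = np.sort(np.array([80., 40., 20., 10., 6., 5., 4.5, 4.]))[::-1]
n = ks.shape[0]
coords = np.array([np.arange(n), ks])
p = coords - coords[:, 0].reshape(2, 1)        # shift so the first point is the origin
b_hat = p[:, -1] / np.linalg.norm(p[:, -1])    # unit vector toward the last point
proj = p - np.outer(b_hat, b_hat @ p)          # remove the component along the chord
elbow = np.sqrt((proj ** 2).sum(axis=0)).argmax()  # index of max perpendicular distance
```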
+""" import json import pickle +import logging + import numpy as np from scipy import stats -from sklearn import svm from sklearn.cluster import DBSCAN -from tedana import model, utils - -import logging -logging.basicConfig(format='[%(levelname)s]: %(message)s', level=logging.INFO) -lgr = logging.getLogger(__name__) - - -def do_svm(X_train, y_train, X_test, svmtype=0): - """ - Implements Support Vector Classification on provided data - - Parameters - ---------- - X_train : (N1 x F) array_like - Training vectors, where n_samples is the number of samples in the - training dataset and n_features is the number of features. - y_train : (N1,) array_like - Target values (class labels in classification, real numbers in - regression) - X_test : (N2 x F) array_like - Test vectors, where n_samples is the number of samples in the test - dataset and n_features is the number of features. - svmtype : int, optional - Desired support vector machine type. Must be in [0, 1, 2]. Default: 0 - Returns - ------- - y_pred : (N2,) np.ndarray - Predicted class labels for samples in `X_test` - clf : {:obj:`sklearn.svm.classes.SVC`, :obj:`sklearn.svm.classes.LinearSVC`} - Trained sklearn model instance - """ +from tedana import utils +from tedana.selection._utils import (getelbow_cons, getelbow_mod, + getelbow_aggr, do_svm) - if svmtype == 0: - clf = svm.SVC(kernel='linear') - elif svmtype == 1: - clf = svm.LinearSVC(loss='squared_hinge', penalty='l1', dual=False) - elif svmtype == 2: - clf = svm.SVC(kernel='linear', probability=True) - else: - raise ValueError('Input svmtype not in [0, 1, 2]: {}'.format(svmtype)) - - clf.fit(X_train, y_train) - y_pred = clf.predict(X_test) - - return y_pred, clf +logging.basicConfig(format='[%(levelname)s]: %(message)s', level=logging.INFO) +LGR = logging.getLogger(__name__) -def selcomps(seldict, mmix, mask, ref_img, manacc, n_echos, t2s, s0, olevel=2, oversion=99, - filecsdata=True, savecsdiag=True, strict_mode=False): +def selcomps(seldict, mmix, mask, ref_img, manacc, n_echos, t2s, s0, olevel=2, + oversion=99, filecsdata=True, savecsdiag=True, strict_mode=False): """ Labels components in `mmix` @@ -99,7 +64,7 @@ def selcomps(seldict, mmix, mask, ref_img, manacc, n_echos, t2s, s0, olevel=2, o import bz2 if seldict is not None: - lgr.info('++ Saving component selection data') + LGR.info('++ Saving component selection data') with bz2.BZ2File('compseldata.pklbz', 'wb') as csstate_f: pickle.dump(seldict, csstate_f) else: @@ -107,7 +72,7 @@ def selcomps(seldict, mmix, mask, ref_img, manacc, n_echos, t2s, s0, olevel=2, o with bz2.BZ2File('compseldata.pklbz', 'rb') as csstate_f: seldict = pickle.load(csstate_f) except FileNotFoundError: - lgr.warning('++ Failed to load component selection data') + LGR.warning('++ Failed to load component selection data') return None # List of components @@ -243,32 +208,32 @@ def selcomps(seldict, mmix, mask, ref_img, manacc, n_echos, t2s, s0, olevel=2, o Rhos_sorted = np.array(sorted(seldict['Rhos']))[::-1] # Make an initial guess as to number of good components based on # consensus of control points across Rhos and Kappas - KRcutguesses = [model.getelbow_mod(seldict['Rhos']), model.getelbow_cons(seldict['Rhos']), - model.getelbow_aggr(seldict['Rhos']), model.getelbow_mod(seldict['Kappas']), - model.getelbow_cons(seldict['Kappas']), model.getelbow_aggr(seldict['Kappas'])] - Khighelbowval = stats.scoreatpercentile([model.getelbow_mod(seldict['Kappas'], val=True), - model.getelbow_cons(seldict['Kappas'], val=True), - model.getelbow_aggr(seldict['Kappas'], 
val=True)] + + KRcutguesses = [getelbow_mod(seldict['Rhos']), getelbow_cons(seldict['Rhos']), + getelbow_aggr(seldict['Rhos']), getelbow_mod(seldict['Kappas']), + getelbow_cons(seldict['Kappas']), getelbow_aggr(seldict['Kappas'])] + Khighelbowval = stats.scoreatpercentile([getelbow_mod(seldict['Kappas'], val=True), + getelbow_cons(seldict['Kappas'], val=True), + getelbow_aggr(seldict['Kappas'], val=True)] + list(utils.getfbounds(n_echos)), 75, interpolation_method='lower') KRcut = np.median(KRcutguesses) # only use exclusive when inclusive is extremely inclusive - double KRcut - cond1 = model.getelbow_cons(seldict['Kappas']) > KRcut * 2 - cond2 = model.getelbow_mod(seldict['Kappas'], val=True) < F01 + cond1 = getelbow_cons(seldict['Kappas']) > KRcut * 2 + cond2 = getelbow_mod(seldict['Kappas'], val=True) < F01 if cond1 and cond2: - Kcut = model.getelbow_mod(seldict['Kappas'], val=True) + Kcut = getelbow_mod(seldict['Kappas'], val=True) else: - Kcut = model.getelbow_cons(seldict['Kappas'], val=True) + Kcut = getelbow_cons(seldict['Kappas'], val=True) # only use inclusive when exclusive is extremely exclusive - half KRcut # (remember for Rho inclusive is higher, so want both Kappa and Rho # to defaut to lower) - if model.getelbow_cons(seldict['Rhos']) > KRcut * 2: - Rcut = model.getelbow_mod(seldict['Rhos'], val=True) + if getelbow_cons(seldict['Rhos']) > KRcut * 2: + Rcut = getelbow_mod(seldict['Rhos'], val=True) # for above, consider something like: - # min([model.getelbow_mod(Rhos,True),sorted(Rhos)[::-1][KRguess] ]) + # min([getelbow_mod(Rhos,True),sorted(Rhos)[::-1][KRguess] ]) else: - Rcut = model.getelbow_cons(seldict['Rhos'], val=True) + Rcut = getelbow_cons(seldict['Rhos'], val=True) if Rcut > Kcut: Kcut = Rcut # Rcut should never be higher than Kcut KRelbow = utils.andb([seldict['Kappas'] > Kcut, seldict['Rhos'] < Rcut]) @@ -304,9 +269,9 @@ def selcomps(seldict, mmix, mask, ref_img, manacc, n_echos, t2s, s0, olevel=2, o if cond1 and cond2 and cond3 and cond4: epsmap.append([ii, utils.dice(guessmask, db.labels_ == 0), np.intersect1d(nc[db.labels_ == 0], - nc[seldict['Rhos'] > model.getelbow_mod(Rhos_sorted, - val=True)]).shape[0]]) - lgr.debug('++ Found solution', ii, db.labels_) + nc[seldict['Rhos'] > getelbow_mod(Rhos_sorted, + val=True)]).shape[0]]) + LGR.debug('++ Found solution', ii, db.labels_) db = None epsmap = np.array(epsmap) @@ -316,7 +281,7 @@ def selcomps(seldict, mmix, mask, ref_img, manacc, n_echos, t2s, s0, olevel=2, o # Select index that maximizes Dice with guessmask but first # minimizes number of higher Rho components ii = int(epsmap[np.argmax(epsmap[epsmap[:, 2] == np.min(epsmap[:, 2]), 1], 0), 0]) - lgr.info('++ Component selection tuning: {:.05f}'.format(epsmap[:, 1].max())) + LGR.info('++ Component selection tuning: {:.05f}'.format(epsmap[:, 1].max())) db = DBSCAN(eps=.005+ii*.005, min_samples=3).fit(fz.T) ncl = nc[db.labels_ == 0] ncl = np.setdiff1d(ncl, rej) @@ -326,7 +291,7 @@ def selcomps(seldict, mmix, mask, ref_img, manacc, n_echos, t2s, s0, olevel=2, o to_clf = np.setdiff1d(nc, np.union1d(ncl, rej)) if len(group0) == 0 or len(group0) < len(KRguess) * .5: dbscanfailed = True - lgr.info('++ DBSCAN based guess failed. Using elbow guess method.') + LGR.info('++ DBSCAN based guess failed. 
Using elbow guess method.') ncl = np.setdiff1d(np.setdiff1d(nc[KRelbow == 2], rej), np.union1d(nc[tt_table[:, 0] < tt_lim], np.union1d(np.union1d(nc[spz > 1], @@ -338,7 +303,7 @@ def selcomps(seldict, mmix, mask, ref_img, manacc, n_echos, t2s, s0, olevel=2, o group_n1 = [] to_clf = np.setdiff1d(nc, np.union1d(group0, rej)) if len(group0) < 2 or (len(group0) < 4 and float(len(rej))/len(group0) > 3): - lgr.warning('++ Extremely limited reliable BOLD signal space. ' + LGR.warning('++ Extremely limited reliable BOLD signal space. ' 'Not filtering further into midk etc.') midkfailed = True min_acc = np.array([]) @@ -482,7 +447,7 @@ def selcomps(seldict, mmix, mask, ref_img, manacc, n_echos, t2s, s0, olevel=2, o to_ign = [] - minK_ign = np.max([F05, model.getelbow_cons(seldict['Kappas'], val=True)]) + minK_ign = np.max([F05, getelbow_cons(seldict['Kappas'], val=True)]) newcest = len(group0) + len(toacc_hi[seldict['Kappas'][toacc_hi] > minK_ign]) phys_art = np.setdiff1d(nc[utils.andb([phys_var_z > 3.5, seldict['Kappas'] < minK_ign]) == 2], group0) diff --git a/tedana/utils/__init__.py b/tedana/utils/__init__.py index 914619181..07e5f1d4c 100644 --- a/tedana/utils/__init__.py +++ b/tedana/utils/__init__.py @@ -10,9 +10,20 @@ ) +from .io import ( + gscontrol_mmix, split_ts, write_split_ts, writefeats, + writect, writeresults, writeresults_echoes, ctabsel, +) + + __all__ = [ 'load_image', 'load_data', 'get_dtype', 'getfbounds', 'make_min_mask', 'make_adaptive_mask', 'unmask', 'filewrite', 'new_nii_like', - 'fitgaussian', 'dice', 'andb'] + 'fitgaussian', 'dice', 'andb', + 'ctabsel', 'gscontrol_mmix', + 'split_ts', 'write_split_ts', + 'writefeats', 'writect', 'writeresults', + 'writeresults_echoes', + ] diff --git a/tedana/io/output_data.py b/tedana/utils/io.py similarity index 94% rename from tedana/io/output_data.py rename to tedana/utils/io.py index 3d68f9783..590e61a9a 100644 --- a/tedana/io/output_data.py +++ b/tedana/utils/io.py @@ -1,3 +1,6 @@ +""" +Functions to handle file input/output +""" import textwrap import numpy as np import os.path as op @@ -349,3 +352,30 @@ def writeresults_echoes(catd, mmix, mask, acc, rej, midk, ref_img): lgr.info('++ Writing Kappa-filtered echo #{:01d} timeseries'.format(i_echo+1)) write_split_ts(catd[:, i_echo, :], mmix, mask, acc, rej, midk, ref_img, suffix='e%i' % (i_echo+1)) + + +def ctabsel(ctabfile): + """ + Loads a pre-existing component table file + + Parameters + ---------- + ctabfile : str + Filepath to existing component table + + Returns + ------- + ctab : (4,) tuple-of-arrays + Tuple containing arrays of (1) accepted, (2) rejected, (3) mid, and (4) + ignored components + """ + + with open(ctabfile, 'r') as src: + ctlines = src.readlines() + class_tags = ['#ACC', '#REJ', '#MID', '#IGN'] + class_dict = {} + for ii, ll in enumerate(ctlines): + for kk in class_tags: + if ll[:4] is kk and ll[4:].strip() is not '': + class_dict[kk] = ll[4:].split('#')[0].split(',') + return tuple([np.array(class_dict[kk], dtype=int) for kk in class_tags]) diff --git a/tedana/workflows/t2smap.py b/tedana/workflows/t2smap.py index 9ab4859d6..ac1f5d82c 100644 --- a/tedana/workflows/t2smap.py +++ b/tedana/workflows/t2smap.py @@ -36,7 +36,7 @@ def main(options): utils.filewrite(masksum, 'masksum%s' % suf, ref_img, copy_header=False) lgr.info("++ Computing Adaptive T2* map") - t2s, s0, t2ss, s0vs, t2saf, s0vaf = model.t2sadmap(catd, tes, mask, masksum, 2) + t2s, s0, t2ss, s0vs, t2saf, s0vaf = model.fit_decay(catd, tes, mask, masksum, 2) utils.filewrite(t2ss, 't2ss%s' % suf, 
ref_img, copy_header=False) utils.filewrite(s0vs, 's0vs%s' % suf, ref_img, copy_header=False) diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 513bfd854..14bd8479c 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -3,7 +3,7 @@ import numpy as np import os.path as op from scipy import stats -from tedana import (decomp, io, model, select, utils) +from tedana import (decomposition, model, selection, utils) import logging logging.basicConfig(format='[%(levelname)s]: %(message)s', level=logging.INFO) @@ -122,8 +122,8 @@ def main(data, tes, mixm=None, ctab=None, manacc=None, strict=False, mask, masksum = utils.make_adaptive_mask(catd, minimum=False, getsum=True) lgr.info('++ Computing T2* map') - t2s, s0, t2ss, s0s, t2sG, s0G = model.t2sadmap(catd, tes, mask, masksum, - start_echo=1) + t2s, s0, t2ss, s0s, t2sG, s0G = model.fit_decay(catd, tes, mask, masksum, + start_echo=1) # set a hard cap for the T2* map # anything that is 10x higher than the 99.5 %ile will be reset to 99.5 %ile @@ -146,11 +146,11 @@ def main(data, tes, mixm=None, ctab=None, manacc=None, strict=False, if mixm is None: lgr.info("++ Doing ME-PCA and ME-ICA") - n_components, dd = decomp.tedpca(catd, OCcatd, combmode, mask, t2s, t2sG, - stabilize, ref_img, - tes=tes, kdaw=kdaw, rdaw=rdaw, ste=ste) - mmix_orig = decomp.tedica(n_components, dd, conv, fixed_seed, cost=initcost, - final_cost=finalcost) + n_components, dd = decomposition.tedpca(catd, OCcatd, combmode, mask, t2s, t2sG, + stabilize, ref_img, + tes=tes, kdaw=kdaw, rdaw=rdaw, ste=ste) + mmix_orig = decomposition.tedica(n_components, dd, conv, fixed_seed, cost=initcost, + final_cost=finalcost) np.savetxt(op.join(out_dir, '__meica_mix.1D'), mmix_orig) seldict, comptable, betas, mmix = model.fitmodels_direct(catd, mmix_orig, mask, t2s, t2sG, @@ -160,9 +160,9 @@ def main(data, tes, mixm=None, ctab=None, manacc=None, strict=False, reindex=True) np.savetxt(op.join(out_dir, 'meica_mix.1D'), mmix) - acc, rej, midk, empty = select.selcomps(seldict, mmix, mask, ref_img, manacc, - n_echos, t2s, s0, strict_mode=strict, - filecsdata=filecsdata) + acc, rej, midk, empty = selection.selcomps(seldict, mmix, mask, ref_img, manacc, + n_echos, t2s, s0, strict_mode=strict, + filecsdata=filecsdata) else: mmix_orig = np.loadtxt(op.join(out_dir, 'meica_mix.1D')) seldict, comptable, betas, mmix = model.fitmodels_direct(catd, mmix_orig, @@ -171,17 +171,17 @@ def main(data, tes, mixm=None, ctab=None, manacc=None, strict=False, ref_img, fout=fout) if ctab is None: - acc, rej, midk, empty = select.selcomps(seldict, mmix, mask, ref_img, manacc, - n_echos, t2s, s0, - filecsdata=filecsdata, - strict_mode=strict) + acc, rej, midk, empty = selection.selcomps(seldict, mmix, mask, ref_img, manacc, + n_echos, t2s, s0, + filecsdata=filecsdata, + strict_mode=strict) else: - acc, rej, midk, empty = io.ctabsel(ctab) + acc, rej, midk, empty = utils.ctabsel(ctab) if len(acc) == 0: lgr.warning('++ No BOLD components detected!!! Please check data and results!') - io.writeresults(OCcatd, mask, comptable, mmix, n_vols, acc, rej, midk, empty, ref_img) - io.gscontrol_mmix(OCcatd, mmix, mask, acc, rej, midk, ref_img) + utils.writeresults(OCcatd, mask, comptable, mmix, n_vols, acc, rej, midk, empty, ref_img) + utils.gscontrol_mmix(OCcatd, mmix, mask, acc, rej, midk, ref_img) if dne: - io.writeresults_echoes(catd, mmix, mask, acc, rej, midk, ref_img) + utils.writeresults_echoes(catd, mmix, mask, acc, rej, midk, ref_img)
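Closing note on the DBSCAN pass in `selcomps`: it sweeps `eps` in steps of 0.005 over the component feature z-scores and keeps the solution that maximizes Dice overlap with the initial guess while minimizing high-Rho contamination. A stripped-down sketch of that sweep (`fz` is random stand-in data here, and the scoring is a placeholder for the Dice-vs-guessmask criterion in the hunks above):

```python
import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(0)
fz = rng.standard_normal((7, 50))            # stand-in features-by-components matrix

results = []
for ii in range(20):
    db = DBSCAN(eps=0.005 + ii * 0.005, min_samples=3).fit(fz.T)
    in_cluster0 = db.labels_ == 0
    if 0 < in_cluster0.sum() < fz.shape[1]:  # reject degenerate clusterings
        results.append((ii, in_cluster0.sum()))  # selcomps scores Dice/Rho here instead
```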
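One functional nit rides along with the io shuffle: `ctabsel` (moved into `tedana/utils/io.py`) still compares strings with `is`/`is not`, which test object identity rather than equality; the sliced `ll[:4]` is a fresh object, so the identity test can silently fail to match, and newer CPythons emit a SyntaxWarning for literal identity tests. Relatedly, the moved module keeps the lowercase `lgr` logger name that the rest of this changeset renames to `LGR`. An equality-based version of the parsing loop, with illustrative input lines:

```python
# Equality-based rewrite of ctabsel's classification loop (sketch);
# the hunk's version uses `ll[:4] is kk` and `ll[4:].strip() is not ''`.
class_tags = ['#ACC', '#REJ', '#MID', '#IGN']
class_dict = {}
ctlines = ['#ACC 1,2,3 # accepted\n', '#REJ 4 # rejected\n']  # illustrative lines
for ll in ctlines:
    for kk in class_tags:
        if ll[:4] == kk and ll[4:].strip() != '':   # `==`/`!=`, not `is`/`is not`
            class_dict[kk] = ll[4:].split('#')[0].split(',')
```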