diff --git a/.codecov.yml b/.codecov.yml
index 008ae097a..d5fe2b824 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -16,5 +16,4 @@ coverage:
ignore:
- "tedana/tests/"
- - "tedana/due.py"
- "tedana/_version.py"
diff --git a/docs/api.rst b/docs/api.rst
index 1cc3bf7eb..dfe600209 100644
--- a/docs/api.rst
+++ b/docs/api.rst
@@ -198,6 +198,30 @@ API
tedana.stats.getfbounds
+.. _api_bibtex_ref:
+
+*********************************************************
+:mod:`tedana.bibtex`: Tools for working with BibTeX files
+*********************************************************
+
+.. automodule:: tedana.bibtex
+ :no-members:
+ :no-inherited-members:
+
+.. currentmodule:: tedana.bibtex
+
+.. autosummary::
+ :toctree: generated/
+ :template: function.rst
+
+ tedana.bibtex.find_braces
+ tedana.bibtex.reduce_idx
+ tedana.bibtex.index_bibtex_identifiers
+ tedana.bibtex.find_citations
+ tedana.bibtex.reduce_references
+ tedana.bibtex.get_description_references
+
+
.. _api_utils_ref:
**************************************
diff --git a/docs/conf.py b/docs/conf.py
index 856a557b6..44aa32495 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -20,6 +20,8 @@
import os
import sys
+import sphinx_rtd_theme
+
sys.path.insert(0, os.path.abspath("sphinxext"))
sys.path.insert(0, os.path.abspath(os.path.pardir))
@@ -50,6 +52,7 @@
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinxarg.ext",
+ "sphinxcontrib.bibtex", # for foot-citations
]
import sphinx
@@ -127,8 +130,6 @@
# a list of builtin themes.
#
# installing theme package
-import sphinx_rtd_theme
-
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
@@ -153,6 +154,13 @@ def setup(app):
html_favicon = "_static/tedana_favicon.png"
+# -----------------------------------------------------------------------------
+# sphinxcontrib-bibtex
+# -----------------------------------------------------------------------------
+bibtex_bibfiles = ["../tedana/resources/references.bib"]
+bibtex_style = "unsrt"
+bibtex_reference_style = "author_year"
+bibtex_footbibliography_header = ""
# -- Options for HTMLHelp output ------------------------------------------
diff --git a/docs/faq.rst b/docs/faq.rst
index cc6dceff0..72ef2b439 100644
--- a/docs/faq.rst
+++ b/docs/faq.rst
@@ -97,18 +97,6 @@ the v3.2 code, with the goal of revisiting it when ``tedana`` is more stable.
Anyone interested in using v3.2 may compile and install an earlier release (<=0.0.4) of ``tedana``.
-*************************************************
-[tedana] What is the warning about ``duecredit``?
-*************************************************
-
-``duecredit`` is a python package that is used, but not required by ``tedana``.
-These warnings do not affect any of the processing within the ``tedana``.
-To avoid this warning, you can install ``duecredit`` with ``pip install duecredit``.
-For more information about ``duecredit`` and concerns about
-the citation and visibility of software or methods, visit the `duecredit`_ GitHub repository.
-
-.. _duecredit: https://github.com/duecredit/duecredit
-
.. _here: https://bitbucket.org/prantikk/me-ica/commits/906bd1f6db7041f88cd0efcac8a74074d673f4f5
.. _NeuroStars: https://neurostars.org
diff --git a/docs/index.rst b/docs/index.rst
index 491d8d874..9a0a43508 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -112,9 +112,9 @@ When using tedana, please include the following citations:
}
- tedana
- This link is for the most recent version of the code and that page has links to DOIs
- for older versions. To support reproducibility, please cite the version you used:
+ tedana
+ This link is for the most recent version of the code and that page has links to DOIs
+ for older versions. To support reproducibility, please cite the version you used:
https://doi.org/10.5281/zenodo.1250561
@@ -143,19 +143,12 @@ When using tedana, please include the following citations:
Proceedings of the National Academy of Sciences, 110, 16187-16192.
-Alternatively, you can automatically compile relevant citations by running your
-tedana code with `duecredit`_. For example, if you plan to run a script using
-tedana (in this case, ``tedana_script.py``):
-
-.. code-block:: bash
-
- python -m duecredit tedana_script.py
+Alternatively, you can use the boilerplate text and BibTeX references (``report.txt`` and ``references.bib``) produced by the tedana workflow.
You can also learn more about `why citing software is important`_.
.. _Differentiating BOLD and non-BOLD signals in fMRI time series using multi-echo EPI.: https://doi.org/10.1016/j.neuroimage.2011.12.028
.. _Integrated strategy for improving functional connectivity mapping using multiecho fMRI.: https://doi.org/10.1073/pnas.1301725110
-.. _duecredit: https://github.com/duecredit/duecredit
.. _`why citing software is important`: https://www.software.ac.uk/how-cite-software
diff --git a/docs/installation.rst b/docs/installation.rst
index c65c654f3..4cba104c9 100644
--- a/docs/installation.rst
+++ b/docs/installation.rst
@@ -13,9 +13,6 @@ packages will need to be installed:
- scipy
- mapca
-You can also install several optional dependencies, notably ``duecredit``.
-Please see the :doc:`FAQ ` for more information on how tedana uses ``duecredit``.
-
After installing relevant dependencies, you can then install ``tedana`` with:
.. code-block:: bash
diff --git a/docs/outputs.rst b/docs/outputs.rst
index 6db20f8bf..797926134 100644
--- a/docs/outputs.rst
+++ b/docs/outputs.rst
@@ -71,6 +71,8 @@ desc-ICAAccepted_components.nii.gz High-kappa ICA coefficient f
desc-ICAAcceptedZ_components.nii.gz Z-normalized spatial component maps
report.txt A summary report for the workflow with relevant
citations.
+references.bib The BibTeX entries for references cited in
+ report.txt.
tedana_report.html The interactive HTML report.
================================================ =====================================================
@@ -400,28 +402,129 @@ The report is saved in a plain-text file, report.txt, in the output directory.
An example report
- TE-dependence analysis was performed on input data. An initial mask was generated from the first echo using nilearn's compute_epi_mask function. An adaptive mask was then generated, in which each voxel's value reflects the number of echoes with 'good' data. A monoexponential model was fit to the data at each voxel using nonlinear model fitting in order to estimate T2* and S0 maps, using T2*/S0 estimates from a log-linear fit as initial values. For each voxel, the value from the adaptive mask was used to determine which echoes would be used to estimate T2* and S0. In cases of model fit failure, T2*/S0 estimates from the log-linear fit were retained instead. Multi-echo data were then optimally combined using the T2* combination method (Posse et al., 1999). Principal component analysis in which the number of components was determined based on a variance explained threshold was applied to the optimally combined data for dimensionality reduction. A series of TE-dependence metrics were calculated for each component, including Kappa, Rho, and variance explained. Independent component analysis was then used to decompose the dimensionally reduced dataset. A series of TE-dependence metrics were calculated for each component, including Kappa, Rho, and variance explained. Next, component selection was performed to identify BOLD (TE-dependent), non-BOLD (TE-independent), and uncertain (low-variance) components using the Kundu decision tree (v2.5; Kundu et al., 2013). Rejected components' time series were then orthogonalized with respect to accepted components' time series.
+ .. note::
- This workflow used numpy (Van Der Walt, Colbert, & Varoquaux, 2011), scipy (Jones et al., 2001), pandas (McKinney, 2010), scikit-learn (Pedregosa et al., 2011), nilearn, and nibabel (Brett et al., 2019).
+ The boilerplate text includes citations in LaTeX format.
+ \\citep refers to parenthetical citations, while \\cite refers to textual ones.
- This workflow also used the Dice similarity index (Dice, 1945; Sørensen, 1948).
+ TE-dependence analysis was performed on input data using the tedana workflow \\citep{dupre2021te}.
+ An adaptive mask was then generated, in which each voxel's value reflects the number of echoes with 'good' data.
+ A two-stage masking procedure was applied, in which a liberal mask (including voxels with good data in at least the first echo) was used for optimal combination, T2*/S0 estimation, and denoising, while a more conservative mask (restricted to voxels with good data in at least the first three echoes) was used for the component classification procedure.
+ Multi-echo data were then optimally combined using the T2* combination method \\citep{posse1999enhancement}.
+ Next, components were manually classified as BOLD (TE-dependent), non-BOLD (TE-independent), or uncertain (low-variance).
+ This workflow used numpy \\citep{van2011numpy}, scipy \\citep{virtanen2020scipy}, pandas \\citep{mckinney2010data,reback2020pandas}, scikit-learn \\citep{pedregosa2011scikit}, nilearn, bokeh \\citep{bokehmanual}, matplotlib \\citep{Hunter:2007}, and nibabel \\citep{brett_matthew_2019_3233118}.
+ This workflow also used the Dice similarity index \\citep{dice1945measures,sorensen1948method}.
References
- Brett, M., Markiewicz, C. J., Hanke, M., Côté, M.-A., Cipollini, B., McCarthy, P., … freec84. (2019, May 28). nipy/nibabel. Zenodo. http://doi.org/10.5281/zenodo.3233118
-
- Dice, L. R. (1945). Measures of the amount of ecologic association between species. Ecology, 26(3), 297-302.
-
- Jones E, Oliphant E, Peterson P, et al. SciPy: Open Source Scientific Tools for Python, 2001-, http://www.scipy.org/
-
- Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., Vértes, P. E., Inati, S. J., ... & Bullmore, E. T. (2013). Integrated strategy for improving functional connectivity mapping using multiecho fMRI. Proceedings of the National Academy of Sciences, 110(40), 16187-16192.
-
- McKinney, W. (2010, June). Data structures for statistical computing in python. In Proceedings of the 9th Python in Science Conference (Vol. 445, pp. 51-56).
-
- Pedregosa, F., Varoquaux, G., Gramfort, A., Michel, V., Thirion, B., Grisel, O., ... & Vanderplas, J. (2011). Scikit-learn: Machine learning in Python. Journal of machine learning research, 12(Oct), 2825-2830.
-
- Posse, S., Wiese, S., Gembris, D., Mathiak, K., Kessler, C., Grosse‐Ruyken, M. L., ... & Kiselev, V. G. (1999). Enhancement of BOLD‐contrast sensitivity by single‐shot multi‐echo functional MR imaging. Magnetic Resonance in Medicine: An Official Journal of the International Society for Magnetic Resonance in Medicine, 42(1), 87-97.
-
- Sørensen, T. J. (1948). A method of establishing groups of equal amplitude in plant sociology based on similarity of species content and its application to analyses of the vegetation on Danish commons. I kommission hos E. Munksgaard.
-
- Van Der Walt, S., Colbert, S. C., & Varoquaux, G. (2011). The NumPy array: a structure for efficient numerical computation. Computing in Science & Engineering, 13(2), 22.
+ .. note::
+
+ The references are also provided in the ``references.bib`` output file.
+
+ .. code-block:: bibtex
+
+ @Manual{bokehmanual,
+ title = {Bokeh: Python library for interactive visualization},
+ author = {{Bokeh Development Team}},
+ year = {2018},
+ url = {https://bokeh.pydata.org/en/latest/},
+ }
+ @article{dice1945measures,
+ title={Measures of the amount of ecologic association between species},
+ author={Dice, Lee R},
+ journal={Ecology},
+ volume={26},
+ number={3},
+ pages={297--302},
+ year={1945},
+ publisher={JSTOR},
+ url={https://doi.org/10.2307/1932409},
+ doi={10.2307/1932409}
+ }
+ @article{dupre2021te,
+    title={TE-dependent analysis of multi-echo fMRI with *tedana*},
+ author={DuPre, Elizabeth and Salo, Taylor and Ahmed, Zaki and Bandettini, Peter A and Bottenhorn, Katherine L and Caballero-Gaudes, C{\'e}sar and Dowdle, Logan T and Gonzalez-Castillo, Javier and Heunis, Stephan and Kundu, Prantik and others},
+ journal={Journal of Open Source Software},
+ volume={6},
+ number={66},
+ pages={3669},
+ year={2021},
+ url={https://doi.org/10.21105/joss.03669},
+ doi={10.21105/joss.03669}
+ }
+ @inproceedings{mckinney2010data,
+ title={Data structures for statistical computing in python},
+ author={McKinney, Wes and others},
+ booktitle={Proceedings of the 9th Python in Science Conference},
+ volume={445},
+ number={1},
+ pages={51--56},
+ year={2010},
+ organization={Austin, TX},
+ url={https://doi.org/10.25080/Majora-92bf1922-00a},
+ doi={10.25080/Majora-92bf1922-00a}
+ }
+ @article{pedregosa2011scikit,
+ title={Scikit-learn: Machine learning in Python},
+ author={Pedregosa, Fabian and Varoquaux, Ga{\"e}l and Gramfort, Alexandre and Michel, Vincent and Thirion, Bertrand and Grisel, Olivier and Blondel, Mathieu and Prettenhofer, Peter and Weiss, Ron and Dubourg, Vincent and others},
+ journal={the Journal of machine Learning research},
+ volume={12},
+ pages={2825--2830},
+ year={2011},
+ publisher={JMLR. org},
+ url={http://jmlr.org/papers/v12/pedregosa11a.html}
+ }
+ @article{posse1999enhancement,
+ title={Enhancement of BOLD-contrast sensitivity by single-shot multi-echo functional MR imaging},
+ author={Posse, Stefan and Wiese, Stefan and Gembris, Daniel and Mathiak, Klaus and Kessler, Christoph and Grosse-Ruyken, Maria-Liisa and Elghahwagi, Barbara and Richards, Todd and Dager, Stephen R and Kiselev, Valerij G},
+ journal={Magnetic Resonance in Medicine: An Official Journal of the International Society for Magnetic Resonance in Medicine},
+ volume={42},
+ number={1},
+ pages={87--97},
+ year={1999},
+ publisher={Wiley Online Library},
+ url={https://doi.org/10.1002/(SICI)1522-2594(199907)42:1<87::AID-MRM13>3.0.CO;2-O},
+ doi={10.1002/(SICI)1522-2594(199907)42:1<87::AID-MRM13>3.0.CO;2-O}
+ }
+ @software{reback2020pandas,
+ author = {The pandas development team},
+ title = {pandas-dev/pandas: Pandas},
+ month = feb,
+ year = 2020,
+ publisher = {Zenodo},
+ version = {latest},
+ doi = {10.5281/zenodo.3509134},
+ url = {https://doi.org/10.5281/zenodo.3509134}
+ }
+ @article{sorensen1948method,
+ title={A method of establishing groups of equal amplitude in plant sociology based on similarity of species content and its application to analyses of the vegetation on Danish commons},
+ author={Sorensen, Th A},
+    journal={Biol. Skr.},
+ volume={5},
+ pages={1--34},
+ year={1948}
+ }
+ @article{van2011numpy,
+ title={The NumPy array: a structure for efficient numerical computation},
+ author={Van Der Walt, Stefan and Colbert, S Chris and Varoquaux, Gael},
+ journal={Computing in science \& engineering},
+ volume={13},
+ number={2},
+ pages={22--30},
+ year={2011},
+ publisher={IEEE},
+ url={https://doi.org/10.1109/MCSE.2011.37},
+ doi={10.1109/MCSE.2011.37}
+ }
+ @article{virtanen2020scipy,
+ title={SciPy 1.0: fundamental algorithms for scientific computing in Python},
+ author={Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and others},
+ journal={Nature methods},
+ volume={17},
+ number={3},
+ pages={261--272},
+ year={2020},
+ publisher={Nature Publishing Group},
+ url={https://doi.org/10.1038/s41592-019-0686-2},
+ doi={10.1038/s41592-019-0686-2}
+ }
diff --git a/setup.cfg b/setup.cfg
index 8a817bab7..375e2e04f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -41,6 +41,7 @@ doc =
sphinx>=1.5.3
sphinx_rtd_theme
sphinx-argparse
+ sphinxcontrib-bibtex
tests =
codecov
coverage<5.0
@@ -50,10 +51,7 @@ tests =
pytest
pytest-cov
requests
-duecredit =
- duecredit
all =
- %(duecredit)s
%(doc)s
%(tests)s
diff --git a/tedana/__init__.py b/tedana/__init__.py
index d8f26af05..a26b9ef10 100644
--- a/tedana/__init__.py
+++ b/tedana/__init__.py
@@ -8,42 +8,10 @@
import warnings
from ._version import get_versions
-from .due import BibTeX, Doi, due
__version__ = get_versions()["version"]
# cmp is not used, so ignore nipype-generated warnings
warnings.filterwarnings("ignore", r"cmp not installed")
-# Citation for the package JOSS paper.
-due.cite(
- Doi("10.21105/joss.03669"),
- description="Publication introducing tedana.",
- path="tedana",
- cite_module=True,
-)
-
-# Citation for the algorithm.
-due.cite(
- Doi("10.1016/j.neuroimage.2011.12.028"),
- description="Introduces MEICA and tedana.",
- path="tedana",
- cite_module=True,
-)
-due.cite(
- Doi("10.1073/pnas.1301725110"),
- description="Improves MEICA and tedana.",
- path="tedana",
- cite_module=True,
-)
-
-# Citation for package version.
-due.cite(
- Doi("10.5281/zenodo.1250561"),
- description="The tedana package",
- version=__version__,
- path="tedana",
- cite_module=True,
-)
-
del get_versions
diff --git a/tedana/bibtex.py b/tedana/bibtex.py
new file mode 100644
index 000000000..4734bbfc7
--- /dev/null
+++ b/tedana/bibtex.py
@@ -0,0 +1,193 @@
+"""
+Utilities for working with BibTeX files and citations in tedana.
+"""
+import logging
+import os.path as op
+import re
+
+import numpy as np
+import pandas as pd
+
+from tedana.utils import get_resource_path
+
+LGR = logging.getLogger("GENERAL")
+RepLGR = logging.getLogger("REPORT")
+
+
+def find_braces(string):
+ """Search a string for matched braces.
+
+ This is used to identify pairs of braces in BibTeX files.
+ The outside-most pairs should correspond to BibTeX entries.
+
+ Parameters
+ ----------
+ string : :obj:`str`
+ A long string to search for paired braces.
+
+ Returns
+ -------
+ :obj:`list` of :obj:`tuple` of :obj:`int`
+ A list of two-element tuples of indices of matched braces.
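+
+ Examples
+ --------
+ A minimal illustration on a toy entry string (not a real reference);
+ pairs are returned in the order in which each pair of braces closes:
+
+ >>> find_braces("@article{foo, title={bar}}")
+ [(20, 24), (8, 25)]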
+ """
+ toret = {}
+ pstack = []
+
+ for idx, char in enumerate(string):
+ if char == "{":
+ pstack.append(idx)
+ elif char == "}":
+ if len(pstack) == 0:
+ raise IndexError(f"No matching closing parens at: {idx}")
+
+ toret[pstack.pop()] = idx
+
+ if len(pstack) > 0:
+ raise IndexError(f"No matching opening parens at: {pstack.pop()}")
+
+ toret = list(toret.items())
+ return toret
+
+
+def reduce_idx(idx_list):
+ """Identify outermost brace indices in list of indices.
+
+ The purpose here is to find the brace pairs that correspond to BibTeX entries,
+ while discarding brace pairs that appear within the entries
+ (e.g., braces around article titles).
+
+ Parameters
+ ----------
+ idx_list : :obj:`list` of :obj:`tuple` of :obj:`int`
+ A list of two-element tuples of indices of matched braces.
+
+ Returns
+ -------
+ reduced_idx_list : :obj:`list` of :obj:`tuple` of :obj:`int`
+ A list of two-element tuples of indices of matched braces corresponding to BibTeX entries.
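+
+ Examples
+ --------
+ Continuing the toy example from :func:`find_braces`, only the outermost
+ brace pair survives:
+
+ >>> reduce_idx([(20, 24), (8, 25)])
+ [(8, 25)]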
+ """
+ idx_list2 = [idx_item[0] for idx_item in idx_list]
+ idx = np.argsort(idx_list2)
+ idx_list = [idx_list[i] for i in idx]
+
+ df = pd.DataFrame(data=idx_list, columns=["start", "end"])
+
+ good_idx = []
+ df["within"] = False
+ for i, row in df.iterrows():
+ df["within"] = df["within"] | ((df["start"] > row["start"]) & (df["end"] < row["end"]))
+ if not df.iloc[i]["within"]:
+ good_idx.append(i)
+
+ idx_list = [idx_list[i] for i in good_idx]
+ return idx_list
+
+
+def index_bibtex_identifiers(string, idx_list):
+ """Identify the BibTeX entry identifier before each entry.
+
+ This function takes the raw BibTeX string and a list of index pairs spanning the braces of
+ each entry, and extends each pair backward to the starting ``@`` of the entry's identifier.
+
+ Parameters
+ ----------
+ string : :obj:`str`
+ The full BibTeX file, as a string.
+ idx_list : :obj:`list` of :obj:`tuple` of :obj:`int`
+ A list of two-element tuples of indices of matched braces corresponding to BibTeX entries.
+
+ Returns
+ -------
+ idx_list : :obj:`list` of :obj:`tuple` of :obj:`int`
+ A list of two-element tuples of indices of BibTeX entries,
+ from the starting @ to the final }.
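+
+ Examples
+ --------
+ Continuing the toy example, the outermost brace pair (8, 25) is extended
+ back to the ``@`` at index 0 (values are cast to plain ints for display):
+
+ >>> string = "@article{foo, title={bar}}"
+ >>> [(int(start), int(end)) for start, end in index_bibtex_identifiers(string, [(8, 25)])]
+ [(0, 25)]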
+ """
+ at_idx = [(a.start(), a.end() - 1) for a in re.finditer("@[a-zA-Z0-9]+{", string)]
+ df = pd.DataFrame(at_idx, columns=["real_start", "false_start"])
+ df2 = pd.DataFrame(idx_list, columns=["false_start", "end"])
+ df = pd.merge(left=df, right=df2, left_on="false_start", right_on="false_start")
+ new_idx_list = list(zip(df.real_start, df.end))
+ return new_idx_list
+
+
+def find_citations(description):
+ r"""Find citations in a text description.
+
+ It looks for cases of \\citep{}, \\cite{}, and \\citealt{} in a string.
+
+ Parameters
+ ----------
+ description : :obj:`str`
+ Description of a method, optionally with citations.
+
+ Returns
+ -------
+ all_citations : :obj:`list` of :obj:`str`
+ A list of all identifiers for citations.
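+
+ Examples
+ --------
+ A short, hypothetical description string:
+
+ >>> find_citations(r"tedana \citep{dupre2021te} builds on ME-ICA \cite{kundu2013integrated}.")
+ ['dupre2021te', 'kundu2013integrated']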
+ """
+ paren_citations = re.findall(r"\\citep{([a-zA-Z0-9,/\.]+)}", description)
+ intext_citations = re.findall(r"\\cite{([a-zA-Z0-9,/\.]+)}", description)
+ inparen_citations = re.findall(r"\\citealt{([a-zA-Z0-9,/\.]+)}", description)
+ all_citations = ",".join(paren_citations + intext_citations + inparen_citations)
+ all_citations = all_citations.split(",")
+ all_citations = sorted(list(set(all_citations)))
+ return all_citations
+
+
+def reduce_references(citations, reference_list):
+ """Reduce the list of references to only include ones associated with requested citations.
+
+ Parameters
+ ----------
+ citations : :obj:`list` of :obj:`str`
+ A list of all identifiers for citations.
+ reference_list : :obj:`list` of :obj:`str`
+ List of all available BibTeX entries.
+
+ Returns
+ -------
+ reduced_reference_list : :obj:`list` of :obj:`str`
+ List of BibTeX entries for citations only.
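+
+ Examples
+ --------
+ A sketch with shortened, hypothetical entries:
+
+ >>> entries = ["@article{dupre2021te, ...}", "@article{posse1999enhancement, ...}"]
+ >>> reduce_references(["posse1999enhancement"], entries)
+ ['@article{posse1999enhancement, ...}']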
+ """
+ reduced_reference_list = []
+ for citation in citations:
+ citation_found = False
+ for reference in reference_list:
+ check_string = "@[a-zA-Z]+{" + citation + ","
+ if re.match(check_string, reference):
+ reduced_reference_list.append(reference)
+ citation_found = True
+ continue
+
+ if not citation_found:
+ LGR.warning(f"Citation {citation} not found.")
+
+ return reduced_reference_list
+
+
+def get_description_references(description):
+ """Find BibTeX references for citations in a methods description.
+
+ Parameters
+ ----------
+ description : :obj:`str`
+ Description of a method, optionally with citations.
+
+ Returns
+ -------
+ bibtex_string : :obj:`str`
+ A string containing BibTeX entries, limited only to the citations in the description.
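+
+ Examples
+ --------
+ A sketch of typical usage; the exact text returned depends on the entries in
+ ``tedana/resources/references.bib``:
+
+ >>> description = "Data were combined using the T2* method \\citep{posse1999enhancement}."
+ >>> print(get_description_references(description))  # doctest: +SKIP
+ @article{posse1999enhancement,
+ ...
+ }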
+ """
+ bibtex_file = op.join(get_resource_path(), "references.bib")
+ with open(bibtex_file, "r") as fo:
+ bibtex_string = fo.read()
+
+ braces_idx = find_braces(bibtex_string)
+ red_braces_idx = reduce_idx(braces_idx)
+ bibtex_idx = index_bibtex_identifiers(bibtex_string, red_braces_idx)
+ citations = find_citations(description)
+ reference_list = [bibtex_string[start : end + 1] for start, end in bibtex_idx]
+ reduced_reference_list = reduce_references(citations, reference_list)
+
+ bibtex_string = "\n".join(reduced_reference_list)
+ return bibtex_string
diff --git a/tedana/combine.py b/tedana/combine.py
index 1bb3f36e8..449491e56 100644
--- a/tedana/combine.py
+++ b/tedana/combine.py
@@ -5,22 +5,17 @@
import numpy as np
-from tedana.due import Doi, due
-
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
-@due.dcite(
- Doi("10.1002/(SICI)1522-2594(199907)42:1<87::AID-MRM13>3.0.CO;2-O"),
- description="T2* method of combining data across echoes using monoexponential equation.",
-)
def _combine_t2s(data, tes, ft2s, report=True):
"""
Combine data across echoes using weighted averaging according to voxel-
(and sometimes volume-) wise estimates of T2*.
+ This method was proposed in :footcite:t:`posse1999enhancement`.
+
Parameters
----------
data : (M x E x T) array_like
@@ -39,26 +34,14 @@ def _combine_t2s(data, tes, ft2s, report=True):
References
----------
- * Posse, S., Wiese, S., Gembris, D., Mathiak, K., Kessler,
- C., Grosse‐Ruyken, M. L., ... & Kiselev, V. G. (1999).
- Enhancement of BOLD‐contrast sensitivity by single‐shot
- multi‐echo functional MR imaging. Magnetic Resonance in
- Medicine: An Official Journal of the International Society
- for Magnetic Resonance in Medicine, 42(1), 87-97.
+ .. footbibliography::
"""
if report:
RepLGR.info(
"Multi-echo data were then optimally combined using the "
- "T2* combination method (Posse et al., 1999)."
- )
- RefLGR.info(
- "Posse, S., Wiese, S., Gembris, D., Mathiak, K., Kessler, "
- "C., Grosse‐Ruyken, M. L., ... & Kiselev, V. G. (1999). "
- "Enhancement of BOLD‐contrast sensitivity by single‐shot "
- "multi‐echo functional MR imaging. Magnetic Resonance in "
- "Medicine: An Official Journal of the International Society "
- "for Magnetic Resonance in Medicine, 42(1), 87-97."
+ "T2* combination method \\citep{posse1999enhancement}."
)
+
n_vols = data.shape[-1]
alpha = tes * np.exp(-tes / ft2s)
if alpha.ndim == 2:
@@ -77,16 +60,14 @@ def _combine_t2s(data, tes, ft2s, report=True):
return combined
-@due.dcite(
- Doi("10.1002/mrm.20900"),
- description="PAID method of combining data across echoes using just SNR/signal and TE.",
-)
def _combine_paid(data, tes, report=True):
"""
Combine data across echoes using SNR/signal and TE via the
parallel-acquired inhomogeneity desensitized (PAID) ME-fMRI combination
method.
+ This method was first proposed in :footcite:t:`poser2006bold`.
+
Parameters
----------
data : (M x E x T) array_like
@@ -103,29 +84,15 @@ def _combine_paid(data, tes, report=True):
References
----------
- * Poser, B. A., Versluis, M. J., Hoogduin, J. M., & Norris,
- D. G. (2006). BOLD contrast sensitivity enhancement and
- artifact reduction with multiecho EPI: parallel‐acquired
- inhomogeneity‐desensitized fMRI.
- Magnetic Resonance in Medicine: An Official Journal of the
- International Society for Magnetic Resonance in Medicine,
- 55(6), 1227-1235.
+ .. footbibliography::
"""
if report:
RepLGR.info(
"Multi-echo data were then optimally combined using the "
"parallel-acquired inhomogeneity desensitized (PAID) "
- "combination method."
- )
- RefLGR.info(
- "Poser, B. A., Versluis, M. J., Hoogduin, J. M., & Norris, "
- "D. G. (2006). BOLD contrast sensitivity enhancement and "
- "artifact reduction with multiecho EPI: parallel‐acquired "
- "inhomogeneity‐desensitized fMRI. "
- "Magnetic Resonance in Medicine: An Official Journal of the "
- "International Society for Magnetic Resonance in Medicine, "
- "55(6), 1227-1235."
+ "combination method \\citep{poser2006bold}."
)
+
n_vols = data.shape[-1]
snr = data.mean(axis=-1) / data.std(axis=-1)
alpha = snr * tes
@@ -168,9 +135,9 @@ def make_optcom(data, tes, adaptive_mask, t2s=None, combmode="t2s", verbose=True
Notes
-----
- This function supports both the ``'t2s'`` method [1]_ and the ``'paid'``
- method [2]_. The ``'t2s'`` method operates according to the following
- logic:
+ This function supports both the ``'t2s'`` method :footcite:p:`posse1999enhancement`
+ and the ``'paid'`` method :footcite:p:`poser2006bold`.
+ The ``'t2s'`` method operates according to the following logic:
1. Estimate voxel- and TE-specific weights based on estimated :math:`T_2^*`:
@@ -182,19 +149,7 @@ def make_optcom(data, tes, adaptive_mask, t2s=None, combmode="t2s", verbose=True
References
----------
- .. [1] Posse, S., Wiese, S., Gembris, D., Mathiak, K., Kessler,
- C., Grosse‐Ruyken, M. L., ... & Kiselev, V. G. (1999).
- Enhancement of BOLD‐contrast sensitivity by single‐shot
- multi‐echo functional MR imaging. Magnetic Resonance in
- Medicine: An Official Journal of the International Society
- for Magnetic Resonance in Medicine, 42(1), 87-97.
- .. [2] Poser, B. A., Versluis, M. J., Hoogduin, J. M., & Norris,
- D. G. (2006). BOLD contrast sensitivity enhancement and
- artifact reduction with multiecho EPI: parallel‐acquired
- inhomogeneity‐desensitized fMRI.
- Magnetic Resonance in Medicine: An Official Journal of the
- International Society for Magnetic Resonance in Medicine,
- 55(6), 1227-1235.
+ .. footbibliography::
See Also
--------
diff --git a/tedana/decay.py b/tedana/decay.py
index 5b2fbe2e7..92677ed8d 100644
--- a/tedana/decay.py
+++ b/tedana/decay.py
@@ -11,7 +11,6 @@
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def _apply_t2s_floor(t2s, echo_times):
diff --git a/tedana/decomposition/_utils.py b/tedana/decomposition/_utils.py
index d25aabe7a..a6c1adf8d 100644
--- a/tedana/decomposition/_utils.py
+++ b/tedana/decomposition/_utils.py
@@ -8,7 +8,6 @@
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def eimask(dd, ees=None):
diff --git a/tedana/decomposition/ica.py b/tedana/decomposition/ica.py
index 73885e55b..ff69626a7 100644
--- a/tedana/decomposition/ica.py
+++ b/tedana/decomposition/ica.py
@@ -10,7 +10,6 @@
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def tedica(data, n_components, fixed_seed, maxit=500, maxrestart=10):
diff --git a/tedana/decomposition/pca.py b/tedana/decomposition/pca.py
index cbaa9c479..ce18fe840 100644
--- a/tedana/decomposition/pca.py
+++ b/tedana/decomposition/pca.py
@@ -17,7 +17,6 @@
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def low_mem_pca(data):
@@ -97,7 +96,7 @@ def tedpca(
Method with which to select components in TEDPCA. PCA
decomposition with the mdl, kic and aic options are based on a Moving Average
(stationary Gaussian) process and are ordered from most to least aggressive
- (see Li et al., 2007).
+ (see :footcite:p:`li2007estimating`).
If a float is provided, then it is assumed to represent percentage of variance
explained (0-1) to retain from PCA.
If an int is provided, then it is assumed to be the number of components
@@ -176,6 +175,10 @@ def tedpca(
decomposition
=========================== =============================================
+ References
+ ----------
+ .. footbibliography::
+
See Also
--------
:func:`tedana.utils.make_adaptive_mask` : The function used to create
@@ -184,27 +187,13 @@ def tedpca(
various naming conventions
"""
if algorithm == "kundu":
- alg_str = "followed by the Kundu component selection decision tree (Kundu et al., 2013)"
- RefLGR.info(
- "Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., "
- "Vértes, P. E., Inati, S. J., ... & Bullmore, E. T. "
- "(2013). Integrated strategy for improving functional "
- "connectivity mapping using multiecho fMRI. Proceedings "
- "of the National Academy of Sciences, 110(40), "
- "16187-16192."
+ alg_str = (
+ "followed by the Kundu component selection decision tree \\citep{kundu2013integrated}"
)
elif algorithm == "kundu-stabilize":
alg_str = (
"followed by the 'stabilized' Kundu component "
- "selection decision tree (Kundu et al., 2013)"
- )
- RefLGR.info(
- "Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., "
- "Vértes, P. E., Inati, S. J., ... & Bullmore, E. T. "
- "(2013). Integrated strategy for improving functional "
- "connectivity mapping using multiecho fMRI. Proceedings "
- "of the National Academy of Sciences, 110(40), "
- "16187-16192."
+ "selection decision tree \\citep{kundu2013integrated}"
)
elif isinstance(algorithm, Number):
if isinstance(algorithm, float):
@@ -217,19 +206,13 @@ def tedpca(
else:
alg_str = (
"based on the PCA component estimation with a Moving Average"
- "(stationary Gaussian) process (Li et al., 2007)"
- )
- RefLGR.info(
- "Li, Y.O., Adalı, T. and Calhoun, V.D., (2007). "
- "Estimating the number of independent components for "
- "functional magnetic resonance imaging data. "
- "Human brain mapping, 28(11), pp.1251-1266."
+ "(stationary Gaussian) process \\citep{li2007estimating}"
)
RepLGR.info(
- "Principal component analysis {0} was applied to "
+ f"Principal component analysis {alg_str} was applied to "
"the optimally combined data for dimensionality "
- "reduction.".format(alg_str)
+ "reduction."
)
n_samp, n_echos, n_vols = data_cat.shape
diff --git a/tedana/due.py b/tedana/due.py
deleted file mode 100644
index 8c2e56166..000000000
--- a/tedana/due.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# emacs: at the end of the file
-# ex: set sts=4 ts=4 sw=4 et:
-# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### #
-"""
-Stub file for a guaranteed safe import of duecredit constructs: if duecredit
-is not available.
-To use it, place it into your project codebase to be imported, e.g. copy as
- cp stub.py /path/tomodule/module/due.py
-Note that it might be better to avoid naming it duecredit.py to avoid shadowing
-installed duecredit.
-Then use in your code as
- from .due import due, Doi, BibTeX
-See https://github.com/duecredit/duecredit/blob/master/README.md for examples.
-Origin: Originally a part of the duecredit
-Copyright: 2015-2016 DueCredit developers
-License: BSD-2
-"""
-
-from builtins import object, str
-
-__version__ = "0.0.5"
-
-
-class InactiveDueCreditCollector(object):
- """Just a stub at the Collector which would not do anything"""
-
- def _donothing(self, *args, **kwargs):
- """Perform no good and no bad"""
- pass
-
- def dcite(self, *args, **kwargs):
- """If I could cite I would"""
-
- def nondecorating_decorator(func):
- return func
-
- return nondecorating_decorator
-
- cite = load = add = _donothing
-
- def __repr__(self):
- return self.__class__.__name__ + "()"
-
-
-def _donothing_func(*args, **kwargs):
- """Perform no good and no bad"""
- pass
-
-
-try:
- from duecredit import BibTeX, Doi, Url, due
-
- if "due" in locals() and not hasattr(due, "cite"):
- raise RuntimeError("Imported due lacks .cite. DueCredit is now disabled")
-except Exception as e:
- if type(e).__name__ != "ImportError":
- import logging
-
- logging.getLogger("duecredit").error(
- 'Module `duecredit` not successfully imported due to "%s". '
- "Package functionality unaffected.",
- str(e),
- )
-
- # Initiate due stub
- due = InactiveDueCreditCollector()
- BibTeX = Doi = Url = _donothing_func
-
-# Emacs mode definitions
-# Local Variables:
-# mode: python
-# py-indent-offset: 4
-# tab-width: 4
-# indent-tabs-mode: nil
-# End:
diff --git a/tedana/gscontrol.py b/tedana/gscontrol.py
index 33d184f57..61e8cedc5 100644
--- a/tedana/gscontrol.py
+++ b/tedana/gscontrol.py
@@ -9,11 +9,9 @@
from scipy.special import lpmv
from tedana import utils
-from tedana.due import Doi, due
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def gscontrol_raw(catd, optcom, n_echos, io_generator, dtrank=4):
@@ -118,17 +116,13 @@ def gscontrol_raw(catd, optcom, n_echos, io_generator, dtrank=4):
return dm_catd, dm_optcom
-@due.dcite(
- Doi("10.1073/pnas.1301725110"),
- description="Minimum image regression to remove T1-like effects from the denoised data.",
-)
def minimum_image_regression(optcom_ts, mmix, mask, comptable, io_generator):
"""
Perform minimum image regression (MIR) to remove T1-like effects from
BOLD-like components.
While this method has not yet been described in detail in any publications,
- we recommend that users cite [1]_.
+ we recommend that users cite :footcite:t:`kundu2013integrated`.
Parameters
----------
@@ -174,23 +168,12 @@ def minimum_image_regression(optcom_ts, mmix, mask, comptable, io_generator):
References
----------
- .. [1] Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., Vértes, P. E.,
- Inati, S. J., ... & Bullmore, E. T. (2013).
- Integrated strategy for improving functional connectivity mapping
- using multiecho fMRI.
- Proceedings of the National Academy of Sciences, 110(40), 16187-16192.
+ .. footbibliography::
"""
LGR.info("Performing minimum image regression to remove spatially-diffuse noise")
RepLGR.info(
"Minimum image regression was then applied to the "
- "data in order to remove spatially diffuse noise (Kundu et al., 2013)."
- )
- RefLGR.info(
- "Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., Vértes, P. E., "
- "Inati, S. J., ... & Bullmore, E. T. (2013). "
- "Integrated strategy for improving functional connectivity mapping "
- "using multiecho fMRI. "
- "Proceedings of the National Academy of Sciences, 110(40), 16187-16192."
+ "data in order to remove spatially diffuse noise \\citep{kundu2013integrated}."
)
all_comps = comptable.index.values
diff --git a/tedana/io.py b/tedana/io.py
index e3b7506af..5e4ff6c90 100644
--- a/tedana/io.py
+++ b/tedana/io.py
@@ -22,7 +22,6 @@
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
class OutputGenerator:
diff --git a/tedana/metrics/collect.py b/tedana/metrics/collect.py
index 0e1b56bbb..51c4097a9 100644
--- a/tedana/metrics/collect.py
+++ b/tedana/metrics/collect.py
@@ -13,7 +13,6 @@
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def generate_metrics(
diff --git a/tedana/metrics/dependence.py b/tedana/metrics/dependence.py
index 4fc7a4a26..1cdfe7852 100644
--- a/tedana/metrics/dependence.py
+++ b/tedana/metrics/dependence.py
@@ -9,7 +9,6 @@
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def calculate_weights(data_optcom, mixing):
diff --git a/tedana/reporting/data/html/report_body_template.html b/tedana/reporting/data/html/report_body_template.html
index e06f18b04..8dc06e06e 100644
--- a/tedana/reporting/data/html/report_body_template.html
+++ b/tedana/reporting/data/html/report_body_template.html
@@ -171,6 +171,9 @@ Carpet plots
About tedana
$about
+
+ References
+ $references
$javascript
diff --git a/tedana/reporting/html_report.py b/tedana/reporting/html_report.py
index 5477035b0..956898e9a 100644
--- a/tedana/reporting/html_report.py
+++ b/tedana/reporting/html_report.py
@@ -50,7 +50,7 @@ def _generate_buttons(out_dir):
return buttons_html
-def _update_template_bokeh(bokeh_id, about, bokeh_js, buttons):
+def _update_template_bokeh(bokeh_id, about, references, bokeh_js, buttons):
"""
Populate a report with content.
@@ -60,6 +60,8 @@ def _update_template_bokeh(bokeh_id, about, bokeh_js, buttons):
HTML div created by bokeh.embed.components
about : str
Reporting information for a given run
+ references : str
+ BibTeX references associated with the reporting information
bokeh_js : str
Javascript created by bokeh.embed.components
Returns
@@ -72,7 +74,9 @@ def _update_template_bokeh(bokeh_id, about, bokeh_js, buttons):
body_template_path = resource_path.joinpath(body_template_name)
with open(str(body_template_path), "r") as body_file:
body_tpl = Template(body_file.read())
- body = body_tpl.substitute(content=bokeh_id, about=about, javascript=bokeh_js, buttons=buttons)
+ body = body_tpl.substitute(
+ content=bokeh_id, about=about, references=references, javascript=bokeh_js, buttons=buttons
+ )
return body
@@ -96,7 +100,8 @@ def _save_as_html(body):
def generate_report(io_generator, tr):
- """
+ """Generate an HTML report.
+
Parameters
----------
io_generator : tedana.io.OutputGenerator
@@ -165,7 +170,16 @@ def generate_report(io_generator, tr):
with open(opj(io_generator.out_dir, "report.txt"), "r+") as f:
about = f.read()
- body = _update_template_bokeh(kr_div, about, kr_script, buttons_html)
+ with open(opj(io_generator.out_dir, "references.bib"), "r") as f:
+ references = f.read()
+
+ body = _update_template_bokeh(
+ bokeh_id=kr_div,
+ about=about,
+ references=references,
+ bokeh_js=kr_script,
+ buttons=buttons_html,
+ )
html = _save_as_html(body)
with open(opj(io_generator.out_dir, "tedana_report.html"), "wb") as f:
f.write(html.encode("utf-8"))
diff --git a/tedana/reporting/static_figures.py b/tedana/reporting/static_figures.py
index 41615636a..74e24793d 100644
--- a/tedana/reporting/static_figures.py
+++ b/tedana/reporting/static_figures.py
@@ -17,7 +17,6 @@
MPL_LGR = logging.getLogger("matplotlib")
MPL_LGR.setLevel(logging.WARNING)
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def _trim_edge_zeros(arr):
diff --git a/tedana/resources/references.bib b/tedana/resources/references.bib
new file mode 100644
index 000000000..006f2f779
--- /dev/null
+++ b/tedana/resources/references.bib
@@ -0,0 +1,315 @@
+@article{dupre2021te,
+  title={TE-dependent analysis of multi-echo fMRI with *tedana*},
+ author={DuPre, Elizabeth and Salo, Taylor and Ahmed, Zaki and Bandettini, Peter A and Bottenhorn, Katherine L and Caballero-Gaudes, C{\'e}sar and Dowdle, Logan T and Gonzalez-Castillo, Javier and Heunis, Stephan and Kundu, Prantik and others},
+ journal={Journal of Open Source Software},
+ volume={6},
+ number={66},
+ pages={3669},
+ year={2021},
+ url={https://doi.org/10.21105/joss.03669},
+ doi={10.21105/joss.03669}
+}
+
+@article{kundu2012differentiating,
+ title={Differentiating BOLD and non-BOLD signals in fMRI time series using multi-echo EPI},
+ author={Kundu, Prantik and Inati, Souheil J and Evans, Jennifer W and Luh, Wen-Ming and Bandettini, Peter A},
+ journal={Neuroimage},
+ volume={60},
+ number={3},
+ pages={1759--1770},
+ year={2012},
+ publisher={Elsevier},
+ url={https://doi.org/10.1016/j.neuroimage.2011.12.028},
+ doi={10.1016/j.neuroimage.2011.12.028}
+}
+
+@article{kundu2013integrated,
+ title={Integrated strategy for improving functional connectivity mapping using multiecho fMRI},
+ author={Kundu, Prantik and Brenowitz, Noah D and Voon, Valerie and Worbe, Yulia and V{\'e}rtes, Petra E and Inati, Souheil J and Saad, Ziad S and Bandettini, Peter A and Bullmore, Edward T},
+ journal={Proceedings of the National Academy of Sciences},
+ volume={110},
+ number={40},
+ pages={16187--16192},
+ year={2013},
+ publisher={National Acad Sciences},
+ url={https://doi.org/10.1073/pnas.1301725110},
+ doi={10.1073/pnas.1301725110}
+}
+
+@software{the_tedana_community_2022_6461353,
+ author = {The tedana Community and
+ Ahmed, Zaki and
+ Bandettini, Peter A. and
+ Bottenhorn, Katherine L. and
+ Caballero-Gaudes, César and
+ Dowdle, Logan T. and
+ DuPre, Elizabeth and
+ Gonzalez-Castillo, Javier and
+ Handwerker, Dan and
+ Heunis, Stephan and
+ Kundu, Prantik and
+ Laird, Angela R. and
+ Markello, Ross and
+ Markiewicz, Christopher J. and
+ Maullin-Sapey, Thomas and
+ Moia, Stefano and
+ Salo, Taylor and
+ Staden, Isla and
+ Teves, Joshua and
+ Uruñuela, Eneko and
+ Vaziri-Pashkam, Maryam and
+ Whitaker, Kirstie},
+ title = {ME-ICA/tedana: 0.0.12},
+ month = apr,
+ year = 2022,
+ publisher = {Zenodo},
+ version = {0.0.12},
+ doi = {10.5281/zenodo.6461353},
+ url = {https://doi.org/10.5281/zenodo.6461353}
+}
+
+@article{posse1999enhancement,
+ title={Enhancement of BOLD-contrast sensitivity by single-shot multi-echo functional MR imaging},
+ author={Posse, Stefan and Wiese, Stefan and Gembris, Daniel and Mathiak, Klaus and Kessler, Christoph and Grosse-Ruyken, Maria-Liisa and Elghahwagi, Barbara and Richards, Todd and Dager, Stephen R and Kiselev, Valerij G},
+ journal={Magnetic Resonance in Medicine: An Official Journal of the International Society for Magnetic Resonance in Medicine},
+ volume={42},
+ number={1},
+ pages={87--97},
+ year={1999},
+ publisher={Wiley Online Library},
+ url={https://doi.org/10.1002/(SICI)1522-2594(199907)42:1<87::AID-MRM13>3.0.CO;2-O},
+ doi={10.1002/(SICI)1522-2594(199907)42:1<87::AID-MRM13>3.0.CO;2-O}
+}
+
+@article{poser2006bold,
+ title={BOLD contrast sensitivity enhancement and artifact reduction with multiecho EPI: parallel-acquired inhomogeneity-desensitized fMRI},
+ author={Poser, Benedikt A and Versluis, Maarten J and Hoogduin, Johannes M and Norris, David G},
+ journal={Magnetic Resonance in Medicine: An Official Journal of the International Society for Magnetic Resonance in Medicine},
+ volume={55},
+ number={6},
+ pages={1227--1235},
+ year={2006},
+ publisher={Wiley Online Library},
+ url={https://doi.org/10.1002/mrm.20900},
+ doi={10.1002/mrm.20900}
+}
+
+@misc{sochat2015ttoz,
+ author = {Sochat, Vanessa},
+ title = {TtoZ Original Release},
+ month = oct,
+ year = 2015,
+ publisher = {Zenodo},
+ doi = {10.5281/zenodo.32508},
+ url = {https://doi.org/10.5281/zenodo.32508}
+}
+
+@article{hughett2008accurate,
+ title={Accurate computation of the F-to-z and t-to-z transforms for large arguments},
+ author={Hughett, Paul},
+ journal={Journal of Statistical Software},
+ volume={23},
+ pages={1--5},
+ year={2008},
+ url={https://doi.org/10.18637/jss.v023.c01},
+ doi={10.18637/jss.v023.c01}
+}
+
+@article{li2007estimating,
+ title={Estimating the number of independent components for functional magnetic resonance imaging data},
+ author={Li, Yi-Ou and Adal{\i}, T{\"u}lay and Calhoun, Vince D},
+ journal={Human brain mapping},
+ volume={28},
+ number={11},
+ pages={1251--1266},
+ year={2007},
+ publisher={Wiley Online Library},
+ url={https://doi.org/10.1002/hbm.20359},
+ doi={10.1002/hbm.20359}
+}
+
+@article{van2011numpy,
+ title={The NumPy array: a structure for efficient numerical computation},
+ author={Van Der Walt, Stefan and Colbert, S Chris and Varoquaux, Gael},
+ journal={Computing in science \& engineering},
+ volume={13},
+ number={2},
+ pages={22--30},
+ year={2011},
+ publisher={IEEE},
+ url={https://doi.org/10.1109/MCSE.2011.37},
+ doi={10.1109/MCSE.2011.37}
+}
+
+@article{virtanen2020scipy,
+ title={SciPy 1.0: fundamental algorithms for scientific computing in Python},
+ author={Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and others},
+ journal={Nature methods},
+ volume={17},
+ number={3},
+ pages={261--272},
+ year={2020},
+ publisher={Nature Publishing Group},
+ url={https://doi.org/10.1038/s41592-019-0686-2},
+ doi={10.1038/s41592-019-0686-2}
+}
+
+@inproceedings{mckinney2010data,
+ title={Data structures for statistical computing in python},
+ author={McKinney, Wes and others},
+ booktitle={Proceedings of the 9th Python in Science Conference},
+ volume={445},
+ number={1},
+ pages={51--56},
+ year={2010},
+ organization={Austin, TX},
+ url={https://doi.org/10.25080/Majora-92bf1922-00a},
+ doi={10.25080/Majora-92bf1922-00a}
+}
+
+@article{pedregosa2011scikit,
+ title={Scikit-learn: Machine learning in Python},
+ author={Pedregosa, Fabian and Varoquaux, Ga{\"e}l and Gramfort, Alexandre and Michel, Vincent and Thirion, Bertrand and Grisel, Olivier and Blondel, Mathieu and Prettenhofer, Peter and Weiss, Ron and Dubourg, Vincent and others},
+ journal={the Journal of machine Learning research},
+ volume={12},
+ pages={2825--2830},
+ year={2011},
+ publisher={JMLR. org},
+ url={http://jmlr.org/papers/v12/pedregosa11a.html}
+}
+
+@software{brett_matthew_2019_3233118,
+ author = {Brett, Matthew and
+ Markiewicz, Christopher J. and
+ Hanke, Michael and
+ Côté, Marc-Alexandre and
+ Cipollini, Ben and
+ McCarthy, Paul and
+ Cheng, Christopher P. and
+ Halchenko, Yaroslav O. and
+ Cottaar, Michiel and
+ Ghosh, Satrajit and
+ Larson, Eric and
+ Wassermann, Demian and
+ Gerhard, Stephan and
+ Lee, Gregory R. and
+ Kastman, Erik and
+ Rokem, Ariel and
+ Madison, Cindee and
+ Morency, Félix C. and
+ Moloney, Brendan and
+ Burns, Christopher and
+ Millman, Jarrod and
+ Gramfort, Alexandre and
+ Leppäkangas, Jaakko and
+ Markello, Ross and
+ van den Bosch, Jasper J.F. and
+ Vincent, Robert D. and
+ Subramaniam, Krish and
+ Raamana, Pradeep Reddy and
+ Nichols, B. Nolan and
+ Baker, Eric M. and
+ Goncalves, Mathias and
+ Hayashi, Soichi and
+ Pinsard, Basile and
+ Haselgrove, Christian and
+ Hymers, Mark and
+ Koudoro, Serge and
+ Oosterhof, Nikolaas N. and
+ Amirbekian, Bago and
+ Nimmo-Smith, Ian and
+ Nguyen, Ly and
+ Reddigari, Samir and
+ St-Jean, Samuel and
+ Garyfallidis, Eleftherios and
+ Varoquaux, Gael and
+ Kaczmarzyk, Jakub and
+ Legarreta, Jon Haitz and
+ Hahn, Kevin S. and
+ Hinds, Oliver P. and
+ Fauber, Bennet and
+ Poline, Jean-Baptiste and
+ Stutters, Jon and
+ Jordan, Kesshi and
+ Cieslak, Matthew and
+ Moreno, Miguel Estevan and
+ Haenel, Valentin and
+ Schwartz, Yannick and
+ Thirion, Bertrand and
+ Papadopoulos Orfanos, Dimitri and
+ Pérez-García, Fernando and
+ Solovey, Igor and
+ Gonzalez, Ivan and
+ Lecher, Justin and
+ Leinweber, Katrin and
+ Raktivan, Konstantinos and
+ Fischer, Peter and
+ Gervais, Philippe and
+ Gadde, Syam and
+ Ballinger, Thomas and
+ Roos, Thomas and
+ Reddam, Venkateswara Reddy and
+ freec84},
+ title = {nipy/nibabel: 2.4.1},
+ month = may,
+ year = 2019,
+ publisher = {Zenodo},
+ version = {2.4.1},
+ doi = {10.5281/zenodo.3233118},
+ url = {https://doi.org/10.5281/zenodo.3233118}
+}
+
+@article{dice1945measures,
+ title={Measures of the amount of ecologic association between species},
+ author={Dice, Lee R},
+ journal={Ecology},
+ volume={26},
+ number={3},
+ pages={297--302},
+ year={1945},
+ publisher={JSTOR},
+ url={https://doi.org/10.2307/1932409},
+ doi={10.2307/1932409}
+}
+
+@article{sorensen1948method,
+ title={A method of establishing groups of equal amplitude in plant sociology based on similarity of species content and its application to analyses of the vegetation on Danish commons},
+ author={Sorensen, Th A},
+  journal={Biol. Skr.},
+ volume={5},
+ pages={1--34},
+ year={1948}
+}
+
+@software{reback2020pandas,
+ author = {The pandas development team},
+ title = {pandas-dev/pandas: Pandas},
+ month = feb,
+ year = 2020,
+ publisher = {Zenodo},
+ version = {latest},
+ doi = {10.5281/zenodo.3509134},
+ url = {https://doi.org/10.5281/zenodo.3509134}
+}
+
+@Manual{bokehmanual,
+ title = {Bokeh: Python library for interactive visualization},
+ author = {{Bokeh Development Team}},
+ year = {2018},
+ url = {https://bokeh.pydata.org/en/latest/},
+}
+
+@Article{Hunter:2007,
+ Author = {Hunter, J. D.},
+ Title = {Matplotlib: A 2D graphics environment},
+ Journal = {Computing in Science \& Engineering},
+ Volume = {9},
+ Number = {3},
+ Pages = {90--95},
+ abstract = {Matplotlib is a 2D graphics package used for Python for
+ application development, interactive scripting, and publication-quality
+ image generation across user interfaces and operating systems.},
+ publisher = {IEEE COMPUTER SOC},
+ doi = {10.1109/MCSE.2007.55},
+ year = 2007
+}
diff --git a/tedana/selection/_utils.py b/tedana/selection/_utils.py
index 906567cac..9f163b959 100644
--- a/tedana/selection/_utils.py
+++ b/tedana/selection/_utils.py
@@ -7,7 +7,6 @@
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def clean_dataframe(comptable):
diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py
index 854387f8f..fc9d9536d 100644
--- a/tedana/selection/tedica.py
+++ b/tedana/selection/tedica.py
@@ -12,7 +12,6 @@
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def manual_selection(comptable, acc=None, rej=None):
@@ -116,7 +115,10 @@ def kundu_selection_v2(comptable, n_echos, n_vols):
-----
The selection algorithm used in this function was originated in ME-ICA
by Prantik Kundu, and his original implementation is available at:
- https://github.com/ME-ICA/me-ica/blob/b2781dd087ab9de99a2ec3925f04f02ce84f0adc/meica.libs/select_model.py
+ https://github.com/ME-ICA/me-ica/blob/\
+ b2781dd087ab9de99a2ec3925f04f02ce84f0adc/meica.libs/select_model.py
+
+ The appropriate citation is :footcite:t:`kundu2013integrated`.
This component selection process uses multiple, previously calculated
metrics that include kappa, rho, variance explained, noise and spatial
@@ -128,28 +130,16 @@ def kundu_selection_v2(comptable, n_echos, n_vols):
References
----------
- * Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y.,
- Vértes, P. E., Inati, S. J., ... & Bullmore, E. T.
- (2013). Integrated strategy for improving functional
- connectivity mapping using multiecho fMRI. Proceedings
- of the National Academy of Sciences, 110(40),
- 16187-16192.
+ .. footbibliography::
"""
LGR.info("Performing ICA component selection with Kundu decision tree v2.5")
RepLGR.info(
"Next, component selection was performed to identify "
"BOLD (TE-dependent), non-BOLD (TE-independent), and "
"uncertain (low-variance) components using the Kundu "
- "decision tree (v2.5; Kundu et al., 2013)."
- )
- RefLGR.info(
- "Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., "
- "Vértes, P. E., Inati, S. J., ... & Bullmore, E. T. "
- "(2013). Integrated strategy for improving functional "
- "connectivity mapping using multiecho fMRI. Proceedings "
- "of the National Academy of Sciences, 110(40), "
- "16187-16192."
+ "decision tree (v2.5) \\citep{kundu2013integrated}."
)
+
comptable["classification"] = "accepted"
comptable["rationale"] = ""
diff --git a/tedana/selection/tedpca.py b/tedana/selection/tedpca.py
index da24bce85..143d15572 100644
--- a/tedana/selection/tedpca.py
+++ b/tedana/selection/tedpca.py
@@ -12,7 +12,6 @@
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
F_MAX = 500
diff --git a/tedana/stats.py b/tedana/stats.py
index 143929688..2be30864e 100644
--- a/tedana/stats.py
+++ b/tedana/stats.py
@@ -7,11 +7,9 @@
from scipy import stats
from tedana import utils
-from tedana.due import BibTeX, Doi, due
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def getfbounds(n_echos):
@@ -165,25 +163,6 @@ def get_coeffs(data, X, mask=None, add_const=False):
return betas
-@due.dcite(
- BibTeX(
- """
- @article{hughett2007accurate,
- title={Accurate Computation of the F-to-z and t-to-z Transforms
- for Large Arguments},
- author={Hughett, Paul},
- journal={Journal of Statistical Software},
- volume={23},
- number={1},
- pages={1--5},
- year={2007},
- publisher={Foundation for Open Access Statistics}
- }
- """
- ),
- description="Introduces T-to-Z transform.",
-)
-@due.dcite(Doi("10.5281/zenodo.32508"), description="Python implementation of T-to-Z transform.")
def t_to_z(t_values, dof):
"""
Convert t-values to z-values.
@@ -210,16 +189,8 @@ def t_to_z(t_values, dof):
RepLGR.info(
"T-statistics were converted to z-statistics using Dr. "
- "Vanessa Sochat's implementation (Sochat, 2015) of the method "
- "described in Hughett (2007)."
- )
- RefLGR.info(
- "Sochat, V. (2015). TtoZ Original Release. Zenodo. http://doi.org/10.5281/zenodo.32508."
- )
- RefLGR.info(
- "Hughett, P. (2007). Accurate Computation of the F-to-z and "
- "t-to-z Transforms for Large Arguments. Journal of "
- "Statistical Software, 23(1), 1-5."
+ "Vanessa Sochat's implementation \\citep{sochat2015ttoz} of the method "
+ "described in \\citep{hughett2008accurate}."
)
# Select just the nonzero voxels
diff --git a/tedana/tests/data/cornell_three_echo_outputs.txt b/tedana/tests/data/cornell_three_echo_outputs.txt
index 4e45b1773..5240740ba 100644
--- a/tedana/tests/data/cornell_three_echo_outputs.txt
+++ b/tedana/tests/data/cornell_three_echo_outputs.txt
@@ -96,5 +96,6 @@ figures/comp_067.png
figures/comp_068.png
figures/pca_criteria.png
figures/pca_variance_explained.png
+references.bib
report.txt
tedana_report.html
diff --git a/tedana/tests/data/fiu_four_echo_outputs.txt b/tedana/tests/data/fiu_four_echo_outputs.txt
index 7e9ce1169..ca2f1f7aa 100644
--- a/tedana/tests/data/fiu_four_echo_outputs.txt
+++ b/tedana/tests/data/fiu_four_echo_outputs.txt
@@ -69,6 +69,7 @@ echo-4_desc-PCAT2ModelPredictions_components.nii.gz
echo-4_desc-PCAS0ModelPredictions_components.nii.gz
echo-4_desc-PCA_components.nii.gz
echo-4_desc-Rejected_bold.nii.gz
+references.bib
report.txt
tedana_report.html
figures
diff --git a/tedana/tests/data/nih_five_echo_outputs_verbose.txt b/tedana/tests/data/nih_five_echo_outputs_verbose.txt
index 907b4ec49..234bf6619 100644
--- a/tedana/tests/data/nih_five_echo_outputs_verbose.txt
+++ b/tedana/tests/data/nih_five_echo_outputs_verbose.txt
@@ -70,6 +70,7 @@ echo-5_desc-PCAT2ModelPredictions_components.nii.gz
echo-5_desc-PCAS0ModelPredictions_components.nii.gz
echo-5_desc-PCA_components.nii.gz
echo-5_desc-Rejected_bold.nii.gz
+references.bib
report.txt
tedana_report.html
figures
diff --git a/tedana/utils.py b/tedana/utils.py
index 7011068c1..4728d4595 100644
--- a/tedana/utils.py
+++ b/tedana/utils.py
@@ -10,11 +10,8 @@
from scipy import ndimage
from sklearn.utils import check_array
-from tedana.due import BibTeX, due
-
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def reshape_niimg(data):
@@ -141,40 +138,14 @@ def unmask(data, mask):
return out
-@due.dcite(
- BibTeX(
- "@article{dice1945measures,"
- "author={Dice, Lee R},"
- "title={Measures of the amount of ecologic association between species},"
- "year = {1945},"
- "publisher = {Wiley Online Library},"
- "journal = {Ecology},"
- "volume={26},"
- "number={3},"
- "pages={297--302}}"
- ),
- description="Introduction of Sorenson-Dice index by Dice in 1945.",
-)
-@due.dcite(
- BibTeX(
- "@article{sorensen1948method,"
- "author={S{\\o}rensen, Thorvald},"
- "title={A method of establishing groups of equal amplitude "
- "in plant sociology based on similarity of species and its "
- "application to analyses of the vegetation on Danish commons},"
- "year = {1948},"
- "publisher = {Wiley Online Library},"
- "journal = {Biol. Skr.},"
- "volume={5},"
- "pages={1--34}}"
- ),
- description="Introduction of Sorenson-Dice index by Sorenson in 1948.",
-)
def dice(arr1, arr2, axis=None):
"""
Compute Dice's similarity index between two numpy arrays. Arrays will be
binarized before comparison.
+ This method was first proposed in :footcite:t:`dice1945measures` and
+ :footcite:t:`sorensen1948method`.
+
Parameters
----------
arr1, arr2 : array_like
@@ -188,11 +159,14 @@ def dice(arr1, arr2, axis=None):
dsi : :obj:`float`
Dice-Sorenson index.
+ Notes
+ -----
+ This implementation was based on
+ https://gist.github.com/brunodoamaral/e130b4e97aa4ebc468225b7ce39b3137.
+
References
----------
- REF_
-
- .. _REF: https://gist.github.com/brunodoamaral/e130b4e97aa4ebc468225b7ce39b3137
+ .. footbibliography::
"""
arr1 = np.array(arr1 != 0).astype(int)
arr2 = np.array(arr2 != 0).astype(int)
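As a quick reference for the newly footnoted Dice/Sørensen index, here is a self-contained toy computation; the helper name is hypothetical, and tedana's dice() additionally supports an axis argument.

    # Toy Dice similarity index on binarized inputs, for illustration only:
    # dsi = 2 * |A & B| / (|A| + |B|), returning 0 when both arrays are all zeros.
    import numpy as np

    def dice_sketch(arr1, arr2):
        a = np.asarray(arr1) != 0
        b = np.asarray(arr2) != 0
        denom = a.sum() + b.sum()
        return 2.0 * np.logical_and(a, b).sum() / denom if denom else 0.0

    print(dice_sketch([1, 1, 0, 0], [1, 0, 1, 0]))  # 0.5
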
@@ -386,7 +360,7 @@ def millisec2sec(arr):
return arr / 1000.0
-def setup_loggers(logname=None, repname=None, refname=None, quiet=False, debug=False):
+def setup_loggers(logname=None, repname=None, quiet=False, debug=False):
# Set up the general logger
log_formatter = logging.Formatter(
"%(asctime)s\t%(module)s.%(funcName)-12s\t%(levelname)-8s\t%(message)s",
@@ -421,16 +395,9 @@ def setup_loggers(logname=None, repname=None, refname=None, quiet=False, debug=F
RepLGR.addHandler(rep_handler)
RepLGR.propagate = False
- if refname:
- ref_handler = logging.FileHandler(refname)
- ref_handler.setFormatter(text_formatter)
- RefLGR.setLevel(logging.INFO)
- RefLGR.addHandler(ref_handler)
- RefLGR.propagate = False
-
def teardown_loggers():
- for local_logger in (RefLGR, RepLGR, LGR):
+ for local_logger in (RepLGR, LGR):
for handler in local_logger.handlers[:]:
handler.close()
local_logger.removeHandler(handler)
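With the REFERENCES logger gone, the logging helpers take one fewer argument. A hypothetical usage sketch of the simplified pair (the file names here are made up):

    # Hypothetical caller of the slimmed-down logger helpers: only the general
    # log and the report log get file handlers now; no references file is written.
    from tedana import utils

    utils.setup_loggers(logname="run_log.tsv", repname="report.txt")
    try:
        pass  # workflow steps would go here
    finally:
        utils.teardown_loggers()
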
diff --git a/tedana/workflows/t2smap.py b/tedana/workflows/t2smap.py
index f2cee3497..0711f969b 100644
--- a/tedana/workflows/t2smap.py
+++ b/tedana/workflows/t2smap.py
@@ -15,7 +15,6 @@
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def _get_parser():
@@ -161,7 +160,7 @@ def t2smap_workflow(
"""
Estimate T2 and S0, and optimally combine data across TEs.
- Please remember to cite [1]_.
+ Please remember to cite :footcite:t:`dupre2021te`.
Parameters
----------
@@ -224,13 +223,7 @@ def t2smap_workflow(
References
----------
- .. [1] DuPre, E. M., Salo, T., Ahmed, Z., Bandettini, P. A., Bottenhorn, K. L.,
- Caballero-Gaudes, C., Dowdle, L. T., Gonzalez-Castillo, J., Heunis, S.,
- Kundu, P., Laird, A. R., Markello, R., Markiewicz, C. J., Moia, S.,
- Staden, I., Teves, J. B., Uruñuela, E., Vaziri-Pashkam, M.,
- Whitaker, K., & Handwerker, D. A. (2021).
- TE-dependent analysis of multi-echo fMRI with tedana.
- Journal of Open Source Software, 6(66), 3669. doi:10.21105/joss.03669.
+ .. footbibliography::
"""
out_dir = op.abspath(out_dir)
if not op.isdir(out_dir):
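The hand-maintained `[1]_` reference lists in the workflow docstrings are replaced by sphinxcontrib-bibtex roles. A small hypothetical example of the pattern now used throughout the docstrings (the cited key must exist in references.bib for Sphinx to render it):

    # Hypothetical docstring illustrating the citation pattern adopted here:
    # :footcite:t: cites an entry from references.bib by key, and the
    # footbibliography directive renders the matching entries in place.
    def example_workflow():
        """Run an example analysis.

        Please remember to cite :footcite:t:`dupre2021te`.

        References
        ----------
        .. footbibliography::
        """
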
diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py
index f2c66ac17..02e14bd8e 100644
--- a/tedana/workflows/tedana.py
+++ b/tedana/workflows/tedana.py
@@ -29,12 +29,12 @@
selection,
utils,
)
+from tedana.bibtex import get_description_references
from tedana.stats import computefeats2
from tedana.workflows.parser_utils import check_tedpca_value, is_valid_file
LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
def _get_parser():
@@ -346,7 +346,7 @@ def tedana_workflow(
"""
Run the "canonical" TE-Dependent ANAlysis workflow.
- Please remember to cite [1]_.
+ Please remember to cite :footcite:t:`dupre2021te`.
Parameters
----------
@@ -433,13 +433,7 @@ def tedana_workflow(
References
----------
- .. [1] DuPre, E. M., Salo, T., Ahmed, Z., Bandettini, P. A., Bottenhorn, K. L.,
- Caballero-Gaudes, C., Dowdle, L. T., Gonzalez-Castillo, J., Heunis, S.,
- Kundu, P., Laird, A. R., Markello, R., Markiewicz, C. J., Moia, S.,
- Staden, I., Teves, J. B., Uruñuela, E., Vaziri-Pashkam, M.,
- Whitaker, K., & Handwerker, D. A. (2021).
- TE-dependent analysis of multi-echo fMRI with tedana.
- Journal of Open Source Software, 6(66), 3669. doi:10.21105/joss.03669.
+ .. footbibliography::
"""
out_dir = op.abspath(out_dir)
if not op.isdir(out_dir):
@@ -449,6 +443,7 @@ def tedana_workflow(
basename = "report"
extension = "txt"
repname = op.join(out_dir, (basename + "." + extension))
+ bibtex_file = op.join(out_dir, "references.bib")
repex = op.join(out_dir, (basename + "*"))
previousreps = glob(repex)
previousreps.sort(reverse=True)
@@ -456,14 +451,13 @@ def tedana_workflow(
previousparts = op.splitext(f)
newname = previousparts[0] + "_old" + previousparts[1]
os.rename(f, newname)
- refname = op.join(out_dir, "_references.txt")
# create logfile name
basename = "tedana_"
extension = "tsv"
start_time = datetime.datetime.now().strftime("%Y-%m-%dT%H%M%S")
logname = op.join(out_dir, (basename + start_time + "." + extension))
- utils.setup_loggers(logname, repname, refname, quiet=quiet, debug=debug)
+ utils.setup_loggers(logname, repname, quiet=quiet, debug=debug)
LGR.info("Using output directory: {}".format(out_dir))
@@ -547,16 +541,7 @@ def tedana_workflow(
RepLGR.info(
"TE-dependence analysis was performed on input data using the tedana workflow "
- "(DuPre, Salo et al., 2021)."
- )
- RefLGR.info(
- "DuPre, E. M., Salo, T., Ahmed, Z., Bandettini, P. A., Bottenhorn, K. L., "
- "Caballero-Gaudes, C., Dowdle, L. T., Gonzalez-Castillo, J., Heunis, S., "
- "Kundu, P., Laird, A. R., Markello, R., Markiewicz, C. J., Moia, S., "
- "Staden, I., Teves, J. B., Uruñuela, E., Vaziri-Pashkam, M., "
- "Whitaker, K., & Handwerker, D. A. (2021). "
- "TE-dependent analysis of multi-echo fMRI with tedana. "
- "Journal of Open Source Software, 6(66), 3669. doi:10.21105/joss.03669."
+ "\\citep{dupre2021te}."
)
if mask and not t2smap:
@@ -833,61 +818,31 @@ def tedana_workflow(
json.dump(derivative_metadata, fo, sort_keys=True, indent=4)
RepLGR.info(
- "This workflow used numpy (Van Der Walt, Colbert, & "
- "Varoquaux, 2011), scipy (Jones et al., 2001), pandas "
- "(McKinney, 2010), scikit-learn (Pedregosa et al., 2011), "
- "nilearn, and nibabel (Brett et al., 2019)."
- )
- RefLGR.info(
- "Van Der Walt, S., Colbert, S. C., & Varoquaux, G. (2011). The "
- "NumPy array: a structure for efficient numerical computation. "
- "Computing in Science & Engineering, 13(2), 22."
- )
- RefLGR.info(
- "Jones E, Oliphant E, Peterson P, et al. SciPy: Open Source "
- "Scientific Tools for Python, 2001-, http://www.scipy.org/"
- )
- RefLGR.info(
- "McKinney, W. (2010, June). Data structures for statistical "
- "computing in python. In Proceedings of the 9th Python in "
- "Science Conference (Vol. 445, pp. 51-56)."
- )
- RefLGR.info(
- "Pedregosa, F., Varoquaux, G., Gramfort, A., Michel, V., "
- "Thirion, B., Grisel, O., ... & Vanderplas, J. (2011). "
- "Scikit-learn: Machine learning in Python. Journal of machine "
- "learning research, 12(Oct), 2825-2830."
- )
- RefLGR.info(
- "Brett, M., Markiewicz, C. J., Hanke, M., Côté, M.-A., "
- "Cipollini, B., McCarthy, P., … freec84. (2019, May 28). "
- "nipy/nibabel. Zenodo. http://doi.org/10.5281/zenodo.3233118"
+ "This workflow used numpy \\citep{van2011numpy}, scipy \\citep{virtanen2020scipy}, "
+ "pandas \\citep{mckinney2010data,reback2020pandas}, "
+ "scikit-learn \\citep{pedregosa2011scikit}, "
+ "nilearn, bokeh \\citep{bokehmanual}, matplotlib \\citep{Hunter:2007}, "
+ "and nibabel \\citep{brett_matthew_2019_3233118}."
)
RepLGR.info(
- "This workflow also used the Dice similarity index " "(Dice, 1945; Sørensen, 1948)."
- )
- RefLGR.info(
- "Dice, L. R. (1945). Measures of the amount of ecologic "
- "association between species. Ecology, 26(3), 297-302."
- )
- RefLGR.info(
- "Sørensen, T. J. (1948). A method of establishing groups of "
- "equal amplitude in plant sociology based on similarity of "
- "species content and its application to analyses of the "
- "vegetation on Danish commons. I kommission hos E. Munksgaard."
+ "This workflow also used the Dice similarity index "
+ "\\citep{dice1945measures,sorensen1948method}."
)
with open(repname, "r") as fo:
report = [line.rstrip() for line in fo.readlines()]
report = " ".join(report)
- with open(refname, "r") as fo:
- reference_list = sorted(list(set(fo.readlines())))
- references = "\n".join(reference_list)
- report += "\n\nReferences:\n\n" + references
+
with open(repname, "w") as fo:
fo.write(report)
+ # Collect BibTeX entries for cited papers
+ references = get_description_references(report)
+
+ with open(bibtex_file, "w") as fo:
+ fo.write(references)
+
if not no_reports:
LGR.info("Making figures folder with static component maps and timecourse plots.")
@@ -923,7 +878,6 @@ def tedana_workflow(
LGR.info("Workflow completed")
utils.teardown_loggers()
- os.remove(refname)
def _main(argv=None):