diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0c7abcd7..4503f1b9 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -53,10 +53,6 @@ jobs: extra-specs: | python=${{ matrix.python-version }} - - name: Install alchemtest - run: | - python -m pip install https://github.com/alchemistry/alchemtest/archive/master.zip - - name: Install package (with no dependencies) run: | python -m pip install . --no-deps diff --git a/CHANGES b/CHANGES index 1924039c..31deb4e0 100644 --- a/CHANGES +++ b/CHANGES @@ -17,13 +17,16 @@ The rules for this file: * 2.1.0 +Changes + - Use loguru instead of logging for log (issue #301, PR #303). + Enhancements - Add a parser to read serialised pandas dataframe (parquet) (issue #316, PR#317). - workflow.ABFE allow parquet as input (issue #316, PR#317). Fixes - Fix the case where visualisation.plot_convergence would fail when the final - error is NaN (issue #318, PR#317). + error is NaN (issue #318, PR#319). 06/04/2023 xiki-tempula diff --git a/devtools/conda-envs/test_env.yaml b/devtools/conda-envs/test_env.yaml index b9dcd4e0..87470b55 100644 --- a/devtools/conda-envs/test_env.yaml +++ b/devtools/conda-envs/test_env.yaml @@ -9,11 +9,20 @@ dependencies: - scipy - scikit-learn - matplotlib +- loguru - pyarrow - # Testing +# Testing - pytest - pytest-cov - pytest-xdist - pytest-black - codecov + +# Doc +- sphinx +- sphinx_rtd_theme + +- pip: + - alchemtest @ https://github.com/alchemistry/alchemtest/archive/master.zip + diff --git a/docs/conf.py b/docs/conf.py index 9844d99b..a2e7ae3e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -30,31 +30,33 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
-extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.intersphinx', - 'sphinx.ext.mathjax', - 'sphinx.ext.autosummary', - 'sphinx.ext.napoleon'] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", + "sphinx.ext.mathjax", + "sphinx.ext.autosummary", + "sphinx.ext.napoleon", +] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'alchemlyb' -author = u'''Irfan Alibay, Bryce Allen, Mohammad S. Barhaghi, Oliver +project = "alchemlyb" +author = """Irfan Alibay, Bryce Allen, Mohammad S. Barhaghi, Oliver Beckstein, David Dotson, Jérôme Hénin, Travis Jensen, Thomas T. Joseph, Ian Kenney, Hyungro Lee, Victoria Lim, Shuai Liu, Domenico -Marson, Pascal Merz, Alexander Schlaich, Dominik Wille, Zhiyi Wu''' -copyright = u'2017-2022, ' + author +Marson, Pascal Merz, Alexander Schlaich, Dominik Wille, Zhiyi Wu""" +copyright = "2017-2022, " + author # The version info for the project you're documenting, acts as replacement for @@ -67,12 +69,13 @@ # desired, see the commented out code from alchemlyb._version import get_versions -release = get_versions()['version'] + +release = get_versions()["version"] del get_versions version = release -#version = release.split("+")[0] # only major.minor.patch -#version = "."join(release.split(".")[:2]) # only major.minor +# version = release.split("+")[0] # only major.minor.patch +# version = "."join(release.split(".")[:2]) # only major.minor # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
@@ -84,10 +87,10 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -99,7 +102,8 @@ # a list of builtin themes. # import sphinx_rtd_theme -html_theme = 'sphinx_rtd_theme' + +html_theme = "sphinx_rtd_theme" html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme @@ -117,7 +121,7 @@ # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. -htmlhelp_basename = 'alchemlybdoc' +htmlhelp_basename = "alchemlybdoc" # -- Options for LaTeX output --------------------------------------------- @@ -126,15 +130,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -144,8 +145,7 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'alchemlyb.tex', u'alchemlyb Documentation', - author, 'manual'), + (master_doc, "alchemlyb.tex", "alchemlyb Documentation", author, "manual"), ] @@ -153,10 +153,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
-man_pages = [ - (master_doc, 'alchemlyb', u'alchemlyb Documentation', - [author], 1) -] +man_pages = [(master_doc, "alchemlyb", "alchemlyb Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -165,22 +162,30 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'alchemlyb', u'alchemlyb Documentation', - author, 'alchemlyb', 'The simple alchemistry library.', - 'Miscellaneous'), + ( + master_doc, + "alchemlyb", + "alchemlyb Documentation", + author, + "alchemlyb", + "The simple alchemistry library.", + "Miscellaneous", + ), ] # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'https://docs.python.org/': None, - 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None), - 'pymbar': ('https://pymbar.readthedocs.io/en/latest/', None), - 'alchemtest': ( - 'https://alchemtest.readthedocs.io/en/latest/', None), - 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None)} +intersphinx_mapping = { + "https://docs.python.org/": None, + "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), + "pymbar": ("https://pymbar.readthedocs.io/en/latest/", None), + "alchemtest": ("https://alchemtest.readthedocs.io/en/latest/", None), + "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None), + "loguru": ("https://loguru.readthedocs.io/en/stable/", None), +} # generate stub pages where directed with autosummary -#autosummary_generate = True +# autosummary_generate = True autosummary_generate_overwrite = False # Ensure that the documentation is generated not by alphabetic order. 
-autodoc_member_order = 'bysource' +autodoc_member_order = "bysource" diff --git a/docs/estimators/alchemlyb.estimators.MBAR.rst b/docs/estimators/alchemlyb.estimators.MBAR.rst index f8e3b0b3..2c7ca71e 100644 --- a/docs/estimators/alchemlyb.estimators.MBAR.rst +++ b/docs/estimators/alchemlyb.estimators.MBAR.rst @@ -5,11 +5,6 @@ MBAR The :class:`~alchemlyb.estimators.MBAR` estimator is a light wrapper around the reference implementation of MBAR [Shirts2008]_ from :mod:`pymbar` (:class:`pymbar.mbar.MBAR`). As a generalization of BAR, it uses information from all sampled states to generate an estimate for the free energy difference between each state. -A more robust version of :class:`~alchemlyb.estimators.MBAR` is provided as -:class:`~alchemlyb.estimators.AutoMBAR`, where the class will iteratively -try different means of solving the MBAR estimate to avoid unconverged results. -The process of iterating different methods is documented in the logger -*alchemlyb.estimators.AutoMBAR*. API Reference ------------- @@ -17,4 +12,3 @@ API Reference :members: :inherited-members: -.. autoclass:: alchemlyb.estimators.AutoMBAR diff --git a/docs/index.rst b/docs/index.rst index fe10e410..edd9b8cf 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -92,6 +92,7 @@ We also welcome code contributions: have a look at our `Developer Guide`_. Open postprocessing visualisation workflows + miscellaneous references .. toctree:: diff --git a/docs/miscellaneous.rst b/docs/miscellaneous.rst new file mode 100644 index 00000000..040b0d0d --- /dev/null +++ b/docs/miscellaneous.rst @@ -0,0 +1,10 @@ +Miscellaneous +============= + +This page includes aspects that would improve your usage of **alchemlyb**. + +.. toctree:: + :maxdepth: 2 + + miscellaneous/logging + diff --git a/docs/miscellaneous/logging.rst b/docs/miscellaneous/logging.rst new file mode 100644 index 00000000..3a676b2a --- /dev/null +++ b/docs/miscellaneous/logging.rst @@ -0,0 +1,37 @@ +.. 
_logging_section: +Logging +======= + +In **alchemlyb**, we use :mod:`loguru` for logging. By default, +:mod:`loguru` will print the logging information into the +`sys.stderr `_. + +Print to the stderr +------------------- + +If you want to customise the printing to the `stderr`, you could remove the +existing sink first :: + + from loguru import logger + logger.remove() + +Then add another custom sink :: + + logger.add(sys.stderr, format="{time} {level} {message}", level="INFO") + +The loguru logger is compatible with the :mod:`logging` module of the Python +standard library and can easily be +`configured to log to a logging handler `_. + + +Save to a file +-------------- + +Alternatively, one could save to a file simply with :: + + from loguru import logger + logger.add("file_{time}.log") + +See `configure to log to a file `_ +for more explanation. diff --git a/docs/workflows/alchemlyb.workflows.ABFE.rst b/docs/workflows/alchemlyb.workflows.ABFE.rst index b6460c03..d29737ca 100644 --- a/docs/workflows/alchemlyb.workflows.ABFE.rst +++ b/docs/workflows/alchemlyb.workflows.ABFE.rst @@ -18,13 +18,10 @@ are to complete workflow. For a GROMACS ABFE simulation, executing the workflow would look similar -to the following code (The log is configured by logger). :: +to the following code (:ref:`See how to configure the logger `).
:: >>> from alchemtest.gmx import load_ABFE >>> from alchemlyb.workflows import ABFE - >>> # Enable the logger - >>> import logging - >>> logging.basicConfig(filename='ABFE.log', level=logging.INFO) >>> # Obtain the path of the data >>> import os >>> dir = os.path.dirname(load_ABFE()['data']['complex'][0]) diff --git a/setup.py b/setup.py index e95a2f9b..2dbfed19 100755 --- a/setup.py +++ b/setup.py @@ -52,6 +52,7 @@ "scipy", "scikit-learn", "matplotlib", + "loguru", "pyarrow", ], ) diff --git a/src/alchemlyb/convergence/convergence.py b/src/alchemlyb/convergence/convergence.py index 8758c45a..4c2d9349 100644 --- a/src/alchemlyb/convergence/convergence.py +++ b/src/alchemlyb/convergence/convergence.py @@ -1,10 +1,10 @@ """Functions for assessing convergence of free energy estimates and raw data.""" -import logging from warnings import warn import numpy as np import pandas as pd +from loguru import logger from .. import concat from ..estimators import BAR, TI, MBAR, FEP_ESTIMATORS, TI_ESTIMATORS @@ -65,7 +65,6 @@ def forward_backward_convergence(df_list, estimator="MBAR", num=10, **kwargs): Use pymbar.MBAR instead of the AutoMBAR option. """ - logger = logging.getLogger("alchemlyb.convergence." 
"forward_backward_convergence") logger.info("Start convergence analysis.") logger.info("Check data availability.") if estimator.upper() != estimator: @@ -335,7 +334,6 @@ def A_c(series_list, precision=0.01, tol=2): https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8397498/#FD18 """ - logger = logging.getLogger("alchemlyb.convergence.A_c") n_R_c = len(series_list) R_c_list = [fwdrev_cumavg_Rc(series, precision, tol)[0] for series in series_list] logger.info(f"R_c list: {R_c_list}") diff --git a/src/alchemlyb/estimators/mbar_.py b/src/alchemlyb/estimators/mbar_.py index ee680753..ed1f79e7 100644 --- a/src/alchemlyb/estimators/mbar_.py +++ b/src/alchemlyb/estimators/mbar_.py @@ -1,5 +1,3 @@ -import logging - import pandas as pd import pymbar from sklearn.base import BaseEstimator @@ -25,12 +23,11 @@ class MBAR(BaseEstimator, _EstimatorMixOut): method : str, optional, default="robust" The optimization routine to use. This can be any of the methods - available via :func:`scipy.optimize.minimize` or + available via :func:`scipy.optimize.minimize` or :func:`scipy.optimize.root`. verbose : bool, optional Set to ``True`` if verbose debug output from :mod:`pymbar` is desired. - Output from alchemlyb is logged via :mod:`logging`. Attributes ---------- @@ -77,7 +74,6 @@ def __init__( self.initial_f_k = initial_f_k self.method = method self.verbose = verbose - self.logger = logging.getLogger("alchemlyb.estimators.MBAR") # handle for pymbar.MBAR object self._mbar = None diff --git a/src/alchemlyb/parsing/amber.py b/src/alchemlyb/parsing/amber.py index d129a064..61a46a1e 100644 --- a/src/alchemlyb/parsing/amber.py +++ b/src/alchemlyb/parsing/amber.py @@ -11,18 +11,16 @@ """ -import logging import re import numpy as np import pandas as pd +from loguru import logger from . 
import _init_attrs_dict from .util import anyopen from ..postprocessors.units import R_kJmol, kJ2kcal -logger = logging.getLogger("alchemlyb.parsers.Amber") - k_b = R_kJmol * kJ2kcal _FP_RE = r"[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?" @@ -345,7 +343,7 @@ def extract(outfile, T): if None in mbar: msg = "Something strange parsing the following MBAR section." msg += "\nMaybe the mbar_lambda values are incorrect?" - logger.error("%s\n%s", msg, mbar) + logger.error("{}\n{}", msg, mbar) raise ValueError(msg) reference_energy = mbar[file_datum.mbar_lambda_idx] diff --git a/src/alchemlyb/parsing/namd.py b/src/alchemlyb/parsing/namd.py index 1647467c..5eacf8c4 100644 --- a/src/alchemlyb/parsing/namd.py +++ b/src/alchemlyb/parsing/namd.py @@ -1,19 +1,17 @@ """Parsers for extracting alchemical data from `NAMD `_ output files. """ -import logging from os.path import basename from re import split import numpy as np import pandas as pd +from loguru import logger from . import _init_attrs from .util import anyopen from ..postprocessors.units import R_kJmol, kJ2kcal -logger = logging.getLogger("alchemlyb.parsers.NAMD") - k_b = R_kJmol * kJ2kcal diff --git a/src/alchemlyb/tests/conftest.py b/src/alchemlyb/tests/conftest.py index 88235545..ef4b296d 100644 --- a/src/alchemlyb/tests/conftest.py +++ b/src/alchemlyb/tests/conftest.py @@ -3,6 +3,7 @@ concat should be done at local level.""" import pytest +from _pytest.logging import LogCaptureFixture from alchemtest.amber import load_bace_example, load_simplesolvated from alchemtest.gmx import ( load_benzene, @@ -21,6 +22,7 @@ load_restarted, load_restarted_reversed, ) +from loguru import logger from alchemlyb.parsing import gmx, amber, gomc, namd @@ -269,3 +271,10 @@ def namd_idws_restarted_reversed(): u_nk = namd.extract_u_nk(dataset["data"]["both"], T=300) return u_nk + + +@pytest.fixture +def caplog(caplog: LogCaptureFixture): + handler_id = logger.add(caplog.handler, format="{message}") + yield caplog + logger.remove(handler_id) 
diff --git a/src/alchemlyb/workflows/abfe.py b/src/alchemlyb/workflows/abfe.py index 4a00fbab..ba59e2a1 100644 --- a/src/alchemlyb/workflows/abfe.py +++ b/src/alchemlyb/workflows/abfe.py @@ -1,4 +1,3 @@ -import logging import os import warnings from os.path import join @@ -7,6 +6,7 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd +from loguru import logger from .base import WorkflowBase from .. import concat @@ -80,9 +80,9 @@ def __init__( outdirectory=os.path.curdir, ): super().__init__(units, software, T, outdirectory) - self.logger.info("Initialise Alchemlyb ABFE Workflow") + logger.info("Initialise Alchemlyb ABFE Workflow") self.update_units(units) - self.logger.info( + logger.info( f"Finding files with prefix: {prefix}, suffix: " f"{suffix} under directory {dir} produced by " f"{software}" @@ -99,11 +99,11 @@ def __init__( if len(self.file_list) == 0: raise ValueError(f"No file has been matched to {reg_exp}.") - self.logger.info(f"Found {len(self.file_list)} xvg files.") - self.logger.info("Unsorted file list: \n %s", "\n".join(self.file_list)) + logger.info(f"Found {len(self.file_list)} xvg files.") + logger.info("Unsorted file list: \n {}", "\n".join(self.file_list)) if software == "GROMACS": - self.logger.info(f"Using {software} parser to read the data.") + logger.info(f"Using {software} parser to read the data.") self._extract_u_nk = gmx.extract_u_nk self._extract_dHdl = gmx.extract_dHdl elif software == "AMBER": @@ -115,9 +115,6 @@ def __init__( else: raise NotImplementedError(f"{software} parser not found.") - def _logger_setup(self): - self.logger = logging.getLogger("alchemlyb.workflows.ABFE") - def read(self, read_u_nk=True, read_dHdl=True): """Read the u_nk and dHdL data from the :attr:`~alchemlyb.workflows.ABFE.file_list` @@ -145,26 +142,26 @@ def read(self, read_u_nk=True, read_dHdl=True): if read_u_nk: try: u_nk = self._extract_u_nk(file, T=self.T) - self.logger.info(f"Reading {len(u_nk)} lines of u_nk from {file}") + 
logger.info(f"Reading {len(u_nk)} lines of u_nk from {file}") u_nk_list.append(u_nk) except Exception as exc: msg = f"Error reading u_nk from {file}." - self.logger.error(msg) + logger.error(msg) raise OSError(msg) from exc if read_dHdl: try: dhdl = self._extract_dHdl(file, T=self.T) - self.logger.info(f"Reading {len(dhdl)} lines of dhdl from {file}") + logger.info(f"Reading {len(dhdl)} lines of dhdl from {file}") dHdl_list.append(dhdl) except Exception as exc: msg = f"Error reading dHdl from {file}." - self.logger.error(msg) + logger.error(msg) raise OSError(msg) from exc # Sort the files according to the state if read_u_nk: - self.logger.info("Sort files according to the u_nk.") + logger.info("Sort files according to the u_nk.") column_names = u_nk_list[0].columns.values.tolist() index_list = sorted( range(len(self.file_list)), @@ -173,7 +170,7 @@ def read(self, read_u_nk=True, read_dHdl=True): ), ) elif read_dHdl: - self.logger.info("Sort files according to the dHdl.") + logger.info("Sort files according to the dHdl.") index_list = sorted( range(len(self.file_list)), key=lambda x: dHdl_list[x].reset_index("time").index.values[0], @@ -184,7 +181,7 @@ def read(self, read_u_nk=True, read_dHdl=True): return self.file_list = [self.file_list[i] for i in index_list] - self.logger.info("Sorted file list: \n%s", "\n".join(self.file_list)) + logger.info("Sorted file list: \n{}", "\n".join(self.file_list)) if read_u_nk: self.u_nk_list = [u_nk_list[i] for i in index_list] else: @@ -267,7 +264,7 @@ def run( f"Estimator {estimator} is not supported. Choose one from " f"{FEP_ESTIMATORS + TI_ESTIMATORS}." 
) - self.logger.error(msg) + logger.error(msg) raise ValueError(msg) self.read(use_FEP, use_TI) @@ -307,7 +304,7 @@ def update_units(self, units=None): """ if units is not None: - self.logger.info(f"Set unit to {units}.") + logger.info(f"Set unit to {units}.") self.units = units or None def preprocess(self, skiptime=0, uncorr="dE", threshold=50): @@ -333,15 +330,13 @@ def preprocess(self, skiptime=0, uncorr="dE", threshold=50): dHdl_sample_list : list The list of dHdl after decorrelation. """ - self.logger.info( + logger.info( f"Start preprocessing with skiptime of {skiptime} " f"uncorrelation method of {uncorr} and threshold of " f"{threshold}" ) if len(self.u_nk_list) > 0: - self.logger.info( - f"Processing the u_nk data set with skiptime of {skiptime}." - ) + logger.info(f"Processing the u_nk data set with skiptime of {skiptime}.") self.u_nk_sample_list = [] for index, u_nk in enumerate(self.u_nk_list): @@ -351,21 +346,21 @@ def preprocess(self, skiptime=0, uncorr="dE", threshold=50): subsample = decorrelate_u_nk(u_nk, uncorr, remove_burnin=True) if len(subsample) < threshold: - self.logger.warning( + logger.warning( f"Number of u_nk {len(subsample)} " f"for state {index} is less than the " f"threshold {threshold}." ) - self.logger.info(f"Take all the u_nk for state {index}.") + logger.info(f"Take all the u_nk for state {index}.") self.u_nk_sample_list.append(u_nk) else: - self.logger.info( + logger.info( f"Take {len(subsample)} uncorrelated " f"u_nk for state {index}." 
) self.u_nk_sample_list.append(subsample) else: - self.logger.info("No u_nk data being subsampled") + logger.info("No u_nk data being subsampled") if len(self.dHdl_list) > 0: self.dHdl_sample_list = [] @@ -373,21 +368,21 @@ def preprocess(self, skiptime=0, uncorr="dE", threshold=50): dHdl = dHdl[dHdl.index.get_level_values("time") >= skiptime] subsample = decorrelate_dhdl(dHdl, remove_burnin=True) if len(subsample) < threshold: - self.logger.warning( + logger.warning( f"Number of dHdl {len(subsample)} for " f"state {index} is less than the " f"threshold {threshold}." ) - self.logger.info(f"Take all the dHdl for state {index}.") + logger.info(f"Take all the dHdl for state {index}.") self.dHdl_sample_list.append(dHdl) else: - self.logger.info( + logger.info( f"Take {len(subsample)} uncorrelated " f"dHdl for state {index}." ) self.dHdl_sample_list.append(subsample) else: - self.logger.info("No dHdl data being subsampled") + logger.info("No dHdl data being subsampled") def estimate(self, estimators=("MBAR", "BAR", "TI"), **kwargs): """Estimate the free energy using the selected estimator. @@ -416,10 +411,10 @@ def estimate(self, estimators=("MBAR", "BAR", "TI"), **kwargs): for estimator in estimators: if estimator not in (FEP_ESTIMATORS + TI_ESTIMATORS): msg = f"Estimator {estimator} is not available in {FEP_ESTIMATORS + TI_ESTIMATORS}." - self.logger.error(msg) + logger.error(msg) raise ValueError(msg) - self.logger.info(f"Start running estimator: {','.join(estimators)}.") + logger.info(f"Start running estimator: {','.join(estimators)}.") self.estimator = {} # Use unprocessed data if preprocess is not performed. 
if "TI" in estimators: @@ -427,26 +422,26 @@ def estimate(self, estimators=("MBAR", "BAR", "TI"), **kwargs): dHdl = concat(self.dHdl_sample_list) else: dHdl = concat(self.dHdl_list) - self.logger.warning("dHdl has not been preprocessed.") - self.logger.info(f"A total {len(dHdl)} lines of dHdl is used.") + logger.warning("dHdl has not been preprocessed.") + logger.info(f"A total {len(dHdl)} lines of dHdl is used.") if "BAR" in estimators or "MBAR" in estimators: if self.u_nk_sample_list is not None: u_nk = concat(self.u_nk_sample_list) else: u_nk = concat(self.u_nk_list) - self.logger.warning("u_nk has not been preprocessed.") - self.logger.info(f"A total {len(u_nk)} lines of u_nk is used.") + logger.warning("u_nk has not been preprocessed.") + logger.info(f"A total {len(u_nk)} lines of u_nk is used.") for estimator in estimators: if estimator == "MBAR": - self.logger.info("Run MBAR estimator.") + logger.info("Run MBAR estimator.") self.estimator[estimator] = MBAR(**kwargs).fit(u_nk) elif estimator == "BAR": - self.logger.info("Run BAR estimator.") + logger.info("Run BAR estimator.") self.estimator[estimator] = BAR(**kwargs).fit(u_nk) elif estimator == "TI": - self.logger.info("Run TI estimator.") + logger.info("Run TI estimator.") self.estimator[estimator] = TI(**kwargs).fit(dHdl) def generate_result(self): @@ -499,9 +494,9 @@ def generate_result(self): """ # Write estimate - self.logger.info("Summarise the estimate into a dataframe.") + logger.info("Summarise the estimate into a dataframe.") # Make the header name - self.logger.info("Generate the row names.") + logger.info("Generate the row names.") estimator_names = list(self.estimator.keys()) num_states = len(self.estimator[estimator_names[0]].states_) data_dict = {"name": [], "state": []} @@ -512,11 +507,11 @@ def generate_result(self): try: u_nk = self.u_nk_list[0] stages = u_nk.reset_index("time").index.names - self.logger.info("use the stage name from u_nk") + logger.info("use the stage name from u_nk") 
except: dHdl = self.dHdl_list[0] stages = dHdl.reset_index("time").index.names - self.logger.info("use the stage name from dHdl") + logger.info("use the stage name from dHdl") for stage in stages: data_dict["name"].append(stage.split("-")[0]) @@ -526,7 +521,7 @@ def generate_result(self): col_names = [] for estimator_name, estimator in self.estimator.items(): - self.logger.info(f"Read the results from estimator {estimator_name}") + logger.info(f"Read the results from estimator {estimator_name}") # Do the unit conversion delta_f_ = estimator.delta_f_ @@ -543,9 +538,7 @@ def generate_result(self): d_delta_f_.iloc[index - 1, index] ) - self.logger.info( - f"Generate the staged result from estimator {estimator_name}" - ) + logger.info(f"Generate the staged result from estimator {estimator_name}") for index, stage in enumerate(stages): if len(stages) == 1: start = 0 @@ -564,7 +557,7 @@ def generate_result(self): start = list(reversed(states)).index(lambda_min) start = num_states - start - 1 end = states.index(lambda_max) - self.logger.info(f"Stage {stage} is from state {start} to state {end}.") + logger.info(f"Stage {stage} is from state {start} to state {end}.") # This assumes that the indexes are sorted as the # preprocessing should sort the index of the df. result = delta_f_.iloc[start, end] @@ -606,7 +599,7 @@ def generate_result(self): converter = get_unit_converter(self.units) summary = converter(summary) self.summary = summary - self.logger.info(f"Write results:\n{summary.to_string()}") + logger.info(f"Write results:\n{summary.to_string()}") return summary def plot_overlap_matrix(self, overlap="O_MBAR.pdf", ax=None): @@ -626,16 +619,14 @@ def plot_overlap_matrix(self, overlap="O_MBAR.pdf", ax=None): matplotlib.axes.Axes An axes with the overlap matrix drawn. 
""" - self.logger.info("Plot overlap matrix.") + logger.info("Plot overlap matrix.") if "MBAR" in self.estimator: ax = plot_mbar_overlap_matrix(self.estimator["MBAR"].overlap_matrix, ax=ax) ax.figure.savefig(join(self.out, overlap)) - self.logger.info(f"Plot overlap matrix to {self.out} under {overlap}.") + logger.info(f"Plot overlap matrix to {self.out} under {overlap}.") return ax else: - self.logger.warning( - "MBAR estimator not found. " "Overlap matrix not plotted." - ) + logger.warning("MBAR estimator not found. " "Overlap matrix not plotted.") def plot_ti_dhdl(self, dhdl_TI="dhdl_TI.pdf", labels=None, colors=None, ax=None): """Plot the dHdl for TI estimator using @@ -659,7 +650,7 @@ def plot_ti_dhdl(self, dhdl_TI="dhdl_TI.pdf", labels=None, colors=None, ax=None) matplotlib.axes.Axes An axes with the TI dhdl drawn. """ - self.logger.info("Plot TI dHdl.") + logger.info("Plot TI dHdl.") if "TI" in self.estimator: ax = plot_ti_dhdl( self.estimator["TI"], @@ -669,7 +660,7 @@ def plot_ti_dhdl(self, dhdl_TI="dhdl_TI.pdf", labels=None, colors=None, ax=None) ax=ax, ) ax.figure.savefig(join(self.out, dhdl_TI)) - self.logger.info(f"Plot TI dHdl to {dhdl_TI} under {self.out}.") + logger.info(f"Plot TI dHdl to {dhdl_TI} under {self.out}.") return ax else: raise ValueError("No TI data available in estimators.") @@ -703,7 +694,7 @@ def plot_dF_state( matplotlib.figure.Figure An Figure with the dF states drawn. """ - self.logger.info("Plot dF states.") + logger.info("Plot dF states.") fig = plot_dF_state( self.estimator.values(), labels=labels, @@ -713,7 +704,7 @@ def plot_dF_state( nb=nb, ) fig.savefig(join(self.out, dF_state)) - self.logger.info(f"Plot dF state to {dF_state} under {self.out}.") + logger.info(f"Plot dF state to {dF_state} under {self.out}.") return fig def check_convergence( @@ -750,17 +741,17 @@ def check_convergence( An axes with the convergence drawn. 
""" - self.logger.info("Start convergence analysis.") - self.logger.info("Checking data availability.") + logger.info("Start convergence analysis.") + logger.info("Checking data availability.") if estimator in FEP_ESTIMATORS: if self.u_nk_sample_list is not None: u_nk_list = self.u_nk_sample_list - self.logger.info("Subsampled u_nk is available.") + logger.info("Subsampled u_nk is available.") else: if self.u_nk_list is not None: u_nk_list = self.u_nk_list - self.logger.info( + logger.info( "Subsampled u_nk not available, " "use original data instead." ) else: @@ -770,26 +761,26 @@ def check_convergence( f"run ABFE.check_convergence(estimator='TI') to " f"use a TI estimator." ) - self.logger.error(msg) + logger.error(msg) raise ValueError(msg) convergence = forward_backward_convergence( u_nk_list, estimator=estimator, num=forwrev, **kwargs ) elif estimator in TI_ESTIMATORS: - self.logger.warning( + logger.warning( "No valid FEP estimator or dataset found. " "Fallback to TI." ) if self.dHdl_sample_list is not None: dHdl_list = self.dHdl_sample_list - self.logger.info("Subsampled dHdl is available.") + logger.info("Subsampled dHdl is available.") else: if self.dHdl_list is not None: dHdl_list = self.dHdl_list - self.logger.info( + logger.info( "Subsampled dHdl not available, " "use original data instead." ) else: - self.logger.error(f"dHdl is needed for the f{estimator} estimator.") + logger.error(f"dHdl is needed for the f{estimator} estimator.") raise ValueError(f"dHdl is needed for the f{estimator} estimator.") convergence = forward_backward_convergence( dHdl_list, estimator=estimator, num=forwrev, **kwargs @@ -799,12 +790,12 @@ def check_convergence( f"Estimator {estimator} is not supported. Choose one from " f"{FEP_ESTIMATORS + TI_ESTIMATORS}." 
) - self.logger.error(msg) + logger.error(msg) raise ValueError(msg) self.convergence = get_unit_converter(self.units)(convergence) - self.logger.info(f"Plot convergence analysis to {dF_t} under {self.out}.") + logger.info(f"Plot convergence analysis to {dF_t} under {self.out}.") ax = plot_convergence(self.convergence, units=self.units, ax=ax) ax.figure.savefig(join(self.out, dF_t)) diff --git a/src/alchemlyb/workflows/base.py b/src/alchemlyb/workflows/base.py index 1a7137c2..96859a65 100644 --- a/src/alchemlyb/workflows/base.py +++ b/src/alchemlyb/workflows/base.py @@ -1,9 +1,9 @@ """Basic building blocks for free energy workflows.""" -import logging from pathlib import Path import pandas as pd +from loguru import logger from .. import __version__ @@ -47,22 +47,18 @@ class WorkflowBase: def __init__( self, units="kT", software="Gromacs", T=298, out="./", *args, **kwargs ): - self._logger_setup() - self.logger.info(f"Alchemlyb Version: f{__version__}") - self.logger.info(f"Set Temperature to {T} K.") + logger.info(f"Alchemlyb Version: f{__version__}") + logger.info(f"Set Temperature to {T} K.") self.T = T - self.logger.info(f"Set Software to {software}.") + logger.info(f"Set Software to {software}.") self.software = software self.unit = units self.file_list = [] self.out = out if not Path(out).is_dir(): - self.logger.info(f"Make output folder {out}.") + logger.info(f"Make output folder {out}.") Path(out).mkdir(parents=True) - def _logger_setup(self): - self.logger = logging.getLogger("alchemlyb.workflows.WorkflowBase") - def run(self, *args, **kwargs): """Run the workflow in an automatic fashion.