From b54b19feb37c310320c16cd765b8f6815180d091 Mon Sep 17 00:00:00 2001 From: Max Liu Date: Wed, 24 Jul 2019 15:10:30 -0400 Subject: [PATCH 001/155] Update and refactor Travis build configuration Change Travis build language to minimal Remove the sudo setting which is deprecated Move default test stage out of jobs section --- .travis.yml | 55 +++++++++++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/.travis.yml b/.travis.yml index 7e49d730a8..1d63a07135 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,4 @@ -language: c -sudo: false +language: minimal env: global: @@ -33,31 +32,34 @@ before_install: - git config --global user.name "Travis Deploy" - git config --global user.email "rmg_dev@mit.edu" +install: + # Clone RMG-database + - git clone https://github.com/ReactionMechanismGenerator/RMG-database.git + - cd RMG-Py + # Create and activate environment + - conda env create -q -f environment_linux.yml + - source activate rmg_env + # Install codecov for coverage report + - conda install -y -c conda-forge codecov + - conda list + # Setup MOPAC license key + - yes 'Yes' | $HOME/miniconda/envs/rmg_env/bin/mopac $MOPACKEY > /dev/null + # Install Q2DTor + - git clone -b arkane https://github.com/mjohnson541/Q2DTor.git external/Q2DTor + # Compile RMG + - make + +script: + - make test-unittests + - make test-functional + - make test-database + +after_success: + - codecov + - bash ./deploy.sh + jobs: include: - - stage: test - install: - # Clone RMG-database - - git clone https://github.com/ReactionMechanismGenerator/RMG-database.git - # install Q2DTor - - git clone https://github.com/mjohnson541/Q2DTor.git RMG-Py/external/Q2DTor --branch arkane - # build environment and activate - - cd RMG-Py - - conda env create -q -f environment_linux.yml - - source activate rmg_env - # Install codecov for coverage report - - conda install -y -c conda-forge codecov - - conda list - # Setup MOPAC license key - - yes 'Yes' | $HOME/miniconda/envs/rmg_env/bin/mopac $MOPACKEY > /dev/null - - make - script: - - make test-unittests - - make test-functional - - make test-database - after_success: - - codecov - - bash ./deploy.sh - stage: documentation install: - cd RMG-Py @@ -73,6 +75,7 @@ jobs: - export COMMITMESSAGE="Automatic documentation rebuild" script: - make travis_setup clean html publish + after_success: skip - stage: deploy install: # Setup conda build @@ -85,6 +88,7 @@ jobs: - conda config --set anaconda_upload yes script: - conda build --token $CONDA_TOKEN --user rmg RMG-Py/.conda + after_success: skip - os: osx install: # Setup conda build @@ -97,3 +101,4 @@ jobs: - conda config --set anaconda_upload yes script: - conda build --token $CONDA_TOKEN --user rmg RMG-Py/.conda + after_success: skip From 9281d861388a699668bbe6ad6ece6951b6632b13 Mon Sep 17 00:00:00 2001 From: Max Liu Date: Wed, 24 Jul 2019 15:17:16 -0400 Subject: [PATCH 002/155] Add conda environment file for Python 3 Copy of environment_linux.yml with a few changes Removed packages whose Py3 replacements are still in progress Removed version requirement on pydot --- environment_py3.yml | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 environment_py3.yml diff --git a/environment_py3.yml b/environment_py3.yml new file mode 100644 index 0000000000..f4620703be --- /dev/null +++ b/environment_py3.yml @@ -0,0 +1,44 @@ +name: rmg_env +channels: + - defaults + - rmg + - rdkit + - cantera + - anaconda +dependencies: + - cairo + - cairocffi + 
- cantera >=2.3.0 + - coolprop + - coverage + - cython >=0.25.2 + - ffmpeg + - gprof2dot + - graphviz + - jinja2 + - jupyter + - lpsolve55 + - markupsafe + - matplotlib >=1.5 + - mock + - mopac + - mpmath + - networkx + - nose + - numpy >=1.10.0 + - openbabel + - psutil + - pydas >=1.0.1 + - pydot + - pydqed >=1.0.0 + - pymongo + - pyparsing + - pyrdl + - python >=3.7 + - pyyaml + - quantities + - rdkit >=2018 + - scikit-learn + - scipy + - symmetry + - xlwt From f83b48cc7e0c4ce104457b885f5162e7b8cd32bd Mon Sep 17 00:00:00 2001 From: Max Liu Date: Wed, 24 Jul 2019 15:23:58 -0400 Subject: [PATCH 003/155] Adjust Travis build to also test on Python 3 Note that the build matrix will be expanded for all test stages Since this is not the master or stable branch, it won't matter However, this will need to be updated before merging into master --- .travis.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 1d63a07135..e390ccb5cb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,6 +7,13 @@ env: - secure: "cfosGf5hvUhIlPoGJu0d/HFddrMwIFU7FfLwd8yRrMGkYv0ePOwAW9kmhFSxUYvuXkxzgD75cIICMFY2fSm6VXBXXzfPQD7vwzoApXf7a8vi0C64XhinXhdEyUYb5/v8fswa0zheUENYhUS1tOqDXT/h8EPNZT5wKizaA3O2Wa8=" - secure: "QXuqKYuwCocqsTMePBc5OugBbQC4/t+335TYLdkletiateP/rF/eDsVRG792/BVq5gKRZgz3NH9ipTNm5pZoCbAEPt9+eDpfts8WeAbxmjdcEjfBxxwZ69wUTPAVrezTGn2k7W2UBdFrWeUNKPAVCKIkoviXqOHFitqJEC+c6JY=" - secure: "jIyBEzR10l5SWvY5ouEYzA8YzPHIZNMXMBdcXwuwte8NCU8GBYUqhHA1L67nTaBdLhWbrZ2NireVKPQWJp3ctcI0IB6xZzaYlVpgN/udGPO+1MZd9Xhp9TWuJWrGZ9EoWGB9L5H+O7RYwcDMVH5CUrCIBdsSJuyE8aDpky1/IVE=" + matrix: + - ENV_FILE='environment_linux.yml' + - ENV_FILE='environment_py3.yml' + +matrix: + allow_failures: + - env: ENV_FILE='environment_py3.yml' stages: - test @@ -37,7 +44,7 @@ install: - git clone https://github.com/ReactionMechanismGenerator/RMG-database.git - cd RMG-Py # Create and activate environment - - conda env create -q -f environment_linux.yml + - conda env create -q -f $ENV_FILE - source activate rmg_env # Install codecov for coverage report - conda install -y -c conda-forge codecov From c1c74bea211e6975d38afcaa2fb00b03688c43ac Mon Sep 17 00:00:00 2001 From: Max Liu Date: Fri, 26 Jul 2019 14:29:45 -0400 Subject: [PATCH 004/155] Change print syntax in Makefile and setup.py --- Makefile | 8 ++++---- setup.py | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 5bfb25c3cc..18d36a2e24 100644 --- a/Makefile +++ b/Makefile @@ -4,8 +4,8 @@ # ################################################################################ -DASPK=$(shell python -c 'import pydas.daspk; print pydas.daspk.__file__') -DASSL=$(shell python -c 'import pydas.dassl; print pydas.dassl.__file__') +DASPK=$(shell python -c 'import pydas.daspk; print(pydas.daspk.__file__)') +DASSL=$(shell python -c 'import pydas.dassl; print(pydas.dassl.__file__)') .PHONY : all minimal main solver check cantherm clean install decython documentation mopac_travis @@ -16,7 +16,7 @@ minimal: main: @ echo "Checking you have PyDQED..." - @ python -c 'import pydqed; print pydqed.__file__' + @ python -c 'import pydqed; print(pydqed.__file__)' python setup.py build_ext main --build-lib . --build-temp build --pyrex-c-in-temp solver: @@ -65,7 +65,7 @@ endif install: @ echo "Checking you have PyDQED..." - @ python -c 'import pydqed; print pydqed.__file__' + @ python -c 'import pydqed; print(pydqed.__file__)' ifneq ($(DASPK),) @ echo "DASPK solver found. 
Compiling with DASPK and sensitivity analysis capability..." @ (echo DEF DASPK = 1) > rmgpy/solver/settings.pxi diff --git a/setup.py b/setup.py index 6fcad69732..9d8823b1a6 100644 --- a/setup.py +++ b/setup.py @@ -35,18 +35,18 @@ from distutils.core import setup from distutils.extension import Extension except ImportError: - print 'The distutils package is required to build or install RMG Py.' + print('The distutils package is required to build or install RMG Py.') try: from Cython.Distutils import build_ext import Cython.Compiler.Options except ImportError: - print 'Cython (http://www.cython.org/) is required to build or install RMG Py.' + print('Cython (http://www.cython.org/) is required to build or install RMG Py.') try: import numpy except ImportError: - print 'NumPy (http://numpy.scipy.org/) is required to build or install RMG Py.' + print('NumPy (http://numpy.scipy.org/) is required to build or install RMG Py.') # Create annotated HTML files for each of the Cython modules Cython.Compiler.Options.annotate = True From af2e7a1ccb5f611af99a83af6524415f36aa91ea Mon Sep 17 00:00:00 2001 From: Max Liu Date: Mon, 29 Jul 2019 15:05:58 -0400 Subject: [PATCH 005/155] Import functools.reduce in reaction.py For compatibility with Python 2/3 --- rmgpy/reaction.py | 1 + 1 file changed, 1 insertion(+) diff --git a/rmgpy/reaction.py b/rmgpy/reaction.py index ad17025b1d..5344747197 100644 --- a/rmgpy/reaction.py +++ b/rmgpy/reaction.py @@ -48,6 +48,7 @@ import os.path from copy import copy, deepcopy import urllib +from functools import reduce import rmgpy.constants as constants from rmgpy.molecule.molecule import Molecule, Atom From 603c87e0666158e5ee6be9b35b0899ba68a07604 Mon Sep 17 00:00:00 2001 From: Max Liu Date: Mon, 29 Jul 2019 15:31:07 -0400 Subject: [PATCH 006/155] Decode bytes returned by subprocess.check_output in utilities.py --- utilities.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utilities.py b/utilities.py index aaa62f807a..3bf2cc8c85 100644 --- a/utilities.py +++ b/utilities.py @@ -56,14 +56,14 @@ def check_dependencies(): print('{0:<30}{1}'.format('symmetry', 'Not found. 
Please install in order to use QM.')) missing = True else: - match = re.search(r'\$Revision: (\S*) \$', result) + match = re.search(r'\$Revision: (\S*) \$', result.decode()) version = match.group(1) if platform.system() == 'Windows': location = subprocess.check_output('where symmetry', shell=True) else: location = subprocess.check_output('which symmetry', shell=True) - print('{0:<15}{1:<15}{2}'.format('symmetry', version, location.strip())) + print('{0:<15}{1:<15}{2}'.format('symmetry', version, location.strip().decode())) # Check for RDKit try: From aba2d5af53bde6fc8137c514d4955da0d5dabba1 Mon Sep 17 00:00:00 2001 From: Mark Payne Date: Mon, 5 Aug 2019 15:57:10 -0400 Subject: [PATCH 007/155] Remove external cclib and use cclib conda package --- environment_linux.yml | 1 + environment_mac.yml | 1 + environment_py3.yml | 1 + environment_windows.yml | 1 + external/cclib/LICENSE | 504 ---------- external/cclib/__init__.py | 18 - external/cclib/bridge/__init__.py | 25 - external/cclib/bridge/cclib2biopython.py | 35 - external/cclib/bridge/cclib2openbabel.py | 39 - external/cclib/bridge/cclib2pyquante.py | 27 - external/cclib/method/__init__.py | 15 - external/cclib/method/calculationmethod.py | 44 - external/cclib/method/cda.py | 123 --- external/cclib/method/cspa.py | 111 --- external/cclib/method/density.py | 85 -- external/cclib/method/fragments.py | 134 --- external/cclib/method/lpa.py | 132 --- external/cclib/method/mbo.py | 121 --- external/cclib/method/mpa.py | 118 --- external/cclib/method/opa.py | 141 --- external/cclib/method/population.py | 94 -- external/cclib/method/volume.py | 264 ----- external/cclib/parser/__init__.py | 29 - external/cclib/parser/adfparser.py | 882 ----------------- external/cclib/parser/ccopen.py | 101 -- external/cclib/parser/data.py | 199 ---- external/cclib/parser/gamessparser.py | 912 ----------------- external/cclib/parser/gamessukparser.py | 524 ---------- external/cclib/parser/gaussianparser.py | 1026 -------------------- external/cclib/parser/jaguarparser.py | 474 --------- external/cclib/parser/logfileparser.py | 300 ------ external/cclib/parser/mm4parser.py | 260 ----- external/cclib/parser/molproparser.py | 644 ------------ external/cclib/parser/mopacparser.py | 215 ---- external/cclib/parser/orcaparser.py | 407 -------- external/cclib/parser/utils.py | 71 -- external/cclib/progress/__init__.py | 16 - external/cclib/progress/qt4progress.py | 42 - external/cclib/progress/qtprogress.py | 41 - external/cclib/progress/textprogress.py | 54 -- requirements.txt | 1 + rmgpy/qm/gaussian.py | 2 +- rmgpy/qm/mopac.py | 2 +- 43 files changed, 7 insertions(+), 8229 deletions(-) delete mode 100644 external/cclib/LICENSE delete mode 100644 external/cclib/__init__.py delete mode 100644 external/cclib/bridge/__init__.py delete mode 100644 external/cclib/bridge/cclib2biopython.py delete mode 100644 external/cclib/bridge/cclib2openbabel.py delete mode 100644 external/cclib/bridge/cclib2pyquante.py delete mode 100644 external/cclib/method/__init__.py delete mode 100644 external/cclib/method/calculationmethod.py delete mode 100644 external/cclib/method/cda.py delete mode 100644 external/cclib/method/cspa.py delete mode 100644 external/cclib/method/density.py delete mode 100644 external/cclib/method/fragments.py delete mode 100644 external/cclib/method/lpa.py delete mode 100644 external/cclib/method/mbo.py delete mode 100644 external/cclib/method/mpa.py delete mode 100644 external/cclib/method/opa.py delete mode 100644 external/cclib/method/population.py delete mode 
100644 external/cclib/method/volume.py delete mode 100644 external/cclib/parser/__init__.py delete mode 100644 external/cclib/parser/adfparser.py delete mode 100644 external/cclib/parser/ccopen.py delete mode 100644 external/cclib/parser/data.py delete mode 100644 external/cclib/parser/gamessparser.py delete mode 100644 external/cclib/parser/gamessukparser.py delete mode 100644 external/cclib/parser/gaussianparser.py delete mode 100644 external/cclib/parser/jaguarparser.py delete mode 100644 external/cclib/parser/logfileparser.py delete mode 100644 external/cclib/parser/mm4parser.py delete mode 100644 external/cclib/parser/molproparser.py delete mode 100644 external/cclib/parser/mopacparser.py delete mode 100644 external/cclib/parser/orcaparser.py delete mode 100644 external/cclib/parser/utils.py delete mode 100644 external/cclib/progress/__init__.py delete mode 100644 external/cclib/progress/qt4progress.py delete mode 100644 external/cclib/progress/qtprogress.py delete mode 100644 external/cclib/progress/textprogress.py diff --git a/environment_linux.yml b/environment_linux.yml index d499d6fa0b..e3a06c0991 100644 --- a/environment_linux.yml +++ b/environment_linux.yml @@ -10,6 +10,7 @@ dependencies: - cairo - cairocffi - cantera >=2.3.0 + - cclib - coolprop - coverage - cython >=0.25.2 diff --git a/environment_mac.yml b/environment_mac.yml index 86dd9c4faa..9ce08ae1da 100644 --- a/environment_mac.yml +++ b/environment_mac.yml @@ -10,6 +10,7 @@ dependencies: - cairo - cairocffi - cantera >=2.3.0 + - cclib - coolprop - coverage - cython >=0.25.2 diff --git a/environment_py3.yml b/environment_py3.yml index f4620703be..5e9f6493b5 100644 --- a/environment_py3.yml +++ b/environment_py3.yml @@ -9,6 +9,7 @@ dependencies: - cairo - cairocffi - cantera >=2.3.0 + - cclib - coolprop - coverage - cython >=0.25.2 diff --git a/environment_windows.yml b/environment_windows.yml index 507ead32c8..23ef34828e 100644 --- a/environment_windows.yml +++ b/environment_windows.yml @@ -10,6 +10,7 @@ dependencies: - cairo - cairocffi - cantera >=2.3.0 + - cclib - coolprop - coverage - cython >=0.25.2 diff --git a/external/cclib/LICENSE b/external/cclib/LICENSE deleted file mode 100644 index 5ab7695ab8..0000000000 --- a/external/cclib/LICENSE +++ /dev/null @@ -1,504 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 2.1, February 1999 - - Copyright (C) 1991, 1999 Free Software Foundation, Inc. - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - -[This is the first released version of the Lesser GPL. It also counts - as the successor of the GNU Library Public License, version 2, hence - the version number 2.1.] - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -Licenses are intended to guarantee your freedom to share and change -free software--to make sure the software is free for all its users. - - This license, the Lesser General Public License, applies to some -specially designated software packages--typically libraries--of the -Free Software Foundation and other authors who decide to use it. You -can use it too, but we suggest you first think carefully about whether -this license or the ordinary General Public License is the better -strategy to use in any particular case, based on the explanations below. 
- - When we speak of free software, we are referring to freedom of use, -not price. Our General Public Licenses are designed to make sure that -you have the freedom to distribute copies of free software (and charge -for this service if you wish); that you receive source code or can get -it if you want it; that you can change the software and use pieces of -it in new free programs; and that you are informed that you can do -these things. - - To protect your rights, we need to make restrictions that forbid -distributors to deny you these rights or to ask you to surrender these -rights. These restrictions translate to certain responsibilities for -you if you distribute copies of the library or if you modify it. - - For example, if you distribute copies of the library, whether gratis -or for a fee, you must give the recipients all the rights that we gave -you. You must make sure that they, too, receive or can get the source -code. If you link other code with the library, you must provide -complete object files to the recipients, so that they can relink them -with the library after making changes to the library and recompiling -it. And you must show them these terms so they know their rights. - - We protect your rights with a two-step method: (1) we copyright the -library, and (2) we offer you this license, which gives you legal -permission to copy, distribute and/or modify the library. - - To protect each distributor, we want to make it very clear that -there is no warranty for the free library. Also, if the library is -modified by someone else and passed on, the recipients should know -that what they have is not the original version, so that the original -author's reputation will not be affected by problems that might be -introduced by others. - - Finally, software patents pose a constant threat to the existence of -any free program. We wish to make sure that a company cannot -effectively restrict the users of a free program by obtaining a -restrictive license from a patent holder. Therefore, we insist that -any patent license obtained for a version of the library must be -consistent with the full freedom of use specified in this license. - - Most GNU software, including some libraries, is covered by the -ordinary GNU General Public License. This license, the GNU Lesser -General Public License, applies to certain designated libraries, and -is quite different from the ordinary General Public License. We use -this license for certain libraries in order to permit linking those -libraries into non-free programs. - - When a program is linked with a library, whether statically or using -a shared library, the combination of the two is legally speaking a -combined work, a derivative of the original library. The ordinary -General Public License therefore permits such linking only if the -entire combination fits its criteria of freedom. The Lesser General -Public License permits more lax criteria for linking other code with -the library. - - We call this license the "Lesser" General Public License because it -does Less to protect the user's freedom than the ordinary General -Public License. It also provides other free software developers Less -of an advantage over competing non-free programs. These disadvantages -are the reason we use the ordinary General Public License for many -libraries. However, the Lesser license provides advantages in certain -special circumstances. 
- - For example, on rare occasions, there may be a special need to -encourage the widest possible use of a certain library, so that it becomes -a de-facto standard. To achieve this, non-free programs must be -allowed to use the library. A more frequent case is that a free -library does the same job as widely used non-free libraries. In this -case, there is little to gain by limiting the free library to free -software only, so we use the Lesser General Public License. - - In other cases, permission to use a particular library in non-free -programs enables a greater number of people to use a large body of -free software. For example, permission to use the GNU C Library in -non-free programs enables many more people to use the whole GNU -operating system, as well as its variant, the GNU/Linux operating -system. - - Although the Lesser General Public License is Less protective of the -users' freedom, it does ensure that the user of a program that is -linked with the Library has the freedom and the wherewithal to run -that program using a modified version of the Library. - - The precise terms and conditions for copying, distribution and -modification follow. Pay close attention to the difference between a -"work based on the library" and a "work that uses the library". The -former contains code derived from the library, whereas the latter must -be combined with the library in order to run. - - GNU LESSER GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License Agreement applies to any software library or other -program which contains a notice placed by the copyright holder or -other authorized party saying it may be distributed under the terms of -this Lesser General Public License (also called "this License"). -Each licensee is addressed as "you". - - A "library" means a collection of software functions and/or data -prepared so as to be conveniently linked with application programs -(which use some of those functions and data) to form executables. - - The "Library", below, refers to any such software library or work -which has been distributed under these terms. A "work based on the -Library" means either the Library or any derivative work under -copyright law: that is to say, a work containing the Library or a -portion of it, either verbatim or with modifications and/or translated -straightforwardly into another language. (Hereinafter, translation is -included without limitation in the term "modification".) - - "Source code" for a work means the preferred form of the work for -making modifications to it. For a library, complete source code means -all the source code for all modules it contains, plus any associated -interface definition files, plus the scripts used to control compilation -and installation of the library. - - Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running a program using the Library is not restricted, and output from -such a program is covered only if its contents constitute a work based -on the Library (independent of the use of the Library in a tool for -writing it). Whether that is true depends on what the Library does -and what the program that uses the Library does. - - 1. 
You may copy and distribute verbatim copies of the Library's -complete source code as you receive it, in any medium, provided that -you conspicuously and appropriately publish on each copy an -appropriate copyright notice and disclaimer of warranty; keep intact -all the notices that refer to this License and to the absence of any -warranty; and distribute a copy of this License along with the -Library. - - You may charge a fee for the physical act of transferring a copy, -and you may at your option offer warranty protection in exchange for a -fee. - - 2. You may modify your copy or copies of the Library or any portion -of it, thus forming a work based on the Library, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) The modified work must itself be a software library. - - b) You must cause the files modified to carry prominent notices - stating that you changed the files and the date of any change. - - c) You must cause the whole of the work to be licensed at no - charge to all third parties under the terms of this License. - - d) If a facility in the modified Library refers to a function or a - table of data to be supplied by an application program that uses - the facility, other than as an argument passed when the facility - is invoked, then you must make a good faith effort to ensure that, - in the event an application does not supply such function or - table, the facility still operates, and performs whatever part of - its purpose remains meaningful. - - (For example, a function in a library to compute square roots has - a purpose that is entirely well-defined independent of the - application. Therefore, Subsection 2d requires that any - application-supplied function or table used by this function must - be optional: if the application does not supply it, the square - root function must still compute square roots.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Library, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Library, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote -it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Library. - -In addition, mere aggregation of another work not based on the Library -with the Library (or with a work based on the Library) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may opt to apply the terms of the ordinary GNU General Public -License instead of this License to a given copy of the Library. To do -this, you must alter all the notices that refer to this License, so -that they refer to the ordinary GNU General Public License, version 2, -instead of to this License. (If a newer version than version 2 of the -ordinary GNU General Public License has appeared, then you can specify -that version instead if you wish.) 
Do not make any other change in -these notices. - - Once this change is made in a given copy, it is irreversible for -that copy, so the ordinary GNU General Public License applies to all -subsequent copies and derivative works made from that copy. - - This option is useful when you wish to copy part of the code of -the Library into a program that is not a library. - - 4. You may copy and distribute the Library (or a portion or -derivative of it, under Section 2) in object code or executable form -under the terms of Sections 1 and 2 above provided that you accompany -it with the complete corresponding machine-readable source code, which -must be distributed under the terms of Sections 1 and 2 above on a -medium customarily used for software interchange. - - If distribution of object code is made by offering access to copy -from a designated place, then offering equivalent access to copy the -source code from the same place satisfies the requirement to -distribute the source code, even though third parties are not -compelled to copy the source along with the object code. - - 5. A program that contains no derivative of any portion of the -Library, but is designed to work with the Library by being compiled or -linked with it, is called a "work that uses the Library". Such a -work, in isolation, is not a derivative work of the Library, and -therefore falls outside the scope of this License. - - However, linking a "work that uses the Library" with the Library -creates an executable that is a derivative of the Library (because it -contains portions of the Library), rather than a "work that uses the -library". The executable is therefore covered by this License. -Section 6 states terms for distribution of such executables. - - When a "work that uses the Library" uses material from a header file -that is part of the Library, the object code for the work may be a -derivative work of the Library even though the source code is not. -Whether this is true is especially significant if the work can be -linked without the Library, or if the work is itself a library. The -threshold for this to be true is not precisely defined by law. - - If such an object file uses only numerical parameters, data -structure layouts and accessors, and small macros and small inline -functions (ten lines or less in length), then the use of the object -file is unrestricted, regardless of whether it is legally a derivative -work. (Executables containing this object code plus portions of the -Library will still fall under Section 6.) - - Otherwise, if the work is a derivative of the Library, you may -distribute the object code for the work under the terms of Section 6. -Any executables containing that work also fall under Section 6, -whether or not they are linked directly with the Library itself. - - 6. As an exception to the Sections above, you may also combine or -link a "work that uses the Library" with the Library to produce a -work containing portions of the Library, and distribute that work -under terms of your choice, provided that the terms permit -modification of the work for the customer's own use and reverse -engineering for debugging such modifications. - - You must give prominent notice with each copy of the work that the -Library is used in it and that the Library and its use are covered by -this License. You must supply a copy of this License. 
If the work -during execution displays copyright notices, you must include the -copyright notice for the Library among them, as well as a reference -directing the user to the copy of this License. Also, you must do one -of these things: - - a) Accompany the work with the complete corresponding - machine-readable source code for the Library including whatever - changes were used in the work (which must be distributed under - Sections 1 and 2 above); and, if the work is an executable linked - with the Library, with the complete machine-readable "work that - uses the Library", as object code and/or source code, so that the - user can modify the Library and then relink to produce a modified - executable containing the modified Library. (It is understood - that the user who changes the contents of definitions files in the - Library will not necessarily be able to recompile the application - to use the modified definitions.) - - b) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (1) uses at run time a - copy of the library already present on the user's computer system, - rather than copying library functions into the executable, and (2) - will operate properly with a modified version of the library, if - the user installs one, as long as the modified version is - interface-compatible with the version that the work was made with. - - c) Accompany the work with a written offer, valid for at - least three years, to give the same user the materials - specified in Subsection 6a, above, for a charge no more - than the cost of performing this distribution. - - d) If distribution of the work is made by offering access to copy - from a designated place, offer equivalent access to copy the above - specified materials from the same place. - - e) Verify that the user has already received a copy of these - materials or that you have already sent this user a copy. - - For an executable, the required form of the "work that uses the -Library" must include any data and utility programs needed for -reproducing the executable from it. However, as a special exception, -the materials to be distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies -the executable. - - It may happen that this requirement contradicts the license -restrictions of other proprietary libraries that do not normally -accompany the operating system. Such a contradiction means you cannot -use both them and the Library together in an executable that you -distribute. - - 7. You may place library facilities that are a work based on the -Library side-by-side in a single library together with other library -facilities not covered by this License, and distribute such a combined -library, provided that the separate distribution of the work based on -the Library and of the other library facilities is otherwise -permitted, and provided that you do these two things: - - a) Accompany the combined library with a copy of the same work - based on the Library, uncombined with any other library - facilities. This must be distributed under the terms of the - Sections above. - - b) Give prominent notice with the combined library of the fact - that part of it is a work based on the Library, and explaining - where to find the accompanying uncombined form of the same work. - - 8. 
You may not copy, modify, sublicense, link with, or distribute -the Library except as expressly provided under this License. Any -attempt otherwise to copy, modify, sublicense, link with, or -distribute the Library is void, and will automatically terminate your -rights under this License. However, parties who have received copies, -or rights, from you under this License will not have their licenses -terminated so long as such parties remain in full compliance. - - 9. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Library or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Library (or any work based on the -Library), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Library or works based on it. - - 10. Each time you redistribute the Library (or any work based on the -Library), the recipient automatically receives a license from the -original licensor to copy, distribute, link with or modify the Library -subject to these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties with -this License. - - 11. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Library at all. For example, if a patent -license would not permit royalty-free redistribution of the Library by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Library. - -If any portion of this section is held invalid or unenforceable under any -particular circumstance, the balance of the section is intended to apply, -and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 12. 
If the distribution and/or use of the Library is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Library under this License may add -an explicit geographical distribution limitation excluding those countries, -so that distribution is permitted only in or among countries not thus -excluded. In such case, this License incorporates the limitation as if -written in the body of this License. - - 13. The Free Software Foundation may publish revised and/or new -versions of the Lesser General Public License from time to time. -Such new versions will be similar in spirit to the present version, -but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Library -specifies a version number of this License which applies to it and -"any later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Library does not specify a -license version number, you may choose any version ever published by -the Free Software Foundation. - - 14. If you wish to incorporate parts of the Library into other free -programs whose distribution conditions are incompatible with these, -write to the author to ask for permission. For software which is -copyrighted by the Free Software Foundation, write to the Free -Software Foundation; we sometimes make exceptions for this. Our -decision will be guided by the two goals of preserving the free status -of all derivatives of our free software and of promoting the sharing -and reuse of software generally. - - NO WARRANTY - - 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY -KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE -LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME -THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU -FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR -CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE -LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING -RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A -FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF -SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH -DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Libraries - - If you develop a new library, and you want it to be of the greatest -possible use to the public, we recommend making it free software that -everyone can redistribute and change. You can do so by permitting -redistribution under these terms (or, alternatively, under the terms of the -ordinary General Public License). - - To apply these terms, attach the following notices to the library. 
It is -safest to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -Also add information on how to contact you by electronic and paper mail. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the library, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - library `Frob' (a library for tweaking knobs) written by James Random Hacker. - - , 1 April 1990 - Ty Coon, President of Vice - -That's all there is to it! - - diff --git a/external/cclib/__init__.py b/external/cclib/__init__.py deleted file mode 100644 index 1c94f946c3..0000000000 --- a/external/cclib/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006-2010, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 888 $" -__version__ = "1.0" - -import parser -import progress -import method -import bridge - -# The test module can be imported if it was installed with cclib. -try: - import test -except: - pass diff --git a/external/cclib/bridge/__init__.py b/external/cclib/bridge/__init__.py deleted file mode 100644 index 109ce6fb7f..0000000000 --- a/external/cclib/bridge/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 839 $" - -try: - import openbabel -except Exception: - pass -else: - from cclib2openbabel import makeopenbabel - -try: - import PyQuante -except ImportError: - pass -else: - from cclib2pyquante import makepyquante - -try: - from cclib2biopython import makebiopython -except ImportError: - pass diff --git a/external/cclib/bridge/cclib2biopython.py b/external/cclib/bridge/cclib2biopython.py deleted file mode 100644 index a4ff08dc16..0000000000 --- a/external/cclib/bridge/cclib2biopython.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 709 $" - -from Bio.PDB.Atom import Atom -from cclib.parser.utils import PeriodicTable - -def makebiopython(atomcoords, atomnos): - """Create a list of BioPython Atoms. - - This creates a list of BioPython Atoms suitable for use - by Bio.PDB.Superimposer, for example. 
- - >>> import numpy - >>> from Bio.PDB.Superimposer import Superimposer - >>> atomnos = numpy.array([1,8,1],"i") - >>> a = numpy.array([[-1,1,0],[0,0,0],[1,1,0]],"f") - >>> b = numpy.array([[1.1,2,0],[1,1,0],[2,1,0]],"f") - >>> si = Superimposer() - >>> si.set_atoms(makebiopython(a,atomnos),makebiopython(b,atomnos)) - >>> print si.rms - 0.29337859596 - """ - pt = PeriodicTable() - bioatoms = [] - for coords, atomno in zip(atomcoords, atomnos): - bioatoms.append(Atom(pt.element[atomno], coords, 0, 0, 0, 0, 0)) - return bioatoms - -if __name__ == "__main__": - import doctest - doctest.testmod() diff --git a/external/cclib/bridge/cclib2openbabel.py b/external/cclib/bridge/cclib2openbabel.py deleted file mode 100644 index fd645ea5a6..0000000000 --- a/external/cclib/bridge/cclib2openbabel.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 867 $" - -import openbabel as ob - -def makeopenbabel(atomcoords, atomnos, charge=0, mult=1): - """Create an Open Babel molecule. - - >>> import numpy, openbabel - >>> atomnos = numpy.array([1,8,1],"i") - >>> coords = numpy.array([[-1.,1.,0.],[0.,0.,0.],[1.,1.,0.]]) - >>> obmol = makeopenbabel(coords, atomnos) - >>> obconversion = openbabel.OBConversion() - >>> formatok = obconversion.SetOutFormat("inchi") - >>> print obconversion.WriteString(obmol).strip() - InChI=1/H2O/h1H2 - """ - obmol = ob.OBMol() - for i in range(len(atomnos)): - # Note that list(atomcoords[i]) is not equivalent!!! - coords = atomcoords[i].tolist() - atomno = int(atomnos[i]) - obatom = ob.OBAtom() - obatom.SetAtomicNum(atomno) - obatom.SetVector(*coords) - obmol.AddAtom(obatom) - obmol.ConnectTheDots() - obmol.PerceiveBondOrders() - obmol.SetTotalSpinMultiplicity(mult) - obmol.SetTotalCharge(charge) - return obmol - -if __name__ == "__main__": - import doctest - doctest.testmod() diff --git a/external/cclib/bridge/cclib2pyquante.py b/external/cclib/bridge/cclib2pyquante.py deleted file mode 100644 index fc24f84cff..0000000000 --- a/external/cclib/bridge/cclib2pyquante.py +++ /dev/null @@ -1,27 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 737 $" - -from PyQuante.Molecule import Molecule - -def makepyquante(atomcoords, atomnos, charge=0, mult=1): - """Create a PyQuante Molecule. - - >>> import numpy - >>> from PyQuante.hartree_fock import hf - >>> atomnos = numpy.array([1,8,1],"i") - >>> a = numpy.array([[-1,1,0],[0,0,0],[1,1,0]],"f") - >>> pyqmol = makepyquante(a,atomnos) - >>> en,orbe,orbs = hf(pyqmol) - >>> print int(en * 10) / 10. # Should be around -73.8 - -73.8 - """ - return Molecule("notitle", zip(atomnos, atomcoords), units="Angstrom", - charge=charge, multiplicity=mult) - -if __name__ == "__main__": - import doctest - doctest.testmod() diff --git a/external/cclib/method/__init__.py b/external/cclib/method/__init__.py deleted file mode 100644 index b8f2d2f0e4..0000000000 --- a/external/cclib/method/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). 
-""" - -__revision__ = "$Revision: 631 $" - -from density import Density -from cspa import CSPA -from mpa import MPA -from lpa import LPA -from opa import OPA -from mbo import MBO -from fragments import FragmentAnalysis -from cda import CDA diff --git a/external/cclib/method/calculationmethod.py b/external/cclib/method/calculationmethod.py deleted file mode 100644 index 914d88b26e..0000000000 --- a/external/cclib/method/calculationmethod.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 733 $" - -import logging -import sys - - -class Method(object): - """Abstract class for logfile objects. - - Subclasses defined by cclib: - Density, Fragments, OPA, Population - - Attributes: - data - ccData source data object - """ - def __init__(self, data, progress=None, - loglevel=logging.INFO, logname="Log"): - """Initialise the Logfile object. - - Typically called by subclasses in their own __init__ methods. - """ - - self.data = data - self.progress = progress - self.loglevel = loglevel - self.logname = logname - - # Set up the logger. - self.logger = logging.getLogger('%s %s' % (self.logname, self.data)) - self.logger.setLevel(self.loglevel) - handler = logging.StreamHandler(sys.stdout) - handler.setFormatter(logging.Formatter( - "[%(name)s %(levelname)s] %(message)s")) - self.logger.addHandler(handler) - - -if __name__ == "__main__": - import doctest, calculationmethod - doctest.testmod(calculationmethod, verbose=False) diff --git a/external/cclib/method/cda.py b/external/cclib/method/cda.py deleted file mode 100644 index bcc0d61c8a..0000000000 --- a/external/cclib/method/cda.py +++ /dev/null @@ -1,123 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 453 $" - -import random # For sometimes running the progress updater - -import numpy - -from fragments import FragmentAnalysis - - -class CDA(FragmentAnalysis): - """Charge Decomposition Analysis (CDA)""" - - def __init__(self, *args): - - # Call the __init__ method of the superclass. - super(FragmentAnalysis, self).__init__(logname="CDA", *args) - - def __str__(self): - """Return a string representation of the object.""" - return "CDA of" % (self.data) - - def __repr__(self): - """Return a representation of the object.""" - return 'CDA("%s")' % (self.data) - - def calculate(self, fragments, cupdate=0.05): - """Perform the charge decomposition analysis. - - Inputs: - fragments - list of ccData data objects - """ - - - retval = super(CDA, self).calculate(fragments, cupdate) - if not retval: - return False - - # At this point, there should be a mocoeffs and fooverlaps - # in analogy to a ccData object. - - donations = [] - bdonations = [] - repulsions = [] - residuals = [] - - if len(self.mocoeffs) == 2: - occs = 1 - else: - occs = 2 - - # Intialize progress if available. - nstep = self.data.homos[0] - if len(self.data.homos) == 2: - nstep += self.data.homos[1] - if self.progress: - self.progress.initialize(nstep) - - # Begin the actual method. 
- step = 0 - for spin in range(len(self.mocoeffs)): - - size = len(self.mocoeffs[spin]) - homo = self.data.homos[spin] - - if len(fragments[0].homos) == 2: - homoa = fragments[0].homos[spin] - else: - homoa = fragments[0].homos[0] - - if len(fragments[1].homos) == 2: - homob = fragments[1].homos[spin] - else: - homob = fragments[1].homos[0] - - offset = fragments[0].nbasis - - self.logger.info("Creating donations, bdonations, and repulsions: array[]") - donations.append(numpy.zeros(size, "d")) - bdonations.append(numpy.zeros(size, "d")) - repulsions.append(numpy.zeros(size, "d")) - residuals.append(numpy.zeros(size, "d")) - - for i in range(self.data.homos[spin] + 1): - - # Calculate donation for each MO. - for k in range(0, homoa + 1): - for n in range(offset + homob + 1, self.data.nbasis): - donations[spin][i] += 2 * occs * self.mocoeffs[spin][i,k] \ - * self.mocoeffs[spin][i,n] * self.fooverlaps[k][n] - - for l in range(offset, offset + homob + 1): - for m in range(homoa + 1, offset): - bdonations[spin][i] += 2 * occs * self.mocoeffs[spin][i,l] \ - * self.mocoeffs[spin][i,m] * self.fooverlaps[l][m] - - for k in range(0, homoa + 1): - for m in range(offset, offset+homob + 1): - repulsions[spin][i] += 2 * occs * self.mocoeffs[spin][i,k] \ - * self.mocoeffs[spin][i, m] * self.fooverlaps[k][m] - - for m in range(homoa + 1, offset): - for n in range(offset + homob + 1, self.data.nbasis): - residuals[spin][i] += 2 * occs * self.mocoeffs[spin][i,m] \ - * self.mocoeffs[spin][i, n] * self.fooverlaps[m][n] - - step += 1 - if self.progress and random.random() < cupdate: - self.progress.update(step, "Charge Decomposition Analysis...") - - if self.progress: - self.progress.update(nstep, "Done.") - - self.donations = donations - self.bdonations = bdonations - self.repulsions = repulsions - self.residuals = residuals - - return True diff --git a/external/cclib/method/cspa.py b/external/cclib/method/cspa.py deleted file mode 100644 index d457002c5e..0000000000 --- a/external/cclib/method/cspa.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 733 $" - -import random # For sometimes running the progress updater - -import numpy - -from population import Population - - -class CSPA(Population): - """The C-squared population analysis.""" - - def __init__(self, *args): - - # Call the __init__ method of the superclass. - super(CSPA, self).__init__(logname="CSPA", *args) - - def __str__(self): - """Return a string representation of the object.""" - return "CSPA of" % (self.data) - - def __repr__(self): - """Return a representation of the object.""" - return 'CSPA("%s")' % (self.data) - - def calculate(self, indices=None, fupdate=0.05): - """Perform the C squared population analysis. - - Inputs: - indices - list of lists containing atomic orbital indices of fragments - """ - - # Do we have the needed info in the parser? - if not hasattr(self.data, "mocoeffs"): - self.logger.error("Missing mocoeffs") - return False - if not hasattr(self.data, "nbasis"): - self.logger.error("Missing nbasis") - return False - if not hasattr(self.data, "homos"): - self.logger.error("Missing homos") - return False - - self.logger.info("Creating attribute aoresults: array[3]") - - # Determine number of steps, and whether process involves beta orbitals. 
- unrestricted = (len(self.data.mocoeffs)==2) - nbasis = self.data.nbasis - self.aoresults = [] - alpha = len(self.data.mocoeffs[0]) - self.aoresults.append(numpy.zeros([alpha, nbasis], "d")) - nstep = alpha - if unrestricted: - beta = len(self.data.mocoeffs[1]) - self.aoresults.append(numpy.zeros([beta, nbasis], "d")) - nstep += beta - - # Intialize progress if available. - if self.progress: - self.progress.initialize(nstep) - - step = 0 - for spin in range(len(self.data.mocoeffs)): - - for i in range(len(self.data.mocoeffs[spin])): - - if self.progress and random.random() < fupdate: - self.progress.update(step, "C^2 Population Analysis") - - submocoeffs = self.data.mocoeffs[spin][i] - scale = numpy.inner(submocoeffs, submocoeffs) - tempcoeffs = numpy.multiply(submocoeffs, submocoeffs) - tempvec = tempcoeffs/scale - self.aoresults[spin][i] = numpy.divide(tempcoeffs, scale).astype("d") - - step += 1 - - if self.progress: - self.progress.update(nstep, "Done") - - retval = super(CSPA, self).partition(indices) - - if not retval: - self.logger.error("Error in partitioning results") - return False - - self.logger.info("Creating fragcharges: array[1]") - size = len(self.fragresults[0][0]) - self.fragcharges = numpy.zeros([size], "d") - - for spin in range(len(self.fragresults)): - - for i in range(self.data.homos[spin] + 1): - - temp = numpy.reshape(self.fragresults[spin][i], (size,)) - self.fragcharges = numpy.add(self.fragcharges, temp) - - if not unrestricted: - self.fragcharges = numpy.multiply(self.fragcharges, 2) - - return True - - -if __name__ == "__main__": - import doctest, cspa - doctest.testmod(cspa, verbose=False) diff --git a/external/cclib/method/density.py b/external/cclib/method/density.py deleted file mode 100644 index 7def358f1c..0000000000 --- a/external/cclib/method/density.py +++ /dev/null @@ -1,85 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 733 $" - -import random # For sometimes running the progress updater -import logging - -import numpy - -from calculationmethod import Method - - -class Density(Method): - """Calculate the density matrix""" - def __init__(self, data, progress=None, loglevel=logging.INFO, - logname="Density"): - - # Call the __init__ method of the superclass. - super(Density, self).__init__(data, progress, loglevel, logname) - - def __str__(self): - """Return a string representation of the object.""" - return "Density matrix of" % (self.data) - - def __repr__(self): - """Return a representation of the object.""" - return 'Density matrix("%s")' % (self.data) - - def calculate(self, fupdate=0.05): - """Calculate the density matrix.""" - - # Do we have the needed info in the data object? 
- if not hasattr(self.data, "mocoeffs"): - self.logger.error("Missing mocoeffs") - return False - if not hasattr(self.data,"nbasis"): - self.logger.error("Missing nbasis") - return False - if not hasattr(self.data,"homos"): - self.logger.error("Missing homos") - return False - - self.logger.info("Creating attribute density: array[3]") - size = self.data.nbasis - unrestricted = (len(self.data.mocoeffs) == 2) - - #determine number of steps, and whether process involves beta orbitals - nstep = self.data.homos[0] + 1 - if unrestricted: - self.density = numpy.zeros([2, size, size], "d") - nstep += self.data.homos[1] + 1 - else: - self.density = numpy.zeros([1, size, size], "d") - - #intialize progress if available - if self.progress: - self.progress.initialize(nstep) - - step = 0 - for spin in range(len(self.data.mocoeffs)): - - for i in range(self.data.homos[spin] + 1): - - if self.progress and random.random() < fupdate: - self.progress.update(step, "Density Matrix") - - col = numpy.reshape(self.data.mocoeffs[spin][i], (size, 1)) - colt = numpy.reshape(col, (1, size)) - - tempdensity = numpy.dot(col, colt) - self.density[spin] = numpy.add(self.density[spin], - tempdensity) - - step += 1 - - if not unrestricted: #multiply by two to account for second electron - self.density[0] = numpy.add(self.density[0], self.density[0]) - - if self.progress: - self.progress.update(nstep, "Done") - - return True #let caller know we finished density diff --git a/external/cclib/method/fragments.py b/external/cclib/method/fragments.py deleted file mode 100644 index 87517016cf..0000000000 --- a/external/cclib/method/fragments.py +++ /dev/null @@ -1,134 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 238 $" - -import random # For sometimes running the progress updater - -import numpy -numpy.inv = numpy.linalg.inv - -from calculationmethod import * - - -class FragmentAnalysis(Method): - """Convert a molecule's basis functions from atomic-based to fragment MO-based""" - def __init__(self, data, progress=None, loglevel=logging.INFO, - logname="FragmentAnalysis of"): - - # Call the __init__ method of the superclass. - super(FragmentAnalysis, self).__init__(data, progress, loglevel, logname) - self.parsed = False - - def __str__(self): - """Return a string representation of the object.""" - return "Fragment molecule basis of" % (self.data) - - def __repr__(self): - """Return a representation of the object.""" - return 'Fragment molecular basis("%s")' % (self.data) - - def calculate(self, fragments, cupdate=0.05): - - nFragBasis = 0 - nFragAlpha = 0 - nFragBeta = 0 - self.fonames = [] - - unrestricted = ( len(self.data.mocoeffs) == 2 ) - - self.logger.info("Creating attribute fonames[]") - - # Collect basis info on the fragments. 
- for j in range(len(fragments)): - nFragBasis += fragments[j].nbasis - nFragAlpha += fragments[j].homos[0] + 1 - if unrestricted and len(fragments[j].homos) == 1: - nFragBeta += fragments[j].homos[0] + 1 #assume restricted fragment - elif unrestricted and len(fragments[j].homos) == 2: - nFragBeta += fragments[j].homos[1] + 1 #assume unrestricted fragment - - #assign fonames based on fragment name and MO number - for i in range(fragments[j].nbasis): - if hasattr(fragments[j],"name"): - self.fonames.append("%s_%i"%(fragments[j].name,i+1)) - else: - self.fonames.append("noname%i_%i"%(j,i+1)) - - nBasis = self.data.nbasis - nAlpha = self.data.homos[0] + 1 - if unrestricted: - nBeta = self.data.homos[1] + 1 - - # Check to make sure calcs have the right properties. - if nBasis != nFragBasis: - self.logger.error("Basis functions don't match") - return False - - if nAlpha != nFragAlpha: - self.logger.error("Alpha electrons don't match") - return False - - if unrestricted and nBeta != nFragBeta: - self.logger.error("Beta electrons don't match") - return False - - if len(self.data.atomcoords) != 1: - self.logger.warning("Molecule calc appears to be an optimization") - - for frag in fragments: - if len(frag.atomcoords) != 1: - self.logger.warning("One or more fragment appears to be an optimization") - break - - last = 0 - for frag in fragments: - size = frag.natom - if self.data.atomcoords[0][last:last+size].tolist() != frag.atomcoords[0].tolist(): - self.logger.error("Atom coordinates aren't aligned") - return False - - last += size - - # And let's begin! - self.mocoeffs = [] - self.logger.info("Creating mocoeffs in new fragment MO basis: mocoeffs[]") - - for spin in range(len(self.data.mocoeffs)): - blockMatrix = numpy.zeros((nBasis,nBasis), "d") - pos = 0 - - # Build up block-diagonal matrix from fragment mocoeffs. - # Need to switch ordering from [mo,ao] to [ao,mo]. - for i in range(len(fragments)): - size = fragments[i].nbasis - if len(fragments[i].mocoeffs) == 1: - blockMatrix[pos:pos+size,pos:pos+size] = numpy.transpose(fragments[i].mocoeffs[0]) - else: - blockMatrix[pos:pos+size,pos:pos+size] = numpy.transpose(fragments[i].mocoeffs[spin]) - pos += size - - # Invert and mutliply to result in fragment MOs as basis. - iBlockMatrix = numpy.inv(blockMatrix) - results = numpy.transpose(numpy.dot(iBlockMatrix, numpy.transpose(self.data.mocoeffs[spin]))) - self.mocoeffs.append(results) - - if hasattr(self.data, "aooverlaps"): - tempMatrix = numpy.dot(self.data.aooverlaps, blockMatrix) - tBlockMatrix = numpy.transpose(blockMatrix) - if spin == 0: - self.fooverlaps = numpy.dot(tBlockMatrix, tempMatrix) - self.logger.info("Creating fooverlaps: array[x,y]") - elif spin == 1: - self.fooverlaps2 = numpy.dot(tBlockMatrix, tempMatrix) - self.logger.info("Creating fooverlaps (beta): array[x,y]") - else: - self.logger.warning("Overlap matrix missing") - - self.parsed = True - self.nbasis = nBasis - self.homos = self.data.homos - - return True diff --git a/external/cclib/method/lpa.py b/external/cclib/method/lpa.py deleted file mode 100644 index 56c0af93bc..0000000000 --- a/external/cclib/method/lpa.py +++ /dev/null @@ -1,132 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). 
-""" - -__revision__ = "$Revision: 837 $" - -import random - -import numpy - -from population import Population - - -class LPA(Population): - """The Lowdin population analysis""" - def __init__(self, *args): - - # Call the __init__ method of the superclass. - super(LPA, self).__init__(logname="LPA", *args) - - def __str__(self): - """Return a string representation of the object.""" - return "LPA of" % (self.data) - - def __repr__(self): - """Return a representation of the object.""" - return 'LPA("%s")' % (self.data) - - def calculate(self, indices=None, x=0.5, fupdate=0.05): - """Perform a calculation of Lowdin population analysis. - - Inputs: - indices - list of lists containing atomic orbital indices of fragments - x - overlap matrix exponent in wavefunxtion projection (x=0.5 for Lowdin) - """ - - # Do we have the needed info in the parser? - if not hasattr(self.data,"mocoeffs"): - self.logger.error("Missing mocoeffs") - return False - if not (hasattr(self.data, "aooverlaps") \ - or hasattr(self.data, "fooverlaps") ): - self.logger.error("Missing overlap matrix") - return False - if not hasattr(self.data, "nbasis"): - self.logger.error("Missing nbasis") - return False - if not hasattr(self.data, "homos"): - self.logger.error("Missing homos") - return False - - unrestricted = (len(self.data.mocoeffs) == 2) - nbasis = self.data.nbasis - - # Determine number of steps, and whether process involves beta orbitals. - self.logger.info("Creating attribute aoresults: [array[2]]") - alpha = len(self.data.mocoeffs[0]) - self.aoresults = [ numpy.zeros([alpha, nbasis], "d") ] - nstep = alpha - - if unrestricted: - beta = len(self.data.mocoeffs[1]) - self.aoresults.append(numpy.zeros([beta, nbasis], "d")) - nstep += beta - - #intialize progress if available - if self.progress: - self.progress.initialize(nstep) - - if hasattr(self.data, "aooverlaps"): - S = self.data.aooverlaps - elif hasattr(self.data, "fooverlaps"): - S = self.data.fooverlaps - - # Get eigenvalues and matrix of eigenvectors for transformation decomposition (U). - # Find roots of diagonal elements, and transform backwards using eigevectors. - # We need two matrices here, one for S^x, another for S^(1-x). - # We don't need to invert U, since S is symmetrical. - eigenvalues, U = numpy.linalg.eig(S) - UI = U.transpose() - Sdiagroot1 = numpy.identity(len(S))*numpy.power(eigenvalues, x) - Sdiagroot2 = numpy.identity(len(S))*numpy.power(eigenvalues, 1-x) - Sroot1 = numpy.dot(U, numpy.dot(Sdiagroot1, UI)) - Sroot2 = numpy.dot(U, numpy.dot(Sdiagroot2, UI)) - - step = 0 - for spin in range(len(self.data.mocoeffs)): - - for i in range(len(self.data.mocoeffs[spin])): - - if self.progress and random.random() < fupdate: - self.progress.update(step, "Lowdin Population Analysis") - - ci = self.data.mocoeffs[spin][i] - - temp1 = numpy.dot(ci, Sroot1) - temp2 = numpy.dot(ci, Sroot2) - self.aoresults[spin][i] = numpy.multiply(temp1, temp2).astype("d") - - step += 1 - - if self.progress: - self.progress.update(nstep, "Done") - - retval = super(LPA, self).partition(indices) - - if not retval: - self.logger.error("Error in partitioning results") - return False - - # Create array for charges. 
- self.logger.info("Creating fragcharges: array[1]") - size = len(self.fragresults[0][0]) - self.fragcharges = numpy.zeros([size], "d") - - for spin in range(len(self.fragresults)): - - for i in range(self.data.homos[spin] + 1): - - temp = numpy.reshape(self.fragresults[spin][i], (size,)) - self.fragcharges = numpy.add(self.fragcharges, temp) - - if not unrestricted: - self.fragcharges = numpy.multiply(self.fragcharges, 2) - - return True - - -if __name__ == "__main__": - import doctest, lpa - doctest.testmod(lpa, verbose=False) diff --git a/external/cclib/method/mbo.py b/external/cclib/method/mbo.py deleted file mode 100644 index f3ea5346a9..0000000000 --- a/external/cclib/method/mbo.py +++ /dev/null @@ -1,121 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 733 $" - -import random # For sometimes running the progress updater - -import numpy - -from density import Density - - -class MBO(Density): - """Calculate the density matrix.""" - - def __init__(self, *args): - - # Call the __init__ method of the superclass. - super(MBO, self).__init__(logname="MBO", *args) - - def __str__(self): - """Return a string representation of the object.""" - return "Mayer's bond order of" % (self.data) - - def __repr__(self): - """Return a representation of the object.""" - return 'Mayer\'s bond order("%s")' % (self.data) - - def calculate(self, indices=None, fupdate=0.05): - """Calculate Mayer's bond orders.""" - - retval = super(MBO, self).calculate(fupdate) - if not retval: #making density didn't work - return False - - # Do we have the needed info in the ccData object? - if not (hasattr(self.data, "aooverlaps") - or hasattr(self.data, "fooverlaps")): - self.logger.error("Missing overlap matrix") - return False #let the caller of function know we didn't finish - - if not indices: - - # Build list of groups of orbitals in each atom for atomresults. - if hasattr(self.data, "aonames"): - names = self.data.aonames - overlaps = self.data.aooverlaps - elif hasattr(self.data, "fonames"): - names = self.data.fonames - overlaps = self.data.fooverlaps - else: - self.logger.error("Missing aonames or fonames") - return False - - atoms = [] - indices = [] - - name = names[0].split('_')[0] - atoms.append(name) - indices.append([0]) - - for i in range(1, len(names)): - name = names[i].split('_')[0] - try: - index = atoms.index(name) - except ValueError: #not found in atom list - atoms.append(name) - indices.append([i]) - else: - indices[index].append(i) - - self.logger.info("Creating attribute fragresults: array[3]") - size = len(indices) - - # Determine number of steps, and whether process involves beta orbitals. - PS = [] - PS.append(numpy.dot(self.density[0], overlaps)) - nstep = size**2 #approximately quadratic in size - unrestricted = (len(self.data.mocoeffs) == 2) - if unrestricted: - self.fragresults = numpy.zeros([2, size, size], "d") - PS.append(numpy.dot(self.density[1], overlaps)) - else: - self.fragresults = numpy.zeros([1, size, size], "d") - - # Intialize progress if available. 
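The removed mbo.py above (and opa.py and population.py further down in this hunk) builds its fragment index lists, when none are supplied, by splitting each entry of aonames on "_" and grouping by the atom label. In isolation that grouping is just the following (the aonames list here is a made-up example):

    aonames = ["C1_1S", "C1_2S", "O2_1S"]      # illustrative names, not parser output
    atoms, indices = [], []
    for i, name in enumerate(aonames):
        label = name.split("_")[0]
        if label not in atoms:                 # first AO seen on this atom
            atoms.append(label)
            indices.append([i])
        else:                                  # append to the existing group
            indices[atoms.index(label)].append(i)
    print(atoms, indices)                      # ['C1', 'O2'] [[0, 1], [2]]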
- if self.progress: - self.progress.initialize(nstep) - - step = 0 - for i in range(len(indices)): - - if self.progress and random.random() < fupdate: - self.progress.update(step, "Mayer's Bond Order") - - for j in range(i+1, len(indices)): - - tempsumA = 0 - tempsumB = 0 - - for a in indices[i]: - - for b in indices[j]: - - tempsumA += 2 * PS[0][a][b] * PS[0][b][a] - if unrestricted: - tempsumB += 2 * PS[1][a][b] * PS[1][b][a] - - self.fragresults[0][i, j] = tempsumA - self.fragresults[0][j, i] = tempsumA - - if unrestricted: - self.fragresults[1][i, j] = tempsumB - self.fragresults[1][j, i] = tempsumB - - if self.progress: - self.progress.update(nstep, "Done") - - return True diff --git a/external/cclib/method/mpa.py b/external/cclib/method/mpa.py deleted file mode 100644 index 0690c5f47f..0000000000 --- a/external/cclib/method/mpa.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 733 $" - -import random - -import numpy - -from population import Population - - -class MPA(Population): - """The Mulliken population analysis.""" - - def __init__(self, *args): - - # Call the __init__ method of the superclass. - super(MPA, self).__init__(logname="MPA", *args) - - def __str__(self): - """Return a string representation of the object.""" - return "MPA of" % (self.data) - - def __repr__(self): - """Return a representation of the object.""" - return 'MPA("%s")' % (self.data) - - def calculate(self, indices=None, fupdate=0.05): - """Perform a Mulliken population analysis.""" - - # Do we have the needed attributes in the data object? - if not hasattr(self.data, "mocoeffs"): - self.logger.error("Missing mocoeffs") - return False - if not (hasattr(self.data, "aooverlaps") \ - or hasattr(self.data, "fooverlaps") ): - self.logger.error("Missing overlap matrix") - return False - if not hasattr(self.data, "nbasis"): - self.logger.error("Missing nbasis") - return False - if not hasattr(self.data, "homos"): - self.logger.error("Missing homos") - return False - - - # Determine number of steps, and whether process involves beta orbitals. - self.logger.info("Creating attribute aoresults: [array[2]]") - nbasis = self.data.nbasis - alpha = len(self.data.mocoeffs[0]) - self.aoresults = [ numpy.zeros([alpha, nbasis], "d") ] - nstep = alpha - unrestricted = (len(self.data.mocoeffs) == 2) - if unrestricted: - beta = len(self.data.mocoeffs[1]) - self.aoresults.append(numpy.zeros([beta, nbasis], "d")) - nstep += beta - - # Intialize progress if available. 
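The nested fragment loop in the removed mbo.py above accumulates 2*(PS)_ab*(PS)_ba over AO pairs sitting on different atoms, where P is the density matrix and S the AO overlap. A compact sketch of the same sum with toy matrices (P, S and ao_to_atom are invented for illustration):

    import numpy as np

    P = np.array([[1.2, 0.4],
                  [0.4, 0.8]])      # toy density matrix
    S = np.array([[1.0, 0.3],
                  [0.3, 1.0]])      # toy AO overlap matrix
    ao_to_atom = [0, 1]             # which atom each AO belongs to

    PS = P @ S
    natoms = max(ao_to_atom) + 1
    orders = np.zeros((natoms, natoms))
    for a in range(len(ao_to_atom)):
        for b in range(len(ao_to_atom)):
            A, B = ao_to_atom[a], ao_to_atom[b]
            if A != B:
                orders[A, B] += 2 * PS[a, b] * PS[b, a]   # same product the removed loop sums
    print(orders)                   # symmetric matrix of fragment-fragment bond orders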
- if self.progress: - self.progress.initialize(nstep) - - step = 0 - for spin in range(len(self.data.mocoeffs)): - - for i in range(len(self.data.mocoeffs[spin])): - - if self.progress and random.random() < fupdate: - self.progress.update(step, "Mulliken Population Analysis") - - #X_{ai} = \sum_b c_{ai} c_{bi} S_{ab} - # = c_{ai} \sum_b c_{bi} S_{ab} - # = c_{ai} C(i) \cdot S(a) - # X = C(i) * [C(i) \cdot S] - # C(i) is 1xn and S is nxn, result of matrix mult is 1xn - - ci = self.data.mocoeffs[spin][i] - if hasattr(self.data, "aooverlaps"): - temp = numpy.dot(ci, self.data.aooverlaps) - elif hasattr(self.data, "fooverlaps"): - temp = numpy.dot(ci, self.data.fooverlaps) - - self.aoresults[spin][i] = numpy.multiply(ci, temp).astype("d") - - step += 1 - - if self.progress: - self.progress.update(nstep, "Done") - - retval = super(MPA, self).partition(indices) - - if not retval: - self.logger.error("Error in partitioning results") - return False - - # Create array for mulliken charges. - self.logger.info("Creating fragcharges: array[1]") - size = len(self.fragresults[0][0]) - self.fragcharges = numpy.zeros([size], "d") - - for spin in range(len(self.fragresults)): - - for i in range(self.data.homos[spin] + 1): - - temp = numpy.reshape(self.fragresults[spin][i], (size,)) - self.fragcharges = numpy.add(self.fragcharges, temp) - - if not unrestricted: - self.fragcharges = numpy.multiply(self.fragcharges, 2) - - return True - -if __name__ == "__main__": - import doctest, mpa - doctest.testmod(mpa, verbose=False) diff --git a/external/cclib/method/opa.py b/external/cclib/method/opa.py deleted file mode 100644 index 14a6e22e10..0000000000 --- a/external/cclib/method/opa.py +++ /dev/null @@ -1,141 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 739 $" - -import random - -import numpy - -from calculationmethod import Method - - -def func(x): - if x==1: - return 1 - else: - return x+func(x-1) - - -class OPA(Method): - """The overlap population analysis.""" - - def __init__(self, *args): - - # Call the __init__ method of the superclass. - super(OPA, self).__init__(logname="OPA", *args) - - def __str__(self): - """Return a string representation of the object.""" - return "OPA of" % (self.data) - - def __repr__(self): - """Return a representation of the object.""" - return 'OPA("%s")' % (self.data) - - def calculate(self, indices=None, fupdate=0.05): - """Perform an overlap population analysis given the results of a parser""" - - # Do we have the needed info in the ccData object? - if not hasattr(self.data, "mocoeffs") \ - and not ( hasattr(self.data, "aooverlaps") \ - or hasattr(self.data, "fooverlaps") ) \ - and not hasattr(self.data, "nbasis"): - self.logger.error("Missing mocoeffs, aooverlaps/fooverlaps or nbasis") - return False #let the caller of function know we didn't finish - - if not indices: - - # Build list of groups of orbitals in each atom for atomresults. 
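The comment block in the removed mpa.py above spells out the Mulliken projection X_ai = c_ai * sum_b c_bi * S_ab. For a single MO that is one elementwise multiply and one dot product; a toy sketch (S and c are illustrative values only):

    import numpy as np

    S = np.array([[1.0, 0.25],
                  [0.25, 1.0]])     # toy AO overlap matrix
    c = np.array([0.7, 0.4])        # coefficients of one occupied MO

    X = c * (c @ S)                 # per-AO Mulliken contribution of this MO
    print(X, X.sum())               # X.sum() equals c @ S @ c

Summing X over the occupied MOs and over the AOs on each atom (times two for a restricted wavefunction) is what fills fragcharges at the end of calculate().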
- if hasattr(self.data, "aonames"): - names = self.data.aonames - elif hasattr(self.data, "foonames"): - names = self.data.fonames - - atoms = [] - indices = [] - - name = names[0].split('_')[0] - atoms.append(name) - indices.append([0]) - - for i in range(1, len(names)): - name = names[i].split('_')[0] - try: - index = atoms.index(name) - except ValueError: #not found in atom list - atoms.append(name) - indices.append([i]) - else: - indices[index].append(i) - - # Determine number of steps, and whether process involves beta orbitals. - nfrag = len(indices) #nfrag - nstep = func(nfrag - 1) - unrestricted = (len(self.data.mocoeffs) == 2) - alpha = len(self.data.mocoeffs[0]) - nbasis = self.data.nbasis - - self.logger.info("Creating attribute results: array[4]") - results= [ numpy.zeros([nfrag, nfrag, alpha], "d") ] - if unrestricted: - beta = len(self.data.mocoeffs[1]) - results.append(numpy.zeros([nfrag, nfrag, beta], "d")) - nstep *= 2 - - if hasattr(self.data, "aooverlaps"): - overlap = self.data.aooverlaps - elif hasattr(self.data,"fooverlaps"): - overlap = self.data.fooverlaps - - #intialize progress if available - if self.progress: - self.progress.initialize(nstep) - - size = len(self.data.mocoeffs[0]) - step = 0 - - preresults = [] - for spin in range(len(self.data.mocoeffs)): - two = numpy.array([2.0]*len(self.data.mocoeffs[spin]),"d") - - - # OP_{AB,i} = \sum_{a in A} \sum_{b in B} 2 c_{ai} c_{bi} S_{ab} - - for A in range(len(indices)-1): - - for B in range(A+1, len(indices)): - - if self.progress: #usually only a handful of updates, so remove random part - self.progress.update(step, "Overlap Population Analysis") - - for a in indices[A]: - - ca = self.data.mocoeffs[spin][:,a] - - for b in indices[B]: - - cb = self.data.mocoeffs[spin][:,b] - temp = ca * cb * two *overlap[a,b] - results[spin][A,B] = numpy.add(results[spin][A,B],temp) - results[spin][B,A] = numpy.add(results[spin][B,A],temp) - - step += 1 - - temparray2 = numpy.swapaxes(results[0],1,2) - self.results = [ numpy.swapaxes(temparray2,0,1) ] - if unrestricted: - temparray2 = numpy.swapaxes(results[1],1,2) - self.results.append(numpy.swapaxes(temparray2, 0, 1)) - - if self.progress: - self.progress.update(nstep, "Done") - - return True - - -if __name__ == "__main__": - import doctest, opa - doctest.testmod(opa, verbose=False) diff --git a/external/cclib/method/population.py b/external/cclib/method/population.py deleted file mode 100644 index 7964a6bafb..0000000000 --- a/external/cclib/method/population.py +++ /dev/null @@ -1,94 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 733 $" - -import logging - -import numpy - -from calculationmethod import Method - - -class Population(Method): - """A base class for all population-type methods.""" - - def __init__(self, data, progress=None, \ - loglevel=logging.INFO, logname="Log"): - - # Call the __init__ method of the superclass. - super(Population, self).__init__(data, progress, loglevel, logname) - self.fragresults = None - - def __str__(self): - """Return a string representation of the object.""" - return "Population" - - def __repr__(self): - """Return a representation of the object.""" - return "Population" - - def partition(self, indices=None): - - if not hasattr(self, "aoresults"): - self.calculate() - - if not indices: - - # Build list of groups of orbitals in each atom for atomresults. 
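The triple loop in the removed opa.py above implements OP_AB(i) = sum_{a in A} sum_{b in B} 2 c_ai c_bi S_ab. With one MO and two single-AO "fragments" the whole thing collapses to a one-liner (again toy inputs, not parsed data):

    import numpy as np

    S = np.array([[1.0, 0.3],
                  [0.3, 1.0]])          # toy AO overlap matrix
    c = np.array([0.6, 0.5])            # coefficients of one MO
    frag_A, frag_B = [0], [1]           # AO indices belonging to each fragment

    op = sum(2 * c[a] * c[b] * S[a, b] for a in frag_A for b in frag_B)
    print(op)                           # a positive value is conventionally read as bonding overlap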
- if hasattr(self.data, "aonames"): - names = self.data.aonames - elif hasattr(self.data, "fonames"): - names = self.data.fonames - - atoms = [] - indices = [] - - name = names[0].split('_')[0] - atoms.append(name) - indices.append([0]) - - for i in range(1, len(names)): - name = names[i].split('_')[0] - try: - index = atoms.index(name) - except ValueError: #not found in atom list - atoms.append(name) - indices.append([i]) - else: - indices[index].append(i) - - natoms = len(indices) - nmocoeffs = len(self.aoresults[0]) - - # Build results numpy array[3]. - alpha = len(self.aoresults[0]) - results = [] - results.append(numpy.zeros([alpha, natoms], "d")) - - if len(self.aoresults) == 2: - beta = len(self.aoresults[1]) - results.append(numpy.zeros([beta, natoms], "d")) - - # For each spin, splice numpy array at ao index, - # and add to correct result row. - for spin in range(len(results)): - - for i in range(natoms): # Number of groups. - - for j in range(len(indices[i])): # For each group. - - temp = self.aoresults[spin][:, indices[i][j]] - results[spin][:, i] = numpy.add(results[spin][:, i], temp) - - self.logger.info("Saving partitioned results in fragresults: [array[2]]") - self.fragresults = results - - return True - - -if __name__ == "__main__": - import doctest, population - doctest.testmod(population, verbose=False) diff --git a/external/cclib/method/volume.py b/external/cclib/method/volume.py deleted file mode 100644 index 08cc2d7c4e..0000000000 --- a/external/cclib/method/volume.py +++ /dev/null @@ -1,264 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 742 $" - -import copy - -import numpy - -try: - from PyQuante.CGBF import CGBF - module_pyq = True -except: - module_pyq = False - -try: - from pyvtk import * - from pyvtk.DataSetAttr import * - module_pyvtk = True -except: - module_pyvtk = False - -from cclib.bridge import makepyquante -from cclib.parser.utils import convertor - - -class Volume(object): - """Represent a volume in space. 
- - Required parameters: - origin -- the bottom left hand corner of the volume - topcorner -- the top right hand corner - spacing -- the distance between the points in the cube - - Attributes: - data -- a numpy array of values for each point in the volume - (set to zero at initialisation) - numpts -- the numbers of points in the (x,y,z) directions - - """ - - def __init__(self, origin, topcorner, spacing): - - self.origin = origin - self.spacing = spacing - self.topcorner = topcorner - self.numpts = [] - for i in range(3): - self.numpts.append(int((self.topcorner[i]-self.origin[i])/self.spacing[i] + 1) ) - self.data = numpy.zeros( tuple(self.numpts), "d") - - def __str__(self): - """Return a string representation.""" - return "Volume %s to %s (density: %s)" % (self.origin, self.topcorner, - self.spacing) - - def write(self, filename, format="Cube"): - """Write the volume to file.""" - - format = format.upper() - - if format.upper() not in ["VTK", "CUBE"]: - raise "Format must be either VTK or Cube" - elif format=="VTK": - self.writeasvtk(filename) - else: - self.writeascube(filename) - - def writeasvtk(self, filename): - if not module_pyvtk: - raise Exception, "You need to have pyvtk installed" - ranges = (numpy.arange(self.data.shape[2]), - numpy.arange(self.data.shape[1]), - numpy.arange(self.data.shape[0])) - v = VtkData(RectilinearGrid(*ranges), "Test", - PointData(Scalars(self.data.ravel(), "from cclib", "default"))) - v.tofile(filename) - - def integrate(self): - boxvol = (self.spacing[0] * self.spacing[1] * self.spacing[2] * - convertor(1, "Angstrom", "bohr")**3) - return sum(self.data.ravel()) * boxvol - - def integrate_square(self): - boxvol = (self.spacing[0] * self.spacing[1] * self.spacing[2] * - convertor(1, "Angstrom", "bohr")**3) - return sum(self.data.ravel()**2) * boxvol - - def writeascube(self, filename): - # Remember that the units are bohr, not Angstroms - convert = lambda x : convertor(x, "Angstrom", "bohr") - ans = [] - ans.append("Cube file generated by cclib") - ans.append("") - format = "%4d%12.6f%12.6f%12.6f" - origin = [convert(x) for x in self.origin] - ans.append(format % (0, origin[0], origin[1], origin[2])) - ans.append(format % (self.data.shape[0], convert(self.spacing[0]), 0.0, 0.0)) - ans.append(format % (self.data.shape[1], 0.0, convert(self.spacing[1]), 0.0)) - ans.append(format % (self.data.shape[2], 0.0, 0.0, convert(self.spacing[2]))) - line = [] - for i in range(self.data.shape[0]): - for j in range(self.data.shape[1]): - for k in range(self.data.shape[2]): - line.append(scinotation(self.data[i][j][k])) - if len(line)==6: - ans.append(" ".join(line)) - line = [] - if line: - ans.append(" ".join(line)) - line = [] - outputfile = open(filename, "w") - outputfile.write("\n".join(ans)) - outputfile.close() - -def scinotation(num): - """Write in scientific notation - - >>> scinotation(1./654) - ' 1.52905E-03' - >>> scinotation(-1./654) - '-1.52905E-03' - """ - ans = "%10.5E" % num - broken = ans.split("E") - exponent = int(broken[1]) - if exponent<-99: - return " 0.000E+00" - if exponent<0: - sign="-" - else: - sign="+" - return ("%sE%s%s" % (broken[0],sign,broken[1][-2:])).rjust(12) - -def getbfs(coords, gbasis): - """Convenience function for both wavefunction and density based on PyQuante Ints.py.""" - mymol = makepyquante(coords, [0 for x in coords]) - - sym2powerlist = { - 'S' : [(0,0,0)], - 'P' : [(1,0,0),(0,1,0),(0,0,1)], - 'D' : [(2,0,0),(0,2,0),(0,0,2),(1,1,0),(0,1,1),(1,0,1)], - 'F' : [(3,0,0),(2,1,0),(2,0,1),(1,2,0),(1,1,1),(1,0,2), - 
(0,3,0),(0,2,1),(0,1,2), (0,0,3)] - } - - bfs = [] - for i,atom in enumerate(mymol): - bs = gbasis[i] - for sym,prims in bs: - for power in sym2powerlist[sym]: - bf = CGBF(atom.pos(),power) - for expnt,coef in prims: - bf.add_primitive(expnt,coef) - bf.normalize() - bfs.append(bf) - - return bfs - -def wavefunction(coords, mocoeffs, gbasis, volume): - """Calculate the magnitude of the wavefunction at every point in a volume. - - Attributes: - coords -- the coordinates of the atoms - mocoeffs -- mocoeffs for one eigenvalue - gbasis -- gbasis from a parser object - volume -- a template Volume object (will not be altered) - """ - bfs = getbfs(coords, gbasis) - - wavefn = copy.copy(volume) - wavefn.data = numpy.zeros( wavefn.data.shape, "d") - - conversion = convertor(1,"bohr","Angstrom") - x = numpy.arange(wavefn.origin[0], wavefn.topcorner[0]+wavefn.spacing[0], wavefn.spacing[0]) / conversion - y = numpy.arange(wavefn.origin[1], wavefn.topcorner[1]+wavefn.spacing[1], wavefn.spacing[1]) / conversion - z = numpy.arange(wavefn.origin[2], wavefn.topcorner[2]+wavefn.spacing[2], wavefn.spacing[2]) / conversion - - for bs in range(len(bfs)): - data = numpy.zeros( wavefn.data.shape, "d") - for i,xval in enumerate(x): - for j,yval in enumerate(y): - for k,zval in enumerate(z): - data[i, j, k] = bfs[bs].amp(xval,yval,zval) - numpy.multiply(data, mocoeffs[bs], data) - numpy.add(wavefn.data, data, wavefn.data) - - return wavefn - -def electrondensity(coords, mocoeffslist, gbasis, volume): - """Calculate the magnitude of the electron density at every point in a volume. - - Attributes: - coords -- the coordinates of the atoms - mocoeffs -- mocoeffs for all of the occupied eigenvalues - gbasis -- gbasis from a parser object - volume -- a template Volume object (will not be altered) - - Note: mocoeffs is a list of numpy arrays. The list will be of length 1 - for restricted calculations, and length 2 for unrestricted. - """ - bfs = getbfs(coords, gbasis) - - density = copy.copy(volume) - density.data = numpy.zeros( density.data.shape, "d") - - conversion = convertor(1,"bohr","Angstrom") - x = numpy.arange(density.origin[0], density.topcorner[0]+density.spacing[0], density.spacing[0]) / conversion - y = numpy.arange(density.origin[1], density.topcorner[1]+density.spacing[1], density.spacing[1]) / conversion - z = numpy.arange(density.origin[2], density.topcorner[2]+density.spacing[2], density.spacing[2]) / conversion - - for mocoeffs in mocoeffslist: - for mocoeff in mocoeffs: - wavefn = numpy.zeros( density.data.shape, "d") - for bs in range(len(bfs)): - data = numpy.zeros( density.data.shape, "d") - for i,xval in enumerate(x): - for j,yval in enumerate(y): - tmp = [] - for k,zval in enumerate(z): - tmp.append(bfs[bs].amp(xval, yval, zval)) - data[i,j,:] = tmp - numpy.multiply(data, mocoeff[bs], data) - numpy.add(wavefn, data, wavefn) - density.data += wavefn**2 - - if len(mocoeffslist) == 1: - density.data = density.data*2. 
# doubly-occupied - - return density - - -if __name__=="__main__": - - try: - import psyco - psyco.full() - except ImportError: - pass - - from cclib.parser import ccopen - import logging - a = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp_basis.log") - a.logger.setLevel(logging.ERROR) - c = a.parse() - - b = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp.out") - b.logger.setLevel(logging.ERROR) - d = b.parse() - - vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) ) - wavefn = wavefunction(d.atomcoords[0], d.mocoeffs[0][d.homos[0]], - c.gbasis, vol) - assert abs(wavefn.integrate())<1E-6 # not necessarily true for all wavefns - assert abs(wavefn.integrate_square() - 1.00)<1E-3 # true for all wavefns - print wavefn.integrate(), wavefn.integrate_square() - - vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) ) - frontierorbs = [d.mocoeffs[0][(d.homos[0]-3):(d.homos[0]+1)]] - density = electrondensity(d.atomcoords[0], frontierorbs, c.gbasis, vol) - assert abs(density.integrate()-8.00)<1E-2 - print "Combined Density of 4 Frontier orbitals=",density.integrate() diff --git a/external/cclib/parser/__init__.py b/external/cclib/parser/__init__.py deleted file mode 100644 index 8b4d5224a5..0000000000 --- a/external/cclib/parser/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -gmagoon 4/5/10-4/6/10 (this notice added 4/29/10): Gregory Magoon modified this file from cclib 1.0 -""" - -__revision__ = "$Revision: 863 $" - -# These import statements are added for the convenience of users... - -# Rather than having to type: -# from cclib.parser.gaussianparser import Gaussian -# they can use: -# from cclib.parser import Gaussian - -from adfparser import ADF -from gamessparser import GAMESS -from gamessukparser import GAMESSUK -from gaussianparser import Gaussian -from jaguarparser import Jaguar -from molproparser import Molpro -from orcaparser import ORCA -from mopacparser import Mopac -from mm4parser import MM4 - -# This allow users to type: -# from cclib.parser import ccopen - -from ccopen import ccopen diff --git a/external/cclib/parser/adfparser.py b/external/cclib/parser/adfparser.py deleted file mode 100644 index 237ee879db..0000000000 --- a/external/cclib/parser/adfparser.py +++ /dev/null @@ -1,882 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 861 $" - - -import numpy - -import logfileparser -import utils - - -class ADF(logfileparser.Logfile): - """An ADF log file""" - - def __init__(self, *args, **kwargs): - - # Call the __init__ method of the superclass - super(ADF, self).__init__(logname="ADF", *args, **kwargs) - - def __str__(self): - """Return a string representation of the object.""" - return "ADF log file %s" % (self.filename) - - def __repr__(self): - """Return a representation of the object.""" - return 'ADF("%s")' % (self.filename) - - def normalisesym(self, label): - """Use standard symmetry labels instead of ADF labels. - - To normalise: - (1) any periods are removed (except in the case of greek letters) - (2) XXX is replaced by X, and a " added. - (3) XX is replaced by X, and a ' added. - (4) The greek letters Sigma, Pi, Delta and Phi are replaced by - their lowercase equivalent. 
- - >>> sym = ADF("dummyfile").normalisesym - >>> labels = ['A','s','A1','A1.g','Sigma','Pi','Delta','Phi','Sigma.g','A.g','AA','AAA','EE1','EEE1'] - >>> map(sym,labels) - ['A', 's', 'A1', 'A1g', 'sigma', 'pi', 'delta', 'phi', 'sigma.g', 'Ag', "A'", 'A"', "E1'", 'E1"'] - """ - greeks = ['Sigma', 'Pi', 'Delta', 'Phi'] - for greek in greeks: - if label.startswith(greek): - return label.lower() - - ans = label.replace(".", "") - if ans[1:3] == "''": - temp = ans[0] + '"' - ans = temp - - l = len(ans) - if l > 1 and ans[0] == ans[1]: # Python only tests the second condition if the first is true - if l > 2 and ans[1] == ans[2]: - ans = ans.replace(ans[0]*3, ans[0]) + '"' - else: - ans = ans.replace(ans[0]*2, ans[0]) + "'" - return ans - - def normalisedegenerates(self, label, num, ndict=None): - """Generate a string used for matching degenerate orbital labels - - To normalise: - (1) if label is E or T, return label:num - (2) if label is P or D, look up in dict, and return answer - """ - - if not ndict: - ndict = { 'P': {0:"P:x", 1:"P:y", 2:"P:z"},\ - 'D': {0:"D:z2", 1:"D:x2-y2", 2:"D:xy", 3:"D:xz", 4:"D:yz"}} - - if ndict.has_key(label): - if ndict[label].has_key(num): - return ndict[label][num] - else: - return "%s:%i"%(label,num+1) - else: - return "%s:%i"%(label,num+1) - - def before_parsing(self): - - # Used to avoid extracting the final geometry twice in a GeoOpt - self.NOTFOUND, self.GETLAST, self.NOMORE = range(3) - self.finalgeometry = self.NOTFOUND - - # Used for calculating the scftarget (variables names taken from the ADF manual) - self.accint = self.SCFconv = self.sconv2 = None - - # keep track of nosym and unrestricted case to parse Energies since it doens't have an all Irreps section - self.nosymflag = False - self.unrestrictedflag = False - - SCFCNV, SCFCNV2 = range(2) #used to index self.scftargets[] - maxelem, norm = range(2) # used to index scf.values - - def extract(self, inputfile, line): - """Extract information from the file object inputfile.""" - - if line.find("INPUT FILE") >= 0: - #check to make sure we aren't parsing Create jobs - while line: - - self.updateprogress(inputfile, "Unsupported Information", self.fupdate) - - if line.find("INPUT FILE") >=0 and hasattr(self,"scftargets"): - #does this file contain multiple calculations? - #if so, print a warning and skip to end of file - self.logger.warning("Skipping remaining calculations") - inputfile.seek(0,2) - break - - if line.find("INPUT FILE") >= 0: - line2 = inputfile.next() - else: - line2 = None - - if line2 and len(line2) <= 2: - #make sure that it's not blank like in the NiCO4 regression - line2 = inputfile.next() - - if line2 and (line2.find("Create") < 0 and line2.find("create") < 0): - break - - line = inputfile.next() - - if line[1:10] == "Symmetry:": - info = line.split() - if info[1] == "NOSYM": - self.nosymflag = True - - # Use this to read the subspecies of irreducible representations. - # It will be a list, with each element representing one irrep. 
- if line.strip() == "Irreducible Representations, including subspecies": - dashes = inputfile.next() - self.irreps = [] - line = inputfile.next() - while line.strip() != "": - self.irreps.append(line.split()) - line = inputfile.next() - - if line[4:13] == 'Molecule:': - info = line.split() - if info[1] == 'UNrestricted': - self.unrestrictedflag = True - - if line[1:6] == "ATOMS": - # Find the number of atoms and their atomic numbers - # Also extract the starting coordinates (for a GeoOpt anyway) - self.updateprogress(inputfile, "Attributes", self.cupdate) - - self.atomnos = [] - self.atomcoords = [] - self.coreelectrons = [] - - underline = inputfile.next() #clear pointless lines - label1 = inputfile.next() # - label2 = inputfile.next() # - line = inputfile.next() - atomcoords = [] - while len(line)>2: #ensure that we are reading no blank lines - info = line.split() - element = info[1].split('.')[0] - self.atomnos.append(self.table.number[element]) - atomcoords.append(map(float, info[2:5])) - self.coreelectrons.append(int(float(info[5]) - float(info[6]))) - line = inputfile.next() - self.atomcoords.append(atomcoords) - - self.natom = len(self.atomnos) - self.atomnos = numpy.array(self.atomnos, "i") - - if line[1:10] == "FRAGMENTS": - header = inputfile.next() - - self.frags = [] - self.fragnames = [] - - line = inputfile.next() - while len(line) > 2: #ensure that we are reading no blank lines - info = line.split() - - if len(info) == 7: #fragment name is listed here - self.fragnames.append("%s_%s"%(info[1],info[0])) - self.frags.append([]) - self.frags[-1].append(int(info[2]) - 1) - - elif len(info) == 5: #add atoms into last fragment - self.frags[-1].append(int(info[0]) - 1) - - line = inputfile.next() - - # Extract charge - if line[1:11] == "Net Charge": - self.charge = int(line.split()[2]) - line = inputfile.next() - if len(line.strip()): - # Spin polar: 1 (Spin_A minus Spin_B electrons) - self.mult = int(line.split()[2]) + 1 - # (Not sure about this for higher multiplicities) - else: - self.mult = 1 - - if line[1:22] == "S C F U P D A T E S": - # find targets for SCF convergence - - if not hasattr(self,"scftargets"): - self.scftargets = [] - - #underline, blank, nr - for i in range(3): - inputfile.next() - - line = inputfile.next() - self.SCFconv = float(line.split()[-1]) - line = inputfile.next() - self.sconv2 = float(line.split()[-1]) - - if line[1:11] == "CYCLE 1": - - self.updateprogress(inputfile, "QM convergence", self.fupdate) - - newlist = [] - line = inputfile.next() - - if not hasattr(self,"geovalues"): - # This is the first SCF cycle - self.scftargets.append([self.sconv2*10, self.sconv2]) - elif self.finalgeometry in [self.GETLAST, self.NOMORE]: - # This is the final SCF cycle - self.scftargets.append([self.SCFconv*10, self.SCFconv]) - else: - # This is an intermediate SCF cycle - oldscftst = self.scftargets[-1][1] - grdmax = self.geovalues[-1][1] - scftst = max(self.SCFconv, min(oldscftst, grdmax/30, 10**(-self.accint))) - self.scftargets.append([scftst*10, scftst]) - - while line.find("SCF CONVERGED") == -1 and line.find("SCF not fully converged, result acceptable") == -1 and line.find("SCF NOT CONVERGED") == -1: - if line[4:12] == "SCF test": - if not hasattr(self, "scfvalues"): - self.scfvalues = [] - - info = line.split() - newlist.append([float(info[4]), abs(float(info[6]))]) - try: - line = inputfile.next() - except StopIteration: #EOF reached? 
- self.logger.warning("SCF did not converge, so attributes may be missing") - break - - if line.find("SCF not fully converged, result acceptable") > 0: - self.logger.warning("SCF not fully converged, results acceptable") - - if line.find("SCF NOT CONVERGED") > 0: - self.logger.warning("SCF did not converge! moenergies and mocoeffs are unreliable") - - if hasattr(self, "scfvalues"): - self.scfvalues.append(newlist) - - # Parse SCF energy for SP calcs from bonding energy decomposition section. - # It seems ADF does not print it earlier for SP calcualtions. - # If it does (does it?), parse that instead. - # Check that scfenergies does not exist, becuase gopt runs also print this, - # repeating the values in the last "Geometry Convergence Tests" section. - if "Total Bonding Energy:" in line: - if not hasattr(self, "scfenergies"): - energy = utils.convertor(float(line.split()[3]), "hartree", "eV") - self.scfenergies = [energy] - - if line[51:65] == "Final Geometry": - self.finalgeometry = self.GETLAST - - if line[1:24] == "Coordinates (Cartesian)" and self.finalgeometry in [self.NOTFOUND, self.GETLAST]: - # Get the coordinates from each step of the GeoOpt - if not hasattr(self, "atomcoords"): - self.atomcoords = [] - equals = inputfile.next() - blank = inputfile.next() - title = inputfile.next() - title = inputfile.next() - hyphens = inputfile.next() - - atomcoords = [] - line = inputfile.next() - while line != hyphens: - atomcoords.append(map(float, line.split()[5:8])) - line = inputfile.next() - self.atomcoords.append(atomcoords) - if self.finalgeometry == self.GETLAST: # Don't get any more coordinates - self.finalgeometry = self.NOMORE - - if line[1:27] == 'Geometry Convergence Tests': - # Extract Geometry convergence information - if not hasattr(self, "geotargets"): - self.geovalues = [] - self.geotargets = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0], "d") - if not hasattr(self, "scfenergies"): - self.scfenergies = [] - equals = inputfile.next() - blank = inputfile.next() - line = inputfile.next() - temp = inputfile.next().strip().split() - self.scfenergies.append(utils.convertor(float(temp[-1]), "hartree", "eV")) - for i in range(6): - line = inputfile.next() - values = [] - for i in range(5): - temp = inputfile.next().split() - self.geotargets[i] = float(temp[-3]) - values.append(float(temp[-4])) - self.geovalues.append(values) - - if line[1:27] == 'General Accuracy Parameter': - # Need to know the accuracy of the integration grid to - # calculate the scftarget...note that it changes with time - self.accint = float(line.split()[-1]) - - if line.find('Orbital Energies, per Irrep and Spin') > 0 and not hasattr(self, "mosyms") and self.nosymflag and not self.unrestrictedflag: - #Extracting orbital symmetries and energies, homos for nosym case - #Should only be for restricted case because there is a better text block for unrestricted and nosym - - self.mosyms = [[]] - - self.moenergies = [[]] - - underline = inputfile.next() - header = inputfile.next() - underline = inputfile.next() - label = inputfile.next() - line = inputfile.next() - - info = line.split() - - if not info[0] == '1': - self.logger.warning("MO info up to #%s is missing" % info[0]) - - #handle case where MO information up to a certain orbital are missing - while int(info[0]) - 1 != len(self.moenergies[0]): - self.moenergies[0].append(99999) - self.mosyms[0].append('A') - - homoA = None - - while len(line) > 10: - info = line.split() - self.mosyms[0].append('A') - self.moenergies[0].append(utils.convertor(float(info[2]), 'hartree', 
'eV')) - if info[1] == '0.000' and not hasattr(self, 'homos'): - self.homos = [len(self.moenergies[0]) - 2] - line = inputfile.next() - - self.moenergies = [numpy.array(self.moenergies[0], "d")] - self.homos = numpy.array(self.homos, "i") - - if line[1:29] == 'Orbital Energies, both Spins' and not hasattr(self, "mosyms") and self.nosymflag and self.unrestrictedflag: - #Extracting orbital symmetries and energies, homos for nosym case - #should only be here if unrestricted and nosym - - self.mosyms = [[], []] - - moenergies = [[], []] - - underline = inputfile.next() - blank = inputfile.next() - header = inputfile.next() - underline = inputfile.next() - line = inputfile.next() - - homoa = 0 - homob = None - - while len(line) > 5: - info = line.split() - if info[2] == 'A': - self.mosyms[0].append('A') - moenergies[0].append(utils.convertor(float(info[4]), 'hartree', 'eV')) - if info[3] != '0.00': - homoa = len(moenergies[0]) - 1 - elif info[2] == 'B': - self.mosyms[1].append('A') - moenergies[1].append(utils.convertor(float(info[4]), 'hartree', 'eV')) - if info[3] != '0.00': - homob = len(moenergies[1]) - 1 - else: - print "Error reading line: %s" % line - - line = inputfile.next() - - self.moenergies = [numpy.array(x, "d") for x in moenergies] - self.homos = numpy.array([homoa, homob], "i") - - - if line[1:29] == 'Orbital Energies, all Irreps' and not hasattr(self, "mosyms"): - #Extracting orbital symmetries and energies, homos - self.mosyms = [[]] - self.symlist = {} - - self.moenergies = [[]] - - underline = inputfile.next() - blank = inputfile.next() - header = inputfile.next() - underline2 = inputfile.next() - line = inputfile.next() - - homoa = None - homob = None - - #multiple = {'E':2, 'T':3, 'P':3, 'D':5} - # The above is set if there are no special irreps - names = [irrep[0].split(':')[0] for irrep in self.irreps] - counts = [len(irrep) for irrep in self.irreps] - multiple = dict(zip(names, counts)) - irrepspecies = {} - for n in range(len(names)): - indices = range(counts[n]) - subspecies = self.irreps[n] - irrepspecies[names[n]] = dict(zip(indices, subspecies)) - - while line.strip(): - info = line.split() - if len(info) == 5: #this is restricted - #count = multiple.get(info[0][0],1) - count = multiple.get(info[0],1) - for repeat in range(count): # i.e. add E's twice, T's thrice - self.mosyms[0].append(self.normalisesym(info[0])) - self.moenergies[0].append(utils.convertor(float(info[3]), 'hartree', 'eV')) - - sym = info[0] - if count > 1: # add additional sym label - sym = self.normalisedegenerates(info[0],repeat,ndict=irrepspecies) - - try: - self.symlist[sym][0].append(len(self.moenergies[0])-1) - except KeyError: - self.symlist[sym]=[[]] - self.symlist[sym][0].append(len(self.moenergies[0])-1) - - if info[2] == '0.00' and not hasattr(self, 'homos'): - self.homos = [len(self.moenergies[0]) - (count + 1)] #count, because need to handle degenerate cases - line = inputfile.next() - elif len(info) == 6: #this is unrestricted - if len(self.moenergies) < 2: #if we don't have space, create it - self.moenergies.append([]) - self.mosyms.append([]) -# count = multiple.get(info[0][0], 1) - count = multiple.get(info[0], 1) - if info[2] == 'A': - for repeat in range(count): # i.e. 
add E's twice, T's thrice - self.mosyms[0].append(self.normalisesym(info[0])) - self.moenergies[0].append(utils.convertor(float(info[4]), 'hartree', 'eV')) - - sym = info[0] - if count > 1: #add additional sym label - sym = self.normalisedegenerates(info[0],repeat) - - try: - self.symlist[sym][0].append(len(self.moenergies[0])-1) - except KeyError: - self.symlist[sym]=[[],[]] - self.symlist[sym][0].append(len(self.moenergies[0])-1) - - if info[3] == '0.00' and homoa == None: - homoa = len(self.moenergies[0]) - (count + 1) #count because degenerate cases need to be handled - - if info[2] == 'B': - for repeat in range(count): # i.e. add E's twice, T's thrice - self.mosyms[1].append(self.normalisesym(info[0])) - self.moenergies[1].append(utils.convertor(float(info[4]), 'hartree', 'eV')) - - sym = info[0] - if count > 1: #add additional sym label - sym = self.normalisedegenerates(info[0],repeat) - - try: - self.symlist[sym][1].append(len(self.moenergies[1])-1) - except KeyError: - self.symlist[sym]=[[],[]] - self.symlist[sym][1].append(len(self.moenergies[1])-1) - - if info[3] == '0.00' and homob == None: - homob = len(self.moenergies[1]) - (count + 1) - - line = inputfile.next() - - else: #different number of lines - print "Error", info - - if len(info) == 6: #still unrestricted, despite being out of loop - self.homos = [homoa, homob] - - self.moenergies = [numpy.array(x, "d") for x in self.moenergies] - self.homos = numpy.array(self.homos, "i") - - if line[1:28] == "Vibrations and Normal Modes": - # Section on extracting vibdisps - # Also contains vibfreqs, but these are extracted in the - # following section (see below) - self.vibdisps = [] - equals = inputfile.next() - blank = inputfile.next() - header = inputfile.next() - header = inputfile.next() - blank = inputfile.next() - blank = inputfile.next() - - freqs = inputfile.next() - while freqs.strip()!="": - minus = inputfile.next() - p = [ [], [], [] ] - for i in range(len(self.atomnos)): - broken = map(float, inputfile.next().split()[1:]) - for j in range(0, len(broken), 3): - p[j/3].append(broken[j:j+3]) - self.vibdisps.extend(p[:(len(broken)/3)]) - blank = inputfile.next() - blank = inputfile.next() - freqs = inputfile.next() - self.vibdisps = numpy.array(self.vibdisps, "d") - - if line[1:24] == "List of All Frequencies": - # Start of the IR/Raman frequency section - self.updateprogress(inputfile, "Frequency information", self.fupdate) - - # self.vibsyms = [] # Need to look into this a bit more - self.vibirs = [] - self.vibfreqs = [] - for i in range(8): - line = inputfile.next() - line = inputfile.next().strip() - while line: - temp = line.split() - self.vibfreqs.append(float(temp[0])) - self.vibirs.append(float(temp[2])) # or is it temp[1]? - line = inputfile.next().strip() - self.vibfreqs = numpy.array(self.vibfreqs, "d") - self.vibirs = numpy.array(self.vibirs, "d") - if hasattr(self, "vibramans"): - self.vibramans = numpy.array(self.vibramans, "d") - - - #******************************************************************************************************************8 - #delete this after new implementation using smat, eigvec print,eprint? - if line[1:49] == "Total nr. 
of (C)SFOs (summation over all irreps)": - # Extract the number of basis sets - self.nbasis = int(line.split(":")[1].split()[0]) - - # now that we're here, let's extract aonames - - self.fonames = [] - self.start_indeces = {} - - blank = inputfile.next() - note = inputfile.next() - symoffset = 0 - - blank = inputfile.next() - blank = inputfile.next() - if len(blank) > 2: #fix for ADF2006.01 as it has another note - blank = inputfile.next() - blank = inputfile.next() - blank = inputfile.next() - - self.nosymreps = [] - while len(self.fonames) < self.nbasis: - - symline = inputfile.next() - sym = symline.split()[1] - line = inputfile.next() - num = int(line.split(':')[1].split()[0]) - self.nosymreps.append(num) - - #read until line "--------..." is found - while line.find('-----') < 0: - line = inputfile.next() - - line = inputfile.next() # the start of the first SFO - - while len(self.fonames) < symoffset + num: - info = line.split() - - #index0 index1 occ2 energy3/4 fragname5 coeff6 orbnum7 orbname8 fragname9 - if not sym in self.start_indeces.keys(): - #have we already set the start index for this symmetry? - self.start_indeces[sym] = int(info[1]) - - orbname = info[8] - orbital = info[7] + orbname.replace(":", "") - - fragname = info[5] - frag = fragname + info[9] - - coeff = float(info[6]) - - line = inputfile.next() - while line.strip() and not line[:7].strip(): # while it's the same SFO - # i.e. while not completely blank, but blank at the start - info = line[43:].split() - if len(info)>0: # len(info)==0 for the second line of dvb_ir.adfout - frag += "+" + fragname + info[-1] - coeff = float(info[-4]) - if coeff < 0: - orbital += '-' + info[-3] + info[-2].replace(":", "") - else: - orbital += '+' + info[-3] + info[-2].replace(":", "") - line = inputfile.next() - # At this point, we are either at the start of the next SFO or at - # a blank line...the end - - self.fonames.append("%s_%s" % (frag, orbital)) - symoffset += num - - # blankline blankline - inputfile.next(); inputfile.next() - - if line[1:32] == "S F O P O P U L A T I O N S ,": - #Extract overlap matrix - - self.fooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d") - - symoffset = 0 - - for nosymrep in self.nosymreps: - - line = inputfile.next() - while line.find('===') < 10: #look for the symmetry labels - line = inputfile.next() - #blank blank text blank col row - for i in range(6): - inputfile.next() - - base = 0 - while base < nosymrep: #have we read all the columns? - - for i in range(nosymrep - base): - - self.updateprogress(inputfile, "Overlap", self.fupdate) - line = inputfile.next() - parts = line.split()[1:] - for j in range(len(parts)): - k = float(parts[j]) - self.fooverlaps[base + symoffset + j, base + symoffset +i] = k - self.fooverlaps[base + symoffset + i, base + symoffset + j] = k - - #blank, blank, column - for i in range(3): - inputfile.next() - - base += 4 - - symoffset += nosymrep - base = 0 - -# The commented code below makes the atombasis attribute based on the BAS function in ADF, -# but this is probably not so useful, since SFOs are used to build MOs in ADF. -# if line[1:54] == "BAS: List of all Elementary Cartesian Basis Functions": -# -# self.atombasis = [] -# -# # There will be some text, followed by a line: -# # (power of) X Y Z R Alpha on Atom -# while not line[1:11] == "(power of)": -# line = inputfile.next() -# dashes = inputfile.next() -# blank = inputfile.next() -# line = inputfile.next() -# # There will be two blank lines when there are no more atom types. 
-# while line.strip() != "": -# atoms = [int(i)-1 for i in line.split()[1:]] -# for n in range(len(atoms)): -# self.atombasis.append([]) -# dashes = inputfile.next() -# line = inputfile.next() -# while line.strip() != "": -# indices = [int(i)-1 for i in line.split()[5:]] -# for i in range(len(indices)): -# self.atombasis[atoms[i]].append(indices[i]) -# line = inputfile.next() -# line = inputfile.next() - - if line[48:67] == "SFO MO coefficients": - - self.mocoeffs = [numpy.zeros((self.nbasis, self.nbasis), "d")] - spin = 0 - symoffset = 0 - lastrow = 0 - - # Section ends with "1" at beggining of a line. - while line[0] != "1": - line = inputfile.next() - - # If spin is specified, then there will be two coefficient matrices. - if line.strip() == "***** SPIN 1 *****": - self.mocoeffs = [numpy.zeros((self.nbasis, self.nbasis), "d"), - numpy.zeros((self.nbasis, self.nbasis), "d")] - - # Bump up the spin. - if line.strip() == "***** SPIN 2 *****": - spin = 1 - symoffset = 0 - lastrow = 0 - - # Next symmetry. - if line.strip()[:4] == "=== ": - sym = line.split()[1] - if self.nosymflag: - aolist = range(self.nbasis) - else: - aolist = self.symlist[sym][spin] - # Add to the symmetry offset of AO ordering. - symoffset += lastrow - - # Blocks with coefficient always start with "MOs :". - if line[1:6] == "MOs :": - # Next line has the MO index contributed to. - monumbers = [int(n) for n in line[6:].split()] - occup = inputfile.next() - label = inputfile.next() - line = inputfile.next() - # The table can end with a blank line or "1". - row = 0 - while not line.strip() in ["", "1"]: - info = line.split() - - if int(info[0]) < self.start_indeces[sym]: - #check to make sure we aren't parsing CFs - line = inputfile.next() - continue - - self.updateprogress(inputfile, "Coefficients", self.fupdate) - row += 1 - coeffs = [float(x) for x in info[1:]] - moindices = [aolist[n-1] for n in monumbers] - # The AO index is 1 less than the row. - aoindex = symoffset + row - 1 - for i in range(len(monumbers)): - self.mocoeffs[spin][moindices[i],aoindex] = coeffs[i] - line = inputfile.next() - lastrow = row - - if line[4:53] == "Final excitation energies from Davidson algorithm": - - # move forward in file past some various algorthm info - - # * Final excitation energies from Davidson algorithm * - # * * - # ************************************************************************** - - # Number of loops in Davidson routine = 20 - # Number of matrix-vector multiplications = 24 - # Type of excitations = SINGLET-SINGLET - - inputfile.next(); inputfile.next(); inputfile.next() - inputfile.next(); inputfile.next(); inputfile.next() - inputfile.next(); inputfile.next() - - symm = self.normalisesym(inputfile.next().split()[1]) - - # move forward in file past some more txt and header info - - # Excitation energies E in a.u. and eV, dE wrt prev. cycle, - # oscillator strengths f in a.u. - - # no. E/a.u. E/eV f dE/a.u. 
- # ----------------------------------------------------- - - inputfile.next(); inputfile.next(); inputfile.next() - inputfile.next(); inputfile.next(); inputfile.next() - - # now start parsing etenergies and etoscs - - etenergies = [] - etoscs = [] - etsyms = [] - - line = inputfile.next() - while len(line) > 2: - info = line.split() - etenergies.append(utils.convertor(float(info[2]), "eV", "cm-1")) - etoscs.append(float(info[3])) - etsyms.append(symm) - line = inputfile.next() - - # move past next section - while line[1:53] != "Major MO -> MO transitions for the above excitations": - line = inputfile.next() - - # move past headers - - # Excitation Occupied to virtual Contribution - # Nr. orbitals weight contribibutions to - # (sum=1) transition dipole moment - # x y z - - inputfile.next(), inputfile.next(), inputfile.next() - inputfile.next(), inputfile.next(), inputfile.next() - - # before we start handeling transitions, we need - # to create mosyms with indices - # only restricted calcs are possible in ADF - - counts = {} - syms = [] - for mosym in self.mosyms[0]: - if counts.keys().count(mosym) == 0: - counts[mosym] = 1 - else: - counts[mosym] += 1 - - syms.append(str(counts[mosym]) + mosym) - - import re - etsecs = [] - printed_warning = False - - for i in range(len(etenergies)): - etsec = [] - line = inputfile.next() - info = line.split() - while len(info) > 0: - - match = re.search('[^0-9]', info[1]) - index1 = int(info[1][:match.start(0)]) - text = info[1][match.start(0):] - symtext = text[0].upper() + text[1:] - sym1 = str(index1) + self.normalisesym(symtext) - - match = re.search('[^0-9]', info[3]) - index2 = int(info[3][:match.start(0)]) - text = info[3][match.start(0):] - symtext = text[0].upper() + text[1:] - sym2 = str(index2) + self.normalisesym(symtext) - - try: - index1 = syms.index(sym1) - except ValueError: - if not printed_warning: - self.logger.warning("Etsecs are not accurate!") - printed_warning = True - - try: - index2 = syms.index(sym2) - except ValueError: - if not printed_warning: - self.logger.warning("Etsecs are not accurate!") - printed_warning = True - - etsec.append([(index1, 0), (index2, 0), float(info[4])]) - - line = inputfile.next() - info = line.split() - - etsecs.append(etsec) - - - if not hasattr(self, "etenergies"): - self.etenergies = etenergies - else: - self.etenergies += etenergies - - if not hasattr(self, "etoscs"): - self.etoscs = etoscs - else: - self.etoscs += etoscs - - if not hasattr(self, "etsyms"): - self.etsyms = etsyms - else: - self.etsyms += etsyms - - if not hasattr(self, "etsecs"): - self.etsecs = etsecs - else: - self.etsecs += etsecs - -if __name__ == "__main__": - import doctest, adfparser - doctest.testmod(adfparser, verbose=False) diff --git a/external/cclib/parser/ccopen.py b/external/cclib/parser/ccopen.py deleted file mode 100644 index edcef85bad..0000000000 --- a/external/cclib/parser/ccopen.py +++ /dev/null @@ -1,101 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 860 $" - - -import types - -import logfileparser - -import adfparser -import gamessparser -import gamessukparser -import gaussianparser -import jaguarparser -import molproparser -import orcaparser - - -def ccopen(source, *args, **kargs): - """Guess the identity of a particular log file and return an instance of it. 
- - Inputs: - source - a single logfile, a list of logfiles, or an input stream - - Returns: - one of ADF, GAMESS, GAMESS UK, Gaussian, Jaguar, Molpro, ORCA, or - None (if it cannot figure it out or the file does not exist). - """ - - filetype = None - - # Try to open the logfile(s), using openlogfile. - if isinstance(source,types.StringTypes) or \ - isinstance(source,list) and all([isinstance(s,types.StringTypes) for s in source]): - try: - inputfile = logfileparser.openlogfile(source) - except IOError, (errno, strerror): - print "I/O error %s (%s): %s" %(errno, source, strerror) - return None - isstream = False - elif hasattr(source, "read"): - inputfile = source - isstream = True - else: - raise ValueError - - # Read through the logfile(s) and search for a clue. - for line in inputfile: - - if line.find("Amsterdam Density Functional") >= 0: - filetype = adfparser.ADF - break - - # Don't break in this case as it may be a GAMESS-UK file. - elif line.find("GAMESS") >= 0: - filetype = gamessparser.GAMESS - - # This can break, since it is non-GAMESS-UK specific. - elif line.find("GAMESS VERSION") >= 0: - filetype = gamessparser.GAMESS - break - - elif line.find("G A M E S S - U K") >= 0: - filetype = gamessukparser.GAMESSUK - break - - elif line.find("Gaussian, Inc.") >= 0: - filetype = gaussianparser.Gaussian - break - - elif line.find("Jaguar") >= 0: - filetype = jaguarparser.Jaguar - break - - elif line.find("PROGRAM SYSTEM MOLPRO") >= 0: - filetype = molproparser.Molpro - break - - # Molpro log files don't have the line above. Set this only if - # nothing else is detected, and notice it can be overwritten, - # since it does not break the loop. - elif line[0:8] == "1PROGRAM" and not filetype: - filetype = molproparser.Molpro - - elif line.find("O R C A") >= 0: - filetype = orcaparser.ORCA - break - - # Need to close file before creating a instance. - if not isstream: - inputfile.close() - - # Return an instance of the chosen class. - try: - return filetype(source, *args, **kargs) - except TypeError: - print "Log file type not identified." - raise diff --git a/external/cclib/parser/data.py b/external/cclib/parser/data.py deleted file mode 100644 index 33fc963196..0000000000 --- a/external/cclib/parser/data.py +++ /dev/null @@ -1,199 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2007, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -gmagoon 4/5/10-4/6/10 (this notice added 4/29/10): Gregory Magoon modified this file from cclib 1.0 -""" - - -import cPickle as pickle -import os -import sys - -import numpy - - -class ccData(object): - """Class for objects containing data from cclib parsers and methods. - - Description of cclib attributes: - aonames -- atomic orbital names (list) - aooverlaps -- atomic orbital overlap matrix (array[2]) - atombasis -- indices of atomic orbitals on each atom (list of lists) - atomcoords -- atom coordinates (array[3], angstroms) - atomnos -- atomic numbers (array[1]) - charge -- net charge of the system (integer) - ccenergies -- molecular energies with Coupled-Cluster corrections (array[2], eV) - coreelectrons -- number of core electrons in atom pseudopotentials (array[1]) - etenergies -- energies of electronic transitions (array[1], 1/cm) - etoscs -- oscillator strengths of electronic transitions (array[1]) - etrotats -- rotatory strengths of electronic transitions (array[1], ??) 
- etsecs -- singly-excited configurations for electronic transitions (list of lists) - etsyms -- symmetries of electronic transitions (list) - fonames -- fragment orbital names (list) - fooverlaps -- fragment orbital overlap matrix (array[2]) - fragnames -- names of fragments (list) - frags -- indices of atoms in a fragment (list of lists) - gbasis -- coefficients and exponents of Gaussian basis functions (PyQuante format) - geotargets -- targets for convergence of geometry optimization (array[1]) - geovalues -- current values for convergence of geometry optmization (array[1]) - homos -- molecular orbital indices of HOMO(s) (array[1]) - mocoeffs -- molecular orbital coefficients (list of arrays[2]) - moenergies -- molecular orbital energies (list of arrays[1], eV) - mosyms -- orbital symmetries (list of lists) - mpenergies -- molecular electronic energies with Moller-Plesset corrections (array[2], eV) - mult -- multiplicity of the system (integer) - natom -- number of atoms (integer) - nbasis -- number of basis functions (integer) - nmo -- number of molecular orbitals (integer) - nocoeffs -- natural orbital coefficients (array[2]) - scfenergies -- molecular electronic energies after SCF (Hartree-Fock, DFT) (array[1], eV) - scftargets -- targets for convergence of the SCF (array[2]) - scfvalues -- current values for convergence of the SCF (list of arrays[2]) - stericenergy -- final steric energy (for MM4 calculations) - vibdisps -- cartesian displacement vectors (array[3], delta angstrom) - vibfreqs -- vibrational frequencies (array[1], 1/cm) - vibirs -- IR intensities (array[1], km/mol) - vibramans -- Raman intensities (array[1], A^4/Da) - vibsyms -- symmetries of vibrations (list) - (1) The term 'array' refers to a numpy array - (2) The number of dimensions of an array is given in square brackets - (3) Python indexes arrays/lists starting at zero, so if homos==[10], then - the 11th molecular orbital is the HOMO - """ - - def __init__(self, attributes=None): - """Initialize the cclibData object. - - Normally called in the parse() method of a Logfile subclass. - - Inputs: - attributes - dictionary of attributes to load - """ - - # Names of all supported attributes. - self._attrlist = ['aonames', 'aooverlaps', 'atombasis', - 'atomcoords', 'atomnos', - 'ccenergies', 'charge', 'coreelectrons', - 'etenergies', 'etoscs', 'etrotats', 'etsecs', 'etsyms', - 'fonames', 'fooverlaps', 'fragnames', 'frags', - 'gbasis', 'geotargets', 'geovalues', 'grads', - 'hessian', 'homos', - 'mocoeffs', 'moenergies', 'molmass', 'mosyms', 'mpenergies', 'mult', - 'natom', 'nbasis', 'nmo', 'nocoeffs', 'rotcons', 'rotsymm', - 'scfenergies', 'scftargets', 'scfvalues', 'stericenergy', - 'vibdisps', 'vibfreqs', 'vibirs', 'vibramans', 'vibsyms'] - - # The expected types for all supported attributes. 
- #gmagoon 5/27/09: added rotsymm type above and below - #gmagoon 6/8/09: added molmass (previously (maybe 5/28) I had added rotcons) - self._attrtypes = { "aonames": list, - "aooverlaps": numpy.ndarray, - "atombasis": list, - "atomcoords": numpy.ndarray, - "atomnos": numpy.ndarray, - "charge": int, - "coreelectrons": numpy.ndarray, - "etenergies": numpy.ndarray, - "etoscs": numpy.ndarray, - "etrotats": numpy.ndarray, - "etsecs": list, - "etsyms": list, - 'gbasis': list, - "geotargets": numpy.ndarray, - "geovalues": numpy.ndarray, - "grads": numpy.ndarray, - "hessian": numpy.ndarray, - "homos": numpy.ndarray, - "mocoeffs": list, - "moenergies": list, - "molmass": float, - "mosyms": list, - "mpenergies": numpy.ndarray, - "mult": int, - "natom": int, - "nbasis": int, - "nmo": int, - "nocoeffs": numpy.ndarray, - "rotcons": list, - "rotsymm": int, - "scfenergies": numpy.ndarray, - "scftargets": numpy.ndarray, - "scfvalues": list, - "stericenergy": float, - "vibdisps": numpy.ndarray, - "vibfreqs": numpy.ndarray, - "vibirs": numpy.ndarray, - "vibramans": numpy.ndarray, - "vibsyms": list, - } - - # Arrays are double precision by default, but these will be integer arrays. - self._intarrays = ['atomnos', 'coreelectrons', 'homos'] - - # Attributes that should be lists of arrays (double precision). - self._listsofarrays = ['mocoeffs', 'moenergies', 'scfvalues', 'rotcons']#gmagoon 5/28/09: added rotcons - - if attributes: - self.setattributes(attributes) - - def listify(self): - """Converts all attributes that are arrays or lists of arrays to lists.""" - - for k, v in self._attrtypes.iteritems(): - if hasattr(self, k): - if v == numpy.ndarray: - setattr(self, k, getattr(self, k).tolist()) - elif v == list and k in self._listsofarrays: - setattr(self, k, [x.tolist() for x in getattr(self, k)]) - - def arrayify(self): - """Converts appropriate attributes to arrays or lists of arrays.""" - - for k, v in self._attrtypes.iteritems(): - if hasattr(self, k): - precision = 'd' - if k in self._intarrays: - precision = 'i' - if v == numpy.ndarray: - setattr(self, k, numpy.array(getattr(self, k), precision)) - elif v == list and k in self._listsofarrays: - setattr(self, k, [numpy.array(x, precision) - for x in getattr(self, k)]) - - def getattributes(self, tolists=False): - """Returns a dictionary of existing data attributes. - - Inputs: - tolists - flag to convert attributes to lists where applicable - """ - - if tolists: - self.listify() - attributes = {} - for attr in self._attrlist: - if hasattr(self, attr): - attributes[attr] = getattr(self,attr) - if tolists: - self.arrayofy() - return attributes - - def setattributes(self, attributes): - """Sets data attributes given in a dictionary. 
- - Inputs: - attributes - dictionary of attributes to set - Outputs: - invalid - list of attributes names that were not set, which - means they are not specified in self._attrlist - """ - - if type(attributes) is not dict: - raise TypeError, "attributes must be in a dictionary" - - valid = [a for a in attributes if a in self._attrlist] - invalid = [a for a in attributes if a not in self._attrlist] - - for attr in valid: - setattr(self, attr, attributes[attr]) - self.arrayify() - return invalid diff --git a/external/cclib/parser/gamessparser.py b/external/cclib/parser/gamessparser.py deleted file mode 100644 index 2d0d6a70e9..0000000000 --- a/external/cclib/parser/gamessparser.py +++ /dev/null @@ -1,912 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 892 $" - - -import re - -import numpy - -import logfileparser -import utils - - -class GAMESS(logfileparser.Logfile): - """A GAMESS log file.""" - SCFRMS, SCFMAX, SCFENERGY = range(3) # Used to index self.scftargets[] - def __init__(self, *args, **kwargs): - - # Call the __init__ method of the superclass - super(GAMESS, self).__init__(logname="GAMESS", *args, **kwargs) - - def __str__(self): - """Return a string representation of the object.""" - return "GAMESS log file %s" % (self.filename) - - def __repr__(self): - """Return a representation of the object.""" - return 'GAMESS("%s")' % (self.filename) - - def normalisesym(self, label): - """Normalise the symmetries used by GAMESS. - - To normalise, two rules need to be applied: - (1) Occurences of U/G in the 2/3 position of the label - must be lower-cased - (2) Two single quotation marks must be replaced by a double - - >>> t = GAMESS("dummyfile").normalisesym - >>> labels = ['A', 'A1', 'A1G', "A'", "A''", "AG"] - >>> answers = map(t, labels) - >>> print answers - ['A', 'A1', 'A1g', "A'", 'A"', 'Ag'] - """ - if label[1:] == "''": - end = '"' - else: - end = label[1:].replace("U", "u").replace("G", "g") - return label[0] + end - - def before_parsing(self): - - self.firststdorient = True # Used to decide whether to wipe the atomcoords clean - self.geooptfinished = False # Used to avoid extracting the final geometry twice - self.cihamtyp = "none" # Type of CI Hamiltonian: saps or dets. - self.scftype = "none" # Type of SCF calculation: BLYP, RHF, ROHF, etc. - - def extract(self, inputfile, line): - """Extract information from the file object inputfile.""" - - if line [1:12] == "INPUT CARD>": - return - - # We are looking for this line: - # PARAMETERS CONTROLLING GEOMETRY SEARCH ARE - # ... - # OPTTOL = 1.000E-04 RMIN = 1.500E-03 - if line[10:18] == "OPTTOL =": - if not hasattr(self, "geotargets"): - opttol = float(line.split()[2]) - self.geotargets = numpy.array([opttol, 3. 
/ opttol], "d") - - if line.find("FINAL") == 1: - if not hasattr(self, "scfenergies"): - self.scfenergies = [] - # Has to deal with such lines as: - # FINAL R-B3LYP ENERGY IS -382.0507446475 AFTER 10 ITERATIONS - # FINAL ENERGY IS -379.7594673378 AFTER 9 ITERATIONS - # ...so take the number after the "IS" - temp = line.split() - self.scfenergies.append(utils.convertor(float(temp[temp.index("IS") + 1]), "hartree", "eV")) - - # Total energies after Moller-Plesset corrections - if (line.find("RESULTS OF MOLLER-PLESSET") >= 0 or - line[6:37] == "SCHWARZ INEQUALITY TEST SKIPPED"): - # Output looks something like this: - # RESULTS OF MOLLER-PLESSET 2ND ORDER CORRECTION ARE - # E(0)= -285.7568061536 - # E(1)= 0.0 - # E(2)= -0.9679419329 - # E(MP2)= -286.7247480864 - # where E(MP2) = E(0) + E(2) - # - # with GAMESS-US 12 Jan 2009 (R3) the preceding text is different: - ## DIRECT 4-INDEX TRANSFORMATION - ## SCHWARZ INEQUALITY TEST SKIPPED 0 INTEGRAL BLOCKS - ## E(SCF)= -76.0088477471 - ## E(2)= -0.1403745370 - ## E(MP2)= -76.1492222841 - if not hasattr(self, "mpenergies"): - self.mpenergies = [] - # Each iteration has a new print-out - self.mpenergies.append([]) - # GAMESS-US presently supports only second order corrections (MP2) - # PC GAMESS also has higher levels (3rd and 4th), with different output - # Only the highest level MP4 energy is gathered (SDQ or SDTQ) - while re.search("DONE WITH MP(\d) ENERGY", line) is None: - line = inputfile.next() - if len(line.split()) > 0: - # Only up to MP2 correction - if line.split()[0] == "E(MP2)=": - mp2energy = float(line.split()[1]) - self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV")) - # MP2 before higher order calculations - if line.split()[0] == "E(MP2)": - mp2energy = float(line.split()[2]) - self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV")) - if line.split()[0] == "E(MP3)": - mp3energy = float(line.split()[2]) - self.mpenergies[-1].append(utils.convertor(mp3energy, "hartree", "eV")) - if line.split()[0] in ["E(MP4-SDQ)", "E(MP4-SDTQ)"]: - mp4energy = float(line.split()[2]) - self.mpenergies[-1].append(utils.convertor(mp4energy, "hartree", "eV")) - - # Total energies after Coupled Cluster calculations - # Only the highest Coupled Cluster level result is gathered - if line[12:23] == "CCD ENERGY:": - if not hasattr(self, "ccenergies"): - self.ccenergies = [] - ccenergy = float(line.split()[2]) - self.ccenergies.append(utils.convertor(ccenergy, "hartree", "eV")) - if line.find("CCSD") >= 0 and line.split()[0:2] == ["CCSD", "ENERGY:"]: - if not hasattr(self, "ccenergies"): - self.ccenergies = [] - ccenergy = float(line.split()[2]) - line = inputfile.next() - if line[8:23] == "CCSD[T] ENERGY:": - ccenergy = float(line.split()[2]) - line = inputfile.next() - if line[8:23] == "CCSD(T) ENERGY:": - ccenergy = float(line.split()[2]) - self.ccenergies.append(utils.convertor(ccenergy, "hartree", "eV")) - # Also collect MP2 energies, which are always calculated before CC - if line [8:23] == "MBPT(2) ENERGY:": - if not hasattr(self, "mpenergies"): - self.mpenergies = [] - self.mpenergies.append([]) - mp2energy = float(line.split()[2]) - self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV")) - - # Extract charge and multiplicity - if line[1:19] == "CHARGE OF MOLECULE": - self.charge = int(line.split()[-1]) - self.mult = int(inputfile.next().split()[-1]) - - # etenergies (used only for CIS runs now) - if "EXCITATION ENERGIES" in line and line.find("DONE WITH") < 0: - if not hasattr(self, "etenergies"): - 
self.etenergies = [] - header = inputfile.next().rstrip() - get_etosc = False - if header.endswith("OSC. STR."): - # water_cis_dets.out does not have the oscillator strength - # in this table...it is extracted from a different section below - get_etosc = True - self.etoscs = [] - dashes = inputfile.next() - line = inputfile.next() - broken = line.split() - while len(broken) > 0: - # Take hartree value with more numbers, and convert. - # Note that the values listed after this are also less exact! - etenergy = float(broken[1]) - self.etenergies.append(utils.convertor(etenergy, "hartree", "cm-1")) - if get_etosc: - etosc = float(broken[-1]) - self.etoscs.append(etosc) - broken = inputfile.next().split() - - # Detect the CI hamiltonian type, if applicable. - # Should always be detected if CIS is done. - if line[8:64] == "RESULTS FROM SPIN-ADAPTED ANTISYMMETRIZED PRODUCT (SAPS)": - self.cihamtyp = "saps" - if line[8:64] == "RESULTS FROM DETERMINANT BASED ATOMIC ORBITAL CI-SINGLES": - self.cihamtyp = "dets" - - # etsecs (used only for CIS runs for now) - if line[1:14] == "EXCITED STATE": - if not hasattr(self, 'etsecs'): - self.etsecs = [] - if not hasattr(self, 'etsyms'): - self.etsyms = [] - statenumber = int(line.split()[2]) - spin = int(float(line.split()[7])) - if spin == 0: - sym = "Singlet" - if spin == 1: - sym = "Triplet" - sym += '-' + line.split()[-1] - self.etsyms.append(sym) - # skip 5 lines - for i in range(5): - line = inputfile.next() - line = inputfile.next() - CIScontribs = [] - while line.strip()[0] != "-": - MOtype = 0 - # alpha/beta are specified for hamtyp=dets - if self.cihamtyp == "dets": - if line.split()[0] == "BETA": - MOtype = 1 - fromMO = int(line.split()[-3])-1 - toMO = int(line.split()[-2])-1 - coeff = float(line.split()[-1]) - # With the SAPS hamiltonian, the coefficients are multiplied - # by sqrt(2) so that they normalize to 1. - # With DETS, both alpha and beta excitations are printed. - # if self.cihamtyp == "saps": - # coeff /= numpy.sqrt(2.0) - CIScontribs.append([(fromMO,MOtype),(toMO,MOtype),coeff]) - line = inputfile.next() - self.etsecs.append(CIScontribs) - - # etoscs (used only for CIS runs now) - if line[1:50] == "TRANSITION FROM THE GROUND STATE TO EXCITED STATE": - if not hasattr(self, "etoscs"): - self.etoscs = [] - statenumber = int(line.split()[-1]) - # skip 7 lines - for i in range(8): - line = inputfile.next() - strength = float(line.split()[3]) - self.etoscs.append(strength) - - # TD-DFT for GAMESS-US - if line[14:29] == "LET EXCITATIONS": # TRIPLET and SINGLET - self.etenergies = [] - self.etoscs = [] - self.etsecs = [] - etsyms = [] - minus = inputfile.next() - blank = inputfile.next() - line = inputfile.next() - # Loop starts on the STATE line - while line.find("STATE") >= 0: - broken = line.split() - self.etenergies.append(utils.convertor(float(broken[-2]), "eV", "cm-1")) - broken = inputfile.next().split() - self.etoscs.append(float(broken[-1])) - sym = inputfile.next() # Not always present - if sym.find("SYMMETRY")>=0: - etsyms.append(sym.split()[-1]) - header = inputfile.next() - minus = inputfile.next() - CIScontribs = [] - line = inputfile.next() - while line.strip(): - broken = line.split() - fromMO, toMO = [int(broken[x]) - 1 for x in [2, 4]] - CIScontribs.append([(fromMO, 0), (toMO, 0), float(broken[1])]) - line = inputfile.next() - self.etsecs.append(CIScontribs) - line = inputfile.next() - if etsyms: # Not always present - self.etsyms = etsyms - - # Maximum and RMS gradients. 
- if "MAXIMUM GRADIENT" in line or "RMS GRADIENT" in line: - - if not hasattr(self, "geovalues"): - self.geovalues = [] - - parts = line.split() - - # Newer versions (around 2006) have both maximum and RMS on one line: - # MAXIMUM GRADIENT = 0.0531540 RMS GRADIENT = 0.0189223 - if len(parts) == 8: - maximum = float(parts[3]) - rms = float(parts[7]) - - # In older versions of GAMESS, this spanned two lines, like this: - # MAXIMUM GRADIENT = 0.057578167 - # RMS GRADIENT = 0.027589766 - if len(parts) == 4: - maximum = float(parts[3]) - line = inputfile.next() - parts = line.split() - rms = float(parts[3]) - - - # FMO also prints two final one- and two-body gradients (see exam37): - # (1) MAXIMUM GRADIENT = 0.0531540 RMS GRADIENT = 0.0189223 - if len(parts) == 9: - maximum = float(parts[4]) - rms = float(parts[8]) - - self.geovalues.append([maximum, rms]) - - if line[11:50] == "ATOMIC COORDINATES": - # This is the input orientation, which is the only data available for - # SP calcs, but which should be overwritten by the standard orientation - # values, which is the only information available for all geoopt cycles. - if not hasattr(self, "atomcoords"): - self.atomcoords = [] - self.atomnos = [] - line = inputfile.next() - atomcoords = [] - atomnos = [] - line = inputfile.next() - while line.strip(): - temp = line.strip().split() - atomcoords.append([utils.convertor(float(x), "bohr", "Angstrom") for x in temp[2:5]]) - atomnos.append(int(round(float(temp[1])))) # Don't use the atom name as this is arbitary - line = inputfile.next() - self.atomnos = numpy.array(atomnos, "i") - self.atomcoords.append(atomcoords) - - if line[12:40] == "EQUILIBRIUM GEOMETRY LOCATED": - # Prevent extraction of the final geometry twice - self.geooptfinished = True - - if line[1:29] == "COORDINATES OF ALL ATOMS ARE" and not self.geooptfinished: - # This is the standard orientation, which is the only coordinate - # information available for all geometry optimisation cycles. - # The input orientation will be overwritten if this is a geometry optimisation - # We assume that a previous Input Orientation has been found and - # used to extract the atomnos - if self.firststdorient: - self.firststdorient = False - # Wipes out the single input coordinate at the start of the file - self.atomcoords = [] - - line = inputfile.next() - hyphens = inputfile.next() - - atomcoords = [] - line = inputfile.next() - - for i in range(self.natom): - temp = line.strip().split() - atomcoords.append(map(float, temp[2:5])) - line = inputfile.next() - self.atomcoords.append(atomcoords) - - # Section with SCF information. - # - # The space at the start of the search string is to differentiate from MCSCF. - # Everything before the search string is stored as the type of SCF. - # SCF types may include: BLYP, RHF, ROHF, UHF, etc. - # - # For example, in exam17 the section looks like this (note that this is GVB): - # ------------------------ - # ROHF-GVB SCF CALCULATION - # ------------------------ - # GVB STEP WILL USE 119875 WORDS OF MEMORY. - # - # MAXIT= 30 NPUNCH= 2 SQCDF TOL=1.0000E-05 - # NUCLEAR ENERGY= 6.1597411978 - # EXTRAP=T DAMP=F SHIFT=F RSTRCT=F DIIS=F SOSCF=F - # - # ITER EX TOTAL ENERGY E CHANGE SQCDF DIIS ERROR - # 0 0 -38.298939963 -38.298939963 0.131784454 0.000000000 - # 1 1 -38.332044339 -0.033104376 0.026019716 0.000000000 - # ... and will be terminated by a blank line. - if line.rstrip()[-16:] == " SCF CALCULATION": - - # Remember the type of SCF. 
- self.scftype = line.strip()[:-16] - - dashes = inputfile.next() - - while line [:5] != " ITER": - - # GVB uses SQCDF for checking convergence (for example in exam17). - if "GVB" in self.scftype and "SQCDF TOL=" in line: - scftarget = float(line.split("=")[-1]) - - # Normally however the density is used as the convergence criterium. - # Deal with various versions: - # (GAMESS VERSION = 12 DEC 2003) - # DENSITY MATRIX CONV= 2.00E-05 DFT GRID SWITCH THRESHOLD= 3.00E-04 - # (GAMESS VERSION = 22 FEB 2006) - # DENSITY MATRIX CONV= 1.00E-05 - # (PC GAMESS version 6.2, Not DFT?) - # DENSITY CONV= 1.00E-05 - elif "DENSITY CONV" in line or "DENSITY MATRIX CONV" in line: - scftarget = float(line.split()[-1]) - - line = inputfile.next() - - if not hasattr(self, "scftargets"): - self.scftargets = [] - - self.scftargets.append([scftarget]) - - if not hasattr(self,"scfvalues"): - self.scfvalues = [] - - line = inputfile.next() - - # Normally the iteration print in 6 columns. - # For ROHF, however, it is 5 columns, thus this extra parameter. - if "ROHF" in self.scftype: - valcol = 4 - else: - valcol = 5 - - # SCF iterations are terminated by a blank line. - # The first four characters usually contains the step number. - # However, lines can also contain messages, including: - # * * * INITIATING DIIS PROCEDURE * * * - # CONVERGED TO SWOFF, SO DFT CALCULATION IS NOW SWITCHED ON - # DFT CODE IS SWITCHING BACK TO THE FINER GRID - values = [] - while line.strip(): - try: - temp = int(line[0:4]) - except ValueError: - pass - else: - values.append([float(line.split()[valcol])]) - line = inputfile.next() - self.scfvalues.append(values) - - if line.find("NORMAL COORDINATE ANALYSIS IN THE HARMONIC APPROXIMATION") >= 0: - # GAMESS has... - # MODES 1 TO 6 ARE TAKEN AS ROTATIONS AND TRANSLATIONS. - # - # FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2, - # REDUCED MASSES IN AMU. - # - # 1 2 3 4 5 - # FREQUENCY: 52.49 41.45 17.61 9.23 10.61 - # REDUCED MASS: 3.92418 3.77048 5.43419 6.44636 5.50693 - # IR INTENSITY: 0.00013 0.00001 0.00004 0.00000 0.00003 - - # ...or in the case of a numerical Hessian job... - - # MODES 1 TO 5 ARE TAKEN AS ROTATIONS AND TRANSLATIONS. - # - # FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2, - # REDUCED MASSES IN AMU. - # - # 1 2 3 4 5 - # FREQUENCY: 0.05 0.03 0.03 30.89 30.94 - # REDUCED MASS: 8.50125 8.50137 8.50136 1.06709 1.06709 - - - # whereas PC-GAMESS has... - # MODES 1 TO 6 ARE TAKEN AS ROTATIONS AND TRANSLATIONS. - # - # FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2 - # - # 1 2 3 4 5 - # FREQUENCY: 5.89 1.46 0.01 0.01 0.01 - # IR INTENSITY: 0.00000 0.00000 0.00000 0.00000 0.00000 - - # If Raman is present we have (for PC-GAMESS)... - # MODES 1 TO 6 ARE TAKEN AS ROTATIONS AND TRANSLATIONS. - # - # FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2 - # RAMAN INTENSITIES IN ANGSTROM**4/AMU, DEPOLARIZATIONS ARE DIMENSIONLESS - # - # 1 2 3 4 5 - # FREQUENCY: 5.89 1.46 0.04 0.03 0.01 - # IR INTENSITY: 0.00000 0.00000 0.00000 0.00000 0.00000 - # RAMAN INTENSITY: 12.675 1.828 0.000 0.000 0.000 - # DEPOLARIZATION: 0.750 0.750 0.124 0.009 0.750 - - # If PC-GAMESS has not reached the stationary point we have - # MODES 1 TO 5 ARE TAKEN AS ROTATIONS AND TRANSLATIONS. - # - # FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2 - # - # ******************************************************* - # * THIS IS NOT A STATIONARY POINT ON THE MOLECULAR PES * - # * THE VIBRATIONAL ANALYSIS IS NOT VALID !!! 
* - # ******************************************************* - # - # 1 2 3 4 5 - - # MODES 2 TO 7 ARE TAKEN AS ROTATIONS AND TRANSLATIONS. - - self.vibfreqs = [] - self.vibirs = [] - self.vibdisps = [] - - # Need to get to the modes line - warning = False - while line.find("MODES") == -1: - line = inputfile.next() - if line.find("THIS IS NOT A STATIONARY POINT")>=0: - warning = True - startrot = int(line.split()[1]) - endrot = int(line.split()[3]) - blank = inputfile.next() - - line = inputfile.next() # FREQUENCIES, etc. - while line != blank: - line = inputfile.next() - if warning: # Get past the second warning - line = inputfile.next() - while line!= blank: - line = inputfile.next() - self.logger.warning("This is not a stationary point on the molecular" - "PES. The vibrational analysis is not valid.") - - freqNo = inputfile.next() - while freqNo.find("SAYVETZ") == -1: - freq = inputfile.next().strip().split()[1:] - # May include imaginary frequencies - # FREQUENCY: 825.18 I 111.53 12.62 10.70 0.89 - newfreq = [] - for i, x in enumerate(freq): - if x!="I": - newfreq.append(float(x)) - else: - newfreq[-1] = -newfreq[-1] - self.vibfreqs.extend(newfreq) - line = inputfile.next() - if line.find("REDUCED") >= 0: # skip the reduced mass (not always present) - line = inputfile.next() - if line.find("IR INTENSITY") >= 0: - # Not present if a numerical Hessian calculation - irIntensity = map(float, line.strip().split()[2:]) - self.vibirs.extend([utils.convertor(x, "Debye^2/amu-Angstrom^2", "km/mol") for x in irIntensity]) - line = inputfile.next() - if line.find("RAMAN") >= 0: - if not hasattr(self,"vibramans"): - self.vibramans = [] - ramanIntensity = line.strip().split() - self.vibramans.extend(map(float, ramanIntensity[2:])) - depolar = inputfile.next() - line = inputfile.next() - assert line == blank - - # Extract the Cartesian displacement vectors - p = [ [], [], [], [], [] ] - for j in range(len(self.atomnos)): - q = [ [], [], [], [], [] ] - for k in range(3): # x, y, z - line = inputfile.next()[21:] - broken = map(float, line.split()) - for l in range(len(broken)): - q[l].append(broken[l]) - for k in range(len(broken)): - p[k].append(q[k]) - self.vibdisps.extend(p[:len(broken)]) - - # Skip the Sayvetz stuff at the end - for j in range(10): - line = inputfile.next() - blank = inputfile.next() - freqNo = inputfile.next() - # Exclude rotations and translations - self.vibfreqs = numpy.array(self.vibfreqs[:startrot-1]+self.vibfreqs[endrot:], "d") - self.vibirs = numpy.array(self.vibirs[:startrot-1]+self.vibirs[endrot:], "d") - self.vibdisps = numpy.array(self.vibdisps[:startrot-1]+self.vibdisps[endrot:], "d") - if hasattr(self, "vibramans"): - self.vibramans = numpy.array(self.vibramans[:startrot-1]+self.vibramans[endrot:], "d") - - if line[5:21] == "ATOMIC BASIS SET": - self.gbasis = [] - line = inputfile.next() - while line.find("SHELL")<0: - line = inputfile.next() - blank = inputfile.next() - atomname = inputfile.next() - # shellcounter stores the shell no of the last shell - # in the previous set of primitives - shellcounter = 1 - while line.find("TOTAL NUMBER")<0: - blank = inputfile.next() - line = inputfile.next() - shellno = int(line.split()[0]) - shellgap = shellno - shellcounter - gbasis = [] # Stores basis sets on one atom - shellsize = 0 - while len(line.split())!=1 and line.find("TOTAL NUMBER")<0: - shellsize += 1 - coeff = {} - # coefficients and symmetries for a block of rows - while line.strip(): - temp = line.strip().split() - sym = temp[1] - assert sym in ['S', 'P', 'D', 'F', 
'G', 'L'] - if sym == "L": # L refers to SP - if len(temp)==6: # GAMESS US - coeff.setdefault("S", []).append( (float(temp[3]), float(temp[4])) ) - coeff.setdefault("P", []).append( (float(temp[3]), float(temp[5])) ) - else: # PC GAMESS - assert temp[6][-1] == temp[9][-1] == ')' - coeff.setdefault("S", []).append( (float(temp[3]), float(temp[6][:-1])) ) - coeff.setdefault("P", []).append( (float(temp[3]), float(temp[9][:-1])) ) - else: - if len(temp)==5: # GAMESS US - coeff.setdefault(sym, []).append( (float(temp[3]), float(temp[4])) ) - else: # PC GAMESS - assert temp[6][-1] == ')' - coeff.setdefault(sym, []).append( (float(temp[3]), float(temp[6][:-1])) ) - line = inputfile.next() - # either a blank or a continuation of the block - if sym == "L": - gbasis.append( ('S', coeff['S'])) - gbasis.append( ('P', coeff['P'])) - else: - gbasis.append( (sym, coeff[sym])) - line = inputfile.next() - # either the start of the next block or the start of a new atom or - # the end of the basis function section - - numtoadd = 1 + (shellgap / shellsize) - shellcounter = shellno + shellsize - for x in range(numtoadd): - self.gbasis.append(gbasis) - - if line.find("EIGENVECTORS") == 10 or line.find("MOLECULAR OBRITALS") == 10: - # The details returned come from the *final* report of evalues and - # the last list of symmetries in the log file. - # Should be followed by lines like this: - # ------------ - # EIGENVECTORS - # ------------ - # - # 1 2 3 4 5 - # -10.0162 -10.0161 -10.0039 -10.0039 -10.0029 - # BU AG BU AG AG - # 1 C 1 S 0.699293 0.699290 -0.027566 0.027799 0.002412 - # 2 C 1 S 0.031569 0.031361 0.004097 -0.004054 -0.000605 - # 3 C 1 X 0.000908 0.000632 -0.004163 0.004132 0.000619 - # 4 C 1 Y -0.000019 0.000033 0.000668 -0.000651 0.005256 - # 5 C 1 Z 0.000000 0.000000 0.000000 0.000000 0.000000 - # 6 C 2 S -0.699293 0.699290 0.027566 0.027799 0.002412 - # 7 C 2 S -0.031569 0.031361 -0.004097 -0.004054 -0.000605 - # 8 C 2 X 0.000908 -0.000632 -0.004163 -0.004132 -0.000619 - # 9 C 2 Y -0.000019 -0.000033 0.000668 0.000651 -0.005256 - # 10 C 2 Z 0.000000 0.000000 0.000000 0.000000 0.000000 - # 11 C 3 S -0.018967 -0.019439 0.011799 -0.014884 -0.452328 - # 12 C 3 S -0.007748 -0.006932 0.000680 -0.000695 -0.024917 - # 13 C 3 X 0.002628 0.002997 0.000018 0.000061 -0.003608 - # and so forth... with blanks lines between blocks of 5 orbitals each. - # Warning! There are subtle differences between GAMESS-US and PC-GAMES - # in the formatting of the first four columns. - # - # Watch out for F orbitals... - # PC GAMESS - # 19 C 1 YZ 0.000000 0.000000 0.000000 0.000000 0.000000 - # 20 C XXX 0.000000 0.000000 0.000000 0.000000 0.002249 - # 21 C YYY 0.000000 0.000000 -0.025555 0.000000 0.000000 - # 22 C ZZZ 0.000000 0.000000 0.000000 0.002249 0.000000 - # 23 C XXY 0.000000 0.000000 0.001343 0.000000 0.000000 - # GAMESS US - # 55 C 1 XYZ 0.000000 0.000000 0.000000 0.000000 0.000000 - # 56 C 1XXXX -0.000014 -0.000067 0.000000 0.000000 0.000000 - # - # This is fine for GeoOpt and SP, but may be weird for TD and Freq. - - # This is the stuff that we can read from these blocks. 
- self.moenergies = [[]] - self.mosyms = [[]] - if not hasattr(self, "nmo"): - self.nmo = self.nbasis - self.mocoeffs = [numpy.zeros((self.nmo, self.nbasis), "d")] - readatombasis = False - if not hasattr(self, "atombasis"): - self.atombasis = [] - self.aonames = [] - for i in range(self.natom): - self.atombasis.append([]) - self.aonames = [] - readatombasis = True - - dashes = inputfile.next() - for base in range(0, self.nmo, 5): - - line = inputfile.next() - # Make sure that this section does not end prematurely - checked by regression test 2CO.ccsd.aug-cc-pVDZ.out. - if line.strip() != "": - break; - - numbers = inputfile.next() # Eigenvector numbers. - - # Sometimes there are some blank lines here. - while not line.strip(): - line = inputfile.next() - - # Eigenvalues for these orbitals (in hartrees). - try: - self.moenergies[0].extend([utils.convertor(float(x), "hartree", "eV") for x in line.split()]) - except: - self.logger.warning('MO section found but could not be parsed!') - break; - - # Orbital symmetries. - line = inputfile.next() - if line.strip(): - self.mosyms[0].extend(map(self.normalisesym, line.split())) - - # Now we have nbasis lines. - # Going to use the same method as for normalise_aonames() - # to extract basis set information. - p = re.compile("(\d+)\s*([A-Z][A-Z]?)\s*(\d+)\s*([A-Z]+)") - oldatom ='0' - for i in range(self.nbasis): - line = inputfile.next() - - # If line is empty, break (ex. for FMO in exam37). - if not line.strip(): break - - # Fill atombasis and aonames only first time around - if readatombasis and base == 0: - aonames = [] - start = line[:17].strip() - m = p.search(start) - if m: - g = m.groups() - aoname = "%s%s_%s" % (g[1].capitalize(), g[2], g[3]) - oldatom = g[2] - atomno = int(g[2])-1 - orbno = int(g[0])-1 - else: # For F orbitals, as shown above - g = [x.strip() for x in line.split()] - aoname = "%s%s_%s" % (g[1].capitalize(), oldatom, g[2]) - atomno = int(oldatom)-1 - orbno = int(g[0])-1 - self.atombasis[atomno].append(orbno) - self.aonames.append(aoname) - coeffs = line[15:] # Strip off the crud at the start. - j = 0 - while j*11+4 < len(coeffs): - self.mocoeffs[0][base+j, i] = float(coeffs[j * 11:(j + 1) * 11]) - j += 1 - - line = inputfile.next() - # If it's restricted and no more properties: - # ...... END OF RHF/DFT CALCULATION ...... - # If there are more properties (DENSITY MATRIX): - # -------------- - # - # If it's unrestricted we have: - # - # ----- BETA SET ----- - # - # ------------ - # EIGENVECTORS - # ------------ - # - # 1 2 3 4 5 - # ... and so forth. - line = inputfile.next() - if line[2:22] == "----- BETA SET -----": - self.mocoeffs.append(numpy.zeros((self.nmo, self.nbasis), "d")) - self.moenergies.append([]) - self.mosyms.append([]) - for i in range(4): - line = inputfile.next() - for base in range(0, self.nmo, 5): - blank = inputfile.next() - line = inputfile.next() # Eigenvector no - line = inputfile.next() - self.moenergies[1].extend([utils.convertor(float(x), "hartree", "eV") for x in line.split()]) - line = inputfile.next() - self.mosyms[1].extend(map(self.normalisesym, line.split())) - for i in range(self.nbasis): - line = inputfile.next() - temp = line[15:] # Strip off the crud at the start - j = 0 - while j * 11 + 4 < len(temp): - self.mocoeffs[1][base+j, i] = float(temp[j * 11:(j + 1) * 11]) - j += 1 - line = inputfile.next() - self.moenergies = [numpy.array(x, "d") for x in self.moenergies] - - # Natural orbitals - presently support only CIS. - # Looks basically the same as eigenvectors, without symmetry labels. 
- if line[10:30] == "CIS NATURAL ORBITALS": - - self.nocoeffs = numpy.zeros((self.nmo, self.nbasis), "d") - - dashes = inputfile.next() - for base in range(0, self.nmo, 5): - - blank = inputfile.next() - numbers = inputfile.next() # Eigenvector numbers. - - # Eigenvalues for these natural orbitals (not in hartrees!). - # Sometimes there are some blank lines before it. - line = inputfile.next() - while not line.strip(): - line = inputfile.next() - eigenvalues = line - - # Orbital symemtry labels are normally here for MO coefficients. - line = inputfile.next() - - # Now we have nbasis lines with the coefficients. - for i in range(self.nbasis): - - line = inputfile.next() - coeffs = line[15:] - j = 0 - while j*11+4 < len(coeffs): - self.nocoeffs[base+j, i] = float(coeffs[j * 11:(j + 1) * 11]) - j += 1 - - # We cannot trust this self.homos until we come to the phrase: - # SYMMETRIES FOR INITAL GUESS ORBITALS FOLLOW - # which either is followed by "ALPHA" or "BOTH" at which point we can say - # for certain that it is an un/restricted calculations. - # Note that MCSCF calcs also print this search string, so make sure - # that self.homos does not exist yet. - if line[1:28] == "NUMBER OF OCCUPIED ORBITALS" and not hasattr(self,'homos'): - homos = [int(line.split()[-1])-1] - line = inputfile.next() - homos.append(int(line.split()[-1])-1) - self.homos = numpy.array(homos, "i") - - - if line.find("SYMMETRIES FOR INITIAL GUESS ORBITALS FOLLOW") >= 0: - # Not unrestricted, so lop off the second index. - # In case the search string above was not used (ex. FMO in exam38), - # we can try to use the next line which should also contain the - # number of occupied orbitals. - if line.find("BOTH SET(S)") >= 0: - nextline = inputfile.next() - if "ORBITALS ARE OCCUPIED" in nextline: - homos = int(nextline.split()[0])-1 - if hasattr(self,"homos"): - try: - assert self.homos[0] == homos - except AssertionError: - self.logger.warning("Number of occupied orbitals not consistent. This is normal for ECP and FMO jobs.") - else: - self.homos = [homos] - self.homos = numpy.resize(self.homos, [1]) - - # Set the total number of atoms, only once. - # Normally GAMESS print TOTAL NUMBER OF ATOMS, however in some cases - # this is slightly different (ex. lower case for FMO in exam37). - if not hasattr(self,"natom") and "NUMBER OF ATOMS" in line.upper(): - self.natom = int(line.split()[-1]) - - if line.find("NUMBER OF CARTESIAN GAUSSIAN BASIS") == 1 or line.find("TOTAL NUMBER OF BASIS FUNCTIONS") == 1: - # The first is from Julien's Example and the second is from Alexander's - # I think it happens if you use a polar basis function instead of a cartesian one - self.nbasis = int(line.strip().split()[-1]) - - elif line.find("SPHERICAL HARMONICS KEPT IN THE VARIATION SPACE") >= 0: - # Note that this line is present if ISPHER=1, e.g. for C_bigbasis - self.nmo = int(line.strip().split()[-1]) - - elif line.find("TOTAL NUMBER OF MOS IN VARIATION SPACE") == 1: - # Note that this line is not always present, so by default - # NBsUse is set equal to NBasis (see below). 
- self.nmo = int(line.split()[-1]) - - elif line.find("OVERLAP MATRIX") == 0 or line.find("OVERLAP MATRIX") == 1: - # The first is for PC-GAMESS, the second for GAMESS - # Read 1-electron overlap matrix - if not hasattr(self, "aooverlaps"): - self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d") - else: - self.logger.info("Reading additional aooverlaps...") - base = 0 - while base < self.nbasis: - blank = inputfile.next() - line = inputfile.next() # Basis fn number - blank = inputfile.next() - for i in range(self.nbasis - base): # Fewer lines each time - line = inputfile.next() - temp = line.split() - for j in range(4, len(temp)): - self.aooverlaps[base+j-4, i+base] = float(temp[j]) - self.aooverlaps[i+base, base+j-4] = float(temp[j]) - base += 5 - - # ECP Pseudopotential information - if "ECP POTENTIALS" in line: - if not hasattr(self, "coreelectrons"): - self.coreelectrons = [0]*self.natom - dashes = inputfile.next() - blank = inputfile.next() - header = inputfile.next() - while header.split()[0] == "PARAMETERS": - name = header[17:25] - atomnum = int(header[34:40]) - # The pseudopotnetial is given explicitely - if header[40:50] == "WITH ZCORE": - zcore = int(header[50:55]) - lmax = int(header[63:67]) - self.coreelectrons[atomnum-1] = zcore - # The pseudopotnetial is copied from another atom - if header[40:55] == "ARE THE SAME AS": - atomcopy = int(header[60:]) - self.coreelectrons[atomnum-1] = self.coreelectrons[atomcopy-1] - line = inputfile.next() - while line.split() <> []: - line = inputfile.next() - header = inputfile.next() - - # This was used before refactoring the parser, geotargets was set here after parsing. - #if not hasattr(self, "geotargets"): - # opttol = 1e-4 - # self.geotargets = numpy.array([opttol, 3. / opttol], "d") - #if hasattr(self,"geovalues"): self.geovalues = numpy.array(self.geovalues, "d") - - -if __name__ == "__main__": - import doctest, gamessparser - doctest.testmod(gamessparser, verbose=False) diff --git a/external/cclib/parser/gamessukparser.py b/external/cclib/parser/gamessukparser.py deleted file mode 100644 index 3b54a82129..0000000000 --- a/external/cclib/parser/gamessukparser.py +++ /dev/null @@ -1,524 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 861 $" - - -import re - -import numpy - -import logfileparser -import utils - - -class GAMESSUK(logfileparser.Logfile): - """A GAMESS UK log file""" - SCFRMS, SCFMAX, SCFENERGY = range(3) # Used to index self.scftargets[] - def __init__(self, *args, **kwargs): - - # Call the __init__ method of the superclass - super(GAMESSUK, self).__init__(logname="GAMESSUK", *args, **kwargs) - - def __str__(self): - """Return a string representation of the object.""" - return "GAMESS UK log file %s" % (self.filename) - - def __repr__(self): - """Return a representation of the object.""" - return 'GAMESSUK("%s")' % (self.filename) - - def normalisesym(self, label): - """Use standard symmetry labels instead of GAMESS UK labels. 
- - >>> t = GAMESSUK("dummyfile.txt") - >>> labels = ['a', 'a1', 'ag', "a'", 'a"', "a''", "a1''", 'a1"'] - >>> labels.extend(["e1+", "e1-"]) - >>> answer = [t.normalisesym(x) for x in labels] - >>> answer - ['A', 'A1', 'Ag', "A'", 'A"', 'A"', 'A1"', 'A1"', 'E1', 'E1'] - """ - label = label.replace("''", '"').replace("+", "").replace("-", "") - ans = label[0].upper() + label[1:] - - return ans - - def before_parsing(self): - - # This will be used to detect the first set of "nuclear coordinates" in - # a geometry-optimization - self.firstnuccoords = True - - # used for determining whether to add a second mosyms, etc. - self.betamosyms = self.betamoenergies = self.betamocoeffs = False - - def extract(self, inputfile, line): - """Extract information from the file object inputfile.""" - - if line[1:22] == "total number of atoms": - if not hasattr(self, "natom"): - self.natom = int(line.split()[-1]) - - if line[3:44] == "convergence threshold in optimization run": - # Assuming that this is only found in the case of OPTXYZ - # (i.e. an optimization in Cartesian coordinates) - self.geotargets = [float(line.split()[-2])] - - if line[32:61] == "largest component of gradient": - # This is the geotarget in the case of OPTXYZ - if not hasattr(self, "geovalues"): - self.geovalues = [] - self.geovalues.append([float(line.split()[4])]) - - if line[37:49] == "convergence?": - # Get the geovalues and geotargets for OPTIMIZE - if not hasattr(self, "geovalues"): - self.geovalues = [] - self.geotargets = [] - geotargets = [] - geovalues = [] - for i in range(4): - temp = line.split() - geovalues.append(float(temp[2])) - if not self.geotargets: - geotargets.append(float(temp[-2])) - line = inputfile.next() - self.geovalues.append(geovalues) - if not self.geotargets: - self.geotargets = geotargets - - if line[40:58] == "molecular geometry": - # Only one set of atomcoords is taken from this section - # For geo-opts, more coordinates are taken from the "nuclear coordinates" - if not hasattr(self, "atomcoords"): - self.atomcoords = [] - self.atomnos = [] - - stop = " "*9 + "*"*79 - line = inputfile.next() - while not line.startswith(stop): - line = inputfile.next() - line = inputfile.next() - while not line.startswith(stop): - line = inputfile.next() - empty = inputfile.next() - - atomcoords = [] - empty = inputfile.next() - while not empty.startswith(stop): - line = inputfile.next().split() # the coordinate data - atomcoords.append(map(float,line[3:6])) - self.atomnos.append(int(round(float(line[2])))) - while line!=empty: - line = inputfile.next() - # at this point, line is an empty line, right after - # 1 or more lines containing basis set information - empty = inputfile.next() - # empty is either a row of asterisks or the empty line - # before the row of coordinate data - - self.atomcoords.append(atomcoords) - self.atomnos = numpy.array(self.atomnos, "i") - - if line[40:59] == "nuclear coordinates": - # We need not remember the first geometry in the geo-opt as this will - # be recorded already, in the "molecular geometry" section - # (note: single-point calculations have no "nuclear coordinates" only - # "molecular geometry") - if self.firstnuccoords: - self.firstnuccoords = False - return - # This was continue (in loop) before parser refactoring. 
- # continue - if not hasattr(self, "atomcoords"): - self.atomcoords = [] - self.atomnos = [] - - asterisk = inputfile.next() - blank = inputfile.next() - colmname = inputfile.next() - equals = inputfile.next() - - atomcoords = [] - atomnos = [] - line = inputfile.next() - while line != equals: - temp = line.strip().split() - atomcoords.append([utils.convertor(float(x), "bohr", "Angstrom") for x in temp[0:3]]) - if not hasattr(self, "atomnos") or len(self.atomnos) == 0: - atomnos.append(int(float(temp[3]))) - - line = inputfile.next() - - self.atomcoords.append(atomcoords) - if not hasattr(self, "atomnos") or len(self.atomnos) == 0: - self.atomnos = atomnos - - if line[1:32] == "total number of basis functions": - self.nbasis = int(line.split()[-1]) - while line.find("charge of molecule")<0: - line = inputfile.next() - self.charge = int(line.split()[-1]) - self.mult = int(inputfile.next().split()[-1]) - - alpha = int(inputfile.next().split()[-1])-1 - beta = int(inputfile.next().split()[-1])-1 - if self.mult==1: - self.homos = numpy.array([alpha], "i") - else: - self.homos = numpy.array([alpha,beta], "i") - - if line[37:69] == "s-matrix over gaussian basis set": - self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d") - - minus = inputfile.next() - blank = inputfile.next() - i = 0 - while i < self.nbasis: - blank = inputfile.next() - blank = inputfile.next() - header = inputfile.next() - blank = inputfile.next() - blank = inputfile.next() - - for j in range(self.nbasis): - temp = map(float, inputfile.next().split()[1:]) - self.aooverlaps[j,(0+i):(len(temp)+i)] = temp - - i += len(temp) - - if line[18:43] == 'EFFECTIVE CORE POTENTIALS': - self.coreelectrons = numpy.zeros(self.natom, 'i') - asterisk = inputfile.next() - line = inputfile.next() - while line[15:46]!="*"*31: - if line.find("for atoms ...")>=0: - atomindex = [] - line = inputfile.next() - while line.find("core charge")<0: - broken = line.split() - atomindex.extend([int(x.split("-")[0]) for x in broken]) - line = inputfile.next() - charge = float(line.split()[4]) - for idx in atomindex: - self.coreelectrons[idx-1] = self.atomnos[idx-1] - charge - line = inputfile.next() - - if line[3:27] == "Wavefunction convergence": - self.scftarget = float(line.split()[-2]) - self.scftargets = [] - - if line[11:22] == "normal mode": - if not hasattr(self, "vibfreqs"): - self.vibfreqs = [] - self.vibirs = [] - - units = inputfile.next() - xyz = inputfile.next() - equals = inputfile.next() - line = inputfile.next() - while line!=equals: - temp = line.split() - self.vibfreqs.append(float(temp[1])) - self.vibirs.append(float(temp[-2])) - line = inputfile.next() - # Use the length of the vibdisps to figure out - # how many rotations and translations to remove - self.vibfreqs = self.vibfreqs[-len(self.vibdisps):] - self.vibirs = self.vibirs[-len(self.vibdisps):] - - if line[44:73] == "normalised normal coordinates": - self.vibdisps = [] - equals = inputfile.next() - blank = inputfile.next() - blank = inputfile.next() - freqnum = inputfile.next() - while freqnum.find("=")<0: - blank = inputfile.next() - equals = inputfile.next() - freqs = inputfile.next() - equals = inputfile.next() - blank = inputfile.next() - header = inputfile.next() - equals = inputfile.next() - p = [ [] for x in range(9) ] - for i in range(len(self.atomnos)): - brokenx = map(float, inputfile.next()[25:].split()) - brokeny = map(float, inputfile.next()[25:].split()) - brokenz = map(float, inputfile.next()[25:].split()) - for j,x in enumerate(zip(brokenx, brokeny, 
brokenz)): - p[j].append(x) - self.vibdisps.extend(p) - - blank = inputfile.next() - blank = inputfile.next() - freqnum = inputfile.next() - - if line[26:36] == "raman data": - self.vibramans = [] - - stars = inputfile.next() - blank = inputfile.next() - header = inputfile.next() - - blank = inputfile.next() - line = inputfile.next() - while line[1]!="*": - self.vibramans.append(float(line.split()[3])) - blank = inputfile.next() - line = inputfile.next() - # Use the length of the vibdisps to figure out - # how many rotations and translations to remove - self.vibramans = self.vibramans[-len(self.vibdisps):] - - if line[3:11] == "SCF TYPE": - self.scftype = line.split()[-2] - assert self.scftype in ['rhf', 'uhf', 'gvb'], "%s not one of 'rhf', 'uhf' or 'gvb'" % self.scftype - - if line[15:31] == "convergence data": - if not hasattr(self, "scfvalues"): - self.scfvalues = [] - self.scftargets.append([self.scftarget]) # Assuming it does not change over time - while line[1:10] != "="*9: - line = inputfile.next() - line = inputfile.next() - tester = line.find("tester") # Can be in a different place depending - assert tester>=0 - while line[1:10] != "="*9: # May be two or three lines (unres) - line = inputfile.next() - - scfvalues = [] - line = inputfile.next() - while line.strip(): - if line[2:6]!="****": - # e.g. **** recalulation of fock matrix on iteration 4 (examples/chap12/pyridine.out) - scfvalues.append([float(line[tester-5:tester+6])]) - line = inputfile.next() - self.scfvalues.append(scfvalues) - - if line[10:22] == "total energy" and len(line.split()) == 3: - if not hasattr(self, "scfenergies"): - self.scfenergies = [] - scfenergy = utils.convertor(float(line.split()[-1]), "hartree", "eV") - self.scfenergies.append(scfenergy) - - # Total energies after Moller-Plesset corrections - # Second order correction is always first, so its first occurance - # triggers creation of mpenergies (list of lists of energies) - # Further corrections are appended as found - # Note: GAMESS-UK sometimes prints only the corrections, - # so they must be added to the last value of scfenergies - if line[10:32] == "mp2 correlation energy" or \ - line[10:42] == "second order perturbation energy": - if not hasattr(self, "mpenergies"): - self.mpenergies = [] - self.mpenergies.append([]) - self.mp2correction = self.float(line.split()[-1]) - self.mp2energy = self.scfenergies[-1] + self.mp2correction - self.mpenergies[-1].append(utils.convertor(self.mp2energy, "hartree", "eV")) - if line[10:41] == "third order perturbation energy": - self.mp3correction = self.float(line.split()[-1]) - self.mp3energy = self.mp2energy + self.mp3correction - self.mpenergies[-1].append(utils.convertor(self.mp3energy, "hartree", "eV")) - - if line[40:59] == "molecular basis set": - self.gbasis = [] - line = inputfile.next() - while line.find("contraction coefficients")<0: - line = inputfile.next() - equals = inputfile.next() - blank = inputfile.next() - atomname = inputfile.next() - basisregexp = re.compile("\d*(\D+)") # Get everything after any digits - shellcounter = 1 - while line!=equals: - gbasis = [] # Stores basis sets on one atom - blank = inputfile.next() - blank = inputfile.next() - line = inputfile.next() - shellno = int(line.split()[0]) - shellgap = shellno - shellcounter - shellsize = 0 - while len(line.split())!=1 and line!=equals: - if line.split(): - shellsize += 1 - coeff = {} - # coefficients and symmetries for a block of rows - while line.strip() and line!=equals: - temp = line.strip().split() - # temp[1] may be either 
like (a) "1s" and "1sp", or (b) "s" and "sp" - # See GAMESS-UK 7.0 distribution/examples/chap12/pyridine2_21m10r.out - # for an example of the latter - sym = basisregexp.match(temp[1]).groups()[0] - assert sym in ['s', 'p', 'd', 'f', 'sp'], "'%s' not a recognized symmetry" % sym - if sym == "sp": - coeff.setdefault("S", []).append( (float(temp[3]), float(temp[6])) ) - coeff.setdefault("P", []).append( (float(temp[3]), float(temp[10])) ) - else: - coeff.setdefault(sym.upper(), []).append( (float(temp[3]), float(temp[6])) ) - line = inputfile.next() - # either a blank or a continuation of the block - if coeff: - if sym == "sp": - gbasis.append( ('S', coeff['S'])) - gbasis.append( ('P', coeff['P'])) - else: - gbasis.append( (sym.upper(), coeff[sym.upper()])) - if line==equals: - continue - line = inputfile.next() - # either the start of the next block or the start of a new atom or - # the end of the basis function section (signified by a line of equals) - numtoadd = 1 + (shellgap / shellsize) - shellcounter = shellno + shellsize - for x in range(numtoadd): - self.gbasis.append(gbasis) - - if line[50:70] == "----- beta set -----": - self.betamosyms = True - self.betamoenergies = True - self.betamocoeffs = True - # betamosyms will be turned off in the next - # SYMMETRY ASSIGNMENT section - - if line[31:50] == "SYMMETRY ASSIGNMENT": - if not hasattr(self, "mosyms"): - self.mosyms = [] - - multiple = {'a':1, 'b':1, 'e':2, 't':3, 'g':4, 'h':5} - - equals = inputfile.next() - line = inputfile.next() - while line != equals: # There may be one or two lines of title (compare mg10.out and duhf_1.out) - line = inputfile.next() - - mosyms = [] - line = inputfile.next() - while line != equals: - temp = line[25:30].strip() - if temp[-1]=='?': - # e.g. e? or t? or g? (see example/chap12/na7mg_uhf.out) - # for two As, an A and an E, and two Es of the same energy respectively. - t = line[91:].strip().split() - for i in range(1,len(t),2): - for j in range(multiple[t[i][0]]): # add twice for 'e', etc. - mosyms.append(self.normalisesym(t[i])) - else: - for j in range(multiple[temp[0]]): - mosyms.append(self.normalisesym(temp)) # add twice for 'e', etc. - line = inputfile.next() - assert len(mosyms) == self.nmo, "mosyms: %d but nmo: %d" % (len(mosyms), self.nmo) - if self.betamosyms: - # Only append if beta (otherwise with IPRINT SCF - # it will add mosyms for every step of a geo opt) - self.mosyms.append(mosyms) - self.betamosyms = False - elif self.scftype=='gvb': - # gvb has alpha and beta orbitals but they are identical - self.mosysms = [mosyms, mosyms] - else: - self.mosyms = [mosyms] - - if line[50:62] == "eigenvectors": - # Mocoeffs...can get evalues from here too - # (only if using FORMAT HIGH though will they all be present) - if not hasattr(self, "mocoeffs"): - self.aonames = [] - aonames = [] - minus = inputfile.next() - - mocoeffs = numpy.zeros( (self.nmo, self.nbasis), "d") - readatombasis = False - if not hasattr(self, "atombasis"): - self.atombasis = [] - for i in range(self.natom): - self.atombasis.append([]) - readatombasis = True - - blank = inputfile.next() - blank = inputfile.next() - evalues = inputfile.next() - - p = re.compile(r"\d+\s+(\d+)\s*(\w+) (\w+)") - oldatomname = "DUMMY VALUE" - - mo = 0 - while mo < self.nmo: - blank = inputfile.next() - blank = inputfile.next() - nums = inputfile.next() - blank = inputfile.next() - blank = inputfile.next() - for basis in range(self.nbasis): - line = inputfile.next() - # Fill atombasis only first time around. 
- if readatombasis: - orbno = int(line[1:5])-1 - atomno = int(line[6:9])-1 - self.atombasis[atomno].append(orbno) - if not self.aonames: - pg = p.match(line[:18].strip()).groups() - atomname = "%s%s%s" % (pg[1][0].upper(), pg[1][1:], pg[0]) - if atomname!=oldatomname: - aonum = 1 - oldatomname = atomname - name = "%s_%d%s" % (atomname, aonum, pg[2].upper()) - if name in aonames: - aonum += 1 - name = "%s_%d%s" % (atomname, aonum, pg[2].upper()) - aonames.append(name) - temp = map(float, line[19:].split()) - mocoeffs[mo:(mo+len(temp)), basis] = temp - # Fill atombasis only first time around. - readatombasis = False - if not self.aonames: - self.aonames = aonames - - line = inputfile.next() # blank line - while line==blank: - line = inputfile.next() - evalues = line - if evalues[:17].strip(): # i.e. if these aren't evalues - break # Not all the MOs are present - mo += len(temp) - mocoeffs = mocoeffs[0:(mo+len(temp)), :] # In case some aren't present - if self.betamocoeffs: - self.mocoeffs.append(mocoeffs) - else: - self.mocoeffs = [mocoeffs] - - if line[7:12] == "irrep": - ########## eigenvalues ########### - # This section appears once at the start of a geo-opt and once at the end - # unless IPRINT SCF is used (when it appears at every step in addition) - if not hasattr(self, "moenergies"): - self.moenergies = [] - - equals = inputfile.next() - while equals[1:5] != "====": # May be one or two lines of title (compare duhf_1.out and mg10.out) - equals = inputfile.next() - - moenergies = [] - line = inputfile.next() - if not line.strip(): # May be a blank line here (compare duhf_1.out and mg10.out) - line = inputfile.next() - - while line.strip() and line != equals: # May end with a blank or equals - temp = line.strip().split() - moenergies.append(utils.convertor(float(temp[2]), "hartree", "eV")) - line = inputfile.next() - self.nmo = len(moenergies) - if self.betamoenergies: - self.moenergies.append(moenergies) - self.betamoenergies = False - elif self.scftype=='gvb': - self.moenergies = [moenergies, moenergies] - else: - self.moenergies = [moenergies] - - -if __name__ == "__main__": - import doctest - doctest.testmod() diff --git a/external/cclib/parser/gaussianparser.py b/external/cclib/parser/gaussianparser.py deleted file mode 100644 index 834c5fe28f..0000000000 --- a/external/cclib/parser/gaussianparser.py +++ /dev/null @@ -1,1026 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -gmagoon 4/5/10-4/6/10 (this notice added 4/29/10): Gregory Magoon modified this file from cclib 1.0 -""" - -__revision__ = "$Revision: 882 $" - - -import re - -import numpy - -import logfileparser -import utils - - -class Gaussian(logfileparser.Logfile): - """A Gaussian 98/03 log file.""" - - def __init__(self, *args, **kwargs): - - # Call the __init__ method of the superclass - super(Gaussian, self).__init__(logname="Gaussian", *args, **kwargs) - - def __str__(self): - """Return a string representation of the object.""" - return "Gaussian log file %s" % (self.filename) - - def __repr__(self): - """Return a representation of the object.""" - return 'Gaussian("%s")' % (self.filename) - - def normalisesym(self, label): - """Use standard symmetry labels instead of Gaussian labels. 
- - To normalise: - (1) If label is one of [SG, PI, PHI, DLTA], replace by [sigma, pi, phi, delta] - (2) replace any G or U by their lowercase equivalent - - >>> sym = Gaussian("dummyfile").normalisesym - >>> labels = ['A1', 'AG', 'A1G', "SG", "PI", "PHI", "DLTA", 'DLTU', 'SGG'] - >>> map(sym, labels) - ['A1', 'Ag', 'A1g', 'sigma', 'pi', 'phi', 'delta', 'delta.u', 'sigma.g'] - """ - # note: DLT must come after DLTA - greek = [('SG', 'sigma'), ('PI', 'pi'), ('PHI', 'phi'), - ('DLTA', 'delta'), ('DLT', 'delta')] - for k,v in greek: - if label.startswith(k): - tmp = label[len(k):] - label = v - if tmp: - label = v + "." + tmp - - ans = label.replace("U", "u").replace("G", "g") - return ans - - def before_parsing(self): - - # Used to index self.scftargets[]. - SCFRMS, SCFMAX, SCFENERGY = range(3) - - # Flag that indicates whether it has reached the end of a geoopt. - self.optfinished = False - - # Flag for identifying Coupled Cluster runs. - self.coupledcluster = False - - # Fragment number for counterpoise calculations (normally zero). - self.counterpoise = 0 - - # Flag for identifying ONIOM calculations. - self.oniom = False - - def after_parsing(self): - - # Correct the percent values in the etsecs in the case of - # a restricted calculation. The following has the - # effect of including each transition twice. - if hasattr(self, "etsecs") and len(self.homos) == 1: - new_etsecs = [[(x[0], x[1], x[2] * numpy.sqrt(2)) for x in etsec] - for etsec in self.etsecs] - self.etsecs = new_etsecs - - def extract(self, inputfile, line): - """Extract information from the file object inputfile.""" - - # Number of atoms. - if line[1:8] == "NAtoms=": - - self.updateprogress(inputfile, "Attributes", self.fupdate) - - natom = int(line.split()[1]) - if not hasattr(self, "natom"): - self.natom = natom - - # Catch message about completed optimization. - if line[1:23] == "Optimization completed": - self.optfinished = True - - # Extract the atomic numbers and coordinates from the input orientation, - # in the event the standard orientation isn't available. - if not self.optfinished and line.find("Input orientation") > -1 or line.find("Z-Matrix orientation") > -1: - - # If this is a counterpoise calculation, this output means that - # the supermolecule is now being considered, so we can set: - self.counterpoise = 0 - - self.updateprogress(inputfile, "Attributes", self.cupdate) - - if not hasattr(self, "inputcoords"): - self.inputcoords = [] - self.inputatoms = [] - - hyphens = inputfile.next() - colmNames = inputfile.next() - colmNames = inputfile.next() - hyphens = inputfile.next() - - atomcoords = [] - line = inputfile.next() - while line != hyphens: - broken = line.split() - self.inputatoms.append(int(broken[1])) - atomcoords.append(map(float, broken[3:6])) - line = inputfile.next() - - self.inputcoords.append(atomcoords) - - if not hasattr(self, "natom"): - self.atomnos = numpy.array(self.inputatoms, 'i') - self.natom = len(self.atomnos) - - # Extract the atomic numbers and coordinates of the atoms. 
- if not self.optfinished and line.strip() == "Standard orientation:": - - self.updateprogress(inputfile, "Attributes", self.cupdate) - - # If this is a counterpoise calculation, this output means that - # the supermolecule is now being considered, so we can set: - self.counterpoise = 0 - - if not hasattr(self, "atomcoords"): - self.atomcoords = [] - - hyphens = inputfile.next() - colmNames = inputfile.next() - colmNames = inputfile.next() - hyphens = inputfile.next() - - atomnos = [] - atomcoords = [] - line = inputfile.next() - while line != hyphens: - broken = line.split() - atomnos.append(int(broken[1])) - atomcoords.append(map(float, broken[-3:])) - line = inputfile.next() - self.atomcoords.append(atomcoords) - if not hasattr(self, "natom"): - self.atomnos = numpy.array(atomnos, 'i') - self.natom = len(self.atomnos) - - # Find the targets for SCF convergence (QM calcs). - if line[1:44] == 'Requested convergence on RMS density matrix': - - if not hasattr(self, "scftargets"): - self.scftargets = [] - - scftargets = [] - # The RMS density matrix. - scftargets.append(self.float(line.split('=')[1].split()[0])) - line = inputfile.next() - # The MAX density matrix. - scftargets.append(self.float(line.strip().split('=')[1][:-1])) - line = inputfile.next() - # For G03, there's also the energy (not for G98). - if line[1:10] == "Requested": - scftargets.append(self.float(line.strip().split('=')[1][:-1])) - - self.scftargets.append(scftargets) - - # Extract SCF convergence information (QM calcs). - if line[1:10] == 'Cycle 1': - - if not hasattr(self, "scfvalues"): - self.scfvalues = [] - - scfvalues = [] - line = inputfile.next() - while line.find("SCF Done") == -1: - - self.updateprogress(inputfile, "QM convergence", self.fupdate) - - if line.find(' E=') == 0: - self.logger.debug(line) - - # RMSDP=3.74D-06 MaxDP=7.27D-05 DE=-1.73D-07 OVMax= 3.67D-05 - # or - # RMSDP=1.13D-05 MaxDP=1.08D-04 OVMax= 1.66D-04 - if line.find(" RMSDP") == 0: - - parts = line.split() - newlist = [self.float(x.split('=')[1]) for x in parts[0:2]] - energy = 1.0 - if len(parts) > 4: - energy = parts[2].split('=')[1] - if energy == "": - energy = self.float(parts[3]) - else: - energy = self.float(energy) - if len(self.scftargets[0]) == 3: # Only add the energy if it's a target criteria - newlist.append(energy) - scfvalues.append(newlist) - - try: - line = inputfile.next() - # May be interupted by EOF. - except StopIteration: - break - - self.scfvalues.append(scfvalues) - - # Extract SCF convergence information (AM1 calcs). - if line[1:4] == 'It=': - - self.scftargets = numpy.array([1E-7], "d") # This is the target value for the rms - self.scfvalues = [[]] - - line = inputfile.next() - while line.find(" Energy") == -1: - - if self.progress: - step = inputfile.tell() - if step != oldstep: - self.progress.update(step, "AM1 Convergence") - oldstep = step - - if line[1:4] == "It=": - parts = line.strip().split() - self.scfvalues[0].append(self.float(parts[-1][:-1])) - line = inputfile.next() - - # Note: this needs to follow the section where 'SCF Done' is used - # to terminate a loop when extracting SCF convergence information. - if line[1:9] == 'SCF Done': - - if not hasattr(self, "scfenergies"): - self.scfenergies = [] - - self.scfenergies.append(utils.convertor(self.float(line.split()[4]), "hartree", "eV")) - #gmagoon 5/27/09: added scfenergies reading for PM3 case where line begins with Energy= - #example line: " Energy= -0.077520562724 NIter= 14." 
- if line[1:8] == 'Energy=': - if not hasattr(self, "scfenergies"): - self.scfenergies = [] - self.scfenergies.append(utils.convertor(self.float(line.split()[1]), "hartree", "eV")) - #gmagoon 6/8/09: added molecular mass parsing (units will be amu) - #example line: " Molecular mass: 208.11309 amu." - if line[1:16] == 'Molecular mass:': - self.molmass = self.float(line.split()[2]) - - #gmagoon 5/27/09: added rotsymm for reading rotational symmetry number - #it would probably be better to read in point group (or calculate separately with OpenBabel, and I probably won't end up using this - #example line: " Rotational symmetry number 1." - if line[1:27] == 'Rotational symmetry number': - self.rotsymm = int(self.float(line.split()[3])) - - #gmagoon 5/28/09: added rotcons for rotational constants (at each step) in GHZ - #example line: Rotational constants (GHZ): 17.0009421 5.8016756 4.5717439 - #could also read in moment of inertia, but this should just differ by a constant: rot cons= h/(8*Pi^2*I) - #note that the last occurence of this in the thermochemistry section has reduced precision, so we will want to use the 2nd to last instance - if line[1:28] == 'Rotational constants (GHZ):': - if not hasattr(self, "rotcons"): - self.rotcons = [] - - #some linear cases (e.g. if linearity is not recognized) can have asterisks ****... for the first rotational constant; e.g.: - # Rotational constants (GHZ): ************ 12.73690 12.73690 - # or: - # Rotational constants (GHZ):*************** 10.4988228 10.4988223 - # if this is the case, replace the asterisks with a 0.0 - #we can also have cases like this: - # Rotational constants (GHZ):6983905.3278703 11.8051382 11.8051183 - #if line[28:29] == '*' or line.split()[3].startswith('*'): - if line[37:38] == '*': - self.rotcons.append([0.0]+map(float, line[28:].split()[-2:])) #record last 0.0 and last 2 numbers (words) in the string following the prefix - else: - self.rotcons.append(map(float, line[28:].split()[-3:])) #record last 3 numbers (words) in the string following the prefix - - # Total energies after Moller-Plesset corrections. - # Second order correction is always first, so its first occurance - # triggers creation of mpenergies (list of lists of energies). - # Further MP2 corrections are appended as found. - # - # Example MP2 output line: - # E2 = -0.9505918144D+00 EUMP2 = -0.28670924198852D+03 - # Warning! 
this output line is subtly different for MP3/4/5 runs - if "EUMP2" in line[27:34]: - - if not hasattr(self, "mpenergies"): - self.mpenergies = [] - self.mpenergies.append([]) - mp2energy = self.float(line.split("=")[2]) - self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV")) - - # Example MP3 output line: - # E3= -0.10518801D-01 EUMP3= -0.75012800924D+02 - if line[34:39] == "EUMP3": - - mp3energy = self.float(line.split("=")[2]) - self.mpenergies[-1].append(utils.convertor(mp3energy, "hartree", "eV")) - - # Example MP4 output lines: - # E4(DQ)= -0.31002157D-02 UMP4(DQ)= -0.75015901139D+02 - # E4(SDQ)= -0.32127241D-02 UMP4(SDQ)= -0.75016013648D+02 - # E4(SDTQ)= -0.32671209D-02 UMP4(SDTQ)= -0.75016068045D+02 - # Energy for most substitutions is used only (SDTQ by default) - if line[34:42] == "UMP4(DQ)": - - mp4energy = self.float(line.split("=")[2]) - line = inputfile.next() - if line[34:43] == "UMP4(SDQ)": - mp4energy = self.float(line.split("=")[2]) - line = inputfile.next() - if line[34:44] == "UMP4(SDTQ)": - mp4energy = self.float(line.split("=")[2]) - self.mpenergies[-1].append(utils.convertor(mp4energy, "hartree", "eV")) - - # Example MP5 output line: - # DEMP5 = -0.11048812312D-02 MP5 = -0.75017172926D+02 - if line[29:32] == "MP5": - mp5energy = self.float(line.split("=")[2]) - self.mpenergies[-1].append(utils.convertor(mp5energy, "hartree", "eV")) - - # Total energies after Coupled Cluster corrections. - # Second order MBPT energies (MP2) are also calculated for these runs, - # but the output is the same as when parsing for mpenergies. - # First turn on flag for Coupled Cluster runs. - if line[1:23] == "Coupled Cluster theory" or line[1:8] == "CCSD(T)": - - self.coupledcluster = True - if not hasattr(self, "ccenergies"): - self.ccenergies = [] - - # Now read the consecutive correlated energies when , - # but append only the last one to ccenergies. - # Only the highest level energy is appended - ex. CCSD(T), not CCSD. - if self.coupledcluster and line[27:35] == "E(CORR)=": - self.ccenergy = self.float(line.split()[3]) - if self.coupledcluster and line[1:9] == "CCSD(T)=": - self.ccenergy = self.float(line.split()[1]) - # Append when leaving link 913 - if self.coupledcluster and line[1:16] == "Leave Link 913": - self.ccenergies.append(utils.convertor(self.ccenergy, "hartree", "eV")) - - # Geometry convergence information. - if line[49:59] == 'Converged?': - - if not hasattr(self, "geotargets"): - self.geovalues = [] - self.geotargets = numpy.array([0.0, 0.0, 0.0, 0.0], "d") - - newlist = [0]*4 - for i in range(4): - line = inputfile.next() - self.logger.debug(line) - parts = line.split() - try: - value = self.float(parts[2]) - except ValueError: - value = -1.0 - #self.logger.error("Problem parsing the value for geometry optimisation: %s is not a number." % parts[2]) - #gmagoon 20111202: because the value can become **** (as shown below, I'm changing this to not report an error, and instead just set the value to -1.0 -# Item Value Threshold Converged? -# Maximum Force ******** 0.000015 NO -# RMS Force 1.813626 0.000010 NO -# Maximum Displacement 0.915407 0.000060 NO -# RMS Displacement 0.280831 0.000040 NO - else: - newlist[i] = value - self.geotargets[i] = self.float(parts[3]) - - self.geovalues.append(newlist) - - # Gradients. 
- # Read in the cartesian energy gradients (forces) from a block like this: - # ------------------------------------------------------------------- - # Center Atomic Forces (Hartrees/Bohr) - # Number Number X Y Z - # ------------------------------------------------------------------- - # 1 1 -0.012534744 -0.021754635 -0.008346094 - # 2 6 0.018984731 0.032948887 -0.038003451 - # 3 1 -0.002133484 -0.006226040 0.023174772 - # 4 1 -0.004316502 -0.004968213 0.023174772 - # -2 -0.001830728 -0.000743108 -0.000196625 - # ------------------------------------------------------------------ - # - # The "-2" line is for a dummy atom - # - # Then optimization is done in internal coordinates, Gaussian also - # print the forces in internal coordinates, which can be produced from - # the above. This block looks like this: - # Variable Old X -DE/DX Delta X Delta X Delta X New X - # (Linear) (Quad) (Total) - # ch 2.05980 0.01260 0.00000 0.01134 0.01134 2.07114 - # hch 1.75406 0.09547 0.00000 0.24861 0.24861 2.00267 - # hchh 2.09614 0.01261 0.00000 0.16875 0.16875 2.26489 - # Item Value Threshold Converged? - if line[37:43] == "Forces": - - if not hasattr(self, "grads"): - self.grads = [] - - header = inputfile.next() - dashes = inputfile.next() - line = inputfile.next() - forces = [] - while line != dashes: - broken = line.split() - Fx, Fy, Fz = broken[-3:] - forces.append([float(Fx),float(Fy),float(Fz)]) - line = inputfile.next() - self.grads.append(forces) - - # Charge and multiplicity. - # If counterpoise correction is used, multiple lines match. - # The first one contains charge/multiplicity of the whole molecule.: - # Charge = 0 Multiplicity = 1 in supermolecule - # Charge = 0 Multiplicity = 1 in fragment 1. - # Charge = 0 Multiplicity = 1 in fragment 2. - if line[1:7] == 'Charge' and line.find("Multiplicity")>=0: - - regex = ".*=(.*)Mul.*=\s*(\d+).*" - match = re.match(regex, line) - assert match, "Something unusual about the line: '%s'" % line - - self.charge = int(match.groups()[0]) - self.mult = int(match.groups()[1]) - - # Orbital symmetries. - if line[1:20] == 'Orbital symmetries:' and not hasattr(self, "mosyms"): - - # For counterpoise fragments, skip these lines. - if self.counterpoise != 0: return - - self.updateprogress(inputfile, "MO Symmetries", self.fupdate) - - self.mosyms = [[]] - line = inputfile.next() - unres = False - if line.find("Alpha Orbitals") == 1: - unres = True - line = inputfile.next() - i = 0 - while len(line) > 18 and line[17] == '(': - if line.find('Virtual') >= 0: - self.homos = numpy.array([i-1], "i") # 'HOMO' indexes the HOMO in the arrays - parts = line[17:].split() - for x in parts: - self.mosyms[0].append(self.normalisesym(x.strip('()'))) - i += 1 - line = inputfile.next() - if unres: - line = inputfile.next() - # Repeat with beta orbital information - i = 0 - self.mosyms.append([]) - while len(line) > 18 and line[17] == '(': - if line.find('Virtual')>=0: - if (hasattr(self, "homos")):#if there was also an alpha virtual orbital (here we consider beta) we will store two indices in the array - self.homos.resize([2]) # Extend the array to two elements - self.homos[1] = i-1 # 'HOMO' indexes the HOMO in the arrays - else:#otherwise (e.g. 
for O triplet) there is no alpha virtual orbital, only beta virtual orbitals, and we initialize the array with one element - self.homos = numpy.array([i-1], "i") # 'HOMO' indexes the HOMO in the arrays - parts = line[17:].split() - for x in parts: - self.mosyms[1].append(self.normalisesym(x.strip('()'))) - i += 1 - line = inputfile.next() - - # Alpha/Beta electron eigenvalues. - if line[1:6] == "Alpha" and line.find("eigenvalues") >= 0: - - # For counterpoise fragments, skip these lines. - if self.counterpoise != 0: return - - # For ONIOM calcs, ignore this section in order to bypass assertion failure. - if self.oniom: return - - self.updateprogress(inputfile, "Eigenvalues", self.fupdate) - self.moenergies = [[]] - HOMO = -2 - - while line.find('Alpha') == 1: - if line.split()[1] == "virt." and HOMO == -2: - - # If there aren't any symmetries, this is a good way to find the HOMO. - # Also, check for consistency if homos was already parsed. - HOMO = len(self.moenergies[0])-1 - if hasattr(self, "homos"): - assert HOMO == self.homos[0] - else: - self.homos = numpy.array([HOMO], "i") - - part = line[28:] - i = 0 - while i*10+4 < len(part): - x = part[i*10:(i+1)*10] - self.moenergies[0].append(utils.convertor(self.float(x), "hartree", "eV")) - i += 1 - line = inputfile.next() - # If, at this point, self.homos is unset, then there were not - # any alpha virtual orbitals - if not hasattr(self, "homos"): - HOMO = len(self.moenergies[0])-1 - self.homos = numpy.array([HOMO], "i") - - - if line.find('Beta') == 2: - self.moenergies.append([]) - - HOMO = -2 - while line.find('Beta') == 2: - if line.split()[1] == "virt." and HOMO == -2: - - # If there aren't any symmetries, this is a good way to find the HOMO. - # Also, check for consistency if homos was already parsed. - HOMO = len(self.moenergies[1])-1 - if len(self.homos) == 2: - assert HOMO == self.homos[1] - else: - self.homos.resize([2]) - self.homos[1] = HOMO - - part = line[28:] - i = 0 - while i*10+4 < len(part): - x = part[i*10:(i+1)*10] - self.moenergies[1].append(utils.convertor(self.float(x), "hartree", "eV")) - i += 1 - line = inputfile.next() - - self.moenergies = [numpy.array(x, "d") for x in self.moenergies] - - # Gaussian Rev <= B.0.3 (?) - # AO basis set in the form of general basis input: - # 1 0 - # S 3 1.00 0.000000000000 - # 0.7161683735D+02 0.1543289673D+00 - # 0.1304509632D+02 0.5353281423D+00 - # 0.3530512160D+01 0.4446345422D+00 - # SP 3 1.00 0.000000000000 - # 0.2941249355D+01 -0.9996722919D-01 0.1559162750D+00 - # 0.6834830964D+00 0.3995128261D+00 0.6076837186D+00 - # 0.2222899159D+00 0.7001154689D+00 0.3919573931D+00 - if line[1:16] == "AO basis set in": - - # For counterpoise fragment calcualtions, skip these lines. - if self.counterpoise != 0: return - - self.gbasis = [] - line = inputfile.next() - while line.strip(): - gbasis = [] - line = inputfile.next() - while line.find("*")<0: - temp = line.split() - symtype = temp[0] - numgau = int(temp[1]) - gau = [] - for i in range(numgau): - temp = map(self.float, inputfile.next().split()) - gau.append(temp) - - for i,x in enumerate(symtype): - newgau = [(z[0],z[i+1]) for z in gau] - gbasis.append( (x,newgau) ) - line = inputfile.next() # i.e. "****" or "SP ...." - self.gbasis.append(gbasis) - line = inputfile.next() # i.e. "20 0" or blank line - - # Start of the IR/Raman frequency section. - # Caution is advised here, as additional frequency blocks - # can be printed by Gaussian (with slightly different formats), - # often doubling the information printed. 
- # See, for a non-standard exmaple, regression Gaussian98/test_H2.log - if line[1:14] == "Harmonic freq": - - self.updateprogress(inputfile, "Frequency Information", self.fupdate) - - # The whole block should not have any blank lines. - while line.strip() != "": - - # Lines with symmetries and symm. indices begin with whitespace. - if line[1:15].strip() == "" and not line[15:22].strip().isdigit(): - - if not hasattr(self, 'vibsyms'): - self.vibsyms = [] - syms = line.split() - self.vibsyms.extend(syms) - - if line[1:15] == "Frequencies --": - - if not hasattr(self, 'vibfreqs'): - self.vibfreqs = [] - freqs = [self.float(f) for f in line[15:].split()] - self.vibfreqs.extend(freqs) - - if line[1:15] == "IR Inten --": - - if not hasattr(self, 'vibirs'): - self.vibirs = [] - irs = [self.float(f) for f in line[15:].split()] - self.vibirs.extend(irs) - - if line[1:15] == "Raman Activ --": - - if not hasattr(self, 'vibramans'): - self.vibramans = [] - ramans = [self.float(f) for f in line[15:].split()] - self.vibramans.extend(ramans) - - # Block with displacement should start with this. - # Remember, it is possible to have less than three columns! - # There should be as many lines as there are atoms. - if line[1:29] == "Atom AN X Y Z": - - if not hasattr(self, 'vibdisps'): - self.vibdisps = [] - disps = [] - for n in range(self.natom): - line = inputfile.next() - numbers = [float(s) for s in line[10:].split()] - N = len(numbers) / 3 - if not disps: - for n in range(N): - disps.append([]) - for n in range(N): - disps[n].append(numbers[3*n:3*n+3]) - self.vibdisps.extend(disps) - - line = inputfile.next() - -# Below is the old code for the IR/Raman frequency block, can probably be removed. -# while len(line[:15].split()) == 0: -# self.logger.debug(line) -# self.vibsyms.extend(line.split()) # Adding new symmetry -# line = inputfile.next() -# # Read in frequencies. -# freqs = [self.float(f) for f in line.split()[2:]] -# self.vibfreqs.extend(freqs) -# line = inputfile.next() -# line = inputfile.next() -# line = inputfile.next() -# irs = [self.float(f) for f in line.split()[3:]] -# self.vibirs.extend(irs) -# line = inputfile.next() # Either the header or a Raman line -# if line.find("Raman") >= 0: -# if not hasattr(self, "vibramans"): -# self.vibramans = [] -# ramans = [self.float(f) for f in line.split()[3:]] -# self.vibramans.extend(ramans) -# line = inputfile.next() # Depolar (P) -# line = inputfile.next() # Depolar (U) -# line = inputfile.next() # Header -# line = inputfile.next() # First line of cartesian displacement vectors -# p = [[], [], []] -# while len(line[:15].split()) > 0: -# # Store the cartesian displacement vectors -# broken = map(float, line.strip().split()[2:]) -# for i in range(0, len(broken), 3): -# p[i/3].append(broken[i:i+3]) -# line = inputfile.next() -# self.vibdisps.extend(p[0:len(broken)/3]) -# line = inputfile.next() # Should be the line with symmetries -# self.vibfreqs = numpy.array(self.vibfreqs, "d") -# self.vibirs = numpy.array(self.vibirs, "d") -# self.vibdisps = numpy.array(self.vibdisps, "d") -# if hasattr(self, "vibramans"): -# self.vibramans = numpy.array(self.vibramans, "d") - - # Electronic transitions. - if line[1:14] == "Excited State": - - if not hasattr(self, "etenergies"): - self.etenergies = [] - self.etoscs = [] - self.etsyms = [] - self.etsecs = [] - # Need to deal with lines like: - # (restricted calc) - # Excited State 1: Singlet-BU 5.3351 eV 232.39 nm f=0.1695 - # (unrestricted calc) (first excited state is 2!) 
- # Excited State 2: ?Spin -A 0.1222 eV 10148.75 nm f=0.0000 - # (Gaussian 09 ZINDO) - # Excited State 1: Singlet-?Sym 2.5938 eV 478.01 nm f=0.0000 =0.000 - p = re.compile(":(?P.*?)(?P-?\d*\.\d*) eV") - groups = p.search(line).groups() - self.etenergies.append(utils.convertor(self.float(groups[1]), "eV", "cm-1")) - self.etoscs.append(self.float(line.split("f=")[-1].split()[0])) - self.etsyms.append(groups[0].strip()) - - line = inputfile.next() - - p = re.compile("(\d+)") - CIScontrib = [] - while line.find(" ->") >= 0: # This is a contribution to the transition - parts = line.split("->") - self.logger.debug(parts) - # Has to deal with lines like: - # 32 -> 38 0.04990 - # 35A -> 45A 0.01921 - frommoindex = 0 # For restricted or alpha unrestricted - fromMO = parts[0].strip() - if fromMO[-1] == "B": - frommoindex = 1 # For beta unrestricted - fromMO = int(p.match(fromMO).group())-1 # subtract 1 so that it is an index into moenergies - - t = parts[1].split() - tomoindex = 0 - toMO = t[0] - if toMO[-1] == "B": - tomoindex = 1 - toMO = int(p.match(toMO).group())-1 # subtract 1 so that it is an index into moenergies - - percent = self.float(t[1]) - # For restricted calculations, the percentage will be corrected - # after parsing (see after_parsing() above). - CIScontrib.append([(fromMO, frommoindex), (toMO, tomoindex), percent]) - line = inputfile.next() - self.etsecs.append(CIScontrib) - -# Circular dichroism data (different for G03 vs G09) - -# G03 - -## <0|r|b> * (Au), Rotatory Strengths (R) in -## cgs (10**-40 erg-esu-cm/Gauss) -## state X Y Z R(length) -## 1 0.0006 0.0096 -0.0082 -0.4568 -## 2 0.0251 -0.0025 0.0002 -5.3846 -## 3 0.0168 0.4204 -0.3707 -15.6580 -## 4 0.0721 0.9196 -0.9775 -3.3553 - -# G09 - -## 1/2[<0|r|b>* + (<0|rxdel|b>*)*] -## Rotatory Strengths (R) in cgs (10**-40 erg-esu-cm/Gauss) -## state XX YY ZZ R(length) R(au) -## 1 -0.3893 -6.7546 5.7736 -0.4568 -0.0010 -## 2 -17.7437 1.7335 -0.1435 -5.3845 -0.0114 -## 3 -11.8655 -297.2604 262.1519 -15.6580 -0.0332 - - if (line[1:52] == "<0|r|b> * (Au), Rotatory Strengths (R)" or - line[1:50] == "1/2[<0|r|b>* + (<0|rxdel|b>*)*]"): - - self.etrotats = [] - inputfile.next() # Units - headers = inputfile.next() # Headers - Ncolms = len(headers.split()) - line = inputfile.next() - parts = line.strip().split() - while len(parts) == Ncolms: - try: - R = self.float(parts[4]) - except ValueError: - # nan or -nan if there is no first excited state - # (for unrestricted calculations) - pass - else: - self.etrotats.append(R) - line = inputfile.next() - temp = line.strip().split() - parts = line.strip().split() - self.etrotats = numpy.array(self.etrotats, "d") - - # Number of basis sets functions. - # Has to deal with lines like: - # NBasis = 434 NAE= 97 NBE= 97 NFC= 34 NFV= 0 - # and... - # NBasis = 148 MinDer = 0 MaxDer = 0 - # Although the former is in every file, it doesn't occur before - # the overlap matrix is printed. - if line[1:7] == "NBasis" or line[4:10] == "NBasis": - - # For counterpoise fragment, skip these lines. - if self.counterpoise != 0: return - - # For ONIOM calcs, ignore this section in order to bypass assertion failure. - if self.oniom: return - - # If nbasis was already parsed, check if it changed. - nbasis = int(line.split('=')[1].split()[0]) - if hasattr(self, "nbasis"): - assert nbasis == self.nbasis - else: - self.nbasis = nbasis - - # Number of linearly-independent basis sets. - if line[1:7] == "NBsUse": - - # For counterpoise fragment, skip these lines. 
- if self.counterpoise != 0: return - - # For ONIOM calcs, ignore this section in order to bypass assertion failure. - if self.oniom: return - - # If nmo was already parsed, check if it changed. - nmo = int(line.split('=')[1].split()[0]) - if hasattr(self, "nmo"): - assert nmo == self.nmo - else: - self.nmo = nmo - - # For AM1 calculations, set nbasis by a second method, - # as nmo may not always be explicitly stated. - if line[7:22] == "basis functions, ": - - nbasis = int(line.split()[0]) - if hasattr(self, "nbasis"): - assert nbasis == self.nbasis - else: - self.nbasis = nbasis - - # Molecular orbital overlap matrix. - # Has to deal with lines such as: - # *** Overlap *** - # ****** Overlap ****** - if line[1:4] == "***" and (line[5:12] == "Overlap" - or line[8:15] == "Overlap"): - - self.aooverlaps = numpy.zeros( (self.nbasis, self.nbasis), "d") - # Overlap integrals for basis fn#1 are in aooverlaps[0] - base = 0 - colmNames = inputfile.next() - while base < self.nbasis: - - self.updateprogress(inputfile, "Overlap", self.fupdate) - - for i in range(self.nbasis-base): # Fewer lines this time - line = inputfile.next() - parts = line.split() - for j in range(len(parts)-1): # Some lines are longer than others - k = float(parts[j+1].replace("D", "E")) - self.aooverlaps[base+j, i+base] = k - self.aooverlaps[i+base, base+j] = k - base += 5 - colmNames = inputfile.next() - self.aooverlaps = numpy.array(self.aooverlaps, "d") - - # Molecular orbital coefficients (mocoeffs). - # Essentially only produced for SCF calculations. - # This is also the place where aonames and atombasis are parsed. - if line[5:35] == "Molecular Orbital Coefficients" or line[5:41] == "Alpha Molecular Orbital Coefficients" or line[5:40] == "Beta Molecular Orbital Coefficients": - - if line[5:40] == "Beta Molecular Orbital Coefficients": - beta = True - if self.popregular: - return - # This was continue before refactoring the parsers. 
- #continue # Not going to extract mocoeffs - # Need to add an extra array to self.mocoeffs - self.mocoeffs.append(numpy.zeros((self.nmo, self.nbasis), "d")) - else: - beta = False - self.aonames = [] - self.atombasis = [] - mocoeffs = [numpy.zeros((self.nmo, self.nbasis), "d")] - - base = 0 - self.popregular = False - for base in range(0, self.nmo, 5): - - self.updateprogress(inputfile, "Coefficients", self.fupdate) - - colmNames = inputfile.next() - - if not colmNames.split(): - self.logger.warning("Molecular coefficients header found but no coefficients.") - break; - - if base==0 and int(colmNames.split()[0])!=1: - # Implies that this is a POP=REGULAR calculation - # and so, only aonames (not mocoeffs) will be extracted - self.popregular = True - symmetries = inputfile.next() - eigenvalues = inputfile.next() - for i in range(self.nbasis): - - line = inputfile.next() - if base == 0 and not beta: # Just do this the first time 'round - # Changed below from :12 to :11 to deal with Elmar Neumann's example - parts = line[:11].split() - if len(parts) > 1: # New atom - if i>0: - self.atombasis.append(atombasis) - atombasis = [] - atomname = "%s%s" % (parts[2], parts[1]) - orbital = line[11:20].strip() - self.aonames.append("%s_%s" % (atomname, orbital)) - atombasis.append(i) - - part = line[21:].replace("D", "E").rstrip() - temp = [] - for j in range(0, len(part), 10): - temp.append(float(part[j:j+10])) - if beta: - self.mocoeffs[1][base:base + len(part) / 10, i] = temp - else: - mocoeffs[0][base:base + len(part) / 10, i] = temp - if base == 0 and not beta: # Do the last update of atombasis - self.atombasis.append(atombasis) - if self.popregular: - # We now have aonames, so no need to continue - break - if not self.popregular and not beta: - self.mocoeffs = mocoeffs - - # Natural Orbital Coefficients (nocoeffs) - alternative for mocoeffs. - # Most extensively formed after CI calculations, but not only. - # Like for mocoeffs, this is also where aonames and atombasis are parsed. - if line[5:33] == "Natural Orbital Coefficients": - - self.aonames = [] - self.atombasis = [] - nocoeffs = numpy.zeros((self.nmo, self.nbasis), "d") - - base = 0 - self.popregular = False - for base in range(0, self.nmo, 5): - - self.updateprogress(inputfile, "Coefficients", self.fupdate) - - colmNames = inputfile.next() - if base==0 and int(colmNames.split()[0])!=1: - # Implies that this is a POP=REGULAR calculation - # and so, only aonames (not mocoeffs) will be extracted - self.popregular = True - - # No symmetry line for natural orbitals. - # symmetries = inputfile.next() - eigenvalues = inputfile.next() - - for i in range(self.nbasis): - - line = inputfile.next() - - # Just do this the first time 'round. - if base == 0: - - # Changed below from :12 to :11 to deal with Elmar Neumann's example. - parts = line[:11].split() - # New atom. - if len(parts) > 1: - if i>0: - self.atombasis.append(atombasis) - atombasis = [] - atomname = "%s%s" % (parts[2], parts[1]) - orbital = line[11:20].strip() - self.aonames.append("%s_%s" % (atomname, orbital)) - atombasis.append(i) - - part = line[21:].replace("D", "E").rstrip() - temp = [] - - for j in range(0, len(part), 10): - temp.append(float(part[j:j+10])) - - nocoeffs[base:base + len(part) / 10, i] = temp - - # Do the last update of atombasis. - if base == 0: - self.atombasis.append(atombasis) - - # We now have aonames, so no need to continue. - if self.popregular: - break - - if not self.popregular: - self.nocoeffs = nocoeffs - - # Pseudopotential charges. 
- if line.find("Pseudopotential Parameters") > -1: - - dashes = inputfile.next() - label1 = inputfile.next() - label2 = inputfile.next() - dashes = inputfile.next() - - line = inputfile.next() - if line.find("Centers:") < 0: - return - # This was continue before parser refactoring. - # continue - - centers = map(int, line.split()[1:]) - centers.sort() # Not always in increasing order - - self.coreelectrons = numpy.zeros(self.natom, "i") - - for center in centers: - line = inputfile.next() - front = line[:10].strip() - while not (front and int(front) == center): - line = inputfile.next() - front = line[:10].strip() - info = line.split() - self.coreelectrons[center-1] = int(info[1]) - int(info[2]) - - # This will be printed for counterpoise calcualtions only. - # To prevent crashing, we need to know which fragment is being considered. - # Other information is also printed in lines that start like this. - if line[1:14] == 'Counterpoise:': - - if line[42:50] == "fragment": - self.counterpoise = int(line[51:54]) - - # This will be printed only during ONIOM calcs; use it to set a flag - # that will allow assertion failures to be bypassed in the code. - if line[1:7] == "ONIOM:": - self.oniom = True - -if __name__ == "__main__": - import doctest, gaussianparser - doctest.testmod(gaussianparser, verbose=False) diff --git a/external/cclib/parser/jaguarparser.py b/external/cclib/parser/jaguarparser.py deleted file mode 100644 index 50ea95200b..0000000000 --- a/external/cclib/parser/jaguarparser.py +++ /dev/null @@ -1,474 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 861 $" - - -import re - -import numpy - -import logfileparser -import utils - - -class Jaguar(logfileparser.Logfile): - """A Jaguar output file""" - - def __init__(self, *args, **kwargs): - - # Call the __init__ method of the superclass - super(Jaguar, self).__init__(logname="Jaguar", *args, **kwargs) - - def __str__(self): - """Return a string representation of the object.""" - return "Jaguar output file %s" % (self.filename) - - def __repr__(self): - """Return a representation of the object.""" - return 'Jaguar("%s")' % (self.filename) - - def normalisesym(self, label): - """Normalise the symmetries used by Jaguar. - - To normalise, three rules need to be applied: - (1) To handle orbitals of E symmetry, retain everything before the / - (2) Replace two p's by " - (2) Replace any remaining single p's by ' - - >>> t = Jaguar("dummyfile").normalisesym - >>> labels = ['A', 'A1', 'Ag', 'Ap', 'App', "A1p", "A1pp", "E1pp/Ap"] - >>> answers = map(t, labels) - >>> print answers - ['A', 'A1', 'Ag', "A'", 'A"', "A1'", 'A1"', 'E1"'] - """ - ans = label.split("/")[0].replace("pp", '"').replace("p", "'") - return ans - - def before_parsing(self): - - self.geoopt = False # Is this a GeoOpt? Needed for SCF targets/values. 
- - def extract(self, inputfile, line): - """Extract information from the file object inputfile.""" - - if line[0:4] == "etot": - # Get SCF convergence information - if not hasattr(self, "scfvalues"): - self.scfvalues = [] - self.scftargets = [[5E-5, 5E-6]] - values = [] - while line[0:4] == "etot": - # Jaguar 4.2 - # etot 1 N N 0 N -382.08751886450 2.3E-03 1.4E-01 - # etot 2 Y Y 0 N -382.27486023153 1.9E-01 1.4E-03 5.7E-02 - # Jaguar 6.5 - # etot 1 N N 0 N -382.08751881733 2.3E-03 1.4E-01 - # etot 2 Y Y 0 N -382.27486018708 1.9E-01 1.4E-03 5.7E-02 - temp = line.split()[7:] - if len(temp)==3: - denergy = float(temp[0]) - else: - denergy = 0 # Should really be greater than target value - # or should we just ignore the values in this line - ddensity = float(temp[-2]) - maxdiiserr = float(temp[-1]) - if not self.geoopt: - values.append([denergy, ddensity]) - else: - values.append([ddensity]) - line = inputfile.next() - self.scfvalues.append(values) - - # Hartree-Fock energy after SCF - if line[1:18] == "SCFE: SCF energy:": - if not hasattr(self, "scfenergies"): - self.scfenergies = [] - temp = line.strip().split() - scfenergy = float(temp[temp.index("hartrees") - 1]) - scfenergy = utils.convertor(scfenergy, "hartree", "eV") - self.scfenergies.append(scfenergy) - - # Energy after LMP2 correction - if line[1:18] == "Total LMP2 Energy": - if not hasattr(self, "mpenergies"): - self.mpenergies = [[]] - lmp2energy = float(line.split()[-1]) - lmp2energy = utils.convertor(lmp2energy, "hartree", "eV") - self.mpenergies[-1].append(lmp2energy) - - if line[2:14] == "new geometry" or line[1:21] == "Symmetrized geometry" or line.find("Input geometry") > 0: - # Get the atom coordinates - if not hasattr(self, "atomcoords") or line[1:21] == "Symmetrized geometry": - # Wipe the "Input geometry" if "Symmetrized geometry" present - self.atomcoords = [] - p = re.compile("(\D+)\d+") # One/more letters followed by a number - atomcoords = [] - atomnos = [] - angstrom = inputfile.next() - title = inputfile.next() - line = inputfile.next() - while line.strip(): - temp = line.split() - element = p.findall(temp[0])[0] - atomnos.append(self.table.number[element]) - atomcoords.append(map(float, temp[1:])) - line = inputfile.next() - self.atomcoords.append(atomcoords) - self.atomnos = numpy.array(atomnos, "i") - self.natom = len(atomcoords) - - # Extract charge and multiplicity - if line[2:22] == "net molecular charge": - self.charge = int(line.split()[-1]) - self.mult = int(inputfile.next().split()[-1]) - - if line[2:24] == "start of program geopt": - if not self.geoopt: - # Need to keep only the RMS density change info - # if this is a geoopt - self.scftargets = [[self.scftargets[0][0]]] - if hasattr(self, "scfvalues"): - self.scfvalues[0] = [[x[0]] for x in self.scfvalues[0]] - self.geoopt = True - else: - self.scftargets.append([5E-5]) - - if line[2:28] == "geometry optimization step": - # Get Geometry Opt convergence information - if not hasattr(self, "geovalues"): - self.geovalues = [] - self.geotargets = numpy.zeros(5, "d") - gopt_step = int(line.split()[-1]) - energy = inputfile.next() - # quick hack for messages of the sort: - # ** restarting optimization from step 2 ** - # as found in regression file ptnh3_2_H2O_2_2plus.out - if inputfile.next().strip(): - blank = inputfile.next() - line = inputfile.next() - values = [] - target_index = 0 - if gopt_step == 1: - # The first optimization step does not produce an energy change - values.append(0.0) - target_index = 1 - while line.strip(): - if len(line) > 40 and 
line[41] == "(": - # A new geo convergence value - values.append(float(line[26:37])) - self.geotargets[target_index] = float(line[43:54]) - target_index += 1 - line = inputfile.next() - self.geovalues.append(values) - - if line.find("number of occupied orbitals") > 0: - # Get number of MOs - occs = int(line.split()[-1]) - line = inputfile.next() - virts = int(line.split()[-1]) - self.nmo = occs + virts - self.homos = numpy.array([occs-1], "i") - - self.unrestrictedflag = False - - if line.find("number of alpha occupied orb") > 0: - # Get number of MOs for an unrestricted calc - - aoccs = int(line.split()[-1]) - line = inputfile.next() - avirts = int(line.split()[-1]) - line = inputfile.next() - boccs = int(line.split()[-1]) - line = inputfile.next() - bvirt = int(line.split()[-1]) - - self.nmo = aoccs + avirts - self.homos = numpy.array([aoccs-1,boccs-1], "i") - self.unrestrictedflag = True - - # MO energies and symmetries. - # Jaguar 7.0: provides energies and symmetries for both - # restricted and unrestricted calculations, like this: - # Alpha Orbital energies/symmetry label: - # -10.25358 Bu -10.25353 Ag -10.21931 Bu -10.21927 Ag - # -10.21792 Bu -10.21782 Ag -10.21773 Bu -10.21772 Ag - # ... - # Jaguar 6.5: prints both only for restricted calculations, - # so for unrestricted calculations the output it looks like this: - # Alpha Orbital energies: - # -10.25358 -10.25353 -10.21931 -10.21927 -10.21792 -10.21782 - # -10.21773 -10.21772 -10.21537 -10.21537 -1.02078 -0.96193 - # ... - # Presence of 'Orbital energies' is enough to catch all versions. - if "Orbital energies" in line: - - # Parsing results is identical for restricted/unrestricted - # calculations, just assert later that alpha/beta order is OK. - spin = int(line[2:6] == "Beta") - - # Check if symmetries are printed also. - issyms = "symmetry label" in line - - if not hasattr(self, "moenergies"): - self.moenergies = [] - if issyms and not hasattr(self, "mosyms"): - self.mosyms = [] - - # Grow moeneriges/mosyms and make sure they are empty when - # parsed multiple times - currently cclib returns only - # the final output (ex. in a geomtry optimization). - if len(self.moenergies) < spin+1: - self.moenergies.append([]) - self.moenergies[spin] = [] - if issyms: - if len(self.mosyms) < spin+1: - self.mosyms.append([]) - self.mosyms[spin] = [] - - line = inputfile.next().split() - while len(line) > 0: - if issyms: - energies = [float(line[2*i]) for i in range(len(line)/2)] - syms = [line[2*i+1] for i in range(len(line)/2)] - else: - energies = [float(e) for e in line] - energies = [utils.convertor(e, "hartree", "eV") for e in energies] - self.moenergies[spin].extend(energies) - if issyms: - syms = [self.normalisesym(s) for s in syms] - self.mosyms[spin].extend(syms) - line = inputfile.next().split() - - # There should always be an extra blank line after all this. 
- line = inputfile.next() - - if line.find("Occupied + virtual Orbitals- final wvfn") > 0: - - blank = inputfile.next() - stars = inputfile.next() - blank = inputfile.next() - blank = inputfile.next() - - if not hasattr(self,"mocoeffs"): - if self.unrestrictedflag: - spin = 2 - else: - spin = 1 - - self.mocoeffs = [] - - - aonames = [] - lastatom = "X" - - readatombasis = False - if not hasattr(self, "atombasis"): - self.atombasis = [] - for i in range(self.natom): - self.atombasis.append([]) - readatombasis = True - - offset = 0 - - for s in range(spin): - mocoeffs = numpy.zeros((len(self.moenergies[s]), self.nbasis), "d") - - if s == 1: #beta case - stars = inputfile.next() - blank = inputfile.next() - title = inputfile.next() - blank = inputfile.next() - stars = inputfile.next() - blank = inputfile.next() - blank = inputfile.next() - - for k in range(0,len(self.moenergies[s]),5): - - numbers = inputfile.next() - eigens = inputfile.next() - line = inputfile.next() - - for i in range(self.nbasis): - - info = line.split() - - # Fill atombasis only first time around. - if readatombasis and k == 0: - orbno = int(info[0]) - atom = info[1] - if atom[1].isalpha(): - atomno = int(atom[2:]) - else: - atomno = int(atom[1:]) - self.atombasis[atomno-1].append(orbno-1) - - if not hasattr(self,"aonames"): - if lastatom != info[1]: - scount = 1 - pcount = 3 - dcount = 6 #six d orbitals in Jaguar - - if info[2] == 'S': - aonames.append("%s_%i%s"%(info[1], scount, info[2])) - scount += 1 - - if info[2] == 'X' or info[2] == 'Y' or info[2] == 'Z': - aonames.append("%s_%iP%s"%(info[1], pcount / 3, info[2])) - pcount += 1 - - if info[2] == 'XX' or info[2] == 'YY' or info[2] == 'ZZ' or \ - info[2] == 'XY' or info[2] == 'XZ' or info[2] == 'YZ': - - aonames.append("%s_%iD%s"%(info[1], dcount / 6, info[2])) - dcount += 1 - - lastatom = info[1] - - for j in range(len(info[3:])): - mocoeffs[j+k,i] = float(info[3+j]) - - line = inputfile.next() - - if not hasattr(self,"aonames"): - self.aonames = aonames - - offset += 5 - self.mocoeffs.append(mocoeffs) - - - if line[2:6] == "olap": - if line[6]=="-": - return - # This was continue (in loop) before parser refactoring. - # continue # avoid "olap-dev" - self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d") - - for i in range(0, self.nbasis, 5): - blank = inputfile.next() - header = inputfile.next() - for j in range(i, self.nbasis): - temp = map(float, inputfile.next().split()[1:]) - self.aooverlaps[j, i:(i+len(temp))] = temp - self.aooverlaps[i:(i+len(temp)), j] = temp - - if line[1:28] == "number of occupied orbitals": - self.homos = numpy.array([float(line.strip().split()[-1])-1], "i") - - if line[2:27] == "number of basis functions": - self.nbasis = int(line.strip().split()[-1]) - - # IR output looks like this: - # frequencies 72.45 113.25 176.88 183.76 267.60 312.06 - # symmetries Au Bg Au Bu Ag Bg - # intensities 0.07 0.00 0.28 0.52 0.00 0.00 - # reduc. mass 1.90 0.74 1.06 1.42 1.19 0.85 - # force const 0.01 0.01 0.02 0.03 0.05 0.05 - # C1 X 0.00000 0.00000 0.00000 -0.05707 -0.06716 0.00000 - # C1 Y 0.00000 0.00000 0.00000 0.00909 -0.02529 0.00000 - # C1 Z 0.04792 -0.06032 -0.01192 0.00000 0.00000 0.11613 - # C2 X 0.00000 0.00000 0.00000 -0.06094 -0.04635 0.00000 - # ... etc. ... - # This is a complete ouput, some files will not have intensities, - # and older Jaguar versions sometimes skip the symmetries. 
- if line[2:23] == "start of program freq": - - self.vibfreqs = [] - self.vibdisps = [] - forceconstants = False - intensities = False - blank = inputfile.next() - line = inputfile.next() - while line.strip(): - if "force const" in line: - forceconstants = True - if "intensities" in line: - intensities = True - line = inputfile.next() - freqs = inputfile.next() - - # The last block has an extra blank line after it - catch it. - while freqs.strip(): - - # Number of modes (columns printed in this block). - nmodes = len(freqs.split())-1 - - # Append the frequencies. - self.vibfreqs.extend(map(float, freqs.split()[1:])) - line = inputfile.next().split() - - # May skip symmetries (older Jaguar versions). - if line[0] == "symmetries": - if not hasattr(self, "vibsyms"): - self.vibsyms = [] - self.vibsyms.extend(map(self.normalisesym, line[1:])) - line = inputfile.next().split() - if intensities: - if not hasattr(self, "vibirs"): - self.vibirs = [] - self.vibirs.extend(map(float, line[1:])) - line = inputfile.next().split() - if forceconstants: - line = inputfile.next() - - # Start parsing the displacements. - # Variable 'q' holds up to 7 lists of triplets. - q = [ [] for i in range(7) ] - for n in range(self.natom): - # Variable 'p' holds up to 7 triplets. - p = [ [] for i in range(7) ] - for i in range(3): - line = inputfile.next() - disps = [float(disp) for disp in line.split()[2:]] - for j in range(nmodes): - p[j].append(disps[j]) - for i in range(nmodes): - q[i].append(p[i]) - - self.vibdisps.extend(q[:nmodes]) - blank = inputfile.next() - freqs = inputfile.next() - - # Convert new data to arrays. - self.vibfreqs = numpy.array(self.vibfreqs, "d") - self.vibdisps = numpy.array(self.vibdisps, "d") - if hasattr(self, "vibirs"): - self.vibirs = numpy.array(self.vibirs, "d") - - # Parse excited state output (for CIS calculations). - # Jaguar calculates only singlet states. - if line[2:15] == "Excited State": - if not hasattr(self, "etenergies"): - self.etenergies = [] - if not hasattr(self, "etoscs"): - self.etoscs = [] - if not hasattr(self, "etsecs"): - self.etsecs = [] - self.etsyms = [] - etenergy = float(line.split()[3]) - etenergy = utils.convertor(etenergy, "eV", "cm-1") - self.etenergies.append(etenergy) - # Skip 4 lines - for i in range(5): - line = inputfile.next() - self.etsecs.append([]) - # Jaguar calculates only singlet states. - self.etsyms.append('Singlet-A') - while line.strip() != "": - fromMO = int(line.split()[0])-1 - toMO = int(line.split()[2])-1 - coeff = float(line.split()[-1]) - self.etsecs[-1].append([(fromMO,0),(toMO,0),coeff]) - line = inputfile.next() - # Skip 3 lines - for i in range(4): - line = inputfile.next() - strength = float(line.split()[-1]) - self.etoscs.append(strength) - - -if __name__ == "__main__": - import doctest, jaguarparser - doctest.testmod(jaguarparser, verbose=False) diff --git a/external/cclib/parser/logfileparser.py b/external/cclib/parser/logfileparser.py deleted file mode 100644 index b7d8bfd5d2..0000000000 --- a/external/cclib/parser/logfileparser.py +++ /dev/null @@ -1,300 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 879 $" - - -import StringIO - -try: - import bz2 # New in Python 2.3. 
-except ImportError: - bz2 = None -import fileinput -import gzip -import inspect -import logging -logging.logMultiprocessing = 0 # To avoid a problem with Avogadro -import os -import random -try: - set # Standard type from Python 2.4+. -except NameError: - from sets import Set as set -import sys -import types -import zipfile - -import numpy - -import utils -from data import ccData - - -def openlogfile(filename): - """Return a file object given a filename. - - Given the filename of a log file or a gzipped, zipped, or bzipped - log file, this function returns a regular Python file object. - - Given an address starting with http://, this function retrieves the url - and returns a file object using a temporary file. - - Given a list of filenames, this function returns a FileInput object, - which can be used for seamless iteration without concatenation. - """ - - # If there is a single string argument given. - if type(filename) in [str, unicode]: - - extension = os.path.splitext(filename)[1] - - if extension == ".gz": - fileobject = gzip.open(filename, "r") - - elif extension == ".zip": - zip = zipfile.ZipFile(filename, "r") - assert len(zip.namelist()) == 1, "ERROR: Zip file contains more than 1 file" - fileobject = StringIO.StringIO(zip.read(zip.namelist()[0])) - - elif extension in ['.bz', '.bz2']: - # Module 'bz2' is not always importable. - assert bz2 != None, "ERROR: module bz2 cannot be imported" - fileobject = bz2.BZ2File(filename, "r") - - else: - fileobject = open(filename, "r") - - return fileobject - - elif hasattr(filename, "__iter__"): - - # Compression (gzip and bzip) is supported as of Python 2.5. - if sys.version_info[0] >= 2 and sys.version_info[1] >= 5: - fileobject = fileinput.input(filename, openhook=fileinput.hook_compressed) - else: - fileobject = fileinput.input(filename) - - return fileobject - - -class Logfile(object): - """Abstract class for logfile objects. - - Subclasses defined by cclib: - ADF, GAMESS, GAMESSUK, Gaussian, Jaguar, Molpro, ORCA - - """ - - def __init__(self, source, progress=None, - loglevel=logging.INFO, logname="Log", logstream=sys.stdout, - fupdate=0.05, cupdate=0.002, - datatype=ccData): - """Initialise the Logfile object. - - This should be called by a ubclass in its own __init__ method. - - Inputs: - source - a single logfile, a list of logfiles, or input stream - """ - - # Set the filename to source if it is a string or a list of filenames. - # In the case of an input stream, set some arbitrary name and the stream. - # Elsewise, raise an Exception. - if isinstance(source,types.StringTypes): - self.filename = source - self.isstream = False - elif isinstance(source,list) and all([isinstance(s,types.StringTypes) for s in source]): - self.filename = source - self.isstream = False - elif hasattr(source, "read"): - self.filename = "stream %s" %str(type(source)) - self.isstream = True - self.stream = source - else: - raise ValueError - - # Progress indicator. - self.progress = progress - self.fupdate = fupdate - self.cupdate = cupdate - - # Set up the logger. - # Note that calling logging.getLogger() with one name always returns the same instance. - # Presently in cclib, all parser instances of the same class use the same logger, - # which means that care needs to be taken not to duplicate handlers. 
- self.loglevel = loglevel - self.logname = logname - self.logger = logging.getLogger('%s %s' % (self.logname,self.filename)) - self.logger.setLevel(self.loglevel) - if len(self.logger.handlers) == 0: - handler = logging.StreamHandler(logstream) - handler.setFormatter(logging.Formatter("[%(name)s %(levelname)s] %(message)s")) - self.logger.addHandler(handler) - - # Periodic table of elements. - self.table = utils.PeriodicTable() - - # This is the class that will be used in the data object returned by parse(), - # and should normally be ccData or a subclass. - self.datatype = datatype - - def __setattr__(self, name, value): - - # Send info to logger if the attribute is in the list self._attrlist. - if name in getattr(self, "_attrlist", {}) and hasattr(self, "logger"): - - # Call logger.info() only if the attribute is new. - if not hasattr(self, name): - if type(value) in [numpy.ndarray, list]: - self.logger.info("Creating attribute %s[]" %name) - else: - self.logger.info("Creating attribute %s: %s" %(name, str(value))) - - # Set the attribute. - object.__setattr__(self, name, value) - - def parse(self, fupdate=None, cupdate=None): - """Parse the logfile, using the assumed extract method of the child.""" - - # Check that the sub-class has an extract attribute, - # that is callable with the proper number of arguemnts. - if not hasattr(self, "extract"): - raise AttributeError, "Class %s has no extract() method." %self.__class__.__name__ - return -1 - if not callable(self.extract): - raise AttributeError, "Method %s._extract not callable." %self.__class__.__name__ - return -1 - if len(inspect.getargspec(self.extract)[0]) != 3: - raise AttributeError, "Method %s._extract takes wrong number of arguments." %self.__class__.__name__ - return -1 - - # Save the current list of attributes to keep after parsing. - # The dict of self should be the same after parsing. - _nodelete = list(set(self.__dict__.keys())) - - # Initiate the FileInput object for the input files. - # Remember that self.filename can be a list of files. - if not self.isstream: - inputfile = openlogfile(self.filename) - else: - inputfile = self.stream - - # Intialize self.progress. - if self.progress: - inputfile.seek(0,2) - nstep = inputfile.tell() - inputfile.seek(0) - self.progress.initialize(nstep) - self.progress.step = 0 - if fupdate: - self.fupdate = fupdate - if cupdate: - self.cupdate = cupdate - - # Initialize the ccData object that will be returned. - # This is normally ccData, but can be changed by passing - # the datatype argument to __init__(). - data = self.datatype() - - # Copy the attribute list, so that the parser knows what to expect, - # specifically in __setattr__(). - # The class self.datatype (normally ccData) must have this attribute. - self._attrlist = data._attrlist - - # Maybe the sub-class has something to do before parsing. - if hasattr(self, "before_parsing"): - self.before_parsing() - - # Loop over lines in the file object and call extract(). - # This is where the actual parsing is done. - for line in inputfile: - - self.updateprogress(inputfile, "Unsupported information", cupdate) - - # This call should check if the line begins a section of extracted data. - # If it does, it parses some lines and sets the relevant attributes (to self). - # Any attributes can be freely set and used across calls, however only those - # in data._attrlist will be moved to final data object that is returned. - self.extract(inputfile, line) - - # Close input file object. 
- if not self.isstream: - inputfile.close() - - # Maybe the sub-class has something to do after parsing. - if hasattr(self, "after_parsing"): - self.after_parsing() - - # If atomcoords were not parsed, but some input coordinates were ("inputcoords"). - # This is originally from the Gaussian parser, a regression fix. - if not hasattr(self, "atomcoords") and hasattr(self, "inputcoords"): - self.atomcoords = numpy.array(self.inputcoords, 'd') - - # Set nmo if not set already - to nbasis. - if not hasattr(self, "nmo") and hasattr(self, "nbasis"): - self.nmo = self.nbasis - - # Creating deafult coreelectrons array. - if not hasattr(self, "coreelectrons") and hasattr(self, "natom"): - self.coreelectrons = numpy.zeros(self.natom, "i") - - # Move all cclib attributes to the ccData object. - # To be moved, an attribute must be in data._attrlist. - for attr in data._attrlist: - if hasattr(self, attr): - setattr(data, attr, getattr(self, attr)) - - # Now make sure that the cclib attributes in the data object - # are all the correct type (including arrays and lists of arrays). - data.arrayify() - - # Delete all temporary attributes (including cclib attributes). - # All attributes should have been moved to a data object, - # which will be returned. - for attr in self.__dict__.keys(): - if not attr in _nodelete: - self.__delattr__(attr) - - # Update self.progress as done. - if self.progress: - self.progress.update(nstep, "Done") - - # Return the ccData object that was generated. - return data - - def updateprogress(self, inputfile, msg, xupdate=0.05): - """Update progress.""" - - if self.progress and random.random() < xupdate: - newstep = inputfile.tell() - if newstep != self.progress.step: - self.progress.update(newstep, msg) - self.progress.step = newstep - - def normalisesym(self,symlabel): - """Standardise the symmetry labels between parsers. - - This method should be overwritten by individual parsers, and should - contain appropriate doctests. If is not overwritten, this is detected - as an error by unit tests. - """ - return "ERROR: This should be overwritten by this subclass" - - def float(self,number): - """Convert a string to a float avoiding the problem with Ds. - - >>> t = Logfile("dummyfile") - >>> t.float("123.2323E+02") - 12323.23 - >>> t.float("123.2323D+02") - 12323.23 - """ - number = number.replace("D","E") - return float(number) - -if __name__=="__main__": - import doctest - doctest.testmod() diff --git a/external/cclib/parser/mm4parser.py b/external/cclib/parser/mm4parser.py deleted file mode 100644 index f431ef07e1..0000000000 --- a/external/cclib/parser/mm4parser.py +++ /dev/null @@ -1,260 +0,0 @@ -""" -gmagoon 05/03/10: new class for MM4 parsing, based on mopacparser.py, which, in turn, is based on gaussianparser.py from cclib, described below: -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). 
-""" - -__revision__ = "$Revision: 814 $" - - -#import re - -import numpy -import math -import utils -import logfileparser - - -def symbol2int(symbol): - t = utils.PeriodicTable() - return t.number[symbol] - -class MM4(logfileparser.Logfile): - """An MM4 output file.""" - - def __init__(self, *args, **kwargs): - - # Call the __init__ method of the superclass - super(MM4, self).__init__(logname="MM4", *args, **kwargs) - - def __str__(self): - """Return a string representation of the object.""" - return "MM4 log file %s" % (self.filename) - - def __repr__(self): - """Return a representation of the object.""" - return 'MM4("%s")' % (self.filename) - - def extract(self, inputfile, line): - """Extract information from the file object inputfile.""" - - # Number of atoms. - # Example: THE COORDINATES OF 20 ATOMS ARE READ IN. - if line[0:28] == ' THE COORDINATES OF': - - self.updateprogress(inputfile, "Attributes", self.fupdate) - natom = int(line.split()[-5]) #fifth to last component should be number of atoms - if hasattr(self, "natom"): - assert self.natom == natom - else: - self.natom = natom - - # Extract the atomic numbers and coordinates from the optimized (final) geometry - - # Example: -# FINAL ATOMIC COORDINATE -# ATOM X Y Z TYPE -# C( 1) -3.21470 -0.22058 0.00000 ( 1) -# H( 2) -3.30991 -0.87175 0.89724 ( 5) -# H( 3) -3.30991 -0.87174 -0.89724 ( 5) -# H( 4) -4.08456 0.47380 0.00000 ( 5) -# C( 5) -1.88672 0.54893 0.00000 ( 1) -# H( 6) -1.84759 1.21197 -0.89488 ( 5) -# H( 7) -1.84759 1.21197 0.89488 ( 5) -# C( 8) -0.66560 -0.38447 0.00000 ( 1) -# H( 9) -0.70910 -1.04707 -0.89471 ( 5) -# H( 10) -0.70910 -1.04707 0.89471 ( 5) -# C( 11) 0.66560 0.38447 0.00000 ( 1) -# H( 12) 0.70910 1.04707 0.89471 ( 5) -# H( 13) 0.70910 1.04707 -0.89471 ( 5) -# C( 14) 1.88672 -0.54893 0.00000 ( 1) -# H( 15) 1.84759 -1.21197 -0.89488 ( 5) -# H( 16) 1.84759 -1.21197 0.89488 ( 5) -# C( 17) 3.21470 0.22058 0.00000 ( 1) -# H( 18) 3.30991 0.87174 0.89724 ( 5) -# H( 19) 4.08456 -0.47380 0.00000 ( 5) -# H( 20) 3.30991 0.87175 -0.89724 ( 5) - - if line[0:29] == ' FINAL ATOMIC COORDINATE': - - - self.updateprogress(inputfile, "Attributes", self.cupdate) - - self.inputcoords = [] - self.inputatoms = [] - - headerline = inputfile.next() - - atomcoords = [] - line = inputfile.next() - while len(line.split()) > 0: - broken = line.split() - self.inputatoms.append(symbol2int(line[0:10].strip())) - xc = float(line[17:29]) - yc = float(line[29:41]) - zc = float(line[41:53]) - atomcoords.append([xc,yc,zc]) - line = inputfile.next() - - self.inputcoords.append(atomcoords) - - if not hasattr(self, "atomnos"): - self.atomnos = numpy.array(self.inputatoms, 'i') - if not hasattr(self, "natom"): - self.natom = len(self.atomnos) - - -#read energy (in kcal/mol, converted to eV) -# Example: HEAT OF FORMATION (HFN) AT 298.2 K = -42.51 KCAL/MOLE - if line[0:31] == ' HEAT OF FORMATION (HFN) AT': - if not hasattr(self, "scfenergies"): - self.scfenergies = [] - self.scfenergies.append(utils.convertor(self.float(line.split()[-2])/627.5095, "hartree", "eV")) #note conversion from kcal/mol to hartree - - #molecular mass parsing (units will be amu); note that this can occur multiple times in the file, but all values should be the same - #Example: FORMULA WEIGHT : 86.112 - if line[0:33] == ' FORMULA WEIGHT :': - self.updateprogress(inputfile, "Attributes", self.fupdate) - molmass = self.float(line.split()[-1]) - if hasattr(self, "molmass"): - assert self.molmass == molmass #check that subsequent occurences match the original value - else: - 
self.molmass = molmass - - #rotational constants (converted to GHZ) - #Example: -# THE MOMENTS OF INERTIA CALCULATED FROM R(g), R(z) VALUES -# (also from R(e), R(alpha), R(s) VALUES) -# -# Note: (1) All calculations are based on principle isotopes. -# (2) R(z) values include harmonic vibration (Coriolis) -# contribution indicated in parentheses. -# -# -# (1) UNIT = 10**(-39) GM*CM**2 -# -# IX IY IZ -# -# R(e) 5.7724 73.4297 76.0735 -# R(z) 5.7221(-0.0518) 74.0311(-0.0285) 76.7102(-0.0064) -# -# (2) UNIT = AU A**2 -# -# IX IY IZ -# -# R(e) 34.7661 442.2527 458.1757 -# R(z) 34.4633(-0.3117) 445.8746(-0.1714) 462.0104(-0.0385) - #moments of inertia converted into rotational constants via rot cons= h/(8*Pi^2*I) - #we will use the equilibrium values (R(e)) in units of 10**-39 GM*CM**2 (these units are less precise (fewer digits) than AU A**2 units but it is simpler as it doesn't require use of Avogadro's number - #***even R(e) may include temperature dependent effects, though, and maybe the one I actually want is r(mm4) (not reported) - if line[0:33] == ' (1) UNIT = 10**(-39) GM*CM**2': - dummyline = inputfile.next(); - dummyline = inputfile.next(); - dummyline = inputfile.next(); - rotinfo=inputfile.next(); - if not hasattr(self, "rotcons"): - self.rotcons = [] - broken = rotinfo.split() - h = 6.62606896E3 #Planck constant in 10^-37 J-s = 10^-37 kg m^2/s cf. http://physics.nist.gov/cgi-bin/cuu/Value?h#mid - a = h/(8*math.pi*math.pi*float(broken[1])) - b = h/(8*math.pi*math.pi*float(broken[2])) - c = h/(8*math.pi*math.pi*float(broken[3])) - self.rotcons.append([a, b, c]) - - # Start of the IR/Raman frequency section. -#Example: -#0 FUNDAMENTAL NORMAL VIBRATIONAL FREQUENCIES -# ( THEORETICALLY 54 VALUES ) -# -# Frequency : in 1/cm -# A(i) : IR intensity (vs,s,m,w,vw,-- or in 10**6 cm/mole) -# A(i) = -- : IR inactive -# -# -# no Frequency Symmetry A(i) -# -# 1. 2969.6 (Bu ) s -# 2. 2969.6 (Bu ) w -# 3. 2967.6 (Bu ) w -# 4. 2967.6 (Bu ) s -# 5. 2931.2 (Au ) vs -# 6. 2927.8 (Bg ) -- -# 7. 2924.9 (Au ) m -# 8. 2923.6 (Bg ) -- -# 9. 2885.8 (Ag ) -- -# 10. 2883.9 (Bu ) w -# 11. 2879.8 (Ag ) -- -# 12. 2874.6 (Bu ) w -# 13. 2869.6 (Ag ) -- -# 14. 2869.2 (Bu ) s -# 15. 1554.4 (Ag ) -- -# 16. 1494.3 (Bu ) w -# 17. 1449.7 (Bg ) -- -# 18. 1449.5 (Au ) w -# 19. 1444.8 (Ag ) -- -# 20. 1438.5 (Bu ) w -# 21. 1421.5 (Ag ) -- -# 22. 1419.3 (Ag ) -- -# 23. 1416.5 (Bu ) w -# 24. 1398.8 (Bu ) w -# 25. 1383.9 (Ag ) -- -# 26. 1363.7 (Bu ) m -# 27. 1346.3 (Ag ) -- -# 28. 1300.2 (Au ) vw -# 29. 1298.7 (Bg ) -- -# 30. 1283.4 (Bu ) m -# 31. 1267.4 (Bg ) -- -# 32. 1209.6 (Au ) w -# 33. 1132.2 (Bg ) -- -# 34. 1094.4 (Ag ) -- -# 35. 1063.4 (Bu ) w -# 36. 1017.8 (Bu ) w -# 37. 1011.6 (Ag ) -- -# 38. 1004.2 (Au ) w -# 39. 990.2 (Ag ) -- -# 40. 901.8 (Ag ) -- -# 41. 898.4 (Bg ) -- -# 42. 875.9 (Bu ) w -# 43. 795.4 (Au ) w -# 44. 725.0 (Bg ) -- -# 45. 699.6 (Au ) w -# 46. 453.4 (Bu ) w -# 47. 352.1 (Ag ) -- -# 48. 291.1 (Ag ) -- -# 49. 235.9 (Au ) vw -# 50. 225.2 (Bg ) -- -# 51. 151.6 (Bg ) -- -# 52. 147.7 (Bu ) w -# 53. 108.0 (Au ) vw -# 54. 77.1 (Au ) vw -# 55. ( 0.0) (t/r ) -# 56. ( 0.0) (t/r ) -# 57. ( 0.0) (t/r ) -# 58. ( 0.0) (t/r ) -# 59. ( 0.0) (t/r ) -# 60. 
( 0.0) (t/r ) - - if line[0:52] == ' no Frequency Symmetry A(i)': - blankline = inputfile.next() - self.updateprogress(inputfile, "Frequency Information", self.fupdate) - - if not hasattr(self, 'vibfreqs'): - self.vibfreqs = [] - line = inputfile.next() - while(line[15:31].find('(') < 0):#terminate once we reach zero frequencies (which include parentheses) - freq = self.float(line[15:31]) - self.vibfreqs.append(freq) - line = inputfile.next() - #parsing of final steric energy in eV (for purposes of providing a baseline for possible subsequent hindered rotor calculations) - #example line:" FINAL STERIC ENERGY IS 0.8063 KCAL/MOL." - if line[6:28] == 'FINAL STERIC ENERGY IS': - stericenergy = utils.convertor(self.float(line.split()[4])/627.5095, "hartree", "eV") #note conversion from kcal/mol to hartree - if hasattr(self, "stericenergy"): - assert self.stericenergy == stericenergy #check that subsequent occurences match the original value - else: - self.stericenergy = stericenergy - - -if __name__ == "__main__": - import doctest, mm4parser - doctest.testmod(mm4parser, verbose=False) diff --git a/external/cclib/parser/molproparser.py b/external/cclib/parser/molproparser.py deleted file mode 100644 index 7f4f1fab1f..0000000000 --- a/external/cclib/parser/molproparser.py +++ /dev/null @@ -1,644 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 661 $" - - -import re - -import numpy - -import logfileparser -import utils - - -class Molpro(logfileparser.Logfile): - """Molpro file parser""" - - def __init__(self, *args, **kwargs): - # Call the __init__ method of the superclass - super(Molpro, self).__init__(logname="Molpro", *args, **kwargs) - - def __str__(self): - """Return a string representation of the object.""" - return "Molpro log file %s" % (self.filename) - - def __repr__(self): - """Return a representation of the object.""" - return 'Molpro("%s")' % (self.filename) - - def normalisesym(self, label): - """Normalise the symmetries used by Molpro.""" - ans = label.replace("`", "'").replace("``", "''") - return ans - - def before_parsing(self): - - self.electronorbitals = "" - self.insidescf = False - - def after_parsing(self): - - # If optimization thresholds are default, they are normally not printed. - if not hasattr(self, "geotargets"): - self.geotargets = [] - # Default THRGRAD (required accuracy of the optimized gradient). - self.geotargets.append(3E-4) - # Default THRENERG (required accuracy of the optimized energy). - self.geotargets.append(1E-6) - # Default THRSTEP (convergence threshold for the geometry optimization step). - self.geotargets.append(3E-4) - - def extract(self, inputfile, line): - """Extract information from the file object inputfile.""" - - if line[1:19] == "ATOMIC COORDINATES": - - if not hasattr(self,"atomcoords"): - self.atomcoords = [] - self.atomnos = [] - line = inputfile.next() - line = inputfile.next() - line = inputfile.next() - atomcoords = [] - atomnos = [] - - line = inputfile.next() - while line.strip(): - temp = line.strip().split() - atomcoords.append([utils.convertor(float(x),"bohr","Angstrom") for x in temp[3:6]]) #bohrs to angs - atomnos.append(int(round(float(temp[2])))) - line = inputfile.next() - - self.atomnos = numpy.array(atomnos, "i") - self.atomcoords.append(atomcoords) - self.natom = len(self.atomnos) - - # Use BASIS DATA to parse input for aonames and atombasis. 
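Two unit conversions are hard-coded in the MM4 extract() method above: heats of formation and steric energies go from kcal/mol to eV by dividing by 627.5095 kcal/mol per hartree and then applying the hartree-to-eV factor, and rotational constants in GHz come from the R(e) moments of inertia (reported in units of 1e-39 g*cm^2) as h/(8*pi^2*I) with Planck's constant written in units of 1e-37 J*s. A small self-contained check of both relations, using the sample values quoted in the comments above (the constant and function names here are only illustrative):

import math

KCAL_PER_HARTREE = 627.5095
EV_PER_HARTREE = 27.2113845
PLANCK_1E37_JS = 6.62606896e3        # Planck constant expressed in 1e-37 J*s

def kcalmol_to_ev(value):
    """kcal/mol -> hartree -> eV, the same two-step route used above."""
    return value / KCAL_PER_HARTREE * EV_PER_HARTREE

def inertia_to_ghz(inertia):
    """Rotational constant B = h / (8*pi^2*I) in GHz for I in 1e-39 g*cm^2."""
    # 1e-37 J*s divided by 1e-39 g*cm^2 (i.e. 1e-46 kg*m^2) leaves a factor of 1e9 1/s, hence GHz.
    return PLANCK_1E37_JS / (8 * math.pi ** 2 * inertia)

if __name__ == "__main__":
    print(round(kcalmol_to_ev(-42.51), 3))     # sample HFN line: about -1.84 eV
    print(round(inertia_to_ghz(5.7724), 1))    # sample IX moment: about 14.5 GHz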
- # This is always the first place this information is printed, so no attribute check is needed. - if line[1:11] == "BASIS DATA": - - blank = inputfile.next() - header = inputfile.next() - blank = inputfile.next() - self.aonames = [] - self.atombasis = [] - self.gbasis = [] - for i in range(self.natom): - self.atombasis.append([]) - self.gbasis.append([]) - - line = "dummy" - while line.strip() != "": - line = inputfile.next() - funcnr = line[1:6] - funcsym = line[7:9] - funcatom_ = line[11:14] - functype_ = line[16:22] - funcexp = line[25:38] - funccoeffs = line[38:] - - # If a new function type is printed or the BASIS DATA block ends, - # then the previous functions can be added to gbasis. - # When translating the Molpro function type name into a gbasis code, - # note that Molpro prints all components, and we want to add - # only one to gbasis, with the proper code (S,P,D,F,G). - # Warning! The function types differ for cartesian/spherical functions. - # Skip the first printed function type, however (line[3] != '1'). - if (functype_.strip() and line[1:4] != ' 1') or line.strip() == "": - funcbasis = None - if functype in ['1s', 's']: - funcbasis = 'S' - if functype in ['x', '2px']: - funcbasis = 'P' - if functype in ['xx', '3d0']: - funcbasis = 'D' - if functype in ['xxx', '4f0']: - funcbasis = 'F' - if functype in ['xxxx', '5g0']: - funcbasis = 'G' - if funcbasis: - - # The function is split into as many columns as there are. - for i in range(len(coefficients[0])): - func = (funcbasis, []) - for j in range(len(exponents)): - func[1].append((exponents[j],coefficients[j][i])) - self.gbasis[funcatom-1].append(func) - - # If it is a new type, set up the variables for the next shell(s). - if functype_.strip(): - exponents = [] - coefficients = [] - functype = functype_.strip() - funcatom = int(funcatom_.strip()) - - # Add exponents and coefficients to lists. - if line.strip(): - funcexp = float(funcexp) - funccoeffs = [float(s) for s in funccoeffs.split()] - exponents.append(funcexp) - coefficients.append(funccoeffs) - - # If the function number is there, add to atombasis and aonames. - if funcnr.strip(): - funcnr = int(funcnr.split('.')[0]) - self.atombasis[funcatom-1].append(funcnr-1) - element = self.table.element[self.atomnos[funcatom-1]] - aoname = "%s%i_%s" %(element, funcatom, functype) - self.aonames.append(aoname) - - if line[1:23] == "NUMBER OF CONTRACTIONS": - - nbasis = int(line.split()[3]) - if hasattr(self, "nbasis"): - assert nbasis == self.nbasis - else: - self.nbasis = nbasis - - # This is used to signalize whether we are inside an SCF calculation. - if line[1:8] == "PROGRAM" and line[14:18] == "-SCF": - - self.insidescf = True - - # Use this information instead of 'SETTING ...', in case the defaults are standard. - # Note that this is sometimes printed in each geometry optimization step. - if line[1:20] == "NUMBER OF ELECTRONS": - - spinup = int(line.split()[3][:-1]) - spindown = int(line.split()[4][:-1]) - # Nuclear charges (atomnos) should be parsed by now. - nuclear = numpy.sum(self.atomnos) - charge = nuclear - spinup - spindown - mult = spinup - spindown + 1 - - # Copy charge, or assert for exceptions if already exists. - if not hasattr(self, "charge"): - self.charge = charge - else: - assert self.charge == charge - - # Copy multiplicity, or assert for exceptions if already exists. 
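The NUMBER OF ELECTRONS block above derives charge and multiplicity by simple arithmetic on the parsed alpha and beta electron counts and the nuclear charges: charge = sum(Z) - (n_alpha + n_beta) and mult = n_alpha - n_beta + 1. A minimal illustration (the helper name and the example molecules are not from the parser):

def charge_and_mult(atomic_numbers, n_alpha, n_beta):
    """Net charge and spin multiplicity from electron counts, as in the block above."""
    charge = sum(atomic_numbers) - n_alpha - n_beta
    mult = n_alpha - n_beta + 1
    return charge, mult

if __name__ == "__main__":
    print(charge_and_mult([8, 1, 1], 5, 5))   # water: (0, 1)
    print(charge_and_mult([8, 1], 5, 4))      # hydroxyl radical: (0, 2)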
- if not hasattr(self, "mult"): - self.mult = mult - else: - assert self.mult == mult - - # Convergenve thresholds for SCF cycle, should be contained in a line such as: - # CONVERGENCE THRESHOLDS: 1.00E-05 (Density) 1.40E-07 (Energy) - if self.insidescf and line[1:24] == "CONVERGENCE THRESHOLDS:": - - if not hasattr(self, "scftargets"): - self.scftargets = [] - - scftargets = map(float, line.split()[2::2]) - self.scftargets.append(scftargets) - # Usually two criteria, but save the names this just in case. - self.scftargetnames = line.split()[3::2] - - # Read in the print out of the SCF cycle - for scfvalues. For RHF looks like: - # ITERATION DDIFF GRAD ENERGY 2-EL.EN. DIPOLE MOMENTS DIIS - # 1 0.000D+00 0.000D+00 -379.71523700 1159.621171 0.000000 0.000000 0.000000 0 - # 2 0.000D+00 0.898D-02 -379.74469736 1162.389787 0.000000 0.000000 0.000000 1 - # 3 0.817D-02 0.144D-02 -379.74635529 1162.041033 0.000000 0.000000 0.000000 2 - # 4 0.213D-02 0.571D-03 -379.74658063 1162.159929 0.000000 0.000000 0.000000 3 - # 5 0.799D-03 0.166D-03 -379.74660889 1162.144256 0.000000 0.000000 0.000000 4 - if self.insidescf and line[1:10] == "ITERATION": - - if not hasattr(self, "scfvalues"): - self.scfvalues = [] - - line = inputfile.next() - energy = 0.0 - scfvalues = [] - while line.strip() != "": - if line.split()[0].isdigit(): - - ddiff = float(line.split()[1].replace('D','E')) - newenergy = float(line.split()[3]) - ediff = newenergy - energy - energy = newenergy - - # The convergence thresholds must have been read above. - # Presently, we recognize MAX DENSITY and MAX ENERGY thresholds. - numtargets = len(self.scftargetnames) - values = [numpy.nan]*numtargets - for n,name in zip(range(numtargets),self.scftargetnames): - if "ENERGY" in name.upper(): - values[n] = ediff - elif "DENSITY" in name.upper(): - values[n] = ddiff - scfvalues.append(values) - - line = inputfile.next() - self.scfvalues.append(numpy.array(scfvalues)) - - # SCF result - RHF/UHF and DFT (RKS) energies. - if line[1:5] in ["!RHF", "!UHF", "!RKS"] and line[16:22] == "ENERGY": - - if not hasattr(self, "scfenergies"): - self.scfenergies = [] - scfenergy = float(line.split()[4]) - self.scfenergies.append(utils.convertor(scfenergy, "hartree", "eV")) - - # We are now done with SCF cycle (after a few lines). - self.insidescf = False - - # MP2 energies. - if line[1:5] == "!MP2": - - if not hasattr(self, 'mpenergies'): - self.mpenergies = [] - mp2energy = float(line.split()[-1]) - mp2energy = utils.convertor(mp2energy, "hartree", "eV") - self.mpenergies.append([mp2energy]) - - # MP2 energies if MP3 or MP4 is also calculated. - if line[1:5] == "MP2:": - - if not hasattr(self, 'mpenergies'): - self.mpenergies = [] - mp2energy = float(line.split()[2]) - mp2energy = utils.convertor(mp2energy, "hartree", "eV") - self.mpenergies.append([mp2energy]) - - # MP3 (D) and MP4 (DQ or SDQ) energies. - if line[1:8] == "MP3(D):": - - mp3energy = float(line.split()[2]) - mp2energy = utils.convertor(mp3energy, "hartree", "eV") - line = inputfile.next() - self.mpenergies[-1].append(mp2energy) - if line[1:9] == "MP4(DQ):": - mp4energy = float(line.split()[2]) - line = inputfile.next() - if line[1:10] == "MP4(SDQ):": - mp4energy = float(line.split()[2]) - mp4energy = utils.convertor(mp4energy, "hartree", "eV") - self.mpenergies[-1].append(mp4energy) - - # The CCSD program operates all closed-shel coupled cluster runs. 
- if line[1:15] == "PROGRAM * CCSD": - - if not hasattr(self, "ccenergies"): - self.ccenergies = [] - while line[1:20] != "Program statistics:": - # The last energy (most exact) will be read last and thus saved. - if line[1:5] == "!CCD" or line[1:6] == "!CCSD" or line[1:9] == "!CCSD(T)": - ccenergy = float(line.split()[-1]) - ccenergy = utils.convertor(ccenergy, "hartree", "eV") - line = inputfile.next() - self.ccenergies.append(ccenergy) - - # Read the occupancy (index of HOMO s). - # For restricted calculations, there is one line here. For unrestricted, two: - # Final alpha occupancy: ... - # Final beta occupancy: ... - if line[1:17] == "Final occupancy:": - self.homos = [int(line.split()[-1])-1] - if line[1:23] == "Final alpha occupancy:": - self.homos = [int(line.split()[-1])-1] - line = inputfile.next() - self.homos.append(int(line.split()[-1])-1) - - # From this block atombasis, moenergies, and mocoeffs can be parsed. - # Note that Molpro does not print this by default, you must add this in the input: - # GPRINT,ORBITALS - # What's more, this prints only the occupied orbitals. To get virtuals, add also: - # ORBPTIN,NVIRT - # where NVIRT is how many to print (can be some large number, like 99999, to print all). - # The block is in general flipped when compared to other programs (GAMESS, Gaussian), and - # MOs in the rows. Also, it does not cut the table into parts, rather each MO row has - # as many lines as it takes to print all the coefficients, as shown below: - # - # ELECTRON ORBITALS - # ================= - # - # - # Orb Occ Energy Couls-En Coefficients - # - # 1 1s 1 1s 1 2px 1 2py 1 2pz 2 1s (...) - # 3 1s 3 1s 3 2px 3 2py 3 2pz 4 1s (...) - # (...) - # - # 1.1 2 -11.0351 -43.4915 0.701460 0.025696 -0.000365 -0.000006 0.000000 0.006922 (...) - # -0.006450 0.004742 -0.001028 -0.002955 0.000000 -0.701460 (...) - # (...) - # - # For unrestricted calcualtions, ELECTRON ORBITALS is followed on the same line - # by FOR POSITIVE SPIN or FOR NEGATIVE SPIN. - # For examples, see data/Molpro/basicMolpro2006/dvb_sp*. - if line[1:18] == "ELECTRON ORBITALS" or self.electronorbitals: - # Detect if we are reading beta (negative spin) orbitals. - spin = 0 - if line[19:36] == "FOR NEGATIVE SPIN" or self.electronorbitals[19:36] == "FOR NEGATIVE SPIN": - spin = 1 - - if not self.electronorbitals: - dashes = inputfile.next() - blank = inputfile.next() - blank = inputfile.next() - headers = inputfile.next() - blank = inputfile.next() - - # Parse the list of atomic orbitals if atombasis or aonames is missing. - line = inputfile.next() - if not hasattr(self, "atombasis") or not hasattr(self, "aonames"): - self.atombasis = [] - for i in range(self.natom): - self.atombasis.append([]) - self.aonames = [] - aonum = 0 - while line.strip(): - for s in line.split(): - if s.isdigit(): - atomno = int(s) - self.atombasis[atomno-1].append(aonum) - aonum += 1 - else: - functype = s - element = self.table.element[self.atomnos[atomno-1]] - aoname = "%s%i_%s" %(element, atomno, functype) - self.aonames.append(aoname) - line = inputfile.next() - else: - while line.strip(): - line = inputfile.next() - - # Now there can be one or two blank lines. - while not line.strip(): - line = inputfile.next() - - # Create empty moenergies and mocoeffs if they don't exist. 
- if not hasattr(self, "moenergies"): - self.moenergies = [[]] - self.mocoeffs = [[]] - # Do the same if they exist and are being read again (spin=0), - # this means only the last print-out of these data are saved, - # which consistent with current cclib practices. - elif len(self.moenergies) == 1 and spin == 0: - self.moenergies = [[]] - self.mocoeffs = [[]] - else: - self.moenergies.append([]) - self.mocoeffs.append([]) - - while line.strip() and not "ORBITALS" in line: - coeffs = [] - while line.strip() != "": - if line[:30].strip(): - moenergy = float(line.split()[2]) - moenergy = utils.convertor(moenergy, "hartree", "eV") - self.moenergies[spin].append(moenergy) - line = line[31:] - # Each line has 10 coefficients in 10.6f format. - num = len(line)/10 - for i in range(num): - try: - coeff = float(line[10*i:10*(i+1)]) - # Molpro prints stars when coefficients are huge. - except ValueError, detail: - self.logger.warn("Set coefficient to zero: %s" %detail) - coeff = 0.0 - coeffs.append(coeff) - line = inputfile.next() - self.mocoeffs[spin].append(coeffs) - line = inputfile.next() - - # Check if last line begins the next ELECTRON ORBITALS section. - if line[1:18] == "ELECTRON ORBITALS": - self.electronorbitals = line - else: - self.electronorbitals = "" - - # If the MATROP program was called appropriately, - # the atomic obital overlap matrix S is printed. - # The matrix is printed straight-out, ten elements in each row, both halves. - # Note that is the entire matrix is not printed, then aooverlaps - # will not have dimensions nbasis x nbasis. - if line[1:9] == "MATRIX S": - - blank = inputfile.next() - symblocklabel = inputfile.next() - if not hasattr(self, "aooverlaps"): - self.aooverlaps = [[]] - line = inputfile.next() - while line.strip() != "": - elements = [float(s) for s in line.split()] - if len(self.aooverlaps[-1]) + len(elements) <= self.nbasis: - self.aooverlaps[-1] += elements - else: - n = len(self.aooverlaps[-1]) + len(elements) - self.nbasis - self.aooverlaps[-1] += elements[:-n] - self.aooverlaps.append([]) - self.aooverlaps[-1] += elements[-n:] - line = inputfile.next() - - # Thresholds are printed only if the defaults are changed with GTHRESH. - # In that case, we can fill geotargets with non-default values. 
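The coefficient loop above depends on Molpro printing each MO coefficient in a fixed 10-character field and substituting stars when a value overflows the field, so the parser slices the line every ten characters and falls back to 0.0 (with a logged warning) when float() fails. A stripped-down version of that slicing, fed with numbers from the sample ELECTRON ORBITALS block above plus one star field (the function name and input string are illustrative):

def parse_coefficient_fields(segment, width=10):
    """Read a run of fixed-width MO coefficients; '*****' overflow fields become 0.0."""
    coeffs = []
    for start in range(0, len(segment) - len(segment) % width, width):
        field = segment[start:start + width]
        if not field.strip():
            continue
        try:
            coeffs.append(float(field))
        except ValueError:
            # Molpro prints stars when a coefficient does not fit the field.
            coeffs.append(0.0)
    return coeffs

if __name__ == "__main__":
    print(parse_coefficient_fields("  0.701460  0.025696 -0.000365**********"))
    # [0.70146, 0.025696, -0.000365, 0.0]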
- # The block should look like this as of Molpro 2006.1: - # THRESHOLDS: - - # ZERO = 1.00D-12 ONEINT = 1.00D-12 TWOINT = 1.00D-11 PREFAC = 1.00D-14 LOCALI = 1.00D-09 EORDER = 1.00D-04 - # ENERGY = 0.00D+00 ETEST = 0.00D+00 EDENS = 0.00D+00 THRDEDEF= 1.00D-06 GRADIENT= 1.00D-02 STEP = 1.00D-03 - # ORBITAL = 1.00D-05 CIVEC = 1.00D-05 COEFF = 1.00D-04 PRINTCI = 5.00D-02 PUNCHCI = 9.90D+01 OPTGRAD = 3.00D-04 - # OPTENERG= 1.00D-06 OPTSTEP = 3.00D-04 THRGRAD = 2.00D-04 COMPRESS= 1.00D-11 VARMIN = 1.00D-07 VARMAX = 1.00D-03 - # THRDOUB = 0.00D+00 THRDIV = 1.00D-05 THRRED = 1.00D-07 THRPSP = 1.00D+00 THRDC = 1.00D-10 THRCS = 1.00D-10 - # THRNRM = 1.00D-08 THREQ = 0.00D+00 THRDE = 1.00D+00 THRREF = 1.00D-05 SPARFAC = 1.00D+00 THRDLP = 1.00D-07 - # THRDIA = 1.00D-10 THRDLS = 1.00D-07 THRGPS = 0.00D+00 THRKEX = 0.00D+00 THRDIS = 2.00D-01 THRVAR = 1.00D-10 - # THRLOC = 1.00D-06 THRGAP = 1.00D-06 THRLOCT = -1.00D+00 THRGAPT = -1.00D+00 THRORB = 1.00D-06 THRMLTP = 0.00D+00 - # THRCPQCI= 1.00D-10 KEXTA = 0.00D+00 THRCOARS= 0.00D+00 SYMTOL = 1.00D-06 GRADTOL = 1.00D-06 THROVL = 1.00D-08 - # THRORTH = 1.00D-08 GRID = 1.00D-06 GRIDMAX = 1.00D-03 DTMAX = 0.00D+00 - if line [1:12] == "THRESHOLDS": - - blank = inputfile.next() - line = inputfile.next() - while line.strip(): - - if "OPTENERG" in line: - start = line.find("OPTENERG") - optenerg = line[start+10:start+20] - if "OPTGRAD" in line: - start = line.find("OPTGRAD") - optgrad = line[start+10:start+20] - if "OPTSTEP" in line: - start = line.find("OPTSTEP") - optstep = line[start+10:start+20] - line = inputfile.next() - - self.geotargets = [optenerg, optgrad, optstep] - - # The optimization history is the source for geovlues: - # END OF GEOMETRY OPTIMIZATION. TOTAL CPU: 246.9 SEC - # - # ITER. ENERGY(OLD) ENERGY(NEW) DE GRADMAX GRADNORM GRADRMS STEPMAX STEPLEN STEPRMS - # 1 -382.02936898 -382.04914450 -0.01977552 0.11354875 0.20127947 0.01183997 0.12972761 0.20171740 0.01186573 - # 2 -382.04914450 -382.05059234 -0.00144784 0.03299860 0.03963339 0.00233138 0.05577169 0.06687650 0.00393391 - # 3 -382.05059234 -382.05069136 -0.00009902 0.00694359 0.01069889 0.00062935 0.01654549 0.02016307 0.00118606 - # 4 -382.05069136 -382.05069130 0.00000006 0.00295497 0.00363023 0.00021354 0.00234307 0.00443525 0.00026090 - # 5 -382.05069130 -382.05069206 -0.00000075 0.00098220 0.00121031 0.00007119 0.00116863 0.00140452 0.00008262 - # 6 -382.05069206 -382.05069209 -0.00000003 0.00011350 0.00022306 0.00001312 0.00013321 0.00024526 0.00001443 - if line[1:30] == "END OF GEOMETRY OPTIMIZATION.": - - blank = inputfile.next() - headers = inputfile.next() - - # Although criteria can be changed, the printed format should not change. - # In case it does, retrieve the columns for each parameter. 
- headers = headers.split() - index_THRENERG = headers.index('DE') - index_THRGRAD = headers.index('GRADMAX') - index_THRSTEP = headers.index('STEPMAX') - - line = inputfile.next() - self.geovalues = [] - while line.strip() != "": - - line = line.split() - geovalues = [] - geovalues.append(float(line[index_THRENERG])) - geovalues.append(float(line[index_THRGRAD])) - geovalues.append(float(line[index_THRSTEP])) - self.geovalues.append(geovalues) - line = inputfile.next() - - # This block should look like this: - # Normal Modes - # - # 1 Au 2 Bu 3 Ag 4 Bg 5 Ag - # Wavenumbers [cm-1] 151.81 190.88 271.17 299.59 407.86 - # Intensities [km/mol] 0.33 0.28 0.00 0.00 0.00 - # Intensities [relative] 0.34 0.28 0.00 0.00 0.00 - # CX1 0.00000 -0.01009 0.02577 0.00000 0.06008 - # CY1 0.00000 -0.05723 -0.06696 0.00000 0.06349 - # CZ1 -0.02021 0.00000 0.00000 0.11848 0.00000 - # CX2 0.00000 -0.01344 0.05582 0.00000 -0.02513 - # CY2 0.00000 -0.06288 -0.03618 0.00000 0.00349 - # CZ2 -0.05565 0.00000 0.00000 0.07815 0.00000 - # ... - # Molpro prints low frequency modes in a subsequent section with the same format, - # which also contains zero frequency modes, with the title: - # Normal Modes of low/zero frequencies - if line[1:13] == "Normal Modes": - - if line[1:37] == "Normal Modes of low/zero frequencies": - islow = True - else: - islow = False - - blank = inputfile.next() - - # Each portion of five modes is followed by a single blank line. - # The whole block is followed by an additional blank line. - line = inputfile.next() - while line.strip(): - - if line[1:25].isspace(): - numbers = map(int, line.split()[::2]) - vibsyms = line.split()[1::2] - - if line[1:12] == "Wavenumbers": - vibfreqs = map(float, line.strip().split()[2:]) - - if line[1:21] == "Intensities [km/mol]": - vibirs = map(float, line.strip().split()[2:]) - - # There should always by 3xnatom displacement rows. - if line[1:11].isspace() and line[13:25].strip().isdigit(): - - # There are a maximum of 5 modes per line. 
- nmodes = len(line.split())-1 - - vibdisps = [] - for i in range(nmodes): - vibdisps.append([]) - for n in range(self.natom): - vibdisps[i].append([]) - for i in range(nmodes): - disp = float(line.split()[i+1]) - vibdisps[i][0].append(disp) - for i in range(self.natom*3 - 1): - line = inputfile.next() - iatom = (i+1)/3 - for i in range(nmodes): - disp = float(line.split()[i+1]) - vibdisps[i][iatom].append(disp) - - line = inputfile.next() - if not line.strip(): - - if not hasattr(self, "vibfreqs"): - self.vibfreqs = [] - if not hasattr(self, "vibsyms"): - self.vibsyms = [] - if not hasattr(self, "vibirs") and "vibirs" in dir(): - self.vibirs = [] - if not hasattr(self, "vibdisps") and "vibdisps" in dir(): - self.vibdisps = [] - - if not islow: - self.vibfreqs.extend(vibfreqs) - self.vibsyms.extend(vibsyms) - if "vibirs" in dir(): - self.vibirs.extend(vibirs) - if "vibdisps" in dir(): - self.vibdisps.extend(vibdisps) - else: - nonzero = [f > 0 for f in vibfreqs] - vibfreqs = [f for f in vibfreqs if f > 0] - self.vibfreqs = vibfreqs + self.vibfreqs - vibsyms = [vibsyms[i] for i in range(len(vibsyms)) if nonzero[i]] - self.vibsyms = vibsyms + self.vibsyms - if "vibirs" in dir(): - vibirs = [vibirs[i] for i in range(len(vibirs)) if nonzero[i]] - self.vibirs = vibirs + self.vibirs - if "vibdisps" in dir(): - vibdisps = [vibdisps[i] for i in range(len(vibdisps)) if nonzero[i]] - self.vibdisps = vibdisps + self.vibdisps - - line = inputfile.next() - - if line[1:16] == "Force Constants": - - self.logger.info("Creating attribute hessian") - self.hessian = [] - line = inputfile.next() - hess = [] - tmp = [] - - while line.strip(): - try: map(float, line.strip().split()[2:]) - except: - line = inputfile.next() - line.strip().split()[1:] - hess.extend([map(float,line.strip().split()[1:])]) - line = inputfile.next() - lig = 0 - - while (lig==0) or (len(hess[0]) > 1): - tmp.append(hess.pop(0)) - lig += 1 - k = 5 - - while len(hess) != 0: - tmp[k] += hess.pop(0) - k += 1 - if (len(tmp[k-1]) == lig): break - if k >= lig: k = len(tmp[-1]) - for l in tmp: self.hessian += l - - if line[1:14] == "Atomic Masses" and hasattr(self,"hessian"): - - line = inputfile.next() - self.amass = map(float, line.strip().split()[2:]) - - while line.strip(): - line = inputfile.next() - self.amass += map(float, line.strip().split()[2:]) - - -if __name__ == "__main__": - import doctest, molproparser - doctest.testmod(molproparser, verbose=False) diff --git a/external/cclib/parser/mopacparser.py b/external/cclib/parser/mopacparser.py deleted file mode 100644 index 7b8731f78d..0000000000 --- a/external/cclib/parser/mopacparser.py +++ /dev/null @@ -1,215 +0,0 @@ -""" -gmagoon 07/06/09: new class for MOPAC parsing, based on gaussianparser.py from cclib, described below: -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). 
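One detail of the geometry-optimization history parsing earlier in this parser is worth spelling out: instead of hard-coding column positions, the header line is split and the DE, GRADMAX and STEPMAX columns are located by name with headers.index(), so geovalues stays correct even if the printed table is reordered or widened. A compact illustration using the header and first row of the sample table quoted above (the helper name is invented, and the column spacing is compressed here):

HEADER = "ITER. ENERGY(OLD) ENERGY(NEW) DE GRADMAX GRADNORM GRADRMS STEPMAX STEPLEN STEPRMS"
ROW = "1 -382.02936898 -382.04914450 -0.01977552 0.11354875 0.20127947 0.01183997 0.12972761 0.20171740 0.01186573"

def convergence_values(header, row):
    """Pick the DE / GRADMAX / STEPMAX entries out of one optimization-history row by name."""
    names = header.split()
    values = row.split()
    return [float(values[names.index(key)]) for key in ("DE", "GRADMAX", "STEPMAX")]

if __name__ == "__main__":
    print(convergence_values(HEADER, ROW))
    # [-0.01977552, 0.11354875, 0.12972761]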
-""" - -__revision__ = "$Revision: 814 $" - - -#import re - -import numpy - -import utils -import logfileparser - - -def symbol2int(symbol): - t = utils.PeriodicTable() - return t.number[symbol] - #if symbol == 'C': return 6 - #elif symbol == 'H': return 1 - #elif symbol == 'O': return 8 - #elif symbol == 'Si': return 14 - #else: return -1 - -class Mopac(logfileparser.Logfile): - """A MOPAC2009 output file.""" - - def __init__(self, *args, **kwargs): - - # Call the __init__ method of the superclass - super(Mopac, self).__init__(logname="Mopac", *args, **kwargs) - - def __str__(self): - """Return a string representation of the object.""" - return "Mopac log file %s" % (self.filename) - - def __repr__(self): - """Return a representation of the object.""" - return 'Mopac("%s")' % (self.filename) - - def extract(self, inputfile, line): - """Extract information from the file object inputfile.""" - - # Number of atoms. (I think this section of code may be redundant and not needed) - # Example: Empirical Formula: C H2 O = 4 atoms - if line.find("Empirical Formula:") > -1: - - self.updateprogress(inputfile, "Attributes", self.fupdate) - #locate the component that beg - natom = int(line.split()[-2]) #second to last component should be number of atoms (last element is "atoms" (or possibly "atom"?)) - if hasattr(self, "natom"): - assert self.natom == natom - else: - self.natom = natom - - # Extract the atomic numbers and coordinates from the optimized geometry - # note that cartesian coordinates section occurs multiple times in the file, and we want to end up using the last instance - # also, note that the section labeled cartesian coordinates doesn't have as many decimal places as the one used here - # Example 1 (not used): -# CARTESIAN COORDINATES -# -# NO. ATOM X Y Z -# -# 1 O 4.7928 -0.8461 0.3641 -# 2 O 5.8977 -0.3171 0.0092 -# 3 C 3.8616 0.0654 0.8629 -# 4 O 2.9135 0.0549 -0.0719 -# 5 Si -0.6125 -0.0271 0.0487 -# 6 O 0.9200 0.2818 -0.6180 -# 7 O -1.3453 -1.2462 -0.8684 -# 8 O -1.4046 1.4708 0.0167 -# 9 O -0.5716 -0.5263 1.6651 -# 10 C 1.8529 1.0175 0.0716 -# 11 C -1.5193 -1.0359 -2.2416 -# 12 C -2.7764 1.5044 0.2897 -# 13 C -0.0136 -1.7640 2.0001 -# 14 C 2.1985 2.3297 -0.6413 -# 15 C -2.2972 -2.2169 -2.8050 -# 16 C -3.2205 2.9603 0.3151 -# 17 C 1.2114 -1.5689 2.8841 -# 18 H 4.1028 0.8832 1.5483 -# ... 
- # Example 2 (used): -# ATOM CHEMICAL X Y Z -# NUMBER SYMBOL (ANGSTROMS) (ANGSTROMS) (ANGSTROMS) -# -# 1 O 4.79280259 * -0.84610232 * 0.36409474 * -# 2 O 5.89768035 * -0.31706418 * 0.00917035 * -# 3 C 3.86164836 * 0.06535206 * 0.86290800 * -# 4 O 2.91352871 * 0.05485130 * -0.07194851 * -# 5 Si -0.61245484 * -0.02707117 * 0.04871188 * -# 6 O 0.91999240 * 0.28181302 * -0.61800545 * -# 7 O -1.34526429 * -1.24617340 * -0.86844046 * -# 8 O -1.40457125 * 1.47080489 * 0.01671181 * -# 9 O -0.57162101 * -0.52628027 * 1.66508989 * -# 10 C 1.85290140 * 1.01752620 * 0.07159039 * -# 11 C -1.51932072 * -1.03592573 * -2.24160046 * -# 12 C -2.77644395 * 1.50443941 * 0.28973441 * -# 13 C -0.01360776 * -1.76397803 * 2.00010724 * -# 14 C 2.19854080 * 2.32966388 * -0.64131311 * -# 15 C -2.29721668 * -2.21688022 * -2.80495545 * -# 16 C -3.22047132 * 2.96028967 * 0.31511890 * -# 17 C 1.21142471 * -1.56886315 * 2.88414255 * -# 18 H 4.10284938 * 0.88318846 * 1.54829483 * -# 19 H 1.60266809 * 1.19314394 * 1.14931859 * -# 20 H -2.06992519 * -0.08909329 * -2.41564011 * -# 21 H -0.53396028 * -0.94280520 * -2.73816125 * -# 22 H -2.99280631 * 1.01386560 * 1.25905636 * -# 23 H -3.32412961 * 0.94305635 * -0.49427315 * -# 24 H -0.81149878 * -2.30331548 * 2.54543351 * -# 25 H 0.24486568 * -2.37041735 * 1.10943219 * -# 26 H 2.46163770 * 2.17667287 * -1.69615441 * -# 27 H 1.34364456 * 3.01690600 * -0.61108044 * -# 28 H 3.04795301 * 2.82487051 * -0.15380555 * -# 29 H -1.76804185 * -3.16646015 * -2.65234745 * -# 30 H -3.28543199 * -2.31880074 * -2.33789659 * -# 31 H -2.45109195 * -2.09228197 * -3.88420787 * -# 32 H -3.02567427 * 3.46605770 * -0.63952294 * -# 33 H -4.29770055 * 3.02763638 * 0.51281387 * -# 34 H -2.70317481 * 3.53302115 * 1.09570604 * -# 35 H 2.01935375 * -1.03805729 * 2.35810565 * -# 36 H 1.60901654 * -2.53904354 * 3.20705714 * -# 37 H 0.97814118 * -0.98964976 * 3.78695207 * - if (line.find("NUMBER SYMBOL (ANGSTROMS) (ANGSTROMS) (ANGSTROMS)") > -1 or line.find("NUMBER SYMBOL (ANGSTROMS) (ANGSTROMS) (ANGSTROMS)") > -1): - - - - self.updateprogress(inputfile, "Attributes", self.cupdate) - - self.inputcoords = [] - self.inputatoms = [] - - blankline = inputfile.next() - - atomcoords = [] - line = inputfile.next() - # while line != blankline: - while len(line.split()) > 6: # MOPAC Version 14.019L 64BITS suddenly appends this block with "CARTESIAN COORDINATES" block with no blank line. 
- broken = line.split() - self.inputatoms.append(symbol2int(broken[1])) - xc = float(broken[2]) - yc = float(broken[4]) - zc = float(broken[6]) - atomcoords.append([xc,yc,zc]) - line = inputfile.next() - - self.inputcoords.append(atomcoords) - - if not hasattr(self, "natom"): - self.atomnos = numpy.array(self.inputatoms, 'i') - self.natom = len(self.atomnos) - -#read energy (in kcal/mol, converted to eV) -# Example: FINAL HEAT OF FORMATION = -333.88606 KCAL = -1396.97927 KJ - if line[0:35] == ' FINAL HEAT OF FORMATION =': - if not hasattr(self, "scfenergies"): - self.scfenergies = [] - self.scfenergies.append(utils.convertor(self.float(line.split()[5])/627.5095, "hartree", "eV")) #note conversion from kcal/mol to hartree - - #molecular mass parsing (units will be amu) - #Example: MOLECULAR WEIGHT = - if line[0:35] == ' MOLECULAR WEIGHT =': - self.molmass = self.float(line.split()[3]) - - #rotational constants (converted to GHZ) - #Example: - -# ROTATIONAL CONSTANTS IN CM(-1) -# -# A = 0.01757641 B = 0.00739763 C = 0.00712013 - #could also read in moment of inertia, but this should just differ by a constant: rot cons= h/(8*Pi^2*I) - #note that the last occurence of this in the thermochemistry section has reduced precision, so we will want to use the 2nd to last instance - if line[0:40] == ' ROTATIONAL CONSTANTS IN CM(-1)': - blankline = inputfile.next(); - rotinfo=inputfile.next(); - if not hasattr(self, "rotcons"): - self.rotcons = [] - broken = rotinfo.split() - sol = 29.9792458 #speed of light in vacuum in 10^9 cm/s, cf. http://physics.nist.gov/cgi-bin/cuu/Value?c|search_for=universal_in! - a = float(broken[2])*sol - b = float(broken[5])*sol - c = float(broken[8])*sol - self.rotcons.append([a, b, c]) - - # Start of the IR/Raman frequency section. -#Example: -# VIBRATION 1 1A ATOM PAIR ENERGY CONTRIBUTION RADIAL -# FREQ. 15.08 C 12 -- C 16 +7.9% (999.0%) 0.0% -# T-DIPOLE 0.2028 C 16 -- H 34 +5.8% (999.0%) 28.0% -# TRAVEL 0.0240 C 16 -- H 32 +5.6% (999.0%) 35.0% -# RED. MASS 1.7712 O 1 -- O 4 +5.2% (999.0%) 0.4% -# EFF. MASS7752.8338 -# -# VIBRATION 2 2A ATOM PAIR ENERGY CONTRIBUTION RADIAL -# FREQ. 42.22 C 11 -- C 15 +9.0% (985.8%) 0.0% -# T-DIPOLE 0.1675 C 15 -- H 31 +6.6% (843.6%) 3.3% -# TRAVEL 0.0359 C 15 -- H 29 +6.0% (802.8%) 24.5% -# RED. MASS 1.7417 C 13 -- C 17 +5.8% (792.7%) 0.0% -# EFF. MASS1242.2114 - if line[1:10] == 'VIBRATION': - line = inputfile.next() - self.updateprogress(inputfile, "Frequency Information", self.fupdate) - - if not hasattr(self, 'vibfreqs'): - self.vibfreqs = [] - freq = self.float(line.split()[1]) - #self.vibfreqs.extend(freqs) - self.vibfreqs.append(freq) - - -if __name__ == "__main__": - import doctest, mopacparser - doctest.testmod(mopacparser, verbose=False) diff --git a/external/cclib/parser/orcaparser.py b/external/cclib/parser/orcaparser.py deleted file mode 100644 index 6e80ac0b32..0000000000 --- a/external/cclib/parser/orcaparser.py +++ /dev/null @@ -1,407 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). 
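Two format-specific conversions in the MOPAC extract() method above deserve a quick sanity check: rotational constants are printed in cm-1 and become GHz after multiplying by the speed of light expressed as 29.9792458 (in 1e9 cm/s), and the optimized-coordinate lines carry a '*' optimization flag after every value, so x, y and z sit at whitespace-token positions 2, 4 and 6. A short check with the sample numbers quoted above (the helper names are illustrative):

SPEED_OF_LIGHT_1E9_CM_S = 29.9792458    # c in 1e9 cm/s, so value[cm-1] * c gives GHz

def wavenumber_to_ghz(value):
    """Rotational constant in cm-1 -> GHz, as done for the MOPAC output above."""
    return value * SPEED_OF_LIGHT_1E9_CM_S

def coords_from_mopac_line(line):
    """Pull x, y, z from an optimized-geometry line, skipping the '*' flags."""
    tokens = line.split()
    return [float(tokens[i]) for i in (2, 4, 6)]

if __name__ == "__main__":
    print(round(wavenumber_to_ghz(0.01757641), 4))    # about 0.5269 GHz for the sample A value
    print(coords_from_mopac_line(" 1 O 4.79280259 * -0.84610232 * 0.36409474 *"))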
-""" - -__revision__ = "$Revision: 668 $" - - -import re - -import numpy - -import logfileparser -import utils - - -class ORCA(logfileparser.Logfile): - """An ORCA log file.""" - - def __init__(self, *args, **kwargs): - - # Call the __init__ method of the superclass - super(ORCA, self).__init__(logname="ORCA", *args, **kwargs) - - def __str__(self): - """Return a string representation of the object.""" - return "ORCA log file %s" % (self.filename) - - def __repr__(self): - """Return a representation of the object.""" - return 'ORCA("%s")' % (self.filename) - - def normalisesym(self, label): - """Use standard symmetry labels instead of Gaussian labels. - - To normalise: - (1) If label is one of [SG, PI, PHI, DLTA], replace by [sigma, pi, phi, delta] - (2) replace any G or U by their lowercase equivalent - - >>> sym = Gaussian("dummyfile").normalisesym - >>> labels = ['A1', 'AG', 'A1G', "SG", "PI", "PHI", "DLTA", 'DLTU', 'SGG'] - >>> map(sym, labels) - ['A1', 'Ag', 'A1g', 'sigma', 'pi', 'phi', 'delta', 'delta.u', 'sigma.g'] - """ - - def before_parsing(self): - - # Used to index self.scftargets[]. - SCFRMS, SCFMAX, SCFENERGY = range(3) - # Flag that indicates whether it has reached the end of a geoopt. - self.optfinished = False - # Flag for identifying Coupled Cluster runs. - self.coupledcluster = False - - def extract(self, inputfile, line): - """Extract information from the file object inputfile.""" - - if line[0:15] == "Number of atoms": - - natom = int(line.split()[-1]) - if hasattr(self, "natom"): - # I wonder whether this code will ever be executed. - assert self.natom == natom - else: - self.natom = natom - - if line[1:13] == "Total Charge": -#get charge and multiplicity info - self.charge = int(line.split()[-1]) - line = inputfile.next() - self.mult = int(line.split()[-1]) - - if line[25:50] == "Geometry Optimization Run": -#get geotarget info - line = inputfile.next() - while line[0:23] != "Convergence Tolerances:": - line = inputfile.next() - - self.geotargets = numpy.zeros((5,), "d") - for i in range(5): - line = inputfile.next() - self.geotargets[i] = float(line.split()[-2]) - - # Read in scfvalues. - if line [:14] == "SCF ITERATIONS": - if not hasattr(self, "scfvalues"): - self.scfvalues = [] - dashes = inputfile.next() - line = inputfile.next().split() - assert line[1] == "Energy" - assert line[2] == "Delta-E" - assert line[3] == "Max-DP" - self.scfvalues.append([]) - while line != []: - if line[0].isdigit(): - energy = float(line[1]) - deltaE = float(line[2]) - maxDP = float(line[3]) - rmsDP = float(line[4]) - self.scfvalues[-1].append([deltaE, maxDP, rmsDP]) - line = inputfile.next().split() - - # Read in values for last SCF iteration and scftargets. 
- if line[:15] == "SCF CONVERGENCE": - if not hasattr(self, "scfvalues"): - self.scfvalues = [] - if not hasattr(self, "scftargets"): - self.scftargets = [] - dashes = inputfile.next() - blank = inputfile.next() - line = inputfile.next() - assert line[:29].strip() == "Last Energy change" - deltaE_value = float(line[33:46]) - deltaE_target = float(line[60:72]) - line = inputfile.next() - assert line[:29].strip() == "Last MAX-Density change" - maxDP_value = float(line[33:46]) - maxDP_target = float(line[60:72]) - line = inputfile.next() - assert line[:29].strip() == "Last RMS-Density change" - rmsDP_value = float(line[33:46]) - rmsDP_target = float(line[60:72]) - line = inputfile.next() - assert line[:29].strip() == "Last DIIS Error" - self.scfvalues[-1].append([deltaE_value,maxDP_value,rmsDP_value]) - self.scftargets.append([deltaE_target,maxDP_target,rmsDP_target]) - - # Read in SCF energy, at least in SP calculation. - if line [:16] == "TOTAL SCF ENERGY": - if not hasattr(self, "scfenergies"): - self.scfenergies = [] - dashes = inputfile.next() - blank = inputfile.next() - line = inputfile.next() - if line[:12] == "Total Energy": - energy = float(line[50:67]) - self.scfenergies.append(energy) - - if line[33:53] == "Geometry convergence": -#get geometry convergence criteria - if not hasattr(self, "geovalues"): - self.geovalues = [ ] - - newlist = [] - headers = inputfile.next() - dashes = inputfile.next() - - #check if energy change is present (steps > 1) - line = inputfile.next() - if line.find("Energy change") > 0: - newlist.append(float(line.split()[2])) - line = inputfile.next() - else: - newlist.append(0.0) - - #get rest of info - for i in range(4): - newlist.append(float(line.split()[2])) - line = inputfile.next() - - self.geovalues.append(newlist) - - if line[0:21] == "CARTESIAN COORDINATES" and not hasattr(self, "atomcoords"): -#if not an optimization, determine structure used - dashes = inputfile.next() - - atomnos = [] - atomcoords = [] - line = inputfile.next() - while len(line) > 1: - broken = line.split() - atomnos.append(self.table.number[broken[0]]) - atomcoords.append(map(float, broken[1:4])) - line = inputfile.next() - - self.atomcoords = [atomcoords] - if not hasattr(self, "atomnos"): - self.atomnos = atomnos - self.natom = len(atomnos) - - if line[26:53] == "GEOMETRY OPTIMIZATION CYCLE": -#parse geometry coords - stars = inputfile.next() - dashes = inputfile.next() - text = inputfile.next() - dashes = inputfile.next() - - if not hasattr(self,"atomcoords"): - self.atomcoords = [] - - atomnos = [] - atomcoords = [] - for i in range(self.natom): - line = inputfile.next() - broken = line.split() - atomnos.append(self.table.number[broken[0]]) - atomcoords.append(map(float, broken[1:4])) - - self.atomcoords.append(atomcoords) - if not hasattr(self, "atomnos"): - self.atomnos = numpy.array(atomnos,'i') - - if line[21:68] == "FINAL ENERGY EVALUATION AT THE STATIONARY POINT": - text = inputfile.next() - broken = text.split() - assert int(broken[2]) == len(self.atomcoords) - stars = inputfile.next() - dashes = inputfile.next() - text = inputfile.next() - dashes = inputfile.next() - - atomcoords = [] - for i in range(self.natom): - line = inputfile.next() - broken = line.split() - atomcoords.append(map(float, broken[1:4])) - - self.atomcoords.append(atomcoords) - - if line[0:16] == "ORBITAL ENERGIES": -#parser orbial energy information - dashes = inputfile.next() - text = inputfile.next() - text = inputfile.next() - - self.moenergies = [[]] - self.homos = [[0]] - - line = 
inputfile.next() - while len(line) > 20: #restricted calcs are terminated by ------ - info = line.split() - self.moenergies[0].append(float(info[3])) - if float(info[1]) > 0.00: #might be 1 or 2, depending on restricted-ness - self.homos[0] = int(info[0]) - line = inputfile.next() - - line = inputfile.next() - - #handle beta orbitals - if line[17:35] == "SPIN DOWN ORBITALS": - text = inputfile.next() - - self.moenergies.append([]) - self.homos.append(0) - - line = inputfile.next() - while len(line) > 20: #actually terminated by ------ - info = line.split() - self.moenergies[1].append(float(info[3])) - if float(info[1]) == 1.00: - self.homos[1] = int(info[0]) - line = inputfile.next() - - if line[1:32] == "# of contracted basis functions": - self.nbasis = int(line.split()[-1]) - - if line[0:14] == "OVERLAP MATRIX": -#parser the overlap matrix - dashes = inputfile.next() - - self.aooverlaps = numpy.zeros( (self.nbasis, self.nbasis), "d") - for i in range(0, self.nbasis, 6): - header = inputfile.next() - size = len(header.split()) - - for j in range(self.nbasis): - line = inputfile.next() - broken = line.split() - self.aooverlaps[j, i:i+size] = map(float, broken[1:size+1]) - - # Molecular orbital coefficients. - # This is also where atombasis is parsed. - if line[0:18] == "MOLECULAR ORBITALS": - - dashses = inputfile.next() - - mocoeffs = [ numpy.zeros((self.nbasis, self.nbasis), "d") ] - self.aonames = [] - self.atombasis = [] - for n in range(self.natom): - self.atombasis.append([]) - - for spin in range(len(self.moenergies)): - - if spin == 1: - blank = inputfile.next() - mocoeffs.append(numpy.zeros((self.nbasis, self.nbasis), "d")) - - for i in range(0, self.nbasis, 6): - numbers = inputfile.next() - energies = inputfile.next() - occs = inputfile.next() - dashes = inputfile.next() - broken = dashes.split() - size = len(broken) - - for j in range(self.nbasis): - line = inputfile.next() - broken = line.split() - - #only need this on the first time through - if spin == 0 and i == 0: - atomname = line[3:5].split()[0] - num = int(line[0:3]) - orbital = broken[1].upper() - - self.aonames.append("%s%i_%s"%(atomname, num+1, orbital)) - self.atombasis[num].append(j) - - temp = [] - vals = line[16:-1] #-1 to remove the last blank space - for k in range(0, len(vals), 10): - temp.append(float(vals[k:k+10])) - mocoeffs[spin][i:i+size, j] = temp - - self.mocoeffs = mocoeffs - - if line[0:18] == "TD-DFT/TDA EXCITED": - sym = "Triplet" # Could be singlets or triplets - if line.find("SINGLETS") >= 0: - sym = "Singlet" - self.etsecs = [] - self.etenergies = [] - self.etsyms = [] - lookup = {'a':0, 'b':1} - line = inputfile.next() - while line.find("STATE") < 0: - line = inputfile.next() - # Contains STATE or is blank - while line.find("STATE") >= 0: - broken = line.split() - self.etenergies.append(float(broken[-2])) - self.etsyms.append(sym) - line = inputfile.next() - sec = [] - # Contains SEC or is blank - while line.strip(): - start = line[0:8].strip() - start = (int(start[:-1]), lookup[start[-1]]) - end = line[10:17].strip() - end = (int(end[:-1]), lookup[end[-1]]) - contrib = float(line[35:47].strip()) - sec.append([start, end, contrib]) - line = inputfile.next() - self.etsecs.append(sec) - line = inputfile.next() - - if line[25:44] == "ABSORPTION SPECTRUM": - minus = inputfile.next() - header = inputfile.next() - header = inputfile.next() - minus = inputfile.next() - self.etoscs = [] - for x in self.etsyms: - osc = inputfile.next().split()[3] - if osc == "spin": # "spin forbidden" - osc = 0 - else: - 
osc = float(osc) - self.etoscs.append(osc) - - if line[0:23] == "VIBRATIONAL FREQUENCIES": -#parse the vibrational frequencies - dashes = inputfile.next() - blank = inputfile.next() - - self.vibfreqs = numpy.zeros((3 * self.natom,),"d") - - for i in range(3 * self.natom): - line = inputfile.next() - self.vibfreqs[i] = float(line.split()[1]) - - if line[0:11] == "IR SPECTRUM": -#parse ir intensities - dashes = inputfile.next() - blank = inputfile.next() - header = inputfile.next() - dashes = inputfile.next() - - self.vibirs = numpy.zeros((3 * self.natom,),"d") - - line = inputfile.next() - while len(line) > 2: - num = int(line[0:4]) - self.vibirs[num] = float(line.split()[2]) - line = inputfile.next() - - if line[0:14] == "RAMAN SPECTRUM": -#parser raman intensities - dashes = inputfile.next() - blank = inputfile.next() - header = inputfile.next() - dashes = inputfile.next() - - self.vibramans = numpy.zeros((3 * self.natom,),"d") - - line = inputfile.next() - while len(line) > 2: - num = int(line[0:4]) - self.vibramans[num] = float(line.split()[2]) - line = inputfile.next() - - - -if __name__ == "__main__": - import doctest, orcaparser - doctest.testmod(orcaparser, verbose=False) diff --git a/external/cclib/parser/utils.py b/external/cclib/parser/utils.py deleted file mode 100644 index ba8f9177b2..0000000000 --- a/external/cclib/parser/utils.py +++ /dev/null @@ -1,71 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 865 $" - - -def convertor(value, fromunits, tounits): - """Convert from one set of units to another. - - >>> print "%.1f" % convertor(8, "eV", "cm-1") - 64524.8 - """ - - _convertor = {"eV_to_cm-1": lambda x: x*8065.6, - "hartree_to_eV": lambda x: x*27.2113845, - "bohr_to_Angstrom": lambda x: x*0.529177, - "Angstrom_to_bohr": lambda x: x*1.889716, - "nm_to_cm-1": lambda x: 1e7/x, - "cm-1_to_nm": lambda x: 1e7/x, - "hartree_to_cm-1": lambda x: x*219474.6, - # Taken from GAMESS docs, "Further information", - # "Molecular Properties and Conversion Factors" - "Debye^2/amu-Angstrom^2_to_km/mol": lambda x: x*42.255} - - return _convertor["%s_to_%s" % (fromunits, tounits)] (value) - - -class PeriodicTable(object): - """Allows conversion between element name and atomic no. 
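The convertor() helper above stores every conversion as a lambda in a dict keyed by the string '<from>_to_<to>', which is also why the MM4 and MOPAC parsers divide by 627.5095 by hand: the shipped table has no kcal/mol entry. Purely as a sketch of how the same registry pattern would express that missing conversion (these entries are hypothetical and were never part of the deleted module):

EXTRA_CONVERSIONS = {
    "kcal/mol_to_hartree": lambda x: x / 627.5095,
    "hartree_to_kcal/mol": lambda x: x * 627.5095,
}

def convert(value, fromunits, tounits, table=EXTRA_CONVERSIONS):
    """Same lookup pattern as convertor(): the key is '<from>_to_<to>', the value a callable."""
    return table["%s_to_%s" % (fromunits, tounits)](value)

if __name__ == "__main__":
    print(round(convert(-42.51, "kcal/mol", "hartree"), 5))   # about -0.06774 hartree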
- - >>> t = PeriodicTable() - >>> t.element[6] - 'C' - >>> t.number['C'] - 6 - >>> t.element[44] - 'Ru' - >>> t.number['Au'] - 79 - """ - - def __init__(self): - self.element = [None, - 'H', 'He', - 'Li', 'Be', - 'B', 'C', 'N', 'O', 'F', 'Ne', - 'Na', 'Mg', - 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', - 'K', 'Ca', - 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', - 'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr', - 'Rb', 'Sr', - 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', - 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', - 'Cs', 'Ba', - 'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', - 'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', - 'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn', - 'Fr', 'Ra', - 'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm', 'Md', 'No', - 'Lr', 'Rf', 'Db', 'Sg', 'Bh', 'Hs', 'Mt', 'Ds', 'Rg', 'Uub'] - self.number = {} - for i in range(1, len(self.element)): - self.number[self.element[i]] = i - - -if __name__ == "__main__": - import doctest, utils - doctest.testmod(utils, verbose=False) diff --git a/external/cclib/progress/__init__.py b/external/cclib/progress/__init__.py deleted file mode 100644 index f31f42edb0..0000000000 --- a/external/cclib/progress/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 620 $" - - -import sys - -if 'qt' in sys.modules.keys(): - from qtprogress import QtProgress -if 'PyQt4' in sys.modules.keys(): - from qt4progress import Qt4Progress - -from textprogress import TextProgress diff --git a/external/cclib/progress/qt4progress.py b/external/cclib/progress/qt4progress.py deleted file mode 100644 index 10927b9618..0000000000 --- a/external/cclib/progress/qt4progress.py +++ /dev/null @@ -1,42 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 238 $" - - -from PyQt4 import QtGui,QtCore - - -class Qt4Progress(QtGui.QProgressDialog): - - def __init__(self, title, parent=None): - - QtGui.QProgressDialog.__init__(self, parent) - - self.nstep = 0 - self.text = None - self.oldprogress = 0 - self.progress = 0 - self.calls = 0 - self.loop=QtCore.QEventLoop(self) - self.setWindowTitle(title) - - def initialize(self, nstep, text=None): - - self.nstep = nstep - self.text = text - self.setRange(0,nstep) - if text: - self.setLabelText(text) - self.setValue(1) - #sys.stdout.write("\n") - - def update(self, step, text=None): - - if text: - self.setLabelText(text) - self.setValue(step) - self.loop.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents) - diff --git a/external/cclib/progress/qtprogress.py b/external/cclib/progress/qtprogress.py deleted file mode 100644 index 941ecc51f2..0000000000 --- a/external/cclib/progress/qtprogress.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). 
-""" - -__revision__ = "$Revision: 620 $" - - -from qt import QProgressDialog - - -class QtProgress(QProgressDialog): - - def __init__(self, parent): - - QProgressDialog.__init__(self, parent, "progress", True) - - self.nstep = 0 - self.text = None - self.oldprogress = 0 - self.progress = 0 - self.calls = 0 - - self.setCaption("Progress...") - - def initialize(self, nstep, text=None): - - self.nstep = nstep - self.text = text - self.setTotalSteps(nstep) - if text: - self.setLabelText(text) - self.setProgress(1) - #sys.stdout.write("\n") - - def update(self, step, text=None): - - self.setLabelText(text) - self.setProgress(step) - - return diff --git a/external/cclib/progress/textprogress.py b/external/cclib/progress/textprogress.py deleted file mode 100644 index 79a6f3d1ef..0000000000 --- a/external/cclib/progress/textprogress.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -cclib (http://cclib.sf.net) is (c) 2006, the cclib development team -and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). -""" - -__revision__ = "$Revision: 620 $" - - -import sys - - -class TextProgress: - - def __init__(self): - - self.nstep = 0 - self.text = None - self.oldprogress = 0 - self.progress = 0 - self.calls = 0 - - def initialize(self, nstep, text=None): - - self.nstep = float(nstep) - self.text = text - - #sys.stdout.write("\n") - - def update(self, step, text=None): - - self.progress = int(step * 100 / self.nstep) - - if self.progress/2 >= self.oldprogress/2+1 or self.text != text: -# just went through at least an interval of ten, ie. from 39 to 41, so update - - mystr = "\r[" - prog = self.progress / 10 - mystr += prog*"="+(10-prog)*"-" - mystr += "] %3i" % self.progress + "%" - - if text: - mystr += " "+text - - sys.stdout.write("\r"+70*" ") - sys.stdout.flush() - sys.stdout.write(mystr) - sys.stdout.flush() - self.oldprogress = self.progress - - if self.progress >= 100 and text == "Done": - print " " - - - return diff --git a/requirements.txt b/requirements.txt index aff4494247..7796b051f7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,6 +15,7 @@ MarkupSafe # this makes Jinja2 faster Jinja2 # this is for rendering the HTML output files cairocffi yaml +cclib # needs to be 1.6.1.rmg # For postprocessing the profiling data argparse diff --git a/rmgpy/qm/gaussian.py b/rmgpy/qm/gaussian.py index 4cc21f2da9..f637839479 100644 --- a/rmgpy/qm/gaussian.py +++ b/rmgpy/qm/gaussian.py @@ -31,7 +31,7 @@ import os import distutils.spawn -import external.cclib as cclib +import cclib as cclib import itertools import logging from subprocess import Popen diff --git a/rmgpy/qm/mopac.py b/rmgpy/qm/mopac.py index f9277352f4..d77d64fe52 100644 --- a/rmgpy/qm/mopac.py +++ b/rmgpy/qm/mopac.py @@ -30,7 +30,7 @@ import os import re -import external.cclib as cclib +import cclib as cclib import logging from subprocess import Popen, PIPE import distutils.spawn From ba25f01c98b75e3c0256143b17f59eba08440dc4 Mon Sep 17 00:00:00 2001 From: Max Liu Date: Mon, 12 Aug 2019 12:29:44 -0400 Subject: [PATCH 008/155] Remove reduction module Our implementation of model reduction is slow to the point of being infeasible for any moderately sized mechanism which requries reduction. It is also tightly integrated with parallel processing via scoop, which is also deprecated and no longer works properly. 
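The requirements.txt and rmgpy/qm hunks above replace the bundled external.cclib copy, which this patch series deletes, with the installed cclib package pinned to the 1.6.1.rmg build noted in requirements.txt. For orientation, reading a quantum chemistry log through the installed package looks roughly like the sketch below; ccread and the scfenergies attribute are part of cclib's documented ccData interface, while the function name and file path here are only examples.

from cclib.io import ccread

def read_scf_energies(logname):
    """Parse a log file with the installed cclib instead of the deleted external.cclib."""
    data = ccread(logname)              # returns a ccData object, or None if parsing fails
    if data is None:
        raise ValueError("cclib could not identify or parse %s" % logname)
    return data.scfenergies             # SCF energies in eV, as with the deleted parsers

if __name__ == "__main__":
    print(read_scf_energies("gaussian_output.log"))   # the path is illustrative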
--- rmgpy/reduction/__init__.py | 29 -- rmgpy/reduction/input.py | 84 ---- rmgpy/reduction/main.py | 115 ----- rmgpy/reduction/model.py | 84 ---- rmgpy/reduction/modelTest.py | 89 ---- rmgpy/reduction/optimization.py | 148 ------ rmgpy/reduction/optimizationTest.py | 108 ----- rmgpy/reduction/output.py | 46 -- rmgpy/reduction/rates.py | 205 --------- rmgpy/reduction/reduction.py | 427 ------------------ rmgpy/reduction/reductionTest.py | 132 ------ .../minimal/chemkin/chem_annotated.inp | 409 ----------------- .../minimal/chemkin/species_dictionary.txt | 155 ------- rmgpy/reduction/test_data/minimal/input.py | 60 --- .../test_data/minimal/reduction_input.py | 2 - 15 files changed, 2093 deletions(-) delete mode 100644 rmgpy/reduction/__init__.py delete mode 100644 rmgpy/reduction/input.py delete mode 100644 rmgpy/reduction/main.py delete mode 100644 rmgpy/reduction/model.py delete mode 100644 rmgpy/reduction/modelTest.py delete mode 100644 rmgpy/reduction/optimization.py delete mode 100644 rmgpy/reduction/optimizationTest.py delete mode 100644 rmgpy/reduction/output.py delete mode 100644 rmgpy/reduction/rates.py delete mode 100644 rmgpy/reduction/reduction.py delete mode 100644 rmgpy/reduction/reductionTest.py delete mode 100644 rmgpy/reduction/test_data/minimal/chemkin/chem_annotated.inp delete mode 100644 rmgpy/reduction/test_data/minimal/chemkin/species_dictionary.txt delete mode 100644 rmgpy/reduction/test_data/minimal/input.py delete mode 100644 rmgpy/reduction/test_data/minimal/reduction_input.py diff --git a/rmgpy/reduction/__init__.py b/rmgpy/reduction/__init__.py deleted file mode 100644 index 930c261434..0000000000 --- a/rmgpy/reduction/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. 
# -# # -############################################################################### diff --git a/rmgpy/reduction/input.py b/rmgpy/reduction/input.py deleted file mode 100644 index bd7bbfdb1e..0000000000 --- a/rmgpy/reduction/input.py +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### - -import os.path - -from rmgpy.tools.loader import loadRMGPyJob -from rmgpy.scoop_framework.util import logger as logging - -def loadReductionInput(reductionFile): - """ - Load an reduction job from the input file located at `reductionFile` - """ - - targets = None - tolerance = -1 - - full_path = os.path.abspath(os.path.expandvars(reductionFile)) - try: - f = open(full_path) - except IOError: - logging.error('The input file "{0}" could not be opened.'.format(full_path)) - logging.info('Check that the file exists and that you have read access.') - raise - - logging.info('Reading input file "{0}"...'.format(full_path)) - - global_context = { '__builtins__': None } - local_context = { - '__builtins__': None, - 'targets': targets, - 'tolerance': tolerance - } - - try: - exec f in global_context, local_context - - targets = local_context['targets'] - tolerance = local_context['tolerance'] - - except (NameError, TypeError, SyntaxError) as e: - logging.error('The input file "{0}" was invalid:'.format(full_path)) - logging.exception(e) - raise - finally: - f.close() - - assert targets is not None - assert tolerance != -1 - - return targets, tolerance - -def load(rmgInputFile, reductionFile, chemkinFile, speciesDict): - - rmg = loadRMGPyJob(rmgInputFile, chemkinFile, speciesDict, generateImages=False, useChemkinNames=True) - targets, tolerance = loadReductionInput(reductionFile) - - return rmg, targets, tolerance diff --git a/rmgpy/reduction/main.py b/rmgpy/reduction/main.py deleted file mode 100644 index 997c7e0af1..0000000000 --- a/rmgpy/reduction/main.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - 
Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### - -import os.path -import argparse - -from input import load -from output import writeModel -from optimization import optimize -from reduction import computeObservables, initialize -from logging import INFO, DEBUG, WARNING - -from rmgpy.scoop_framework.util import logger - - -def parseCommandLineArguments(): - """Parse the command-line arguments for the stand-alone model-reduction module""" - parser = argparse.ArgumentParser(description='RMG Model Reduction Tool') - parser.add_argument('requiredFiles', metavar='FILE', type=str, nargs=4, - help='File Order: input.py reduction_input.py chem_annotated.inp species_dictionary.txt') - - # Options for controlling the amount of information printed to the console - # By default a moderate level of information is printed; you can either - # ask for less (quiet), more (verbose), or much more (debug) - group = parser.add_mutually_exclusive_group() - group.add_argument('-q', '--quiet', action='store_true', help='only print warnings and errors') - group.add_argument('-v', '--verbose', action='store_true', help='print more verbose output') - group.add_argument('-d', '--debug', action='store_true', help='print debug information') - - return parser.parse_args() - - -def main(): - args = parseCommandLineArguments() - - level = INFO - if args.debug: level = 0 - elif args.verbose: level = DEBUG - elif args.quiet: level = WARNING - initializeLog(level) - - inputFile, reductionFile, chemkinFile, spcDict = args.requiredFiles[-4:] - - for f in [inputFile, reductionFile, chemkinFile, spcDict]: - assert os.path.isfile(f), 'Could not find {}'.format(f) - - inputDirectory = os.path.abspath(os.path.dirname(inputFile)) - output_directory = inputDirectory - - rmg, targets, error = load(inputFile, reductionFile, chemkinFile, spcDict) - logger.info('Allowed error in target observables: {0:.0f}%'.format(error * 100)) - - reactionModel = rmg.reactionModel - initialize(rmg.outputDirectory, reactionModel.core.reactions) - - atol, rtol = rmg.absoluteTolerance, rmg.relativeTolerance - index = 0 - reactionSystem = rmg.reactionSystems[index] - - #compute original target observables - observables = 
computeObservables(targets, reactionModel, reactionSystem, \ - rmg.absoluteTolerance, rmg.relativeTolerance) - - logger.info('Observables of original model:') - for target, observable in zip(targets, observables): - logger.info('{}: {:.2f}%'.format(target, observable * 100)) - - # optimize reduction tolerance - tol, importantReactions = optimize(targets, reactionModel, rmg, index, error, observables) - logger.info('Optimized tolerance: {:.0E}'.format(10**tol)) - logger.info('Number of reactions in optimized reduced model : {}'.format(len(importantReactions))) - - # plug the important reactions into the RMG object and write: - rmg.reactionModel.core.reactions = importantReactions - writeModel(rmg) - -def initializeLog(level): - """ - Set up a logger for reduction to use to print output to stdout. The - `level` parameter is an integer specifying the amount of log text seen - at the console; the levels correspond to those of the :data:`logging` module. - """ - # Create logger - logger.setLevel(level) - - -if __name__ == '__main__': - main() diff --git a/rmgpy/reduction/model.py b/rmgpy/reduction/model.py deleted file mode 100644 index 73471486dd..0000000000 --- a/rmgpy/reduction/model.py +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### - -from collections import Counter - -class ReductionReaction(object): - """ - A class that enhances RMG-Py's Reaction object - by providing storage for the forward (kf) and backward - (kb) rate coefficient. - - Once k is computed, it is stored and fetched - when requested. - - """ - def __init__(self, rmgReaction): - super(ReductionReaction, self).__init__() - self.rmgReaction = rmgReaction - self.reactants = rmgReaction.reactants - self.products = rmgReaction.products - self.kf = None - self.kb = None - self.stoichio = {} - self.createStoichio() - - def __str__(self): - return str(self.rmgReaction) - - def __reduce__(self): - """ - A helper function used when pickling an object. 
- """ - return (self.__class__, (self.rmgReaction, )) - - - def getRateCoefficient(self, T,P): - if self.kf is None: - self.kf = self.rmgReaction.getRateCoefficient(T,P) - return self.kf - else: return self.kf - - def getReverseRateCoefficient(self, T, P): - if self.kb is None: - kf = self.getRateCoefficient(T,P) - self.kb = kf / self.rmgReaction.getEquilibriumConstant(T) - return self.kb - else: return self.kb - - def createStoichio(self): - cReactants = Counter([mol.label for mol in self.reactants]) - self.stoichio['reactant'] = cReactants - - cProducts = Counter([mol.label for mol in self.products]) - self.stoichio['product'] = cProducts - - def getStoichiometricCoefficient(self, spc, reactantOrProduct): - return self.stoichio[reactantOrProduct][spc.label] diff --git a/rmgpy/reduction/modelTest.py b/rmgpy/reduction/modelTest.py deleted file mode 100644 index 50c21e1153..0000000000 --- a/rmgpy/reduction/modelTest.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. 
# -# # -############################################################################### - -import unittest - -from .model import * - - -class MockMolecule(object): - """docstring for MockMolecule""" - def __init__(self, label): - super(MockMolecule, self).__init__() - self.label = label - -class ReductionReactionTest(unittest.TestCase): - - def setUp(self): - from rmgpy.reaction import Reaction - from .model import ReductionReaction - - mol1 = MockMolecule(label='mol1') - mol2 = MockMolecule(label='mol2') - mol3 = MockMolecule(label='mol3') - mol4 = MockMolecule(label='mol4') - - self.rxn = Reaction(reactants=[mol1, mol2], products=[mol3, mol4]) - - self.rrxn = ReductionReaction(self.rxn) - - - def tearDown(self): - del self.rrxn - - - def testConstructor(self): - rrxn = self.rrxn - rxn = self.rxn - - self.assertIsNotNone(rrxn) - - # attributes - self.assertIsNotNone(rrxn.reactants, rxn.reactants) - self.assertIs(rrxn.products, rxn.products) - self.assertIs(rrxn.rmgReaction, rxn) - self.assertIsNotNone(rrxn.stoichio) - self.assertIsNone(rrxn.kf) - self.assertIsNone(rrxn.kb) - - - # stoichio - for k,d in self.rrxn.stoichio.iteritems(): - for k,v in d.iteritems(): - self.assertEquals(v, 1) - - - - def testReduce(self): - import pickle - reaction = pickle.loads(pickle.dumps(self.rrxn)) - -if __name__ == '__main__': - unittest.main() diff --git a/rmgpy/reduction/optimization.py b/rmgpy/reduction/optimization.py deleted file mode 100644 index d2bbbee945..0000000000 --- a/rmgpy/reduction/optimization.py +++ /dev/null @@ -1,148 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### - -import numpy as np - -from reduction import reduceModel -from output import writeModel -from rmgpy.scoop_framework.util import logger as logging - -def optimize(target_label, reactionModel, rmg, reactionSystemIndex, error, orig_observable): - """ - The optimization algorithm that searches for the most reduced model that satisfies the - applied constraints. 
- - The error introduced by the reduced model for a response variable - of a target is used as the objective function. - - The optimization algorithm increments the trial tolerance from a very low value - until the introduced error is greater than the user-provided threshold. - """ - - - low = -30 - high = 0 - - """ - Tolerance to decide whether a reaction is unimportant for the formation/destruction of a species - - Tolerance is a floating point value between 0 and 1. - - A high tolerance means that many reactions will be deemed unimportant, and the reduced model will be drastically - smaller. - - A low tolerance means that few reactions will be deemed unimportant, and the reduced model will only differ from the full - model by a few reactions. - """ - - tol, importantReactions = \ - bisect(low, high, error, target_label, reactionModel, rmg, reactionSystemIndex, orig_observable) - - return tol, importantReactions - -def computeDeviation(original, reduced, targets): - """ - Computes the relative deviation between the observables of the - original and reduced model. - - Assumes the observables are numpy arrays. - """ - devs = np.abs((reduced - original) / original) - - logging.info('Deviations: '.format()) - for dev, target in zip(devs, targets): - logging.info('Deviation for {}: {:.2f}%'.format(target, dev * 100)) - - return devs - -def isInvalid(devs, error): - """ - Check if the reduced observables differ from the original - observables more than the parameter error threshold. - """ - invalid = np.any(devs > error) - return invalid - -def bisect(low, high, error, targets, reactionModel, rmg, reactionSystemIndex, orig_observable): - """ - Bisect method in log space. - - Interrupt iterations when two consecutive, successful iterations differ less than a - threshold value. - """ - - THRESHOLD = 0.05 - - importantReactions = None - final_devs = None - old_trial = low - while True: - midpoint = (low + high) / 2.0 - reduced_observable, newImportantReactions = evaluate(midpoint, targets, reactionModel, rmg, reactionSystemIndex) - - devs = computeDeviation(orig_observable, reduced_observable, targets) - - if isInvalid(devs, error): - high = midpoint - else: - if len(newImportantReactions) == 0: - logging.error('Model reduction resulted in a model with 0 reactions.') - logging.error('Perhaps change reactor conditions to allow for more adequate reduction. 
Exiting...') - break - low = midpoint - importantReactions = newImportantReactions - final_devs = devs - writeModel(rmg, chemkin_name='chem_reduced_{}.inp'.format(len(importantReactions))) - - if np.abs((midpoint - old_trial) / old_trial) < THRESHOLD: - break - - old_trial = low - - if not importantReactions: - logging.error("Could not find a good guess...") - importantReactions = [] - - logging.info('Final deviations: '.format()) - for dev, target in zip(final_devs, targets): - logging.info('Final deviation for {}: {:.2f}%'.format(target, dev * 100)) - - - return low, importantReactions - -def evaluate(guess, targets, reactionModel, rmg, reactionSystemIndex): - """ - - """ - logging.info('Trial tolerance: {:.2E}'.format(10**guess)) - - observable, newImportantReactions = reduceModel(10**guess, targets, reactionModel, rmg, reactionSystemIndex) - - return observable, newImportantReactions diff --git a/rmgpy/reduction/optimizationTest.py b/rmgpy/reduction/optimizationTest.py deleted file mode 100644 index 5de6c084db..0000000000 --- a/rmgpy/reduction/optimizationTest.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. 
# -# # -############################################################################### - -import os -import unittest -from external.wip import work_in_progress - -from rmgpy.scoop_framework.framework import TestScoopCommon -from rmgpy.scoop_framework.util import logger as logging - -import rmgpy - -try: - from scoop import futures -except ImportError: - logging.debug("Could not properly import SCOOP.") - -from .input import load -from .reduction import initialize, computeObservables - -from .optimization import * - -def funcOptimize(rmg, targets): - reactionModel = rmg.reactionModel - - initialize(rmg.outputDirectory, reactionModel.core.reactions) - - error = OptimizeTest.error - - index = 0 - reactionSystem = rmg.reactionSystems[index] - - #compute original target observables - observables = computeObservables(targets, reactionModel, reactionSystem, - rmg.absoluteTolerance, rmg.relativeTolerance) - - # optimize reduction tolerance - tol, importantRxns = optimize(targets, reactionModel, rmg, index, error, observables) - - try: - assert len(importantRxns) == 30 - except AssertionError: - return False - - return True - -@work_in_progress -class OptimizeTest(TestScoopCommon): - - #MINIMAL - wd = os.path.join(os.path.dirname(rmgpy.__file__),'reduction/test_data/minimal/') - inputFile = os.path.join(wd, 'input.py') - reductionFile = os.path.join(wd, 'reduction_input.py') - chemkinFile = os.path.join(wd, 'chemkin','chem_annotated.inp') - spcDict = os.path.join(wd, 'chemkin','species_dictionary.txt') - - def __init__(self, *args, **kwargs): - # Parent initialization - super(self.__class__, self).__init__(*args, **kwargs) - - # Only setup the scoop framework once, and not in every test method: - super(self.__class__, self).setUp() - - @classmethod - def setUpClass(cls): - super(OptimizeTest, cls).setUpClass() - rmg, targets, error = load(cls.inputFile, cls.reductionFile, cls.chemkinFile, cls.spcDict) - cls.rmg = rmg - cls.targets = targets - cls.error = error - - - def testOptimize(self): - rmg = OptimizeTest.rmg - targets = OptimizeTest.targets - - result = futures._startup(funcOptimize, rmg, targets) - self.assertEquals(result, True) - -if __name__ == '__main__' and os.environ.get('IS_ORIGIN', "1") == "1": - unittest.main() diff --git a/rmgpy/reduction/output.py b/rmgpy/reduction/output.py deleted file mode 100644 index 79a279be8c..0000000000 --- a/rmgpy/reduction/output.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. 
# -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### - -import os -import os.path - -from rmgpy.chemkin import saveChemkinFile - -from rmgpy.scoop_framework.util import logger as logging - -def writeModel(rmg, chemkin_name='chem_reduced.inp'): - """ - Writes the reduced reaction model to a chemkin compatible files. - """ - logging.info('Writing reduced model to {}'.format(chemkin_name)) - speciesList = rmg.reactionModel.core.species - rxnList = rmg.reactionModel.core.reactions - path = os.path.join(os.getcwd(), chemkin_name) - saveChemkinFile(path, speciesList, rxnList, verbose = True, checkForDuplicates=True) diff --git a/rmgpy/reduction/rates.py b/rmgpy/reduction/rates.py deleted file mode 100644 index 2d3aa12198..0000000000 --- a/rmgpy/reduction/rates.py +++ /dev/null @@ -1,205 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### - -import numpy as np -from rmgpy.scoop_framework.util import logger as logging - -CLOSE_TO_ZERO = 1E-20 - -def computeReactionRate(rxn, forward, T, P, coreSpeciesConcentrations): - """ - - Computes reaction rate r as follows: - - r = k * Product(Ci^nuij, for all j) - with: - - k = rate coefficient for rxn, - Cij = the concentration for molecule i , - nuij = the stoichiometric coefficient for molecule i in reaction j. - - ... - """ - - speciesList = rxn.reactants if forward == 'reactants' else rxn.products - - totconc = 1.0 - for spc in speciesList: - ci = coreSpeciesConcentrations[spc.label] - if abs(ci) < CLOSE_TO_ZERO: - return 0. 
- nui = rxn.getStoichiometricCoefficient(spc, forward) - conc = ci**nui - - totconc *= conc - - k = rxn.getRateCoefficient(T,P) if forward == 'reactants' else rxn.getReverseRateCoefficient(T,P) - r = k * totconc - - return r - - -def calcRij(rxn, spc, isReactant, T, P, coreSpeciesConcentrations): - """ - This function computes the rate of formation of species i - through the reaction j. - - This function multiplies: - - nu(i): stoichiometric coefficient of spc in rxn - - r(rxn): reaction rate of rxn - - Returns a reaction rate - - Units: mol / m^3 s - """ - - nui = rxn.getStoichiometricCoefficient(spc, isReactant) - sign = -1 if isReactant else 1 - - forward = isReactant - - rj = computeReactionRate(rxn, forward, T, P, coreSpeciesConcentrations) - - rij = nui * sign * rj - return rij - - -def calcRf(spc, reactions, reactantOrProduct, T, P, coreSpeciesConcentrations, formationOrConsumption): - """ - Calculates the total rate of formation/consumption of species i. - - Computes the sum of the rates of formation/consumption of spc for all of - the reactions in which spc is a product. - - if formationOrConsumption == 'formation', spc will be compared to the - products of the reaction. Else, spc will be compared to the reactants of - the reaction. - - units of rate: mol/(m^3.s) - """ - rate = 0.0 - - for reaction in reactions: - molecules = reaction.products if formationOrConsumption == 'formation:' else reaction.reactants - labels = [mol.label for mol in molecules] - if spc.label in labels: - rij = calcRij(reaction, spc, reactantOrProduct, T, P, coreSpeciesConcentrations) - rate = rate + rij - - logging.debug('Rf: {rate}'.format(**locals())) - - return rate - -def calcRfClosure(spc, reactions, reactantOrProduct, T, P, coreSpeciesConcentrations): - """ - Closure to avoid replicating function calls to calcRf. - """ - def myfilter(formationOrConsumption): - return calcRf(spc, reactions, reactantOrProduct, T, P, coreSpeciesConcentrations, formationOrConsumption) - - return myfilter - -def calcRi(spc,rij, reactions, reactantOrProduct, T, P, coreSpeciesConcentrations): - """ - - Checks whether the sign of rij to decide to compute the - total rate of formation or consumption of spc. - - units of rate: mol/(m^3.s) - """ - - closure = calcRfClosure(spc, reactions, reactantOrProduct, T, P, coreSpeciesConcentrations) - - if rij > 0: - return closure('formation') - elif rij < 0: - return closure('consumption') - elif np.absolute([rij]) < CLOSE_TO_ZERO: - """Pick the largest value so that the ratio rij / RX remains small.""" - Rf = closure('formation') - Rb = closure('consumption') - - """What happens when Rf ~ Rb <<< 1?""" - return max(abs(Rf),abs(Rb)) - -def isImportant(rxn, spc, reactions, reactantOrProduct, tolerance, T, P, coreSpeciesConcentrations): - """ - This function computes: - - Ri = R(spc) - - rij = r(rxn) - - alpha = ratio of rij / Ri - - Range of values of alpha: - 0 <= alpha <= 1 - - This function also compares alpha to a user-defined tolerance TOLERANCE. - if alpha >= tolerance: - this reaction is important for this species. - else: - this reaction is unimportant for this species. - - Returns whether or not rxn is important for spc. 
- keep = True - remove = False - """ - - - rij = calcRij(rxn, spc, reactantOrProduct, T, P, coreSpeciesConcentrations) - Ri = calcRi(spc, rij, reactions, reactantOrProduct, T, P, coreSpeciesConcentrations) - - logging.debug("rij: {rij}, Ri: {Ri}, rxn: {rxn}, species: {spc}, reactant: {reactantOrProduct}, tol: {tolerance}"\ - .format(**locals())) - - if np.any(np.absolute([rij, Ri]) < CLOSE_TO_ZERO): - return False - - else: - assert Ri != 0, "rij: {0}, Ri: {1}, rxn: {2}, species: {3}, reactant: {4}".format(rij, Ri, rxn, spc, reactantOrProduct) - alpha = rij / Ri - if alpha < 0: return False - - - if alpha > tolerance : - """ - If both values are very close to 1, then the comparison of alpha and the tolerance - might sometimes return an unexpected value. - - When we set the tolerance to a value of 1, we want all the reactions to be unimportant, - regardless of the value of alpha. - - """ - if np.allclose([tolerance, alpha], [1.0, 1.0]): - return False - - return True - #where tolerance is user specified tolerance - - elif alpha <= tolerance: - return False diff --git a/rmgpy/reduction/reduction.py b/rmgpy/reduction/reduction.py deleted file mode 100644 index c2d2cfa8b3..0000000000 --- a/rmgpy/reduction/reduction.py +++ /dev/null @@ -1,427 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### - -#global imports -import os.path -import numpy as np -import re - -#local imports -from rmgpy.chemkin import getSpeciesIdentifier -from rmgpy.scoop_framework.util import broadcast, get, map_ -from rmgpy.scoop_framework.util import logger as logging -from rmgpy.rmg.settings import ModelSettings, SimulatorSettings - -from model import ReductionReaction -from rates import isImportant - - -#global variables -reactions = None - - -def simulateOne(reactionModel, atol, rtol, reactionSystem): - """ - - Simulates one reaction system, listener registers results, - which are returned at the end. - - - The returned data consists of a array of the species names, - and the concentration data. 
- - The concentration data consists of a number of elements for each timestep - the solver took to reach the end time of the batch reactor simulation. - - Each element consists of the time and the concentration data of the species at that - particular timestep in the order of the species names. - - """ - - #register as a listener - listener = ConcentrationListener() - - coreSpecies = reactionModel.core.species - regex = r'\([0-9]+\)'#cut of '(one or more digits)' - speciesNames = [] - for spc in coreSpecies: - name = getSpeciesIdentifier(spc) - name_cutoff = re.split(regex, name)[0] - speciesNames.append(name_cutoff) - - listener.speciesNames = speciesNames - - reactionSystem.attach(listener) - - pdepNetworks = [] - for source, networks in reactionModel.networkDict.items(): - pdepNetworks.extend(networks) - - simulatorSettings = SimulatorSettings(atol,rtol) - modelSettings = ModelSettings(toleranceKeepInEdge=0,toleranceMoveToCore=1,toleranceInterruptSimulation=1) - - terminated,resurrected,obj,sspcs,srxns,t,conv = reactionSystem.simulate( - coreSpecies = reactionModel.core.species, - coreReactions = reactionModel.core.reactions, - edgeSpecies = reactionModel.edge.species, - edgeReactions = reactionModel.edge.reactions, - surfaceSpecies = [], - surfaceReactions = [], - pdepNetworks = pdepNetworks, - modelSettings = modelSettings, - simulatorSettings=simulatorSettings, - ) - - assert terminated - - #unregister as a listener - reactionSystem.detach(listener) - - return listener.speciesNames, listener.data - -def simulateAll(rmg): - """ - Simulate the RMG job, - for each of the simulated reaction systems. - - Each element i of the data corresponds to a reaction system. - """ - reactionModel = rmg.reactionModel - - data = [] - - atol, rtol = rmg.simulatorSettingsList[-1].atol, rmg.simulatorSettingsList[-1].rtol - for reactionSystem in rmg.reactionSystems: - data.append(simulateOne(reactionModel, atol, rtol, reactionSystem)) - - return data - - -def initialize(wd, rxns): - global working_dir, reactions - working_dir = wd - assert os.path.isdir(working_dir) - - #set global variable here such that functions executed in the root worker have access to it. - - reactions = [ReductionReaction(rxn) for rxn in rxns] - broadcast(reactions, 'reactions') - - -def retrieveReactions(): - """ - Reactions can be retrieved either through the global variable 'reactions' if parallel computing - is not used. - - With the use of multiple workers, the reactions are retrieved from the previously broadcasted - constant. - - In any case, the references to the original reactions of the reaction model are assumed to be - broken. - - """ - global reactions - - broadcastedReactions = get('reactions') - if broadcastedReactions: - reactions = broadcastedReactions - return reactions - -def findImportantReactions(rmg, tolerance): - """ - This function: - - - loops over all the species involved in a specific reaction - - decides whether the specific reaction is important for the species. - - Whenever it is found that a reaction is important for a species, we break - the species loop, and keep the reaction in the model. - - - Returns: - a list of rxns that can be removed. - """ - - # run the simulation, creating concentration profiles for each reaction system defined in input. 
- simdata = simulateAll(rmg) - - - reduceReactions = retrieveReactions() - - def chunks(l, n): - """Yield successive n-sized chunks from l.""" - for i in xrange(0, len(l), n): - yield l[i:i+n] - - CHUNKSIZE = 40 - boolean_array = [] - for chunk in chunks(reduceReactions,CHUNKSIZE): - N = len(chunk) - partial_results = list( - map_( - assessReaction, chunk, [rmg.reactionSystems] * N, [tolerance] * N, [simdata] * N - ) - ) - boolean_array.extend(partial_results) - - """ - Assuming that the order of the reduced reactions array and the core reactions of the reaction model - are identical, iterate over the boolean array and retain those reactions of the reaction model - that are deemed 'important'. - """ - importantRxns = [] - for isImport, rxn in zip(boolean_array, rmg.reactionModel.core.reactions): - logging.debug('Is rxn {rxn} important? {isImport}'.format(**locals())) - if isImport: - importantRxns.append(rxn) - - - return importantRxns - -def assessReaction(rxn, reactionSystems, tolerance, data): - """ - Returns whether the reaction is important or not in the reactions. - - It iterates over the reaction systems, and loads the concentration profile - of each reaction system. - - It iterates over a number of samples in profile and - evaluates the importance of the reaction at every sample. - - """ - - - logging.debug('Assessing reaction {}'.format(rxn)) - - reactions = retrieveReactions() - - # read in the intermediate state variables - - for datum, reactionSystem in zip(data, reactionSystems): - T, P = reactionSystem.T.value_si, reactionSystem.P.value_si - - speciesNames, profile = datum - - # take N evenly spaced indices from the table with simulation results: - - """ - - Number of time steps between start and end time of the batch reactor simulation at which the importance of - reactions should be evaluated. - - - - The more timesteps, the less chance we have to remove an important reactions, but the more simulations - need to be carried out. - """ - - timesteps = len(profile) / 2 - logging.debug('Evaluating the importance of a reaction at {} time samples.'.format(timesteps)) - - assert timesteps <= len(profile) - indices = map(int, np.linspace(0, len(profile)-1, num = timesteps)) - for index in indices: - assert profile[index] is not None - timepoint, coreSpeciesConcentrations = profile[index] - - coreSpeciesConcentrations = {key: float(value) for (key, value) in zip(speciesNames, coreSpeciesConcentrations)} - - for species_i in rxn.reactants: - if isImportant(rxn, species_i, reactions, 'reactant', tolerance, T, P, coreSpeciesConcentrations): - return True - - #only continue if the reaction is not important yet. - for species_i in rxn.products: - if isImportant(rxn, species_i, reactions, 'product', tolerance, T, P, coreSpeciesConcentrations): - return True - - return False - - -def searchTargetIndex(targetLabel, reactionModel): - """ - Searches for the Species object in the core species - of the reaction that has the same label as the parameter string. - reactionModel must be of class CoreEdgeReactionModel - - Has known issues dealing with duplicate labels. See reductionTest.py - for a unittest of this issue. - """ - for i, spc in enumerate(reactionModel.core.species): - if spc.label == targetLabel: - return i - - raise Exception('{} could not be found...'.format(targetLabel)) - - -def computeObservables(targets, reactionModel, reactionSystem, atol, rtol): - """ - Computes the observables of the targets, provided in the function signature. 
- - Currently, the species mole fractions at the end time of the - batch reactor simulation are the only observables that can be computed. - - - resetting the reaction system, initialing with empty variables - - running the simulation at the conditions stored in the reaction system - """ - simulatorSettings = SimulatorSettings(atol,rtol) - reactionSystem.initializeModel(\ - reactionModel.core.species, reactionModel.core.reactions,\ - reactionModel.edge.species, reactionModel.edge.reactions, \ - [],[],[],atol=simulatorSettings.atol,rtol=simulatorSettings.rtol, - sens_atol=simulatorSettings.sens_atol, sens_rtol=simulatorSettings.sens_rtol) - - #run the simulation: - simulateOne(reactionModel, atol, rtol, reactionSystem) - - observables = computeMoleFractions(targets, reactionModel, reactionSystem) - - return observables - -def computeMoleFractions(targets, reactionModel, reactionSystem): - """ - Computes the mole fractions of the targets, identified by the list - of species names in the function signature. - - Returns a numpy array with the mole fractions at the end time of the reactor - simulation. - - - searching the index of the target species in the core species - of the global reduction variable - - fetching the computed moles variable y - - """ - moleFractions = np.zeros(len(targets), np.float64) - - for i, label in enumerate(targets): - targetIndex = searchTargetIndex(label, reactionModel) - - moleFractions[i] = reactionSystem.y[targetIndex] - - return moleFractions - -def computeConversion(targetLabel, reactionModel, reactionSystem, atol, rtol): - """ - Computes the conversion of a target molecule by - - - searching the index of the target species in the core species - of the global reduction variable - - resetting the reaction system, initialing with empty variables - - fetching the initial moles variable y0 - - running the simulation at the conditions stored in the reaction system - - fetching the computed moles variable y - - computing conversion - """ - - targetIndex = searchTargetIndex(targetLabel, reactionModel) - - #reset reaction system variables: - logging.info('No. of rxns in core reactions: {}'.format(len(reactionModel.core.reactions))) - - simulatorSettings = SimulatorSettings(atol,rtol) - - reactionSystem.initializeModel(\ - reactionModel.core.species, reactionModel.core.reactions,\ - reactionModel.edge.species, reactionModel.edge.reactions, \ - [],[],[],atol=simulatorSettings.atol,rtol=simulatorSettings.rtol, - sens_atol=simulatorSettings.sens_atol,sens_rtol=simulatorSettings.sens_rtol) - - #get the initial moles: - y0 = reactionSystem.y.copy() - - #run the simulation: - simulateOne(reactionModel, atol, rtol, reactionSystem) - - #compute conversion: - conv = 1 - (reactionSystem.y[targetIndex] / y0[targetIndex]) - return conv - -def reduceModel(tolerance, targets, reactionModel, rmg, reactionSystemIndex): - """ - Reduces the model for the given tolerance and evaluates the - target observables. - """ - - # reduce model with the tolerance specified earlier: - importantReactions = findImportantReactions(rmg, tolerance) - - no_importantReactions = len(importantReactions) - logging.info('No. 
of reactions in tested reduced model: {}'.format(no_importantReactions)) - - #set the core reactions to the reduced reaction set: - originalReactions = reactionModel.core.reactions - rmg.reactionModel.core.reactions = importantReactions - - #re-compute observables: - observables = computeObservables(targets, rmg.reactionModel,\ - rmg.reactionSystems[reactionSystemIndex],\ - rmg.simulatorSettingsList[-1].atol, rmg.simulatorSettingsList[-1].rtol) - - #reset the reaction model to its original state: - rmg.reactionModel.core.reactions = originalReactions - - logging.info('Observables of reduced model ({} rxns):'.format(no_importantReactions)) - for target, observable in zip(targets, observables): - logging.info('Observable in reduced model: {}: {:.2f}%'.format(target, observable * 100)) - - return observables, importantReactions - -class ConcentrationListener(object): - """Returns the species concentration profiles at each time step.""" - - def __init__(self): - self.speciesNames = [] - self.data = [] - - def update(self, subject): - """ - Register the time (t) and the species mole fractions at the - given time. - - The snapshots variable stores time and Volume as the first two - elements in the array. - """ - data = subject.snapshots - self.data = process(data) - -def process(data): - """ - The data is structured as a list of lists. - - Each list contains [time, Volume, [species mole fractions]] - - The volume is cut out of each list, the remaining part is stored as a tuple. - """ - processed = [] - - for d in data: - processed.append((d[0], d[2:])) - - return processed diff --git a/rmgpy/reduction/reductionTest.py b/rmgpy/reduction/reductionTest.py deleted file mode 100644 index 6b73674578..0000000000 --- a/rmgpy/reduction/reductionTest.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. 
# -# # -############################################################################### - -import os.path -import unittest - -import rmgpy -from external.wip import work_in_progress -from nose.plugins.attrib import attr -from .reduction import * - -from rmgpy.rmg.model import CoreEdgeReactionModel -from rmgpy.species import Species -from rmgpy.rmg.settings import SimulatorSettings - -@attr('functional') -class ReduceFunctionalTest(unittest.TestCase): - - #MINIMAL - wd = os.path.join(os.path.dirname(rmgpy.__file__), 'reduction/test_data/minimal/') - inputFile = os.path.join(wd, 'input.py') - reductionFile = os.path.join(wd, 'reduction_input.py') - chemkinFile = os.path.join(wd, 'chemkin','chem_annotated.inp') - spcDict = os.path.join(wd, 'chemkin','species_dictionary.txt') - - - @classmethod - def setUpClass(cls): - from .input import load - - super(ReduceFunctionalTest, cls).setUpClass() - - rmg, targets, error = load(cls.inputFile, cls.reductionFile, cls.chemkinFile, cls.spcDict) - cls.rmg = rmg - cls.targets = targets - cls.error = error - - reactionModel = rmg.reactionModel - initialize(rmg.outputDirectory, reactionModel.core.reactions) - - - def testComputeConversion(self): - rmg = ReduceFunctionalTest.rmg - target = ReduceFunctionalTest.targets[0] - reactionModel = rmg.reactionModel - - simulatorSettings = SimulatorSettings() - atol, rtol = simulatorSettings.atol, simulatorSettings.rtol - - index = 0 - reactionSystem = rmg.reactionSystems[index] - - conv = computeConversion(target, reactionModel, reactionSystem,\ - rmg.simulatorSettingsList[-1].atol, rmg.simulatorSettingsList[-1].rtol) - self.assertIsNotNone(conv) - - - def testReduceCompute(self): - rmg = ReduceFunctionalTest.rmg - targets = ReduceFunctionalTest.targets - reactionModel = rmg.reactionModel - - simulatorSettings = SimulatorSettings() - atol, rtol = simulatorSettings.atol, simulatorSettings.rtol - index = 0 - reactionSystem = rmg.reactionSystems[index] - - observables = computeObservables(targets, reactionModel, reactionSystem, \ - simulatorSettings.atol, simulatorSettings.rtol) - - tols = [0.7, 1e-3, 1e-6] - for tol in tols: - conv, importantRxns = reduceModel(tol, targets, reactionModel, rmg, index) - self.assertIsNotNone(conv) - -class ReduceUnitTest(unittest.TestCase): - - - @work_in_progress - def testAllEntriesAccessibleInSearchTargetIndex(self): - butene1 = Species() - butene1.fromSMILES('C=CCC') - butene1.label = 'C4H8' - - butene2 = Species() - butene2.fromSMILES('CC=CC') - butene2.label = 'C4H8' - - - species_list =[butene1,butene2] - # make sure different species with same label - assert not species_list[0].isIsomorphic(species_list[1]) - assert species_list[0].label == species_list[1].label - - # make fake reactionModel object to fit in with the unittest - reaction_model = CoreEdgeReactionModel() - reaction_model.core.species = species_list - - # ensure second species index is returned when it's label is used - # in `searchTargetIndex`. 
- input_index = 1 - output_index = searchTargetIndex(species_list[input_index].label,reaction_model) - self.assertEqual(input_index,output_index,'searchTargetIndex will not return the second occurance of species with the same label.') -if __name__ == '__main__': - unittest.main() diff --git a/rmgpy/reduction/test_data/minimal/chemkin/chem_annotated.inp b/rmgpy/reduction/test_data/minimal/chemkin/chem_annotated.inp deleted file mode 100644 index 90c977c157..0000000000 --- a/rmgpy/reduction/test_data/minimal/chemkin/chem_annotated.inp +++ /dev/null @@ -1,409 +0,0 @@ -ELEMENTS H C O N Ne Ar He Si S Cl END - -SPECIES - Ar ! Ar - He ! He - Ne ! Ne - N2 ! N2 - ethane(1) ! ethane(1) - CH3(2) ! [CH3](2) - C2H5(3) ! C[CH2](3) - H(4) ! [H](4) - C(6) ! C(6) - C2H4(8) ! C=C(8) - H2(12) ! [H][H](12) - C2H3(13) ! [CH]=C(13) - C3H7(14) ! [CH2]CC(14) - C#C(25) ! C#C(25) - C4H7(28) ! [CH2]CC=C(28) - C4H6(30) ! C=CC=C(30) - C3H5(32) ! [CH]=CC(32) - C4H7(38) ! [CH2]C1CC1(38) - C4H7(42) ! C=C[CH]C(42) -END - - - -THERM ALL - 300.000 1000.000 5000.000 - -! Thermo library: primaryThermoLibrary -Ar Ar1 G200.000 6000.000 1000.00 1 - 2.50000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 2 --7.45375000E+02 4.37967000E+00 2.50000000E+00 0.00000000E+00 0.00000000E+00 3 - 0.00000000E+00 0.00000000E+00-7.45375000E+02 4.37967000E+00 4 - -! Thermo library: primaryThermoLibrary -He He1 G200.000 6000.000 1000.00 1 - 2.50000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 2 --7.45375000E+02 9.28724000E-01 2.50000000E+00 0.00000000E+00 0.00000000E+00 3 - 0.00000000E+00 0.00000000E+00-7.45375000E+02 9.28724000E-01 4 - -! Thermo library: primaryThermoLibrary -Ne Ne1 G200.000 6000.000 1000.00 1 - 2.50000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 2 --7.45375000E+02 3.35532000E+00 2.50000000E+00 0.00000000E+00 0.00000000E+00 3 - 0.00000000E+00 0.00000000E+00-7.45375000E+02 3.35532000E+00 4 - -! Thermo library: primaryThermoLibrary -N2 N 2 G200.000 6000.000 1000.00 1 - 2.95258000E+00 1.39690000E-03-4.92632000E-07 7.86010000E-11-4.60755000E-15 2 --9.23949000E+02 5.87189000E+00 3.53101000E+00-1.23661000E-04-5.02999000E-07 3 - 2.43531000E-09-1.40881000E-12-1.04698000E+03 2.96747000E+00 4 - -! Thermo group additivity estimation: group(Cs-CsHHH) + gauche(Cs(CsRRR)) + other(R) + group(Cs-CsHHH) + gauche(Cs(CsRRR)) + other(R) -ethane(1) C 2 H 6 G100.000 5000.000 954.52 1 - 4.58992443E+00 1.41506139E-02-4.75952818E-06 8.60272058E-10-6.21698116E-14 2 --1.27218039E+04-3.61791414E+00 3.78030756E+00-3.24229362E-03 5.52368270E-05 3 --6.38564474E-08 2.28629627E-11-1.16203397E+04 5.21043319E+00 4 - -! Thermo library: primaryThermoLibrary + radical(CH3) -CH3(2) C 1 H 3 G100.000 5000.000 1337.63 1 - 3.54145615E+00 4.76787036E-03-1.82148533E-06 3.28876825E-10-2.22545782E-14 2 - 1.62239586E+04 1.66035734E+00 3.91546770E+00 1.84154228E-03 3.48742038E-06 3 --3.32747899E-09 8.49957866E-13 1.62856394E+04 3.51741153E-01 4 - -! Thermo group additivity estimation: group(Cs-CsHHH) + gauche(Cs(CsRRR)) + other(R) + group(Cs-CsHHH) + gauche(Cs(CsRRR)) + other(R) + radical(CCJ) -C2H5(3) C 2 H 5 G100.000 5000.000 900.30 1 - 5.15612045E+00 9.43138098E-03-1.81955176E-06 2.21217842E-10-1.43499849E-14 2 - 1.20641180E+04-2.91049253E+00 3.82186937E+00-3.43402515E-03 5.09273311E-05 3 --6.20234394E-08 2.37084005E-11 1.30660115E+04 7.61631555E+00 4 - -! 
Thermo library: primaryThermoLibrary -H(4) H 1 G100.000 5000.000 4549.80 1 - 2.49952070E+00 4.12340064E-07-1.32961237E-10 1.90456912E-14-1.02253194E-18 2 - 2.54746633E+04-4.41924068E-01 2.50000000E+00 6.38129443E-13-8.20026313E-16 3 - 3.42836147E-19-4.40751862E-23 2.54742178E+04-4.44972896E-01 4 - -! Thermo library: primaryThermoLibrary -C(6) C 1 H 4 G100.000 5000.000 1084.13 1 - 9.08292799E-01 1.14540412E-02-4.57171312E-06 8.29185827E-10-5.66310109E-14 2 --9.71998630E+03 1.39929410E+01 4.20541054E+00-5.35551951E-03 2.51121444E-05 3 --2.13760599E-08 5.97514944E-12-1.01619431E+04-9.21262349E-01 4 - -! Thermo group additivity estimation: group(Cds-CdsHH) + gauche(CsOsCdSs) + other(R) + group(Cds-CdsHH) + gauche(CsOsCdSs) + other(R) -C2H4(8) C 2 H 4 G100.000 5000.000 940.45 1 - 5.20299363E+00 7.82442509E-03-2.12683430E-06 3.79690601E-10-2.94670753E-14 2 - 3.93628141E+03-6.62410772E+00 3.97974449E+00-7.57559988E-03 5.52973272E-05 3 --6.36221735E-08 2.31767218E-11 5.07746085E+03 4.04622733E+00 4 - -! Thermo library: primaryThermoLibrary -H2(12) H 2 G100.000 5000.000 1959.07 1 - 2.78818503E+00 5.87615996E-04 1.59022095E-07-5.52762465E-11 4.34328068E-15 2 --5.96155598E+02 1.12618838E-01 3.43536393E+00 2.12711949E-04-2.78628661E-07 3 - 3.40270004E-10-7.76039025E-14-1.03135983E+03-3.90841661E+00 4 - -! Thermo group additivity estimation: group(Cds-CdsHH) + gauche(CsOsCdSs) + other(R) + group(Cds-CdsHH) + gauche(CsOsCdSs) + other(R) + radical(Cds_P) -C2H3(13) C 2 H 3 G100.000 5000.000 931.98 1 - 5.44807708E+00 4.98336725E-03-1.08809396E-06 1.79810110E-10-1.45073526E-14 2 - 3.38297291E+04-4.87870770E+00 3.90666899E+00-4.06195903E-03 3.86763240E-05 3 --4.62953172E-08 1.72889823E-11 3.47971798E+04 6.09801911E+00 4 - -! Thermo group additivity estimation: group(Cs-CsCsHH) + gauche(Cs(CsCsRR)) + other(R) + group(Cs-CsHHH) + gauche(Cs(Cs(CsRR)RRR)) + other(R) + group -! (Cs-CsHHH) + gauche(Cs(Cs(CsRR)RRR)) + other(R) + radical(RCCJ) -C3H7(14) C 3 H 7 G100.000 5000.000 995.41 1 - 5.69427034E+00 1.96034073E-02-7.42053514E-06 1.35883869E-09-9.56222459E-14 2 - 8.87586485E+03-4.32867676E+00 3.09192284E+00 1.32171297E-02 2.75851762E-05 3 --3.90854358E-08 1.43315676E-11 1.02284113E+04 1.24057522E+01 4 - -! Thermo group additivity estimation: group(Ct-CtH) + other(R) + group(Ct-CtH) + other(R) -C#C(25) C 2 H 2 G100.000 5000.000 888.62 1 - 5.76202825E+00 2.37161764E-03-1.49600381E-07-2.19112041E-11 2.21743320E-15 2 - 2.50944568E+04-9.82599070E+00 3.03575406E+00 7.71230534E-03 2.53525578E-06 3 --1.08137801E-08 5.50779157E-12 2.58526440E+04 4.54458931E+00 4 - -! Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + gauche(Cs(CsRRR)) + other(R) + group(Cs-CsHHH) + gauche(Cs(CsRRR)) + other(R) + group -! (Cds-CdsCsH) + gauche(CsOsCdSs) + other(R) + group(Cds-CdsHH) + gauche(CsOsCdSs) + other(R) + radical(RCCJ) -C4H7(28) C 4 H 7 G100.000 5000.000 1000.95 1 - 7.59472085E+00 2.06425927E-02-7.89790209E-06 1.45966082E-09-1.03414855E-13 2 - 2.08073390E+04-1.19155237E+01 2.68061380E+00 2.10825767E-02 2.02123102E-05 3 --3.64243390E-08 1.41444958E-11 2.27528011E+04 1.66008573E+01 4 - -! Thermo group additivity estimation: group(Cds-Cds(Cds-Cds)H) + gauche(CsOsCdSs) + other(R) + group(Cds-Cds(Cds-Cds)H) + gauche(CsOsCdSs) + other(R) + -! 
group(Cds-CdsHH) + gauche(CsOsCdSs) + other(R) + group(Cds-CdsHH) + gauche(CsOsCdSs) + other(R) -C4H6(30) C 4 H 6 G100.000 5000.000 940.95 1 - 1.10824010E+01 1.17734358E-02-3.11408946E-06 5.37732639E-10-4.10612114E-14 2 - 8.42125717E+03-3.51699097E+01 2.68203512E+00 1.69324935E-02 3.73640674E-05 3 --6.26469256E-08 2.59141682E-11 1.13546025E+04 1.20324810E+01 4 - -! Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + gauche(Cs(RRRR)) + other(R) + group(Cds-CdsCsH) + gauche(CsOsCdSs) + other(R) + group -! (Cds-CdsHH) + gauche(CsOsCdSs) + other(R) + radical(Cds_P) -C3H5(32) C 3 H 5 G100.000 5000.000 997.87 1 - 5.66468879E+00 1.44326477E-02-5.46740136E-06 1.00158411E-09-7.04864021E-14 2 - 2.93870948E+04-4.48499695E+00 3.23408709E+00 1.18207627E-02 1.70308200E-05 3 --2.64369653E-08 9.91232841E-12 3.04873063E+04 1.03182585E+01 4 - -! Thermo group additivity estimation: group(Cs-CsCsCsH) + other(R) + group(Cs-CsCsHH) + other(R) + group(Cs-CsCsHH) + other(R) + group(Cs-CsHHH) + -! other(R) + ring(Cyclopropane) + radical(Isobutyl) -C4H7(38) C 4 H 7 G100.000 5000.000 926.06 1 - 1.02344344E+01 1.41135272E-02-2.99946670E-06 4.56671489E-10-3.49842348E-14 2 - 2.27934344E+04-2.92337530E+01 3.04743537E+00 5.45484672E-03 7.53340606E-05 3 --1.02231100E-07 4.01848593E-11 2.58269358E+04 1.40788596E+01 4 - -! Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + gauche(Cs(RRRR)) + other(R) + group(Cs-(Cds-Cds)HHH) + gauche(Cs(RRRR)) + other(R) + -! group(Cds-CdsCsH) + gauche(CsOsCdSs) + other(R) + group(Cds-CdsCsH) + gauche(CsOsCdSs) + other(R) + radical(Allyl_P) -C4H7(42) C 4 H 7 G100.000 5000.000 998.55 1 - 7.82793471E+00 2.08402109E-02-7.96761602E-06 1.47339533E-09-1.04527039E-13 2 - 1.26472721E+04-1.65747703E+01 2.64216174E+00 2.15950451E-02 2.09696389E-05 3 --3.79226516E-08 1.47852038E-11 1.46809419E+04 1.34334016E+01 4 - -END - - - -REACTIONS KCAL/MOLE MOLES - -! Reaction index: Chemkin #1; RMG #1 -! Template reaction: R_Recombination -! Flux pairs: CH3(2), ethane(1); CH3(2), ethane(1); -! Exact match found for rate rule (C_methyl;C_methyl) -CH3(2)+CH3(2)=ethane(1) 8.260e+17 -1.400 1.000 - -! Reaction index: Chemkin #2; RMG #4 -! Template reaction: H_Abstraction -! Flux pairs: ethane(1), C2H5(3); CH3(2), C(6); -! Estimated using template (C/H3/Cs;C_methyl) for rate rule (C/H3/Cs\H3;C_methyl) -! Multiplied by reaction path degeneracy 6 -ethane(1)+CH3(2)=C(6)+C2H5(3) 4.488e-05 4.990 8.000 - -! Reaction index: Chemkin #3; RMG #2 -! Template reaction: R_Recombination -! Flux pairs: C2H5(3), ethane(1); H(4), ethane(1); -! Exact match found for rate rule (C_rad/H2/Cs;H_rad) -C2H5(3)+H(4)=ethane(1) 1.000e+14 0.000 0.000 - -! Reaction index: Chemkin #4; RMG #13 -! Template reaction: R_Recombination -! Flux pairs: CH3(2), C(6); H(4), C(6); -! Exact match found for rate rule (C_methyl;H_rad) -CH3(2)+H(4)=C(6) 1.930e+14 0.000 0.270 - -! Reaction index: Chemkin #5; RMG #6 -! Template reaction: R_Addition_MultipleBond -! Flux pairs: C2H4(8), C2H5(3); H(4), C2H5(3); -! Exact match found for rate rule (Cds-HH_Cds-HH;HJ) -! Multiplied by reaction path degeneracy 2 -H(4)+C2H4(8)=C2H5(3) 4.620e+08 1.640 1.010 - -! Reaction index: Chemkin #6; RMG #8 -! Template reaction: Disproportionation -! Flux pairs: CH3(2), C(6); C2H5(3), C2H4(8); -! Exact match found for rate rule (C_methyl;Cmethyl_Csrad) -! Multiplied by reaction path degeneracy 3 -C2H5(3)+CH3(2)=C(6)+C2H4(8) 6.570e+14 -0.680 0.000 - -! Reaction index: Chemkin #7; RMG #11 -! Template reaction: Disproportionation -! 
Flux pairs: C2H5(3), ethane(1); C2H5(3), C2H4(8); -! Exact match found for rate rule (C_rad/H2/Cs;Cmethyl_Csrad) -! Multiplied by reaction path degeneracy 3 -C2H5(3)+C2H5(3)=ethane(1)+C2H4(8) 6.900e+13 -0.350 0.000 - -! Reaction index: Chemkin #8; RMG #15 -! Template reaction: H_Abstraction -! Flux pairs: ethane(1), C2H5(3); H(4), H2(12); -! Estimated using template (C/H3/Cs;H_rad) for rate rule (C/H3/Cs\H3;H_rad) -! Multiplied by reaction path degeneracy 6 -ethane(1)+H(4)=C2H5(3)+H2(12) 6.180e+03 3.240 7.100 - -! Reaction index: Chemkin #9; RMG #17 -! Template reaction: Disproportionation -! Flux pairs: C2H5(3), C2H4(8); H(4), H2(12); -! Exact match found for rate rule (H_rad;Cmethyl_Csrad) -! Multiplied by reaction path degeneracy 6 -C2H5(3)+H(4)=H2(12)+C2H4(8) 2.166e+13 0.000 0.000 - -! Reaction index: Chemkin #10; RMG #18 -! Template reaction: H_Abstraction -! Flux pairs: C(6), CH3(2); H(4), H2(12); -! Exact match found for rate rule (C_methane;H_rad) -! Multiplied by reaction path degeneracy 4 -C(6)+H(4)=CH3(2)+H2(12) 8.760e-01 4.340 8.200 - -! Reaction index: Chemkin #11; RMG #19 -! Template reaction: R_Recombination -! Flux pairs: H(4), H2(12); H(4), H2(12); -! Exact match found for rate rule (H_rad;H_rad) -H(4)+H(4)=H2(12) 1.090e+11 0.000 1.500 - -! Reaction index: Chemkin #12; RMG #23 -! Template reaction: R_Addition_MultipleBond -! Flux pairs: CH3(2), C3H7(14); C2H4(8), C3H7(14); -! Exact match found for rate rule (Cds-HH_Cds-HH;CsJ-HHH) -! Multiplied by reaction path degeneracy 2 -CH3(2)+C2H4(8)=C3H7(14) 4.180e+04 2.410 5.630 - -! Reaction index: Chemkin #13; RMG #20 -! Template reaction: R_Recombination -! Flux pairs: C2H3(13), C2H4(8); H(4), C2H4(8); -! Exact match found for rate rule (Cd_pri_rad;H_rad) -H(4)+C2H3(13)=C2H4(8) 1.210e+14 0.000 0.000 - -! Reaction index: Chemkin #14; RMG #24 -! Template reaction: H_Abstraction -! Flux pairs: C(6), CH3(2); C2H3(13), C2H4(8); -! Estimated using template (C_methane;Cd_pri_rad) for rate rule (C_methane;Cd_Cd\H2_pri_rad) -! Multiplied by reaction path degeneracy 4 -C(6)+C2H3(13)=CH3(2)+C2H4(8) 2.236e-02 4.340 5.700 - -! Reaction index: Chemkin #15; RMG #27 -! Template reaction: H_Abstraction -! Flux pairs: ethane(1), C2H5(3); C2H3(13), C2H4(8); -! Estimated using template (C/H3/Cs;Cd_Cd\H2_pri_rad) for rate rule (C/H3/Cs\H3;Cd_Cd\H2_pri_rad) -! Multiplied by reaction path degeneracy 6 -ethane(1)+C2H3(13)=C2H5(3)+C2H4(8) 1.080e-03 4.550 3.500 - -! Reaction index: Chemkin #16; RMG #28 -! Template reaction: H_Abstraction -! Flux pairs: H2(12), H(4); C2H3(13), C2H4(8); -! Estimated using template (H2;Cd_pri_rad) for rate rule (H2;Cd_Cd\H2_pri_rad) -! Multiplied by reaction path degeneracy 2 -H2(12)+C2H3(13)=H(4)+C2H4(8) 9.460e+03 2.560 5.030 - -! Reaction index: Chemkin #17; RMG #29 -! Template reaction: Disproportionation -! Flux pairs: C2H3(13), C2H4(8); C2H5(3), C2H4(8); -! Exact match found for rate rule (Cd_pri_rad;Cmethyl_Csrad) -! Multiplied by reaction path degeneracy 3 -C2H5(3)+C2H3(13)=C2H4(8)+C2H4(8) 4.560e+14 -0.700 0.000 - -! Reaction index: Chemkin #18; RMG #59 -! Template reaction: R_Addition_MultipleBond -! Flux pairs: C#C(25), C2H3(13); H(4), C2H3(13); -! Exact match found for rate rule (Ct-H_Ct-H;HJ) -! Multiplied by reaction path degeneracy 2 -H(4)+C#C(25)=C2H3(13) 1.030e+09 1.640 2.110 - -! Reaction index: Chemkin #19; RMG #61 -! Template reaction: Disproportionation -! Flux pairs: CH3(2), C(6); C2H3(13), C#C(25); -! Estimated using template (C_methyl;CH_d_Rrad) for rate rule (C_methyl;Cd_Cdrad) -! 
Multiplied by reaction path degeneracy 2 -CH3(2)+C2H3(13)=C(6)+C#C(25) 2.277e+06 1.870 -1.110 - -! Reaction index: Chemkin #20; RMG #65 -! Template reaction: Disproportionation -! Flux pairs: C2H5(3), ethane(1); C2H3(13), C#C(25); -! Estimated using template (Cs_rad;XH_d_Rrad) for rate rule (C_rad/H2/Cs;Cd_Cdrad) -! Multiplied by reaction path degeneracy 2 -C2H5(3)+C2H3(13)=ethane(1)+C#C(25) 1.932e+06 1.870 -1.110 - -! Reaction index: Chemkin #21; RMG #68 -! Template reaction: Disproportionation -! Flux pairs: H(4), H2(12); C2H3(13), C#C(25); -! Estimated using template (H_rad;CH_d_Rrad) for rate rule (H_rad;Cd_Cdrad) -! Multiplied by reaction path degeneracy 4 -H(4)+C2H3(13)=H2(12)+C#C(25) 1.358e+09 1.500 -0.890 - -! Reaction index: Chemkin #22; RMG #77 -! Template reaction: Disproportionation -! Flux pairs: C2H3(13), C2H4(8); C2H3(13), C#C(25); -! Estimated using template (Y_rad;XH_Rrad) for rate rule (Cd_pri_rad;Cd_Cdrad) -! Multiplied by reaction path degeneracy 2 -C2H3(13)+C2H3(13)=C2H4(8)+C#C(25) 4.670e+09 0.969 -3.686 - -! Reaction index: Chemkin #23; RMG #71 -! Template reaction: R_Addition_MultipleBond -! Flux pairs: C2H4(8), C4H7(28); C2H3(13), C4H7(28); -! Exact match found for rate rule (Cds-HH_Cds-HH;CdsJ-H) -! Multiplied by reaction path degeneracy 2 -C2H3(13)+C2H4(8)=C4H7(28) 2.860e+04 2.410 1.800 - -! Reaction index: Chemkin #24; RMG #97 -! Template reaction: Intra_R_Add_Exocyclic -! Flux pairs: C4H7(28), C4H7(38); -! Exact match found for rate rule (R4_S_D;doublebond_intra_2H_pri;radadd_intra_cs2H) -C4H7(28)=C4H7(38) 3.840e+10 0.210 8.780 - -! Reaction index: Chemkin #25; RMG #82 -! Template reaction: R_Addition_MultipleBond -! Flux pairs: CH3(2), C3H5(32); C#C(25), C3H5(32); -! Exact match found for rate rule (Ct-H_Ct-H;CsJ-HHH) -! Multiplied by reaction path degeneracy 2 -CH3(2)+C#C(25)=C3H5(32) 1.338e+05 2.410 6.770 - -! Reaction index: Chemkin #26; RMG #79 -! Template reaction: R_Recombination -! Flux pairs: C2H3(13), C4H6(30); C2H3(13), C4H6(30); -! Exact match found for rate rule (Cd_pri_rad;Cd_pri_rad) -C2H3(13)+C2H3(13)=C4H6(30) 7.230e+13 0.000 0.000 - -! Reaction index: Chemkin #27; RMG #98 -! Template reaction: R_Addition_MultipleBond -! Flux pairs: C4H6(30), C4H7(28); H(4), C4H7(28); -! Exact match found for rate rule (Cds-CdH_Cds-HH;HJ) -! Multiplied by reaction path degeneracy 2 -C4H6(30)+H(4)=C4H7(28) 3.240e+08 1.640 2.400 - -! Reaction index: Chemkin #28; RMG #111 -! Template reaction: Disproportionation -! Flux pairs: CH3(2), C(6); C4H7(28), C4H6(30); -! Estimated using template (C_methyl;Cpri_Rrad) for rate rule (C_methyl;C/H2/De_Csrad) -! Multiplied by reaction path degeneracy 2 -CH3(2)+C4H7(28)=C4H6(30)+C(6) 2.300e+13 -0.320 0.000 - -! Reaction index: Chemkin #29; RMG #120 -! Template reaction: Disproportionation -! Flux pairs: C2H5(3), ethane(1); C4H7(28), C4H6(30); -! Estimated using template (C_pri_rad;Cpri_Rrad) for rate rule (C_rad/H2/Cs;C/H2/De_Csrad) -! Multiplied by reaction path degeneracy 2 -C2H5(3)+C4H7(28)=ethane(1)+C4H6(30) 2.009e+12 0.000 -0.043 - -! Reaction index: Chemkin #30; RMG #132 -! Template reaction: Disproportionation -! Flux pairs: H(4), H2(12); C4H7(28), C4H6(30); -! Estimated using template (H_rad;Cpri_Rrad) for rate rule (H_rad;C/H2/De_Csrad) -! Multiplied by reaction path degeneracy 4 -H(4)+C4H7(28)=C4H6(30)+H2(12) 7.240e+12 0.000 0.000 - -! Reaction index: Chemkin #31; RMG #162 -! Template reaction: Disproportionation -! Flux pairs: C2H3(13), C2H4(8); C4H7(28), C4H6(30); -! 
Estimated using template (Cd_pri_rad;Cpri_Rrad) for rate rule (Cd_pri_rad;C/H2/De_Csrad) -! Multiplied by reaction path degeneracy 2 -C2H3(13)+C4H7(28)=C4H6(30)+C2H4(8) 2.420e+12 0.000 0.000 - -! Reaction index: Chemkin #32; RMG #104 -! Template reaction: intra_H_migration -! Flux pairs: C4H7(42), C4H7(28); -! Exact match found for rate rule (R2H_S;C_rad_out_H/OneDe;Cs_H_out_2H) -! Multiplied by reaction path degeneracy 3 -C4H7(42)=C4H7(28) 6.180e+09 1.220 47.800 - -! Reaction index: Chemkin #33; RMG #312 -! Template reaction: Disproportionation -! Flux pairs: C2H5(3), ethane(1); C4H7(42), C4H6(30); -! Estimated using template (C_rad/H2/Cs;Cmethyl_Csrad) for rate rule (C_rad/H2/Cs;Cmethyl_Csrad/H/Cd) -! Multiplied by reaction path degeneracy 3 -C4H7(42)+C2H5(3)=ethane(1)+C4H6(30) 6.900e+13 -0.350 0.000 - -! Reaction index: Chemkin #34; RMG #325 -! Template reaction: Disproportionation -! Flux pairs: CH3(2), C(6); C4H7(42), C4H6(30); -! Estimated using template (C_methyl;Cmethyl_Csrad) for rate rule (C_methyl;Cmethyl_Csrad/H/Cd) -! Multiplied by reaction path degeneracy 3 -C4H7(42)+CH3(2)=C4H6(30)+C(6) 6.570e+14 -0.680 0.000 - -! Reaction index: Chemkin #35; RMG #326 -! Template reaction: R_Addition_MultipleBond -! Flux pairs: H(4), C4H7(42); C4H6(30), C4H7(42); -! Exact match found for rate rule (Cds-HH_Cds-CdH;HJ) -! Multiplied by reaction path degeneracy 2 -C4H6(30)+H(4)=C4H7(42) 4.620e+08 1.640 -0.470 - -! Reaction index: Chemkin #36; RMG #329 -! Template reaction: Disproportionation -! Flux pairs: C2H3(13), C4H6(30); C4H7(42), C2H4(8); -! Estimated using template (Cd_pri_rad;Cmethyl_Csrad) for rate rule (Cd_pri_rad;Cmethyl_Csrad/H/Cd) -! Multiplied by reaction path degeneracy 3 -C4H7(42)+C2H3(13)=C4H6(30)+C2H4(8) 4.560e+14 -0.700 0.000 - -! Reaction index: Chemkin #37; RMG #335 -! Template reaction: Disproportionation -! Flux pairs: H(4), H2(12); C4H7(42), C4H6(30); -! Estimated using template (H_rad;Cmethyl_Csrad) for rate rule (H_rad;Cmethyl_Csrad/H/Cd) -! 
Multiplied by reaction path degeneracy 6 -C4H7(42)+H(4)=C4H6(30)+H2(12) 2.166e+13 0.000 0.000 - -END - diff --git a/rmgpy/reduction/test_data/minimal/chemkin/species_dictionary.txt b/rmgpy/reduction/test_data/minimal/chemkin/species_dictionary.txt deleted file mode 100644 index 2063369c1a..0000000000 --- a/rmgpy/reduction/test_data/minimal/chemkin/species_dictionary.txt +++ /dev/null @@ -1,155 +0,0 @@ -Ar -1 Ar u0 p4 c0 - -He -1 He u0 p1 c0 - -Ne -1 Ne u0 p4 c0 - -N2 -1 N u0 p1 c0 {2,T} -2 N u0 p1 c0 {1,T} - -ethane(1) -1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S} -2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S} -3 H u0 p0 c0 {1,S} -4 H u0 p0 c0 {1,S} -5 H u0 p0 c0 {1,S} -6 H u0 p0 c0 {2,S} -7 H u0 p0 c0 {2,S} -8 H u0 p0 c0 {2,S} - -CH3(2) -multiplicity 2 -1 C u1 p0 c0 {2,S} {3,S} {4,S} -2 H u0 p0 c0 {1,S} -3 H u0 p0 c0 {1,S} -4 H u0 p0 c0 {1,S} - -C2H5(3) -multiplicity 2 -1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S} -2 C u1 p0 c0 {1,S} {6,S} {7,S} -3 H u0 p0 c0 {1,S} -4 H u0 p0 c0 {1,S} -5 H u0 p0 c0 {1,S} -6 H u0 p0 c0 {2,S} -7 H u0 p0 c0 {2,S} - -C(6) -1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S} -2 H u0 p0 c0 {1,S} -3 H u0 p0 c0 {1,S} -4 H u0 p0 c0 {1,S} -5 H u0 p0 c0 {1,S} - -H(4) -multiplicity 2 -1 H u1 p0 c0 - -C2H4(8) -1 C u0 p0 c0 {2,D} {3,S} {4,S} -2 C u0 p0 c0 {1,D} {5,S} {6,S} -3 H u0 p0 c0 {1,S} -4 H u0 p0 c0 {1,S} -5 H u0 p0 c0 {2,S} -6 H u0 p0 c0 {2,S} - -H2(12) -1 H u0 p0 c0 {2,S} -2 H u0 p0 c0 {1,S} - -C3H7(14) -multiplicity 2 -1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S} -2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S} -3 C u1 p0 c0 {1,S} {9,S} {10,S} -4 H u0 p0 c0 {1,S} -5 H u0 p0 c0 {1,S} -6 H u0 p0 c0 {2,S} -7 H u0 p0 c0 {2,S} -8 H u0 p0 c0 {2,S} -9 H u0 p0 c0 {3,S} -10 H u0 p0 c0 {3,S} - -C2H3(13) -multiplicity 2 -1 C u0 p0 c0 {2,D} {3,S} {4,S} -2 C u1 p0 c0 {1,D} {5,S} -3 H u0 p0 c0 {1,S} -4 H u0 p0 c0 {1,S} -5 H u0 p0 c0 {2,S} - -C#C(25) -1 C u0 p0 c0 {2,T} {3,S} -2 C u0 p0 c0 {1,T} {4,S} -3 H u0 p0 c0 {1,S} -4 H u0 p0 c0 {2,S} - -C4H7(28) -multiplicity 2 -1 C u0 p0 c0 {2,S} {3,S} {5,S} {6,S} -2 C u0 p0 c0 {1,S} {4,D} {7,S} -3 C u1 p0 c0 {1,S} {8,S} {9,S} -4 C u0 p0 c0 {2,D} {10,S} {11,S} -5 H u0 p0 c0 {1,S} -6 H u0 p0 c0 {1,S} -7 H u0 p0 c0 {2,S} -8 H u0 p0 c0 {3,S} -9 H u0 p0 c0 {3,S} -10 H u0 p0 c0 {4,S} -11 H u0 p0 c0 {4,S} - -C4H7(38) -multiplicity 2 -1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S} -2 C u0 p0 c0 {1,S} {3,S} {6,S} {7,S} -3 C u0 p0 c0 {1,S} {2,S} {8,S} {9,S} -4 C u1 p0 c0 {1,S} {10,S} {11,S} -5 H u0 p0 c0 {1,S} -6 H u0 p0 c0 {2,S} -7 H u0 p0 c0 {2,S} -8 H u0 p0 c0 {3,S} -9 H u0 p0 c0 {3,S} -10 H u0 p0 c0 {4,S} -11 H u0 p0 c0 {4,S} - -C3H5(32) -multiplicity 2 -1 C u0 p0 c0 {2,S} {4,S} {5,S} {6,S} -2 C u0 p0 c0 {1,S} {3,D} {7,S} -3 C u1 p0 c0 {2,D} {8,S} -4 H u0 p0 c0 {1,S} -5 H u0 p0 c0 {1,S} -6 H u0 p0 c0 {1,S} -7 H u0 p0 c0 {2,S} -8 H u0 p0 c0 {3,S} - -C4H6(30) -1 C u0 p0 c0 {2,S} {3,D} {5,S} -2 C u0 p0 c0 {1,S} {4,D} {6,S} -3 C u0 p0 c0 {1,D} {7,S} {8,S} -4 C u0 p0 c0 {2,D} {9,S} {10,S} -5 H u0 p0 c0 {1,S} -6 H u0 p0 c0 {2,S} -7 H u0 p0 c0 {3,S} -8 H u0 p0 c0 {3,S} -9 H u0 p0 c0 {4,S} -10 H u0 p0 c0 {4,S} - -C4H7(42) -multiplicity 2 -1 C u0 p0 c0 {2,S} {5,S} {6,S} {7,S} -2 C u0 p0 c0 {1,S} {3,D} {8,S} -3 C u0 p0 c0 {2,D} {4,S} {9,S} -4 C u1 p0 c0 {3,S} {10,S} {11,S} -5 H u0 p0 c0 {1,S} -6 H u0 p0 c0 {1,S} -7 H u0 p0 c0 {1,S} -8 H u0 p0 c0 {2,S} -9 H u0 p0 c0 {3,S} -10 H u0 p0 c0 {4,S} -11 H u0 p0 c0 {4,S} - diff --git a/rmgpy/reduction/test_data/minimal/input.py b/rmgpy/reduction/test_data/minimal/input.py deleted file mode 100644 index a0910211d3..0000000000 --- a/rmgpy/reduction/test_data/minimal/input.py 
+++ /dev/null @@ -1,60 +0,0 @@ -# Data sources -database( - thermoLibraries = ['primaryThermoLibrary'], - reactionLibraries = [], - seedMechanisms = [], - kineticsDepositories = ['training'], - kineticsFamilies = 'default', - kineticsEstimator = 'rate rules', -) - -# List of species -species( - label='ethane', - reactive=True, - structure=SMILES("CC"), -) - -# Reaction systems -simpleReactor( - temperature=(1350,'K'), - pressure=(1.0,'bar'), - initialMoleFractions={ - "ethane": 1.0, - }, - # terminationConversion={ - # 'ethane': 0.9, - # }, - terminationTime=(1e-3,'s'), -) - -# simpleReactor( -# temperature=(1750,'K'), -# pressure=(10.0,'bar'), -# initialMoleFractions={ -# "ethane": 1.0, -# }, -# # terminationConversion={ -# # 'ethane': 0.9, -# # }, -# terminationTime=(1e-2,'s'), -# ) - -simulator( - atol=1e-16, - rtol=1e-8, -) - -model( - toleranceKeepInEdge=0.0, - toleranceMoveToCore=0.1, - toleranceInterruptSimulation=0.1, - maximumEdgeSpecies=100000 -) - -options( - units='si', - generatePlots=False, - saveEdgeSpecies=True, - saveSimulationProfiles=True, -) diff --git a/rmgpy/reduction/test_data/minimal/reduction_input.py b/rmgpy/reduction/test_data/minimal/reduction_input.py deleted file mode 100644 index 501c01e7ec..0000000000 --- a/rmgpy/reduction/test_data/minimal/reduction_input.py +++ /dev/null @@ -1,2 +0,0 @@ -targets = ['ethane', 'C'] -tolerance = .05 From cc6285876a15138b2ca65fdefdbb68c6539323ef Mon Sep 17 00:00:00 2001 From: Max Liu Date: Mon, 12 Aug 2019 12:34:22 -0400 Subject: [PATCH 009/155] Remove documentation for reduction module --- .../source/users/rmg/modules/index.rst | 1 - .../source/users/rmg/modules/reduction.rst | 81 ------------------- 2 files changed, 82 deletions(-) delete mode 100644 documentation/source/users/rmg/modules/reduction.rst diff --git a/documentation/source/users/rmg/modules/index.rst b/documentation/source/users/rmg/modules/index.rst index 51883a51fe..5db2c622c3 100644 --- a/documentation/source/users/rmg/modules/index.rst +++ b/documentation/source/users/rmg/modules/index.rst @@ -21,5 +21,4 @@ otherwise. convertFAME databaseScripts standardizeModelSpeciesNames - reduction isotopes diff --git a/documentation/source/users/rmg/modules/reduction.rst b/documentation/source/users/rmg/modules/reduction.rst deleted file mode 100644 index e5b91861c5..0000000000 --- a/documentation/source/users/rmg/modules/reduction.rst +++ /dev/null @@ -1,81 +0,0 @@ -.. _reduction: - -*********************************** -Reaction Reduction in an RMG Job -*********************************** - -This script is located at ``RMG-Py/rmgpy/reduction/main.py`` instead of the usual -``RMG-Py/scripts`` folder. - -RMG's method of generating reactions between all species in a core mechanism and -including them in the resulting model -is a robust process to obtain all chemistry. However, the huge number of cross reactions -lead to a non-sparse matrix, which can increase computational time when using -the resulting models in other simulations. - -To help reduce the complexity of RMG produced mechanisms, a mechanism reduction -script was written that eliminates unimportant reactions up to a set threshold. -Though this method will reduce number of reactions and guarantee target species -concentrations at the given conditions are minimally affected, no guarantee is given -that it will result in optimally reduced mechanism. - -To reduce an RMG job, you will need an additional file ``reduction_input.py``. -This file contains two terms that tell the reduction algorithm what to do. 
The -example file located in ``rmgpy/reduction/test_data/minimal/chemkin`` is written -as followed. :: - - targets = ['ethane', 'C'] - tolerance = .05 - -``targets`` is a list of species labels whose concentration change should be minimized, and ``tolerance`` -is the percent change the user can tolerate at the end of simulation. In the above -example, this would be 5%. -Higher values of ``tolerance`` lead to fewer final reactions with more error in -output rates. - -To run a simulation, type :: - - python $RMG/rmgpy/reduction/main.py input.py reduction_input.py chem_annotated.inp species_dictionary.txt - -A command line interface to the reduction driver script is contained in -``rmgpy/reduction/main.py``. It accepts four files: - -* ``input.py``: RMG-Py input file containing the settings to evaluate state variables. -* ``reduction_input.py``: Reduction input file containing the target variables and associated error tolerances to allow in the reduced model -* ``chem_annotated.inp``: the reaction mechanism to reduce. -* ``species_dictionary.txt``: the species dictionary associated with the reaction mechanism to reduce. - -The algorithm will reduce the number of reactions until the tolerance is no -longer met. If everything goes as planned, a ``chem_reduced.inp`` is generated -containing the reduced mechanism. In addition, a number of files -``chem_reduced_{i}.inp`` are created and correspond to the intermediate -reduced mechanisms. They can be used in place of the final reduced model, in case -the reduction algorithm does not terminate normally. - -You can go to ``$RMG/examples/reduction`` to try this module. - -Background ----------- - -The reduction algorithm computes the ratio of species reaction rate -(:math:`r_{ij}`) to the total rate of formation/consumption (:math:`R_i`) of all species i, -and compares this ratio to a tolerance (:math:`\epsilon`), with values of epsilon -between 0 and 1. If the ratio of a reaction is greater than epsilon it -is deemed *important* for the species in question. When a reaction is -not important for a single species, at any given time between t=0 and -the user-defined end time, then it is deemed unimportant for the given -system. As a result, the reaction is removed from the mechanism. - -The value of epsilon is determined by an optimization algorithm that -attempts to reduce the model as much as possible given the constraints -of the user-defined target variables. A logarithmic bisection -optimization algorithm is used to provide guesses for the value of -epsilon based on the two previous guesses that undershoot and overshoot -the user-defined relative deviation of the target variables - -A value of 5% for the relative deviation of the target variable implies -that the mole fraction of the target variable at the end time of the -batch reactor simulation as computed by the reduced mechanism may -deviate up to 5% w.r.t. to the mole fraction of the target variable at -the end time of the batch reactor simulation as computed by the full -mechanism. From ff031123a7655aee9e304e92361f625e9d731dd8 Mon Sep 17 00:00:00 2001 From: Max Liu Date: Mon, 12 Aug 2019 12:40:18 -0400 Subject: [PATCH 010/155] Remove scoop framework and all usages Parallelization using scoop has been deprecated and replaced by multiprocessing. This removes all of the remaining code. rmgpy/rmg/parreactTest.py and rmgpy/thermo/thermoengineTest.py were removed because they don't contain any tests that run. 
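For context, the scoop idiom removed here -- futures.map(WorkerWrapper(f), mapped_data, ...) in rmgpy/scoop_framework/util.py -- corresponds to the standard-library multiprocessing pool interface that replaces it. The sketch below is only an illustration of that pattern, not the RMG-Py implementation; react_one and spc_tuples are hypothetical names, while procnum = 2 and the example species mirror the removed parreactTest.py::

    # Illustrative multiprocessing.Pool equivalent of the removed scoop
    # futures.map(WorkerWrapper(f), ...) call; all names are placeholders.
    import multiprocessing

    def react_one(spc_tuple):
        # Stand-in for the per-pair reaction generation that scoop used to
        # distribute across remote workers.
        spc_a, spc_b = spc_tuple
        return (spc_a, spc_b)

    if __name__ == '__main__':
        spc_tuples = [('[OH]', 'CC'), ('[OH]', '[CH3]')]
        procnum = 2
        pool = multiprocessing.Pool(processes=procnum)
        results = pool.map(react_one, spc_tuples)
        pool.close()
        pool.join()

Unlike scoop, this needs no broker process and no '-m scoop' launch flag; the worker pool is created and torn down in-process.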
--- .../users/rmg/installation/dependencies.rst | 1 - requirements.txt | 7 - rmgpy/data/rmg.py | 22 +-- rmgpy/rmg/input.py | 16 +- rmgpy/rmg/parreactTest.py | 127 ------------ rmgpy/scoop_framework/__init__.py | 29 --- rmgpy/scoop_framework/framework.py | 146 -------------- rmgpy/scoop_framework/util.py | 184 ------------------ rmgpy/scoop_framework/utilTest.py | 140 ------------- rmgpy/thermo/thermoengineTest.py | 140 ------------- 10 files changed, 3 insertions(+), 809 deletions(-) delete mode 100644 rmgpy/rmg/parreactTest.py delete mode 100644 rmgpy/scoop_framework/__init__.py delete mode 100644 rmgpy/scoop_framework/framework.py delete mode 100644 rmgpy/scoop_framework/util.py delete mode 100644 rmgpy/scoop_framework/utilTest.py delete mode 100644 rmgpy/thermo/thermoengineTest.py diff --git a/documentation/source/users/rmg/installation/dependencies.rst b/documentation/source/users/rmg/installation/dependencies.rst index f798ed81d4..1a23eb9d83 100644 --- a/documentation/source/users/rmg/installation/dependencies.rst +++ b/documentation/source/users/rmg/installation/dependencies.rst @@ -46,7 +46,6 @@ Briefly, RMG depends on the following packages, almost all of which can be found * **quantities:** unit conversion * **rdkit:** open-source cheminformatics toolkit * **scipy:** fast mathematical toolkit -* **scoop:** parallelization of Python code * **setuptools:** for packaging Python projects * **sphinx:** documentation generation * **symmetry:** calculating symmetry numbers of chemical point groups diff --git a/requirements.txt b/requirements.txt index 7796b051f7..4935f43bec 100644 --- a/requirements.txt +++ b/requirements.txt @@ -31,13 +31,6 @@ coverage # for generating the documentation Sphinx - -# SCOOP Dependencies: -pyzmq - -# SCOOP for parallel computing -git+https://github.com/soravux/scoop.git@0.7.0 - # For unit tests mock diff --git a/rmgpy/data/rmg.py b/rmgpy/data/rmg.py index d9cf507eed..0b1df81779 100644 --- a/rmgpy/data/rmg.py +++ b/rmgpy/data/rmg.py @@ -43,7 +43,6 @@ from statmech import StatmechDatabase from solvation import SolvationDatabase from rmgpy.exceptions import DatabaseError -from rmgpy.scoop_framework.util import get, broadcast # Module-level variable to store the (only) instance of RMGDatabase in use. 
database = None @@ -114,7 +113,6 @@ def loadThermo(self, path, thermoLibraries=None, depository=True): """ self.thermo = ThermoDatabase() self.thermo.load(path, thermoLibraries, depository) - broadcast(self.thermo, 'thermo') def loadTransport(self, path, transportLibraries=None): """ @@ -123,8 +121,7 @@ def loadTransport(self, path, transportLibraries=None): """ self.transport = TransportDatabase() self.transport.load(path, transportLibraries) - broadcast(self.transport, 'transport') - + def loadForbiddenStructures(self, path = None): """ Load the RMG forbidden structures from the given `path` on disk, where @@ -135,7 +132,6 @@ def loadForbiddenStructures(self, path = None): self.forbiddenStructures = ForbiddenStructures() if path is not None: self.forbiddenStructures.load(path) - broadcast(self.forbiddenStructures, 'forbidden') def loadKinetics(self, path, @@ -169,8 +165,6 @@ def loadKinetics(self, depositories=kineticsDepositories ) - broadcast(self.kinetics, 'kinetics') - def loadSolvation(self, path): """ Load the RMG solvation database from the given `path` on disk, where @@ -178,8 +172,7 @@ def loadSolvation(self, path): """ self.solvation = SolvationDatabase() self.solvation.load(path) - broadcast(self.solvation, 'solvation') - + def loadStatmech(self, path, statmechLibraries=None, depository=True): """ Load the RMG statmech database from the given `path` on disk, where @@ -187,7 +180,6 @@ def loadStatmech(self, path, statmechLibraries=None, depository=True): """ self.statmech = StatmechDatabase() self.statmech.load(path, statmechLibraries, depository) - broadcast(self.statmech, 'statmech') def loadOld(self, path): """ @@ -259,15 +251,5 @@ def getDB(name=''): return database.forbiddenStructures else: raise Exception('Unrecognized database keyword: {}'.format(name)) - else: - try: - db = get(name) - if db: - return db - else: - raise DatabaseError - except DatabaseError: - logging.debug("Did not find a way to obtain the broadcasted database for {}.".format(name)) - raise raise DatabaseError('Could not get database with name: {}'.format(name)) diff --git a/rmgpy/rmg/input.py b/rmgpy/rmg/input.py index ad6602c064..a560d34fc9 100644 --- a/rmgpy/rmg/input.py +++ b/rmgpy/rmg/input.py @@ -47,7 +47,6 @@ from rmgpy.rmg.settings import ModelSettings, SimulatorSettings from model import CoreEdgeReactionModel -from rmgpy.scoop_framework.util import broadcast, get from rmgpy.exceptions import InputError ################################################################################ @@ -893,7 +892,6 @@ def readInputFile(path, rmg0): f.close() rmg.speciesConstraints['explicitlyAllowedMolecules'] = [] - broadcast(rmg.speciesConstraints, 'speciesConstraints') # convert keys from species names into species objects. 
for reactionSystem in rmg.reactionSystems: @@ -902,7 +900,6 @@ def readInputFile(path, rmg0): if rmg.quantumMechanics: rmg.quantumMechanics.setDefaultOutputDirectory(rmg.outputDirectory) rmg.quantumMechanics.initialize() - broadcast(rmg.quantumMechanics, 'quantumMechanics') logging.info('') @@ -961,8 +958,7 @@ def readThermoInputFile(path, rmg0): if rmg.quantumMechanics: rmg.quantumMechanics.setDefaultOutputDirectory(rmg.outputDirectory) rmg.quantumMechanics.initialize() - broadcast(rmg.quantumMechanics, 'quantumMechanics') - + logging.info('') ################################################################################ @@ -1153,15 +1149,5 @@ def getInput(name): return rmg.thermoCentralDatabase else: raise Exception('Unrecognized keyword: {}'.format(name)) - else: - try: - obj = get(name) - if obj: - return obj - else: - raise Exception - except Exception: - logging.debug("Did not find a way to obtain the variable for {}.".format(name)) - raise raise Exception('Could not get variable with name: {}'.format(name)) diff --git a/rmgpy/rmg/parreactTest.py b/rmgpy/rmg/parreactTest.py deleted file mode 100644 index 74b8fad9ad..0000000000 --- a/rmgpy/rmg/parreactTest.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### - -""" -This module contains unit tests of the rmgpy.parallel module. 
-""" - -import itertools -import os -import sys -import unittest -import logging -from external.wip import work_in_progress - -from rmgpy import settings -from rmgpy.data.kinetics import TemplateReaction -from rmgpy.data.rmg import RMGDatabase, database -from rmgpy.rmg.main import RMG -from rmgpy.rmg.model import Species - -from rmgpy.scoop_framework.framework import TestScoopCommon - -from rmgpy.rmg.react import * - -try: - from scoop import futures, _control, shared -except ImportError: - import logging as logging - logging.debug("Could not properly import SCOOP.") - -TESTFAMILY = 'H_Abstraction' - -def tearDown(): - """ - Reset the loaded database - """ - import rmgpy.data.rmg - rmgpy.data.rmg.database = None - -def load(): - """ - A method that is run before each unit test in this class. - """ - tearDown() - # set-up RMG object - rmg = RMG() - - # load kinetic database and forbidden structures - rmg.database = RMGDatabase() - path = os.path.join(settings['database.directory']) - - # forbidden structure loading - rmg.database.loadForbiddenStructures(os.path.join(path, 'forbiddenStructures.py')) - # kinetics family loading - rmg.database.loadKinetics(os.path.join(path, 'kinetics'), - kineticsFamilies=[TESTFAMILY], - reactionLibraries=[] - ) - -def generate(): - """ - Test that reaction generation from the available families works. - """ - load() - spcA = Species().fromSMILES('[OH]') - spcs = [Species().fromSMILES('CC'), Species().fromSMILES('[CH3]')] - spcTuples = [(spcA, spc) for spc in spcs] - procnum = 2 - - reactionList = list(itertools.chain.from_iterable(react(spcTuples, procnum))) - - if not reactionList: return False - - for rxn in reactionList: - if not isinstance(rxn, TemplateReaction): return False - - return True - -@work_in_progress -class ParallelReactTest(TestScoopCommon): - - def __init__(self, *args, **kwargs): - # Parent initialization - super(self.__class__, self).__init__(*args, **kwargs) - - # Only setup the scoop framework once, and not in every test method: - super(self.__class__, self).setUp() - - @unittest.skipUnless(sys.platform.startswith("linux"), - "test currently only runs on linux") - def test(self): - """ - Test that we can generate reactions in parallel. - """ - - result = futures._startup(generate) - self.assertEquals(result, True) - -if __name__ == '__main__' and os.environ.get('IS_ORIGIN', "1") == "1": - unittest.main() diff --git a/rmgpy/scoop_framework/__init__.py b/rmgpy/scoop_framework/__init__.py deleted file mode 100644 index 930c261434..0000000000 --- a/rmgpy/scoop_framework/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. 
# -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### diff --git a/rmgpy/scoop_framework/framework.py b/rmgpy/scoop_framework/framework.py deleted file mode 100644 index 4f63dabdcb..0000000000 --- a/rmgpy/scoop_framework/framework.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### - -""" -This module contains functionality for the correct execution of unit tests -that use the SCOOP framework. 
-""" - -import unittest -import subprocess -import time -import os -import sys -import signal - -from rmgpy.scoop_framework.util import logger as logging - -try: - - import scoop - scoop.DEBUG = False - - from scoop import futures, _control, utils, shared - from scoop._types import FutureQueue - from scoop.broker.structs import BrokerInfo - -except ImportError: - logging.debug("Could not properly import SCOOP.") - - -subprocesses = [] -def cleanSubprocesses(): - [a.kill() for a in subprocesses] - -try: - signal.signal(signal.SIGQUIT, cleanSubprocesses) -except AttributeError: - # SIGQUIT doesn't exist on Windows - signal.signal(signal.SIGTERM, cleanSubprocesses) - - -def port_ready(port, socket): - """Checks if a given port is already binded""" - try: - socket.connect(('127.0.0.1', port)) - except IOError: - return False - else: - socket.shutdown(2) - socket.close() - return True - - -class TestScoopCommon(unittest.TestCase): - def __init__(self, *args, **kwargs): - # Parent initialization - super(TestScoopCommon, self).__init__(*args, **kwargs) - - @classmethod - def setUpClass(cls): - global subprocesses - import socket, datetime, time - - # Start the server - cls.server = subprocess.Popen([sys.executable, "-m", "scoop.broker.__main__", - "--tPort", "5555", "--mPort", "5556"]) - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - begin = datetime.datetime.now() - while not port_ready(5555, s): - if (datetime.datetime.now() - begin > datetime.timedelta(seconds=3)): - raise Exception('Could not start server!') - subprocesses.append(cls.server) - - # Setup worker environment - scoop.IS_RUNNING = True - scoop.IS_ORIGIN = True - scoop.WORKER_NAME = 'origin'.encode() - scoop.BROKER_NAME = 'broker'.encode() - scoop.BROKER = BrokerInfo("127.0.0.1", - 5555, - 5556, - "127.0.0.1") - scoop.worker = (scoop.WORKER_NAME, scoop.BROKER_NAME) - scoop.MAIN_MODULE = "tests.py" - scoop.VALID = True - scoop.DEBUG = False - scoop.SIZE = 1 - _control.execQueue = FutureQueue() - - - @classmethod - def tearDownClass(cls): - global subprocesses - import socket, datetime, time - _control.execQueue.shutdown() - del _control.execQueue - _control.futureDict.clear() - try: - cls.w.terminate() - cls.w.wait() - except: - pass - # Destroy the server - if cls.server.poll() == None: - try: - cls.server.terminate() - cls.server.wait() - except: - pass - # Stabilise zmq after a deleted socket - del subprocesses[:] - - # Wait for the previous server to be correctly terminated - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - begin = datetime.datetime.now() - while port_ready(5555, s): - if (datetime.datetime.now() - begin > datetime.timedelta(seconds=3)): - raise Exception('Could not terminate server!') - s.close() diff --git a/rmgpy/scoop_framework/util.py b/rmgpy/scoop_framework/util.py deleted file mode 100644 index 900dd37cad..0000000000 --- a/rmgpy/scoop_framework/util.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. 
West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### - -""" -This module contains functionality for the parallel execution of RMG-Py. -""" - -import sys -import traceback -import warnings -from functools import wraps - -logger = None - -try: - from scoop.futures import map, submit - from scoop import shared - from scoop import logger as scooplogger - logger = scooplogger - # logger.setLevel(20)#10 : debug, 20: info -except ImportError: - import logging as logging - logger = logging.getLogger() - logger.debug("Could not properly import SCOOP.") - -def warnScoopStartedProperly(func): - @wraps(func) - def wrapper(*args, **kwargs): - - warnings.warn("The option scoop is no longer supported" - "and may be removed after Version: 2.4 ", DeprecationWarning) - - futures_not_loaded = 'scoop.futures' not in sys.modules - - warnings.simplefilter('ignore', RuntimeWarning) - - try: - controller_not_started = not ( - sys.modules['scoop.futures'].__dict__.get("_controller", None) - ) - except KeyError: - warnings.warn( - "SCOOP was not started properly.\n" - "Be sure to start your program with the " - "'-m scoop' parameter. You can find " - "further information in the " - "documentation.\n", - RuntimeWarning - ) - return - - if futures_not_loaded or controller_not_started: - warnings.warn( - "SCOOP was not started properly.\n" - "Be sure to start your program with the " - "'-m scoop' parameter. You can find " - "further information in the " - "documentation.\n", - RuntimeWarning - ) - - return - - return func(*args, **kwargs) - return wrapper - -class WorkerWrapper(object): - """ - This class can be used to expose the exception trace of a worker - that was running on a remote worker. - - Use it as follows: - - Wrap the function that will be running on the remote worker with the current class: - - futures.map(WorkerWrapper(f), mapped_data, ...) or - futures.submit(WorkerWrapper(f), mapped_data, ...) 
- - """ - __name__ = 'WorkerWrapper' - - warnings.warn("The option scoop is no longer supported" - "and may be removed after Version: 2.4 ", DeprecationWarning) - - def __init__(self, myfn): - self.myfn = myfn - - def __call__(self, *args, **kwargs): - try: - return self.myfn(*args, **kwargs) - except: - type, value, tb = sys.exc_info() - lines = traceback.format_exception(type, value, tb) - print ''.join(lines) - raise - -@warnScoopStartedProperly -def broadcast(obj, key): - """ - Broadcasts the object across the workers using the key parameter as the key. - """ - warnings.warn("The option scoop is no longer supported" - "and may be removed after Version: 2.4 ", DeprecationWarning) - - # kwargs = {key : obj} - # try: - # if shared.getConst(key): - # logger.debug('An object with the key {} was already broadcasted.'.format(key)) - # else: - # shared.setConst(**kwargs) - # except NameError, e: - # """ - # Name error will be caught when the SCOOP library is not imported properly. - # """ - # logger.debug('SCOOP not loaded. Not broadcasting the object {}'.format(obj)) - -@warnScoopStartedProperly -def get(key): - """ - Searches for the shared variable to retrieve identified by the - parameter key. - """ - - warnings.warn("The option scoop is no longer supported" - "and may be removed after Version: 2.4 ", DeprecationWarning) - # try: - # data = shared.getConst(key, timeout=1e-9) - # return data - # except NameError: - # """ - # Name error will be caught when the SCOOP library is not imported properly. - # """ - # logger.debug('SCOOP not loaded. Not retrieving the shared object with key {}'.format(key)) - -def map_(*args, **kwargs): - warnings.warn("The option scoop is no longer supported" - "and may be removed after Version: 2.4 ", DeprecationWarning) - return map(WorkerWrapper(args[0]), *args[1:], **kwargs) - -def submit_(func, *args, **kwargs): - """ - Task submission of a function. - - returns the return value of the called function, or - when SCOOP is loaded, the future object. - """ - warnings.warn("The option scoop is no longer supported" - "and may be removed after Version: 2.4 ", DeprecationWarning) - try: - task = submit(WorkerWrapper(func), *args, **kwargs)#returns immediately - return task - except Exception: - """ - Name error will be caught when the SCOOP library is not imported properly. - """ - logger.debug('SCOOP not loaded. Submitting serial mode.') - return func(*args, **kwargs) diff --git a/rmgpy/scoop_framework/utilTest.py b/rmgpy/scoop_framework/utilTest.py deleted file mode 100644 index 4bb6dc332c..0000000000 --- a/rmgpy/scoop_framework/utilTest.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. 
West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### - -""" -This module contains unit tests of the rmgpy.parallel module. -""" - -import os -import sys -import unittest -from external.wip import work_in_progress - -from rmgpy.scoop_framework.framework import TestScoopCommon - -try: - from scoop import futures, _control, shared -except ImportError: - import logging as logging - logging.debug("Could not properly import SCOOP.") - -from .util import * - -def boom(): - return 0 / 0 - -class WorkerWrapperTest(unittest.TestCase): - - def test_WorkerWrapper(self): - """ - Test that the WorkerWrapper correctly redirects the error - message. - """ - f = WorkerWrapper(boom) - with self.assertRaises(ZeroDivisionError): - f() - - -def funcBroadcast(): - """ - Broadcast the data with the given key, - and retrieve it again by querying the key again. - """ - data = 'foo' - key = 'bar' - - broadcast(data, key) - - try: - assert data == shared.getConst(key) - except AssertionError: - return False - - return True - -def funcRetrieve(): - """ - Broadcast the data with the given key, - retrieve it again by querying the key again. - """ - - data = 'foo' - key = 'bar' - - - broadcast(data, key) - - try: - assert data == get(key) - except AssertionError: - return False - - return True - -@work_in_progress -class BroadcastTest(TestScoopCommon): - - def __init__(self, *args, **kwargs): - # Parent initialization - super(self.__class__, self).__init__(*args, **kwargs) - - # Only setup the scoop framework once, and not in every test method: - super(self.__class__, self).setUp() - - @unittest.skipUnless(sys.platform.startswith("linux"), - "test currently only runs on linux") - def test_generic(self): - """ - Test that we can broadcast a simple string. - """ - - result = futures._startup(funcBroadcast) - self.assertEquals(result, True) - -@work_in_progress -class GetTest(TestScoopCommon): - - def __init__(self, *args, **kwargs): - # Parent initialization - super(self.__class__, self).__init__(*args, **kwargs) - - # Only setup the scoop framework once, and not in every test method: - super(self.__class__, self).setUp() - - def test_generic(self): - """ - Test that we can retrieve a simple shared string. 
- """ - - result = futures._startup(funcRetrieve) - self.assertEquals(result, True) - -if __name__ == '__main__' and os.environ.get('IS_ORIGIN', "1") == "1": - unittest.main() diff --git a/rmgpy/thermo/thermoengineTest.py b/rmgpy/thermo/thermoengineTest.py deleted file mode 100644 index 2a879a3e79..0000000000 --- a/rmgpy/thermo/thermoengineTest.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. # -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### - -""" -This module contains unit tests of the rmgpy.parallel module. -""" - -import os -import unittest -import random - -from rmgpy import settings -from rmgpy.data.rmg import RMGDatabase -from rmgpy.rmg.main import RMG - -from rmgpy.species import Species -from rmgpy.thermo.thermoengine import submit - -try: - from scoop import futures, _control, shared -except ImportError: - import logging as logging - logging.debug("Could not properly import SCOOP.") - - -def load(): - tearDown() - rmg = RMG()#for solvent - database = RMGDatabase() - database.loadThermo(os.path.join(settings['database.directory'], 'thermo')) - database.loadTransport(os.path.join(settings['database.directory'], 'transport')) - database.loadSolvation(os.path.join(settings['database.directory'], 'solvation')) - -def tearDown(): - """ - Reset the loaded database - """ - import rmgpy.data.rmg - rmgpy.data.rmg.database = None - -def funcSubmit(): - """ - Test that we can submit a number of species. - """ - load() - - spcs = [ - Species().fromSMILES('C'),\ - Species().fromSMILES('CC'), \ - Species().fromSMILES('CCC') - ] - - for spc in spcs: - submit(spc) - - return True - -def funcGet(): - """ - Test if we can retrieve thermo of species even before we have submitted them explicitly. 
- """ - load() - - spcs = [ - Species().fromSMILES('C'), - Species().fromSMILES('CC'), \ - Species().fromSMILES('CCC') - ] - - output = [] - for spc in spcs: - data = spc.getThermoData() - output.append((spc, data)) - - for spc, data in output: - if not data: - return False - - return True - -def funcSubmitGet(): - """ - Test if we can retrieve thermo of species after submitting some of them. - """ - load() - - spcs = [ - Species().fromSMILES('C'),\ - Species().fromSMILES('CC'), \ - Species().fromSMILES('CCC') - ] - - for spc in spcs: - submit(spc) - - absent = Species().fromSMILES('[CH3]') - data = absent.getThermoData() - if not data: return False - - present = Species().fromSMILES('CC') - data = present.getThermoData() - if not data: return False - - random.shuffle(spcs) - for spc in spcs: - data = spc.getThermoData() - if not data: return False - - return True - - -if __name__ == '__main__' and os.environ.get('IS_ORIGIN', "1") == "1": - unittest.main() From 4c443e729715c02e32c245b2ba0152dd2d8d5ff8 Mon Sep 17 00:00:00 2001 From: alongd Date: Fri, 16 Aug 2019 00:12:29 -0400 Subject: [PATCH 011/155] Various PEP-8 modifications and futurization to Arkane Add list() for iterators Standardized imports in Arkane (and import numpy as np) Replaced 'xrange' with 'range' Replaced itervalues() and iteritems() with values() and items() Replace deprecated 'warn' with 'warning' Standardized module docstrings in Arkane Use CamelCase for class names Make classes in Arkane inherit from object Execute a file correctly in exec() --- Arkane.py | 7 +- arkane/__init__.py | 14 +- arkane/common.py | 49 ++-- arkane/commonTest.py | 44 +-- arkane/data/Benzyl/benzyl.py | 10 +- arkane/data/Benzyl/input.py | 2 +- arkane/data/methoxy.py | 309 ++++++++++----------- arkane/data/methoxy_explore.py | 334 +++++++++++------------ arkane/encorr/__init__.py | 8 +- arkane/encorr/corr.py | 8 +- arkane/encorr/data.py | 3 - arkane/encorr/mbac.py | 9 +- arkane/encorr/pbac.py | 4 +- arkane/exceptions.py | 1 + arkane/explorer.py | 133 +++++----- arkane/explorerTest.py | 20 +- arkane/gaussian.py | 110 ++++---- arkane/gaussianTest.py | 48 ++-- arkane/input.py | 60 ++--- arkane/inputTest.py | 34 ++- arkane/kinetics.py | 290 ++++++++++---------- arkane/kineticsTest.py | 16 +- arkane/log.py | 6 +- arkane/main.py | 97 +++---- arkane/mainTest.py | 13 +- arkane/molpro.py | 46 ++-- arkane/molproTest.py | 33 ++- arkane/output.py | 9 +- arkane/pdep.py | 92 +++---- arkane/pdepTest.py | 10 +- arkane/qchem.py | 72 ++--- arkane/qchemTest.py | 10 +- arkane/sensitivity.py | 34 +-- arkane/statmech.py | 472 +++++++++++++++++---------------- arkane/statmechTest.py | 8 +- arkane/thermo.py | 37 ++- arkane/thermoTest.py | 7 +- arkane/util.py | 14 +- 38 files changed, 1258 insertions(+), 1215 deletions(-) diff --git a/Arkane.py b/Arkane.py index 244950e7ad..fb4b8f3f05 100755 --- a/Arkane.py +++ b/Arkane.py @@ -33,7 +33,7 @@ reaction rates and other properties used in detailed kinetics models using various methodologies and theories. To run Arkane, use the command :: - $ python arkane.py FILE + $ python Arkane.py FILE where ``FILE`` is the path to an Arkane input file describing the job to execute. Arkane will run the specified job, writing the output to @@ -41,7 +41,7 @@ files appearing in the same directory as the input file. Some additional command-line arguments are available; run the command :: - $ python arkane.py -h + $ python Arkane.py -h for more information. 
""" @@ -49,7 +49,7 @@ import os import logging -from arkane.main import * +from arkane.main import Arkane arkane = Arkane() @@ -61,6 +61,7 @@ try: import psutil + process = psutil.Process(os.getpid()) memory_info = process.memory_info() logging.info('Memory used: %.2f MB' % (memory_info.rss / 1024.0 / 1024.0)) diff --git a/arkane/__init__.py b/arkane/__init__.py index 11c5e81bf2..c726571551 100644 --- a/arkane/__init__.py +++ b/arkane/__init__.py @@ -1,10 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -""" -initialize imports -""" - ############################################################################### # # # RMG - Reaction Mechanism Generator # @@ -32,9 +28,13 @@ # # ############################################################################### +""" +initialize imports +""" + +from arkane.common import ArkaneSpecies +from arkane.kinetics import KineticsJob from arkane.main import Arkane +from arkane.pdep import PressureDependenceJob from arkane.statmech import StatMechJob from arkane.thermo import ThermoJob -from arkane.kinetics import KineticsJob -from arkane.pdep import PressureDependenceJob -from arkane.common import ArkaneSpecies diff --git a/arkane/common.py b/arkane/common.py index 4eef1ec963..22ddbe07df 100644 --- a/arkane/common.py +++ b/arkane/common.py @@ -1,10 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -""" -Arkane common module -""" - ############################################################################### # # # RMG - Reaction Mechanism Generator # @@ -32,33 +28,36 @@ # # ############################################################################### -import numpy -import os.path +""" +Arkane common module +""" + import logging -import time +import os.path import string +import time +import numpy as np import yaml -from rmgpy.rmgobject import RMGObject +import rmgpy.constants as constants from rmgpy import __version__ -from rmgpy.quantity import ScalarQuantity, ArrayQuantity from rmgpy.molecule.element import elementList from rmgpy.molecule.translator import toInChI, toInChIKey +from rmgpy.pdep.collision import SingleExponentialDown +from rmgpy.quantity import ScalarQuantity, ArrayQuantity +from rmgpy.rmgobject import RMGObject +from rmgpy.species import Species, TransitionState from rmgpy.statmech.conformer import Conformer from rmgpy.statmech.rotation import LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor from rmgpy.statmech.torsion import HinderedRotor, FreeRotor from rmgpy.statmech.translation import IdealGasTranslation from rmgpy.statmech.vibration import HarmonicOscillator -from rmgpy.pdep.collision import SingleExponentialDown -from rmgpy.transport import TransportData from rmgpy.thermo import NASA, Wilhoit, ThermoData, NASAPolynomial -from rmgpy.species import Species, TransitionState -import rmgpy.constants as constants +from rmgpy.transport import TransportData from arkane.pdep import PressureDependenceJob - ################################################################################ @@ -137,7 +136,7 @@ def __repr__(self): """ result = '{0!r}'.format(self.__class__.__name__) result += '{' - for key, value in self.as_dict().iteritems(): + for key, value in self.as_dict().items(): if key != 'class': result += '{0!r}: {1!r}'.format(str(key), str(value)) result += '}' @@ -183,7 +182,7 @@ def update_species_attributes(self, species=None): data = species.getThermoData() h298 = data.getEnthalpy(298) / 4184. 
s298 = data.getEntropy(298) / 4.184 - temperatures = numpy.array([300, 400, 500, 600, 800, 1000, 1500, 2000, 2400]) + temperatures = np.array([300, 400, 500, 600, 800, 1000, 1500, 2000, 2400]) cp = [] for t in temperatures: cp.append(data.getHeatCapacity(t) / 4.184) @@ -279,7 +278,7 @@ def load_yaml(self, path, label=None, pdep=False): 'NASA': NASA, 'NASAPolynomial': NASAPolynomial, 'ThermoData': ThermoData, - 'np_array': numpy.array, + 'np_array': np.array, } freq_data = None if 'imaginary_frequency' in data: @@ -331,17 +330,17 @@ def check_conformer_energy(Vlist, path): is not 0.5 kcal/mol (or more) higher than any other energies in the scan. If so, print and log a warning message. """ - Vlist = numpy.array(Vlist, numpy.float64) - Vdiff = (Vlist[0] - numpy.min(Vlist)) * constants.E_h * constants.Na / 1000 - if Vdiff >= 2: # we choose 2 kJ/mol to be the critical energy + v_list = np.array(Vlist, np.float64) + v_diff = (v_list[0] - np.min(v_list)) * constants.E_h * constants.Na / 1000 + if v_diff >= 2: # we choose 2 kJ/mol to be the critical energy logging.warning('the species corresponding to {path} is different in energy from the lowest energy conformer ' - 'by {diff} kJ/mol. This can cause significant errors in your computed rate constants.'.format( - path=os.path.basename(path), diff=Vdiff)) + 'by {diff} kJ/mol. This can cause significant errors in your computed rate constants.' + .format(path=os.path.basename(path), diff=v_diff)) def get_element_mass(input_element, isotope=None): """ - Returns the mass and z number of the requested isotop for a given element. + Returns the mass and z number of the requested isotope for a given element. 'input_element' can be wither the atomic number (integer) or an element symbol. 'isotope' is an integer of the atomic z number. If 'isotope' is None, returns the most common isotope. Data taken from NIST, https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl (accessed October 2018) @@ -368,13 +367,13 @@ def get_element_mass(input_element, isotope=None): mass = iso_mass[1] break else: - raise ValueError("Could not find requested isotop {0} for element {1}".format(isotope, symbol)) + raise ValueError("Could not find requested isotope {0} for element {1}".format(isotope, symbol)) else: # no specific isotope is required if len(mass_list[0]) == 2: # isotope weight is unavailable, use the first entry mass = mass_list[0][1] - logging.warn("Assuming isotop {0} is representative of element {1}".format(mass_list[0][0], symbol)) + logging.warning('Assuming isotope {0} is representative of element {1}'.format(mass_list[0][0], symbol)) else: # use the most common isotope max_weight = mass_list[0][2] diff --git a/arkane/commonTest.py b/arkane/commonTest.py index 65536b50de..0d8daf37d5 100644 --- a/arkane/commonTest.py +++ b/arkane/commonTest.py @@ -29,27 +29,27 @@ ############################################################################### """ -This script contains unit tests of the :mod:`arkane.common` module. +This module contains unit tests of the :mod:`arkane.common` module. 
""" -import unittest -import numpy +import logging import os import shutil -import logging +import unittest + +import numpy as np import rmgpy import rmgpy.constants as constants from rmgpy.pdep.collision import SingleExponentialDown -from rmgpy.species import Species, TransitionState from rmgpy.quantity import ScalarQuantity +from rmgpy.species import Species, TransitionState from rmgpy.thermo import NASA, ThermoData from arkane import Arkane, input from arkane.common import ArkaneSpecies, get_element_mass -from arkane.statmech import InputError, StatMechJob from arkane.input import jobList - +from arkane.statmech import InputError, StatMechJob ################################################################################ @@ -67,8 +67,8 @@ def test_check_conformer_energy(self): -272.2788749196, -272.278496709, -272.2779350675, -272.2777008843, -272.2777167286, -272.2780937643, -272.2784838846, -272.2788050464, -272.2787865352, -272.2785091607, -272.2779977452, -272.2777957743, -272.2779134906, -272.2781827547, -272.278443339, -272.2788244214, -272.2787748749] - v_list = numpy.array(v_list, numpy.float64) - v_diff = (v_list[0] - numpy.min(v_list)) * constants.E_h * constants.Na / 1000 + v_list = np.array(v_list, np.float64) + v_diff = (v_list[0] - np.min(v_list)) * constants.E_h * constants.Na / 1000 self.assertAlmostEqual(v_diff / 2.7805169838282797, 1, 5) @@ -81,10 +81,10 @@ class TestArkaneJob(unittest.TestCase): def setUp(cls): """A method that is run before each unit test in this class""" arkane = Arkane() - jobList = arkane.loadInputFile(os.path.join(os.path.dirname(os.path.abspath(__file__)), - 'data', 'methoxy.py')) - pdepjob = jobList[-1] - cls.kineticsjob = jobList[0] + job_list = arkane.loadInputFile(os.path.join(os.path.dirname(os.path.abspath(__file__)), + 'data', 'methoxy.py')) + pdepjob = job_list[-1] + cls.kineticsjob = job_list[0] pdepjob.activeJRotor = True network = pdepjob.network cls.Nisom = len(network.isomers) @@ -153,7 +153,7 @@ def testTemperaturesList(self): """ Test the temperature list. """ - self.assertTrue(numpy.array_equal(self.TlistValue, numpy.array([450, 500, 678, 700]))) + self.assertTrue(np.array_equal(self.TlistValue, np.array([450, 500, 678, 700]))) def testPminValue(self): """ @@ -177,7 +177,7 @@ def testPressureList(self): """ Test the pressure list. 
""" - self.assertTrue(numpy.array_equal(self.PlistValue, numpy.array([0.01, 0.1, 1, 3, 10, 100, 1000]))) + self.assertTrue(np.array_equal(self.PlistValue, np.array([0.01, 0.1, 1, 3, 10, 100, 1000]))) def testGenerateTemperatureList(self): """ @@ -330,8 +330,8 @@ def test_dump_yaml(self): """ Test properly dumping the ArkaneSpecies object and respective sub-objects """ - jobList = self.arkane.loadInputFile(self.dump_input_path) - for job in jobList: + job_list = self.arkane.loadInputFile(self.dump_input_path) + for job in job_list: job.execute(output_directory=self.dump_path) self.assertTrue(os.path.isfile(self.dump_output_file)) @@ -340,12 +340,12 @@ def test_create_and_load_yaml(self): Test properly loading the ArkaneSpecies object and respective sub-objects """ # Create YAML file by running Arkane - jobList = self.arkane.loadInputFile(self.dump_input_path) - for job in jobList: + job_list = self.arkane.loadInputFile(self.dump_input_path) + for job in job_list: job.execute(output_directory=self.dump_path) # Load in newly created YAML file - arkane_spc_old = jobList[0].arkane_species + arkane_spc_old = job_list[0].arkane_species arkane_spc = ArkaneSpecies.__new__(ArkaneSpecies) arkane_spc.load_yaml(path=os.path.join(self.dump_path, 'species', arkane_spc_old.label + '.yml')) @@ -358,7 +358,7 @@ def test_create_and_load_yaml(self): self.assertEqual(arkane_spc.smiles, 'CC') self.assertTrue('8 H u0 p0 c0 {2,S}' in arkane_spc.adjacency_list) self.assertEqual(arkane_spc.label, 'C2H6') - self.assertEqual(arkane_spc.frequency_scale_factor, 0.99*1.014) # checks float conversion + self.assertEqual(arkane_spc.frequency_scale_factor, 0.99 * 1.014) # checks float conversion self.assertFalse(arkane_spc.use_bond_corrections) self.assertAlmostEqual(arkane_spc.conformer.modes[2].frequencies.value_si[0], 830.38202, 4) # HarmonicOsc. 
self.assertIsInstance(arkane_spc.energy_transfer_model, SingleExponentialDown) @@ -426,7 +426,7 @@ class TestGetMass(unittest.TestCase): """ def test_get_mass(self): - """Test that the correct mass/number/isotop is returned from get_element_mass""" + """Test that the correct mass/number/isotope is returned from get_element_mass""" self.assertEquals(get_element_mass(1), (1.00782503224, 1)) # test input by integer self.assertEquals(get_element_mass('Si'), (27.97692653465, 14)) # test string input and most common isotope self.assertEquals(get_element_mass('C', 13), (13.00335483507, 6)) # test specific isotope diff --git a/arkane/data/Benzyl/benzyl.py b/arkane/data/Benzyl/benzyl.py index 6502c5aff2..975f4fd868 100755 --- a/arkane/data/Benzyl/benzyl.py +++ b/arkane/data/Benzyl/benzyl.py @@ -7,8 +7,8 @@ } bonds = { - 'C=C': 3, - 'C-C': 4, + 'C=C': 3, + 'C-C': 4, 'C-H': 7, } @@ -28,6 +28,6 @@ frequencies = GaussianLog('BenzylFreq.log') -rotors = [ - HinderedRotor(scanLog=GaussianLog('BenzylRot1.log'), pivots=[12,4], top=[12,13,14], symmetry=2, fit='best'), - ] +rotors = [ + HinderedRotor(scanLog=GaussianLog('BenzylRot1.log'), pivots=[12, 4], top=[12, 13, 14], symmetry=2, fit='best'), +] diff --git a/arkane/data/Benzyl/input.py b/arkane/data/Benzyl/input.py index cc64e23b63..5ff735a696 100755 --- a/arkane/data/Benzyl/input.py +++ b/arkane/data/Benzyl/input.py @@ -9,4 +9,4 @@ species('C7H7', 'benzyl.py') statmech('C7H7') -thermo('C7H7', 'NASA') \ No newline at end of file +thermo('C7H7', 'NASA') diff --git a/arkane/data/methoxy.py b/arkane/data/methoxy.py index bfa13b0d4c..e67728d816 100644 --- a/arkane/data/methoxy.py +++ b/arkane/data/methoxy.py @@ -1,226 +1,231 @@ - title = 'methoxy decomposition to H + CH2O' -description = \ +description = '' frequencyScaleFactor = 1.0 """ This example illustrates how to manually set up an Arkane input file for a small P-dep reaction system [using only the -RRHO assumption, and without tunneling, although this can be easily implemented]. Such a calculation is desireable if -the user wishes to supply experimentally determined freqeuncies, for example. Althgou some coommented notes below may be +RRHO assumption, and without tunneling, although this can be easily implemented]. Such a calculation is desirable if +the user wishes to supply experimentally determined frequencies, for example. Although some commented notes below may be useful, see http://reactionmechanismgenerator.github.io/RMG-Py/users/arkane/index.html for more documented information about Arkane and creating input files. (information pertaining this file is adopted by Dames and Golden, 2013, JPCA 117 (33) 7686-96.) """ transitionState( - label = 'TS3', - E0 = (34.1,'kcal/mol'), # this INCLUDES the ZPE. Note that other energy units are also possible (e.g., kJ/mol) - spinMultiplicity = 2, - opticalIsomers = 1, - frequency = (-967,'cm^-1'), - modes = [ # these modes are used to compute the partition functions - HarmonicOscillator(frequencies=([466,581,1169,1242,1499,1659,2933,3000],'cm^-1')), - NonlinearRotor(rotationalConstant=([0.970, 1.029, 3.717],"cm^-1"),symmetry=1, quantum=False), - IdealGasTranslation(mass=(31.01843,"g/mol")) #this must be included for every species/ts + label='TS3', + E0=(34.1, 'kcal/mol'), # this INCLUDES the ZPE. 
Note that other energy units are also possible (e.g., kJ/mol) + spinMultiplicity=2, + opticalIsomers=1, + frequency=(-967, 'cm^-1'), + modes=[ # these modes are used to compute the partition functions + HarmonicOscillator(frequencies=([466, 581, 1169, 1242, 1499, 1659, 2933, 3000], 'cm^-1')), + NonlinearRotor(rotationalConstant=([0.970, 1.029, 3.717], "cm^-1"), symmetry=1, quantum=False), + IdealGasTranslation(mass=(31.01843, "g/mol")) # this must be included for every species/ts ], ) transitionState( - label = 'TS2', - E0 = (38.9,'kcal/mol'), - spinMultiplicity = 2, - opticalIsomers = 1, - frequency = (-1934,'cm^-1'), - modes = [ - HarmonicOscillator(frequencies=([792, 987 ,1136, 1142, 1482 ,2441 ,3096, 3183],'cm^-1')), - NonlinearRotor(rotationalConstant=([0.928,0.962,5.807],"cm^-1"),symmetry=1, quantum=False), - IdealGasTranslation(mass=(31.01843,"g/mol")) + label='TS2', + E0=(38.9, 'kcal/mol'), + spinMultiplicity=2, + opticalIsomers=1, + frequency=(-1934, 'cm^-1'), + modes=[ + HarmonicOscillator(frequencies=([792, 987, 1136, 1142, 1482, 2441, 3096, 3183], 'cm^-1')), + NonlinearRotor(rotationalConstant=([0.928, 0.962, 5.807], "cm^-1"), symmetry=1, quantum=False), + IdealGasTranslation(mass=(31.01843, "g/mol")) ], ) transitionState( - label = 'TS1', - E0 = (39.95,'kcal/mol'), - spinMultiplicity = 2, - opticalIsomers = 1, - frequency = (-1756,'cm^-1'), - modes = [ - HarmonicOscillator(frequencies=([186 ,626 ,1068, 1234, 1474, 1617, 2994 ,3087],'cm^-1')), - NonlinearRotor(rotationalConstant=([0.966,0.986,5.253],"cm^-1"),symmetry=1, quantum=False), - IdealGasTranslation(mass=(31.01843,"g/mol")) + label='TS1', + E0=(39.95, 'kcal/mol'), + spinMultiplicity=2, + opticalIsomers=1, + frequency=(-1756, 'cm^-1'), + modes=[ + HarmonicOscillator(frequencies=([186, 626, 1068, 1234, 1474, 1617, 2994, 3087], 'cm^-1')), + NonlinearRotor(rotationalConstant=([0.966, 0.986, 5.253], "cm^-1"), symmetry=1, quantum=False), + IdealGasTranslation(mass=(31.01843, "g/mol")) ], ) species( - label = 'methoxy', - structure = SMILES('C[O]'), - E0 = (9.44,'kcal/mol'), - modes = [ - HarmonicOscillator(frequencies=([758,960,1106 ,1393,1403,1518,2940,3019,3065],'cm^-1')), - NonlinearRotor(rotationalConstant=([0.916, 0.921, 5.251],"cm^-1"),symmetry=3, quantum=False), - IdealGasTranslation(mass=(31.01843,"g/mol")), + label='methoxy', + structure=SMILES('C[O]'), + E0=(9.44, 'kcal/mol'), + modes=[ + HarmonicOscillator(frequencies=([758, 960, 1106, 1393, 1403, 1518, 2940, 3019, 3065], 'cm^-1')), + NonlinearRotor(rotationalConstant=([0.916, 0.921, 5.251], "cm^-1"), symmetry=3, quantum=False), + IdealGasTranslation(mass=(31.01843, "g/mol")), ], - spinMultiplicity = 3.88, # 3+exp(-89/T) - opticalIsomers = 1, - molecularWeight = (31.01843,'amu'), - collisionModel = TransportData(sigma=(3.69e-10,'m'), epsilon=(4.0,'kJ/mol')), - energyTransferModel = SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), + spinMultiplicity=3.88, # 3+exp(-89/T) + opticalIsomers=1, + molecularWeight=(31.01843, 'amu'), + collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), ) - species( - label = 'CH2O', - E0 = (28.69,'kcal/mol'), - molecularWeight = (30.0106,"g/mol"), - collisionModel = TransportData(sigma=(3.69e-10,'m'), epsilon=(4.0,'kJ/mol')), - energyTransferModel = SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), - spinMultiplicity = 1, - opticalIsomers = 1, - modes = [ - 
HarmonicOscillator(frequencies=([1180,1261,1529,1764,2931,2999],'cm^-1')), - NonlinearRotor(rotationalConstant=([1.15498821005263, 1.3156969584727, 9.45570474524524],"cm^-1"),symmetry=2, quantum=False), - IdealGasTranslation(mass=(30.0106,"g/mol")), + label='CH2O', + E0=(28.69, 'kcal/mol'), + molecularWeight=(30.0106, "g/mol"), + collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), + spinMultiplicity=1, + opticalIsomers=1, + modes=[ + HarmonicOscillator(frequencies=([1180, 1261, 1529, 1764, 2931, 2999], 'cm^-1')), + NonlinearRotor(rotationalConstant=([1.15498821005263, 1.3156969584727, 9.45570474524524], "cm^-1"), symmetry=2, + quantum=False), + IdealGasTranslation(mass=(30.0106, "g/mol")), ], ) species( - label = 'H', - E0 = (0.000,'kcal/mol'), - molecularWeight = (1.00783,"g/mol"), - collisionModel = TransportData(sigma=(3.69e-10,'m'), epsilon=(4.0,'kJ/mol')), - energyTransferModel = SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), - modes = [ - IdealGasTranslation(mass=(1.00783,"g/mol")), + label='H', + E0=(0.000, 'kcal/mol'), + molecularWeight=(1.00783, "g/mol"), + collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), + modes=[ + IdealGasTranslation(mass=(1.00783, "g/mol")), ], - spinMultiplicity = 2, - opticalIsomers = 1, + spinMultiplicity=2, + opticalIsomers=1, ) species( - label = 'CH2Ob', #this is a special system with two chemically equivalent product channels. Thus, different labels are used. - E0 = (28.69,'kcal/mol'), - molecularWeight = (30.0106,"g/mol"), - collisionModel = TransportData(sigma=(3.69e-10,'m'), epsilon=(4.0,'kJ/mol')), - energyTransferModel = SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), - spinMultiplicity = 1, - opticalIsomers = 1, - modes = [ - HarmonicOscillator(frequencies=([1180,1261,1529,1764,2931,2999],'cm^-1')), - NonlinearRotor(rotationalConstant=([1.15498821005263, 1.3156969584727, 9.45570474524524],"cm^-1"),symmetry=2, quantum=False), - IdealGasTranslation(mass=(30.0106,"g/mol")), + label='CH2Ob', + # this is a special system with two chemically equivalent product channels. Thus, different labels are used. 
+ E0=(28.69, 'kcal/mol'), + molecularWeight=(30.0106, "g/mol"), + collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), + spinMultiplicity=1, + opticalIsomers=1, + modes=[ + HarmonicOscillator(frequencies=([1180, 1261, 1529, 1764, 2931, 2999], 'cm^-1')), + NonlinearRotor(rotationalConstant=([1.15498821005263, 1.3156969584727, 9.45570474524524], "cm^-1"), symmetry=2, + quantum=False), + IdealGasTranslation(mass=(30.0106, "g/mol")), ], ) species( - label = 'Hb', - E0 = (0.0001,'kcal/mol'), - molecularWeight = (1.00783,"g/mol"), - collisionModel = TransportData(sigma=(3.69e-10,'m'), epsilon=(4.0,'kJ/mol')), - energyTransferModel = SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), - modes = [ - IdealGasTranslation(mass=(1.00783,"g/mol")), + label='Hb', + E0=(0.0001, 'kcal/mol'), + molecularWeight=(1.00783, "g/mol"), + collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), + modes=[ + IdealGasTranslation(mass=(1.00783, "g/mol")), ], - spinMultiplicity = 2, - opticalIsomers = 1, + spinMultiplicity=2, + opticalIsomers=1, ) species( - label = 'CH2OH', - E0 = (0.00,'kcal/mol'), - molecularWeight = (31.01843,"g/mol"), - modes = [ - HarmonicOscillator(frequencies=([418,595, 1055, 1198, 1368, 1488, 3138, 3279, 3840],'cm^-1')), + label='CH2OH', + E0=(0.00, 'kcal/mol'), + molecularWeight=(31.01843, "g/mol"), + modes=[ + HarmonicOscillator(frequencies=([418, 595, 1055, 1198, 1368, 1488, 3138, 3279, 3840], 'cm^-1')), # below is an example of how to include hindered rotors - #HinderedRotor(inertia=(5.75522e-47,'kg*m^2'), symmetry=1, barrier=(22427.8,'J/mol'), semiclassical=False), - NonlinearRotor(rotationalConstant=([0.868,0.993,6.419],"cm^-1"),symmetry=1, quantum=False), - IdealGasTranslation(mass=(31.01843,"g/mol")), + # HinderedRotor(inertia=(5.75522e-47,'kg*m^2'), symmetry=1, barrier=(22427.8,'J/mol'), semiclassical=False), + NonlinearRotor(rotationalConstant=([0.868, 0.993, 6.419], "cm^-1"), symmetry=1, quantum=False), + IdealGasTranslation(mass=(31.01843, "g/mol")), ], - spinMultiplicity = 2, - opticalIsomers = 2, - collisionModel = TransportData(sigma=(3.69e-10,'m'), epsilon=(4.0,'kJ/mol')), - energyTransferModel = SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), + spinMultiplicity=2, + opticalIsomers=2, + collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), ) species( - label = 'He', -# freqScaleFactor = 1, # TypeError: species() got an unexpected keyword argument 'freqScaleFactor'. - structure = SMILES('[He]'), - molecularWeight = (4.003,'amu'), - collisionModel = TransportData(sigma=(2.55e-10,'m'), epsilon=(0.0831,'kJ/mol')), - energyTransferModel = SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), - thermo=NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,0.928724], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,0.928724], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""He""", comment="""Thermo library: primaryThermoLibrary"""), + label='He', + # freqScaleFactor = 1, # TypeError: species() got an unexpected keyword argument 'freqScaleFactor'. 
+ structure=SMILES('[He]'), + molecularWeight=(4.003, 'amu'), + collisionModel=TransportData(sigma=(2.55e-10, 'm'), epsilon=(0.0831, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), + thermo=NASA( + polynomials=[NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, -745.375, 0.928724], Tmin=(200, 'K'), Tmax=(1000, 'K')), + NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, -745.375, 0.928724], Tmin=(1000, 'K'), Tmax=(6000, 'K'))], + Tmin=(200, 'K'), Tmax=(6000, 'K'), Cp0=(20.7862, 'J/(mol*K)'), CpInf=(20.7862, 'J/(mol*K)'), label="""He""", + comment="""Thermo library: primaryThermoLibrary"""), ) reaction( - label = 'CH2O+H=Methoxy', -# label = 'Methoxy = CH2O+H', - reactants = ['CH2O','H'], - products = ['methoxy'], -# reactants = ['methoxy'], -# products = ['CH2O', 'H'], - transitionState = 'TS3', - #tunneling='Eckart', + label='CH2O+H=Methoxy', + # label = 'Methoxy = CH2O+H', + reactants=['CH2O', 'H'], + products=['methoxy'], + # reactants = ['methoxy'], + # products = ['CH2O', 'H'], + transitionState='TS3', + # tunneling='Eckart', ) reaction( - # label = 'CH2Ob+Hb=CH2OH', - label = 'CH2OH = CH2Ob+Hb', -# products = ['CH2OH'], - reactants = ['CH2OH'], -# reactants = ['CH2Ob','Hb'], - products = ['CH2Ob', 'Hb'], - transitionState = 'TS1', - #tunneling='Eckart', + # label = 'CH2Ob+Hb=CH2OH', + label='CH2OH = CH2Ob+Hb', + # products = ['CH2OH'], + reactants=['CH2OH'], + # reactants = ['CH2Ob','Hb'], + products=['CH2Ob', 'Hb'], + transitionState='TS1', + # tunneling='Eckart', ) reaction( - label = 'CH2OH = Methoxy', -# reactants = ['methoxy'], -# products = ['CH2OH'], -# label = 'Methoxy = CH2OH', - products = ['methoxy'], - reactants = ['CH2OH'], - transitionState = 'TS2', - #tunneling='Eckart', + label='CH2OH = Methoxy', + # reactants = ['methoxy'], + # products = ['CH2OH'], + # label = 'Methoxy = CH2OH', + products=['methoxy'], + reactants=['CH2OH'], + transitionState='TS2', + # tunneling='Eckart', ) kinetics('CH2O+H=Methoxy') -#kinetics('Methoxy = CH2O+H' ) -#kinetics('Methoxy = CH2OH' ) +# kinetics('Methoxy = CH2O+H' ) +# kinetics('Methoxy = CH2OH' ) kinetics('CH2OH = Methoxy') -kinetics('CH2OH = CH2Ob+Hb' ) -#kinetics('CH2Ob+Hb=CH2OH') +kinetics('CH2OH = CH2Ob+Hb') +# kinetics('CH2Ob+Hb=CH2OH') network( - label = 'methoxy', - isomers = [ + label='methoxy', + isomers=[ 'methoxy', - 'CH2OH', + 'CH2OH', ], - reactants = [ - ('CH2O','H'), -# ('CH2Ob','Hb'), - ], + reactants=[ + ('CH2O', 'H'), + # ('CH2Ob','Hb'), + ], - bathGas = { + bathGas={ 'He': 1, }, ) pressureDependence( - label = 'methoxy', - Tmin = (450,'K'), Tmax = (1200,'K'), Tcount = 4, - Tlist = ([450,500,678,700],'K'), - Pmin = (0.01,'atm'), Pmax = (1000,'atm'), Pcount = 7, - Plist = ([0.01,0.1,1,3,10,100,1000],'atm'), - maximumGrainSize = (0.5,'kcal/mol'), - minimumGrainCount = 500, - method = 'modified strong collision', - #Other methods include: 'reservoir state', 'chemically-significant eigenvalues', - interpolationModel = ('pdeparrhenius'), - activeKRotor = True, -# activeJRotor = False, # causes Arkane to crash - rmgmode = False, + label='methoxy', + Tmin=(450, 'K'), Tmax=(1200, 'K'), Tcount=4, + Tlist=([450, 500, 678, 700], 'K'), + Pmin=(0.01, 'atm'), Pmax=(1000, 'atm'), Pcount=7, + Plist=([0.01, 0.1, 1, 3, 10, 100, 1000], 'atm'), + maximumGrainSize=(0.5, 'kcal/mol'), + minimumGrainCount=500, + method='modified strong collision', + # Other methods include: 'reservoir state', 'chemically-significant eigenvalues', + interpolationModel=('pdeparrhenius'), + activeKRotor=True, + # activeJRotor 
= False, # causes Arkane to crash + rmgmode=False, ) diff --git a/arkane/data/methoxy_explore.py b/arkane/data/methoxy_explore.py index 3376c0f078..b3ccc3be90 100644 --- a/arkane/data/methoxy_explore.py +++ b/arkane/data/methoxy_explore.py @@ -1,242 +1,244 @@ title = 'methoxy decomposition to H + CH2O' -description = \ -""" +description = """ This example illustrates how to manually set up an Arkane input file for a exploration of a P-dep reaction system """ + database( - thermoLibraries = ['primaryThermoLibrary'], - reactionLibraries = [], - kineticsDepositories = ['training'], - kineticsFamilies = 'default', - kineticsEstimator = 'rate rules', + thermoLibraries=['primaryThermoLibrary'], + reactionLibraries=[], + kineticsDepositories=['training'], + kineticsFamilies='default', + kineticsEstimator='rate rules', ) transitionState( - label = 'TS3', - E0 = (34.1,'kcal/mol'), # this INCLUDES the ZPE. Note that other energy units are also possible (e.g., kJ/mol) - spinMultiplicity = 2, - opticalIsomers = 1, - frequency = (-967,'cm^-1'), - modes = [ # these modes are used to compute the partition functions - HarmonicOscillator(frequencies=([466,581,1169,1242,1499,1659,2933,3000],'cm^-1')), - NonlinearRotor(rotationalConstant=([0.970, 1.029, 3.717],"cm^-1"),symmetry=1, quantum=False), - IdealGasTranslation(mass=(31.01843,"g/mol")) #this must be included for every species/ts + label='TS3', + E0=(34.1, 'kcal/mol'), # this INCLUDES the ZPE. Note that other energy units are also possible (e.g., kJ/mol) + spinMultiplicity=2, + opticalIsomers=1, + frequency=(-967, 'cm^-1'), + modes=[ # these modes are used to compute the partition functions + HarmonicOscillator(frequencies=([466, 581, 1169, 1242, 1499, 1659, 2933, 3000], 'cm^-1')), + NonlinearRotor(rotationalConstant=([0.970, 1.029, 3.717], "cm^-1"), symmetry=1, quantum=False), + IdealGasTranslation(mass=(31.01843, "g/mol")) # this must be included for every species/ts ], ) transitionState( - label = 'TS2', - E0 = (38.9,'kcal/mol'), - spinMultiplicity = 2, - opticalIsomers = 1, - frequency = (-1934,'cm^-1'), - modes = [ - HarmonicOscillator(frequencies=([792, 987 ,1136, 1142, 1482 ,2441 ,3096, 3183],'cm^-1')), - NonlinearRotor(rotationalConstant=([0.928,0.962,5.807],"cm^-1"),symmetry=1, quantum=False), - IdealGasTranslation(mass=(31.01843,"g/mol")) + label='TS2', + E0=(38.9, 'kcal/mol'), + spinMultiplicity=2, + opticalIsomers=1, + frequency=(-1934, 'cm^-1'), + modes=[ + HarmonicOscillator(frequencies=([792, 987, 1136, 1142, 1482, 2441, 3096, 3183], 'cm^-1')), + NonlinearRotor(rotationalConstant=([0.928, 0.962, 5.807], "cm^-1"), symmetry=1, quantum=False), + IdealGasTranslation(mass=(31.01843, "g/mol")) ], ) transitionState( - label = 'TS1', - E0 = (39.95,'kcal/mol'), - spinMultiplicity = 2, - opticalIsomers = 1, - frequency = (-1756,'cm^-1'), - modes = [ - HarmonicOscillator(frequencies=([186 ,626 ,1068, 1234, 1474, 1617, 2994 ,3087],'cm^-1')), - NonlinearRotor(rotationalConstant=([0.966,0.986,5.253],"cm^-1"),symmetry=1, quantum=False), - IdealGasTranslation(mass=(31.01843,"g/mol")) + label='TS1', + E0=(39.95, 'kcal/mol'), + spinMultiplicity=2, + opticalIsomers=1, + frequency=(-1756, 'cm^-1'), + modes=[ + HarmonicOscillator(frequencies=([186, 626, 1068, 1234, 1474, 1617, 2994, 3087], 'cm^-1')), + NonlinearRotor(rotationalConstant=([0.966, 0.986, 5.253], "cm^-1"), symmetry=1, quantum=False), + IdealGasTranslation(mass=(31.01843, "g/mol")) ], ) species( - label = 'methoxy', - structure = SMILES('C[O]'), - E0 = (9.44,'kcal/mol'), - modes = [ - 
HarmonicOscillator(frequencies=([758,960,1106 ,1393,1403,1518,2940,3019,3065],'cm^-1')), - NonlinearRotor(rotationalConstant=([0.916, 0.921, 5.251],"cm^-1"),symmetry=3, quantum=False), - IdealGasTranslation(mass=(31.01843,"g/mol")), + label='methoxy', + structure=SMILES('C[O]'), + E0=(9.44, 'kcal/mol'), + modes=[ + HarmonicOscillator(frequencies=([758, 960, 1106, 1393, 1403, 1518, 2940, 3019, 3065], 'cm^-1')), + NonlinearRotor(rotationalConstant=([0.916, 0.921, 5.251], "cm^-1"), symmetry=3, quantum=False), + IdealGasTranslation(mass=(31.01843, "g/mol")), ], - spinMultiplicity = 3.88, # 3+exp(-89/T) - opticalIsomers = 1, - molecularWeight = (31.01843,'amu'), - collisionModel = TransportData(sigma=(3.69e-10,'m'), epsilon=(4.0,'kJ/mol')), - energyTransferModel = SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), + spinMultiplicity=3.88, # 3+exp(-89/T) + opticalIsomers=1, + molecularWeight=(31.01843, 'amu'), + collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), ) - species( - label = 'CH2O', - structure = SMILES('C=O'), - E0 = (28.69,'kcal/mol'), - molecularWeight = (30.0106,"g/mol"), - collisionModel = TransportData(sigma=(3.69e-10,'m'), epsilon=(4.0,'kJ/mol')), - energyTransferModel = SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), - spinMultiplicity = 1, - opticalIsomers = 1, - modes = [ - HarmonicOscillator(frequencies=([1180,1261,1529,1764,2931,2999],'cm^-1')), - NonlinearRotor(rotationalConstant=([1.15498821005263, 1.3156969584727, 9.45570474524524],"cm^-1"),symmetry=2, quantum=False), - IdealGasTranslation(mass=(30.0106,"g/mol")), + label='CH2O', + structure=SMILES('C=O'), + E0=(28.69, 'kcal/mol'), + molecularWeight=(30.0106, "g/mol"), + collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), + spinMultiplicity=1, + opticalIsomers=1, + modes=[ + HarmonicOscillator(frequencies=([1180, 1261, 1529, 1764, 2931, 2999], 'cm^-1')), + NonlinearRotor(rotationalConstant=([1.15498821005263, 1.3156969584727, 9.45570474524524], "cm^-1"), symmetry=2, + quantum=False), + IdealGasTranslation(mass=(30.0106, "g/mol")), ], ) species( - label = 'H', - structure = SMILES('[H]'), - E0 = (0.000,'kcal/mol'), - molecularWeight = (1.00783,"g/mol"), - collisionModel = TransportData(sigma=(3.69e-10,'m'), epsilon=(4.0,'kJ/mol')), - energyTransferModel = SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), - modes = [ - IdealGasTranslation(mass=(1.00783,"g/mol")), + label='H', + structure=SMILES('[H]'), + E0=(0.000, 'kcal/mol'), + molecularWeight=(1.00783, "g/mol"), + collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), + modes=[ + IdealGasTranslation(mass=(1.00783, "g/mol")), ], - spinMultiplicity = 2, - opticalIsomers = 1, + spinMultiplicity=2, + opticalIsomers=1, ) species( - label = 'CH2Ob', #this is a special system with two chemically equivalent product channels. Thus, different labels are used. 
- structure = SMILES('C=O'), - E0 = (28.69,'kcal/mol'), - molecularWeight = (30.0106,"g/mol"), - collisionModel = TransportData(sigma=(3.69e-10,'m'), epsilon=(4.0,'kJ/mol')), - energyTransferModel = SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), - spinMultiplicity = 1, - opticalIsomers = 1, - modes = [ - HarmonicOscillator(frequencies=([1180,1261,1529,1764,2931,2999],'cm^-1')), - NonlinearRotor(rotationalConstant=([1.15498821005263, 1.3156969584727, 9.45570474524524],"cm^-1"),symmetry=2, quantum=False), - IdealGasTranslation(mass=(30.0106,"g/mol")), + label='CH2Ob', + # this is a special system with two chemically equivalent product channels. Thus, different labels are used. + structure=SMILES('C=O'), + E0=(28.69, 'kcal/mol'), + molecularWeight=(30.0106, "g/mol"), + collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), + spinMultiplicity=1, + opticalIsomers=1, + modes=[ + HarmonicOscillator(frequencies=([1180, 1261, 1529, 1764, 2931, 2999], 'cm^-1')), + NonlinearRotor(rotationalConstant=([1.15498821005263, 1.3156969584727, 9.45570474524524], "cm^-1"), symmetry=2, + quantum=False), + IdealGasTranslation(mass=(30.0106, "g/mol")), ], ) species( - label = 'Hb', - structure = SMILES('[H]'), - E0 = (0.0001,'kcal/mol'), - molecularWeight = (1.00783,"g/mol"), - collisionModel = TransportData(sigma=(3.69e-10,'m'), epsilon=(4.0,'kJ/mol')), - energyTransferModel = SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), - modes = [ - IdealGasTranslation(mass=(1.00783,"g/mol")), + label='Hb', + structure=SMILES('[H]'), + E0=(0.0001, 'kcal/mol'), + molecularWeight=(1.00783, "g/mol"), + collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), + modes=[ + IdealGasTranslation(mass=(1.00783, "g/mol")), ], - spinMultiplicity = 2, - opticalIsomers = 1, + spinMultiplicity=2, + opticalIsomers=1, ) species( - label = 'CH2OH', - structure = SMILES('[CH2]O'), - E0 = (0.00,'kcal/mol'), - molecularWeight = (31.01843,"g/mol"), - modes = [ - HarmonicOscillator(frequencies=([418,595, 1055, 1198, 1368, 1488, 3138, 3279, 3840],'cm^-1')), - NonlinearRotor(rotationalConstant=([0.868,0.993,6.419],"cm^-1"),symmetry=1, quantum=False), - IdealGasTranslation(mass=(31.01843,"g/mol")), + label='CH2OH', + structure=SMILES('[CH2]O'), + E0=(0.00, 'kcal/mol'), + molecularWeight=(31.01843, "g/mol"), + modes=[ + HarmonicOscillator(frequencies=([418, 595, 1055, 1198, 1368, 1488, 3138, 3279, 3840], 'cm^-1')), + NonlinearRotor(rotationalConstant=([0.868, 0.993, 6.419], "cm^-1"), symmetry=1, quantum=False), + IdealGasTranslation(mass=(31.01843, "g/mol")), ], - spinMultiplicity = 2, - opticalIsomers = 2, - collisionModel = TransportData(sigma=(3.69e-10,'m'), epsilon=(4.0,'kJ/mol')), - energyTransferModel = SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), + spinMultiplicity=2, + opticalIsomers=2, + collisionModel=TransportData(sigma=(3.69e-10, 'm'), epsilon=(4.0, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), ) species( - label = 'He', - structure = SMILES('[He]'), + label='He', + structure=SMILES('[He]'), reactive=False, - molecularWeight = (4.003,'amu'), - collisionModel = TransportData(sigma=(2.55e-10,'m'), epsilon=(0.0831,'kJ/mol')), - energyTransferModel = 
SingleExponentialDown(alpha0=(0.956,'kJ/mol'), T0=(300,'K'), n=0.95), + molecularWeight=(4.003, 'amu'), + collisionModel=TransportData(sigma=(2.55e-10, 'm'), epsilon=(0.0831, 'kJ/mol')), + energyTransferModel=SingleExponentialDown(alpha0=(0.956, 'kJ/mol'), T0=(300, 'K'), n=0.95), ) reaction( - label = 'CH2O+H=Methoxy', - reactants = ['CH2O','H'], - products = ['methoxy'], - transitionState = 'TS3', - kinetics = Arrhenius( - A = (1.5339e+09, 'cm^3/(mol*s)'), - n = 1.3717, - Ea = (18.6161, 'kJ/mol'), - T0 = (1, 'K'), - Tmin = (303.03, 'K'), - Tmax = (2500, 'K'), - comment = 'Fitted to 59 data points; dA = *|/ 1.06037, dn = +|- 0.00769361, dEa = +|- 0.0423225 kJ/mol', + label='CH2O+H=Methoxy', + reactants=['CH2O', 'H'], + products=['methoxy'], + transitionState='TS3', + kinetics=Arrhenius( + A=(1.5339e+09, 'cm^3/(mol*s)'), + n=1.3717, + Ea=(18.6161, 'kJ/mol'), + T0=(1, 'K'), + Tmin=(303.03, 'K'), + Tmax=(2500, 'K'), + comment='Fitted to 59 data points; dA = *|/ 1.06037, dn = +|- 0.00769361, dEa = +|- 0.0423225 kJ/mol', ), ) reaction( - label = 'CH2OH = CH2Ob+Hb', - reactants = ['CH2OH'], - products = ['CH2Ob', 'Hb'], - transitionState = 'TS1', - kinetics = Arrhenius( - A = (5.51244e+10, 's^-1'), - n = 0.868564, - Ea = (168.41, 'kJ/mol'), - T0 = (1, 'K'), - Tmin = (303.03, 'K'), - Tmax = (2500, 'K'), - comment = 'Fitted to 59 data points; dA = *|/ 1.05152, dn = +|- 0.00659302, dEa = +|- 0.0362682 kJ/mol', + label='CH2OH = CH2Ob+Hb', + reactants=['CH2OH'], + products=['CH2Ob', 'Hb'], + transitionState='TS1', + kinetics=Arrhenius( + A=(5.51244e+10, 's^-1'), + n=0.868564, + Ea=(168.41, 'kJ/mol'), + T0=(1, 'K'), + Tmin=(303.03, 'K'), + Tmax=(2500, 'K'), + comment='Fitted to 59 data points; dA = *|/ 1.05152, dn = +|- 0.00659302, dEa = +|- 0.0362682 kJ/mol', ), ) reaction( - label = 'CH2OH = Methoxy', - products = ['methoxy'], - reactants = ['CH2OH'], - transitionState = 'TS2', - kinetics = Arrhenius( - A = (5.63501e+11, 's^-1'), - n = 0.320211, - Ea = (163.376, 'kJ/mol'), - T0 = (1, 'K'), - Tmin = (303.03, 'K'), - Tmax = (2500, 'K'), - comment = 'Fitted to 59 data points; dA = *|/ 1.02731, dn = +|- 0.00353557, dEa = +|- 0.0194492 kJ/mol', + label='CH2OH = Methoxy', + products=['methoxy'], + reactants=['CH2OH'], + transitionState='TS2', + kinetics=Arrhenius( + A=(5.63501e+11, 's^-1'), + n=0.320211, + Ea=(163.376, 'kJ/mol'), + T0=(1, 'K'), + Tmin=(303.03, 'K'), + Tmax=(2500, 'K'), + comment='Fitted to 59 data points; dA = *|/ 1.02731, dn = +|- 0.00353557, dEa = +|- 0.0194492 kJ/mol', ), ) network( - label = 'methoxy', - isomers = [ + label='methoxy', + isomers=[ 'methoxy', - 'CH2OH', + 'CH2OH', ], - reactants = [ - ('CH2O','H'), - ], + reactants=[ + ('CH2O', 'H'), + ], - bathGas = { + bathGas={ 'He': 1, }, ) pressureDependence( - label = 'methoxy', - Tmin = (450,'K'), Tmax = (1200,'K'), Tcount = 3, - Tlist = ([450,800,1000,1200],'K'), - Pmin = (0.01,'atm'), Pmax = (1000.0,'atm'), Pcount = 3, - Plist = ([.01,1.0,1000.0],'atm'), - maximumGrainSize = (0.5,'kcal/mol'), - minimumGrainCount = 500, - method = 'modified strong collision', - interpolationModel = ('pdeparrhenius'), - activeKRotor = True, - rmgmode = False, + label='methoxy', + Tmin=(450, 'K'), Tmax=(1200, 'K'), Tcount=3, + Tlist=([450, 800, 1000, 1200], 'K'), + Pmin=(0.01, 'atm'), Pmax=(1000.0, 'atm'), Pcount=3, + Plist=([.01, 1.0, 1000.0], 'atm'), + maximumGrainSize=(0.5, 'kcal/mol'), + minimumGrainCount=500, + method='modified strong collision', + interpolationModel=('pdeparrhenius'), + activeKRotor=True, + rmgmode=False, ) explorer( - 
source=['methoxy'], - explore_tol=0.01, - energy_tol=4.5e1, - flux_tol=1e-15, + source=['methoxy'], + explore_tol=0.01, + energy_tol=4.5e1, + flux_tol=1e-15, ) diff --git a/arkane/encorr/__init__.py b/arkane/encorr/__init__.py index 3337d4925a..6382bb3341 100644 --- a/arkane/encorr/__init__.py +++ b/arkane/encorr/__init__.py @@ -1,10 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -""" -initialize imports -""" - ############################################################################### # # # RMG - Reaction Mechanism Generator # @@ -31,3 +27,7 @@ # DEALINGS IN THE SOFTWARE. # # # ############################################################################### + +""" +initialize imports +""" diff --git a/arkane/encorr/corr.py b/arkane/encorr/corr.py index 29c563e220..a0a8d5a943 100644 --- a/arkane/encorr/corr.py +++ b/arkane/encorr/corr.py @@ -33,14 +33,16 @@ corrections. """ -import rmgpy.constants as constants import logging -from arkane.exceptions import AtomEnergyCorrectionError, BondAdditivityCorrectionError +import rmgpy.constants as constants import arkane.encorr.data as data -import arkane.encorr.pbac as pbac import arkane.encorr.mbac as mbac +import arkane.encorr.pbac as pbac +from arkane.exceptions import AtomEnergyCorrectionError, BondAdditivityCorrectionError + +################################################################################ def get_energy_correction(model_chemistry, atoms, bonds, coords, nums, multiplicity=1, diff --git a/arkane/encorr/data.py b/arkane/encorr/data.py index 9b6667e221..66ddbea946 100644 --- a/arkane/encorr/data.py +++ b/arkane/encorr/data.py @@ -70,7 +70,6 @@ # Constants of Diatomic Molecules, Van Nostrand Reinhold Co., 1979 SOC = {'H': 0.0, 'N': 0.0, 'O': -0.000355, 'C': -0.000135, 'S': -0.000893, 'P': 0.0, 'I': -0.011547226} - # Atomic energies # All model chemistries here should be lower-case because the user input is changed to lower-case atom_energies = { @@ -328,7 +327,6 @@ } - # Petersson-type bond additivity correction parameters pbac = { @@ -405,6 +403,5 @@ } - # Melius-type bond additivity correction parameters mbac = {} diff --git a/arkane/encorr/mbac.py b/arkane/encorr/mbac.py index 89a8550b12..16538d53e8 100644 --- a/arkane/encorr/mbac.py +++ b/arkane/encorr/mbac.py @@ -39,8 +39,11 @@ from rmgpy.molecule import Molecule, Atom, Bond, getElement -from arkane.exceptions import BondAdditivityCorrectionError import arkane.encorr.data as data +from arkane.exceptions import BondAdditivityCorrectionError + +################################################################################ + atom_spins = { 'H': 0.5, 'C': 1.0, 'N': 1.5, 'O': 1.0, 'F': 0.5, 'Si': 1.0, 'P': 1.5, 'S': 1.0, 'Cl': 0.5, 'Br': 0.5, 'I': 0.5 @@ -94,11 +97,11 @@ def get_bac(model_chemistry, coords, nums, multiplicity=1, mol_corr=0.0): bac_bond += length_corr * np.exp(-alpha * length) # Neighbor correction - for other_atom, other_bond in mol.getBonds(atom1).iteritems(): # Atoms adjacent to atom1 + for other_atom, other_bond in mol.getBonds(atom1).items(): # Atoms adjacent to atom1 if other_bond is not bond: other_symbol = other_atom.element.symbol bac_bond += bond_corr_neighbor[symbol1] + bond_corr_neighbor[other_symbol] - for other_atom, other_bond in mol.getBonds(atom2).iteritems(): # Atoms adjacent to atom2 + for other_atom, other_bond in mol.getBonds(atom2).items(): # Atoms adjacent to atom2 if other_bond is not bond: other_symbol = other_atom.element.symbol bac_bond += bond_corr_neighbor[symbol2] + bond_corr_neighbor[other_symbol] diff --git 
a/arkane/encorr/pbac.py b/arkane/encorr/pbac.py index 0f03ebd858..e84636e53f 100644 --- a/arkane/encorr/pbac.py +++ b/arkane/encorr/pbac.py @@ -37,8 +37,10 @@ import logging import re -from arkane.exceptions import BondAdditivityCorrectionError import arkane.encorr.data as data +from arkane.exceptions import BondAdditivityCorrectionError + +################################################################################ def get_bac(model_chemistry, bonds): diff --git a/arkane/exceptions.py b/arkane/exceptions.py index e808fc616a..211b5e9a29 100644 --- a/arkane/exceptions.py +++ b/arkane/exceptions.py @@ -32,6 +32,7 @@ This module provides custom Exception classes for use in Arkane. """ + class AtomEnergyCorrectionError(Exception): """ An exception to be raised when an error occurs while applying atom diff --git a/arkane/explorer.py b/arkane/explorer.py index 5ba764be44..b6f92c2f8e 100644 --- a/arkane/explorer.py +++ b/arkane/explorer.py @@ -1,10 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -""" -The Arkane Explorer module -""" - ############################################################################### # # # RMG - Reaction Mechanism Generator # @@ -32,18 +28,22 @@ # # ############################################################################### -import os -import numpy as np +""" +The Arkane Explorer module +""" + import logging +import os import shutil from copy import deepcopy +import numpy as np + import rmgpy -from rmgpy.rmg.main import RMG -from rmgpy.rmg.model import CoreEdgeReactionModel from rmgpy.data.rmg import getDB from rmgpy.exceptions import InputError - +from rmgpy.rmg.main import RMG +from rmgpy.rmg.model import CoreEdgeReactionModel ################################################################################ @@ -109,18 +109,18 @@ def execute(self, outputFile, plot, format='pdf', print_summary=True, speciesLis if outputFile: reaction_model.pressureDependence.outputFile = os.path.dirname(outputFile) - kineticsDatabase = getDB('kinetics') - thermoDatabase = getDB('thermo') + kinetics_database = getDB('kinetics') + thermo_database = getDB('thermo') - thermoDatabase.libraries['thermojobs'] = thermoLibrary - thermoDatabase.libraryOrder.insert(0, 'thermojobs') + thermo_database.libraries['thermojobs'] = thermoLibrary + thermo_database.libraryOrder.insert(0, 'thermojobs') - kineticsDatabase.libraries['kineticsjobs'] = kineticsLibrary - kineticsDatabase.libraryOrder.insert(0, ('kineticsjobs', 'Reaction Library')) + kinetics_database.libraries['kineticsjobs'] = kineticsLibrary + kinetics_database.libraryOrder.insert(0, ('kineticsjobs', 'Reaction Library')) - jobRxns = [rxn for rxn in reaction_model.core.reactions] + job_rxns = [rxn for rxn in reaction_model.core.reactions] - self.jobRxns = jobRxns + self.jobRxns = job_rxns if outputFile is not None: if not os.path.exists(os.path.join(reaction_model.pressureDependence.outputFile, 'pdep')): @@ -139,8 +139,8 @@ def execute(self, outputFile, plot, format='pdf', print_summary=True, speciesLis form = mmol.getFormula() - for spec in self.bathGas.keys() + self.source: - nspec, isNew = reaction_model.makeNewSpecies(spec, reactive=False) + for spec in list(self.bathGas.keys()) + self.source: + nspec, is_new = reaction_model.makeNewSpecies(spec, reactive=False) flags = np.array([s.molecule[0].getFormula() == form for s in reaction_model.core.species]) reaction_model.enlarge(nspec, reactEdge=False, unimolecularReact=flags, bimolecularReact=np.zeros((len(reaction_model.core.species), @@ -148,7 +148,7 @@ def execute(self, 
outputFile, plot, format='pdf', print_summary=True, speciesLis reaction_model.addSeedMechanismToCore('kineticsjobs') - for lib in kineticsDatabase.libraryOrder: + for lib in kinetics_database.libraryOrder: if lib[0] != 'kineticsjobs': reaction_model.addReactionLibraryToEdge(lib[0]) @@ -163,8 +163,8 @@ def execute(self, outputFile, plot, format='pdf', print_summary=True, speciesLis biflags = np.zeros((len(reaction_model.core.species), len(reaction_model.core.species))) elif len(self.source) == 2: flags = np.array([False for s in reaction_model.core.species]) - biflags = np.array([[False for i in xrange(len(reaction_model.core.species))] - for j in xrange(len(reaction_model.core.species))]) + biflags = np.array([[False for i in range(len(reaction_model.core.species))] + for j in range(len(reaction_model.core.species))]) biflags[reaction_model.core.species.index(self.source[0]), reaction_model.core.species.index( self.source[1])] = True else: @@ -193,68 +193,68 @@ def execute(self, outputFile, plot, format='pdf', print_summary=True, speciesLis # determine T and P combinations if self.pdepjob.Tlist: - Tlist = self.pdepjob.Tlist.value_si + t_list = self.pdepjob.Tlist.value_si else: - Tlist = np.linspace(self.pdepjob.Tmin.value_si, self.pdepjob.Tmax.value_si, self.pdepjob.Tcount) + t_list = np.linspace(self.pdepjob.Tmin.value_si, self.pdepjob.Tmax.value_si, self.pdepjob.Tcount) if self.pdepjob.Plist: - Plist = self.pdepjob.Plist.value_si + p_list = self.pdepjob.Plist.value_si else: - Plist = np.linspace(self.pdepjob.Pmin.value_si, self.pdepjob.Pmax.value_si, self.pdepjob.Pcount) + p_list = np.linspace(self.pdepjob.Pmin.value_si, self.pdepjob.Pmax.value_si, self.pdepjob.Pcount) # generate the network - forbiddenStructures = getDB('forbidden') + forbidden_structures = getDB('forbidden') incomplete = True - checkedSpecies = [] + checked_species = [] while incomplete: incomplete = False - for T in Tlist: - for P in Plist: + for temperature in t_list: + for pressure in p_list: for network in self.networks: # compute the characteristic rate coefficient by summing all rate coefficients # from the reactant channel for spc in reaction_model.edge.species: - if spc in checkedSpecies: + if spc in checked_species: continue - if forbiddenStructures.isMoleculeForbidden(spc.molecule[0]): + if forbidden_structures.isMoleculeForbidden(spc.molecule[0]): reaction_model.removeSpeciesFromEdge(reaction_model.reactionSystems, spc) reaction_model.removeEmptyPdepNetworks() else: - checkedSpecies.append(spc) + checked_species.append(spc) kchar = 0.0 for rxn in network.netReactions: # reaction_model.core.reactions+reaction_model.edge.reactions: if (set(rxn.reactants) == set(self.source) and rxn.products[0].molecule[0].getFormula() == form): - kchar += rxn.kinetics.getRateCoefficient(T=T, P=P) + kchar += rxn.kinetics.getRateCoefficient(T=temperature, P=pressure) elif (set(rxn.products) == set(self.source) and rxn.reactants[0].molecule[0].getFormula() == form): kchar += rxn.generateReverseRateCoefficient(network_kinetics=True).getRateCoefficient( - T=T, P=P) + T=temperature, P=pressure) - if network.getLeakCoefficient(T=T, P=P) > self.explore_tol * kchar: + if network.getLeakCoefficient(T=temperature, P=pressure) > self.explore_tol * kchar: incomplete = True - spc = network.getMaximumLeakSpecies(T=T, P=P) + spc = network.getMaximumLeakSpecies(T=temperature, P=pressure) logging.info('adding new isomer {0} to network'.format(spc)) flags = np.array([s.molecule[0].getFormula() == form for s in reaction_model.core.species]) 
reaction_model.enlarge((network, spc), reactEdge=False, unimolecularReact=flags, - bimolecularReact=np.zeros((len(reaction_model.core.species), - len(reaction_model.core.species)))) + bimolecularReact=np.zeros((len(reaction_model.core.species), + len(reaction_model.core.species)))) flags = np.array([s.molecule[0].getFormula() == form for s in reaction_model.core.species]) reaction_model.enlarge(reactEdge=True, unimolecularReact=flags, - bimolecularReact=np.zeros((len(reaction_model.core.species), - len(reaction_model.core.species)))) + bimolecularReact=np.zeros((len(reaction_model.core.species), + len(reaction_model.core.species)))) for network in self.networks: - rmRxns = [] + rm_rxns = [] for rxn in network.pathReactions: # remove reactions with forbidden species for r in rxn.reactants + rxn.products: - if forbiddenStructures.isMoleculeForbidden(r.molecule[0]): - rmRxns.append(rxn) + if forbidden_structures.isMoleculeForbidden(r.molecule[0]): + rm_rxns.append(rxn) - for rxn in rmRxns: + for rxn in rm_rxns: logging.info('Removing forbidden reaction: {0}'.format(rxn)) network.pathReactions.remove(rxn) @@ -271,7 +271,7 @@ def execute(self, outputFile, plot, format='pdf', print_summary=True, speciesLis warns = [] - for rxn in jobRxns: + for rxn in job_rxns: if rxn not in network.pathReactions: warns.append('Reaction {0} in the input file was not explored during network expansion and was ' 'not included in the full network. This is likely because your explore_tol value is ' @@ -281,41 +281,40 @@ def execute(self, outputFile, plot, format='pdf', print_summary=True, speciesLis for network in self.networks: if self.energy_tol != np.inf or self.flux_tol != 0.0: - rxnSet = None - productSet = None + rxn_set = None + product_set = None - for T in Tlist: + for temperature in t_list: if self.energy_tol != np.inf: - rxns = network.get_energy_filtered_reactions(T, self.energy_tol) - if rxnSet is not None: - rxnSet &= set(rxns) + rxns = network.get_energy_filtered_reactions(temperature, self.energy_tol) + if rxn_set is not None: + rxn_set &= set(rxns) else: - rxnSet = set(rxns) + rxn_set = set(rxns) - for P in Plist: + for pressure in p_list: if self.flux_tol != 0.0: - products = network.get_rate_filtered_products(T, P, self.flux_tol) + products = network.get_rate_filtered_products(temperature, pressure, self.flux_tol) products = [tuple(x) for x in products] - if productSet is not None: - productSet &= set(products) + if product_set is not None: + product_set &= set(products) else: - productSet = set(products) - + product_set = set(products) - if rxnSet: + if rxn_set: logging.info('removing reactions during reduction:') - for rxn in rxnSet: + for rxn in rxn_set: logging.info(rxn) - rxnSet = list(rxnSet) - if productSet: + rxn_set = list(rxn_set) + if product_set: logging.info('removing products during reduction:') - for prod in productSet: + for prod in product_set: logging.info([x.label for x in prod]) - productSet = list(productSet) + product_set = list(product_set) - network.remove_reactions(reaction_model, rxns=rxnSet, prods=productSet) + network.remove_reactions(reaction_model, rxns=rxn_set, prods=product_set) - for rxn in jobRxns: + for rxn in job_rxns: if rxn not in network.pathReactions: warns.append( 'Reaction {0} in the input file was not included in the reduced model.'.format(rxn)) diff --git a/arkane/explorerTest.py b/arkane/explorerTest.py index 9245f49b6c..962687e79e 100644 --- a/arkane/explorerTest.py +++ b/arkane/explorerTest.py @@ -28,19 +28,23 @@ # # 
############################################################################### -import unittest +""" +This module contains unit tests of the :mod:`arkane.explorer` module. +""" + import os +import unittest + from nose.plugins.attrib import attr from arkane import Arkane from arkane.explorer import ExplorerJob - ################################################################################ @attr('functional') -class testExplorerJob(unittest.TestCase): +class TestExplorerJob(unittest.TestCase): """ Contains tests for ExplorerJob class execute method """ @@ -56,12 +60,12 @@ def setUpClass(cls): if not isinstance(job, ExplorerJob): job.execute(outputFile=None, plot=None) else: - thermoLibrary, kineticsLibrary, speciesList = arkane.getLibraries() - job.execute(outputFile=None, plot=None, speciesList=speciesList, thermoLibrary=thermoLibrary, - kineticsLibrary=kineticsLibrary) + thermo_library, kinetics_library, species_list = arkane.getLibraries() + job.execute(outputFile=None, plot=None, speciesList=species_list, thermoLibrary=thermo_library, + kineticsLibrary=kinetics_library) - cls.thermoLibrary = thermoLibrary - cls.kineticsLibrary = kineticsLibrary + cls.thermoLibrary = thermo_library + cls.kineticsLibrary = kinetics_library cls.explorerjob = cls.jobList[-1] cls.pdepjob = cls.jobList[-2] diff --git a/arkane/gaussian.py b/arkane/gaussian.py index 7f81cc0520..ec2c34e150 100644 --- a/arkane/gaussian.py +++ b/arkane/gaussian.py @@ -1,11 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -""" -Arkane Gaussian module -Used to parse Gaussian output files -""" - ############################################################################### # # # RMG - Reaction Mechanism Generator # @@ -33,14 +28,20 @@ # # ############################################################################### -import math -import numpy +""" +Arkane Gaussian module +Used to parse Gaussian output files +""" + import logging +import math import os.path +import numpy as np + import rmgpy.constants as constants -from rmgpy.statmech import IdealGasTranslation, NonlinearRotor, LinearRotor, HarmonicOscillator, Conformer from rmgpy.exceptions import InputError +from rmgpy.statmech import IdealGasTranslation, NonlinearRotor, LinearRotor, HarmonicOscillator, Conformer from arkane.common import check_conformer_energy, get_element_mass from arkane.log import Log @@ -64,21 +65,21 @@ def getNumberOfAtoms(self): Return the number of atoms in the molecular configuration used in the Gaussian log file. """ - Natoms = 0 + n_atoms = 0 with open(self.path, 'r') as f: line = f.readline() - while line != '' and Natoms == 0: + while line != '' and n_atoms == 0: # Automatically determine the number of atoms - if 'Input orientation:' in line and Natoms == 0: + if 'Input orientation:' in line and n_atoms == 0: for i in range(5): line = f.readline() while '---------------------------------------------------------------------' not in line: - Natoms += 1 + n_atoms += 1 line = f.readline() line = f.readline() - return Natoms + return n_atoms def loadForceConstantMatrix(self): """ @@ -90,31 +91,31 @@ def loadForceConstantMatrix(self): are J/m^2. If no force constant matrix can be found in the log file, ``None`` is returned. 
""" - F = None + force = None - Natoms = self.getNumberOfAtoms() - Nrows = Natoms * 3 + n_atoms = self.getNumberOfAtoms() + n_rows = n_atoms * 3 with open(self.path, 'r') as f: line = f.readline() while line != '': # Read force constant matrix if 'Force constants in Cartesian coordinates:' in line: - F = numpy.zeros((Nrows, Nrows), numpy.float64) - for i in range(int(math.ceil(Nrows / 5.0))): + force = np.zeros((n_rows, n_rows), np.float64) + for i in range(int(math.ceil(n_rows / 5.0))): # Header row line = f.readline() # Matrix element rows - for j in range(i * 5, Nrows): + for j in range(i * 5, n_rows): data = f.readline().split() for k in range(len(data) - 1): - F[j, i * 5 + k] = float(data[k + 1].replace('D', 'E')) - F[i * 5 + k, j] = F[j, i * 5 + k] + force[j, i * 5 + k] = float(data[k + 1].replace('D', 'E')) + force[i * 5 + k, j] = force[j, i * 5 + k] # Convert from atomic units (Hartree/Bohr_radius^2) to J/m^2 - F *= 4.35974417e-18 / 5.291772108e-11 ** 2 + force *= 4.35974417e-18 / 5.291772108e-11 ** 2 line = f.readline() - return F + return force def loadGeometry(self): """ @@ -144,9 +145,9 @@ def loadGeometry(self): for num in number: mass1, _ = get_element_mass(num) mass.append(mass1) - coord = numpy.array(coord, numpy.float64) - number = numpy.array(number, numpy.int) - mass = numpy.array(mass, numpy.float64) + coord = np.array(coord, np.float64) + number = np.array(number, np.int) + mass = np.array(mass, np.float64) if len(number) == 0 or len(coord) == 0 or len(mass) == 0: raise InputError('Unable to read atoms from Gaussian geometry output file {0}. ' 'Make sure the output file is not corrupt.\nNote: if your species has ' @@ -181,12 +182,13 @@ def loadConformer(self, symmetry=None, spinMultiplicity=0, opticalIsomers=None, # Read the spin multiplicity if not explicitly given if spinMultiplicity == 0 and 'Multiplicity =' in line: spinMultiplicity = int(line.split()[-1]) - logging.debug('Conformer {0} is assigned a spin multiplicity of {1}'.format(label, spinMultiplicity)) + logging.debug('Conformer {0} is assigned a spin multiplicity of {1}' + .format(label, spinMultiplicity)) # The data we want is in the Thermochemistry section of the output if '- Thermochemistry -' in line: modes = [] - inPartitionFunctions = False + in_partition_functions = False line = f.readline() while line != '': @@ -204,14 +206,14 @@ def loadConformer(self, symmetry=None, spinMultiplicity=0, opticalIsomers=None, elif 'Rotational constants (GHZ):' in line: inertia = [float(d) for d in line.split()[-3:]] for i in range(3): - inertia[i] = constants.h / (8 * constants.pi * constants.pi * inertia[i] * 1e9)\ + inertia[i] = constants.h / (8 * constants.pi * constants.pi * inertia[i] * 1e9) \ * constants.Na * 1e23 rotation = NonlinearRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry) modes.append(rotation) elif 'Rotational constant (GHZ):' in line: inertia = [float(line.split()[3])] - inertia[0] = constants.h / (8 * constants.pi * constants.pi * inertia[0] * 1e9)\ - * constants.Na * 1e23 + inertia[0] = constants.h / (8 * constants.pi * constants.pi * inertia[0] * 1e9) \ + * constants.Na * 1e23 rotation = LinearRotor(inertia=(inertia[0], "amu*angstrom^2"), symmetry=symmetry) modes.append(rotation) @@ -237,11 +239,11 @@ def loadConformer(self, symmetry=None, spinMultiplicity=0, opticalIsomers=None, e0 = float(line.split()[6]) * 4.35974394e-18 * constants.Na # Read spin multiplicity if above method was unsuccessful - elif 'Electronic' in line and inPartitionFunctions and spinMultiplicity == 0: + elif 
'Electronic' in line and in_partition_functions and spinMultiplicity == 0: spinMultiplicity = int(float(line.split()[1].replace('D', 'E'))) elif 'Log10(Q)' in line: - inPartitionFunctions = True + in_partition_functions = True # Read the next line in the file line = f.readline() @@ -329,10 +331,10 @@ def loadScanEnergies(self): Extract the optimized energies in J/mol from a log file, e.g. the result of a Gaussian "Scan" quantum chemistry calculation. """ - optfreq = False - rigidScan = False + opt_freq = False + rigid_scan = False - Vlist = [] # The array of potentials at each scan angle + vlist = [] # The array of potentials at each scan angle # Parse the Gaussian log file, extracting the energies of each # optimized conformer in the scan @@ -341,48 +343,48 @@ def loadScanEnergies(self): while line != '': # If the job contains a "freq" then we want to ignore the last energy if ' freq ' in line: - optfreq = True + opt_freq = True # if # scan is keyword instead of # opt, then this is a rigid scan job # and parsing the energies is done a little differently if '# scan' in line: - rigidScan = True + rigid_scan = True # The lines containing "SCF Done" give the energy at each # iteration (even the intermediate ones) if 'SCF Done:' in line: - E = float(line.split()[4]) + energy = float(line.split()[4]) # rigid scans will only not optimize, so just append every time it finds an energy. - if rigidScan: - Vlist.append(E) - # We want to keep the values of E that come most recently before + if rigid_scan: + vlist.append(energy) + # We want to keep the values of energy that come most recently before # the line containing "Optimization completed", since it refers # to the optimized geometry if 'Optimization completed' in line: - Vlist.append(E) + vlist.append(energy) line = f.readline() # give warning in case this assumption is not true - if rigidScan: - print ' Assuming', os.path.basename(self.path), 'is the output from a rigid scan...' + if rigid_scan: + print(' Assuming', os.path.basename(self.path), 'is the output from a rigid scan...') - Vlist = numpy.array(Vlist, numpy.float64) + vlist = np.array(vlist, np.float64) # check to see if the scanlog indicates that a one of your reacting species may not be # the lowest energy conformer - check_conformer_energy(Vlist, self.path) + check_conformer_energy(vlist, self.path) # Adjust energies to be relative to minimum energy conformer # Also convert units from Hartree/particle to J/mol - Vlist -= numpy.min(Vlist) - Vlist *= constants.E_h * constants.Na + vlist -= np.min(vlist) + vlist *= constants.E_h * constants.Na - if optfreq: - Vlist = Vlist[:-1] + if opt_freq: + vlist = vlist[:-1] # Determine the set of dihedral angles corresponding to the loaded energies # This assumes that you start at 0.0, finish at 360.0, and take # constant step sizes in between - angle = numpy.arange(0.0, 2 * math.pi + 0.00001, 2 * math.pi / (len(Vlist) - 1), numpy.float64) + angle = np.arange(0.0, 2 * math.pi + 0.00001, 2 * math.pi / (len(vlist) - 1), np.float64) - return Vlist, angle + return vlist, angle def _load_scan_specs(self, letter_spec): """ @@ -413,7 +415,7 @@ def _load_scan_specs(self, letter_spec): action_index = 3 # bond length with 2 terms else: raise ValueError('This file has an option not supported by arkane.' 
- 'Unable to read scan specs for line: {}'.format(line)) + 'Unable to read scan specs for line: {}'.format(line)) if len(terms) > action_index: # specified type explicitly if terms[action_index] == letter_spec: diff --git a/arkane/gaussianTest.py b/arkane/gaussianTest.py index 8e71f96e8d..3db20b3d36 100644 --- a/arkane/gaussianTest.py +++ b/arkane/gaussianTest.py @@ -28,18 +28,22 @@ # # ############################################################################### -import numpy -import unittest +""" +This module contains unit tests of the :mod:`arkane.gaussian` module. +""" + import os +import unittest + +import numpy as np -from rmgpy.statmech import IdealGasTranslation, LinearRotor, NonlinearRotor, HarmonicOscillator, HinderedRotor import rmgpy.constants as constants from external.wip import work_in_progress +from rmgpy.statmech import IdealGasTranslation, LinearRotor, NonlinearRotor, HarmonicOscillator, HinderedRotor from arkane.gaussian import GaussianLog from arkane.statmech import determine_qm_software - ################################################################################ @@ -58,7 +62,7 @@ def testLoadEthyleneFromGaussianLog_CBSQB3(self): log = GaussianLog(os.path.join(os.path.dirname(__file__), 'data', 'ethylene.log')) conformer, unscaled_frequencies = log.loadConformer() - E0 = log.loadEnergy() + e0 = log.loadEnergy() self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, IdealGasTranslation)]) == 1) self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, NonlinearRotor)]) == 1) @@ -68,12 +72,12 @@ def testLoadEthyleneFromGaussianLog_CBSQB3(self): trans = [mode for mode in conformer.modes if isinstance(mode, IdealGasTranslation)][0] rot = [mode for mode in conformer.modes if isinstance(mode, NonlinearRotor)][0] vib = [mode for mode in conformer.modes if isinstance(mode, HarmonicOscillator)][0] - Tlist = numpy.array([298.15], numpy.float64) - self.assertAlmostEqual(trans.getPartitionFunction(Tlist), 5.83338e6, delta=1e1) - self.assertAlmostEqual(rot.getPartitionFunction(Tlist), 2.59622e3, delta=1e-2) - self.assertAlmostEqual(vib.getPartitionFunction(Tlist), 1.0481e0, delta=1e-4) + t_list = np.array([298.15], np.float64) + self.assertAlmostEqual(trans.getPartitionFunction(t_list), 5.83338e6, delta=1e1) + self.assertAlmostEqual(rot.getPartitionFunction(t_list), 2.59622e3, delta=1e-2) + self.assertAlmostEqual(vib.getPartitionFunction(t_list), 1.0481e0, delta=1e-4) - self.assertAlmostEqual(E0 / constants.Na / constants.E_h, -78.467452, 4) + self.assertAlmostEqual(e0 / constants.Na / constants.E_h, -78.467452, 4) self.assertEqual(conformer.spinMultiplicity, 1) self.assertEqual(conformer.opticalIsomers, 1) @@ -85,7 +89,7 @@ def testLoadOxygenFromGaussianLog(self): log = GaussianLog(os.path.join(os.path.dirname(__file__), 'data', 'oxygen.log')) conformer, unscaled_frequencies = log.loadConformer() - E0 = log.loadEnergy() + e0 = log.loadEnergy() self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, IdealGasTranslation)]) == 1) self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, LinearRotor)]) == 1) @@ -95,12 +99,12 @@ def testLoadOxygenFromGaussianLog(self): trans = [mode for mode in conformer.modes if isinstance(mode, IdealGasTranslation)][0] rot = [mode for mode in conformer.modes if isinstance(mode, LinearRotor)][0] vib = [mode for mode in conformer.modes if isinstance(mode, HarmonicOscillator)][0] - Tlist = numpy.array([298.15], numpy.float64) - 
self.assertAlmostEqual(trans.getPartitionFunction(Tlist), 7.11169e6, delta=1e1) - self.assertAlmostEqual(rot.getPartitionFunction(Tlist), 7.13316e1, delta=1e-4) - self.assertAlmostEqual(vib.getPartitionFunction(Tlist), 1.00037e0, delta=1e-4) + t_list = np.array([298.15], np.float64) + self.assertAlmostEqual(trans.getPartitionFunction(t_list), 7.11169e6, delta=1e1) + self.assertAlmostEqual(rot.getPartitionFunction(t_list), 7.13316e1, delta=1e-4) + self.assertAlmostEqual(vib.getPartitionFunction(t_list), 1.00037e0, delta=1e-4) - self.assertAlmostEqual(E0 / constants.Na / constants.E_h, -150.3784877, 4) + self.assertAlmostEqual(e0 / constants.Na / constants.E_h, -150.3784877, 4) self.assertEqual(conformer.spinMultiplicity, 3) self.assertEqual(conformer.opticalIsomers, 1) @@ -113,7 +117,7 @@ def testLoadEthyleneFromGaussianLog_G3(self): log = GaussianLog(os.path.join(os.path.dirname(__file__), 'data', 'ethylene_G3.log')) conformer, unscaled_frequencies = log.loadConformer() - E0 = log.loadEnergy() + e0 = log.loadEnergy() self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, IdealGasTranslation)]) == 1) self.assertTrue(len([mode for mode in conformer.modes if isinstance(mode, NonlinearRotor)]) == 1) @@ -123,13 +127,13 @@ def testLoadEthyleneFromGaussianLog_G3(self): trans = [mode for mode in conformer.modes if isinstance(mode, IdealGasTranslation)][0] rot = [mode for mode in conformer.modes if isinstance(mode, NonlinearRotor)][0] vib = [mode for mode in conformer.modes if isinstance(mode, HarmonicOscillator)][0] - Tlist = numpy.array([298.15], numpy.float64) + t_list = np.array([298.15], np.float64) - self.assertAlmostEqual(trans.getPartitionFunction(Tlist), 5.83338e6, delta=1e1) - self.assertAlmostEqual(rot.getPartitionFunction(Tlist), 2.53410e3, delta=1e-2) - self.assertAlmostEqual(vib.getPartitionFunction(Tlist), 1.0304e0, delta=1e-4) + self.assertAlmostEqual(trans.getPartitionFunction(t_list), 5.83338e6, delta=1e1) + self.assertAlmostEqual(rot.getPartitionFunction(t_list), 2.53410e3, delta=1e-2) + self.assertAlmostEqual(vib.getPartitionFunction(t_list), 1.0304e0, delta=1e-4) - self.assertAlmostEqual(E0 / constants.Na / constants.E_h, -78.562189, 4) + self.assertAlmostEqual(e0 / constants.Na / constants.E_h, -78.562189, 4) self.assertEqual(conformer.spinMultiplicity, 1) self.assertEqual(conformer.opticalIsomers, 1) diff --git a/arkane/input.py b/arkane/input.py index 76a24bde3c..d36514e1b7 100644 --- a/arkane/input.py +++ b/arkane/input.py @@ -32,51 +32,41 @@ This module contains functionality for parsing Arkane input files. 
""" -import os.path import logging +import os.path + import numpy as np from rmgpy import settings -from rmgpy.exceptions import InputError, DatabaseError from rmgpy.data.rmg import RMGDatabase from rmgpy.data.rmg import getDB - +from rmgpy.exceptions import InputError, DatabaseError +from rmgpy.kinetics.arrhenius import Arrhenius +from rmgpy.kinetics.model import PDepKineticsModel, TunnelingModel +from rmgpy.kinetics.tunneling import Wigner, Eckart +from rmgpy.molecule import Molecule +from rmgpy.pdep.collision import SingleExponentialDown +from rmgpy.pdep.configuration import Configuration +from rmgpy.pdep.network import Network +from rmgpy.reaction import Reaction from rmgpy.rmg.model import CoreEdgeReactionModel - from rmgpy.species import Species, TransitionState -from rmgpy.quantity import Quantity - -from rmgpy.statmech.translation import Translation, IdealGasTranslation -from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor -from rmgpy.statmech.vibration import Vibration, HarmonicOscillator -from rmgpy.statmech.torsion import Torsion, HinderedRotor, FreeRotor from rmgpy.statmech.conformer import Conformer - -from rmgpy.thermo.thermodata import ThermoData +from rmgpy.statmech.rotation import LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor +from rmgpy.statmech.torsion import HinderedRotor, FreeRotor +from rmgpy.statmech.translation import IdealGasTranslation +from rmgpy.statmech.vibration import HarmonicOscillator from rmgpy.thermo.nasa import NASAPolynomial, NASA +from rmgpy.thermo.thermodata import ThermoData from rmgpy.thermo.wilhoit import Wilhoit - -from rmgpy.kinetics.arrhenius import Arrhenius, ArrheniusEP, PDepArrhenius, MultiArrhenius, MultiPDepArrhenius -from rmgpy.kinetics.chebyshev import Chebyshev -from rmgpy.kinetics.falloff import ThirdBody, Lindemann, Troe -from rmgpy.kinetics.kineticsdata import KineticsData, PDepKineticsData -from rmgpy.kinetics.tunneling import Wigner, Eckart -from rmgpy.kinetics.model import PDepKineticsModel, TunnelingModel - -from rmgpy.pdep.configuration import Configuration -from rmgpy.pdep.network import Network -from rmgpy.pdep.collision import SingleExponentialDown - -from rmgpy.molecule import Molecule -from rmgpy.reaction import Reaction from rmgpy.transport import TransportData +from arkane.common import is_pdep +from arkane.explorer import ExplorerJob from arkane.kinetics import KineticsJob +from arkane.pdep import PressureDependenceJob from arkane.statmech import StatMechJob, assign_frequency_scale_factor from arkane.thermo import ThermoJob -from arkane.pdep import PressureDependenceJob -from arkane.explorer import ExplorerJob -from arkane.common import is_pdep ################################################################################ @@ -86,7 +76,6 @@ networkDict = {} jobList = [] - ################################################################################ @@ -230,8 +219,9 @@ def species(label, *args, **kwargs): if db is None: raise DatabaseError('Thermo database is None.') except DatabaseError: - logging.warn("The database isn't loaded, cannot estimate thermo for {0}. " - "If it is a bath gas, set reactive = False to avoid generating thermo.".format(spec.label)) + logging.warning("The database isn't loaded, cannot estimate thermo for {0}. 
" + "If it is a bath gas, set reactive = False to avoid generating" + " thermo.".format(spec.label)) else: logging.info('No E0 or thermo found, estimating thermo and E0 of species {0} using' ' RMG-Database...'.format(spec.label)) @@ -356,7 +346,7 @@ def reaction(label, reactants, products, transitionState=None, kinetics=None, tu if isinstance(rxn, Reaction): reactionDict[label] = rxn else: - for i in xrange(len(rxn)): + for i in range(len(rxn)): reactionDict[label + str(i)] = rxn[i] return rxn @@ -497,7 +487,7 @@ def pressureDependence(label, Tmin=None, Tmax=None, Tcount=0, Tlist=None, Pmin=N interpolationModel = (interpolationModel,) nwk = None - if label in networkDict.keys(): + if label in list(networkDict.keys()): nwk = networkDict[label] job = PressureDependenceJob(network=nwk, Tmin=Tmin, Tmax=Tmax, Tcount=Tcount, Tlist=Tlist, @@ -633,7 +623,7 @@ def loadInputFile(path): with open(path, 'r') as f: try: - exec f in global_context, local_context + exec(f.read(), global_context, local_context) except (NameError, TypeError, SyntaxError): logging.error('The input file {0!r} was invalid:'.format(path)) raise diff --git a/arkane/inputTest.py b/arkane/inputTest.py index 86a1ff1bc0..5f9a839d72 100644 --- a/arkane/inputTest.py +++ b/arkane/inputTest.py @@ -29,22 +29,21 @@ ############################################################################### """ -Unit tests for the input module of Arkane +This module contains unit tests of the :mod:`arkane.input` module. """ -import unittest import os +import unittest import rmgpy +from rmgpy.exceptions import InputError +from rmgpy.kinetics.tunneling import Eckart from rmgpy.pdep.collision import SingleExponentialDown -from rmgpy.transport import TransportData -from rmgpy.statmech.vibration import HarmonicOscillator -from rmgpy.statmech.translation import IdealGasTranslation from rmgpy.statmech.rotation import NonlinearRotor -from rmgpy.kinetics.tunneling import Eckart -from rmgpy.exceptions import InputError +from rmgpy.statmech.translation import IdealGasTranslation +from rmgpy.statmech.vibration import HarmonicOscillator from rmgpy.thermo.nasa import NASAPolynomial, NASA -from rmgpy.molecule import Molecule +from rmgpy.transport import TransportData from arkane.input import species, transitionState, reaction, SMILES, loadInputFile, process_model_chemistry @@ -89,9 +88,10 @@ def test_species_atomic_NASA_polynomial(self): """ label0 = "H(1)" kwargs = {"structure": SMILES('[H]'), - "thermo": NASA(polynomials=[NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, 25473.7, -0.446683], Tmin=(200, 'K'), Tmax=(1000, 'K')), - NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, 25473.7, -0.446683], Tmin=(1000, 'K'), Tmax=(6000, 'K'))], - Tmin=(200, 'K'), Tmax=(6000, 'K'), comment="""Thermo library: FFCM1(-)"""), + "thermo": NASA(polynomials=[ + NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, 25473.7, -0.446683], Tmin=(200, 'K'), Tmax=(1000, 'K')), + NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, 25473.7, -0.446683], Tmin=(1000, 'K'), Tmax=(6000, 'K'))], + Tmin=(200, 'K'), Tmax=(6000, 'K'), comment="""Thermo library: FFCM1(-)"""), "energyTransferModel": SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85)} spc0 = species(label0, **kwargs) self.assertEqual(spc0.label, label0) @@ -105,9 +105,14 @@ def test_species_polyatomic_NASA_polynomial(self): """ label0 = "benzyl" kwargs = {"structure": SMILES('[c]1ccccc1'), - "thermo": NASA(polynomials=[NASAPolynomial(coeffs=[2.78632, 0.00784632, 7.97887e-05, -1.11617e-07, 4.39429e-11, 39695, 11.5114], Tmin=(100, 'K'), Tmax=(943.73, 'K')), - 
NASAPolynomial(coeffs=[13.2455, 0.0115667, -2.49996e-06, 4.66496e-10, -4.12376e-14, 35581.1, -49.6793], Tmin=(943.73, 'K'), Tmax=(5000, 'K'))], - Tmin=(100, 'K'), Tmax=(5000, 'K'), comment="""Thermo library: Fulvene_H + radical(CbJ)"""), + "thermo": NASA(polynomials=[NASAPolynomial( + coeffs=[2.78632, 0.00784632, 7.97887e-05, -1.11617e-07, 4.39429e-11, 39695, 11.5114], + Tmin=(100, 'K'), Tmax=(943.73, 'K')), + NASAPolynomial( + coeffs=[13.2455, 0.0115667, -2.49996e-06, 4.66496e-10, -4.12376e-14, + 35581.1, -49.6793], Tmin=(943.73, 'K'), Tmax=(5000, 'K'))], + Tmin=(100, 'K'), Tmax=(5000, 'K'), + comment="""Thermo library: Fulvene_H + radical(CbJ)"""), "energyTransferModel": SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85)} spc0 = species(label0, **kwargs) self.assertEqual(spc0.label, label0) @@ -248,6 +253,7 @@ def test_process_model_chemistry(self): with self.assertRaises(InputError): process_model_chemistry('CCSD(T)-F12a/aug-cc-pVTZ//CCSD(T)-F12a/aug-cc-pVTZ//B3LYP/6-311++G(3df,3pd)') + ################################################################################ diff --git a/arkane/kinetics.py b/arkane/kinetics.py index 80ca56cd4a..9ead721d21 100644 --- a/arkane/kinetics.py +++ b/arkane/kinetics.py @@ -1,10 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -""" -Arkane kinetics module -""" - ############################################################################### # # # RMG - Reaction Mechanism Generator # @@ -32,24 +28,25 @@ # # ############################################################################### +""" +Arkane kinetics module +""" + +import logging import os.path -import numpy import string -import logging -from rmgpy.kinetics.arrhenius import Arrhenius, ArrheniusEP, PDepArrhenius, MultiArrhenius, MultiPDepArrhenius -from rmgpy.kinetics.chebyshev import Chebyshev -from rmgpy.kinetics.falloff import ThirdBody, Lindemann, Troe -from rmgpy.kinetics.kineticsdata import KineticsData, PDepKineticsData -from rmgpy.kinetics.tunneling import Wigner, Eckart +import numpy as np + import rmgpy.quantity as quantity +from rmgpy.exceptions import SpeciesError, InputError +from rmgpy.kinetics.arrhenius import Arrhenius +from rmgpy.kinetics.tunneling import Wigner, Eckart from rmgpy.molecule.draw import MoleculeDrawer, createNewSurface -from rmgpy.exceptions import SpeciesError -from arkane.sensitivity import KineticsSensitivity as sa -from arkane.output import prettify from arkane.common import ArkaneSpecies - +from arkane.output import prettify +from arkane.sensitivity import KineticsSensitivity as SensAnalysis ################################################################################ @@ -63,12 +60,7 @@ class KineticsJob(object): if kinetics is already given in the input, then it is False. 
""" - def __init__(self, reaction, - Tmin=None, - Tmax=None, - Tlist=None, - Tcount=0, - sensitivity_conditions=None): + def __init__(self, reaction, Tmin=None, Tmax=None, Tlist=None, Tcount=0, sensitivity_conditions=None): self.usedTST = False self.Tmin = Tmin if Tmin is not None else (298, 'K') self.Tmax = Tmax if Tmax is not None else (2500, 'K') @@ -80,9 +72,7 @@ def __init__(self, reaction, self.Tmax = (max(self.Tlist.value_si), 'K') self.Tcount = len(self.Tlist.value_si) else: - self.Tlist = (1 / numpy.linspace(1 / self.Tmax.value_si, - 1 / self.Tmin.value_si, - self.Tcount), 'K') + self.Tlist = (1 / np.linspace(1 / self.Tmax.value_si, 1 / self.Tmin.value_si, self.Tcount), 'K') self.reaction = reaction self.kunits = None @@ -153,7 +143,7 @@ def execute(self, output_directory=None, plot=True): logging.warning("Could not draw reaction {1} due to error: {0}".format(e, self.reaction.label)) if self.sensitivity_conditions is not None: logging.info('\n\nRunning sensitivity analysis...') - sa(self, output_directory) + SensAnalysis(self, output_directory) logging.debug('Finished kinetics job for reaction {0}.'.format(self.reaction)) logging.debug(repr(self.reaction)) @@ -165,7 +155,7 @@ def generateKinetics(self): if isinstance(self.reaction.kinetics, Arrhenius): return None self.usedTST = True - kineticsClass = 'Arrhenius' + kinetics_class = 'Arrhenius' tunneling = self.reaction.transitionState.tunneling if isinstance(tunneling, Wigner) and tunneling.frequency is None: @@ -183,8 +173,8 @@ def generateKinetics(self): pass else: raise ValueError('Unknown tunneling model {0!r} for reaction {1}.'.format(tunneling, self.reaction)) - logging.debug('Generating {0} kinetics model for {1}...'.format(kineticsClass, self.reaction)) - klist = numpy.zeros_like(self.Tlist.value_si) + logging.debug('Generating {0} kinetics model for {1}...'.format(kinetics_class, self.reaction)) + klist = np.zeros_like(self.Tlist.value_si) for i, t in enumerate(self.Tlist.value_si): klist[i] = self.reaction.calculateTSTRateCoefficient(t) order = len(self.reaction.reactants) @@ -223,11 +213,11 @@ def write_output(self, output_directory): f.write('# ======= =========== =========== =========== ===============\n') if self.Tlist is None: - Tlist = numpy.array([300, 400, 500, 600, 800, 1000, 1500, 2000]) + t_list = np.array([300, 400, 500, 600, 800, 1000, 1500, 2000]) else: - Tlist = self.Tlist.value_si + t_list = self.Tlist.value_si - for T in Tlist: + for T in t_list: tunneling = reaction.transitionState.tunneling reaction.transitionState.tunneling = None try: @@ -261,7 +251,7 @@ def write_output(self, output_directory): else: keq_unit_converter = 1 - for n, T in enumerate(Tlist): + for n, T in enumerate(t_list): k = ks[n] k0 = k0s[n] Keq = keq_unit_converter * reaction.getEquilibriumConstant(T) # getEquilibriumConstant returns SI units @@ -275,8 +265,8 @@ def write_output(self, output_directory): f.write('# ======= ============ =========== ============ ============= =========\n') f.write('\n\n') - kinetics0rev = Arrhenius().fitToData(Tlist, numpy.array(k0revs), kunits=self.krunits) - kineticsrev = Arrhenius().fitToData(Tlist, numpy.array(krevs), kunits=self.krunits) + kinetics0rev = Arrhenius().fitToData(t_list, np.array(k0revs), kunits=self.krunits) + kineticsrev = Arrhenius().fitToData(t_list, np.array(krevs), kunits=self.krunits) f.write('# krev (TST) = {0} \n'.format(kinetics0rev)) f.write('# krev (TST+T) = {0} \n\n'.format(kineticsrev)) @@ -315,14 +305,14 @@ def save_yaml(self, output_directory): """ Save a YAML file 
for TSs if structures of the respective reactant/s and product/s are known """ - if all ([spc.molecule is not None and len(spc.molecule) - for spc in self.reaction.reactants + self.reaction.products]): + if all([spc.molecule is not None and len(spc.molecule) + for spc in self.reaction.reactants + self.reaction.products]): self.arkane_species.update_species_attributes(self.reaction.transitionState) self.arkane_species.reaction_label = self.reaction.label self.arkane_species.reactants = [{'label': spc.label, 'adjacency_list': spc.molecule[0].toAdjacencyList()} for spc in self.reaction.reactants] self.arkane_species.products = [{'label': spc.label, 'adjacency_list': spc.molecule[0].toAdjacencyList()} - for spc in self.reaction.products] + for spc in self.reaction.products] self.arkane_species.save_yaml(path=output_directory) def plot(self, output_directory): @@ -338,10 +328,10 @@ def plot(self, output_directory): if self.Tlist is not None: t_list = [t for t in self.Tlist.value_si] else: - t_list = 1000.0 / numpy.arange(0.4, 3.35, 0.05) - klist = numpy.zeros_like(t_list) - klist2 = numpy.zeros_like(t_list) - for i in xrange(len(t_list)): + t_list = 1000.0 / np.arange(0.4, 3.35, 0.05) + klist = np.zeros_like(t_list) + klist2 = np.zeros_like(t_list) + for i in range(len(t_list)): klist[i] = self.reaction.calculateTSTRateCoefficient(t_list[i]) klist2[i] = self.reaction.kinetics.getRateCoefficient(t_list[i]) @@ -392,7 +382,7 @@ def draw(self, output_directory, format='pdf'): KineticsDrawer().draw(self.reaction, format=format, path=path) -class KineticsDrawer: +class KineticsDrawer(object): """ This class provides functionality for drawing the potential energy surface for a high pressure limit reaction using the Cairo 2D graphics engine. @@ -436,16 +426,16 @@ def __getEnergyRange(self): """ Return the minimum and maximum energy in J/mol on the potential energy surface. 
""" - E0min = min(self.wells[0].E0, self.wells[1].E0, self.reaction.transitionState.conformer.E0.value_si) - E0max = max(self.wells[0].E0, self.wells[1].E0, self.reaction.transitionState.conformer.E0.value_si) - if E0max - E0min > 5e5: + e0_min = min(self.wells[0].E0, self.wells[1].E0, self.reaction.transitionState.conformer.E0.value_si) + e0_max = max(self.wells[0].E0, self.wells[1].E0, self.reaction.transitionState.conformer.E0.value_si) + if e0_max - e0_min > 5e5: # the energy barrier in one of the reaction directions is larger than 500 kJ/mol, warn the user logging.warning('The energy differences between the stationary points of reaction {0} ' 'seems too large.'.format(self.reaction)) logging.warning('Got the following energies:\nWell 1: {0} kJ/mol\nTS: {1} kJ/mol\nWell 2: {2}' ' kJ/mol'.format(self.wells[0].E0 / 1000., self.wells[1].E0 / 1000., self.reaction.transitionState.conformer.E0.value_si / 1000.)) - return E0min, E0max + return e0_min, e0_max def __useStructureForLabel(self, configuration): """ @@ -454,16 +444,16 @@ def __useStructureForLabel(self, configuration): """ # Initialize with the current user option value - useStructures = self.options['structures'] + use_structures = self.options['structures'] # But don't use structures if one or more species in the configuration # do not have structure data for spec in configuration.species_list: if spec.molecule is None or len(spec.molecule) == 0: - useStructures = False + use_structures = False break - return useStructures + return use_structures def __getTextSize(self, text, padding=2, format='pdf'): try: @@ -495,56 +485,56 @@ def __drawText(self, text, cr, x0, y0, padding=2): def __getLabelSize(self, configuration, format='pdf'): width = 0 height = 0 - boundingRects = [] + bounding_rects = [] if self.__useStructureForLabel(configuration): for spec in configuration.species_list: _, _, rect = MoleculeDrawer().draw(spec.molecule[0], format=format) - boundingRects.append(list(rect)) + bounding_rects.append(list(rect)) else: for spec in configuration.species_list: - boundingRects.append(self.__getTextSize(spec.label, format=format)) + bounding_rects.append(self.__getTextSize(spec.label, format=format)) - plusRect = self.__getTextSize('+', format=format) + plus_rect = self.__getTextSize('+', format=format) - for rect in boundingRects: + for rect in bounding_rects: if width < rect[2]: width = rect[2] - height += rect[3] + plusRect[3] - height -= plusRect[3] + height += rect[3] + plus_rect[3] + height -= plus_rect[3] return [0, 0, width, height] def __drawLabel(self, configuration, cr, x0, y0, format='pdf'): - boundingRect = self.__getLabelSize(configuration, format=format) + bounding_rect = self.__getLabelSize(configuration, format=format) padding = 2 - useStructures = self.__useStructureForLabel(configuration) + use_structures = self.__useStructureForLabel(configuration) y = y0 for i, spec in enumerate(configuration.species_list): if i > 0: rect = self.__getTextSize('+', padding=padding, format=format) - x = x0 - 0.5 * (rect[2] - boundingRect[2]) + 2 * padding + x = x0 - 0.5 * (rect[2] - bounding_rect[2]) + 2 * padding self.__drawText('+', cr, x, y) y += rect[3] - if useStructures: - moleculeDrawer = MoleculeDrawer() + if use_structures: + molecule_drawer = MoleculeDrawer() cr.save() - _, _, rect = moleculeDrawer.draw(spec.molecule[0], format=format) + _, _, rect = molecule_drawer.draw(spec.molecule[0], format=format) cr.restore() - x = x0 - 0.5 * (rect[2] - boundingRect[2]) + x = x0 - 0.5 * (rect[2] - bounding_rect[2]) cr.save() - 
moleculeDrawer.render(cr, offset=(x, y)) + molecule_drawer.render(cr, offset=(x, y)) cr.restore() y += rect[3] else: rect = self.__getTextSize(spec.label, padding=padding, format=format) - x = x0 - 0.5 * (rect[2] - boundingRect[2]) + 2 * padding + x = x0 - 0.5 * (rect[2] - bounding_rect[2]) + 2 * padding self.__drawText(spec.label, cr, x, y) y += rect[3] - return boundingRect + return bounding_rect def draw(self, reaction, format, path=None): """ @@ -565,77 +555,77 @@ def draw(self, reaction, format, path=None): self.wells = [Well(self.reaction.reactants), Well(self.reaction.products)] # Generate the bounding rectangles for each configuration label - labelRects = [] + label_rects = [] for well in self.wells: - labelRects.append(self.__getLabelSize(well, format=format)) + label_rects.append(self.__getLabelSize(well, format=format)) # Get energy range (use kJ/mol internally) - E0min, E0max = self.__getEnergyRange() - E0min *= 0.001 - E0max *= 0.001 + e0_min, e0_max = self.__getEnergyRange() + e0_min *= 0.001 + e0_max *= 0.001 # Drawing parameters padding = self.options['padding'] - wellWidth = self.options['wellWidth'] - wellSpacing = self.options['wellSpacing'] - Eslope = self.options['Eslope'] - TSwidth = self.options['TSwidth'] + well_width = self.options['wellWidth'] + well_spacing = self.options['wellSpacing'] + e_slope = self.options['Eslope'] + ts_width = self.options['TSwidth'] - E0_offset = self.options['E0offset'] * 0.001 + e0_offset = self.options['E0offset'] * 0.001 # Choose multiplier to convert energies to desired units (on figure only) - Eunits = self.options['Eunits'] + e_units = self.options['Eunits'] try: - Emult = {'J/mol': 1.0, 'kJ/mol': 0.001, 'cal/mol': 1.0 / 4.184, 'kcal/mol': 1.0 / 4184., - 'cm^-1': 1.0 / 11.962}[Eunits] + e_mult = {'J/mol': 1.0, 'kJ/mol': 0.001, 'cal/mol': 1.0 / 4.184, 'kcal/mol': 1.0 / 4184., + 'cm^-1': 1.0 / 11.962}[e_units] except KeyError: - raise Exception('Invalid value "{0}" for Eunits parameter.'.format(Eunits)) + raise InputError('Invalid value "{0}" for Eunits parameter.'.format(e_units)) # Determine height required for drawing Eheight = self.__getTextSize('0.0', format=format)[3] + 6 - y_E0 = (E0max - 0.0) * Eslope + padding + Eheight - height = (E0max - E0min) * Eslope + 2 * padding + Eheight + 6 - for i in xrange(len(self.wells)): - if 0.001 * self.wells[i].E0 == E0min: - height += labelRects[i][3] + y_e0 = (e0_max - 0.0) * e_slope + padding + Eheight + height = (e0_max - e0_min) * e_slope + 2 * padding + Eheight + 6 + for i in range(len(self.wells)): + if 0.001 * self.wells[i].E0 == e0_min: + height += label_rects[i][3] break # Determine naive position of each well (one per column) - coordinates = numpy.zeros((len(self.wells), 2), numpy.float64) + coordinates = np.zeros((len(self.wells), 2), np.float64) x = padding - for i in xrange(len(self.wells)): + for i in range(len(self.wells)): well = self.wells[i] - rect = labelRects[i] - thisWellWidth = max(wellWidth, rect[2]) - E0 = 0.001 * well.E0 - y = y_E0 - E0 * Eslope - coordinates[i] = [x + 0.5 * thisWellWidth, y] - x += thisWellWidth + wellSpacing - width = x + padding - wellSpacing + rect = label_rects[i] + this_well_width = max(well_width, rect[2]) + e0 = 0.001 * well.E0 + y = y_e0 - e0 * e_slope + coordinates[i] = [x + 0.5 * this_well_width, y] + x += this_well_width + well_spacing + width = x + padding - well_spacing # Determine the rectangles taken up by each well # We'll use this to merge columns safely so that wells don't overlap - wellRects = [] + well_rects = [] for i in 
range(len(self.wells)): - l, t, w, h = labelRects[i] + l, t, w, h = label_rects[i] x, y = coordinates[i, :] - if w < wellWidth: - w = wellWidth + if w < well_width: + w = well_width t -= 6 + Eheight h += 6 + Eheight - wellRects.append([l + x - 0.5 * w, t + y + 6, w, h]) + well_rects.append([l + x - 0.5 * w, t + y + 6, w, h]) # Squish columns together from the left where possible until an isomer is encountered - oldLeft = numpy.min(coordinates[:, 0]) + oldLeft = np.min(coordinates[:, 0]) Nleft = - 1 columns = [] for i in range(Nleft, -1, -1): - top = wellRects[i][1] - bottom = top + wellRects[i][3] + top = well_rects[i][1] + bottom = top + well_rects[i][3] for column in columns: for c in column: - top0 = wellRects[c][1] - bottom0 = top + wellRects[c][3] + top0 = well_rects[c][1] + bottom0 = top + well_rects[c][3] if (top0 <= top <= bottom0) or (top <= top0 <= bottom): # Can't put it in this column break @@ -647,25 +637,25 @@ def draw(self, reaction, format, path=None): # Needs a new column columns.append([i]) for column in columns: - columnWidth = max([wellRects[c][2] for c in column]) - x = coordinates[column[0] + 1, 0] - 0.5 * wellRects[column[0] + 1][2] - wellSpacing - 0.5 * columnWidth + column_width = max([well_rects[c][2] for c in column]) + x = coordinates[column[0] + 1, 0] - 0.5 * well_rects[column[0] + 1][2] - well_spacing - 0.5 * column_width for c in column: delta = x - coordinates[c, 0] - wellRects[c][0] += delta + well_rects[c][0] += delta coordinates[c, 0] += delta - newLeft = numpy.min(coordinates[:, 0]) - coordinates[:, 0] -= newLeft - oldLeft + new_left = np.min(coordinates[:, 0]) + coordinates[:, 0] -= new_left - oldLeft # Squish columns together from the right where possible until an isomer is encountered - Nright = 3 + n_right = 3 columns = [] - for i in range(Nright, len(self.wells)): - top = wellRects[i][1] - bottom = top + wellRects[i][3] + for i in range(n_right, len(self.wells)): + top = well_rects[i][1] + bottom = top + well_rects[i][3] for column in columns: for c in column: - top0 = wellRects[c][1] - bottom0 = top0 + wellRects[c][3] + top0 = well_rects[c][1] + bottom0 = top0 + well_rects[c][3] if (top0 <= top <= bottom0) or (top <= top0 <= bottom): # Can't put it in this column break @@ -677,14 +667,14 @@ def draw(self, reaction, format, path=None): # Needs a new column columns.append([i]) for column in columns: - columnWidth = max([wellRects[c][2] for c in column]) - x = coordinates[column[0] - 1, 0] + 0.5 * wellRects[column[0] - 1][2] + wellSpacing + 0.5 * columnWidth + column_width = max([well_rects[c][2] for c in column]) + x = coordinates[column[0] - 1, 0] + 0.5 * well_rects[column[0] - 1][2] + well_spacing + 0.5 * column_width for c in column: delta = x - coordinates[c, 0] - wellRects[c][0] += delta + well_rects[c][0] += delta coordinates[c, 0] += delta - width = max([rect[2] + rect[0] for rect in wellRects]) - min([rect[0] for rect in wellRects]) + 2 * padding + width = max([rect[2] + rect[0] for rect in well_rects]) - min([rect[0] for rect in well_rects]) + 2 * padding # Draw to the final surface surface = createNewSurface(format=format, target=path, width=width, height=height) @@ -697,41 +687,41 @@ def draw(self, reaction, format, path=None): # Fill the background with white cr.set_source_rgba(1.0, 1.0, 1.0, 1.0) cr.paint() - self.__drawText('E0 ({0})'.format(Eunits), cr, 15, 10, padding=2) # write units + self.__drawText('E0 ({0})'.format(e_units), cr, 15, 10, padding=2) # write units # Draw reactions - E0_reac = self.wells[0].E0 * 0.001 - E0_offset - 
E0_prod = self.wells[1].E0 * 0.001 - E0_offset - E0_TS = self.reaction.transitionState.conformer.E0.value_si * 0.001 - E0_offset + e0_reac = self.wells[0].E0 * 0.001 - e0_offset + e0_prod = self.wells[1].E0 * 0.001 - e0_offset + e0_ts = self.reaction.transitionState.conformer.E0.value_si * 0.001 - e0_offset x1, y1 = coordinates[0, :] x2, y2 = coordinates[1, :] - x1 += wellSpacing / 2.0 - x2 -= wellSpacing / 2.0 - if abs(E0_TS - E0_reac) > 0.1 and abs(E0_TS - E0_prod) > 0.1: + x1 += well_spacing / 2.0 + x2 -= well_spacing / 2.0 + if abs(e0_ts - e0_reac) > 0.1 and abs(e0_ts - e0_prod) > 0.1: if len(self.reaction.reactants) == 2: - if E0_reac < E0_prod: - x0 = x1 + wellSpacing * 0.5 + if e0_reac < e0_prod: + x0 = x1 + well_spacing * 0.5 else: - x0 = x2 - wellSpacing * 0.5 + x0 = x2 - well_spacing * 0.5 elif len(self.reaction.products) == 2: - if E0_reac < E0_prod: - x0 = x2 - wellSpacing * 0.5 + if e0_reac < e0_prod: + x0 = x2 - well_spacing * 0.5 else: - x0 = x1 + wellSpacing * 0.5 + x0 = x1 + well_spacing * 0.5 else: x0 = 0.5 * (x1 + x2) - y0 = y_E0 - (E0_TS + E0_offset) * Eslope + y0 = y_e0 - (e0_ts + e0_offset) * e_slope width1 = (x0 - x1) width2 = (x2 - x0) # Draw horizontal line for TS cr.set_source_rgba(0.0, 0.0, 0.0, 1.0) cr.set_line_width(2.0) - cr.move_to(x0 - TSwidth / 2.0, y0) - cr.line_to(x0 + TSwidth / 2.0, y0) + cr.move_to(x0 - ts_width / 2.0, y0) + cr.line_to(x0 + ts_width / 2.0, y0) cr.stroke() # Add background and text for energy - E0 = "{0:.1f}".format(E0_TS * 1000. * Emult) - extents = cr.text_extents(E0) + e0 = "{0:.1f}".format(e0_ts * 1000. * e_mult) + extents = cr.text_extents(e0) x = x0 - extents[2] / 2.0 y = y0 - 6.0 cr.rectangle(x + extents[0] - 2.0, y + extents[1] - 2.0, extents[2] + 4.0, extents[3] + 4.0) @@ -739,14 +729,14 @@ def draw(self, reaction, format, path=None): cr.fill() cr.move_to(x, y) cr.set_source_rgba(0.0, 0.0, 0.0, 1.0) - cr.show_text(E0) + cr.show_text(e0) # Draw Bezier curve connecting reactants and products through TS cr.set_source_rgba(0.0, 0.0, 0.0, 0.5) cr.set_line_width(1.0) cr.move_to(x1, y1) - cr.curve_to(x1 + width1 / 8.0, y1, x0 - width1 / 8.0 - TSwidth / 2.0, y0, x0 - TSwidth / 2.0, y0) - cr.move_to(x0 + TSwidth / 2.0, y0) - cr.curve_to(x0 + width2 / 8.0 + TSwidth / 2.0, y0, x2 - width2 / 8.0, y2, x2, y2) + cr.curve_to(x1 + width1 / 8.0, y1, x0 - width1 / 8.0 - ts_width / 2.0, y0, x0 - ts_width / 2.0, y0) + cr.move_to(x0 + ts_width / 2.0, y0) + cr.curve_to(x0 + width2 / 8.0 + ts_width / 2.0, y0, x2 - width2 / 8.0, y2, x2, y2) cr.stroke() else: width = (x2 - x1) @@ -762,14 +752,14 @@ def draw(self, reaction, format, path=None): x0, y0 = coordinates[i, :] # Draw horizontal line for well cr.set_line_width(4.0) - cr.move_to(x0 - wellWidth / 2.0, y0) - cr.line_to(x0 + wellWidth / 2.0, y0) + cr.move_to(x0 - well_width / 2.0, y0) + cr.line_to(x0 + well_width / 2.0, y0) cr.set_source_rgba(0.0, 0.0, 0.0, 1.0) cr.stroke() # Add background and text for energy - E0 = well.E0 * 0.001 - E0_offset - E0 = "{0:.1f}".format(E0 * 1000. * Emult) - extents = cr.text_extents(E0) + e0 = well.E0 * 0.001 - e0_offset + e0 = "{0:.1f}".format(e0 * 1000. 
* e_mult) + extents = cr.text_extents(e0) x = x0 - extents[2] / 2.0 y = y0 - 6.0 cr.rectangle(x + extents[0] - 2.0, y + extents[1] - 2.0, extents[2] + 4.0, extents[3] + 4.0) @@ -777,11 +767,11 @@ def draw(self, reaction, format, path=None): cr.fill() cr.move_to(x, y) cr.set_source_rgba(0.0, 0.0, 0.0, 1.0) - cr.show_text(E0) + cr.show_text(e0) # Draw background and text for label - x = x0 - 0.5 * labelRects[i][2] + x = x0 - 0.5 * label_rects[i][2] y = y0 + 6 - cr.rectangle(x, y, labelRects[i][2], labelRects[i][3]) + cr.rectangle(x, y, label_rects[i][2], label_rects[i][3]) cr.set_source_rgba(1.0, 1.0, 1.0, 0.75) cr.fill() self.__drawLabel(well, cr, x, y, format=format) @@ -793,7 +783,7 @@ def draw(self, reaction, format, path=None): surface.finish() -class Well: +class Well(object): """ A helper class representing a "well" of species `species_list` is a list of at least one entry diff --git a/arkane/kineticsTest.py b/arkane/kineticsTest.py index cf52745e21..7415dcfea4 100644 --- a/arkane/kineticsTest.py +++ b/arkane/kineticsTest.py @@ -28,10 +28,14 @@ # # ############################################################################### +""" +This module contains unit tests of the :mod:`arkane.kinetics` module. +""" + import unittest -from rmgpy.species import TransitionState from rmgpy.reaction import Reaction +from rmgpy.species import TransitionState from arkane.kinetics import KineticsJob @@ -48,11 +52,11 @@ def test_give_tlist_for_kineticsjob(self): Ensures that the proper temperature ranges are set when Tlist is specified """ rxn = Reaction(transitionState=TransitionState()) - Tlist = [50.7, 100, 300, 800, 1255] - kjob = KineticsJob(rxn, Tlist=(Tlist, 'K')) - self.assertEqual(min(Tlist), kjob.Tmin.value_si) - self.assertEqual(max(Tlist), kjob.Tmax.value_si) - self.assertEqual(len(Tlist), kjob.Tcount) + t_list = [50.7, 100, 300, 800, 1255] + kjob = KineticsJob(rxn, Tlist=(t_list, 'K')) + self.assertEqual(min(t_list), kjob.Tmin.value_si) + self.assertEqual(max(t_list), kjob.Tmax.value_si) + self.assertEqual(len(t_list), kjob.Tcount) def test_give_Trange_for_kineticsjob(self): """ diff --git a/arkane/log.py b/arkane/log.py index 5d6c3d9552..6a2101af89 100644 --- a/arkane/log.py +++ b/arkane/log.py @@ -31,13 +31,16 @@ """ A general class for parsing quantum mechanical log files """ -import os.path + import logging +import os.path import shutil from rmgpy.qm.qmdata import QMData from rmgpy.qm.symmetry import PointGroupCalculator +################################################################################ + class Log(object): """ @@ -128,7 +131,6 @@ def load_scan_frozen_atoms(self): """ raise NotImplementedError("load_scan_frozen_atoms is not implemented for the Log class") - def loadNegativeFrequency(self): """ Return the imaginary frequency from a transition state frequency diff --git a/arkane/main.py b/arkane/main.py index 31c8096503..8c999827a6 100644 --- a/arkane/main.py +++ b/arkane/main.py @@ -32,18 +32,18 @@ This module contains the :class:`Arkane` class, the main class used to run Arkane. 
""" +import argparse +import csv +import logging import os import os.path import sys -import logging -import argparse import time -import csv + import numpy as np try: import matplotlib - matplotlib.rc('mathtext', default='regular') except ImportError: pass @@ -54,19 +54,18 @@ from rmgpy.data.kinetics.library import KineticsLibrary from rmgpy.exceptions import InputError +from arkane.common import is_pdep +from arkane.explorer import ExplorerJob from arkane.input import loadInputFile from arkane.kinetics import KineticsJob +from arkane.pdep import PressureDependenceJob from arkane.statmech import StatMechJob from arkane.thermo import ThermoJob -from arkane.pdep import PressureDependenceJob -from arkane.explorer import ExplorerJob -from arkane.common import is_pdep - ################################################################################ -class Arkane: +class Arkane(object): """ The :class:`Arkane` class represents an instance of Arkane, a tool for computing properties of chemical species and reactions. The attributes are: @@ -245,13 +244,13 @@ def execute(self): # Initialize (and clear!) the output files for the job if self.outputDirectory is None: self.outputDirectory = os.path.dirname(os.path.abspath(self.inputFile)) - outputFile = os.path.join(self.outputDirectory, 'output.py') - with open(outputFile, 'w') as f: + output_file = os.path.join(self.outputDirectory, 'output.py') + with open(output_file, 'w') as f: pass - chemkinFile = os.path.join(self.outputDirectory, 'chem.inp') + chemkin_file = os.path.join(self.outputDirectory, 'chem.inp') # write the chemkin files and run the thermo and then kinetics jobs - with open(chemkinFile, 'w') as f: + with open(chemkin_file, 'w') as f: writeElementsSection(f) f.write('SPECIES\n\n') @@ -280,7 +279,7 @@ def execute(self): for hr_info in job.raw_hindered_rotor_data: hindered_rotor_info.append(hr_info) - with open(chemkinFile, 'a') as f: + with open(chemkin_file, 'a') as f: f.write('\n') f.write('END\n\n\n\n') f.write('REACTIONS KCAL/MOLE MOLES\n\n') @@ -290,10 +289,11 @@ def execute(self): supporting_info_file = os.path.join(self.outputDirectory, 'supporting_information.csv') with open(supporting_info_file, 'wb') as csvfile: writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) - writer.writerow(['Label','Symmetry Number','Number of optical isomers','Symmetry Group', - 'Rotational constant (cm-1)','Calculated Frequencies (unscaled and prior to projection, cm^-1)', - 'Electronic energy (J/mol)','E0 (electronic energy + ZPE, J/mol)', - 'E0 with atom and bond corrections (J/mol)','Atom XYZ coordinates (angstrom)', + writer.writerow(['Label', 'Symmetry Number', 'Number of optical isomers', 'Symmetry Group', + 'Rotational constant (cm-1)', + 'Calculated Frequencies (unscaled and prior to projection, cm^-1)', + 'Electronic energy (J/mol)', 'E0 (electronic energy + ZPE, J/mol)', + 'E0 with atom and bond corrections (J/mol)', 'Atom XYZ coordinates (angstrom)', 'T1 diagnostic', 'D1 diagnostic']) for row in supporting_info: label = row[0] @@ -306,10 +306,11 @@ def execute(self): rot = ', '.join(['{0:.2f}'.format(s) for s in row[4].rotationalConstant.value]) if row[5] is not None: freq = '' - if row[6] is not None: #there is a negative frequency + if row[6] is not None: # there is a negative frequency freq = '{0:.1f}'.format(abs(row[6])) + 'i, ' freq += ', '.join(['{0:.1f}'.format(s) for s in row[5]]) - atoms = ', '.join(["{0} {1}".format(atom," ".join([str(c) for c in coords])) for atom, coords in zip(row[10], 
row[11])]) + atoms = ', '.join(["{0} {1}".format(atom, " ".join([str(c) for c in coords])) + for atom, coords in zip(row[10], row[11])]) writer.writerow([label, row[1], row[2], row[3], rot, freq, row[7], row[8], row[9], atoms, row[12], row[13]]) if hindered_rotor_info: @@ -319,11 +320,11 @@ def execute(self): with open(hr_file, 'wb') as csvfile: writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) writer.writerow(['species', 'rotor_number', 'symmetry', 'resolution (degrees)', - 'pivot_atoms', 'frozen_atoms'] + + 'pivot_atoms', 'frozen_atoms'] + ['energy (J/mol) {}'.format(i) for i in range(max_energy_length)]) for row in hindered_rotor_info: writer.writerow([row[0], row[1], row[2], row[3][1] * 180 / np.pi, - row[5], row[6]] + [a for a in row[4]]) + row[5], row[6]] + [a for a in row[4]]) # run kinetics and pdep jobs (also writes reaction blocks to Chemkin file) for job in self.jobList: if isinstance(job, KineticsJob): @@ -335,13 +336,13 @@ def execute(self): raise InputError( 'No network matched the label of the pressureDependence block and there is no explorer block ' 'to generate a network') - job.execute(outputFile=outputFile, plot=self.plot) + job.execute(outputFile=output_file, plot=self.plot) elif isinstance(job, ExplorerJob): - thermoLibrary, kineticsLibrary, speciesList = self.getLibraries() - job.execute(outputFile=outputFile, plot=self.plot, speciesList=speciesList, thermoLibrary=thermoLibrary, - kineticsLibrary=kineticsLibrary) + thermo_library, kinetics_library, species_list = self.getLibraries() + job.execute(outputFile=output_file, plot=self.plot, speciesList=species_list, + thermoLibrary=thermo_library, kineticsLibrary=kinetics_library) - with open(chemkinFile, 'a') as f: + with open(chemkin_file, 'a') as f: f.write('END\n\n') # Print some information to the end of the log @@ -351,47 +352,47 @@ def getLibraries(self): """Get RMG kinetics and thermo libraries""" name = 'kineticsjobs' - speciesList = self.speciesDict.values() - reactionList = self.reactionDict.values() + species_list = list(self.speciesDict.values()) + reaction_list = list(self.reactionDict.values()) # remove duplicate species - for rxn in reactionList: + for rxn in reaction_list: for i, rspc in enumerate(rxn.reactants): - for spc in speciesList: + for spc in species_list: if spc.isIsomorphic(rspc): rxn.reactants[i] = spc break for i, rspc in enumerate(rxn.products): - for spc in speciesList: + for spc in species_list: if spc.isIsomorphic(rspc): rxn.products[i] = spc break del_inds = [] - for i, spc1 in enumerate(speciesList): - for j, spc2 in enumerate(speciesList): + for i, spc1 in enumerate(species_list): + for j, spc2 in enumerate(species_list): if j > i and spc1.isIsomorphic(spc2): del_inds.append(j) for j in sorted(del_inds)[::-1]: - del speciesList[j] + del species_list[j] - thermoLibrary = ThermoLibrary(name=name) - for i, species in enumerate(speciesList): + thermo_library = ThermoLibrary(name=name) + for i, species in enumerate(species_list): if species.thermo: - thermoLibrary.loadEntry(index=i + 1, - label=species.label, - molecule=species.molecule[0].toAdjacencyList(), - thermo=species.thermo, - shortDesc=species.thermo.comment) + thermo_library.loadEntry(index=i + 1, + label=species.label, + molecule=species.molecule[0].toAdjacencyList(), + thermo=species.thermo, + shortDesc=species.thermo.comment) else: logging.warning( 'Species {0} did not contain any thermo data and was omitted from the thermo library.'.format( str(species))) # load kinetics library entries - 
kineticsLibrary = KineticsLibrary(name=name, autoGenerated=True) - kineticsLibrary.entries = {} - for i, reaction in enumerate(reactionList): + kinetics_library = KineticsLibrary(name=name, autoGenerated=True) + kinetics_library.entries = {} + for i, reaction in enumerate(reaction_list): entry = Entry( index=i + 1, label=reaction.toLabeledStr(), @@ -405,8 +406,8 @@ def getLibraries(self): else: entry.longDesc = reaction.kinetics.comment - kineticsLibrary.entries[i + 1] = entry + kinetics_library.entries[i + 1] = entry - kineticsLibrary.label = name + kinetics_library.label = name - return thermoLibrary, kineticsLibrary, speciesList + return thermo_library, kinetics_library, species_list diff --git a/arkane/mainTest.py b/arkane/mainTest.py index 3dabc27575..0ace057527 100644 --- a/arkane/mainTest.py +++ b/arkane/mainTest.py @@ -29,17 +29,19 @@ ############################################################################### """ -This script contains unit tests of the :mod:`arkane.main` module. +This module contains unit tests of the :mod:`arkane.main` module. """ -import unittest import logging import os import shutil -from nose.plugins.attrib import attr +import unittest import zipfile +from nose.plugins.attrib import attr + import rmgpy + from arkane import Arkane ################################################################################ @@ -50,6 +52,7 @@ class TestArkaneExamples(unittest.TestCase): """ Run all of Arkane's examples, and report which one failed """ + @classmethod def setUpClass(cls): """A function that is run ONCE before all unit tests in this class.""" @@ -88,7 +91,7 @@ def tearDownClass(cls): """A function that is run ONCE after all unit tests in this class.""" cls.extensions_to_delete = ['pdf', 'csv', 'txt', 'inp'] cls.files_to_delete = ['arkane.log', 'output.py'] - cls.files_to_keep = ['README.txt'] # files to keep that have extentions marked for deletion + cls.files_to_keep = ['README.txt'] # files to keep that have extensions marked for deletion cls.base_path = os.path.join(os.path.dirname(os.path.dirname(rmgpy.__file__)), 'examples', 'arkane') for example_type in cls.example_types: example_type_path = os.path.join(cls.base_path, example_type) @@ -99,7 +102,7 @@ def tearDownClass(cls): item_path = os.path.join(example_path, name) if os.path.isfile(item_path): extension = name.split('.')[-1] - if name in cls.files_to_delete or\ + if name in cls.files_to_delete or \ (extension in cls.extensions_to_delete and name not in cls.files_to_keep): os.remove(item_path) else: diff --git a/arkane/molpro.py b/arkane/molpro.py index 309f05dd53..4e1fd594a5 100644 --- a/arkane/molpro.py +++ b/arkane/molpro.py @@ -1,11 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -""" -Arkane Molpro module -Used to parse Molpro output files -""" - ############################################################################### # # # RMG - Reaction Mechanism Generator # @@ -33,15 +28,21 @@ # # ############################################################################### -import math -import numpy +""" +Arkane Molpro module +Used to parse Molpro output files +""" + import logging +import math + +import numpy as np import rmgpy.constants as constants -from rmgpy.exceptions import InputError from rmgpy.statmech import IdealGasTranslation, NonlinearRotor, LinearRotor, HarmonicOscillator, Conformer from arkane.common import get_element_mass +from arkane.exceptions import LogError from arkane.log import Log ################################################################################ @@ 
-92,18 +93,18 @@ def loadForceConstantMatrix(self): while line != '': # Read force constant matrix if 'Force Constants (Second Derivatives of the Energy) in [a.u.]' in line: - fc = numpy.zeros((n_rows, n_rows), numpy.float64) + fc = np.zeros((n_rows, n_rows), np.float64) for i in range(int(math.ceil(n_rows / 5.0))): # Header row line = f.readline() # Matrix element rows - for j in range(i*5, n_rows): + for j in range(i * 5, n_rows): data = f.readline().split() - for k in range(len(data)-1): - fc[j, i*5+k] = float(data[k+1].replace('D', 'E')) - fc[i*5+k, j] = fc[j, i*5+k] + for k in range(len(data) - 1): + fc[j, i * 5 + k] = float(data[k + 1].replace('D', 'E')) + fc[i * 5 + k, j] = fc[j, i * 5 + k] # Convert from atomic units (Hartree/Bohr_radius^2) to J/m^2 - fc *= 4.35974417e-18 / 5.291772108e-11**2 + fc *= 4.35974417e-18 / 5.291772108e-11 ** 2 line = f.readline() return fc @@ -156,9 +157,9 @@ def loadGeometry(self): mass1, num1 = get_element_mass(atom1) mass.append(mass1) number.append(num1) - number = numpy.array(number, numpy.int) - mass = numpy.array(mass, numpy.float64) - coord = numpy.array(coord, numpy.float64) + number = np.array(number, np.int) + mass = np.array(mass, np.float64) + coord = np.array(coord, np.float64) if len(number) == 0 or len(coord) == 0 or len(mass) == 0: raise InputError('Unable to read atoms from Molpro geometry output file {0}'.format(self.path)) @@ -188,7 +189,7 @@ def loadConformer(self, symmetry=None, spinMultiplicity=0, opticalIsomers=None, splits = line.replace('=', ' ').replace(',', ' ').split(' ') for i, s in enumerate(splits): if 'spin' in s: - spinMultiplicity = int(splits[i+1]) + 1 + spinMultiplicity = int(splits[i + 1]) + 1 logging.debug( 'Conformer {0} is assigned a spin multiplicity of {1}'.format(label, spinMultiplicity)) break @@ -231,14 +232,15 @@ def loadConformer(self, symmetry=None, spinMultiplicity=0, opticalIsomers=None, elif 'Rotational Constants' in line and line.split()[-1] == '[GHz]': inertia = [float(d) for d in line.split()[-4:-1]] for i in range(3): - inertia[i] = constants.h / (8 * constants.pi * constants.pi * inertia[i] * 1e9)\ + inertia[i] = constants.h / (8 * constants.pi * constants.pi * inertia[i] * 1e9) \ * constants.Na * 1e23 rotation = NonlinearRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry) modes.append(rotation) elif 'Rotational Constant' in line and line.split()[3] == '[GHz]': inertia = float(line.split()[2]) - inertia = constants.h / (8 * constants.pi * constants.pi * inertia * 1e9) * constants.Na * 1e23 + inertia = constants.h / (8 * constants.pi * constants.pi * inertia * 1e9) \ + * constants.Na * 1e23 rotation = LinearRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry) modes.append(rotation) @@ -399,7 +401,7 @@ def loadScanEnergies(self): def get_T1_diagnostic(self): """ Returns the T1 diagnostic from output log. - If multiple occurrences exist, returns the last occurence + If multiple occurrences exist, returns the last occurrence """ with open(self.path) as f: log = f.readlines() @@ -413,7 +415,7 @@ def get_T1_diagnostic(self): def get_D1_diagnostic(self): """ Returns the D1 diagnostic from output log. 
- If multiple occurrences exist, returns the last occurence + If multiple occurrences exist, returns the last occurrence """ with open(self.path) as f: log = f.readlines() diff --git a/arkane/molproTest.py b/arkane/molproTest.py index ea9aec9195..966ab17a1e 100644 --- a/arkane/molproTest.py +++ b/arkane/molproTest.py @@ -28,15 +28,21 @@ # # ############################################################################### -import numpy +""" +This module contains unit tests of the :mod:`arkane.molpro` module. +""" + import unittest import os -from rmgpy.statmech import IdealGasTranslation, NonlinearRotor, HarmonicOscillator, HinderedRotor +import numpy as np + import rmgpy.constants as constants +from rmgpy.statmech import IdealGasTranslation, NonlinearRotor, HarmonicOscillator, HinderedRotor from arkane.molpro import MolproLog + ################################################################################ @@ -45,27 +51,27 @@ class MolproTest(unittest.TestCase): Contains unit tests for the chempy.io.gaussian module, used for reading and writing Molpro files. """ - + def testLoadDzFromMolproLog_F12(self): """ Uses a Molpro log file for ethylene_dz (C2H4) to test that F12a energy can be properly read. """ - + log = MolproLog(os.path.join(os.path.dirname(__file__), 'data', 'ethylene_f12_dz.out')) e0 = log.loadEnergy() - + self.assertAlmostEqual(e0 / constants.Na / constants.E_h, -78.474353559604, 5) - + def testLoadQzFromMolproLog_F12(self): """ Uses a Molpro log file for ethylene_qz (C2H4) to test that F12b energy can be properly read. """ - + log = MolproLog(os.path.join(os.path.dirname(__file__), 'data', 'ethylene_f12_qz.out')) e0 = log.loadEnergy() - + self.assertAlmostEqual(e0 / constants.Na / constants.E_h, -78.472682755635, 5) def testLoadRadFromMolproLog_F12(self): @@ -73,10 +79,10 @@ def testLoadRadFromMolproLog_F12(self): Uses a Molpro log file for OH (C2H4) to test that radical energy can be properly read. 
""" - + log = MolproLog(os.path.join(os.path.dirname(__file__), 'data', 'OH_f12.out')) e0 = log.loadEnergy() - + self.assertAlmostEqual(e0 / constants.Na / constants.E_h, -75.663696424380, 5) def testLoadHOSIFromMolpro_log(self): @@ -97,7 +103,7 @@ def testLoadHOSIFromMolpro_log(self): trans = [mode for mode in conformer.modes if isinstance(mode, IdealGasTranslation)][0] rot = [mode for mode in conformer.modes if isinstance(mode, NonlinearRotor)][0] vib = [mode for mode in conformer.modes if isinstance(mode, HarmonicOscillator)][0] - t_list = numpy.array([298.15], numpy.float64) + t_list = np.array([298.15], np.float64) self.assertAlmostEqual(trans.getPartitionFunction(t_list), 9.175364e7, delta=1e1) self.assertAlmostEqual(rot.getPartitionFunction(t_list), 1.00005557e5, delta=1e-2) @@ -138,7 +144,7 @@ def test_get_D1_diagnostic(self): """ Ensure molpro can retrieve the T1 diagnostic from CCSD calculations """ - log=MolproLog(os.path.join(os.path.dirname(__file__),'data','ethylene_f12_dz.out')) + log = MolproLog(os.path.join(os.path.dirname(__file__), 'data', 'ethylene_f12_dz.out')) d1_diagnostic = log.get_D1_diagnostic() self.assertAlmostEqual(d1_diagnostic, 0.03369031) @@ -146,9 +152,10 @@ def test_get_T1_diagnostic(self): """ Ensure molpro can retrieve the T1 diagnostic from CCSD calculations """ - log=MolproLog(os.path.join(os.path.dirname(__file__),'data','ethylene_f12_dz.out')) + log = MolproLog(os.path.join(os.path.dirname(__file__), 'data', 'ethylene_f12_dz.out')) t1_diagnostic = log.get_T1_diagnostic() self.assertAlmostEqual(t1_diagnostic, 0.01152184) + if __name__ == '__main__': unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/arkane/output.py b/arkane/output.py index 5c2106b8be..345bdd4c66 100644 --- a/arkane/output.py +++ b/arkane/output.py @@ -34,7 +34,6 @@ import ast - ################################################################################ @@ -101,14 +100,14 @@ def visit_Tuple(self, node): Return a pretty representation of the tuple represented by `node`. """ # If the tuple represents a quantity, keep it on one line - isQuantity = True + is_quantity = True if len(node.elts) == 0 or not isinstance(node.elts[0], (ast.Num, ast.List)) or ( isinstance(node.elts[0], ast.List) and any([not isinstance(e, ast.Num) for e in node.elts[0].elts])): - isQuantity = False + is_quantity = False elif len(node.elts) < 2 or not isinstance(node.elts[1], ast.Str): - isQuantity = False + is_quantity = False - if not isQuantity: + if not is_quantity: # Split elements onto multiple lines result = '(\n' self.level += 1 diff --git a/arkane/pdep.py b/arkane/pdep.py index 97dc28c394..ef9220395f 100644 --- a/arkane/pdep.py +++ b/arkane/pdep.py @@ -34,22 +34,22 @@ reaction network. 
""" -import os.path -import math -import numpy import logging +import math +import os.path + +import numpy as np import rmgpy.quantity as quantity +from rmgpy.chemkin import writeKineticsEntry +from rmgpy.data.kinetics.library import LibraryReaction +from rmgpy.exceptions import InvalidMicrocanonicalRateError, ModifiedStrongCollisionError, PressureDependenceError from rmgpy.kinetics import Chebyshev, PDepArrhenius from rmgpy.reaction import Reaction from rmgpy.kinetics.tunneling import Wigner, Eckart -from rmgpy.data.kinetics.library import LibraryReaction -from rmgpy.chemkin import writeKineticsEntry -from rmgpy.exceptions import InvalidMicrocanonicalRateError, ModifiedStrongCollisionError from arkane.output import prettify -from arkane.sensitivity import PDepSensitivity as sa - +from arkane.sensitivity import PDepSensitivity as SensAnalysis ################################################################################ @@ -115,8 +115,8 @@ def __init__(self, network, self.Tcount = Tcount if Tlist is not None: self.Tlist = Tlist - self.Tmin = (numpy.min(self.Tlist.value_si), "K") - self.Tmax = (numpy.max(self.Tlist.value_si), "K") + self.Tmin = (np.min(self.Tlist.value_si), "K") + self.Tmax = (np.max(self.Tlist.value_si), "K") self.Tcount = len(self.Tlist.value_si) else: self.Tlist = None @@ -126,8 +126,8 @@ def __init__(self, network, self.Pcount = Pcount if Plist is not None: self.Plist = Plist - self.Pmin = (numpy.min(self.Plist.value_si) * 1e-5, "bar") - self.Pmax = (numpy.max(self.Plist.value_si) * 1e-5, "bar") + self.Pmin = (np.min(self.Plist.value_si) * 1e-5, "bar") + self.Pmax = (np.max(self.Plist.value_si) * 1e-5, "bar") self.Pcount = len(self.Plist.value_si) else: self.Plist = None @@ -254,10 +254,10 @@ def execute(self, outputFile, plot, format='pdf', print_summary=True): # set transition state Energy if not set previously using same method as RMG pdep for reaction in self.network.pathReactions: - transitionState = reaction.transitionState - if transitionState.conformer and transitionState.conformer.E0 is None: - transitionState.conformer.E0 = (sum([spec.conformer.E0.value_si for spec in reaction.reactants]) - + reaction.kinetics.Ea.value_si, 'J/mol') + transition_state = reaction.transitionState + if transition_state.conformer and transition_state.conformer.E0 is None: + transition_state.conformer.E0 = (sum([spec.conformer.E0.value_si for spec in reaction.reactants]) + + reaction.kinetics.Ea.value_si, 'J/mol') logging.info('Approximated transitions state E0 for reaction {3} from kinetics ' 'A={0}, n={1}, Ea={2} J/mol'.format(reaction.kinetics.A.value_si, reaction.kinetics.n.value_si, @@ -281,20 +281,20 @@ def execute(self, outputFile, plot, format='pdf', print_summary=True): if self.sensitivity_conditions is not None: perturbation = 0.1 # kcal/mol logging.info('\n\nRunning sensitivity analysis...') - for i in xrange(3): + for i in range(3): try: - sa(self, os.path.dirname(outputFile), perturbation=perturbation) - except (InvalidMicrocanonicalRateError, ModifiedStrongCollisionError) as exept: - logging.warn("Could not complete the sensitivity analysis with a perturbation of {0}" - " kcal/mol, trying {1} kcal/mol instead.".format( - perturbation, perturbation / 2.0)) + SensAnalysis(self, os.path.dirname(outputFile), perturbation=perturbation) + except (InvalidMicrocanonicalRateError, ModifiedStrongCollisionError) as e: + logging.warning('Could not complete the sensitivity analysis with a perturbation of {0} ' + 'kcal/mol, trying {1} kcal/mol instead.'.format( + perturbation, 
perturbation / 2.0)) perturbation /= 2.0 else: break else: logging.error("Could not complete the sensitivity analysis even with a perturbation of {0}" " kcal/mol".format(perturbation)) - raise exept + raise e logging.info("Completed the sensitivity analysis using a perturbation of {0} kcal/mol".format( perturbation)) logging.debug('Finished pdep job for reaction {0}.'.format(self.network.label)) @@ -317,7 +317,7 @@ def generateTemperatureList(self): pass elif self.interpolationModel[0].lower() == 'chebyshev': # Distribute temperatures on a Gauss-Chebyshev grid - Tlist = numpy.zeros(Tcount, numpy.float64) + Tlist = np.zeros(Tcount, np.float64) for i in range(Tcount): T = -math.cos((2 * i + 1) * math.pi / (2 * self.Tcount)) T = 2.0 / ((1.0 / Tmax - 1.0 / Tmin) * T + 1.0 / Tmax + 1.0 / Tmin) @@ -325,7 +325,7 @@ def generateTemperatureList(self): self.Tlist = (Tlist, "K") else: # Distribute temperatures evenly on a T^-1 domain - Tlist = 1.0 / numpy.linspace(1.0 / Tmax, 1.0 / Tmin, Tcount) + Tlist = 1.0 / np.linspace(1.0 / Tmax, 1.0 / Tmin, Tcount) self.Tlist = (Tlist, "K") return self.Tlist.value_si @@ -355,14 +355,14 @@ def initialize(self): else: raise ValueError('Unknown tunneling model {0!r} for path reaction {1}.'.format(tunneling, reaction)) - maximumGrainSize = self.maximumGrainSize.value_si if self.maximumGrainSize is not None else 0.0 + maximum_grain_size = self.maximumGrainSize.value_si if self.maximumGrainSize is not None else 0.0 self.network.initialize( Tmin=self.Tmin.value_si, Tmax=self.Tmax.value_si, Pmin=self.Pmin.value_si, Pmax=self.Pmax.value_si, - maximumGrainSize=maximumGrainSize, + maximumGrainSize=maximum_grain_size, minimumGrainCount=self.minimumGrainCount, activeJRotor=self.activeJRotor, activeKRotor=self.activeKRotor, @@ -389,7 +389,7 @@ def generatePressureList(self): pass elif self.interpolationModel[0].lower() == 'chebyshev': # Distribute pressures on a Gauss-Chebyshev grid - Plist = numpy.zeros(Pcount, numpy.float64) + Plist = np.zeros(Pcount, np.float64) for i in range(Pcount): P = -math.cos((2 * i + 1) * math.pi / (2 * self.Pcount)) P = 10 ** (0.5 * ((math.log10(Pmax) - math.log10(Pmin)) * P + math.log10(Pmax) + math.log10(Pmin))) @@ -397,7 +397,7 @@ def generatePressureList(self): self.Plist = (Plist * 1e-5, "bar") else: # Distribute pressures evenly on a log domain - Plist = 10.0 ** numpy.linspace(math.log10(Pmin), math.log10(Pmax), Pcount) + Plist = 10.0 ** np.linspace(math.log10(Pmin), math.log10(Pmax), Pcount) self.Plist = (Plist * 1e-5, "bar") return self.Plist.value_si @@ -410,8 +410,8 @@ def fitInterpolationModels(self): self.network.netReactions = [] - Nreac = self.network.Nisom + self.network.Nreac - Nprod = Nreac + self.network.Nprod + n_reac = self.network.Nisom + self.network.Nreac + n_prod = n_reac + self.network.Nprod Tmin = self.Tmin.value_si Tmax = self.Tmax.value_si @@ -420,8 +420,8 @@ def fitInterpolationModels(self): Pmax = self.Pmax.value_si Pdata = self.Plist.value_si - for prod in range(Nprod): - for reac in range(Nreac): + for prod in range(n_prod): + for reac in range(n_reac): if reac == prod: continue reaction = Reaction(reactants=configurations[reac].species, @@ -462,8 +462,8 @@ def save(self, outputFile): f = open(outputFile, 'a') f_chemkin = open(os.path.join(os.path.dirname(outputFile), 'chem.inp'), 'a') - Nreac = self.network.Nisom + self.network.Nreac - Nprod = Nreac + self.network.Nprod + n_reac = self.network.Nisom + self.network.Nreac + n_prod = n_reac + self.network.Nprod Tlist = self.Tlist.value_si Plist = self.Plist.value_si 
Tcount = Tlist.shape[0] @@ -488,8 +488,8 @@ def save(self, outputFile): count = 0 printed_reactions = [] # list of rxns already printed - for prod in range(Nprod): - for reac in range(Nreac): + for prod in range(n_prod): + for reac in range(n_reac): if reac == prod: continue reaction = self.network.netReactions[count] @@ -517,7 +517,7 @@ def save(self, outputFile): f.write('# =========== ') f.write('=========== ' * Pcount) f.write('\n') - f.write('# T \ P ') + f.write('# T / P ') f.write(' '.join(['{0:11.3e}'.format(P * 1e-5) for P in Plist])) f.write('\n') f.write('# =========== ') @@ -559,8 +559,8 @@ def plot(self, outputDirectory): import matplotlib.cm cm = matplotlib.cm.jet - Nreac = self.network.Nisom + self.network.Nreac - Nprod = Nreac + self.network.Nprod + n_reac = self.network.Nisom + self.network.Nreac + n_prod = n_reac + self.network.Nprod Tlist = self.Tlist.value_si Plist = self.Plist.value_si Tcount = Tlist.shape[0] @@ -569,8 +569,8 @@ def plot(self, outputDirectory): K = self.K count = 0 - for prod in range(Nprod): - for reac in range(Nreac): + for prod in range(n_prod): + for reac in range(n_reac): if reac == prod: continue reaction = self.network.netReactions[count] @@ -578,13 +578,13 @@ def plot(self, outputDirectory): reaction_str = '{0} {1} {2}'.format( ' + '.join([reactant.label for reactant in reaction.reactants]), - '<=>' if prod < Nreac else '-->', + '<=>' if prod < n_reac else '-->', ' + '.join([product.label for product in reaction.products]), ) fig = plt.figure(figsize=(10, 6)) - K2 = numpy.zeros((Tcount, Pcount)) + K2 = np.zeros((Tcount, Pcount)) if reaction.kinetics is not None: for t in range(Tcount): for p in range(Pcount): @@ -597,7 +597,7 @@ def plot(self, outputDirectory): kunits = {1: 's^-1', 2: 'cm^3/(mol*s)', 3: 'cm^6/(mol^2*s)'}[order] plt.subplot(1, 2, 1) - for p in xrange(Pcount): + for p in range(Pcount): plt.semilogy(1000.0 / Tlist, K[:, p], color=cm(1. * p / (Pcount - 1)), marker='o', linestyle='', label=str('%.2e' % (Plist[p] / 1e+5)) + ' bar') if reaction.kinetics is not None: @@ -609,7 +609,7 @@ def plot(self, outputDirectory): plt.legend() plt.subplot(1, 2, 2) - for t in xrange(Tcount): + for t in range(Tcount): plt.loglog(Plist * 1e-5, K[t, :], color=cm(1. * t / (Tcount - 1)), marker='o', linestyle='', label=str('%.0d' % Tlist[t]) + ' K') plt.loglog(Plist * 1e-5, K2[t, :], color=cm(1. * t / (Tcount - 1)), marker='', linestyle='-') diff --git a/arkane/pdepTest.py b/arkane/pdepTest.py index 5ab1cc26e4..379d460394 100644 --- a/arkane/pdepTest.py +++ b/arkane/pdepTest.py @@ -28,17 +28,21 @@ # # ############################################################################### +""" +This module contains unit tests of the :mod:`arkane.pdep` module. 
+""" + +import logging import os -import unittest import shutil -import logging +import unittest from nose.plugins.attrib import attr from rmgpy import settings from rmgpy.chemkin import readReactionsBlock -from rmgpy.species import Species from rmgpy.kinetics.chebyshev import Chebyshev +from rmgpy.species import Species from arkane.main import Arkane diff --git a/arkane/qchem.py b/arkane/qchem.py index 5490b379c9..443367c327 100644 --- a/arkane/qchem.py +++ b/arkane/qchem.py @@ -1,11 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -""" -Arkane QChem module -Used to parse QChem output files -""" - ############################################################################### # # # RMG - Reaction Mechanism Generator # @@ -33,13 +28,18 @@ # # ############################################################################### +""" +Arkane QChem module +Used to parse QChem output files +""" + import math import logging import os.path -import numpy + +import numpy as np import rmgpy.constants as constants -from rmgpy.exceptions import InputError from rmgpy.statmech import IdealGasTranslation, NonlinearRotor, LinearRotor, HarmonicOscillator, Conformer from arkane.common import check_conformer_energy, get_element_mass @@ -64,21 +64,21 @@ def getNumberOfAtoms(self): Return the number of atoms in the molecular configuration used in the QChem output file. """ - Natoms = 0 + n_atoms = 0 with open(self.path, 'r') as f: line = f.readline() - while line != '' and Natoms == 0: + while line != '' and n_atoms == 0: # Automatically determine the number of atoms - if 'Standard Nuclear Orientation' in line and Natoms == 0: + if 'Standard Nuclear Orientation' in line and n_atoms == 0: for i in range(3): line = f.readline() while '----------------------------------------------------' not in line: - Natoms += 1 + n_atoms += 1 line = f.readline() line = f.readline() - return Natoms + return n_atoms def loadForceConstantMatrix(self): """ @@ -88,30 +88,30 @@ def loadForceConstantMatrix(self): are J/m^2. If no force constant matrix can be found in the log file, ``None`` is returned. """ - F = None + force = None - Natoms = self.getNumberOfAtoms() - Nrows = Natoms * 3 + n_atoms = self.getNumberOfAtoms() + n_rows = n_atoms * 3 with open(self.path, 'r') as f: line = f.readline() while line != '': # Read force constant matrix if 'Final Hessian.' in line or 'Hessian of the SCF Energy' in line: - F = numpy.zeros((Nrows, Nrows), numpy.float64) - for i in range(int(math.ceil(Nrows / 6.0))): + force = np.zeros((n_rows, n_rows), np.float64) + for i in range(int(math.ceil(n_rows / 6.0))): # Header row line = f.readline() # Matrix element rows - for j in range(Nrows): # for j in range(i*6, Nrows): + for j in range(n_rows): # for j in range(i*6, Nrows): data = f.readline().split() for k in range(len(data) - 1): - F[j, i * 6 + k] = float(data[k + 1]) + force[j, i * 6 + k] = float(data[k + 1]) # F[i*5+k,j] = F[j,i*5+k] # Convert from atomic units (Hartree/Bohr_radius^2) to J/m^2 - F *= 4.35974417e-18 / 5.291772108e-11 ** 2 + force *= 4.35974417e-18 / 5.291772108e-11 ** 2 line = f.readline() - return F + return force def loadGeometry(self): @@ -130,7 +130,7 @@ def loadGeometry(self): completed_job = False for line in reversed(log): if 'Total job time:' in line: - logging.debug('Found a sucessfully completed QChem Job') + logging.debug('Found a successfully completed QChem Job') completed_job = True break @@ -141,7 +141,7 @@ def loadGeometry(self): # Now look for the geometry. 
# Will return the final geometry in the file under Standard Nuclear Orientation. geometry_flag = False - for i in reversed(xrange(len(log))): + for i in reversed(range(len(log))): line = log[i] if 'Standard Nuclear Orientation' in line: for line in log[(i + 3):]: @@ -160,9 +160,9 @@ def loadGeometry(self): mass1, num1 = get_element_mass(atom1) mass.append(mass1) number.append(num1) - coord = numpy.array(coord, numpy.float64) - number = numpy.array(number, numpy.int) - mass = numpy.array(mass, numpy.float64) + coord = np.array(coord, np.float64) + number = np.array(number, np.int) + mass = np.array(mass, np.float64) if len(number) == 0 or len(coord) == 0 or len(mass) == 0: raise InputError('Unable to read atoms from QChem geometry output file {0}'.format(self.path)) @@ -253,7 +253,7 @@ def loadConformer(self, symmetry=None, spinMultiplicity=0, opticalIsomers=None, logging.debug('inertia is {}'.format(str(inertia))) for i in range(2): inertia[i] *= (constants.a0 / 1e-10) ** 2 - inertia = numpy.sqrt(inertia[0] * inertia[1]) + inertia = np.sqrt(inertia[0] * inertia[1]) rotation = LinearRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry) rot.append(rotation) else: @@ -308,7 +308,7 @@ def loadScanEnergies(self): Extract the optimized energies in J/mol from a QChem log file, e.g. the result of a QChem "PES Scan" quantum chemistry calculation. """ - Vlist = [] + v_list = [] angle = [] read = False with open(self.path, 'r') as f: @@ -318,24 +318,24 @@ def loadScanEnergies(self): if read: values = [float(item) for item in line.split()] angle.append(values[0]) - Vlist.append(values[1]) + v_list.append(values[1]) if 'Summary of potential scan:' in line: - logging.info('found a sucessfully completed QChem Job') + logging.info('found a successfully completed QChem Job') read = True elif 'SCF failed to converge' in line: raise InputError('QChem Job did not sucessfully complete: SCF failed to converge') logging.info(' Assuming {0} is the output from a QChem PES scan...'.format(os.path.basename(self.path))) - Vlist = numpy.array(Vlist, numpy.float64) + v_list = np.array(v_list, np.float64) # check to see if the scanlog indicates that one of your reacting species may not be the lowest energy conformer - check_conformer_energy(Vlist, self.path) + check_conformer_energy(v_list, self.path) # Adjust energies to be relative to minimum energy conformer # Also convert units from Hartree/particle to J/mol - Vlist -= numpy.min(Vlist) - Vlist *= constants.E_h * constants.Na - angle = numpy.arange(0.0, 2 * math.pi + 0.00001, 2 * math.pi / (len(Vlist) - 1), numpy.float64) - return Vlist, angle + v_list -= np.min(v_list) + v_list *= constants.E_h * constants.Na + angle = np.arange(0.0, 2 * math.pi + 0.00001, 2 * math.pi / (len(v_list) - 1), np.float64) + return v_list, angle def loadNegativeFrequency(self): """ diff --git a/arkane/qchemTest.py b/arkane/qchemTest.py index 702f3c9ba8..778f03e222 100644 --- a/arkane/qchemTest.py +++ b/arkane/qchemTest.py @@ -28,15 +28,17 @@ # # ############################################################################### -import unittest +""" +This module contains unit tests of the :mod:`arkane.qchem` module. 
+""" + import os +import unittest -from rmgpy.statmech import Conformer, IdealGasTranslation, LinearRotor, NonlinearRotor, HarmonicOscillator, \ - HinderedRotor +from rmgpy.statmech import IdealGasTranslation, LinearRotor, NonlinearRotor, HarmonicOscillator, HinderedRotor from arkane.qchem import QChemLog - ################################################################################ diff --git a/arkane/sensitivity.py b/arkane/sensitivity.py index 63adf3f12e..0c58d119b9 100644 --- a/arkane/sensitivity.py +++ b/arkane/sensitivity.py @@ -33,15 +33,15 @@ of kinetics and pressure-dependent jobs. """ -import os import logging +import os import string + import numpy as np +from rmgpy.pdep import Configuration import rmgpy.quantity as quantity from rmgpy.species import TransitionState -from rmgpy.pdep import Configuration - ################################################################################ @@ -99,10 +99,10 @@ def execute(self): # Calculate the sensitivity coefficients according to dln(r) / dln(E0) = (E0 * dr) / (r * dE0) self.f_sa_coefficients[species] = [(self.f_sa_rates[species][i] - self.f_rates[i]) / (self.perturbation.value_si * self.f_rates[i]) - for i in xrange(len(self.conditions))] + for i in range(len(self.conditions))] self.r_sa_coefficients[species] = [(self.r_sa_rates[species][i] - self.r_rates[i]) / (self.perturbation.value_si * self.r_rates[i]) - for i in xrange(len(self.conditions))] + for i in range(len(self.conditions))] self.save() self.plot() @@ -127,16 +127,16 @@ def save(self): with open(path, 'w') as sa_f: sa_f.write("Sensitivity analysis for reaction {0}\n\n" "The semi-normalized sensitivity coefficients are calculated as dln(r)/dE0\n" - "by perturbing E0 of each well or TS by {1}, and are given in `mol/J` units.\n\n\n".format( - reaction_str, self.perturbation)) + "by perturbing E0 of each well or TS by {1}, and are given in " + "`mol/J` units.\n\n\n".format(reaction_str, self.perturbation)) reactants_label = ' + '.join([reactant.label for reactant in self.job.reaction.reactants]) ts_label = self.job.reaction.transitionState.label products_label = ' + '.join([reactant.label for reactant in self.job.reaction.products]) max_label = max(len(reactants_label), len(products_label), len(ts_label), 10) sa_f.write('========================={0}=============================================\n' '| Direction | Well or TS {1}| Temperature (K) | Sensitivity coefficient |\n' - '|-----------+------------{2}+-----------------+-------------------------|\n'.format( - '=' * (max_label - 10), ' ' * (max_label - 10), '-' * (max_label - 10))) + '|-----------+------------{2}+-----------------+-------------------------|\n' + .format('=' * (max_label - 10), ' ' * (max_label - 10), '-' * (max_label - 10))) for i, condition in enumerate(self.conditions): sa_f.write('| Forward | {0} {1}| {2:6.1f} | {3:+1.2e} |\n'.format( reactants_label, ' ' * (max_label - len(reactants_label)), condition.value_si, @@ -180,8 +180,8 @@ def plot(self): plt.rcdefaults() _, ax = plt.subplots(nrows=len(self.conditions), ncols=2, tight_layout=True) labels = [reactants_label, ts_label, products_label] - min_sa = min(min(min(self.f_sa_coefficients.itervalues())), min(min(self.r_sa_coefficients.itervalues()))) - max_sa = max(max(max(self.f_sa_coefficients.itervalues())), max(max(self.r_sa_coefficients.itervalues()))) + min_sa = min(min(min(self.f_sa_coefficients.values())), min(min(self.r_sa_coefficients.values()))) + max_sa = max(max(max(self.f_sa_coefficients.values())), 
max(max(self.r_sa_coefficients.values()))) for i, condition in enumerate(self.conditions): f_values = [self.f_sa_coefficients[self.job.reaction.reactants[0]][i], self.f_sa_coefficients[self.job.reaction.transitionState][i], @@ -285,7 +285,7 @@ def execute(self): self.sa_coefficients[str(rxn)][entry] = [((self.sa_rates[str(rxn)][entry][i] - self.rates[str(rxn)][i])) / (self.perturbation.value_si * self.rates[str(rxn)][i]) - for i in xrange(len(self.conditions))] + for i in range(len(self.conditions))] self.save(wells, transition_states) self.plot(wells, transition_states) @@ -321,8 +321,8 @@ def save(self, wells, transition_states): with open(path, 'w') as sa_f: sa_f.write("Sensitivity analysis for network {0}\n\n" "The semi-normalized sensitivity coefficients are calculated as dln(r)/dE0\n" - "by perturbing E0 of each well or TS by {1},\n and are given in `mol/J` units.\n\n\n".format( - network_str, self.perturbation)) + "by perturbing E0 of each well or TS by {1},\n and are given in " + "`mol/J` units.\n\n\n".format(network_str, self.perturbation)) for rxn in self.job.network.netReactions: reactants_label = ' + '.join([reactant.label for reactant in rxn.reactants]) products_label = ' + '.join([reactant.label for reactant in rxn.products]) @@ -331,8 +331,8 @@ def save(self, wells, transition_states): max_label = 40 sa_f.write('========================={0}==================================================\n' '| Well or TS {1}| Temperature (K) | Pressure (bar) | Sensitivity coefficient |\n' - '|------------{2}+-----------------+----------------+-------------------------|\n'.format( - '=' * (max_label - 10), ' ' * (max_label - 10), '-' * (max_label - 10))) + '|------------{2}+-----------------+----------------+-------------------------|\n' + .format('=' * (max_label - 10), ' ' * (max_label - 10), '-' * (max_label - 10))) for entry in wells + transition_states: if isinstance(entry, TransitionState): entry_label = '(TS) ' + entry.label @@ -359,7 +359,7 @@ def plot(self, wells, transition_states): labels = [str(entry) for entry in wells] labels.extend(ts.label for ts in transition_states) max_sa = min_sa = self.sa_coefficients[str(rxn)][wells[0]][0] - for conformer_sa in self.sa_coefficients[str(rxn)].itervalues(): + for conformer_sa in self.sa_coefficients[str(rxn)].values(): for sa_condition in conformer_sa: if min_sa > sa_condition: min_sa = sa_condition diff --git a/arkane/statmech.py b/arkane/statmech.py index d0c40f0796..b39802eab6 100644 --- a/arkane/statmech.py +++ b/arkane/statmech.py @@ -34,36 +34,34 @@ information for a single species or transition state. 
""" -import os.path -import math -import numpy as np import logging +import math +import os.path -from rdkit.Chem import GetPeriodicTable +import matplotlib.pyplot as plt +import numpy as np import rmgpy.constants as constants +from rmgpy.exceptions import InputError, ElementError +from rmgpy.molecule.molecule import Molecule from rmgpy.species import TransitionState, Species -from rmgpy.statmech.translation import Translation, IdealGasTranslation -from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor -from rmgpy.statmech.vibration import Vibration, HarmonicOscillator -from rmgpy.statmech.torsion import Torsion, HinderedRotor, FreeRotor -from rmgpy.statmech.conformer import Conformer from rmgpy.statmech.ndTorsions import HinderedRotor2D, HinderedRotorClassicalND -from rmgpy.exceptions import InputError, StatmechError +from rmgpy.statmech.rotation import LinearRotor, NonlinearRotor +from rmgpy.statmech.torsion import HinderedRotor, FreeRotor +from rmgpy.statmech.translation import Translation, IdealGasTranslation +from rmgpy.statmech.vibration import HarmonicOscillator from rmgpy.quantity import Quantity -from rmgpy.molecule.molecule import Molecule -from arkane.output import prettify -from arkane.log import Log +from arkane.common import ArkaneSpecies +from arkane.common import symbol_by_number +from arkane.encorr.corr import get_atom_correction, get_bac from arkane.gaussian import GaussianLog +from arkane.log import Log from arkane.molpro import MolproLog +from arkane.output import prettify from arkane.qchem import QChemLog -from arkane.common import symbol_by_number -from arkane.common import ArkaneSpecies -from arkane.encorr.corr import get_atom_correction, get_bac from arkane.util import determine_qm_software - ################################################################################ @@ -97,7 +95,7 @@ def load(self): angles (in radians) and energies (in J/mol). 
""" angles, energies = [], [] - angleUnits, energyUnits, angleFactor, energyFactor = None, None, None, None + angle_units, energy_units, angle_factor, energy_factor = None, None, None, None with open(self.path, 'r') as stream: for line in stream: @@ -106,22 +104,22 @@ def load(self): continue tokens = line.split() - if angleUnits is None or energyUnits is None: - angleUnits = tokens[1][1:-1] - energyUnits = tokens[3][1:-1] + if angle_units is None or energy_units is None: + angle_units = tokens[1][1:-1] + energy_units = tokens[3][1:-1] try: - angleFactor = ScanLog.angleFactors[angleUnits] + angle_factor = ScanLog.angleFactors[angle_units] except KeyError: - raise ValueError('Invalid angle units {0!r}.'.format(angleUnits)) + raise ValueError('Invalid angle units {0!r}.'.format(angle_units)) try: - energyFactor = ScanLog.energyFactors[energyUnits] + energy_factor = ScanLog.energyFactors[energy_units] except KeyError: - raise ValueError('Invalid energy units {0!r}.'.format(energyUnits)) + raise ValueError('Invalid energy units {0!r}.'.format(energy_units)) else: - angles.append(float(tokens[0]) / angleFactor) - energies.append(float(tokens[1]) / energyFactor) + angles.append(float(tokens[0]) / angle_factor) + energies.append(float(tokens[1]) / energy_factor) angles = np.array(angles) energies = np.array(energies) @@ -138,11 +136,11 @@ def save(self, angles, energies, angleUnits='radians', energyUnits='kJ/mol'): assert len(angles) == len(energies) try: - angleFactor = ScanLog.angleFactors[angleUnits] + angle_factor = ScanLog.angleFactors[angleUnits] except KeyError: raise ValueError('Invalid angle units {0!r}.'.format(angleUnits)) try: - energyFactor = ScanLog.energyFactors[energyUnits] + energy_factor = ScanLog.energyFactors[energyUnits] except KeyError: raise ValueError('Invalid energy units {0!r}.'.format(energyUnits)) @@ -152,7 +150,7 @@ def save(self, angles, energies, angleUnits='radians', energyUnits='kJ/mol'): 'Energy ({0})'.format(energyUnits), )) for angle, energy in zip(angles, energies): - stream.write('{0:23.10f} {1:23.10f}\n'.format(angle * angleFactor, energy * energyFactor)) + stream.write('{0:23.10f} {1:23.10f}\n'.format(angle * angle_factor, energy * energy_factor)) ################################################################################ @@ -167,11 +165,16 @@ def freeRotor(pivots, top, symmetry): """Read a free rotor directive, and return the attributes in a list""" return [pivots, top, symmetry] -def hinderedRotor2D(scandir,pivots1,top1,symmetry1,pivots2,top2,symmetry2,symmetry='none'): - return [scandir,pivots1,top1,symmetry1,pivots2,top2,symmetry2,symmetry] -def hinderedRotorClassicalND(calcPath,pivots,tops,sigmas,semiclassical): - return [calcPath,pivots,tops,sigmas,semiclassical] +def hinderedRotor2D(scandir, pivots1, top1, symmetry1, pivots2, top2, symmetry2, symmetry='none'): + """Read a two dimensional hindered rotor directive, and return the attributes in a list""" + return [scandir, pivots1, top1, symmetry1, pivots2, top2, symmetry2, symmetry] + + +def hinderedRotorClassicalND(calcPath, pivots, tops, sigmas, semiclassical): + """Read an N dimensional hindered rotor directive, and return the attributes in a list""" + return [calcPath, pivots, tops, sigmas, semiclassical] + class StatMechJob(object): """ @@ -257,7 +260,7 @@ def load(self, pdep=False, plot=False): 'False': False, 'HinderedRotor': hinderedRotor, 'FreeRotor': freeRotor, - 'HinderedRotor2D' : hinderedRotor2D, + 'HinderedRotor2D': hinderedRotor2D, 'HinderedRotorClassicalND': 
hinderedRotorClassicalND, # File formats 'GaussianLog': GaussianLog, @@ -271,7 +274,7 @@ def load(self, pdep=False, plot=False): with open(path, 'r') as f: try: - exec f in global_context, local_context + exec(f.read(), global_context, local_context) except (NameError, TypeError, SyntaxError): logging.error('The species file {0} was invalid:'.format(path)) raise @@ -288,20 +291,20 @@ def load(self, pdep=False, plot=False): linear = None try: - externalSymmetry = local_context['externalSymmetry'] + external_symmetry = local_context['externalSymmetry'] except KeyError: - externalSymmetry = None + external_symmetry = None try: - spinMultiplicity = local_context['spinMultiplicity'] + spin_multiplicity = local_context['spinMultiplicity'] except KeyError: - spinMultiplicity = 0 + spin_multiplicity = 0 try: - opticalIsomers = local_context['opticalIsomers'] + optical_isomers = local_context['opticalIsomers'] except KeyError: logging.debug('No opticalIsomers provided, estimating them from the quantum file.') - opticalIsomers = None + optical_isomers = None try: energy = local_context['energy'] @@ -315,12 +318,12 @@ def load(self, pdep=False, plot=False): raise InputError('Model chemistry {0!r} not found in from dictionary of energy values in species file ' '{1!r}.'.format(self.modelChemistry, path)) e0, e_electronic = None, None # E0 = e_electronic + ZPE - energyLog = None + energy_log = None if isinstance(energy, Log) and type(energy).__name__ == 'Log': - energyLog = determine_qm_software(os.path.join(directory, energy.path)) + energy_log = determine_qm_software(os.path.join(directory, energy.path)) elif isinstance(energy, Log) and type(energy).__name__ != 'Log': - energyLog = energy - energyLog.path = os.path.join(directory, energyLog.path) + energy_log = energy + energy_log.path = os.path.join(directory, energy_log.path) elif isinstance(energy, float): e_electronic = energy elif isinstance(energy, tuple) and len(energy) == 2: @@ -339,21 +342,21 @@ def load(self, pdep=False, plot=False): 'or E0 (including the ZPE). 
Got: {0}'.format(energy[2])) try: - statmechLog = local_context['frequencies'] + statmech_log = local_context['frequencies'] except KeyError: raise InputError('Required attribute "frequencies" not found in species file {0!r}.'.format(path)) - if isinstance(statmechLog, Log) and type(statmechLog).__name__ == 'Log': - statmechLog = determine_qm_software(os.path.join(directory, statmechLog.path)) + if isinstance(statmech_log, Log) and type(statmech_log).__name__ == 'Log': + statmech_log = determine_qm_software(os.path.join(directory, statmech_log.path)) else: - statmechLog.path = os.path.join(directory, statmechLog.path) + statmech_log.path = os.path.join(directory, statmech_log.path) try: - geomLog = local_context['geometry'] - if isinstance(geomLog, Log) and type(geomLog).__name__ == 'Log': - geomLog = determine_qm_software(os.path.join(directory, geomLog.path)) + geom_log = local_context['geometry'] + if isinstance(geom_log, Log) and type(geom_log).__name__ == 'Log': + geom_log = determine_qm_software(os.path.join(directory, geom_log.path)) else: - geomLog.path = os.path.join(directory, geomLog.path) + geom_log.path = os.path.join(directory, geom_log.path) except KeyError: - geomLog = statmechLog + geom_log = statmech_log logging.debug("Reading geometry from the specified frequencies file.") if 'frequencyScaleFactor' in local_context: @@ -376,23 +379,23 @@ def load(self, pdep=False, plot=False): # a warning is output reminding the user to ensure the geometry and Hessian are defined in consistent # coordinates. if len(rotors) > 0: - if isinstance(statmechLog, GaussianLog): - if statmechLog.path != geomLog.path: - raise InputError('For {0!r}, the geometry log, {1!r}, and frequency log, {2!r}, are not the same.' - ' In order to ensure the geometry and Hessian of {0!r} are defined in consistent' - ' coordinate systems for hindered/free rotor projection, either use the frequency' - ' log for both geometry and frequency, or remove rotors.'.format( - self.species.label, geomLog.path, statmechLog.path)) - elif isinstance(statmechLog, QChemLog): - logging.warning('QChem log will be used for Hessian of {0!r}. Please verify that the geometry' - ' and Hessian of {0!r} are defined in the same coordinate system'.format( + if isinstance(statmech_log, GaussianLog): + if statmech_log.path != geom_log.path: + raise InputError('For {0!r}, the geometry log, {1!r}, and frequency log, {2!r}, are not the same. ' + 'In order to ensure the geometry and Hessian of {0!r} are defined in consistent ' + 'coordinate systems for hindered/free rotor projection, either use the frequency ' + 'log for both geometry and frequency, or remove rotors.'.format( + self.species.label, geom_log.path, statmech_log.path)) + elif isinstance(statmech_log, QChemLog): + logging.warning('QChem log will be used for Hessian of {0!r}. 
Please verify that the geometry ' + 'and Hessian of {0!r} are defined in the same coordinate system'.format( self.species.label)) logging.debug(' Reading molecular degrees of freedom...') - conformer, unscaled_frequencies = statmechLog.loadConformer(symmetry=externalSymmetry, - spinMultiplicity=spinMultiplicity, - opticalIsomers=opticalIsomers, - label=self.species.label) + conformer, unscaled_frequencies = statmech_log.loadConformer(symmetry=external_symmetry, + spinMultiplicity=spin_multiplicity, + opticalIsomers=optical_isomers, + label=self.species.label) for mode in conformer.modes: if isinstance(mode, (Translation, IdealGasTranslation)): @@ -406,7 +409,7 @@ def load(self, pdep=False, plot=False): "please specify the multiplicity in the input file.".format(self.path)) logging.debug(' Reading optimized geometry...') - coordinates, number, mass = geomLog.loadGeometry() + coordinates, number, mass = geom_log.loadGeometry() # Infer atoms from geometry atoms = {} @@ -433,13 +436,13 @@ def load(self, pdep=False, plot=False): if e0 is None: if e_electronic is None: # The energy read from the log file is without the ZPE - e_electronic = energyLog.loadEnergy(zpe_scale_factor) # in J/mol + e_electronic = energy_log.loadEnergy(zpe_scale_factor) # in J/mol else: e_electronic *= constants.E_h * constants.Na # convert Hartree/particle into J/mol if self.applyAtomEnergyCorrections: atom_corrections = get_atom_correction(self.modelChemistry, atoms, self.atomEnergies) - + else: atom_corrections = 0 logging.warning('Atom corrections are not being used. Do not trust energies and thermo.') @@ -447,12 +450,13 @@ def load(self, pdep=False, plot=False): if not self.bonds and hasattr(self.species, 'molecule') and self.species.molecule: self.bonds = self.species.molecule[0].enumerate_bonds() bond_corrections = get_bac(self.modelChemistry, self.bonds, coordinates, number, - bac_type=self.bondEnergyCorrectionType, multiplicity=conformer.spinMultiplicity) + bac_type=self.bondEnergyCorrectionType, + multiplicity=conformer.spinMultiplicity) else: bond_corrections = 0 e_electronic_with_corrections = e_electronic + atom_corrections + bond_corrections # Get ZPE only for polyatomic species (monoatomic species don't have frequencies, so ZPE = 0) - zpe = statmechLog.loadZeroPointEnergy() * zpe_scale_factor if len(number) > 1 else 0 + zpe = statmech_log.loadZeroPointEnergy() * zpe_scale_factor if len(number) > 1 else 0 logging.debug('Scaled zero point energy (ZPE) is {0} J/mol'.format(zpe)) e0 = e_electronic_with_corrections + zpe @@ -466,20 +470,20 @@ def load(self, pdep=False, plot=False): # If loading a transition state, also read the imaginary frequency if is_ts: - neg_freq = statmechLog.loadNegativeFrequency() + neg_freq = statmech_log.loadNegativeFrequency() self.species.frequency = (neg_freq * self.frequencyScaleFactor, "cm^-1") # Read and fit the 1D hindered rotors if applicable # If rotors are found, the vibrational frequencies are also # recomputed with the torsional modes removed - F = statmechLog.loadForceConstantMatrix() + F = statmech_log.loadForceConstantMatrix() if F is not None and len(mass) > 1 and len(rotors) > 0: logging.debug(' Fitting {0} hindered rotors...'.format(len(rotors))) - rotorCount = 0 - for j,q in enumerate(rotors): + rotor_count = 0 + for j, q in enumerate(rotors): symmetry = None if len(q) == 3: # No potential scan is given, this is a free rotor @@ -487,119 +491,123 @@ def load(self, pdep=False, plot=False): inertia = conformer.getInternalReducedMomentOfInertia(pivots, top) * 
constants.Na * 1e23 rotor = FreeRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry) conformer.modes.append(rotor) - rotorCount += 1 + rotor_count += 1 elif len(q) == 8: - scandir,pivots1,top1,symmetry1,pivots2,top2,symmetry2,symmetry = q + scandir, pivots1, top1, symmetry1, pivots2, top2, symmetry2, symmetry = q logging.info("Calculating energy levels for 2D-HR, may take a while...") - rotor = HinderedRotor2D(name='r'+str(j),torsigma1=symmetry1,torsigma2=symmetry2,symmetry=symmetry, - calcPath=os.path.join(directory,scandir),pivots1=pivots1,pivots2=pivots2,top1=top1,top2=top2) + rotor = HinderedRotor2D(name='r' + str(j), torsigma1=symmetry1, torsigma2=symmetry2, + symmetry=symmetry, + calcPath=os.path.join(directory, scandir), pivots1=pivots1, pivots2=pivots2, + top1=top1, top2=top2) rotor.run() conformer.modes.append(rotor) - rotorCount += 2 - elif len(q) == 5 and isinstance(q[1][0],list): - scandir,pivots,tops,sigmas,semiclassical = q - rotor = HinderedRotorClassicalND(pivots,tops,sigmas,calcPath=os.path.join(directory,scandir),conformer=conformer,F=F, - semiclassical=semiclassical,isLinear=linear,isTS=is_ts) + rotor_count += 2 + elif len(q) == 5 and isinstance(q[1][0], list): + scandir, pivots, tops, sigmas, semiclassical = q + rotor = HinderedRotorClassicalND(pivots, tops, sigmas, calcPath=os.path.join(directory, scandir), + conformer=conformer, F=F, + semiclassical=semiclassical, isLinear=linear, isTS=is_ts) rotor.run() conformer.modes.append(rotor) - rotorCount += len(pivots) + rotor_count += len(pivots) elif len(q) in [4, 5]: # This is a hindered rotor if len(q) == 5: - scanLog, pivots, top, symmetry, fit = q + scan_log, pivots, top, symmetry, fit = q elif len(q) == 4: # the symmetry number will be derived from the scan - scanLog, pivots, top, fit = q + scan_log, pivots, top, fit = q # Load the hindered rotor scan energies - if isinstance(scanLog, Log) and type(scanLog).__name__ == 'Log': - scanLog = determine_qm_software(os.path.join(directory, scanLog.path)) - scanLog.path = os.path.join(directory, scanLog.path) - if isinstance(scanLog, (GaussianLog, QChemLog)): - v_list, angle = scanLog.loadScanEnergies() + if isinstance(scan_log, Log) and type(scan_log).__name__ == 'Log': + scan_log = determine_qm_software(os.path.join(directory, scan_log.path)) + scan_log.path = os.path.join(directory, scan_log.path) + if isinstance(scan_log, (GaussianLog, QChemLog)): + v_list, angle = scan_log.loadScanEnergies() try: - pivot_atoms = scanLog.load_scan_pivot_atoms() + pivot_atoms = scan_log.load_scan_pivot_atoms() except Exception as e: logging.warning("Unable to find pivot atoms in scan due to error: {}".format(e)) pivot_atoms = 'N/A' try: - frozen_atoms = scanLog.load_scan_frozen_atoms() + frozen_atoms = scan_log.load_scan_frozen_atoms() except Exception as e: logging.warning("Unable to find pivot atoms in scan due to error: {}".format(e)) frozen_atoms = 'N/A' - elif isinstance(scanLog, ScanLog): - angle, v_list = scanLog.load() + elif isinstance(scan_log, ScanLog): + angle, v_list = scan_log.load() # no way to find pivot atoms or frozen atoms from ScanLog pivot_atoms = 'N/A' frozen_atoms = 'N/A' else: - raise InputError('Invalid log file type {0} for scan log.'.format(scanLog.__class__)) + raise InputError('Invalid log file type {0} for scan log.'.format(scan_log.__class__)) if symmetry is None: symmetry = determine_rotor_symmetry(v_list, self.species.label, pivots) - self.raw_hindered_rotor_data.append((self.species.label, rotorCount, symmetry, angle, + 
self.raw_hindered_rotor_data.append((self.species.label, rotor_count, symmetry, angle, v_list, pivot_atoms, frozen_atoms)) inertia = conformer.getInternalReducedMomentOfInertia(pivots, top) * constants.Na * 1e23 - cosineRotor = HinderedRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry) - cosineRotor.fitCosinePotentialToData(angle, v_list) - fourierRotor = HinderedRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry) - fourierRotor.fitFourierPotentialToData(angle, v_list) + cosine_rotor = HinderedRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry) + cosine_rotor.fitCosinePotentialToData(angle, v_list) + fourier_rotor = HinderedRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry) + fourier_rotor.fitFourierPotentialToData(angle, v_list) Vlist_cosine = np.zeros_like(angle) Vlist_fourier = np.zeros_like(angle) for i in range(angle.shape[0]): - Vlist_cosine[i] = cosineRotor.getPotential(angle[i]) - Vlist_fourier[i] = fourierRotor.getPotential(angle[i]) + Vlist_cosine[i] = cosine_rotor.getPotential(angle[i]) + Vlist_fourier[i] = fourier_rotor.getPotential(angle[i]) if fit == 'cosine': - rotor = cosineRotor - rotorCount += 1 + rotor = cosine_rotor + rotor_count += 1 conformer.modes.append(rotor) elif fit == 'fourier': - rotor = fourierRotor - rotorCount += 1 + rotor = fourier_rotor + rotor_count += 1 conformer.modes.append(rotor) elif fit == 'best': rms_cosine = np.sqrt(np.sum((Vlist_cosine - v_list) * (Vlist_cosine - v_list)) / - (len(v_list) - 1)) / 4184. + (len(v_list) - 1)) / 4184. rms_fourier = np.sqrt(np.sum((Vlist_fourier - v_list) * (Vlist_fourier - v_list)) / - (len(v_list) - 1)) / 4184. + (len(v_list) - 1)) / 4184. # Keep the rotor with the most accurate potential - rotor = cosineRotor if rms_cosine < rms_fourier else fourierRotor + rotor = cosine_rotor if rms_cosine < rms_fourier else fourier_rotor # However, keep the cosine rotor if it is accurate enough, the # fourier rotor is not significantly more accurate, and the cosine # rotor has the correct symmetry if rms_cosine < 0.05 and rms_cosine / rms_fourier < 2.0 and rms_cosine / rms_fourier < 4.0 \ - and symmetry == cosineRotor.symmetry: - rotor = cosineRotor + and symmetry == cosine_rotor.symmetry: + rotor = cosine_rotor conformer.modes.append(rotor) if plot: try: - self.create_hindered_rotor_figure(angle, v_list, cosineRotor, fourierRotor, rotor, rotorCount) + self.create_hindered_rotor_figure(angle, v_list, cosine_rotor, fourier_rotor, rotor, + rotor_count) except Exception as e: logging.warning("Could not plot hindered rotor graph due to error: {0}".format(e)) - rotorCount += 1 + rotor_count += 1 logging.debug(' Determining frequencies from reduced force constant matrix...') - frequencies = np.array(projectRotors(conformer, F, rotors, linear, is_ts)) + frequencies = np.array(projectRotors(conformer, F, rotors, linear, is_ts, label=self.species.label)) elif len(conformer.modes) > 2: if len(rotors) > 0: - logging.warn('Force Constant Matrix Missing Ignoring rotors, if running Gaussian if not already' - ' present you need to add the keyword iop(7/33=1) in your Gaussian frequency job for' - ' Gaussian to generate the force constant matrix, if running Molpro include keyword print,' - ' hessian') + logging.warning('Force Constant Matrix Missing Ignoring rotors, if running Gaussian if not already ' + 'present you need to add the keyword iop(7/33=1) in your Gaussian frequency job for ' + 'Gaussian to generate the force constant matrix, if running Molpro include keyword ' + 'print, hessian') frequencies 
= conformer.modes[2].frequencies.value_si rotors = np.array([]) else: if len(rotors) > 0: - logging.warn('Force Constant Matrix Missing Ignoring rotors, if running Gaussian if not already' - ' present you need to add the keyword iop(7/33=1) in your Gaussian frequency job for' - ' Gaussian to generate the force constant matrix, if running Molpro include keyword print,' - ' hessian') + logging.warning('Force Constant Matrix Missing Ignoring rotors, if running Gaussian if not already ' + 'present you need to add the keyword iop(7/33=1) in your Gaussian frequency job for ' + 'Gaussian to generate the force constant matrix, if running Molpro include keyword' + 'print, hessian') frequencies = np.array([]) rotors = np.array([]) @@ -607,11 +615,11 @@ def load(self, pdep=False, plot=False): if isinstance(mode, HarmonicOscillator): mode.frequencies = (frequencies * self.frequencyScaleFactor, "cm^-1") - ##save supporting information for calculation + # save supporting information for calculation self.supporting_info = [self.species.label] - symmetry_read, optical_isomers_read, point_group_read = statmechLog.get_symmetry_properties() - self.supporting_info.append(externalSymmetry if externalSymmetry else symmetry_read) - self.supporting_info.append(opticalIsomers if opticalIsomers else optical_isomers_read) + symmetry_read, optical_isomers_read, point_group_read = statmech_log.get_symmetry_properties() + self.supporting_info.append(external_symmetry if external_symmetry else symmetry_read) + self.supporting_info.append(optical_isomers if optical_isomers else optical_isomers_read) self.supporting_info.append(point_group_read) for mode in conformer.modes: if isinstance(mode, (LinearRotor, NonlinearRotor)): @@ -630,19 +638,19 @@ def load(self, pdep=False, plot=False): self.supporting_info.append(e_electronic) self.supporting_info.append(e_electronic + zpe) self.supporting_info.append(e0) - self.supporting_info.append(list(map(lambda x: symbol_by_number[x],number))) #atom symbols + self.supporting_info.append(list([symbol_by_number[x] for x in number])) # atom symbols self.supporting_info.append(coordinates) try: - t1d = energyLog.get_T1_diagnostic() + t1d = energy_log.get_T1_diagnostic() except (NotImplementedError, AttributeError): t1d = None self.supporting_info.append(t1d) try: - d1d = energyLog.get_D1_diagnostic() + d1d = energy_log.get_D1_diagnostic() except (NotImplementedError, AttributeError): d1d = None self.supporting_info.append(d1d) - #save conformer + # save conformer self.species.conformer = conformer def write_output(self, output_directory): @@ -651,9 +659,9 @@ def write_output(self, output_directory): in `output_directory`. 
""" - outputFile = os.path.join(output_directory, 'output.py') + output_file = os.path.join(output_directory, 'output.py') logging.info('Saving statistical mechanics parameters for {0}...'.format(self.species.label)) - f = open(outputFile, 'a') + f = open(output_file, 'a') conformer = self.species.conformer coordinates = conformer.coordinates.value_si * 1e10 @@ -716,7 +724,7 @@ def create_hindered_rotor_figure(self, angle, v_list, cosineRotor, fourierRotor, axes.set_xticklabels( ['$0$', '$\pi/4$', '$\pi/2$', '$3\pi/4$', '$\pi$', '$5\pi/4$', '$3\pi/2$', '$7\pi/4$', '$2\pi$']) - self.hindered_rotor_plots.append((fig,rotorIndex)) + self.hindered_rotor_plots.append((fig, rotorIndex)) def save_hindered_rotor_figures(self, directory): """ @@ -757,7 +765,8 @@ def is_linear(coordinates): return False return True -def projectRotors(conformer, F, rotors, linear, is_ts, getProjectedOutFreqs=False): + +def projectRotors(conformer, F, rotors, linear, is_ts, getProjectedOutFreqs=False, label=None): """ For a given `conformer` with associated force constant matrix `F`, lists of rotor information `rotors`, `pivots`, and `top1`, and the linearity of the @@ -768,14 +777,14 @@ def projectRotors(conformer, F, rotors, linear, is_ts, getProjectedOutFreqs=Fals Refer to Gaussian whitepaper (http://gaussian.com/vib/) for procedure to calculate harmonic oscillator vibrational frequencies using the force constant matrix. """ - Nrotors = 0 + n_rotors = 0 for rotor in rotors: if len(rotor) == 8: - Nrotors += 2 - elif len(rotor) == 5 and isinstance(rotor[1][0],list): - Nrotors += len(rotor[1]) + n_rotors += 2 + elif len(rotor) == 5 and isinstance(rotor[1][0], list): + n_rotors += len(rotor[1]) else: - Nrotors += 1 + n_rotors += 1 mass = conformer.mass.value_si coordinates = conformer.coordinates.getValue() @@ -783,15 +792,15 @@ def projectRotors(conformer, F, rotors, linear, is_ts, getProjectedOutFreqs=Fals linear = is_linear(coordinates) if linear: logging.info('Determined species {0} to be linear.'.format(label)) - Natoms = len(conformer.mass.value) - Nvib = 3 * Natoms - (5 if linear else 6) - Nrotors - (1 if is_ts else 0) + n_atoms = len(conformer.mass.value) + n_vib = 3 * n_atoms - (5 if linear else 6) - n_rotors - (1 if is_ts else 0) # Put origin in center of mass xm = 0.0 ym = 0.0 zm = 0.0 totmass = 0.0 - for i in range(Natoms): + for i in range(n_atoms): xm += mass[i] * coordinates[i, 0] ym += mass[i] * coordinates[i, 1] zm += mass[i] * coordinates[i, 2] @@ -801,7 +810,7 @@ def projectRotors(conformer, F, rotors, linear, is_ts, getProjectedOutFreqs=Fals ym /= totmass zm /= totmass - for i in range(Natoms): + for i in range(n_atoms): coordinates[i, 0] -= xm coordinates[i, 1] -= ym coordinates[i, 2] -= zm @@ -809,28 +818,28 @@ def projectRotors(conformer, F, rotors, linear, is_ts, getProjectedOutFreqs=Fals amass = np.sqrt(mass / constants.amu) # Rotation matrix - I = conformer.getMomentOfInertiaTensor() - PMoI, Ixyz = np.linalg.eigh(I) + inertia = conformer.getMomentOfInertiaTensor() + PMoI, Ixyz = np.linalg.eigh(inertia) external = 6 if linear: external = 5 - D = np.zeros((Natoms * 3, external), np.float64) + D = np.zeros((n_atoms * 3, external), np.float64) - P = np.zeros((Natoms, 3), np.float64) + P = np.zeros((n_atoms, 3), np.float64) # Transform the coordinates to the principal axes P = np.dot(coordinates, Ixyz) - for i in range(Natoms): + for i in range(n_atoms): # Projection vectors for translation D[3 * i + 0, 0] = amass[i] D[3 * i + 1, 1] = amass[i] D[3 * i + 2, 2] = amass[i] # Construction of the 
projection vectors for external rotation - for i in range(Natoms): + for i in range(n_atoms): D[3 * i, 3] = (P[i, 1] * Ixyz[0, 2] - P[i, 2] * Ixyz[0, 1]) * amass[i] D[3 * i + 1, 3] = (P[i, 1] * Ixyz[1, 2] - P[i, 2] * Ixyz[1, 1]) * amass[i] D[3 * i + 2, 3] = (P[i, 1] * Ixyz[2, 2] - P[i, 2] * Ixyz[2, 1]) * amass[i] @@ -843,75 +852,74 @@ def projectRotors(conformer, F, rotors, linear, is_ts, getProjectedOutFreqs=Fals D[3 * i + 2, 5] = (P[i, 0] * Ixyz[2, 1] - P[i, 1] * Ixyz[2, 0]) * amass[i] # Make sure projection matrix is orthonormal - import scipy.linalg - I = np.identity(Natoms * 3, np.float64) + inertia = np.identity(n_atoms * 3, np.float64) - P = np.zeros((Natoms * 3, 3 * Natoms + external), np.float64) + P = np.zeros((n_atoms * 3, 3 * n_atoms + external), np.float64) P[:, 0:external] = D[:, 0:external] - P[:, external:external + 3 * Natoms] = I[:, 0:3 * Natoms] + P[:, external:external + 3 * n_atoms] = inertia[:, 0:3 * n_atoms] - for i in range(3 * Natoms + external): + for i in range(3 * n_atoms + external): norm = 0.0 - for j in range(3 * Natoms): + for j in range(3 * n_atoms): norm += P[j, i] * P[j, i] - for j in range(3 * Natoms): + for j in range(3 * n_atoms): if norm > 1E-15: P[j, i] /= np.sqrt(norm) else: P[j, i] = 0.0 - for j in range(i + 1, 3 * Natoms + external): + for j in range(i + 1, 3 * n_atoms + external): proj = 0.0 - for k in range(3 * Natoms): + for k in range(3 * n_atoms): proj += P[k, i] * P[k, j] - for k in range(3 * Natoms): + for k in range(3 * n_atoms): P[k, j] -= proj * P[k, i] # Order D, there will be vectors that are 0.0 i = 0 - while i < 3 * Natoms: + while i < 3 * n_atoms: norm = 0.0 - for j in range(3 * Natoms): + for j in range(3 * n_atoms): norm += P[j, i] * P[j, i] if norm < 0.5: - P[:, i:3 * Natoms + external - 1] = P[:, i + 1:3 * Natoms + external] + P[:, i:3 * n_atoms + external - 1] = P[:, i + 1:3 * n_atoms + external] else: i += 1 # T is the transformation vector from cartesian to internal coordinates - T = np.zeros((Natoms * 3, 3 * Natoms - external), np.float64) + T = np.zeros((n_atoms * 3, 3 * n_atoms - external), np.float64) - T[:, 0:3 * Natoms - external] = P[:, external:3 * Natoms] + T[:, 0:3 * n_atoms - external] = P[:, external:3 * n_atoms] # Generate mass-weighted force constant matrix # This converts the axes to mass-weighted Cartesian axes # Units of Fm are J/m^2*kg = 1/s^2 - Fm = F.copy() - for i in range(Natoms): - for j in range(Natoms): + fm = F.copy() + for i in range(n_atoms): + for j in range(n_atoms): for u in range(3): for v in range(3): - Fm[3 * i + u, 3 * j + v] /= math.sqrt(mass[i] * mass[j]) + fm[3 * i + u, 3 * j + v] /= math.sqrt(mass[i] * mass[j]) - Fint = np.dot(T.T, np.dot(Fm, T)) + f_int = np.dot(T.T, np.dot(fm, T)) # Get eigenvalues of internal force constant matrix, V = 3N-6 * 3N-6 - eig, V = np.linalg.eigh(Fint) + eig, v = np.linalg.eigh(f_int) logging.debug('Frequencies from internal Hessian') - for i in range(3 * Natoms - external): + for i in range(3 * n_atoms - external): with np.warnings.catch_warnings(): np.warnings.filterwarnings('ignore', r'invalid value encountered in sqrt') logging.debug(np.sqrt(eig[i]) / (2 * math.pi * constants.c * 100)) # Now we can start thinking about projecting out the internal rotations - Dint = np.zeros((3 * Natoms, Nrotors), np.float64) + d_int = np.zeros((3 * n_atoms, n_rotors), np.float64) counter = 0 for i, rotor in enumerate(rotors): - if len(rotor) == 5 and isinstance(rotor[1][0],list): - scandir,pivotss,tops,sigmas,semiclassical = rotor + if len(rotor) == 5 and 
isinstance(rotor[1][0], list): + scandir, pivotss, tops, sigmas, semiclassical = rotor elif len(rotor) == 5: scanLog, pivots, top, symmetry, fit = rotor pivotss = [pivots] @@ -921,12 +929,12 @@ def projectRotors(conformer, F, rotors, linear, is_ts, getProjectedOutFreqs=Fals pivotss = [pivots] tops = [top] elif len(rotor) == 8: - scandir,pivots1,top1,symmetry1,pivots2,top2,symmetry2,symmetry = rotor - pivotss = [pivots1,pivots2] - tops = [top1,top2] + scandir, pivots1, top1, symmetry1, pivots2, top2, symmetry2, symmetry = rotor + pivotss = [pivots1, pivots2] + tops = [top1, top2] else: raise ValueError("{} not a proper rotor format".format(rotor)) - for i in xrange(len(tops)): + for i in range(len(tops)): top = tops[i] pivots = pivotss[i] # Determine pivot atom @@ -936,82 +944,82 @@ def projectRotors(conformer, F, rotors, linear, is_ts, getProjectedOutFreqs=Fals elif pivots[1] in top: pivot1 = pivots[1] pivot2 = pivots[0] - else: + else: raise ValueError('Could not determine pivot atom for rotor {}.'.format(label)) # Projection vectors for internal rotation - e12 = coordinates[pivot1-1,:] - coordinates[pivot2-1,:] - for j in range(Natoms): - atom=j+1 + e12 = coordinates[pivot1 - 1, :] - coordinates[pivot2 - 1, :] + for j in range(n_atoms): + atom = j + 1 if atom in top: - e31 = coordinates[atom-1,:] - coordinates[pivot1-1,:] - Dint[3*(atom-1):3*(atom-1)+3,counter] = np.cross(e31, e12)*amass[atom-1] + e31 = coordinates[atom - 1, :] - coordinates[pivot1 - 1, :] + d_int[3 * (atom - 1):3 * (atom - 1) + 3, counter] = np.cross(e31, e12) * amass[atom - 1] else: - e31 = coordinates[atom-1,:] - coordinates[pivot2-1,:] - Dint[3*(atom-1):3*(atom-1)+3,counter] = np.cross(e31, -e12)*amass[atom-1] - counter+=1 + e31 = coordinates[atom - 1, :] - coordinates[pivot2 - 1, :] + d_int[3 * (atom - 1):3 * (atom - 1) + 3, counter] = np.cross(e31, -e12) * amass[atom - 1] + counter += 1 # Normal modes in mass weighted cartesian coordinates - Vmw = np.dot(T, V) - eigM = np.zeros((3 * Natoms - external, 3 * Natoms - external), np.float64) + vmw = np.dot(T, v) + eigm = np.zeros((3 * n_atoms - external, 3 * n_atoms - external), np.float64) - for i in range(3 * Natoms - external): - eigM[i, i] = eig[i] + for i in range(3 * n_atoms - external): + eigm[i, i] = eig[i] - Fm = np.dot(Vmw, np.dot(eigM, Vmw.T)) + fm = np.dot(vmw, np.dot(eigm, vmw.T)) # Internal rotations are not normal modes => project them on the normal modes and orthogonalize - # Dintproj = (3N-6) x (3N) x (3N) x (Nrotors) - Dintproj = np.dot(Vmw.T, Dint) + # d_int_proj = (3N-6) x (3N) x (3N) x (Nrotors) + d_int_proj = np.dot(vmw.T, d_int) - # Reconstruct Dint - for i in range(Nrotors): - for j in range(3 * Natoms): - Dint[j, i] = 0 - for k in range(3 * Natoms - external): - Dint[j, i] += Dintproj[k, i] * Vmw[j, k] + # Reconstruct d_int + for i in range(n_rotors): + for j in range(3 * n_atoms): + d_int[j, i] = 0 + for k in range(3 * n_atoms - external): + d_int[j, i] += d_int_proj[k, i] * vmw[j, k] # Ortho normalize - for i in range(Nrotors): - norm=0.0 - for j in range(3*Natoms): - norm+=Dint[j,i]*Dint[j,i] - for j in range(3*Natoms): - Dint[j,i]/=np.sqrt(norm) - for j in range(i+1,Nrotors): - proj=0.0 - for k in range (3*Natoms): - proj+=Dint[k,i]*Dint[k,j] - for k in range(3*Natoms): - Dint[k,j]-=proj*Dint[k,i] - - #calculate the frequencies correspondinng to the internal rotors - intProj = np.dot(Fm,Dint) - kmus = np.array([np.linalg.norm(intProj[:,i]) for i in xrange(intProj.shape[1])]) + for i in range(n_rotors): + norm = 0.0 + for j in range(3 * 
n_atoms): + norm += d_int[j, i] * d_int[j, i] + for j in range(3 * n_atoms): + d_int[j, i] /= np.sqrt(norm) + for j in range(i + 1, n_rotors): + proj = 0.0 + for k in range(3 * n_atoms): + proj += d_int[k, i] * d_int[k, j] + for k in range(3 * n_atoms): + d_int[k, j] -= proj * d_int[k, i] + + # calculate the frequencies corresponding to the internal rotors + int_proj = np.dot(fm, d_int) + kmus = np.array([np.linalg.norm(int_proj[:, i]) for i in range(int_proj.shape[1])]) intRotorFreqs = np.sqrt(kmus) / (2.0 * math.pi * constants.c * 100.0) if getProjectedOutFreqs: return intRotorFreqs - #Do the projection - Dintproj=np.dot(Vmw.T,Dint) - Proj = np.dot(Dint, Dint.T) - I = np.identity(Natoms * 3, np.float64) - Proj = I - Proj - Fm = np.dot(Proj, np.dot(Fm, Proj)) + # Do the projection + d_int_proj = np.dot(vmw.T, d_int) + proj = np.dot(d_int, d_int.T) + inertia = np.identity(n_atoms * 3, np.float64) + proj = inertia - proj + fm = np.dot(proj, np.dot(fm, proj)) # Get eigenvalues of mass-weighted force constant matrix - eig, V = np.linalg.eigh(Fm) + eig, v = np.linalg.eigh(fm) eig.sort() # Convert eigenvalues to vibrational frequencies in cm^-1 # Only keep the modes that don't correspond to translation, rotation, or internal rotation logging.debug('Frequencies from projected Hessian') - for i in range(3 * Natoms): + for i in range(3 * n_atoms): with np.warnings.catch_warnings(): np.warnings.filterwarnings('ignore', r'invalid value encountered in sqrt') logging.debug(np.sqrt(eig[i]) / (2 * math.pi * constants.c * 100)) - return np.sqrt(eig[-Nvib:]) / (2 * math.pi * constants.c * 100) + return np.sqrt(eig[-n_vib:]) / (2 * math.pi * constants.c * 100) def assign_frequency_scale_factor(freq_level): @@ -1157,8 +1165,8 @@ def determine_rotor_symmetry(energies, label, pivots): symmetry = len(peaks) reason = 'number of peaks and valleys, all within the determined resolution criteria' if symmetry not in [1, 2, 3]: - logging.warn('Determined symmetry number {0} for rotor of species {1} between pivots {2};' - ' you should make sure this makes sense'.format(symmetry, label, pivots)) + logging.warning('Determined symmetry number {0} for rotor of species {1} between pivots {2}; ' + 'you should make sure this makes sense'.format(symmetry, label, pivots)) else: logging.info('Determined a symmetry number of {0} for rotor of species {1} between pivots {2}' ' based on the {3}.'.format(symmetry, label, pivots, reason)) diff --git a/arkane/statmechTest.py b/arkane/statmechTest.py index c94d569b43..dd9a2ab5c4 100644 --- a/arkane/statmechTest.py +++ b/arkane/statmechTest.py @@ -29,18 +29,19 @@ ############################################################################### """ -This script contains unit tests of the :mod:`arkane.main` module. +This module contains unit tests of the :mod:`arkane.statmech` module. """ -import unittest import os +import unittest + import numpy as np from rmgpy.exceptions import InputError from arkane import Arkane -from arkane.statmech import StatMechJob, determine_rotor_symmetry, is_linear from arkane.qchem import QChemLog +from arkane.statmech import StatMechJob, determine_rotor_symmetry, is_linear ################################################################################ @@ -49,6 +50,7 @@ class TestStatmech(unittest.TestCase): """ Contains unit tests of the StatmechJob class. 
""" + @classmethod def setUp(cls): """A method that is run before each unit test in this class""" diff --git a/arkane/thermo.py b/arkane/thermo.py index 48167e17cb..0def3a5a56 100644 --- a/arkane/thermo.py +++ b/arkane/thermo.py @@ -33,28 +33,23 @@ thermodynamics information for a single species. """ -import os.path -import numpy as np import logging +import os.path import string +import numpy as np + import rmgpy.constants as constants -from rmgpy.statmech.translation import Translation, IdealGasTranslation -from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor -from rmgpy.statmech.vibration import Vibration, HarmonicOscillator -from rmgpy.statmech.torsion import Torsion, HinderedRotor -from rmgpy.statmech.conformer import Conformer -from rmgpy.thermo.thermodata import ThermoData -from rmgpy.thermo.nasa import NASAPolynomial, NASA -from rmgpy.thermo.wilhoit import Wilhoit from rmgpy.chemkin import writeThermoEntry -from rmgpy.species import Species +from rmgpy.exceptions import InputError from rmgpy.molecule import Molecule from rmgpy.molecule.util import retrieveElementCount +from rmgpy.species import Species +from rmgpy.statmech.rotation import LinearRotor, NonlinearRotor +from rmgpy.thermo.wilhoit import Wilhoit -from arkane.output import prettify from arkane.common import ArkaneSpecies, symbol_by_number - +from arkane.output import prettify ################################################################################ @@ -135,20 +130,20 @@ def generateThermo(self): if not any([isinstance(mode, (LinearRotor, NonlinearRotor)) for mode in conformer.modes]): # Monatomic species linear = False - Nfreq = 0 - Nrotors = 0 + n_freq = 0 + n_rotors = 0 Cp0 = 2.5 * constants.R CpInf = 2.5 * constants.R else: # Polyatomic species linear = True if isinstance(conformer.modes[1], LinearRotor) else False - Nfreq = len(conformer.modes[2].frequencies.value) - Nrotors = len(conformer.modes[3:]) + n_freq = len(conformer.modes[2].frequencies.value) + n_rotors = len(conformer.modes[3:]) Cp0 = (3.5 if linear else 4.0) * constants.R - CpInf = Cp0 + (Nfreq + 0.5 * Nrotors) * constants.R + CpInf = Cp0 + (n_freq + 0.5 * n_rotors) * constants.R wilhoit = Wilhoit() - if Nfreq == 0 and Nrotors == 0: + if n_freq == 0 and n_rotors == 0: wilhoit.Cp0 = (Cplist[0], "J/(mol*K)") wilhoit.CpInf = (Cplist[0], "J/(mol*K)") wilhoit.B = (500., "K") @@ -170,10 +165,10 @@ def write_output(self, output_directory): in `output_directory`. """ species = self.species - outputFile = os.path.join(output_directory, 'output.py') + output_file = os.path.join(output_directory, 'output.py') logging.info('Saving thermo for {0}...'.format(species.label)) - with open(outputFile, 'a') as f: + with open(output_file, 'a') as f: f.write('# Thermodynamics for {0}:\n'.format(species.label)) H298 = species.getThermoData().getEnthalpy(298) / 4184. S298 = species.getThermoData().getEntropy(298) / 4.184 diff --git a/arkane/thermoTest.py b/arkane/thermoTest.py index 862257e348..3101a4f75a 100644 --- a/arkane/thermoTest.py +++ b/arkane/thermoTest.py @@ -29,11 +29,11 @@ ############################################################################### """ -This script contains unit tests of the :mod:`arkane.thermo` module. +This module contains unit tests of the :mod:`arkane.thermo` module. """ -import unittest import os +import unittest from rmgpy.species import Species @@ -47,6 +47,7 @@ class TestThermo(unittest.TestCase): """ Contains unit tests of the ThermoJob class. 
""" + @classmethod def setUp(cls): """A method that is run before each unit test in this class""" @@ -64,8 +65,6 @@ def test_element_count_from_conformer(self): element_count = self.thermo_job.element_count_from_conformer() self.assertEqual(element_count, {'H': 4, 'C': 2}) - - ################################################################################ diff --git a/arkane/util.py b/arkane/util.py index e2654a139a..63d34571e4 100644 --- a/arkane/util.py +++ b/arkane/util.py @@ -28,10 +28,19 @@ # # ############################################################################### +""" +This module contains different utilities used in Arkane. +""" + +from rmgpy.exceptions import InputError + from arkane.gaussian import GaussianLog from arkane.molpro import MolproLog from arkane.qchem import QChemLog +################################################################################ + + def determine_qm_software(fullpath): """ Given a path to the log file of a QM software, determine whether it is Gaussian, Molpro, or QChem @@ -54,7 +63,6 @@ def determine_qm_software(fullpath): break line = f.readline() else: - raise InputError( - "File at {0} could not be identified as a Gaussian, QChem or Molpro log file.".format(fullpath)) + raise InputError('File at {0} could not be identified as a Gaussian, ' + 'QChem or Molpro log file.'.format(fullpath)) return software_log - From 3b0a17141be30fa1ca5b3fd56339f83b121efd43 Mon Sep 17 00:00:00 2001 From: alongd Date: Thu, 15 Aug 2019 16:04:00 -0400 Subject: [PATCH 012/155] BugFix: Nested variable i definition resolved --- arkane/statmech.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arkane/statmech.py b/arkane/statmech.py index b39802eab6..a0d1f05cbd 100644 --- a/arkane/statmech.py +++ b/arkane/statmech.py @@ -934,9 +934,9 @@ def projectRotors(conformer, F, rotors, linear, is_ts, getProjectedOutFreqs=Fals tops = [top1, top2] else: raise ValueError("{} not a proper rotor format".format(rotor)) - for i in range(len(tops)): - top = tops[i] - pivots = pivotss[i] + for k in range(len(tops)): + top = tops[k] + pivots = pivotss[k] # Determine pivot atom if pivots[0] in top: pivot1 = pivots[0] From d9d7f8b15211283cb0a0c7ea19679405bccc852c Mon Sep 17 00:00:00 2001 From: alongd Date: Thu, 15 Aug 2019 16:15:57 -0400 Subject: [PATCH 013/155] Implement abstract methods in Arkane Logs, raise NotImplementedErrors --- arkane/gaussian.py | 8 ++++++++ arkane/molpro.py | 8 ++++++++ arkane/qchem.py | 16 ++++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/arkane/gaussian.py b/arkane/gaussian.py index ec2c34e150..7d60f13b4c 100644 --- a/arkane/gaussian.py +++ b/arkane/gaussian.py @@ -470,3 +470,11 @@ def loadNegativeFrequency(self): raise Exception('Unable to find imaginary frequency of {1} ' 'in Gaussian output file {0}'.format(self.path, self.species.label)) return frequency + + def get_D1_diagnostic(self): + """Not implemented for Gaussian""" + raise NotImplementedError('The get_D1_diagnostic method is not implemented for Gaussian Logs') + + def get_T1_diagnostic(self): + """Not implemented for Gaussian""" + raise NotImplementedError('The get_T1_diagnostic method is not implemented for Gaussian Logs') diff --git a/arkane/molpro.py b/arkane/molpro.py index 4e1fd594a5..5d6eda11a8 100644 --- a/arkane/molpro.py +++ b/arkane/molpro.py @@ -425,3 +425,11 @@ def get_D1_diagnostic(self): items = line.split() return float(items[-1]) raise ValueError('Unable to find D1 diagnostic in energy file: {}'.format(self.path)) + + def 
load_scan_pivot_atoms(self): + """Not implemented for Molpro""" + raise NotImplementedError('The load_scan_pivot_atoms method is not implemented for Molpro Logs') + + def load_scan_frozen_atoms(self): + """Not implemented for Molpro""" + raise NotImplementedError('The load_scan_frozen_atoms method is not implemented for Molpro Logs') diff --git a/arkane/qchem.py b/arkane/qchem.py index 443367c327..b6db1c5ac5 100644 --- a/arkane/qchem.py +++ b/arkane/qchem.py @@ -354,3 +354,19 @@ def loadNegativeFrequency(self): return frequency else: raise InputError('Unable to find imaginary frequency in QChem output file {0}'.format(self.path)) + + def load_scan_pivot_atoms(self): + """Not implemented for QChem""" + raise NotImplementedError('The load_scan_pivot_atoms method is not implemented for QChem Logs') + + def load_scan_frozen_atoms(self): + """Not implemented for QChem""" + raise NotImplementedError('The load_scan_frozen_atoms method is not implemented for QChem Logs') + + def get_D1_diagnostic(self): + """Not implemented for QChem""" + raise NotImplementedError('The get_D1_diagnostic method is not implemented for QChem Logs') + + def get_T1_diagnostic(self): + """Not implemented for QChem""" + raise NotImplementedError('The get_T1_diagnostic method is not implemented for QChem Logs') From 078b9a5481ce3b39e40099ecf6fe02e8fcfd0b8d Mon Sep 17 00:00:00 2001 From: alongd Date: Thu, 15 Aug 2019 16:28:17 -0400 Subject: [PATCH 014/155] Turned static log methods in Arkane into functions, renamed them --- arkane/main.py | 141 +++++++++++++++++++++++++------------------------ 1 file changed, 72 insertions(+), 69 deletions(-) diff --git a/arkane/main.py b/arkane/main.py index 8c999827a6..061be582c8 100644 --- a/arkane/main.py +++ b/arkane/main.py @@ -147,72 +147,6 @@ def parseCommandLineArguments(self): else: self.outputDirectory = os.path.dirname(os.path.abspath(args.file[0])) - def initializeLog(self, verbose=logging.INFO, logFile=None): - """ - Set up a logger for Arkane to use to print output to stdout. The - `verbose` parameter is an integer specifying the amount of log text seen - at the console; the levels correspond to those of the :data:`logging` module. - """ - # Create logger - logger = logging.getLogger() - logger.setLevel(verbose) - - # Use custom level names for cleaner log output - logging.addLevelName(logging.CRITICAL, 'Critical: ') - logging.addLevelName(logging.ERROR, 'Error: ') - logging.addLevelName(logging.WARNING, 'Warning: ') - logging.addLevelName(logging.INFO, '') - logging.addLevelName(logging.DEBUG, '') - logging.addLevelName(0, '') - - # Create formatter and add to handlers - formatter = logging.Formatter('%(levelname)s%(message)s') - - # Remove old handlers before adding ours - while logger.handlers: - logger.removeHandler(logger.handlers[0]) - - # Create console handler; send everything to stdout rather than stderr - ch = logging.StreamHandler(sys.stdout) - ch.setLevel(verbose) - ch.setFormatter(formatter) - logger.addHandler(ch) - - # Create file handler; always be at least verbose in the file - if logFile: - fh = logging.FileHandler(filename=logFile) - fh.setLevel(min(logging.DEBUG, verbose)) - fh.setFormatter(formatter) - logger.addHandler(fh) - - def logHeader(self, level=logging.INFO): - """ - Output a header containing identifying information about Arkane to the log. 
- """ - from rmgpy import __version__ - logging.log(level, 'Arkane execution initiated at {0}'.format(time.asctime())) - logging.log(level, '') - - logging.log(level, '################################################################') - logging.log(level, '# #') - logging.log(level, '# Automated Reaction Kinetics and Network Exploration (Arkane) #') - logging.log(level, '# #') - logging.log(level, '# Version: {0:49s} #'.format(__version__)) - logging.log(level, '# Authors: RMG Developers (rmg_dev@mit.edu) #') - logging.log(level, '# P.I.s: William H. Green (whgreen@mit.edu) #') - logging.log(level, '# Richard H. West (r.west@neu.edu) #') - logging.log(level, '# Website: http://reactionmechanismgenerator.github.io/ #') - logging.log(level, '# #') - logging.log(level, '################################################################') - logging.log(level, '') - - def logFooter(self, level=logging.INFO): - """ - Output a footer to the log. - """ - logging.log(level, '') - logging.log(level, 'Arkane execution terminated at {0}'.format(time.asctime())) - def loadInputFile(self, inputFile): """ Load a set of jobs from the given `inputFile` on disk. Returns the @@ -232,10 +166,10 @@ def execute(self): # Initialize the logging system (both to the console and to a file in the # output directory) - self.initializeLog(self.verbose, os.path.join(self.outputDirectory, 'arkane.log')) + initialize_log(self.verbose, os.path.join(self.outputDirectory, 'arkane.log')) # Print some information to the beginning of the log - self.logHeader() + log_header() # Load the input file for the job self.jobList = self.loadInputFile(self.inputFile) @@ -346,7 +280,7 @@ def execute(self): f.write('END\n\n') # Print some information to the end of the log - self.logFooter() + log_footer() def getLibraries(self): """Get RMG kinetics and thermo libraries""" @@ -411,3 +345,72 @@ def getLibraries(self): kinetics_library.label = name return thermo_library, kinetics_library, species_list + + +def initialize_log(verbose=logging.INFO, log_file=None): + """ + Set up a logger for Arkane to use to print output to stdout. The + `verbose` parameter is an integer specifying the amount of log text seen + at the console; the levels correspond to those of the :data:`logging` module. + """ + # Create logger + logger = logging.getLogger() + logger.setLevel(verbose) + + # Use custom level names for cleaner log output + logging.addLevelName(logging.CRITICAL, 'Critical: ') + logging.addLevelName(logging.ERROR, 'Error: ') + logging.addLevelName(logging.WARNING, 'Warning: ') + logging.addLevelName(logging.INFO, '') + logging.addLevelName(logging.DEBUG, '') + logging.addLevelName(0, '') + + # Create formatter and add to handlers + formatter = logging.Formatter('%(levelname)s%(message)s') + + # Remove old handlers before adding ours + while logger.handlers: + logger.removeHandler(logger.handlers[0]) + + # Create console handler; send everything to stdout rather than stderr + ch = logging.StreamHandler(sys.stdout) + ch.setLevel(verbose) + ch.setFormatter(formatter) + logger.addHandler(ch) + + # Create file handler; always be at least verbose in the file + if log_file: + fh = logging.FileHandler(filename=log_file) + fh.setLevel(min(logging.DEBUG, verbose)) + fh.setFormatter(formatter) + logger.addHandler(fh) + + +def log_header(level=logging.INFO): + """ + Output a header containing identifying information about Arkane to the log. 
+ """ + from rmgpy import __version__ + logging.log(level, 'Arkane execution initiated at {0}'.format(time.asctime())) + logging.log(level, '') + + logging.log(level, '################################################################') + logging.log(level, '# #') + logging.log(level, '# Automated Reaction Kinetics and Network Exploration (Arkane) #') + logging.log(level, '# #') + logging.log(level, '# Version: {0:49s} #'.format(__version__)) + logging.log(level, '# Authors: RMG Developers (rmg_dev@mit.edu) #') + logging.log(level, '# P.I.s: William H. Green (whgreen@mit.edu) #') + logging.log(level, '# Richard H. West (r.west@neu.edu) #') + logging.log(level, '# Website: http://reactionmechanismgenerator.github.io/ #') + logging.log(level, '# #') + logging.log(level, '################################################################') + logging.log(level, '') + + +def log_footer(level=logging.INFO): + """ + Output a footer to the log. + """ + logging.log(level, '') + logging.log(level, 'Arkane execution terminated at {0}'.format(time.asctime())) From a82e020a1db63cb769be57f5901185a28f60fb08 Mon Sep 17 00:00:00 2001 From: alongd Date: Thu, 15 Aug 2019 22:31:14 -0400 Subject: [PATCH 015/155] Don't shadow `database` from outer scope --- arkane/input.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arkane/input.py b/arkane/input.py index d36514e1b7..233d2e33c7 100644 --- a/arkane/input.py +++ b/arkane/input.py @@ -116,9 +116,9 @@ def database(thermoLibraries=None, transportLibraries=None, reactionLibraries=No "['H_Abstraction','R_Recombination'] or ['!Intra_Disproportionation'].") kineticsFamilies = kineticsFamilies - database = getDB() or RMGDatabase() + rmg_database = getDB() or RMGDatabase() - database.load( + rmg_database.load( path=databaseDirectory, thermoLibraries=thermoLibraries, transportLibraries=transportLibraries, @@ -129,10 +129,10 @@ def database(thermoLibraries=None, transportLibraries=None, reactionLibraries=No depository=False, # Don't bother loading the depository information, as we don't use it ) - for family in database.kinetics.families.values(): # load training - family.addKineticsRulesFromTrainingSet(thermoDatabase=database.thermo) + for family in rmg_database.kinetics.families.values(): # load training + family.addKineticsRulesFromTrainingSet(thermoDatabase=rmg_database.thermo) - for family in database.kinetics.families.values(): + for family in rmg_database.kinetics.families.values(): family.fillKineticsRulesByAveragingUp(verbose=True) @@ -220,8 +220,8 @@ def species(label, *args, **kwargs): raise DatabaseError('Thermo database is None.') except DatabaseError: logging.warning("The database isn't loaded, cannot estimate thermo for {0}. 
" - "If it is a bath gas, set reactive = False to avoid generating" - " thermo.".format(spec.label)) + "If it is a bath gas, set reactive = False to avoid generating " + "thermo.".format(spec.label)) else: logging.info('No E0 or thermo found, estimating thermo and E0 of species {0} using' ' RMG-Database...'.format(spec.label)) From b188a17749597e511799ff4cf75c92177711738c Mon Sep 17 00:00:00 2001 From: alongd Date: Fri, 16 Aug 2019 00:25:06 -0400 Subject: [PATCH 016/155] Create LogError exception class for Arkane Also replace instances of catching broad exceptions --- arkane/exceptions.py | 8 ++++++++ arkane/gaussian.py | 24 ++++++++++++------------ arkane/kinetics.py | 2 +- arkane/molpro.py | 19 ++++++++++--------- arkane/pdep.py | 2 +- arkane/qchem.py | 16 +++++++++------- arkane/statmech.py | 4 ++-- arkane/thermo.py | 2 +- 8 files changed, 44 insertions(+), 33 deletions(-) diff --git a/arkane/exceptions.py b/arkane/exceptions.py index 211b5e9a29..e21e2ca209 100644 --- a/arkane/exceptions.py +++ b/arkane/exceptions.py @@ -47,3 +47,11 @@ class BondAdditivityCorrectionError(Exception): additivity corrections. """ pass + + +class LogError(Exception): + """ + An exception to be raised when an error occurs while parsing + electronic structure calculation log files. + """ + pass diff --git a/arkane/gaussian.py b/arkane/gaussian.py index 7d60f13b4c..98891ca391 100644 --- a/arkane/gaussian.py +++ b/arkane/gaussian.py @@ -40,10 +40,10 @@ import numpy as np import rmgpy.constants as constants -from rmgpy.exceptions import InputError from rmgpy.statmech import IdealGasTranslation, NonlinearRotor, LinearRotor, HarmonicOscillator, Conformer from arkane.common import check_conformer_energy, get_element_mass +from arkane.exceptions import LogError from arkane.log import Log ################################################################################ @@ -149,10 +149,10 @@ def loadGeometry(self): number = np.array(number, np.int) mass = np.array(mass, np.float64) if len(number) == 0 or len(coord) == 0 or len(mass) == 0: - raise InputError('Unable to read atoms from Gaussian geometry output file {0}. ' - 'Make sure the output file is not corrupt.\nNote: if your species has ' - '50 or more atoms, you will need to add the `iop(2/9=2000)` keyword to your ' - 'input file so Gaussian will print the input orientation geomerty.'.format(self.path)) + raise LogError('Unable to read atoms from Gaussian geometry output file {0}. 
' + 'Make sure the output file is not corrupt.\nNote: if your species has ' + '50 or more atoms, you will need to add the `iop(2/9=2000)` keyword to your ' + 'input file so Gaussian will print the input orientation geometry.'.format(self.path)) return coord, number, mass @@ -293,12 +293,12 @@ def loadEnergy(self, zpe_scale_factor=1.): if e0_composite is not None: if scaled_zpe is None: - raise Exception('Unable to find zero-point energy in Gaussian log file.') + raise LogError('Unable to find zero-point energy in Gaussian log file.') return e0_composite - scaled_zpe elif e_elect is not None: return e_elect else: - raise Exception('Unable to find energy in Gaussian log file.') + raise LogError('Unable to find energy in Gaussian log file.') def loadZeroPointEnergy(self): """ @@ -324,7 +324,7 @@ def loadZeroPointEnergy(self): if zpe is not None: return zpe else: - raise Exception('Unable to find zero-point energy in Gaussian log file.') + raise LogError('Unable to find zero-point energy in Gaussian log file.') def loadScanEnergies(self): """ @@ -414,8 +414,8 @@ def _load_scan_specs(self, letter_spec): elif terms[0] == 'B': action_index = 3 # bond length with 2 terms else: - raise ValueError('This file has an option not supported by arkane.' - 'Unable to read scan specs for line: {}'.format(line)) + raise LogError('This file has an option not supported by Arkane. ' + 'Unable to read scan specs for line: {0}'.format(line)) if len(terms) > action_index: # specified type explicitly if terms[action_index] == letter_spec: @@ -467,8 +467,8 @@ def loadNegativeFrequency(self): frequencies.sort() frequency = [freq for freq in frequencies if freq < 0][0] if frequency is None: - raise Exception('Unable to find imaginary frequency of {1} ' - 'in Gaussian output file {0}'.format(self.path, self.species.label)) + raise LogError('Unable to find imaginary frequency of {1} ' + 'in Gaussian output file {0}'.format(self.path, self.species.label)) return frequency def get_D1_diagnostic(self): diff --git a/arkane/kinetics.py b/arkane/kinetics.py index 9ead721d21..a27bbdb592 100644 --- a/arkane/kinetics.py +++ b/arkane/kinetics.py @@ -139,7 +139,7 @@ def execute(self, output_directory=None, plot=True): "{0} in reaction {1}".format(e, self.reaction.label)) try: self.draw(output_directory) - except: + except Exception as e: logging.warning("Could not draw reaction {1} due to error: {0}".format(e, self.reaction.label)) if self.sensitivity_conditions is not None: logging.info('\n\nRunning sensitivity analysis...') diff --git a/arkane/molpro.py b/arkane/molpro.py index 5d6eda11a8..2925186ae4 100644 --- a/arkane/molpro.py +++ b/arkane/molpro.py @@ -44,6 +44,7 @@ from arkane.common import get_element_mass from arkane.exceptions import LogError from arkane.log import Log +from arkane.exceptions import LogError ################################################################################ @@ -161,7 +162,7 @@ def loadGeometry(self): mass = np.array(mass, np.float64) coord = np.array(coord, np.float64) if len(number) == 0 or len(coord) == 0 or len(mass) == 0: - raise InputError('Unable to read atoms from Molpro geometry output file {0}'.format(self.path)) + raise LogError('Unable to read atoms from Molpro geometry output file {0}'.format(self.path)) return coord, number, mass @@ -300,8 +301,8 @@ def loadEnergy(self, zpe_scale_factor=1.): if any([mrci, f12a, f12b]): break else: - raise ValueError('Could not determine type of calculation. 
Currently, CCSD(T)-F12a, CCSD(T)-F12b,' - ' MRCI, MRCI+Davidson are supported') + raise LogError('Could not determine type of calculation. Currently, CCSD(T)-F12a, CCSD(T)-F12b, ' + 'MRCI, MRCI+Davidson are supported') # Search for e_elect for line in lines: if f12 and f12a: @@ -344,7 +345,7 @@ def loadEnergy(self, zpe_scale_factor=1.): logging.debug('Molpro energy found is {0} J/mol'.format(e_elect)) return e_elect else: - raise Exception('Unable to find energy in Molpro log file {0}.'.format(self.path)) + raise LogError('Unable to find energy in Molpro log file {0}.'.format(self.path)) def loadZeroPointEnergy(self): """ @@ -369,8 +370,8 @@ def loadZeroPointEnergy(self): if zpe is not None: return zpe else: - raise Exception('Unable to find zero-point energy in Molpro log file. Make sure that the' - ' keyword {frequencies, thermo, print,thermo} is included in the input file') + raise LogError('Unable to find zero-point energy in Molpro log file. Make sure that the ' + 'keyword {frequencies, thermo, print,thermo} is included in the input file.') def loadNegativeFrequency(self): """ @@ -388,7 +389,7 @@ def loadNegativeFrequency(self): line = f.readline() if frequency is None: - raise Exception('Unable to find imaginary frequency in Molpro output file {0}'.format(self.path)) + raise LogError('Unable to find imaginary frequency in Molpro output file {0}'.format(self.path)) negativefrequency = -float(frequency) return negativefrequency @@ -410,7 +411,7 @@ def get_T1_diagnostic(self): if 'T1 diagnostic: ' in line: items = line.split() return float(items[-1]) - raise ValueError('Unable to find T1 diagnostic in energy file: {}'.format(self.path)) + raise LogError('Unable to find T1 diagnostic in energy file: {0}'.format(self.path)) def get_D1_diagnostic(self): """ @@ -424,7 +425,7 @@ def get_D1_diagnostic(self): if 'D1 diagnostic: ' in line: items = line.split() return float(items[-1]) - raise ValueError('Unable to find D1 diagnostic in energy file: {}'.format(self.path)) + raise LogError('Unable to find D1 diagnostic in energy file: {0}'.format(self.path)) def load_scan_pivot_atoms(self): """Not implemented for Molpro""" diff --git a/arkane/pdep.py b/arkane/pdep.py index ef9220395f..4afae0759b 100644 --- a/arkane/pdep.py +++ b/arkane/pdep.py @@ -453,7 +453,7 @@ def fitInterpolationModel(self, Tdata, Pdata, kdata, kunits): elif model == 'pdeparrhenius': kinetics = PDepArrhenius().fitToData(Tdata, Pdata, kdata, kunits) else: - raise Exception('Invalid interpolation model {0!r}.'.format(self.interpolationModel[0])) + raise PressureDependenceError('Invalid interpolation model {0!r}.'.format(self.interpolationModel[0])) return kinetics def save(self, outputFile): diff --git a/arkane/qchem.py b/arkane/qchem.py index b6db1c5ac5..01b6901d4b 100644 --- a/arkane/qchem.py +++ b/arkane/qchem.py @@ -44,6 +44,7 @@ from arkane.common import check_conformer_energy, get_element_mass from arkane.log import Log +from arkane.exceptions import LogError ################################################################################ @@ -135,8 +136,8 @@ def loadGeometry(self): break if not completed_job: - raise InputError( - 'Could not find a successfully completed QChem job in QChem output file {0}'.format(self.path)) + raise LogError('Could not find a successfully completed QChem job ' + 'in QChem output file {0}'.format(self.path)) # Now look for the geometry. # Will return the final geometry in the file under Standard Nuclear Orientation. 
@@ -164,7 +165,7 @@ def loadGeometry(self): number = np.array(number, np.int) mass = np.array(mass, np.float64) if len(number) == 0 or len(coord) == 0 or len(mass) == 0: - raise InputError('Unable to read atoms from QChem geometry output file {0}'.format(self.path)) + raise LogError('Unable to read atoms from QChem geometry output file {0}.'.format(self.path)) return coord, number, mass @@ -285,7 +286,7 @@ def loadEnergy(self, zpe_scale_factor=1.): b = float(line.split()[8]) * constants.E_h * constants.Na e_elect = a or b if e_elect is None: - raise InputError('Unable to find energy in QChem output file.') + raise LogError('Unable to find energy in QChem output file {0}.'.format(self.path)) return e_elect def loadZeroPointEnergy(self): @@ -301,7 +302,7 @@ def loadZeroPointEnergy(self): if zpe is not None: return zpe else: - raise InputError('Unable to find zero-point energy in QChem output file.') + raise LogError('Unable to find zero-point energy in QChem output file {0}.'.format(self.path)) def loadScanEnergies(self): """ @@ -323,7 +324,8 @@ def loadScanEnergies(self): logging.info('found a successfully completed QChem Job') read = True elif 'SCF failed to converge' in line: - raise InputError('QChem Job did not sucessfully complete: SCF failed to converge') + raise LogError('QChem Job did not successfully complete: ' + 'SCF failed to converge in file {0}.'.format(self.path)) logging.info(' Assuming {0} is the output from a QChem PES scan...'.format(os.path.basename(self.path))) v_list = np.array(v_list, np.float64) @@ -353,7 +355,7 @@ def loadNegativeFrequency(self): if frequency < 0: return frequency else: - raise InputError('Unable to find imaginary frequency in QChem output file {0}'.format(self.path)) + raise LogError('Unable to find imaginary frequency in QChem output file {0}.'.format(self.path)) def load_scan_pivot_atoms(self): """Not implemented for QChem""" diff --git a/arkane/statmech.py b/arkane/statmech.py index a0d1f05cbd..bac66caf2d 100644 --- a/arkane/statmech.py +++ b/arkane/statmech.py @@ -417,7 +417,7 @@ def load(self, pdep=False, plot=False): try: symbol = symbol_by_number[atom_num] except KeyError: - raise Exception('Could not recognize element number {0}.'.format(atom_num)) + raise ElementError('Could not recognize element number {0}.'.format(atom_num)) atoms[symbol] = atoms.get(symbol, 0) + 1 # Save atoms for use in writing thermo output @@ -696,7 +696,7 @@ def create_hindered_rotor_figure(self, angle, v_list, cosineRotor, fourierRotor, """ try: import pylab - except: + except ImportError: logging.warning("Unable to import pylab. not generating hindered rotor figures") return phi = np.arange(0, 6.3, 0.02, np.float64) diff --git a/arkane/thermo.py b/arkane/thermo.py index 0def3a5a56..1cdce590cc 100644 --- a/arkane/thermo.py +++ b/arkane/thermo.py @@ -107,7 +107,7 @@ def generateThermo(self): attribute). 
""" if self.thermoClass.lower() not in ['wilhoit', 'nasa']: - raise Exception('Unknown thermodynamic model "{0}".'.format(self.thermoClass)) + raise InputError('Unknown thermodynamic model "{0}".'.format(self.thermoClass)) species = self.species From 83a3466407d087c51e5f19ab5a8c31fc14eb92fb Mon Sep 17 00:00:00 2001 From: alongd Date: Fri, 16 Aug 2019 00:12:39 -0400 Subject: [PATCH 017/155] Removed seemingly unnecessary print statements from Arkane input.py --- arkane/input.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/arkane/input.py b/arkane/input.py index 233d2e33c7..6b3ce3df6b 100644 --- a/arkane/input.py +++ b/arkane/input.py @@ -319,9 +319,6 @@ def reaction(label, reactants, products, transitionState=None, kinetics=None, tu if not all([m.molecule != [] for m in rxn.reactants + rxn.products]): raise ValueError('chemical structures of reactants and products not available for RMG estimation of ' 'reaction {0}'.format(label)) - for spc in rxn.reactants + rxn.products: - print spc.label - print spc.molecule db = getDB('kinetics') rxns = db.generate_reactions_from_libraries(reactants=rxn.reactants, products=rxn.products) rxns = [r for r in rxns if r.elementary_high_p] From b5a240d4ed8549ae4b0f6d9de7eebfaf47f73a98 Mon Sep 17 00:00:00 2001 From: alongd Date: Fri, 16 Aug 2019 07:59:38 -0400 Subject: [PATCH 018/155] Directly use the correct returned argument (instead of _, markings) --- arkane/kinetics.py | 4 ++-- arkane/sensitivity.py | 4 ++-- arkane/statmech.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arkane/kinetics.py b/arkane/kinetics.py index a27bbdb592..fb0591c57f 100644 --- a/arkane/kinetics.py +++ b/arkane/kinetics.py @@ -488,7 +488,7 @@ def __getLabelSize(self, configuration, format='pdf'): bounding_rects = [] if self.__useStructureForLabel(configuration): for spec in configuration.species_list: - _, _, rect = MoleculeDrawer().draw(spec.molecule[0], format=format) + rect = MoleculeDrawer().draw(spec.molecule[0], format=format)[2] bounding_rects.append(list(rect)) else: for spec in configuration.species_list: @@ -521,7 +521,7 @@ def __drawLabel(self, configuration, cr, x0, y0, format='pdf'): if use_structures: molecule_drawer = MoleculeDrawer() cr.save() - _, _, rect = molecule_drawer.draw(spec.molecule[0], format=format) + rect = molecule_drawer.draw(spec.molecule[0], format=format)[2] cr.restore() x = x0 - 0.5 * (rect[2] - bounding_rect[2]) cr.save() diff --git a/arkane/sensitivity.py b/arkane/sensitivity.py index 0c58d119b9..38346f2dc2 100644 --- a/arkane/sensitivity.py +++ b/arkane/sensitivity.py @@ -178,7 +178,7 @@ def plot(self): products_label = ' + '.join([reactant.label for reactant in self.job.reaction.products]) plt.rcdefaults() - _, ax = plt.subplots(nrows=len(self.conditions), ncols=2, tight_layout=True) + ax = plt.subplots(nrows=len(self.conditions), ncols=2, tight_layout=True)[1] labels = [reactants_label, ts_label, products_label] min_sa = min(min(min(self.f_sa_coefficients.values())), min(min(self.r_sa_coefficients.values()))) max_sa = max(max(max(self.f_sa_coefficients.values())), max(max(self.r_sa_coefficients.values()))) @@ -355,7 +355,7 @@ def plot(self, wells, transition_states): for rxn in self.job.network.netReactions: plt.rcdefaults() - _, ax = plt.subplots(nrows=len(self.conditions), ncols=1, tight_layout=True) + ax = plt.subplots(nrows=len(self.conditions), ncols=1, tight_layout=True)[1] labels = [str(entry) for entry in wells] labels.extend(ts.label for ts in transition_states) max_sa = min_sa = 
self.sa_coefficients[str(rxn)][wells[0]][0] diff --git a/arkane/statmech.py b/arkane/statmech.py index bac66caf2d..3e2b323e9d 100644 --- a/arkane/statmech.py +++ b/arkane/statmech.py @@ -232,7 +232,7 @@ def load(self, pdep=False, plot=False): """ path = self.path is_ts = isinstance(self.species, TransitionState) - _, file_extension = os.path.splitext(path) + file_extension = os.path.splitext(path)[1] if file_extension in ['.yml', '.yaml']: self.arkane_species.load_yaml(path=path, label=self.species.label, pdep=pdep) self.species.conformer = self.arkane_species.conformer From b3fa2508c550a758e576e179422e0957d54c1c29 Mon Sep 17 00:00:00 2001 From: alongd Date: Mon, 19 Aug 2019 13:25:02 -0400 Subject: [PATCH 019/155] Replace pylab with pyplot for plotting rotor scans --- arkane/statmech.py | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/arkane/statmech.py b/arkane/statmech.py index 3e2b323e9d..cbaf19a416 100644 --- a/arkane/statmech.py +++ b/arkane/statmech.py @@ -694,11 +694,6 @@ def create_hindered_rotor_figure(self, angle, v_list, cosineRotor, fourierRotor, Plot the potential for the rotor, along with its cosine and Fourier series potential fits, and save it in the `hindered_rotor_plots` attribute. """ - try: - import pylab - except ImportError: - logging.warning("Unable to import pylab. not generating hindered rotor figures") - return phi = np.arange(0, 6.3, 0.02, np.float64) Vlist_cosine = np.zeros_like(phi) Vlist_fourier = np.zeros_like(phi) @@ -706,17 +701,17 @@ def create_hindered_rotor_figure(self, angle, v_list, cosineRotor, fourierRotor, Vlist_cosine[i] = cosineRotor.getPotential(phi[i]) Vlist_fourier[i] = fourierRotor.getPotential(phi[i]) - fig = pylab.figure(figsize=(6, 5)) - pylab.plot(angle, v_list / 4184., 'ok') + fig = plt.figure(figsize=(6, 5)) + plt.plot(angle, v_list / 4184., 'ok') linespec = '-r' if rotor is cosineRotor else '--r' - pylab.plot(phi, Vlist_cosine / 4184., linespec) + plt.plot(phi, Vlist_cosine / 4184., linespec) linespec = '-b' if rotor is fourierRotor else '--b' - pylab.plot(phi, Vlist_fourier / 4184., linespec) - pylab.legend(['scan', 'cosine', 'fourier'], loc=1) - pylab.xlim(0, 2 * constants.pi) - pylab.xlabel('Angle') - pylab.ylabel('Potential (kcal/mol)') - pylab.title('{0} hindered rotor #{1:d}'.format(self.species.label, rotorIndex + 1)) + plt.plot(phi, Vlist_fourier / 4184., linespec) + plt.legend(['scan', 'cosine', 'fourier'], loc=1) + plt.xlim(0, 2 * constants.pi) + plt.xlabel('Angle') + plt.ylabel('Potential (kcal/mol)') + plt.title('{0} hindered rotor #{1:d}'.format(self.species.label, rotorIndex + 1)) axes = fig.get_axes()[0] axes.set_xticks([float(j * constants.pi / 4) for j in range(0, 9)]) From 7e206f3d6bc4ffe9100f5af1f5130379fd1b01f2 Mon Sep 17 00:00:00 2001 From: Mark Payne Date: Fri, 16 Aug 2019 09:00:00 -0400 Subject: [PATCH 020/155] Upgrade external wip to Python 3 --- external/wip.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/external/wip.py b/external/wip.py index cb0fb143c3..ce7f1589ad 100644 --- a/external/wip.py +++ b/external/wip.py @@ -5,12 +5,15 @@ # From http://www.natpryce.com/articles/000788.html # Copyright 2011 Nat Pryce. 
Posted 2011-05-30 from functools import wraps + from nose.plugins.attrib import attr from nose.plugins.skip import SkipTest + def fail(message): raise AssertionError(message) + def work_in_progress(f): @wraps(f) def run_test(*args, **kwargs): @@ -20,4 +23,4 @@ def run_test(*args, **kwargs): raise SkipTest("WIP test failed: " + str(e)) fail("test passed but marked as work in progress") - return attr('work_in_progress')(run_test) \ No newline at end of file + return attr('work_in_progress')(run_test) From 842358125af58c9cb76cd62b45a9c338b57d5024 Mon Sep 17 00:00:00 2001 From: Mark Payne Date: Fri, 16 Aug 2019 09:02:00 -0400 Subject: [PATCH 021/155] Upgrade rmg.py to Python 3 --- rmg.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/rmg.py b/rmg.py index cc4b3edb41..d4e5dd2b64 100755 --- a/rmg.py +++ b/rmg.py @@ -31,14 +31,16 @@ """ RMG is an automatic chemical mechanism generator. It is awesomely awesome. """ +from __future__ import print_function import os.path import argparse import logging -import rmgpy +import rmgpy from rmgpy.rmg.main import RMG, initializeLog, processProfileStats, makeProfileGraph + ################################################################################ @@ -49,13 +51,11 @@ def parse_command_line_arguments(command_line_args=None): sensible, parses them, and returns them. """ - parser = argparse.ArgumentParser(description= - """ - Reaction Mechanism Generator (RMG) is an automatic chemical reaction - mechanism generator that constructs kinetic models composed of - elementary chemical reaction steps using a general understanding of - how molecules react. - """) + parser = argparse.ArgumentParser(description='Reaction Mechanism Generator (RMG) is an automatic chemical reaction ' + 'mechanism generator that constructs kinetic models composed of ' + 'elementary chemical reaction steps using a general understanding of ' + 'how molecules react.') + parser.add_argument('file', metavar='FILE', type=str, nargs=1, help='a file describing the job to execute') From 57edf9a237a9e884a9a927ca3df210e0feceae8e Mon Sep 17 00:00:00 2001 From: Mark Payne Date: Tue, 20 Aug 2019 17:00:22 -0400 Subject: [PATCH 022/155] Upgrade rmgpy/*.py to Python 3 Only covers the following files: chemkinTest.py constants*.py constraints*.py display.py exceptions.py quantityTest.py rmgObjectTest.py --- rmgpy/__init__.py | 35 ++-- rmgpy/chemkinTest.py | 376 +++++++++++++++++----------------- rmgpy/constantsTest.py | 14 +- rmgpy/constraints.py | 74 +++---- rmgpy/constraintsTest.py | 1 + rmgpy/display.py | 6 +- rmgpy/exceptions.py | 57 +++++- rmgpy/quantityTest.py | 423 +++++++++++++++++++++------------------ rmgpy/rmgobjectTest.py | 27 +-- 9 files changed, 550 insertions(+), 463 deletions(-) diff --git a/rmgpy/__init__.py b/rmgpy/__init__.py index 598e6f28d4..de0005ffa0 100644 --- a/rmgpy/__init__.py +++ b/rmgpy/__init__.py @@ -34,9 +34,11 @@ import os import os.path -import logging -from .version import __version__ -from .exceptions import SettingsError + +from rmgpy.version import __version__ +from rmgpy.exceptions import SettingsError + + ################################################################################ class Settings(dict): @@ -52,13 +54,13 @@ class Settings(dict): In general you should be working with the module-level variable ``settings`` in this module, which is an instance of this class. 
""" - - def __init__(self, path = None): + + def __init__(self, path=None): super(Settings, self).__init__() self.filename = None self.sources = dict() self.load(path) - + def __setitem__(self, key, value): if key == 'database.directory': value = os.path.abspath(os.path.expandvars(value)) @@ -68,7 +70,7 @@ def __setitem__(self, key, value): raise SettingsError('Unexpecting setting "{0}" encountered.'.format(key)) self.sources[key] = '-' super(Settings, self).__setitem__(key, value) - + def report(self): """ Returns a string saying what is set and where things came from, suitable for logging @@ -94,7 +96,7 @@ def load(self, path=None): """ # First set all settings to their default values self.reset() - + if path: # The user specified an explicit file to use for the settings # Make sure that it exists, fail if it does not @@ -113,9 +115,8 @@ def load(self, path=None): elif os.path.exists(os.path.join(working_dir, 'rmgrc')): self.filename = os.path.join(working_dir, 'rmgrc') else: - return # fail silently, instead of raising the following error: - raise SettingsError('Could not find an RMG settings file to load!') - + return # fail silently, instead of raising an error + # From here on we assume that we have identified the appropriate # settings file to load @@ -123,7 +124,8 @@ def load(self, path=None): for line in f: # Remove any comments from the line index = line.find('#') - if index != -1: line = line[:index] + if index != -1: + line = line[:index] # Is there a key-value pair remaining? if line.find('database.directory') != -1: value = line.split()[-1] # Get the last token from this line @@ -136,20 +138,23 @@ def load(self, path=None): value = value.strip() self['test_data.directory'] = value self.sources['test_data.directory'] = "from {0}".format(self.filename) - + def reset(self): """ Reset all settings to their default values. 
""" self.filename = None rmgpy_module_dir = os.path.abspath(os.path.dirname(__file__)) - self['database.directory'] = os.path.realpath(os.path.join(rmgpy_module_dir, '..', '..', 'RMG-database', 'input')) + self['database.directory'] = os.path.realpath( + os.path.join(rmgpy_module_dir, '..', '..', 'RMG-database', 'input')) self.sources['database.directory'] = 'Default, relative to RMG-Py source code' self['test_data.directory'] = os.path.realpath(os.path.join(rmgpy_module_dir, 'test_data')) self.sources['test_data.directory'] = 'Default, relative to RMG-Py source code' + # The global settings object -settings = Settings(path = None) +settings = Settings(path=None) + ################################################################################ diff --git a/rmgpy/chemkinTest.py b/rmgpy/chemkinTest.py index 7b1a0e689a..77241ef29d 100644 --- a/rmgpy/chemkinTest.py +++ b/rmgpy/chemkinTest.py @@ -31,14 +31,19 @@ import unittest import mock import os -from chemkin import * -from chemkin import _removeLineBreaks, _process_duplicate_reactions + import rmgpy -from rmgpy.species import Species -from rmgpy.reaction import Reaction +from rmgpy.chemkin import getSpeciesIdentifier, loadChemkinFile, loadTransportFile, markDuplicateReactions, \ + readKineticsEntry, readReactionComments, readThermoEntry, saveChemkinFile, saveSpeciesDictionary, saveTransportFile +from rmgpy.chemkin import _removeLineBreaks, _process_duplicate_reactions from rmgpy.data.kinetics import LibraryReaction +from rmgpy.exceptions import ChemkinError from rmgpy.kinetics.arrhenius import Arrhenius, MultiArrhenius from rmgpy.kinetics.chebyshev import Chebyshev +from rmgpy.reaction import Reaction +from rmgpy.species import Species +from rmgpy.thermo import NASA +from rmgpy.transport import TransportData ################################################### @@ -56,14 +61,15 @@ def test_readThermoEntry_BadElementCount(self, mock_logging): the expected logging statements are being created. """ entry = """C2H6 H XC X L 100.000 5000.000 827.28 1 - 2.44813916E+00 1.83377834E-02-7.25714119E-06 1.35300042E-09-9.60327447E-14 2 --1.19655244E+04 8.07917520E+00 3.50507145E+00-3.65219841E-03 6.32200490E-05 3 --8.01049582E-08 3.19734088E-11-1.15627878E+04 6.67152939E+00 4 -""" + 2.44813916E+00 1.83377834E-02-7.25714119E-06 1.35300042E-09-9.60327447E-14 2 + -1.19655244E+04 8.07917520E+00 3.50507145E+00-3.65219841E-03 6.32200490E-05 3 + -8.01049582E-08 3.19734088E-11-1.15627878E+04 6.67152939E+00 4 + """ with self.assertRaises(ValueError): readThermoEntry(entry) - mock_logging.info.assert_called_with("Trouble reading line 'C2H6 H XC X L 100.000 5000.000 827.28 1' element segment 'H X'") + mock_logging.info.assert_called_with( + "Trouble reading line 'C2H6 H XC X L 100.000 5000.000 827.28 1' element segment 'H X'") @mock.patch('rmgpy.chemkin.logging') def test_readThermoEntry_NotGasPhase(self, mock_logging): @@ -77,10 +83,10 @@ def test_readThermoEntry_NotGasPhase(self, mock_logging): the expected logging statements are being created. 
""" entry = """C2H6 H 6C 2 L 100.000 5000.000 827.28 1 - 2.44813916E+00 1.83377834E-02-7.25714119E-06 1.35300042E-09-9.60327447E-14 2 --1.19655244E+04 8.07917520E+00 3.50507145E+00-3.65219841E-03 6.32200490E-05 3 --8.01049582E-08 3.19734088E-11-1.15627878E+04 6.67152939E+00 4 -""" + 2.44813916E+00 1.83377834E-02-7.25714119E-06 1.35300042E-09-9.60327447E-14 2 + -1.19655244E+04 8.07917520E+00 3.50507145E+00-3.65219841E-03 6.32200490E-05 3 + -8.01049582E-08 3.19734088E-11-1.15627878E+04 6.67152939E+00 4 + """ species, thermo, formula = readThermoEntry(entry) mock_logging.warning.assert_called_with("Was expecting gas phase thermo data for C2H6. Skipping thermo data.") @@ -131,11 +137,11 @@ def testReadAndWriteAndReadTemplateReactionFamilyForMinimalExample(self): """ folder = os.path.join(os.path.dirname(rmgpy.__file__), 'test_data/chemkin/chemkin_py') - chemkinPath = os.path.join(folder, 'minimal', 'chem.inp') - dictionaryPath = os.path.join(folder, 'minimal', 'species_dictionary.txt') + chemkin_path = os.path.join(folder, 'minimal', 'chem.inp') + dictionary_path = os.path.join(folder, 'minimal', 'species_dictionary.txt') # read original chemkin file - species, reactions = loadChemkinFile(chemkinPath, dictionaryPath) + species, reactions = loadChemkinFile(chemkin_path, dictionary_path) # ensure correct reading reaction1 = reactions[0] @@ -145,17 +151,17 @@ def testReadAndWriteAndReadTemplateReactionFamilyForMinimalExample(self): self.assertEqual(reaction2.family, "H_Abstraction") self.assertEqual(frozenset('C/H3/Cs\H3;C_methyl'.split(';')), frozenset(reaction2.template)) # saveChemkinFile - chemkinSavePath = os.path.join(folder, 'minimal', 'chem_new.inp') - dictionarySavePath = os.path.join(folder, 'minimal', 'species_dictionary_new.txt') + chemkin_save_path = os.path.join(folder, 'minimal', 'chem_new.inp') + dictionary_save_path = os.path.join(folder, 'minimal', 'species_dictionary_new.txt') - saveChemkinFile(chemkinSavePath, species, reactions, verbose=True, checkForDuplicates=True) - saveSpeciesDictionary(dictionarySavePath, species, oldStyle=False) + saveChemkinFile(chemkin_save_path, species, reactions, verbose=True, checkForDuplicates=True) + saveSpeciesDictionary(dictionary_save_path, species, oldStyle=False) - self.assertTrue(os.path.isfile(chemkinSavePath)) - self.assertTrue(os.path.isfile(dictionarySavePath)) + self.assertTrue(os.path.isfile(chemkin_save_path)) + self.assertTrue(os.path.isfile(dictionary_save_path)) # read newly written chemkin file to make sure the entire cycle works - _, reactions2 =loadChemkinFile(chemkinSavePath, dictionarySavePath) + _, reactions2 = loadChemkinFile(chemkin_save_path, dictionary_save_path) reaction1_new = reactions2[0] self.assertEqual(reaction1_new.family, reaction1_new.family) @@ -168,8 +174,8 @@ def testReadAndWriteAndReadTemplateReactionFamilyForMinimalExample(self): self.assertEqual(reaction2_new.degeneracy, reaction2_new.degeneracy) # clean up - os.remove(chemkinSavePath) - os.remove(dictionarySavePath) + os.remove(chemkin_save_path) + os.remove(dictionary_save_path) def testReadAndWriteTemplateReactionFamilyForPDDExample(self): """ @@ -181,11 +187,11 @@ def testReadAndWriteTemplateReactionFamilyForPDDExample(self): """ folder = os.path.join(os.path.dirname(rmgpy.__file__), 'test_data/chemkin/chemkin_py') - chemkinPath = os.path.join(folder, 'pdd', 'chem.inp') - dictionaryPath = os.path.join(folder, 'pdd', 'species_dictionary.txt') + chemkin_path = os.path.join(folder, 'pdd', 'chem.inp') + dictionary_path = os.path.join(folder, 'pdd', 
'species_dictionary.txt') # loadChemkinFile - species, reactions = loadChemkinFile(chemkinPath, dictionaryPath) + species, reactions = loadChemkinFile(chemkin_path, dictionary_path) reaction1 = reactions[0] self.assertEqual(reaction1.family, "H_Abstraction") @@ -194,27 +200,23 @@ def testReadAndWriteTemplateReactionFamilyForPDDExample(self): self.assertEqual(reaction2.family, "H_Abstraction") # saveChemkinFile - chemkinSavePath = os.path.join(folder, 'minimal', 'chem_new.inp') - dictionarySavePath = os.path.join(folder, 'minimal', 'species_dictionary_new.txt') + chemkin_save_path = os.path.join(folder, 'minimal', 'chem_new.inp') + dictionary_save_path = os.path.join(folder, 'minimal', 'species_dictionary_new.txt') - saveChemkinFile(chemkinSavePath, species, reactions, verbose=False, checkForDuplicates=False) - saveSpeciesDictionary(dictionarySavePath, species, oldStyle=False) + saveChemkinFile(chemkin_save_path, species, reactions, verbose=False, checkForDuplicates=False) + saveSpeciesDictionary(dictionary_save_path, species, oldStyle=False) - self.assertTrue(os.path.isfile(chemkinSavePath)) - self.assertTrue(os.path.isfile(dictionarySavePath)) + self.assertTrue(os.path.isfile(chemkin_save_path)) + self.assertTrue(os.path.isfile(dictionary_save_path)) # clean up - os.remove(chemkinSavePath) - os.remove(dictionarySavePath) + os.remove(chemkin_save_path) + os.remove(dictionary_save_path) def testTransportDataReadAndWrite(self): """ Test that we can write to chemkin and recreate the same transport object """ - from rmgpy.species import Species - from rmgpy.molecule import Molecule - from rmgpy.transport import TransportData - Ar = Species(label="Ar", transportData=TransportData(shapeIndex=0, epsilon=(1134.93, 'J/mol'), sigma=(3.33, 'angstrom'), dipoleMoment=(0, 'De'), polarizability=(0, 'angstrom^3'), @@ -223,14 +225,14 @@ def testTransportDataReadAndWrite(self): Ar_write = Species(label="Ar") folder = os.path.join(os.path.dirname(rmgpy.__file__), 'test_data') - tempTransportPath = os.path.join(folder, 'tran_temp.dat') + temp_transport_path = os.path.join(folder, 'tran_temp.dat') - saveTransportFile(tempTransportPath, [Ar]) - speciesDict = {'Ar': Ar_write} - loadTransportFile(tempTransportPath, speciesDict) + saveTransportFile(temp_transport_path, [Ar]) + species_dict = {'Ar': Ar_write} + loadTransportFile(temp_transport_path, species_dict) self.assertEqual(repr(Ar), repr(Ar_write)) - os.remove(tempTransportPath) + os.remove(temp_transport_path) def testUseChemkinNames(self): """ @@ -239,11 +241,11 @@ def testUseChemkinNames(self): folder = os.path.join(os.path.dirname(rmgpy.__file__), 'test_data/chemkin/chemkin_py') - chemkinPath = os.path.join(folder, 'minimal', 'chem.inp') - dictionaryPath = os.path.join(folder, 'minimal', 'species_dictionary.txt') + chemkin_path = os.path.join(folder, 'minimal', 'chem.inp') + dictionary_path = os.path.join(folder, 'minimal', 'species_dictionary.txt') # loadChemkinFile - species, reactions = loadChemkinFile(chemkinPath, dictionaryPath, useChemkinNames=True) + species, reactions = loadChemkinFile(chemkin_path, dictionary_path, useChemkinNames=True) expected = [ 'Ar', @@ -267,11 +269,11 @@ def testReactantN2IsReactiveAndGetsRightSpeciesIdentifier(self): """ folder = os.path.join(os.path.dirname(rmgpy.__file__), 'test_data/chemkin/chemkin_py') - chemkinPath = os.path.join(folder, 'NC', 'chem.inp') - dictionaryPath = os.path.join(folder, 'NC', 'species_dictionary.txt') + chemkin_path = os.path.join(folder, 'NC', 'chem.inp') + dictionary_path = 
os.path.join(folder, 'NC', 'species_dictionary.txt') # loadChemkinFile - species, reactions = loadChemkinFile(chemkinPath, dictionaryPath, useChemkinNames=True) + species, reactions = loadChemkinFile(chemkin_path, dictionary_path, useChemkinNames=True) for n2 in species: if n2.label == 'N2': @@ -286,29 +288,29 @@ def testReadSpecificCollider(self): even if the species name contains parenthesis """ entry = """O2(4)+H(5)(+N2(5))<=>HO2(10)(+N2(5)) 4.651e+12 0.440 0.000""" - speciesDict = {} + species_dict = {} s1 = Species().fromAdjacencyList("""O2(4) -multiplicity 3 -1 O u1 p2 c0 {2,S} -2 O u1 p2 c0 {1,S}""") + multiplicity 3 + 1 O u1 p2 c0 {2,S} + 2 O u1 p2 c0 {1,S}""") s2 = Species().fromAdjacencyList("""H(5) -multiplicity 2 -1 H u1 p0 c0""") + multiplicity 2 + 1 H u1 p0 c0""") s3 = Species().fromAdjacencyList("""N2(5) -1 N u0 p1 c0 {2,T} -2 N u0 p1 c0 {1,T}""") + 1 N u0 p1 c0 {2,T} + 2 N u0 p1 c0 {1,T}""") s4 = Species().fromAdjacencyList("""HO2(10) -multiplicity 2 -1 O u0 p2 c0 {2,S} {3,S} -2 O u1 p2 c0 {1,S} -3 H u0 p0 c0 {1,S}""") - speciesDict['O2(4)'] = s1 - speciesDict['H(5)'] = s2 - speciesDict['N2(5)'] = s3 - speciesDict['HO2(10)'] = s4 - Aunits = ['','s^-1','cm^3/(mol*s)','cm^6/(mol^2*s)','cm^9/(mol^3*s)'] - Eunits = 'kcal/mol' - reaction = readKineticsEntry(entry, speciesDict, Aunits, Eunits) + multiplicity 2 + 1 O u0 p2 c0 {2,S} {3,S} + 2 O u1 p2 c0 {1,S} + 3 H u0 p0 c0 {1,S}""") + species_dict['O2(4)'] = s1 + species_dict['H(5)'] = s2 + species_dict['N2(5)'] = s3 + species_dict['HO2(10)'] = s4 + A_units = ['', 's^-1', 'cm^3/(mol*s)', 'cm^6/(mol^2*s)', 'cm^9/(mol^3*s)'] + E_units = 'kcal/mol' + reaction = readKineticsEntry(entry, species_dict, A_units, E_units) self.assertEqual(reaction.specificCollider.label, 'N2(5)') @@ -429,122 +431,122 @@ def test_mark_duplicate_reactions(self): class TestReadReactionComments(unittest.TestCase): @classmethod - def setUpClass(self): + def setUpClass(cls): r = Species().fromSMILES('[CH3]') r.label = '[CH3]' p = Species().fromSMILES('CC') p.label = 'CC' - self.reaction = Reaction(reactants=[r,r], - products=[p], - kinetics = Arrhenius(A=(8.26e+17,'cm^3/(mol*s)'), - n=-1.4, - Ea=(1,'kcal/mol'), T0=(1,'K')) + cls.reaction = Reaction(reactants=[r, r], + products=[p], + kinetics=Arrhenius(A=(8.26e+17, 'cm^3/(mol*s)'), + n=-1.4, + Ea=(1, 'kcal/mol'), T0=(1, 'K')) ) - self.comments_list = [""" -Reaction index: Chemkin #1; RMG #1 -Template reaction: R_Recombination -Exact match found for rate rule (C_methyl;C_methyl) -Multiplied by reaction path degeneracy 0.5 -""", -""" -Reaction index: Chemkin #2; RMG #4 -Template reaction: H_Abstraction -Estimated using template (C/H3/Cs;C_methyl) for rate rule (C/H3/Cs\H3;C_methyl) -Multiplied by reaction path degeneracy 6 -""", -""" -Reaction index: Chemkin #13; RMG #8 -Template reaction: H_Abstraction -Flux pairs: [CH3], CC; [CH3], CC; -Estimated using an average for rate rule [C/H3/Cs\H3;C_rad/H2/Cs] -Multiplied by reaction path degeneracy 6.0 -""", -""" -Reaction index: Chemkin #17; RMG #31 -Template reaction: H_Abstraction -Flux pairs: [CH3], CC; [CH3], CC; -Estimated using average of templates [C/H3/Cs;H_rad] + [C/H3/Cs\H3;Y_rad] for rate rule [C/H3/Cs\H3;H_rad] -Multiplied by reaction path degeneracy 6.0 -""", -""" -Reaction index: Chemkin #69; RMG #171 -Template reaction: intra_H_migration -Flux pairs: [CH3], CC; [CH3], CC; -Estimated using average of templates [R3H_SS;O_rad_out;Cs_H_out_2H] + [R3H_SS_Cs;Y_rad_out;Cs_H_out_2H] for rate rule -[R3H_SS_Cs;O_rad_out;Cs_H_out_2H] -Multiplied by reaction path 
degeneracy 3.0 -""", -""" -Reaction index: Chemkin #3; RMG #243 -Template reaction: Disproportionation -Flux pairs: [CH3], CC; [CH3], CC; -Average of [Average of [O2b;O_Csrad] + Average of [O_atom_triplet;O_Csrad + CH2_triplet;O_Csrad] + Average of [Average of [Ct_rad/Ct;O_Csrad from -training reaction 0] + Average of [O_pri_rad;O_Csrad + Average of [O_rad/NonDeC;O_Csrad + O_rad/NonDeO;O_Csrad]] + Average of [Cd_pri_rad;O_Csrad] + -Average of [CO_pri_rad;O_Csrad] + Average of [C_methyl;O_Csrad + Average of [C_rad/H2/Cs;O_Csrad + C_rad/H2/Cd;O_Csrad + C_rad/H2/O;O_Csrad] + Average -of [C_rad/H/NonDeC;O_Csrad] + Average of [Average of [C_rad/Cs3;O_Csrad]]] + H_rad;O_Csrad]] -Estimated using template [Y_rad_birad_trirad_quadrad;O_Csrad] for rate rule [CH_quartet;O_Csrad] -""", -""" -Reaction index: Chemkin #4; RMG #303 -Template reaction: Disproportionation -Flux pairs: [CH3], CC; [CH3], CC; -Matched reaction 0 C2H + CH3O <=> C2H2 + CH2O in Disproportionation/training -""", -""" -Reaction index: Chemkin #51; RMG #136 -Template reaction: H_Abstraction -Flux pairs: [CH3], CC; [CH3], CC; -Estimated using an average for rate rule [C/H3/Cd\H_Cd\H2;C_rad/H2/Cs] -Euclidian distance = 0 -Multiplied by reaction path degeneracy 3.0 -""", -""" -Reaction index: Chemkin #32; RMG #27 -Template reaction: R_Recombination -Flux pairs: [CH3], CC; [CH3], CC; -Matched reaction 20 CH3 + CH3 <=> C2H6 in R_Recombination/training -This reaction matched rate rule [C_methyl;C_methyl] -""", -""" -Reaction index: Chemkin #2; RMG #4 -Template reaction: R_Recombination -Flux pairs: [CH3], CC; [CH3], CC; -From training reaction 21 used for C_rad/H2/Cs;C_methyl -Exact match found for rate rule [C_rad/H2/Cs;C_methyl] -Euclidian distance = 0 -"""] - self.template_list = [['C_methyl','C_methyl'], - ['C/H3/Cs\H3','C_methyl'], - ['C/H3/Cs\H3','C_rad/H2/Cs'], - ['C/H3/Cs\H3','H_rad'], - ['R3H_SS_Cs','O_rad_out','Cs_H_out_2H'], - ['CH_quartet','O_Csrad'], - None, - ['C/H3/Cd\H_Cd\H2','C_rad/H2/Cs'], - ['C_methyl','C_methyl'], - ['C_rad/H2/Cs','C_methyl']] - self.family_list = ['R_Recombination', - 'H_Abstraction', - 'H_Abstraction', - 'H_Abstraction', - 'intra_H_migration', - 'Disproportionation', - 'Disproportionation', - 'H_Abstraction', - 'R_Recombination', - 'R_Recombination',] - self.degeneracy_list = [0.5, - 6, - 6, - 6, - 3, - 1, - 1, - 3, - 1, - 1] - self.expected_lines = [4,4,5,5,5,5,4,6,5,6] + cls.comments_list = [""" + Reaction index: Chemkin #1; RMG #1 + Template reaction: R_Recombination + Exact match found for rate rule (C_methyl;C_methyl) + Multiplied by reaction path degeneracy 0.5 + """, + """ + Reaction index: Chemkin #2; RMG #4 + Template reaction: H_Abstraction + Estimated using template (C/H3/Cs;C_methyl) for rate rule (C/H3/Cs\H3;C_methyl) + Multiplied by reaction path degeneracy 6 + """, + """ + Reaction index: Chemkin #13; RMG #8 + Template reaction: H_Abstraction + Flux pairs: [CH3], CC; [CH3], CC; + Estimated using an average for rate rule [C/H3/Cs\H3;C_rad/H2/Cs] + Multiplied by reaction path degeneracy 6.0 + """, + """ + Reaction index: Chemkin #17; RMG #31 + Template reaction: H_Abstraction + Flux pairs: [CH3], CC; [CH3], CC; + Estimated using average of templates [C/H3/Cs;H_rad] + [C/H3/Cs\H3;Y_rad] for rate rule [C/H3/Cs\H3;H_rad] + Multiplied by reaction path degeneracy 6.0 + """, + """ + Reaction index: Chemkin #69; RMG #171 + Template reaction: intra_H_migration + Flux pairs: [CH3], CC; [CH3], CC; + Estimated using average of templates [R3H_SS;O_rad_out;Cs_H_out_2H] + 
[R3H_SS_Cs;Y_rad_out;Cs_H_out_2H] for rate rule + [R3H_SS_Cs;O_rad_out;Cs_H_out_2H] + Multiplied by reaction path degeneracy 3.0 + """, + """ + Reaction index: Chemkin #3; RMG #243 + Template reaction: Disproportionation + Flux pairs: [CH3], CC; [CH3], CC; + Average of [Average of [O2b;O_Csrad] + Average of [O_atom_triplet;O_Csrad + CH2_triplet;O_Csrad] + Average of [Average of [Ct_rad/Ct;O_Csrad from + training reaction 0] + Average of [O_pri_rad;O_Csrad + Average of [O_rad/NonDeC;O_Csrad + O_rad/NonDeO;O_Csrad]] + Average of [Cd_pri_rad;O_Csrad] + + Average of [CO_pri_rad;O_Csrad] + Average of [C_methyl;O_Csrad + Average of [C_rad/H2/Cs;O_Csrad + C_rad/H2/Cd;O_Csrad + C_rad/H2/O;O_Csrad] + Average + of [C_rad/H/NonDeC;O_Csrad] + Average of [Average of [C_rad/Cs3;O_Csrad]]] + H_rad;O_Csrad]] + Estimated using template [Y_rad_birad_trirad_quadrad;O_Csrad] for rate rule [CH_quartet;O_Csrad] + """, + """ + Reaction index: Chemkin #4; RMG #303 + Template reaction: Disproportionation + Flux pairs: [CH3], CC; [CH3], CC; + Matched reaction 0 C2H + CH3O <=> C2H2 + CH2O in Disproportionation/training + """, + """ + Reaction index: Chemkin #51; RMG #136 + Template reaction: H_Abstraction + Flux pairs: [CH3], CC; [CH3], CC; + Estimated using an average for rate rule [C/H3/Cd\H_Cd\H2;C_rad/H2/Cs] + Euclidian distance = 0 + Multiplied by reaction path degeneracy 3.0 + """, + """ + Reaction index: Chemkin #32; RMG #27 + Template reaction: R_Recombination + Flux pairs: [CH3], CC; [CH3], CC; + Matched reaction 20 CH3 + CH3 <=> C2H6 in R_Recombination/training + This reaction matched rate rule [C_methyl;C_methyl] + """, + """ + Reaction index: Chemkin #2; RMG #4 + Template reaction: R_Recombination + Flux pairs: [CH3], CC; [CH3], CC; + From training reaction 21 used for C_rad/H2/Cs;C_methyl + Exact match found for rate rule [C_rad/H2/Cs;C_methyl] + Euclidian distance = 0 + """] + cls.template_list = [['C_methyl', 'C_methyl'], + ['C/H3/Cs\H3', 'C_methyl'], + ['C/H3/Cs\H3', 'C_rad/H2/Cs'], + ['C/H3/Cs\H3', 'H_rad'], + ['R3H_SS_Cs', 'O_rad_out', 'Cs_H_out_2H'], + ['CH_quartet', 'O_Csrad'], + None, + ['C/H3/Cd\H_Cd\H2', 'C_rad/H2/Cs'], + ['C_methyl', 'C_methyl'], + ['C_rad/H2/Cs', 'C_methyl']] + cls.family_list = ['R_Recombination', + 'H_Abstraction', + 'H_Abstraction', + 'H_Abstraction', + 'intra_H_migration', + 'Disproportionation', + 'Disproportionation', + 'H_Abstraction', + 'R_Recombination', + 'R_Recombination', ] + cls.degeneracy_list = [0.5, + 6, + 6, + 6, + 3, + 1, + 1, + 3, + 1, + 1] + cls.expected_lines = [4, 4, 5, 5, 5, 5, 4, 6, 5, 6] def testReadReactionCommentsTemplate(self): """ @@ -555,8 +557,10 @@ def testReadReactionCommentsTemplate(self): # only check template if meant to find one if self.template_list[index]: - self.assertTrue(new_rxn.template,'The template was not saved from the reaction comment {}'.format(comment)) - self.assertEqual(frozenset(new_rxn.template),frozenset(self.template_list[index]),'The reaction template does not match') + self.assertTrue(new_rxn.template, + 'The template was not saved from the reaction comment {}'.format(comment)) + self.assertEqual(frozenset(new_rxn.template), frozenset(self.template_list[index]), + 'The reaction template does not match') else: self.assertFalse(new_rxn.template) @@ -587,8 +591,9 @@ def testReadReactionCommentsDegeneracy(self): # Check that the comment only appears once in the kinetics comment if new_rxn.degeneracy != 1: - self.assertEqual(new_rxn.kinetics.comment.count('Multiplied by reaction path degeneracy 
{}'.format(new_rxn.degeneracy)), 1, - 'Reaction degeneracy comment duplicated while reading Chemkin comments') + self.assertEqual(new_rxn.kinetics.comment.count( + 'Multiplied by reaction path degeneracy {}'.format(new_rxn.degeneracy)), 1, + 'Reaction degeneracy comment duplicated while reading Chemkin comments') else: self.assertTrue('Multiplied by reaction path degeneracy' not in new_rxn.kinetics.comment) @@ -600,4 +605,5 @@ def testRemoveLineBreaks(self): new_comment = _removeLineBreaks(comment) new_comment_lines = len(new_comment.strip().splitlines()) self.assertEqual(new_comment_lines, self.expected_lines[index], - 'Found {} more lines than expected for comment \n\n""{}""\n\n which converted to \n\n""{}""'.format(new_comment_lines - self.expected_lines[index],comment.strip(), new_comment.strip())) + 'Found {} more lines than expected for comment \n\n""{}""\n\n which converted to \n\n""{}""'.format( + new_comment_lines - self.expected_lines[index], comment.strip(), new_comment.strip())) diff --git a/rmgpy/constantsTest.py b/rmgpy/constantsTest.py index 69f57509fe..e2f5b6e36d 100644 --- a/rmgpy/constantsTest.py +++ b/rmgpy/constantsTest.py @@ -37,6 +37,7 @@ import rmgpy.constants as constants + ################################################################################ class TestConstants(unittest.TestCase): @@ -45,7 +46,7 @@ class TestConstants(unittest.TestCase): both pure Python and compiled Cython modes, and in both cases are set to the appropriate values. """ - + def test_avogadroConstant(self): """ Test the value of the Avogadro constant. @@ -59,35 +60,35 @@ def test_boltzmannConstant(self): """ kB = 1.3806504e-23 self.assertAlmostEqual(constants.kB / kB, 1.0, 6, '{0} != {1}'.format(constants.kB, kB)) - + def test_elementaryCharge(self): """ Test the value of the elementary charge constant. """ e = 1.602176565e-19 self.assertAlmostEqual(constants.e / e, 1.0, 6, '{0} != {1}'.format(constants.e, e)) - + def test_gasLawConstant(self): """ Test the value of the gas law constant. """ R = 8.314472 self.assertAlmostEqual(constants.R / R, 1.0, 6, '{0} != {1}'.format(constants.R, R)) - + def test_planckConstant(self): """ Test the value of the Planck constant. """ h = 6.62606896e-34 self.assertAlmostEqual(constants.h / h, 1.0, 6, '{0} != {1}'.format(constants.h, h)) - + def test_reducedPlanckConstant(self): """ Test the value of the reduced Planck constant. """ hbar = 1.054571726e-34 self.assertAlmostEqual(constants.hbar / hbar, 1.0, 6, '{0} != {1}'.format(constants.hbar, hbar)) - + def test_pi(self): """ Test the value of pi. @@ -143,6 +144,7 @@ def test_hartreeEnergy(self): E_h = 4.35974434e-18 self.assertAlmostEqual(constants.E_h / E_h, 1.0, 6, '{0} != {1}'.format(constants.E_h, E_h)) + ################################################################################ if __name__ == '__main__': diff --git a/rmgpy/constraints.py b/rmgpy/constraints.py index 5a65121ba8..31c76ac692 100644 --- a/rmgpy/constraints.py +++ b/rmgpy/constraints.py @@ -29,78 +29,78 @@ ############################################################################### import logging -from numpy import isclose -from rmgpy.molecule.element import getElement + from rmgpy.species import Species + def failsSpeciesConstraints(species): """ Pass in either a `Species` or `Molecule` object and checks whether it passes the speciesConstraints set by the user. If not, returns `True` for failing speciesConstraints. 
""" - + from rmgpy.rmg.input import getInput try: - speciesConstraints = getInput('speciesConstraints') + species_constraints = getInput('speciesConstraints') except Exception: logging.debug('Species constraints could not be found.') - speciesConstraints = {} - + species_constraints = {} + if isinstance(species, Species): struct = species.molecule[0] else: # expects a molecule here struct = species - explicitlyAllowedMolecules = speciesConstraints.get('explicitlyAllowedMolecules', []) - for molecule in explicitlyAllowedMolecules: + explicitly_allowed_molecules = species_constraints.get('explicitlyAllowedMolecules', []) + for molecule in explicitly_allowed_molecules: if struct.isIsomorphic(molecule): - return False - - maxCarbonAtoms = speciesConstraints.get('maximumCarbonAtoms', -1) - if maxCarbonAtoms != -1: - if struct.getNumAtoms('C') > maxCarbonAtoms: + return False + + max_carbon_atoms = species_constraints.get('maximumCarbonAtoms', -1) + if max_carbon_atoms != -1: + if struct.getNumAtoms('C') > max_carbon_atoms: return True - maxOxygenAtoms = speciesConstraints.get('maximumOxygenAtoms', -1) - if maxOxygenAtoms != -1: - if struct.getNumAtoms('O') > maxOxygenAtoms: + max_oxygen_atoms = species_constraints.get('maximumOxygenAtoms', -1) + if max_oxygen_atoms != -1: + if struct.getNumAtoms('O') > max_oxygen_atoms: return True - maxNitrogenAtoms = speciesConstraints.get('maximumNitrogenAtoms', -1) - if maxNitrogenAtoms != -1: - if struct.getNumAtoms('N') > maxNitrogenAtoms: + max_nitrogen_atoms = species_constraints.get('maximumNitrogenAtoms', -1) + if max_nitrogen_atoms != -1: + if struct.getNumAtoms('N') > max_nitrogen_atoms: return True - maxSiliconAtoms = speciesConstraints.get('maximumSiliconAtoms', -1) - if maxSiliconAtoms != -1: - if struct.getNumAtoms('Si') > maxSiliconAtoms: + max_silicon_atoms = species_constraints.get('maximumSiliconAtoms', -1) + if max_silicon_atoms != -1: + if struct.getNumAtoms('Si') > max_silicon_atoms: return True - maxSulfurAtoms = speciesConstraints.get('maximumSulfurAtoms', -1) - if maxSulfurAtoms != -1: - if struct.getNumAtoms('S') > maxSulfurAtoms: + max_sulfur_atoms = species_constraints.get('maximumSulfurAtoms', -1) + if max_sulfur_atoms != -1: + if struct.getNumAtoms('S') > max_sulfur_atoms: return True - maxHeavyAtoms = speciesConstraints.get('maximumHeavyAtoms', -1) - if maxHeavyAtoms != -1: - if struct.getNumAtoms() - struct.getNumAtoms('H') > maxHeavyAtoms: + max_heavy_atoms = species_constraints.get('maximumHeavyAtoms', -1) + if max_heavy_atoms != -1: + if struct.getNumAtoms() - struct.getNumAtoms('H') > max_heavy_atoms: return True - maxRadicals = speciesConstraints.get('maximumRadicalElectrons', -1) - if maxRadicals != -1: - if (struct.getRadicalCount() > maxRadicals): + max_radicals = species_constraints.get('maximumRadicalElectrons', -1) + if max_radicals != -1: + if (struct.getRadicalCount() > max_radicals): return True - maxCarbenes = speciesConstraints.get('maximumSingletCarbenes', 1) - if maxRadicals != -1: - if struct.getSingletCarbeneCount() > maxCarbenes: + max_carbenes = species_constraints.get('maximumSingletCarbenes', 1) + if max_radicals != -1: + if struct.getSingletCarbeneCount() > max_carbenes: return True - maxCarbeneRadicals = speciesConstraints.get('maximumCarbeneRadicals', 0) - if maxCarbeneRadicals != -1: - if struct.getSingletCarbeneCount() > 0 and struct.getRadicalCount() > maxCarbeneRadicals: + max_carbene_radicals = species_constraints.get('maximumCarbeneRadicals', 0) + if max_carbene_radicals != -1: + if 
struct.getSingletCarbeneCount() > 0 and struct.getRadicalCount() > max_carbene_radicals: return True return False diff --git a/rmgpy/constraintsTest.py b/rmgpy/constraintsTest.py index d8643bba1d..c14656cab4 100644 --- a/rmgpy/constraintsTest.py +++ b/rmgpy/constraintsTest.py @@ -41,6 +41,7 @@ from rmgpy.molecule import Molecule import rmgpy.rmg.input + ################################################################################ class TestFailsSpeciesConstraints(unittest.TestCase): diff --git a/rmgpy/display.py b/rmgpy/display.py index f496e03711..bb0c547596 100644 --- a/rmgpy/display.py +++ b/rmgpy/display.py @@ -37,9 +37,11 @@ try: from IPython.core.interactiveshell import InteractiveShell except ImportError: - def display(obj): pass + def display(obj): + pass else: if InteractiveShell.initialized(): from IPython.core.display import display else: - def display(obj): pass + def display(obj): + pass diff --git a/rmgpy/exceptions.py b/rmgpy/exceptions.py index 420a85a186..ccd10116a0 100644 --- a/rmgpy/exceptions.py +++ b/rmgpy/exceptions.py @@ -32,6 +32,7 @@ This module contains classes which extend Exception for usage in the RMG module """ + class ActionError(Exception): """ An exception class for errors that occur while applying reaction recipe @@ -40,6 +41,7 @@ class ActionError(Exception): """ pass + class AtomTypeError(Exception): """ An exception to be raised when an error occurs while working with atom @@ -48,6 +50,7 @@ class AtomTypeError(Exception): """ pass + class ChemicallySignificantEigenvaluesError(Exception): """ An exception raised when the chemically significant eigenvalue method is @@ -56,6 +59,7 @@ class ChemicallySignificantEigenvaluesError(Exception): """ pass + class ChemkinError(Exception): """ An exception class for exceptional behavior involving Chemkin files. Pass a @@ -63,6 +67,7 @@ class ChemkinError(Exception): """ pass + class CollisionError(Exception): """ An exception class for when RMG is unable to calculate collision efficiencies @@ -71,6 +76,7 @@ class CollisionError(Exception): """ pass + class DatabaseError(Exception): """ A exception that occurs when working with an RMG database. Pass a string @@ -78,6 +84,7 @@ class DatabaseError(Exception): """ pass + class DependencyError(Exception): """ An exception that occurs when an error is encountered with a dependency. @@ -85,6 +92,7 @@ class DependencyError(Exception): """ pass + class ElementError(Exception): """ An exception class for errors that occur while working with elements. @@ -93,6 +101,7 @@ class ElementError(Exception): """ pass + class ForbiddenStructureException(Exception): """ An exception passed when RMG encounters a forbidden structure. These are usually @@ -100,6 +109,7 @@ class ForbiddenStructureException(Exception): """ pass + class ILPSolutionError(Exception): """ An exception to be raised when solving an integer linear programming problem if a solution @@ -108,6 +118,7 @@ class ILPSolutionError(Exception): """ pass + class ImplicitBenzeneError(Exception): """ An exception class when encountering a group with too many implicit benzene @@ -116,6 +127,7 @@ class ImplicitBenzeneError(Exception): """ pass + class InchiException(Exception): """ An exception used when encountering a non-valid Inchi expression are encountered. 
@@ -123,6 +135,7 @@ class InchiException(Exception): """ pass + class InputError(Exception): """ An exception raised when parsing an input file for any module in RMG: @@ -131,6 +144,7 @@ class InputError(Exception): """ pass + class InvalidActionError(Exception): """ An exception to be raised when an invalid action is encountered in a @@ -138,6 +152,7 @@ class InvalidActionError(Exception): """ pass + class InvalidAdjacencyListError(Exception): """ An exception used to indicate that an RMG-style adjacency list is invalid. @@ -145,6 +160,7 @@ class InvalidAdjacencyListError(Exception): """ pass + class KekulizationError(Exception): """ An exception to be raised when encountering an error while kekulizing an aromatic molecule. @@ -152,6 +168,7 @@ class KekulizationError(Exception): """ pass + class KineticsError(Exception): """ An exception class for problems with kinetics. This can be used when finding @@ -160,7 +177,8 @@ class KineticsError(Exception): """ pass -class ModifiedStrongCollisionError(Exception): + +class ModifiedStrongCollisionError(Exception): """ An exception raised when the modified strong collision method is unsuccessful for any reason. Pass a string describing the cause of the exceptional @@ -168,18 +186,21 @@ class ModifiedStrongCollisionError(Exception): """ pass + class NegativeBarrierException(Exception): """ This Exception occurs when the energy barrier for a hindered Rotor is negative. This can occur if the scan or fourier fit is poor. """ - + pass -class NetworkError(Exception): + +class NetworkError(Exception): """ Raised when an error occurs while working with a pressure-dependent reaction network """ pass + class OutputError(Exception): """ This exception is raised whenever an error occurs while saving output @@ -188,6 +209,7 @@ class OutputError(Exception): """ pass + class PressureDependenceError(Exception): """ An exception class to use when an error involving pressure dependence is @@ -196,6 +218,7 @@ class PressureDependenceError(Exception): """ pass + class QuantityError(Exception): """ An exception to be raised when an error occurs while working with physical @@ -204,6 +227,7 @@ class QuantityError(Exception): """ pass + class ReactionError(Exception): """ An exception class for exceptional behavior involving :class:`Reaction` @@ -212,6 +236,7 @@ class ReactionError(Exception): """ pass + class ReactionPairsError(Exception): """ An exception to be raised when an error occurs while working with reaction @@ -219,19 +244,22 @@ class ReactionPairsError(Exception): """ pass -class ReservoirStateError(Exception): + +class ReservoirStateError(Exception): """ An exception raised when the reservoir state method is unsuccessful for any reason. Pass a string describing the cause of the exceptional behavior. """ pass + class SettingsError(Exception): """ An exception raised when dealing with settings. """ pass + class SpeciesError(Exception): """ An exception class for exceptional behavior that occurs while working with @@ -240,11 +268,13 @@ class SpeciesError(Exception): """ pass + class StatmechError(Exception): """ An exception used when an error occurs in estimating Statmech. 
""" + class StatmechFitError(StatmechError): """ An exception used when attempting to fit molecular degrees of freedom to @@ -253,6 +283,7 @@ class StatmechFitError(StatmechError): """ pass + class UnexpectedChargeError(Exception): """ An exception class when encountering a group/molecule with unexpected charge @@ -263,9 +294,11 @@ class UnexpectedChargeError(Exception): Attributes: `graph` is the molecule or group object with the unexpected charge """ + def __init__(self, graph): self.graph = graph + class VF2Error(Exception): """ An exception raised if an error occurs within the VF2 graph isomorphism @@ -273,29 +306,35 @@ class VF2Error(Exception): """ pass + class CoreError(Exception): """ An exception raised if there is a problem within the model core """ pass + class ResonanceError(Exception): """ An exception class for when RMG is unable to generate resonance structures. """ pass -################## move classes that extend off previous exceptions here +################################################################################ +# move classes that extend off previous exceptions here + class InvalidMicrocanonicalRateError(NetworkError): """ Used in pressure dependence when the k(E) calculation does not give the correct kf(T) or Kc(T) """ - def __init__(self,message, k_ratio=1.0, Keq_ratio=1.0): + + def __init__(self, message, k_ratio=1.0, Keq_ratio=1.0): self.message = message self.k_ratio = k_ratio self.Keq_ratio = Keq_ratio + def badness(self): """ How bad is the error? @@ -305,11 +344,13 @@ def badness(self): import math return max(abs(math.log10(self.k_ratio)), abs(math.log10(self.Keq_ratio))) + class UndeterminableKineticsError(ReactionError): """ An exception raised when attempts to estimate appropriate kinetic parameters for a chemical reaction are unsuccessful. """ + def __init__(self, reaction, message=''): - new_message = 'Kinetics could not be determined. '+message - ReactionError.__init__(self,reaction,new_message) + new_message = 'Kinetics could not be determined. ' + message + ReactionError.__init__(self, reaction, new_message) diff --git a/rmgpy/quantityTest.py b/rmgpy/quantityTest.py index 5d345e0658..c4ff49c7d1 100644 --- a/rmgpy/quantityTest.py +++ b/rmgpy/quantityTest.py @@ -31,25 +31,28 @@ """ This script contains unit tests of the :mod:`rmgpy.quantity` module. """ +from __future__ import print_function import unittest -import numpy + +import numpy as np import rmgpy.constants as constants import rmgpy.quantity as quantity + ################################################################################ class TestAcceleration(unittest.TestCase): """ Contains unit tests of the Acceleration unit type object. """ - + def test_mpers2(self): """ Test the creation of an acceleration quantity with units of m/s^2. """ - q = quantity.Acceleration(1.0,"m/s^2") + q = quantity.Acceleration(1.0, "m/s^2") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "m/s^2") @@ -58,23 +61,24 @@ def test_cmpers2(self): """ Test the creation of an acceleration quantity with units of cm/s^2. """ - q = quantity.Acceleration(1.0,"cm/s^2") + q = quantity.Acceleration(1.0, "cm/s^2") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 0.01, delta=1e-8) self.assertEqual(q.units, "cm/s^2") + ################################################################################ class TestArea(unittest.TestCase): """ Contains unit tests of the Area unit type object. 
""" - + def test_m2(self): """ Test the creation of an area quantity with units of m^2. """ - q = quantity.Area(1.0,"m^2") + q = quantity.Area(1.0, "m^2") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "m^2") @@ -83,24 +87,25 @@ def test_cm2(self): """ Test the creation of an area quantity with units of m^2. """ - q = quantity.Area(1.0,"cm^2") + q = quantity.Area(1.0, "cm^2") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e-4, delta=1e-10) self.assertEqual(q.units, "cm^2") + ################################################################################ class TestConcentration(unittest.TestCase): """ Contains unit tests of the Concentration unit type object. """ - + def test_perm3(self): """ Test the creation of an concentration quantity with units of m^-3. """ try: - q = quantity.Concentration(1.0,"m^-3") + quantity.Concentration(1.0, "m^-3") self.fail('Allowed invalid unit type "m^-3".') except quantity.QuantityError: pass @@ -109,7 +114,7 @@ def test_molperm3(self): """ Test the creation of an concentration quantity with units of mol/m^3. """ - q = quantity.Concentration(1.0,"mol/m^3") + q = quantity.Concentration(1.0, "mol/m^3") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "mol/m^3") @@ -118,24 +123,25 @@ def test_moleculesperm3(self): """ Test the creation of an concentration quantity with units of molecules/m^3. """ - q = quantity.Concentration(1.0,"molecules/m^3") + q = quantity.Concentration(1.0, "molecules/m^3") self.assertAlmostEqual(q.value, 1.0, 6) - self.assertAlmostEqual(q.value_si*constants.Na, 1.0, delta=1e-6) + self.assertAlmostEqual(q.value_si * constants.Na, 1.0, delta=1e-6) self.assertEqual(q.units, "molecules/m^3") + ################################################################################ class TestEnergy(unittest.TestCase): """ Contains unit tests of the Energy unit type object. """ - + def test_J(self): """ Test the creation of an energy quantity with units of J. """ try: - q = quantity.Energy(1.0,"J") + quantity.Energy(1.0, "J") self.fail('Allowed invalid unit type "J".') except quantity.QuantityError: pass @@ -144,7 +150,7 @@ def test_Jpermol(self): """ Test the creation of an energy quantity with units of J/mol. """ - q = quantity.Energy(1.0,"J/mol") + q = quantity.Energy(1.0, "J/mol") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "J/mol") @@ -154,7 +160,7 @@ def test_cal(self): Test the creation of an energy quantity with units of cal. """ try: - q = quantity.Energy(1.0,"cal") + quantity.Energy(1.0, "cal") self.fail('Allowed invalid unit type "cal".') except quantity.QuantityError: pass @@ -163,7 +169,7 @@ def test_calpermol(self): """ Test the creation of an energy quantity with units of cal/mol. """ - q = quantity.Energy(1.0,"cal/mol") + q = quantity.Energy(1.0, "cal/mol") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 4.184, delta=1e-6) self.assertEqual(q.units, "cal/mol") @@ -173,7 +179,7 @@ def test_kJ(self): Test the creation of an energy quantity with units of kJ. """ try: - q = quantity.Energy(1.0,"kJ") + quantity.Energy(1.0, "kJ") self.fail('Allowed invalid unit type "kJ".') except quantity.QuantityError: pass @@ -182,7 +188,7 @@ def test_kJpermol(self): """ Test the creation of an energy quantity with units of kJ/mol. 
""" - q = quantity.Energy(1.0,"kJ/mol") + q = quantity.Energy(1.0, "kJ/mol") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1000., delta=1e-6) self.assertEqual(q.units, "kJ/mol") @@ -192,7 +198,7 @@ def test_kcal(self): Test the creation of an energy quantity with units of kcal. """ try: - q = quantity.Energy(1.0,"kcal") + quantity.Energy(1.0, "kcal") self.fail('Allowed invalid unit type "kcal".') except quantity.QuantityError: pass @@ -201,31 +207,32 @@ def test_kcalpermol(self): """ Test the creation of an energy quantity with units of kcal/mol. """ - q = quantity.Energy(1.0,"kcal/mol") + q = quantity.Energy(1.0, "kcal/mol") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 4184., delta=1e-6) self.assertEqual(q.units, "kcal/mol") - + def test_Kelvin(self): """ Test the creation of an energy quantity with units of K (not really an energy!). """ - q = quantity.Energy(10.0,"K") - self.assertAlmostEqual(q.value, 10*8.314472, delta=1e-6) + q = quantity.Energy(10.0, "K") + self.assertAlmostEqual(q.value, 10 * 8.314472, delta=1e-6) self.assertEqual(q.units, "J/mol") + ################################################################################ class TestDipoleMoment(unittest.TestCase): """ Contains unit tests of the DipoleMoment unit type object. """ - + def test_Ctimesm(self): """ Test the creation of a dipole moment quantity with units of C*m. """ - q = quantity.DipoleMoment(1.0,"C*m") + q = quantity.DipoleMoment(1.0, "C*m") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, 6) self.assertEqual(q.units, "C*m") @@ -234,24 +241,25 @@ def test_D(self): """ Test the creation of a dipole moment quantity with units of J/mol. """ - q = quantity.DipoleMoment(1.0,"De") + q = quantity.DipoleMoment(1.0, "De") self.assertAlmostEqual(q.value, 1.0, 6) - self.assertAlmostEqual(q.value_si*constants.c*1.0e21, 1.0, 6) + self.assertAlmostEqual(q.value_si * constants.c * 1.0e21, 1.0, 6) self.assertEqual(q.units, "De") + ################################################################################ class TestFlux(unittest.TestCase): """ Contains unit tests of the Flux unit type object. """ - + def test_perm2pers(self): """ Test the creation of a flux quantity with units of m^-2*s^-1. """ try: - q = quantity.Flux(1.0,"m^-2*s^-1") + quantity.Flux(1.0, "m^-2*s^-1") self.fail('Allowed invalid unit type "m^-2*s^-1".') except quantity.QuantityError: pass @@ -260,7 +268,7 @@ def test_molperm3(self): """ Test the creation of a flux quantity with units of mol/(m^2*s). """ - q = quantity.Flux(1.0,"mol/(m^2*s)") + q = quantity.Flux(1.0, "mol/(m^2*s)") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "mol/(m^2*s)") @@ -269,27 +277,29 @@ def test_moleculesperm3(self): """ Test the creation of a flux quantity with units of molecules/(m^2*s). """ - q = quantity.Flux(1.0,"molecules/(m^2*s)") + q = quantity.Flux(1.0, "molecules/(m^2*s)") self.assertAlmostEqual(q.value, 1.0, 6) - self.assertAlmostEqual(q.value_si*constants.Na, 1.0, delta=1e-6) + self.assertAlmostEqual(q.value_si * constants.Na, 1.0, delta=1e-6) self.assertEqual(q.units, "molecules/(m^2*s)") + ################################################################################ class TestForce(unittest.TestCase): """ Contains unit tests of the Force unit type object. """ - + def test_N(self): """ Test the creation of an force quantity with units of N. 
""" - q = quantity.Force(1.0,"N") + q = quantity.Force(1.0, "N") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "N") + ################################################################################ class TestFrequency(unittest.TestCase): @@ -298,12 +308,12 @@ class TestFrequency(unittest.TestCase): special case, frequencies can be read in several units, but are always stored internally as cm^-1. """ - + def test_cm_1(self): """ Test the creation of a frequency quantity with units of cm^-1. """ - q = quantity.Frequency(1.0,"cm^-1") + q = quantity.Frequency(1.0, "cm^-1") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "cm^-1") @@ -312,78 +322,79 @@ def test_s_1(self): """ Test the creation of a frequency quantity with units of s^-1. """ - q = quantity.Frequency(1.0,"s^-1") - self.assertAlmostEqual(q.value, 1./(constants.c*100.), delta=1e-17) - self.assertAlmostEqual(q.value_si, 1./(constants.c*100.), delta=1e-17) + q = quantity.Frequency(1.0, "s^-1") + self.assertAlmostEqual(q.value, 1. / (constants.c * 100.), delta=1e-17) + self.assertAlmostEqual(q.value_si, 1. / (constants.c * 100.), delta=1e-17) self.assertEqual(q.units, "cm^-1") def test_K(self): """ Test the creation of a frequency quantity with units of K. """ - q = quantity.Frequency(1.0,"K") - self.assertAlmostEqual(q.value, constants.kB/(constants.h*constants.c*100.), 6) - self.assertAlmostEqual(q.value_si, constants.kB/(constants.h*constants.c*100.), delta=1e-6) + q = quantity.Frequency(1.0, "K") + self.assertAlmostEqual(q.value, constants.kB / (constants.h * constants.c * 100.), 6) + self.assertAlmostEqual(q.value_si, constants.kB / (constants.h * constants.c * 100.), delta=1e-6) self.assertEqual(q.units, "cm^-1") def test_eV(self): """ Test the creation of a frequency quantity with units of eV. """ - q = quantity.Frequency(1.0,"eV") - self.assertAlmostEqual(q.value, constants.e/(constants.h*constants.c*100.), 2) - self.assertAlmostEqual(q.value_si, constants.e/(constants.h*constants.c*100.), delta=1e-2) + q = quantity.Frequency(1.0, "eV") + self.assertAlmostEqual(q.value, constants.e / (constants.h * constants.c * 100.), 2) + self.assertAlmostEqual(q.value_si, constants.e / (constants.h * constants.c * 100.), delta=1e-2) self.assertEqual(q.units, "cm^-1") def test_Hz(self): """ Test the creation of a frequency quantity with units of Hz. """ - q = quantity.Frequency(1.0,"Hz") - self.assertAlmostEqual(q.value, 1./(constants.c*100.), delta=1e-17) - self.assertAlmostEqual(q.value_si, 1./(constants.c*100.), delta=1e-17) + q = quantity.Frequency(1.0, "Hz") + self.assertAlmostEqual(q.value, 1. / (constants.c * 100.), delta=1e-17) + self.assertAlmostEqual(q.value_si, 1. / (constants.c * 100.), delta=1e-17) self.assertEqual(q.units, "cm^-1") def test_kHz(self): """ Test the creation of a frequency quantity with units of kHz. """ - q = quantity.Frequency(1.0,"kHz") - self.assertAlmostEqual(q.value, 1e3/(constants.c*100.), delta=1e-14) - self.assertAlmostEqual(q.value_si, 1e3/(constants.c*100.), delta=1e-14) + q = quantity.Frequency(1.0, "kHz") + self.assertAlmostEqual(q.value, 1e3 / (constants.c * 100.), delta=1e-14) + self.assertAlmostEqual(q.value_si, 1e3 / (constants.c * 100.), delta=1e-14) self.assertEqual(q.units, "cm^-1") def test_MHz(self): """ Test the creation of a frequency quantity with units of MHz. 
""" - q = quantity.Frequency(1.0,"MHz") - self.assertAlmostEqual(q.value, 1e6/(constants.c*100.), delta=1e-11) - self.assertAlmostEqual(q.value_si, 1e6/(constants.c*100.), delta=1e-11) + q = quantity.Frequency(1.0, "MHz") + self.assertAlmostEqual(q.value, 1e6 / (constants.c * 100.), delta=1e-11) + self.assertAlmostEqual(q.value_si, 1e6 / (constants.c * 100.), delta=1e-11) self.assertEqual(q.units, "cm^-1") def test_GHz(self): """ Test the creation of a frequency quantity with units of GHz. """ - q = quantity.Frequency(1.0,"GHz") - self.assertAlmostEqual(q.value, 1e9/(constants.c*100.), delta=1e-08) - self.assertAlmostEqual(q.value_si, 1e9/(constants.c*100.), delta=1e-08) + q = quantity.Frequency(1.0, "GHz") + self.assertAlmostEqual(q.value, 1e9 / (constants.c * 100.), delta=1e-08) + self.assertAlmostEqual(q.value_si, 1e9 / (constants.c * 100.), delta=1e-08) self.assertEqual(q.units, "cm^-1") + ################################################################################ class TestHeatCapacity(unittest.TestCase): """ Contains unit tests of the HeatCapacity unit type object. """ - + def test_JperK(self): """ Test the creation of a heat capacity quantity with units of J/K. """ try: - q = quantity.HeatCapacity(1.0,"J/K") + quantity.HeatCapacity(1.0, "J/K") self.fail('Allowed invalid unit type "J/K".') except quantity.QuantityError: pass @@ -392,7 +403,7 @@ def test_JpermolperK(self): """ Test the creation of a heat capacity quantity with units of J/(mol*K). """ - q = quantity.HeatCapacity(1.0,"J/(mol*K)") + q = quantity.HeatCapacity(1.0, "J/(mol*K)") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "J/(mol*K)") @@ -402,7 +413,7 @@ def test_calperK(self): Test the creation of a heat capacity quantity with units of cal/K. """ try: - q = quantity.HeatCapacity(1.0,"cal/K") + quantity.HeatCapacity(1.0, "cal/K") self.fail('Allowed invalid unit type "cal/K".') except quantity.QuantityError: pass @@ -411,7 +422,7 @@ def test_calpermolperK(self): """ Test the creation of a heat capacity quantity with units of cal/(mol*K). """ - q = quantity.HeatCapacity(1.0,"cal/(mol*K)") + q = quantity.HeatCapacity(1.0, "cal/(mol*K)") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 4.184, delta=1e-6) self.assertEqual(q.units, "cal/(mol*K)") @@ -421,7 +432,7 @@ def test_kJperK(self): Test the creation of a heat capacity quantity with units of kJ/K. """ try: - q = quantity.HeatCapacity(1.0,"kJ/K") + quantity.HeatCapacity(1.0, "kJ/K") self.fail('Allowed invalid unit type "kJ/K".') except quantity.QuantityError: pass @@ -430,7 +441,7 @@ def test_kJpermolperK(self): """ Test the creation of a heat capacity quantity with units of kJ/(mol*K). """ - q = quantity.HeatCapacity(1.0,"kJ/(mol*K)") + q = quantity.HeatCapacity(1.0, "kJ/(mol*K)") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1000., delta=1e-6) self.assertEqual(q.units, "kJ/(mol*K)") @@ -440,7 +451,7 @@ def test_kcalperK(self): Test the creation of a heat capacity quantity with units of kcal/K. """ try: - q = quantity.HeatCapacity(1.0,"kcal/K") + quantity.HeatCapacity(1.0, "kcal/K") self.fail('Allowed invalid unit type "kcal/K".') except quantity.QuantityError: pass @@ -449,23 +460,24 @@ def test_kcalpermolperK(self): """ Test the creation of a heat capacity quantity with units of kcal/(mol*K). 
""" - q = quantity.HeatCapacity(1.0,"kcal/(mol*K)") + q = quantity.HeatCapacity(1.0, "kcal/(mol*K)") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 4184., delta=1e-6) self.assertEqual(q.units, "kcal/(mol*K)") + ################################################################################ class TestInertia(unittest.TestCase): """ Contains unit tests of the Inertia unit type object. """ - + def test_kg_m2(self): """ Test the creation of a moment of inertia quantity with units of kg*m^2. """ - q = quantity.Inertia(1.0,"kg*m^2") + q = quantity.Inertia(1.0, "kg*m^2") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "kg*m^2") @@ -474,23 +486,24 @@ def test_amu_angstrom2(self): """ Test the creation of a moment of inertia quantity with units of amu*angstrom^2. """ - q = quantity.Inertia(1.0,"amu*angstrom^2") + q = quantity.Inertia(1.0, "amu*angstrom^2") self.assertAlmostEqual(q.value, 1.0, 6) - self.assertAlmostEqual(q.value_si*constants.Na*1e23, 1.0, delta=1e-6) + self.assertAlmostEqual(q.value_si * constants.Na * 1e23, 1.0, delta=1e-6) self.assertEqual(q.units, "amu*angstrom^2") + ################################################################################ class TestLength(unittest.TestCase): """ Contains unit tests of the Length unit type object. """ - + def test_m(self): """ Test the creation of a length quantity with units of m. """ - q = quantity.Length(1.0,"m") + q = quantity.Length(1.0, "m") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "m") @@ -499,7 +512,7 @@ def test_km(self): """ Test the creation of a length quantity with units of km. """ - q = quantity.Length(1.0,"km") + q = quantity.Length(1.0, "km") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e3, delta=1e-3) self.assertEqual(q.units, "km") @@ -508,7 +521,7 @@ def test_cm(self): """ Test the creation of a length quantity with units of cm. """ - q = quantity.Length(1.0,"cm") + q = quantity.Length(1.0, "cm") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e-2, delta=1e-8) self.assertEqual(q.units, "cm") @@ -517,7 +530,7 @@ def test_mm(self): """ Test the creation of a length quantity with units of mm. """ - q = quantity.Length(1.0,"mm") + q = quantity.Length(1.0, "mm") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e-3, delta=1e-9) self.assertEqual(q.units, "mm") @@ -526,7 +539,7 @@ def test_um(self): """ Test the creation of a length quantity with units of um. """ - q = quantity.Length(1.0,"um") + q = quantity.Length(1.0, "um") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e-6, delta=1e-12) self.assertEqual(q.units, "um") @@ -535,7 +548,7 @@ def test_nm(self): """ Test the creation of a length quantity with units of nm. """ - q = quantity.Length(1.0,"nm") + q = quantity.Length(1.0, "nm") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e-9, delta=1e-15) self.assertEqual(q.units, "nm") @@ -544,11 +557,12 @@ def test_pm(self): """ Test the creation of a length quantity with units of pm. 
""" - q = quantity.Length(1.0,"pm") + q = quantity.Length(1.0, "pm") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e-12, delta=1e-18) self.assertEqual(q.units, "pm") + ################################################################################ class TestMass(unittest.TestCase): @@ -557,12 +571,12 @@ class TestMass(unittest.TestCase): Note that value_si is always kg (per molecule), not kg/mol. """ - + def test_kg(self): """ Test the creation of a mass quantity with units of kg. """ - q = quantity.Mass(1.0,"kg") + q = quantity.Mass(1.0, "kg") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "kg") @@ -572,7 +586,7 @@ def test_gpermol(self): Test the creation of a mass quantity with units of g/mol. Note that g/mol is automatically coerced to amu. """ - q = quantity.Mass(1.0,"g/mol") + q = quantity.Mass(1.0, "g/mol") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, constants.amu, delta=1e-32) self.assertEqual(q.units, "amu") @@ -582,64 +596,67 @@ def test_kgpermol(self): Test the creation of a mass quantity with units of kg/mol. Note that kg/mol is automatically coerced to amu. """ - q = quantity.Mass(1.0,"kg/mol") + q = quantity.Mass(1.0, "kg/mol") self.assertAlmostEqual(q.value, 1000.0, 3) - self.assertAlmostEqual(q.value_si, 1000.*constants.amu, delta=1e-29) + self.assertAlmostEqual(q.value_si, 1000. * constants.amu, delta=1e-29) self.assertEqual(q.units, "amu") def test_amu(self): """ Test the creation of a mass quantity with units of amu. """ - q = quantity.Mass(1.0,"amu") + q = quantity.Mass(1.0, "amu") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, constants.amu, delta=1e-32) self.assertEqual(q.units, "amu") + ################################################################################ class TestMomentum(unittest.TestCase): """ Contains unit tests of the Momentum unit type object. """ - + def test_kgmpers2(self): """ Test the creation of a momentum quantity with units of kg*m/s^2. """ - q = quantity.Momentum(1.0,"kg*m/s^2") + q = quantity.Momentum(1.0, "kg*m/s^2") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "kg*m/s^2") + ################################################################################ class TestPower(unittest.TestCase): """ Contains unit tests of the Power unit type object. """ - + def test_W(self): """ Test the creation of a power quantity with units of W. """ - q = quantity.Power(1.0,"W") + q = quantity.Power(1.0, "W") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "W") + ################################################################################ class TestPressure(unittest.TestCase): """ Contains unit tests of the Pressure unit type object. """ - + def test_Pa(self): """ Test the creation of a pressure quantity with units of Pa. """ - q = quantity.Pressure(1.0,"Pa") + q = quantity.Pressure(1.0, "Pa") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "Pa") @@ -648,7 +665,7 @@ def test_bar(self): """ Test the creation of a pressure quantity with units of bar. 
""" - q = quantity.Pressure(1.0,"bar") + q = quantity.Pressure(1.0, "bar") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e5, delta=1e-6) self.assertEqual(q.units, "bar") @@ -657,7 +674,7 @@ def test_atm(self): """ Test the creation of a pressure quantity with units of atm. """ - q = quantity.Pressure(1.0,"atm") + q = quantity.Pressure(1.0, "atm") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 101325., delta=1e-6) self.assertEqual(q.units, "atm") @@ -666,104 +683,109 @@ def test_torr(self): """ Test the creation of a pressure quantity with units of torr. """ - q = quantity.Pressure(1.0,"torr") + q = quantity.Pressure(1.0, "torr") self.assertAlmostEqual(q.value, 1.0, 6) - self.assertAlmostEqual(q.value_si, 101325./760., delta=1e-6) + self.assertAlmostEqual(q.value_si, 101325. / 760., delta=1e-6) self.assertEqual(q.units, "torr") def test_psi(self): """ Test the creation of a pressure quantity with units of psi. """ - q = quantity.Pressure(1.0,"psi") + q = quantity.Pressure(1.0, "psi") self.assertAlmostEqual(q.value, 1.0, 6) - self.assertAlmostEqual(q.value_si, 101325./14.695949, delta=1e-2) + self.assertAlmostEqual(q.value_si, 101325. / 14.695949, delta=1e-2) self.assertEqual(q.units, "psi") + ################################################################################ class TestRateCoefficient(unittest.TestCase): """ Contains unit tests of the RateCoefficient unit type object. """ - + def test_s(self): """ Test the creation of a rate coefficient quantity with units of s^-1. """ - q = quantity.RateCoefficient(1.0,"s^-1") + q = quantity.RateCoefficient(1.0, "s^-1") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "s^-1") - self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1.0, places=1) # 1 /s = 1 /s + self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1.0, places=1) # 1 /s = 1 /s def test_m3permols(self): """ Test the creation of a rate coefficient quantity with units of m^3/(mol*s). """ - q = quantity.RateCoefficient(1.0,"m^3/(mol*s)") + q = quantity.RateCoefficient(1.0, "m^3/(mol*s)") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "m^3/(mol*s)") - self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e6, places=1) # 1 m3/mol/s = 1e6 cm3/mol/s + self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e6, places=1) # 1 m3/mol/s = 1e6 cm3/mol/s def test_m6permol2s(self): """ Test the creation of a rate coefficient quantity with units of m^6/(mol^2*s). """ - q = quantity.RateCoefficient(1.0,"m^6/(mol^2*s)") + q = quantity.RateCoefficient(1.0, "m^6/(mol^2*s)") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "m^6/(mol^2*s)") - self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e12, places=1) # 1 m6/mol2/s = 1e12 cm6/mol2/s + self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e12, + places=1) # 1 m6/mol2/s = 1e12 cm6/mol2/s def test_m9permol3s(self): """ Test the creation of a rate coefficient quantity with units of m^9/(mol^3*s). 
""" - q = quantity.RateCoefficient(1.0,"m^9/(mol^3*s)") + q = quantity.RateCoefficient(1.0, "m^9/(mol^3*s)") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "m^9/(mol^3*s)") - self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e18, delta=1e3) # 1 m9/mol3/s = 1e18 cm9/mol3/s + self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e18, + delta=1e3) # 1 m9/mol3/s = 1e18 cm9/mol3/s def test_cm3permols(self): """ Test the creation of a rate coefficient quantity with units of cm^3/(mol*s). """ - q = quantity.RateCoefficient(1.0,"cm^3/(mol*s)") + q = quantity.RateCoefficient(1.0, "cm^3/(mol*s)") self.assertAlmostEqual(q.value, 1.0, 6) - self.assertAlmostEqual(q.value_si*1e6, 1.0, delta=1e-6) + self.assertAlmostEqual(q.value_si * 1e6, 1.0, delta=1e-6) self.assertEqual(q.units, "cm^3/(mol*s)") - self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e6, places=1) # 1 m3/mol/s = 1 cm3/mol/s + self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e6, places=1) # 1 m3/mol/s = 1 cm3/mol/s def test_cm6permol2s(self): """ Test the creation of a rate coefficient quantity with units of cm^6/(mol^2*s). """ - q = quantity.RateCoefficient(1.0,"cm^6/(mol^2*s)") + q = quantity.RateCoefficient(1.0, "cm^6/(mol^2*s)") self.assertAlmostEqual(q.value, 1.0, 6) - self.assertAlmostEqual(q.value_si*(1e6)**2, 1.0, delta=1e-6) + self.assertAlmostEqual(q.value_si * 1e6 ** 2, 1.0, delta=1e-6) self.assertEqual(q.units, "cm^6/(mol^2*s)") - self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e12, places=1) # 1 m6/mol2/s = 1e12 cm6/mol2/s + self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e12, + places=1) # 1 m6/mol2/s = 1e12 cm6/mol2/s def test_cm9permol3s(self): """ Test the creation of a rate coefficient quantity with units of cm^9/(mol^3*s). """ - q = quantity.RateCoefficient(1.0,"cm^9/(mol^3*s)") + q = quantity.RateCoefficient(1.0, "cm^9/(mol^3*s)") self.assertAlmostEqual(q.value, 1.0, 6) - self.assertAlmostEqual(q.value_si*(1e6)**3, 1.0, delta=1e-6) + self.assertAlmostEqual(q.value_si * 1e6 ** 3, 1.0, delta=1e-6) self.assertEqual(q.units, "cm^9/(mol^3*s)") - self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e18, delta=1e3) # 1 m9/mol3/s = 1e18 cm9/mol3/s + self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e18, + delta=1e3) # 1 m9/mol3/s = 1e18 cm9/mol3/s def test_cm3permolecules(self): """ Test the creation of a rate coefficient quantity with units of cm^3/(molecule*s). """ - q = quantity.RateCoefficient(1.0,"cm^3/(molecule*s)") + q = quantity.RateCoefficient(1.0, "cm^3/(molecule*s)") self.assertAlmostEqual(q.value, 1.0, 6) - self.assertAlmostEqual(q.value_si*1e6/constants.Na, 1.0, delta=1e-6) + self.assertAlmostEqual(q.value_si * 1e6 / constants.Na, 1.0, delta=1e-6) self.assertEqual(q.units, "cm^3/(molecule*s)") self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e6, delta=1e0) # 1 m3/mol/s = 1e6 cm3/mol/s @@ -771,22 +793,24 @@ def test_cm6permolecule2s(self): """ Test the creation of a rate coefficient quantity with units of cm^6/(molecule^2*s). 
""" - q = quantity.RateCoefficient(1.0,"cm^6/(molecule^2*s)") + q = quantity.RateCoefficient(1.0, "cm^6/(molecule^2*s)") self.assertAlmostEqual(q.value, 1.0, 6) - self.assertAlmostEqual(q.value_si*(1e6/constants.Na)**2, 1.0, delta=1e-6) + self.assertAlmostEqual(q.value_si * (1e6 / constants.Na) ** 2, 1.0, delta=1e-6) self.assertEqual(q.units, "cm^6/(molecule^2*s)") - self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e12 , delta=1e0) # 1 m6/mol2/s = 1e12 cm6/mol2/s + self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e12, + delta=1e0) # 1 m6/mol2/s = 1e12 cm6/mol2/s def test_cm9permolecule3s(self): """ Test the creation of a rate coefficient quantity with units of cm^9/(molecule^3*s). """ - q = quantity.RateCoefficient(1.0,"cm^9/(molecule^3*s)") + q = quantity.RateCoefficient(1.0, "cm^9/(molecule^3*s)") self.assertAlmostEqual(q.value, 1.0, 6) - self.assertAlmostEqual(q.value_si*(1e6/constants.Na)**3, 1.0, delta=1e-6) + self.assertAlmostEqual(q.value_si * (1e6 / constants.Na) ** 3, 1.0, delta=1e-6) self.assertEqual(q.units, "cm^9/(molecule^3*s)") - print q.units - self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e18 , delta=1e3) # 1 m9/mole3/s = 1e18 cm9/mol3/s + self.assertAlmostEqual(q.getConversionFactorFromSItoCmMolS(), 1e18, + delta=1e3) # 1 m9/mole3/s = 1e18 cm9/mol3/s + ################################################################################ @@ -794,12 +818,12 @@ class TestTemperature(unittest.TestCase): """ Contains unit tests of the Temperature unit type object. """ - + def test_K(self): """ Test the creation of a temperature quantity with units of K. """ - q = quantity.Temperature(1.0,"K") + q = quantity.Temperature(1.0, "K") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "K") @@ -809,44 +833,44 @@ def test_degC(self): Test the creation of a temperature quantity with units of degrees C. """ with self.assertRaises(NotImplementedError): - q = quantity.Temperature(1.0,"degC") - + quantity.Temperature(1.0, "degC") def test_degF(self): """ Test the creation of a temperature quantity with units of degrees F. """ with self.assertRaises(NotImplementedError): - q = quantity.Temperature(1.0,"degF") - + quantity.Temperature(1.0, "degF") + def test_degR(self): """ Test the creation of a temperature quantity with units of degrees R. """ with self.assertRaises(NotImplementedError): - q = quantity.Temperature(1.0,"degR") - + quantity.Temperature(1.0, "degR") + + ################################################################################ class TestTime(unittest.TestCase): """ Contains unit tests of the Time unit type object. """ - + def test_s(self): """ Test the creation of a time quantity with units of s. """ - q = quantity.Time(1.0,"s") + q = quantity.Time(1.0, "s") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "s") - + def test_ms(self): """ Test the creation of a time quantity with units of ms. """ - q = quantity.Time(1.0,"ms") + q = quantity.Time(1.0, "ms") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e-3, delta=1e-9) self.assertEqual(q.units, "ms") @@ -855,7 +879,7 @@ def test_us(self): """ Test the creation of a time quantity with units of us. 
""" - q = quantity.Time(1.0,"us") + q = quantity.Time(1.0, "us") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e-6, delta=1e-12) self.assertEqual(q.units, "us") @@ -864,7 +888,7 @@ def test_ns(self): """ Test the creation of a time quantity with units of ns. """ - q = quantity.Time(1.0,"ns") + q = quantity.Time(1.0, "ns") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e-9, delta=1e-15) self.assertEqual(q.units, "ns") @@ -873,7 +897,7 @@ def test_ps(self): """ Test the creation of a time quantity with units of ps. """ - q = quantity.Time(1.0,"ps") + q = quantity.Time(1.0, "ps") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e-12, delta=1e-18) self.assertEqual(q.units, "ps") @@ -882,7 +906,7 @@ def test_fs(self): """ Test the creation of a time quantity with units of fs. """ - q = quantity.Time(1.0,"fs") + q = quantity.Time(1.0, "fs") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e-15, delta=1e-21) self.assertEqual(q.units, "fs") @@ -891,7 +915,7 @@ def test_min(self): """ Test the creation of a time quantity with units of min. """ - q = quantity.Time(1.0,"min") + q = quantity.Time(1.0, "min") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 60.0, delta=1e-6) self.assertEqual(q.units, "min") @@ -900,23 +924,24 @@ def test_hr(self): """ Test the creation of a time quantity with units of hr. """ - q = quantity.Time(1.0,"hr") + q = quantity.Time(1.0, "hr") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 3600.0, delta=1e-6) self.assertEqual(q.units, "hr") + ################################################################################ class TestVelocity(unittest.TestCase): """ Contains unit tests of the Velocity unit type object. """ - + def test_mpers(self): """ Test the creation of an velocity quantity with units of m/s. """ - q = quantity.Velocity(1.0,"m/s") + q = quantity.Velocity(1.0, "m/s") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "m/s") @@ -925,36 +950,38 @@ def test_cmpers(self): """ Test the creation of an velocity quantity with units of m/s. """ - q = quantity.Velocity(1.0,"cm/s") + q = quantity.Velocity(1.0, "cm/s") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 0.01, delta=1e-8) self.assertEqual(q.units, "cm/s") + ################################################################################ class TestVolume(unittest.TestCase): """ Contains unit tests of the Volume unit type object. """ - + def test_m3(self): """ Test the creation of an volume quantity with units of m^3. """ - q = quantity.Volume(1.0,"m^3") + q = quantity.Volume(1.0, "m^3") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6) self.assertEqual(q.units, "m^3") - + def test_L(self): """ Test the creation of an volume quantity with units of L. """ - q = quantity.Volume(1.0,"L") + q = quantity.Volume(1.0, "L") self.assertAlmostEqual(q.value, 1.0, 6) self.assertAlmostEqual(q.value_si, 1.0e-3, delta=1e-9) self.assertEqual(q.units, "L") + class TestQuantity(unittest.TestCase): """ Contains unit tests testing the value and uncertainty storage behavior for ScalarQuantity and ArrayQuantity objects @@ -965,47 +992,48 @@ def setUp(self): A function run before each unit test in this class. 
This tests the creation of several both ScalarQuantity and ArrayQuantity objects """ - self.Cp = quantity.Quantity([-6.51,-5.19333,-4.47333,-3.76,-3.44333,-2.94667,-2.47],'cal/(mol*K)', - '+|-',[2.72057,3.42407,4.84068,5.11681,5.13207,5.8757,8.29108]) - self.v = quantity.Quantity([5,10,12],'cm/s','*|/',[1.2,0.4,1]) - self.H = quantity.Quantity(33.1097,'kcal/mol','+|-',24.8344) + self.Cp = quantity.Quantity([-6.51, -5.19333, -4.47333, -3.76, -3.44333, -2.94667, -2.47], 'cal/(mol*K)', + '+|-', [2.72057, 3.42407, 4.84068, 5.11681, 5.13207, 5.8757, 8.29108]) + self.v = quantity.Quantity([5, 10, 12], 'cm/s', '*|/', [1.2, 0.4, 1]) + self.H = quantity.Quantity(33.1097, 'kcal/mol', '+|-', 24.8344) self.A = quantity.Quantity(7.25e+13, 'cm^3/(mol*s)', '*|/', 5) - self.Cp_array = quantity.ArrayQuantity([-6.51,-5.19333,-4.47333,-3.76,-3.44333,-2.94667,-2.47],'cal/(mol*K)', - [2.72057,3.42407,4.84068,5.11681,5.13207,5.8757,8.29108],'+|-') - self.v_array = quantity.ArrayQuantity([5,10,12],'cm/s',[1.2,0.4,1],'*|/') - self.H_scalar = quantity.ScalarQuantity(33.1097,'kcal/mol',24.8344,'+|-',) - self.A_scalar = quantity.ScalarQuantity(7.25e+13, 'cm^3/(mol*s)', 5,'*|/') - + self.Cp_array = quantity.ArrayQuantity([-6.51, -5.19333, -4.47333, -3.76, -3.44333, -2.94667, -2.47], + 'cal/(mol*K)', + [2.72057, 3.42407, 4.84068, 5.11681, 5.13207, 5.8757, 8.29108], '+|-') + self.v_array = quantity.ArrayQuantity([5, 10, 12], 'cm/s', [1.2, 0.4, 1], '*|/') + self.H_scalar = quantity.ScalarQuantity(33.1097, 'kcal/mol', 24.8344, '+|-', ) + self.A_scalar = quantity.ScalarQuantity(7.25e+13, 'cm^3/(mol*s)', 5, '*|/') + def test_scalar_conversion(self): """ ScalarQuantity: test that the value and uncertainty get converted to the proper si value. """ # Uncertainty of type +|- must be adjusted by units - self.assertAlmostEqual(self.H.value_si,self.H.value*4184) - self.assertAlmostEqual(self.H.uncertainty_si, self.H.uncertainty*4184) - self.assertAlmostEqual(self.H_scalar.value_si,self.H_scalar.value*4184) - self.assertAlmostEqual(self.H_scalar.uncertainty_si, self.H_scalar.uncertainty*4184) - + self.assertAlmostEqual(self.H.value_si, self.H.value * 4184) + self.assertAlmostEqual(self.H.uncertainty_si, self.H.uncertainty * 4184) + self.assertAlmostEqual(self.H_scalar.value_si, self.H_scalar.value * 4184) + self.assertAlmostEqual(self.H_scalar.uncertainty_si, self.H_scalar.uncertainty * 4184) + # Uncertainty of type *|/ does not need to be adjusted by units - self.assertAlmostEqual(self.A.value_si,self.A.value*1e-6) + self.assertAlmostEqual(self.A.value_si, self.A.value * 1e-6) self.assertAlmostEqual(self.A.uncertainty_si, self.A.uncertainty) - self.assertAlmostEqual(self.A_scalar.value_si, self.A_scalar.value*1e-6) + self.assertAlmostEqual(self.A_scalar.value_si, self.A_scalar.value * 1e-6) self.assertAlmostEqual(self.A_scalar.uncertainty_si, self.A_scalar.uncertainty) - + def test_array_conversion(self): """ ArrayQuantity: test that the value and uncertainty get converted to the proper si value. 
""" - numpy.testing.assert_array_almost_equal(self.v.value_si, self.v.value*1e-2) - numpy.testing.assert_array_almost_equal(self.v.uncertainty_si, self.v.uncertainty) - numpy.testing.assert_array_almost_equal(self.v_array.value_si, self.v.value*1e-2) - numpy.testing.assert_array_almost_equal(self.v_array.uncertainty_si, self.v.uncertainty) + np.testing.assert_array_almost_equal(self.v.value_si, self.v.value * 1e-2) + np.testing.assert_array_almost_equal(self.v.uncertainty_si, self.v.uncertainty) + np.testing.assert_array_almost_equal(self.v_array.value_si, self.v.value * 1e-2) + np.testing.assert_array_almost_equal(self.v_array.uncertainty_si, self.v.uncertainty) + + np.testing.assert_array_almost_equal(self.Cp.value_si, self.Cp.value * 4.184) + np.testing.assert_array_almost_equal(self.Cp.uncertainty_si, self.Cp.uncertainty * 4.184) + np.testing.assert_array_almost_equal(self.Cp_array.value_si, self.Cp.value * 4.184) + np.testing.assert_array_almost_equal(self.Cp_array.uncertainty_si, self.Cp.uncertainty * 4.184) - numpy.testing.assert_array_almost_equal(self.Cp.value_si, self.Cp.value*4.184) - numpy.testing.assert_array_almost_equal(self.Cp.uncertainty_si, self.Cp.uncertainty*4.184) - numpy.testing.assert_array_almost_equal(self.Cp_array.value_si, self.Cp.value*4.184) - numpy.testing.assert_array_almost_equal(self.Cp_array.uncertainty_si, self.Cp.uncertainty*4.184) - def test_scalar_repr(self): """ Test that the ScalarQuantity objects can be recreated using their __repr__ function @@ -1016,18 +1044,18 @@ def test_scalar_repr(self): self.assertEqual(H.uncertainty_si, self.H.uncertainty_si) self.assertEqual(H.uncertaintyType, self.H.uncertaintyType) self.assertEqual(H.units, self.H.units) - + A = quantity.Quantity(eval(repr(self.A))) self.assertEqual(A.value_si, self.A.value_si) self.assertEqual(A.uncertainty_si, self.A.uncertainty_si) self.assertEqual(A.uncertaintyType, self.A.uncertaintyType) self.assertEqual(A.units, self.A.units) - + # Test that the __repr__ strings are the same - self.assertEqual(repr(H),repr(self.H)) - self.assertEqual(repr(self.H),repr(self.H_scalar)) - self.assertEqual(repr(A),repr(self.A)) - self.assertEqual(repr(self.A),repr(self.A_scalar)) + self.assertEqual(repr(H), repr(self.H)) + self.assertEqual(repr(self.H), repr(self.H_scalar)) + self.assertEqual(repr(A), repr(self.A)) + self.assertEqual(repr(self.A), repr(self.A_scalar)) def test_array_repr(self): """ @@ -1035,35 +1063,36 @@ def test_array_repr(self): """ # Test that the values can be reconstituted Cp = quantity.Quantity(eval(repr(self.Cp))) - numpy.testing.assert_array_almost_equal(Cp.value_si, self.Cp.value_si) - numpy.testing.assert_array_almost_equal(Cp.uncertainty_si, self.Cp.uncertainty_si) + np.testing.assert_array_almost_equal(Cp.value_si, self.Cp.value_si) + np.testing.assert_array_almost_equal(Cp.uncertainty_si, self.Cp.uncertainty_si) self.assertEqual(Cp.uncertaintyType, self.Cp.uncertaintyType) self.assertEqual(Cp.units, self.Cp.units) - + v = quantity.Quantity(eval(repr(self.v))) - numpy.testing.assert_array_almost_equal(v.value_si, self.v.value_si) - numpy.testing.assert_array_almost_equal(v.uncertainty_si, self.v.uncertainty_si) + np.testing.assert_array_almost_equal(v.value_si, self.v.value_si) + np.testing.assert_array_almost_equal(v.uncertainty_si, self.v.uncertainty_si) self.assertEqual(v.uncertaintyType, self.v.uncertaintyType) self.assertEqual(v.units, self.v.units) - + # Test that the __repr__ strings are the same - self.assertEqual(repr(Cp),repr(self.Cp)) - 
self.assertEqual(repr(self.Cp),repr(self.Cp_array)) - self.assertEqual(repr(v),repr(self.v)) - self.assertEqual(repr(self.v),repr(self.v_array)) + self.assertEqual(repr(Cp), repr(self.Cp)) + self.assertEqual(repr(self.Cp), repr(self.Cp_array)) + self.assertEqual(repr(v), repr(self.v)) + self.assertEqual(repr(self.v), repr(self.v_array)) class TestQuantityDictionaryConversion(unittest.TestCase): """ Test that Scalar and Array Quantity objects can be represented and reconstructed from dictionaries """ + def setUp(self): """ Initialize necessary variables for the TestQuantityDictionaryConversion unit test """ self.class_dict = {'ScalarQuantity': quantity.ScalarQuantity, 'ArrayQuantity': quantity.ArrayQuantity, - 'np_array': numpy.array + 'np_array': np.array } self.empty_scalar = quantity.ScalarQuantity() @@ -1072,14 +1101,14 @@ def setUp(self): self.uncertain_scalar = quantity.ScalarQuantity(value=3, uncertainty=0.2) self.empty_array = quantity.ArrayQuantity() - self.minimal_array = quantity.ArrayQuantity(value=numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) - self.known_array = quantity.ArrayQuantity(value=numpy.array([[1.2, 2.4, 3.4], [4.8, 5.0, 6.0], [7.4, 8.6, 9]]), + self.minimal_array = quantity.ArrayQuantity(value=np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) + self.known_array = quantity.ArrayQuantity(value=np.array([[1.2, 2.4, 3.4], [4.8, 5.0, 6.0], [7.4, 8.6, 9]]), units='kcal/mol') - self.uncertain_array = quantity.ArrayQuantity(value=numpy.array([[1.2, 2.4, 3.4], + self.uncertain_array = quantity.ArrayQuantity(value=np.array([[1.2, 2.4, 3.4], [4.8, 5.0, 6.0], [7.4, 8.6, 9.0]] ), - uncertainty=numpy.array([[0.2, 0.4, 0.6], + uncertainty=np.array([[0.2, 0.4, 0.6], [0.6, 0.4, 0.2], [0.8, 0.2, 0.4]]) ) diff --git a/rmgpy/rmgobjectTest.py b/rmgpy/rmgobjectTest.py index 7234281400..15164aae7d 100644 --- a/rmgpy/rmgobjectTest.py +++ b/rmgpy/rmgobjectTest.py @@ -39,6 +39,7 @@ from rmgpy.quantity import ScalarQuantity, ArrayQuantity from rmgpy.rmgobject import RMGObject, expand_to_dict, recursive_make_object + ################################################################################ @@ -59,7 +60,7 @@ class TestRMGObject(unittest.TestCase): """ Contains unit tests for the RMGObject class """ - + def test_save_int(self): """Test saving ints""" obj = PseudoRMGObject(a=1, b=5) @@ -271,19 +272,18 @@ def setUp(self): self.scalar_quantity = ScalarQuantity(value=500.0, units='K') self.scalar_dict = {'class': 'ScalarQuantity', 'value': 500.0, 'units': 'K'} - # Abbreviate name - PRO = PseudoRMGObject - - self.highly_nested_object = PRO(a=PRO(a=PRO(b=self.np_array, - c=PRO(c=self.array_quantity, - d=PRO(a=self.scalar_quantity, - b=PRO() - ) - ) + self.highly_nested_object = PseudoRMGObject(a=PseudoRMGObject(a=PseudoRMGObject(b=self.np_array, + c=PseudoRMGObject( + c=self.array_quantity, + d=PseudoRMGObject( + a=self.scalar_quantity, + b=PseudoRMGObject() + ) + ) + ) + ), + b=6 ) - ), - b=6 - ) self.highly_nest_dictionary = {'class': 'PseudoRMGObject', 'a': {'class': 'PseudoRMGObject', 'a': {'class': 'PseudoRMGObject', @@ -401,6 +401,7 @@ def test_np_array_creation(self): """ self.assertTrue(np.array_equal(recursive_make_object(self.np_dict, self.class_dictionary), self.np_array)) + ################################################################################ From d74b859f8a5577bfcbd238e79ff04ab2776fef70 Mon Sep 17 00:00:00 2001 From: Mark Payne Date: Tue, 20 Aug 2019 17:00:40 -0400 Subject: [PATCH 023/155] Upgrade rmgpy/rmg/*.py to Python 3 --- rmgpy/rmg/__init__.py | 2 - rmgpy/rmg/input.py 
| 611 ++++++++-------- rmgpy/rmg/inputTest.py | 31 +- rmgpy/rmg/listener.py | 29 +- rmgpy/rmg/main.py | 1488 ++++++++++++++++++++------------------- rmgpy/rmg/mainTest.py | 68 +- rmgpy/rmg/model.py | 1001 +++++++++++++------------- rmgpy/rmg/modelTest.py | 340 +++++---- rmgpy/rmg/output.py | 187 ++--- rmgpy/rmg/outputTest.py | 44 +- rmgpy/rmg/pdep.py | 411 +++++------ rmgpy/rmg/pdepTest.py | 199 +++--- rmgpy/rmg/react.py | 24 +- rmgpy/rmg/reactTest.py | 26 +- rmgpy/rmg/rmgTest.py | 34 +- rmgpy/rmg/settings.py | 47 +- 16 files changed, 2424 insertions(+), 2118 deletions(-) diff --git a/rmgpy/rmg/__init__.py b/rmgpy/rmg/__init__.py index 4ea5ad6243..5a228f8660 100644 --- a/rmgpy/rmg/__init__.py +++ b/rmgpy/rmg/__init__.py @@ -32,5 +32,3 @@ This is the rmg module. """ pass - - diff --git a/rmgpy/rmg/input.py b/rmgpy/rmg/input.py index a560d34fc9..34ab5ec477 100644 --- a/rmgpy/rmg/input.py +++ b/rmgpy/rmg/input.py @@ -29,48 +29,52 @@ ############################################################################### import logging -import warnings -import quantities import os -import numpy from copy import deepcopy -from rmgpy import settings +import numpy as np +from rmgpy import settings +from rmgpy.exceptions import InputError from rmgpy.molecule import Molecule from rmgpy.quantity import Quantity, Energy, RateCoefficient, SurfaceConcentration +from rmgpy.rmg.model import CoreEdgeReactionModel +from rmgpy.rmg.settings import ModelSettings, SimulatorSettings from rmgpy.solver.base import TerminationTime, TerminationConversion, TerminationRateRatio -from rmgpy.solver.simple import SimpleReactor from rmgpy.solver.liquid import LiquidReactor from rmgpy.solver.mbSampled import MBSampledReactor +from rmgpy.solver.simple import SimpleReactor from rmgpy.solver.surface import SurfaceReactor -from rmgpy.rmg.settings import ModelSettings, SimulatorSettings -from model import CoreEdgeReactionModel -from rmgpy.exceptions import InputError ################################################################################ rmg = None speciesDict = {} + def database( - thermoLibraries = None, - transportLibraries = None, - reactionLibraries = None, - frequenciesLibraries = None, - seedMechanisms = None, - kineticsFamilies = 'default', - kineticsDepositories = 'default', - kineticsEstimator = 'rate rules', - ): + thermoLibraries=None, + transportLibraries=None, + reactionLibraries=None, + frequenciesLibraries=None, + seedMechanisms=None, + kineticsFamilies='default', + kineticsDepositories='default', + kineticsEstimator='rate rules', +): # This function just stores the information about the database to be loaded # We don't actually load the database until after we're finished reading # the input file - if isinstance(thermoLibraries, str): thermoLibraries = [thermoLibraries] - if isinstance(transportLibraries, str): transportLibraries = [transportLibraries] - if isinstance(reactionLibraries, str): reactionLibraries = [reactionLibraries] - if isinstance(seedMechanisms, str): seedMechanisms = [seedMechanisms] - if isinstance(frequenciesLibraries, str): frequenciesLibraries = [frequenciesLibraries] + if isinstance(thermoLibraries, str): + thermoLibraries = [thermoLibraries] + if isinstance(transportLibraries, str): + transportLibraries = [transportLibraries] + if isinstance(reactionLibraries, str): + reactionLibraries = [reactionLibraries] + if isinstance(seedMechanisms, str): + seedMechanisms = [seedMechanisms] + if isinstance(frequenciesLibraries, str): + frequenciesLibraries = [frequenciesLibraries] 
rmg.databaseDirectory = settings['database.directory'] rmg.thermoLibraries = thermoLibraries or [] rmg.transportLibraries = transportLibraries @@ -78,9 +82,9 @@ def database( if reactionLibraries: index = 0 while index < len(reactionLibraries): - if isinstance(reactionLibraries[index],tuple): + if isinstance(reactionLibraries[index], tuple): pass - elif isinstance(reactionLibraries[index],str): + elif isinstance(reactionLibraries[index], str): reactionLibraries[index] = (reactionLibraries[index], False) else: raise TypeError('reaction libraries must be input as tuples or strings') @@ -94,20 +98,22 @@ def database( elif kineticsDepositories == 'all': rmg.kineticsDepositories = None else: - if not isinstance(kineticsDepositories,list): - raise InputError("kineticsDepositories should be either 'default', 'all', or a list of names eg. ['training','PrIMe'].") + if not isinstance(kineticsDepositories, list): + raise InputError("kineticsDepositories should be either 'default', 'all', or a list of names eg. " + "['training','PrIMe'].") rmg.kineticsDepositories = kineticsDepositories if kineticsFamilies in ('default', 'all', 'none'): rmg.kineticsFamilies = kineticsFamilies else: - if not isinstance(kineticsFamilies,list): - raise InputError("kineticsFamilies should be either 'default', 'all', 'none', or a list of names eg. ['H_Abstraction','R_Recombination'] or ['!Intra_Disproportionation'].") + if not isinstance(kineticsFamilies, list): + raise InputError("kineticsFamilies should be either 'default', 'all', 'none', or a list of names eg. " + "['H_Abstraction','R_Recombination'] or ['!Intra_Disproportionation'].") rmg.kineticsFamilies = kineticsFamilies -def catalystProperties(bindingEnergies = None, - surfaceSiteDensity = None,): +def catalystProperties(bindingEnergies=None, + surfaceSiteDensity=None, ): """ Specify the properties of the catalyst. Binding energies of C,H,O,N atoms, and the surface site density. 
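The database() hunk above normalizes the reactionLibraries argument so that later code can assume a list of (name, flag) tuples: a bare string becomes (name, False), an explicit tuple passes through unchanged, and anything else raises a TypeError. A minimal standalone sketch of that coercion, using a hypothetical helper name and example library names that are not taken from this patch:

    def normalize_reaction_libraries(reaction_libraries):
        # Hypothetical helper illustrating the same coercion rules as the while loop above.
        normalized = []
        for entry in reaction_libraries:
            if isinstance(entry, tuple):
                normalized.append(entry)            # explicit (name, flag) tuples pass through
            elif isinstance(entry, str):
                normalized.append((entry, False))   # bare names default the flag to False
            else:
                raise TypeError('reaction libraries must be input as tuples or strings')
        return normalized

    assert normalize_reaction_libraries(['test']) == [('test', False)]
    assert normalize_reaction_libraries([('test', True)]) == [('test', True)]

The reactionLibraries tests in rmgpy/rmg/inputTest.py, shown further down in this patch, cover the same three cases (bare string, (name, False) tuple, (name, True) tuple).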
@@ -131,52 +137,60 @@ def convertBindingEnergies(bindingEnergies): :return: the processed and checked dictionary """ if bindingEnergies is None: - bindingEnergies = { # default values for Pt(111) - 'C':(-6.750, 'eV/molecule'), - 'H':(-2.479, 'eV/molecule'), - 'O':(-3.586, 'eV/molecule'), - 'N':(-4.352, 'eV/molecule'), - } + bindingEnergies = { # default values for Pt(111) + 'C': (-6.750, 'eV/molecule'), + 'H': (-2.479, 'eV/molecule'), + 'O': (-3.586, 'eV/molecule'), + 'N': (-4.352, 'eV/molecule'), + } logging.info("Using default binding energies for Pt(111):\n{0!r}".format(bindingEnergies)) - if not isinstance(bindingEnergies, dict): raise InputError("bindingEnergies should be None (for default) or a dict.") - newDict = {} + if not isinstance(bindingEnergies, dict): + raise InputError("bindingEnergies should be None (for default) or a dict.") + new_dict = {} for element in 'CHON': try: - newDict[element] = Energy(bindingEnergies[element]) + new_dict[element] = Energy(bindingEnergies[element]) except KeyError: logging.error('Element {} missing from bindingEnergies dictionary'.format(element)) raise - return newDict - + return new_dict def species(label, structure, reactive=True): - logging.debug('Found {0} species "{1}" ({2})'.format('reactive' if reactive else 'nonreactive', label, structure.toSMILES())) - + logging.debug('Found {0} species "{1}" ({2})'.format('reactive' if reactive else 'nonreactive', + label, + structure.toSMILES())) + if '+' in label: raise InputError('species {0} label cannot include a + sign'.format(label)) - - spec, isNew = rmg.reactionModel.makeNewSpecies(structure, label=label, reactive=reactive) - if not isNew: - raise InputError("Species {0} is a duplicate of {1}. Species in input file must be unique".format(label,spec.label)) + + spec, is_new = rmg.reactionModel.makeNewSpecies(structure, label=label, reactive=reactive) + if not is_new: + raise InputError("Species {0} is a duplicate of {1}. 
Species in input file must be unique".format(label, + spec.label)) # Force RMG to add the species to edge first, prior to where it is added to the core, in case it is found in # any reaction libraries along the way rmg.reactionModel.addSpeciesToEdge(spec) rmg.initialSpecies.append(spec) speciesDict[label] = spec - + + def SMARTS(string): return Molecule().fromSMARTS(string) + def SMILES(string): return Molecule().fromSMILES(string) + def InChI(string): return Molecule().fromInChI(string) + def adjacencyList(string): return Molecule().fromAdjacencyList(string) + # Reaction systems def simpleReactor(temperature, pressure, @@ -193,96 +207,103 @@ def simpleReactor(temperature, sensitivityMoleFractions=None, ): logging.debug('Found SimpleReactor reaction system') - - for key,value in initialMoleFractions.iteritems(): - if not isinstance(value,list): + + for key, value in initialMoleFractions.items(): + if not isinstance(value, list): initialMoleFractions[key] = float(value) if value < 0: raise InputError('Initial mole fractions cannot be negative.') else: if len(value) != 2: raise InputError("Initial mole fraction values must either be a number or a list with 2 entries") - initialMoleFractions[key] = [float(value[0]),float(value[1])] + initialMoleFractions[key] = [float(value[0]), float(value[1])] if value[0] < 0 or value[1] < 0: raise InputError('Initial mole fractions cannot be negative.') elif value[1] < value[0]: raise InputError('Initial mole fraction range out of order: {0}'.format(key)) - - if not isinstance(temperature,list): + + if not isinstance(temperature, list): T = Quantity(temperature) else: if len(temperature) != 2: - raise InputError('Temperature and pressure ranges can either be in the form of (number,units) or a list with 2 entries of the same format') + raise InputError('Temperature and pressure ranges can either be in the form of (number,units) or a list ' + 'with 2 entries of the same format') T = [Quantity(t) for t in temperature] - - if not isinstance(pressure,list): + + if not isinstance(pressure, list): P = Quantity(pressure) else: if len(pressure) != 2: - raise InputError('Temperature and pressure ranges can either be in the form of (number,units) or a list with 2 entries of the same format') + raise InputError('Temperature and pressure ranges can either be in the form of (number,units) or a list ' + 'with 2 entries of the same format') P = [Quantity(p) for p in pressure] - - - if not isinstance(temperature,list) and not isinstance(pressure,list) and all([not isinstance(x,list) for x in initialMoleFractions.values()]): - nSims=1 - + + if not isinstance(temperature, list) and not isinstance(pressure, list) and all( + [not isinstance(x, list) for x in initialMoleFractions.values()]): + nSims = 1 + termination = [] if terminationConversion is not None: - for spec, conv in terminationConversion.iteritems(): + for spec, conv in terminationConversion.items(): termination.append(TerminationConversion(speciesDict[spec], conv)) if terminationTime is not None: termination.append(TerminationTime(Quantity(terminationTime))) if terminationRateRatio is not None: termination.append(TerminationRateRatio(terminationRateRatio)) if len(termination) == 0: - raise InputError('No termination conditions specified for reaction system #{0}.'.format(len(rmg.reactionSystems)+2)) - - sensitiveSpecies = [] + raise InputError('No termination conditions specified for reaction system #{0}.'.format(len(rmg.reactionSystems) + 2)) + + sensitive_species = [] if sensitivity: if sensitivity != 'all': - if 
isinstance(sensitivity, str): sensitivity = [sensitivity] + if isinstance(sensitivity, str): + sensitivity = [sensitivity] for spec in sensitivity: - sensitiveSpecies.append(speciesDict[spec]) + sensitive_species.append(speciesDict[spec]) else: - sensitiveSpecies.append('all') - - if not isinstance(T,list): + sensitive_species.append('all') + + if not isinstance(T, list): sensitivityTemperature = T - if not isinstance(P,list): + if not isinstance(P, list): sensitivityPressure = P - if not any([isinstance(x,list) for x in initialMoleFractions.itervalues()]): + if not any([isinstance(x, list) for x in initialMoleFractions.values()]): sensitivityMoleFractions = deepcopy(initialMoleFractions) if sensitivityMoleFractions is None or sensitivityTemperature is None or sensitivityPressure is None: - sensConditions = None + sens_conditions = None else: - sensConditions = sensitivityMoleFractions - sensConditions['T'] = Quantity(sensitivityTemperature).value_si - sensConditions['P'] = Quantity(sensitivityPressure).value_si + sens_conditions = sensitivityMoleFractions + sens_conditions['T'] = Quantity(sensitivityTemperature).value_si + sens_conditions['P'] = Quantity(sensitivityPressure).value_si - - system = SimpleReactor(T, P, initialMoleFractions, nSims, termination, sensitiveSpecies, sensitivityThreshold,sensConditions) + system = SimpleReactor(T, P, initialMoleFractions, nSims, termination, sensitive_species, sensitivityThreshold, + sens_conditions) rmg.reactionSystems.append(system) - - assert balanceSpecies is None or isinstance(balanceSpecies,str), 'balanceSpecies should be the string corresponding to a single species' + + assert balanceSpecies is None or isinstance(balanceSpecies, str), 'balanceSpecies should be the string corresponding to a single species' rmg.balanceSpecies = balanceSpecies - if balanceSpecies: #check that the balanceSpecies can't be taken to zero + if balanceSpecies: # check that the balanceSpecies can't be taken to zero total = 0.0 - for key,item in initialMoleFractions.iteritems(): + for key, item in initialMoleFractions.items(): if key == balanceSpecies: - assert not isinstance(item,list), 'balanceSpecies must not have a defined range' + assert not isinstance(item, list), 'balanceSpecies must not have a defined range' xbspcs = item - if isinstance(item,list): - total += item[1]-item[0] + if isinstance(item, list): + total += item[1] - item[0] if total > xbspcs: - raise ValueError('The sum of the differences in the ranged mole fractions is greater than the mole fraction of the balance species, this would require the balanceSpecies mole fraction to be negative in some cases which is not allowed, either reduce the maximum mole fractions or dont use balanceSpecies') + raise ValueError('The sum of the differences in the ranged mole fractions is greater than the mole ' + 'fraction of the balance species, this would require the balanceSpecies mole fraction to ' + 'be negative in some cases which is not allowed, either reduce the maximum mole fractions ' + 'or dont use balanceSpecies') + # Reaction systems def liquidReactor(temperature, initialConcentrations, terminationConversion=None, - nSims = 4, + nSims=4, terminationTime=None, terminationRateRatio=None, sensitivity=None, @@ -290,124 +311,126 @@ def liquidReactor(temperature, sensitivityTemperature=None, sensitivityConcentrations=None, constantSpecies=None): - logging.debug('Found LiquidReactor reaction system') - - if not isinstance(temperature,list): + + if not isinstance(temperature, list): T = Quantity(temperature) 
else: if len(temperature) != 2: - raise InputError('Temperature and pressure ranges can either be in the form of (number,units) or a list with 2 entries of the same format') + raise InputError('Temperature and pressure ranges can either be in the form of (number,units) or a list ' + 'with 2 entries of the same format') T = [Quantity(t) for t in temperature] - - for spec,conc in initialConcentrations.iteritems(): - if not isinstance(conc,list): + + for spec, conc in initialConcentrations.items(): + if not isinstance(conc, list): concentration = Quantity(conc) # check the dimensions are ok # convert to mol/m^3 (or something numerically nice? or must it be SI) initialConcentrations[spec] = concentration.value_si else: if len(conc) != 2: - raise InputError("Concentration values must either be in the form of (number,units) or a list with 2 entries of the same format") - initialConcentrations[spec] = [Quantity(conc[0]),Quantity(conc[1])] - - if not isinstance(temperature,list) and all([not isinstance(x,list) for x in initialConcentrations.itervalues()]): - nSims=1 - + raise InputError("Concentration values must either be in the form of (number,units) or a list with 2 " + "entries of the same format") + initialConcentrations[spec] = [Quantity(conc[0]), Quantity(conc[1])] + + if not isinstance(temperature, list) and all([not isinstance(x, list) for x in initialConcentrations.values()]): + nSims = 1 + termination = [] if terminationConversion is not None: - for spec, conv in terminationConversion.iteritems(): + for spec, conv in terminationConversion.items(): termination.append(TerminationConversion(speciesDict[spec], conv)) if terminationTime is not None: termination.append(TerminationTime(Quantity(terminationTime))) if terminationRateRatio is not None: termination.append(TerminationRateRatio(terminationRateRatio)) if len(termination) == 0: - raise InputError('No termination conditions specified for reaction system #{0}.'.format(len(rmg.reactionSystems)+2)) - + raise InputError('No termination conditions specified for reaction system #{0}.'.format(len(rmg.reactionSystems) + 2)) + sensitiveSpecies = [] if sensitivity: for spec in sensitivity: sensitiveSpecies.append(speciesDict[spec]) - - ##chatelak: check the constant species exist + + # chatelak: check the constant species exist if constantSpecies is not None: logging.debug(' Generation with constant species:') for constantSpecie in constantSpecies: logging.debug(" {0}".format(constantSpecie)) - if not speciesDict.has_key(constantSpecie): + if constantSpecie not in speciesDict: raise InputError('Species {0} not found in the input file'.format(constantSpecie)) - - if not isinstance(T,list): + + if not isinstance(T, list): sensitivityTemperature = T - if not any([isinstance(x,list) for x in initialConcentrations.itervalues()]): + if not any([isinstance(x, list) for x in initialConcentrations.values()]): sensitivityConcentrations = initialConcentrations if sensitivityConcentrations is None or sensitivityTemperature is None: - sensConditions = None + sens_conditions = None else: - sensConditions = sensitivityConcentrations - sensConditions['T'] = Quantity(sensitivityTemperature).value_si - - system = LiquidReactor(T, initialConcentrations, nSims, termination, sensitiveSpecies, sensitivityThreshold, sensConditions, constantSpecies) + sens_conditions = sensitivityConcentrations + sens_conditions['T'] = Quantity(sensitivityTemperature).value_si + + system = LiquidReactor(T, initialConcentrations, nSims, termination, sensitiveSpecies, 
sensitivityThreshold, + sens_conditions, constantSpecies) rmg.reactionSystems.append(system) - + + # Reaction systems def surfaceReactor(temperature, initialPressure, - initialGasMoleFractions, - initialSurfaceCoverages, - surfaceVolumeRatio, - nSims=4, - terminationConversion=None, - terminationTime=None, - terminationRateRatio=None, - sensitivity=None, - sensitivityThreshold=1e-3): - + initialGasMoleFractions, + initialSurfaceCoverages, + surfaceVolumeRatio, + nSims=4, + terminationConversion=None, + terminationTime=None, + terminationRateRatio=None, + sensitivity=None, + sensitivityThreshold=1e-3): logging.debug('Found SurfaceReactor reaction system') - for value in initialGasMoleFractions.values(): + for value in list(initialGasMoleFractions.values()): if value < 0: raise InputError('Initial mole fractions cannot be negative.') - totalInitialMoles = sum(initialGasMoleFractions.values()) - if totalInitialMoles != 1: + total_initial_moles = sum(initialGasMoleFractions.values()) + if total_initial_moles != 1: logging.warning('Initial gas mole fractions do not sum to one; renormalizing.') logging.debug('') logging.debug('Original composition:') - for spec, molfrac in initialGasMoleFractions.iteritems(): + for spec, molfrac in initialGasMoleFractions.items(): logging.debug("{0} = {1}".format(spec, molfrac)) for spec in initialGasMoleFractions: - initialGasMoleFractions[spec] /= totalInitialMoles + initialGasMoleFractions[spec] /= total_initial_moles logging.info('') logging.debug('Normalized mole fractions:') - for spec, molfrac in initialGasMoleFractions.iteritems(): + for spec, molfrac in initialGasMoleFractions.items(): logging.debug("{0} = {1}".format(spec, molfrac)) if not isinstance(temperature, list): T = Quantity(temperature) else: if len(temperature) != 2: - raise InputError('Temperature ranges can either be in the form ' - 'of (number,units) or a list with 2 entries of the same format') + raise InputError('Temperature ranges can either be in the form of (number,units) or a list with 2 entries ' + 'of the same format') T = [Quantity(t) for t in temperature] if not isinstance(initialPressure, list): initialP = Quantity(initialPressure) else: if len(initialPressure) != 2: - raise InputError('Initial pressure ranges can either be in the form ' - 'of (number,units) or a list with 2 entries of the same format') + raise InputError('Initial pressure ranges can either be in the form ''of (number,units) or a list with ' + '2 entries of the same format') initialP = [Quantity(p) for p in initialPressure] if not isinstance(temperature, list) and not isinstance(initialPressure, list): nSims = 1 if any([isinstance(x, list) for x in initialGasMoleFractions.values()]) or \ - any([isinstance(x, list) for x in initialSurfaceCoverages.values()]): + any([isinstance(x, list) for x in initialSurfaceCoverages.values()]): raise NotImplementedError("Can't do ranges on species concentrations for surface reactors yet.") termination = [] if terminationConversion is not None: - for spec, conv in terminationConversion.iteritems(): + for spec, conv in terminationConversion.items(): termination.append(TerminationConversion(speciesDict[spec], conv)) if terminationTime is not None: termination.append(TerminationTime(Quantity(terminationTime))) @@ -416,22 +439,20 @@ def surfaceReactor(temperature, if len(termination) == 0: raise InputError('No termination conditions specified for reaction system #{0}.'.format(len(rmg.reactionSystems) + 2)) - sensitiveSpecies = [] + sensitive_species = [] if sensitivity: for spec in 
sensitivity: - sensitiveSpecies.append(speciesDict[spec]) + sensitive_species.append(speciesDict[spec]) if not isinstance(T, list): sensitivityTemperature = T if not isinstance(initialPressure, list): sensitivityPressure = initialPressure - sensConditions = None + sens_conditions = None if sensitivity: raise NotImplementedError("Can't currently do sensitivity with surface reactors.") - """ - The problem is inside base.pyx it reads the dictionary 'sensConditions' - and guesses whether they're all concentrations (liquid reactor) or - mole fractions (simple reactor). In fact, some may be surface coverages. - """ + # The problem is inside base.pyx it reads the dictionary 'sensConditions' + # and guesses whether they're all concentrations (liquid reactor) or + # mole fractions (simple reactor). In fact, some may be surface coverages. system = SurfaceReactor(T=T, initialP=initialP, @@ -441,9 +462,9 @@ def surfaceReactor(temperature, surfaceSiteDensity=rmg.surfaceSiteDensity, nSims=nSims, termination=termination, - sensitiveSpecies=sensitiveSpecies, + sensitiveSpecies=sensitive_species, sensitivityThreshold=sensitivityThreshold, - sensConditions=sensConditions) + sensConditions=sens_conditions) rmg.reactionSystems.append(system) system.log_initial_conditions(number=len(rmg.reactionSystems)) @@ -468,18 +489,18 @@ def mbsampledReactor(temperature, for spec in initialMoleFractions: initialMoleFractions[spec] = float(initialMoleFractions[spec]) - totalInitialMoles = sum(initialMoleFractions.values()) - if totalInitialMoles != 1: + total_initial_moles = sum(initialMoleFractions.values()) + if total_initial_moles != 1: logging.warning('Initial mole fractions do not sum to one; normalizing.') logging.info('') logging.info('Original composition:') - for spec, molfrac in initialMoleFractions.iteritems(): + for spec, molfrac in initialMoleFractions.items(): logging.info("{0} = {1}".format(spec, molfrac)) for spec in initialMoleFractions: - initialMoleFractions[spec] /= totalInitialMoles + initialMoleFractions[spec] /= total_initial_moles logging.info('') logging.info('Normalized mole fractions:') - for spec, molfrac in initialMoleFractions.iteritems(): + for spec, molfrac in initialMoleFractions.items(): logging.info("{0} = {1}".format(spec, molfrac)) T = Quantity(temperature) @@ -487,14 +508,14 @@ def mbsampledReactor(temperature, k_sampling = RateCoefficient(mbsamplingRate, 's^-1') - constantSpeciesList = [] + constant_species_list = [] for spec in constantSpecies: - constantSpeciesList.append(speciesDict[spec]) + constant_species_list.append(speciesDict[spec]) termination = [] if terminationConversion is not None: - for spec, conv in terminationConversion.iteritems(): + for spec, conv in terminationConversion.items(): termination.append(TerminationConversion(speciesDict[spec], conv)) if terminationTime is not None: termination.append(TerminationTime(Quantity(terminationTime))) @@ -502,29 +523,38 @@ def mbsampledReactor(temperature, raise InputError( 'No termination conditions specified for reaction system #{0}.'.format(len(rmg.reactionSystems) + 2)) - sensitiveSpecies = [] + sensitive_species = [] if sensitivity: - if isinstance(sensitivity, str): sensitivity = [sensitivity] + if isinstance(sensitivity, str): + sensitivity = [sensitivity] for spec in sensitivity: - sensitiveSpecies.append(speciesDict[spec]) - system = MBSampledReactor(T, P, initialMoleFractions, k_sampling, constantSpeciesList, termination, sensitiveSpecies, sensitivityThreshold) + sensitive_species.append(speciesDict[spec]) + system = 
MBSampledReactor(T, P, initialMoleFractions, k_sampling, constant_species_list, termination, + sensitive_species, sensitivityThreshold) rmg.reactionSystems.append(system) + def simulator(atol, rtol, sens_atol=1e-6, sens_rtol=1e-4): rmg.simulatorSettingsList.append(SimulatorSettings(atol, rtol, sens_atol, sens_rtol)) - + + def solvation(solvent): # If solvation module in input file, set the RMG solvent variable - if not isinstance(solvent,str): + if not isinstance(solvent, str): raise InputError("solvent should be a string like 'water'") rmg.solvent = solvent -def model(toleranceMoveToCore=None, toleranceMoveEdgeReactionToCore=numpy.inf,toleranceKeepInEdge=0.0, toleranceInterruptSimulation=1.0, - toleranceMoveEdgeReactionToSurface=numpy.inf, toleranceMoveSurfaceSpeciesToCore=numpy.inf, toleranceMoveSurfaceReactionToCore=numpy.inf, + +def model(toleranceMoveToCore=None, toleranceMoveEdgeReactionToCore=np.inf, toleranceKeepInEdge=0.0, + toleranceInterruptSimulation=1.0, + toleranceMoveEdgeReactionToSurface=np.inf, toleranceMoveSurfaceSpeciesToCore=np.inf, + toleranceMoveSurfaceReactionToCore=np.inf, toleranceMoveEdgeReactionToSurfaceInterrupt=None, - toleranceMoveEdgeReactionToCoreInterrupt=None, maximumEdgeSpecies=1000000, minCoreSizeForPrune=50, - minSpeciesExistIterationsForPrune=2, filterReactions=False, filterThreshold=1e8, ignoreOverallFluxCriterion=False, - maxNumSpecies=None,maxNumObjsPerIter=1,terminateAtMaxObjects=False,toleranceThermoKeepSpeciesInEdge=numpy.inf,dynamicsTimeScale=(0.0,'sec'), + toleranceMoveEdgeReactionToCoreInterrupt=None, maximumEdgeSpecies=1000000, minCoreSizeForPrune=50, + minSpeciesExistIterationsForPrune=2, filterReactions=False, filterThreshold=1e8, + ignoreOverallFluxCriterion=False, + maxNumSpecies=None, maxNumObjsPerIter=1, terminateAtMaxObjects=False, + toleranceThermoKeepSpeciesInEdge=np.inf, dynamicsTimeScale=(0.0, 'sec'), toleranceBranchReactionToCore=0.0, branchingIndex=0.5, branchingRatioMax=1.0): """ How to generate the model. `toleranceMoveToCore` must be specified. @@ -535,10 +565,12 @@ def model(toleranceMoveToCore=None, toleranceMoveEdgeReactionToCore=numpy.inf,to to the pressure dependent network expansion and not movement of species from edge to core """ if toleranceMoveToCore is None: - raise InputError("You must provide a toleranceMoveToCore value. It should be less than or equal to toleranceInterruptSimulation which is currently {0}".format(toleranceInterruptSimulation)) + raise InputError("You must provide a toleranceMoveToCore value. 
It should be less than or equal to " + "toleranceInterruptSimulation which is currently {0}".format(toleranceInterruptSimulation)) if toleranceMoveToCore > toleranceInterruptSimulation: - raise InputError("toleranceMoveToCore must be less than or equal to toleranceInterruptSimulation, which is currently {0}".format(toleranceInterruptSimulation)) - + raise InputError("toleranceMoveToCore must be less than or equal to toleranceInterruptSimulation, which is " + "currently {0}".format(toleranceInterruptSimulation)) + rmg.modelSettingsList.append( ModelSettings( toleranceMoveToCore=toleranceMoveToCore, @@ -566,24 +598,26 @@ def model(toleranceMoveToCore=None, toleranceMoveEdgeReactionToCore=numpy.inf,to branchingRatioMax=branchingRatioMax, ) ) - + + def quantumMechanics( - software, - method, - fileStore = None, - scratchDirectory = None, - onlyCyclics = False, - maxRadicalNumber = 0, - ): + software, + method, + fileStore=None, + scratchDirectory=None, + onlyCyclics=False, + maxRadicalNumber=0, +): from rmgpy.qm.main import QMCalculator - rmg.quantumMechanics = QMCalculator(software = software, - method = method, - fileStore = fileStore, - scratchDirectory = scratchDirectory, - onlyCyclics = onlyCyclics, - maxRadicalNumber = maxRadicalNumber, + rmg.quantumMechanics = QMCalculator(software=software, + method=method, + fileStore=fileStore, + scratchDirectory=scratchDirectory, + onlyCyclics=onlyCyclics, + maxRadicalNumber=maxRadicalNumber, ) + def mlEstimator(thermo=True, name='main', minHeavyAtoms=1, @@ -634,37 +668,39 @@ def mlEstimator(thermo=True, # Shows warning when onlyCyclics is False and onlyHeterocyclics is True if minCycleOverlap > 0 and not onlyCyclics and not onlyHeterocyclics: - logging.warning('"onlyCyclics" should be True when "minCycleOverlap" is greater than zero.' - ' Machine learning estimator is restricted to only cyclic species thermo with the specified minimum cycle overlap') + logging.warning('"onlyCyclics" should be True when "minCycleOverlap" is greater than zero. ' + 'Machine learning estimator is restricted to only cyclic species thermo with the specified ' + 'minimum cycle overlap') elif minCycleOverlap > 0 and not onlyCyclics and onlyHeterocyclics: - logging.warning('"onlyCyclics" should be True when "onlyHeterocyclics" is True and "minCycleOverlap" is greater than zero.' - ' Machine learning estimator is restricted to only heterocyclic species thermo with the specified minimum cycle overlap') + logging.warning('"onlyCyclics" should be True when "onlyHeterocyclics" is True and "minCycleOverlap" is ' + 'greater than zero. Machine learning estimator is restricted to only heterocyclic species ' + 'thermo with the specified minimum cycle overlap') elif onlyHeterocyclics and not onlyCyclics: - logging.warning('"onlyCyclics" should be True when "onlyHeterocyclics" is True.' - ' Machine learning estimator is restricted to only heterocyclic species thermo') + logging.warning('"onlyCyclics" should be True when "onlyHeterocyclics" is True. 
' + 'Machine learning estimator is restricted to only heterocyclic species thermo') -def pressureDependence( - method, - temperatures, - pressures, - maximumGrainSize = 0.0, - minimumNumberOfGrains = 0, - interpolation = None, - maximumAtoms=None, - ): +def pressureDependence( + method, + temperatures, + pressures, + maximumGrainSize=0.0, + minimumNumberOfGrains=0, + interpolation=None, + maximumAtoms=None, +): from arkane.pdep import PressureDependenceJob - + # Setting the pressureDependence attribute to non-None enables pressure dependence rmg.pressureDependence = PressureDependenceJob(network=None) - + # Process method rmg.pressureDependence.method = method - + # Process interpolation model if isinstance(interpolation, str): interpolation = (interpolation,) - if interpolation[0].lower() not in ("chebyshev","pdeparrhenius"): + if interpolation[0].lower() not in ("chebyshev", "pdeparrhenius"): raise InputError("Interpolation model must be set to either 'Chebyshev' or 'PDepArrhenius'.") rmg.pressureDependence.interpolationModel = interpolation @@ -674,27 +710,28 @@ def pressureDependence( rmg.pressureDependence.Tmax = Quantity(Tmax, Tunits) rmg.pressureDependence.Tcount = Tcount rmg.pressureDependence.generateTemperatureList() - + # Process pressures Pmin, Pmax, Punits, Pcount = pressures rmg.pressureDependence.Pmin = Quantity(Pmin, Punits) rmg.pressureDependence.Pmax = Quantity(Pmax, Punits) rmg.pressureDependence.Pcount = Pcount rmg.pressureDependence.generatePressureList() - + # Process grain size and count rmg.pressureDependence.maximumGrainSize = Quantity(maximumGrainSize) rmg.pressureDependence.minimumGrainCount = minimumNumberOfGrains - + # Process maximum atoms rmg.pressureDependence.maximumAtoms = maximumAtoms - + rmg.pressureDependence.activeJRotor = True rmg.pressureDependence.activeKRotor = True rmg.pressureDependence.rmgmode = True + def options(name='Seed', generateSeedEachIteration=True, saveSeedToDatabase=False, units='si', saveRestartPeriod=None, - generateOutputHTML=False, generatePlots=False, saveSimulationProfiles=False, verboseComments=False, + generateOutputHTML=False, generatePlots=False, saveSimulationProfiles=False, verboseComments=False, saveEdgeSpecies=False, keepIrreversible=False, trimolecularProductReversible=True, wallTime='00:00:00:00'): if saveRestartPeriod: logging.warning("`saveRestartPeriod` flag was set in the input file, but this feature has been removed. Please " @@ -703,25 +740,26 @@ def options(name='Seed', generateSeedEachIteration=True, saveSeedToDatabase=Fals "http://reactionmechanismgenerator.github.io/RMG-Py/users/rmg/input.html#restarting-from-a-seed-mechanism") rmg.name = name - rmg.generateSeedEachIteration=generateSeedEachIteration - rmg.saveSeedToDatabase=saveSeedToDatabase + rmg.generateSeedEachIteration = generateSeedEachIteration + rmg.saveSeedToDatabase = saveSeedToDatabase rmg.units = units if generateOutputHTML: logging.warning('Generate Output HTML option was turned on. Note that this will slow down model generation.') - rmg.generateOutputHTML = generateOutputHTML + rmg.generateOutputHTML = generateOutputHTML rmg.generatePlots = generatePlots rmg.saveSimulationProfiles = saveSimulationProfiles rmg.verboseComments = verboseComments if saveEdgeSpecies: - logging.warning('Edge species saving was turned on. This will slow down model generation for large simulations.') + logging.warning( + 'Edge species saving was turned on. 
This will slow down model generation for large simulations.') rmg.saveEdgeSpecies = saveEdgeSpecies rmg.keepIrreversible = keepIrreversible rmg.trimolecularProductReversible = trimolecularProductReversible rmg.wallTime = wallTime -def generatedSpeciesConstraints(**kwargs): - validConstraints = [ +def generatedSpeciesConstraints(**kwargs): + valid_constraints = [ 'allowed', 'maximumCarbonAtoms', 'maximumOxygenAtoms', @@ -736,28 +774,27 @@ def generatedSpeciesConstraints(**kwargs): ] for key, value in kwargs.items(): - if key not in validConstraints: + if key not in valid_constraints: raise InputError('Invalid generated species constraint {0!r}.'.format(key)) - + rmg.speciesConstraints[key] = value + def thermoCentralDatabase(host, - port, - username, - password, - application): - + port, + username, + password, + application): from rmgpy.data.thermo import ThermoCentralDatabaseInterface rmg.thermoCentralDatabase = ThermoCentralDatabaseInterface(host, - port, - username, - password, - application) - + port, + username, + password, + application) + def uncertainty(localAnalysis=False, globalAnalysis=False, uncorrelated=True, correlated=True, localNumber=10, globalNumber=5, terminationTime=None, pceRunTime=1800, logx=True): - if not localAnalysis and globalAnalysis: logging.info('Enabling local uncertainty analysis as prerequisite for running global uncertainty analysis.') @@ -775,9 +812,9 @@ def uncertainty(localAnalysis=False, globalAnalysis=False, uncorrelated=True, co def restartFromSeed(path=None, coreSeed=None, edgeSeed=None, filters=None, speciesMap=None): - parentDir = os.path.dirname(rmg.inputFile) + parent_dir = os.path.dirname(rmg.inputFile) rmg.restart = True - docLink = 'http://reactionmechanismgenerator.github.io/RMG-Py/users/rmg/input.html#restarting-from-a-seed-mechanism.' + doc_link = 'http://reactionmechanismgenerator.github.io/RMG-Py/users/rmg/input.html#restarting-from-a-seed-mechanism.' if path: if any((coreSeed, edgeSeed, filters, speciesMap)): @@ -785,10 +822,10 @@ def restartFromSeed(path=None, coreSeed=None, edgeSeed=None, filters=None, speci 'seed mechanism should be given as `path`, or the path for each of the required files ' 'should be explicitly given, but not both. Please take one approach or the other. For ' 'further information see the RMG documentation on restarting from a seed mechanism at ' - '{0}.'.format(docLink)) + '{0}.'.format(doc_link)) if not os.path.isabs(path): - path = os.path.join(parentDir, path) + path = os.path.join(parent_dir, path) if not os.path.exists(path): raise ValueError('Unable to find the path to the restart seed folder. {0} does not exist'.format(path)) @@ -805,18 +842,19 @@ def restartFromSeed(path=None, coreSeed=None, edgeSeed=None, filters=None, speci rmg.filtersPath = filters rmg.speciesMapPath = speciesMap - rmgPaths = [rmg.coreSeedPath, rmg.edgeSeedPath, rmg.filtersPath, rmg.speciesMapPath] - pathErrors = [filePath for filePath in rmgPaths if not os.path.exists(filePath)] + rmg_paths = [rmg.coreSeedPath, rmg.edgeSeedPath, rmg.filtersPath, rmg.speciesMapPath] + path_errors = [filePath for filePath in rmg_paths if not os.path.exists(filePath)] - if pathErrors: + if path_errors: if path: - raise InputError('Could not find one or more of the required files/directories for restarting from a seed ' + raise InputError('Could not find one or more of the required files/directories for restarting from a seed ' 'mechanism: {0}. Try specifying the file paths individually. 
See the RMG documentation ' - 'at {1} for more information'.format(pathErrors, docLink)) + 'at {1} for more information'.format(path_errors, doc_link)) else: raise InputError('Could not find one or more of the required files/directories for restarting from a seed ' - 'mechanism: {0}. See the RMG documentation at {1} for more information'.format(pathErrors, - docLink)) + 'mechanism: {0}. See the RMG documentation at {1} for more information'.format(path_errors, + doc_link)) + ################################################################################ @@ -828,13 +866,14 @@ def setGlobalRMG(rmg0): global rmg rmg = rmg0 + def readInputFile(path, rmg0): """ Read an RMG input file at `path` on disk into the :class:`RMG` object `rmg`. """ global rmg, speciesDict - + full_path = os.path.abspath(os.path.expandvars(path)) try: f = open(full_path) @@ -845,15 +884,15 @@ def readInputFile(path, rmg0): logging.info('Reading input file "{0}"...'.format(full_path)) logging.info(f.read()) - f.seek(0)# return to beginning of file + f.seek(0) # return to beginning of file setGlobalRMG(rmg0) rmg.reactionModel = CoreEdgeReactionModel() rmg.initialSpecies = [] rmg.reactionSystems = [] speciesDict = {} - - global_context = { '__builtins__': None } + + global_context = {'__builtins__': None} local_context = { '__builtins__': None, 'True': True, @@ -883,15 +922,15 @@ def readInputFile(path, rmg0): } try: - exec f in global_context, local_context + exec(f, global_context, local_context) except (NameError, TypeError, SyntaxError) as e: logging.error('The input file "{0}" was invalid:'.format(full_path)) logging.exception(e) raise finally: f.close() - - rmg.speciesConstraints['explicitlyAllowedMolecules'] = [] + + rmg.speciesConstraints['explicitlyAllowedMolecules'] = [] # convert keys from species names into species objects. 
for reactionSystem in rmg.reactionSystems: @@ -902,7 +941,8 @@ def readInputFile(path, rmg0): rmg.quantumMechanics.initialize() logging.info('') - + + ################################################################################ def readThermoInputFile(path, rmg0): @@ -912,7 +952,7 @@ def readThermoInputFile(path, rmg0): """ global rmg, speciesDict - + full_path = os.path.abspath(os.path.expandvars(path)) try: f = open(full_path) @@ -928,8 +968,8 @@ def readThermoInputFile(path, rmg0): rmg.initialSpecies = [] rmg.reactionSystems = [] speciesDict = {} - - global_context = { '__builtins__': None } + + global_context = {'__builtins__': None} local_context = { '__builtins__': None, 'True': True, @@ -947,7 +987,7 @@ def readThermoInputFile(path, rmg0): } try: - exec f in global_context, local_context + exec(f, global_context, local_context) except (NameError, TypeError, SyntaxError) as e: logging.error('The input file "{0}" was invalid:'.format(full_path)) logging.exception(e) @@ -959,7 +999,8 @@ def readThermoInputFile(path, rmg0): rmg.quantumMechanics.setDefaultOutputDirectory(rmg.outputDirectory) rmg.quantumMechanics.initialize() - logging.info('') + logging.info('') + ################################################################################ @@ -973,7 +1014,7 @@ def saveInputFile(path, rmg): # Databases f.write('database(\n') - #f.write(' "{0}",\n'.format(rmg.databaseDirectory)) + # f.write(' "{0}",\n'.format(rmg.databaseDirectory)) f.write(' thermoLibraries = {0!r},\n'.format(rmg.thermoLibraries)) f.write(' reactionLibraries = {0!r},\n'.format(rmg.reactionLibraries)) f.write(' seedMechanisms = {0!r},\n'.format(rmg.seedMechanisms)) @@ -991,48 +1032,48 @@ def saveInputFile(path, rmg): f.write(')\n\n') # Species - for species in rmg.initialSpecies: + for spcs in rmg.initialSpecies: f.write('species(\n') - f.write(' label = "{0}",\n'.format(species.label)) - f.write(' reactive = {0},\n'.format(species.reactive)) + f.write(' label = "{0}",\n'.format(spcs.label)) + f.write(' reactive = {0},\n'.format(spcs.reactive)) f.write(' structure = adjacencyList(\n') f.write('"""\n') - f.write(species.molecule[0].toAdjacencyList()) + f.write(spcs.molecule[0].toAdjacencyList()) f.write('"""),\n') f.write(')\n\n') - + # Reaction systems for system in rmg.reactionSystems: if rmg.solvent: f.write('liquidReactor(\n') - f.write(' temperature = ({0:g},"{1!s}"),\n'.format(system.T.getValue(),system.T.units)) + f.write(' temperature = ({0:g},"{1!s}"),\n'.format(system.T.getValue(), system.T.units)) f.write(' initialConcentrations={\n') - for species, conc in system.initialConcentrations.iteritems(): - f.write(' "{0!s}": ({1:g},"{2!s}"),\n'.format(species.label,conc.getValue(),conc.units)) + for spcs, conc in system.initialConcentrations.items(): + f.write(' "{0!s}": ({1:g},"{2!s}"),\n'.format(spcs.label, conc.getValue(), conc.units)) else: f.write('simpleReactor(\n') - f.write(' temperature = ({0:g},"{1!s}"),\n'.format(system.T.getValue(),system.T.units)) + f.write(' temperature = ({0:g},"{1!s}"),\n'.format(system.T.getValue(), system.T.units)) # Convert the pressure from SI pascal units to bar here # Do something more fancy later for converting to user's desired units for both T and P.. 
- f.write(' pressure = ({0:g},"{1!s}"),\n'.format(system.P.getValue(),system.P.units)) + f.write(' pressure = ({0:g},"{1!s}"),\n'.format(system.P.getValue(), system.P.units)) f.write(' initialMoleFractions={\n') - for species, molfrac in system.initialMoleFractions.iteritems(): - f.write(' "{0!s}": {1:g},\n'.format(species.label, molfrac)) - f.write(' },\n') - + for spcs, molfrac in system.initialMoleFractions.items(): + f.write(' "{0!s}": {1:g},\n'.format(spcs.label, molfrac)) + f.write(' },\n') + # Termination criteria conversions = '' for term in system.termination: if isinstance(term, TerminationTime): - f.write(' terminationTime = ({0:g},"{1!s}"),\n'.format(term.time.getValue(),term.time.units)) - + f.write(' terminationTime = ({0:g},"{1!s}"),\n'.format(term.time.getValue(), term.time.units)) + else: conversions += ' "{0:s}": {1:g},\n'.format(term.species.label, term.conversion) - if conversions: + if conversions: f.write(' terminationConversion = {\n') f.write(conversions) f.write(' },\n') - + # Sensitivity analysis if system.sensitiveSpecies: sensitivity = [] @@ -1040,12 +1081,12 @@ def saveInputFile(path, rmg): sensitivity.append(item.label) f.write(' sensitivity = {0},\n'.format(sensitivity)) f.write(' sensitivityThreshold = {0},\n'.format(system.sensitivityThreshold)) - + f.write(')\n\n') - + if rmg.solvent: f.write("solvation(\n solvent = '{0!s}'\n)\n\n".format(rmg.solvent)) - + # Simulator tolerances f.write('simulator(\n') f.write(' atol = {0:g},\n'.format(rmg.absoluteTolerance)) @@ -1070,7 +1111,8 @@ def saveInputFile(path, rmg): if rmg.pressureDependence: f.write('pressureDependence(\n') f.write(' method = {0!r},\n'.format(rmg.pressureDependence.method)) - f.write(' maximumGrainSize = ({0:g},"{1!s}"),\n'.format(rmg.pressureDependence.grainSize.getValue(),rmg.pressureDependence.grainSize.units)) + f.write(' maximumGrainSize = ({0:g},"{1!s}"),\n'.format(rmg.pressureDependence.grainSize.getValue(), + rmg.pressureDependence.grainSize.units)) f.write(' minimumNumberOfGrains = {0},\n'.format(rmg.pressureDependence.grainCount)) f.write(' temperatures = ({0:g},{1:g},"{2!s}",{3:d}),\n'.format( rmg.pressureDependence.Tmin.getValue(), @@ -1084,10 +1126,10 @@ def saveInputFile(path, rmg): rmg.pressureDependence.Pmax.units, rmg.pressureDependence.Pcount, )) - f.write(' interpolation = {0},\n'.format(rmg.pressureDependence.interpolationModel)) + f.write(' interpolation = {0},\n'.format(rmg.pressureDependence.interpolationModel)) f.write(' maximumAtoms = {0}, \n'.format(rmg.pressureDependence.maximumAtoms)) f.write(')\n\n') - + # Quantum Mechanics if rmg.quantumMechanics: f.write('quantumMechanics(\n') @@ -1099,20 +1141,22 @@ def saveInputFile(path, rmg): else: f.write(' fileStore = None,\n') if rmg.quantumMechanics.settings.scratchDirectory: - f.write(' scratchDirectory = {0!r},\n'.format(os.path.split(rmg.quantumMechanics.settings.scratchDirectory)[0])) + f.write(' scratchDirectory = {0!r},\n'.format( + os.path.split(rmg.quantumMechanics.settings.scratchDirectory)[0])) else: f.write(' scratchDirectory = None,\n') f.write(' onlyCyclics = {0},\n'.format(rmg.quantumMechanics.settings.onlyCyclics)) f.write(' maxRadicalNumber = {0},\n'.format(rmg.quantumMechanics.settings.maxRadicalNumber)) f.write(')\n\n') - + # Species Constraints if rmg.speciesConstraints: f.write('generatedSpeciesConstraints(\n') - for constraint, value in sorted(rmg.speciesConstraints.items(), key=lambda constraint: constraint[0]): - if value is not None: f.write(' {0} = {1},\n'.format(constraint,value)) + for 
constraint, value in sorted(list(rmg.speciesConstraints.items()), key=lambda constraint: constraint[0]): + if value is not None: + f.write(' {0} = {1},\n'.format(constraint, value)) f.write(')\n\n') - + # Options f.write('options(\n') f.write(' units = "{0}",\n'.format(rmg.units)) @@ -1125,9 +1169,10 @@ def saveInputFile(path, rmg): f.write(' verboseComments = {0},\n'.format(rmg.verboseComments)) f.write(' wallTime = {0},\n'.format(rmg.wallTime)) f.write(')\n\n') - + f.close() + def getInput(name): """ Returns the RMG input object that corresponds diff --git a/rmgpy/rmg/inputTest.py b/rmgpy/rmg/inputTest.py index 1900ed3589..45b259e74e 100644 --- a/rmgpy/rmg/inputTest.py +++ b/rmgpy/rmg/inputTest.py @@ -28,10 +28,11 @@ # # ############################################################################### -import unittest +import unittest +import rmgpy.rmg.input as inp from rmgpy.rmg.main import RMG -from rmgpy.rmg import input as inp + ################################################### @@ -45,11 +46,13 @@ def setUpModule(self): rmg = RMG() inp.setGlobalRMG(rmg) + def tearDownModule(self): # remove RMG object global rmg rmg = None + class TestInputDatabase(unittest.TestCase): """ Contains unit tests rmgpy.rmg.input.database @@ -69,31 +72,33 @@ def testImportingDatabaseReactionLibrariesFromString(self): inp.database(reactionLibraries=['test']) self.assertIsInstance(rmg.reactionLibraries[0], tuple) self.assertFalse(rmg.reactionLibraries[0][1]) - + def testImportingDatabaseReactionLibrariesFromFalseTuple(self): """ Test that we can import Reaction Libraries using the Tuple False form. """ global rmg # add database properties to RMG - inp.database(reactionLibraries=[('test',False)]) + inp.database(reactionLibraries=[('test', False)]) self.assertIsInstance(rmg.reactionLibraries[0], tuple) self.assertFalse(rmg.reactionLibraries[0][1]) - + def testImportingDatabaseReactionLibrariesFromTrueTuple(self): """ Test that we can import Reaction Libraries using the Tuple True form. """ global rmg # add database properties to RMG - inp.database(reactionLibraries=[('test',True)]) + inp.database(reactionLibraries=[('test', True)]) self.assertIsInstance(rmg.reactionLibraries[0], tuple) self.assertTrue(rmg.reactionLibraries[0][1]) + class TestInputMLEstimator(unittest.TestCase): """ Contains unit tests rmgpy.rmg.input.mlEstimator """ + def tearDown(self): # remove the reactionLibraries value global rmg @@ -110,15 +115,17 @@ def testMLEstimator(self): self.assertIsInstance(rmg.ml_estimator, MLEstimator) self.assertIsInstance(rmg.ml_settings, dict) + class TestInputThemoCentralDatabase(unittest.TestCase): """ Contains unit tests rmgpy.rmg.input.thermoCentralDatabase """ + def tearDown(self): # remove the reactionLibraries value global rmg rmg.thermoCentralDatabase = None - + def testThemoCentralDatabase(self): """ Test that we can input. 
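These inputTest.py hunks lean on the module-level pattern used by rmgpy.rmg.input: keyword handlers such as database() and thermoCentralDatabase() write onto a shared RMG object installed via setGlobalRMG(), so the test module creates one global instance up front and each tearDown() resets the attributes it touched. A minimal, self-contained sketch of that pattern; FakeRMG, set_global_rmg, and thermo_central_database are hypothetical names used only for illustration, not RMG-Py API:

    import unittest


    class FakeRMG(object):
        """Hypothetical stand-in for the job object that the input handlers mutate."""
        def __init__(self):
            self.thermoCentralDatabase = None


    _rmg = None  # module-level handle, analogous to the global `rmg` in rmgpy.rmg.input


    def set_global_rmg(rmg0):
        # Install the shared object, mirroring inp.setGlobalRMG(rmg).
        global _rmg
        _rmg = rmg0


    def thermo_central_database(host, port):
        # Simplified stand-in for an input-file keyword handler writing onto the global object.
        _rmg.thermoCentralDatabase = (host, port)


    class TestGlobalRMGPattern(unittest.TestCase):
        def setUp(self):
            set_global_rmg(FakeRMG())

        def tearDown(self):
            _rmg.thermoCentralDatabase = None  # reset shared state so tests stay independent

        def test_handler_writes_to_global(self):
            thermo_central_database('some_host', 0)
            self.assertEqual(_rmg.thermoCentralDatabase, ('some_host', 0))


    if __name__ == '__main__':
        unittest.main()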
@@ -126,11 +133,11 @@ def testThemoCentralDatabase(self): global rmg # add database properties to RMG inp.thermoCentralDatabase( - host='some_host', - port=0, - username='some_usr', - password='some_pw', - application='some_app' + host='some_host', + port=0, + username='some_usr', + password='some_pw', + application='some_app' ) self.assertEqual(rmg.thermoCentralDatabase.host, 'some_host') self.assertEqual(rmg.thermoCentralDatabase.port, 0) diff --git a/rmgpy/rmg/listener.py b/rmgpy/rmg/listener.py index 6510562dd1..fba655a819 100644 --- a/rmgpy/rmg/listener.py +++ b/rmgpy/rmg/listener.py @@ -30,9 +30,11 @@ import csv import os + from rmgpy.chemkin import getSpeciesIdentifier from rmgpy.tools.plot import SimulationPlot + class SimulationProfileWriter(object): """ SimulationProfileWriter listens to a ReactionSystem subject @@ -55,9 +57,10 @@ class SimulationProfileWriter(object): reactionSystem.detach(listener) """ + def __init__(self, outputDirectory, reaction_sys_index, coreSpecies): super(SimulationProfileWriter, self).__init__() - + self.outputDirectory = outputDirectory self.reaction_sys_index = reaction_sys_index self.coreSpecies = coreSpecies @@ -78,8 +81,8 @@ def update(self, reactionSystem): 'solver', 'simulation_{0}_{1:d}.csv'.format( self.reaction_sys_index + 1, len(self.coreSpecies) - ) ) + ) header = ['Time (s)', 'Volume (m^3)'] for spc in self.coreSpecies: @@ -89,11 +92,11 @@ def update(self, reactionSystem): worksheet = csv.writer(csvfile) # add header row: - worksheet.writerow(header) + worksheet.writerow(header) # add mole fractions: worksheet.writerows(reactionSystem.snapshots) - + class SimulationProfilePlotter(object): """ @@ -114,10 +117,10 @@ class SimulationProfilePlotter(object): reactionSystem.detach(listener) """ - + def __init__(self, outputDirectory, reaction_sys_index, coreSpecies): super(SimulationProfilePlotter, self).__init__() - + self.outputDirectory = outputDirectory self.reaction_sys_index = reaction_sys_index self.coreSpecies = coreSpecies @@ -129,20 +132,20 @@ def update(self, reactionSystem): - number of core species """ - csvFile = os.path.join( + csv_file = os.path.join( self.outputDirectory, 'solver', 'simulation_{0}_{1:d}.csv'.format( self.reaction_sys_index + 1, len(self.coreSpecies) - ) ) - - pngFile = os.path.join( + ) + + png_file = os.path.join( self.outputDirectory, 'solver', 'simulation_{0}_{1:d}.png'.format( self.reaction_sys_index + 1, len(self.coreSpecies) - ) ) - - SimulationPlot(csvFile=csvFile, numSpecies=10, ylabel='Moles').plot(pngFile) + ) + + SimulationPlot(csvFile=csv_file, numSpecies=10, ylabel='Moles').plot(png_file) diff --git a/rmgpy/rmg/main.py b/rmgpy/rmg/main.py index 6d0f6c8fab..193c1959e6 100644 --- a/rmgpy/rmg/main.py +++ b/rmgpy/rmg/main.py @@ -32,62 +32,60 @@ This module contains the main execution functionality for Reaction Mechanism Generator (RMG). 
""" +from __future__ import print_function -import sys -import warnings -import time +import copy +import gc import logging import os -import shutil import resource -import psutil +import shutil +import sys +import time +import warnings +from copy import deepcopy -import numpy as np -import gc -import copy import h5py -from copy import deepcopy -from scipy.optimize import brute -from cantera import ck2cti +import numpy as np +import psutil import yaml +from cantera import ck2cti +from scipy.optimize import brute -from rmgpy.rmg.settings import ModelSettings +import rmgpy.util as util +from rmgpy.rmg.model import Species, CoreEdgeReactionModel +from rmgpy.rmg.pdep import PDepNetwork +from rmgpy import settings +from rmgpy.chemkin import ChemkinWriter from rmgpy.constraints import failsSpeciesConstraints -from rmgpy.molecule import Molecule -from rmgpy.solver.base import TerminationTime, TerminationConversion -from rmgpy.solver.simple import SimpleReactor +from rmgpy.data.base import Entry +from rmgpy.data.kinetics.family import TemplateReaction +from rmgpy.data.kinetics.library import KineticsLibrary, LibraryReaction from rmgpy.data.rmg import RMGDatabase from rmgpy.exceptions import ForbiddenStructureException, DatabaseError, CoreError -from rmgpy.data.kinetics.library import KineticsLibrary, LibraryReaction -from rmgpy.data.kinetics.family import KineticsFamily, TemplateReaction -from rmgpy.rmg.pdep import PDepReaction - -from rmgpy.data.base import Entry -from rmgpy import settings - from rmgpy.kinetics.diffusionLimited import diffusionLimiter - -from model import Species, CoreEdgeReactionModel +from rmgpy.molecule import Molecule +from rmgpy.qm.main import QMDatabaseWriter from rmgpy.reaction import Reaction -from pdep import PDepNetwork -import rmgpy.util as util - -from rmgpy.chemkin import ChemkinWriter -from rmgpy.yml import RMSWriter -from rmgpy.rmg.output import OutputHTMLWriter from rmgpy.rmg.listener import SimulationProfileWriter, SimulationProfilePlotter -from rmgpy.qm.main import QMDatabaseWriter +from rmgpy.rmg.output import OutputHTMLWriter +from rmgpy.rmg.pdep import PDepReaction +from rmgpy.rmg.settings import ModelSettings +from rmgpy.solver.base import TerminationTime, TerminationConversion +from rmgpy.solver.simple import SimpleReactor from rmgpy.stats import ExecutionStatsWriter from rmgpy.thermo.thermoengine import submit from rmgpy.tools.plot import plot_sensitivity from rmgpy.tools.uncertainty import Uncertainty, process_local_results +from rmgpy.yml import RMSWriter ################################################################################ solvent = None # Maximum number of user defined processors -maxproc = 1 +maxproc = 1 + class RMG(util.Subject): """ @@ -146,7 +144,7 @@ class RMG(util.Subject): =================================== ================================================ """ - + def __init__(self, inputFile=None, outputDirectory=None): super(RMG, self).__init__() self.inputFile = inputFile @@ -158,7 +156,7 @@ def __init__(self, inputFile=None, outputDirectory=None): self.Tmax = 0.0 self.Pmin = 0.0 self.Pmax = 0.0 - + def clear(self): """ Clear all loaded information about the job (except the file paths). 
@@ -176,17 +174,17 @@ def clear(self): self.diffusionLimiter = None self.surfaceSiteDensity = None self.bindingEnergies = None - + self.reactionModel = None self.reactionSystems = None self.database = None self.reactionSystem = None - + self.modelSettingsList = [] self.simulatorSettingsList = [] self.balanceSpecies = None - - self.filterReactions=False + + self.filterReactions = False self.trimolecular = False self.unimolecularReact = None self.bimolecularReact = None @@ -195,7 +193,7 @@ def clear(self): self.bimolecularThreshold = None self.trimolecularThreshold = None self.termination = [] - + self.done = False self.verbosity = logging.INFO self.units = 'si' @@ -219,7 +217,7 @@ def clear(self): self.edgeSeedPath = None self.filtersPath = None self.speciesMapPath = None - + self.name = 'Seed' self.generateSeedEachIteration = True self.saveSeedToDatabase = False @@ -228,13 +226,13 @@ def clear(self): self.uncertainty = None self.execTime = [] - + def loadInput(self, path=None): """ Load an RMG job from the input file located at `inputFile`, or from the `inputFile` attribute if not given as a parameter. """ - from input import readInputFile + from rmgpy.rmg.input import readInputFile if path is None: path = self.inputFile readInputFile(path, self) self.reactionModel.kineticsEstimator = self.kineticsEstimator @@ -253,24 +251,25 @@ def loadInput(self, path=None): self.reactionModel.verboseComments = self.verboseComments self.reactionModel.saveEdgeSpecies = self.saveEdgeSpecies - + if self.quantumMechanics: self.reactionModel.quantumMechanics = self.quantumMechanics - + def loadThermoInput(self, path=None): """ Load an Thermo Estimation job from a thermo input file located at `inputFile`, or from the `inputFile` attribute if not given as a parameter. """ - from input import readThermoInputFile - if path is None: path = self.inputFile + from rmgpy.rmg.input import readThermoInputFile + if path is None: + path = self.inputFile if not self.outputDirectory: self.outputDirectory = os.path.dirname(path) readThermoInputFile(path, self) - + if self.quantumMechanics: self.reactionModel.quantumMechanics = self.quantumMechanics - + def checkInput(self): """ Check for a few common mistakes in the input file. @@ -290,7 +289,6 @@ def checkInput(self): else: assert (reactionSystem.Prange[1].value_si < self.pressureDependence.Pmax.value_si), "Reaction system P is above pressureDependence range." assert (reactionSystem.Prange[0].value_si > self.pressureDependence.Pmin.value_si), "Reaction system P is below pressureDependence range." - assert any([not s.reactive for s in reactionSystem.initialMoleFractions.keys()]), \ "Pressure Dependence calculations require at least one inert (nonreacting) species for the bath gas." @@ -302,60 +300,71 @@ def checkLibraries(self): Loading a Liquid phase library obtained in another solvent than the one defined in the input file. Other checks can be added here. 
""" - #Liquid phase simulation checks + # Liquid phase simulation checks if self.solvent: - #check thermo librairies - for libIter in self.database.thermo.libraries.iterkeys(): + # check thermo librairies + for libIter in self.database.thermo.libraries.keys(): if self.database.thermo.libraries[libIter].solvent: - if not self.solvent == self.database.thermo.libraries[libIter].solvent: - raise DatabaseError('''Thermo library "{2}" was obtained in "{1}" and cannot be used with this liquid phase simulation in "{0}" - '''.format(self.solvent, self.database.thermo.libraries[libIter].solvent, self.database.thermo.libraries[libIter].name)) - #Check kinetic librairies - for libIter in self.database.kinetics.libraries.iterkeys(): + if not self.solvent == self.database.thermo.libraries[libIter].solvent: + raise DatabaseError("Thermo library '{2}' was obtained in '{1}' and cannot be used with this " + "liquid phase simulation in '{0}' " + .format(self.solvent, + self.database.thermo.libraries[libIter].solvent, + self.database.thermo.libraries[libIter].name)) + # Check kinetic librairies + for libIter in self.database.kinetics.libraries.keys(): if self.database.kinetics.libraries[libIter].solvent: - if not self.solvent == self.database.kinetics.libraries[libIter].solvent: - raise DatabaseError('''Kinetics library "{2}" was obtained in "{1}" and cannot be used with this liquid phase simulation in "{0}" - '''.format(self.solvent, self.database.kinetics.libraries[libIter].solvent, self.database.kinetics.libraries[libIter].name)) - #Gas phase simulation checks + if not self.solvent == self.database.kinetics.libraries[libIter].solvent: + raise DatabaseError("Kinetics library '{2}' was obtained in '{1}' and cannot be used with this " + "liquid phase simulation in '{0}'" + .format(self.solvent, + self.database.kinetics.libraries[libIter].solvent, + self.database.kinetics.libraries[libIter].name)) + # Gas phase simulation checks else: - #check thermo librairies - for libIter in self.database.thermo.libraries.iterkeys(): + # check thermo librairies + for libIter in self.database.thermo.libraries.keys(): if self.database.thermo.libraries[libIter].solvent: - raise DatabaseError('''Thermo library "{1}" was obtained in "{0}" solvent and cannot be used in gas phase simulation - '''.format(self.database.thermo.libraries[libIter].solvent, self.database.thermo.libraries[libIter].name)) - #Check kinetic librairies - for libIter in self.database.kinetics.libraries.iterkeys(): + raise DatabaseError("Thermo library '{1}' was obtained in '{0}' solvent and cannot be used in gas " + "phase simulation" + .format(self.database.thermo.libraries[libIter].solvent, + self.database.thermo.libraries[libIter].name)) + # Check kinetic librairies + for libIter in self.database.kinetics.libraries.keys(): if self.database.kinetics.libraries[libIter].solvent: - raise DatabaseError('''Kinetics library "{1}" was obtained in "{0}" solvent and cannot be used in gas phase simulation - '''.format(self.database.kinetics.libraries[libIter].solvent, self.database.kinetics.libraries[libIter].name)) - + raise DatabaseError("Kinetics library '{1}' was obtained in '{0}' solvent and cannot be used in " + "gas phase simulation" + .format(self.database.kinetics.libraries[libIter].solvent, + self.database.kinetics.libraries[libIter].name)) + def saveInput(self, path=None): """ Save an RMG job to the input file located at `path`, or from the `outputFile` attribute if not given as a parameter. 
""" - from input import saveInputFile - if path is None: path = self.outputFile + from rmgpy.rmg.input import saveInputFile + if path is None: + path = self.outputFile saveInputFile(path, self) - + def loadDatabase(self): - + self.database = RMGDatabase() self.database.load( - path = self.databaseDirectory, - thermoLibraries = self.thermoLibraries, - transportLibraries = self.transportLibraries, - reactionLibraries = [library for library, option in self.reactionLibraries], - seedMechanisms = self.seedMechanisms, - kineticsFamilies = self.kineticsFamilies, - kineticsDepositories = self.kineticsDepositories, - #frequenciesLibraries = self.statmechLibraries, - depository = False, # Don't bother loading the depository information, as we don't use it + path=self.databaseDirectory, + thermoLibraries=self.thermoLibraries, + transportLibraries=self.transportLibraries, + reactionLibraries=[library for library, option in self.reactionLibraries], + seedMechanisms=self.seedMechanisms, + kineticsFamilies=self.kineticsFamilies, + kineticsDepositories=self.kineticsDepositories, + # frequenciesLibraries = self.statmechLibraries, + depository=False, # Don't bother loading the depository information, as we don't use it ) # Turn off reversibility for families with three products if desired if not self.trimolecularProductReversible: - for family in self.database.kinetics.families.itervalues(): + for family in self.database.kinetics.families.values(): if len(family.forwardTemplate.products) > 2: family.reversible = False family.reverseTemplate = None @@ -363,53 +372,55 @@ def loadDatabase(self): family.reverse = None # Determine if trimolecular families are present - for family in self.database.kinetics.families.itervalues(): + for family in self.database.kinetics.families.values(): if len(family.forwardTemplate.reactants) > 2: logging.info('Trimolecular reactions are turned on') self.trimolecular = True break # Only check products if we want to react them if not self.trimolecular and self.trimolecularProductReversible: - for family in self.database.kinetics.families.itervalues(): + for family in self.database.kinetics.families.values(): if len(family.forwardTemplate.products) > 2: logging.info('Trimolecular reactions are turned on') self.trimolecular = True break - #check libraries + # check libraries self.checkLibraries() - + if self.bindingEnergies: self.database.thermo.setDeltaAtomicAdsorptionEnergies(self.bindingEnergies) - #set global variable solvent + # set global variable solvent if self.solvent: global solvent - solvent=self.solvent - + solvent = self.solvent + if self.kineticsEstimator == 'rate rules': if '!training' not in self.kineticsDepositories: logging.info('Adding rate rules from training set in kinetics families...') # Temporarily remove species constraints for the training reactions - copySpeciesConstraints=copy.copy(self.speciesConstraints) - self.speciesConstraints={} + copy_species_constraints = copy.copy(self.speciesConstraints) + self.speciesConstraints = {} for family in self.database.kinetics.families.values(): if not family.autoGenerated: family.addKineticsRulesFromTrainingSet(thermoDatabase=self.database.thermo) - #If requested by the user, write a text file for each kinetics family detailing the source of each entry + # If requested by the user, write a text file for each kinetics family detailing the source of each entry if self.kineticsdatastore: - logging.info('Writing sources of kinetic entries in family {0} to text file'.format(family.label)) + logging.info( + 'Writing 
sources of kinetic entries in family {0} to text file'.format(family.label)) path = os.path.join(self.outputDirectory, 'kinetics_database', family.label + '.txt') with open(path, 'w') as f: - for template_label, entries in family.rules.entries.iteritems(): - f.write("Template [{0}] uses the {1} following source(s):\n".format(template_label,str(len(entries)))) + for template_label, entries in family.rules.entries.items(): + f.write("Template [{0}] uses the {1} following source(s):\n".format(template_label, + str(len(entries)))) for entry_index, entry in enumerate(entries): f.write(str(entry_index+1) + ". " + entry.shortDesc + "\n" + entry.longDesc + "\n") f.write('\n') f.write('\n') - self.speciesConstraints=copySpeciesConstraints + self.speciesConstraints = copy_species_constraints else: logging.info('Training set explicitly not added to rate rules in kinetics families...') logging.info('Filling in rate rules in kinetics families by averaging...') @@ -422,16 +433,16 @@ def initialize(self, **kwargs): Initialize an RMG job using the command-line arguments `args` as returned by the :mod:`argparse` package. """ - + # Save initialization time self.initializationTime = time.time() - + # Log start timestamp logging.info('RMG execution initiated at ' + time.asctime() + '\n') - + # Print out RMG header self.logHeader() - + # Read input file self.loadInput(self.inputFile) @@ -441,11 +452,11 @@ def initialize(self, **kwargs): # Check input file self.checkInput() - - #Properly set filterReactions to initialize flags properly + + # Properly set filterReactions to initialize flags properly if len(self.modelSettingsList) > 0: self.filterReactions = self.modelSettingsList[0].filterReactions - + # Make output subdirectories util.makeOutputSubdirectory(self.outputDirectory, 'pdep') util.makeOutputSubdirectory(self.outputDirectory, 'solver') @@ -474,19 +485,19 @@ def initialize(self, **kwargs): # Load restart seed mechanism (if specified) if self.restart: # Copy the restart files to a separate folder so that the job does not overwrite it - restartDir = os.path.join(self.outputDirectory, 'previous_restart') - coreRestart = os.path.join(restartDir, 'restart') - edgeRestart = os.path.join(restartDir, 'restart_edge') - filtersRestart = os.path.join(restartDir, 'filters') + restart_dir = os.path.join(self.outputDirectory, 'previous_restart') + core_restart = os.path.join(restart_dir, 'restart') + edge_restart = os.path.join(restart_dir, 'restart_edge') + filters_restart = os.path.join(restart_dir, 'filters') util.makeOutputSubdirectory(self.outputDirectory, 'previous_restart') - shutil.copytree(self.coreSeedPath, coreRestart) - shutil.copytree(self.edgeSeedPath, edgeRestart) - os.mkdir(filtersRestart) - shutil.copyfile(self.filtersPath, os.path.join(filtersRestart, 'filters.h5')) - shutil.copyfile(self.speciesMapPath, os.path.join(filtersRestart, 'species_map.yml')) + shutil.copytree(self.coreSeedPath, core_restart) + shutil.copytree(self.edgeSeedPath, edge_restart) + os.mkdir(filters_restart) + shutil.copyfile(self.filtersPath, os.path.join(filters_restart, 'filters.h5')) + shutil.copyfile(self.speciesMapPath, os.path.join(filters_restart, 'species_map.yml')) # Load the seed mechanism to get the core and edge species - self.database.kinetics.loadLibraries(restartDir, libraries=['restart', 'restart_edge']) + self.database.kinetics.loadLibraries(restart_dir, libraries=['restart', 'restart_edge']) self.seedMechanisms.append('restart') self.reactionLibraries.append(('restart_edge', False)) @@ -494,16 +505,16 @@ def 
initialize(self, **kwargs): if self.trimolecular: for reactionSystem in self.reactionSystems: reactionSystem.trimolecular = True - + # Do all liquid-phase startup things: if self.solvent: - solventData = self.database.solvation.getSolventData(self.solvent) - diffusionLimiter.enable(solventData, self.database.solvation) + solvent_data = self.database.solvation.getSolventData(self.solvent) + diffusionLimiter.enable(solvent_data, self.database.solvation) logging.info("Setting solvent data for {0}".format(self.solvent)) # Set solvent viscosity for reaction filtering for reactionSystem in self.reactionSystems: - reactionSystem.viscosity = solventData.getSolventViscosity(reactionSystem.T.value_si) + reactionSystem.viscosity = solvent_data.getSolventViscosity(reactionSystem.T.value_si) try: self.wallTime = kwargs['walltime'] @@ -516,7 +527,7 @@ def initialize(self, **kwargs): self.wallTime = int(data[-1]) + 60 * int(data[-2]) + 3600 * int(data[-3]) + 86400 * int(data[-4]) # Initialize reaction model - + # Seed mechanisms: add species and reactions from seed mechanism # DON'T generate any more reactions for the seed species at this time for seedMechanism in self.seedMechanisms: @@ -528,24 +539,29 @@ def initialize(self, **kwargs): self.reactionModel.addReactionLibraryToEdge(library) # Also always add in a few bath gases (since RMG-Java does) - for label, smiles in [('Ar','[Ar]'), ('He','[He]'), ('Ne','[Ne]'), ('N2','N#N')]: + for label, smiles in [('Ar', '[Ar]'), ('He', '[He]'), ('Ne', '[Ne]'), ('N2', 'N#N')]: molecule = Molecule().fromSMILES(smiles) - spec, isNew = self.reactionModel.makeNewSpecies(molecule, label=label, reactive=False) - if isNew: + spec, is_new = self.reactionModel.makeNewSpecies(molecule, label=label, reactive=False) + if is_new: self.initialSpecies.append(spec) # Perform species constraints and forbidden species checks on input species for spec in self.initialSpecies: if self.database.forbiddenStructures.isMoleculeForbidden(spec.molecule[0]): if 'allowed' in self.speciesConstraints and 'input species' in self.speciesConstraints['allowed']: - logging.warning('Input species {0} is globally forbidden. It will behave as an inert unless found in a seed mechanism or reaction library.'.format(spec.label)) + logging.warning('Input species {0} is globally forbidden. It will behave as an inert unless found ' + 'in a seed mechanism or reaction library.'.format(spec.label)) else: - raise ForbiddenStructureException("Input species {0} is globally forbidden. You may explicitly allow it, but it will remain inert unless found in a seed mechanism or reaction library.".format(spec.label)) + raise ForbiddenStructureException("Input species {0} is globally forbidden. You may explicitly " + "allow it, but it will remain inert unless found in a seed " + "mechanism or reaction library.".format(spec.label)) if failsSpeciesConstraints(spec): if 'allowed' in self.speciesConstraints and 'input species' in self.speciesConstraints['allowed']: self.speciesConstraints['explicitlyAllowedMolecules'].append(spec.molecule[0]) else: - raise ForbiddenStructureException("Species constraints forbids input species {0}. Please reformulate constraints, remove the species, or explicitly allow it.".format(spec.label)) + raise ForbiddenStructureException("Species constraints forbids input species {0}. Please " + "reformulate constraints, remove the species, or explicitly " + "allow it.".format(spec.label)) # For liquidReactor, checks whether the solvent is listed as one of the initial species. 
         if self.solvent:
@@ -553,28 +569,30 @@ def initialize(self, **kwargs):
             for spc in solvent_structure_list:
                 self.database.solvation.checkSolventinInitialSpecies(self, spc)
-        #Check to see if user has input Singlet O2 into their input file or libraries
-        #This constraint is special in that we only want to check it once in the input instead of every time a species is made
+        # Check to see if user has input Singlet O2 into their input file or libraries
+        # This constraint is special in that we only want to check it once in the input instead of every time a species is made
         if 'allowSingletO2' in self.speciesConstraints and self.speciesConstraints['allowSingletO2']:
             pass
         else:
-            #Here we get a list of all species that from the user input
-            allInputtedSpecies=[spec for spec in self.initialSpecies]
-            #Because no iterations have taken place, the only things in the core are from seed mechanisms
-            allInputtedSpecies.extend(self.reactionModel.core.species)
-            #Because no iterations have taken place, the only things in the edge are from reaction libraries
-            allInputtedSpecies.extend(self.reactionModel.edge.species)
-
-            O2Singlet=Molecule().fromSMILES('O=O')
-            for spec in allInputtedSpecies:
+            # Here we get a list of all species from the user input
+            all_inputted_species = [spec for spec in self.initialSpecies]
+            # Because no iterations have taken place, the only things in the core are from seed mechanisms
+            all_inputted_species.extend(self.reactionModel.core.species)
+            # Because no iterations have taken place, the only things in the edge are from reaction libraries
+            all_inputted_species.extend(self.reactionModel.edge.species)
+
+            O2Singlet = Molecule().fromSMILES('O=O')
+            for spec in all_inputted_species:
                 if spec.isIsomorphic(O2Singlet):
-                    raise ForbiddenStructureException("""Species constraints forbids input species {0}
-                    RMG expects the triplet form of oxygen for correct usage in reaction families. Please change your input to SMILES='[O][O]'
-                    If you actually want to use the singlet state, set the allowSingletO2=True inside of the Species Constraints block in your input file.
-                    """.format(spec.label))
+                    raise ForbiddenStructureException("Species constraints forbids input species {0}. RMG expects the "
+                                                      "triplet form of oxygen for correct usage in reaction families. "
+                                                      "Please change your input to SMILES='[O][O]'. If you actually "
+                                                      "want to use the singlet state, set the allowSingletO2=True "
+                                                      "inside of the Species Constraints block in your input file."
+                                                      .format(spec.label))
         for spec in self.initialSpecies:
-            submit(spec,self.solvent)
+            submit(spec, self.solvent)
         # Add nonreactive species (e.g.
bath gases) to core first # This is necessary so that the PDep algorithm can identify the bath gas @@ -585,16 +603,16 @@ def initialize(self, **kwargs): if spec.reactive: self.reactionModel.enlarge(spec) - #chatelak: store constant SPC indices in the reactor attributes if any constant SPC provided in the input file - #advantages to write it here: this is run only once (as species indexes does not change over the generation) + # chatelak: store constant SPC indices in the reactor attributes if any constant SPC provided in the input file + # advantages to write it here: this is run only once (as species indexes does not change over the generation) if self.solvent is not None: for index, reactionSystem in enumerate(self.reactionSystems): - if reactionSystem.constSPCNames is not None: #if no constant species provided do nothing - reactionSystem.get_constSPCIndices(self.reactionModel.core.species) ##call the function to identify indices in the solver + if reactionSystem.constSPCNames is not None: # if no constant species provided do nothing + reactionSystem.get_constSPCIndices( + self.reactionModel.core.species) # call the function to identify indices in the solver self.initializeReactionThresholdAndReactFlags() self.reactionModel.initializeIndexSpeciesDict() - def register_listeners(self): """ @@ -609,7 +627,7 @@ def register_listeners(self): self.attach(OutputHTMLWriter(self.outputDirectory)) if self.quantumMechanics: - self.attach(QMDatabaseWriter()) + self.attach(QMDatabaseWriter()) self.attach(ExecutionStatsWriter(self.outputDirectory)) @@ -617,17 +635,16 @@ def register_listeners(self): for index, reactionSystem in enumerate(self.reactionSystems): reactionSystem.attach(SimulationProfileWriter( - self.outputDirectory, index, self.reactionModel.core.species)) + self.outputDirectory, index, self.reactionModel.core.species)) reactionSystem.attach(SimulationProfilePlotter( - self.outputDirectory, index, self.reactionModel.core.species)) - + self.outputDirectory, index, self.reactionModel.core.species)) def execute(self, **kwargs): """ Execute an RMG job using the command-line arguments `args` as returned by the :mod:`argparse` package. """ - + self.initialize(**kwargs) # register listeners @@ -644,7 +661,7 @@ def execute(self, **kwargs): except AttributeError: # For LiquidReactor, Pmin and Pmax remain with the default value of `None` pass - + self.rmg_memories = [] logging.info('Initialization complete. 
Starting model generation.\n') @@ -668,7 +685,7 @@ def execute(self, **kwargs): atol=self.simulatorSettingsList[0].atol, rtol=self.simulatorSettingsList[0].rtol, filterReactions=True, - conditions = self.rmg_memories[index].get_cond(), + conditions=self.rmg_memories[index].get_cond(), ) self.updateReactionThresholdAndReactFlags( @@ -687,9 +704,9 @@ def execute(self, **kwargs): # React core species to enlarge edge self.reactionModel.enlarge(reactEdge=True, - unimolecularReact=self.unimolecularReact, - bimolecularReact=self.bimolecularReact, - trimolecularReact=self.trimolecularReact) + unimolecularReact=self.unimolecularReact, + bimolecularReact=self.bimolecularReact, + trimolecularReact=self.trimolecularReact) if not np.isinf(self.modelSettingsList[0].toleranceThermoKeepSpeciesInEdge): self.reactionModel.setThermodynamicFilteringParameters( @@ -702,75 +719,76 @@ def execute(self, **kwargs): if not np.isinf(self.modelSettingsList[0].toleranceThermoKeepSpeciesInEdge): self.reactionModel.thermoFilterDown(maximumEdgeSpecies=self.modelSettingsList[0].maximumEdgeSpecies) - + logging.info('Completed initial enlarge edge step.\n') - + self.saveEverything() - + if self.generateSeedEachIteration: self.makeSeedMech(firstTime=True) - maxNumSpcsHit = False #default - - for q,modelSettings in enumerate(self.modelSettingsList): - if len(self.simulatorSettingsList) > 1: - simulatorSettings = self.simulatorSettingsList[q] - else: #if they only provide one input for simulator use that everytime - simulatorSettings = self.simulatorSettingsList[0] + max_num_spcs_hit = False # default + + for q, modelSettings in enumerate(self.modelSettingsList): + if len(self.simulatorSettingsList) > 1: + simulator_settings = self.simulatorSettingsList[q] + else: # if they only provide one input for simulator use that everytime + simulator_settings = self.simulatorSettingsList[0] self.filterReactions = modelSettings.filterReactions - logging.info('Beginning model generation stage {0}...\n'.format(q+1)) - + logging.info('Beginning model generation stage {0}...\n'.format(q + 1)) + self.done = False # Main RMG loop while not self.done: if self.generateSeedEachIteration: self.makeSeedMech() - + self.reactionModel.iterationNum += 1 self.done = True - - allTerminated = True - numCoreSpecies = len(self.reactionModel.core.species) - - prunableSpecies = self.reactionModel.edge.species[:] - prunableNetworks = self.reactionModel.networkList[:] - + + all_terminated = True + num_core_species = len(self.reactionModel.core.species) + + prunable_species = self.reactionModel.edge.species[:] + prunable_networks = self.reactionModel.networkList[:] + for index, reactionSystem in enumerate(self.reactionSystems): - - reactionSystem.prunableSpecies = prunableSpecies #these lines reset pruning for a new cycle - reactionSystem.prunableNetworks = prunableNetworks - reactionSystem.reset_max_edge_species_rate_ratios() - - for p in xrange(reactionSystem.nSims): - reactorDone = True - objectsToEnlarge = [] + + reactionSystem.prunableSpecies = prunable_species # these lines reset pruning for a new cycle + reactionSystem.prunableNetworks = prunable_networks + reactionSystem.reset_max_edge_species_rate_ratios() + + for p in range(reactionSystem.nSims): + reactor_done = True + objects_to_enlarge = [] self.reactionSystem = reactionSystem # Conduct simulation - logging.info('Conducting simulation of reaction system %s...' % (index+1)) + logging.info('Conducting simulation of reaction system %s...' 
% (index + 1)) prune = True - + self.reactionModel.adjustSurface() - - if numCoreSpecies < modelSettings.minCoreSizeForPrune: + + if num_core_species < modelSettings.minCoreSizeForPrune: # Turn pruning off if we haven't reached minimum core size. prune = False - - try: terminated,resurrected,obj,newSurfaceSpecies,newSurfaceReactions,t,x = reactionSystem.simulate( - coreSpecies = self.reactionModel.core.species, - coreReactions = self.reactionModel.core.reactions, - edgeSpecies = self.reactionModel.edge.species, - edgeReactions = self.reactionModel.edge.reactions, - surfaceSpecies = self.reactionModel.surface.species, - surfaceReactions = self.reactionModel.surface.reactions, - pdepNetworks = self.reactionModel.networkList, - prune = prune, - modelSettings=modelSettings, - simulatorSettings = simulatorSettings, - conditions = self.rmg_memories[index].get_cond() - ) + + try: + terminated, resurrected, obj, new_surface_species, new_surface_reactions, t, x = reactionSystem.simulate( + coreSpecies=self.reactionModel.core.species, + coreReactions=self.reactionModel.core.reactions, + edgeSpecies=self.reactionModel.edge.species, + edgeReactions=self.reactionModel.edge.reactions, + surfaceSpecies=self.reactionModel.surface.species, + surfaceReactions=self.reactionModel.surface.reactions, + pdepNetworks=self.reactionModel.networkList, + prune=prune, + modelSettings=modelSettings, + simulatorSettings=simulator_settings, + conditions=self.rmg_memories[index].get_cond() + ) except: logging.error("Model core reactions:") if len(self.reactionModel.core.reactions) > 5: @@ -781,121 +799,130 @@ def execute(self, **kwargs): if not self.generateSeedEachIteration: # Then we haven't saved the seed mechanism yet self.makeSeedMech(firstTime=True) # Just in case the user wants to restart from this raise - - self.rmg_memories[index].add_t_conv_N(t,x,len(obj)) + + self.rmg_memories[index].add_t_conv_N(t, x, len(obj)) self.rmg_memories[index].generate_cond() - log_conditions(self.rmg_memories,index) + log_conditions(self.rmg_memories, index) + + reactor_done = self.reactionModel.addNewSurfaceObjects(obj, new_surface_species, + new_surface_reactions, reactionSystem) - reactorDone = self.reactionModel.addNewSurfaceObjects(obj,newSurfaceSpecies,newSurfaceReactions,reactionSystem) - - allTerminated = allTerminated and terminated + all_terminated = all_terminated and terminated logging.info('') - + # If simulation is invalid, note which species should be added to # the core if obj != [] and not (obj is None): - objectsToEnlarge = self.processToSpeciesNetworks(obj) - - reactorDone = False + objects_to_enlarge = self.processToSpeciesNetworks(obj) + + reactor_done = False # Enlarge objects identified by the simulation for enlarging # These should be Species or Network objects logging.info('') - - objectsToEnlarge = list(set(objectsToEnlarge)) - + + objects_to_enlarge = list(set(objects_to_enlarge)) + # Add objects to enlarge to the core first - for objectToEnlarge in objectsToEnlarge: + for objectToEnlarge in objects_to_enlarge: self.reactionModel.enlarge(objectToEnlarge) - + if modelSettings.filterReactions: # Run a raw simulation to get updated reaction system threshold values # Run with the same conditions as with pruning off - tempModelSettings = deepcopy(modelSettings) - tempModelSettings.fluxToleranceKeepInEdge = 0 + temp_model_settings = deepcopy(modelSettings) + temp_model_settings.fluxToleranceKeepInEdge = 0 if not resurrected: try: reactionSystem.simulate( - coreSpecies = self.reactionModel.core.species, - 
coreReactions = self.reactionModel.core.reactions, - edgeSpecies = [], - edgeReactions = [], - surfaceSpecies = self.reactionModel.surface.species, - surfaceReactions = self.reactionModel.surface.reactions, - pdepNetworks = self.reactionModel.networkList, - modelSettings = tempModelSettings, - simulatorSettings = simulatorSettings, - conditions = self.rmg_memories[index].get_cond() + coreSpecies=self.reactionModel.core.species, + coreReactions=self.reactionModel.core.reactions, + edgeSpecies=[], + edgeReactions=[], + surfaceSpecies=self.reactionModel.surface.species, + surfaceReactions=self.reactionModel.surface.reactions, + pdepNetworks=self.reactionModel.networkList, + modelSettings=temp_model_settings, + simulatorSettings=simulator_settings, + conditions=self.rmg_memories[index].get_cond() ) except: self.updateReactionThresholdAndReactFlags( - rxnSysUnimolecularThreshold = reactionSystem.unimolecularThreshold, - rxnSysBimolecularThreshold = reactionSystem.bimolecularThreshold, - rxnSysTrimolecularThreshold = reactionSystem.trimolecularThreshold, + rxnSysUnimolecularThreshold=reactionSystem.unimolecularThreshold, + rxnSysBimolecularThreshold=reactionSystem.bimolecularThreshold, + rxnSysTrimolecularThreshold=reactionSystem.trimolecularThreshold, skipUpdate=True) - logging.warn('Reaction thresholds/flags for Reaction System {0} was not updated due to simulation failure'.format(index+1)) + logging.warning('Reaction thresholds/flags for Reaction System {0} was not updated ' + 'due to simulation failure'.format(index + 1)) else: self.updateReactionThresholdAndReactFlags( - rxnSysUnimolecularThreshold = reactionSystem.unimolecularThreshold, - rxnSysBimolecularThreshold = reactionSystem.bimolecularThreshold, - rxnSysTrimolecularThreshold = reactionSystem.trimolecularThreshold + rxnSysUnimolecularThreshold=reactionSystem.unimolecularThreshold, + rxnSysBimolecularThreshold=reactionSystem.bimolecularThreshold, + rxnSysTrimolecularThreshold=reactionSystem.trimolecularThreshold ) else: self.updateReactionThresholdAndReactFlags( - rxnSysUnimolecularThreshold = reactionSystem.unimolecularThreshold, - rxnSysBimolecularThreshold = reactionSystem.bimolecularThreshold, - rxnSysTrimolecularThreshold = reactionSystem.trimolecularThreshold, - skipUpdate = True + rxnSysUnimolecularThreshold=reactionSystem.unimolecularThreshold, + rxnSysBimolecularThreshold=reactionSystem.bimolecularThreshold, + rxnSysTrimolecularThreshold=reactionSystem.trimolecularThreshold, + skipUpdate=True ) - logging.warn('Reaction thresholds/flags for Reaction System {0} was not updated due to resurrection'.format(index+1)) + logging.warning('Reaction thresholds/flags for Reaction System {0} was not updated due ' + 'to resurrection'.format(index + 1)) logging.info('') else: self.updateReactionThresholdAndReactFlags() if not np.isinf(modelSettings.toleranceThermoKeepSpeciesInEdge): - self.reactionModel.setThermodynamicFilteringParameters(self.Tmax, toleranceThermoKeepSpeciesInEdge=modelSettings.toleranceThermoKeepSpeciesInEdge, - minCoreSizeForPrune=modelSettings.minCoreSizeForPrune, - maximumEdgeSpecies=modelSettings.maximumEdgeSpecies, - reactionSystems=self.reactionSystems) - - oldEdgeSize = len(self.reactionModel.edge.reactions) - oldCoreSize = len(self.reactionModel.core.reactions) - self.reactionModel.enlarge(reactEdge=True, - unimolecularReact=self.unimolecularReact, - bimolecularReact=self.bimolecularReact, - trimolecularReact=self.trimolecularReact) - - if oldEdgeSize != len(self.reactionModel.edge.reactions) or oldCoreSize != 
len(self.reactionModel.core.reactions): - reactorDone = False - + self.reactionModel.setThermodynamicFilteringParameters(self.Tmax, + toleranceThermoKeepSpeciesInEdge=modelSettings.toleranceThermoKeepSpeciesInEdge, + minCoreSizeForPrune=modelSettings.minCoreSizeForPrune, + maximumEdgeSpecies=modelSettings.maximumEdgeSpecies, + reactionSystems=self.reactionSystems + ) + + old_edge_size = len(self.reactionModel.edge.reactions) + old_core_size = len(self.reactionModel.core.reactions) + self.reactionModel.enlarge(reactEdge=True, + unimolecularReact=self.unimolecularReact, + bimolecularReact=self.bimolecularReact, + trimolecularReact=self.trimolecularReact) + + if old_edge_size != len(self.reactionModel.edge.reactions) or old_core_size != len( + self.reactionModel.core.reactions): + reactor_done = False + if not np.isinf(self.modelSettingsList[0].toleranceThermoKeepSpeciesInEdge): self.reactionModel.thermoFilterDown(maximumEdgeSpecies=modelSettings.maximumEdgeSpecies) - - maxNumSpcsHit = len(self.reactionModel.core.species) >= modelSettings.maxNumSpecies + + max_num_spcs_hit = len(self.reactionModel.core.species) >= modelSettings.maxNumSpecies self.saveEverything() - if maxNumSpcsHit: # breaks the nSims loop + if max_num_spcs_hit: # breaks the nSims loop # self.done is still True, which will break the while loop break - if not reactorDone: + if not reactor_done: self.done = False - - if maxNumSpcsHit: # breaks the reactionSystems loop + + if max_num_spcs_hit: # breaks the reactionSystems loop break - if not self.done: # There is something that needs exploring/enlarging + if not self.done: # There is something that needs exploring/enlarging # If we reached our termination conditions, then try to prune # species from the edge - if allTerminated and modelSettings.fluxToleranceKeepInEdge>0.0: + if all_terminated and modelSettings.fluxToleranceKeepInEdge > 0.0: logging.info('Attempting to prune...') - self.reactionModel.prune(self.reactionSystems, modelSettings.fluxToleranceKeepInEdge, modelSettings.fluxToleranceMoveToCore, modelSettings.maximumEdgeSpecies, modelSettings.minSpeciesExistIterationsForPrune) + self.reactionModel.prune(self.reactionSystems, modelSettings.fluxToleranceKeepInEdge, + modelSettings.fluxToleranceMoveToCore, + modelSettings.maximumEdgeSpecies, + modelSettings.minSpeciesExistIterationsForPrune) # Perform garbage collection after pruning collected = gc.collect() - logging.info('Garbage collector: collected %d objects.' % (collected)) - + logging.info('Garbage collector: collected %d objects.' 
% collected) + # Consider stopping gracefully if the next iteration might take us # past the wall time if self.wallTime > 0 and len(self.execTime) > 1: @@ -904,19 +931,21 @@ def execute(self, **kwargs): if t + 3 * dt > self.wallTime: logging.info('MODEL GENERATION TERMINATED') logging.info('') - logging.info('There is not enough time to complete the next iteration before the wall time is reached.') + logging.info( + 'There is not enough time to complete the next iteration before the wall time is reached.') logging.info('The output model may be incomplete.') logging.info('') - coreSpec, coreReac, edgeSpec, edgeReac = self.reactionModel.getModelSize() - logging.info('The current model core has %s species and %s reactions' % (coreSpec, coreReac)) - logging.info('The current model edge has %s species and %s reactions' % (edgeSpec, edgeReac)) + core_spec, core_reac, edge_spec, edge_reac = self.reactionModel.getModelSize() + logging.info('The current model core has %s species and %s reactions' % (core_spec, core_reac)) + logging.info('The current model edge has %s species and %s reactions' % (edge_spec, edge_reac)) return - - if maxNumSpcsHit: #resets maxNumSpcsHit and continues the settings for loop - logging.info('The maximum number of species ({0}) has been hit, Exiting stage {1} ...'.format(modelSettings.maxNumSpecies,q+1)) - maxNumSpcsHit = False + + if max_num_spcs_hit: # resets maxNumSpcsHit and continues the settings for loop + logging.info('The maximum number of species ({0}) has been hit, Exiting stage {1} ...'.format( + modelSettings.maxNumSpecies, q + 1)) + max_num_spcs_hit = False continue - + # Save the final seed mechanism if self.generateSeedEachIteration: self.makeSeedMech() @@ -929,26 +958,29 @@ def execute(self, **kwargs): try: if any([s.containsSurfaceSite() for s in self.reactionModel.core.species]): self.generateCanteraFiles(os.path.join(self.outputDirectory, 'chemkin', 'chem-gas.inp'), - surfaceFile=(os.path.join(self.outputDirectory, 'chemkin', 'chem-surface.inp'))) + surfaceFile=( + os.path.join(self.outputDirectory, 'chemkin', 'chem-surface.inp'))) self.generateCanteraFiles(os.path.join(self.outputDirectory, 'chemkin', 'chem_annotated-gas.inp'), - surfaceFile=(os.path.join(self.outputDirectory, 'chemkin', 'chem_annotated-surface.inp'))) + surfaceFile=(os.path.join(self.outputDirectory, 'chemkin', + 'chem_annotated-surface.inp'))) else: # gas phase only self.generateCanteraFiles(os.path.join(self.outputDirectory, 'chemkin', 'chem.inp')) self.generateCanteraFiles(os.path.join(self.outputDirectory, 'chemkin', 'chem_annotated.inp')) except EnvironmentError: - logging.exception('Could not generate Cantera files due to EnvironmentError. Check read\write privileges in output directory.') + logging.exception('Could not generate Cantera files due to EnvironmentError. 
Check read\write privileges ' + 'in output directory.') except Exception: logging.exception('Could not generate Cantera files for some reason.') - + self.check_model() # Write output file logging.info('') logging.info('MODEL GENERATION COMPLETED') logging.info('') - coreSpec, coreReac, edgeSpec, edgeReac = self.reactionModel.getModelSize() - logging.info('The final model core has %s species and %s reactions' % (coreSpec, coreReac)) - logging.info('The final model edge has %s species and %s reactions' % (edgeSpec, edgeReac)) - + core_spec, core_reac, edge_spec, edge_reac = self.reactionModel.getModelSize() + logging.info('The final model core has %s species and %s reactions' % (core_spec, core_reac)) + logging.info('The final model edge has %s species and %s reactions' % (edge_spec, edge_reac)) + self.finish() def run_model_analysis(self, number=10): @@ -964,13 +996,13 @@ def run_model_analysis(self, number=10): if reactionSystem.sensitiveSpecies == ['all']: reactionSystem.sensitiveSpecies = self.reactionModel.core.species - sensWorksheet = [] + sens_worksheet = [] for spec in reactionSystem.sensitiveSpecies: - csvfilePath = os.path.join(self.outputDirectory, 'solver', + csvfile_path = os.path.join(self.outputDirectory, 'solver', 'sensitivity_{0}_SPC_{1}.csv'.format(index + 1, spec.index)) - sensWorksheet.append(csvfilePath) + sens_worksheet.append(csvfile_path) - terminated, resurrected, obj, surfaceSpecies, surfaceReactions, t, x = reactionSystem.simulate( + terminated, resurrected, obj, surface_species, surface_reactions, t, x = reactionSystem.simulate( coreSpecies=self.reactionModel.core.species, coreReactions=self.reactionModel.core.reactions, edgeSpecies=self.reactionModel.edge.species, @@ -979,7 +1011,7 @@ def run_model_analysis(self, number=10): surfaceReactions=[], pdepNetworks=self.reactionModel.networkList, sensitivity=True, - sensWorksheet=sensWorksheet, + sensWorksheet=sens_worksheet, modelSettings=ModelSettings(toleranceMoveToCore=1e8, toleranceInterruptSimulation=1e8), simulatorSettings=self.simulatorSettingsList[-1], conditions=reactionSystem.sensConditions, @@ -1023,7 +1055,7 @@ def run_uncertainty_analysis(self): self.kineticsFamilies, self.kineticsDepositories) # Temporarily remove species constraints for the training reactions self.speciesConstraints, speciesConstraintsCopy = {}, self.speciesConstraints - for family in self.database.kinetics.families.itervalues(): + for family in self.database.kinetics.families.values(): family.addKineticsRulesFromTrainingSet(thermoDatabase=self.database.thermo) family.fillKineticsRulesByAveragingUp(verbose=True) self.speciesConstraints = speciesConstraintsCopy @@ -1050,10 +1082,10 @@ def run_uncertainty_analysis(self): # Get simulation conditions for criteria in reactionSystem.termination: if isinstance(criteria, TerminationTime): - time = ([criteria.time.value], criteria.time.units) + time_criteria = ([criteria.time.value], criteria.time.units) break else: - time = self.uncertainty['time'] + time_criteria = self.uncertainty['time'] Tlist = ([reactionSystem.sensConditions['T']], 'K') Plist = ([reactionSystem.sensConditions['P']], 'Pa') molFracList = [reactionSystem.sensConditions.copy()] @@ -1066,57 +1098,59 @@ def run_uncertainty_analysis(self): job.loadModel() job.generateConditions( reactorTypeList=['IdealGasConstPressureTemperatureReactor'], - reactionTimeList=time, + reactionTimeList=time_criteria, molFracList=molFracList, Tlist=Tlist, Plist=Plist, ) # Extract uncertain parameters from local analysis - kParams = [] - gParams = [] + 
k_params = [] + g_params = [] for spc in reactionSystem.sensitiveSpecies: _, reaction_c, thermo_c = local_result[spc] for label, _, _ in reaction_c[:self.uncertainty['globalnum']]: if correlated: - kParam = label + k_param = label else: # For uncorrelated, we need the reaction index k_index = label.split(':')[0] # Looks like 'k1234: A+B=C+D' - kParam = int(k_index[1:]) - if kParam not in kParams: - kParams.append(kParam) + k_param = int(k_index[1:]) + if k_param not in k_params: + k_params.append(k_param) for label, _, _ in thermo_c[:self.uncertainty['globalnum']]: if correlated: - gParam = label + g_param = label else: # For uncorrelated, we need the species index match = re.search(r'dG\[\S+\((\d+)\)\]', label) - gParam = int(match.group(1)) - if gParam not in gParams: - gParams.append(gParam) + g_param = int(match.group(1)) + if g_param not in g_params: + g_params.append(g_param) - reactorPCEFactory = ReactorPCEFactory( + reactor_pce_factory = ReactorPCEFactory( cantera=job, outputSpeciesList=reactionSystem.sensitiveSpecies, - kParams=kParams, + kParams=k_params, kUncertainty=uncertainty.kineticInputUncertainties, - gParams=gParams, + gParams=g_params, gUncertainty=uncertainty.thermoInputUncertainties, correlated=correlated, logx=self.uncertainty['logx'], ) logging.info('Generating PCEs...') - reactorPCEFactory.generatePCE(runTime=self.uncertainty['pcetime']) + reactor_pce_factory.generatePCE(runTime=self.uncertainty['pcetime']) # Try a test point to see how well the PCE performs - reactorPCEFactory.compareOutput([random.uniform(-1.0,1.0) for i in range(len(kParams)+len(gParams))]) + reactor_pce_factory.compareOutput( + [random.uniform(-1.0, 1.0) for i in range(len(k_params) + len(g_params))]) # Analyze results and save statistics - reactorPCEFactory.analyzeResults() + reactor_pce_factory.analyzeResults() else: - logging.info('Unable to run uncertainty analysis. Must specify sensitivity analysis options in reactor options.') + logging.info('Unable to run uncertainty analysis. Must specify sensitivity analysis options in ' + 'reactor options.') def check_model(self): """ @@ -1126,7 +1160,7 @@ def check_model(self): # Check that no two species in core or edge are isomorphic for i, spc in enumerate(self.reactionModel.core.species): - for j in xrange(i): + for j in range(i): spc2 = self.reactionModel.core.species[j] if spc.isIsomorphic(spc2): raise CoreError( @@ -1136,7 +1170,7 @@ def check_model(self): ) for i, spc in enumerate(self.reactionModel.edge.species): - for j in xrange(i): + for j in range(i): spc2 = self.reactionModel.edge.species[j] if spc.isIsomorphic(spc2): logging.warning( @@ -1154,7 +1188,7 @@ def check_model(self): # Don't check collision limits for surface reactions. 
continue violator_list = rxn.check_collision_limit_violation(t_min=self.Tmin, t_max=self.Tmax, - p_min=self.Pmin, p_max=self.Pmax) + p_min=self.Pmin, p_max=self.Pmax) if violator_list: violators.extend(violator_list) num_rxn_violators += 1 @@ -1178,7 +1212,7 @@ def check_model(self): for violator in violators: rxn_string = str(violator[0]) kinetics = violator[0].kinetics - comment='' + comment = '' if isinstance(violator[0], TemplateReaction): comment = violator[0].kinetics.comment violator[0].kinetics.comment = '' # the comment is printed better when outside of the object @@ -1190,15 +1224,15 @@ def check_model(self): ratio = violator[2] condition = violator[3] violators_f.write('{0}\n{1}\n{2}\nDirection: {3}\nViolation factor: {4:.2f}\n' - 'Violation condition: {5}\n\n'.format( - rxn_string, kinetics, comment, direction, ratio, condition)) + 'Violation condition: {5}\n\n'.format(rxn_string, kinetics, comment, direction, + ratio, condition)) if isinstance(violator[0], TemplateReaction): # although this is the end of the run, restore the original comment violator[0].kinetics.comment = comment else: logging.info("No collision rate violators found.") - def makeSeedMech(self,firstTime=False): + def makeSeedMech(self, firstTime=False): """ causes RMG to make a seed mechanism out of the current chem_annotated.inp and species_dictionary.txt this seed mechanism is outputted in a seed folder within the run directory and automatically @@ -1209,113 +1243,115 @@ def makeSeedMech(self,firstTime=False): This also writes the filter tensors to the `filters` sub-folder for restarting an RMG job from a seed mechanism """ - + logging.info('Making seed mechanism...') - + name = self.name - - if self.saveSeedToDatabase and firstTime: #make sure don't overwrite current libraries - thermoNames = self.database.thermo.libraries.keys() - kineticsNames = self.database.kinetics.libraries.keys() - - if name in thermoNames or name in kineticsNames: + + if self.saveSeedToDatabase and firstTime: # make sure don't overwrite current libraries + thermo_names = list(self.database.thermo.libraries.keys()) + kinetics_names = list(self.database.kinetics.libraries.keys()) + + if name in thermo_names or name in kinetics_names: q = 1 - while name+str(q) in thermoNames or name+str(q) in kineticsNames: + while name + str(q) in thermo_names or name + str(q) in kinetics_names: q += 1 self.name = name + str(q) - - seedDir = os.path.join(self.outputDirectory,'seed') - filterDir = os.path.join(seedDir, 'filters') - tempSeedDir = os.path.join(self.outputDirectory, 'seed_tmp') - + + seed_dir = os.path.join(self.outputDirectory, 'seed') + filter_dir = os.path.join(seed_dir, 'filters') + temp_seed_dir = os.path.join(self.outputDirectory, 'seed_tmp') + if firstTime: - if os.path.exists(seedDir): # This is a seed from a previous RMG run. Delete it - shutil.rmtree(seedDir) + if os.path.exists(seed_dir): # This is a seed from a previous RMG run. Delete it + shutil.rmtree(seed_dir) else: # This is a seed from the previous iteration. 
Move it to a temporary directory in case we run into errors - os.rename(seedDir, os.path.join(tempSeedDir)) + os.rename(seed_dir, os.path.join(temp_seed_dir)) # Now that we have either deleted or moved the seed mechanism folder, create a new one - os.mkdir(seedDir) - + os.mkdir(seed_dir) + try: - speciesList = self.reactionModel.core.species - reactionList = self.reactionModel.core.reactions - edgeSpeciesList = self.reactionModel.edge.species - edgeReactionList = self.reactionModel.edge.reactions + species_list = self.reactionModel.core.species + reaction_list = self.reactionModel.core.reactions + edge_species_list = self.reactionModel.edge.species + edge_reaction_list = self.reactionModel.edge.reactions # Make species labels independent - oldLabels = self.makeSpeciesLabelsIndependent(speciesList) - edgeOldLabels = self.makeSpeciesLabelsIndependent(edgeSpeciesList) - + old_labels = self.makeSpeciesLabelsIndependent(species_list) + edge_old_labels = self.makeSpeciesLabelsIndependent(edge_species_list) # load kinetics library entries - kineticsLibrary = KineticsLibrary(name=name,autoGenerated=True) - kineticsLibrary.entries = {} - for i in range(len(reactionList)): - reaction = reactionList[i] + kinetics_library = KineticsLibrary(name=name, autoGenerated=True) + kinetics_library.entries = {} + for i in range(len(reaction_list)): + reaction = reaction_list[i] entry = Entry( - index = i+1, - label = reaction.toLabeledStr(), - item = reaction, - data = reaction.kinetics, - ) + index=i + 1, + label=reaction.toLabeledStr(), + item=reaction, + data=reaction.kinetics, + ) if 'rate rule' in reaction.kinetics.comment: entry.longDesc = reaction.kinetics.comment - elif hasattr(reaction,'library') and reaction.library: + elif hasattr(reaction, 'library') and reaction.library: entry.longDesc = 'Originally from reaction library: ' + reaction.library + "\n" + reaction.kinetics.comment else: entry.longDesc = reaction.kinetics.comment - kineticsLibrary.entries[i+1] = entry + kinetics_library.entries[i + 1] = entry # load kinetics library entries - edgeKineticsLibrary = KineticsLibrary(name=name+'_edge',autoGenerated=True) - edgeKineticsLibrary.entries = {} - for i,reaction in enumerate(edgeReactionList): + edge_kinetics_library = KineticsLibrary(name=name + '_edge', autoGenerated=True) + edge_kinetics_library.entries = {} + for i, reaction in enumerate(edge_reaction_list): entry = Entry( - index = i+1, - label = reaction.toLabeledStr(), - item = reaction, - data = reaction.kinetics, - ) + index=i + 1, + label=reaction.toLabeledStr(), + item=reaction, + data=reaction.kinetics, + ) try: entry.longDesc = 'Originally from reaction library: ' + reaction.library + "\n" + reaction.kinetics.comment except AttributeError: entry.longDesc = reaction.kinetics.comment - edgeKineticsLibrary.entries[i+1] = entry + edge_kinetics_library.entries[i + 1] = entry - #save in database + # save in database if self.saveSeedToDatabase: - databaseDirectory = settings['database.directory'] + database_directory = settings['database.directory'] try: - os.makedirs(os.path.join(databaseDirectory, 'kinetics', 'libraries',name)) + os.makedirs(os.path.join(database_directory, 'kinetics', 'libraries', name)) except: pass - kineticsLibrary.save(os.path.join(databaseDirectory, 'kinetics', 'libraries', name, 'reactions.py')) - kineticsLibrary.saveDictionary(os.path.join(databaseDirectory, 'kinetics', 'libraries', name, 'dictionary.txt')) + kinetics_library.save(os.path.join(database_directory, 'kinetics', 'libraries', name, 'reactions.py')) + 
kinetics_library.saveDictionary( + os.path.join(database_directory, 'kinetics', 'libraries', name, 'dictionary.txt')) try: - os.makedirs(os.path.join(databaseDirectory, 'kinetics', 'libraries',name+'_edge')) + os.makedirs(os.path.join(database_directory, 'kinetics', 'libraries', name + '_edge')) except: pass - edgeKineticsLibrary.save(os.path.join(databaseDirectory, 'kinetics', 'libraries', name+'_edge', 'reactions.py')) - edgeKineticsLibrary.saveDictionary(os.path.join(databaseDirectory, 'kinetics', 'libraries', name+'_edge', 'dictionary.txt')) + edge_kinetics_library.save( + os.path.join(database_directory, 'kinetics', 'libraries', name + '_edge', 'reactions.py')) + edge_kinetics_library.saveDictionary( + os.path.join(database_directory, 'kinetics', 'libraries', name + '_edge', 'dictionary.txt')) - #save in output directory + # save in output directory # Rename for the output directory, as these names should not be dynamic - kineticsLibrary.name = 'seed' - kineticsLibrary.save(os.path.join(seedDir, 'seed', 'reactions.py')) - kineticsLibrary.saveDictionary(os.path.join(seedDir, 'seed', 'dictionary.txt')) + kinetics_library.name = 'seed' + kinetics_library.save(os.path.join(seed_dir, 'seed', 'reactions.py')) + kinetics_library.saveDictionary(os.path.join(seed_dir, 'seed', 'dictionary.txt')) - edgeKineticsLibrary.name = 'seed_edge' - edgeKineticsLibrary.save(os.path.join(seedDir, 'seed_edge', 'reactions.py')) - edgeKineticsLibrary.saveDictionary(os.path.join(seedDir, 'seed_edge', 'dictionary.txt')) + edge_kinetics_library.name = 'seed_edge' + edge_kinetics_library.save(os.path.join(seed_dir, 'seed_edge', 'reactions.py')) + edge_kinetics_library.saveDictionary(os.path.join(seed_dir, 'seed_edge', 'dictionary.txt')) # Save the filter tensors - if not os.path.exists(filterDir): - os.mkdir(filterDir) - with h5py.File(os.path.join(filterDir, 'filters.h5'), 'w') as f: + if not os.path.exists(filter_dir): + os.mkdir(filter_dir) + with h5py.File(os.path.join(filter_dir, 'filters.h5'), 'w') as f: if self.unimolecularThreshold is not None: f.create_dataset('unimolecularThreshold', data=self.unimolecularThreshold) if self.bimolecularThreshold is not None: @@ -1324,10 +1360,10 @@ def makeSeedMech(self,firstTime=False): f.create_dataset('trimolecularThreshold', data=self.trimolecularThreshold) # Save a map of species indices - spcsMap = [spc.molecule[0].toAdjacencyList() for spc in self.reactionModel.core.species] + spcs_map = [spc.molecule[0].toAdjacencyList() for spc in self.reactionModel.core.species] - with open(os.path.join(filterDir, 'species_map.yml'), 'w') as f: - yaml.dump(data=spcsMap, stream=f) + with open(os.path.join(filter_dir, 'species_map.yml'), 'w') as f: + yaml.dump(data=spcs_map, stream=f) # Generate a file for restarting from a seed mechanism if this is not a restart job if firstTime and (not self.restart): @@ -1337,35 +1373,35 @@ def makeSeedMech(self,firstTime=False): f.write(''.join(inputFile.readlines())) # Finally, delete the seed mechanism from the previous iteration (if it exists) - if os.path.exists(tempSeedDir): - shutil.rmtree(tempSeedDir) + if os.path.exists(temp_seed_dir): + shutil.rmtree(temp_seed_dir) except Exception as e: # Move the seed mechanism from the previous iteration (if it exists) back - if os.path.exists(tempSeedDir): - shutil.rmtree(seedDir) # Delete the bad save of the current seed mechanism - os.rename(tempSeedDir, seedDir) + if os.path.exists(temp_seed_dir): + shutil.rmtree(seed_dir) # Delete the bad save of the current seed mechanism + 
os.rename(temp_seed_dir, seed_dir) logging.error('Error in writing the seed mechanism for the current iteration. The seed mechanism from ' 'the previous iteration has been restored') raise e - - #change labels back so species aren't renamed - for i,label in enumerate(oldLabels): - speciesList[i].label = label - - for i,label in enumerate(edgeOldLabels): - edgeSpeciesList[i].label = label - + + # change labels back so species aren't renamed + for i, label in enumerate(old_labels): + species_list[i].label = label + + for i, label in enumerate(edge_old_labels): + edge_species_list[i].label = label + def makeSpeciesLabelsIndependent(self, species): """ This method looks at the core species labels and makes sure none of them conflict If a conflict occurs, the second occurance will have '-2' added returns a list of the old labels """ - oldLabels = [] + old_labels = [] labels = set() for spec in species: - oldLabels.append(spec.label) + old_labels.append(spec.label) duplicate_index = 1 if '+' in spec.label: L = spec.molecule[0].getFormula() @@ -1378,141 +1414,140 @@ def makeSpeciesLabelsIndependent(self, species): spec.label = potential_label labels.add(potential_label) - - - return oldLabels - + + return old_labels + ################################################################################ - def processToSpeciesNetworks(self,obj): + def processToSpeciesNetworks(self, obj): """ breaks down the objects returned by simulate into Species and PDepNetwork components """ - + if isinstance(obj, PDepNetwork): out = [self.processPdepNetworks(obj)] return out elif isinstance(obj, Species): return [obj] - elif isinstance(obj,Reaction): + elif isinstance(obj, Reaction): return list(self.processReactionsToSpecies(obj)) - elif isinstance(obj,list): #list of species - rspcs = self.processReactionsToSpecies([k for k in obj if isinstance(k,Reaction)]) - spcs = {k for k in obj if isinstance(k,Species)} | rspcs - nworks,pspcs = self.processPdepNetworks([k for k in obj if isinstance(k,PDepNetwork)]) - spcs = list(spcs-pspcs) #avoid duplicate species - return spcs+nworks + elif isinstance(obj, list): # list of species + rspcs = self.processReactionsToSpecies([k for k in obj if isinstance(k, Reaction)]) + spcs = {k for k in obj if isinstance(k, Species)} | rspcs + nworks, pspcs = self.processPdepNetworks([k for k in obj if isinstance(k, PDepNetwork)]) + spcs = list(spcs - pspcs) # avoid duplicate species + return spcs + nworks else: raise TypeError("improper call, obj input was incorrect") - def processPdepNetworks(self,obj): + def processPdepNetworks(self, obj): """ properly processes PDepNetwork objects and lists of PDepNetwork objects returned from simulate """ - reactionSystem = self.reactionSystem + reaction_system = self.reactionSystem if isinstance(obj, PDepNetwork): # Determine which species in that network has the highest leak rate # We do this here because we need a temperature and pressure # Store the maximum leak species along with the associated network - ob = (obj, obj.getMaximumLeakSpecies(reactionSystem.T.value_si, reactionSystem.P.value_si)) + ob = (obj, obj.getMaximumLeakSpecies(reaction_system.T.value_si, reaction_system.P.value_si)) return ob - elif isinstance(obj,list): - spcs = [ob.getMaximumLeakSpecies(reactionSystem.T.value_si, reactionSystem.P.value_si) for ob in obj] - nworks = [(obj[i],spcs[i]) for i in xrange(len(obj))] - return nworks,set(spcs) + elif isinstance(obj, list): + spcs = [ob.getMaximumLeakSpecies(reaction_system.T.value_si, reaction_system.P.value_si) for ob in obj] + 
nworks = [(obj[i], spcs[i]) for i in range(len(obj))] + return nworks, set(spcs) else: raise TypeError("improper call, obj input was incorrect") - - def processReactionsToSpecies(self,obj): + + def processReactionsToSpecies(self, obj): """ properly processes Reaction objects and lists of Reaction objects returned from simulate """ - coreSpecies = self.reactionModel.core.species - filterFcn = lambda x: not ((x in coreSpecies)) #remove species already in core - if isinstance(obj,Reaction): - potentialSpcs = obj.reactants+obj.products - potentialSpcs = filter(filterFcn,potentialSpcs) - elif isinstance(obj,list) or isinstance(obj,set): - potentialSpcs = set() + core_species = self.reactionModel.core.species + filter_fcn = lambda x: not ((x in core_species)) # remove species already in core + if isinstance(obj, Reaction): + potential_spcs = obj.reactants + obj.products + potential_spcs = list(filter(filter_fcn, potential_spcs)) + elif isinstance(obj, list) or isinstance(obj, set): + potential_spcs = set() for ob in obj: - potentialSpcs = potentialSpcs | set(ob.reactants+ob.products) - potentialSpcs = {sp for sp in potentialSpcs if filterFcn(sp)} + potential_spcs = potential_spcs | set(ob.reactants + ob.products) + potential_spcs = {sp for sp in potential_spcs if filter_fcn(sp)} else: raise TypeError("improper call, obj input was incorrect") - return potentialSpcs + return potential_spcs def generateCanteraFiles(self, chemkinFile, **kwargs): """ Convert a chemkin mechanism chem.inp file to a cantera mechanism file chem.cti and save it in the cantera directory """ - transportFile = os.path.join(os.path.dirname(chemkinFile), 'tran.dat') - fileName = os.path.splitext(os.path.basename(chemkinFile))[0] + '.cti' - outName = os.path.join(self.outputDirectory, 'cantera', fileName) - if kwargs.has_key('surfaceFile'): - outName = outName.replace('-gas.', '.') - canteraDir = os.path.dirname(outName) + transport_file = os.path.join(os.path.dirname(chemkinFile), 'tran.dat') + file_name = os.path.splitext(os.path.basename(chemkinFile))[0] + '.cti' + out_name = os.path.join(self.outputDirectory, 'cantera', file_name) + if 'surfaceFile' in kwargs: + out_name = out_name.replace('-gas.', '.') + cantera_dir = os.path.dirname(out_name) try: - os.makedirs(canteraDir) + os.makedirs(cantera_dir) except OSError: - if not os.path.isdir(canteraDir): + if not os.path.isdir(cantera_dir): raise - if os.path.exists(outName): - os.remove(outName) + if os.path.exists(out_name): + os.remove(out_name) parser = ck2cti.Parser() try: - parser.convertMech(chemkinFile, transportFile=transportFile, outName=outName, quiet=True, permissive=True, **kwargs) + parser.convertMech(chemkinFile, transportFile=transport_file, outName=out_name, quiet=True, permissive=True, + **kwargs) except ck2cti.InputParseError: logging.exception("Error converting to Cantera format.") logging.info("Trying again without transport data file.") - parser.convertMech(chemkinFile, outName=outName, quiet=True, permissive=True, **kwargs) - + parser.convertMech(chemkinFile, outName=out_name, quiet=True, permissive=True, **kwargs) def initializeReactionThresholdAndReactFlags(self): - numCoreSpecies = len(self.reactionModel.core.species) + num_core_species = len(self.reactionModel.core.species) # Initialize everything to react by default, but we will handle the restart and filtering case immediately after - self.unimolecularReact = np.ones(numCoreSpecies, bool) - self.bimolecularReact = np.ones((numCoreSpecies, numCoreSpecies), bool) + self.unimolecularReact = 
np.ones(num_core_species, bool) + self.bimolecularReact = np.ones((num_core_species, num_core_species), bool) if self.trimolecular: - self.trimolecularReact = np.ones((numCoreSpecies, numCoreSpecies, numCoreSpecies), bool) + self.trimolecularReact = np.ones((num_core_species, num_core_species, num_core_species), bool) if self.filterReactions or self.restart: # Otherwise no need to initialize thresholds or fix react flags - self.unimolecularThreshold = np.zeros(numCoreSpecies, bool) - self.bimolecularThreshold = np.zeros((numCoreSpecies, numCoreSpecies), bool) + self.unimolecularThreshold = np.zeros(num_core_species, bool) + self.bimolecularThreshold = np.zeros((num_core_species, num_core_species), bool) if self.trimolecular: - self.trimolecularThreshold = np.zeros((numCoreSpecies, numCoreSpecies, numCoreSpecies), bool) + self.trimolecularThreshold = np.zeros((num_core_species, num_core_species, num_core_species), bool) if self.restart: # Load in the restart mapping with open(os.path.join(self.speciesMapPath), 'r') as f: - restartSpeciesList = yaml.safe_load(stream=f) + restart_species_list = yaml.safe_load(stream=f) - numRestartSpcs = len(restartSpeciesList) - restartSpeciesList = [Species().fromAdjacencyList(adj_list) for adj_list in restartSpeciesList] + num_restart_spcs = len(restart_species_list) + restart_species_list = [Species().fromAdjacencyList(adj_list) for adj_list in restart_species_list] # Load in the restart filter tensors with h5py.File(self.filtersPath, 'r') as f: try: - unimolecularThreshold_restart = f.get('unimolecularThreshold').value - bimolecularThreshold_restart = f.get('bimolecularThreshold').value + unimolecular_threshold_restart = f.get('unimolecularThreshold').value + bimolecular_threshold_restart = f.get('bimolecularThreshold').value if self.trimolecular: - trimolecularThreshold_restart = f.get('trimolecularThreshold').value + trimolecular_threshold_restart = f.get('trimolecularThreshold').value # Expand Thresholds to match number of species in the current model. # Note that we are about to reorder the core species to match the order in the restart seed # mechanism, so we only need to broadcast to the indices up to numRestartSpcs. 
Any indices after # this are additional species that should have `False` for their threshold - unimolecularThreshold = np.zeros(numCoreSpecies, bool) - unimolecularThreshold[:numRestartSpcs] = unimolecularThreshold_restart + unimolecular_threshold = np.zeros(num_core_species, bool) + unimolecular_threshold[:num_restart_spcs] = unimolecular_threshold_restart - bimolecularThreshold = np.zeros((numCoreSpecies, numCoreSpecies), bool) - bimolecularThreshold[:numRestartSpcs, :numRestartSpcs] = bimolecularThreshold_restart + bimolecular_threshold = np.zeros((num_core_species, num_core_species), bool) + bimolecular_threshold[:num_restart_spcs, :num_restart_spcs] = bimolecular_threshold_restart if self.trimolecular: - trimolecularThreshold = np.zeros((numCoreSpecies, numCoreSpecies, numCoreSpecies), bool) - trimolecularThreshold[:numRestartSpcs, :numRestartSpcs, :numRestartSpcs] = \ - trimolecularThreshold_restart + trimolecular_threshold = np.zeros((num_core_species, num_core_species, num_core_species), bool) + trimolecular_threshold[:num_restart_spcs, :num_restart_spcs, :num_restart_spcs] = \ + trimolecular_threshold_restart filters_found = True @@ -1528,7 +1563,7 @@ def initializeReactionThresholdAndReactFlags(self): # Reorder the core species to match the indices of the restart filter tensors reordered_core_species = [] - for spc in restartSpeciesList: + for spc in restart_species_list: for j, oldCoreSpc in enumerate(self.reactionModel.core.species): if oldCoreSpc.isIsomorphic(spc, strict=False): reordered_core_species.append(self.reactionModel.core.species.pop(j)) @@ -1560,31 +1595,31 @@ def initializeReactionThresholdAndReactFlags(self): if self.filterReactions: # Filling in the filter thresholds will suffice # Fill in the newly initialized filter tensors - self.unimolecularThreshold = unimolecularThreshold - self.bimolecularThreshold = bimolecularThreshold + self.unimolecularThreshold = unimolecular_threshold + self.bimolecularThreshold = bimolecular_threshold if self.trimolecular: - self.trimolecularThreshold = trimolecularThreshold + self.trimolecularThreshold = trimolecular_threshold else: # We must set the react flags instead. If it was `True` in the threshold, it should not react - self.unimolecularReact = np.logical_not(unimolecularThreshold) - self.bimolecularReact = np.logical_not(bimolecularThreshold) + self.unimolecularReact = np.logical_not(unimolecular_threshold) + self.bimolecularReact = np.logical_not(bimolecular_threshold) if self.trimolecular: - self.trimolecularReact = np.logical_not(trimolecularThreshold) + self.trimolecularReact = np.logical_not(trimolecular_threshold) else: # Assume that all species found in the restart core seed have already been reacted if self.filterReactions: # Filling in the filter thresholds will suffice - self.unimolecularThreshold[:numRestartSpcs] = True - self.bimolecularThreshold[:numRestartSpcs, :numRestartSpcs] = True + self.unimolecularThreshold[:num_restart_spcs] = True + self.bimolecularThreshold[:num_restart_spcs, :num_restart_spcs] = True if self.trimolecular: - self.trimolecularThreshold[:numRestartSpcs, :numRestartSpcs, :numRestartSpcs] = True + self.trimolecularThreshold[:num_restart_spcs, :num_restart_spcs, :num_restart_spcs] = True else: # We must set the react flags instead. 
# Don't react any species that were present in the restart core seed - self.unimolecularReact[:numRestartSpcs] = False - self.bimolecularReact[:numRestartSpcs, :numRestartSpcs] = False + self.unimolecularReact[:num_restart_spcs] = False + self.bimolecularReact[:num_restart_spcs, :num_restart_spcs] = False if self.trimolecular: - self.trimolecularReact[:numRestartSpcs, :numRestartSpcs, :numRestartSpcs] = False - + self.trimolecularReact[:num_restart_spcs, :num_restart_spcs, :num_restart_spcs] = False + def updateReactionThresholdAndReactFlags(self, rxnSysUnimolecularThreshold=None, rxnSysBimolecularThreshold=None, @@ -1593,80 +1628,77 @@ def updateReactionThresholdAndReactFlags(self, """ updates the length and boolean value of the unimolecular and bimolecular react and threshold flags """ - numCoreSpecies = len(self.reactionModel.core.species) - prevNumCoreSpecies = len(self.unimolecularReact) - new_core_species = numCoreSpecies > prevNumCoreSpecies + num_core_species = len(self.reactionModel.core.species) + prev_num_core_species = len(self.unimolecularReact) + new_core_species = num_core_species > prev_num_core_species # Always reset the react arrays from prior iterations - self.unimolecularReact = np.zeros((numCoreSpecies), bool) - self.bimolecularReact = np.zeros((numCoreSpecies, numCoreSpecies), bool) + self.unimolecularReact = np.zeros((num_core_species), bool) + self.bimolecularReact = np.zeros((num_core_species, num_core_species), bool) if self.trimolecular: - self.trimolecularReact = np.zeros((numCoreSpecies, numCoreSpecies, numCoreSpecies), bool) + self.trimolecularReact = np.zeros((num_core_species, num_core_species, num_core_species), bool) if self.filterReactions: if new_core_species: # Expand the threshold arrays if there were new core species added - unimolecularThreshold = np.zeros((numCoreSpecies), bool) - bimolecularThreshold = np.zeros((numCoreSpecies, numCoreSpecies), bool) + unimolecular_threshold = np.zeros((num_core_species), bool) + bimolecular_threshold = np.zeros((num_core_species, num_core_species), bool) # Broadcast original thresholds - unimolecularThreshold[:prevNumCoreSpecies] = self.unimolecularThreshold - bimolecularThreshold[:prevNumCoreSpecies,:prevNumCoreSpecies] = self.bimolecularThreshold - self.unimolecularThreshold = unimolecularThreshold - self.bimolecularThreshold = bimolecularThreshold + unimolecular_threshold[:prev_num_core_species] = self.unimolecularThreshold + bimolecular_threshold[:prev_num_core_species, :prev_num_core_species] = self.bimolecularThreshold + self.unimolecularThreshold = unimolecular_threshold + self.bimolecularThreshold = bimolecular_threshold if self.trimolecular: - trimolecularThreshold = np.zeros((numCoreSpecies, numCoreSpecies, numCoreSpecies), bool) - trimolecularThreshold[:prevNumCoreSpecies, - :prevNumCoreSpecies, - :prevNumCoreSpecies] = self.trimolecularThreshold - self.trimolecularThreshold = trimolecularThreshold - + trimolecular_threshold = np.zeros((num_core_species, num_core_species, num_core_species), bool) + trimolecular_threshold[:prev_num_core_species, :prev_num_core_species, :prev_num_core_species] = self.trimolecularThreshold + self.trimolecularThreshold = trimolecular_threshold + if skipUpdate: return - + # Always update the react and threshold arrays - for i in xrange(numCoreSpecies): + for i in range(num_core_species): if not self.unimolecularThreshold[i] and rxnSysUnimolecularThreshold[i]: # We've shifted from not reacting to reacting self.unimolecularReact[i] = True self.unimolecularThreshold[i] = True - 
for i in xrange(numCoreSpecies): - for j in xrange(i, numCoreSpecies): - if not self.bimolecularThreshold[i,j] and rxnSysBimolecularThreshold[i,j]: + for i in range(num_core_species): + for j in range(i, num_core_species): + if not self.bimolecularThreshold[i, j] and rxnSysBimolecularThreshold[i, j]: # We've shifted from not reacting to reacting - self.bimolecularReact[i,j] = True - self.bimolecularThreshold[i,j] = True + self.bimolecularReact[i, j] = True + self.bimolecularThreshold[i, j] = True if self.trimolecular: - for i in xrange(numCoreSpecies): - for j in xrange(i, numCoreSpecies): - for k in xrange(j, numCoreSpecies): - if not self.trimolecularThreshold[i,j,k] and rxnSysTrimolecularThreshold[i,j,k]: + for i in range(num_core_species): + for j in range(i, num_core_species): + for k in range(j, num_core_species): + if not self.trimolecularThreshold[i, j, k] and rxnSysTrimolecularThreshold[i, j, k]: # We've shifted from not reacting to reacting - self.trimolecularReact[i,j,k] = True - self.trimolecularThreshold[i,j,k] = True + self.trimolecularReact[i, j, k] = True + self.trimolecularThreshold[i, j, k] = True else: # We are not filtering reactions if new_core_species: # React all the new core species unimolecularly - for i in xrange(prevNumCoreSpecies, numCoreSpecies): + for i in range(prev_num_core_species, num_core_species): self.unimolecularReact[i] = True - + # React all the new core species with all the core species bimolecularly - for i in xrange(numCoreSpecies): - for j in xrange(prevNumCoreSpecies,numCoreSpecies): - self.bimolecularReact[i,j] = True + for i in range(num_core_species): + for j in range(prev_num_core_species, num_core_species): + self.bimolecularReact[i, j] = True # React all the new core species with all bimolecular combinations trimolecularly if self.trimolecular: - for i in xrange(numCoreSpecies): - for j in xrange(numCoreSpecies): - for k in xrange(prevNumCoreSpecies, numCoreSpecies): - self.trimolecularReact[i,j,k] = True + for i in range(num_core_species): + for j in range(num_core_species): + for k in range(prev_num_core_species, num_core_species): + self.trimolecularReact[i, j, k] = True - def saveEverything(self): """ Saves the output HTML and the Chemkin file @@ -1674,7 +1706,7 @@ def saveEverything(self): # If the user specifies it, add unused reaction library reactions to # an additional output species and reaction list which is written to the ouput HTML # file as well as the chemkin file - + if self.reactionLibraries: # First initialize the outputReactionList and outputSpeciesList to empty self.reactionModel.outputSpeciesList = [] @@ -1682,12 +1714,12 @@ def saveEverything(self): for library, option in self.reactionLibraries: if option: self.reactionModel.addReactionLibraryToOutput(library) - + self.execTime.append(time.time() - self.initializationTime) # Notify registered listeners: self.notify() - + def finish(self): """ Complete the model generation. 
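The react/threshold bookkeeping above uses one padding pattern for the 1-D, 2-D, and 3-D cases: allocate a larger all-False boolean tensor and copy the previous values into its leading indices, so entries for newly added core species default to False. A minimal standalone sketch of that step, using made-up array names and sizes rather than the actual RMG attributes:

    import numpy as np

    def expand_threshold(old, n):
        # Zero-pad a boolean threshold tensor so every axis has length n;
        # slots belonging to newly added core species stay False.
        new = np.zeros((n,) * old.ndim, bool)
        new[tuple(slice(0, s) for s in old.shape)] = old
        return new

    prev_num_core_species, num_core_species = 3, 5   # hypothetical sizes
    bimolecular_threshold = np.zeros((prev_num_core_species,) * 2, bool)
    bimolecular_threshold[0, 1] = True
    expanded = expand_threshold(bimolecular_threshold, num_core_species)
    assert expanded.shape == (5, 5)
    assert expanded[0, 1] and not expanded[4, 4]
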
@@ -1710,19 +1742,19 @@ def finish(self): # Log end timestamp logging.info('') logging.info('RMG execution terminated at ' + time.asctime()) - + def getGitCommit(self, modulePath): import subprocess - if os.path.exists(os.path.join(modulePath,'..','.git')): + if os.path.exists(os.path.join(modulePath, '..', '.git')): try: return subprocess.check_output(['git', 'log', '--format=%H%n%cd', '-1'], - cwd=modulePath).splitlines() + cwd=modulePath).splitlines() except: return '', '' else: return '', '' - + def logHeader(self, level=logging.INFO): """ Output a header containing identifying information about RMG to the log. @@ -1736,7 +1768,7 @@ def logHeader(self, level=logging.INFO): logging.log(level, '# Richard H. West (r.west@neu.edu) #') logging.log(level, '# Website: http://reactionmechanismgenerator.github.io/ #') logging.log(level, '#########################################################\n') - + # Extract git commit from RMG-Py head, date = self.getGitCommit(getPath()) if head != '' and date != '': @@ -1746,25 +1778,25 @@ def logHeader(self, level=logging.INFO): logging.log(level, '') else: # If we cannot get git info, try checking if it is a conda package instead: - condaPackage = get_condaPackage('rmg') - if condaPackage != '': + conda_package = get_condaPackage('rmg') + if conda_package != '': logging.log(level, 'The current anaconda package for RMG-Py is:') - logging.log(level, condaPackage) - logging.log(level,'') - - databaseHead, databaseDate = self.getGitCommit(settings['database.directory']) - if databaseHead !='' and databaseDate !='': + logging.log(level, conda_package) + logging.log(level, '') + + database_head, database_date = self.getGitCommit(settings['database.directory']) + if database_head != '' and database_date != '': logging.log(level, 'The current git HEAD for RMG-database is:') - logging.log(level, '\t%s' % databaseHead) - logging.log(level, '\t%s' % databaseDate) + logging.log(level, '\t%s' % database_head) + logging.log(level, '\t%s' % database_date) logging.log(level, '') else: - databaseCondaPackage=get_condaPackage('rmgdatabase') - if databaseCondaPackage != '': + database_conda_package = get_condaPackage('rmgdatabase') + if database_conda_package != '': logging.log(level, 'The current anaconda package for RMG-database is:') - logging.log(level, databaseCondaPackage) - logging.log(level,'') - + logging.log(level, database_conda_package) + logging.log(level, '') + def loadRMGJavaInput(self, path): """ Load an RMG-Java job from the input file located at `inputFile`, or @@ -1774,56 +1806,62 @@ def loadRMGJavaInput(self, path): " removed in version 2.3.", DeprecationWarning) # NOTE: This function is currently incomplete! # It only loads a subset of the available information. - + self.reactionModel = CoreEdgeReactionModel() self.initialSpecies = [] self.reactionSystems = [] - - Tlist = []; Plist = []; concentrationList = []; speciesDict = {} + + T_list = [] + P_list = [] + concentration_list = [] + species_dict = {} termination = [] - + with open(path, 'r') as f: line = self.readMeaningfulLineJava(f) while line != '': - - + if line.startswith('TemperatureModel:'): tokens = line.split() units = tokens[2][1:-1] assert units in ['C', 'F', 'K'] if units == 'C': - Tlist = [float(T)+273.15 for T in tokens[3:]] + T_list = [float(T) + 273.15 for T in tokens[3:]] elif units == 'F': - Tlist = [(float(T)+459.67)*5./9. for T in tokens[3:]] + T_list = [(float(T) + 459.67) * 5. / 9. 
for T in tokens[3:]] else: - Tlist = [float(T) for T in tokens[3:]] - + T_list = [float(T) for T in tokens[3:]] + elif line.startswith('PressureModel:'): tokens = line.split() units = tokens[2][1:-1] assert units in ['atm', 'bar', 'Pa', 'torr'] if units == 'atm': - Plist = [float(P)*101325. for P in tokens[3:]] + P_list = [float(P) * 101325. for P in tokens[3:]] elif units == 'bar': - Plist = [float(P)*100000. for P in tokens[3:]] + P_list = [float(P) * 100000. for P in tokens[3:]] elif units == 'torr': - Plist = [float(P)/760.*101325. for P in tokens[3:]] + P_list = [float(P) / 760. * 101325. for P in tokens[3:]] else: - Plist = [float(P) for P in tokens[3:]] - + P_list = [float(P) for P in tokens[3:]] + elif line.startswith('InitialStatus:'): - label = ''; concentrations = []; adjlist = '' - + label = '' + concentrations = [] + adjlist = '' + line = self.readMeaningfulLineJava(f) while line != 'END': - + if line == '' and label != '': species = Species(label=label, molecule=[Molecule().fromAdjacencyList(adjlist)]) self.initialSpecies.append(species) - speciesDict[label] = species - concentrationList.append(concentrations) - label = ''; concentrations = []; adjlist = '' - + species_dict[label] = species + concentration_list.append(concentrations) + label = '' + concentrations = [] + adjlist = '' + elif line != '' and label == '': tokens = line.split() label = tokens[0] @@ -1832,23 +1870,24 @@ def loadRMGJavaInput(self, path): tokens.pop(-1) assert units in ['mol/cm3', 'mol/m3', 'mol/l'] if units == 'mol/cm3': - concentrations = [float(C)*1.0e6 for C in tokens[2:]] + concentrations = [float(C) * 1.0e6 for C in tokens[2:]] elif units == 'mol/l': - concentrations = [float(C)*1.0e3 for C in tokens[2:]] + concentrations = [float(C) * 1.0e3 for C in tokens[2:]] else: concentrations = [float(C) for C in tokens[2:]] - + elif line != '': adjlist += line + '\n' - + line = f.readline().strip() - if '//' in line: line = line[0:line.index('//')] - + if '//' in line: + line = line[0:line.index('//')] + elif line.startswith('InertGas:'): - + line = self.readMeaningfulLineJava(f) while line != 'END': - + tokens = line.split() label = tokens[0] assert label in ['N2', 'Ar', 'He', 'Ne'] @@ -1863,28 +1902,28 @@ def loadRMGJavaInput(self, path): units = tokens[1][1:-1] assert units in ['mol/cm3', 'mol/m3', 'mol/l'] if units == 'mol/cm3': - concentrations = [float(C)*1.0e6 for C in tokens[2:]] + concentrations = [float(C) * 1.0e6 for C in tokens[2:]] elif units == 'mol/l': - concentrations = [float(C)*1.0e3 for C in tokens[2:]] + concentrations = [float(C) * 1.0e3 for C in tokens[2:]] else: concentrations = [float(C) for C in tokens[2:]] - + species = Species(label=label, reactive=False, molecule=[Molecule().fromSMILES(smiles)]) self.initialSpecies.append(species) - speciesDict[label] = species - concentrationList.append(concentrations) - + species_dict[label] = species + concentration_list.append(concentrations) + line = self.readMeaningfulLineJava(f) - + elif line.startswith('FinishController:'): - + # First meaningful line is a termination time or conversion line = self.readMeaningfulLineJava(f) tokens = line.split() if tokens[2].lower() == 'conversion:': label = tokens[3] conversion = float(tokens[4]) - termination.append(TerminationConversion(spec=speciesDict[label], conv=conversion)) + termination.append(TerminationConversion(spec=species_dict[label], conv=conversion)) elif tokens[2].lower() == 'reactiontime:': time = float(tokens[3]) units = tokens[4][1:-1] @@ -1896,30 +1935,32 @@ def 
loadRMGJavaInput(self, path): elif units == 'day': time *= 60. * 60. * 24. termination.append(TerminationTime(time=time)) - + # Second meaningful line is the error tolerance # We're not doing anything with this information yet! line = self.readMeaningfulLineJava(f) - + line = self.readMeaningfulLineJava(f) - - assert len(Tlist) > 0 - assert len(Plist) > 0 - concentrationList = np.array(concentrationList) + + assert len(T_list) > 0 + assert len(P_list) > 0 + concentration_list = np.array(concentration_list) # An arbitrary number of concentrations is acceptable, and should be run for each reactor system - if not concentrationList.shape[1] > 0: + if not concentration_list.shape[1] > 0: raise AssertionError() # Make a reaction system for each (T,P) combination - for T in Tlist: - for P in Plist: - for i in range(concentrationList.shape[1]): - concentrations = concentrationList[:,i] - totalConc = np.sum(concentrations) - initialMoleFractions = dict([(self.initialSpecies[i], concentrations[i] / totalConc) for i in range(len(self.initialSpecies))]) - reactionSystem = SimpleReactor(T, P, initialMoleFractions=initialMoleFractions, termination=termination) - self.reactionSystems.append(reactionSystem) - + for T in T_list: + for P in P_list: + for i in range(concentration_list.shape[1]): + concentrations = concentration_list[:, i] + total_conc = np.sum(concentrations) + initial_mole_fractions = dict([(self.initialSpecies[i], concentrations[i] / total_conc) for i in + range(len(self.initialSpecies))]) + reaction_system = SimpleReactor(T, P, initialMoleFractions=initial_mole_fractions, + termination=termination) + self.reactionSystems.append(reaction_system) + def readMeaningfulLineJava(self, f): """ Read a meaningful line from an RMG-Java condition file object `f`, @@ -1930,14 +1971,18 @@ def readMeaningfulLineJava(self, f): line = f.readline() if line != '': line = line.strip() - if '//' in line: line = line[0:line.index('//')] + if '//' in line: + line = line[0:line.index('//')] while line == '': line = f.readline() - if line == '': break + if line == '': + break line = line.strip() - if '//' in line: line = line[0:line.index('//')] + if '//' in line: + line = line[0:line.index('//')] return line - + + ################################################################################ def determine_procnum_from_RAM(): @@ -1947,14 +1992,14 @@ def determine_procnum_from_RAM(): if sys.platform.startswith('linux'): # linux memory_available = psutil.virtual_memory().free / (1000.0 ** 3) - memory_use = psutil.Process(os.getpid()).memory_info()[0]/(1000.0 ** 3) + memory_use = psutil.Process(os.getpid()).memory_info()[0] / (1000.0 ** 3) tmp = divmod(memory_available, memory_use) tmp2 = min(maxproc, tmp[0]) procnum = max(1, int(tmp2)) elif sys.platform == "darwin": # OS X - memory_available = psutil.virtual_memory().available/(1000.0 ** 3) - memory_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/(1000.0 ** 3) + memory_available = psutil.virtual_memory().available / (1000.0 ** 3) + memory_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / (1000.0 ** 3) tmp = divmod(memory_available, memory_use) tmp2 = min(maxproc, tmp[0]) procnum = max(1, int(tmp2)) @@ -1965,6 +2010,7 @@ def determine_procnum_from_RAM(): # Return the maximal number of processes for multiprocessing return procnum + def initializeLog(verbose, log_file_name): """ Set up a logger for RMG to use to print output to stdout. 
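The initializeLog hunks around this point keep the same two-handler layout: a console handler at the requested verbosity and a file handler that is never less detailed than the console, both using the terse '%(levelname)s%(message)s' formatter. A condensed sketch of that layout follows; initialize_log_sketch is a hypothetical stand-in and omits the backup rotation of an existing log file that the real function performs:

    import logging
    import sys

    def initialize_log_sketch(verbose, log_file_name):
        logger = logging.getLogger()
        logger.setLevel(min(logging.DEBUG, verbose))

        formatter = logging.Formatter('%(levelname)s%(message)s')

        # Console output at the requested verbosity
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(verbose)
        ch.setFormatter(formatter)

        # File output at least as detailed as DEBUG/VERBOSE
        fh = logging.FileHandler(filename=log_file_name)
        fh.setLevel(min(logging.DEBUG, verbose))
        fh.setFormatter(formatter)

        logger.addHandler(ch)
        logger.addHandler(fh)
        return logger
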
The @@ -1988,8 +2034,8 @@ def initializeLog(verbose, log_file_name): logging.addLevelName(1, '') # Create formatter and add to console handler - #formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S') - #formatter = Formatter('%(message)s', '%Y-%m-%d %H:%M:%S') + # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S') + # formatter = Formatter('%(message)s', '%Y-%m-%d %H:%M:%S') formatter = logging.Formatter('%(levelname)s%(message)s') ch.setFormatter(formatter) @@ -1998,12 +2044,12 @@ def initializeLog(verbose, log_file_name): name, ext = os.path.splitext(log_file_name) backup = name + '_backup' + ext if os.path.exists(backup): - logging.info("Removing old "+backup) + logging.info("Removing old " + backup) os.remove(backup) logging.info('Moving {0} to {1}\n'.format(log_file_name, backup)) shutil.move(log_file_name, backup) - fh = logging.FileHandler(filename=log_file_name) #, backupCount=3) - fh.setLevel(min(logging.DEBUG,verbose)) # always at least VERBOSE in the file + fh = logging.FileHandler(filename=log_file_name) # , backupCount=3) + fh.setLevel(min(logging.DEBUG, verbose)) # always at least VERBOSE in the file fh.setFormatter(formatter) # notice that STDERR does not get saved to the log file # so errors from underlying libraries (eg. openbabel) etc. that report @@ -2017,39 +2063,42 @@ def initializeLog(verbose, log_file_name): logger.addHandler(ch) logger.addHandler(fh) + ################################################################################ -class RMG_Memory: +class RMG_Memory(object): """ class for remembering RMG simulations and determining what simulation to run next """ - def __init__(self,reactionSystem,bspc): + + def __init__(self, reactionSystem, bspc): self.Ranges = dict() - - if hasattr(reactionSystem,'Trange') and isinstance(reactionSystem.Trange, list): - Trange = reactionSystem.Trange - self.Ranges['T'] = [T.value_si for T in Trange] - if hasattr(reactionSystem,'Prange') and isinstance(reactionSystem.Prange, list): - Prange = reactionSystem.Prange - self.Ranges['P'] = [np.log(P.value_si) for P in Prange] - if hasattr(reactionSystem,'initialMoleFractions'): + + if hasattr(reactionSystem, 'Trange') and isinstance(reactionSystem.Trange, list): + T_range = reactionSystem.Trange + self.Ranges['T'] = [T.value_si for T in T_range] + if hasattr(reactionSystem, 'Prange') and isinstance(reactionSystem.Prange, list): + P_range = reactionSystem.Prange + self.Ranges['P'] = [np.log(P.value_si) for P in P_range] + if hasattr(reactionSystem, 'initialMoleFractions'): if bspc: self.initialMoleFractions = deepcopy(reactionSystem.initialMoleFractions) - self.balanceSpecies = [x for x in self.initialMoleFractions.keys() if x.label == bspc][0] #find the balance species - for key,value in reactionSystem.initialMoleFractions.iteritems(): + self.balanceSpecies = [x for x in self.initialMoleFractions.keys() if x.label == bspc][ + 0] # find the balance species + for key, value in reactionSystem.initialMoleFractions.items(): assert key != 'T' and key != 'P', 'naming a species T or P is forbidden' if isinstance(value, list): self.Ranges[key] = value - if hasattr(reactionSystem,'initialConcentrations'): - for key,value in reactionSystem.initialConcentrations.iteritems(): + if hasattr(reactionSystem, 'initialConcentrations'): + for key, value in reactionSystem.initialConcentrations.items(): assert key != 'T' and key != 'P', 'naming a species T or P is forbidden' if isinstance(value, 
list): self.Ranges[key] = [v.value_si for v in value] - + for term in reactionSystem.termination: if isinstance(term, TerminationTime): self.tmax = term.time.value_si - + self.reactionSystem = reactionSystem self.conditionList = [] self.scaledConditionList = [] @@ -2057,17 +2106,17 @@ def __init__(self,reactionSystem,bspc): self.convs = [] self.Ns = [] self.randState = np.random.RandomState(1) - - def add_t_conv_N(self,t,conv,N): + + def add_t_conv_N(self, t, conv, N): """ adds the completion time and conversion and the number of objects added from a given run to the memory """ - if hasattr(self,'tmax'): - self.ts.append(t/self.tmax) + if hasattr(self, 'tmax'): + self.ts.append(t / self.tmax) self.convs.append(conv) self.Ns.append(N) - + def get_cond(self): """ Returns the condition being run @@ -2076,8 +2125,8 @@ def get_cond(self): return None else: return self.conditionList[-1] - - def calculate_cond(self,obj,Ndims,Ns=20): + + def calculate_cond(self, obj, Ndims, Ns=20): """ Weighted Stochastic Grid Sampling algorithm obj is evaluated at a grid of points and the evaluations are normalized @@ -2085,30 +2134,31 @@ def calculate_cond(self,obj,Ndims,Ns=20): then a random step of length 1/(2*Ns) is taken from that point to give a final condition point if this process were to impact runtime under some conditions you could decrease the value of Ns to speed it up """ - bounds = tuple((0.0,1.0) for k in xrange(Ndims)) - x0,fval,grid,Jout = brute(obj,bounds,Ns=Ns,full_output=True,finish=None) #run brute just to easily get the evaluations at each grid point (we don't care about the optimal value) - Jout += abs(Jout.min(tuple(xrange(Ndims)))) #shifts Jout positive so tot is positive - tot = np.sum(Jout,axis=tuple(xrange(len(Jout.shape)))) - Jout /= tot #normalize Jout - n = self.randState.uniform(0,1,1)[0] #draw a random number between 0 and 1 + bounds = tuple((0.0, 1.0) for k in range(Ndims)) + x0, fval, grid, Jout = brute(obj, bounds, Ns=Ns, full_output=True, + finish=None) # run brute just to easily get the evaluations at each grid point (we don't care about the optimal value) + Jout += abs(Jout.min(tuple(range(Ndims)))) # shifts Jout positive so tot is positive + tot = np.sum(Jout, axis=tuple(range(len(Jout.shape)))) + Jout /= tot # normalize Jout + n = self.randState.uniform(0, 1, 1)[0] # draw a random number between 0 and 1 s = 0.0 - for indexes in np.ndenumerate(Jout): #choose a coordinate such that grid[indexes] is choosen with probability Jout[indexes] + for indexes in np.ndenumerate(Jout): # choose a coordinate such that grid[indexes] is choosen with probability Jout[indexes] s += Jout[indexes[0]] if s > n: break if len(bounds) != 1: - yf = np.array([grid[i][indexes[0]] for i in xrange(len(grid))]) + yf = np.array([grid[i][indexes[0]] for i in range(len(grid))]) else: - yf = np.array([grid[indexes[0]] for i in xrange(len(grid))]) - - step = self.randState.uniform(0,1,len(Jout.shape)) #take a step in a random direction in a length between 0 and 1/(2*Ns) + yf = np.array([grid[indexes[0]] for i in range(len(grid))]) + + step = self.randState.uniform(0, 1, len(Jout.shape)) # take a step in a random direction in a length between 0 and 1/(2*Ns) step /= step.sum() - mag = self.randState.uniform(0,1,1)[0] + mag = self.randState.uniform(0, 1, 1)[0] + + yf += step * mag * np.sqrt(2) / (2.0 * Ns) - yf += step*mag*np.sqrt(2)/(2.0*Ns) - return yf - + def generate_cond(self): """ find the next condition to run at by solving an optimization problem @@ -2117,76 +2167,83 @@ def generate_cond(self): the 
resulting condition is added to the end of conditionList """ if self.conditionList == []: - self.conditionList.append({key:value[0] for key,value in self.Ranges.iteritems()}) - self.scaledConditionList.append({key:0.0 for key,value in self.Ranges.iteritems()}) + self.conditionList.append({key: value[0] for key, value in self.Ranges.items()}) + self.scaledConditionList.append({key: 0.0 for key, value in self.Ranges.items()}) elif len(self.conditionList[0]) == 0: pass else: - ykey = self.conditionList[0].keys() + ykey = list(self.conditionList[0].keys()) Ns = self.Ns + def obj(y): boo = y.shape == tuple() vec = [] N = len(self.conditionList) - for i,cond in enumerate(self.scaledConditionList): - for j,key in enumerate(ykey): + for i, cond in enumerate(self.scaledConditionList): + for j, key in enumerate(ykey): if not boo: - vec.append(10.0*N/((N-i)*(Ns[i]+1))*abs(y[j]-cond[key])**0.3) + vec.append(10.0 * N / ((N - i) * (Ns[i] + 1)) * abs(y[j] - cond[key]) ** 0.3) else: - vec.append(10.0*N/((N-i)*(Ns[i]+1))*abs(y-cond[key])**0.3) + vec.append(10.0 * N / ((N - i) * (Ns[i] + 1)) * abs(y - cond[key]) ** 0.3) return -np.array(vec).sum() - yf = self.calculate_cond(obj,len(ykey)) - - scaledNewCond = {ykey[i]:yf[i] for i in xrange(len(ykey))} - newCond = {yk:yf[i]*(self.Ranges[yk][1]-self.Ranges[yk][0])+self.Ranges[yk][0] for i,yk in enumerate(ykey) } - if 'P' in newCond.keys(): - newCond['P'] = np.exp(newCond['P']) - - if hasattr(self,'initialMoleFractions'): + yf = self.calculate_cond(obj, len(ykey)) + + scaled_new_cond = {ykey[i]: yf[i] for i in range(len(ykey))} + new_cond = {yk: yf[i] * (self.Ranges[yk][1] - self.Ranges[yk][0]) + self.Ranges[yk][0] for i, yk in + enumerate(ykey)} + if 'P' in list(new_cond.keys()): + new_cond['P'] = np.exp(new_cond['P']) + + if hasattr(self, 'initialMoleFractions'): for key in self.initialMoleFractions.keys(): - if not isinstance(self.initialMoleFractions[key],list): - newCond[key] = self.initialMoleFractions[key] - total = sum([val for key,val in newCond.iteritems() if key != 'T' and key != 'P']) + if not isinstance(self.initialMoleFractions[key], list): + new_cond[key] = self.initialMoleFractions[key] + total = sum([val for key, val in new_cond.items() if key != 'T' and key != 'P']) if self.balanceSpecies is None: - for key,val in newCond.iteritems(): + for key, val in new_cond.items(): if key != 'T' and key != 'P': - newCond[key] = val/total + new_cond[key] = val / total else: - newCond[self.balanceSpecies] = self.initialMoleFractions[self.balanceSpecies] + 1.0 - total + new_cond[self.balanceSpecies] = self.initialMoleFractions[self.balanceSpecies] + 1.0 - total + + self.conditionList.append(new_cond) + self.scaledConditionList.append(scaled_new_cond) + return - self.conditionList.append(newCond) - self.scaledConditionList.append(scaledNewCond) - return -def log_conditions(RMG_Memories,index): +def log_conditions(RMG_Memories, index): """ log newly generated reactor conditions """ if RMG_Memories[index].get_cond() is not None: s = 'conditions choosen for reactor {0} were: '.format(index) - for key,item in RMG_Memories[index].get_cond().iteritems(): + for key, item in RMG_Memories[index].get_cond().items(): if key == 'T': s += 'T = {0} K, '.format(item) elif key == 'P': - s += 'P = {0} bar, '.format(item/1.0e5) + s += 'P = {0} bar, '.format(item / 1.0e5) else: s += key.label + ' = {0}, '.format(item) - + logging.info(s) - -class Tee: + + +class Tee(object): """A simple tee to create a stream which prints to many streams. 
This is used to report the profiling statistics to both the log file and the standard output. """ + def __init__(self, *fileobjects): - self.fileobjects=fileobjects + self.fileobjects = fileobjects + def write(self, string): for fileobject in self.fileobjects: fileobject.write(string) - + + def get_condaPackage(module): """ Check the version of any conda package @@ -2194,37 +2251,39 @@ def get_condaPackage(module): import subprocess try: lines = subprocess.check_output(['conda', 'list', '-f', module]).splitlines() - - packages=[] + + packages = [] # Strip comments for line in lines: - if line[:1]=='#': + if line[:1] == '#': pass else: packages.append(line) - + return '\n'.join(packages) except: return '' + def processProfileStats(stats_file, log_file): import pstats - out_stream = Tee(sys.stdout,open(log_file,'a')) # print to screen AND append to RMG.log - print >>out_stream, "="*80 - print >>out_stream, "Profiling Data".center(80) - print >>out_stream, "="*80 - stats = pstats.Stats(stats_file,stream=out_stream) + out_stream = Tee(sys.stdout, open(log_file, 'a')) # print to screen AND append to RMG.log + print("=" * 80, file=out_stream) + print("Profiling Data".center(80), file=out_stream) + print("=" * 80, file=out_stream) + stats = pstats.Stats(stats_file, stream=out_stream) stats.strip_dirs() - print >>out_stream, "Sorted by internal time" + print("Sorted by internal time", file=out_stream) stats.sort_stats('time') stats.print_stats(25) stats.print_callers(25) - print >>out_stream, "Sorted by cumulative time" + print("Sorted by cumulative time", file=out_stream) stats.sort_stats('cumulative') stats.print_stats(25) stats.print_callers(25) stats.print_callees(25) + def makeProfileGraph(stats_file): """ Uses gprof2dot to create a graphviz dot file of the profiling information. 
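processProfileStats above combines two small pieces: the Tee class, whose write() fans a single stream out over several file objects, and the Python 3 call print(..., file=stream), which replaces the old print >>stream statements. A self-contained sketch of that combination (the log file name here is arbitrary, not one RMG uses):

    import sys

    class Tee(object):
        """Duplicate every write() across several file-like objects."""
        def __init__(self, *fileobjects):
            self.fileobjects = fileobjects

        def write(self, string):
            for fileobject in self.fileobjects:
                fileobject.write(string)

    with open('profile_sketch.log', 'a') as log_file:
        out_stream = Tee(sys.stdout, log_file)      # screen AND log file
        print('=' * 80, file=out_stream)
        print('Profiling Data'.center(80), file=out_stream)
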
@@ -2243,11 +2302,11 @@ def makeProfileGraph(stats_file): logging.warning('Try getting the latest version with something like `pip install --upgrade gprof2dot`.') return import subprocess - - #create an Options class to mimic optparser output as much as possible: - class Options: + + # create an Options class to mimic optparser output as much as possible: + class Options(object): pass - + options = Options() options.node_thres = 0.8 options.edge_thres = 0.1 @@ -2256,55 +2315,56 @@ class Options: options.root = "" options.leaf = "" options.wrap = True - - theme = themes['color'] # bw color gray pink - theme.fontname = "ArialMT" # default "Arial" leads to PostScript warnings in dot (on Mac OS) + + theme = themes['color'] # bw color gray pink + theme.fontname = "ArialMT" # default "Arial" leads to PostScript warnings in dot (on Mac OS) parser = PstatsParser(stats_file) profile = parser.parse() - + dot_file = stats_file + '.dot' - output = open(dot_file,'wt') + output = open(dot_file, 'wt') dot = DotWriter(output) dot.strip = options.strip dot.wrap = options.wrap - + if options.show_samples: dot.show_function_events.append(SAMPLES) - + profile = profile - profile.prune(options.node_thres/100.0, options.edge_thres/100.0) + profile.prune(options.node_thres / 100.0, options.edge_thres / 100.0) if options.root: - rootId = profile.getFunctionId(options.root) - if not rootId: + root_id = profile.getFunctionId(options.root) + if not root_id: sys.stderr.write('root node ' + options.root + ' not found (might already be pruned : try -e0 -n0 flags)\n') sys.exit(1) - profile.prune_root(rootId) + profile.prune_root(root_id) if options.leaf: - leafId = profile.getFunctionId(options.leaf) - if not leafId: + leaf_id = profile.getFunctionId(options.leaf) + if not leaf_id: sys.stderr.write('leaf node ' + options.leaf + ' not found (maybe already pruned : try -e0 -n0 flags)\n') sys.exit(1) - profile.prune_leaf(leafId) + profile.prune_leaf(leaf_id) dot.graph(profile, theme) output.close() - + try: subprocess.check_call(['dot', '-Tps2', dot_file, '-o', '{0}.ps2'.format(dot_file)]) except subprocess.CalledProcessError: logging.error("Error returned by 'dot' when generating graph of the profile statistics.") logging.info("To try it yourself:\n dot -Tps2 {0} -o {0}.ps2".format(dot_file)) except OSError: - logging.error("Couldn't run 'dot' to create graph of profile statistics. Check graphviz is installed properly and on your path.") + logging.error("Couldn't run 'dot' to create graph of profile statistics. Check graphviz is installed properly " + "and on your path.") logging.info("Once you've got it, try:\n dot -Tps2 {0} -o {0}.ps2".format(dot_file)) - + try: subprocess.check_call(['ps2pdf', '{0}.ps2'.format(dot_file), '{0}.pdf'.format(dot_file)]) except OSError: - logging.error("Couldn't run 'ps2pdf' to create pdf graph of profile statistics. Check that ps2pdf converter is installed.") - logging.info("Once you've got it, try:\n pd2pdf {0}.ps2 {0}.pdf".format(dot_file)) + logging.error("Couldn't run 'ps2pdf' to create pdf graph of profile statistics. 
Check that ps2pdf converter " + "is installed.") + logging.info("Once you've got it, try:\n pd2pdf {0}.ps2 {0}.pdf".format(dot_file)) else: logging.info("Graph of profile statistics saved to: \n {0}.pdf".format(dot_file)) - diff --git a/rmgpy/rmg/mainTest.py b/rmgpy/rmg/mainTest.py index c383227a86..8bfad9cce8 100644 --- a/rmgpy/rmg/mainTest.py +++ b/rmgpy/rmg/mainTest.py @@ -29,18 +29,23 @@ ############################################################################### import os +import shutil import unittest -import shutil + from nose.plugins.attrib import attr -from main import RMG -from main import RMG_Memory + +from rmgpy.rmg.main import RMG +from rmgpy.rmg.main import RMG_Memory +from rmgpy import getPath from rmgpy import settings from rmgpy.data.rmg import RMGDatabase -from rmgpy import getPath from rmgpy.rmg.model import CoreEdgeReactionModel + ################################################### originalPath = getPath() + + @attr('functional') class TestMain(unittest.TestCase): @@ -89,23 +94,23 @@ def testRMGIncreasesReactions(self): def testRMGSeedMechanismCreation(self): """Test that the expected seed mechanisms are created in output directory.""" - seedDir = os.path.join(self.testDir, self.outputDir, 'seed') + seed_dir = os.path.join(self.testDir, self.outputDir, 'seed') self.assertTrue(os.path.exists) - self.assertTrue(os.path.exists(os.path.join(seedDir, 'seed'))) # kinetics library folder made + self.assertTrue(os.path.exists(os.path.join(seed_dir, 'seed'))) # kinetics library folder made - self.assertTrue(os.path.exists(os.path.join(seedDir, 'seed', 'dictionary.txt'))) # dictionary file made - self.assertTrue(os.path.exists(os.path.join(seedDir, 'seed', 'reactions.py'))) # reactions file made + self.assertTrue(os.path.exists(os.path.join(seed_dir, 'seed', 'dictionary.txt'))) # dictionary file made + self.assertTrue(os.path.exists(os.path.join(seed_dir, 'seed', 'reactions.py'))) # reactions file made def testRMGSeedEdgeMechanismCreation(self): """Test that the expected seed mechanisms are created in output directory.""" - seedDir = os.path.join(self.testDir, self.outputDir, 'seed') + seed_dir = os.path.join(self.testDir, self.outputDir, 'seed') self.assertTrue(os.path.exists) - self.assertTrue(os.path.exists(os.path.join(seedDir, 'seed_edge'))) # kinetics library folder made + self.assertTrue(os.path.exists(os.path.join(seed_dir, 'seed_edge'))) # kinetics library folder made - self.assertTrue(os.path.exists(os.path.join(seedDir, 'seed_edge', 'dictionary.txt'))) # dictionary file made - self.assertTrue(os.path.exists(os.path.join(seedDir, 'seed_edge', 'reactions.py'))) # reactions file made + self.assertTrue(os.path.exists(os.path.join(seed_dir, 'seed_edge', 'dictionary.txt'))) # dictionary file made + self.assertTrue(os.path.exists(os.path.join(seed_dir, 'seed_edge', 'reactions.py'))) # reactions file made def testRMGSeedLibraryCreation(self): """Test that seed mechanisms are created in the correct database locations.""" @@ -130,12 +135,12 @@ def testRMGSeedWorks(self): kineticsDepositories=[], depository=False ) - + self.rmg.reactionModel = CoreEdgeReactionModel() self.rmg.reactionModel.addReactionLibraryToEdge('testSeed') # try adding seed as library self.assertTrue(len(self.rmg.reactionModel.edge.species) > 0) self.assertTrue(len(self.rmg.reactionModel.edge.reactions) > 0) - + self.rmg.reactionModel = CoreEdgeReactionModel() self.rmg.reactionModel.addSeedMechanismToCore('testSeed') # try adding seed as seed mech self.assertTrue(len(self.rmg.reactionModel.core.species) > 
0) @@ -150,25 +155,25 @@ def testRMGSeedWorks(self): self.rmg.reactionModel.addSeedMechanismToCore('testSeed_edge') # try adding seed as seed mech self.assertTrue(len(self.rmg.reactionModel.core.species) > 0) self.assertTrue(len(self.rmg.reactionModel.core.reactions) > 0) - + def testRMGMemory(self): """ test that RMG Memory objects function properly """ for rxnsys in self.rmg.reactionSystems: - Rmem = RMG_Memory(rxnsys,None) + Rmem = RMG_Memory(rxnsys, None) Rmem.generate_cond() Rmem.get_cond() - Rmem.add_t_conv_N(1.0,.2,2) + Rmem.add_t_conv_N(1.0, .2, 2) Rmem.generate_cond() Rmem.get_cond() - + def testMakeCanteraInputFile(self): """ This tests to ensure that a usable Cantera input file is created. """ import cantera as ct - + outName = os.path.join(self.rmg.outputDirectory, 'cantera') files = os.listdir(outName) for f in files: @@ -180,9 +185,9 @@ def testMakeCanteraInputFile(self): class TestCanteraOutput(unittest.TestCase): - + def setUp(self): - self.chemkin_files={"""ELEMENTS + self.chemkin_files = {"""ELEMENTS H D /2.014/ T /3.016/ @@ -318,29 +323,28 @@ def testChemkinToCanteraConversion(self): """ Tests that good and bad chemkin files raise proper exceptions """ - + from cantera.ck2cti import InputParseError - + for ck_input, works in self.chemkin_files.items(): os.chdir(originalPath) os.mkdir(self.dir_name) os.chdir(self.dir_name) - - f = open('chem001.inp','w') + + f = open('chem001.inp', 'w') f.write(ck_input) f.close() - - f = open('tran.dat','w') + + f = open('tran.dat', 'w') f.write(self.tran_dat) f.close() - + if works: - self.rmg.generateCanteraFiles(os.path.join(os.getcwd(),'chem001.inp')) + self.rmg.generateCanteraFiles(os.path.join(os.getcwd(), 'chem001.inp')) else: with self.assertRaises(InputParseError): - self.rmg.generateCanteraFiles(os.path.join(os.getcwd(),'chem001.inp')) - + self.rmg.generateCanteraFiles(os.path.join(os.getcwd(), 'chem001.inp')) + # clean up os.chdir(originalPath) shutil.rmtree(self.dir_name) - diff --git a/rmgpy/rmg/model.py b/rmgpy/rmg/model.py index 4bd63f16a9..563dc90680 100644 --- a/rmgpy/rmg/model.py +++ b/rmgpy/rmg/model.py @@ -31,13 +31,14 @@ """ Contains classes for working with the reaction model generated by RMG. """ +from __future__ import print_function import gc import itertools import logging import os -import numpy +import numpy as np import rmgpy.data.rmg from rmgpy import settings @@ -68,12 +69,12 @@ class ReactionModel: def __init__(self, species=None, reactions=None): self.species = species or [] self.reactions = reactions or [] - + def __reduce__(self): """ A helper function used when pickling an object. 
""" - return (ReactionModel, (self.species, self.reactions)) + return ReactionModel, (self.species, self.reactions) def merge(self, other): """ @@ -84,67 +85,70 @@ def merge(self, other): raise ValueError('Expected type ReactionModel for other parameter, got {0}'.format(other.__class__)) # Initialize the merged model - finalModel = ReactionModel() - + final_model = ReactionModel() + # Put the current model into the merged model as-is - finalModel.species.extend(self.species) - finalModel.reactions.extend(self.reactions) - + final_model.species.extend(self.species) + final_model.reactions.extend(self.reactions) + # Determine which species in other are already in self - commonSpecies = {}; uniqueSpecies = [] + common_species = {} + unique_species = [] for spec in other.species: - for spec0 in finalModel.species: + for spec0 in final_model.species: if spec.isIsomorphic(spec0): - commonSpecies[spec] = spec0 - if spec0.label not in ['Ar','N2','Ne','He']: + common_species[spec] = spec0 + if spec0.label not in ['Ar', 'N2', 'Ne', 'He']: if not spec0.thermo.isIdenticalTo(spec.thermo): - print 'Species {0} thermo from model 1 did not match that of model 2.'.format(spec.label) - + print('Species {0} thermo from model 1 did not match that of model 2.'.format(spec.label)) + break else: - uniqueSpecies.append(spec) - + unique_species.append(spec) + # Determine which reactions in other are already in self - commonReactions = {}; uniqueReactions = [] + common_reactions = {} + unique_reactions = [] for rxn in other.reactions: - for rxn0 in finalModel.reactions: + for rxn0 in final_model.reactions: if rxn.isIsomorphic(rxn0, eitherDirection=True): - commonReactions[rxn] = rxn0 + common_reactions[rxn] = rxn0 if not rxn0.kinetics.isIdenticalTo(rxn.kinetics): - print 'Reaction {0} kinetics from model 1 did not match that of model 2.'.format(str(rxn0)) + print('Reaction {0} kinetics from model 1 did not match that of model 2.'.format(str(rxn0))) break else: - uniqueReactions.append(rxn) - + unique_reactions.append(rxn) + # Add the unique species from other to the final model - finalModel.species.extend(uniqueSpecies) + final_model.species.extend(unique_species) # Make sure unique reactions only refer to species in the final model - for rxn in uniqueReactions: + for rxn in unique_reactions: for i, reactant in enumerate(rxn.reactants): try: - rxn.reactants[i] = commonSpecies[reactant] + rxn.reactants[i] = common_species[reactant] if rxn.pairs: for j, pair in enumerate(rxn.pairs): if reactant in pair: - rxn.pairs[j] = (rxn.reactants[i],pair[1]) + rxn.pairs[j] = (rxn.reactants[i], pair[1]) except KeyError: pass for i, product in enumerate(rxn.products): try: - rxn.products[i] = commonSpecies[product] + rxn.products[i] = common_species[product] if rxn.pairs: for j, pair in enumerate(rxn.pairs): if product in pair: rxn.pairs[j] = (pair[0], rxn.products[i]) except KeyError: pass - + # Add the unique reactions from other to the final model - finalModel.reactions.extend(uniqueReactions) - + final_model.reactions.extend(unique_reactions) + # Return the merged model - return finalModel + return final_model + ################################################################################ @@ -184,7 +188,7 @@ def __init__(self, core=None, edge=None, surface=None): self.surface = ReactionModel() else: self.surface = surface - + # The default tolerances mimic the original RMG behavior; no edge # pruning takes place, and the simulation is interrupted as soon as # a species flux higher than the validity @@ -207,10 +211,10 @@ def 
__init__(self, core=None, edge=None, surface=None): self.indexSpeciesDict = {} self.saveEdgeSpecies = False self.iterationNum = 0 - self.toleranceThermoKeepSpeciesInEdge = numpy.inf - self.Gfmax = numpy.inf - self.Gmax = numpy.inf - self.Gmin = -numpy.inf + self.toleranceThermoKeepSpeciesInEdge = np.inf + self.Gfmax = np.inf + self.Gmax = np.inf + self.Gmin = -np.inf self.minCoreSizeForPrune = 50 self.maximumEdgeSpecies = 100000 self.Tmax = 0 @@ -269,7 +273,7 @@ def makeNewSpecies(self, object, label='', reactive=True, checkForExisting=True, reactive = object.reactive else: molecule = object - + molecule.clearLabeledAtoms() # If desired, check to ensure that the species is new; return the @@ -281,20 +285,20 @@ def makeNewSpecies(self, object, label='', reactive=True, checkForExisting=True, # If we're here then we're ready to make the new species if reactive: - self.speciesCounter += 1 # count only reactive species - speciesIndex = self.speciesCounter + self.speciesCounter += 1 # count only reactive species + species_index = self.speciesCounter else: - speciesIndex = -1 + species_index = -1 try: - spec = Species(index=speciesIndex, label=label, molecule=[molecule], reactive=reactive, - thermo=object.thermo, transportData=object.transportData) + spec = Species(index=species_index, label=label, molecule=[molecule], reactive=reactive, + thermo=object.thermo, transportData=object.transportData) except AttributeError: - spec = Species(index=speciesIndex, label=label, molecule=[molecule], reactive=reactive) - + spec = Species(index=species_index, label=label, molecule=[molecule], reactive=reactive) + spec.creationIteration = self.iterationNum spec.generate_resonance_structures() - spec.molecularWeight = Quantity(spec.molecule[0].getMolecularWeight()*1000.,"amu") - + spec.molecularWeight = Quantity(spec.molecule[0].getMolecularWeight() * 1000., "amu") + if generateThermo: self.generateThermo(spec) @@ -310,7 +314,6 @@ def makeNewSpecies(self, object, label='', reactive=True, checkForExisting=True, else: self.speciesDict[formula] = [spec] - # Since the species is new, add it to the list of new species self.newSpeciesList.append(spec) @@ -348,8 +351,8 @@ def checkForExistingReaction(self, rxn): if rxn.reactants == rxn.products: logging.debug("Symmetrical reaction found. Returning no reaction") return True, None - - familyObj = getFamilyLibraryObject(rxn.family) + + family_obj = getFamilyLibraryObject(rxn.family) shortlist = self.searchRetrieveReactions(rxn) # Now use short-list to check for matches. All should be in same forward direction. 
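
Aside on the lookup pattern used above: speciesDict is keyed by molecular formula so that the expensive isomorphism check only runs against species sharing the same formula. A minimal sketch of that idea follows (illustration only, not part of the patch; the find_or_add helper and the injected get_formula/is_isomorphic callables are hypothetical names, since the patch itself does not show how the formula is obtained):

    # Illustrative sketch only -- not RMG code. Mirrors the speciesDict layout
    # (formula -> list of Species); get_formula and is_isomorphic are passed in
    # so no RMG API names are assumed.
    species_dict = {}

    def find_or_add(candidate, get_formula, is_isomorphic):
        formula = get_formula(candidate)
        for existing in species_dict.get(formula, []):
            if is_isomorphic(existing, candidate):
                return existing, False          # reuse the existing species
        species_dict.setdefault(formula, []).append(candidate)
        return candidate, True                  # genuinely new species

Bucketing by formula keeps each lookup roughly proportional to the size of one bucket rather than to the total number of known species.
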
@@ -361,12 +364,12 @@ def checkForExistingReaction(self, rxn): rxn_id0 = generateReactionId(rxn0) if rxn_id == rxn_id0 and areIdenticalSpeciesReferences(rxn, rxn0): - if isinstance(familyObj, KineticsLibrary) or isinstance(familyObj, KineticsFamily): + if isinstance(family_obj, KineticsLibrary) or isinstance(family_obj, KineticsFamily): if not rxn.duplicate: return True, rxn0 else: return True, rxn0 - elif (isinstance(familyObj, KineticsFamily) + elif (isinstance(family_obj, KineticsFamily) and rxn_id == rxn_id0[::-1] and areIdenticalSpeciesReferences(rxn, rxn0)): if not rxn.duplicate: @@ -379,22 +382,22 @@ def checkForExistingReaction(self, rxn): _, r1_rev, r2_rev = generateReactionKey(rxn, useProducts=True) for library in self.reactionDict: - libObj = getFamilyLibraryObject(library) - if isinstance(libObj, KineticsLibrary) and library != rxn.family: + lib_obj = getFamilyLibraryObject(library) + if isinstance(lib_obj, KineticsLibrary) and library != rxn.family: # First check seed short-list in forward direction shortlist = self.retrieve(library, r1_fwd, r2_fwd) - + for rxn0 in shortlist: rxn_id0 = generateReactionId(rxn0) if (rxn_id == rxn_id0) or (rxn_id == rxn_id0[::-1]): if areIdenticalSpeciesReferences(rxn, rxn0): return True, rxn0 - + # Now get the seed short-list of the reverse reaction shortlist = self.retrieve(library, r1_rev, r2_rev) - + for rxn0 in shortlist: if areIdenticalSpeciesReferences(rxn, rxn0): return True, rxn0 @@ -418,24 +421,25 @@ def makeNewReaction(self, forward, checkExisting=True, generateThermo=True): # Determine the proper species objects for all reactants and products reactants = [self.makeNewSpecies(reactant, generateThermo=generateThermo)[0] for reactant in forward.reactants] - products = [self.makeNewSpecies(product, generateThermo=generateThermo)[0] for product in forward.products ] + products = [self.makeNewSpecies(product, generateThermo=generateThermo)[0] for product in forward.products] if forward.specificCollider is not None: forward.specificCollider = self.makeNewSpecies(forward.specificCollider)[0] if forward.pairs is not None: for pairIndex in range(len(forward.pairs)): - reactantIndex = forward.reactants.index(forward.pairs[pairIndex][0]) - productIndex = forward.products.index(forward.pairs[pairIndex][1]) - forward.pairs[pairIndex] = (reactants[reactantIndex], products[productIndex]) - if hasattr(forward, 'reverse'): + reactant_index = forward.reactants.index(forward.pairs[pairIndex][0]) + product_index = forward.products.index(forward.pairs[pairIndex][1]) + forward.pairs[pairIndex] = (reactants[reactant_index], products[product_index]) + if hasattr(forward, 'reverse'): if forward.reverse: - forward.reverse.pairs[pairIndex] = (products[productIndex], reactants[reactantIndex]) + forward.reverse.pairs[pairIndex] = (products[product_index], reactants[reactant_index]) forward.reactants = reactants - forward.products = products + forward.products = products if checkExisting: found, rxn = self.checkForExistingReaction(forward) - if found: return rxn, False + if found: + return rxn, False # Generate the reaction pairs if not yet defined if forward.pairs is None: @@ -443,7 +447,7 @@ def makeNewReaction(self, forward, checkExisting=True, generateThermo=True): if hasattr(forward, 'reverse'): if forward.reverse: forward.reverse.generatePairs() - + # Note in the log if isinstance(forward, TemplateReaction): logging.debug('Creating new {0} template reaction {1}'.format(forward.family, forward)) @@ -453,7 +457,7 @@ def makeNewReaction(self, forward, 
checkExisting=True, generateThermo=True): logging.debug('Creating new library reaction {0}'.format(forward)) else: raise Exception("Unrecognized reaction type {0!s}".format(forward.__class__)) - + self.registerReaction(forward) forward.index = self.reactionCounter + 1 @@ -487,7 +491,7 @@ def makeNewPDepReaction(self, forward): # Generate the reaction pairs if not yet defined if forward.pairs is None: forward.generatePairs() - + # Set reaction index and increment the counter forward.index = self.reactionCounter + 1 self.reactionCounter += 1 @@ -509,13 +513,14 @@ def enlarge(self, newObject=None, reactEdge=False, and instead the algorithm proceeds to react the core species together to form edge reactions. """ - - numOldCoreSpecies = len(self.core.species) - numOldCoreReactions = len(self.core.reactions) - numOldEdgeSpecies = len(self.edge.species) - numOldEdgeReactions = len(self.edge.reactions) - reactionsMovedFromEdge = [] - self.newReactionList = []; self.newSpeciesList = [] + + num_old_core_species = len(self.core.species) + num_old_core_reactions = len(self.core.reactions) + num_old_edge_species = len(self.edge.species) + num_old_edge_reactions = len(self.edge.reactions) + reactions_moved_from_edge = [] + self.newReactionList = [] + self.newSpeciesList = [] # Determine number of parallel processes. from rmgpy.rmg.main import determine_procnum_from_RAM @@ -523,34 +528,36 @@ def enlarge(self, newObject=None, reactEdge=False, if reactEdge is False: # We are adding core species - newReactions = [] - pdepNetwork = None - objectWasInEdge = False - + new_reactions = [] + pdep_network = None + object_was_in_edge = False + if isinstance(newObject, Species): - - newSpecies = newObject - objectWasInEdge = newSpecies in self.edge.species - - if not newSpecies.reactive: - logging.info('NOT generating reactions for unreactive species {0}'.format(newSpecies)) + new_species = newObject + + object_was_in_edge = new_species in self.edge.species + + if not new_species.reactive: + logging.info('NOT generating reactions for unreactive species {0}'.format(new_species)) else: - logging.info('Adding species {0} to model core'.format(newSpecies)) - display(newSpecies) # if running in IPython --pylab mode, draws the picture! + logging.info('Adding species {0} to model core'.format(new_species)) + display(new_species) # if running in IPython --pylab mode, draws the picture! 
# Add new species - reactionsMovedFromEdge = self.addSpeciesToCore(newSpecies) + reactions_moved_from_edge = self.addSpeciesToCore(new_species) elif isinstance(newObject, tuple) and isinstance(newObject[0], PDepNetwork) and self.pressureDependence: - pdepNetwork, newSpecies = newObject - newReactions.extend(pdepNetwork.exploreIsomer(newSpecies)) + pdep_network, new_species = newObject + new_reactions.extend(pdep_network.exploreIsomer(new_species)) - self.processNewReactions(newReactions, newSpecies, pdepNetwork, generateThermo=False) + self.processNewReactions(new_reactions, new_species, pdep_network, generateThermo=False) else: - raise TypeError('Unable to use object {0} to enlarge reaction model; expecting an object of class rmg.model.Species or rmg.model.PDepNetwork, not {1}'.format(newObject, newObject.__class__)) + raise TypeError('Unable to use object {0} to enlarge reaction model; expecting an object of class ' + 'rmg.model.Species or rmg.model.PDepNetwork, not {1}'.format(newObject, + newObject.__class__)) # If there are any core species among the unimolecular product channels # of any existing network, they need to be made included @@ -566,30 +573,30 @@ def enlarge(self, newObject=None, reactEdge=False, for products in network.products: products = products.species if len(products) == 1 and products[0] == species: - newReactions = network.exploreIsomer(species) + new_reactions = network.exploreIsomer(species) - self.processNewReactions(newReactions, species, network, generateThermo=False) + self.processNewReactions(new_reactions, species, network, generateThermo=False) network.updateConfigurations(self) index = 0 break else: index += 1 - - if isinstance(newObject, Species) and objectWasInEdge: + + if isinstance(newObject, Species) and object_was_in_edge: # moved one species from edge to core - numOldEdgeSpecies -= 1 + num_old_edge_species -= 1 # moved these reactions from edge to core - numOldEdgeReactions -= len(reactionsMovedFromEdge) + num_old_edge_reactions -= len(reactions_moved_from_edge) else: # Generate reactions between all core species which have not been # reacted yet and exceed the reaction filter thresholds - rxnLists, spcsTuples = react_all(self.core.species, numOldCoreSpecies, + rxn_lists, spcs_tuples = react_all(self.core.species, num_old_core_species, unimolecularReact, bimolecularReact, trimolecularReact=trimolecularReact, procnum=procnum) - for rxnList, spcTuple in zip(rxnLists, spcsTuples): + for rxnList, spcTuple in zip(rxn_lists, spcs_tuples): if rxnList: # Identify a core species which was used to generate the reaction # This is only used to determine the reaction direction for processing @@ -605,7 +612,7 @@ def enlarge(self, newObject=None, reactEdge=False, self.applyThermoToSpecies(procnum) # Do thermodynamic filtering - if not numpy.isinf(self.toleranceThermoKeepSpeciesInEdge) and self.newSpeciesList != []: + if not np.isinf(self.toleranceThermoKeepSpeciesInEdge) and self.newSpeciesList != []: self.thermoFilterSpecies(self.newSpeciesList) # Generate kinetics of new reactions @@ -616,7 +623,7 @@ def enlarge(self, newObject=None, reactEdge=False, # assume the kinetics are satisfactory if reaction.kinetics is None: self.applyKineticsToReaction(reaction) - + # For new reactions, convert ArrheniusEP to Arrhenius, and fix barrier heights. # self.newReactionList only contains *actually* new reactions, all in the forward direction. 
for reaction in self.newReactionList: @@ -624,46 +631,47 @@ def enlarge(self, newObject=None, reactEdge=False, if isinstance(reaction.kinetics, KineticsData): reaction.kinetics = reaction.kinetics.toArrhenius() # correct barrier heights of estimated kinetics - if isinstance(reaction,TemplateReaction) or isinstance(reaction,DepositoryReaction): # i.e. not LibraryReaction - reaction.fixBarrierHeight() # also converts ArrheniusEP to Arrhenius. - + if isinstance(reaction, TemplateReaction) or isinstance(reaction, + DepositoryReaction): # i.e. not LibraryReaction + reaction.fixBarrierHeight() # also converts ArrheniusEP to Arrhenius. + if self.pressureDependence and reaction.isUnimolecular(): # If this is going to be run through pressure dependence code, # we need to make sure the barrier is positive. reaction.fixBarrierHeight(forcePositive=True) - + # Update unimolecular (pressure dependent) reaction networks if self.pressureDependence: # Recalculate k(T,P) values for modified networks self.updateUnimolecularReactionNetworks() logging.info('') - + # Check new core and edge reactions for Chemkin duplicates # The same duplicate reaction gets brought into the core # at the same time, so there is no danger in checking all of the edge. - newCoreReactions = self.core.reactions[numOldCoreReactions:] - newEdgeReactions = self.edge.reactions[numOldEdgeReactions:] - checkedReactions = self.core.reactions[:numOldCoreReactions] + self.edge.reactions[:numOldEdgeReactions] + new_core_reactions = self.core.reactions[num_old_core_reactions:] + new_edge_reactions = self.edge.reactions[num_old_edge_reactions:] + checked_reactions = self.core.reactions[:num_old_core_reactions] + self.edge.reactions[:num_old_edge_reactions] from rmgpy.chemkin import markDuplicateReaction - for rxn in newCoreReactions: - markDuplicateReaction(rxn, checkedReactions) - checkedReactions.append(rxn) + for rxn in new_core_reactions: + markDuplicateReaction(rxn, checked_reactions) + checked_reactions.append(rxn) if self.saveEdgeSpecies: - for rxn in newEdgeReactions: - markDuplicateReaction(rxn, checkedReactions) - checkedReactions.append(rxn) + for rxn in new_edge_reactions: + markDuplicateReaction(rxn, checked_reactions) + checked_reactions.append(rxn) self.printEnlargeSummary( - newCoreSpecies=self.core.species[numOldCoreSpecies:], - newCoreReactions=self.core.reactions[numOldCoreReactions:], - reactionsMovedFromEdge=reactionsMovedFromEdge, - newEdgeSpecies=self.edge.species[numOldEdgeSpecies:], - newEdgeReactions=self.edge.reactions[numOldEdgeReactions:], + newCoreSpecies=self.core.species[num_old_core_species:], + newCoreReactions=self.core.reactions[num_old_core_reactions:], + reactionsMovedFromEdge=reactions_moved_from_edge, + newEdgeSpecies=self.edge.species[num_old_edge_species:], + newEdgeReactions=self.edge.reactions[num_old_edge_reactions:], reactEdge=reactEdge, ) logging.info('') - - def addNewSurfaceObjects(self,obj,newSurfaceSpecies,newSurfaceReactions,reactionSystem): + + def addNewSurfaceObjects(self, obj, newSurfaceSpecies, newSurfaceReactions, reactionSystem): """ obj is the list of objects for enlargement coming from simulate newSurfaceSpecies and newSurfaceReactions are the current lists of surface species and surface reactions @@ -673,29 +681,34 @@ def addNewSurfaceObjects(self,obj,newSurfaceSpecies,newSurfaceReactions,reaction moves them to appropriate newSurfaceSpc/RxnsAdd/loss sets returns false if the surface has changed """ - surfSpcs = set(self.surface.species) - surfRxns = set(self.surface.reactions) - + 
surf_spcs = set(self.surface.species) + surf_rxns = set(self.surface.reactions) + newSurfaceSpecies = set(newSurfaceSpecies) newSurfaceReactions = set(newSurfaceReactions) - - addedRxns = {k for k in obj if isinstance(k,Reaction)} - addedSurfaceRxns = newSurfaceReactions - surfRxns - - addedBulkRxns = addedRxns-addedSurfaceRxns - lostSurfaceRxns = (surfRxns - newSurfaceReactions) | addedBulkRxns - - addedSpcs = {k for k in obj if isinstance(k,Species)} | {k.getMaximumLeakSpecies(reactionSystem.T.value_si, reactionSystem.P.value_si) for k in obj if isinstance(k,PDepNetwork)} - lostSurfaceSpcs = (surfSpcs-newSurfaceSpecies) | addedSpcs - addedSurfaceSpcs = newSurfaceSpecies - surfSpcs - - self.newSurfaceSpcsAdd = self.newSurfaceSpcsAdd | addedSurfaceSpcs - self.newSurfaceRxnsAdd = self.newSurfaceRxnsAdd | addedSurfaceRxns - self.newSurfaceSpcsLoss = self.newSurfaceSpcsLoss | lostSurfaceSpcs - self.newSurfaceRxnsLoss = self.newSurfaceRxnsLoss | lostSurfaceRxns - - return not (self.newSurfaceRxnsAdd != set() or self.newSurfaceRxnsLoss != set() or self.newSurfaceSpcsLoss != set() or self.newSurfaceSpcsAdd != set()) - + + added_rxns = {k for k in obj if isinstance(k, Reaction)} + added_surface_rxns = newSurfaceReactions - surf_rxns + + added_bulk_rxns = added_rxns - added_surface_rxns + lost_surface_rxns = (surf_rxns - newSurfaceReactions) | added_bulk_rxns + + added_spcs = {k for k in obj if isinstance(k, Species)} | { + k.getMaximumLeakSpecies(reactionSystem.T.value_si, reactionSystem.P.value_si) for k in obj if + isinstance(k, PDepNetwork)} + lost_surface_spcs = (surf_spcs - newSurfaceSpecies) | added_spcs + added_surface_spcs = newSurfaceSpecies - surf_spcs + + self.newSurfaceSpcsAdd = self.newSurfaceSpcsAdd | added_surface_spcs + self.newSurfaceRxnsAdd = self.newSurfaceRxnsAdd | added_surface_rxns + self.newSurfaceSpcsLoss = self.newSurfaceSpcsLoss | lost_surface_spcs + self.newSurfaceRxnsLoss = self.newSurfaceRxnsLoss | lost_surface_rxns + + return not (self.newSurfaceRxnsAdd != set() or + self.newSurfaceRxnsLoss != set() or + self.newSurfaceSpcsLoss != set() or + self.newSurfaceSpcsAdd != set()) + def adjustSurface(self): """ Here we add species intended to be added and remove any species that need to be moved out of the core. 
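
The surface bookkeeping in addNewSurfaceObjects above, and the adjustment applied in the next hunk, reduce to plain set algebra: accumulate additions and losses, then intersect with the current core. A toy illustration with ordinary Python sets (the literal species names are invented for the example and are not part of the patch):

    # Illustrative sketch only -- not part of the patch.
    surface = {"A", "B"}
    core = {"A", "B", "C", "D"}
    surface_add, surface_loss = {"C"}, {"B"}

    surface = ((surface | surface_add) - surface_loss) & core
    assert surface == {"A", "C"}   # "B" dropped, "C" promoted, nothing outside the core
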
@@ -704,10 +717,13 @@ def adjustSurface(self): thus the surface algorithm currently (June 2017) is not implemented for pdep networks (however it will function fine for non-pdep reactions on a pdep run) """ - self.surface.species = list(((set(self.surface.species) | self.newSurfaceSpcsAdd)-self.newSurfaceSpcsLoss) & set(self.core.species)) - self.surface.reactions = list(((set(self.surface.reactions) | self.newSurfaceRxnsAdd)-self.newSurfaceRxnsLoss) & set(self.core.reactions)) + self.surface.species = list( + ((set(self.surface.species) | self.newSurfaceSpcsAdd) - self.newSurfaceSpcsLoss) & set(self.core.species)) + self.surface.reactions = list( + ((set(self.surface.reactions) | self.newSurfaceRxnsAdd) - self.newSurfaceRxnsLoss) & set( + self.core.reactions)) self.clearSurfaceAdjustments() - + def clearSurfaceAdjustments(self): """ empties surface tracking varaibles @@ -716,7 +732,7 @@ def clearSurfaceAdjustments(self): self.newSurfaceRxnsAdd = set() self.newSurfaceSpcsLoss = set() self.newSurfaceRxnsLoss = set() - + def processNewReactions(self, newReactions, newSpecies, pdepNetwork=None, generateThermo=True): """ Process a list of newly-generated reactions involving the new core @@ -725,51 +741,51 @@ def processNewReactions(self, newReactions, newSpecies, pdepNetwork=None, genera Makes a reaction and decides where to put it: core, edge, or PDepNetwork. """ for rxn in newReactions: - rxn, isNew = self.makeNewReaction(rxn, generateThermo=generateThermo) + rxn, is_new = self.makeNewReaction(rxn, generateThermo=generateThermo) if rxn is None: # Skip this reaction because there was something wrong with it continue - if isNew: + if is_new: # We've made a new reaction, so make sure the species involved # are in the core or edge - allSpeciesInCore = True + all_species_in_core = True # Add the reactant and product species to the edge if necessary # At the same time, check if all reactants and products are in the core for spec in rxn.reactants: if spec not in self.core.species: - allSpeciesInCore = False + all_species_in_core = False if spec not in self.edge.species: self.addSpeciesToEdge(spec) for spec in rxn.products: if spec not in self.core.species: - allSpeciesInCore = False + all_species_in_core = False if spec not in self.edge.species: self.addSpeciesToEdge(spec) - - isomerAtoms = sum([len(spec.molecule[0].atoms) for spec in rxn.reactants]) - + + isomer_atoms = sum([len(spec.molecule[0].atoms) for spec in rxn.reactants]) + # Decide whether or not to handle the reaction as a pressure-dependent reaction pdep = True if not self.pressureDependence: # The pressure dependence option is turned off entirely pdep = False - elif self.pressureDependence.maximumAtoms is not None and self.pressureDependence.maximumAtoms < isomerAtoms: + elif self.pressureDependence.maximumAtoms is not None and self.pressureDependence.maximumAtoms < isomer_atoms: # The reaction involves so many atoms that pressure-dependent effects are assumed to be negligible pdep = False elif not (rxn.isIsomerization() or rxn.isDissociation() or rxn.isAssociation()): # The reaction is not unimolecular in either direction, so it cannot be pressure-dependent pdep = False - elif isinstance(rxn,LibraryReaction): + elif isinstance(rxn, LibraryReaction): # Try generating the high pressure limit kinetics. If successful, set pdep to ``True``, and vice versa. 
pdep = rxn.generate_high_p_limit_kinetics() # If pressure dependence is on, we only add reactions that are not unimolecular; # unimolecular reactions will be added after processing the associated networks if not pdep: - if not isNew: + if not is_new: # The reaction is not new, so it should already be in the core or edge continue - if allSpeciesInCore: + if all_species_in_core: self.addReactionToCore(rxn) else: self.addReactionToEdge(rxn) @@ -824,13 +840,13 @@ def applyKineticsToReaction(self, reaction): """ from rmgpy.data.rmg import getDB # Find the reaction kinetics - kinetics, source, entry, isForward = self.generateKinetics(reaction) + kinetics, source, entry, is_forward = self.generateKinetics(reaction) # Flip the reaction direction if the kinetics are defined in the reverse direction - if not isForward: + if not is_forward: family = getDB('kinetics').families[reaction.family] reaction.reactants, reaction.products = reaction.products, reaction.reactants - reaction.pairs = [(p,r) for r,p in reaction.pairs] - if family.ownReverse and hasattr(reaction,'reverse'): + reaction.pairs = [(p, r) for r, p in reaction.pairs] + if family.ownReverse and hasattr(reaction, 'reverse'): if reaction.reverse: reaction.template = reaction.reverse.template # replace degeneracy @@ -845,77 +861,85 @@ def generateKinetics(self, reaction): """ # Only reactions from families should be missing kinetics assert isinstance(reaction, TemplateReaction) - + family = getFamilyLibraryObject(reaction.family) # Get the kinetics for the reaction - kinetics, source, entry, isForward = family.getKinetics(reaction, templateLabels=reaction.template, degeneracy=reaction.degeneracy, estimator=self.kineticsEstimator, returnAllKinetics=False) + kinetics, source, entry, is_forward = family.getKinetics(reaction, templateLabels=reaction.template, + degeneracy=reaction.degeneracy, + estimator=self.kineticsEstimator, + returnAllKinetics=False) # Get the gibbs free energy of reaction at 298 K G298 = reaction.getFreeEnergyOfReaction(298) - gibbsIsPositive = G298 > -1e-8 - - if family.ownReverse and hasattr(reaction,'reverse'): + gibbs_is_positive = G298 > -1e-8 + + if family.ownReverse and hasattr(reaction, 'reverse'): if reaction.reverse: # The kinetics family is its own reverse, so we could estimate kinetics in either direction - + # First get the kinetics for the other direction - rev_kinetics, rev_source, rev_entry, rev_isForward = family.getKinetics(reaction.reverse, templateLabels=reaction.reverse.template, degeneracy=reaction.reverse.degeneracy, estimator=self.kineticsEstimator, returnAllKinetics=False) + rev_kinetics, rev_source, rev_entry, rev_is_forward = family.getKinetics(reaction.reverse, + templateLabels=reaction.reverse.template, + degeneracy=reaction.reverse.degeneracy, + estimator=self.kineticsEstimator, + returnAllKinetics=False) # Now decide which direction's kinetics to keep - keepReverse = False - if (entry is not None and rev_entry is None): + keep_reverse = False + if entry is not None and rev_entry is None: # Only the forward has an entry, meaning an exact match in a depository or template # the reverse must have used an averaged estimated node - so use forward. reason = "This direction matched an entry in {0}, the other was just an estimate.".format(reaction.family) - elif (entry is None and rev_entry is not None): + elif entry is None and rev_entry is not None: # Only the reverse has an entry (see above) - use reverse. 
- keepReverse = True + keep_reverse = True reason = "This direction matched an entry in {0}, the other was just an estimate.".format(reaction.family) - elif (entry is not None and rev_entry is not None - and entry is rev_entry): + elif entry is not None and rev_entry is not None and entry is rev_entry: # Both forward and reverse have the same source and entry # Use the one for which the kinetics is the forward kinetics - keepReverse = gibbsIsPositive and isForward and rev_isForward + keep_reverse = gibbs_is_positive and is_forward and rev_is_forward reason = "Both directions matched the same entry in {0}, but this direction is exergonic.".format(reaction.family) - elif self.kineticsEstimator == 'group additivity' and (kinetics.comment.find("Fitted to 1 rate")>0 - and not rev_kinetics.comment.find("Fitted to 1 rate")>0) : - # forward kinetics were fitted to only 1 rate, but reverse are hopefully better - keepReverse = True - reason = "Other direction matched a group only fitted to 1 rate." - elif self.kineticsEstimator == 'group additivity' and (not kinetics.comment.find("Fitted to 1 rate")>0 - and rev_kinetics.comment.find("Fitted to 1 rate")>0) : - # reverse kinetics were fitted to only 1 rate, but forward are hopefully better - keepReverse = False - reason = "Other direction matched a group only fitted to 1 rate." + elif self.kineticsEstimator == 'group additivity' and (kinetics.comment.find("Fitted to 1 rate") > 0 + and not rev_kinetics.comment.find("Fitted to 1 rate") > 0): + # forward kinetics were fitted to only 1 rate, but reverse are hopefully better + keep_reverse = True + reason = "Other direction matched a group only fitted to 1 rate." + elif self.kineticsEstimator == 'group additivity' and (not kinetics.comment.find("Fitted to 1 rate") > 0 + and rev_kinetics.comment.find("Fitted to 1 rate") > 0): + # reverse kinetics were fitted to only 1 rate, but forward are hopefully better + keep_reverse = False + reason = "Other direction matched a group only fitted to 1 rate." elif entry is not None and rev_entry is not None: # Both directions matched explicit rate rules # Keep the direction with the lower (but nonzero) rank if entry.rank < rev_entry.rank and entry.rank != 0: - keepReverse = False - reason = "Both directions matched explicit rate rules, but this direction has a rule with a lower rank ({0} vs {1}).".format(entry.rank, rev_entry.rank) + keep_reverse = False + reason = "Both directions matched explicit rate rules, but this direction has a rule with a lower rank ({0} vs {1}).".format( + entry.rank, rev_entry.rank) elif rev_entry.rank < entry.rank and rev_entry.rank != 0: - keepReverse = True - reason = "Both directions matched explicit rate rules, but this direction has a rule with a lower rank ({0} vs {1}).".format(rev_entry.rank, entry.rank) + keep_reverse = True + reason = "Both directions matched explicit rate rules, but this direction has a rule with a lower rank ({0} vs {1}).".format( + rev_entry.rank, entry.rank) # Otherwise keep the direction that is exergonic at 298 K else: - keepReverse = gibbsIsPositive and isForward and rev_isForward + keep_reverse = gibbs_is_positive and is_forward and rev_is_forward reason = "Both directions matched explicit rate rules, but this direction is exergonic." 
else: # Keep the direction that is exergonic at 298 K # This must be done after the thermo generation step - keepReverse = gibbsIsPositive and isForward and rev_isForward + keep_reverse = gibbs_is_positive and is_forward and rev_is_forward reason = "Both directions are estimates, but this direction is exergonic." - if keepReverse: + if keep_reverse: kinetics = rev_kinetics source = rev_source entry = rev_entry - isForward = not rev_isForward + is_forward = not rev_is_forward G298 = -G298 - + if self.verboseComments: kinetics.comment += "\nKinetics were estimated in this direction instead of the reverse because:\n{0}".format(reason) - kinetics.comment += "\ndGrxn(298 K) = {0:.2f} kJ/mol".format( G298 / 1000.) - + kinetics.comment += "\ndGrxn(298 K) = {0:.2f} kJ/mol".format(G298 / 1000.) + # The comments generated by the database for estimated kinetics can # be quite long, and therefore not very useful # We don't want to waste lots of memory storing these long, @@ -930,11 +954,12 @@ def generateKinetics(self, reaction): pass else: # Estimated (averaged) rate rule - kinetics.comment = kinetics.comment[kinetics.comment.find('Estimated'):] - - return kinetics, source, entry, isForward - - def printEnlargeSummary(self, newCoreSpecies, newCoreReactions, newEdgeSpecies, newEdgeReactions, reactionsMovedFromEdge=None, reactEdge=False): + kinetics.comment = kinetics.comment[kinetics.comment.find('Estimated'):] + + return kinetics, source, entry, is_forward + + def printEnlargeSummary(self, newCoreSpecies, newCoreReactions, newEdgeSpecies, newEdgeReactions, + reactionsMovedFromEdge=None, reactEdge=False): """ Output a summary of a model enlargement step to the log. The details of the enlargement are passed in the `newCoreSpecies`, `newCoreReactions`, @@ -957,31 +982,33 @@ def printEnlargeSummary(self, newCoreSpecies, newCoreReactions, newEdgeSpecies, for spec in newEdgeSpecies: display(spec) logging.info(' {0}'.format(spec)) - + if reactionsMovedFromEdge: logging.info('Moved {0:d} reactions from edge to core'.format(len(reactionsMovedFromEdge))) for rxn in reactionsMovedFromEdge: for r in newCoreReactions: if ((r.reactants == rxn.reactants and r.products == rxn.products) or - (r.products == rxn.reactants and r.reactants == rxn.products)): + (r.products == rxn.reactants and r.reactants == rxn.products)): logging.info(' {0}'.format(r)) newCoreReactions.remove(r) - break + break logging.info('Added {0:d} new core reactions'.format(len(newCoreReactions))) for rxn in newCoreReactions: logging.info(' {0}'.format(rxn)) - + logging.info('Created {0:d} new edge reactions'.format(len(newEdgeReactions))) for rxn in newEdgeReactions: logging.info(' {0}'.format(rxn)) - coreSpeciesCount, coreReactionCount, edgeSpeciesCount, edgeReactionCount = self.getModelSize() + core_species_count, core_reaction_count, edge_species_count, edge_reaction_count = self.getModelSize() # Output current model size information after enlargement logging.info('') logging.info('After model enlargement:') - logging.info(' The model core has {0:d} species and {1:d} reactions'.format(coreSpeciesCount, coreReactionCount)) - logging.info(' The model edge has {0:d} species and {1:d} reactions'.format(edgeSpeciesCount, edgeReactionCount)) + logging.info(' The model core has {0:d} species and {1:d} reactions'.format(core_species_count, + core_reaction_count)) + logging.info(' The model edge has {0:d} species and {1:d} reactions'.format(edge_species_count, + edge_reaction_count)) logging.info('') def addSpeciesToCore(self, spec): @@ -995,23 
+1022,22 @@ def addSpeciesToCore(self, spec): assert spec not in self.core.species, "Tried to add species {0} to core, but it's already there".format(spec.label) forbidden_structures = getDB('forbidden') - + # check RMG globally forbidden structures if not spec.explicitlyAllowed and forbidden_structures.isMoleculeForbidden(spec.molecule[0]): - - rxnList = [] - if spec in self.edge.species: - #remove forbidden species from edge + rxn_list = [] + if spec in self.edge.species: + # remove forbidden species from edge logging.info("Species {0} was Forbidden and not added to Core...Removing from Edge.".format(spec)) - self.removeSpeciesFromEdge(self.reactionSystems,spec) + self.removeSpeciesFromEdge(self.reactionSystems, spec) return [] - + # Add the species to the core self.core.species.append(spec) - - rxnList = [] + + rxn_list = [] if spec in self.edge.species: # If species was in edge, remove it @@ -1021,26 +1047,30 @@ def addSpeciesToCore(self, spec): # Search edge for reactions that now contain only core species; # these belong in the model core and will be moved there for rxn in self.edge.reactions: - allCore = True + all_core = True for reactant in rxn.reactants: - if reactant not in self.core.species: allCore = False + if reactant not in self.core.species: + all_core = False for product in rxn.products: - if product not in self.core.species: allCore = False - if allCore: rxnList.append(rxn) + if product not in self.core.species: + all_core = False + if all_core: + rxn_list.append(rxn) # Move any identified reactions to the core - for rxn in rxnList: + for rxn in rxn_list: self.addReactionToCore(rxn) logging.debug("Moving reaction from edge to core: {0}".format(rxn)) - return rxnList + return rxn_list def addSpeciesToEdge(self, spec): """ Add a species `spec` to the reaction model edge. 
""" self.edge.species.append(spec) - - def setThermodynamicFilteringParameters(self,Tmax, toleranceThermoKeepSpeciesInEdge,minCoreSizeForPrune,maximumEdgeSpecies,reactionSystems): + + def setThermodynamicFilteringParameters(self, Tmax, toleranceThermoKeepSpeciesInEdge, minCoreSizeForPrune, + maximumEdgeSpecies, reactionSystems): """ sets parameters for thermodynamic filtering based on the current core Tmax is the maximum reactor temperature in K @@ -1053,13 +1083,13 @@ def setThermodynamicFilteringParameters(self,Tmax, toleranceThermoKeepSpeciesInE Gs = [spc.thermo.getFreeEnergy(Tmax) for spc in self.core.species] self.Gmax = max(Gs) self.Gmin = min(Gs) - - self.Gfmax = toleranceThermoKeepSpeciesInEdge*(self.Gmax-self.Gmin)+self.Gmax + + self.Gfmax = toleranceThermoKeepSpeciesInEdge * (self.Gmax - self.Gmin) + self.Gmax self.toleranceThermoKeepSpeciesInEdge = toleranceThermoKeepSpeciesInEdge self.minCoreSizeForPrune = minCoreSizeForPrune self.reactionSystems = reactionSystems self.maximumEdgeSpecies = maximumEdgeSpecies - + def thermoFilterSpecies(self, spcs): """ checks Gibbs energy of the species in species against the @@ -1069,15 +1099,17 @@ def thermoFilterSpecies(self, spcs): for spc in spcs: G = spc.thermo.getFreeEnergy(Tmax) if G > self.Gfmax: - Gn = (G-self.Gmax)/(self.Gmax-self.Gmin) - logging.info('Removing species {0} with Gibbs energy {1} from edge because it\'s Gibbs number {2} is greater than the toleranceThermoKeepSpeciesInEdge of {3} '.format(spc,G,Gn,self.toleranceThermoKeepSpeciesInEdge)) - self.removeSpeciesFromEdge(self.reactionSystems,spc) - + Gn = (G - self.Gmax) / (self.Gmax - self.Gmin) + logging.info('Removing species {0} with Gibbs energy {1} from edge because it\'s Gibbs number {2} is ' + 'greater than the toleranceThermoKeepSpeciesInEdge of ' + '{3} '.format(spc, G, Gn, self.toleranceThermoKeepSpeciesInEdge)) + self.removeSpeciesFromEdge(self.reactionSystems, spc) + # Delete any networks that became empty as a result of pruning if self.pressureDependence: self.removeEmptyPdepNetworks() - - def thermoFilterDown(self,maximumEdgeSpecies,minSpeciesExistIterationsForPrune=0): + + def thermoFilterDown(self, maximumEdgeSpecies, minSpeciesExistIterationsForPrune=0): """ removes species from the edge based on their Gibbs energy until maximumEdgeSpecies is reached under the constraint that all removed species are older than @@ -1087,144 +1119,150 @@ def thermoFilterDown(self,maximumEdgeSpecies,minSpeciesExistIterationsForPrune=0 before it is eligible for thermo filtering """ Tmax = self.Tmax - numToRemove = len(self.edge.species) - maximumEdgeSpecies - logging.debug('Planning to remove {0} species'.format(numToRemove)) + num_to_remove = len(self.edge.species) - maximumEdgeSpecies + logging.debug('Planning to remove {0} species'.format(num_to_remove)) iteration = self.iterationNum - - if numToRemove > 0: #implies flux pruning is off or did not trigger + + if num_to_remove > 0: # implies flux pruning is off or did not trigger logging.info('Reached maximum number of edge species') logging.info('Attempting to remove excess edge species with Thermodynamic filtering') spcs = self.edge.species - Gfs = numpy.array([spc.thermo.getFreeEnergy(Tmax) for spc in spcs]) - Gns = (Gfs-self.Gmax)/(self.Gmax-self.Gmin) - inds = numpy.argsort(Gns) #could actually do this with the Gfs, but want to print the Gn value later - inds = inds[::-1] #get in order of increasing Gf + Gfs = np.array([spc.thermo.getFreeEnergy(Tmax) for spc in spcs]) + Gns = (Gfs - self.Gmax) / (self.Gmax - self.Gmin) + 
inds = np.argsort(Gns) # could actually do this with the Gfs, but want to print the Gn value later + inds = inds[::-1] # get in order of increasing Gf ind = 0 - removeSpcs = [] - + remove_spcs = [] + rInds = [] - while ind < len(inds) and numToRemove > 0: #find the species we can remove and collect indices for removal + while ind < len( + inds) and num_to_remove > 0: # find the species we can remove and collect indices for removal i = inds[ind] spc = spcs[i] if iteration - spc.creationIteration >= minSpeciesExistIterationsForPrune: - removeSpcs.append(spc) + remove_spcs.append(spc) rInds.append(i) - numToRemove -= 1 + num_to_remove -= 1 ind += 1 - - logging.debug('found {0} eligible species for filtering'.format(len(removeSpcs))) - - for i,spc in enumerate(removeSpcs): - logging.info('Removing species {0} from edge to meet maximum number of edge species, Gibbs number is {1}'.format(spc,Gns[rInds[i]])) - self.removeSpeciesFromEdge(self.reactionSystems,spc) - + + logging.debug('found {0} eligible species for filtering'.format(len(remove_spcs))) + + for i, spc in enumerate(remove_spcs): + logging.info('Removing species {0} from edge to meet maximum number of edge species, Gibbs ' + 'number is {1}'.format(spc, Gns[rInds[i]])) + self.removeSpeciesFromEdge(self.reactionSystems, spc) + # Delete any networks that became empty as a result of pruning if self.pressureDependence: self.removeEmptyPdepNetworks() - - #call garbage collection + + # call garbage collection collected = gc.collect() logging.info('Garbage collector: collected %d objects.' % (collected)) - + def removeEmptyPdepNetworks(self): """ searches for and deletes any empty pdep networks """ - networksToDelete = [] + networks_to_delete = [] for network in self.networkList: if len(network.pathReactions) == 0 and len(network.netReactions) == 0: - networksToDelete.append(network) - - if len(networksToDelete) > 0: - logging.info('Deleting {0:d} empty pressure-dependent reaction networks'.format(len(networksToDelete))) - for network in networksToDelete: + networks_to_delete.append(network) + + if len(networks_to_delete) > 0: + logging.info('Deleting {0:d} empty pressure-dependent reaction networks'.format(len(networks_to_delete))) + for network in networks_to_delete: logging.debug(' Deleting empty pressure dependent reaction network #{0:d}'.format(network.index)) source = tuple(network.source) nets_with_this_source = self.networkDict[source] nets_with_this_source.remove(network) if not nets_with_this_source: - del(self.networkDict[source]) + del (self.networkDict[source]) self.networkList.remove(network) - - def prune(self, reactionSystems, toleranceKeepInEdge, toleranceMoveToCore, maximumEdgeSpecies, minSpeciesExistIterationsForPrune): + + def prune(self, reactionSystems, toleranceKeepInEdge, toleranceMoveToCore, maximumEdgeSpecies, + minSpeciesExistIterationsForPrune): """ Remove species from the model edge based on the simulation results from the list of `reactionSystems`. 
""" - ineligibleSpecies = [] # A list of the species which are not eligible for pruning, for any reason - prunableSpecies = reactionSystems[0].prunableSpecies - prunableNetworks = reactionSystems[0].prunableNetworks - - numPrunableSpecies = len(prunableSpecies) + ineligible_species = [] # A list of the species which are not eligible for pruning, for any reason + prunable_species = reactionSystems[0].prunableSpecies + prunable_networks = reactionSystems[0].prunableNetworks + + num_prunable_species = len(prunable_species) iteration = self.iterationNum # All edge species that have not existed for more than two enlarge # iterations are ineligible for pruning - for spec in prunableSpecies: + for spec in prunable_species: if iteration - spec.creationIteration <= minSpeciesExistIterationsForPrune: - ineligibleSpecies.append(spec) + ineligible_species.append(spec) # Get the maximum species rates (and network leak rates) # across all reaction systems - maxEdgeSpeciesRateRatios = numpy.zeros((numPrunableSpecies), numpy.float64) + max_edge_species_rate_ratios = np.zeros((num_prunable_species), np.float64) for reactionSystem in reactionSystems: - for i in range(numPrunableSpecies): - rateRatio = reactionSystem.maxEdgeSpeciesRateRatios[i] - if maxEdgeSpeciesRateRatios[i] < rateRatio: - maxEdgeSpeciesRateRatios[i] = rateRatio + for i in range(num_prunable_species): + rate_ratio = reactionSystem.maxEdgeSpeciesRateRatios[i] + if max_edge_species_rate_ratios[i] < rate_ratio: + max_edge_species_rate_ratios[i] = rate_ratio - for i,network in enumerate(prunableNetworks): - rateRatio = reactionSystem.maxNetworkLeakRateRatios[i] + for i, network in enumerate(prunable_networks): + rate_ratio = reactionSystem.maxNetworkLeakRateRatios[i] # Add the fraction of the network leak rate contributed by # each unexplored species to that species' rate # This is to ensure we have an overestimate of that species flux - ratios = network.getLeakBranchingRatios(reactionSystem.T.value_si,reactionSystem.P.value_si) - for spec, frac in ratios.iteritems(): - if spec in prunableSpecies: - index = prunableSpecies.index(spec) - maxEdgeSpeciesRateRatios[index] += frac * rateRatio + ratios = network.getLeakBranchingRatios(reactionSystem.T.value_si, reactionSystem.P.value_si) + for spec, frac in ratios.items(): + if spec in prunable_species: + index = prunable_species.index(spec) + max_edge_species_rate_ratios[index] += frac * rate_ratio # Mark any species that is explored in any partial network as ineligible for pruning for spec in network.explored: - if spec not in ineligibleSpecies: - ineligibleSpecies.append(spec) + if spec not in ineligible_species: + ineligible_species.append(spec) # Sort the edge species rates by index - indices = numpy.argsort(maxEdgeSpeciesRateRatios) + indices = np.argsort(max_edge_species_rate_ratios) # Determine which species to prune - speciesToPrune = [] - pruneDueToRateCounter = 0 + species_to_prune = [] + prune_due_to_rate_counter = 0 for index in indices: - spec = prunableSpecies[index] - if spec in ineligibleSpecies or not spec in self.edge.species: + spec = prunable_species[index] + if spec in ineligible_species or not spec in self.edge.species: continue # Remove the species with rates below the pruning tolerance from the model edge - if maxEdgeSpeciesRateRatios[index] < toleranceKeepInEdge: - speciesToPrune.append((index, spec)) - pruneDueToRateCounter += 1 + if max_edge_species_rate_ratios[index] < toleranceKeepInEdge: + species_to_prune.append((index, spec)) + prune_due_to_rate_counter += 1 # Keep 
removing species with the lowest rates until we are below the maximum edge species size - elif numPrunableSpecies - len(speciesToPrune) > maximumEdgeSpecies: - if maxEdgeSpeciesRateRatios[index] < toleranceMoveToCore: + elif num_prunable_species - len(species_to_prune) > maximumEdgeSpecies: + if max_edge_species_rate_ratios[index] < toleranceMoveToCore: logging.info('Pruning species {0} to make numEdgeSpecies smaller than maximumEdgeSpecies'.format(spec)) - speciesToPrune.append((index, spec)) + species_to_prune.append((index, spec)) else: - logging.warning('Attempted to prune a species that exceeded toleranceMoveToCore, pruning settings for this run are likely bad, either maximumEdgeSpecies needs to be set higher (~100000) or minSpeciesExistIterationsForPrune should be reduced (~2)') + logging.warning('Attempted to prune a species that exceeded toleranceMoveToCore, pruning settings ' + 'for this run are likely bad, either maximumEdgeSpecies needs to be set higher ' + '(~100000) or minSpeciesExistIterationsForPrune should be reduced (~2)') break else: break # Actually do the pruning - if pruneDueToRateCounter > 0: - logging.info('Pruning {0:d} species whose rate ratios against characteristic rate did not exceed the minimum threshold of {1:g}'.format(pruneDueToRateCounter, toleranceKeepInEdge)) - for index, spec in speciesToPrune[0:pruneDueToRateCounter]: + if prune_due_to_rate_counter > 0: + logging.info('Pruning {0:d} species whose rate ratios against characteristic rate did not exceed the ' + 'minimum threshold of {1:g}'.format(prune_due_to_rate_counter, toleranceKeepInEdge)) + for index, spec in species_to_prune[0:prune_due_to_rate_counter]: logging.info('Pruning species {0:<56}'.format(spec)) - logging.debug(' {0:<56} {1:10.4e}'.format(spec, maxEdgeSpeciesRateRatios[index])) + logging.debug(' {0:<56} {1:10.4e}'.format(spec, max_edge_species_rate_ratios[index])) self.removeSpeciesFromEdge(reactionSystems, spec) - if len(speciesToPrune) - pruneDueToRateCounter > 0: - logging.info('Pruning {0:d} species to obtain an edge size of {1:d} species'.format(len(speciesToPrune) - pruneDueToRateCounter, maximumEdgeSpecies)) - for index, spec in speciesToPrune[pruneDueToRateCounter:]: + if len(species_to_prune) - prune_due_to_rate_counter > 0: + logging.info('Pruning {0:d} species to obtain an edge size of {1:d} species'.format(len(species_to_prune) - prune_due_to_rate_counter, maximumEdgeSpecies)) + for index, spec in species_to_prune[prune_due_to_rate_counter:]: logging.info('Pruning species {0:<56}'.format(spec)) - logging.debug(' {0:<56} {1:10.4e}'.format(spec, maxEdgeSpeciesRateRatios[index])) + logging.debug(' {0:<56} {1:10.4e}'.format(spec, max_edge_species_rate_ratios[index])) self.removeSpeciesFromEdge(reactionSystems, spec) # Delete any networks that became empty as a result of pruning @@ -1233,7 +1271,6 @@ def prune(self, reactionSystems, toleranceKeepInEdge, toleranceMoveToCore, maxim logging.info('') - def removeSpeciesFromEdge(self, reactionSystems, spec): """ Remove species `spec` from the reaction model edge. 
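
The pruning pass above ranks the prunable edge species by their worst-case (maximum) rate ratio across all reaction systems and removes the lowest-flux species first, subject to toleranceKeepInEdge and maximumEdgeSpecies. A minimal sketch of that ordering, using NumPy only and invented sample ratios (illustration, not RMG code):

    # Illustrative sketch only -- the sample numbers are made up.
    import numpy as np

    max_edge_species_rate_ratios = np.array([1e-9, 3e-4, 5e-2, 1e-7])
    tolerance_keep_in_edge = 1e-6

    order = np.argsort(max_edge_species_rate_ratios)   # lowest flux first
    to_prune = [i for i in order
                if max_edge_species_rate_ratios[i] < tolerance_keep_in_edge]
    # -> [0, 3]: only species below the tolerance are eligible for rate-based pruning

Species above the tolerance are then only removed if the edge still exceeds maximumEdgeSpecies, as the surrounding hunk shows.
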
@@ -1251,42 +1288,42 @@ def removeSpeciesFromEdge(self, reactionSystems, spec): pass # identify any reactions it's involved in - rxnList = [] + rxn_list = [] for rxn in reactionSystem.reactionIndex: if spec in rxn.reactants or spec in rxn.products: - rxnList.append(rxn) + rxn_list.append(rxn) - for rxn in rxnList: + for rxn in rxn_list: reactionSystem.reactionIndex.pop(rxn) # identify any reactions it's involved in - rxnList = [] + rxn_list = [] for rxn in self.edge.reactions: if spec in rxn.reactants or spec in rxn.products: - rxnList.append(rxn) + rxn_list.append(rxn) # remove those reactions - for rxn in rxnList: + for rxn in rxn_list: self.edge.reactions.remove(rxn) - + # Remove the species from any unirxn networks it is in if self.pressureDependence: for network in self.networkList: # Delete all path reactions involving the species - rxnList = [] + rxn_list = [] for rxn in network.pathReactions: if spec in rxn.reactants or spec in rxn.products: - rxnList.append(rxn) - if len(rxnList) > 0: - for rxn in rxnList: + rxn_list.append(rxn) + if len(rxn_list) > 0: + for rxn in rxn_list: network.pathReactions.remove(rxn) # Delete all net reactions involving the species - rxnList = [] + rxn_list = [] for rxn in network.netReactions: if spec in rxn.reactants or spec in rxn.products: - rxnList.append(rxn) - for rxn in rxnList: + rxn_list.append(rxn) + for rxn in rxn_list: network.netReactions.remove(rxn) - + # Recompute the isomers, reactants, and products for this network network.updateConfigurations(self) @@ -1300,11 +1337,11 @@ def removeSpeciesFromEdge(self, reactionSystems, spec): del self.reactionDict[family][reactant1][spec] for reactant1 in self.reactionDict[family]: for reactant2 in self.reactionDict[family][reactant1]: - tempRxnDeleteList = [] + temp_rxn_delete_list = [] for templateReaction in self.reactionDict[family][reactant1][reactant2]: if spec in templateReaction.reactants or spec in templateReaction.products: - tempRxnDeleteList.append(templateReaction) - for tempRxnToBeDeleted in tempRxnDeleteList: + temp_rxn_delete_list.append(templateReaction) + for tempRxnToBeDeleted in temp_rxn_delete_list: self.reactionDict[family][reactant1][reactant2].remove(tempRxnToBeDeleted) # remove from the global list of species, to free memory @@ -1325,7 +1362,7 @@ def addReactionToCore(self, rxn): self.core.reactions.append(rxn) if rxn in self.edge.reactions: self.edge.reactions.remove(rxn) - + def addReactionToEdge(self, rxn): """ Add a reaction `rxn` to the reaction model edge. This function assumes @@ -1342,24 +1379,24 @@ def getModelSize(self): Note that this is not necessarily equal to the lengths of the corresponding species and reaction lists. """ - coreSpeciesCount = len(self.core.species) - coreReactionsCount = len(self.core.reactions) - edgeSpeciesCount = len(self.edge.species) - edgeReactionsCount = len(self.edge.reactions) - return (coreSpeciesCount, coreReactionsCount, edgeSpeciesCount, edgeReactionsCount) + core_species_count = len(self.core.species) + core_reactions_count = len(self.core.reactions) + edge_species_count = len(self.edge.species) + edge_reactions_count = len(self.edge.reactions) + return core_species_count, core_reactions_count, edge_species_count, edge_reactions_count def getLists(self): """ Return lists of all of the species and reactions in the core and the edge. 
""" - speciesList = [] - speciesList.extend(self.core.species) - speciesList.extend(self.edge.species) - reactionList = [] - reactionList.extend(self.core.reactions) - reactionList.extend(self.edge.reactions) - return speciesList, reactionList + species_list = [] + species_list.extend(self.core.species) + species_list.extend(self.edge.species) + reaction_list = [] + reaction_list.extend(self.core.reactions) + reaction_list.extend(self.edge.reactions) + return species_list, reaction_list def getStoichiometryMatrix(self): """ @@ -1367,16 +1404,18 @@ def getStoichiometryMatrix(self): The id of each species and reaction is the corresponding row and column, respectively, in the matrix. """ - speciesList, reactionList = self.getLists() + species_list, reaction_list = self.getLists() from scipy import sparse stoichiometry = sparse.dok_matrix((self.speciesCounter, self.reactionCounter), float) - for rxn in reactionList: + for rxn in reaction_list: j = rxn.index - 1 - specList = rxn.reactants[:]; specList.extend(rxn.products) - for spec in specList: + spec_list = rxn.reactants[:] + spec_list.extend(rxn.products) + for spec in spec_list: i = spec.index - 1 nu = rxn.getStoichiometricCoefficient(spec) - if nu != 0: stoichiometry[i,j] = nu + if nu != 0: + stoichiometry[i, j] = nu return stoichiometry.tocsr() def addSeedMechanismToCore(self, seedMechanism, react=False): @@ -1388,67 +1427,78 @@ def addSeedMechanismToCore(self, seedMechanism, react=False): so it is not done by default. """ - if react: raise NotImplementedError("react=True doesn't work yet") + if react: + raise NotImplementedError("react=True doesn't work yet") database = rmgpy.data.rmg.database - libraryNames = database.kinetics.libraries.keys() - familyNames = database.kinetics.families.keys() - - path = os.path.join(settings['database.directory'],'kinetics','libraries') + library_names = list(database.kinetics.libraries.keys()) + family_names = list(database.kinetics.families.keys()) + + path = os.path.join(settings['database.directory'], 'kinetics', 'libraries') from rmgpy.rmg.input import rmg - - self.newReactionList = []; self.newSpeciesList = [] - numOldCoreSpecies = len(self.core.species) - numOldCoreReactions = len(self.core.reactions) + self.newReactionList = [] + self.newSpeciesList = [] + + num_old_core_species = len(self.core.species) + num_old_core_reactions = len(self.core.reactions) logging.info('Adding seed mechanism {0} to model core...'.format(seedMechanism)) seedMechanism = database.kinetics.libraries[seedMechanism] - + rxns = seedMechanism.getLibraryReactions() - + for rxn in rxns: - if isinstance(rxn,LibraryReaction) and not (rxn.library in libraryNames) and not (rxn.library == 'kineticsjobs'): #if one of the reactions in the library is from another library load that library - database.kinetics.libraryOrder.append((rxn.library,'Internal')) - database.kinetics.loadLibraries(path=path,libraries=[rxn.library]) - libraryNames = database.kinetics.libraries.keys() - if isinstance(rxn,TemplateReaction) and not (rxn.family in familyNames): - logging.warning('loading reaction {0} originally from family {1} as a library reaction'.format(str(rxn),rxn.family)) + if isinstance(rxn, LibraryReaction) and not (rxn.library in library_names) and not (rxn.library == 'kineticsjobs'): # if one of the reactions in the library is from another library load that library + database.kinetics.libraryOrder.append((rxn.library, 'Internal')) + database.kinetics.loadLibraries(path=path, libraries=[rxn.library]) + library_names = 
list(database.kinetics.libraries.keys()) + if isinstance(rxn, TemplateReaction) and not (rxn.family in family_names): + logging.warning('loading reaction {0} originally from family {1} as a library reaction'.format(str(rxn), + rxn.family)) rxn = LibraryReaction(reactants=rxn.reactants[:], products=rxn.products[:], - library=seedMechanism.name, specificCollider=rxn.specificCollider, kinetics=rxn.kinetics, duplicate=rxn.duplicate, - reversible=rxn.reversible - ) - r, isNew = self.makeNewReaction(rxn) # updates self.newSpeciesList and self.newReactionlist + library=seedMechanism.name, specificCollider=rxn.specificCollider, + kinetics=rxn.kinetics, duplicate=rxn.duplicate, + reversible=rxn.reversible + ) + r, isNew = self.makeNewReaction(rxn) # updates self.newSpeciesList and self.newReactionlist if not isNew: logging.info("This library reaction was not new: {0}".format(rxn)) - elif self.pressureDependence and rxn.elementary_high_p and rxn.isUnimolecular()\ + elif self.pressureDependence and rxn.elementary_high_p and rxn.isUnimolecular() \ and isinstance(rxn, LibraryReaction) and isinstance(rxn.kinetics, Arrhenius): # This unimolecular library reaction is flagged as `elementary_high_p` and has Arrhenius type kinetics. # We should calculate a pressure-dependent rate for it if len(rxn.reactants) == 1: - self.processNewReactions(newReactions=[rxn],newSpecies=rxn.reactants[0]) + self.processNewReactions(newReactions=[rxn], newSpecies=rxn.reactants[0]) else: - self.processNewReactions(newReactions=[rxn],newSpecies=rxn.products[0]) - + self.processNewReactions(newReactions=[rxn], newSpecies=rxn.products[0]) + # Perform species constraints and forbidden species checks - + for spec in self.newSpeciesList: if database.forbiddenStructures.isMoleculeForbidden(spec.molecule[0]): if 'allowed' in rmg.speciesConstraints and 'seed mechanisms' in rmg.speciesConstraints['allowed']: spec.explicitlyAllowed = True - logging.warning("Species {0} from seed mechanism {1} is globally forbidden. It will behave as an inert unless found in a seed mechanism or reaction library.".format(spec.label, seedMechanism.label)) + logging.warning("Species {0} from seed mechanism {1} is globally forbidden. " + "It will behave as an inert unless found in a seed mechanism " + "or reaction library.".format(spec.label, seedMechanism.label)) else: - raise ForbiddenStructureException("Species {0} from seed mechanism {1} is globally forbidden. You may explicitly allow it, but it will remain inert unless found in a seed mechanism or reaction library.".format(spec.label, seedMechanism.label)) + raise ForbiddenStructureException("Species {0} from seed mechanism {1} is globally forbidden. " + "You may explicitly allow it, but it will remain inert unless " + "found in a seed mechanism or reaction " + "library.".format(spec.label, seedMechanism.label)) if failsSpeciesConstraints(spec): if 'allowed' in rmg.speciesConstraints and 'seed mechanisms' in rmg.speciesConstraints['allowed']: rmg.speciesConstraints['explicitlyAllowedMolecules'].extend(spec.molecule) else: - raise ForbiddenStructureException("Species constraints forbids species {0} from seed mechanism {1}. Please reformulate constraints, remove the species, or explicitly allow it.".format(spec.label, seedMechanism.label)) + raise ForbiddenStructureException("Species constraints forbids species {0} from seed mechanism {1}." 
+ " Please reformulate constraints, remove the species, or" + " explicitly allow it.".format(spec.label, seedMechanism.label)) - for spec in self.newSpeciesList: + for spec in self.newSpeciesList: if spec.reactive: - submit(spec,self.solventName) + submit(spec, self.solventName) self.addSpeciesToCore(spec) @@ -1458,23 +1508,21 @@ def addSeedMechanismToCore(self, seedMechanism, react=False): # we need to make sure the barrier is positive. # ...but are Seed Mechanisms run through PDep? Perhaps not. for spec in itertools.chain(rxn.reactants, rxn.products): - submit(spec,self.solventName) + submit(spec, self.solventName) rxn.fixBarrierHeight(forcePositive=True) self.addReactionToCore(rxn) - + # Check we didn't introduce unmarked duplicates self.markChemkinDuplicates() - + self.printEnlargeSummary( - newCoreSpecies=self.core.species[numOldCoreSpecies:], - newCoreReactions=self.core.reactions[numOldCoreReactions:], + newCoreSpecies=self.core.species[num_old_core_species:], + newCoreReactions=self.core.reactions[num_old_core_reactions:], newEdgeSpecies=[], newEdgeReactions=[], ) - - def addReactionLibraryToEdge(self, reactionLibrary): """ Add all species and reactions from `reactionLibrary`, a @@ -1482,44 +1530,46 @@ def addReactionLibraryToEdge(self, reactionLibrary): """ database = rmgpy.data.rmg.database - libraryNames = database.kinetics.libraries.keys() - familyNames = database.kinetics.families.keys() - path = os.path.join(settings['database.directory'],'kinetics','libraries') - + library_names = list(database.kinetics.libraries.keys()) + family_names = list(database.kinetics.families.keys()) + path = os.path.join(settings['database.directory'], 'kinetics', 'libraries') + from rmgpy.rmg.input import rmg self.newReactionList = [] self.newSpeciesList = [] - numOldEdgeSpecies = len(self.edge.species) - numOldEdgeReactions = len(self.edge.reactions) + num_old_edge_species = len(self.edge.species) + num_old_edge_reactions = len(self.edge.reactions) logging.info('Adding reaction library {0} to model edge...'.format(reactionLibrary)) reactionLibrary = database.kinetics.libraries[reactionLibrary] rxns = reactionLibrary.getLibraryReactions() for rxn in rxns: - if isinstance(rxn,LibraryReaction) and not (rxn.library in libraryNames): #if one of the reactions in the library is from another library load that library - database.kinetics.libraryOrder.append((rxn.library,'Internal')) - database.kinetics.loadLibraries(path=path,libraries=[rxn.library]) - libraryNames = database.kinetics.libraries.keys() - if isinstance(rxn,TemplateReaction) and not (rxn.family in familyNames): - logging.warning('loading reaction {0} originally from family {1} as a library reaction'.format(str(rxn),rxn.family)) + if isinstance(rxn, LibraryReaction) and not (rxn.library in library_names): # if one of the reactions in the library is from another library load that library + database.kinetics.libraryOrder.append((rxn.library, 'Internal')) + database.kinetics.loadLibraries(path=path, libraries=[rxn.library]) + library_names = list(database.kinetics.libraries.keys()) + if isinstance(rxn, TemplateReaction) and not (rxn.family in family_names): + logging.warning('loading reaction {0} originally from family {1} as a library reaction'.format(str(rxn), + rxn.family)) rxn = LibraryReaction(reactants=rxn.reactants[:], products=rxn.products[:], - library=reactionLibrary.name, specificCollider=rxn.specificCollider, kinetics=rxn.kinetics, duplicate=rxn.duplicate, - reversible=rxn.reversible - ) - r, isNew = self.makeNewReaction(rxn) # 
updates self.newSpeciesList and self.newReactionlist + library=reactionLibrary.name, specificCollider=rxn.specificCollider, + kinetics=rxn.kinetics, duplicate=rxn.duplicate, + reversible=rxn.reversible + ) + r, isNew = self.makeNewReaction(rxn) # updates self.newSpeciesList and self.newReactionlist if not isNew: logging.info("This library reaction was not new: {0}".format(rxn)) - elif self.pressureDependence and rxn.elementary_high_p and rxn.isUnimolecular()\ + elif self.pressureDependence and rxn.elementary_high_p and rxn.isUnimolecular() \ and isinstance(rxn, LibraryReaction) and isinstance(rxn.kinetics, Arrhenius): # This unimolecular library reaction is flagged as `elementary_high_p` and has Arrhenius type kinetics. # We should calculate a pressure-dependent rate for it if len(rxn.reactants) == 1: - self.processNewReactions(newReactions=[rxn],newSpecies=rxn.reactants[0]) + self.processNewReactions(newReactions=[rxn], newSpecies=rxn.reactants[0]) else: - self.processNewReactions(newReactions=[rxn],newSpecies=rxn.products[0]) + self.processNewReactions(newReactions=[rxn], newSpecies=rxn.products[0]) # Perform species constraints and forbidden species checks for spec in self.newSpeciesList: @@ -1527,18 +1577,25 @@ def addReactionLibraryToEdge(self, reactionLibrary): if database.forbiddenStructures.isMoleculeForbidden(spec.molecule[0]): if 'allowed' in rmg.speciesConstraints and 'reaction libraries' in rmg.speciesConstraints['allowed']: spec.explicitlyAllowed = True - logging.warning("Species {0} from reaction library {1} is globally forbidden. It will behave as an inert unless found in a seed mechanism or reaction library.".format(spec.label, reactionLibrary.label)) + logging.warning("Species {0} from reaction library {1} is globally forbidden. It will behave " + "as an inert unless found in a seed mechanism or reaction " + "library.".format(spec.label, reactionLibrary.label)) else: - raise ForbiddenStructureException("Species {0} from reaction library {1} is globally forbidden. You may explicitly allow it, but it will remain inert unless found in a seed mechanism or reaction library.".format(spec.label, reactionLibrary.label)) + raise ForbiddenStructureException("Species {0} from reaction library {1} is globally " + "forbidden. You may explicitly allow it, but it will remain " + "inert unless found in a seed mechanism or reaction " + "library.".format(spec.label, reactionLibrary.label)) if failsSpeciesConstraints(spec): if 'allowed' in rmg.speciesConstraints and 'reaction libraries' in rmg.speciesConstraints['allowed']: rmg.speciesConstraints['explicitlyAllowedMolecules'].extend(spec.molecule) else: - raise ForbiddenStructureException("Species constraints forbids species {0} from reaction library {1}. Please reformulate constraints, remove the species, or explicitly allow it.".format(spec.label, reactionLibrary.label)) + raise ForbiddenStructureException("Species constraints forbids species {0} from reaction library " + "{1}. 
Please reformulate constraints, remove the species, or " + "explicitly allow it.".format(spec.label, reactionLibrary.label)) for spec in self.newSpeciesList: - if spec.reactive: - submit(spec,self.solventName) + if spec.reactive: + submit(spec, self.solventName) self.addSpeciesToEdge(spec) @@ -1553,17 +1610,17 @@ def addReactionLibraryToEdge(self, reactionLibrary): if self.saveEdgeSpecies: from rmgpy.chemkin import markDuplicateReaction - newEdgeReactions = self.edge.reactions[numOldEdgeReactions:] - checkedReactions = self.core.reactions + self.edge.reactions[:numOldEdgeReactions] - for rxn in newEdgeReactions: - markDuplicateReaction(rxn, checkedReactions) - checkedReactions.append(rxn) + new_edge_reactions = self.edge.reactions[num_old_edge_reactions:] + checked_reactions = self.core.reactions + self.edge.reactions[:num_old_edge_reactions] + for rxn in new_edge_reactions: + markDuplicateReaction(rxn, checked_reactions) + checked_reactions.append(rxn) self.printEnlargeSummary( newCoreSpecies=[], newCoreReactions=[], - newEdgeSpecies=self.edge.species[numOldEdgeSpecies:], - newEdgeReactions=self.edge.reactions[numOldEdgeReactions:], + newEdgeSpecies=self.edge.species[num_old_edge_species:], + newEdgeReactions=self.edge.reactions[num_old_edge_reactions:], ) def addReactionLibraryToOutput(self, reactionLib): @@ -1574,18 +1631,16 @@ def addReactionLibraryToOutput(self, reactionLib): """ logging.info('Adding reaction library {0} to output file...'.format(reactionLib)) - + # Append the edge reactions that are from the selected reaction library to an output species and output reactions list for rxn in self.edge.reactions: if isinstance(rxn, LibraryReaction): if rxn.library == reactionLib: self.outputReactionList.append(rxn) - + for species in rxn.reactants + rxn.products: if species not in self.core.species and species not in self.outputSpeciesList: self.outputSpeciesList.append(species) - - def addReactionToUnimolecularNetworks(self, newReaction, newSpecies, network=None): """ @@ -1605,10 +1660,10 @@ def addReactionToUnimolecularNetworks(self, newReaction, newSpecies, network=Non products = newReaction.products[:] else: reactants = newReaction.products[:] - products = newReaction.reactants[:] + products = newReaction.reactants[:] reactants.sort() products.sort() - + source = tuple(reactants) # Only search for a network if we don't specify it as a parameter @@ -1639,7 +1694,7 @@ def addReactionToUnimolecularNetworks(self, newReaction, newSpecies, network=Non if network is None: self.networkCount += 1 network = PDepNetwork(index=self.networkCount, source=reactants[:]) - # should the source passed to PDepNetwork constuctor be a tuple not a list? that's what is used in networkDict + # should the source passed to PDepNetwork constuctor be a tuple not a list? 
that's what is used in networkDict try: self.networkDict[source].append(network) except KeyError: @@ -1662,11 +1717,11 @@ def updateUnimolecularReactionNetworks(self): # Two partial networks having the same source and containing one or # more explored isomers in common must be merged together to avoid # double-counting of rates - for networks in self.networkDict.itervalues(): - networkCount = len(networks) + for networks in self.networkDict.values(): + network_count = len(networks) for index0, network0 in enumerate(networks): index = index0 + 1 - while index < networkCount: + while index < network_count: found = False network = networks[index] if network0.source == network.source: @@ -1679,25 +1734,28 @@ def updateUnimolecularReactionNetworks(self): if found: # The networks contain the same source and one or more common included isomers # Therefore they need to be merged together - logging.info('Merging PDepNetwork #{0:d} and PDepNetwork #{1:d}'.format(network0.index, network.index)) + logging.info( + 'Merging PDepNetwork #{0:d} and PDepNetwork #{1:d}'.format(network0.index, network.index)) network0.merge(network) networks.remove(network) self.networkList.remove(network) - networkCount -= 1 + network_count -= 1 else: index += 1 - count = sum([1 for network in self.networkList if not network.valid and not (len(network.explored) == 0 and len(network.source) > 1)]) - logging.info('Updating {0:d} modified unimolecular reaction networks (out of {1:d})...'.format(count, len(self.networkList))) - + count = sum([1 for network in self.networkList if + not network.valid and not (len(network.explored) == 0 and len(network.source) > 1)]) + logging.info('Updating {0:d} modified unimolecular reaction networks (out of {1:d})...'.format(count, len( + self.networkList))) + # Iterate over all the networks, updating the invalid ones as necessary # self = reactionModel object - updatedNetworks = [] + updated_networks = [] for network in self.networkList: if not network.valid: network.update(self, self.pressureDependence) - updatedNetworks.append(network) - + updated_networks.append(network) + # PDepReaction objects generated from partial networks are irreversible # However, it makes more sense to have reversible reactions in the core # Thus we mark PDepReaction objects as reversible and remove the reverse @@ -1705,30 +1763,35 @@ def updateUnimolecularReactionNetworks(self): # Note that well-skipping reactions may not have a reverse if the well # that they skip over is not itself in the core index = 0 - coreReactionCount = len(self.core.reactions) - while index < coreReactionCount: + core_reaction_count = len(self.core.reactions) + while index < core_reaction_count: reaction = self.core.reactions[index] if isinstance(reaction, PDepReaction): - for reaction2 in self.core.reactions[index+1:]: - if isinstance(reaction2, PDepReaction) and reaction.reactants == reaction2.products and reaction.products == reaction2.reactants: + for reaction2 in self.core.reactions[index + 1:]: + if isinstance(reaction2, + PDepReaction) and reaction.reactants == reaction2.products and reaction.products == reaction2.reactants: # We've found the PDepReaction for the reverse direction dGrxn = reaction.getFreeEnergyOfReaction(300.) 
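# A minimal sketch of the detailed-balance reasoning behind the consistency check that follows,
# using purely hypothetical rate values (kf_example and kf2_example are illustrative placeholders,
# not RMG output). For a reversible step, k_r(T, P) = k_f(T, P) / K_eq(T), so the forward
# coefficient of one partial network should roughly match the reverse-direction network's
# coefficient divided by its equilibrium constant (which points the same way).
kf_example = 2.0e5     # k(T, P) of this network's reaction at 1000 K, 1 bar (hypothetical)
kf2_example = 3.0e5    # mirror network's k(T, P) / K_eq, i.e. the same direction as kf_example (hypothetical)
ratio = kf_example / kf2_example   # ~0.67 here; a value outside [0.5, 2.0] triggers the warning below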
- kf = reaction.getRateCoefficient(1000,1e5) - kr = reaction.getRateCoefficient(1000,1e5) / reaction.getEquilibriumConstant(1000) - kf2 = reaction2.getRateCoefficient(1000,1e5) / reaction2.getEquilibriumConstant(1000) - kr2 = reaction2.getRateCoefficient(1000,1e5) + kf = reaction.getRateCoefficient(1000, 1e5) + kr = reaction.getRateCoefficient(1000, 1e5) / reaction.getEquilibriumConstant(1000) + kf2 = reaction2.getRateCoefficient(1000, 1e5) / reaction2.getEquilibriumConstant(1000) + kr2 = reaction2.getRateCoefficient(1000, 1e5) if kf / kf2 < 0.5 or kf / kf2 > 2.0: # Most pairs of reactions should satisfy thermodynamic consistency (or at least be "close") # Warn about the ones that aren't close (but don't abort) - logging.warning('Forward and reverse PDepReactions for reaction {0!s} generated from networks {1:d} and {2:d} do not satisfy thermodynamic consistency.'.format(reaction, reaction.network.index, reaction2.network.index)) + logging.warning('Forward and reverse PDepReactions for reaction {0!s} generated from ' + 'networks {1:d} and {2:d} do not satisfy thermodynamic ' + 'consistency.'.format(reaction, + reaction.network.index, + reaction2.network.index)) logging.warning('{0!s}:'.format(reaction)) logging.warning('{0:.2e} {1:.2e}:'.format(kf, kf2)) logging.warning('{0!s}:'.format(reaction2)) logging.warning('{0:.2e} {1:.2e}:'.format(kr, kr2)) # Keep the exergonic direction - keepFirst = dGrxn < 0 + keep_first = dGrxn < 0 # Delete the PDepReaction that we aren't keeping - if keepFirst: + if keep_first: self.core.reactions.remove(reaction2) reaction.reversible = True else: @@ -1736,7 +1799,7 @@ def updateUnimolecularReactionNetworks(self): self.core.reactions.remove(reaction2) self.core.reactions.insert(index, reaction2) reaction2.reversible = True - coreReactionCount -= 1 + core_reaction_count -= 1 # There should be only one reverse, so we can stop searching once we've found it break else: @@ -1744,7 +1807,6 @@ def updateUnimolecularReactionNetworks(self): # Move to the next core reaction index += 1 - def markChemkinDuplicates(self): """ Check that all reactions that will appear the chemkin output have been checked as duplicates. @@ -1754,11 +1816,10 @@ def markChemkinDuplicates(self): Anything added via the :meth:`expand` method should already be detected. """ from rmgpy.chemkin import markDuplicateReactions - - rxnList = self.core.reactions + self.outputReactionList - markDuplicateReactions(rxnList) - - + + rxn_list = self.core.reactions + self.outputReactionList + markDuplicateReactions(rxn_list) + def registerReaction(self, rxn): """ Adds the reaction to the reaction database. 
@@ -1788,16 +1849,15 @@ def registerReaction(self, rxn): if key_family not in self.reactionDict: self.reactionDict[key_family] = {} - if not self.reactionDict[key_family].has_key(key1): + if key1 not in self.reactionDict[key_family]: self.reactionDict[key_family][key1] = {} - if not self.reactionDict[key_family][key1].has_key(key2): + if key2 not in self.reactionDict[key_family][key1]: self.reactionDict[key_family][key1][key2] = [] # store this reaction at the top of the relevant short-list self.reactionDict[key_family][key1][key2].insert(0, rxn) - def searchRetrieveReactions(self, rxn): """ Searches through the reaction database for @@ -1810,24 +1870,22 @@ def searchRetrieveReactions(self, rxn): # Get the short-list of reactions with the same family, reactant1 and reactant2 family_label, r1_fwd, r2_fwd = generateReactionKey(rxn) - - my_reactionList = [] + + my_reaction_list = [] rxns = self.retrieve(family_label, r1_fwd, r2_fwd) - my_reactionList.extend(rxns) - - - family = getFamilyLibraryObject(family_label) - # if the family is its own reverse (H-Abstraction) then check the other direction - if isinstance(family,KineticsFamily): + my_reaction_list.extend(rxns) + family = getFamilyLibraryObject(family_label) + # if the family is its own reverse (H-Abstraction) then check the other direction + if isinstance(family, KineticsFamily): # Get the short-list of reactions with the same family, product1 and product2 family_label, r1_rev, r2_rev = generateReactionKey(rxn, useProducts=True) rxns = self.retrieve(family_label, r1_rev, r2_rev) - my_reactionList.extend(rxns) + my_reaction_list.extend(rxns) - return my_reactionList + return my_reaction_list def initializeIndexSpeciesDict(self): """ @@ -1851,7 +1909,7 @@ def retrieve(self, family_label, key1, key2): """ try: return self.reactionDict[family_label][key1][key2][:] - except KeyError: # no such short-list: must be new, unless in seed. + except KeyError: # no such short-list: must be new, unless in seed. return [] @@ -1872,9 +1930,10 @@ def generateReactionKey(rxn, useProducts=False): spc_list = rxn.products if useProducts else rxn.reactants key1 = getKey(spc_list[0]) key2 = None if len(spc_list) == 1 else getKey(spc_list[1]) - key1, key2 = sorted([key1, key2], reverse=True)# ensure None is always at end + key1, key2 = sorted([key1, key2], reverse=True) # ensure None is always at end + + return key_family, key1, key2 - return (key_family, key1, key2) def generateReactionId(rxn): """ @@ -1886,11 +1945,11 @@ def generateReactionId(rxn): The first element in the tuple is the reactants list. """ - reactants = sorted([getKey(reactant) for reactant in rxn.reactants]) products = sorted([getKey(product) for product in rxn.products]) - return (reactants, products) + return reactants, products + def getFamilyLibraryObject(label): """ @@ -1917,6 +1976,7 @@ def getFamilyLibraryObject(label): raise Exception('Could not retrieve the family/library: {}'.format(label)) + def getKey(spc): """ Returns a string of the species that can serve as a key in a dictionary. 
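# A minimal sketch, with made-up labels and a plain dict, of the bookkeeping pattern used by
# registerReaction() and retrieve() above: reactions are filed under (family, key1, key2), where
# the two species keys come from generateReactionKey() and are sorted in reverse so the lookup is
# insensitive to reactant ordering (and a missing second key lands in the last slot).
# Everything below is illustrative only, not the RMG API.
reaction_dict = {}
key1, key2 = sorted(['CH4', '[OH]'], reverse=True)   # same ordering rule as generateReactionKey()
reaction_dict.setdefault('H_Abstraction', {}).setdefault(key1, {}).setdefault(key2, []).insert(0, 'rxn_placeholder')
short_list = reaction_dict.get('H_Abstraction', {}).get(key1, {}).get(key2, [])[:]  # [] if never registered, as in retrieve()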
@@ -1924,6 +1984,7 @@ def getKey(spc): return spc.label + def areIdenticalSpeciesReferences(rxn1, rxn2): """ Checks if the references of the reactants and products of the two reactions @@ -1932,5 +1993,5 @@ def areIdenticalSpeciesReferences(rxn1, rxn2): identical_same_direction = rxn1.reactants == rxn2.reactants and rxn1.products == rxn2.products identical_opposite_directions = rxn1.reactants == rxn2.products and rxn1.products == rxn2.reactants identical_collider = rxn1.specificCollider == rxn2.specificCollider - + return (identical_same_direction or identical_opposite_directions) and identical_collider diff --git a/rmgpy/rmg/modelTest.py b/rmgpy/rmg/modelTest.py index 3f53300c5d..a6c3d67e56 100644 --- a/rmgpy/rmg/modelTest.py +++ b/rmgpy/rmg/modelTest.py @@ -30,26 +30,30 @@ import itertools import os -import unittest +import unittest import numpy as np from nose.plugins.attrib import attr from rmgpy import settings -from rmgpy.data.rmg import RMGDatabase, database -from rmgpy.rmg.main import RMG -from rmgpy.reaction import Reaction -from rmgpy.rmg.react import react -from rmgpy.rmg.model import * from rmgpy.data.base import ForbiddenStructures from rmgpy.data.kinetics.family import TemplateReaction -from rmgpy.data.thermo import * +from rmgpy.data.rmg import RMGDatabase +from rmgpy.data.thermo import NASA, NASAPolynomial +from rmgpy.molecule import Molecule +from rmgpy.rmg.main import RMG +from rmgpy.rmg.model import CoreEdgeReactionModel +from rmgpy.rmg.react import react +from rmgpy.species import Species + + ################################################### class TestSpecies(unittest.TestCase): """ Contains unit tests of the Species class. """ + @classmethod def setUpClass(cls): """ @@ -64,7 +68,6 @@ def setUpClass(cls): # forbidden structure loading cls.rmg.database.loadThermo(os.path.join(path, 'thermo')) - def testGetThermoData(self): """ @@ -79,7 +82,7 @@ def testGetThermoData(self): spc.getThermoData() self.assertEquals(id(thermo), id(spc.thermo)) - + spc.thermo = None spc.getThermoData() self.assertNotEquals(id(thermo), id(spc.thermo)) @@ -92,6 +95,7 @@ def tearDownClass(cls): import rmgpy.data.rmg rmgpy.data.rmg.database = None + class TestCoreEdgeReactionModel(unittest.TestCase): """ Contains unit tests of the CoreEdgeReactionModel class. @@ -102,23 +106,22 @@ def setUpClass(cls): """ A method that is run before each unit test in this class. 
""" - TESTFAMILY = 'H_Abstraction' + test_family = 'H_Abstraction' # set-up RMG object rmg = RMG() # load kinetic database and forbidden structures rmg.database = RMGDatabase() - path=os.path.join(settings['test_data.directory'], 'testing_database') - + path = os.path.join(settings['test_data.directory'], 'testing_database') # kinetics family loading rmg.database.loadKinetics(os.path.join(path, 'kinetics'), - kineticsFamilies=[TESTFAMILY], - reactionLibraries=[] - ) - #load empty forbidden structures to avoid any dependence on forbidden structures - #for these tests + kineticsFamilies=[test_family], + reactionLibraries=[] + ) + # load empty forbidden structures to avoid any dependence on forbidden structures + # for these tests for family in rmg.database.kinetics.families.values(): family.forbidden = ForbiddenStructures() rmg.database.forbiddenStructures = ForbiddenStructures() @@ -127,11 +130,14 @@ def testAddNewSurfaceObjects(self): """ basic test that surface movement object management works properly """ - #create object with ReactionSystem behavior + + # create object with ReactionSystem behavior class rsys: pass + class item: pass + T = item() P = item() T.value_si = 1000.0 @@ -139,52 +145,52 @@ class item: rsys.T = T rsys.P = P procnum = 2 - + cerm = CoreEdgeReactionModel() - + spcA = Species().fromSMILES('[OH]') spcs = [Species().fromSMILES('CC'), Species().fromSMILES('[CH3]')] - spcTuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs] - - rxns = list(itertools.chain.from_iterable(react(spcTuples, procnum))) + spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs] + + rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum))) rxns += list(itertools.chain.from_iterable(react([((spcs[0], spcs[1]), ['H_Abstraction'])], procnum))) - + for rxn in rxns: cerm.makeNewReaction(rxn) - - cerm.core.species = [spcA]+spcs - + + cerm.core.species = [spcA] + spcs + corerxns = [] edgerxns = [] edgespcs = set() for rxn in rxns: - if set(rxn.reactants+rxn.products) <= set(cerm.core.species): + if set(rxn.reactants + rxn.products) <= set(cerm.core.species): corerxns.append(rxn) else: - edgespcs |= set(cerm.core.species)-set(rxn.reactants+rxn.products) + edgespcs |= set(cerm.core.species) - set(rxn.reactants + rxn.products) edgerxns.append(rxn) - + cerm.edge.species += list(edgespcs) - + cerm.core.reactions = corerxns cerm.edge.reactions = edgerxns - + cerm.surface.species = [] cerm.surface.reactions = [] - - newSurfaceReactions = [cerm.edge.reactions[0]] - newSurfaceSpecies = [] - obj = newSurfaceReactions - - cerm.addNewSurfaceObjects(obj,newSurfaceSpecies,newSurfaceReactions,rsys) - + + new_surface_reactions = [cerm.edge.reactions[0]] + new_surface_species = [] + obj = new_surface_reactions + + cerm.addNewSurfaceObjects(obj, new_surface_species, new_surface_reactions, rsys) + empty = set() - - self.assertEqual(cerm.newSurfaceSpcsAdd,empty) - self.assertEqual(cerm.newSurfaceSpcsLoss,empty) - self.assertEqual(cerm.newSurfaceRxnsLoss,empty) - self.assertEqual(cerm.newSurfaceRxnsAdd,set([cerm.edge.reactions[0]])) - + + self.assertEqual(cerm.newSurfaceSpcsAdd, empty) + self.assertEqual(cerm.newSurfaceSpcsLoss, empty) + self.assertEqual(cerm.newSurfaceRxnsLoss, empty) + self.assertEqual(cerm.newSurfaceRxnsAdd, set([cerm.edge.reactions[0]])) + def testMakeNewSpecies(self): """ Test that CoreEdgeReactionModel.makeNewSpecies method correctly stores the unique species. 
@@ -193,28 +199,28 @@ def testMakeNewSpecies(self): # adding 3 unique species: cerm = CoreEdgeReactionModel() - spcs = [Species().fromSMILES('[OH]'), + spcs = [Species().fromSMILES('[OH]'), Species().fromSMILES('CC'), Species().fromSMILES('[CH3]')] for spc in spcs: cerm.makeNewSpecies(spc) - self.assertEquals(len(cerm.speciesDict), len(spcs)) + self.assertEquals(len(cerm.speciesDict), len(spcs)) self.assertEquals(len(cerm.indexSpeciesDict), len(spcs)) # adding 3 unique, and 1 already existing species: cerm = CoreEdgeReactionModel() - spcs = [Species().fromSMILES('[OH]'), + spcs = [Species().fromSMILES('[OH]'), Species().fromSMILES('CC'), Species().fromSMILES('[CH3]'), - Species().fromSMILES('CC')]#duplicate species + Species().fromSMILES('CC')] # duplicate species for spc in spcs: cerm.makeNewSpecies(spc) - self.assertEquals(len(cerm.speciesDict), len(spcs) - 1) + self.assertEquals(len(cerm.speciesDict), len(spcs) - 1) self.assertEquals(len(cerm.indexSpeciesDict), len(spcs) - 1) def test_append_unreactive_structure(self): @@ -248,9 +254,9 @@ def testMakeNewReaction(self): procnum = 2 spcA = Species().fromSMILES('[OH]') spcs = [Species().fromSMILES('CC'), Species().fromSMILES('[CH3]')] - spcTuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs] + spc_tuples = [((spcA, spc), ['H_Abstraction']) for spc in spcs] - rxns = list(itertools.chain.from_iterable(react(spcTuples, procnum))) + rxns = list(itertools.chain.from_iterable(react(spc_tuples, procnum))) cerm = CoreEdgeReactionModel() @@ -266,79 +272,111 @@ def testMakeNewReaction(self): # count no. of entries in reactionDict: counter = 0 - for fam, v1 in cerm.reactionDict.iteritems(): - for key2, v2 in v1.iteritems(): - for key3, rxnList in v2.iteritems(): + for fam, v1 in cerm.reactionDict.items(): + for key2, v2 in v1.items(): + for key3, rxnList in v2.items(): counter += len(rxnList) self.assertEquals(counter, 3) - + def testThermoFilterSpecies(self): """ test that thermoFilterSpecies leaves species alone if if toleranceThermoKeepInEdge is high and removes them if if toleranceThermoKeepInEdge is low """ - + cerm = CoreEdgeReactionModel() - spcs = [Species().fromSMILES('[OH]'), + spcs = [Species().fromSMILES('[OH]'), Species().fromSMILES('C'), Species().fromSMILES('[CH3]'), Species().fromSMILES('[CH2]'), Species().fromSMILES('O')] - + for spc in spcs: - cerm.makeNewSpecies(spc,label=spc.molecule[0].toSMILES()) + cerm.makeNewSpecies(spc, label=spc.molecule[0].toSMILES()) spc.label = spc.molecule[0].toSMILES() - - thermoDict = {'[OH]':NASA(polynomials=[NASAPolynomial(coeffs=[3.51457,2.92787e-05,-5.32168e-07,1.0195e-09,-3.85947e-13,3414.25,2.10435], Tmin=(100,'K'), Tmax=(1145.75,'K')), NASAPolynomial(coeffs=[3.07194,0.000604014,-1.39775e-08,-2.13448e-11,2.48067e-15,3579.39,4.578], Tmin=(1145.75,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(28.3945,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""OH(D)""", comment="""Thermo library: primaryThermoLibrary"""), - 'C' : NASA(polynomials=[NASAPolynomial(coeffs=[4.20541,-0.00535551,2.51121e-05,-2.1376e-08,5.97513e-12,-10161.9,-0.921259], Tmin=(100,'K'), Tmax=(1084.13,'K')), NASAPolynomial(coeffs=[0.908298,0.011454,-4.57171e-06,8.29185e-10,-5.66309e-14,-9719.99,13.9929], Tmin=(1084.13,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-84.435,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(108.088,'J/(mol*K)'), label="""CH4""", comment="""Thermo library: primaryThermoLibrary"""), - '[CH3]' : 
NASA(polynomials=[NASAPolynomial(coeffs=[3.67359,0.00201095,5.73022e-06,-6.87117e-09,2.54386e-12,16445,1.60456], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28572,0.0072399,-2.98714e-06,5.95685e-10,-4.67154e-14,16775.6,8.48007], Tmin=(1000,'K'), Tmax=(3500,'K'))], Tmin=(200,'K'), Tmax=(3500,'K'), E0=(136.42,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(83.1447,'J/(mol*K)'), label="""CH3""", comment="""Thermo library: GRI-Mech3.0"""), - '[CH2]': NASA(polynomials=[NASAPolynomial(coeffs=[4.01192,-0.000154978,3.26298e-06,-2.40422e-09,5.69497e-13,45867.7,0.533201], Tmin=(100,'K'), Tmax=(1104.62,'K')), NASAPolynomial(coeffs=[3.14983,0.00296674,-9.76056e-07,1.54115e-10,-9.50338e-15,46058.1,4.77808], Tmin=(1104.62,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(381.37,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(T)""", comment="""Thermo library: primaryThermoLibrary"""), - 'O' : NASA(polynomials=[NASAPolynomial(coeffs=[4.05764,-0.000787936,2.90877e-06,-1.47519e-09,2.12842e-13,-30281.6,-0.311364], Tmin=(100,'K'), Tmax=(1130.24,'K')), NASAPolynomial(coeffs=[2.84325,0.00275109,-7.81031e-07,1.07244e-10,-5.79392e-15,-29958.6,5.91042], Tmin=(1130.24,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-251.755,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""H2O""", comment="""Thermo library: primaryThermoLibrary"""), - } - + + thermo_dict = {'[OH]': NASA(polynomials=[NASAPolynomial( + coeffs=[3.51457, 2.92787e-05, -5.32168e-07, 1.0195e-09, -3.85947e-13, 3414.25, 2.10435], + Tmin=(100, 'K'), Tmax=(1145.75, 'K')), NASAPolynomial( + coeffs=[3.07194, 0.000604014, -1.39775e-08, -2.13448e-11, 2.48067e-15, 3579.39, 4.578], + Tmin=(1145.75, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), + E0=(28.3945, 'kJ/mol'), Cp0=(29.1007, 'J/(mol*K)'), CpInf=(37.4151, 'J/(mol*K)'), + label="""OH(D)""", comment="""Thermo library: primaryThermoLibrary"""), + 'C': NASA(polynomials=[NASAPolynomial( + coeffs=[4.20541, -0.00535551, 2.51121e-05, -2.1376e-08, 5.97513e-12, -10161.9, -0.921259], + Tmin=(100, 'K'), Tmax=(1084.13, 'K')), NASAPolynomial( + coeffs=[0.908298, 0.011454, -4.57171e-06, 8.29185e-10, -5.66309e-14, -9719.99, 13.9929], + Tmin=(1084.13, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), + E0=(-84.435, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(108.088, 'J/(mol*K)'), + label="""CH4""", comment="""Thermo library: primaryThermoLibrary"""), + '[CH3]': NASA(polynomials=[NASAPolynomial( + coeffs=[3.67359, 0.00201095, 5.73022e-06, -6.87117e-09, 2.54386e-12, 16445, 1.60456], + Tmin=(200, 'K'), Tmax=(1000, 'K')), NASAPolynomial( + coeffs=[2.28572, 0.0072399, -2.98714e-06, 5.95685e-10, -4.67154e-14, 16775.6, 8.48007], + Tmin=(1000, 'K'), Tmax=(3500, 'K'))], Tmin=(200, 'K'), Tmax=(3500, 'K'), + E0=(136.42, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(83.1447, 'J/(mol*K)'), + label="""CH3""", comment="""Thermo library: GRI-Mech3.0"""), + '[CH2]': NASA(polynomials=[NASAPolynomial( + coeffs=[4.01192, -0.000154978, 3.26298e-06, -2.40422e-09, 5.69497e-13, 45867.7, 0.533201], + Tmin=(100, 'K'), Tmax=(1104.62, 'K')), NASAPolynomial( + coeffs=[3.14983, 0.00296674, -9.76056e-07, 1.54115e-10, -9.50338e-15, 46058.1, 4.77808], + Tmin=(1104.62, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), + E0=(381.37, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(58.2013, 'J/(mol*K)'), + label="""CH2(T)""", comment="""Thermo library: primaryThermoLibrary"""), + 'O': NASA(polynomials=[NASAPolynomial( + 
coeffs=[4.05764, -0.000787936, 2.90877e-06, -1.47519e-09, 2.12842e-13, -30281.6, -0.311364], + Tmin=(100, 'K'), Tmax=(1130.24, 'K')), NASAPolynomial( + coeffs=[2.84325, 0.00275109, -7.81031e-07, 1.07244e-10, -5.79392e-15, -29958.6, 5.91042], + Tmin=(1130.24, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), + E0=(-251.755, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(58.2013, 'J/(mol*K)'), + label="""H2O""", comment="""Thermo library: primaryThermoLibrary"""), + } + for spc in spcs[:3]: cerm.addSpeciesToCore(spc) - + reaction = TemplateReaction( - reactants = [spcs[0],spcs[2]], - products = [spcs[-1],spcs[-2]], - degeneracy = 1, - reversible = True, - family = 'H_Abstraction', + reactants=[spcs[0], spcs[2]], + products=[spcs[-1], spcs[-2]], + degeneracy=1, + reversible=True, + family='H_Abstraction', ) - - cerm.processNewReactions(newReactions=[reaction],newSpecies=[]) #adds CH2 and O to edge - - for spc in cerm.core.species+cerm.edge.species: - spc.thermo = thermoDict[spc.molecule[0].toSMILES()] #assign thermo - - cerm.setThermodynamicFilteringParameters(Tmax=300.0, + + cerm.processNewReactions(newReactions=[reaction], newSpecies=[]) # adds CH2 and O to edge + + for spc in cerm.core.species + cerm.edge.species: + spc.thermo = thermo_dict[spc.molecule[0].toSMILES()] # assign thermo + + cerm.setThermodynamicFilteringParameters(Tmax=300.0, toleranceThermoKeepSpeciesInEdge=1000.0, minCoreSizeForPrune=0, maximumEdgeSpecies=1, reactionSystems=[]) - - cerm.thermoFilterSpecies(cerm.edge.species) #should not do anythinb because toleranceThermoKeepSpeciesInEdge is high - - - difset = set([x.molecule[0].toSMILES() for x in cerm.edge.species])-set([x.molecule[0].toSMILES() for x in cerm.core.species]) - - self.assertEquals(len(difset),2) #no change in edge - - cerm.setThermodynamicFilteringParameters(Tmax=300.0, + + cerm.thermoFilterSpecies( + cerm.edge.species) # should not do anythinb because toleranceThermoKeepSpeciesInEdge is high + + difset = set([x.molecule[0].toSMILES() for x in cerm.edge.species]) - set( + [x.molecule[0].toSMILES() for x in cerm.core.species]) + + self.assertEquals(len(difset), 2) # no change in edge + + cerm.setThermodynamicFilteringParameters(Tmax=300.0, toleranceThermoKeepSpeciesInEdge=0.0, minCoreSizeForPrune=0, maximumEdgeSpecies=1, reactionSystems=[]) - - cerm.thermoFilterSpecies(cerm.edge.species) #should remove stuff since CH2 and O have high thermo - - difset = set([x.molecule[0].toSMILES() for x in cerm.edge.species])-set([x.molecule[0].toSMILES() for x in cerm.core.species]) - - self.assertLess(len(difset),2) #edge is smaller - + + cerm.thermoFilterSpecies(cerm.edge.species) # should remove stuff since CH2 and O have high thermo + + difset = set([x.molecule[0].toSMILES() for x in cerm.edge.species]) - set( + [x.molecule[0].toSMILES() for x in cerm.core.species]) + + self.assertLess(len(difset), 2) # edge is smaller + def testThermoFilterDown(self): """ test that thermoFilterDown with maximumEdgeSpecies = 1 reduces @@ -346,55 +384,85 @@ def testThermoFilterDown(self): """ cerm = CoreEdgeReactionModel() - spcs = [Species().fromSMILES('[OH]'), + spcs = [Species().fromSMILES('[OH]'), Species().fromSMILES('C'), Species().fromSMILES('[CH3]'), Species().fromSMILES('[CH2]'), Species().fromSMILES('O')] - + for spc in spcs: - cerm.makeNewSpecies(spc,label=spc.molecule[0].toSMILES()) + cerm.makeNewSpecies(spc, label=spc.molecule[0].toSMILES()) spc.label = spc.molecule[0].toSMILES() - - thermoDict = 
{'[OH]':NASA(polynomials=[NASAPolynomial(coeffs=[3.51457,2.92787e-05,-5.32168e-07,1.0195e-09,-3.85947e-13,3414.25,2.10435], Tmin=(100,'K'), Tmax=(1145.75,'K')), NASAPolynomial(coeffs=[3.07194,0.000604014,-1.39775e-08,-2.13448e-11,2.48067e-15,3579.39,4.578], Tmin=(1145.75,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(28.3945,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""OH(D)""", comment="""Thermo library: primaryThermoLibrary"""), - 'C' : NASA(polynomials=[NASAPolynomial(coeffs=[4.20541,-0.00535551,2.51121e-05,-2.1376e-08,5.97513e-12,-10161.9,-0.921259], Tmin=(100,'K'), Tmax=(1084.13,'K')), NASAPolynomial(coeffs=[0.908298,0.011454,-4.57171e-06,8.29185e-10,-5.66309e-14,-9719.99,13.9929], Tmin=(1084.13,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-84.435,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(108.088,'J/(mol*K)'), label="""CH4""", comment="""Thermo library: primaryThermoLibrary"""), - '[CH3]' : NASA(polynomials=[NASAPolynomial(coeffs=[3.67359,0.00201095,5.73022e-06,-6.87117e-09,2.54386e-12,16445,1.60456], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28572,0.0072399,-2.98714e-06,5.95685e-10,-4.67154e-14,16775.6,8.48007], Tmin=(1000,'K'), Tmax=(3500,'K'))], Tmin=(200,'K'), Tmax=(3500,'K'), E0=(136.42,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(83.1447,'J/(mol*K)'), label="""CH3""", comment="""Thermo library: GRI-Mech3.0"""), - '[CH2]': NASA(polynomials=[NASAPolynomial(coeffs=[4.01192,-0.000154978,3.26298e-06,-2.40422e-09,5.69497e-13,45867.7,0.533201], Tmin=(100,'K'), Tmax=(1104.62,'K')), NASAPolynomial(coeffs=[3.14983,0.00296674,-9.76056e-07,1.54115e-10,-9.50338e-15,46058.1,4.77808], Tmin=(1104.62,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(381.37,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(T)""", comment="""Thermo library: primaryThermoLibrary"""), - 'O' : NASA(polynomials=[NASAPolynomial(coeffs=[4.05764,-0.000787936,2.90877e-06,-1.47519e-09,2.12842e-13,-30281.6,-0.311364], Tmin=(100,'K'), Tmax=(1130.24,'K')), NASAPolynomial(coeffs=[2.84325,0.00275109,-7.81031e-07,1.07244e-10,-5.79392e-15,-29958.6,5.91042], Tmin=(1130.24,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-251.755,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""H2O""", comment="""Thermo library: primaryThermoLibrary"""), - } - + + thermo_dict = {'[OH]': NASA(polynomials=[NASAPolynomial( + coeffs=[3.51457, 2.92787e-05, -5.32168e-07, 1.0195e-09, -3.85947e-13, 3414.25, 2.10435], + Tmin=(100, 'K'), Tmax=(1145.75, 'K')), NASAPolynomial( + coeffs=[3.07194, 0.000604014, -1.39775e-08, -2.13448e-11, 2.48067e-15, 3579.39, 4.578], + Tmin=(1145.75, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), + E0=(28.3945, 'kJ/mol'), Cp0=(29.1007, 'J/(mol*K)'), CpInf=(37.4151, 'J/(mol*K)'), + label="""OH(D)""", comment="""Thermo library: primaryThermoLibrary"""), + 'C': NASA(polynomials=[NASAPolynomial( + coeffs=[4.20541, -0.00535551, 2.51121e-05, -2.1376e-08, 5.97513e-12, -10161.9, -0.921259], + Tmin=(100, 'K'), Tmax=(1084.13, 'K')), NASAPolynomial( + coeffs=[0.908298, 0.011454, -4.57171e-06, 8.29185e-10, -5.66309e-14, -9719.99, 13.9929], + Tmin=(1084.13, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), + E0=(-84.435, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(108.088, 'J/(mol*K)'), + label="""CH4""", comment="""Thermo library: primaryThermoLibrary"""), + '[CH3]': NASA(polynomials=[NASAPolynomial( + coeffs=[3.67359, 0.00201095, 5.73022e-06, 
-6.87117e-09, 2.54386e-12, 16445, 1.60456], + Tmin=(200, 'K'), Tmax=(1000, 'K')), NASAPolynomial( + coeffs=[2.28572, 0.0072399, -2.98714e-06, 5.95685e-10, -4.67154e-14, 16775.6, 8.48007], + Tmin=(1000, 'K'), Tmax=(3500, 'K'))], Tmin=(200, 'K'), Tmax=(3500, 'K'), + E0=(136.42, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(83.1447, 'J/(mol*K)'), + label="""CH3""", comment="""Thermo library: GRI-Mech3.0"""), + '[CH2]': NASA(polynomials=[NASAPolynomial( + coeffs=[4.01192, -0.000154978, 3.26298e-06, -2.40422e-09, 5.69497e-13, 45867.7, 0.533201], + Tmin=(100, 'K'), Tmax=(1104.62, 'K')), NASAPolynomial( + coeffs=[3.14983, 0.00296674, -9.76056e-07, 1.54115e-10, -9.50338e-15, 46058.1, 4.77808], + Tmin=(1104.62, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), + E0=(381.37, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(58.2013, 'J/(mol*K)'), + label="""CH2(T)""", comment="""Thermo library: primaryThermoLibrary"""), + 'O': NASA(polynomials=[NASAPolynomial( + coeffs=[4.05764, -0.000787936, 2.90877e-06, -1.47519e-09, 2.12842e-13, -30281.6, -0.311364], + Tmin=(100, 'K'), Tmax=(1130.24, 'K')), NASAPolynomial( + coeffs=[2.84325, 0.00275109, -7.81031e-07, 1.07244e-10, -5.79392e-15, -29958.6, 5.91042], + Tmin=(1130.24, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), + E0=(-251.755, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(58.2013, 'J/(mol*K)'), + label="""H2O""", comment="""Thermo library: primaryThermoLibrary"""), + } + for spc in spcs[:3]: cerm.addSpeciesToCore(spc) - - reaction = TemplateReaction( - reactants = [spcs[0],spcs[2]], - products = [spcs[-1],spcs[-2]], - degeneracy = 1, - reversible = True, - family = 'H_Abstraction', - ) - - cerm.processNewReactions(newReactions=[reaction],newSpecies=[]) #add CH2 and O to edge - - for spc in cerm.core.species+cerm.edge.species: - spc.thermo = thermoDict[spc.molecule[0].toSMILES()] #assign thermo - - cerm.setThermodynamicFilteringParameters(Tmax=300.0, + + reaction = TemplateReaction(reactants=[spcs[0], spcs[2]], + products=[spcs[-1], spcs[-2]], + degeneracy=1, + reversible=True, + family='H_Abstraction') + + cerm.processNewReactions(newReactions=[reaction], newSpecies=[]) # add CH2 and O to edge + + for spc in cerm.core.species + cerm.edge.species: + spc.thermo = thermo_dict[spc.molecule[0].toSMILES()] # assign thermo + + cerm.setThermodynamicFilteringParameters(Tmax=300.0, toleranceThermoKeepSpeciesInEdge=1000.0, minCoreSizeForPrune=0, maximumEdgeSpecies=1, reactionSystems=[]) - - difset = set([x.molecule[0].toSMILES() for x in cerm.edge.species])-set([x.molecule[0].toSMILES() for x in cerm.core.species]) - - self.assertEquals(len(difset),2) #no change because toleranceThermoKeepSpeciesInEdge is high - + + difset = set([x.molecule[0].toSMILES() for x in cerm.edge.species]) - set( + [x.molecule[0].toSMILES() for x in cerm.core.species]) + + self.assertEquals(len(difset), 2) # no change because toleranceThermoKeepSpeciesInEdge is high + cerm.thermoFilterDown(maximumEdgeSpecies=1) - - difset = set([x.molecule[0].toSMILES() for x in cerm.edge.species])-set([x.molecule[0].toSMILES() for x in cerm.core.species]) - - self.assertEquals(len(difset),1) #should be one because we thermo filtered down to one edge species - + + difset = set([x.molecule[0].toSMILES() for x in cerm.edge.species]) - set( + [x.molecule[0].toSMILES() for x in cerm.core.species]) + + self.assertEquals(len(difset), 1) # should be one because we thermo filtered down to one edge species + def test_checkForExistingReaction_eliminates_identical_reactions(self): """ Test 
that checkForExistingReaction catches identical reactions. @@ -456,7 +524,7 @@ def test_checkForExistingReaction_keeps_identical_reactions_with_duplicate_flag( cerm.addSpeciesToCore(spcB) cerm.addSpeciesToCore(spcC) cerm.addSpeciesToCore(spcD) - + reaction_in_model = TemplateReaction(reactants=[spcA, spcB], products=[spcC, spcD], family='H_Abstraction', @@ -681,8 +749,8 @@ def test_enlarge_4_create_pdep_network(self): self.assertEqual(self.rmg.reactionModel.networkList[0].source[0].label, 'C2H4') self.assertEqual(len(self.rmg.reactionModel.networkDict), 1) - self.assertEqual(len(self.rmg.reactionModel.networkDict.keys()[0]), 1) - self.assertEqual(self.rmg.reactionModel.networkDict.keys()[0][0].label, 'C2H4') + self.assertEqual(len(list(self.rmg.reactionModel.networkDict.keys())[0]), 1) + self.assertEqual(list(self.rmg.reactionModel.networkDict.keys())[0][0].label, 'C2H4') @classmethod def tearDownClass(cls): diff --git a/rmgpy/rmg/output.py b/rmgpy/rmg/output.py index 75cbc3b034..11d913f8bf 100644 --- a/rmgpy/rmg/output.py +++ b/rmgpy/rmg/output.py @@ -33,13 +33,16 @@ files. """ -import os.path import logging +import os.path import re import textwrap -from rmgpy.util import makeOutputSubdirectory + from rmgpy.chemkin import getSpeciesIdentifier from rmgpy.exceptions import OutputError +from rmgpy.util import makeOutputSubdirectory + + ################################################################################ def saveOutputHTML(path, reactionModel, partCoreEdge='core'): @@ -51,9 +54,9 @@ def saveOutputHTML(path, reactionModel, partCoreEdge='core'): package is used to generate the HTML; if this package is not found, no HTML will be generated (but the program will carry on). """ - + from rmgpy.rmg.model import PDepReaction - + from rmgpy.molecule.draw import MoleculeDrawer try: @@ -67,17 +70,17 @@ def saveOutputHTML(path, reactionModel, partCoreEdge='core'): # Prepare parameters to pass to jinja template title = 'RMG Output' - + if partCoreEdge == 'core': species = reactionModel.core.species[:] + reactionModel.outputSpeciesList elif partCoreEdge == 'edge': species = reactionModel.edge.species[:] + reactionModel.outputSpeciesList - - if not os.path.isdir(os.path.join(dirname,'species')): - os.makedirs(os.path.join(dirname,'species')) + + if not os.path.isdir(os.path.join(dirname, 'species')): + os.makedirs(os.path.join(dirname, 'species')) re_index_search = re.compile(r'\((\d+)\)$').search - + for spec in species: # if the species dictionary came from an RMG-Java job, make them prettier # We use the presence of a trailing index on the label to discern this @@ -92,26 +95,27 @@ def saveOutputHTML(path, reactionModel, partCoreEdge='core'): try: MoleculeDrawer().draw(spec.molecule[0], 'png', fstr) except IndexError: - logging.error("{0} species could not be drawn because it did not contain a molecular structure. Please recheck your files.".format(getSpeciesIdentifier(spec))) + logging.error("{0} species could not be drawn because it did not contain a molecular structure. " + "Please recheck your files.".format(getSpeciesIdentifier(spec))) raise - #spec.thermo.comment= + # spec.thermo.comment= # Text wrap the thermo comments # We want to keep species sorted in the original order in which they were added to the RMG core. 
# Rather than ordered by index -# species.sort(key=lambda x: x.index) - - if partCoreEdge == 'core': - reactions = [rxn for rxn in reactionModel.core.reactions ] + reactionModel.outputReactionList + # species.sort(key=lambda x: x.index) + + if partCoreEdge == 'core': + reactions = [rxn for rxn in reactionModel.core.reactions] + reactionModel.outputReactionList elif partCoreEdge == 'edge': - reactions = [rxn for rxn in reactionModel.edge.reactions ] + reactionModel.outputReactionList + reactions = [rxn for rxn in reactionModel.edge.reactions] + reactionModel.outputReactionList # We want to keep reactions sorted in original order in which they were added to core # rather than ordered by index - #reactions.sort(key=lambda x: x.index) + # reactions.sort(key=lambda x: x.index) familyCount = {} for rxn in reactions: - + if isinstance(rxn, PDepReaction): family = "PDepNetwork" else: @@ -120,21 +124,19 @@ def saveOutputHTML(path, reactionModel, partCoreEdge='core'): familyCount[family] += 1 else: familyCount[family] = 1 - families = familyCount.keys() + families = list(familyCount.keys()) families.sort() - - + ## jinja2 filters etc. to_remove_from_css_names = re.compile('[/.\-+,]') + def csssafe(input): "Replace unsafe CSS class name characters with an underscore." - return to_remove_from_css_names.sub('_',input) - + return to_remove_from_css_names.sub('_', input) + environment = jinja2.Environment() environment.filters['csssafe'] = csssafe - - - + # Make HTML file template = environment.from_string( """ @@ -506,19 +508,20 @@ def csssafe(input): """) - f = open(path, 'w') - f.write(template.render(title=title, species=species, reactions=reactions, families=families, familyCount=familyCount, getSpeciesIdentifier=getSpeciesIdentifier,textwrap=textwrap)) + f.write(template.render(title=title, species=species, reactions=reactions, families=families, + familyCount=familyCount, getSpeciesIdentifier=getSpeciesIdentifier, textwrap=textwrap)) f.close() -def saveDiffHTML(path, commonSpeciesList, speciesList1, speciesList2, commonReactions, uniqueReactions1, uniqueReactions2): +def saveDiffHTML(path, commonSpeciesList, speciesList1, speciesList2, commonReactions, uniqueReactions1, + uniqueReactions2): """ This function outputs the species and reactions on an HTML page for the comparison of two RMG models. 
""" from rmgpy.rmg.model import PDepReaction - from rmgpy.kinetics import Arrhenius, MultiArrhenius, MultiPDepArrhenius + from rmgpy.kinetics import MultiArrhenius, MultiPDepArrhenius from rmgpy.molecule.draw import MoleculeDrawer try: @@ -533,15 +536,15 @@ def saveDiffHTML(path, commonSpeciesList, speciesList1, speciesList2, commonReac # Prepare parameters to pass to jinja template title = 'RMG Model Comparison' - speciesList = [spec1 for spec1, spec2 in commonSpeciesList] + [spec2 for spec1, spec2 in commonSpeciesList] + speciesList1 + speciesList2 + speciesList = [spec1 for spec1, spec2 in commonSpeciesList] + [spec2 for spec1, spec2 in + commonSpeciesList] + speciesList1 + speciesList2 re_index = re.compile(r'\((\d+)\)$') - - if not os.path.isdir(os.path.join(dirname,'species1')): - os.makedirs(os.path.join(dirname,'species1')) - - if not os.path.isdir(os.path.join(dirname,'species2')): - os.makedirs(os.path.join(dirname,'species2')) + if not os.path.isdir(os.path.join(dirname, 'species1')): + os.makedirs(os.path.join(dirname, 'species1')) + + if not os.path.isdir(os.path.join(dirname, 'species2')): + os.makedirs(os.path.join(dirname, 'species2')) for spec1, spec2 in commonSpeciesList: # if the species dictionary came from an RMG-Java job, make them prettier @@ -552,28 +555,29 @@ def saveDiffHTML(path, commonSpeciesList, speciesList1, speciesList2, commonReac spec1.index = int(match1.group(0)[1:-1]) spec1.label = spec1.label[0:match1.start()] - match2 = re_index.search(spec2.label) + match2 = re_index.search(spec2.label) if match2: spec2.index = int(match2.group(0)[1:-1]) - spec2.label = spec2.label[0:match2.start()] - - # Draw molecules if necessary + spec2.label = spec2.label[0:match2.start()] + + # Draw molecules if necessary fstr = os.path.join(dirname, 'species1', '{0}.png'.format(spec1)) if not os.path.exists(fstr): try: MoleculeDrawer().draw(spec1.molecule[0], 'png', fstr) except IndexError: - raise OutputError('{0} species could not be drawn because it did not contain a molecular structure. Please recheck your files.'.format(getSpeciesIdentifier(spec1))) + raise OutputError('{0} species could not be drawn because it did not contain a molecular structure. ' + 'Please recheck your files.'.format(getSpeciesIdentifier(spec1))) - fstr = os.path.join(dirname, 'species2', '{0}.png'.format(spec2)) if not os.path.exists(fstr): try: MoleculeDrawer().draw(spec2.molecule[0], 'png', fstr) except IndexError: - raise OutputError('{0} species could not be drawn because it did not contain a molecular structure. Please recheck your files.'.format(getSpeciesIdentifier(spec2))) - - + raise OutputError( + '{0} species could not be drawn because it did not contain a molecular structure. Please recheck ' + 'your files.'.format(getSpeciesIdentifier(spec2))) + for spec in speciesList1: match = re_index.search(spec.label) if match: @@ -585,8 +589,9 @@ def saveDiffHTML(path, commonSpeciesList, speciesList1, speciesList2, commonReac try: MoleculeDrawer().draw(spec.molecule[0], 'png', fstr) except IndexError: - raise OutputError('{0} species could not be drawn because it did not contain a molecular structure. Please recheck your files.'.format(getSpeciesIdentifier(spec))) - + raise OutputError('{0} species could not be drawn because it did not contain a molecular structure. 
' + 'Please recheck your files.'.format(getSpeciesIdentifier(spec))) + for spec in speciesList2: match = re_index.search(spec.label) if match: @@ -598,19 +603,20 @@ def saveDiffHTML(path, commonSpeciesList, speciesList1, speciesList2, commonReac try: MoleculeDrawer().draw(spec.molecule[0], 'png', fstr) except IndexError: - raise OutputError('{0} species could not be drawn because it did not contain a molecular structure. Please recheck your files.'.format(getSpeciesIdentifier(spec))) - - #Add pictures for species that may not have different thermo but are in reactions with different kinetics - allRxns = [rxnTuple[0] for rxnTuple in commonReactions] + uniqueReactions1 + uniqueReactions2 - allSpecies = [] - for rxn in allRxns: + raise OutputError('{0} species could not be drawn because it did not contain a molecular structure. ' + 'Please recheck your files.'.format(getSpeciesIdentifier(spec))) + + # Add pictures for species that may not have different thermo but are in reactions with different kinetics + all_rxns = [rxnTuple[0] for rxnTuple in commonReactions] + uniqueReactions1 + uniqueReactions2 + all_species = [] + for rxn in all_rxns: for prod in rxn.products: - allSpecies.append(prod) + all_species.append(prod) for rxt in rxn.reactants: - allSpecies.append(rxt) - allSpecies = set(allSpecies) + all_species.append(rxt) + all_species = set(all_species) - for spec in allSpecies: + for spec in all_species: match = re_index.search(spec.label) if match: spec.index = int(match.group(0)[1:-1]) @@ -621,62 +627,61 @@ def saveDiffHTML(path, commonSpeciesList, speciesList1, speciesList2, commonReac try: MoleculeDrawer().draw(spec.molecule[0], 'png', fstr) except IndexError: - raise OutputError('{0} species could not be drawn because it did not contain a molecular structure. Please recheck your files.'.format(getSpeciesIdentifier(spec))) + raise OutputError('{0} species could not be drawn because it did not contain a molecular structure. ' + 'Please recheck your files.'.format(getSpeciesIdentifier(spec))) - - familyCount1 = {} - familyCount2 = {} + family_count1 = {} + family_count2 = {} for rxn1, rxn2 in commonReactions: - if isinstance(rxn2.kinetics, (MultiArrhenius,MultiPDepArrhenius)): - rxn2.duplicate = True + if isinstance(rxn2.kinetics, (MultiArrhenius, MultiPDepArrhenius)): + rxn2.duplicate = True if isinstance(rxn1, PDepReaction): family = "PDepNetwork" else: family = rxn1.getSource() - if family in familyCount1: - familyCount1[family] += 1 - familyCount2[family] += 1 + if family in family_count1: + family_count1[family] += 1 + family_count2[family] += 1 else: - familyCount1[family] = 1 - familyCount2[family] = 1 + family_count1[family] = 1 + family_count2[family] = 1 for rxn in uniqueReactions1: if isinstance(rxn, PDepReaction): family = "PDepNetwork" else: family = rxn.getSource() - if family in familyCount1: - familyCount1[family] += 1 + if family in family_count1: + family_count1[family] += 1 else: - familyCount1[family] = 1 + family_count1[family] = 1 for rxn in uniqueReactions2: if isinstance(rxn, PDepReaction): family = "PDepNetwork" else: family = rxn.getSource() - if family in familyCount2: - familyCount2[family] += 1 + if family in family_count2: + family_count2[family] += 1 else: - familyCount2[family] = 1 + family_count2[family] = 1 - families1 = familyCount1.keys() - families2 = familyCount2.keys() + families1 = list(family_count1.keys()) + families2 = list(family_count2.keys()) families1.sort() families2.sort() - - - ## jinja2 filters etc. + # jinja2 filters etc. 
to_remove_from_css_names = re.compile('[/.\-+,]') + def csssafe(input): "Replace unsafe CSS class name characters with an underscore." - return to_remove_from_css_names.sub('_',input) + return to_remove_from_css_names.sub('_', input) environment = jinja2.Environment() environment.filters['csssafe'] = csssafe -# Make HTML file + # Make HTML file template = environment.from_string( """ @@ -1301,10 +1306,14 @@ def csssafe(input): """) f = open(path, 'w') - f.write(template.render(title=title, commonSpecies=commonSpeciesList, speciesList1=speciesList1, speciesList2 = speciesList2, - commonReactions=commonReactions, uniqueReactions1=uniqueReactions1, uniqueReactions2=uniqueReactions2, - families1=families1, families2=families2, familyCount1=familyCount1,familyCount2=familyCount2, families_union=set(families1+families2),speciesList=speciesList, - getSpeciesIdentifier=getSpeciesIdentifier,textwrap=textwrap)) + f.write(template.render(title=title, commonSpecies=commonSpeciesList, speciesList1=speciesList1, + speciesList2=speciesList2, + commonReactions=commonReactions, uniqueReactions1=uniqueReactions1, + uniqueReactions2=uniqueReactions2, + families1=families1, families2=families2, familyCount1=family_count1, + familyCount2=family_count2, families_union=set(families1 + families2), + speciesList=speciesList, + getSpeciesIdentifier=getSpeciesIdentifier, textwrap=textwrap)) f.close() @@ -1314,11 +1323,12 @@ def saveOutput(rmg): """ logging.info('Saving current model core to HTML file...') saveOutputHTML(os.path.join(rmg.outputDirectory, 'output.html'), rmg.reactionModel, 'core') - + if rmg.saveEdgeSpecies == True: logging.info('Saving current model edge to HTML file...') saveOutputHTML(os.path.join(rmg.outputDirectory, 'output_edge.html'), rmg.reactionModel, 'edge') + class OutputHTMLWriter(object): """ This class listens to a RMG subject @@ -1340,9 +1350,10 @@ class OutputHTMLWriter(object): rmg.detach(listener) """ + def __init__(self, outputDirectory=''): super(OutputHTMLWriter, self).__init__() makeOutputSubdirectory(outputDirectory, 'species') - + def update(self, rmg): saveOutput(rmg) diff --git a/rmgpy/rmg/outputTest.py b/rmgpy/rmg/outputTest.py index 18ce1dd269..2c0fa68374 100644 --- a/rmgpy/rmg/outputTest.py +++ b/rmgpy/rmg/outputTest.py @@ -29,38 +29,38 @@ ############################################################################### import os +import shutil import unittest -import shutil -from model import CoreEdgeReactionModel, ReactionModel +from rmgpy.rmg.model import CoreEdgeReactionModel, ReactionModel +from rmgpy.rmg.output import saveOutputHTML from rmgpy.chemkin import loadChemkinFile -from output import * ################################################### class TestOutput(unittest.TestCase): - def testSaveOutputHTML(self): - """ - This example is to test if an HTML file can be generated - for the provided chemkin model. - """ - folder = os.path.join(os.getcwd(),'rmgpy/rmg/test_data/saveOutputHTML/') - - chemkinPath = os.path.join(folder, 'eg6', 'chem_annotated.inp') - dictionaryPath = os.path.join(folder,'eg6', 'species_dictionary.txt') + def testSaveOutputHTML(self): + """ + This example is to test if an HTML file can be generated + for the provided chemkin model. 
+ """ + folder = os.path.join(os.getcwd(), 'rmgpy/rmg/test_data/saveOutputHTML/') - # loadChemkinFile - species, reactions = loadChemkinFile(chemkinPath, dictionaryPath) + chemkin_path = os.path.join(folder, 'eg6', 'chem_annotated.inp') + dictionary_path = os.path.join(folder, 'eg6', 'species_dictionary.txt') - # convert it into a reaction model: - core = ReactionModel(species, reactions) - cerm = CoreEdgeReactionModel(core) + # loadChemkinFile + species, reactions = loadChemkinFile(chemkin_path, dictionary_path) - out = os.path.join(folder, 'output.html') - saveOutputHTML(out, cerm) + # convert it into a reaction model: + core = ReactionModel(species, reactions) + cerm = CoreEdgeReactionModel(core) - self.assertTrue(os.path.isfile(out)) - os.remove(out) - shutil.rmtree(os.path.join(folder,'species')) + out = os.path.join(folder, 'output.html') + saveOutputHTML(out, cerm) + + self.assertTrue(os.path.isfile(out)) + os.remove(out) + shutil.rmtree(os.path.join(folder, 'species')) diff --git a/rmgpy/rmg/pdep.py b/rmgpy/rmg/pdep.py index 3363fb0b56..4edf29dd83 100644 --- a/rmgpy/rmg/pdep.py +++ b/rmgpy/rmg/pdep.py @@ -35,18 +35,18 @@ import logging import os.path -import numpy as np + import mpmath as mp +import numpy as np import scipy.optimize as opt import rmgpy.pdep.network import rmgpy.reaction from rmgpy.constants import R - +from rmgpy.data.kinetics.library import LibraryReaction +from rmgpy.exceptions import PressureDependenceError, NetworkError from rmgpy.pdep import Conformer, Configuration from rmgpy.rmg.react import react_species -from rmgpy.exceptions import PressureDependenceError, NetworkError -from rmgpy.data.kinetics.library import LibraryReaction ################################################################################ @@ -101,13 +101,14 @@ def __reduce__(self): self.degeneracy, self.pairs )) - + def getSource(self): """ Get the source of this PDepReaction """ return str(self.network) + ################################################################################ class PDepNetwork(rmgpy.pdep.network.Network): @@ -132,17 +133,17 @@ def __init__(self, index=-1, source=None): self.index = index self.source = source self.explored = [] - + def __str__(self): return "PDepNetwork #{0}".format(self.index) - + def __reduce__(self): """ A helper function used when pickling an object. 
""" return (PDepNetwork, (self.index, self.source), self.__dict__) - - def __setstate__(self,dict): + + def __setstate__(self, dict): self.__dict__.update(dict) @property @@ -174,10 +175,10 @@ def cleanup(self): self.E0 = None self.Ngrains = 0 self.NJ = 0 - + self.K = None self.p0 = None - + def getLeakCoefficient(self, T, P): """ Return the pressure-dependent rate coefficient :math:`k(T,P)` describing @@ -195,17 +196,18 @@ def getLeakCoefficient(self, T, P): if rxn.reverse.kinetics is not None: rxn = rxn.reverse else: - raise PressureDependenceError('Path reaction {0} with no high-pressure-limit kinetics encountered in PDepNetwork #{1:d} while evaluating leak flux.'.format(rxn, self.index)) + raise PressureDependenceError('Path reaction {0} with no high-pressure-limit kinetics encountered ' + 'in PDepNetwork #{1:d} while evaluating leak flux.'.format(rxn, self.index)) if rxn.products is self.source: - k = rxn.getRateCoefficient(T,P) / rxn.getEquilibriumConstant(T) + k = rxn.getRateCoefficient(T, P) / rxn.getEquilibriumConstant(T) else: - k = rxn.getRateCoefficient(T,P) + k = rxn.getRateCoefficient(T, P) else: # The network has at least one included isomer, so we can calculate # the leak flux normally for rxn in self.netReactions: if len(rxn.products) == 1 and rxn.products[0] not in self.explored: - k += rxn.getRateCoefficient(T,P) + k += rxn.getRateCoefficient(T, P) return k def getMaximumLeakSpecies(self, T, P): @@ -215,29 +217,30 @@ def getMaximumLeakSpecies(self, T, P): pressure, so you must provide these in order to get a meaningful result. """ # Choose species with maximum leak flux - maxK = 0.0; maxSpecies = None + max_k = 0.0 + max_species = None if len(self.netReactions) == 0 and len(self.pathReactions) == 1: - maxK = self.getLeakCoefficient(T,P) + max_k = self.getLeakCoefficient(T, P) rxn = self.pathReactions[0] if rxn.products == self.source: assert len(rxn.reactants) == 1 - maxSpecies = rxn.reactants[0] + max_species = rxn.reactants[0] else: assert len(rxn.products) == 1 - maxSpecies = rxn.products[0] + max_species = rxn.products[0] else: for rxn in self.netReactions: if len(rxn.products) == 1 and rxn.products[0] not in self.explored: - k = rxn.getRateCoefficient(T,P) - if maxSpecies is None or k > maxK: - maxSpecies = rxn.products[0] - maxK = k + k = rxn.getRateCoefficient(T, P) + if max_species is None or k > max_k: + max_species = rxn.products[0] + max_k = k # Make sure we've identified a species - if maxSpecies is None: + if max_species is None: raise NetworkError('No unimolecular isomers left to explore!') # Return the species - return maxSpecies + return max_species def getLeakBranchingRatios(self, T, P): """ @@ -257,7 +260,7 @@ def getLeakBranchingRatios(self, T, P): else: for rxn in self.netReactions: if len(rxn.products) == 1 and rxn.products[0] not in self.explored: - ratios[rxn.products[0]] = rxn.getRateCoefficient(T,P) + ratios[rxn.products[0]] = rxn.getRateCoefficient(T, P) kleak = sum(ratios.values()) for spec in ratios: @@ -273,11 +276,12 @@ def exploreIsomer(self, isomer): """ if isomer in self.explored: - logging.warning('Already explored isomer {0} in pressure-dependent network #{1:d}'.format(isomer, self.index)) + logging.warning('Already explored isomer {0} in pressure-dependent network #{1:d}'.format(isomer, + self.index)) return [] - + assert isomer not in self.source, "Attempted to explore isomer {0}, but that is the source configuration for this network.".format(isomer) - + for product in self.products: if product.species == [isomer]: break @@ -300,9 
+304,9 @@ def exploreIsomer(self, isomer): # Don't find reactions involving the new species as bimolecular # reactants or products with other core species (e.g. A + B <---> products) - newReactions = react_species((isomer,)) - - return newReactions + new_reactions = react_species((isomer,)) + + return new_reactions def addPathReaction(self, newReaction): """ @@ -327,8 +331,8 @@ def get_energy_filtered_reactions(self, T, tol): Returns a list of products and isomers that are greater in Free Energy than a*R*T + Gfsource(T) """ - dE = tol*R*T - for conf in self.isomers+self.products+self.reactants: + dE = tol * R * T + for conf in self.isomers + self.products + self.reactants: if len(conf.species) == len(self.source): if len(self.source) == 1: if self.source[0].isIsomorphic(conf.species[0]): @@ -337,7 +341,7 @@ def get_energy_filtered_reactions(self, T, tol): elif len(self.source) == 2: boo00 = self.source[0].isIsomorphic(conf.species[0]) boo01 = self.source[0].isIsomorphic(conf.species[1]) - if boo00 or boo01: # if we found source[0] + if boo00 or boo01: # if we found source[0] boo10 = self.source[1].isIsomorphic(conf.species[0]) boo11 = self.source[1].isIsomorphic(conf.species[1]) if (boo00 and boo11) or (boo01 and boo10): @@ -349,9 +353,9 @@ def get_energy_filtered_reactions(self, T, tol): filtered_rxns = [] for rxn in self.pathReactions: E0 = rxn.transitionState.conformer.E0.value_si - if E0-E0source > dE: + if E0 - E0source > dE: filtered_rxns.append(rxn) - + return filtered_rxns def get_rate_filtered_products(self, T, P, tol): @@ -360,147 +364,143 @@ def get_rate_filtered_products(self, T, P, tol): tol at steady state where all A => B + C reactions are irreversible and there is a constant flux from/to the source configuration of 1.0 """ - c = self.solve_SS_network(T,P) - isomerSpcs = [iso.species[0] for iso in self.isomers] + c = self.solve_SS_network(T, P) + isomer_spcs = [iso.species[0] for iso in self.isomers] filtered_prod = [] if c is not None: for rxn in self.netReactions: val = 0.0 val2 = 0.0 - if rxn.reactants[0] in isomerSpcs: - ind = isomerSpcs.index(rxn.reactants[0]) - kf = rxn.getRateCoefficient(T,P) - val = kf*c[ind] - if rxn.products[0] in isomerSpcs: - ind2 = isomerSpcs.index(rxn.products[0]) - kr = rxn.getRateCoefficient(T,P)/rxn.getEquilibriumConstant(T) - val2 = kr*c[ind2] - - if max(val,val2) < tol: + if rxn.reactants[0] in isomer_spcs: + ind = isomer_spcs.index(rxn.reactants[0]) + kf = rxn.getRateCoefficient(T, P) + val = kf * c[ind] + if rxn.products[0] in isomer_spcs: + ind2 = isomer_spcs.index(rxn.products[0]) + kr = rxn.getRateCoefficient(T, P) / rxn.getEquilibriumConstant(T) + val2 = kr * c[ind2] + + if max(val, val2) < tol: filtered_prod.append(rxn.products) return filtered_prod else: - logging.warn("Falling back flux reduction from Steady State analysis to rate coefficient analysis") - ks = np.array([rxn.getRateCoefficient(T,P) for rxn in self.netReactions]) - frs = ks/ks.sum() - inds = [i for i in xrange(len(frs)) if frs[i] < tol] + logging.warning("Falling back flux reduction from Steady State analysis to rate coefficient analysis") + ks = np.array([rxn.getRateCoefficient(T, P) for rxn in self.netReactions]) + frs = ks / ks.sum() + inds = [i for i in range(len(frs)) if frs[i] < tol] filtered_prod = [self.netReactions[i].products for i in inds] return filtered_prod - def solve_SS_network(self, T, P): """ calculates the steady state concentrations if all A => B + C reactions are irreversible and the flux from/to the source configuration is 1.0 """ - A = 
np.zeros((len(self.isomers),len(self.isomers))) + A = np.zeros((len(self.isomers), len(self.isomers))) b = np.zeros(len(self.isomers)) bimolecular = len(self.source) > 1 - - isomerSpcs = [iso.species[0] for iso in self.isomers] - + + isomer_spcs = [iso.species[0] for iso in self.isomers] for rxn in self.netReactions: - if rxn.reactants[0] in isomerSpcs: - ind = isomerSpcs.index(rxn.reactants[0]) - kf = rxn.getRateCoefficient(T,P) - A[ind,ind] -= kf + if rxn.reactants[0] in isomer_spcs: + ind = isomer_spcs.index(rxn.reactants[0]) + kf = rxn.getRateCoefficient(T, P) + A[ind, ind] -= kf else: ind = None - if rxn.products[0] in isomerSpcs: - ind2 = isomerSpcs.index(rxn.products[0]) - kr = rxn.getRateCoefficient(T,P)/rxn.getEquilibriumConstant(T) - A[ind2,ind2] -= kr + if rxn.products[0] in isomer_spcs: + ind2 = isomer_spcs.index(rxn.products[0]) + kr = rxn.getRateCoefficient(T, P) / rxn.getEquilibriumConstant(T) + A[ind2, ind2] -= kr else: ind2 = None if ind is not None and ind2 is not None: - A[ind,ind2] += kr - A[ind2,ind] += kf + A[ind, ind2] += kr + A[ind2, ind] += kf if bimolecular: if rxn.reactants[0] == self.source: - kf = rxn.getRateCoefficient(T,P) + kf = rxn.getRateCoefficient(T, P) b[ind2] += kf elif rxn.products[0] == self.source: - kr = rxn.getRateCoefficient(T,P)/rxn.getEquilibriumConstant(T) + kr = rxn.getRateCoefficient(T, P) / rxn.getEquilibriumConstant(T) b[ind] += kr - - + if not bimolecular: - ind = isomerSpcs.index(self.source[0]) - b[ind] = -1.0 #flux at source + ind = isomer_spcs.index(self.source[0]) + b[ind] = -1.0 # flux at source else: - b = -b/b.sum() #1.0 flux from source - + b = -b / b.sum() # 1.0 flux from source + if len(b) == 1: - return np.array([b[0]/A[0,0]]) - + return np.array([b[0] / A[0, 0]]) + con = np.linalg.cond(A) - + if np.log10(con) < 15: - c = np.linalg.solve(A,b) + c = np.linalg.solve(A, b) else: - logging.warn("Matrix Ill-conditioned, attempting to use Arbitrary Precision Arithmetic") - mp.dps = 30+int(np.log10(con)) + logging.warning("Matrix Ill-conditioned, attempting to use Arbitrary Precision Arithmetic") + mp.dps = 30 + int(np.log10(con)) Amp = mp.matrix(A.tolist()) bmp = mp.matrix(b.tolist()) - + try: - c = mp.qr_solve(Amp,bmp) + c = mp.qr_solve(Amp, bmp) c = np.array(list(c[0])) - if any(c<=0.0): - c, rnorm = opt.nnls(A,b) + if any(c <= 0.0): + c, rnorm = opt.nnls(A, b) c = c.astype(np.float64) - except: #fall back to raw flux analysis rather than solve steady state problem + except: # fall back to raw flux analysis rather than solve steady state problem return None return c - - + def remove_disconnected_reactions(self): """ gets rid of reactions/isomers/products not connected to the source by a reaction sequence """ - keptReactions = [] - keptProducts = [self.source] + kept_reactions = [] + kept_products = [self.source] incomplete = True while incomplete: - s = len(keptReactions) + s = len(kept_reactions) for rxn in self.pathReactions: - if not rxn in keptReactions: - if rxn.reactants in keptProducts: - keptProducts.append(rxn.products) - keptReactions.append(rxn) - elif rxn.products in keptProducts: - keptProducts.append(rxn.reactants) - keptReactions.append(rxn) - - incomplete = s != len(keptReactions) - + if not rxn in kept_reactions: + if rxn.reactants in kept_products: + kept_products.append(rxn.products) + kept_reactions.append(rxn) + elif rxn.products in kept_products: + kept_products.append(rxn.reactants) + kept_reactions.append(rxn) + + incomplete = s != len(kept_reactions) + logging.info('Removing disconnected items') for rxn in 
self.pathReactions: - if rxn not in keptReactions: + if rxn not in kept_reactions: logging.info('Removing rxn: {}'.format(rxn)) self.pathReactions.remove(rxn) - + nrxns = [] for nrxn in self.netReactions: - if nrxn.products not in keptProducts or nrxn.reactants not in keptProducts: - logging.info('Removing net rxn: {}'.format(nrxn)) - else: - logging.info('Keeping net rxn: {}'.format(nrxn)) - nrxns.append(nrxn) + if nrxn.products not in kept_products or nrxn.reactants not in kept_products: + logging.info('Removing net rxn: {}'.format(nrxn)) + else: + logging.info('Keeping net rxn: {}'.format(nrxn)) + nrxns.append(nrxn) self.netReactions = nrxns prods = [] for prod in self.products: - if prod.species not in keptProducts: + if prod.species not in kept_products: logging.info('Removing product: {}'.format(prod)) else: logging.info("Keeping product: {}".format(prod)) @@ -510,7 +510,7 @@ def remove_disconnected_reactions(self): rcts = [] for rct in self.reactants: - if rct.species not in keptProducts: + if rct.species not in kept_products: logging.info('Removing product: {}'.format(rct)) else: logging.info("Keeping product: {}".format(rct)) @@ -519,7 +519,7 @@ def remove_disconnected_reactions(self): isos = [] for iso in self.isomers: - if iso.species not in keptProducts: + if iso.species not in kept_products: logging.info('Removing isomer: {}'.format(iso)) else: logging.info("Keeping isomer: {}".format(iso)) @@ -546,7 +546,7 @@ def remove_reactions(self, reactionModel, rxns=None, prods=None): for prod in prods: prod = [x for x in prod] - if prod[0] in isomers: #skip isomers + if prod[0] in isomers: # skip isomers continue for rxn in self.pathReactions: if rxn.products == prod or rxn.reactants == prod: @@ -555,7 +555,7 @@ def remove_reactions(self, reactionModel, rxns=None, prods=None): prodspc = [x[0] for x in prods] for prod in prods: prod = [x for x in prod] - if prod[0] in isomers: #deal with isomers + if prod[0] in isomers: # deal with isomers for rxn in self.pathReactions: if rxn.reactants == prod and rxn.products[0] not in isomers and rxn.products[0] not in prodspc: break @@ -566,27 +566,26 @@ def remove_reactions(self, reactionModel, rxns=None, prods=None): if rxn.reactants == prod or rxn.products == prod: self.pathReactions.remove(rxn) - self.remove_disconnected_reactions() self.cleanup() self.invalidate() - + assert self.pathReactions != [], 'Reduction process removed all reactions, cannot update network with no reactions' - + reactionModel.updateUnimolecularReactionNetworks() - + if reactionModel.pressureDependence.outputFile: - path = os.path.join(reactionModel.pressureDependence.outputFile,'pdep') - - for name in os.listdir(path): #remove the old reduced file + path = os.path.join(reactionModel.pressureDependence.outputFile, 'pdep') + + for name in os.listdir(path): # remove the old reduced file if name.endswith('reduced.py'): - os.remove(os.path.join(path,name)) - - for name in os.listdir(path): #find the new file and name it network_reduced.py + os.remove(os.path.join(path, name)) + + for name in os.listdir(path): # find the new file and name it network_reduced.py if not name.endswith('full.py'): - os.rename(os.path.join(path,name),os.path.join(path,'network_reduced.py')) + os.rename(os.path.join(path, name), os.path.join(path, 'network_reduced.py')) def merge(self, other): """ @@ -611,14 +610,14 @@ def merge(self, other): for products in other.products: if products not in self.products: self.products.append(products) - + # However, products that have been explored are actually 
isomers # These should be removed from the list of products! - productsToRemove = [] + products_to_remove = [] for products in self.products: if len(products.species) == 1 and products.species[0] in self.isomers: - productsToRemove.append(products) - for products in productsToRemove: + products_to_remove.append(products) + for products in products_to_remove: self.products.remove(products) # Merge path reactions @@ -659,10 +658,10 @@ def updateConfigurations(self, reactionModel): """ reactants = [] products = [] - + # All explored species are isomers isomers = self.explored[:] - + # The source configuration is an isomer (if unimolecular) or a reactant channel (if bimolecular) if len(self.source) == 1: # The source is a unimolecular isomer @@ -671,7 +670,7 @@ def updateConfigurations(self, reactionModel): # The source is a bimolecular reactant channel self.source.sort() reactants.append(self.source) - + # Iterate over path reactions and make sure each set of reactants and products is classified for rxn in self.pathReactions: # Sort bimolecular configurations so that we always encounter them in the @@ -710,7 +709,7 @@ def updateConfigurations(self, reactionModel): self.isomers = [] self.reactants = [] self.products = [] - + # Make a configuration object for each for isomer in isomers: self.isomers.append(Configuration(isomer)) @@ -725,57 +724,62 @@ def update(self, reactionModel, pdepSettings): network is marked as invalid. """ from rmgpy.kinetics import Arrhenius, KineticsData, MultiArrhenius - from rmgpy.pdep.collision import SingleExponentialDown - from rmgpy.pdep.reaction import fitInterpolationModel - + # Get the parameters for the pressure dependence calculation job = pdepSettings job.network = self - outputDirectory = pdepSettings.outputFile - + output_directory = pdepSettings.outputFile + Tmin = job.Tmin.value_si Tmax = job.Tmax.value_si Pmin = job.Pmin.value_si Pmax = job.Pmax.value_si Tlist = job.Tlist.value_si Plist = job.Plist.value_si - maximumGrainSize = job.maximumGrainSize.value_si if job.maximumGrainSize is not None else 0.0 - minimumGrainCount = job.minimumGrainCount + maximum_grain_size = job.maximumGrainSize.value_si if job.maximumGrainSize is not None else 0.0 + minimum_grain_count = job.minimumGrainCount method = job.method - interpolationModel = job.interpolationModel + interpolation_model = job.interpolationModel activeJRotor = job.activeJRotor activeKRotor = job.activeKRotor rmgmode = job.rmgmode - + # Figure out which configurations are isomers, reactant channels, and product channels self.updateConfigurations(reactionModel) # Make sure we have high-P kinetics for all path reactions for rxn in self.pathReactions: if rxn.kinetics is None and rxn.reverse.kinetics is None: - raise PressureDependenceError('Path reaction {0} with no high-pressure-limit kinetics encountered in PDepNetwork #{1:d}.'.format(rxn, self.index)) + raise PressureDependenceError('Path reaction {0} with no high-pressure-limit kinetics encountered in ' + 'PDepNetwork #{1:d}.'.format(rxn, self.index)) elif rxn.kinetics is not None and rxn.kinetics.isPressureDependent() and rxn.network_kinetics is None: - raise PressureDependenceError('Pressure-dependent kinetics encountered for path reaction {0} in PDepNetwork #{1:d}.'.format(rxn, self.index)) - + raise PressureDependenceError('Pressure-dependent kinetics encountered for path reaction {0} in ' + 'PDepNetwork #{1:d}.'.format(rxn, self.index)) + # Do nothing if the network is already valid - if self.valid: return + if self.valid: + return # Do nothing 
if there are no explored wells - if len(self.explored) == 0 and len(self.source) > 1: return + if len(self.explored) == 0 and len(self.source) > 1: + return # Log the network being updated logging.info("Updating {0:s}".format(self)) # Generate states data for unimolecular isomers and reactants if necessary for isomer in self.isomers: spec = isomer.species[0] - if not spec.hasStatMech(): spec.generateStatMech() + if not spec.hasStatMech(): + spec.generateStatMech() for reactants in self.reactants: for spec in reactants.species: - if not spec.hasStatMech(): spec.generateStatMech() + if not spec.hasStatMech(): + spec.generateStatMech() # Also generate states data for any path reaction reactants, so we can # always apply the ILT method in the direction the kinetics are known for reaction in self.pathReactions: for spec in reaction.reactants: - if not spec.hasStatMech(): spec.generateStatMech() + if not spec.hasStatMech(): + spec.generateStatMech() # While we don't need the frequencies for product channels, we do need # the E0, so create a conformer object with the E0 for the product # channel species if necessary @@ -783,7 +787,7 @@ def update(self, reactionModel, pdepSettings): for spec in products.species: if spec.conformer is None: spec.conformer = Conformer(E0=spec.getThermoData().E0) - + # Determine transition state energies on potential energy surface # In the absence of any better information, we simply set it to # be the reactant ground-state energy + the activation energy @@ -800,13 +804,14 @@ def update(self, reactionModel, pdepSettings): kunits = 'm^6/(mol^2*s)' else: kunits = '' - rxn.kinetics = Arrhenius().fitToData(Tlist=rxn.kinetics.Tdata.value_si, klist=rxn.kinetics.kdata.value_si, kunits=kunits) + rxn.kinetics = Arrhenius().fitToData(Tlist=rxn.kinetics.Tdata.value_si, + klist=rxn.kinetics.kdata.value_si, kunits=kunits) elif isinstance(rxn.kinetics, MultiArrhenius): logging.info('Converting multiple kinetics to a single Arrhenius expression for reaction {rxn}'.format(rxn=rxn)) rxn.kinetics = rxn.kinetics.toArrhenius(Tmin=Tmin, Tmax=Tmax) elif not isinstance(rxn.kinetics, Arrhenius) and rxn.network_kinetics is None: - raise Exception('Path reaction "{0}" in PDepNetwork #{1:d} has invalid kinetics type "{2!s}".'.format( - rxn,self.index,rxn.kinetics.__class__)) + raise Exception('Path reaction "{0}" in PDepNetwork #{1:d} has invalid kinetics ' + 'type "{2!s}".'.format(rxn, self.index, rxn.kinetics.__class__)) rxn.fixBarrierHeight(forcePositive=True) if rxn.network_kinetics is None: E0 = sum([spec.conformer.E0.value_si for spec in rxn.reactants]) + rxn.kinetics.Ea.value_si @@ -815,24 +820,27 @@ def update(self, reactionModel, pdepSettings): rxn.transitionState = rmgpy.species.TransitionState(conformer=Conformer(E0=(E0 * 0.001, "kJ/mol"))) # Set collision model - bathGas = [spec for spec in reactionModel.core.species if not spec.reactive] - assert len(bathGas) > 0, 'No unreactive species to identify as bath gas' - + bath_gas = [spec for spec in reactionModel.core.species if not spec.reactive] + assert len(bath_gas) > 0, 'No unreactive species to identify as bath gas' + self.bathGas = {} - for spec in bathGas: + for spec in bath_gas: # is this really the only/best way to weight them? 
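The assignment just below gives every unreactive core species an equal share of the bath gas, so the weights always sum to one. A minimal standalone sketch of that normalization, alongside a hypothetical mole-fraction-weighted alternative (the species labels and relative amounts are made up for illustration and are not part of this patch):

    # Equal weighting, as applied below: every bath-gas species gets 1/N.
    concentrations = {'N2': 0.9, 'Ar': 0.1}   # hypothetical relative amounts
    equal_weights = {spc: 1.0 / len(concentrations) for spc in concentrations}
    # A possible alternative would weight by the actual mixture composition.
    total = sum(concentrations.values())
    fraction_weights = {spc: c / total for spc, c in concentrations.items()}
    assert abs(sum(equal_weights.values()) - 1.0) < 1e-12
    assert abs(sum(fraction_weights.values()) - 1.0) < 1e-12
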
- self.bathGas[spec] = 1.0 / len(bathGas) - + self.bathGas[spec] = 1.0 / len(bath_gas) + # Save input file - if not self.label: self.label = str(self.index) - - if outputDirectory: - job.saveInputFile(os.path.join(outputDirectory, 'pdep', 'network{0:d}_{1:d}.py'.format(self.index, len(self.isomers)))) - + if not self.label: + self.label = str(self.index) + + if output_directory: + job.saveInputFile( + os.path.join(output_directory, 'pdep', 'network{0:d}_{1:d}.py'.format(self.index, len(self.isomers)))) + self.printSummary(level=logging.INFO) # Calculate the rate coefficients - self.initialize(Tmin, Tmax, Pmin, Pmax, maximumGrainSize, minimumGrainCount, activeJRotor, activeKRotor, rmgmode) + self.initialize(Tmin, Tmax, Pmin, Pmax, maximum_grain_size, minimum_grain_count, activeJRotor, activeKRotor, + rmgmode) K = self.calculateRateCoefficients(Tlist, Plist, method) # Generate PDepReaction objects @@ -845,55 +853,55 @@ def update(self, reactionModel, pdepSettings): for i in range(K.shape[2]): if i != j: # Find the path reaction - netReaction = None + net_reaction = None for r in self.netReactions: if r.hasTemplate(configurations[j], configurations[i]): - netReaction = r + net_reaction = r # If net reaction does not already exist, make a new one - if netReaction is None: - netReaction = PDepReaction( + if net_reaction is None: + net_reaction = PDepReaction( reactants=configurations[j], products=configurations[i], network=self, kinetics=None ) - netReaction = reactionModel.makeNewPDepReaction(netReaction) - self.netReactions.append(netReaction) + net_reaction = reactionModel.makeNewPDepReaction(net_reaction) + self.netReactions.append(net_reaction) # Place the net reaction in the core or edge if necessary # Note that leak reactions are not placed in the edge - if all([s in reactionModel.core.species for s in netReaction.reactants]) \ - and all([s in reactionModel.core.species for s in netReaction.products]): + if all([s in reactionModel.core.species for s in net_reaction.reactants]) \ + and all([s in reactionModel.core.species for s in net_reaction.products]): # Check whether netReaction already exists in the core as a LibraryReaction for rxn in reactionModel.core.reactions: if isinstance(rxn, LibraryReaction) \ - and rxn.isIsomorphic(netReaction, eitherDirection=True) \ + and rxn.isIsomorphic(net_reaction, eitherDirection=True) \ and not rxn.allow_pdep_route and not rxn.elementary_high_p: logging.info('Network reaction {0} matched an existing core reaction {1}' - ' from the {2} library, and was not added to the model'.format( - str(netReaction), str(rxn), rxn.library)) + ' from the {2} library, and was not added to the model'.format( + str(net_reaction), str(rxn), rxn.library)) break else: - reactionModel.addReactionToCore(netReaction) + reactionModel.addReactionToCore(net_reaction) else: # Check whether netReaction already exists in the edge as a LibraryReaction for rxn in reactionModel.edge.reactions: if isinstance(rxn, LibraryReaction) \ - and rxn.isIsomorphic(netReaction, eitherDirection=True) \ + and rxn.isIsomorphic(net_reaction, eitherDirection=True) \ and not rxn.allow_pdep_route and not rxn.elementary_high_p: logging.info('Network reaction {0} matched an existing edge reaction {1}' - ' from the {2} library, and was not added to the model'.format( - str(netReaction), str(rxn), rxn.library)) + ' from the {2} library, and was not added to the model'.format( + str(net_reaction), str(rxn), rxn.library)) break else: - reactionModel.addReactionToEdge(netReaction) + 
reactionModel.addReactionToEdge(net_reaction) # Set/update the net reaction kinetics using interpolation model - kdata = K[:,:,i,j].copy() - order = len(netReaction.reactants) - kdata *= 1e6 ** (order-1) + kdata = K[:, :, i, j].copy() + order = len(net_reaction.reactants) + kdata *= 1e6 ** (order - 1) kunits = {1: 's^-1', 2: 'cm^3/(mol*s)', 3: 'cm^6/(mol^2*s)'}[order] - netReaction.kinetics = job.fitInterpolationModel(Tlist, Plist, kdata, kunits) + net_reaction.kinetics = job.fitInterpolationModel(Tlist, Plist, kdata, kunits) # Check: For each net reaction that has a path reaction, make # sure the k(T,P) values for the net reaction do not exceed @@ -901,7 +909,8 @@ def update(self, reactionModel, pdepSettings): # Only check the k(T,P) value at the highest P and lowest T, # as this is the one most likely to be in the high-pressure # limit - t = 0; p = len(Plist) - 1 + t = 0 + p = len(Plist) - 1 for pathReaction in self.pathReactions: if pathReaction.isIsomerization(): # Don't check isomerization reactions, since their @@ -911,27 +920,31 @@ def update(self, reactionModel, pdepSettings): # (This can also happen for association/dissociation # reactions, but the effect is generally not too large) continue - if pathReaction.reactants == netReaction.reactants and pathReaction.products == netReaction.products: + if pathReaction.reactants == net_reaction.reactants and pathReaction.products == net_reaction.products: if pathReaction.network_kinetics is not None: kinf = pathReaction.network_kinetics.getRateCoefficient(Tlist[t]) else: kinf = pathReaction.kinetics.getRateCoefficient(Tlist[t]) - if K[t,p,i,j] > 2 * kinf: # To allow for a small discretization error - logging.warning('k(T,P) for net reaction {0} exceeds high-P k(T) by {1:g} at {2:g} K, {3:g} bar'.format(netReaction, K[t,p,i,j] / kinf, Tlist[t], Plist[p]/1e5)) - logging.info(' k(T,P) = {0:9.2e} k(T) = {1:9.2e}'.format(K[t,p,i,j], kinf)) + if K[t, p, i, j] > 2 * kinf: # To allow for a small discretization error + logging.warning('k(T,P) for net reaction {0} exceeds high-P k(T) by {1:g} at {2:g} K, ' + '{3:g} bar'.format(net_reaction, K[t, p, i, j] / kinf, Tlist[t], Plist[p] / 1e5)) + logging.info(' k(T,P) = {0:9.2e} k(T) = {1:9.2e}'.format(K[t, p, i, j], kinf)) break - elif pathReaction.products == netReaction.reactants and pathReaction.reactants == netReaction.products: + elif pathReaction.products == net_reaction.reactants and pathReaction.reactants == net_reaction.products: if pathReaction.network_kinetics is not None: - kinf = pathReaction.network_kinetics.getRateCoefficient(Tlist[t]) / pathReaction.getEquilibriumConstant(Tlist[t]) + kinf = pathReaction.network_kinetics.getRateCoefficient( + Tlist[t]) / pathReaction.getEquilibriumConstant(Tlist[t]) else: - kinf = pathReaction.kinetics.getRateCoefficient(Tlist[t]) / pathReaction.getEquilibriumConstant(Tlist[t]) - if K[t,p,i,j] > 2 * kinf: # To allow for a small discretization error - logging.warning('k(T,P) for net reaction {0} exceeds high-P k(T) by {1:g} at {2:g} K, {3:g} bar'.format(netReaction, K[t,p,i,j] / kinf, Tlist[t], Plist[p]/1e5)) - logging.info(' k(T,P) = {0:9.2e} k(T) = {1:9.2e}'.format(K[t,p,i,j], kinf)) + kinf = pathReaction.kinetics.getRateCoefficient( + Tlist[t]) / pathReaction.getEquilibriumConstant(Tlist[t]) + if K[t, p, i, j] > 2 * kinf: # To allow for a small discretization error + logging.warning('k(T,P) for net reaction {0} exceeds high-P k(T) by {1:g} at {2:g} K, ' + '{3:g} bar'.format(net_reaction, K[t, p, i, j] / kinf, Tlist[t], Plist[p] / 1e5)) + logging.info(' 
k(T,P) = {0:9.2e} k(T) = {1:9.2e}'.format(K[t, p, i, j], kinf)) break - + # Delete intermediate arrays to conserve memory self.cleanup() - + # We're done processing this network, so mark it as valid self.valid = True diff --git a/rmgpy/rmg/pdepTest.py b/rmgpy/rmg/pdepTest.py index 28fded4032..045b69f93a 100644 --- a/rmgpy/rmg/pdepTest.py +++ b/rmgpy/rmg/pdepTest.py @@ -28,23 +28,24 @@ # # ############################################################################### -import unittest import logging +import unittest from copy import deepcopy -from rmgpy.pdep.network import Network +from rmgpy.kinetics.arrhenius import Arrhenius +from rmgpy.pdep.collision import SingleExponentialDown from rmgpy.pdep.configuration import Configuration +from rmgpy.pdep.network import Network from rmgpy.reaction import Reaction -from rmgpy.statmech.vibration import HarmonicOscillator -from rmgpy.statmech.torsion import HinderedRotor -from rmgpy.statmech.conformer import Conformer -from rmgpy.species import Species, TransitionState from rmgpy.rmg.pdep import PDepNetwork -from rmgpy.transport import TransportData +from rmgpy.species import Species, TransitionState +from rmgpy.statmech.conformer import Conformer +from rmgpy.statmech.rotation import NonlinearRotor +from rmgpy.statmech.torsion import HinderedRotor from rmgpy.statmech.translation import IdealGasTranslation -from rmgpy.statmech.rotation import NonlinearRotor -from rmgpy.pdep.collision import SingleExponentialDown -from rmgpy.kinetics.arrhenius import Arrhenius +from rmgpy.statmech.vibration import HarmonicOscillator +from rmgpy.transport import TransportData + ################################################### @@ -55,107 +56,130 @@ def setUp(self): A method that is run before each unit test in this class. """ self.nC4H10O = Species( - label = 'n-C4H10O', - conformer = Conformer( - E0 = (-317.807,'kJ/mol'), - modes = [ - IdealGasTranslation(mass=(74.07,"g/mol")), - NonlinearRotor(inertia=([41.5091,215.751,233.258],"amu*angstrom^2"), symmetry=1), - HarmonicOscillator(frequencies=([240.915,341.933,500.066,728.41,809.987,833.93,926.308,948.571,1009.3,1031.46,1076,1118.4,1184.66,1251.36,1314.36,1321.42,1381.17,1396.5,1400.54,1448.08,1480.18,1485.34,1492.24,1494.99,1586.16,2949.01,2963.03,2986.19,2988.1,2995.27,3026.03,3049.05,3053.47,3054.83,3778.88],"cm^-1")), - HinderedRotor(inertia=(0.854054,"amu*angstrom^2"), symmetry=1, fourier=([[0.25183,-1.37378,-2.8379,0.0305112,0.0028088], [0.458307,0.542121,-0.599366,-0.00283925,0.0398529]],"kJ/mol")), - HinderedRotor(inertia=(8.79408,"amu*angstrom^2"), symmetry=1, fourier=([[0.26871,-0.59533,-8.15002,-0.294325,-0.145357], [1.1884,0.99479,-0.940416,-0.186538,0.0309834]],"kJ/mol")), - HinderedRotor(inertia=(7.88153,"amu*angstrom^2"), symmetry=1, fourier=([[-4.67373,2.03735,-6.25993,-0.27325,-0.048748], [-0.982845,1.76637,-1.57619,0.474364,-0.000681718]],"kJ/mol")), - HinderedRotor(inertia=(2.81525,"amu*angstrom^2"), symmetry=3, barrier=(2.96807,"kcal/mol")), + label='n-C4H10O', + conformer=Conformer( + E0=(-317.807, 'kJ/mol'), + modes=[ + IdealGasTranslation(mass=(74.07, "g/mol")), + NonlinearRotor(inertia=([41.5091, 215.751, 233.258], "amu*angstrom^2"), symmetry=1), + HarmonicOscillator(frequencies=( + [240.915, 341.933, 500.066, 728.41, 809.987, 833.93, 926.308, 948.571, 1009.3, 1031.46, 1076, + 1118.4, 1184.66, 1251.36, 1314.36, 1321.42, 1381.17, 1396.5, 1400.54, 1448.08, 1480.18, 1485.34, + 1492.24, 1494.99, 1586.16, 2949.01, 2963.03, 2986.19, 2988.1, 2995.27, 3026.03, 3049.05, 3053.47, + 3054.83, 3778.88], 
"cm^-1")), + HinderedRotor(inertia=(0.854054, "amu*angstrom^2"), symmetry=1, fourier=( + [[0.25183, -1.37378, -2.8379, 0.0305112, 0.0028088], + [0.458307, 0.542121, -0.599366, -0.00283925, 0.0398529]], "kJ/mol")), + HinderedRotor(inertia=(8.79408, "amu*angstrom^2"), symmetry=1, fourier=( + [[0.26871, -0.59533, -8.15002, -0.294325, -0.145357], + [1.1884, 0.99479, -0.940416, -0.186538, 0.0309834]], "kJ/mol")), + HinderedRotor(inertia=(7.88153, "amu*angstrom^2"), symmetry=1, fourier=( + [[-4.67373, 2.03735, -6.25993, -0.27325, -0.048748], + [-0.982845, 1.76637, -1.57619, 0.474364, -0.000681718]], "kJ/mol")), + HinderedRotor(inertia=(2.81525, "amu*angstrom^2"), symmetry=3, barrier=(2.96807, "kcal/mol")), ], - spinMultiplicity = 1, - opticalIsomers = 1, + spinMultiplicity=1, + opticalIsomers=1, ), - molecularWeight = (74.07,"g/mol"), + molecularWeight=(74.07, "g/mol"), transportData=TransportData(sigma=(5.94, 'angstrom'), epsilon=(559, 'K')), - energyTransferModel = SingleExponentialDown(alpha0=(447.5*0.011962,"kJ/mol"), T0=(300,"K"), n=0.85), + energyTransferModel=SingleExponentialDown(alpha0=(447.5 * 0.011962, "kJ/mol"), T0=(300, "K"), n=0.85), ) - + self.nC4H10O.fromSMILES('CCCCO') - + self.nC4H8 = Species( - label = 'n-C4H8', - conformer = Conformer( - E0 = (-17.8832,'kJ/mol'), - modes = [ - IdealGasTranslation(mass=(56.06,"g/mol")), - NonlinearRotor(inertia=([22.2748,122.4,125.198],"amu*angstrom^2"), symmetry=1), - HarmonicOscillator(frequencies=([308.537,418.67,636.246,788.665,848.906,936.762,979.97,1009.48,1024.22,1082.96,1186.38,1277.55,1307.65,1332.87,1396.67,1439.09,1469.71,1484.45,1493.19,1691.49,2972.12,2994.31,3018.48,3056.87,3062.76,3079.38,3093.54,3174.52],"cm^-1")), - HinderedRotor(inertia=(5.28338,"amu*angstrom^2"), symmetry=1, fourier=([[-0.579364,-0.28241,-4.46469,0.143368,0.126756], [1.01804,-0.494628,-0.00318651,-0.245289,0.193728]],"kJ/mol")), - HinderedRotor(inertia=(2.60818,"amu*angstrom^2"), symmetry=3, fourier=([[0.0400372,0.0301986,-6.4787,-0.0248675,-0.0324753], [0.0312541,0.0538,-0.493785,0.0965968,0.125292]],"kJ/mol")), + label='n-C4H8', + conformer=Conformer( + E0=(-17.8832, 'kJ/mol'), + modes=[ + IdealGasTranslation(mass=(56.06, "g/mol")), + NonlinearRotor(inertia=([22.2748, 122.4, 125.198], "amu*angstrom^2"), symmetry=1), + HarmonicOscillator(frequencies=( + [308.537, 418.67, 636.246, 788.665, 848.906, 936.762, 979.97, 1009.48, 1024.22, 1082.96, 1186.38, + 1277.55, 1307.65, 1332.87, 1396.67, 1439.09, 1469.71, 1484.45, 1493.19, 1691.49, 2972.12, 2994.31, + 3018.48, 3056.87, 3062.76, 3079.38, 3093.54, 3174.52], "cm^-1")), + HinderedRotor(inertia=(5.28338, "amu*angstrom^2"), symmetry=1, fourier=( + [[-0.579364, -0.28241, -4.46469, 0.143368, 0.126756], + [1.01804, -0.494628, -0.00318651, -0.245289, 0.193728]], "kJ/mol")), + HinderedRotor(inertia=(2.60818, "amu*angstrom^2"), symmetry=3, fourier=( + [[0.0400372, 0.0301986, -6.4787, -0.0248675, -0.0324753], + [0.0312541, 0.0538, -0.493785, 0.0965968, 0.125292]], "kJ/mol")), ], - spinMultiplicity = 1, - opticalIsomers = 1, + spinMultiplicity=1, + opticalIsomers=1, ), ) - + self.nC4H8.fromSMILES('CCC=C') - + self.H2O = Species( - label = 'H2O', - conformer = Conformer( - E0 = (-269.598,'kJ/mol'), - modes = [ - IdealGasTranslation(mass=(18.01,"g/mol")), - NonlinearRotor(inertia=([0.630578,1.15529,1.78586],"amu*angstrom^2"), symmetry=2), - HarmonicOscillator(frequencies=([1622.09,3771.85,3867.85],"cm^-1")), + label='H2O', + conformer=Conformer( + E0=(-269.598, 'kJ/mol'), + modes=[ + IdealGasTranslation(mass=(18.01, 
"g/mol")), + NonlinearRotor(inertia=([0.630578, 1.15529, 1.78586], "amu*angstrom^2"), symmetry=2), + HarmonicOscillator(frequencies=([1622.09, 3771.85, 3867.85], "cm^-1")), ], - spinMultiplicity = 1, - opticalIsomers = 1, + spinMultiplicity=1, + opticalIsomers=1, ), ) - + self.H2O.fromSMILES('O') self.N2 = Species( - label = 'N2', - molecularWeight = (28.04,"g/mol"), + label='N2', + molecularWeight=(28.04, "g/mol"), transportData=TransportData(sigma=(3.41, "angstrom"), epsilon=(124, "K")), - energyTransferModel = None, + energyTransferModel=None, ) - + self.N2.fromSMILES('N#N') - + logging.error('to TS') - + self.TS = TransitionState( - label = 'TS', - conformer = Conformer( - E0 = (-42.4373,"kJ/mol"), - modes = [ - IdealGasTranslation(mass=(74.07,"g/mol")), - NonlinearRotor(inertia=([40.518,232.666,246.092],"u*angstrom**2"), symmetry=1, quantum=False), - HarmonicOscillator(frequencies=([134.289,302.326,351.792,407.986,443.419,583.988,699.001,766.1,777.969,829.671,949.753,994.731,1013.59,1073.98,1103.79,1171.89,1225.91,1280.67,1335.08,1373.9,1392.32,1417.43,1469.51,1481.61,1490.16,1503.73,1573.16,2972.85,2984.3,3003.67,3045.78,3051.77,3082.37,3090.44,3190.73,3708.52],"kayser")), - HinderedRotor(inertia=(2.68206,"amu*angstrom^2"), symmetry=3, barrier=(3.35244,"kcal/mol")), - HinderedRotor(inertia=(9.77669,"amu*angstrom^2"), symmetry=1, fourier=([[0.208938,-1.55291,-4.05398,-0.105798,-0.104752], [2.00518,-0.020767,-0.333595,0.137791,-0.274578]],"kJ/mol")), + label='TS', + conformer=Conformer( + E0=(-42.4373, "kJ/mol"), + modes=[ + IdealGasTranslation(mass=(74.07, "g/mol")), + NonlinearRotor(inertia=([40.518, 232.666, 246.092], "u*angstrom**2"), symmetry=1, quantum=False), + HarmonicOscillator(frequencies=( + [134.289, 302.326, 351.792, 407.986, 443.419, 583.988, 699.001, 766.1, 777.969, 829.671, 949.753, + 994.731, 1013.59, 1073.98, 1103.79, 1171.89, 1225.91, 1280.67, 1335.08, 1373.9, 1392.32, 1417.43, + 1469.51, 1481.61, 1490.16, 1503.73, 1573.16, 2972.85, 2984.3, 3003.67, 3045.78, 3051.77, 3082.37, + 3090.44, 3190.73, 3708.52], "kayser")), + HinderedRotor(inertia=(2.68206, "amu*angstrom^2"), symmetry=3, barrier=(3.35244, "kcal/mol")), + HinderedRotor(inertia=(9.77669, "amu*angstrom^2"), symmetry=1, fourier=( + [[0.208938, -1.55291, -4.05398, -0.105798, -0.104752], + [2.00518, -0.020767, -0.333595, 0.137791, -0.274578]], "kJ/mol")), ], - spinMultiplicity = 1, - opticalIsomers = 1, + spinMultiplicity=1, + opticalIsomers=1, ), - frequency=(-2038.34,'cm^-1'), + frequency=(-2038.34, 'cm^-1'), ) - + self.reaction = Reaction( - label = 'dehydration', - reactants = [self.nC4H10O], - products = [self.nC4H8, self.H2O], - transitionState = self.TS, - kinetics = Arrhenius(A=(0.0387, 'm^3/(mol*s)'), n=2.7, Ea=(2.6192e4, 'J/mol'), T0=(1, 'K')) + label='dehydration', + reactants=[self.nC4H10O], + products=[self.nC4H8, self.H2O], + transitionState=self.TS, + kinetics=Arrhenius(A=(0.0387, 'm^3/(mol*s)'), n=2.7, Ea=(2.6192e4, 'J/mol'), T0=(1, 'K')) ) - + self.network = Network( - label = 'n-butanol', - isomers = [Configuration(self.nC4H10O)], - reactants = [], - products = [Configuration(self.nC4H8, self.H2O)], - pathReactions = [self.reaction], - bathGas = {self.N2: 1.0}, + label='n-butanol', + isomers=[Configuration(self.nC4H10O)], + reactants=[], + products=[Configuration(self.nC4H8, self.H2O)], + pathReactions=[self.reaction], + bathGas={self.N2: 1.0}, ) - + self.pdepnetwork = deepcopy(self.network) self.pdepnetwork.__class__ = PDepNetwork self.pdepnetwork.source = 
[self.pdepnetwork.isomers[0].species[0]] @@ -163,13 +187,14 @@ def setUp(self): self.pdepnetwork.explored = [] def test_energy_filter(self): - rxns = self.pdepnetwork.get_energy_filtered_reactions(1000.0,0.0) - self.assertEquals(len(rxns),1) - self.assertEquals(rxns[0],self.pdepnetwork.pathReactions[0]) + rxns = self.pdepnetwork.get_energy_filtered_reactions(1000.0, 0.0) + self.assertEquals(len(rxns), 1) + self.assertEquals(rxns[0], self.pdepnetwork.pathReactions[0]) def test_flux_filter(self): - prods = self.pdepnetwork.get_rate_filtered_products(1000.0,100000.0,1.0) - self.assertEquals(len(prods),0) + prods = self.pdepnetwork.get_rate_filtered_products(1000.0, 100000.0, 1.0) + self.assertEquals(len(prods), 0) + if __name__ == '__main__': unittest.main() diff --git a/rmgpy/rmg/react.py b/rmgpy/rmg/react.py index edd99191b1..7b8974bf2d 100644 --- a/rmgpy/rmg/react.py +++ b/rmgpy/rmg/react.py @@ -32,9 +32,10 @@ Contains functions for generating reactions. """ import logging +from multiprocessing import Pool from rmgpy.data.rmg import getDB -from multiprocessing import Pool + ################################################################################ @@ -63,7 +64,7 @@ def react(spc_fam_tuples, procnum=1): # submits to the process pool as separate tasks. if procnum == 1: logging.info('For reaction generation {0} process is used.'.format(procnum)) - reactions = map(_react_species_star, spc_fam_tuples) + reactions = list(map(_react_species_star, spc_fam_tuples)) else: logging.info('For reaction generation {0} processes are used.'.format(procnum)) p = Pool(processes=procnum) @@ -117,10 +118,10 @@ def react_all(core_spc_list, numOldCoreSpecies, unimolecularReact, bimolecularRe """ # Select reactive species that can undergo unimolecular reactions: spc_tuples = [(core_spc_list[i],) - for i in xrange(numOldCoreSpecies) if (unimolecularReact[i] and core_spc_list[i].reactive)] + for i in range(numOldCoreSpecies) if (unimolecularReact[i] and core_spc_list[i].reactive)] - for i in xrange(numOldCoreSpecies): - for j in xrange(i, numOldCoreSpecies): + for i in range(numOldCoreSpecies): + for j in range(i, numOldCoreSpecies): # Find reactions involving the species that are bimolecular. # This includes a species reacting with itself (if its own concentration is high enough). if bimolecularReact[i, j]: @@ -128,9 +129,9 @@ def react_all(core_spc_list, numOldCoreSpecies, unimolecularReact, bimolecularRe spc_tuples.append((core_spc_list[i], core_spc_list[j])) if trimolecularReact is not None: - for i in xrange(numOldCoreSpecies): - for j in xrange(i, numOldCoreSpecies): - for k in xrange(j, numOldCoreSpecies): + for i in range(numOldCoreSpecies): + for j in range(i, numOldCoreSpecies): + for k in range(j, numOldCoreSpecies): # Find reactions involving the species that are trimolecular. if trimolecularReact[i, j, k]: if core_spc_list[i].reactive and core_spc_list[j].reactive and core_spc_list[k].reactive: @@ -138,10 +139,10 @@ def react_all(core_spc_list, numOldCoreSpecies, unimolecularReact, bimolecularRe if procnum == 1: # React all families like normal (provide empty argument for only_families) - spc_fam_tuples = zip(spc_tuples) + spc_fam_tuples = list(zip(spc_tuples)) else: # Identify and split families that are prone to generate many reactions into sublists. 
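The list() wrappers added around map, zip, and dict.keys() in this file (one appears immediately below), together with the xrange to range changes above, follow from Python 3 returning lazy, single-pass iterators where Python 2 returned lists. A small self-contained illustration with toy data, using plain strings rather than RMG species objects:

    species = ['C=C', '[CH3]', '[OH]']         # hypothetical SMILES labels
    tuples = zip(species)                      # Python 3: a single-pass iterator
    materialized = list(tuples)                # materialize before reusing or indexing
    assert materialized == [('C=C',), ('[CH3]',), ('[OH]',)]
    assert list(tuples) == []                  # the bare iterator is already exhausted

    families = {'H_Abstraction': None, 'R_Recombination': None}
    family_list = list(families.keys())        # indexable, and safe to iterate repeatedly
    assert len(family_list) == 2
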
- family_list = getDB('kinetics').families.keys() + family_list = list(getDB('kinetics').families.keys()) major_families = [ 'H_Abstraction', 'R_Recombination', 'Intra_Disproportionation', 'Intra_RH_Add_Endocyclic', 'Singlet_Carbene_Intra_Disproportionation', 'Intra_ene_reaction', 'Disproportionation', @@ -167,7 +168,6 @@ def react_all(core_spc_list, numOldCoreSpecies, unimolecularReact, bimolecularRe for item in split_list: spc_fam_tuples.append((spc_tuple, item)) else: - spc_fam_tuples.append((spc_tuple, )) + spc_fam_tuples.append((spc_tuple,)) return react(spc_fam_tuples, procnum), [fam_tuple[0] for fam_tuple in spc_fam_tuples] - diff --git a/rmgpy/rmg/reactTest.py b/rmgpy/rmg/reactTest.py index 72eb9ba64b..c3a4c600da 100644 --- a/rmgpy/rmg/reactTest.py +++ b/rmgpy/rmg/reactTest.py @@ -30,16 +30,16 @@ import itertools import os -import unittest +import unittest + import numpy as np from rmgpy import settings from rmgpy.data.kinetics import TemplateReaction from rmgpy.data.rmg import RMGDatabase -from rmgpy.species import Species - from rmgpy.rmg.main import RMG from rmgpy.rmg.react import react, react_all +from rmgpy.species import Species ################################################### @@ -109,11 +109,11 @@ def testReactAll(self): procnum = 1 spcs = [ - Species().fromSMILES('C=C'), - Species().fromSMILES('[CH3]'), - Species().fromSMILES('[OH]'), - Species().fromSMILES('CCCCCCCCCCC') - ] + Species().fromSMILES('C=C'), + Species().fromSMILES('[CH3]'), + Species().fromSMILES('[OH]'), + Species().fromSMILES('CCCCCCCCCCC') + ] n = len(spcs) reaction_list, spc_tuples = react_all(spcs, n, np.ones(n), np.ones([n, n]), np.ones([n, n, n]), procnum) @@ -134,11 +134,11 @@ def testReactAllParallel(self): procnum = 2 spcs = [ - Species().fromSMILES('C=C'), - Species().fromSMILES('[CH3]'), - Species().fromSMILES('[OH]'), - Species().fromSMILES('CCCCCCCCCCC') - ] + Species().fromSMILES('C=C'), + Species().fromSMILES('[CH3]'), + Species().fromSMILES('[OH]'), + Species().fromSMILES('CCCCCCCCCCC') + ] n = len(spcs) reaction_list, spc_tuples = react_all(spcs, n, np.ones(n), np.ones([n, n]), np.ones([n, n, n]), procnum) diff --git a/rmgpy/rmg/rmgTest.py b/rmgpy/rmg/rmgTest.py index 628b8139ab..91fe347bd7 100644 --- a/rmgpy/rmg/rmgTest.py +++ b/rmgpy/rmg/rmgTest.py @@ -30,22 +30,23 @@ import os import unittest -from external.wip import work_in_progress -from .main import RMG, CoreEdgeReactionModel -from .model import Species +from external.wip import work_in_progress +from rmg import parse_command_line_arguments from rmgpy import settings +from rmgpy.data.base import ForbiddenStructures from rmgpy.data.rmg import RMGDatabase from rmgpy.molecule import Molecule from rmgpy.rmg.react import react_species -import rmgpy -from rmgpy.data.base import ForbiddenStructures +from rmgpy.rmg.main import RMG +from rmgpy.rmg.model import CoreEdgeReactionModel +from rmgpy.species import Species + -from rmg import * ################################################### class TestRMGWorkFlow(unittest.TestCase): - + @classmethod def setUpClass(self): """ @@ -61,9 +62,10 @@ def setUpClass(self): # kinetics family Disproportionation loading self.rmg.database.loadKinetics(os.path.join(path, 'kinetics'), \ - kineticsFamilies=['H_Abstraction','R_Addition_MultipleBond'],reactionLibraries=[]) + kineticsFamilies=['H_Abstraction', 'R_Addition_MultipleBond'], + reactionLibraries=[]) - #load empty forbidden structures + # load empty forbidden structures for family in self.rmg.database.kinetics.families.values(): family.forbidden = 
ForbiddenStructures() self.rmg.database.forbiddenStructures = ForbiddenStructures() @@ -75,7 +77,7 @@ def tearDownClass(self): """ import rmgpy.data.rmg rmgpy.data.rmg.database = None - + @work_in_progress def testDeterministicReactionTemplateMatching(self): """ @@ -96,24 +98,24 @@ def testDeterministicReactionTemplateMatching(self): # react spc = Species().fromSMILES("O=C[C]=C") spc.generate_resonance_structures() - newReactions = react_species((spc,)) + new_reactions = react_species((spc,)) # try to pick out the target reaction mol_H = Molecule().fromSMILES("[H]") mol_C3H2O = Molecule().fromSMILES("C=C=C=O") - target_rxns = findTargetRxnsContaining(mol_H, mol_C3H2O, newReactions) + target_rxns = findTargetRxnsContaining(mol_H, mol_C3H2O, new_reactions) self.assertEqual(len(target_rxns), 2) # reverse the order of molecules in spc spc.molecule = list(reversed(spc.molecule)) # react again - newReactions_reverse = [] - newReactions_reverse.extend(react_species((spc,))) + new_reactions_reverse = [] + new_reactions_reverse.extend(react_species((spc,))) # try to pick out the target reaction - target_rxns_reverse = findTargetRxnsContaining(mol_H, mol_C3H2O, newReactions_reverse) + target_rxns_reverse = findTargetRxnsContaining(mol_H, mol_C3H2O, new_reactions_reverse) self.assertEqual(len(target_rxns_reverse), 2) # whatever order of molecules in spc, the reaction template matched should be same @@ -221,7 +223,7 @@ def test_parse_command_line_non_defaults(self): # Acquire arguments args = parse_command_line_arguments(['other_name.py', '-d', '-o', '/test/output/dir/', '-r', 'test/seed/', '-P', - '-t', '01:20:33:45', '-k']) + '-t', '01:20:33:45', '-k']) # Test expected values self.assertEqual(args.walltime, '01:20:33:45') diff --git a/rmgpy/rmg/settings.py b/rmgpy/rmg/settings.py index e2f2d4966e..36908bedc4 100644 --- a/rmgpy/rmg/settings.py +++ b/rmgpy/rmg/settings.py @@ -54,21 +54,28 @@ `maxNumObjPerIter` Maximum number of objects that can be sent for enlargement from a single simulation ================================================================================================================================================== """ -import numpy +import numpy as np + from rmgpy.quantity import Quantity + class ModelSettings(object): """ class for holding the parameters affecting an RMG run """ - def __init__(self,toleranceMoveToCore=None, toleranceMoveEdgeReactionToCore=numpy.inf,toleranceKeepInEdge=0.0, toleranceInterruptSimulation=1.0, - toleranceMoveEdgeReactionToSurface=numpy.inf, toleranceMoveSurfaceSpeciesToCore=numpy.inf, toleranceMoveSurfaceReactionToCore=numpy.inf, - toleranceMoveEdgeReactionToSurfaceInterrupt=None,toleranceMoveEdgeReactionToCoreInterrupt=None, maximumEdgeSpecies=1000000, minCoreSizeForPrune=50, - minSpeciesExistIterationsForPrune=2, filterReactions=False, filterThreshold=1e8, ignoreOverallFluxCriterion=False, maxNumSpecies=None, maxNumObjsPerIter=1, - terminateAtMaxObjects=False,toleranceThermoKeepSpeciesInEdge=numpy.inf,dynamicsTimeScale = Quantity((0.0,'sec')), - toleranceBranchReactionToCore=0.0, branchingIndex=0.5, branchingRatioMax=1.0): - + def __init__(self, toleranceMoveToCore=None, toleranceMoveEdgeReactionToCore=np.inf, toleranceKeepInEdge=0.0, + toleranceInterruptSimulation=1.0, + toleranceMoveEdgeReactionToSurface=np.inf, toleranceMoveSurfaceSpeciesToCore=np.inf, + toleranceMoveSurfaceReactionToCore=np.inf, + toleranceMoveEdgeReactionToSurfaceInterrupt=None, toleranceMoveEdgeReactionToCoreInterrupt=None, + maximumEdgeSpecies=1000000, 
minCoreSizeForPrune=50, + minSpeciesExistIterationsForPrune=2, filterReactions=False, filterThreshold=1e8, + ignoreOverallFluxCriterion=False, maxNumSpecies=None, maxNumObjsPerIter=1, + terminateAtMaxObjects=False, toleranceThermoKeepSpeciesInEdge=np.inf, + dynamicsTimeScale=Quantity((0.0, 'sec')), + toleranceBranchReactionToCore=0.0, branchingIndex=0.5, branchingRatioMax=1.0): + self.fluxToleranceKeepInEdge = toleranceKeepInEdge self.fluxToleranceMoveToCore = toleranceMoveToCore self.toleranceMoveEdgeReactionToCore = toleranceMoveEdgeReactionToCore @@ -78,7 +85,7 @@ def __init__(self,toleranceMoveToCore=None, toleranceMoveEdgeReactionToCore=nump self.minSpeciesExistIterationsForPrune = minSpeciesExistIterationsForPrune self.filterReactions = filterReactions self.filterThreshold = filterThreshold - self.ignoreOverallFluxCriterion=ignoreOverallFluxCriterion + self.ignoreOverallFluxCriterion = ignoreOverallFluxCriterion self.toleranceMoveEdgeReactionToSurface = toleranceMoveEdgeReactionToSurface self.toleranceMoveSurfaceSpeciesToCore = toleranceMoveSurfaceSpeciesToCore self.toleranceMoveSurfaceReactionToCore = toleranceMoveSurfaceReactionToCore @@ -88,38 +95,40 @@ def __init__(self,toleranceMoveToCore=None, toleranceMoveEdgeReactionToCore=nump self.toleranceBranchReactionToCore = toleranceBranchReactionToCore self.branchingIndex = branchingIndex self.branchingRatioMax = branchingRatioMax - + if toleranceInterruptSimulation: self.fluxToleranceInterrupt = toleranceInterruptSimulation else: self.fluxToleranceInterrupt = toleranceMoveToCore - + if toleranceMoveEdgeReactionToSurfaceInterrupt: self.toleranceMoveEdgeReactionToSurfaceInterrupt = toleranceMoveEdgeReactionToSurfaceInterrupt else: self.toleranceMoveEdgeReactionToSurfaceInterrupt = toleranceMoveEdgeReactionToSurface - + if toleranceMoveEdgeReactionToCoreInterrupt: self.toleranceMoveEdgeReactionToCoreInterrupt = toleranceMoveEdgeReactionToCoreInterrupt else: self.toleranceMoveEdgeReactionToCoreInterrupt = toleranceMoveEdgeReactionToCore - + if maxNumSpecies: self.maxNumSpecies = maxNumSpecies else: - self.maxNumSpecies = numpy.inf - + self.maxNumSpecies = np.inf + if maxNumObjsPerIter <= 0: - self.maxNumObjsPerIter = numpy.inf + self.maxNumObjsPerIter = np.inf else: self.maxNumObjsPerIter = maxNumObjsPerIter - + + class SimulatorSettings(object): """ class for holding the parameters affecting the behavior of the solver """ - def __init__(self,atol=1e-16, rtol=1e-8, sens_atol=1e-6, sens_rtol=1e-4): + + def __init__(self, atol=1e-16, rtol=1e-8, sens_atol=1e-6, sens_rtol=1e-4): self.atol = atol self.rtol = rtol self.sens_atol = sens_atol - self.sens_rtol = sens_rtol \ No newline at end of file + self.sens_rtol = sens_rtol From 1701e940a84060dbcadf780760ebb706c6196460 Mon Sep 17 00:00:00 2001 From: Mark Payne Date: Fri, 16 Aug 2019 09:13:00 -0400 Subject: [PATCH 024/155] Upgrade scripts to Python 3 --- scripts/checkModels.py | 136 +++++----- scripts/convertFAME.py | 334 ++++++++++++------------ scripts/diffModels.py | 4 +- scripts/generateChemkinHTML.py | 14 +- scripts/generateFluxDiagram.py | 49 ++-- scripts/generateReactions.py | 1 + scripts/generateTree.py | 31 ++- scripts/isotopes.py | 27 +- scripts/machineWriteDatabase.py | 4 +- scripts/mergeModels.py | 4 +- scripts/simulate.py | 32 +-- scripts/standardizeModelSpeciesNames.py | 45 ++-- scripts/thermoEstimator.py | 37 +-- 13 files changed, 372 insertions(+), 346 deletions(-) diff --git a/scripts/checkModels.py b/scripts/checkModels.py index 2c4edb6b2f..02a8380704 100644 --- 
a/scripts/checkModels.py +++ b/scripts/checkModels.py @@ -28,13 +28,9 @@ # # ############################################################################### -import sys -import os -import os.path -import math - -import logging import argparse +import logging +import math from rmgpy.tools.diff_models import execute @@ -42,27 +38,26 @@ def parseCommandLineArguments(): - - parser = argparse.ArgumentParser() - + parser.add_argument('name', metavar='NAME', type=str, nargs=1, - help='Name of test target model') + help='Name of test target model') parser.add_argument('benchChemkin', metavar='BENCHCHEMKIN', type=str, nargs=1, - help='The path to the the Chemkin file of the benchmark model') + help='The path to the the Chemkin file of the benchmark model') parser.add_argument('benchSpeciesDict', metavar='BENCHSPECIESDICT', type=str, nargs=1, - help='The path to the the species dictionary file of the benchmark model') - + help='The path to the the species dictionary file of the benchmark model') + parser.add_argument('testChemkin', metavar='TESTEDCHEMKIN', type=str, nargs=1, - help='The path to the the Chemkin file of the tested model') + help='The path to the the Chemkin file of the tested model') parser.add_argument('testSpeciesDict', metavar='TESTEDSPECIESDICT', type=str, nargs=1, - help='The path to the the species dictionary file of the tested model') - + help='The path to the the species dictionary file of the tested model') + args = parser.parse_args() return args + def main(): """ Driver function that parses command line arguments and passes them to the execute function. @@ -72,13 +67,14 @@ def main(): name = args.name[0] initializeLog(logging.WARNING, name + '.log') - benchChemkin = args.benchChemkin[0] - benchSpeciesDict = args.benchSpeciesDict[0] + bench_chemkin = args.benchChemkin[0] + bench_species_dict = args.benchSpeciesDict[0] - testChemkin = args.testChemkin[0] - testSpeciesDict = args.testSpeciesDict[0] + test_chemkin = args.testChemkin[0] + test_species_dict = args.testSpeciesDict[0] + + check(name, bench_chemkin, bench_species_dict, test_chemkin, test_species_dict) - check(name, benchChemkin, benchSpeciesDict, testChemkin, testSpeciesDict) def check(name, benchChemkin, benchSpeciesDict, testChemkin, testSpeciesDict): """ @@ -86,55 +82,54 @@ def check(name, benchChemkin, benchSpeciesDict, testChemkin, testSpeciesDict): """ kwargs = { 'web': True, - } + } + + test_thermo, bench_thermo = None, None + common_species, unique_species_orig, unique_species_test, common_reactions, unique_reactions_orig, unique_reactions_test = \ + execute(benchChemkin, benchSpeciesDict, bench_thermo, testChemkin, testSpeciesDict, test_thermo, **kwargs) - testThermo, benchThermo = None, None - commonSpecies, uniqueSpeciesOrig, uniqueSpeciesTest, commonReactions, uniqueReactionsOrig, uniqueReactionsTest = \ - execute(benchChemkin, benchSpeciesDict, benchThermo, testChemkin, testSpeciesDict, testThermo, **kwargs) + error_model = checkModel(common_species, unique_species_test, unique_species_orig, common_reactions, unique_reactions_test, + unique_reactions_orig) - errorModel = checkModel(commonSpecies, uniqueSpeciesTest, uniqueSpeciesOrig, commonReactions, uniqueReactionsTest, uniqueReactionsOrig) + error_species = checkSpecies(common_species, unique_species_test, unique_species_orig) - errorSpecies = checkSpecies(commonSpecies, uniqueSpeciesTest, uniqueSpeciesOrig) + error_reactions = checkReactions(common_reactions, unique_reactions_test, unique_reactions_orig) - errorReactions = 
checkReactions(commonReactions, uniqueReactionsTest, uniqueReactionsOrig) -def checkModel(commonSpecies, uniqueSpeciesTest, uniqueSpeciesOrig, commonReactions, uniqueReactionsTest, uniqueReactionsOrig): +def checkModel(commonSpecies, uniqueSpeciesTest, uniqueSpeciesOrig, commonReactions, uniqueReactionsTest, + uniqueReactionsOrig): """ Compare the species and reaction count of both models. """ - testModelSpecies = len(commonSpecies) + len(uniqueSpeciesTest) - origModelSpecies = len(commonSpecies) + len(uniqueSpeciesOrig) + test_model_species = len(commonSpecies) + len(uniqueSpeciesTest) + orig_model_species = len(commonSpecies) + len(uniqueSpeciesOrig) - logger.error('Test model has {} species.'.format(testModelSpecies)) - logger.error('Original model has {} species.'.format(origModelSpecies)) + logger.error('Test model has {} species.'.format(test_model_species)) + logger.error('Original model has {} species.'.format(orig_model_species)) - testModelRxns = len(commonReactions) + len(uniqueReactionsTest) - origModelRxns = len(commonReactions) + len(uniqueReactionsOrig) - logger.error('Test model has {} reactions.'.format(testModelRxns)) - logger.error('Original model has {} reactions.'.format(origModelRxns)) + test_model_rxns = len(commonReactions) + len(uniqueReactionsTest) + orig_model_rxns = len(commonReactions) + len(uniqueReactionsOrig) + logger.error('Test model has {} reactions.'.format(test_model_rxns)) + logger.error('Original model has {} reactions.'.format(orig_model_rxns)) - return (testModelSpecies != origModelSpecies) or (testModelRxns != origModelRxns) + return (test_model_species != orig_model_species) or (test_model_rxns != orig_model_rxns) -def checkSpecies(commonSpecies, uniqueSpeciesTest, uniqueSpeciesOrig): +def checkSpecies(commonSpecies, uniqueSpeciesTest, uniqueSpeciesOrig): error = False # check for unique species in one of the models: if uniqueSpeciesOrig: error = True - logger.error( - 'The original model has {} species that the tested model does not have.' - .format(len(uniqueSpeciesOrig)) - ) + logger.error('The original model has {} species that the tested model ' + 'does not have.'.format(len(uniqueSpeciesOrig))) [printSpecies(spc) for spc in uniqueSpeciesOrig] if uniqueSpeciesTest: error = True - logger.error( - 'The tested model has {} species that the original model does not have.' 
- .format(len(uniqueSpeciesTest)) - ) + logger.error('The tested model has {} species that the original model ' + 'does not have.'.format(len(uniqueSpeciesTest))) [printSpecies(spc) for spc in uniqueSpeciesTest] # check for different thermo among common species:: @@ -149,8 +144,9 @@ def checkSpecies(commonSpecies, uniqueSpeciesTest, uniqueSpeciesOrig): logger.error('original:\t{}'.format(spec1.label)) logger.error('tested:\t{}'.format(spec2.label)) logger.error("{0:10}|{1:10}|{2:10}|{3:10}|{4:10}|{5:10}|{6:10}|{7:10}|{8:10}" - .format('Hf(300K)','S(300K)','Cp(300K)','Cp(400K)','Cp(500K)','Cp(600K)','Cp(800K)','Cp(1000K)','Cp(1500K)') - ) + .format('Hf(300K)', 'S(300K)', 'Cp(300K)', 'Cp(400K)', 'Cp(500K)', 'Cp(600K)', + 'Cp(800K)', 'Cp(1000K)', 'Cp(1500K)') + ) [printThermo(spc) for spc in [spec1, spec2]] @@ -161,28 +157,24 @@ def checkSpecies(commonSpecies, uniqueSpeciesTest, uniqueSpeciesOrig): return error -def checkReactions(commonReactions, uniqueReactionsTest, uniqueReactionsOrig): +def checkReactions(commonReactions, uniqueReactionsTest, uniqueReactionsOrig): error = False # check for unique reactions in one of the models: if uniqueReactionsOrig: error = True - - logger.error( - 'The original model has {} reactions that the tested model does not have.' - .format(len(uniqueReactionsOrig)) - ) - + + logger.error('The original model has {} reactions that the tested model ' + 'does not have.'.format(len(uniqueReactionsOrig))) + [printReaction(rxn) for rxn in uniqueReactionsOrig] if uniqueReactionsTest: error = True - - logger.error( - 'The tested model has {} reactions that the original model does not have.' - .format(len(uniqueReactionsTest)) - ) + + logger.error('The tested model has {} reactions that the original model ' + 'does not have.'.format(len(uniqueReactionsTest))) [printReaction(rxn) for rxn in uniqueReactionsTest] @@ -198,10 +190,10 @@ def checkReactions(commonReactions, uniqueReactionsTest, uniqueReactionsOrig): printReaction(rxn1) logger.error('tested:') printReaction(rxn2) - + logger.error("{0:7}|{1:7}|{2:7}|{3:7}|{4:7}|{5:7}|{6:7}|{7:7}|{8:7}" - .format('k(1bar)','300K','400K','500K','600K','800K','1000K','1500K','2000K') - ) + .format('k(1bar)', '300K', '400K', '500K', '600K', '800K', '1000K', '1500K', '2000K') + ) logger.error('') [printRates(rxn) for rxn in [rxn1, rxn2]] @@ -213,10 +205,10 @@ def checkReactions(commonReactions, uniqueReactionsTest, uniqueReactionsOrig): [printReactionComments(rxn) for rxn in [rxn1, rxn2]] else: logger.error('Identical kinetics comments') - return error + def printSpecies(spc): """ @@ -224,7 +216,8 @@ def printSpecies(spc): logger.error( 'spc: {}'.format(spc) - ) + ) + def printRates(rxn): """ @@ -243,6 +236,7 @@ def printRates(rxn): math.log10(rxn.kinetics.getRateCoefficient(2000, 1e5)), )) + def printThermo(spec): """ @@ -260,18 +254,23 @@ def printThermo(spec): spec.thermo.getHeatCapacity(1500) / 4.184, )) + def printReaction(rxn): logger.error('rxn: {}\t\torigin: {}'.format(rxn, rxn.getSource())) + def printReactionComments(rxn): logger.error('kinetics: {}'.format(rxn.kinetics.comment)) + def printSpeciesComments(spc): logger.error('thermo: {}'.format(spc.thermo.comment)) + def printKinetics(rxn): logger.error('Kinetics: {}'.format(rxn.kinetics)) + def initializeLog(verbose, log_file_name='checkModels.log'): """ Set up a logger for RMG to use to print output to stdout. 
The @@ -283,7 +282,8 @@ def initializeLog(verbose, log_file_name='checkModels.log'): filemode='w', format='%(name)s=%(message)s', level=verbose - ) + ) + if __name__ == '__main__': - main() \ No newline at end of file + main() diff --git a/scripts/convertFAME.py b/scripts/convertFAME.py index a6ef969755..189ac26e3f 100644 --- a/scripts/convertFAME.py +++ b/scripts/convertFAME.py @@ -38,19 +38,18 @@ import logging import os.path -from rmgpy.molecule import Molecule import rmgpy.constants as constants -from rmgpy.quantity import Quantity, Energy - from arkane.pdep import PressureDependenceJob - +from rmgpy.kinetics import Arrhenius +from rmgpy.molecule import Molecule from rmgpy.pdep import Network, Configuration, SingleExponentialDown -from rmgpy.species import Species, TransitionState +from rmgpy.quantity import Quantity, Energy from rmgpy.reaction import Reaction -from rmgpy.transport import TransportData +from rmgpy.species import Species, TransitionState from rmgpy.statmech import HarmonicOscillator, HinderedRotor, Conformer from rmgpy.thermo import ThermoData -from rmgpy.kinetics import Arrhenius +from rmgpy.transport import TransportData + ################################################################################ @@ -62,16 +61,17 @@ def parseCommandLineArguments(): parser = argparse.ArgumentParser() parser.add_argument('file', metavar='FILE', type=str, nargs='+', - help='a file to convert') + help='a file to convert') parser.add_argument('-d', '--dictionary', metavar='DICTFILE', type=str, nargs=1, - help='the RMG dictionary corresponding to these files') - parser.add_argument('-x', '--max-energy', metavar='VALUE UNITS', type=str, nargs=2, - help='A maximum energy to crop at') + help='the RMG dictionary corresponding to these files') + parser.add_argument('-x', '--max-energy', metavar='VALUE UNITS', type=str, nargs=2, + help='A maximum energy to crop at') return parser.parse_args() + ################################################################################ - + def loadFAMEInput(path, moleculeDict=None): """ Load the contents of a FAME input file into the MEASURE object. FAME @@ -83,7 +83,7 @@ def loadFAMEInput(path, moleculeDict=None): specify a `moleculeDict`, then this script will use it to associate the species with their structures. 
""" - + def readMeaningfulLine(f): line = f.readline() while line != '': @@ -100,41 +100,41 @@ def readMeaningfulLine(f): f = open(path) job = PressureDependenceJob(network=None) - + # Read method method = readMeaningfulLine(f).lower() - if method == 'modifiedstrongcollision': + if method == 'modifiedstrongcollision': job.method = 'modified strong collision' - elif method == 'reservoirstate': + elif method == 'reservoirstate': job.method = 'reservoir state' # Read temperatures Tcount, Tunits, Tmin, Tmax = readMeaningfulLine(f).split() - job.Tmin = Quantity(float(Tmin), Tunits) + job.Tmin = Quantity(float(Tmin), Tunits) job.Tmax = Quantity(float(Tmax), Tunits) job.Tcount = int(Tcount) Tlist = [] for i in range(int(Tcount)): Tlist.append(float(readMeaningfulLine(f))) job.Tlist = Quantity(Tlist, Tunits) - + # Read pressures Pcount, Punits, Pmin, Pmax = readMeaningfulLine(f).split() - job.Pmin = Quantity(float(Pmin), Punits) + job.Pmin = Quantity(float(Pmin), Punits) job.Pmax = Quantity(float(Pmax), Punits) job.Pcount = int(Pcount) Plist = [] for i in range(int(Pcount)): Plist.append(float(readMeaningfulLine(f))) job.Plist = Quantity(Plist, Punits) - + # Read interpolation model model = readMeaningfulLine(f).split() if model[0].lower() == 'chebyshev': job.interpolationModel = ('chebyshev', int(model[1]), int(model[2])) elif model[0].lower() == 'pdeparrhenius': job.interpolationModel = ('pdeparrhenius',) - + # Read grain size or number of grains job.minimumGrainCount = 0 job.maximumGrainSize = None @@ -157,46 +157,46 @@ def readMeaningfulLine(f): alpha0units, alpha0 = readMeaningfulLine(f).split() T0units, T0 = readMeaningfulLine(f).split() n = readMeaningfulLine(f) - energyTransferModel = SingleExponentialDown( - alpha0 = Quantity(float(alpha0), alpha0units), - T0 = Quantity(float(T0), T0units), - n = float(n), + energy_transfer_model = SingleExponentialDown( + alpha0=Quantity(float(alpha0), alpha0units), + T0=Quantity(float(T0), T0units), + n=float(n), ) - - speciesDict = {} + + species_dict = {} # Read bath gas parameters - bathGas = Species(label='bath_gas', energyTransferModel=energyTransferModel) - molWtunits, molWt = readMeaningfulLine(f).split() - if molWtunits == 'u': molWtunits = 'amu' - bathGas.molecularWeight = Quantity(float(molWt), molWtunits) + bath_gas = Species(label='bath_gas', energyTransferModel=energy_transfer_model) + mol_wt_units, mol_wt = readMeaningfulLine(f).split() + if mol_wt_units == 'u': mol_wt_units = 'amu' + bath_gas.molecularWeight = Quantity(float(mol_wt), mol_wt_units) sigmaLJunits, sigmaLJ = readMeaningfulLine(f).split() epsilonLJunits, epsilonLJ = readMeaningfulLine(f).split() assert epsilonLJunits == 'J' - bathGas.transportData = TransportData( - sigma = Quantity(float(sigmaLJ), sigmaLJunits), - epsilon = Quantity(float(epsilonLJ) / constants.kB, 'K'), + bath_gas.transportData = TransportData( + sigma=Quantity(float(sigmaLJ), sigmaLJunits), + epsilon=Quantity(float(epsilonLJ) / constants.kB, 'K'), ) - job.network.bathGas = {bathGas: 1.0} - + job.network.bathGas = {bath_gas: 1.0} + # Read species data - Nspec = int(readMeaningfulLine(f)) - for i in range(Nspec): + n_spec = int(readMeaningfulLine(f)) + for i in range(n_spec): species = Species() species.conformer = Conformer() - species.energyTransferModel = energyTransferModel - + species.energyTransferModel = energy_transfer_model + # Read species label species.label = readMeaningfulLine(f) - speciesDict[species.label] = species + species_dict[species.label] = species if species.label in moleculeDict: 
species.molecule = [moleculeDict[species.label]] - + # Read species E0 E0units, E0 = readMeaningfulLine(f).split() species.conformer.E0 = Quantity(float(E0), E0units) species.conformer.E0.units = 'kJ/mol' - + # Read species thermo data H298units, H298 = readMeaningfulLine(f).split() S298units, S298 = readMeaningfulLine(f).split() @@ -207,144 +207,145 @@ def readMeaningfulLine(f): if S298units == 'J/mol*K': S298units = 'J/(mol*K)' if Cpunits == 'J/mol*K': Cpunits = 'J/(mol*K)' species.thermo = ThermoData( - H298 = Quantity(float(H298), H298units), - S298 = Quantity(float(S298), S298units), - Tdata = Quantity([300,400,500,600,800,1000,1500], "K"), - Cpdata = Quantity(Cpdata, Cpunits), - Cp0 = (Cpdata[0], Cpunits), - CpInf = (Cpdata[-1], Cpunits), + H298=Quantity(float(H298), H298units), + S298=Quantity(float(S298), S298units), + Tdata=Quantity([300, 400, 500, 600, 800, 1000, 1500], "K"), + Cpdata=Quantity(Cpdata, Cpunits), + Cp0=(Cpdata[0], Cpunits), + CpInf=(Cpdata[-1], Cpunits), ) - + # Read species collision parameters - molWtunits, molWt = readMeaningfulLine(f).split() - if molWtunits == 'u': molWtunits = 'amu' - species.molecularWeight = Quantity(float(molWt), molWtunits) + mol_wt_units, mol_wt = readMeaningfulLine(f).split() + if mol_wt_units == 'u': mol_wt_units = 'amu' + species.molecularWeight = Quantity(float(mol_wt), mol_wt_units) sigmaLJunits, sigmaLJ = readMeaningfulLine(f).split() epsilonLJunits, epsilonLJ = readMeaningfulLine(f).split() assert epsilonLJunits == 'J' species.transportData = TransportData( - sigma = Quantity(float(sigmaLJ), sigmaLJunits), - epsilon = Quantity(float(epsilonLJ) / constants.kB, 'K'), + sigma=Quantity(float(sigmaLJ), sigmaLJunits), + epsilon=Quantity(float(epsilonLJ) / constants.kB, 'K'), ) - + # Read species vibrational frequencies - freqCount, freqUnits = readMeaningfulLine(f).split() + freq_count, freq_units = readMeaningfulLine(f).split() frequencies = [] - for j in range(int(freqCount)): + for j in range(int(freq_count)): frequencies.append(float(readMeaningfulLine(f))) species.conformer.modes.append(HarmonicOscillator( - frequencies = Quantity(frequencies, freqUnits), + frequencies=Quantity(frequencies, freq_units), )) - + # Read species external rotors rotCount, rotUnits = readMeaningfulLine(f).split() if int(rotCount) > 0: raise NotImplementedError('Cannot handle external rotational modes in FAME input.') - + # Read species internal rotors - freqCount, freqUnits = readMeaningfulLine(f).split() + freq_count, freq_units = readMeaningfulLine(f).split() frequencies = [] - for j in range(int(freqCount)): + for j in range(int(freq_count)): frequencies.append(float(readMeaningfulLine(f))) - barrCount, barrUnits = readMeaningfulLine(f).split() + barr_count, barr_units = readMeaningfulLine(f).split() barriers = [] - for j in range(int(barrCount)): + for j in range(int(barr_count)): barriers.append(float(readMeaningfulLine(f))) - if barrUnits == 'cm^-1': - barrUnits = 'J/mol' + if barr_units == 'cm^-1': + barr_units = 'J/mol' barriers = [barr * constants.h * constants.c * constants.Na * 100. 
for barr in barriers] - elif barrUnits in ['Hz', 's^-1']: - barrUnits = 'J/mol' + elif barr_units in ['Hz', 's^-1']: + barr_units = 'J/mol' barriers = [barr * constants.h * constants.Na for barr in barriers] - elif barrUnits != 'J/mol': - raise Exception('Unexpected units "{0}" for hindered rotor barrier height.'.format(barrUnits)) - inertia = [V0 / 2.0 / (nu * constants.c * 100.)**2 / constants.Na for nu, V0 in zip(frequencies, barriers)] + elif barr_units != 'J/mol': + raise Exception('Unexpected units "{0}" for hindered rotor barrier height.'.format(barr_units)) + inertia = [V0 / 2.0 / (nu * constants.c * 100.) ** 2 / constants.Na for nu, V0 in zip(frequencies, barriers)] for I, V0 in zip(inertia, barriers): species.conformer.modes.append(HinderedRotor( - inertia = Quantity(I,"kg*m^2"), - barrier = Quantity(V0,barrUnits), - symmetry = 1, - semiclassical = False, + inertia=Quantity(I, "kg*m^2"), + barrier=Quantity(V0, barr_units), + symmetry=1, + semiclassical=False, )) - + # Read overall symmetry number species.conformer.spinMultiplicity = int(readMeaningfulLine(f)) - + # Read isomer, reactant channel, and product channel data - Nisom = int(readMeaningfulLine(f)) - Nreac = int(readMeaningfulLine(f)) - Nprod = int(readMeaningfulLine(f)) - for i in range(Nisom): + n_isom = int(readMeaningfulLine(f)) + n_reac = int(readMeaningfulLine(f)) + n_prod = int(readMeaningfulLine(f)) + for i in range(n_isom): data = readMeaningfulLine(f).split() assert data[0] == '1' - job.network.isomers.append(speciesDict[data[1]]) - for i in range(Nreac): + job.network.isomers.append(species_dict[data[1]]) + for i in range(n_reac): data = readMeaningfulLine(f).split() assert data[0] == '2' - job.network.reactants.append([speciesDict[data[1]], speciesDict[data[2]]]) - for i in range(Nprod): + job.network.reactants.append([species_dict[data[1]], species_dict[data[2]]]) + for i in range(n_prod): data = readMeaningfulLine(f).split() if data[0] == '1': - job.network.products.append([speciesDict[data[1]]]) + job.network.products.append([species_dict[data[1]]]) elif data[0] == '2': - job.network.products.append([speciesDict[data[1]], speciesDict[data[2]]]) + job.network.products.append([species_dict[data[1]], species_dict[data[2]]]) # Read path reactions - Nrxn = int(readMeaningfulLine(f)) - for i in range(Nrxn): - + n_rxn = int(readMeaningfulLine(f)) + for i in range(n_rxn): + # Read and ignore reaction equation equation = readMeaningfulLine(f) reaction = Reaction(transitionState=TransitionState(), reversible=True) job.network.pathReactions.append(reaction) reaction.transitionState.conformer = Conformer() - + # Read reactant and product indices data = readMeaningfulLine(f).split() reac = int(data[0]) - 1 prod = int(data[1]) - 1 - if reac < Nisom: + if reac < n_isom: reaction.reactants = [job.network.isomers[reac]] - elif reac < Nisom+Nreac: - reaction.reactants = job.network.reactants[reac-Nisom] + elif reac < n_isom + n_reac: + reaction.reactants = job.network.reactants[reac - n_isom] else: - reaction.reactants = job.network.products[reac-Nisom-Nreac] - if prod < Nisom: + reaction.reactants = job.network.products[reac - n_isom - n_reac] + if prod < n_isom: reaction.products = [job.network.isomers[prod]] - elif prod < Nisom+Nreac: - reaction.products = job.network.reactants[prod-Nisom] + elif prod < n_isom + n_reac: + reaction.products = job.network.reactants[prod - n_isom] else: - reaction.products = job.network.products[prod-Nisom-Nreac] - + reaction.products = job.network.products[prod - n_isom - n_reac] + # Read 
reaction E0 E0units, E0 = readMeaningfulLine(f).split() reaction.transitionState.conformer.E0 = Quantity(float(E0), E0units) reaction.transitionState.conformer.E0.units = 'kJ/mol' - + # Read high-pressure limit kinetics data = readMeaningfulLine(f) assert data.lower() == 'arrhenius' - Aunits, A = readMeaningfulLine(f).split() - if '/' in Aunits: - index = Aunits.find('/') - Aunits = '{0}/({1})'.format(Aunits[0:index], Aunits[index+1:]) - Eaunits, Ea = readMeaningfulLine(f).split() + A_units, A = readMeaningfulLine(f).split() + if '/' in A_units: + index = A_units.find('/') + A_units = '{0}/({1})'.format(A_units[0:index], A_units[index + 1:]) + Ea_units, Ea = readMeaningfulLine(f).split() n = readMeaningfulLine(f) reaction.kinetics = Arrhenius( - A = Quantity(float(A), Aunits), - Ea = Quantity(float(Ea), Eaunits), - n = Quantity(float(n)), + A=Quantity(float(A), A_units), + Ea=Quantity(float(Ea), Ea_units), + n=Quantity(float(n)), ) reaction.kinetics.Ea.units = 'kJ/mol' f.close() - + job.network.isomers = [Configuration(isomer) for isomer in job.network.isomers] job.network.reactants = [Configuration(*reactants) for reactants in job.network.reactants] job.network.products = [Configuration(*products) for products in job.network.products] return job + def pruneNetwork(network, Emax): """ Prune the network by removing any configurations with ground-state energy @@ -353,47 +354,47 @@ def pruneNetwork(network, Emax): are also removed. Any configurations that have zero reactions as a result of this process are also removed. """ - + # Remove configurations with ground-state energies above the given Emax - isomersToRemove = [] + isomers_to_remove = [] for isomer in network.isomers: if isomer.E0 > Emax: - isomersToRemove.append(isomer) - for isomer in isomersToRemove: + isomers_to_remove.append(isomer) + for isomer in isomers_to_remove: network.isomers.remove(isomer) - - reactantsToRemove = [] + + reactants_to_remove = [] for reactant in network.reactants: if reactant.E0 > Emax: - reactantsToRemove.append(reactant) - for reactant in reactantsToRemove: + reactants_to_remove.append(reactant) + for reactant in reactants_to_remove: network.reactants.remove(reactant) - - productsToRemove = [] + + products_to_remove = [] for product in network.products: if product.E0 > Emax: - productsToRemove.append(product) - for product in productsToRemove: + products_to_remove.append(product) + for product in products_to_remove: network.products.remove(product) - + # Remove path reactions involving the removed configurations - removedConfigurations = [] - removedConfigurations.extend([isomer.species for isomer in isomersToRemove]) - removedConfigurations.extend([reactant.species for reactant in reactantsToRemove]) - removedConfigurations .extend([product.species for product in productsToRemove]) - reactionsToRemove = [] + removed_configurations = [] + removed_configurations.extend([isomer.species for isomer in isomers_to_remove]) + removed_configurations.extend([reactant.species for reactant in reactants_to_remove]) + removed_configurations.extend([product.species for product in products_to_remove]) + reactions_to_remove = [] for rxn in network.pathReactions: - if rxn.reactants in removedConfigurations or rxn.products in removedConfigurations: - reactionsToRemove.append(rxn) - for rxn in reactionsToRemove: + if rxn.reactants in removed_configurations or rxn.products in removed_configurations: + reactions_to_remove.append(rxn) + for rxn in reactions_to_remove: network.pathReactions.remove(rxn) - + # Remove path 
reactions with barrier heights above the given Emax - reactionsToRemove = [] + reactions_to_remove = [] for rxn in network.pathReactions: if rxn.transitionState.conformer.E0.value_si > Emax: - reactionsToRemove.append(rxn) - for rxn in reactionsToRemove: + reactions_to_remove.append(rxn) + for rxn in reactions_to_remove: network.pathReactions.remove(rxn) def ismatch(speciesList1, speciesList2): @@ -403,86 +404,95 @@ def ismatch(speciesList1, speciesList2): return ((speciesList1[0] is speciesList2[0] and speciesList1[1] is speciesList2[1]) or (speciesList1[0] is speciesList2[1] and speciesList1[1] is speciesList2[0])) elif len(speciesList1) == len(speciesList2) == 3: - return ((speciesList1[0] is speciesList2[0] and speciesList1[1] is speciesList2[1] and speciesList1[2] is speciesList2[2]) or - (speciesList1[0] is speciesList2[0] and speciesList1[1] is speciesList2[2] and speciesList1[2] is speciesList2[1]) or - (speciesList1[0] is speciesList2[1] and speciesList1[1] is speciesList2[0] and speciesList1[2] is speciesList2[2]) or - (speciesList1[0] is speciesList2[1] and speciesList1[1] is speciesList2[2] and speciesList1[2] is speciesList2[0]) or - (speciesList1[0] is speciesList2[2] and speciesList1[1] is speciesList2[0] and speciesList1[2] is speciesList2[1]) or - (speciesList1[0] is speciesList2[2] and speciesList1[1] is speciesList2[1] and speciesList1[2] is speciesList2[0])) + return ((speciesList1[0] is speciesList2[0] and speciesList1[1] is speciesList2[1] and speciesList1[2] is + speciesList2[2]) or + (speciesList1[0] is speciesList2[0] and speciesList1[1] is speciesList2[2] and speciesList1[2] is + speciesList2[1]) or + (speciesList1[0] is speciesList2[1] and speciesList1[1] is speciesList2[0] and speciesList1[2] is + speciesList2[2]) or + (speciesList1[0] is speciesList2[1] and speciesList1[1] is speciesList2[2] and speciesList1[2] is + speciesList2[0]) or + (speciesList1[0] is speciesList2[2] and speciesList1[1] is speciesList2[0] and speciesList1[2] is + speciesList2[1]) or + (speciesList1[0] is speciesList2[2] and speciesList1[1] is speciesList2[1] and speciesList1[2] is + speciesList2[0])) else: return False - + # Remove orphaned configurations (those with zero path reactions involving them) - isomersToRemove = [] + isomers_to_remove = [] for isomer in network.isomers: for rxn in network.pathReactions: if ismatch(rxn.reactants, isomer.species) or ismatch(rxn.products, isomer.species): break else: - isomersToRemove.append(isomer) - for isomer in isomersToRemove: + isomers_to_remove.append(isomer) + for isomer in isomers_to_remove: network.isomers.remove(isomer) - - reactantsToRemove = [] + + reactants_to_remove = [] for reactant in network.reactants: for rxn in network.pathReactions: if ismatch(rxn.reactants, reactant.species) or ismatch(rxn.products, reactant.species): break else: - reactantsToRemove.append(reactant) - for reactant in reactantsToRemove: + reactants_to_remove.append(reactant) + for reactant in reactants_to_remove: network.reactants.remove(reactant) - - productsToRemove = [] + + products_to_remove = [] for product in network.products: for rxn in network.pathReactions: if ismatch(rxn.reactants, product.species) or ismatch(rxn.products, product.species): break else: - productsToRemove.append(product) - for product in productsToRemove: + products_to_remove.append(product) + for product in products_to_remove: network.products.remove(product) + ################################################################################ if __name__ == '__main__': - + # Parse the 
command-line arguments args = parseCommandLineArguments() - + if args.max_energy: Emax = float(args.max_energy[0]) Eunits = str(args.max_energy[1]) Emax = Energy(Emax, Eunits).value_si else: Emax = None - + # Load RMG dictionary if specified moleculeDict = {} if args.dictionary is not None: f = open(args.dictionary[0]) - adjlist = ''; label = '' + adjlist = '' + label = '' for line in f: if len(line.strip()) == 0: if len(adjlist.strip()) > 0: molecule = Molecule() molecule.fromAdjacencyList(adjlist) moleculeDict[label] = molecule - adjlist = ''; label = '' + adjlist = '' + label = '' else: if len(adjlist.strip()) == 0: label = line.strip() adjlist += line - + f.close() - + method = None for fstr in args.file: # Construct Arkane job from FAME input job = loadFAMEInput(fstr, moleculeDict) - + if Emax is not None: pruneNetwork(job.network, Emax) diff --git a/scripts/diffModels.py b/scripts/diffModels.py index 2bfb2f06c7..6b66ac3915 100644 --- a/scripts/diffModels.py +++ b/scripts/diffModels.py @@ -54,12 +54,12 @@ """ import rmgpy.tools.diff_models as diff_models + ################################################################################ def main(): diff_models.main() + if __name__ == '__main__': main() - - diff --git a/scripts/generateChemkinHTML.py b/scripts/generateChemkinHTML.py index b9e48f590c..b1816ae677 100644 --- a/scripts/generateChemkinHTML.py +++ b/scripts/generateChemkinHTML.py @@ -41,24 +41,25 @@ directory, unless an output directory is specified. """ -import os import argparse +import os from rmgpy.chemkin import loadChemkinFile from rmgpy.rmg.model import CoreEdgeReactionModel from rmgpy.rmg.output import saveOutputHTML + ################################################################################ def main(chemkin, dictionary, output, foreign): model = CoreEdgeReactionModel() model.core.species, model.core.reactions = loadChemkinFile(chemkin, dictionary, readComments=not foreign, checkDuplicates=foreign) - outputPath = os.path.join(output, 'output.html') - speciesPath = os.path.join(output, 'species') - if not os.path.isdir(speciesPath): - os.makedirs(speciesPath) - saveOutputHTML(outputPath, model) + output_path = os.path.join(output, 'output.html') + species_path = os.path.join(output, 'species') + if not os.path.isdir(species_path): + os.makedirs(species_path) + saveOutputHTML(output_path, model) if __name__ == '__main__': @@ -80,4 +81,3 @@ def main(chemkin, dictionary, output, foreign): foreign = args.foreign main(chemkin, dictionary, output, foreign) - diff --git a/scripts/generateFluxDiagram.py b/scripts/generateFluxDiagram.py index cee420d320..3545be6a80 100644 --- a/scripts/generateFluxDiagram.py +++ b/scripts/generateFluxDiagram.py @@ -37,11 +37,12 @@ Chemkin output file can also be passed as an optional positional argument. 
""" -import os import argparse +import os from rmgpy.tools.fluxdiagram import createFluxDiagram + ################################################################################ def parse_arguments(): @@ -78,18 +79,18 @@ def parse_arguments(): args = parser.parse_args() - inputFile = os.path.abspath(args.input) - chemkinFile = os.path.abspath(args.chemkin) - dictFile = os.path.abspath(args.dictionary) - speciesPath = os.path.abspath(args.species) if args.species is not None else None - chemkinOutput = os.path.abspath(args.chemkinOutput) if args.chemkinOutput is not None else '' - useJava = args.java + input_file = os.path.abspath(args.input) + chemkin_file = os.path.abspath(args.chemkin) + dict_file = os.path.abspath(args.dictionary) + species_path = os.path.abspath(args.species) if args.species is not None else None + chemkin_output = os.path.abspath(args.chemkinOutput) if args.chemkinOutput is not None else '' + use_java = args.java dflag = args.dlim - checkDuplicates = args.checkDuplicates - centralSpeciesList = args.centralSpecies + check_duplicates = args.checkDuplicates + central_species_list = args.centralSpecies superimpose = args.super - saveStates = args.saveStates - readStates = args.readStates + save_states = args.saveStates + read_states = args.readStates keys = ('maximumNodeCount', 'maximumEdgeCount', @@ -100,20 +101,21 @@ def parse_arguments(): 'timeStep') vals = (args.maxnode, args.maxedge, args.conctol, args.ratetol, args.rad, args.centralReactionCount, args.tstep) settings = {k: v for k, v in zip(keys, vals) if v is not None} - - return (inputFile, - chemkinFile, - dictFile, - speciesPath, - chemkinOutput, - useJava, + + return (input_file, + chemkin_file, + dict_file, + species_path, + chemkin_output, + use_java, dflag, - checkDuplicates, + check_duplicates, settings, - centralSpeciesList, + central_species_list, superimpose, - saveStates, - readStates) + save_states, + read_states) + def main(): (inputFile, @@ -135,5 +137,6 @@ def main(): superimpose=superimpose, saveStates=saveStates, readStates=readStates, checkDuplicates=checkDuplicates) + if __name__ == '__main__': - main() \ No newline at end of file + main() diff --git a/scripts/generateReactions.py b/scripts/generateReactions.py index b90572d35c..754397a1f4 100644 --- a/scripts/generateReactions.py +++ b/scripts/generateReactions.py @@ -37,6 +37,7 @@ """ import rmgpy.tools.generate_reactions as generate_reactions + ################################################################################ diff --git a/scripts/generateTree.py b/scripts/generateTree.py index 450cd8c35e..38da635073 100644 --- a/scripts/generateTree.py +++ b/scripts/generateTree.py @@ -35,21 +35,22 @@ Note that 6 is the maximum number of processors used currently by this script """ -import os -import os.path import argparse import logging +import os +import os.path + from rmgpy import settings from rmgpy.data.rmg import RMGDatabase from rmgpy.rmg.main import initializeLog + ################################################################################ def parse_arguments(): - parser = argparse.ArgumentParser() parser.add_argument('name', metavar='NAME', type=str, nargs=1, - help='Family Name') + help='Family Name') parser.add_argument('nprocs', metavar='NPROCS', type=int, nargs=1, help='Number of Processors for Parallelization') @@ -60,33 +61,37 @@ def parse_arguments(): return name, nprocs + def main(): - initializeLog(logging.INFO,'treegen.log') + initializeLog(logging.INFO, 'treegen.log') dbdir = settings['database.directory'] - 
familyName, nprocs = parse_arguments() + family_name, nprocs = parse_arguments() database = RMGDatabase() database.load( path=dbdir, thermoLibraries=['Klippenstein_Glarborg2016', 'BurkeH2O2', 'thermo_DFT_CCSDTF12_BAC', 'DFT_QCI_thermo', - 'primaryThermoLibrary', 'primaryNS', 'NitrogenCurran', 'NOx2018', 'FFCM1(-)', - 'SulfurLibrary', 'SulfurGlarborgH2S'], + 'primaryThermoLibrary', 'primaryNS', 'NitrogenCurran', 'NOx2018', 'FFCM1(-)', + 'SulfurLibrary', 'SulfurGlarborgH2S'], transportLibraries=[], reactionLibraries=[], seedMechanisms=[], - kineticsFamilies=[familyName], + kineticsFamilies=[family_name], kineticsDepositories=['training'], # frequenciesLibraries = self.statmechLibraries, depository=False, # Don't bother loading the depository information, as we don't use it ) - family = database.kinetics.families[familyName] + family = database.kinetics.families[family_name] family.cleanTree(database.thermo) family.generateTree(thermoDatabase=database.thermo, nprocs=min(4, nprocs)) family.checkTree() family.regularize() - templateRxnMap = family.getReactionMatches(thermoDatabase=database.thermo, removeDegeneracy=True, getReverse=True, fixLabels=True) - family.makeBMRulesFromTemplateRxnMap(templateRxnMap, nprocs=min(6, nprocs)) + template_rxn_map = family.getReactionMatches(thermoDatabase=database.thermo, removeDegeneracy=True, getReverse=True, + fixLabels=True) + family.makeBMRulesFromTemplateRxnMap(template_rxn_map, nprocs=min(6, nprocs)) family.checkTree() - family.save(os.path.join(dbdir,'kinetics','families',familyName)) + family.save(os.path.join(dbdir, 'kinetics', 'families', family_name)) + + ################################################################################ if __name__ == '__main__': diff --git a/scripts/isotopes.py b/scripts/isotopes.py index 2bb7d63df2..fcfb1faa0b 100644 --- a/scripts/isotopes.py +++ b/scripts/isotopes.py @@ -40,9 +40,11 @@ import os import os.path +from rmgpy.exceptions import InputError from rmgpy.rmg.main import initializeLog from rmgpy.tools.isotopes import run -from rmgpy.exceptions import InputError + + ################################################################################ @@ -55,12 +57,12 @@ def parseCommandLineArguments(): parser = argparse.ArgumentParser() parser.add_argument('input', help='RMG input file') - parser.add_argument('--output', type=str, nargs=1, default='',help='Output folder') + parser.add_argument('--output', type=str, nargs=1, default='', help='Output folder') parser.add_argument('--original', type=str, nargs=1, default='', help='Location of the isotopeless mechanism') parser.add_argument('--maximumIsotopicAtoms', type=int, nargs=1, default=[1000000], help='The maxuminum number of isotopes you allow in a specific molecule') - parser.add_argument('--useOriginalReactions' , action='store_true', default=False, + parser.add_argument('--useOriginalReactions', action='store_true', default=False, help='Use reactions from the original rmgpy generated chem_annotated.inp file') parser.add_argument('--kineticIsotopeEffect', type=str, nargs=1, default='', help='Type of kinetic isotope effects to use, currently only "simple" supported.') @@ -68,25 +70,26 @@ def parseCommandLineArguments(): return args -def main(): +def main(): args = parseCommandLineArguments() if args.useOriginalReactions and not args.original: raise InputError('Cannot use original reactions without a previously run RMG job') - maximumIsotopicAtoms = args.maximumIsotopicAtoms[0] - useOriginalReactions = args.useOriginalReactions - inputFile = args.input + 
maximum_isotopic_atoms = args.maximumIsotopicAtoms[0] + use_original_reactions = args.useOriginalReactions + input_file = args.input outputdir = os.path.abspath(args.output[0]) if args.output else os.path.abspath('.') original = os.path.abspath(args.original[0]) if args.original else None kie = args.kineticIsotopeEffect[0] if args.kineticIsotopeEffect else None supported_kie_methods = ['simple'] if kie not in supported_kie_methods and kie is not None: - raise InputError('The kie input, {0}, is not one of the currently supported methods, {1}'.format(kie,supported_kie_methods)) + raise InputError('The kie input, {0}, is not one of the currently supported methods, {1}'.format(kie, supported_kie_methods)) initializeLog(logging.INFO, os.path.join(os.getcwd(), 'RMG.log')) - run(inputFile, outputdir, original=original, - maximumIsotopicAtoms=maximumIsotopicAtoms, - useOriginalReactions=useOriginalReactions, - kineticIsotopeEffect = kie) + run(input_file, outputdir, original=original, + maximumIsotopicAtoms=maximum_isotopic_atoms, + useOriginalReactions=use_original_reactions, + kineticIsotopeEffect=kie) + if __name__ == '__main__': main() diff --git a/scripts/machineWriteDatabase.py b/scripts/machineWriteDatabase.py index 0c64df7989..0a57661ae3 100644 --- a/scripts/machineWriteDatabase.py +++ b/scripts/machineWriteDatabase.py @@ -37,6 +37,6 @@ from rmgpy.data.rmg import RMGDatabase database = RMGDatabase() -database.load(settings['database.directory'], kineticsFamilies = 'all') +database.load(settings['database.directory'], kineticsFamilies='all') -database.save(settings['database.directory']) \ No newline at end of file +database.save(settings['database.directory']) diff --git a/scripts/mergeModels.py b/scripts/mergeModels.py index 152b6c3a34..4101f28129 100644 --- a/scripts/mergeModels.py +++ b/scripts/mergeModels.py @@ -42,12 +42,12 @@ import rmgpy.tools.merge_models as merge_models + ################################################################################ def main(): merge_models.main() + if __name__ == '__main__': main() - - \ No newline at end of file diff --git a/scripts/simulate.py b/scripts/simulate.py index b7cf5e155d..8cf209cba9 100644 --- a/scripts/simulate.py +++ b/scripts/simulate.py @@ -33,40 +33,42 @@ specified in the input file) on an RMG job. 
""" -import os.path import argparse +import os.path from rmgpy.tools.simulate import run_simulation + ################################################################################ def parse_arguments(): - parser = argparse.ArgumentParser() parser.add_argument('input', metavar='INPUT', type=str, nargs=1, - help='RMG input file') + help='RMG input file') parser.add_argument('chemkin', metavar='CHEMKIN', type=str, nargs=1, - help='Chemkin file') + help='Chemkin file') parser.add_argument('dictionary', metavar='DICTIONARY', type=str, nargs=1, - help='RMG dictionary file') + help='RMG dictionary file') parser.add_argument('--no-dlim', dest='dlim', action='store_false', - help='Turn off diffusion-limited rates for LiquidReactor') + help='Turn off diffusion-limited rates for LiquidReactor') parser.add_argument('-f', '--foreign', dest='checkDuplicates', action='store_true', - help='Not an RMG generated Chemkin file (will be checked for duplicates)') + help='Not an RMG generated Chemkin file (will be checked for duplicates)') args = parser.parse_args() - - inputFile = os.path.abspath(args.input[0]) - chemkinFile = os.path.abspath(args.chemkin[0]) - dictFile = os.path.abspath(args.dictionary[0]) + + input_file = os.path.abspath(args.input[0]) + chemkin_file = os.path.abspath(args.chemkin[0]) + dict_file = os.path.abspath(args.dictionary[0]) dflag = args.dlim - checkDuplicates = args.checkDuplicates + check_duplicates = args.checkDuplicates + + return input_file, chemkin_file, dict_file, dflag, check_duplicates - return inputFile, chemkinFile, dictFile, dflag, checkDuplicates def main(): - inputFile, chemkinFile, dictFile, dflag, checkDuplicates = parse_arguments() + input_file, chemkin_file, dict_file, dflag, check_duplicates = parse_arguments() + + run_simulation(input_file, chemkin_file, dict_file, diffusionLimited=dflag, checkDuplicates=check_duplicates) - run_simulation(inputFile, chemkinFile, dictFile, diffusionLimited=dflag, checkDuplicates=checkDuplicates) ################################################################################ diff --git a/scripts/standardizeModelSpeciesNames.py b/scripts/standardizeModelSpeciesNames.py index 5e9e246263..2806be69e5 100644 --- a/scripts/standardizeModelSpeciesNames.py +++ b/scripts/standardizeModelSpeciesNames.py @@ -39,34 +39,35 @@ The resulting files are saved as ``chem1.inp`` and ``species_dictionary1.txt``, ``chem2.inp``, ``species_dictionary2.txt`` and so forth in the execution directory. 
""" +from __future__ import print_function -import os.path import argparse -from rmgpy.chemkin import loadChemkinFile, saveChemkinFile, saveSpeciesDictionary, saveTransportFile +from rmgpy.chemkin import loadChemkinFile, saveChemkinFile, saveSpeciesDictionary from rmgpy.rmg.model import ReactionModel ################################################################################ if __name__ == '__main__': - + parser = argparse.ArgumentParser() parser.add_argument('--model1', metavar='FILE', type=str, nargs='+', - help='the Chemkin files and species dictionaries of the first model') + help='the Chemkin files and species dictionaries of the first model') parser.add_argument('--model2', metavar='FILE', type=str, nargs='+', - help='the Chemkin files and species dictionaries of the second model') + help='the Chemkin files and species dictionaries of the second model') parser.add_argument('--model3', metavar='FILE', type=str, nargs='+', - help='the Chemkin files and species dictionaries of the third model') + help='the Chemkin files and species dictionaries of the third model') parser.add_argument('--model4', metavar='FILE', type=str, nargs='+', - help='the Chemkin files and species dictionaries of the fourth model') + help='the Chemkin files and species dictionaries of the fourth model') parser.add_argument('--model5', metavar='FILE', type=str, nargs='+', - help='the Chemkin files and species dictionaries of the fifth model') - + help='the Chemkin files and species dictionaries of the fifth model') + args = parser.parse_args() - + transport = False inputModelFiles = [] for model in [args.model1, args.model2, args.model3, args.model4, args.model5]: - if model is None: continue + if model is None: + continue if len(model) == 2: inputModelFiles.append((model[0], model[1], None)) elif len(model) == 3: @@ -74,41 +75,41 @@ inputModelFiles.append((model[0], model[1], model[2])) else: raise Exception - + outputChemkinFile = 'chem.inp' outputSpeciesDictionary = 'species_dictionary.txt' outputTransportFile = 'tran.dat' if transport else None - + # Load the models to merge models = [] for chemkin, speciesPath, transportPath in inputModelFiles: - print 'Loading model #{0:d}...'.format(len(models)+1) + print('Loading model #{0:d}...'.format(len(models) + 1)) model = ReactionModel() model.species, model.reactions = loadChemkinFile(chemkin, speciesPath, transportPath=transportPath) models.append(model) allSpecies = [] speciesIndices = [[] for i in range(len(models))] - for i, model in enumerate(models): + for i, model in enumerate(models): speciesIndices[i] = [] for j, species in enumerate(model.species): for index, species0 in enumerate(allSpecies): if species0.isIsomorphic(species): speciesIndices[i].append(index) - break; + break else: allSpecies.append(species) speciesIndices[i].append(allSpecies.index(species)) # Reassign species names and labels according to the list of all species in all models # We must retain the original thermochemistry - for i, model in enumerate(models): + for i, model in enumerate(models): for j, species in enumerate(model.species): index = speciesIndices[i][j] species.label = allSpecies[index].label species.index = allSpecies[index].index - + # Resave the models - saveChemkinFile('chem{0}.inp'.format(i+1), model.species, model.reactions) - saveSpeciesDictionary('species_dictionary{0}.txt'.format(i+1), model.species) - - print 'Saving of new models with consistent names is complete!' 
\ No newline at end of file + saveChemkinFile('chem{0}.inp'.format(i + 1), model.species, model.reactions) + saveSpeciesDictionary('species_dictionary{0}.txt'.format(i + 1), model.species) + + print('Saving of new models with consistent names is complete!') diff --git a/scripts/thermoEstimator.py b/scripts/thermoEstimator.py index e7735a99da..1750660226 100644 --- a/scripts/thermoEstimator.py +++ b/scripts/thermoEstimator.py @@ -36,30 +36,32 @@ """ import os.path + from rmgpy import settings +from rmgpy.chemkin import saveChemkinFile, saveSpeciesDictionary from rmgpy.data.rmg import RMGDatabase +from rmgpy.data.thermo import ThermoLibrary from rmgpy.rmg.main import RMG -from rmgpy.chemkin import saveChemkinFile, saveSpeciesDictionary from rmgpy.rmg.model import Species from rmgpy.thermo.thermoengine import submit -from rmgpy.data.thermo import ThermoLibrary - + + ################################################################################ def runThermoEstimator(inputFile, library_flag): """ Estimate thermo for a list of species using RMG and the settings chosen inside a thermo input file. """ - + rmg = RMG() rmg.loadThermoInput(inputFile) - + rmg.database = RMGDatabase() path = os.path.join(settings['database.directory']) # forbidden structure loading rmg.database.loadThermo(os.path.join(path, 'thermo'), rmg.thermoLibraries, depository=False) - + if rmg.solvent: rmg.database.loadSolvation(os.path.join(path, 'solvation')) Species.solventData = rmg.database.solvation.getSolventData(rmg.solvent) @@ -72,32 +74,31 @@ def runThermoEstimator(inputFile, library_flag): library = ThermoLibrary(name='Thermo Estimation Library') for species in rmg.initialSpecies: library.loadEntry( - index = len(library.entries) + 1, - label = species.label, - molecule = species.molecule[0].toAdjacencyList(), - thermo = species.getThermoData().toThermoData(), - shortDesc = species.getThermoData().comment, + index=len(library.entries) + 1, + label=species.label, + molecule=species.molecule[0].toAdjacencyList(), + thermo=species.getThermoData().toThermoData(), + shortDesc=species.getThermoData().comment, ) - library.save(os.path.join(rmg.outputDirectory,'ThermoLibrary.py')) - + library.save(os.path.join(rmg.outputDirectory, 'ThermoLibrary.py')) # Save the thermo data to chemkin format output files and dictionary, with no reactions saveChemkinFile(os.path.join(rmg.outputDirectory, 'chem_annotated.inp'), species=rmg.initialSpecies, reactions=[]) saveSpeciesDictionary(os.path.join(rmg.outputDirectory, 'species_dictionary.txt'), species=rmg.initialSpecies) + ################################################################################ if __name__ == '__main__': - import argparse - + parser = argparse.ArgumentParser() parser.add_argument('input', metavar='INPUT', type=str, nargs=1, - help='Thermo input file') + help='Thermo input file') parser.add_argument('-l', '--library', action='store_true', help='generate RMG thermo library') args = parser.parse_args() - + inputFile = os.path.abspath(args.input[0]) - + runThermoEstimator(inputFile, args.library) From abe3d0646007ac2d412fbe9948e930b95c9b094a Mon Sep 17 00:00:00 2001 From: Mark Payne Date: Fri, 16 Aug 2019 09:18:00 -0400 Subject: [PATCH 025/155] Correct path issue so that outputTest.py runs from any location --- rmgpy/rmg/outputTest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rmgpy/rmg/outputTest.py b/rmgpy/rmg/outputTest.py index 2c0fa68374..118b744c4c 100644 --- a/rmgpy/rmg/outputTest.py +++ b/rmgpy/rmg/outputTest.py @@ -46,7 +46,7 @@ def 
testSaveOutputHTML(self): This example is to test if an HTML file can be generated for the provided chemkin model. """ - folder = os.path.join(os.getcwd(), 'rmgpy/rmg/test_data/saveOutputHTML/') + folder = os.path.join(os.path.dirname(__file__), 'test_data/saveOutputHTML/') chemkin_path = os.path.join(folder, 'eg6', 'chem_annotated.inp') dictionary_path = os.path.join(folder, 'eg6', 'species_dictionary.txt') From 22af8bbc4c406b7c7d9f2b8b28beaf58d949fb14 Mon Sep 17 00:00:00 2001 From: Mark Payne Date: Fri, 16 Aug 2019 09:21:00 -0400 Subject: [PATCH 026/155] Prevent file locking for HDF5 files --- rmgpy/rmg/main.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/rmgpy/rmg/main.py b/rmgpy/rmg/main.py index 193c1959e6..e9567f06fd 100644 --- a/rmgpy/rmg/main.py +++ b/rmgpy/rmg/main.py @@ -81,6 +81,11 @@ ################################################################################ +# This module uses the HDF5 data format, which can cause problems on files systems that use NFS (common for network +# mounted file systems. The following sets an environment variable that prevents file locking that would otherwise +# cause a problem for NFS. +os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE' + solvent = None # Maximum number of user defined processors From 2194bf88cbaec4f080a01f42f57ca64da13ce74e Mon Sep 17 00:00:00 2001 From: Mark Payne Date: Fri, 16 Aug 2019 09:22:00 -0400 Subject: [PATCH 027/155] Explicitly add h5py dependency Before we were getting it from a dependency of a dependency --- environment_linux.yml | 1 + environment_mac.yml | 1 + environment_py3.yml | 1 + environment_windows.yml | 1 + requirements.txt | 1 + 5 files changed, 5 insertions(+) diff --git a/environment_linux.yml b/environment_linux.yml index e3a06c0991..3f985f41ea 100644 --- a/environment_linux.yml +++ b/environment_linux.yml @@ -18,6 +18,7 @@ dependencies: - ffmpeg - gprof2dot - graphviz + - h5py - jinja2 - jupyter - lpsolve55 diff --git a/environment_mac.yml b/environment_mac.yml index 9ce08ae1da..78ded8392c 100644 --- a/environment_mac.yml +++ b/environment_mac.yml @@ -18,6 +18,7 @@ dependencies: - ffmpeg - gprof2dot - graphviz + - h5py - jinja2 - jupyter - lpsolve55 diff --git a/environment_py3.yml b/environment_py3.yml index 5e9f6493b5..c06bcf0c5b 100644 --- a/environment_py3.yml +++ b/environment_py3.yml @@ -16,6 +16,7 @@ dependencies: - ffmpeg - gprof2dot - graphviz + - h5py - jinja2 - jupyter - lpsolve55 diff --git a/environment_windows.yml b/environment_windows.yml index 23ef34828e..647e88ae47 100644 --- a/environment_windows.yml +++ b/environment_windows.yml @@ -18,6 +18,7 @@ dependencies: - ffmpeg - gprof2dot - graphviz + - h5py - jinja2 - jupyter - lpsolve55 diff --git a/requirements.txt b/requirements.txt index 4935f43bec..44b6ad990f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,6 +16,7 @@ Jinja2 # this is for rendering the HTML output files cairocffi yaml cclib # needs to be 1.6.1.rmg +h5py # For postprocessing the profiling data argparse From 2913cb782a8de5f2f2c300957cc0ba88186afb49 Mon Sep 17 00:00:00 2001 From: Mark Payne Date: Mon, 19 Aug 2019 14:10:22 -0400 Subject: [PATCH 028/155] Bug fix: Import conformer from statmech instead of pdep Previous wildcard imports masked this issue. Thanks to @mliu49 for catching this and making this fix. 
--- rmgpy/rmg/pdep.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rmgpy/rmg/pdep.py b/rmgpy/rmg/pdep.py index 4edf29dd83..dd8a5de2f2 100644 --- a/rmgpy/rmg/pdep.py +++ b/rmgpy/rmg/pdep.py @@ -45,8 +45,9 @@ from rmgpy.constants import R from rmgpy.data.kinetics.library import LibraryReaction from rmgpy.exceptions import PressureDependenceError, NetworkError -from rmgpy.pdep import Conformer, Configuration +from rmgpy.pdep import Configuration from rmgpy.rmg.react import react_species +from rmgpy.statmech import Conformer ################################################################################ From 610931a7f6095b441ab3f57b63b5a50316034b19 Mon Sep 17 00:00:00 2001 From: alongd Date: Fri, 16 Aug 2019 14:16:46 -0400 Subject: [PATCH 029/155] Various PEP-8 modifications and futurization to the pdep module --- rmgpy/pdep/collision.pxd | 6 +- rmgpy/pdep/collision.pyx | 210 +++++++++-------- rmgpy/pdep/collisionTest.py | 5 +- rmgpy/pdep/configuration.pxd | 15 +- rmgpy/pdep/configuration.pyx | 390 ++++++++++++++++---------------- rmgpy/pdep/configurationTest.py | 13 +- rmgpy/pdep/cse.pyx | 255 +++++++++++---------- rmgpy/pdep/draw.py | 10 +- rmgpy/pdep/me.pyx | 144 ++++++------ rmgpy/pdep/msc.pyx | 190 ++++++++-------- rmgpy/pdep/networkTest.py | 19 +- rmgpy/pdep/reaction.pxd | 28 --- rmgpy/pdep/reaction.pyx | 324 +++++++++++++------------- rmgpy/pdep/rs.pyx | 260 ++++++++++----------- 14 files changed, 940 insertions(+), 929 deletions(-) delete mode 100644 rmgpy/pdep/reaction.pxd diff --git a/rmgpy/pdep/collision.pxd b/rmgpy/pdep/collision.pxd index 59b8864299..5738968805 100644 --- a/rmgpy/pdep/collision.pxd +++ b/rmgpy/pdep/collision.pxd @@ -25,17 +25,17 @@ # # ############################################################################### -cimport numpy +cimport numpy as np from rmgpy.rmgobject cimport RMGObject from rmgpy.quantity cimport ScalarQuantity, ArrayQuantity - ################################################################################ + cdef class SingleExponentialDown(RMGObject): - cdef public ScalarQuantity _alpha0, _T0 + cdef public ScalarQuantity _alpha0, _t0 cdef public double n cpdef double getAlpha(self, double T) except -1000000000 diff --git a/rmgpy/pdep/collision.pyx b/rmgpy/pdep/collision.pyx index 7d13bd327c..d5944c9a31 100644 --- a/rmgpy/pdep/collision.pyx +++ b/rmgpy/pdep/collision.pyx @@ -31,16 +31,20 @@ This module contains classes and functions for working with collision models. """ -import numpy, logging +import logging + cimport cython +import numpy as np +cimport numpy as np +from libc.math cimport exp cimport rmgpy.constants as constants import rmgpy.quantity as quantity -from libc.math cimport exp, sqrt from rmgpy.exceptions import CollisionError ################################################################################ + cdef class SingleExponentialDown(RMGObject): """ A representation of a single exponential down model of collisional energy @@ -87,7 +91,7 @@ cdef class SingleExponentialDown(RMGObject): """ A helper function used when pickling an object. 
""" - return (SingleExponentialDown, (self.alpha0, self.T0, self.n)) + return SingleExponentialDown, (self.alpha0, self.T0, self.n) property alpha0: """The average energy transferred in a deactivating collision at the reference temperature.""" @@ -105,9 +109,9 @@ cdef class SingleExponentialDown(RMGObject): property T0: """The reference temperature.""" def __get__(self): - return self._T0 + return self._t0 def __set__(self, value): - self._T0 = quantity.Temperature(value) + self._t0 = quantity.Temperature(value) cpdef double getAlpha(self, double T) except -1000000000: """ @@ -115,21 +119,20 @@ cdef class SingleExponentialDown(RMGObject): transferred in a deactivating collision - in J/mol at temperature `T` in K. """ - cdef double alpha0, T0 + cdef double alpha0, t0 alpha0 = self._alpha0.value_si - if self._T0 is None: + if self._t0 is None: return alpha0 else: - T0 = self._T0.value_si - return alpha0 * (T / T0) ** self.n + t0 = self._t0.value_si + return alpha0 * (T / t0) ** self.n @cython.boundscheck(False) @cython.wraparound(False) - def generateCollisionMatrix(self, - double T, - numpy.ndarray[numpy.float64_t,ndim=2] densStates, - numpy.ndarray[numpy.float64_t,ndim=1] Elist, - numpy.ndarray[numpy.int_t,ndim=1] Jlist=None): + def generateCollisionMatrix(self, double T, + np.ndarray[np.float64_t,ndim=2] densStates, + np.ndarray[np.float64_t,ndim=1] Elist, + np.ndarray[np.int_t,ndim=1] Jlist=None): """ Generate and return the collision matrix :math:`\\matrix{M}_\\mathrm{coll} / \\omega = \\matrix{P} - \\matrix{I}` @@ -139,37 +142,37 @@ cdef class SingleExponentialDown(RMGObject): """ cdef double alpha, beta - cdef double C, left, right - cdef int Ngrains, start, i, r, s, u, v - cdef numpy.ndarray[numpy.float64_t,ndim=1] rho - cdef numpy.ndarray[numpy.float64_t,ndim=2] phi, P0 - cdef numpy.ndarray[numpy.float64_t,ndim=4] P + cdef double c, left, right + cdef int n_grains, start, i, r, s, u, v + cdef np.ndarray[np.float64_t,ndim=1] rho + cdef np.ndarray[np.float64_t,ndim=2] phi, p0 + cdef np.ndarray[np.float64_t,ndim=4] p - Ngrains = Elist.shape[0] - NJ = Jlist.shape[0] if Jlist is not None else 1 - P = numpy.zeros((Ngrains,NJ,Ngrains,NJ), numpy.float64) - P0 = numpy.zeros((Ngrains,Ngrains), numpy.float64) + n_grains = Elist.shape[0] + n_j = Jlist.shape[0] if Jlist is not None else 1 + p = np.zeros((n_grains, n_j, n_grains, n_j), np.float64) + p0 = np.zeros((n_grains, n_grains), np.float64) alpha = 1.0 / self.getAlpha(T) beta = 1.0 / (constants.R * T) - if NJ > 1: - rho = numpy.zeros(Ngrains) - for r in range(Ngrains): - rho[r] = numpy.sum((2*Jlist+1) * densStates[r,:]) + if n_j > 1: + rho = np.zeros(n_grains) + for r in range(n_grains): + rho[r] = np.sum((2*Jlist+1) * densStates[r,:]) else: rho = densStates[:,0] - for start in range(Ngrains): + for start in range(n_grains): if rho[start] > 0: break # Determine unnormalized entries in collisional transfer probability matrix - for r in range(start, Ngrains): - for s in range(start,r+1): - P0[s,r] = exp(-(Elist[r] - Elist[s]) * alpha) - for s in range(r+1,Ngrains): - P0[s,r] = exp(-(Elist[s] - Elist[r]) * alpha) * rho[s] / rho[r] * exp(-(Elist[s] - Elist[r]) * beta) + for r in range(start, n_grains): + for s in range(start, r + 1): + p0[s, r] = exp(-(Elist[r] - Elist[s]) * alpha) + for s in range(r+1,n_grains): + p0[s, r] = exp(-(Elist[s] - Elist[r]) * alpha) * rho[s] / rho[r] * exp(-(Elist[s] - Elist[r]) * beta) # Normalize using detailed balance # This method is much more robust, and corresponds to: @@ -177,58 +180,63 @@ cdef class 
SingleExponentialDown(RMGObject): # [ 1 2 2 2 ...] # [ 1 2 3 3 ...] # [ 1 2 3 4 ...] - for r in range(start, Ngrains): - left = 0.0; right = 0.0 - for s in range(start, r): left += P0[s,r] - for s in range(r, Ngrains): right += P0[s,r] - C = (1 - left) / right + for r in range(start, n_grains): + left, right = 0.0, 0.0 + for s in range(start, r): + left += p0[s, r] + for s in range(r, n_grains): + right += p0[s, r] + c = (1 - left) / right # Check for normalization consistency (i.e. all numbers are positive) - if C < 0: raise CollisionError('Encountered negative normalization coefficient while normalizing collisional transfer probabilities matrix.') - for s in range(r+1,Ngrains): - P0[r,s] *= C - P0[s,r] *= C - P0[r,r] = P0[r,r] * C - 1 + if c < 0: + raise CollisionError('Encountered negative normalization coefficient while normalizing ' + 'collisional transfer probabilities matrix.') + for s in range(r + 1, n_grains): + p0[r, s] *= c + p0[s, r] *= c + p0[r, r] = p0[r, r] * c - 1 # This method is described by Pilling and Holbrook, and corresponds to: # [ ... 4 3 2 1 ] # [ ... 3 3 2 1 ] # [ ... 2 2 2 1 ] # [ ... 1 1 1 1 ] - #for r in range(Ngrains, start, -1): - #left = 0.0; right = 0.0 - #for s in range(start, r): left += P0[s,r] - #for s in range(r, Ngrains): right += P0[s,r] - #C = (1 - right) / left - ## Check for normalization consistency (i.e. all numbers are positive) - #if C < 0: raise CollisionError('Encountered negative normalization coefficient while normalizing collisional transfer probabilities matrix.') - #for s in range(r-1): - #P0[r,s] *= C - #P0[s,r] *= C - #P0[r,r] = P0[r,r] * C - 1 + # for r in range(n_grains, start, -1): + # left = 0.0; right = 0.0 + # for s in range(start, r): left += p0[s,r] + # for s in range(r, n_grains): right += p0[s,r] + # c = (1 - right) / left + # # Check for normalization consistency (i.e. 
all numbers are positive) + # if c < 0: raise CollisionError('Encountered negative normalization coefficient while normalizing ' + # 'collisional transfer probabilities matrix.') + # for s in range(r-1): + # p0[r,s] *= c + # p0[s,r] *= c + # p0[r,r] = p0[r,r] * c - 1 # If solving the 2D master equation, compute P(E,J,E',J') from P(E,E') # by assuming that the J distribution after the collision is independent # of that before the collision (the strong collision approximation in J) - if NJ > 1: - phi = numpy.zeros_like(densStates) - for s in range(NJ): - phi[:,s] = (2*Jlist[s]+1) * densStates[:,s] - for r in range(start, Ngrains): + if n_j > 1: + phi = np.zeros_like(densStates) + for s in range(n_j): + phi[:,s] = (2 * Jlist[s] + 1) * densStates[:,s] + for r in range(start, n_grains): phi[r,:] /= rho[r] - for r in range(start, Ngrains): - for s in range(NJ): - for u in range(start, Ngrains): - for v in range(NJ): - P[r,s,u,v] = P0[r,u] * phi[r,s] + for r in range(start, n_grains): + for s in range(n_j): + for u in range(start, n_grains): + for v in range(n_j): + p[r, s, u, v] = p0[r, u] * phi[r, s] else: - P[:,0,:,0] = P0 + p[:,0,:,0] = p0 - return P + return p def calculateCollisionEfficiency(self, double T, - numpy.ndarray[numpy.float64_t,ndim=1] Elist, - numpy.ndarray[numpy.int_t,ndim=1] Jlist, - numpy.ndarray[numpy.float64_t,ndim=2] densStates, + np.ndarray[np.float64_t,ndim=1] Elist, + np.ndarray[np.int_t,ndim=1] Jlist, + np.ndarray[np.float64_t,ndim=2] densStates, double E0, double Ereac): """ Calculate an efficiency factor for collisions, particularly useful for the @@ -237,7 +245,7 @@ cdef class SingleExponentialDown(RMGObject): Elist` in J/mol, ground-state energy `E0` in kJ/mol, and first reactive energy `Ereac` in kJ/mol. The collisions occur at temperature `T` in K and are described by the average energy transferred in a deactivating - collision `dEdown` in kJ/mol. The algorithm here is implemented as + collision `d_e_down` in kJ/mol. The algorithm here is implemented as described by Chang, Bozzelli, and Dean [Chang2000]_. .. [Chang2000] A. Y. Chang, J. W. Bozzelli, and A. M. Dean. 
@@ -246,60 +254,62 @@ cdef class SingleExponentialDown(RMGObject): """ - cdef double dEdown, dE, FeNum, FeDen, Delta1, Delta2, DeltaN, Delta, value, beta - cdef double R = constants.R - cdef int Ngrains, NJ, r + cdef double d_e_down, d_e, fe, fe_num, fe_den, delta1, delta2, delta_n, delta, value, beta + cdef double gas_constant = constants.R + cdef int n_grains, n_j, r # Ensure that the barrier height is sufficiently above the ground state # Otherwise invalid efficiencies are observed if Ereac - E0 < 100: Ereac = E0 + 100 - dEdown = self.getAlpha(T) + d_e_down = self.getAlpha(T) - Ngrains = len(Elist) - NJ = 1 if Jlist is None else len(Jlist) - dE = Elist[1] - Elist[0] + n_grains = len(Elist) + n_j = 1 if Jlist is None else len(Jlist) + d_e = Elist[1] - Elist[0] - FeNum = 0; FeDen = 0 - Delta1 = 0; Delta2 = 0; DeltaN = 0; Delta = 1 + fe_num, fe_den, delta1, delta2, delta_n, delta = 0, 0, 0, 0, 0, 1 - for r in range(Ngrains): + for r in range(n_grains): value = 0.0 - for s in range(NJ): - value += densStates[r,s] * (2*Jlist[s]+1) * exp(-Elist[r] / (R * T)) + for s in range(n_j): + value += densStates[r,s] * (2 * Jlist[s] + 1) * exp(-Elist[r] / (gas_constant * T)) if Elist[r] > Ereac: - FeNum += value - if FeDen == 0: - FeDen = value * R * T / dE - if FeDen == 0: return 1.0 - Fe = FeNum / FeDen + fe_num += value + if fe_den == 0: + fe_den = value * gas_constant * T / d_e + if fe_den == 0: + return 1.0 + fe = fe_num / fe_den - # Chang, Bozzelli, and Dean recommend "freezing out" Fe at values greater + # Chang, Bozzelli, and Dean recommend "freezing out" fe at values greater # than 1e6 to avoid issues of roundoff error # They claim that the collision efficiency isn't too temperature-dependent # in this regime, so it's an okay approximation to use - if Fe > 1e6: Fe = 1e6 + if fe > 1e6: + fe = 1e6 - for r in range(Ngrains): + for r in range(n_grains): value = 0.0 - for s in range(NJ): - value += densStates[r,s] * (2*Jlist[s]+1) * exp(-Elist[r] / (R * T)) + for s in range(n_j): + value += densStates[r,s] * (2*Jlist[s]+1) * exp(-Elist[r] / (gas_constant * T)) # Delta if Elist[r] < Ereac: - Delta1 += value - Delta2 += value * exp(-(Ereac - Elist[r]) / (Fe * R * T)) - DeltaN += value + delta1 += value + delta2 += value * exp(-(Ereac - Elist[r]) / (fe * gas_constant * T)) + delta_n += value - Delta1 /= DeltaN - Delta2 /= DeltaN + delta1 /= delta_n + delta2 /= delta_n - Delta = Delta1 - (Fe * R * T) / (dEdown + Fe * R * T) * Delta2 + delta = delta1 - (fe * gas_constant * T) / (d_e_down + fe * gas_constant * T) * delta2 - beta = (dEdown / (dEdown + Fe * R * T))**2 / Delta + beta = (d_e_down / (d_e_down + fe * gas_constant * T)) ** 2 / delta if beta > 1: - logging.debug('Collision efficiency {0:.3f} calculated at {1:g} K is greater than unity, so it will be set to unity.'.format(beta, T)) + logging.debug('Collision efficiency {0:.3f} calculated at {1:g} K is greater than unity, ' + 'so it will be set to unity.'.format(beta, T)) beta = 1 if beta < 0: raise CollisionError('Invalid collision efficiency {0:.3f} calculated at {1:g} K.'.format(beta, T)) diff --git a/rmgpy/pdep/collisionTest.py b/rmgpy/pdep/collisionTest.py index f62a2ae07a..c4383c8542 100644 --- a/rmgpy/pdep/collisionTest.py +++ b/rmgpy/pdep/collisionTest.py @@ -32,14 +32,13 @@ This module contains unit tests of the :mod:`rmgpy.pdep.collision` module. 
""" -import numpy import unittest -import rmgpy.constants as constants -from rmgpy.pdep.collision import SingleExponentialDown +from rmgpy.pdep.collision import SingleExponentialDown ################################################################################ + class TestSingleExponentialDown(unittest.TestCase): """ Contains unit tests of the SingleExponentialDown class. diff --git a/rmgpy/pdep/configuration.pxd b/rmgpy/pdep/configuration.pxd index fa722d9e69..6bff125ad0 100644 --- a/rmgpy/pdep/configuration.pxd +++ b/rmgpy/pdep/configuration.pxd @@ -25,16 +25,16 @@ # # ############################################################################### -cimport numpy +cimport numpy as np ################################################################################ -cdef class Configuration: +cdef class Configuration(object): cdef public list species - cdef public numpy.ndarray Elist - cdef public numpy.ndarray densStates - cdef public numpy.ndarray sumStates + cdef public np.ndarray Elist + cdef public np.ndarray densStates + cdef public np.ndarray sumStates cdef public bint activeJRotor cdef public bint activeKRotor @@ -62,6 +62,7 @@ cdef class Configuration: cpdef double calculateCollisionFrequency(self, double T, double P, dict bathGas) except -1 - cpdef numpy.ndarray generateCollisionMatrix(self, double T, numpy.ndarray densStates, numpy.ndarray Elist, numpy.ndarray Jlist=?) + cpdef np.ndarray generateCollisionMatrix(self, double T, np.ndarray dens_states, + np.ndarray e_list, np.ndarray Jlist=?) - cpdef calculateDensityOfStates(self, numpy.ndarray Elist, bint activeJRotor=?, bint activeKRotor=?, bint rmgmode=?) + cpdef calculateDensityOfStates(self, np.ndarray Elist, bint activeJRotor=?, bint activeKRotor=?, bint rmgmode=?) diff --git a/rmgpy/pdep/configuration.pyx b/rmgpy/pdep/configuration.pyx index e245c53d03..380843f30d 100644 --- a/rmgpy/pdep/configuration.pyx +++ b/rmgpy/pdep/configuration.pyx @@ -32,31 +32,27 @@ configurations on a potential energy surface. This includes local minima states). """ -import math -import numpy import logging -import cython +import cython +import numpy as np +cimport numpy as np from libc.math cimport log, exp, sqrt import rmgpy.constants as constants - -from rmgpy.pdep.collision import * -from rmgpy.statmech import * +from rmgpy.statmech import LinearRotor, NonlinearRotor, IdealGasTranslation, HarmonicOscillator from rmgpy.statmech.conformer import getDensityOfStatesForst -from rmgpy.transport import TransportData - from rmgpy.species import Species, TransitionState -from rmgpy.reaction import Reaction +from rmgpy.transport import TransportData ################################################################################ -cdef class Configuration: + +cdef class Configuration(object): """ - A representation of a molecular configuration on a potential energy - surface. + A representation of a molecular configuration on a potential energy surface. 
""" - + def __init__(self, *species): self.species = list(species) self.Elist = None @@ -64,10 +60,10 @@ cdef class Configuration: self.sumStates = None self.activeJRotor = False self.activeKRotor = False - + def __str__(self): return ' + '.join([str(spec) for spec in self.species]) - + def __repr__(self): string = 'Configuration(' string += 'species="{0!r}", '.format(self.species) @@ -83,7 +79,7 @@ cdef class Configuration: """The ground-state energy of the configuration in J/mol.""" def __get__(self): return sum([float(spec.conformer.E0.value_si) for spec in self.species]) - + cpdef cleanup(self): """ Delete intermediate arrays used in computing k(T,P) values. @@ -91,14 +87,14 @@ cdef class Configuration: self.Elist = None self.densStates = None self.sumStates = None - + cpdef bint isUnimolecular(self) except -2: """ Return ``True`` if the configuration represents a unimolecular isomer, or ``False`` otherwise. """ return len(self.species) == 1 and isinstance(self.species[0], Species) - + cpdef bint isBimolecular(self) except -2: """ Return ``True`` if the configuration represents a bimolecular reactant @@ -119,14 +115,14 @@ cdef class Configuration: or ``False`` otherwise. """ return len(self.species) == 1 and isinstance(self.species[0], TransitionState) - + cpdef bint hasStatMech(self) except -2: """ Return ``True`` if all species in the configuration have statistical mechanics parameters, or ``False`` otherwise. """ return all([spec.hasStatMech() for spec in self.species]) - + cpdef bint hasThermo(self) except -2: """ Return ``True`` if all species in the configuration have thermodynamics @@ -139,39 +135,39 @@ cdef class Configuration: Return the constant-pressure heat capacity in J/mol*K at the specified temperature `T` in K. """ - cdef double Cp = 0.0 + cdef double cp = 0.0 for spec in self.species: - Cp += spec.getHeatCapacity(T) - return Cp + cp += spec.getHeatCapacity(T) + return cp cpdef double getEnthalpy(self, double T) except 100000000: """ Return the enthalpy in kJ/mol at the specified temperature `T` in K. """ - cdef double H = 0.0 + cdef double h = 0.0 for spec in self.species: - H += spec.getEnthalpy(T) - return H + h += spec.getEnthalpy(T) + return h cpdef double getEntropy(self, double T) except -100000000: """ Return the entropy in J/mol*K at the specified temperature `T` in K. """ - cdef double S = 0.0 + cdef double s = 0.0 for spec in self.species: - S += spec.getEntropy(T) - return S + s += spec.getEntropy(T) + return s cpdef double getFreeEnergy(self, double T) except 100000000: """ Return the Gibbs free energy in kJ/mol at the specified temperature `T` in K. """ - cdef double G = 0.0 + cdef double g = 0.0 for spec in self.species: - G += spec.getFreeEnergy(T) - return G - + g += spec.getFreeEnergy(T) + return g + cpdef double calculateCollisionFrequency(self, double T, double P, dict bathGas) except -1: """ Return the value of the collision frequency in Hz at the given @@ -182,33 +178,34 @@ cdef class Configuration: Only the Lennard-Jones collision model is currently supported. 
""" - cdef double bathGasSigma, bathGasEpsilon, bathGasMW - cdef double sigma, epsilon, mu, gasConc, frac, Tred, omega22 - + cdef double bath_gas_sigma, bath_gas_epsilon, bath_gas_mw + cdef double sigma, epsilon, mu, gas_concentration, frac, tred, omega22 + assert self.isUnimolecular() assert isinstance(self.species[0].getTransportData(), TransportData) for spec, frac in bathGas.items(): assert isinstance(spec.getTransportData(), TransportData) - - bathGasSigma = 0.0; bathGasEpsilon = 1.0; bathGasMW = 0.0 - for spec, frac in bathGas.iteritems(): - bathGasSigma += spec.getTransportData().sigma.value_si * frac - bathGasEpsilon *= spec.getTransportData().epsilon.value_si ** frac - bathGasMW += spec._molecularWeight.value_si * frac - - sigma = 0.5 * (self.species[0].getTransportData().sigma.value_si + bathGasSigma) - epsilon = sqrt((self.species[0].getTransportData().epsilon.value_si * bathGasEpsilon)) - mu = 1.0 / (1.0/self.species[0]._molecularWeight.value_si + 1.0/bathGasMW) - gasConc = P / constants.kB / T - + + bath_gas_sigma, bath_gas_epsilon, bath_gas_mw = 0.0, 1.0, 0.0 + for spec, frac in bathGas.items(): + bath_gas_sigma += spec.getTransportData().sigma.value_si * frac + bath_gas_epsilon *= spec.getTransportData().epsilon.value_si ** frac + bath_gas_mw += spec.molecularWeight.value_si * frac + + sigma = 0.5 * (self.species[0].getTransportData().sigma.value_si + bath_gas_sigma) + epsilon = sqrt((self.species[0].getTransportData().epsilon.value_si * bath_gas_epsilon)) + mu = 1.0 / (1.0 / self.species[0].molecularWeight.value_si + 1.0 / bath_gas_mw) + gas_concentration = P / constants.kB / T + # Evaluate configuration integral - Tred = constants.R * T / epsilon - omega22 = 1.16145 * Tred**(-0.14874) + 0.52487 * exp(-0.77320 * Tred) + 2.16178 * exp(-2.43787 * Tred) - + tred = constants.R * T / epsilon + omega22 = 1.16145 * tred ** (-0.14874) + 0.52487 * exp(-0.77320 * tred) + 2.16178 * exp(-2.43787 * tred) + # Evaluate collision frequency - return omega22 * sqrt(8 * constants.kB * T / constants.pi / mu) * constants.pi * sigma*sigma * gasConc - - cpdef numpy.ndarray generateCollisionMatrix(self, double T, numpy.ndarray densStates, numpy.ndarray Elist, numpy.ndarray Jlist=None): + return omega22 * sqrt(8 * constants.kB * T / constants.pi / mu) * constants.pi * sigma*sigma * gas_concentration + + cpdef np.ndarray generateCollisionMatrix(self, double T, np.ndarray densStates, np.ndarray Elist, + np.ndarray Jlist=None): """ Return the collisional energy transfer probabilities matrix for the configuration at the given temperature `T` in K using the given @@ -219,11 +216,12 @@ cdef class Configuration: assert self.isUnimolecular() assert self.species[0].energyTransferModel is not None return self.species[0].energyTransferModel.generateCollisionMatrix(T, densStates, Elist, Jlist) - - cpdef calculateDensityOfStates(self, numpy.ndarray Elist, bint activeJRotor=True, bint activeKRotor=True, bint rmgmode=False): + + cpdef calculateDensityOfStates(self, np.ndarray Elist, bint activeJRotor=True, bint activeKRotor=True, + bint rmgmode=False): """ Calculate the density (and sum) of states for the configuration at the - given energies above the ground state `Elist` in J/mol. The + given energies above the ground state `Elist` in J/mol. The `activeJRotor` and `activeKRotor` flags control whether the J-rotor and/or K-rotor are treated as active (and therefore included in the density and sum of states). 
The computed density and sum of states @@ -231,18 +229,19 @@ cdef class Configuration: """ cdef list modes cdef int i - + logging.debug('calculating density of states for {}'.format(self.__str__())) self.Elist = Elist self.activeJRotor = activeJRotor self.activeKRotor = activeKRotor - + # Get the active rovibrational modes for each species in the configuration modes = [] for i, species in enumerate(self.species): - modes.extend(species.conformer.getActiveModes(activeKRotor=activeKRotor, activeJRotor=activeJRotor)) - + modes.extend(species.conformer.getActiveModes(activeKRotor=self.activeKRotor, + activeJRotor=self.activeJRotor)) + if rmgmode or len(modes) == 0: # Include an arbitrary active rigid rotor if needed # The moments of inertia cancel in all subsequent calculations @@ -257,10 +256,10 @@ cdef class Configuration: linear = True break if linear: - modes.insert(0, LinearRotor(inertia=(1.0,"amu*angstrom^2"), symmetry=1)) + modes.insert(0, LinearRotor(inertia=(1.0, "amu*angstrom^2"), symmetry=1)) else: - modes.insert(0, NonlinearRotor(inertia=([1.0,1.0,1.0],"amu*angstrom^2"), symmetry=1)) - + modes.insert(0, NonlinearRotor(inertia=([1.0, 1.0, 1.0], "amu*angstrom^2"), symmetry=1)) + if len(modes) == 0: self.densStates = None self.sumStates = None @@ -284,24 +283,26 @@ cdef class Configuration: mass.append(m * constants.amu * 1000) if self.isBimolecular(): if len(mass) != 2: - raise AttributeError('Length of masses should be two for bimolecular reactants. We got {0}.'.format(len(mass))) - mu = 1.0/(1.0/mass[0] + 1.0/mass[1]) - modes.insert(0, IdealGasTranslation(mass=(mu/constants.amu,"amu"))) + raise AttributeError('Length of masses should be two for bimolecular reactants. ' + 'We got {0}.'.format(len(mass))) + mu = 1.0 / (1.0 / mass[0] + 1.0 / mass[1]) + modes.insert(0, IdealGasTranslation(mass=(mu/constants.amu, "amu"))) else: if len(mass) != 3: - raise AttributeError('Length of masses should be three for termolecular reactants. We got {0}.'.format(len(mass))) - mu = 1.0/(1.0/mass[0] + 1.0/mass[1]) - modes.insert(0, IdealGasTranslation(mass=(mu/constants.amu,"amu"))) - mu2 = 1.0/(1.0/mass[0] + 1.0/mass[2]) - modes.insert(0, IdealGasTranslation(mass=(mu2/constants.amu,"amu"))) + raise AttributeError('Length of masses should be three for termolecular reactants. 
' + 'We got {0}.'.format(len(mass))) + mu = 1.0 / (1.0 / mass[0] + 1.0 / mass[1]) + modes.insert(0, IdealGasTranslation(mass=(mu / constants.amu, "amu"))) + mu2 = 1.0 / (1.0 / mass[0] + 1.0 / mass[2]) + modes.insert(0, IdealGasTranslation(mass=(mu2 / constants.amu, "amu"))) if rmgmode: # Compute the density of states by direct count # This is currently faster than the method of steepest descents, # but requires classical hindered rotors - densStates = None + dens_states = None for mode in modes: if not isinstance(mode,HarmonicOscillator): - densStates = mode.getDensityOfStates(Elist, densStates) + dens_states = mode.getDensityOfStates(self.Elist, dens_states) # Fix a numerical artifact that occurs when two modes have # density of states expressions that are zero at the # ground state @@ -310,166 +311,173 @@ cdef class Configuration: # Instead, fill in an approximate value by extrapolation # This should only occur in systems with IdealGasTranslation # and NonlinearRotor modes - if densStates[1] == 0: - densStates[1] = densStates[2] * densStates[2] / densStates[3] + if dens_states[1] == 0: + dens_states[1] = dens_states[2] * dens_states[2] / dens_states[3] for mode in modes: if isinstance(mode,HarmonicOscillator): - densStates = mode.getDensityOfStates(Elist, densStates) - self.densStates = densStates + dens_states = mode.getDensityOfStates(self.Elist, dens_states) + self.densStates = dens_states for spec in self.species: self.densStates *= spec.conformer.spinMultiplicity * spec.conformer.opticalIsomers - + else: # Since the evaluation of quantum hindered rotors is slow, it is # actually faster (and probably negligibly less accurate) to use # interpolation in the steepest descents algorithm import scipy.interpolate - - logTdata = numpy.linspace(log(10.), log(10000.), 250.) - Tdata = numpy.exp(logTdata) - Qdata = numpy.ones_like(Tdata) - for i in range(Tdata.shape[0]): - T = Tdata[i] + + log_t_data = np.linspace(log(10.), log(10000.), 250.) + t_data = np.exp(log_t_data) + q_data = np.ones_like(t_data) + for i in range(t_data.shape[0]): + t = t_data[i] for mode in modes: - Qdata[i] = Qdata[i] * mode.getPartitionFunction(T) - logQ = scipy.interpolate.InterpolatedUnivariateSpline(Tdata, numpy.log(Qdata)) - #logQ = LinearInterpolator(Tdata, numpy.log(Qdata)) - - self.densStates, self.sumStates = getDensityOfStatesForst(Elist, logQ) - + q_data[i] = q_data[i] * mode.getPartitionFunction(t) + log_q = scipy.interpolate.InterpolatedUnivariateSpline(t_data, np.log(q_data)) + # log_q = LinearInterpolator(t_data, np.log(q_data)) + + self.densStates, self.sumStates = getDensityOfStatesForst(self.Elist, log_q) + for spec in self.species: self.densStates *= spec.conformer.spinMultiplicity * spec.conformer.opticalIsomers self.sumStates *= spec.conformer.spinMultiplicity * spec.conformer.opticalIsomers if self.densStates is None: raise ValueError("Species {} has no active modes".format(species.label)) - + @cython.boundscheck(False) @cython.wraparound(False) - def mapDensityOfStates(self, numpy.ndarray[numpy.float64_t,ndim=1] Elist, numpy.ndarray[numpy.int_t,ndim=1] Jlist=None): + def mapDensityOfStates(self, np.ndarray[np.float64_t,ndim=1] Elist, np.ndarray[np.int_t,ndim=1] Jlist=None): """ Return a mapping of the density of states for the configuration to the given energies `Elist` in J/mol and, if the J-rotor is not active, the total angular momentum quantum numbers `Jlist`. 
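
The method remaps a previously computed density of states onto the master-equation grid by spline-interpolating its logarithm and offsetting by the configuration's ground-state energy. A rough one-dimensional sketch with assumed array names and toy data::

    import numpy as np
    from scipy.interpolate import InterpolatedUnivariateSpline

    def map_density_of_states(e_old, rho_old, e_new, e0):
        """Sketch: remap a 1D density of states onto a new energy grid by
        interpolating log(rho), shifting by the ground-state energy e0 and
        rescaling by the ratio of grain sizes (energies in J/mol)."""
        positive = rho_old > 0
        spline = InterpolatedUnivariateSpline(e_old[positive], np.log(rho_old[positive]))
        rho_new = np.zeros_like(e_new)
        above = e_new >= e0
        rho_new[above] = np.exp(spline(e_new[above] - e0))
        return rho_new * (e_new[1] - e_new[0]) / (e_old[1] - e_old[0])

    e_old = np.linspace(0.0, 4.0e5, 200)          # energies relative to the species ground state
    rho_old = np.maximum(e_old, 1.0) ** 2         # toy rho(E)
    e_new = np.linspace(0.0, 5.0e5, 100)          # master-equation grid
    print(map_density_of_states(e_old, rho_old, e_new, 2.0e4)[:5])
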
""" - cdef numpy.ndarray[numpy.float64_t,ndim=2] densStates - cdef double E0, B1, B2, E, dJ - cdef int r0, r, s, t, Ngrains, NJ, J, J1, J2 - cdef list Blist - + cdef np.ndarray[np.float64_t,ndim=2] dens_states + cdef double e0, de0, b1, b2, e, d_j + cdef int r0, r, s, t, n_grains, n_j, j, j1, j2 + cdef list b_list + import scipy.interpolate for r in range(self.Elist.shape[0]): if self.densStates[r] > 0: break - f = scipy.interpolate.InterpolatedUnivariateSpline(self.Elist[r:], numpy.log(self.densStates[r:])) - - E0 = self.E0 - Ngrains = Elist.shape[0] - dE = Elist[1] - Elist[0] - dE0 = self.Elist[1] - self.Elist[0] - + f = scipy.interpolate.InterpolatedUnivariateSpline(self.Elist[r:], np.log(self.densStates[r:])) + + e0 = self.E0 + n_grains = Elist.shape[0] + d_e = Elist[1] - Elist[0] + d_e0 = self.Elist[1] - self.Elist[0] + if self.activeJRotor: - densStates = numpy.zeros((Ngrains,1)) - for r0 in range(Ngrains): - if Elist[r0] >= E0: break - for r in range(r0, Ngrains): - densStates[r,0] = f(Elist[r] - E0) - densStates[r0:,0] = numpy.exp(densStates[r0:,0]) + dens_states = np.zeros((n_grains,1)) + for r0 in range(n_grains): + if Elist[r0] >= e0: break + for r in range(r0, n_grains): + dens_states[r, 0] = f(Elist[r] - e0) + dens_states[r0:, 0] = np.exp(dens_states[r0:, 0]) else: assert Jlist is not None - NJ = Jlist.shape[0] - dJ = Jlist[1] - Jlist[0] - densStates = numpy.zeros((Ngrains, NJ)) - - Blist = [] + n_j = Jlist.shape[0] + d_j = Jlist[1] - Jlist[0] + dens_states = np.zeros((n_grains, n_j)) + + b_list = [] for spec in self.species: - Jrotor, Krotor = spec.conformer.getSymmetricTopRotors() - Blist.append(float(Jrotor.rotationalConstant.value_si)) - - for r0 in range(Ngrains): - if Elist[r0] >= E0: break - - if len(Blist) == 1: - B1 = Blist[0] * 11.962 # cm^-1 to J/mol - for r in range(r0, Ngrains): - for s in range(NJ): - J1 = Jlist[s] - E = Elist[r] - E0 - B1 * J1 * (J1 + 1) - if E < 0: break - densStates[r,s] = (2 * J1 + 1) * exp(f(E)) * dJ - - elif len(Blist) == 2: - B1 = Blist[0] * 11.962; B2 = Blist[1] * 11.962 # cm^-1 to J/mol - for r in range(r0, Ngrains): - for s in range(NJ): - J = Jlist[s] - for t in range(s+1): - J1 = Jlist[t]; J2 = J - J1 - E = Elist[r] - E0 - B1 * J1 * (J1 + 1) - B2 * J2 * (J2 + 1) - if E > 0: - densStates[r,s] += (2 * J1 + 1) * (2 * J2 + 2) * exp(f(E)) * dJ * dJ - - return densStates * dE / dE0 + j_rotor, k_rotor = spec.conformer.getSymmetricTopRotors() + b_list.append(float(j_rotor.rotationalConstant.value_si)) + + for r0 in range(n_grains): + if Elist[r0] >= e0: break + + if len(b_list) == 1: + b1 = b_list[0] * 11.962 # cm^-1 to J/mol + for r in range(r0, n_grains): + for s in range(n_j): + j1 = Jlist[s] + e = Elist[r] - e0 - b1 * j1 * (j1 + 1) + if e < 0: break + dens_states[r,s] = (2 * j1 + 1) * exp(f(e)) * d_j + + elif len(b_list) == 2: + b1 = b_list[0] * 11.962 # cm^-1 to J/mol + b2 = b_list[1] * 11.962 + for r in range(r0, n_grains): + for s in range(n_j): + j = Jlist[s] + for t in range(s + 1): + j1 = Jlist[t] + j2 = j - j1 + e = Elist[r] - e0 - b1 * j1 * (j1 + 1) - b2 * j2 * (j2 + 1) + if e > 0: + dens_states[r, s] += (2 * j1 + 1) * (2 * j2 + 2) * exp(f(e)) * d_j * d_j + + return dens_states * d_e / d_e0 @cython.boundscheck(False) @cython.wraparound(False) - def mapSumOfStates(self, numpy.ndarray[numpy.float64_t,ndim=1] Elist, numpy.ndarray[numpy.int_t,ndim=1] Jlist=None): + def mapSumOfStates(self, np.ndarray[np.float64_t,ndim=1] Elist, np.ndarray[np.int_t,ndim=1] Jlist=None): """ Return a mapping of the density of states for the configuration 
to the given energies `Elist` in J/mol and, if the J-rotor is not active, the total angular momentum quantum numbers `Jlist`. """ - cdef numpy.ndarray[numpy.float64_t,ndim=2] sumStates - cdef double E0, B1, B2, dJ - cdef int r0, r, s, Ngrains, NJ, J1, J2 - + cdef np.ndarray[np.float64_t,ndim=2] sum_states + cdef double e0, b1, b2, d_j + cdef int r0, r, s, n_grains, n_j, j1, j2 + import scipy.interpolate for r in range(self.Elist.shape[0]): if self.sumStates[r] > 0: break - f = scipy.interpolate.InterpolatedUnivariateSpline(self.Elist[r:], numpy.log(self.sumStates[r:])) - - E0 = self.E0 - Ngrains = len(Elist) - + f = scipy.interpolate.InterpolatedUnivariateSpline(self.Elist[r:], np.log(self.sumStates[r:])) + + e0 = self.E0 + n_grains = len(Elist) + if self.activeJRotor: - sumStates = numpy.zeros((Ngrains,1)) - for r0 in range(Ngrains): - if Elist[r0] >= E0: break - for r in range(r0, Ngrains): - sumStates[r,0] = f(Elist[r] - E0) - sumStates[r0:,0] = numpy.exp(sumStates[r0:,0]) + sum_states = np.zeros((n_grains,1)) + for r0 in range(n_grains): + if Elist[r0] >= e0: + break + for r in range(r0, n_grains): + sum_states[r, 0] = f(Elist[r] - e0) + sum_states[r0:, 0] = np.exp(sum_states[r0:, 0]) else: assert Jlist is not None - NJ = len(Jlist) - dJ = Jlist[1] - Jlist[0] - sumStates = numpy.zeros((Ngrains, NJ)) - - Blist = [] + n_j = len(Jlist) + d_j = Jlist[1] - Jlist[0] + sum_states = np.zeros((n_grains, n_j)) + + b_list = [] for spec in self.species: - Jrotor, Krotor = spec.conformer.getSymmetricTopRotors() - Blist.append(float(Jrotor.rotationalConstant.value_si)) - - for r0 in range(Ngrains): - if Elist[r0] >= E0: break - - if len(Blist) == 1: - B1 = Blist[0] * 11.962 # cm^-1 to J/mol - for r in range(r0, Ngrains): - for s in range(NJ): - J1 = Jlist[s] - E = Elist[r] - E0 - B1 * J1 * (J1 + 1) - if E < 0: break - sumStates[r,s] = (2 * J1 + 1) * exp(f(E)) * dJ - - elif len(Blist) == 2: - B1 = Blist[0] * 11.962; B2 = Blist[1] * 11.962 # cm^-1 to J/mol - for r in range(r0, Ngrains): - for s in range(NJ): - J = Jlist[s] - for t in range(s+1): - J1 = Jlist[t]; J2 = J - J1 - E = Elist[r] - E0 - B1 * J1 * (J1 + 1) - B2 * J2 * (J2 + 1) - if E > 0: - sumStates[r,s] += (2 * J1 + 1) * (2 * J2 + 1) * exp(f(E)) * dJ * dJ - - return sumStates + j_rotor, k_rotor = spec.conformer.getSymmetricTopRotors() + b_list.append(float(j_rotor.rotationalConstant.value_si)) + + for r0 in range(n_grains): + if Elist[r0] >= e0: + break + + if len(b_list) == 1: + b1 = b_list[0] * 11.962 # cm^-1 to J/mol + for r in range(r0, n_grains): + for s in range(n_j): + j1 = Jlist[s] + e = Elist[r] - e0 - b1 * j1 * (j1 + 1) + if e < 0: + break + sum_states[r,s] = (2 * j1 + 1) * exp(f(e)) * d_j + + elif len(b_list) == 2: + b1 = b_list[0] * 11.962 # cm^-1 to J/mol + b2 = b_list[1] * 11.962 + for r in range(r0, n_grains): + for s in range(n_j): + j = Jlist[s] + for t in range(s + 1): + j1 = Jlist[t] + j2 = j - j1 + e = Elist[r] - e0 - b1 * j1 * (j1 + 1) - b2 * j2 * (j2 + 1) + if e > 0: + sum_states[r, s] += (2 * j1 + 1) * (2 * j2 + 1) * exp(f(e)) * d_j * d_j + + return sum_states diff --git a/rmgpy/pdep/configurationTest.py b/rmgpy/pdep/configurationTest.py index 0655e45938..e77c7da649 100644 --- a/rmgpy/pdep/configurationTest.py +++ b/rmgpy/pdep/configurationTest.py @@ -34,15 +34,16 @@ import unittest +from rmgpy.pdep.collision import SingleExponentialDown from rmgpy.pdep.configuration import Configuration -from rmgpy.transport import TransportData -from rmgpy.statmech.translation import IdealGasTranslation +from rmgpy.species import 
Species +from rmgpy.statmech.conformer import Conformer from rmgpy.statmech.rotation import NonlinearRotor -from rmgpy.statmech.vibration import HarmonicOscillator from rmgpy.statmech.torsion import HinderedRotor -from rmgpy.statmech.conformer import Conformer -from rmgpy.species import Species -from rmgpy.pdep.collision import SingleExponentialDown +from rmgpy.statmech.translation import IdealGasTranslation +from rmgpy.statmech.vibration import HarmonicOscillator +from rmgpy.transport import TransportData + ################################################################################ diff --git a/rmgpy/pdep/cse.pyx b/rmgpy/pdep/cse.pyx index 4f0a168c6a..89cfa073bc 100644 --- a/rmgpy/pdep/cse.pyx +++ b/rmgpy/pdep/cse.pyx @@ -31,194 +31,193 @@ rate coefficients :math:`k(T,P)` using the chemically-significant eigenvalues method. """ -import numpy -cimport numpy import logging -import scipy.linalg -from libc.math cimport exp, log, sqrt +import numpy as np +cimport numpy as np +import scipy.linalg +from libc.math cimport exp, sqrt import rmgpy.constants as constants - -from rmgpy.pdep.me import generateFullMEMatrix from rmgpy.exceptions import ChemicallySignificantEigenvaluesError +from rmgpy.pdep.me import generateFullMEMatrix ################################################################################ -def applyChemicallySignificantEigenvaluesMethod(network, list lumpingOrder=None): - cdef numpy.ndarray[numpy.int_t,ndim=1] Jlist - cdef numpy.ndarray[numpy.int_t,ndim=3] indices - cdef numpy.ndarray[numpy.float64_t,ndim=1] Elist, S, Sinv, W0, W, eqRatios - cdef numpy.ndarray[numpy.float64_t,ndim=2] M, K, V0, V, Z, Zinv, Y, X - cdef numpy.ndarray[numpy.float64_t,ndim=3] densStates - cdef numpy.ndarray[numpy.float64_t,ndim=4] Kij, Gnj, Fim, pa +def applyChemicallySignificantEigenvaluesMethod(network, list lumpingOrder=None): + """A method for applying the Chemically Significant Eigenvalues approach for solving the master equation.""" + cdef np.ndarray[np.int_t,ndim=1] j_list + cdef np.ndarray[np.int_t,ndim=3] indices + cdef np.ndarray[np.float64_t,ndim=1] e_list, s_mat, s_mat_inv, omega0, omega, eq_ratios + cdef np.ndarray[np.float64_t,ndim=2] me_mat, k, eigen_vectors0, eigen_vectors, z_mat, z_mat_inv, y, x + cdef np.ndarray[np.float64_t,ndim=3] dens_states + cdef np.ndarray[np.float64_t,ndim=4] g_nj, pa cdef list lumping, unlumping - cdef double T, P, ymB - cdef int Nisom, Nreac, Nprod, Ngrains, NJ, Nchem, Ncse, Nrows + cdef double temperature, pressure, ym_b + cdef int n_isom, n_reac, n_prod, n_grains, n_j, n_chem, n_cse, n_rows cdef int i, n, r, s, index - T = network.T - P = network.P - Elist = network.Elist - Jlist = network.Jlist - densStates = network.densStates - collFreq = network.collFreq - Kij = network.Kij - Fim = network.Fim - Gnj = network.Gnj - E0 = network.E0 - eqRatios = network.eqRatios - Nisom = network.Nisom - Nreac = network.Nreac - Nprod = network.Nprod - Ngrains = network.Ngrains - NJ = network.NJ + temperature = network.T + pressure = network.P + e_list = network.Elist + j_list = network.Jlist + dens_states = network.dens_states + g_nj = network.Gnj + eq_ratios = network.eq_ratios + n_isom = network.Nisom + n_reac = network.Nreac + n_prod = network.Nprod + n_grains = network.Ngrains + n_j = network.NJ - Ngrains = len(Elist) - Nchem = Nisom + Nreac + n_grains = len(e_list) + n_chem = n_isom + n_reac - ymB = 1.0e-6 * P / constants.R / T + ym_b = 1.0e-6 * pressure / constants.R / temperature # Generate the full master equation matrix - M, indices = 
generateFullMEMatrix(network, products=False) - Nrows = M.shape[0] - M[:,Nrows-Nreac:] *= ymB + me_mat, indices = generateFullMEMatrix(network, products=False) + n_rows = me_mat.shape[0] + me_mat[:, n_rows-n_reac:] *= ym_b # Generate symmetrization matrix and its inverse - S = numpy.zeros(Nrows, numpy.float64) - Sinv = numpy.zeros_like(S) - for i in range(Nisom): - for r in range(Ngrains): - for s in range(NJ): - index = indices[i,r,s] + s_mat = np.zeros(n_rows, np.float64) + s_mat_inv = np.zeros_like(s_mat) + for i in range(n_isom): + for r in range(n_grains): + for s in range(n_j): + index = indices[i, r, s] if index > -1: - S[index] = sqrt(densStates[i,r,s] * (2*Jlist[s]+1) * exp(-Elist[r] / constants.R / T) * eqRatios[i]) - Sinv[index] = 1.0 / S[index] - for n in range(Nreac): - index = Nrows - Nreac + n - S[index] = sqrt(eqRatios[n+Nisom] / ymB) - Sinv[index] = 1.0 / S[index] - - # Symmetrize master equation matrix: M = S * Msymm * Sinv - # Since S and Sinv are diagonal we can do this very efficiently - for r in range(Nrows): - for s in range(Nrows): - M[r,s] = Sinv[r] * M[r,s] * S[s] + s_mat[index] = sqrt(dens_states[i, r, s] * (2 * j_list[s] + 1) + * exp(-e_list[r] / constants.R / temperature) * eq_ratios[i]) + s_mat_inv[index] = 1.0 / s_mat[index] + for n in range(n_reac): + index = n_rows - n_reac + n + s_mat[index] = sqrt(eq_ratios[n + n_isom] / ym_b) + s_mat_inv[index] = 1.0 / s_mat[index] + + # Symmetrize master equation matrix: me_mat = s_mat * Msymm * s_mat_inv + # Since s_mat and s_mat_inv are diagonal we can do this very efficiently + for r in range(n_rows): + for s in range(n_rows): + me_mat[r, s] = s_mat_inv[r] * me_mat[r, s] * s_mat[s] # DEBUG: Check that the matrix has been properly symmetrized - properlySymmetrized = True - for r in range(Nrows): + properly_symmetrized = True + for r in range(n_rows): for s in range(r): - if M[r,s] != 0: - if abs(M[r,s] - M[s,r]) > 0.01 * M[r,s]: - if M[r,s] > 1e-200 or M[s,r] > 1e-200: - print r, s, M[r,s], M[s,r] - properlySymmetrized = False - if not properlySymmetrized: + if me_mat[r, s] != 0: + if abs(me_mat[r, s] - me_mat[s,r]) > 0.01 * me_mat[r, s]: + if me_mat[r, s] > 1e-200 or me_mat[s,r] > 1e-200: + print(r, s, me_mat[r, s], me_mat[s,r]) + properly_symmetrized = False + if not properly_symmetrized: raise ChemicallySignificantEigenvaluesError('Master equation matrix not properly symmetrized.') # Get eigenvalues and eigenvectors - # We only need the slowest Nchem + 1 eigenmodes, so only compute those + # We only need the slowest n_chem + 1 eigenmodes, so only compute those try: - #W0, V0 = scipy.linalg.eigh(M, eigvals=(Nrows-Nchem-1,Nrows-1), overwrite_a=True, overwrite_b=True) - W0, V0 = scipy.linalg.eigh(M, overwrite_a=True, overwrite_b=True) - except numpy.linalg.LinAlgError: + # omega0, eigen_vectors0 = scipy.linalg.eigh(me_mat, eigvals=(n_rows-n_chem-1,n_rows-1), + # overwrite_a=True, overwrite_b=True) + omega0, eigen_vectors0 = scipy.linalg.eigh(me_mat, overwrite_a=True, overwrite_b=True) + except np.linalg.LinAlgError: raise ChemicallySignificantEigenvaluesError('Eigenvalue calculation failed to converge.') - + # We can't assume that eigh returns them in sorted order - ind = W0.argsort() + ind = omega0.argsort() # Count the number of distinct eigenvalues - Ncse = 0 - for i in range(Nchem): - if abs(W0[ind[-Nchem-1]] / W0[ind[-1-i]]) > 3.0: Ncse += 1 + n_cse = 0 + for i in range(n_chem): + if abs(omega0[ind[-n_chem - 1]] / omega0[ind[-1 - i]]) > 3.0: + n_cse += 1 - K = numpy.zeros((Nisom+Nreac+Nprod, Nisom+Nreac+Nprod), 
numpy.float64) - pa = numpy.zeros((Nisom, Nisom+Nreac, Ngrains, NJ), numpy.float64) + k = np.zeros((n_isom + n_reac + n_prod, n_isom + n_reac + n_prod), np.float64) + pa = np.zeros((n_isom, n_isom + n_reac, n_grains, n_j), np.float64) - - - # Check that we have the correct number of distinct eigenvalues and that # there is a zero eigenvalue if there should be (i.e. no product channels) # If not, print an error and return - if Ncse != Nchem: - logging.error('Could only identify {0:d} distinct eigenvalues, when {1:d} are required.'.format(Ncse, Nchem)) - logging.info('Last IERE = {0:g} First CSE = {1:g} Ratio = {2:g}'.format(W0[ind[-Nchem-1]], W0[ind[-Nchem]], W0[ind[-Nchem-1]] / W0[ind[-Nchem]])) - if lumpingOrder is None or len(lumpingOrder) < Nchem - Ncse: - # If we don't have a lumping order, then don't try to recover from - # this situation - return K, pa - lumping = lumpingOrder[0:Nchem-Ncse] - unlumping = [i for i in range(Nchem) if i not in lumping] + if n_cse != n_chem: + logging.error('Could only identify {0:d} distinct eigenvalues, when {1:d} are required.'.format(n_cse, n_chem)) + logging.info('Last IERE = {0:g} First CSE = {1:g} Ratio = {2:g}'.format( + omega0[ind[-n_chem-1]], omega0[ind[-n_chem]], omega0[ind[-n_chem-1]] / omega0[ind[-n_chem]])) + if lumpingOrder is None or len(lumpingOrder) < n_chem - n_cse: + # If we don't have a lumping order, then don't try to recover from this situation + return k, pa + lumping = lumpingOrder[0:n_chem - n_cse] + unlumping = [i for i in range(n_chem) if i not in lumping] - #elif Nprod == 0 and abs(W0[ind[-1]]) > 1e-3: + # elif n_prod == 0 and abs(omega0[ind[-1]]) > 1e-3: # logging.error('Could not identify zero eigenvalue.') - # logging.info('Zero CSE = {0:g} Last CSE = {1:g} Ratio = {2:g}'.format(W0[ind[-1]], W0[ind[-2]], W0[ind[-1]] / W0[ind[-2]])) + # logging.info('Zero CSE = {0:g} Last CSE = {1:g} Ratio = {2:g}'.format( + # omega0[ind[-1]], omega0[ind[-2]], omega0[ind[-1]] / omega0[ind[-2]])) else: lumping = [] - unlumping = range(Nchem) + unlumping = range(n_chem) # Extract the chemically-significant eigenvalues and eigenvectors - W = W0.take(ind[-Ncse:]) - V = V0.take(ind[-Ncse:], axis=1) + omega = omega0.take(ind[-n_cse:]) + eigen_vectors = eigen_vectors0.take(ind[-n_cse:], axis=1) # Unsymmetrize the eigenvectors - for j in range(Ncse): - V[:,j] *= S + for j in range(n_cse): + eigen_vectors[:, j] *= s_mat # Use the "long-time" method to extract the k(T,P) values # This method is more numerically robust # It also doesn't require finagling with various initial conditions # Source: Robertson, Pilling, Jitariu, and Hillier, Phys. Chem. Chem. Phys 9, p. 4085-4097 (2007). 
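
A toy sketch of the long-time reduction referenced in the comment above: keep the slowest eigenmodes of the symmetrized master-equation matrix, undo the symmetrization, lump the eigenvectors onto the chemical species, and rebuild a species-space rate matrix. The projector, the vector `s_vec` and the three-grain example are invented for illustration::

    import numpy as np

    def k_tp_from_cse(m_sym, s_vec, n_cse, projector):
        """Sketch of the 'long-time' reduction: keep the n_cse slowest
        eigenmodes of the symmetrized master-equation matrix, undo the
        symmetrization, lump each eigenvector onto the chemical species
        (rows of projector), and rebuild K = Z diag(lambda) Z^-1."""
        omega, vec = np.linalg.eigh(m_sym)            # eigenvalues in ascending order
        omega, vec = omega[-n_cse:], vec[:, -n_cse:]  # modes closest to zero decay slowest
        vec = s_vec[:, None] * vec                    # unsymmetrize the eigenvectors
        z = projector @ vec                           # summed population of each species per mode
        return z @ np.diag(omega) @ np.linalg.inv(z)

    # toy three-grain system lumped into two "species"
    m = np.array([[-2.0, 1.0, 0.5],
                  [1.0, -1.5, 0.2],
                  [0.5, 0.2, -0.7]])
    proj = np.array([[1.0, 1.0, 0.0],
                     [0.0, 0.0, 1.0]])
    print(k_tp_from_cse(m, np.ones(3), 2, proj))
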
- Z = numpy.zeros((Ncse,Ncse), numpy.float64) - Zinv = numpy.zeros((Ncse,Ncse), numpy.float64) - Y = numpy.zeros((Nprod,Ncse), numpy.float64) - for j in range(Ncse): - for i in range(Nisom): - if i in lumping: continue + z_mat = np.zeros((n_cse, n_cse), np.float64) + z_mat_inv = np.zeros((n_cse, n_cse), np.float64) + y = np.zeros((n_prod, n_cse), np.float64) + for j in range(n_cse): + for i in range(n_isom): + if i in lumping: + continue i1 = unlumping.index(i) - for r in range(Ngrains): - for s in range(NJ): - index = indices[i,r,s] + for r in range(n_grains): + for s in range(n_j): + index = indices[i, r, s] if index > -1: - Z[i1,j] += V[index,j] - for j in range(Ncse): - for i in range(Nisom): - for r in range(Ngrains): - for s in range(NJ): - index = indices[i,r,s] + z_mat[i1,j] += eigen_vectors[index, j] + for j in range(n_cse): + for i in range(n_isom): + for r in range(n_grains): + for s in range(n_j): + index = indices[i, r, s] if index > -1: - for n in range(Nprod): - Y[n,j] += Gnj[Nreac+n,i,r,s] * V[index,j] - for j in range(Ncse): - for n in range(Nreac): - index = Nrows - Nreac + n - Z[Nisom+n-len(lumping),j] += V[index,j] + for n in range(n_prod): + y[n, j] += g_nj[n_reac + n, i, r, s] * eigen_vectors[index, j] + for j in range(n_cse): + for n in range(n_reac): + index = n_rows - n_reac + n + z_mat[n_isom + n - len(lumping), j] += eigen_vectors[index, j] - Zinv = numpy.linalg.inv(Z) + z_mat_inv = np.linalg.inv(z_mat) for i, m in enumerate(unlumping): for j, n in enumerate(unlumping): - K[m,n] = numpy.sum(Z[i,:] * W * Zinv[:,j]) - for n in range(Nprod): + k[m, n] = np.sum(z_mat[i, :] * omega * z_mat_inv[:, j]) + for n in range(n_prod): for j, m in enumerate(unlumping): - K[Nisom+Nreac+n,m] = numpy.sum(Y[n,:] * Zinv[:,j]) + k[n_isom + n_reac + n, m] = np.sum(y[n, :] * z_mat_inv[:, j]) # Compute pa - X = numpy.dot(V, Zinv) + x = np.dot(eigen_vectors, z_mat_inv) for src, src1 in enumerate(unlumping): - for i in range(Nisom): - if i in lumping: continue - for r in range(Ngrains): - for s in range(NJ): - index = indices[i,r,s] + for i in range(n_isom): + if i in lumping: + continue + for r in range(n_grains): + for s in range(n_j): + index = indices[i, r, s] if index > -1: - pa[i,src1,r,s] = X[index,src] + pa[i, src1, r, s] = x[index, src] - pa[:,Nisom:,:,:] /= ymB - K[:,Nisom:] /= ymB + pa[:, n_isom:, :, :] /= ym_b + k[:, n_isom:] /= ym_b # Return the matrix of k(T,P) values and the pseudo-steady population distributions - return K, pa + return k, pa diff --git a/rmgpy/pdep/draw.py b/rmgpy/pdep/draw.py index 447497fb89..31e1e173d6 100644 --- a/rmgpy/pdep/draw.py +++ b/rmgpy/pdep/draw.py @@ -33,11 +33,13 @@ depiction of a pressure-dependent reaction network. 
""" -import numpy import logging +import numpy as np + from rmgpy.molecule.draw import MoleculeDrawer, createNewSurface + ################################################################################ class NetworkDrawer: @@ -277,7 +279,7 @@ def draw(self, network, format, path=None): break # Determine naive position of each well (one per column) - coordinates = numpy.zeros((len(wells), 2), numpy.float64) + coordinates = np.zeros((len(wells), 2), np.float64) x = padding for i in range(len(wells)): well = wells[i] @@ -301,7 +303,7 @@ def draw(self, network, format, path=None): wellRects.append([l + x - 0.5 * w, t + y + 6, w, h]) # Squish columns together from the left where possible until an isomer is encountered - oldLeft = numpy.min(coordinates[:,0]) + oldLeft = np.min(coordinates[:,0]) Nleft = wells.index(network.isomers[0])-1 columns = [] for i in range(Nleft, -1, -1): @@ -328,7 +330,7 @@ def draw(self, network, format, path=None): delta = x - coordinates[c,0] wellRects[c][0] += delta coordinates[c,0] += delta - newLeft = numpy.min(coordinates[:,0]) + newLeft = np.min(coordinates[:,0]) coordinates[:,0] -= newLeft - oldLeft # Squish columns together from the right where possible until an isomer is encountered diff --git a/rmgpy/pdep/me.pyx b/rmgpy/pdep/me.pyx index 4bf9dc2ed5..2b4c15e10d 100644 --- a/rmgpy/pdep/me.pyx +++ b/rmgpy/pdep/me.pyx @@ -30,105 +30,103 @@ Contains functionality for generating the master equation matrix for a given pressure-dependent reaction network. """ -import numpy -cimport numpy - +import numpy as np +cimport numpy as np from libc.math cimport exp import rmgpy.constants as constants ################################################################################ + cpdef generateFullMEMatrix(network, bint products=True): """ Generate the full master equation matrix for the network. """ - cdef numpy.ndarray[numpy.int_t,ndim=1] Jlist - cdef numpy.ndarray[numpy.int_t,ndim=3] indices - cdef numpy.ndarray[numpy.float64_t,ndim=1] Elist - cdef numpy.ndarray[numpy.float64_t,ndim=2] M - cdef numpy.ndarray[numpy.float64_t,ndim=3] densStates - cdef numpy.ndarray[numpy.float64_t,ndim=4] Kij, Gnj, Fim - cdef numpy.ndarray[numpy.float64_t,ndim=5] Mcoll - cdef double T, P, beta, val - cdef int Nisom, Nreac, Nprod, Ngrains, NJ + cdef np.ndarray[np.int_t,ndim=1] j_list + cdef np.ndarray[np.int_t,ndim=3] indices + cdef np.ndarray[np.float64_t,ndim=1] e_list + cdef np.ndarray[np.float64_t,ndim=2] me_mat + cdef np.ndarray[np.float64_t,ndim=3] dens_states + cdef np.ndarray[np.float64_t,ndim=4] k_ij, g_nj, f_im + cdef np.ndarray[np.float64_t,ndim=5] m_coll + cdef double temperature, pressure, beta, val + cdef int n_isom, n_reac, n_prod, n_grains, n_j cdef int i, n, r, s, u, v - T = network.T - P = network.P - Elist = network.Elist - Jlist = network.Jlist - densStates = network.densStates - Mcoll = network.Mcoll - Kij = network.Kij - Fim = network.Fim - Gnj = network.Gnj - Nisom = network.Nisom - Nreac = network.Nreac - Nprod = network.Nprod - Ngrains = network.Ngrains - NJ = network.NJ + temperature = network.T + # pressure = network.P # not used in this module + e_list = network.Elist + j_list = network.Jlist + dens_states = network.densStates + m_coll = network.Mcoll + k_ij = network.Kij + f_im = network.Fim + g_nj = network.Gnj + n_isom = network.Nisom + n_reac = network.Nreac + n_prod = network.Nprod + n_grains = network.Ngrains + n_j = network.NJ - beta = 1. / (constants.R * T) + beta = 1. 
/ (constants.R * temperature)

     # Construct accounting matrix
-    indices = -numpy.ones((Nisom,Ngrains,NJ), numpy.int)
-    Nrows = 0
-    for r in range(Ngrains):
-        for s in range(NJ):
-            for i in range(Nisom):
-                if densStates[i,r,s] > 0:
-                    indices[i,r,s] = Nrows
-                    Nrows += 1
-    Nrows += Nreac
+    indices = -np.ones((n_isom,n_grains,n_j), np.int)
+    n_rows = 0
+    for r in range(n_grains):
+        for s in range(n_j):
+            for i in range(n_isom):
+                if dens_states[i, r, s] > 0:
+                    indices[i, r, s] = n_rows
+                    n_rows += 1
+    n_rows += n_reac
     if products:
-        Nrows += Nprod
+        n_rows += n_prod
 
     # Construct full ME matrix
-    M = numpy.zeros([Nrows,Nrows], numpy.float64)
+    me_mat = np.zeros([n_rows,n_rows], np.float64)
 
     # Collision terms
-    for i in range(Nisom):
-        for r in range(Ngrains):
-            for s in range(NJ):
-                if indices[i,r,s] > -1:
-                    for u in range(r, Ngrains):
-                        for v in range(s, NJ):
-                            M[indices[i,r,s], indices[i,u,v]] = Mcoll[i,r,s,u,v]
-                            M[indices[i,u,v], indices[i,r,s]] = Mcoll[i,u,v,r,s]
+    for i in range(n_isom):
+        for r in range(n_grains):
+            for s in range(n_j):
+                if indices[i, r, s] > -1:
+                    for u in range(r, n_grains):
+                        for v in range(s, n_j):
+                            me_mat[indices[i, r, s], indices[i,u,v]] = m_coll[i, r, s, u, v]
+                            me_mat[indices[i, u, v], indices[i,r,s]] = m_coll[i, u, v, r, s]
 
     # Isomerization terms
-    for i in range(Nisom):
+    for i in range(n_isom):
         for j in range(i):
-            if Kij[i,j,Ngrains-1,0] > 0 or Kij[j,i,Ngrains-1,0] > 0:
-                for r in range(Ngrains):
-                    for s in range(NJ):
-                        u = indices[i,r,s]; v = indices[j,r,s]
+            if k_ij[i, j, n_grains - 1,0] > 0 or k_ij[j, i, n_grains - 1,0] > 0:
+                for r in range(n_grains):
+                    for s in range(n_j):
+                        u, v = indices[i, r, s], indices[j, r, s]
                         if u > -1 and v > -1:
-                            M[v,u] = Kij[j,i,r,s]
-                            M[u,u] -= Kij[j,i,r,s]
-                            M[u,v] = Kij[i,j,r,s]
-                            M[v,v] -= Kij[i,j,r,s]
+                            me_mat[v, u] = k_ij[j, i, r, s]
+                            me_mat[u, u] -= k_ij[j, i, r, s]
+                            me_mat[u, v] = k_ij[i, j, r, s]
+                            me_mat[v, v] -= k_ij[i, j, r, s]
 
     # Association/dissociation terms
-    for i in range(Nisom):
-        for n in range(Nreac+Nprod):
-            if Gnj[n,i,Ngrains-1,0] > 0:
-                for r in range(Ngrains):
-                    for s in range(NJ):
-                        u = indices[i,r,s]
-                        if products:
-                            v = Nrows - Nreac - Nprod + n
-                        else:
-                            v = Nrows - Nreac + n
+    for i in range(n_isom):
+        for n in range(n_reac + n_prod):
+            if g_nj[n, i, n_grains - 1,0] > 0:
+                for r in range(n_grains):
+                    for s in range(n_j):
+                        u = indices[i, r, s]
+                        v = n_rows - n_reac - n_prod + n if products else n_rows - n_reac + n
                         if u > -1:
-                            M[u,u] -= Gnj[n,i,r,s]
-                            if n < Nreac or products:
-                                M[v,u] = Gnj[n,i,r,s]
-                            if n < Nreac:
-                                val = Fim[i,n,r,s] * densStates[n+Nisom,r,s] * (2*Jlist[s]+1) * exp(-Elist[r] * beta)
-                                M[u,v] = val
-                                M[v,v] -= val
+                            me_mat[u, u] -= g_nj[n, i, r, s]
+                            if n < n_reac or products:
+                                me_mat[v, u] = g_nj[n, i, r, s]
+                            if n < n_reac:
+                                val = f_im[i, n, r, s] * dens_states[n + n_isom, r, s] \
+                                      * (2 * j_list[s] + 1) * exp(-e_list[r] * beta)
+                                me_mat[u,v] = val
+                                me_mat[v,v] -= val
 
-    return M, indices
+    return me_mat, indices
diff --git a/rmgpy/pdep/msc.pyx b/rmgpy/pdep/msc.pyx
index 9ff24ff591..d974c25be2 100644
--- a/rmgpy/pdep/msc.pyx
+++ b/rmgpy/pdep/msc.pyx
@@ -30,149 +30,157 @@ Contains functionality for computing pressure-dependent phenomenological rate
 coefficients :math:`k(T,P)` using the modified strong collision method. 
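
generateFullMEMatrix in the rmgpy/pdep/me.pyx hunk above places per-isomer collision matrices on the block diagonal and couples the blocks with microcanonical rate coefficients. A toy numpy sketch of that block structure for two isomers on a shared energy grid; the names and data are invented, and the association/dissociation channels are omitted for brevity::

    import numpy as np

    def assemble_full_me_matrix(m_coll, k_isom):
        """Sketch of the block structure: m_coll[i] is the (n_grains x n_grains)
        collision matrix of isomer i; k_isom[i][j] is the microcanonical rate
        k(E) for isomerization j -> i, one value per grain."""
        n_isom = len(m_coll)
        n_grains = m_coll[0].shape[0]
        blocks = [slice(i * n_grains, (i + 1) * n_grains) for i in range(n_isom)]
        m = np.zeros((n_isom * n_grains, n_isom * n_grains))
        for i in range(n_isom):
            m[blocks[i], blocks[i]] += m_coll[i]             # collisional energy transfer
        for i in range(n_isom):
            for j in range(n_isom):
                if i != j:
                    k_e = np.asarray(k_isom[i][j])
                    m[blocks[i], blocks[j]] += np.diag(k_e)  # gain of isomer i from j
                    m[blocks[j], blocks[j]] -= np.diag(k_e)  # matching loss from isomer j
        return m

    coll = np.array([[-1.0, 0.5, 0.0], [0.5, -1.0, 0.5], [0.0, 0.5, -1.0]])
    k_12 = [0.0, 0.1, 0.4]   # isomer 2 -> isomer 1 at each grain
    k_21 = [0.0, 0.2, 0.8]   # isomer 1 -> isomer 2 at each grain
    print(assemble_full_me_matrix([coll, coll], [[None, k_12], [k_21, None]]).shape)
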
""" -import numpy -cimport numpy +import logging -from libc.math cimport exp, log, sqrt +import numpy as np +cimport numpy as np +from libc.math cimport exp import rmgpy.constants as constants from rmgpy.exceptions import ModifiedStrongCollisionError -import logging + ################################################################################ cpdef applyModifiedStrongCollisionMethod(network, str efficiencyModel='default'): - - cdef numpy.ndarray[numpy.int_t,ndim=1] Jlist - cdef numpy.ndarray[numpy.float64_t,ndim=1] Elist, collFreq, collEff, dEdown, E0, Ereac - cdef numpy.ndarray[numpy.float64_t,ndim=2] A, b, K, x - cdef numpy.ndarray[numpy.float64_t,ndim=3] densStates - cdef numpy.ndarray[numpy.float64_t,ndim=4] Kij, Gnj, Fim, pa - cdef double T, P, E, Emin, val, beta - cdef int Nisom, Nreac, Nprod, Ngrains, NJ + """A method for applying the Modified Strong Collision approach for solving the master equation.""" + cdef np.ndarray[np.int_t,ndim=1] j_list + cdef np.ndarray[np.float64_t,ndim=1] e_list, coll_freq, coll_eff, d_e_down, e0, e_reac + cdef np.ndarray[np.float64_t,ndim=2] a_mat, b, k, x + cdef np.ndarray[np.float64_t,ndim=3] dens_states + cdef np.ndarray[np.float64_t,ndim=4] k_ij, g_nj, f_im, pa + cdef double temperature, val, beta + cdef int n_isom, n_reac, n_prod, n_grains, n_j cdef int i, j, n, r, s, start, src - T = network.T - P = network.P - Elist = network.Elist - Jlist = network.Jlist - densStates = network.densStates - collFreq = network.collFreq - Kij = network.Kij - Fim = network.Fim - Gnj = network.Gnj - E0 = network.E0 - Nisom = network.Nisom - Nreac = network.Nreac - Nprod = network.Nprod - Ngrains = network.Ngrains - NJ = network.NJ + temperature = network.T + e_list = network.Elist + j_list = network.Jlist + dens_states = network.densStates + coll_freq = network.collFreq + k_ij = network.Kij + f_im = network.Fim + g_nj = network.Gnj + e0 = network.E0 + n_isom = network.Nisom + n_reac = network.Nreac + n_prod = network.Nprod + n_grains = network.Ngrains + n_j = network.NJ - if numpy.isnan(densStates.sum()): + if np.isnan(dens_states.sum()): raise AttributeError('Network {0} has NaN in the density of states. ' 'This will prevent adequate solution to the network'.format(network.label)) - K = numpy.zeros((Nisom+Nreac+Nprod, Nisom+Nreac+Nprod), numpy.float64) - pa = numpy.zeros((Nisom,Nisom+Nreac,Ngrains,NJ), numpy.float64) + k = np.zeros((n_isom + n_reac + n_prod, n_isom + n_reac + n_prod), np.float64) + pa = np.zeros((n_isom, n_isom + n_reac, n_grains, n_j), np.float64) - beta = 1. / (constants.R * T) # [=] mol/kJ + beta = 1. 
/ (constants.R * temperature) # [=] mol/kJ # Determine the starting grain for the calculation - Ereac = numpy.zeros(Nisom) - start = Ngrains - for i in range(Nisom): - for r in range(Ngrains): - if (Kij[:,i,r,0] > 0).any() or (Gnj[:,i,r,0] > 0).any(): + e_reac = np.zeros(n_isom) + start = n_grains + for i in range(n_isom): + for r in range(n_grains): + if (k_ij[:, i, r, 0] > 0).any() or (g_nj[:, i, r, 0] > 0).any(): if start > r: start = r - Ereac[i] = Elist[r] + e_reac[i] = e_list[r] break else: raise ModifiedStrongCollisionError('Unable to determine starting grain; check active-state energies.') - dEdown = numpy.zeros(Nisom) - for i in range(Nisom): - dEdown[i] = network.isomers[i].species[0].energyTransferModel.getAlpha(T) + d_e_down = np.zeros(n_isom) + for i in range(n_isom): + d_e_down[i] = network.isomers[i].species[0].energyTransferModel.getAlpha(temperature) # Compute collision efficiencies - collEff = numpy.ones(Nisom) + coll_eff = np.ones(n_isom) if efficiencyModel == 'default': - for i in range(Nisom): - collEff[i] = network.isomers[i].species[0].energyTransferModel.calculateCollisionEfficiency(T, Elist, Jlist, densStates[i,:,:], E0[i], Ereac[i]) + for i in range(n_isom): + coll_eff[i] = network.isomers[i].species[0].energyTransferModel.calculateCollisionEfficiency( + temperature, e_list, j_list, dens_states[i, :, :], e0[i], e_reac[i]) elif efficiencyModel == 'none': pass else: raise ValueError('Unknown efficiency model "{0}".'.format(efficiencyModel)) # Zero LHS matrix and RHS vectors - A = numpy.zeros((Nisom,Nisom), numpy.float64) - b = numpy.zeros((Nisom,Nisom+Nreac), numpy.float64) + a_mat = np.zeros((n_isom, n_isom), np.float64) + b = np.zeros((n_isom, n_isom + n_reac), np.float64) # Iterate over the grains, calculating the PSSA concentrations - for r in range(start, Ngrains): - for s in range(NJ): + for r in range(start, n_grains): + for s in range(n_j): # Populate LHS matrix # Collisional deactivation - for i in range(Nisom): - A[i,i] = -collFreq[i] * collEff[i] + for i in range(n_isom): + a_mat[i, i] = -coll_freq[i] * coll_eff[i] # Isomerization reactions - for i in range(Nisom): + for i in range(n_isom): for j in range(i): - A[i,j] = Kij[i,j,r,s] - A[j,j] -= Kij[i,j,r,s] - A[j,i] = Kij[j,i,r,s] - A[i,i] -= Kij[j,i,r,s] + a_mat[i, j] = k_ij[i, j, r, s] + a_mat[j, j] -= k_ij[i, j, r, s] + a_mat[j, i] = k_ij[j, i, r, s] + a_mat[i, i] -= k_ij[j, i, r, s] # Dissociation reactions - for n in range(Nreac+Nprod): - for j in range(Nisom): - A[j,j] -= Gnj[n,j,r,s] + for n in range(n_reac + n_prod): + for j in range(n_isom): + a_mat[j, j] -= g_nj[n, j, r, s] # Populate RHS vectors, one per isomer and reactant - for i in range(Nisom): + for i in range(n_isom): # Thermal activation via collisions - b[i,i] = collFreq[i] * collEff[i] * densStates[i,r,s] * (2*Jlist[s]+1) * exp(-Elist[r] * beta) - if numpy.isnan(b[i,i]): + b[i, i] = coll_freq[i] * coll_eff[i] * dens_states[i, r, s] \ + * (2 * j_list[s] + 1) * exp(-e_list[r] * beta) + if np.isnan(b[i, i]): logging.warning('Non-number generated for grain {0} for isomer {1}'.format(r,network.isomers[i])) - for n in range(Nisom, Nisom+Nreac): + for n in range(n_isom, n_isom + n_reac): # Chemical activation via association reaction - for j in range(Nisom): - b[j,n] = Fim[j,n-Nisom,r,s] * densStates[n,r,s] * (2*Jlist[s]+1) * exp(-Elist[r] * beta) - if numpy.isnan(b[j,n]): - logging.warning('Non-number generated for grain {0} for isomer {1} and isomer/reactant {2}'.format(r,network.isomers[j],(network.reactants)[n-Nisom])) - 
logging.debug(str([Fim[j,n-Nisom,r,s], densStates[n,r,s], (2*Jlist[s]+1), exp(-Elist[r] * beta), Elist[r], beta]))
+                for j in range(n_isom):
+                    b[j, n] = f_im[j, n - n_isom, r, s] * dens_states[n, r, s] * (2 * j_list[s] + 1) \
+                              * exp(-e_list[r] * beta)
+                    if np.isnan(b[j, n]):
+                        logging.warning('Non-number generated for grain {0} for isomer {1} and isomer/reactant '
+                                        '{2}'.format(r, network.isomers[j], network.reactants[n - n_isom]))
+                        logging.debug(str([f_im[j, n - n_isom, r, s], dens_states[n, r, s],
+                                           (2 * j_list[s] + 1), exp(-e_list[r] * beta), e_list[r], beta]))
 
             # Solve for steady-state population
-            x = -numpy.linalg.solve(A, b)
-            for n in range(Nisom+Nreac):
-                for i in range(Nisom):
-                    pa[i,n,r,s] = x[i,n]
+            x = -np.linalg.solve(a_mat, b)
+            for n in range(n_isom + n_reac):
+                for i in range(n_isom):
+                    pa[i, n, r, s] = x[i, n]
 
     # Check that our populations are all positive
     if not (pa >= 0).all():
-        for reactant_index in range(len(pa[:,0,0,0])):
-            for isomer_index in range(len(pa[0,:,0,0])):
-                populations =pa[reactant_index,isomer_index,:,:]
-                if not (populations >=0).all():
-                    logging.debug('A negative concentration was encountered for reactant/isomer {0} and isomer {1} with matrix\n{2}'.format(network.isomers+network.reactants, network.reactants,populations))
+        for reactant_index in range(len(pa[:, 0, 0, 0])):
+            for isomer_index in range(len(pa[0, :, 0, 0])):
+                populations =pa[reactant_index,isomer_index, :, :]
+                if not (populations >= 0).all():
+                    logging.debug('A negative concentration was encountered for reactant/isomer {0} and isomer {1} '
+                                  'with matrix\n{2}'.format(network.isomers + network.reactants,
+                                                            network.reactants,populations))
                     raise ModifiedStrongCollisionError('A negative steady-state concentration was encountered.')
 
     # Compute rate coefficients from PSSA concentrations
-    for src in range(Nisom+Nreac):
+    for src in range(n_isom + n_reac):
         # Calculate stabilization rates (i.e.) R + R' --> Ai or M --> Ai
-        for i in range(Nisom):
+        for i in range(n_isom):
             if i != src:
-                val = collFreq[i] * collEff[i] * numpy.sum(pa[i,src,:,:])
-                K[i,src] += val
-                K[src,src] -= val
+                val = coll_freq[i] * coll_eff[i] * np.sum(pa[i,src, :, :])
+                k[i, src] += val
+                k[src, src] -= val
         # Calculate dissociation rates (i.e.) 
R + R' --> Bn + Cn or M --> Bn + Cn - for n in range(Nreac+Nprod): - for j in range(Nisom): - if n + Nisom != src: - val = numpy.sum(Gnj[n,j,:,:] * pa[j,src,:,:]) - K[n+Nisom,src] += val - K[src,src] -= val + for n in range(n_reac+n_prod): + for j in range(n_isom): + if n + n_isom != src: + val = np.sum(g_nj[n, j, :, :] * pa[j, src, :, :]) + k[n + n_isom, src] += val + k[src, src] -= val # To complete pa we need the Boltzmann distribution at low energies - for i in range(Nisom): - for r in range(Ngrains): - for s in range(NJ): - if pa[i,i,r,s] == 0: pa[i,i,r,s] = densStates[i,r,s] * (2*Jlist[s]+1) * exp(-Elist[r] * beta) + for i in range(n_isom): + for r in range(n_grains): + for s in range(n_j): + if pa[i, i, r, s] == 0: + pa[i,i, r, s] = dens_states[i, r, s] * (2 * j_list[s] + 1) * exp(-e_list[r] * beta) # Return the matrix of k(T,P) values and the pseudo-steady population distributions - return K, pa + return k, pa diff --git a/rmgpy/pdep/networkTest.py b/rmgpy/pdep/networkTest.py index 868cc288d8..58a70889a5 100644 --- a/rmgpy/pdep/networkTest.py +++ b/rmgpy/pdep/networkTest.py @@ -34,17 +34,18 @@ import unittest -from rmgpy.pdep.network import Network +from rmgpy.pdep.collision import SingleExponentialDown from rmgpy.pdep.configuration import Configuration -from rmgpy.transport import TransportData -from rmgpy.statmech.translation import Translation, IdealGasTranslation -from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor -from rmgpy.statmech.vibration import Vibration, HarmonicOscillator -from rmgpy.statmech.torsion import Torsion, HinderedRotor -from rmgpy.statmech.conformer import Conformer -from rmgpy.species import Species, TransitionState +from rmgpy.pdep.network import Network from rmgpy.reaction import Reaction -from rmgpy.pdep.collision import SingleExponentialDown +from rmgpy.species import Species, TransitionState +from rmgpy.statmech.conformer import Conformer +from rmgpy.statmech.rotation import NonlinearRotor +from rmgpy.statmech.torsion import HinderedRotor +from rmgpy.statmech.translation import IdealGasTranslation +from rmgpy.statmech.vibration import HarmonicOscillator +from rmgpy.transport import TransportData + ################################################################################ diff --git a/rmgpy/pdep/reaction.pxd b/rmgpy/pdep/reaction.pxd deleted file mode 100644 index 85e1fe8153..0000000000 --- a/rmgpy/pdep/reaction.pxd +++ /dev/null @@ -1,28 +0,0 @@ -############################################################################### -# # -# RMG - Reaction Mechanism Generator # -# # -# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), # -# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # -# # -# Permission is hereby granted, free of charge, to any person obtaining a # -# copy of this software and associated documentation files (the 'Software'), # -# to deal in the Software without restriction, including without limitation # -# the rights to use, copy, modify, merge, publish, distribute, sublicense, # -# and/or sell copies of the Software, and to permit persons to whom the # -# Software is furnished to do so, subject to the following conditions: # -# # -# The above copyright notice and this permission notice shall be included in # -# all copies or substantial portions of the Software. 
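
In the rmgpy/pdep/msc.pyx hunk above, each energy grain yields a small linear system whose solution is the pseudo-steady population of every isomer for every source; the k(T,P) values are then sums of stabilization and dissociation fluxes over those populations. A two-isomer sketch of the per-grain solve, with invented rate values and the dissociation channels left out::

    import numpy as np

    def msc_grain_solve(coll_freq, coll_eff, k_isom, boltz):
        """Sketch of the per-grain modified-strong-collision solve for two isomers.
        coll_freq, coll_eff: collision frequency and efficiency for each isomer;
        k_isom[i][j]: isomerization rate j -> i at this grain; boltz: Boltzmann
        population of each isomer at this grain."""
        a = np.array([[-coll_freq[0] * coll_eff[0] - k_isom[1][0], k_isom[0][1]],
                      [k_isom[1][0], -coll_freq[1] * coll_eff[1] - k_isom[0][1]]])
        b = np.diag([coll_freq[0] * coll_eff[0] * boltz[0],
                     coll_freq[1] * coll_eff[1] * boltz[1]])
        return -np.linalg.solve(a, b)   # one column of pseudo-steady populations per source

    pa = msc_grain_solve([1.0e9, 2.0e9], [0.2, 0.3],
                         [[0.0, 5.0e6], [8.0e6, 0.0]], [1.0e-4, 2.0e-4])
    print(pa)
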
# -# # -# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # -# DEALINGS IN THE SOFTWARE. # -# # -############################################################################### - -cimport numpy diff --git a/rmgpy/pdep/reaction.pyx b/rmgpy/pdep/reaction.pyx index 1dd104e415..64b350b02a 100644 --- a/rmgpy/pdep/reaction.pyx +++ b/rmgpy/pdep/reaction.pyx @@ -35,24 +35,27 @@ but requires detailed information about the transition state and reactants. """ import logging -import numpy -cimport numpy + cimport cython -from libc.math cimport abs, exp, sqrt, cosh, log +import numpy as np +cimport numpy as np +from libc.math cimport abs, exp, sqrt, log cimport rmgpy.constants as constants +from rmgpy.exceptions import PressureDependenceError from rmgpy.kinetics.arrhenius cimport Arrhenius from rmgpy.statmech.schrodinger import convolve -from rmgpy.exceptions import PressureDependenceError + ################################################################################ + @cython.boundscheck(False) @cython.wraparound(False) def calculateMicrocanonicalRateCoefficient(reaction, - numpy.ndarray[numpy.float64_t,ndim=1] Elist, - numpy.ndarray[numpy.int_t,ndim=1] Jlist, - numpy.ndarray[numpy.float64_t,ndim=2] reacDensStates, - numpy.ndarray[numpy.float64_t,ndim=2] prodDensStates=None, + np.ndarray[np.float64_t,ndim=1] Elist, + np.ndarray[np.int_t,ndim=1] Jlist, + np.ndarray[np.float64_t,ndim=2] reacDensStates, + np.ndarray[np.float64_t,ndim=2] prodDensStates=None, double T=0.0): """ Calculate the microcanonical rate coefficient :math:`k(E)` for the reaction @@ -77,58 +80,59 @@ def calculateMicrocanonicalRateCoefficient(reaction, expression to determine the reverse kinetics, and in certain cases in the inverse Laplace transform method. 
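
When only one direction of k(E) is computed directly, the routine fills in the other by microcanonical detailed balance against the two densities of states. A short sketch of that step; `c0_inv` stands in for the inverse standard concentration applied once per change in molecularity, and the data are toy values::

    import numpy as np

    def reverse_k_by_detailed_balance(kf, rho_reac, rho_prod, c0_inv, delta_n):
        """Sketch of microcanonical detailed balance:
            kf(E) * rho_reac(E) = kr(E) * rho_prod(E),
        with one factor of the inverse standard concentration per change in
        molecularity, delta_n = len(products) - len(reactants)."""
        kr = np.zeros_like(kf)
        ok = rho_prod > 0
        kr[ok] = kf[ok] * rho_reac[ok] / rho_prod[ok]
        return kr * c0_inv ** delta_n

    e = np.linspace(0.0, 4.0e5, 50)                 # J/mol
    kf = 1.0e8 * np.exp(-2.0e5 / np.maximum(e, 1.0))
    kr = reverse_k_by_detailed_balance(kf, np.maximum(e, 1.0) ** 2.0,
                                       np.maximum(e, 1.0) ** 2.2, 0.083, 1)
    print(kr[-1])
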
""" - cdef int Ngrains, NJ, r, s - cdef numpy.ndarray[numpy.float64_t,ndim=2] kf, kr - cdef double C0inv + cdef int n_grains, n_j, r, s + cdef np.ndarray[np.float64_t,ndim=2] kf, kr + cdef double c0_inv cdef list modes - cdef bint reactantStatesKnown, productStatesKnown, forward - - Ngrains = Elist.shape[0] - NJ = Jlist.shape[0] - kf = numpy.zeros((Ngrains,NJ)) - kr = numpy.zeros_like(kf) - activeJRotor = Jlist is None - activeKRotor = False - - reactantStatesKnown = reacDensStates is not None and reacDensStates.any() - productStatesKnown = prodDensStates is not None and prodDensStates.any() + cdef bint reactant_states_known, product_states_known, forward + + n_grains = Elist.shape[0] + n_j = Jlist.shape[0] + kf = np.zeros((n_grains,n_j)) + kr = np.zeros_like(kf) + active_j_rotor = Jlist is None + active_k_rotor = False + + reactant_states_known = reacDensStates is not None and reacDensStates.any() + product_states_known = prodDensStates is not None and prodDensStates.any() forward = True - - C0inv = constants.R * T / 1e5 - + + c0_inv = constants.R * T / 1.0e5 + if reaction.canTST(): - - modes = reaction.transitionState.conformer.getActiveModes(activeJRotor=activeJRotor, activeKRotor=activeKRotor) - + modes = reaction.transitionState.conformer.getActiveModes(activeJRotor=active_j_rotor, + activeKRotor=active_k_rotor) + # We've been provided with molecular degree of freedom data for the # transition state, so let's use the more accurate RRKM theory logging.debug('Calculating microcanonical rate coefficient using RRKM theory for {0}...'.format(reaction)) - if reactantStatesKnown and (reaction.isIsomerization() or reaction.isDissociation()): + if reactant_states_known and (reaction.isIsomerization() or reaction.isDissociation()): kf = applyRRKMTheory(reaction.transitionState, Elist, Jlist, reacDensStates) - kf *= C0inv**(len(reaction.reactants) - 1) + kf *= c0_inv ** (len(reaction.reactants) - 1) forward = True - elif productStatesKnown and reaction.isAssociation(): + elif product_states_known and reaction.isAssociation(): kr = applyRRKMTheory(reaction.transitionState, Elist, Jlist, prodDensStates) - kr *= C0inv**(len(reaction.products) - 1) + kr *= c0_inv ** (len(reaction.products) - 1) forward = False else: - raise PressureDependenceError('Unable to compute k(E) values via RRKM theory for path reaction "{0}".'.format(reaction)) - + raise PressureDependenceError('Unable to compute k(E) values via RRKM theory for path reaction ' + '"{0}".'.format(reaction)) + elif reaction.kinetics is not None: # We've been provided with high-pressure-limit rate coefficient data, # so let's use the less accurate inverse Laplace transform method logging.debug('Calculating microcanonical rate coefficient using ILT method for {0}...'.format(reaction)) - if reactantStatesKnown: + if reactant_states_known: kinetics = reaction.kinetics if reaction.network_kinetics is None else reaction.network_kinetics kf = applyInverseLaplaceTransformMethod(reaction.transitionState, kinetics, Elist, Jlist, reacDensStates, T) forward = True - elif productStatesKnown: + elif product_states_known: kinetics = reaction.generateReverseRateCoefficient(network_kinetics=True) kr = applyInverseLaplaceTransformMethod(reaction.transitionState, kinetics, Elist, Jlist, prodDensStates, T) forward = False else: - raise PressureDependenceError('Unable to compute k(E) values via ILT method for path reaction "{0}".'.format(reaction)) - + raise PressureDependenceError('Unable to compute k(E) values via ILT method for path reaction ' + 
'"{0}".'.format(reaction)) else: raise PressureDependenceError('Unable to compute k(E) values for path reaction "{0}".'.format(reaction)) @@ -138,44 +142,44 @@ def calculateMicrocanonicalRateCoefficient(reaction, # which violates detailed balance # To fix, we set the forward k(E) to zero wherever this is true # (This is correct within the accuracy of discretizing the energy grains) - if reactantStatesKnown and productStatesKnown: - for r in range(Ngrains): - for s in range(NJ): - if reacDensStates[r,s] != 0 and prodDensStates[r,s] != 0: + if reactant_states_known and product_states_known: + for r in range(n_grains): + for s in range(n_j): + if reacDensStates[r, s] != 0 and prodDensStates[r, s] != 0: break - kf[r,s] = 0 - kr[r,s] = 0 - + kf[r, s] = 0 + kr[r, s] = 0 + # Get the reverse microcanonical rate coefficient if possible - if forward and productStatesKnown: + if forward and product_states_known: # We computed the forward rate coefficient above # Thus we need to compute the reverse rate coefficient here - kr = numpy.zeros_like(kf) - for s in range(NJ): - for r in range(Ngrains): - if prodDensStates[r,s] != 0: - kr[r,s] = kf[r,s] * reacDensStates[r,s] / prodDensStates[r,s] - kr *= C0inv**(len(reaction.products) - len(reaction.reactants)) - elif not forward and reactantStatesKnown: + kr = np.zeros_like(kf) + for s in range(n_j): + for r in range(n_grains): + if prodDensStates[r, s] != 0: + kr[r, s] = kf[r, s] * reacDensStates[r, s] / prodDensStates[r, s] + kr *= c0_inv ** (len(reaction.products) - len(reaction.reactants)) + elif not forward and reactant_states_known: # We computed the reverse rate coefficient above # Thus we need to compute the forward rate coefficient here - kf = numpy.zeros_like(kr) - for s in range(NJ): - for r in range(Ngrains): - if reacDensStates[r,s] != 0: - kf[r,s] = kr[r,s] * prodDensStates[r,s] / reacDensStates[r,s] - kf *= C0inv**(len(reaction.reactants) - len(reaction.products)) + kf = np.zeros_like(kr) + for s in range(n_j): + for r in range(n_grains): + if reacDensStates[r, s] != 0: + kf[r, s] = kr[r, s] * prodDensStates[r, s] / reacDensStates[r, s] + kf *= c0_inv ** (len(reaction.reactants) - len(reaction.products)) logging.debug('Finished finding microcanonical rate coefficients for path reaction {}'.format(reaction)) logging.debug('The forward and reverse rates are found to be {0} and {1} respectively.'.format(kf, kr)) - + return kf, kr @cython.boundscheck(False) @cython.wraparound(False) def applyRRKMTheory(transitionState, - numpy.ndarray[numpy.float64_t,ndim=1] Elist, - numpy.ndarray[numpy.int_t,ndim=1] Jlist, - numpy.ndarray[numpy.float64_t,ndim=2] densStates): + np.ndarray[np.float64_t,ndim=1] Elist, + np.ndarray[np.int_t,ndim=1] Jlist, + np.ndarray[np.float64_t,ndim=2] densStates): """ Calculate the microcanonical rate coefficient for a reaction using RRKM theory, where `transitionState` is the transition state of the reaction, @@ -183,62 +187,61 @@ def applyRRKMTheory(transitionState, microcanonial rate, and `densStates` is the density of states of the reactant. 
""" - cdef numpy.ndarray[numpy.float64_t,ndim=2] k0, k, sumStates - cdef int Ngrains, NJ - cdef bint activeJRotor - cdef double dE, E0_TS + cdef np.ndarray[np.float64_t,ndim=2] k0, k, sumStates + cdef int n_grains, n_j + cdef bint active_j_rotor + cdef double d_e, e0_ts cdef int r, s from rmgpy.pdep import Configuration - - Ngrains = Elist.shape[0] - NJ = Jlist.shape[0] - activeJRotor = (NJ == 1) - k0 = numpy.zeros((Ngrains,NJ)) - k = numpy.zeros_like(k0) - dE = Elist[1] - Elist[0] - E0_TS = transitionState.conformer.E0.value_si - + + n_grains = Elist.shape[0] + n_j = Jlist.shape[0] + active_j_rotor = (n_j == 1) + k0 = np.zeros((n_grains,n_j)) + k = np.zeros_like(k0) + d_e = Elist[1] - Elist[0] + e0_ts = transitionState.conformer.E0.value_si + conf = Configuration(transitionState) - conf.calculateDensityOfStates(Elist - Elist[0], activeJRotor=activeJRotor) - + conf.calculateDensityOfStates(Elist - Elist[0], activeJRotor=active_j_rotor) + # Compute tunneling function kappa = transitionState.calculateTunnelingFunction(Elist) - + # Convolve with transition state density of states to get new transition # state sum of states that includes tunneling conf.sumStates = convolve(conf.densStates, kappa) - conf.Elist += Elist[0] - E0_TS + conf.Elist += Elist[0] - e0_ts - E0 = None - for r in xrange(Ngrains): + e0 = None + for r in range(n_grains): if conf.sumStates[r] > 0: - E0 = conf.Elist[r] + e0 = conf.Elist[r] break - if E0 is None: + if e0 is None: raise ValueError, "Could not find a positive sum of states for {0}".format(conf) - conf.Elist -= E0 - - sumStates = conf.mapSumOfStates(Elist - E0, Jlist) - + conf.Elist -= e0 + + sumStates = conf.mapSumOfStates(Elist - e0, Jlist) + # Generate k(E) using RRKM formula (with tunneling) - dE /= constants.h * constants.Na # J/mol -> s^-1 - for s in range(NJ): - for r in range(Ngrains): - if sumStates[r,s] > 0 and densStates[r,s] > 0: - k[r,s] = sumStates[r,s] / densStates[r,s] * dE + d_e /= constants.h * constants.Na # J/mol -> s^-1 + for s in range(n_j): + for r in range(n_grains): + if sumStates[r, s] > 0 and densStates[r, s] > 0: + k[r, s] = sumStates[r, s] / densStates[r, s] * d_e logging.debug('Finished applying RRKM for path transition state {}'.format(transitionState)) logging.debug('The rate constant is found to be {}'.format(k)) - return k @cython.boundscheck(False) @cython.wraparound(False) def applyInverseLaplaceTransformMethod(transitionState, Arrhenius kinetics, - numpy.ndarray[numpy.float64_t,ndim=1] Elist, - numpy.ndarray[numpy.int_t,ndim=1] Jlist, - numpy.ndarray[numpy.float64_t,ndim=2] densStates, + np.ndarray[np.float64_t,ndim=1] Elist, + np.ndarray[np.int_t,ndim=1] Jlist, + np.ndarray[np.float64_t,ndim=2] densStates, double T=0.0): """ Calculate the microcanonical rate coefficient for a reaction using the @@ -250,84 +253,86 @@ def applyInverseLaplaceTransformMethod(transitionState, the temperature exponent of the Arrhenius expression is negative (for which the inverse transform is undefined). 
""" - cdef numpy.ndarray[numpy.float64_t,ndim=2] k - cdef numpy.ndarray[numpy.float64_t,ndim=1] phi0, phi - cdef int Ngrains, NJ - cdef bint activeJRotor - cdef double dE, R, A, n, Ea, m0, rem, E0, num, E, n_crit + cdef np.ndarray[np.float64_t,ndim=2] k + cdef np.ndarray[np.float64_t,ndim=1] phi0, phi + cdef int n_grains, n_j + cdef bint active_j_rotor + cdef double d_e, gas_constant, freq_factor, n, e_a, m0, rem, e0, num, energy, n_crit cdef int r, s, m - Ngrains = Elist.shape[0] - NJ = Jlist.shape[0] - k = numpy.zeros((Ngrains,NJ)) - dE = Elist[1] - Elist[0] - R = constants.R - E0 = transitionState.conformer.E0.value_si - - n = kinetics._n.value_si - A = kinetics._A.value_si / (kinetics._T0.value_si**n) - Ea = kinetics._Ea.value_si + n_grains = Elist.shape[0] + n_j = Jlist.shape[0] + k = np.zeros((n_grains,n_j)) + d_e = Elist[1] - Elist[0] + gas_constant = constants.R + e0 = transitionState.conformer.E0.value_si - if isinstance(kinetics, Arrhenius) and (T != 0.0 or (Ea >= 0 and n >= 0)): + n = kinetics.n.value_si + freq_factor = kinetics.A.value_si / (kinetics.T0.value_si ** n) + e_a = kinetics.Ea.value_si + + if isinstance(kinetics, Arrhenius) and (T != 0.0 or (e_a >= 0 and n >= 0)): # The inverse Laplace transform is not defined for Ea < 0 or n < 0 # In these cases we move the offending portion into the preexponential # at the temperature of interest # This is an approximation, but it's not worth a more robust procedure - + # Including the T^n piece explicitly also has numerical difficulties # for small positive n; it turns out that using this approximation is # actually more accurate than trying to handle the T^n piece "properly" # For now the implementation is to use this approximation for all n # below some critical value, which is purposely placed a bit above zero n_crit = 0.25 - - if Ea < 0: - A *= exp(-Ea / R / T) - Ea = 0.0 + + if e_a < 0: + freq_factor *= exp(-e_a / gas_constant / T) + e_a = 0.0 if n < n_crit: - A *= T**n + freq_factor *= T ** n n = 0.0 if n < n_crit: # Determine the microcanonical rate directly - m0, rem = divmod(Ea, dE) + m0, rem = divmod(e_a, d_e) m = int(m0) if rem == 0: - for s in range(NJ): - for r in range(m, Ngrains): - if Elist[r] > E0 and densStates[r,s] != 0: - k[r,s] = A * densStates[r-m,s] / densStates[r,s] + for s in range(n_j): + for r in range(m, n_grains): + if Elist[r] > e0 and densStates[r, s] != 0: + k[r, s] = freq_factor * densStates[r - m, s] / densStates[r, s] else: - for s in range(NJ): - for r in range(m+1, Ngrains): - if Elist[r] > E0 and densStates[r,s] != 0 and abs(densStates[r-m,s]) > 1e-12 and abs(densStates[r-m-1,s]) > 1e-12: - num = densStates[r-m,s] * (densStates[r-m-1,s] / densStates[r-m,s]) ** (-rem / (Elist[r-m-1] - Elist[r-m])) - k[r,s] = A * num / densStates[r,s] - + for s in range(n_j): + for r in range(m + 1, n_grains): + if Elist[r] > e0 and densStates[r, s] != 0 \ + and abs(densStates[r - m, s]) > 1e-12 \ + and abs(densStates[r - m - 1, s]) > 1e-12: + num = densStates[r - m, s] * (densStates[r - m - 1, s] / densStates[r - m, s]) \ + ** (-rem / (Elist[r - m - 1] - Elist[r - m])) + k[r, s] = freq_factor * num / densStates[r, s] + elif n >= n_crit: import scipy.special - # Evaluate the inverse Laplace transform of the T**n exp(-Ea/RT) piece, which only - # exists for n >= 0 - phi0 = numpy.zeros(Ngrains, numpy.float64) - for r in range(Ngrains): - E = Elist[r] - Elist[0] - Ea - if E > 1: - phi0[r] = (E/R)**(n-1.0) - phi0 = phi0 * (dE / R) / scipy.special.gamma(n) + # Evaluate the inverse Laplace transform of the T**n 
exp(-Ea/RT) piece, which only exists for n >= 0 + phi0 = np.zeros(n_grains, np.float64) + for r in range(n_grains): + energy = Elist[r] - Elist[0] - e_a + if energy > 1: + phi0[r] = (energy / gas_constant) ** (n - 1.0) + phi0 = phi0 * (d_e / gas_constant) / scipy.special.gamma(n) # Evaluate the convolution - for s in range(NJ): - phi = convolve(phi0, densStates[:,s]) + for s in range(n_j): + phi = convolve(phi0, densStates[:, s]) # Apply to determine the microcanonical rate - for r in range(Ngrains): - if densStates[r,s] != 0: - k[r,s] = A * phi[r] / densStates[r,s] - + for r in range(n_grains): + if densStates[r, s] != 0: + k[r, s] = freq_factor * phi[r] / densStates[r, s] + else: raise PressureDependenceError('Unable to use inverse Laplace transform method for non-Arrhenius kinetics or for n < 0.') logging.debug('Finished applying inverse lapace transform for path transition state {}'.format(transitionState)) logging.debug('The rate constant is found to be {}'.format(k)) - + return k def fitInterpolationModel(reaction, Tlist, Plist, K, model, Tmin, Tmax, Pmin, Pmax, errorCheck=False): @@ -346,7 +351,7 @@ def fitInterpolationModel(reaction, Tlist, Plist, K, model, Tmin, Tmax, Pmin, Pm deviate too much from the data; as this is not necessarily a fast process, it is optional. """ - + from rmgpy.quantity import Quantity from rmgpy.kinetics import PDepArrhenius, Chebyshev @@ -362,9 +367,9 @@ def fitInterpolationModel(reaction, Tlist, Plist, K, model, Tmin, Tmax, Pmin, Pm # Set/update the net reaction kinetics using interpolation model if model[0].lower() == 'chebyshev': - modelType, degreeT, degreeP = model + model_type, degree_t, degree_p = model chebyshev = Chebyshev() - chebyshev.fitToData(Tlist, Plist, K, kunits, degreeT, degreeP, Tmin, Tmax, Pmin, Pmax) + chebyshev.fitToData(Tlist, Plist, K, kunits, degree_t, degree_p, Tmin, Tmax, Pmin, Pmax) kinetics = chebyshev elif model[0].lower() == 'pdeparrhenius': pDepArrhenius = PDepArrhenius() @@ -375,24 +380,23 @@ def fitInterpolationModel(reaction, Tlist, Plist, K, model, Tmin, Tmax, Pmin, Pm # Set temperature and pressure ranges explicitly (as they may be different # from min(Tlist), max(Tlist), min(Plist), max(Plist)) - kinetics.Tmin = Quantity(Tmin,"K") - kinetics.Tmax = Quantity(Tmax,"K") - kinetics.Pmin = Quantity(Pmin/1e5,"bar") - kinetics.Pmax = Quantity(Pmax/1e5,"bar") + kinetics.Tmin = Quantity(Tmin, "K") + kinetics.Tmax = Quantity(Tmax, "K") + kinetics.Pmin = Quantity(Pmin / 1e5, "bar") + kinetics.Pmax = Quantity(Pmax / 1e5, "bar") # Compute log RMS error for fit if errorCheck: - logRMS = 0.0 + log_rms = 0.0 # Check that fit is within an order of magnitude at all points for t, T in enumerate(Tlist): for p, P in enumerate(Plist): - logkmodel = log(kinetics.getRateCoefficient(T, P)) - logkdata = log(K[t,p]) - logRMS += (logkmodel - logkdata) * (logkmodel - logkdata) - logRMS = sqrt(logRMS / len(Tlist) / len(Plist)) - if logRMS > 0.5: - logging.warning('RMS error for k(T,P) fit = {0:g} for reaction {1}.'.format(logRMS, reaction)) + log_k_model = log(kinetics.getRateCoefficient(T, P)) + log_k_data = log(K[t,p]) + log_rms += (log_k_model - log_k_data) * (log_k_model - log_k_data) + log_rms = sqrt(log_rms / len(Tlist) / len(Plist)) + if log_rms > 0.5: + logging.warning('RMS error for k(T,P) fit = {0:g} for reaction {1}.'.format(log_rms, reaction)) logging.debug('Finished fitting model for path reaction {}'.format(reaction)) logging.debug('The kinetics fit is {0!r}'.format(kinetics)) return kinetics - diff --git a/rmgpy/pdep/rs.pyx 
b/rmgpy/pdep/rs.pyx index 61823b4163..f520b257c2 100644 --- a/rmgpy/pdep/rs.pyx +++ b/rmgpy/pdep/rs.pyx @@ -30,195 +30,203 @@ Contains functionality for computing pressure-dependent phenomenological rate coefficients :math:`k(T,P)` using the reservoir state method. """ -import numpy -cimport numpy import scipy.linalg -from libc.math cimport exp, log, sqrt +import numpy as np +cimport numpy as np import rmgpy.constants as constants from rmgpy.exceptions import ReservoirStateError ################################################################################ -cpdef applyReservoirStateMethod(network): - cdef numpy.ndarray[numpy.int_t,ndim=1] Jlist - cdef numpy.ndarray[numpy.int_t,ndim=2] Nres, Nact - cdef numpy.ndarray[numpy.int_t,ndim=3] indices - cdef numpy.ndarray[numpy.float64_t,ndim=1] Elist, collFreq, E0 - cdef numpy.ndarray[numpy.float64_t,ndim=2] L, Z, X, K - cdef numpy.ndarray[numpy.float64_t,ndim=3] densStates, eqDist - cdef numpy.ndarray[numpy.float64_t,ndim=4] Kij, Gnj, Fim, pa - cdef numpy.ndarray[numpy.float64_t,ndim=5] Mcoll +cpdef applyReservoirStateMethod(network): + """A method for applying the Reservoir State approach for solving the master equation.""" + cdef np.ndarray[np.int_t,ndim=1] j_list + cdef np.ndarray[np.int_t,ndim=2] n_res, n_act + cdef np.ndarray[np.int_t,ndim=3] indices + cdef np.ndarray[np.float64_t,ndim=1] e_list + cdef np.ndarray[np.float64_t,ndim=2] active_state_mat, source_vectors, pss_active_state, k + cdef np.ndarray[np.float64_t,ndim=3] dens_states, eq_dist + cdef np.ndarray[np.float64_t,ndim=4] k_ij, g_nj, f_im, pa + cdef np.ndarray[np.float64_t,ndim=5] m_coll cdef list ind - cdef double T, P, E, tol, y, dfactor, beta - cdef int Nisom, Nreac, Nprod, Ngrains, NJ, bandwidth, halfbandwidth, width, width0 + cdef double temperature, tol, y, beta + cdef int n_isom, n_reac, n_prod, n_grains, n_j, bandwidth, halfbandwidth, width, width0 cdef int i, j, n, r, s, u, v, row, iter - T = network.T - P = network.P - Elist = network.Elist - Jlist = network.Jlist - densStates = network.densStates - collFreq = network.collFreq - Mcoll = network.Mcoll - Kij = network.Kij - Fim = network.Fim - Gnj = network.Gnj - E0 = network.E0 - Nisom = network.Nisom - Nreac = network.Nreac - Nprod = network.Nprod - Ngrains = network.Ngrains - NJ = network.NJ - - beta = 1. / (constants.R * T) # [=] mol/kJ - - K = numpy.zeros((Nisom+Nreac+Nprod, Nisom+Nreac+Nprod), numpy.float64) - pa = numpy.zeros((Nisom,Nisom+Nreac,Ngrains,NJ), numpy.float64) + temperature = network.T + e_list = network.Elist + j_list = network.Jlist + dens_states = network.densStates + m_coll = network.Mcoll + k_ij = network.Kij + f_im = network.Fim + g_nj = network.Gnj + n_isom = network.Nisom + n_reac = network.Nreac + n_prod = network.Nprod + n_grains = network.Ngrains + n_j = network.NJ + + beta = 1. 
/ (constants.R * temperature) # [=] mol/kJ + + k = np.zeros((n_isom + n_reac + n_prod, n_isom + n_reac + n_prod), np.float64) + pa = np.zeros((n_isom, n_isom + n_reac, n_grains, n_j), np.float64) # Determine the reservoir cutoff grain for each isomer # Start by simply placing it at the lowest reactive grain - Nres = numpy.zeros((Nisom,NJ), numpy.int) - for i in range(Nisom): - for s in range(NJ): - for r in range(Ngrains): - if densStates[i,r,s] != 0 and ((Kij[:,i,r,s] > 0).any() or (Gnj[:,i,r,s] > 0).any()): + n_res = np.zeros((n_isom, n_j), np.int) + for i in range(n_isom): + for s in range(n_j): + for r in range(n_grains): + if dens_states[i, r, s] != 0 and ((k_ij[:, i, r, s] > 0).any() or (g_nj[:, i, r, s] > 0).any()): # We need at least one reservoir grain for the RS method to be successful - if r == 0 or densStates[i,r-1,s] == 0: - Nres[i,s] = r + 1 + if r == 0 or dens_states[i, r - 1, s] == 0: + n_res[i, s] = r + 1 else: - Nres[i,s] = r + n_res[i, s] = r break - Nact = Ngrains - Nres - + n_act = n_grains - n_res + # Determine equilibrium distributions - eqDist = numpy.zeros((Nisom+Nreac,Ngrains,NJ), numpy.float64) - for i in range(Nisom+Nreac): - for s in range(NJ): - eqDist[i,:,s] = densStates[i,:,s] * (2*Jlist[s]+1) * numpy.exp(-Elist * beta) + eq_dist = np.zeros((n_isom + n_reac, n_grains, n_j), np.float64) + for i in range(n_isom + n_reac): + for s in range(n_j): + eq_dist[i, :, s] = dens_states[i, :, s] * (2 * j_list[s] + 1) * np.exp(-e_list * beta) # Determine pseudo-steady state populations of active state row = 0 - indices = -numpy.ones((Nisom,Ngrains,NJ), numpy.int) - for r in range(Ngrains): - for s in range(NJ): - for i in range(Nisom): - if r >= Nres[i,s]: - indices[i,r,s] = row + indices = -np.ones((n_isom, n_grains, n_j), np.int) + for r in range(n_grains): + for s in range(n_j): + for i in range(n_isom): + if r >= n_res[i, s]: + indices[i, r, s] = row row += 1 - + # Choose the half-bandwidth using the deepest isomer well width = 0 tol = 1e-12 - for i in range(Nisom): - for s in range(NJ): - r = Nres[i,s] - if Mcoll[i,r,s,r,s] == 0: continue - ratio = numpy.abs(Mcoll[i,:,s,r,s] / Mcoll[i,r,s,r,s]) + for i in range(n_isom): + for s in range(n_j): + r = n_res[i, s] + if m_coll[i, r, s, r, s] == 0: continue + ratio = np.abs(m_coll[i, :, s, r, s] / m_coll[i, r, s, r, s]) ind = [j for j,y in enumerate(ratio) if y > tol] if len(ind) > 0: width0 = max(r - min(ind), max(ind) - r) if width0 > width: width = width0 if width == 0: - raise ReservoirStateError('Unable to determine half-bandwidth for active-state matrix; the wells may be too shallow to use the RS method.') - halfbandwidth = (width + 1) * Nisom * NJ - Nisom + raise ReservoirStateError('Unable to determine half-bandwidth for active-state matrix; ' + 'the wells may be too shallow to use the RS method.') + halfbandwidth = (width + 1) * n_isom * n_j - n_isom bandwidth = 2 * halfbandwidth + 1 - + # Populate active-state matrix and source vectors - L = numpy.zeros((bandwidth,numpy.sum(Nact)), numpy.float64) - Z = numpy.zeros((numpy.sum(Nact),Nisom+Nreac), numpy.float64) + active_state_mat = np.zeros((bandwidth, np.sum(n_act)), np.float64) + source_vectors = np.zeros((np.sum(n_act), n_isom + n_reac), np.float64) # Collisional terms - for i in range(Nisom): - for u in range(NJ): - for v in range(NJ): - for r in range(Nres[i,u], Ngrains): - for s in range(max(Nres[i,v], r-width), min(Ngrains, r+width+1)): - L[halfbandwidth + indices[i,r,u] - indices[i,s,v], indices[i,s,v]] = Mcoll[i,r,u,s,v] - Z[indices[i,r,u],i] = 
numpy.sum(Mcoll[i,r,u,0:Nres[i,u],v] * eqDist[i,0:Nres[i,u],v]) + for i in range(n_isom): + for u in range(n_j): + for v in range(n_j): + for r in range(n_res[i, u], n_grains): + for s in range(max(n_res[i, v], r - width), min(n_grains, r + width + 1)): + active_state_mat[halfbandwidth + indices[i, r, u] - indices[i, s, v], indices[i, s, v]] = \ + m_coll[i, r, u, s, v] + source_vectors[indices[i, r, u], i] = np.sum(m_coll[i, r, u, 0: n_res[i, u], v] * + eq_dist[i, 0: n_res[i, u], v]) # Isomerization terms - for i in range(Nisom): + for i in range(n_isom): for j in range(i): - for u in range(NJ): - for r in range(max(Nres[i,u], Nres[j,u]), Ngrains): - L[halfbandwidth + indices[j,r,u] - indices[i,r,u], indices[i,r,u]] = Kij[j,i,r,u] - L[halfbandwidth, indices[i,r,u]] -= Kij[j,i,r,u] - L[halfbandwidth + indices[i,r,u] - indices[j,r,u], indices[j,r,u]] = Kij[i,j,r,u] - L[halfbandwidth, indices[j,r,u]] -= Kij[i,j,r,u] + for u in range(n_j): + for r in range(max(n_res[i, u], n_res[j, u]), n_grains): + active_state_mat[halfbandwidth + indices[j, r, u] - indices[i, r, u], indices[i, r, u]] = \ + k_ij[j, i, r, u] + active_state_mat[halfbandwidth, indices[i, r, u]] -= k_ij[j, i, r, u] + active_state_mat[halfbandwidth + indices[i, r, u] - indices[j, r, u], indices[j, r, u]] = \ + k_ij[i, j, r, u] + active_state_mat[halfbandwidth, indices[j, r, u]] -= k_ij[i, j, r, u] # Dissociation/association terms - for i in range(Nisom): - for n in range(Nreac+Nprod): - for u in range(NJ): - for r in range(Nres[i,u], Ngrains): - L[halfbandwidth, indices[i,r,u]] -= Gnj[n,i,r,u] - for n in range(Nreac): - for u in range(NJ): - for r in range(Nres[i,u], Ngrains): - Z[indices[i,r,u], n+Nisom] = Fim[i,n,r,u] * eqDist[n+Nisom,r,u] - + for i in range(n_isom): + for n in range(n_reac + n_prod): + for u in range(n_j): + for r in range(n_res[i, u], n_grains): + active_state_mat[halfbandwidth, indices[i, r, u]] -= g_nj[n, i, r, u] + for n in range(n_reac): + for u in range(n_j): + for r in range(n_res[i, u], n_grains): + source_vectors[indices[i, r, u], n + n_isom] = f_im[i, n, r, u] * eq_dist[n + n_isom, r, u] + # Solve for pseudo-steady state populations of active state - X = scipy.linalg.solve_banded((halfbandwidth,halfbandwidth), L, -Z, overwrite_ab=True, overwrite_b=True) - for i in range(Nisom): - for u in range(NJ): - for r in range(Nres[i,u], Ngrains): - for n in range(Nisom+Nreac): - pa[i,n,r,u] = X[indices[i,r,u], n] - + pss_active_state = scipy.linalg.solve_banded((halfbandwidth, halfbandwidth), active_state_mat, -source_vectors, + overwrite_ab=True, overwrite_b=True) + for i in range(n_isom): + for u in range(n_j): + for r in range(n_res[i, u], n_grains): + for n in range(n_isom + n_reac): + pa[i, n, r, u] = pss_active_state[indices[i, r, u], n] + # Double-check to ensure that we have all positive populations if not (pa >= 0).all(): raise ReservoirStateError('A negative steady-state population was encountered.') # Put the reservoir populations into pa as well - for i in range(Nisom): - for u in range(NJ): - for r in range(Nres[i,u]): - pa[i,i,r,u] = eqDist[i,r,u] + for i in range(n_isom): + for u in range(n_j): + for r in range(n_res[i, u]): + pa[i, i, r, u] = eq_dist[i, r, u] # Determine the phenomenological rate coefficients using the general procedure # This should be exactly the same as the procedure below, which is more # specific to the RS method # Previously it was noted that this more general approach was more robust; # however, more recently it seems that this is no longer the case - #K = 
computeRateCoefficients(Mcoll, Kij, Fim, Gnj, pa, Nisom, Nreac, Nprod) + # k = computeRateCoefficients(m_coll, k_ij, f_im, g_nj, pa, n_isom, n_reac, n_prod) # Determine the phenomenological rate coefficients - K = numpy.zeros((Nisom+Nreac+Nprod, Nisom+Nreac+Nprod), numpy.float64) + k = np.zeros((n_isom+n_reac+n_prod, n_isom+n_reac+n_prod), np.float64) # Rows relating to isomers - for i in range(Nisom): - for u in range(NJ): - for v in range(NJ): + for i in range(n_isom): + for u in range(n_j): + for v in range(n_j): # Collisional rearrangement within the reservoir of isomer i - K[i,i] = K[i,i] + numpy.sum(numpy.dot(Mcoll[i,0:Nres[i,u],u,0:Nres[i,v],v], eqDist[i,0:Nres[i,v],v])) + k[i, i] = k[i, i] + np.sum(np.dot(m_coll[i, 0: n_res[i, u], u, 0: n_res[i, v], v], + eq_dist[i, 0: n_res[i, v], v])) # Isomerization from isomer j to isomer i - for j in range(Nisom): - K[i,j] = K[i,j] + numpy.sum(numpy.dot(Mcoll[i,0:Nres[i,u],u,Nres[i,v]:Ngrains,v], pa[i,j,Nres[i,v]:Ngrains,v])) + for j in range(n_isom): + k[i, j] = k[i, j] + np.sum(np.dot(m_coll[i, 0: n_res[i, u], u, n_res[i, v]: n_grains, v], + pa[i, j, n_res[i, v]: n_grains, v])) # Association from reactant n to isomer i - for n in range(Nisom, Nisom+Nreac): - K[i,n] = K[i,n] + numpy.sum(numpy.dot(Mcoll[i,0:Nres[i,u],u,Nres[i,v]:Ngrains,v], pa[i,n,Nres[i,v]:Ngrains,v])) + for n in range(n_isom, n_isom + n_reac): + k[i, n] = k[i, n] + np.sum(np.dot(m_coll[i, 0: n_res[i, u], u, n_res[i, v]: n_grains, v], + pa[i, n, n_res[i, v]: n_grains, v])) # Rows relating to reactants - for n in range(Nreac): + for n in range(n_reac): # Association loss - for i in range(Nisom): - K[Nisom+n,Nisom+n] = K[Nisom+n,Nisom+n] - numpy.sum(Fim[i,n,:,:] * eqDist[n+Nisom,:,:]) + for i in range(n_isom): + k[n_isom + n, n_isom + n] = k[n_isom + n, n_isom + n] - np.sum(f_im[i, n, :, :] * eq_dist[n + n_isom, :, :]) # Reaction from isomer or reactant j to reactant n - for j in range(Nisom+Nreac): - for i in range(Nisom): - for u in range(NJ): - K[Nisom+n,j] = K[Nisom+n,j] + numpy.sum(Gnj[n,i,Nres[i,u]:Ngrains,u] * pa[i,j,Nres[i,u]:Ngrains,u]) + for j in range(n_isom + n_reac): + for i in range(n_isom): + for u in range(n_j): + k[n_isom + n, j] = k[n_isom + n, j] + np.sum(g_nj[n, i, n_res[i, u]: n_grains, u] + * pa[i, j, n_res[i, u]: n_grains, u]) # Rows relating to products - for n in range(Nreac, Nreac+Nprod): + for n in range(n_reac, n_reac + n_prod): # Reaction from isomer or reactant j to product n - for j in range(Nisom+Nreac): - for i in range(Nisom): - for u in range(NJ): - K[Nisom+n,j] = K[Nisom+n,j] + numpy.sum(Gnj[n,i,Nres[i,u]:Ngrains,u] * pa[i,j,Nres[i,u]:Ngrains,u]) + for j in range(n_isom + n_reac): + for i in range(n_isom): + for u in range(n_j): + k[n_isom + n, j] = k[n_isom + n, j] + np.sum(g_nj[n, i, n_res[i, u]: n_grains, u] + * pa[i, j, n_res[i, u]: n_grains, u]) # Ensure matrix is conservative - for n in range(Nisom+Nreac): - K[n,n] = K[n,n] - numpy.sum(K[:,n]) + for n in range(n_isom+n_reac): + k[n, n] = k[n, n] - np.sum(k[:, n]) # Return the matrix of k(T,P) values and the pseudo-steady population distributions - return K, pa + return k, pa From 0c0467c1e03dece4423570a648da5900c066bdc8 Mon Sep 17 00:00:00 2001 From: Xiaorui Dong Date: Tue, 13 Aug 2019 16:28:04 -0400 Subject: [PATCH 030/155] Py3 compatibility changes on statmech module --- rmgpy/statmech/__init__.py | 14 +- rmgpy/statmech/conformerTest.py | 338 ++++++------ rmgpy/statmech/rotationTest.py | 866 +++++++++++++++--------------- rmgpy/statmech/schrodingerTest.py | 102 ++-- 
 rmgpy/statmech/torsionTest.py     | 494 ++++++++---------
 rmgpy/statmech/translationTest.py | 107 ++--
 rmgpy/statmech/vibrationTest.py   | 184 ++++---
 7 files changed, 1077 insertions(+), 1028 deletions(-)

diff --git a/rmgpy/statmech/__init__.py b/rmgpy/statmech/__init__.py
index 225097fa7c..cc28a2d442 100644
--- a/rmgpy/statmech/__init__.py
+++ b/rmgpy/statmech/__init__.py
@@ -28,8 +28,12 @@
 #                                                                             #
 ###############################################################################
 
-from .translation import Translation, IdealGasTranslation
-from .rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor
-from .vibration import Vibration, HarmonicOscillator
-from .torsion import Torsion, HinderedRotor
-from .conformer import Conformer
+"""
+initialize imports
+"""
+
+from rmgpy.statmech.conformer import Conformer
+from rmgpy.statmech.rotation import KRotor, LinearRotor, NonlinearRotor, Rotation, SphericalTopRotor
+from rmgpy.statmech.torsion import HinderedRotor, Torsion
+from rmgpy.statmech.translation import IdealGasTranslation, Translation
+from rmgpy.statmech.vibration import HarmonicOscillator, Vibration
diff --git a/rmgpy/statmech/conformerTest.py b/rmgpy/statmech/conformerTest.py
index 97b546ce41..ce700055c4 100644
--- a/rmgpy/statmech/conformerTest.py
+++ b/rmgpy/statmech/conformerTest.py
@@ -32,235 +32,245 @@
 This script contains unit tests of the :mod:`rmgpy.statmech.conformer` module.
 """
 
+from __future__ import division
+
 import unittest
-import numpy
-from rmgpy.statmech import Conformer, IdealGasTranslation, NonlinearRotor, HarmonicOscillator, \
-    LinearRotor, HinderedRotor
+
+import numpy as np
+
 import rmgpy.constants as constants
+from rmgpy.statmech import Conformer, HarmonicOscillator, HinderedRotor, \
+    IdealGasTranslation, LinearRotor, NonlinearRotor
 
 ################################################################################
 
+
 class TestConformer(unittest.TestCase):
     """
     Contains unit tests of the :class:`Conformer` class.
     """
-
     def setUp(self):
         """
         A function run before each unit test in this class.
""" self.ethylene = Conformer( - E0 = (0.0,"kJ/mol"), - modes = [ - IdealGasTranslation(mass=(28.03,"amu")), - NonlinearRotor(inertia=([3.41526,16.6498,20.065],"amu*angstrom^2"), symmetry=4), - HarmonicOscillator(frequencies=([828.397,970.652,977.223,1052.93,1233.55,1367.56,1465.09,1672.25,3098.46,3111.7,3165.79,3193.54],"cm^-1")), + E0=(0.0, "kJ/mol"), + modes=[ + IdealGasTranslation(mass=(28.03, "amu")), + NonlinearRotor(inertia=([3.41526, 16.6498, 20.065], "amu*angstrom^2"), symmetry=4), + HarmonicOscillator(frequencies=([828.397, 970.652, 977.223, 1052.93, 1233.55, 1367.56, 1465.09, + 1672.25, 3098.46, 3111.7, 3165.79, 3193.54], "cm^-1")), ], - spinMultiplicity = 1, - opticalIsomers = 1, + spinMultiplicity=1, + opticalIsomers=1, ) self.oxygen = Conformer( - E0 = (0.0,"kJ/mol"), - modes = [ - IdealGasTranslation(mass=(31.99,"amu")), - LinearRotor(inertia=(11.6056,"amu*angstrom^2"), symmetry=2), - HarmonicOscillator(frequencies=([1621.54],"cm^-1")), + E0=(0.0, "kJ/mol"), + modes=[ + IdealGasTranslation(mass=(31.99, "amu")), + LinearRotor(inertia=(11.6056, "amu*angstrom^2"), symmetry=2), + HarmonicOscillator(frequencies=([1621.54], "cm^-1")), ], - spinMultiplicity = 3, - opticalIsomers = 1, + spinMultiplicity=3, + opticalIsomers=1, ) - + # The following data is for ethane at the CBS-QB3 level - self.coordinates = numpy.array([ - [ 0.0000, 0.0000, 0.0000], - [ -0.0000, -0.0000, 1.0936], - [ 1.0430, -0.0000, -0.3288], - [ -0.4484, 0.9417, -0.3288], - [ -0.7609, -1.2051, -0.5580], - [ -0.7609, -1.2051, -1.6516], - [ -0.3125, -2.1468, -0.2292], - [ -1.8039, -1.2051, -0.2293], + self.coordinates = np.array([ + [0.0000, 0.0000, 0.0000], + [-0.0000, -0.0000, 1.0936], + [1.0430, -0.0000, -0.3288], + [-0.4484, 0.9417, -0.3288], + [-0.7609, -1.2051, -0.5580], + [-0.7609, -1.2051, -1.6516], + [-0.3125, -2.1468, -0.2292], + [-1.8039, -1.2051, -0.2293], ]) - self.number = numpy.array([6, 1, 1, 1, 6, 1, 1, 1]) - self.mass = numpy.array([12, 1.007825, 1.007825, 1.007825, 12, 1.007825, 1.007825, 1.007825]) + self.number = np.array([6, 1, 1, 1, 6, 1, 1, 1]) + self.mass = np.array([12, 1.007825, 1.007825, 1.007825, 12, 1.007825, 1.007825, 1.007825]) self.E0 = -93.5097 self.conformer = Conformer( - E0 = (self.E0,"kJ/mol"), - modes = [ - IdealGasTranslation(mass=(30.0469,"amu")), - NonlinearRotor(inertia=([6.27071,25.3832,25.3833],"amu*angstrom^2"), symmetry=6), - HarmonicOscillator(frequencies=([818.917,819.479,987.099,1206.76,1207.05,1396,1411.35,1489.73,1489.95,1492.49,1492.66,2995.36,2996.06,3040.77,3041,3065.86,3066.02],"cm^-1")), - HinderedRotor(inertia=(1.56768,"amu*angstrom^2"), symmetry=3, barrier=(2.69401,"kcal/mol"), quantum=False, semiclassical=False), + E0=(self.E0, "kJ/mol"), + modes=[ + IdealGasTranslation(mass=(30.0469, "amu")), + NonlinearRotor(inertia=([6.27071, 25.3832, 25.3833], "amu*angstrom^2"), symmetry=6), + HarmonicOscillator(frequencies=([818.917, 819.479, 987.099, 1206.76, 1207.05, 1396, 1411.35, 1489.73, + 1489.95, 1492.49, 1492.66, 2995.36, 2996.06, 3040.77, 3041, 3065.86, + 3066.02], "cm^-1")), + HinderedRotor(inertia=(1.56768, "amu*angstrom^2"), symmetry=3, + barrier=(2.69401, "kcal/mol"), quantum=False, semiclassical=False), ], - spinMultiplicity = 1, - opticalIsomers = 1, - coordinates = (self.coordinates,"angstrom"), - number = self.number, - mass = (self.mass,"amu"), + spinMultiplicity=1, + opticalIsomers=1, + coordinates=(self.coordinates, "angstrom"), + number=self.number, + mass=(self.mass, "amu"), ) - - def test_getPartitionFunction_ethylene(self): + + def 
test_get_partition_function_ethylene(self): """ Test the StatMech.getPartitionFunction() method for ethylene. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([4.05311e+09, 4.19728e+10, 2.82309e+12, 7.51135e+13, 1.16538e+15]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.ethylene.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([4.05311e+09, 4.19728e+10, 2.82309e+12, 7.51135e+13, 1.16538e+15]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.ethylene.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) - def test_getHeatCapacity_ethylene(self): + def test_get_heat_capacity_ethylene(self): """ Test the StatMech.getHeatCapacity() method for ethylene. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([5.11186, 7.40447, 11.1659, 13.1221, 14.1617]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.ethylene.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, 3) - - def test_getEnthalpy_ethylene(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([5.11186, 7.40447, 11.1659, 13.1221, 14.1617]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.ethylene.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, 3) + + def test_get_enthalpy_ethylene(self): """ Test the StatMech.getEnthalpy() method for ethylene. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([4.23129, 5.04826, 7.27337, 8.93167, 10.1223]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.ethylene.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) - - def test_getEntropy_ethylene(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([4.23129, 5.04826, 7.27337, 8.93167, 10.1223]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.ethylene.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) + + def test_get_entropy_ethylene(self): """ Test the StatMech.getEntropy() method for ethylene. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([26.3540, 29.5085, 35.9422, 40.8817, 44.8142]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.ethylene.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, 3) - - def test_getSumOfStates_ethylene(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([26.3540, 29.5085, 35.9422, 40.8817, 44.8142]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.ethylene.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, 3) + + def test_get_sum_of_states_ethylene(self): """ Test the StatMech.getSumOfStates() method for ethylene. 
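    The checks in these tests follow from the grain-based relations
    N(E) ~ the running sum of rho(E') over grains up to E, and
    Q(T) ~ sum over grains of rho(E_i) * exp(-E_i / (R*T)). A compact version of the
    same consistency check, assuming `mode` is any statmech mode or Conformer and
    `e_list` is an energy grid in J/mol (helper name is illustrative):

        import numpy as np
        import rmgpy.constants as constants

        def check_state_counts(mode, e_list, temperature):
            dens_states = mode.getDensityOfStates(e_list)
            sum_states = mode.getSumOfStates(e_list)
            ratio = np.cumsum(dens_states)[1:] / sum_states[1:]   # should stay near 1 once states appear
            q_from_rho = np.sum(dens_states * np.exp(-e_list / (constants.R * temperature)))
            return ratio, q_from_rho, mode.getPartitionFunction(temperature)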
""" - Elist = numpy.arange(0, 5000*11.96, 2*11.96) - sumStates = self.ethylene.getSumOfStates(Elist) - densStates = self.ethylene.getDensityOfStates(Elist) - for n in range(10, len(Elist)): - self.assertTrue(0.8 < numpy.sum(densStates[0:n+1]) / sumStates[n] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n+1]), sumStates[n])) - - def test_getDensityOfStates_ethylene(self): + e_list = np.arange(0, 5000 * 11.96, 2 * 11.96) + sum_states = self.ethylene.getSumOfStates(e_list) + dens_states = self.ethylene.getDensityOfStates(e_list) + for n in range(10, len(e_list)): + self.assertTrue(0.8 < np.sum(dens_states[0:n + 1]) / sum_states[n] < 1.25, + '{0} != {1}'.format(np.sum(dens_states[0:n + 1]), sum_states[n])) + + def test_get_density_of_states_ethylene(self): """ Test the StatMech.getDensityOfStates() method for ethylene. """ - Elist = numpy.arange(0, 5000*11.96, 2*11.96) - densStates = self.ethylene.getDensityOfStates(Elist) - T = 100 - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.ethylene.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-1*Qexp) + e_list = np.arange(0, 5000 * 11.96, 2 * 11.96) + dens_states = self.ethylene.getDensityOfStates(e_list) + temperature = 100 + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.ethylene.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-1 * q_exp) - def test_getPartitionFunction_oxygen(self): + def test_get_partition_function_oxygen(self): """ Test the StatMech.getPartitionFunction() method for oxygen. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([1.55584e+09, 9.38339e+09, 1.16459e+11, 5.51016e+11, 1.72794e+12]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.oxygen.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([1.55584e+09, 9.38339e+09, 1.16459e+11, 5.51016e+11, 1.72794e+12]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.oxygen.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) - def test_getHeatCapacity_oxygen(self): + def test_get_heat_capacity_oxygen(self): """ Test the StatMech.getHeatCapacity() method for oxygen. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([3.52538, 3.70877, 4.14751, 4.32063, 4.39392]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.oxygen.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, 3) - - def test_getEnthalpy_oxygen(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([3.52538, 3.70877, 4.14751, 4.32063, 4.39392]) * constants.R + for temperature, Cv_exp in zip(t_list, cv_exp_list): + cv_act = self.oxygen.getHeatCapacity(temperature) + self.assertAlmostEqual(Cv_exp, cv_act, 3) + + def test_get_enthalpy_oxygen(self): """ Test the StatMech.getEnthalpy() method for oxygen. 
""" - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([3.50326, 3.54432, 3.75062, 3.91623, 4.02765]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.oxygen.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) - - def test_getEntropy_oxygen(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([3.50326, 3.54432, 3.75062, 3.91623, 4.02765]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.oxygen.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) + + def test_get_entropy_oxygen(self): """ Test the StatMech.getEntropy() method for oxygen. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([24.6685, 26.5065, 29.2314, 30.9513, 32.2056]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.oxygen.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, 3) - - def test_getSumOfStates_oxygen(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([24.6685, 26.5065, 29.2314, 30.9513, 32.2056]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.oxygen.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, 3) + + def test_get_sum_of_states_oxygen(self): """ Test the StatMech.getSumOfStates() method for oxygen. """ - Elist = numpy.arange(0, 5000*11.96, 2*11.96) - sumStates = self.oxygen.getSumOfStates(Elist) - densStates = self.oxygen.getDensityOfStates(Elist) - for n in range(10, len(Elist)): - self.assertTrue(0.8 < numpy.sum(densStates[0:n+1]) / sumStates[n] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n+1]), sumStates[n])) - - def test_getDensityOfStates_oxygen(self): + e_list = np.arange(0, 5000 * 11.96, 2 * 11.96) + sum_states = self.oxygen.getSumOfStates(e_list) + dens_states = self.oxygen.getDensityOfStates(e_list) + for n in range(10, len(e_list)): + self.assertTrue(0.8 < np.sum(dens_states[0:n + 1]) / sum_states[n] < 1.25, + '{0} != {1}'.format(np.sum(dens_states[0:n + 1]), sum_states[n])) + + def test_get_density_of_states_oxygen(self): """ Test the StatMech.getDensityOfStates() method for oxygen. """ - Elist = numpy.arange(0, 5000*11.96, 2*11.96) - densStates = self.oxygen.getDensityOfStates(Elist) - T = 100 - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.oxygen.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-1*Qexp) + e_list = np.arange(0, 5000 * 11.96, 2 * 11.96) + dens_states = self.oxygen.getDensityOfStates(e_list) + temperature = 100 + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.oxygen.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-1 * q_exp) - def test_getTotalMass(self): + def test_get_total_mass(self): """ Test the Conformer.getTotalMass() method. """ - self.assertAlmostEqual(self.conformer.getTotalMass()*constants.Na*1000., numpy.sum(self.mass), 6) + self.assertAlmostEqual(self.conformer.getTotalMass() * constants.Na * 1000., + np.sum(self.mass), 6) - def test_getCenterOfMass(self): + def test_get_center_of_mass(self): """ Test the Conformer.getCenterOfMass() method. 
""" cm = self.conformer.getCenterOfMass() - self.assertAlmostEqual(cm[0]*1e10, -0.38045, 4) - self.assertAlmostEqual(cm[1]*1e10, -0.60255, 4) - self.assertAlmostEqual(cm[2]*1e10, -0.27900, 4) + self.assertAlmostEqual(cm[0] * 1e10, -0.38045, 4) + self.assertAlmostEqual(cm[1] * 1e10, -0.60255, 4) + self.assertAlmostEqual(cm[2] * 1e10, -0.27900, 4) - def test_getMomentOfInertiaTensor(self): + def test_get_moment_of_inertia_tensor(self): """ Test the Conformer.getMomentOfInertiaTensor() method. """ - I = self.conformer.getMomentOfInertiaTensor() - self.assertAlmostEqual(I[0,0]*constants.Na*1e23, 20.65968, 4) - self.assertAlmostEqual(I[0,1]*constants.Na*1e23, -7.48115, 4) - self.assertAlmostEqual(I[0,2]*constants.Na*1e23, -3.46416, 4) - self.assertAlmostEqual(I[1,0]*constants.Na*1e23, -7.48115, 4) - self.assertAlmostEqual(I[1,1]*constants.Na*1e23, 13.53472, 4) - self.assertAlmostEqual(I[1,2]*constants.Na*1e23, -5.48630, 4) - self.assertAlmostEqual(I[2,0]*constants.Na*1e23, -3.46416, 4) - self.assertAlmostEqual(I[2,1]*constants.Na*1e23, -5.48630, 4) - self.assertAlmostEqual(I[2,2]*constants.Na*1e23, 22.84296, 4) + inertia = self.conformer.getMomentOfInertiaTensor() + self.assertAlmostEqual(inertia[0, 0] * constants.Na * 1e23, 20.65968, 4) + self.assertAlmostEqual(inertia[0, 1] * constants.Na * 1e23, -7.48115, 4) + self.assertAlmostEqual(inertia[0, 2] * constants.Na * 1e23, -3.46416, 4) + self.assertAlmostEqual(inertia[1, 0] * constants.Na * 1e23, -7.48115, 4) + self.assertAlmostEqual(inertia[1, 1] * constants.Na * 1e23, 13.53472, 4) + self.assertAlmostEqual(inertia[1, 2] * constants.Na * 1e23, -5.48630, 4) + self.assertAlmostEqual(inertia[2, 0] * constants.Na * 1e23, -3.46416, 4) + self.assertAlmostEqual(inertia[2, 1] * constants.Na * 1e23, -5.48630, 4) + self.assertAlmostEqual(inertia[2, 2] * constants.Na * 1e23, 22.84296, 4) - def test_getPrincipalMomentsOfInertia(self): + def test_get_principal_moments_of_inertia(self): """ Test the Conformer.getPrincipalMomentsOfInertia() method. """ - I, V = self.conformer.getPrincipalMomentsOfInertia() - self.assertAlmostEqual(I[0]*constants.Na*1e23, 6.27074, 4) - self.assertAlmostEqual(I[1]*constants.Na*1e23, 25.38321, 3) - self.assertAlmostEqual(I[2]*constants.Na*1e23, 25.38341, 3) - #print V + inertia, axes = self.conformer.getPrincipalMomentsOfInertia() + self.assertAlmostEqual(inertia[0] * constants.Na * 1e23, 6.27074, 4) + self.assertAlmostEqual(inertia[1] * constants.Na * 1e23, 25.38321, 3) + self.assertAlmostEqual(inertia[2] * constants.Na * 1e23, 25.38341, 3) # For some reason the axes seem to jump around (positioning and signs change) # but the absolute values should be the same as we expect expected = sorted([0.497140, @@ -272,32 +282,32 @@ def test_getPrincipalMomentsOfInertia(self): 0.364578, 0.792099, 0.489554]) - result = sorted(abs(V).flat) - for i,j in zip(expected, result): + result = sorted(abs(axes).flat) + for i, j in zip(expected, result): self.assertAlmostEqual(i, j, 4) return - def test_getInternalReducedMomentOfInertia(self): + def test_get_internal_reduced_moment_of_inertia(self): """ Test the Conformer.getInternalReducedMomentOfInertia() method. 
""" - I = self.conformer.getInternalReducedMomentOfInertia(pivots=[1,5], top1=[1,2,3,4]) - self.assertAlmostEqual(I*constants.Na*1e23, 1.56768, 4) + inertia = self.conformer.getInternalReducedMomentOfInertia(pivots=[1, 5], top1=[1, 2, 3, 4]) + self.assertAlmostEqual(inertia * constants.Na * 1e23, 1.56768, 4) - def test_getNumberDegreesOfFreedom(self): + def test_get_number_degrees_of_freedom(self): """ Test the Conformer.getNumberDegreesOfFreedom() method. """ - #this is for ethane: - numberDegreesOfFreedom = self.conformer.getNumberDegreesOfFreedom() - self.assertEqual(numberDegreesOfFreedom, 24) + # this is for ethane: + number_degrees_of_freedom = self.conformer.getNumberDegreesOfFreedom() + self.assertEqual(number_degrees_of_freedom, 24) - #this is for ethylene: - # It doesn't check aganist 3*Natoms, because Natoms is not declared. - numberDegreesOfFreedom = self.ethylene.getNumberDegreesOfFreedom() - self.assertEqual(numberDegreesOfFreedom, 18) + # this is for ethylene: + # It doesn't check against 3 * n_atoms, because n_atoms is not declared. + number_degrees_of_freedom = self.ethylene.getNumberDegreesOfFreedom() + self.assertEqual(number_degrees_of_freedom, 18) - #this is for CO - # It doesn't check aganist 3*Natoms, because Natoms is not declared. - numberDegreesOfFreedom = self.oxygen.getNumberDegreesOfFreedom() - self.assertEqual(numberDegreesOfFreedom, 6) + # this is for CO + # It doesn't check against 3 * n_atoms, because n_atoms is not declared. + number_degrees_of_freedom = self.oxygen.getNumberDegreesOfFreedom() + self.assertEqual(number_degrees_of_freedom, 6) diff --git a/rmgpy/statmech/rotationTest.py b/rmgpy/statmech/rotationTest.py index eb82f0638e..81363baaa3 100644 --- a/rmgpy/statmech/rotationTest.py +++ b/rmgpy/statmech/rotationTest.py @@ -32,19 +32,23 @@ This script contains unit tests of the :mod:`rmgpy.statmech.rotation` module. """ +from __future__ import division + import unittest -import numpy -from rmgpy.statmech.rotation import LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor +import numpy as np + import rmgpy.constants as constants +from rmgpy.statmech.rotation import KRotor, LinearRotor, NonlinearRotor, SphericalTopRotor ################################################################################ + class TestLinearRotor(unittest.TestCase): """ Contains unit tests of the LinearRotor class. """ - + def setUp(self): """ A function run before each unit test in this class. @@ -53,191 +57,191 @@ def setUp(self): self.symmetry = 2 self.quantum = False self.mode = LinearRotor( - inertia = (self.inertia,"amu*angstrom^2"), - symmetry = self.symmetry, - quantum = self.quantum, + inertia=(self.inertia, "amu*angstrom^2"), + symmetry=self.symmetry, + quantum=self.quantum, ) - - def test_getRotationalConstant(self): + + def test_get_rotational_constant(self): """ Test getting the LinearRotor.rotationalConstant property. """ - Bexp = 1.434692 - Bact = self.mode.rotationalConstant.value_si - self.assertAlmostEqual(Bexp, Bact, 4) - - def test_setRotationalConstant(self): + b_exp = 1.434692 + b_act = self.mode.rotationalConstant.value_si + self.assertAlmostEqual(b_exp, b_act, 4) + + def test_set_rotational_constant(self): """ Test setting the LinearRotor.rotationalConstant property. 
""" - B = self.mode.rotationalConstant - B.value_si *= 2 - self.mode.rotationalConstant = B - Iexp = 0.5 * self.inertia - Iact = self.mode.inertia.value_si * constants.Na * 1e23 - self.assertAlmostEqual(Iexp, Iact, 4) - - def test_getLevelEnergy(self): + rotational_constant = self.mode.rotationalConstant + rotational_constant.value_si *= 2 + self.mode.rotationalConstant = rotational_constant + i_exp = 0.5 * self.inertia + i_act = self.mode.inertia.value_si * constants.Na * 1e23 + self.assertAlmostEqual(i_exp, i_act, 4) + + def test_get_level_energy(self): """ Test the LinearRotor.getLevelEnergy() method. """ - B = self.mode.rotationalConstant.value_si * constants.h * constants.c * 100. - B *= constants.Na - for J in range(0, 100): - Eexp = B * J * (J + 1) - Eact = self.mode.getLevelEnergy(J) - if J == 0: - self.assertEqual(Eact, 0) + rotational_constant = self.mode.rotationalConstant.value_si * constants.h * constants.c * 100. + rotational_constant *= constants.Na + for j in range(0, 100): + e_exp = rotational_constant * j * (j + 1) + e_act = self.mode.getLevelEnergy(j) + if j == 0: + self.assertEqual(e_act, 0) else: - self.assertAlmostEqual(Eexp, Eact, delta=1e-4*Eexp) - - def test_getLevelDegeneracy(self): + self.assertAlmostEqual(e_exp, e_act, delta=1e-4 * e_exp) + + def test_get_level_degeneracy(self): """ Test the LinearRotor.getLevelDegeneracy() method. """ - for J in range(0, 100): - gexp = 2 * J + 1 - gact = self.mode.getLevelDegeneracy(J) - self.assertEqual(gexp, gact) - - def test_getPartitionFunction_classical(self): + for j in range(0, 100): + g_exp = 2 * j + 1 + g_act = self.mode.getLevelDegeneracy(j) + self.assertEqual(g_exp, g_act) + + def test_get_partition_function_classical(self): """ Test the LinearRotor.getPartitionFunction() method for a classical rotor. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([72.6691, 121.115, 242.230, 363.346, 484.461]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) - - def test_getPartitionFunction_quantum(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([72.6691, 121.115, 242.230, 363.346, 484.461]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + + def test_get_partition_function_quantum(self): """ Test the LinearRotor.getPartitionFunction() method for a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([72.8360, 121.282, 242.391, 363.512, 484.627]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) - - def test_getHeatCapacity_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([72.8360, 121.282, 242.391, 363.512, 484.627]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + + def test_get_heat_capacity_classical(self): """ Test the LinearRotor.getHeatCapacity() method using a classical rotor. 
""" self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([1, 1, 1, 1, 1]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp) - - def test_getHeatCapacity_quantum(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([1, 1, 1, 1, 1]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + + def test_get_heat_capacity_quantum(self): """ Test the LinearRotor.getHeatCapacity() method using a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([1, 1, 1, 1, 1]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp) - - def test_getEnthalpy_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([1, 1, 1, 1, 1]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + + def test_get_enthalpy_classical(self): """ Test the LinearRotor.getEnthalpy() method using a classical rotor. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([1, 1, 1, 1, 1]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) - - def test_getEnthalpy_quantum(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([1, 1, 1, 1, 1]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) + + def test_get_enthalpy_quantum(self): """ Test the LinearRotor.getEnthalpy() method using a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([0.997705, 0.998624, 0.999312, 0.999541, 0.999656]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array( + [0.997705, 0.998624, 0.999312, 0.999541, 0.999656]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) - def test_getEntropy_classical(self): + def test_get_entropy_classical(self): """ Test the LinearRotor.getEntropy() method using a classical rotor. 
""" self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([5.28592, 5.79674, 6.48989, 6.89535, 7.18304]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp) - - def test_getEntropy_quantum(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([5.28592, 5.79674, 6.48989, 6.89535, 7.18304]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * s_exp) + + def test_get_entropy_quantum(self): """ Test the LinearRotor.getEntropy() method using a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([5.28592, 5.79674, 6.48989, 6.89535, 7.18304]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([5.28592, 5.79674, 6.48989, 6.89535, 7.18304]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * s_exp) - def test_getSumOfStates_classical(self): + def test_get_sum_of_states_classical(self): """ Test the LinearRotor.getSumOfStates() method using a classical rotor. """ self.mode.quantum = False - Elist = numpy.arange(0, 2000*11.96, 1.0*11.96) - densStates = self.mode.getDensityOfStates(Elist) - sumStates = self.mode.getSumOfStates(Elist) - for n in range(1, len(Elist)): - self.assertAlmostEqual(numpy.sum(densStates[0:n]) / sumStates[n], 1.0, 3) + e_list = np.arange(0, 2000 * 11.96, 1.0 * 11.96) + dens_states = self.mode.getDensityOfStates(e_list) + sum_states = self.mode.getSumOfStates(e_list) + for n in range(1, len(e_list)): + self.assertAlmostEqual(np.sum(dens_states[0:n]) / sum_states[n], 1.0, 3) - def test_getSumOfStates_quantum(self): + def test_get_sum_of_states_quantum(self): """ Test the LinearRotor.getSumOfStates() method using a quantum rotor. """ self.mode.quantum = True - Elist = numpy.arange(0, 4000.*11.96, 2.0*11.96) - densStates = self.mode.getDensityOfStates(Elist) - sumStates = self.mode.getSumOfStates(Elist) - for n in range(1, len(Elist)): - self.assertAlmostEqual(numpy.sum(densStates[0:n+1]) / sumStates[n], 1.0, 3) + e_list = np.arange(0, 4000. * 11.96, 2.0 * 11.96) + dens_states = self.mode.getDensityOfStates(e_list) + sum_states = self.mode.getSumOfStates(e_list) + for n in range(1, len(e_list)): + self.assertAlmostEqual(np.sum(dens_states[0:n + 1]) / sum_states[n], 1.0, 3) - def test_getDensityOfStates_classical(self): + def test_get_dsensity_of_states_classical(self): """ - Test the LinearRotor.getDensityOfStates() method using a classical - rotor. + Test the LinearRotor.getDensityOfStates() method using a classical rotor. """ self.mode.quantum = False - Tlist = numpy.array([300,400,500]) - Elist = numpy.arange(0, 4000.*11.96, 1.0*11.96) - for T in Tlist: - densStates = self.mode.getDensityOfStates(Elist) - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp) + t_list = np.array([300, 400, 500]) + e_list = np.arange(0, 4000. 
* 11.96, 1.0 * 11.96) + for temperature in t_list: + dens_states = self.mode.getDensityOfStates(e_list) + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-2 * q_exp) - def test_getDensityOfStates_quantum(self): + def test_get_dsensity_of_states_quantum(self): """ Test the LinearRotor.getDensityOfStates() method using a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,400,500]) - Elist = numpy.arange(0, 4000.*11.96, 2.0*11.96) - for T in Tlist: - densStates = self.mode.getDensityOfStates(Elist) - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp) + t_list = np.array([300, 400, 500]) + e_list = np.arange(0, 4000. * 11.96, 2.0 * 11.96) + for temperature in t_list: + dens_states = self.mode.getDensityOfStates(e_list) + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-2 * q_exp) def test_repr(self): """ @@ -250,14 +254,14 @@ def test_repr(self): self.assertEqual(self.mode.inertia.units, mode.inertia.units) self.assertEqual(self.mode.symmetry, mode.symmetry) self.assertEqual(self.mode.quantum, mode.quantum) - + def test_pickle(self): """ Test that a LinearRotor object can be pickled and unpickled with no loss of information. """ - import cPickle - mode = cPickle.loads(cPickle.dumps(self.mode,-1)) + import pickle + mode = pickle.loads(pickle.dumps(self.mode, -1)) self.assertAlmostEqual(self.mode.inertia.value, mode.inertia.value, 6) self.assertEqual(self.mode.inertia.units, mode.inertia.units) self.assertEqual(self.mode.symmetry, mode.symmetry) @@ -265,114 +269,116 @@ def test_pickle(self): ################################################################################ + class TestNonlinearRotor(unittest.TestCase): """ Contains unit tests of the NonlinearRotor class. """ - + def setUp(self): """ A function run before each unit test in this class. """ - self.inertia = numpy.array([3.415, 16.65, 20.07]) + self.inertia = np.array([3.415, 16.65, 20.07]) self.symmetry = 4 self.quantum = False self.mode = NonlinearRotor( - inertia = (self.inertia,"amu*angstrom^2"), - symmetry = self.symmetry, - quantum = self.quantum, + inertia=(self.inertia, "amu*angstrom^2"), + symmetry=self.symmetry, + quantum=self.quantum, ) - - def test_getRotationalConstant(self): + + def test_get_rotational_constant(self): """ Test getting the NonlinearRotor.rotationalConstant property. """ - Bexp = numpy.array([4.93635, 1.0125, 0.839942]) - Bact = self.mode.rotationalConstant.value_si - for B0, B in zip(Bexp, Bact): - self.assertAlmostEqual(B0, B, 4) - - def test_setRotationalConstant(self): + b_exp = np.array([4.93635, 1.0125, 0.839942]) + b_act = self.mode.rotationalConstant.value_si + for rotational_constant0, rotational_constant in zip(b_exp, b_act): + self.assertAlmostEqual(rotational_constant0, rotational_constant, 4) + + def test_set_rotational_constant(self): """ Test setting the NonlinearRotor.rotationalConstant property. 
""" - B = self.mode.rotationalConstant - B.value_si *= 2 - self.mode.rotationalConstant = B - Iexp = 0.5 * self.inertia - Iact = self.mode.inertia.value_si * constants.Na * 1e23 - for I0, I in zip(Iexp, Iact): - self.assertAlmostEqual(I0, I, 4) - - def test_getPartitionFunction_classical(self): + rotational_constant = self.mode.rotationalConstant + rotational_constant.value_si *= 2 + self.mode.rotationalConstant = rotational_constant + i_exp = 0.5 * self.inertia + i_act = self.mode.inertia.value_si * constants.Na * 1e23 + for inertia0, inertia in zip(i_exp, i_act): + self.assertAlmostEqual(inertia0, inertia, 4) + + def test_get_partition_function_classical(self): """ Test the NonlinearRotor.getPartitionFunction() method for a classical rotor. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([651.162, 1401.08, 3962.84, 7280.21, 11208.6]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) - - def test_getHeatCapacity_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([651.162, 1401.08, 3962.84, 7280.21, 11208.6]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + + def test_get_heat_capacity_classical(self): """ Test the NonlinearRotor.getHeatCapacity() method using a classical rotor. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp) - - def test_getEnthalpy_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + + def test_get_enthalpy_classical(self): """ Test the NonlinearRotor.getEnthalpy() method using a classical rotor. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) - - def test_getEntropy_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) + + def test_get_entropy_classical(self): """ Test the NonlinearRotor.getEntropy() method using a classical rotor. 
""" self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([7.97876, 8.74500, 9.78472, 10.3929, 10.8244]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp) - - def test_getSumOfStates_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([7.97876, 8.74500, 9.78472, 10.3929, 10.8244]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * s_exp) + + def test_get_sum_of_states_classical(self): """ Test the NonlinearRotor.getSumOfStates() method using a classical rotor. """ self.mode.quantum = False - Elist = numpy.arange(0, 1000*11.96, 1*11.96) - sumStates = self.mode.getSumOfStates(Elist) - densStates = self.mode.getDensityOfStates(Elist) - for n in range(10, len(Elist)): - self.assertTrue(0.8 < numpy.sum(densStates[0:n]) / sumStates[n] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n]), sumStates[n])) + e_list = np.arange(0, 1000 * 11.96, 1 * 11.96) + sum_states = self.mode.getSumOfStates(e_list) + dens_states = self.mode.getDensityOfStates(e_list) + for n in range(10, len(e_list)): + self.assertTrue(0.8 < np.sum(dens_states[0:n]) / sum_states[n] < 1.25, + '{0} != {1}'.format(np.sum(dens_states[0:n]), sum_states[n])) - def test_getDensityOfStates_classical(self): + def test_get_sensity_of_states_classical(self): """ Test the NonlinearRotor.getDensityOfStates() method using a classical rotor. """ self.mode.quantum = False - Elist = numpy.arange(0, 1000*11.96, 1*11.96) - densStates = self.mode.getDensityOfStates(Elist) - T = 100 - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp) + e_list = np.arange(0, 1000 * 11.96, 1 * 11.96) + dens_states = self.mode.getDensityOfStates(e_list) + temperature = 100 + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-2 * q_exp) def test_repr(self): """ @@ -382,33 +388,34 @@ def test_repr(self): mode = None exec('mode = {0!r}'.format(self.mode)) self.assertEqual(self.mode.inertia.value.shape, mode.inertia.value.shape) - for I0, I in zip(self.mode.inertia.value, mode.inertia.value): - self.assertAlmostEqual(I0, I, 6) + for inertia_0, inertia in zip(self.mode.inertia.value, mode.inertia.value): + self.assertAlmostEqual(inertia_0, inertia, 6) self.assertEqual(self.mode.inertia.units, mode.inertia.units) self.assertEqual(self.mode.symmetry, mode.symmetry) self.assertEqual(self.mode.quantum, mode.quantum) - + def test_pickle(self): """ Test that a NonlinearRotor object can be pickled and unpickled with no loss of information. 
""" - import cPickle - mode = cPickle.loads(cPickle.dumps(self.mode,-1)) + import pickle + mode = pickle.loads(pickle.dumps(self.mode, -1)) self.assertEqual(self.mode.inertia.value.shape, mode.inertia.value.shape) - for I0, I in zip(self.mode.inertia.value, mode.inertia.value): - self.assertAlmostEqual(I0, I, 6) + for inertia_0, inertia in zip(self.mode.inertia.value, mode.inertia.value): + self.assertAlmostEqual(inertia_0, inertia, 6) self.assertEqual(self.mode.inertia.units, mode.inertia.units) self.assertEqual(self.mode.symmetry, mode.symmetry) self.assertEqual(self.mode.quantum, mode.quantum) ################################################################################ + class TestKRotor(unittest.TestCase): """ Contains unit tests of the KRotor class. """ - + def setUp(self): """ A function run before each unit test in this class. @@ -417,189 +424,191 @@ def setUp(self): self.symmetry = 2 self.quantum = False self.mode = KRotor( - inertia = (self.inertia,"amu*angstrom^2"), - symmetry = self.symmetry, - quantum = self.quantum, + inertia=(self.inertia, "amu*angstrom^2"), + symmetry=self.symmetry, + quantum=self.quantum, ) - - def test_getRotationalConstant(self): + + def test_get_rotational_constant(self): """ Test getting the KRotor.rotationalConstant property. """ - Bexp = 1.434692 - Bact = self.mode.rotationalConstant.value_si - self.assertAlmostEqual(Bexp, Bact, 4) - - def test_setRotationalConstant(self): + b_exp = 1.434692 + b_act = self.mode.rotationalConstant.value_si + self.assertAlmostEqual(b_exp, b_act, 4) + + def test_set_rotational_constant(self): """ Test setting the KRotor.rotationalConstant property. """ - B = self.mode.rotationalConstant - B.value_si *= 2 - self.mode.rotationalConstant = B - Iexp = 0.5 * self.inertia - Iact = self.mode.inertia.value_si * constants.Na * 1e23 - self.assertAlmostEqual(Iexp, Iact, 4) - - def test_getLevelEnergy(self): + rotational_constant = self.mode.rotationalConstant + rotational_constant.value_si *= 2 + self.mode.rotationalConstant = rotational_constant + i_exp = 0.5 * self.inertia + i_act = self.mode.inertia.value_si * constants.Na * 1e23 + self.assertAlmostEqual(i_exp, i_act, 4) + + def test_get_level_energy(self): """ Test the KRotor.getLevelEnergy() method. """ - B = self.mode.rotationalConstant.value_si * constants.h * constants.c * 100. - B *= constants.Na - for J in range(0, 100): - Eexp = float(B * J * J) - Eact = float(self.mode.getLevelEnergy(J)) - if J == 0: - self.assertEqual(Eact, 0) + rotational_constant = self.mode.rotationalConstant.value_si * constants.h * constants.c * 100. + rotational_constant *= constants.Na + for j in range(0, 100): + e_exp = float(rotational_constant * j * j) + e_act = float(self.mode.getLevelEnergy(j)) + if j == 0: + self.assertEqual(e_act, 0) else: - self.assertAlmostEqual(Eexp, Eact, delta=1e-4*Eexp) - - def test_getLevelDegeneracy(self): + self.assertAlmostEqual(e_exp, e_act, delta=1e-4 * e_exp) + + def test_get_level_degeneracy(self): """ Test the KRotor.getLevelDegeneracy() method. """ - for J in range(0, 100): - gexp = 1 if J == 0 else 2 - gact = self.mode.getLevelDegeneracy(J) - self.assertEqual(gexp, gact, '{0} != {1}'.format(gact, gexp)) - - def test_getPartitionFunction_classical(self): + for j in range(0, 100): + g_exp = 1 if j == 0 else 2 + g_act = self.mode.getLevelDegeneracy(j) + self.assertEqual(g_exp, g_act, '{0} != {1}'.format(g_act, g_exp)) + + def test_get_partition_function_classical(self): """ Test the KRotor.getPartitionFunction() method for a classical rotor. 
""" self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([10.6839, 13.7929, 19.5060, 23.8899, 27.5857]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) - - def test_getPartitionFunction_quantum(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([10.6839, 13.7929, 19.5060, 23.8899, 27.5857]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + + def test_get_partition_function_quantum(self): """ Test the KRotor.getPartitionFunction() method for a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([10.6839, 13.7929, 19.5060, 23.8899, 27.5857]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) - - def test_getHeatCapacity_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([10.6839, 13.7929, 19.5060, 23.8899, 27.5857]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + + def test_get_heat_capacity_classical(self): """ Test the KRotor.getHeatCapacity() method using a classical rotor. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([0.5, 0.5, 0.5, 0.5, 0.5]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp) - - def test_getHeatCapacity_quantum(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([0.5, 0.5, 0.5, 0.5, 0.5]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + + def test_get_heat_capacity_quantum(self): """ Test the KRotor.getHeatCapacity() method using a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([0.5, 0.5, 0.5, 0.5, 0.5]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp) - - def test_getEnthalpy_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([0.5, 0.5, 0.5, 0.5, 0.5]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + + def test_get_enthalpy_classical(self): """ Test the KRotor.getEnthalpy() method using a classical rotor. 
""" self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([0.5, 0.5, 0.5, 0.5, 0.5]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) - - def test_getEnthalpy_quantum(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([0.5, 0.5, 0.5, 0.5, 0.5]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) + + def test_get_enthalpy_quantum(self): """ Test the KRotor.getEnthalpy() method using a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([0.5, 0.5, 0.5, 0.5, 0.5]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([0.5, 0.5, 0.5, 0.5, 0.5]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) - def test_getEntropy_classical(self): + def test_get_entropy_classical(self): """ Test the KRotor.getEntropy() method using a classical rotor. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([2.86874, 3.12415, 3.47072, 3.67346, 3.81730]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp) - - def test_getEntropy_quantum(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([2.86874, 3.12415, 3.47072, 3.67346, 3.81730]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * s_exp) + + def test_get_entropy_quantum(self): """ Test the KRotor.getEntropy() method using a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([2.86874, 3.12415, 3.47072, 3.67346, 3.81730]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([2.86874, 3.12415, 3.47072, 3.67346, 3.81730]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * s_exp) - def test_getSumOfStates_classical(self): + def test_get_sum_of_states_classical(self): """ Test the KRotor.getSumOfStates() method using a classical rotor. 
""" self.mode.quantum = False - Elist = numpy.arange(0, 1000*11.96, 1*11.96) - sumStates = self.mode.getSumOfStates(Elist) - densStates = self.mode.getDensityOfStates(Elist) - for n in range(10, len(Elist)): - self.assertTrue(0.75 < numpy.sum(densStates[0:n+1]) / sumStates[n] < 1.3333, '{0} != {1}'.format(numpy.sum(densStates[0:n+1]), sumStates[n])) + e_list = np.arange(0, 1000 * 11.96, 1 * 11.96) + sum_states = self.mode.getSumOfStates(e_list) + dens_states = self.mode.getDensityOfStates(e_list) + for n in range(10, len(e_list)): + self.assertTrue(0.75 < np.sum(dens_states[0:n + 1]) / sum_states[n] < 1.3333, + '{0} != {1}'.format(np.sum(dens_states[0:n + 1]), sum_states[n])) - def test_getSumOfStates_quantum(self): + def test_get_sum_of_states_quantum(self): """ Test the KRotor.getSumOfStates() method using a quantum rotor. """ self.mode.quantum = True - Elist = numpy.arange(0, 1000*11.96, 1*11.96) - sumStates = self.mode.getSumOfStates(Elist) - densStates = self.mode.getDensityOfStates(Elist) - for n in range(10, len(Elist)): - self.assertTrue(0.8 < numpy.sum(densStates[0:n+1]) / sumStates[n] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n+1]), sumStates[n])) + e_list = np.arange(0, 1000 * 11.96, 1 * 11.96) + sum_states = self.mode.getSumOfStates(e_list) + dens_states = self.mode.getDensityOfStates(e_list) + for n in range(10, len(e_list)): + self.assertTrue(0.8 < np.sum(dens_states[0:n + 1]) / sum_states[n] < 1.25, + '{0} != {1}'.format(np.sum(dens_states[0:n + 1]), sum_states[n])) - def test_getDensityOfStates_classical(self): + def test_get_density_of_states_classical(self): """ Test the KRotor.getDensityOfStates() method using a classical rotor. """ self.mode.quantum = False - Elist = numpy.arange(0, 3000*11.96, 0.05*11.96) - densStates = self.mode.getDensityOfStates(Elist) - T = 500 - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp) + e_list = np.arange(0, 3000 * 11.96, 0.05 * 11.96) + dens_states = self.mode.getDensityOfStates(e_list) + temperature = 500 + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-2 * q_exp) - def test_getDensityOfStates_quantum(self): + def test_get_density_of_states_quantum(self): """ Test the KRotor.getDensityOfStates() method using a quantum rotor. """ self.mode.quantum = True - Elist = numpy.arange(0, 4000*11.96, 2*11.96) - densStates = self.mode.getDensityOfStates(Elist) - T = 500 - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp) + e_list = np.arange(0, 4000 * 11.96, 2 * 11.96) + dens_states = self.mode.getDensityOfStates(e_list) + temperature = 500 + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-2 * q_exp) def test_repr(self): """ @@ -612,14 +621,14 @@ def test_repr(self): self.assertEqual(self.mode.inertia.units, mode.inertia.units) self.assertEqual(self.mode.symmetry, mode.symmetry) self.assertEqual(self.mode.quantum, mode.quantum) - + def test_pickle(self): """ Test that a KRotor object can be pickled and unpickled with no loss of information. 
""" - import cPickle - mode = cPickle.loads(cPickle.dumps(self.mode,-1)) + import pickle + mode = pickle.loads(pickle.dumps(self.mode, -1)) self.assertAlmostEqual(self.mode.inertia.value, mode.inertia.value, 6) self.assertEqual(self.mode.inertia.units, mode.inertia.units) self.assertEqual(self.mode.symmetry, mode.symmetry) @@ -627,11 +636,12 @@ def test_pickle(self): ################################################################################ + class TestSphericalTopRotor(unittest.TestCase): """ Contains unit tests of the SphericalTopRotor class. """ - + def setUp(self): """ A function run before each unit test in this class. @@ -640,191 +650,191 @@ def setUp(self): self.symmetry = 2 self.quantum = False self.mode = SphericalTopRotor( - inertia = (self.inertia,"amu*angstrom^2"), - symmetry = self.symmetry, - quantum = self.quantum, + inertia=(self.inertia, "amu*angstrom^2"), + symmetry=self.symmetry, + quantum=self.quantum, ) - - def test_getRotationalConstant(self): + + def test_get_rotational_constant(self): """ Test getting the SphericalTopRotor.rotationalConstant property. """ - Bexp = 1.434692 - Bact = self.mode.rotationalConstant.value_si - self.assertAlmostEqual(Bexp, Bact, 4) - - def test_setRotationalConstant(self): + b_exp = 1.434692 + b_act = self.mode.rotationalConstant.value_si + self.assertAlmostEqual(b_exp, b_act, 4) + + def test_set_rotational_constant(self): """ Test setting the SphericalTopRotor.rotationalConstant property. """ - B = self.mode.rotationalConstant - B.value_si *= 2 - self.mode.rotationalConstant = B - Iexp = 0.5 * self.inertia - Iact = self.mode.inertia.value_si * constants.Na * 1e23 - self.assertAlmostEqual(Iexp, Iact, 4) - - def test_getLevelEnergy(self): + rotational_constant = self.mode.rotationalConstant + rotational_constant.value_si *= 2 + self.mode.rotationalConstant = rotational_constant + i_exp = 0.5 * self.inertia + i_act = self.mode.inertia.value_si * constants.Na * 1e23 + self.assertAlmostEqual(i_exp, i_act, 4) + + def test_get_level_energy(self): """ Test the SphericalTopRotor.getLevelEnergy() method. """ - B = self.mode.rotationalConstant.value_si * constants.h * constants.c * 100. - B *= constants.Na - for J in range(0, 100): - Eexp = B * J * (J + 1) - Eact = self.mode.getLevelEnergy(J) - if J == 0: - self.assertEqual(Eact, 0) + rotational_constant = self.mode.rotationalConstant.value_si * constants.h * constants.c * 100. + rotational_constant *= constants.Na + for j in range(0, 100): + e_exp = rotational_constant * j * (j + 1) + e_act = self.mode.getLevelEnergy(j) + if j == 0: + self.assertEqual(e_act, 0) else: - self.assertAlmostEqual(Eexp, Eact, delta=1e-4*Eexp) - - def test_getLevelDegeneracy(self): + self.assertAlmostEqual(e_exp, e_act, delta=1e-4 * e_exp) + + def test_get_level_degeneracy(self): """ Test the SphericalTopRotor.getLevelDegeneracy() method. """ - for J in range(0, 100): - gexp = (2 * J + 1)**2 - gact = self.mode.getLevelDegeneracy(J) - self.assertEqual(gexp, gact, '{0} != {1}'.format(gact, gexp)) - - def test_getPartitionFunction_classical(self): + for j in range(0, 100): + g_exp = (2 * j + 1)**2 + g_act = self.mode.getLevelDegeneracy(j) + self.assertEqual(g_exp, g_act) + + def test_get_partition_function_classical(self): """ Test the SphericalTopRotor.getPartitionFunction() method for a classical rotor. 
""" self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([1552.74, 3340.97, 9449.69, 17360.2, 26727.8]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) - - def test_getPartitionFunction_quantum(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([1552.74, 3340.97, 9449.69, 17360.2, 26727.8]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + + def test_get_partition_function_quantum(self): """ Test the SphericalTopRotor.getPartitionFunction() method for a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([1555.42, 3344.42, 9454.57, 17366.2, 26734.7]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) - - def test_getHeatCapacity_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([1555.42, 3344.42, 9454.57, 17366.2, 26734.7]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + + def test_get_heat_capacity_classical(self): """ Test the SphericalTopRotor.getHeatCapacity() method using a classical rotor. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp) - - def test_getHeatCapacity_quantum(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + + def test_get_heat_capacity_quantum(self): """ Test the SphericalTopRotor.getHeatCapacity() method using a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp) - - def test_getEnthalpy_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + + def test_get_enthalpy_classical(self): """ Test the SphericalTopRotor.getEnthalpy() method using a classical rotor. 
""" self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) - - def test_getEnthalpy_quantum(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([1.5, 1.5, 1.5, 1.5, 1.5]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) + + def test_get_enthalpy_quantum(self): """ Test the SphericalTopRotor.getEnthalpy() method using a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([1.49828, 1.49897, 1.49948, 1.49966, 1.49974]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([1.49828, 1.49897, 1.49948, 1.49966, 1.49974]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) - def test_getEntropy_classical(self): + def test_get_entropy_classical(self): """ Test the SphericalTopRotor.getEntropy() method using a classical rotor. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([8.84778, 9.61402, 10.6537, 11.2619, 11.6935]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp) - - def test_getEntropy_quantum(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([8.84778, 9.61402, 10.6537, 11.2619, 11.6935]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * s_exp) + + def test_get_entropy_quantum(self): """ Test the SphericalTopRotor.getEntropy() method using a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([8.84778, 9.61402, 10.6537, 11.2619, 11.6935]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([8.84778, 9.61402, 10.6537, 11.2619, 11.6935]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * s_exp) - def test_getSumOfStates_classical(self): + def test_get_sum_of_states_classical(self): """ Test the SphericalTopRotor.getSumOfStates() method using a classical rotor. 
""" self.mode.quantum = False - Elist = numpy.arange(0, 2000*11.96, 1.0*11.96) - densStates = self.mode.getDensityOfStates(Elist) - sumStates = self.mode.getSumOfStates(Elist) - for n in range(20, len(Elist)): - self.assertAlmostEqual(numpy.sum(densStates[0:n+1]) / sumStates[n], 1.0, 1) + e_list = np.arange(0, 2000 * 11.96, 1.0 * 11.96) + dens_states = self.mode.getDensityOfStates(e_list) + sum_states = self.mode.getSumOfStates(e_list) + for n in range(20, len(e_list)): + self.assertAlmostEqual(np.sum(dens_states[0:n + 1]) / sum_states[n], 1.0, 1) - def test_getSumOfStates_quantum(self): + def test_get_sum_of_states_quantum(self): """ Test the SphericalTopRotor.getSumOfStates() method using a quantum rotor. """ self.mode.quantum = True - Elist = numpy.arange(0, 2000*11.96, 1.0*11.96) - densStates = self.mode.getDensityOfStates(Elist) - sumStates = self.mode.getSumOfStates(Elist) - for n in range(1, len(Elist)): - self.assertAlmostEqual(numpy.sum(densStates[0:n+1]) / sumStates[n], 1.0, 3) + e_list = np.arange(0, 2000 * 11.96, 1.0 * 11.96) + dens_states = self.mode.getDensityOfStates(e_list) + sum_states = self.mode.getSumOfStates(e_list) + for n in range(1, len(e_list)): + self.assertAlmostEqual(np.sum(dens_states[0:n + 1]) / sum_states[n], 1.0, 3) - def test_getDensityOfStates_classical(self): + def test_get_density_of_states_classical(self): """ Test the SphericalTopRotor.getDensityOfStates() method using a classical rotor. """ self.mode.quantum = False - Tlist = numpy.array([300,400,500]) - Elist = numpy.arange(0, 2000*11.96, 1.0*11.96) - for T in Tlist: - densStates = self.mode.getDensityOfStates(Elist) - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp) + t_list = np.array([300, 400, 500]) + e_list = np.arange(0, 2000 * 11.96, 1.0 * 11.96) + for temperature in t_list: + dens_states = self.mode.getDensityOfStates(e_list) + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-2 * q_exp) - def test_getDensityOfStates_quantum(self): + def test_get_density_of_states_quantum(self): """ Test the SphericalTopRotor.getDensityOfStates() method using a quantum rotor. """ self.mode.quantum = True - Tlist = numpy.array([300,400,500]) - Elist = numpy.arange(0, 4000*11.96, 2.0*11.96) - for T in Tlist: - densStates = self.mode.getDensityOfStates(Elist) - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp) + t_list = np.array([300, 400, 500]) + e_list = np.arange(0, 4000 * 11.96, 2.0 * 11.96) + for temperature in t_list: + dens_states = self.mode.getDensityOfStates(e_list) + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-2 * q_exp) def test_repr(self): """ @@ -837,14 +847,14 @@ def test_repr(self): self.assertEqual(self.mode.inertia.units, mode.inertia.units) self.assertEqual(self.mode.symmetry, mode.symmetry) self.assertEqual(self.mode.quantum, mode.quantum) - + def test_pickle(self): """ Test that a SphericalTopRotor object can be pickled and unpickled with no loss of information. 
""" - import cPickle - mode = cPickle.loads(cPickle.dumps(self.mode,-1)) + import pickle + mode = pickle.loads(pickle.dumps(self.mode, -1)) self.assertAlmostEqual(self.mode.inertia.value, mode.inertia.value, 6) self.assertEqual(self.mode.inertia.units, mode.inertia.units) self.assertEqual(self.mode.symmetry, mode.symmetry) diff --git a/rmgpy/statmech/schrodingerTest.py b/rmgpy/statmech/schrodingerTest.py index 64fdc594d3..2990bced58 100644 --- a/rmgpy/statmech/schrodingerTest.py +++ b/rmgpy/statmech/schrodingerTest.py @@ -29,97 +29,103 @@ ############################################################################### """ -This script contains unit tests of the :mod:`rmgpy.statmech.schrodinger` +This script contains unit tests of the :mod:`rmgpy.statmech.schrodinger` module. """ +from __future__ import division + import unittest -import numpy -from rmgpy.statmech.schrodinger import getPartitionFunction, getHeatCapacity, getEnthalpy, getEntropy, getDensityOfStates +import numpy as np + import rmgpy.constants as constants +from rmgpy.statmech.schrodinger import getDensityOfStates, getEnthalpy, getEntropy, \ + getHeatCapacity, getPartitionFunction ################################################################################ + class TestSchrodinger(unittest.TestCase): """ Contains unit tests of the various methods of the :mod:`schrodinger` module. The solution to the Schrodinger equation used for these tests is that of a linear rigid rotor with a rotational constant of 1 cm^-1. """ - + def setUp(self): """ A function run before each unit test in this class. """ self.B = 1.0 * 11.96 - self.energy = lambda J: self.B * J * (J + 1) - self.degeneracy = lambda J: 2 * J + 1 + self.energy = lambda j: self.B * j * (j + 1) + self.degeneracy = lambda j: 2 * j + 1 self.n0 = 0 - - def test_getPartitionFunction(self): + + def test_get_partition_function(self): """ Test the getPartitionFunction() method. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([208.8907, 347.9285, 695.5234, 1043.118, 1390.713]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = getPartitionFunction(T, self.energy, self.degeneracy, self.n0) - self.assertAlmostEqual(Qexp / Qact, 1.0, 4, '{0} != {1} within 4 figures'.format(Qexp, Qact)) - - def test_getHeatCapacity(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([208.8907, 347.9285, 695.5234, 1043.118, 1390.713]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = getPartitionFunction(temperature, self.energy, self.degeneracy, self.n0) + self.assertAlmostEqual(q_exp / q_act, 1.0, 4) + + def test_get_heat_capacity(self): """ Test the getHeatCapacity() method. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([1, 1, 1, 1, 1]) - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = getHeatCapacity(T, self.energy, self.degeneracy, self.n0) - self.assertAlmostEqual(Cvexp / Cvact, 1.0, 4, '{0} != {1} within 4 figures'.format(Cvexp, Cvact)) - - def test_getEnthalpy(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([1, 1, 1, 1, 1]) + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = getHeatCapacity(temperature, self.energy, self.degeneracy, self.n0) + self.assertAlmostEqual(cv_exp / cv_act, 1.0, 4) + + def test_get_enthalpy(self): """ Test the getEnthalpy() method. 
""" - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([0.9984012, 0.9990409, 0.9995205, 0.9996803, 0.9997603]) - for T, Hexp in zip(Tlist, Hexplist): - Hact = getEnthalpy(T, self.energy, self.degeneracy, self.n0) - self.assertAlmostEqual(Hexp / Hact, 1.0, 4, '{0} != {1} within 4 figures'.format(Hexp, Hact)) + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([0.9984012, 0.9990409, 0.9995205, 0.9996803, 0.9997603]) + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = getEnthalpy(temperature, self.energy, self.degeneracy, self.n0) + self.assertAlmostEqual(h_exp / h_act, 1.0, 4) - def test_getEntropy(self): + def test_get_entropy(self): """ Test the getEntropy() method. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([6.340212, 6.851038, 7.544185, 7.949650, 8.237332]) - for T, Sexp in zip(Tlist, Sexplist): - Sact = getEntropy(T, self.energy, self.degeneracy, self.n0) - self.assertAlmostEqual(Sexp / Sact, 1.0, 4, '{0} != {1} within 4 figures'.format(Sexp, Sact)) + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([6.340212, 6.851038, 7.544185, 7.949650, 8.237332]) + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = getEntropy(temperature, self.energy, self.degeneracy, self.n0) + self.assertAlmostEqual(s_exp / s_act, 1.0, 4) -# def test_getSumOfStates(self): +# def test_get_sum_of_states(self): # """ # Test the getSumOfStates() method. # """ -# Elist = numpy.arange(0, 10., 0.01) -# densStates = getDensityOfStates(Elist, self.energy, self.degeneracy, self.n0) -# sumStates = getSumOfStates(Elist, self.energy, self.degeneracy, self.n0) -# for n in range(1, len(Elist)): -# self.assertAlmostEqual(numpy.sum(densStates[0:n+1]) / sumStates[n], 1.0, 3) +# e_list = np.arange(0, 10., 0.01) +# dens_states = getDensityOfStates(e_list, self.energy, self.degeneracy, self.n0) +# sum_states = getSumOfStates(e_list, self.energy, self.degeneracy, self.n0) +# for n in range(1, len(e_list)): +# self.assertAlmostEqual(np.sum(dens_states[0:n + 1]) / sum_states[n], 1.0, 3) - def test_getDensityOfStates(self): + def test_get_density_of_states(self): """ Test the getDensityOfStates() method. """ - Tlist = numpy.array([300,400,500,600]) - Elist = numpy.arange(0, 40000., 20.) - for T in Tlist: - densStates = getDensityOfStates(Elist, self.energy, self.degeneracy, self.n0) - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = getPartitionFunction(T, self.energy, self.degeneracy, self.n0) - self.assertAlmostEqual(Qexp / Qact, 1.0, 2, '{0} != {1} within 2 figures'.format(Qexp, Qact)) - + t_list = np.array([300, 400, 500, 600]) + e_list = np.arange(0, 40000., 20.) + for temperature in t_list: + dens_states = getDensityOfStates(e_list, self.energy, self.degeneracy, self.n0) + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = getPartitionFunction(temperature, self.energy, self.degeneracy, self.n0) + self.assertAlmostEqual(q_exp / q_act, 1.0, 2) + ################################################################################ + if __name__ == '__main__': unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/rmgpy/statmech/torsionTest.py b/rmgpy/statmech/torsionTest.py index 8d6315e8e3..b4f54321ea 100644 --- a/rmgpy/statmech/torsionTest.py +++ b/rmgpy/statmech/torsionTest.py @@ -32,426 +32,433 @@ This script contains unit tests of the :mod:`rmgpy.statmech.torsion` module. 
""" +from __future__ import division + import unittest -import math -import numpy +import numpy as np -from rmgpy.statmech.torsion import HinderedRotor, FreeRotor import rmgpy.constants as constants +from rmgpy.statmech.torsion import FreeRotor, HinderedRotor ################################################################################ + class TestHinderedRotor(unittest.TestCase): """ Contains unit tests of the HinderedRotor class. """ - + def setUp(self): """ A function run before each unit test in this class. """ self.inertia = 1.56764 self.symmetry = 3 - self.barrier = 11.373 + self.barrier = 11.373 self.quantum = True self.mode = HinderedRotor( - inertia = (self.inertia,"amu*angstrom^2"), - symmetry = self.symmetry, - barrier = (self.barrier,"kJ/mol"), - fourier = ([ [4.58375, 0.841648, -5702.71, 6.02657, 4.7446], [0.726951, -0.677255, 0.207032, 0.553307, -0.503303] ],"J/mol"), - quantum = self.quantum, + inertia=(self.inertia, "amu*angstrom^2"), + symmetry=self.symmetry, + barrier=(self.barrier, "kJ/mol"), + fourier=([[4.58375, 0.841648, -5702.71, 6.02657, 4.7446], + [0.726951, -0.677255, 0.207032, 0.553307, -0.503303]], "J/mol"), + quantum=self.quantum, ) self.freemode = FreeRotor( - inertia = (self.inertia,"amu*angstrom^2"), - symmetry = self.symmetry, + inertia=(self.inertia, "amu*angstrom^2"), + symmetry=self.symmetry, ) - - def test_getRotationalConstant(self): + + def test_get_rotational_constant(self): """ Test getting the HinderedRotor.rotationalConstant property. """ - Bexp = 10.7535 - Bact = self.mode.rotationalConstant.value_si - self.assertAlmostEqual(Bexp, Bact, 4) - Bact2 = self.freemode.rotationalConstant.value_si - self.assertAlmostEqual(Bexp,Bact2,4) - - def test_setRotationalConstant(self): + b_exp = 10.7535 + b_act = self.mode.rotationalConstant.value_si + self.assertAlmostEqual(b_exp, b_act, 4) + b_act2 = self.freemode.rotationalConstant.value_si + self.assertAlmostEqual(b_exp, b_act2, 4) + + def test_set_rotational_constant(self): """ Test setting the HinderedRotor.rotationalConstant property. """ - B = self.mode.rotationalConstant - B.value_si *= 2 - self.mode.rotationalConstant = B - self.freemode.rotationalConstant = B - Iexp = 0.5 * self.inertia - Iact = self.mode.inertia.value_si * constants.Na * 1e23 - Iact2 = self.freemode.inertia.value_si * constants.Na * 1e23 - self.assertAlmostEqual(Iexp, Iact, 4) - self.assertAlmostEqual(Iexp, Iact2, 4) - - def test_getPotential_cosine(self): + rotational_constant = self.mode.rotationalConstant + rotational_constant.value_si *= 2 + self.mode.rotationalConstant = rotational_constant + self.freemode.rotationalConstant = rotational_constant + i_exp = 0.5 * self.inertia + i_act = self.mode.inertia.value_si * constants.Na * 1e23 + i_act2 = self.freemode.inertia.value_si * constants.Na * 1e23 + self.assertAlmostEqual(i_exp, i_act, 4) + self.assertAlmostEqual(i_exp, i_act2, 4) + + def test_get_potential_cosine(self): """ Test the HinderedRotor.getPotential() method for a cosine potential. """ self.mode.fourier = None - phi = numpy.arange(0.0, 2 * constants.pi + 0.0001, constants.pi / 24.) - V = numpy.zeros_like(phi) + phi = np.arange(0.0, 2 * constants.pi + 0.0001, constants.pi / 24.) + potential = np.zeros_like(phi) for i in range(phi.shape[0]): - V[i] = self.mode.getPotential(phi[i]) - - def test_getPotential_fourier(self): + potential[i] = self.mode.getPotential(phi[i]) + + def test_get_potential_fourier(self): """ Test the HinderedRotor.getPotential() method for a Fourier series potential. 
""" - phi = numpy.arange(0.0, 2 * constants.pi + 0.0001, constants.pi / 24.) - V = numpy.zeros_like(phi) + phi = np.arange(0.0, 2 * constants.pi + 0.0001, constants.pi / 24.) + potential = np.zeros_like(phi) for i in range(phi.shape[0]): - V[i] = self.mode.getPotential(phi[i]) - - def test_getPartitionFunction_free(self): + potential[i] = self.mode.getPotential(phi[i]) + + def test_get_partition_function_free(self): """ - Test the FreeRotor.getPartitionFunction() method + Test the FreeRotor.getPartitionFunction() method """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.sqrt(8*numpy.pi**3*constants.kB*Tlist*self.freemode.inertia.value_si)/(self.symmetry*constants.h) - for T, Qexp in zip(Tlist,Qexplist): - Qact = self.freemode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp,Qact,delta=1e-4*Qexp) - - def test_getPartitionFunction_classical_cosine(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.sqrt(8 * np.pi**3 * constants.kB * t_list + * self.freemode.inertia.value_si) / (self.symmetry * constants.h) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.freemode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + + def test_get_partition_function_classical_cosine(self): """ Test the HinderedRotor.getPartitionFunction() method for a cosine potential in the classical limit. """ self.mode.quantum = False self.mode.fourier = None - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([0.741953, 1.30465, 2.68553, 3.88146, 4.91235]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) - - def test_getPartitionFunction_classical_fourier(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([0.741953, 1.30465, 2.68553, 3.88146, 4.91235]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + + def test_get_partition_function_classical_fourier(self): """ Test the HinderedRotor.getPartitionFunction() method for a Fourier series potential in the classical limit. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([0.745526, 1.30751, 2.68722, 3.88258, 4.91315]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) - - def test_getPartitionFunction_quantum_cosine(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([0.745526, 1.30751, 2.68722, 3.88258, 4.91315]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + + def test_get_partition_function_quantum_cosine(self): """ Test the HinderedRotor.getPartitionFunction() method for a cosine potential in the quantum limit. 
""" self.mode.quantum = True self.mode.fourier = None - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([1.39947, 1.94793, 3.30171, 4.45856, 5.45188]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) - - def test_getPartitionFunction_quantum_fourier(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([1.39947, 1.94793, 3.30171, 4.45856, 5.45188]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + + def test_get_partition_function_quantum_fourier(self): """ Test the HinderedRotor.getPartitionFunction() method for a Fourier series potential in the quantum limit. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([1.39364, 1.94182, 3.29509, 4.45205, 5.44563]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=5e-4*Qexp) - - def test_getHeatCapacity_free(self): - """ - Test the FreeRotor.getHeatCapacity() method - """ - Cvexp = constants.R/2.0 - Tlist = numpy.array([300,500,1000,1500,2000]) - for T in Tlist: - Cvact = self.freemode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp,Cvact,delta=1e-4*Cvexp) - - def test_getHeatCapacity_classical_cosine(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([1.39364, 1.94182, 3.29509, 4.45205, 5.44563]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=5e-4 * q_exp) + + def test_get_heat_capacity_free(self): + """ + Test the FreeRotor.getHeatCapacity() method + """ + cv_exp = constants.R/2.0 + t_list = np.array([300, 500, 1000, 1500, 2000]) + for temperature in t_list: + cv_act = self.freemode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + + def test_get_heat_capacity_classical_cosine(self): """ Test the HinderedRotor.getHeatCapacity() method using a cosine potential in the classical limit. """ self.mode.quantum = False self.mode.fourier = None - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([1.01741, 0.951141, 0.681919, 0.589263, 0.552028]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp) - - def test_getHeatCapacity_classical_fourier(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([1.01741, 0.951141, 0.681919, 0.589263, 0.552028]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + + def test_get_heat_capacity_classical_fourier(self): """ Test the HinderedRotor.getHeatCapacity() method using a Fourier series potential in the classical limit. 
""" self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([1.17682, 1.01369, 0.698588, 0.596797, 0.556293]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp) - - def test_getHeatCapacity_quantum_cosine(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([1.17682, 1.01369, 0.698588, 0.596797, 0.556293]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + + def test_get_heat_capacity_quantum_cosine(self): """ Test the HinderedRotor.getHeatCapacity() method using a cosine potential in the quantum limit. """ self.mode.quantum = True self.mode.fourier = None - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([1.01271, 0.945341, 0.684451, 0.591949, 0.554087]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp) - - def test_getHeatCapacity_quantum_fourier(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([1.01271, 0.945341, 0.684451, 0.591949, 0.554087]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + + def test_get_heat_capacity_quantum_fourier(self): """ Test the HinderedRotor.getHeatCapacity() method using a Fourier series potential in the quantum limit. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([1.01263, 0.946618, 0.685345, 0.592427, 0.554374]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-3*Cvexp) - - def test_getEnthalpy_free(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([1.01263, 0.946618, 0.685345, 0.592427, 0.554374]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-3 * cv_exp) + + def test_get_enthalpy_free(self): """ Test the FreeRotor.getEnthalpy() method """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = constants.R*Tlist/2.0 - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.freemode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) - - def test_getEnthalpy_classical_cosine(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = constants.R * t_list / 2.0 + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.freemode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) + + def test_get_enthalpy_classical_cosine(self): """ Test the HinderedRotor.getEnthalpy() method using a cosine potential in the classical limit. 
""" self.mode.quantum = False self.mode.fourier = None - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([1.09556, 1.09949, 0.962738, 0.854617, 0.784333]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) - - def test_getEnthalpy_classical_fourier(self): - """ - Test the HinderedRotor.getEnthalpy() method using a Fourier series + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([1.09556, 1.09949, 0.962738, 0.854617, 0.784333]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) + + def test_get_enthalpy_classical_fourier(self): + """ + Test the HinderedRotor.getEnthalpy() method using a Fourier series potential in the classical limit. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([1.08882, 1.09584, 0.961543, 0.854054, 0.784009]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([1.08882, 1.09584, 0.961543, 0.854054, 0.784009]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) - def test_getEnthalpy_quantum_cosine(self): + def test_get_enthalpy_quantum_cosine(self): """ Test the HinderedRotor.getEnthalpy() method using a cosine potential in the quantum limit. """ self.mode.quantum = True self.mode.fourier = None - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([0.545814, 0.727200, 0.760918, 0.717496, 0.680767]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) - - def test_getEnthalpy_quantum_fourier(self): - """ - Test the HinderedRotor.getEnthalpy() method using a Fourier series + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([0.545814, 0.727200, 0.760918, 0.717496, 0.680767]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) + + def test_get_enthalpy_quantum_fourier(self): + """ + Test the HinderedRotor.getEnthalpy() method using a Fourier series potential in the quantum limit. 
""" self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([0.548251, 0.728974, 0.762396, 0.718702, 0.681764]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-3*Hexp) - - def test_getEntropy_free(self): - Tlist = numpy.array([300,500,1000,1500,2000]) - Q = numpy.array([self.freemode.getPartitionFunction(T) for T in Tlist]) - Sexplist = constants.R*(numpy.log(Q)+.5) - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.freemode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp) - - def test_getEntropy_classical_cosine(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([0.548251, 0.728974, 0.762396, 0.718702, 0.681764]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-3 * h_exp) + + def test_get_entropy_free(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + pf = np.array([self.freemode.getPartitionFunction(temperature) for temperature in t_list]) + s_exp_list = constants.R * (np.log(pf) + .5) + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.freemode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * s_exp) + + def test_get_entropy_classical_cosine(self): """ Test the HinderedRotor.getEntropy() method using a cosine potential in the classical limit. """ self.mode.quantum = False self.mode.fourier = None - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([0.797089, 1.36543, 1.95062, 2.21083, 2.37608]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([0.797089, 1.36543, 1.95062, 2.21083, 2.37608]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * s_exp) - def test_getEntropy_classical_fourier(self): + def test_get_entropy_classical_fourier(self): """ - Test the HinderedRotor.getEntropy() method using a Fourier series + Test the HinderedRotor.getEntropy() method using a Fourier series potential in the classical limit. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([0.795154, 1.36396, 1.95005, 2.21055, 2.37592]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([0.795154, 1.36396, 1.95005, 2.21055, 2.37592]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * s_exp) - def test_getEntropy_quantum_cosine(self): + def test_get_entropy_quantum_cosine(self): """ Test the HinderedRotor.getEntropy() method using a cosine potential in the quantum limit. 
""" self.mode.quantum = True self.mode.fourier = None - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([0.881906, 1.39397, 1.95536, 2.21232, 2.37673]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp) - - def test_getEntropy_quantum_fourier(self): - """ - Test the HinderedRotor.getEntropy() method using a Fourier series + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([0.881906, 1.39397, 1.95536, 2.21232, 2.37673]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * s_exp) + + def test_get_entropy_quantum_fourier(self): + """ + Test the HinderedRotor.getEntropy() method using a Fourier series potential in the quantum limit. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([0.880170, 1.39260, 1.95483, 2.21207, 2.37658]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-3*Sexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([0.880170, 1.39260, 1.95483, 2.21207, 2.37658]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-3 * s_exp) - def test_getSumOfStates_classical_cosine(self): + def test_get_sum_of_states_classical_cosine(self): """ Test the HinderedRotor.getSumOfStates() method using a cosine potential in the classical limit. """ self.mode.quantum = False self.mode.fourier = None - Elist = numpy.arange(0, 10000*11.96, 1*11.96) - sumStates = self.mode.getSumOfStates(Elist) - densStates = self.mode.getDensityOfStates(Elist) - for n in range(10, len(Elist)): - self.assertTrue(0.8 < numpy.sum(densStates[0:n]) / sumStates[n-1] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n]), sumStates[n])) + e_list = np.arange(0, 10000 * 11.96, 1 * 11.96) + sum_states = self.mode.getSumOfStates(e_list) + dens_states = self.mode.getDensityOfStates(e_list) + for n in range(10, len(e_list)): + self.assertTrue(0.8 < np.sum(dens_states[0:n]) / sum_states[n - 1] < 1.25, + '{0} != {1}'.format(np.sum(dens_states[0:n]), sum_states[n])) - def test_getSumOfStates_classical_fourier(self): + def test_get_sum_of_states_classical_fourier(self): """ Test the HinderedRotor.getSumOfStates() method using a Fourier series potential in the classical limit. """ self.mode.quantum = False - Elist = numpy.arange(0, 10000*11.96, 1*11.96) + e_list = np.arange(0, 10000 * 11.96, 1 * 11.96) try: - sumStates = self.mode.getSumOfStates(Elist) + sum_states = self.mode.getSumOfStates(e_list) self.fail('NotImplementedError not raised by HinderedRotor.getSumOfStates()') except NotImplementedError: pass - - def test_getSumOfStates_quantum_cosine(self): + + def test_get_sum_of_states_quantum_cosine(self): """ Test the HinderedRotor.getSumOfStates() method using a cosine potential in the quantum limit. 
""" self.mode.quantum = True self.mode.fourier = None - Elist = numpy.arange(0, 10000*11.96, 1*11.96) - sumStates = self.mode.getSumOfStates(Elist) - densStates = self.mode.getDensityOfStates(Elist) - for n in range(10, len(Elist)): - self.assertTrue(0.8 < numpy.sum(densStates[0:n]) / sumStates[n-1] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n]), sumStates[n])) + e_list = np.arange(0, 10000 * 11.96, 1 * 11.96) + sum_states = self.mode.getSumOfStates(e_list) + dens_states = self.mode.getDensityOfStates(e_list) + for n in range(10, len(e_list)): + self.assertTrue(0.8 < np.sum(dens_states[0:n]) / sum_states[n - 1] < 1.25, + '{0} != {1}'.format(np.sum(dens_states[0:n]), sum_states[n])) - def test_getSumOfStates_quantum_fourier(self): + def test_get_sum_of_states_quantum_fourier(self): """ Test the HinderedRotor.getSumOfStates() method using a Fourier series potential in the quantum limit. """ self.mode.quantum = True - Elist = numpy.arange(0, 10000*11.96, 1*11.96) - sumStates = self.mode.getSumOfStates(Elist) - densStates = self.mode.getDensityOfStates(Elist) - for n in range(10, len(Elist)): - self.assertTrue(0.8 < numpy.sum(densStates[0:n]) / sumStates[n-1] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n]), sumStates[n])) - - def test_getDensityOfStates_classical_cosine(self): + e_list = np.arange(0, 10000 * 11.96, 1 * 11.96) + sum_states = self.mode.getSumOfStates(e_list) + dens_states = self.mode.getDensityOfStates(e_list) + for n in range(10, len(e_list)): + self.assertTrue(0.8 < np.sum(dens_states[0:n]) / sum_states[n - 1] < 1.25, + '{0} != {1}'.format(np.sum(dens_states[0:n]), sum_states[n])) + + def test_get_density_of_states_classical_cosine(self): """ Test the HinderedRotor.getDensityOfStates() method using a classical potential in the classical limit. """ self.mode.quantum = False self.mode.fourier = None - Elist = numpy.arange(0, 10000*11.96, 1*11.96) - densStates = self.mode.getDensityOfStates(Elist) - T = 100 - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp) + e_list = np.arange(0, 10000 * 11.96, 1 * 11.96) + dens_states = self.mode.getDensityOfStates(e_list) + temperature = 100 + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-2 * q_exp) - def test_getDensityOfStates_classical_fourier(self): + def test_get_density_of_states_classical_fourier(self): """ - Test the HinderedRotor.getDensityOfStates() method using a Fourier + Test the HinderedRotor.getDensityOfStates() method using a Fourier series potential in the classical limit. """ self.mode.quantum = False - Elist = numpy.arange(0, 10000*11.96, 1*11.96) + e_list = np.arange(0, 10000 * 11.96, 1 * 11.96) try: - densStates = self.mode.getDensityOfStates(Elist) + dens_states = self.mode.getDensityOfStates(e_list) self.fail('NotImplementedError not raised by HinderedRotor.getDensityOfStates()') except NotImplementedError: pass - - def test_getDensityOfStates_quantum_cosine(self): + + def test_get_density_of_states_quantum_cosine(self): """ Test the HinderedRotor.getDensityOfStates() method using a classical potential in the quantum limit. 
""" self.mode.quantum = True self.mode.fourier = None - Elist = numpy.arange(0, 10000*11.96, 1*11.96) - densStates = self.mode.getDensityOfStates(Elist) - T = 100 - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp) + e_list = np.arange(0, 10000 * 11.96, 1 * 11.96) + dens_states = self.mode.getDensityOfStates(e_list) + temperature = 100 + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-2 * q_exp) - def test_getDensityOfStates_quantum_fourier(self): + def test_get_density_of_states_quantum_fourier(self): """ - Test the HinderedRotor.getDensityOfStates() method using a Fourier + Test the HinderedRotor.getDensityOfStates() method using a Fourier series potential in the quantum limit. """ self.mode.quantum = True - Elist = numpy.arange(0, 10000*11.96, 1*11.96) - densStates = self.mode.getDensityOfStates(Elist) - T = 100 - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-2*Qexp) + e_list = np.arange(0, 10000 * 11.96, 1 * 11.96) + dens_states = self.mode.getDensityOfStates(e_list) + temperature = 100 + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-2 * q_exp) def test_repr(self): """ @@ -470,14 +477,14 @@ def test_repr(self): self.assertEqual(self.mode.barrier.units, mode.barrier.units) self.assertEqual(self.mode.symmetry, mode.symmetry) self.assertEqual(self.mode.quantum, mode.quantum) - + def test_pickle(self): """ Test that a HinderedRotor object can be pickled and unpickled with no loss of information. """ - import cPickle - mode = cPickle.loads(cPickle.dumps(self.mode,-1)) + import pickle + mode = pickle.loads(pickle.dumps(self.mode, -1)) self.assertAlmostEqual(self.mode.inertia.value, mode.inertia.value, 6) self.assertEqual(self.mode.inertia.units, mode.inertia.units, 6) self.assertEqual(self.mode.fourier.value.shape, mode.fourier.value.shape) @@ -488,8 +495,9 @@ def test_pickle(self): self.assertEqual(self.mode.barrier.units, mode.barrier.units) self.assertEqual(self.mode.symmetry, mode.symmetry) self.assertEqual(self.mode.quantum, mode.quantum) - + ################################################################################ + if __name__ == '__main__': unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/rmgpy/statmech/translationTest.py b/rmgpy/statmech/translationTest.py index 28788b57bb..03aca886fb 100644 --- a/rmgpy/statmech/translationTest.py +++ b/rmgpy/statmech/translationTest.py @@ -32,19 +32,23 @@ This script contains unit tests of the :mod:`rmgpy.statmech.translation` module. """ +from __future__ import division + import unittest -import numpy -from rmgpy.statmech.translation import IdealGasTranslation +import numpy as np + import rmgpy.constants as constants +from rmgpy.statmech.translation import IdealGasTranslation ################################################################################ + class TestIdealGasTranslation(unittest.TestCase): """ Contains unit tests of the IdealGasTranslation class. """ - + def setUp(self): """ A function run before each unit test in this class. 
@@ -52,76 +56,77 @@ def setUp(self): self.mass = 32.0 self.quantum = False self.mode = IdealGasTranslation( - mass = (self.mass,"amu"), - quantum = self.quantum, + mass=(self.mass, "amu"), + quantum=self.quantum, ) - - def test_getPartitionFunction_classical(self): + + def test_get_partition_function_classical(self): """ Test the IdealGasTranslation.getPartitionFunction() method for a classical translator. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([7.22597e+06, 2.59130e+07, 1.46586e+08, 4.03944e+08, 8.29217e+08]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) - - def test_getHeatCapacity_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([7.22597e+06, 2.59130e+07, 1.46586e+08, 4.03944e+08, 8.29217e+08]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + + def test_get_heat_capacity_classical(self): """ Test the IdealGasTranslation.getHeatCapacity() method using a classical translator. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([2.5, 2.5, 2.5, 2.5, 2.5]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp) - - def test_getEnthalpy_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([2.5, 2.5, 2.5, 2.5, 2.5]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + + def test_get_enthalpy_classical(self): """ Test the IdealGasTranslation.getEnthalpy() method using a classical translator. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([2.5, 2.5, 2.5, 2.5, 2.5]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) - - def test_getEntropy_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([2.5, 2.5, 2.5, 2.5, 2.5]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) + + def test_get_entropy_classical(self): """ Test the IdealGasTranslation.getEntropy() method using a classical translator. """ - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([18.2932, 19.5703, 21.3031, 22.3168, 23.0360]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp) - - def test_getSumOfStates_classical(self): + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([18.2932, 19.5703, 21.3031, 22.3168, 23.0360]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * s_exp) + + def test_get_sum_of_states_classical(self): """ Test the IdealGasTranslation.getSumOfStates() method using a classical translator. 
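# Note (added): the expected translational partition functions above are consistent
# with the textbook ideal-gas expression evaluated per molecule in the standard-state
# volume, q = (2*pi*m*kB*T/h^2)**1.5 * kB*T/P0.  Assuming a reference pressure of
# 1 atm (101325 Pa), this reproduces the first tabulated value for a 32 amu gas at
# 300 K; this is an illustrative check, not a re-derivation of RMG's implementation:
import numpy as np
import scipy.constants as const

def translational_partition_function(mass_amu, temperature, p0=101325.0):
    m = mass_amu * const.atomic_mass                                             # kg
    per_volume = (2.0 * np.pi * m * const.k * temperature / const.h ** 2) ** 1.5  # m^-3
    return per_volume * const.k * temperature / p0                               # dimensionless

print(translational_partition_function(32.0, 300.0))  # ~7.23e6, cf. 7.22597e+06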
""" - Elist = numpy.arange(0, 10000*11.96, 1*11.96) - sumStates = self.mode.getSumOfStates(Elist) - densStates = self.mode.getDensityOfStates(Elist) - for n in range(10, len(Elist)): - self.assertTrue(0.8 < numpy.sum(densStates[0:n]) / sumStates[n-1] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n]), sumStates[n])) + e_list = np.arange(0, 10000 * 11.96, 1 * 11.96) + sum_states = self.mode.getSumOfStates(e_list) + dens_states = self.mode.getDensityOfStates(e_list) + for n in range(10, len(e_list)): + self.assertTrue(0.8 < np.sum(dens_states[0:n]) / sum_states[n - 1] < 1.25, + '{0} != {1}'.format(np.sum(dens_states[0:n]), sum_states[n])) - def test_getDensityOfStates_classical(self): + def test_get_density_of_states_classical(self): """ - Test the IdealGasTranslation.getDensityOfStates() method using a + Test the IdealGasTranslation.getDensityOfStates() method using a classical translator. """ - Elist = numpy.arange(0, 10000*11.96, 1*11.96) - densStates = self.mode.getDensityOfStates(Elist) - T = 100 - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-6*Qexp) + e_list = np.arange(0, 10000 * 11.96, 1 * 11.96) + dens_states = self.mode.getDensityOfStates(e_list) + temperature = 100 + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-6 * q_exp) def test_repr(self): """ @@ -133,14 +138,14 @@ def test_repr(self): self.assertAlmostEqual(self.mode.mass.value, mode.mass.value, 6) self.assertEqual(self.mode.mass.units, mode.mass.units) self.assertEqual(self.mode.quantum, mode.quantum) - + def test_pickle(self): """ Test that a IdealGasTranslation object can be pickled and unpickled with no loss of information. """ - import cPickle - mode = cPickle.loads(cPickle.dumps(self.mode,-1)) + import pickle + mode = pickle.loads(pickle.dumps(self.mode, -1)) self.assertAlmostEqual(self.mode.mass.value, mode.mass.value, 6) self.assertEqual(self.mode.mass.units, mode.mass.units) self.assertEqual(self.mode.quantum, mode.quantum) diff --git a/rmgpy/statmech/vibrationTest.py b/rmgpy/statmech/vibrationTest.py index ad34191491..53fbd3de9b 100644 --- a/rmgpy/statmech/vibrationTest.py +++ b/rmgpy/statmech/vibrationTest.py @@ -32,102 +32,105 @@ This script contains unit tests of the :mod:`rmgpy.statmech.vibration` module. """ +from __future__ import division + import unittest -import math -import numpy -from rmgpy.statmech.vibration import HarmonicOscillator +import numpy as np + import rmgpy.constants as constants +from rmgpy.statmech.vibration import HarmonicOscillator ################################################################################ + class TestHarmonicOscillator(unittest.TestCase): """ Contains unit tests of the HarmonicOscillator class. """ - + def setUp(self): """ A function run before each unit test in this class. """ - self.frequencies = numpy.array([500, 1000, 2000]) + self.frequencies = np.array([500, 1000, 2000]) self.quantum = True self.mode = HarmonicOscillator( - frequencies = (self.frequencies,"cm^-1"), - quantum = self.quantum, + frequencies=(self.frequencies, 'cm^-1'), + quantum=self.quantum, ) - + def test_getPartitionFunction_classical(self): """ Test the HarmonicOscillator.getPartitionFunction() method for a set of classical oscillators. 
""" self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([0.00906536, 0.04196925, 0.335754, 1.13316978, 2.68603]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) - + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([0.00906536, 0.04196925, 0.335754, 1.13316978, 2.68603]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + def test_getPartitionFunction_quantum(self): """ Test the HarmonicOscillator.getPartitionFunction() method for a set of quantum oscillators. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Qexplist = numpy.array([1.10923, 1.39358, 2.70819, 4.98825, 8.459780]) - for T, Qexp in zip(Tlist, Qexplist): - Qact = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) - + t_list = np.array([300, 500, 1000, 1500, 2000]) + q_exp_list = np.array([1.10923, 1.39358, 2.70819, 4.98825, 8.459780]) + for temperature, q_exp in zip(t_list, q_exp_list): + q_act = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) + def test_getHeatCapacity_classical(self): """ Test the HarmonicOscillator.getHeatCapacity() method using a set of classical oscillators. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([3, 3, 3, 3, 3]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp) - + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([3, 3, 3, 3, 3]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + def test_getHeatCapacity_quantum(self): """ Test the HarmonicOscillator.getHeatCapacity() method using a set of quantum oscillators. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Cvexplist = numpy.array([0.832004, 1.47271, 2.32513, 2.65024, 2.79124]) * constants.R - for T, Cvexp in zip(Tlist, Cvexplist): - Cvact = self.mode.getHeatCapacity(T) - self.assertAlmostEqual(Cvexp, Cvact, delta=1e-4*Cvexp) - + t_list = np.array([300, 500, 1000, 1500, 2000]) + cv_exp_list = np.array([0.832004, 1.47271, 2.32513, 2.65024, 2.79124]) * constants.R + for temperature, cv_exp in zip(t_list, cv_exp_list): + cv_act = self.mode.getHeatCapacity(temperature) + self.assertAlmostEqual(cv_exp, cv_act, delta=1e-4 * cv_exp) + def test_getEnthalpy_classical(self): """ Test the HarmonicOscillator.getEnthalpy() method using a set of classical oscillators. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([3, 3, 3, 3, 3]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) - + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([3, 3, 3, 3, 3]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) + def test_getEnthalpy_quantum(self): """ Test the HarmonicOscillator.getEnthalpy() method using a set of quantum oscillators. 
""" self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Hexplist = numpy.array([0.280395, 0.637310, 1.30209, 1.70542, 1.96142]) * constants.R * Tlist - for T, Hexp in zip(Tlist, Hexplist): - Hact = self.mode.getEnthalpy(T) - self.assertAlmostEqual(Hexp, Hact, delta=1e-4*Hexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + h_exp_list = np.array([0.280395, 0.637310, 1.30209, 1.70542, 1.96142]) * constants.R * t_list + for temperature, h_exp in zip(t_list, h_exp_list): + h_act = self.mode.getEnthalpy(temperature) + self.assertAlmostEqual(h_exp, h_act, delta=1e-4 * h_exp) def test_getEntropy_classical(self): """ @@ -135,80 +138,82 @@ def test_getEntropy_classical(self): classical oscillators. """ self.mode.quantum = False - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([-1.70329, -0.170818, 1.90862, 3.12502, 3.98807]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*abs(Sexp)) - + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([-1.70329, -0.170818, 1.90862, 3.12502, 3.98807]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * abs(s_exp)) + def test_getEntropy_quantum(self): """ Test the HarmonicOscillator.getEntropy() method using a set of quantum oscillators. """ self.mode.quantum = True - Tlist = numpy.array([300,500,1000,1500,2000]) - Sexplist = numpy.array([0.384065, 0.969182, 2.29837, 3.31251, 4.09675]) * constants.R - for T, Sexp in zip(Tlist, Sexplist): - Sact = self.mode.getEntropy(T) - self.assertAlmostEqual(Sexp, Sact, delta=1e-4*Sexp) + t_list = np.array([300, 500, 1000, 1500, 2000]) + s_exp_list = np.array([0.384065, 0.969182, 2.29837, 3.31251, 4.09675]) * constants.R + for temperature, s_exp in zip(t_list, s_exp_list): + s_act = self.mode.getEntropy(temperature) + self.assertAlmostEqual(s_exp, s_act, delta=1e-4 * s_exp) def test_getSumOfStates_classical(self): """ - Test the HarmonicOscillator.getSumOfStates() method using a set of + Test the HarmonicOscillator.getSumOfStates() method using a set of classical oscillators. """ self.mode.quantum = False - self.mode.frequencies = ([500, 1000],"cm^-1") - Elist = numpy.arange(0, 10000*11.96, 1*11.96) - sumStates = self.mode.getSumOfStates(Elist) - densStates = self.mode.getDensityOfStates(Elist) - for n in range(10, len(Elist)): - self.assertTrue(0.8 < numpy.sum(densStates[0:n]) / sumStates[n] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n]), sumStates[n])) - + self.mode.frequencies = ([500, 1000], 'cm^-1') + e_list = np.arange(0, 10000 * 11.96, 1 * 11.96) + sum_states = self.mode.getSumOfStates(e_list) + dens_states = self.mode.getDensityOfStates(e_list) + for n in range(10, len(e_list)): + self.assertTrue(0.8 < np.sum(dens_states[0:n]) / sum_states[n] < 1.25, + '{0} != {1}'.format(np.sum(dens_states[0:n]), sum_states[n])) + def test_getSumOfStates_quantum(self): """ - Test the HarmonicOscillator.getSumOfStates() method using a set of + Test the HarmonicOscillator.getSumOfStates() method using a set of quantum oscillators. 
""" self.mode.quantum = True - Elist = numpy.arange(0, 10000*11.96, 1*11.96) - sumStates = self.mode.getSumOfStates(Elist) - densStates = self.mode.getDensityOfStates(Elist) - for n in range(1, len(Elist)): - if sumStates[n-1] == 0: - self.assertTrue(numpy.sum(densStates[0:n]) == 0, '{0} != {1}'.format(numpy.sum(densStates[0:n]), 0)) + e_list = np.arange(0, 10000 * 11.96, 1 * 11.96) + sum_states = self.mode.getSumOfStates(e_list) + dens_states = self.mode.getDensityOfStates(e_list) + for n in range(1, len(e_list)): + if sum_states[n - 1] == 0: + self.assertEqual(np.sum(dens_states[0:n]), 0) else: - self.assertTrue(0.8 < numpy.sum(densStates[0:n]) / sumStates[n-1] < 1.25, '{0} != {1}'.format(numpy.sum(densStates[0:n]), sumStates[n])) + self.assertTrue(0.8 < np.sum(dens_states[0:n]) / sum_states[n - 1] < 1.25, + '{0} != {1}'.format(np.sum(dens_states[0:n]), sum_states[n])) def test_getDensityOfStates_classical(self): """ - Test the HarmonicOscillator.getDensityOfStates() method using a set of + Test the HarmonicOscillator.getDensityOfStates() method using a set of classical oscillators. """ self.mode.quantum = False - factor = constants.h * constants.c * 100. * constants.Na # cm^-1 to J/mol - Elist = numpy.arange(0, 10000*factor, 1*factor) - densStates = self.mode.getDensityOfStates(Elist) - T = 100 - Qact = numpy.sum(densStates * numpy.exp(-Elist / constants.R / T)) - Qexp = self.mode.getPartitionFunction(T) - self.assertAlmostEqual(Qexp, Qact, delta=1e-4*Qexp) + factor = constants.h * constants.c * 100. * constants.Na # cm^-1 to J/mol + e_list = np.arange(0, 10000 * factor, 1 * factor) + dens_states = self.mode.getDensityOfStates(e_list) + temperature = 100 + q_act = np.sum(dens_states * np.exp(-e_list / constants.R / temperature)) + q_exp = self.mode.getPartitionFunction(temperature) + self.assertAlmostEqual(q_exp, q_act, delta=1e-4 * q_exp) def test_getDensityOfStates_quantum(self): """ - Test the HarmonicOscillator.getDensityOfStates() method using a set of + Test the HarmonicOscillator.getDensityOfStates() method using a set of quantum oscillators. """ self.mode.quantum = True - factor = constants.h * constants.c * 100. * constants.Na # cm^-1 to J/mol - Elist = numpy.arange(0, 10000*factor, 1*factor) - densStates = self.mode.getDensityOfStates(Elist) - for n in range(len(Elist)): - if densStates[n] != 0: + factor = constants.h * constants.c * 100. * constants.Na # cm^-1 to J/mol + e_list = np.arange(0, 10000 * factor, 1 * factor) + dens_states = self.mode.getDensityOfStates(e_list) + for n in range(len(e_list)): + if dens_states[n] != 0: # The peaks should occur near a multiple of 500 cm^-1 - E = float(Elist[n]) / factor - self.assertTrue(E % 500 < 5 or E % 500 > 495) + energy = float(e_list[n]) / factor + self.assertTrue(energy % 500 < 5 or energy % 500 > 495) def test_repr(self): """ @@ -222,14 +227,14 @@ def test_repr(self): self.assertAlmostEqual(freq0, freq, 6) self.assertEqual(self.mode.frequencies.units, mode.frequencies.units) self.assertEqual(self.mode.quantum, mode.quantum) - + def test_pickle(self): """ Test that a HarmonicOscillator object can be pickled and unpickled with no loss of information. 
""" - import cPickle - mode = cPickle.loads(cPickle.dumps(self.mode,-1)) + import pickle + mode = pickle.loads(pickle.dumps(self.mode, -1)) self.assertEqual(self.mode.frequencies.value.shape, mode.frequencies.value.shape) for freq0, freq in zip(self.mode.frequencies.value, mode.frequencies.value): self.assertAlmostEqual(freq0, freq, 6) @@ -238,5 +243,6 @@ def test_pickle(self): ################################################################################ + if __name__ == '__main__': unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) From 396f0b5b8eda1e0b99ad10981191bcc5aaf8e962 Mon Sep 17 00:00:00 2001 From: Max Liu Date: Mon, 12 Aug 2019 13:40:47 -0400 Subject: [PATCH 031/155] Py3 and PEP-8 changes to selected rmgpy/*.py files --- rmgpy/reactionTest.py | 1348 +++++++++++++++++++++++------------------ rmgpy/speciesTest.py | 116 ++-- rmgpy/stats.py | 58 +- rmgpy/statsTest.py | 12 +- rmgpy/util.py | 22 +- 5 files changed, 862 insertions(+), 694 deletions(-) diff --git a/rmgpy/reactionTest.py b/rmgpy/reactionTest.py index b224c9aab6..070fdb7b64 100644 --- a/rmgpy/reactionTest.py +++ b/rmgpy/reactionTest.py @@ -32,6 +32,8 @@ This module contains unit tests of the rmgpy.reaction module. """ +from __future__ import division + import unittest import cantera as ct @@ -40,7 +42,7 @@ import rmgpy.constants as constants from rmgpy.kinetics import Arrhenius, ArrheniusEP, MultiArrhenius, PDepArrhenius, MultiPDepArrhenius, \ - ThirdBody, Troe, Lindemann, Chebyshev, SurfaceArrhenius, StickingCoefficient + ThirdBody, Troe, Lindemann, Chebyshev, SurfaceArrhenius, StickingCoefficient from rmgpy.molecule import Molecule from rmgpy.quantity import Quantity from rmgpy.reaction import Reaction @@ -55,28 +57,33 @@ ################################################################################ -class PseudoSpecies: +class PseudoSpecies(object): """ Can be used in place of a :class:`rmg.species.Species` for isomorphism checks. PseudoSpecies('a') is isomorphic with PseudoSpecies('A') but nothing else. """ + def __init__(self, label): self.label = label + def __repr__(self): return "PseudoSpecies('{0}')".format(self.label) + def __str__(self): return self.label + def isIsomorphic(self, other, generateInitialMap=False, strict=True): return self.label.lower() == other.label.lower() + class TestReactionIsomorphism(unittest.TestCase): """ Contains unit tests of the isomorphism testing of the Reaction class. 
""" - def makeReaction(self,reaction_string): + def makeReaction(self, reaction_string): """" Make a Reaction (containing PseudoSpecies) of from a string like 'Ab=CD' """ @@ -84,284 +91,300 @@ def makeReaction(self,reaction_string): reactants = [PseudoSpecies(i) for i in reactants] products = [PseudoSpecies(i) for i in products] return Reaction(reactants=reactants, products=products) + def test1to1(self): r1 = self.makeReaction('A=B') self.assertTrue(r1.isIsomorphic(self.makeReaction('a=B'))) self.assertTrue(r1.isIsomorphic(self.makeReaction('b=A'))) - self.assertFalse(r1.isIsomorphic(self.makeReaction('B=a'),eitherDirection=False)) + self.assertFalse(r1.isIsomorphic(self.makeReaction('B=a'), eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('A=C'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('A=BB'))) + def test1to2(self): r1 = self.makeReaction('A=BC') self.assertTrue(r1.isIsomorphic(self.makeReaction('a=Bc'))) self.assertTrue(r1.isIsomorphic(self.makeReaction('cb=a'))) - self.assertTrue(r1.isIsomorphic(self.makeReaction('a=cb'),eitherDirection=False)) - self.assertFalse(r1.isIsomorphic(self.makeReaction('bc=a'),eitherDirection=False)) + self.assertTrue(r1.isIsomorphic(self.makeReaction('a=cb'), eitherDirection=False)) + self.assertFalse(r1.isIsomorphic(self.makeReaction('bc=a'), eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('a=c'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=c'))) + def test2to2(self): r1 = self.makeReaction('AB=CD') self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cd'))) - self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=dc'),eitherDirection=False)) + self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=dc'), eitherDirection=False)) self.assertTrue(r1.isIsomorphic(self.makeReaction('dc=ba'))) - self.assertFalse(r1.isIsomorphic(self.makeReaction('cd=ab'),eitherDirection=False)) + self.assertFalse(r1.isIsomorphic(self.makeReaction('cd=ab'), eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=ab'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=cde'))) + def test2to3(self): r1 = self.makeReaction('AB=CDE') self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cde'))) - self.assertTrue(r1.isIsomorphic(self.makeReaction('ba=edc'),eitherDirection=False)) + self.assertTrue(r1.isIsomorphic(self.makeReaction('ba=edc'), eitherDirection=False)) self.assertTrue(r1.isIsomorphic(self.makeReaction('dec=ba'))) - self.assertFalse(r1.isIsomorphic(self.makeReaction('cde=ab'),eitherDirection=False)) + self.assertFalse(r1.isIsomorphic(self.makeReaction('cde=ab'), eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=abc'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('abe=cde'))) + def test2to3_usingCheckOnlyLabel(self): r1 = self.makeReaction('AB=CDE') - self.assertTrue(r1.isIsomorphic(self.makeReaction('AB=CDE'),checkOnlyLabel=True)) - self.assertTrue(r1.isIsomorphic(self.makeReaction('BA=EDC'),eitherDirection=False,checkOnlyLabel=True)) - self.assertFalse(r1.isIsomorphic(self.makeReaction('Ab=CDE'),checkOnlyLabel=True)) - self.assertFalse(r1.isIsomorphic(self.makeReaction('BA=EDd'),eitherDirection=False,checkOnlyLabel=True)) + self.assertTrue(r1.isIsomorphic(self.makeReaction('AB=CDE'), checkOnlyLabel=True)) + self.assertTrue(r1.isIsomorphic(self.makeReaction('BA=EDC'), eitherDirection=False, checkOnlyLabel=True)) + self.assertFalse(r1.isIsomorphic(self.makeReaction('Ab=CDE'), checkOnlyLabel=True)) + 
self.assertFalse(r1.isIsomorphic(self.makeReaction('BA=EDd'), eitherDirection=False, checkOnlyLabel=True)) class TestSurfaceReaction(unittest.TestCase): - "Test surface reactions" - def setUp(self): - - mH2 = Molecule().fromSMILES("[H][H]") - mX = Molecule().fromAdjacencyList("1 X u0 p0") - mHX = Molecule().fromAdjacencyList("1 H u0 p0 {2,S} \n 2 X u0 p0 {1,S}") - mCH3 = Molecule().fromSMILES("[CH3]") - mCH3X = Molecule().fromAdjacencyList("1 H u0 p0 {2,S} \n 2 X u0 p0 {1,S}") + """Test surface reactions""" - sH2 = Species( - molecule=[mH2], + def setUp(self): + m_h2 = Molecule().fromSMILES("[H][H]") + m_x = Molecule().fromAdjacencyList("1 X u0 p0") + m_hx = Molecule().fromAdjacencyList("1 H u0 p0 {2,S} \n 2 X u0 p0 {1,S}") + m_ch3 = Molecule().fromSMILES("[CH3]") + m_ch3x = Molecule().fromAdjacencyList("1 H u0 p0 {2,S} \n 2 X u0 p0 {1,S}") + + s_h2 = Species( + molecule=[m_h2], thermo=ThermoData(Tdata=([300, 400, 500, 600, 800, 1000, 1500], "K"), Cpdata=([6.955, 6.955, 6.956, 6.961, 7.003, 7.103, 7.502], "cal/(mol*K)"), H298=(0, "kcal/mol"), - S298=(31.129 , "cal/(mol*K)"))) - sX = Species( - molecule=[mX], + S298=(31.129, "cal/(mol*K)"))) + s_x = Species( + molecule=[m_x], thermo=ThermoData(Tdata=([300, 400, 500, 600, 800, 1000, 1500], "K"), Cpdata=([0., 0., 0., 0., 0., 0., 0.], "cal/(mol*K)"), H298=(0.0, "kcal/mol"), S298=(0.0, "cal/(mol*K)"))) - sHX = Species( - molecule=[mHX], + s_hx = Species( + molecule=[m_hx], thermo=ThermoData(Tdata=([300, 400, 500, 600, 800, 1000, 1500], "K"), Cpdata=([1.50, 2.58, 3.40, 4.00, 4.73, 5.13, 5.57], "cal/(mol*K)"), H298=(-11.26, "kcal/mol"), S298=(0.44, "cal/(mol*K)"))) - sCH3 = Species( - molecule=[mCH3], - thermo=NASA(polynomials=[NASAPolynomial(coeffs=[3.91547, 0.00184155, 3.48741e-06, -3.32746e-09, 8.49953e-13, 16285.6, 0.351743], Tmin=(100, 'K'), Tmax=(1337.63, 'K')), - NASAPolynomial(coeffs=[3.54146, 0.00476786, -1.82148e-06, 3.28876e-10, -2.22545e-14, 16224, 1.66032], Tmin=(1337.63, 'K'), Tmax=(5000, 'K'))], - Tmin=(100, 'K'), Tmax=(5000, 'K'), E0=(135.382, 'kJ/mol'), - comment="""Thermo library: primaryThermoLibrary + radical(CH3)""" - ), + s_ch3 = Species( + molecule=[m_ch3], + thermo=NASA(polynomials=[ + NASAPolynomial(coeffs=[3.91547, 0.00184155, 3.48741e-06, -3.32746e-09, 8.49953e-13, 16285.6, 0.351743], + Tmin=(100, 'K'), Tmax=(1337.63, 'K')), + NASAPolynomial(coeffs=[3.54146, 0.00476786, -1.82148e-06, 3.28876e-10, -2.22545e-14, 16224, 1.66032], + Tmin=(1337.63, 'K'), Tmax=(5000, 'K'))], + Tmin=(100, 'K'), Tmax=(5000, 'K'), E0=(135.382, 'kJ/mol'), + comment="""Thermo library: primaryThermoLibrary + radical(CH3)""" + ), molecularWeight=(15.0345, 'amu'), - ) + ) - sCH3X = Species( - molecule=[mCH3X], - thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.552219, 0.026442, -3.55617e-05, 2.60044e-08, -7.52707e-12, -4433.47, 0.692144], Tmin=(298, 'K'), Tmax=(1000, 'K')), - NASAPolynomial(coeffs=[3.62557, 0.00739512, -2.43797e-06, 1.86159e-10, 3.6485e-14, -5187.22, -18.9668], Tmin=(1000, 'K'), Tmax=(2000, 'K'))], + s_ch3x = Species( + molecule=[m_ch3x], + thermo=NASA(polynomials=[NASAPolynomial( + coeffs=[-0.552219, 0.026442, -3.55617e-05, 2.60044e-08, -7.52707e-12, -4433.47, 0.692144], + Tmin=(298, 'K'), Tmax=(1000, 'K')), + NASAPolynomial( + coeffs=[3.62557, 0.00739512, -2.43797e-06, 1.86159e-10, 3.6485e-14, -5187.22, + -18.9668], Tmin=(1000, 'K'), Tmax=(2000, 'K'))], Tmin=(298, 'K'), Tmax=(2000, 'K'), E0=(-39.1285, 'kJ/mol'), comment="""Thermo library: surfaceThermo""") - ) - - rxn1s = Reaction(reactants=[sH2, sX, sX], - products=[sHX, sHX], - 
kinetics=SurfaceArrhenius(A=(9.05e18, 'cm^5/(mol^2*s)'), - n=0.5, - Ea=(5.0, 'kJ/mol'), - T0=(1.0, 'K'))) + ) + + rxn1s = Reaction(reactants=[s_h2, s_x, s_x], + products=[s_hx, s_hx], + kinetics=SurfaceArrhenius(A=(9.05e18, 'cm^5/(mol^2*s)'), + n=0.5, + Ea=(5.0, 'kJ/mol'), + T0=(1.0, 'K'))) self.rxn1s = rxn1s - rxn1m = Reaction(reactants=[mH2, mX, mX], - products=[mHX, mHX]) + rxn1m = Reaction(reactants=[m_h2, m_x, m_x], + products=[m_hx, m_hx]) self.rxn1m = rxn1m - + self.rxn2sSC = Reaction( - reactants=[sCH3, sX], - products=[sCH3X], - kinetics=StickingCoefficient(A=0.1, n=0, Ea=(0, 'kcal/mol'), - T0=(1, 'K'), - Tmin=(200, 'K'), Tmax=(3000, 'K'), - comment="""Exact match found for rate rule (Adsorbate;VacantSite)""" - ) - ) + reactants=[s_ch3, s_x], + products=[s_ch3x], + kinetics=StickingCoefficient(A=0.1, n=0, Ea=(0, 'kcal/mol'), + T0=(1, 'K'), + Tmin=(200, 'K'), Tmax=(3000, 'K'), + comment="""Exact match found for rate rule (Adsorbate;VacantSite)""" + ) + ) self.rxn2sSA = Reaction( - reactants=[sCH3, sX], - products=[sCH3X], - kinetics=SurfaceArrhenius(A=(2.7e10, 'cm^3/(mol*s)'), - n=0.5, - Ea=(5.0, 'kJ/mol'), - T0=(1.0, 'K'), - comment="""Approximate rate""") - ) + reactants=[s_ch3, s_x], + products=[s_ch3x], + kinetics=SurfaceArrhenius(A=(2.7e10, 'cm^3/(mol*s)'), + n=0.5, + Ea=(5.0, 'kJ/mol'), + T0=(1.0, 'K'), + comment="""Approximate rate""") + ) def testIsSurfaceReactionSpecies(self): - "Test isSurfaceReaction for reaction based on Species " + """Test isSurfaceReaction for reaction based on Species """ self.assertTrue(self.rxn1s.isSurfaceReaction()) def testIsSurfaceReactionMolecules(self): - "Test isSurfaceReaction for reaction based on Molecules " + """Test isSurfaceReaction for reaction based on Molecules """ self.assertTrue(self.rxn1m.isSurfaceReaction()) def testMethylAdsorptionSurfaceArrhenius(self): - "Test the CH3 adsorption rate given by SurfaceArrhenius" + """Test the CH3 adsorption rate given by SurfaceArrhenius""" T = 800 - surfaceSiteDensity = Quantity(2.72e-9, 'mol/cm^2').value_si - calculated = self.rxn2sSA.getSurfaceRateCoefficient(T, surfaceSiteDensity) + surface_site_density = Quantity(2.72e-9, 'mol/cm^2').value_si + calculated = self.rxn2sSA.getSurfaceRateCoefficient(T, surface_site_density) target = 1e6 # mol/m2 self.assertAlmostEqual(numpy.log10(calculated), numpy.log10(target), places=0) def testMethylAdsorptionStickingCoefficient(self): - "Test the CH3 adsorption rate given by StickingCoefficient" + """Test the CH3 adsorption rate given by StickingCoefficient""" # First, check the molecular weight is in units we expect self.assertAlmostEqual(self.rxn2sSC.reactants[0].molecularWeight.value_si / constants.amu / 1000, 15.0345e-3) # kg/mol T = 800 - surfaceSiteDensity = Quantity(2.72e-9, 'mol/cm^2').value_si - calculated = self.rxn2sSC.getSurfaceRateCoefficient(T, surfaceSiteDensity) + surface_site_density = Quantity(2.72e-9, 'mol/cm^2').value_si + calculated = self.rxn2sSC.getSurfaceRateCoefficient(T, surface_site_density) target = 1e6 # mol/m2 self.assertAlmostEqual(numpy.log10(calculated), numpy.log10(target), places=0) + class TestReaction(unittest.TestCase): """ Contains unit tests of the Reaction class. """ - + def setUp(self): """ A method that is called prior to each unit test in this class. 
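# Note (added): the order-of-magnitude target of 1e6 in the sticking-coefficient test
# is consistent with a Hertz-Knudsen-style estimate, in which the adsorption rate
# coefficient is roughly k ~ (gamma / site_density) * sqrt(R*T / (2*pi*W)), with gamma
# the sticking coefficient and W the adsorbate molar mass.  This is a hedged
# back-of-the-envelope check, not necessarily the exact expression RMG evaluates:
import numpy as np

def sticking_coefficient_rate(gamma, molar_mass, temperature, site_density):
    """Rough adsorption rate coefficient in m^3/(mol*s) for a single-site adsorbate."""
    R = 8.314462618  # J/(mol*K)
    mean_speed_term = np.sqrt(R * temperature / (2.0 * np.pi * molar_mass))  # m/s
    return gamma * mean_speed_term / site_density

# CH3 (15.0345 g/mol) at 800 K on a 2.72e-9 mol/cm^2 (= 2.72e-5 mol/m^2) surface:
print(sticking_coefficient_rate(0.1, 15.0345e-3, 800.0, 2.72e-5))  # ~1e6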
""" ethylene = Species( - label = 'C2H4', - conformer = Conformer( - E0 = (44.7127, 'kJ/mol'), - modes = [ + label='C2H4', + conformer=Conformer( + E0=(44.7127, 'kJ/mol'), + modes=[ IdealGasTranslation( - mass = (28.0313, 'amu'), + mass=(28.0313, 'amu'), ), NonlinearRotor( - inertia = ( + inertia=( [3.41526, 16.6498, 20.065], 'amu*angstrom^2', ), - symmetry = 4, + symmetry=4, ), HarmonicOscillator( - frequencies = ( - [828.397, 970.652, 977.223, 1052.93, 1233.55, 1367.56, 1465.09, 1672.25, 3098.46, 3111.7, 3165.79, 3193.54], + frequencies=( + [828.397, 970.652, 977.223, 1052.93, 1233.55, 1367.56, 1465.09, 1672.25, 3098.46, 3111.7, + 3165.79, 3193.54], 'cm^-1', ), ), ], - spinMultiplicity = 1, - opticalIsomers = 1, + spinMultiplicity=1, + opticalIsomers=1, ), ) - - hydrogen = Species( - label = 'H', - conformer = Conformer( - E0 = (211.794, 'kJ/mol'), - modes = [ + + hydrogen = Species( + label='H', + conformer=Conformer( + E0=(211.794, 'kJ/mol'), + modes=[ IdealGasTranslation( - mass = (1.00783, 'amu'), + mass=(1.00783, 'amu'), ), ], - spinMultiplicity = 2, - opticalIsomers = 1, + spinMultiplicity=2, + opticalIsomers=1, ), ) - + ethyl = Species( - label = 'C2H5', - conformer = Conformer( - E0 = (111.603, 'kJ/mol'), - modes = [ + label='C2H5', + conformer=Conformer( + E0=(111.603, 'kJ/mol'), + modes=[ IdealGasTranslation( - mass = (29.0391, 'amu'), + mass=(29.0391, 'amu'), ), NonlinearRotor( - inertia = ( + inertia=( [4.8709, 22.2353, 23.9925], 'amu*angstrom^2', ), - symmetry = 1, + symmetry=1, ), HarmonicOscillator( - frequencies = ( - [482.224, 791.876, 974.355, 1051.48, 1183.21, 1361.36, 1448.65, 1455.07, 1465.48, 2688.22, 2954.51, 3033.39, 3101.54, 3204.73], + frequencies=( + [482.224, 791.876, 974.355, 1051.48, 1183.21, 1361.36, 1448.65, 1455.07, 1465.48, 2688.22, + 2954.51, 3033.39, 3101.54, 3204.73], 'cm^-1', ), ), HinderedRotor( - inertia = (1.11481, 'amu*angstrom^2'), - symmetry = 6, - barrier = (0.244029, 'kJ/mol'), - semiclassical = None, + inertia=(1.11481, 'amu*angstrom^2'), + symmetry=6, + barrier=(0.244029, 'kJ/mol'), + semiclassical=None, ), ], - spinMultiplicity = 2, - opticalIsomers = 1, + spinMultiplicity=2, + opticalIsomers=1, ), ) - + TS = TransitionState( - label = 'TS', - conformer = Conformer( - E0 = (266.694, 'kJ/mol'), - modes = [ + label='TS', + conformer=Conformer( + E0=(266.694, 'kJ/mol'), + modes=[ IdealGasTranslation( - mass = (29.0391, 'amu'), + mass=(29.0391, 'amu'), ), NonlinearRotor( - inertia = ( + inertia=( [6.78512, 22.1437, 22.2114], 'amu*angstrom^2', ), - symmetry = 1, + symmetry=1, ), HarmonicOscillator( - frequencies = ( - [412.75, 415.206, 821.495, 924.44, 982.714, 1024.16, 1224.21, 1326.36, 1455.06, 1600.35, 3101.46, 3110.55, 3175.34, 3201.88], + frequencies=( + [412.75, 415.206, 821.495, 924.44, 982.714, 1024.16, 1224.21, 1326.36, 1455.06, 1600.35, + 3101.46, 3110.55, 3175.34, 3201.88], 'cm^-1', ), ), ], - spinMultiplicity = 2, - opticalIsomers = 1, + spinMultiplicity=2, + opticalIsomers=1, ), - frequency = (-750.232, 'cm^-1'), + frequency=(-750.232, 'cm^-1'), ) - + self.reaction = Reaction( - reactants = [hydrogen, ethylene], - products = [ethyl], - kinetics = Arrhenius( - A = (501366000.0, 'cm^3/(mol*s)'), - n = 1.637, - Ea = (4.32508, 'kJ/mol'), - T0 = (1, 'K'), - Tmin = (300, 'K'), - Tmax = (2500, 'K'), + reactants=[hydrogen, ethylene], + products=[ethyl], + kinetics=Arrhenius( + A=(501366000.0, 'cm^3/(mol*s)'), + n=1.637, + Ea=(4.32508, 'kJ/mol'), + T0=(1, 'K'), + Tmin=(300, 'K'), + Tmax=(2500, 'K'), ), - transitionState = TS, - degeneracy = 
2, + transitionState=TS, + degeneracy=2, ) self.reaction.kinetics.comment = ''' Multiplied by reaction path degeneracy 2.0 @@ -370,64 +393,70 @@ def setUp(self): # CC(=O)O[O] acetylperoxy = Species( label='acetylperoxy', - thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(21.0*constants.R,"J/(mol*K)"), a0=-3.95, a1=9.26, a2=-15.6, a3=8.55, B=(500.0,"K"), H0=(-6.151e+04,"J/mol"), S0=(-790.2,"J/(mol*K)")), + thermo=Wilhoit(Cp0=(4.0 * constants.R, "J/(mol*K)"), CpInf=(21.0 * constants.R, "J/(mol*K)"), a0=-3.95, + a1=9.26, a2=-15.6, a3=8.55, B=(500.0, "K"), H0=(-6.151e+04, "J/mol"), + S0=(-790.2, "J/(mol*K)")), ) # C[C]=O acetyl = Species( label='acetyl', - thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(15.5*constants.R,"J/(mol*K)"), a0=0.2541, a1=-0.4712, a2=-4.434, a3=2.25, B=(500.0,"K"), H0=(-1.439e+05,"J/mol"), S0=(-524.6,"J/(mol*K)")), + thermo=Wilhoit(Cp0=(4.0 * constants.R, "J/(mol*K)"), CpInf=(15.5 * constants.R, "J/(mol*K)"), a0=0.2541, + a1=-0.4712, a2=-4.434, a3=2.25, B=(500.0, "K"), H0=(-1.439e+05, "J/mol"), + S0=(-524.6, "J/(mol*K)")), ) # [O][O] oxygen = Species( label='oxygen', - thermo=Wilhoit(Cp0=(3.5*constants.R,"J/(mol*K)"), CpInf=(4.5*constants.R,"J/(mol*K)"), a0=-0.9324, a1=26.18, a2=-70.47, a3=44.12, B=(500.0,"K"), H0=(1.453e+04,"J/mol"), S0=(-12.19,"J/(mol*K)")), + thermo=Wilhoit(Cp0=(3.5 * constants.R, "J/(mol*K)"), CpInf=(4.5 * constants.R, "J/(mol*K)"), a0=-0.9324, + a1=26.18, a2=-70.47, a3=44.12, B=(500.0, "K"), H0=(1.453e+04, "J/mol"), + S0=(-12.19, "J/(mol*K)")), ) - + self.reaction2 = Reaction( - reactants=[acetyl, oxygen], - products=[acetylperoxy], - kinetics = Arrhenius( - A = (2.65e12, 'cm^3/(mol*s)'), - n = 0.0, - Ea = (0.0, 'kJ/mol'), - T0 = (1, 'K'), - Tmin = (300, 'K'), - Tmax = (2000, 'K'), + reactants=[acetyl, oxygen], + products=[acetylperoxy], + kinetics=Arrhenius( + A=(2.65e12, 'cm^3/(mol*s)'), + n=0.0, + Ea=(0.0, 'kJ/mol'), + T0=(1, 'K'), + Tmin=(300, 'K'), + Tmax=(2000, 'K'), ), ) oxygen_atom = Species().fromSMILES('[O]') - SO2 = Species().fromSMILES('O=S=O') - SO3 = Species().fromSMILES('O=S(=O)=O') + so2 = Species().fromSMILES('O=S=O') + so3 = Species().fromSMILES('O=S(=O)=O') self.reaction3 = Reaction( - reactants=[oxygen_atom, SO2], - products=[SO3], - kinetics = Arrhenius(A=(3.7e+11, 'cm^3/(mol*s)'), n=0, Ea=(1689, 'cal/mol'), T0=(1, 'K'))) - + reactants=[oxygen_atom, so2], + products=[so3], + kinetics=Arrhenius(A=(3.7e+11, 'cm^3/(mol*s)'), n=0, Ea=(1689, 'cal/mol'), T0=(1, 'K'))) + def testIsIsomerization(self): """ Test the Reaction.isIsomerization() method. """ isomerization = Reaction(reactants=[Species()], products=[Species()]) - association = Reaction(reactants=[Species(),Species()], products=[Species()]) - dissociation = Reaction(reactants=[Species()], products=[Species(),Species()]) - bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()]) + association = Reaction(reactants=[Species(), Species()], products=[Species()]) + dissociation = Reaction(reactants=[Species()], products=[Species(), Species()]) + bimolecular = Reaction(reactants=[Species(), Species()], products=[Species(), Species()]) self.assertTrue(isomerization.isIsomerization()) self.assertFalse(association.isIsomerization()) self.assertFalse(dissociation.isIsomerization()) self.assertFalse(bimolecular.isIsomerization()) - + def testIsAssociation(self): """ Test the Reaction.isAssociation() method. 
""" isomerization = Reaction(reactants=[Species()], products=[Species()]) - association = Reaction(reactants=[Species(),Species()], products=[Species()]) - dissociation = Reaction(reactants=[Species()], products=[Species(),Species()]) - bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()]) + association = Reaction(reactants=[Species(), Species()], products=[Species()]) + dissociation = Reaction(reactants=[Species()], products=[Species(), Species()]) + bimolecular = Reaction(reactants=[Species(), Species()], products=[Species(), Species()]) self.assertFalse(isomerization.isAssociation()) self.assertTrue(association.isAssociation()) self.assertFalse(dissociation.isAssociation()) @@ -438,9 +467,9 @@ def testIsDissociation(self): Test the Reaction.isDissociation() method. """ isomerization = Reaction(reactants=[Species()], products=[Species()]) - association = Reaction(reactants=[Species(),Species()], products=[Species()]) - dissociation = Reaction(reactants=[Species()], products=[Species(),Species()]) - bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()]) + association = Reaction(reactants=[Species(), Species()], products=[Species()]) + dissociation = Reaction(reactants=[Species()], products=[Species(), Species()]) + bimolecular = Reaction(reactants=[Species(), Species()], products=[Species(), Species()]) self.assertFalse(isomerization.isDissociation()) self.assertFalse(association.isDissociation()) self.assertTrue(dissociation.isDissociation()) @@ -456,21 +485,21 @@ def testHasTemplate(self): self.assertTrue(self.reaction.hasTemplate(products, reactants)) self.assertFalse(self.reaction2.hasTemplate(reactants, products)) self.assertFalse(self.reaction2.hasTemplate(products, reactants)) - + reactants.reverse() products.reverse() self.assertTrue(self.reaction.hasTemplate(reactants, products)) self.assertTrue(self.reaction.hasTemplate(products, reactants)) self.assertFalse(self.reaction2.hasTemplate(reactants, products)) self.assertFalse(self.reaction2.hasTemplate(products, reactants)) - + reactants = self.reaction2.reactants[:] products = self.reaction2.products[:] self.assertFalse(self.reaction.hasTemplate(reactants, products)) self.assertFalse(self.reaction.hasTemplate(products, reactants)) self.assertTrue(self.reaction2.hasTemplate(reactants, products)) self.assertTrue(self.reaction2.hasTemplate(products, reactants)) - + reactants.reverse() products.reverse() self.assertFalse(self.reaction.hasTemplate(reactants, products)) @@ -483,7 +512,9 @@ def testEnthalpyOfReaction(self): Test the Reaction.getEnthalpyOfReaction() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) - Hlist0 = [float(v) for v in ['-146007', '-145886', '-144195', '-141973', '-139633', '-137341', '-135155', '-133093', '-131150', '-129316']] + Hlist0 = [float(v) for v in + ['-146007', '-145886', '-144195', '-141973', '-139633', '-137341', '-135155', '-133093', '-131150', + '-129316']] Hlist = self.reaction2.getEnthalpiesOfReaction(Tlist) for i in range(len(Tlist)): self.assertAlmostEqual(Hlist[i] / 1000., Hlist0[i] / 1000., 2) @@ -493,7 +524,9 @@ def testEntropyOfReaction(self): Test the Reaction.getEntropyOfReaction() method. 
""" Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) - Slist0 = [float(v) for v in ['-156.793', '-156.872', '-153.504', '-150.317', '-147.707', '-145.616', '-143.93', '-142.552', '-141.407', '-140.441']] + Slist0 = [float(v) for v in + ['-156.793', '-156.872', '-153.504', '-150.317', '-147.707', '-145.616', '-143.93', '-142.552', + '-141.407', '-140.441']] Slist = self.reaction2.getEntropiesOfReaction(Tlist) for i in range(len(Tlist)): self.assertAlmostEqual(Slist[i], Slist0[i], 2) @@ -503,7 +536,9 @@ def testFreeEnergyOfReaction(self): Test the Reaction.getFreeEnergyOfReaction() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) - Glist0 = [float(v) for v in ['-114648', '-83137.2', '-52092.4', '-21719.3', '8073.53', '37398.1', '66346.8', '94990.6', '123383', '151565']] + Glist0 = [float(v) for v in + ['-114648', '-83137.2', '-52092.4', '-21719.3', '8073.53', '37398.1', '66346.8', '94990.6', '123383', + '151565']] Glist = self.reaction2.getFreeEnergiesOfReaction(Tlist) for i in range(len(Tlist)): self.assertAlmostEqual(Glist[i] / 1000., Glist0[i] / 1000., 2) @@ -513,7 +548,9 @@ def testEquilibriumConstantKa(self): Test the Reaction.getEquilibriumConstant() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) - Kalist0 = [float(v) for v in ['8.75951e+29', '7.1843e+10', '34272.7', '26.1877', '0.378696', '0.0235579', '0.00334673', '0.000792389', '0.000262777', '0.000110053']] + Kalist0 = [float(v) for v in + ['8.75951e+29', '7.1843e+10', '34272.7', '26.1877', '0.378696', '0.0235579', '0.00334673', + '0.000792389', '0.000262777', '0.000110053']] Kalist = self.reaction2.getEquilibriumConstants(Tlist, type='Ka') for i in range(len(Tlist)): self.assertAlmostEqual(Kalist[i] / Kalist0[i], 1.0, 4) @@ -523,7 +560,9 @@ def testEquilibriumConstantKc(self): Test the Reaction.getEquilibriumConstant() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) - Kclist0 = [float(v) for v in ['1.45661e+28', '2.38935e+09', '1709.76', '1.74189', '0.0314866', '0.00235045', '0.000389568', '0.000105413', '3.93273e-05', '1.83006e-05']] + Kclist0 = [float(v) for v in + ['1.45661e+28', '2.38935e+09', '1709.76', '1.74189', '0.0314866', '0.00235045', '0.000389568', + '0.000105413', '3.93273e-05', '1.83006e-05']] Kclist = self.reaction2.getEquilibriumConstants(Tlist, type='Kc') for i in range(len(Tlist)): self.assertAlmostEqual(Kclist[i] / Kclist0[i], 1.0, 4) @@ -533,7 +572,9 @@ def testEquilibriumConstantKp(self): Test the Reaction.getEquilibriumConstant() method. 
""" Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) - Kplist0 = [float(v) for v in ['8.75951e+24', '718430', '0.342727', '0.000261877', '3.78696e-06', '2.35579e-07', '3.34673e-08', '7.92389e-09', '2.62777e-09', '1.10053e-09']] + Kplist0 = [float(v) for v in + ['8.75951e+24', '718430', '0.342727', '0.000261877', '3.78696e-06', '2.35579e-07', '3.34673e-08', + '7.92389e-09', '2.62777e-09', '1.10053e-09']] Kplist = self.reaction2.getEquilibriumConstants(Tlist, type='Kp') for i in range(len(Tlist)): self.assertAlmostEqual(Kplist[i] / Kplist0[i], 1.0, 4) @@ -558,20 +599,21 @@ def testRateCoefficient(self): Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) P = 1e5 for T in Tlist: - self.assertAlmostEqual(self.reaction.getRateCoefficient(T, P) / self.reaction.kinetics.getRateCoefficient(T), 1.0, 6) - + self.assertAlmostEqual( + self.reaction.getRateCoefficient(T, P) / self.reaction.kinetics.getRateCoefficient(T), 1.0, 6) + def testGenerateReverseRateCoefficient(self): """ Test the Reaction.generateReverseRateCoefficient() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) P = 1e5 - reverseKinetics = self.reaction2.generateReverseRateCoefficient() + reverse_kinetics = self.reaction2.generateReverseRateCoefficient() for T in Tlist: kr0 = self.reaction2.getRateCoefficient(T, P) / self.reaction2.getEquilibriumConstant(T) - kr = reverseKinetics.getRateCoefficient(T) + kr = reverse_kinetics.getRateCoefficient(T) self.assertAlmostEqual(kr0 / kr, 1.0, 0) - + def testFixBarrierHeight(self): """ Test that fixBarrierHeight: @@ -579,82 +621,82 @@ def testFixBarrierHeight(self): 2) forces Ea to be positive if forcePositive=True 3) Evans-Polanyi kinetics are handled so that negative Ea if Ea CH4""" - thirdBody = ThirdBody( - arrheniusLow = arrheniusLow, - Tmin = (Tmin,"K"), - Tmax = (Tmax,"K"), - Pmin = (Pmin,"bar"), - Pmax = (Pmax,"bar"), - efficiencies = efficiencies, - comment = comment, + third_body = ThirdBody( + arrheniusLow=arrhenius_low, + Tmin=(Tmin, "K"), + Tmax=(Tmax, "K"), + Pmin=(Pmin, "bar"), + Pmax=(Pmax, "bar"), + efficiencies=efficiencies, + comment=comment, ) - - original_kinetics = thirdBody + + original_kinetics = third_body self.reaction2.kinetics = original_kinetics - reverseKinetics = self.reaction2.generateReverseRateCoefficient() + reverse_kinetics = self.reaction2.generateReverseRateCoefficient() - self.reaction2.kinetics = reverseKinetics + self.reaction2.kinetics = reverse_kinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants - reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() + reverse_reverse_kinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) - krevrev = reversereverseKinetics.getRateCoefficient(T, P) + krevrev = reverse_reverse_kinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testGenerateReverseRateCoefficientLindemann(self): @@ -1008,17 +1054,17 @@ def testGenerateReverseRateCoefficientLindemann(self): Test the Reaction.generateReverseRateCoefficient() method works for the Lindemann format. 
""" - arrheniusHigh = Arrhenius( - A = (1.39e+16,"cm^3/(mol*s)"), - n = -0.534, - Ea = (2.243,"kJ/mol"), - T0 = (1,"K"), + arrhenius_high = Arrhenius( + A=(1.39e+16, "cm^3/(mol*s)"), + n=-0.534, + Ea=(2.243, "kJ/mol"), + T0=(1, "K"), ) - arrheniusLow = Arrhenius( - A = (2.62e+33,"cm^6/(mol^2*s)"), - n = -4.76, - Ea = (10.21,"kJ/mol"), - T0 = (1,"K"), + arrhenius_low = Arrhenius( + A=(2.62e+33, "cm^6/(mol^2*s)"), + n=-4.76, + Ea=(10.21, "kJ/mol"), + T0=(1, "K"), ) efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2} Tmin = 300. @@ -1027,52 +1073,51 @@ def testGenerateReverseRateCoefficientLindemann(self): Pmax = 100. comment = """H + CH3 -> CH4""" lindemann = Lindemann( - arrheniusHigh = arrheniusHigh, - arrheniusLow = arrheniusLow, - Tmin = (Tmin,"K"), - Tmax = (Tmax,"K"), - Pmin = (Pmin,"bar"), - Pmax = (Pmax,"bar"), - efficiencies = efficiencies, - comment = comment, + arrheniusHigh=arrhenius_high, + arrheniusLow=arrhenius_low, + Tmin=(Tmin, "K"), + Tmax=(Tmax, "K"), + Pmin=(Pmin, "bar"), + Pmax=(Pmax, "bar"), + efficiencies=efficiencies, + comment=comment, ) - + original_kinetics = lindemann - + self.reaction2.kinetics = original_kinetics - reverseKinetics = self.reaction2.generateReverseRateCoefficient() + reverse_kinetics = self.reaction2.generateReverseRateCoefficient() - self.reaction2.kinetics = reverseKinetics + self.reaction2.kinetics = reverse_kinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants - reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() + reverse_reverse_kinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) - krevrev = reversereverseKinetics.getRateCoefficient(T, P) + krevrev = reverse_reverse_kinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) - def testGenerateReverseRateCoefficientTroe(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the Troe format. """ - arrheniusHigh = Arrhenius( - A = (1.39e+16,"cm^3/(mol*s)"), - n = -0.534, - Ea = (2.243,"kJ/mol"), - T0 = (1,"K"), + arrhenius_high = Arrhenius( + A=(1.39e+16, "cm^3/(mol*s)"), + n=-0.534, + Ea=(2.243, "kJ/mol"), + T0=(1, "K"), ) - arrheniusLow = Arrhenius( - A = (2.62e+33,"cm^6/(mol^2*s)"), - n = -4.76, - Ea = (10.21,"kJ/mol"), - T0 = (1,"K"), + arrhenius_low = Arrhenius( + A=(2.62e+33, "cm^6/(mol^2*s)"), + n=-4.76, + Ea=(10.21, "kJ/mol"), + T0=(1, "K"), ) alpha = 0.783 T3 = 74 @@ -1085,37 +1130,37 @@ def testGenerateReverseRateCoefficientTroe(self): Pmax = 100. 
comment = """H + CH3 -> CH4""" troe = Troe( - arrheniusHigh = arrheniusHigh, - arrheniusLow = arrheniusLow, - alpha = alpha, - T3 = (T3,"K"), - T1 = (T1,"K"), - T2 = (T2,"K"), - Tmin = (Tmin,"K"), - Tmax = (Tmax,"K"), - Pmin = (Pmin,"bar"), - Pmax = (Pmax,"bar"), - efficiencies = efficiencies, - comment = comment, + arrheniusHigh=arrhenius_high, + arrheniusLow=arrhenius_low, + alpha=alpha, + T3=(T3, "K"), + T1=(T1, "K"), + T2=(T2, "K"), + Tmin=(Tmin, "K"), + Tmax=(Tmax, "K"), + Pmin=(Pmin, "bar"), + Pmax=(Pmax, "bar"), + efficiencies=efficiencies, + comment=comment, ) - + original_kinetics = troe - + self.reaction2.kinetics = original_kinetics - reverseKinetics = self.reaction2.generateReverseRateCoefficient() + reverse_kinetics = self.reaction2.generateReverseRateCoefficient() - self.reaction2.kinetics = reverseKinetics + self.reaction2.kinetics = reverse_kinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants - reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() + reverse_reverse_kinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) - krevrev = reversereverseKinetics.getRateCoefficient(T, P) + krevrev = reverse_reverse_kinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testTSTCalculation(self): @@ -1123,11 +1168,11 @@ def testTSTCalculation(self): A test of the transition state theory k(T) calculation function, using the reaction H + C2H4 -> C2H5. """ - Tlist = 1000.0/numpy.arange(0.4, 3.35, 0.01) + Tlist = 1000.0 / numpy.arange(0.4, 3.35, 0.01) klist = numpy.array([self.reaction.calculateTSTRateCoefficient(T) for T in Tlist]) arrhenius = Arrhenius().fitToData(Tlist, klist, kunits='m^3/(mol*s)') klist2 = numpy.array([arrhenius.getRateCoefficient(T) for T in Tlist]) - + # Check that the correct Arrhenius parameters are returned self.assertAlmostEqual(arrhenius.A.value_si, 2265.2488, delta=1e-2) self.assertAlmostEqual(arrhenius.n.value_si, 1.45419, delta=1e-4) @@ -1135,14 +1180,14 @@ def testTSTCalculation(self): # Check that the fit is satisfactory (defined here as always within 5%) for i in range(len(Tlist)): self.assertAlmostEqual(klist[i], klist2[i], delta=5e-2 * klist[i]) - + def testPickle(self): """ Test that a Reaction object can be successfully pickled and unpickled with no loss of information. 
""" - import cPickle - reaction = cPickle.loads(cPickle.dumps(self.reaction,-1)) + import pickle + reaction = pickle.loads(pickle.dumps(self.reaction, -1)) self.assertEqual(len(self.reaction.reactants), len(reaction.reactants)) self.assertEqual(len(self.reaction.products), len(reaction.products)) @@ -1152,11 +1197,13 @@ def testPickle(self): for product0, product in zip(self.reaction.products, reaction.products): self.assertAlmostEqual(product0.conformer.E0.value_si / 1e6, product.conformer.E0.value_si / 1e6, 2) self.assertEqual(product0.conformer.E0.units, product.conformer.E0.units) - self.assertAlmostEqual(self.reaction.transitionState.conformer.E0.value_si / 1e6, reaction.transitionState.conformer.E0.value_si / 1e6, 2) + self.assertAlmostEqual(self.reaction.transitionState.conformer.E0.value_si / 1e6, + reaction.transitionState.conformer.E0.value_si / 1e6, 2) self.assertEqual(self.reaction.transitionState.conformer.E0.units, reaction.transitionState.conformer.E0.units) - self.assertAlmostEqual(self.reaction.transitionState.frequency.value_si, reaction.transitionState.frequency.value_si, 2) + self.assertAlmostEqual(self.reaction.transitionState.frequency.value_si, + reaction.transitionState.frequency.value_si, 2) self.assertEqual(self.reaction.transitionState.frequency.units, reaction.transitionState.frequency.units) - + self.assertAlmostEqual(self.reaction.kinetics.A.value_si, reaction.kinetics.A.value_si, delta=1e-6) self.assertAlmostEqual(self.reaction.kinetics.n.value_si, reaction.kinetics.n.value_si, delta=1e-6) self.assertAlmostEqual(self.reaction.kinetics.T0.value_si, reaction.kinetics.T0.value_si, delta=1e-6) @@ -1164,7 +1211,7 @@ def testPickle(self): self.assertEqual(self.reaction.kinetics.comment, reaction.kinetics.comment) self.assertEqual(self.reaction.duplicate, reaction.duplicate) - self.assertEqual(self.reaction.degeneracy, reaction.degeneracy) + self.assertEqual(self.reaction.degeneracy, reaction.degeneracy) def testOutput(self): """ @@ -1181,19 +1228,21 @@ def testOutput(self): for product0, product in zip(self.reaction.products, reaction.products): self.assertAlmostEqual(product0.conformer.E0.value_si / 1e6, product.conformer.E0.value_si / 1e6, 2) self.assertEqual(product0.conformer.E0.units, product.conformer.E0.units) - self.assertAlmostEqual(self.reaction.transitionState.conformer.E0.value_si / 1e6, reaction.transitionState.conformer.E0.value_si / 1e6, 2) + self.assertAlmostEqual(self.reaction.transitionState.conformer.E0.value_si / 1e6, + reaction.transitionState.conformer.E0.value_si / 1e6, 2) self.assertEqual(self.reaction.transitionState.conformer.E0.units, reaction.transitionState.conformer.E0.units) - self.assertAlmostEqual(self.reaction.transitionState.frequency.value_si, reaction.transitionState.frequency.value_si, 2) + self.assertAlmostEqual(self.reaction.transitionState.frequency.value_si, + reaction.transitionState.frequency.value_si, 2) self.assertEqual(self.reaction.transitionState.frequency.units, reaction.transitionState.frequency.units) - + self.assertAlmostEqual(self.reaction.kinetics.A.value_si, reaction.kinetics.A.value_si, delta=1e-6) self.assertAlmostEqual(self.reaction.kinetics.n.value_si, reaction.kinetics.n.value_si, delta=1e-6) self.assertAlmostEqual(self.reaction.kinetics.T0.value_si, reaction.kinetics.T0.value_si, delta=1e-6) self.assertAlmostEqual(self.reaction.kinetics.Ea.value_si, reaction.kinetics.Ea.value_si, delta=1e-6) self.assertEqual(self.reaction.kinetics.comment, reaction.kinetics.comment) - + 
self.assertEqual(self.reaction.duplicate, reaction.duplicate) - self.assertEqual(self.reaction.degeneracy, reaction.degeneracy) + self.assertEqual(self.reaction.degeneracy, reaction.degeneracy) def testDegeneracyUpdatesRate(self): """ @@ -1234,159 +1283,244 @@ def setUp(self): A method that is called prior to each unit test in this class. """ # define some species: - ch3 = Species(index=13, label="CH3", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[3.91547,0.00184154,3.48744e-06,-3.3275e-09,8.49964e-13,16285.6,0.351739], Tmin=(100,'K'), Tmax=(1337.62,'K')), NASAPolynomial(coeffs=[3.54145,0.00476788,-1.82149e-06,3.28878e-10,-2.22547e-14,16224,1.6604], Tmin=(1337.62,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), comment=""" + ch3 = Species(index=13, label="CH3", thermo=NASA(polynomials=[ + NASAPolynomial(coeffs=[3.91547, 0.00184154, 3.48744e-06, -3.3275e-09, 8.49964e-13, 16285.6, 0.351739], + Tmin=(100, 'K'), Tmax=(1337.62, 'K')), + NASAPolynomial(coeffs=[3.54145, 0.00476788, -1.82149e-06, 3.28878e-10, -2.22547e-14, 16224, 1.6604], + Tmin=(1337.62, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), comment=""" Thermo library: primaryThermoLibrary + radical(CH3) """), molecule=[Molecule(SMILES="[CH3]")]) - ethane = Species(label="ethane", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[3.78033,-0.00324263,5.52381e-05,-6.38581e-08,2.28637e-11,-11620.3,5.21034], Tmin=(100,'K'), Tmax=(954.51,'K')), NASAPolynomial(coeffs=[4.58983,0.0141508,-4.75962e-06,8.60294e-10,-6.21717e-14,-12721.8,-3.61739], Tmin=(954.51,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), comment=""" + ethane = Species(label="ethane", thermo=NASA(polynomials=[ + NASAPolynomial(coeffs=[3.78033, -0.00324263, 5.52381e-05, -6.38581e-08, 2.28637e-11, -11620.3, 5.21034], + Tmin=(100, 'K'), Tmax=(954.51, 'K')), + NASAPolynomial(coeffs=[4.58983, 0.0141508, -4.75962e-06, 8.60294e-10, -6.21717e-14, -12721.8, -3.61739], + Tmin=(954.51, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), comment=""" Thermo group additivity estimation: group(Cs-CsHHH) + gauche(Cs(CsRRR)) + other(R) + group(Cs-CsHHH) + gauche(Cs(CsRRR)) + other(R) """), molecule=[Molecule(SMILES="CC")]) - - co2 = Species(index=16, label="CO2", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[3.27861,0.00274152,7.16074e-06,-1.08027e-08,4.14282e-12,-48470.3,5.97937], Tmin=(100,'K'), Tmax=(988.89,'K')), NASAPolynomial(coeffs=[4.5461,0.00291913,-1.15484e-06,2.27654e-10,-1.7091e-14,-48980.4,-1.43275], Tmin=(988.89,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), comment=""" + + co2 = Species(index=16, label="CO2", thermo=NASA(polynomials=[ + NASAPolynomial(coeffs=[3.27861, 0.00274152, 7.16074e-06, -1.08027e-08, 4.14282e-12, -48470.3, 5.97937], + Tmin=(100, 'K'), Tmax=(988.89, 'K')), + NASAPolynomial(coeffs=[4.5461, 0.00291913, -1.15484e-06, 2.27654e-10, -1.7091e-14, -48980.4, -1.43275], + Tmin=(988.89, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), comment=""" Thermo group additivity estimation: group(Cdd-OdOd) + other(R) + group(O2d-Cd) + other(R) + group(O2d-Cd) + other(R) """), molecule=[Molecule(SMILES="O=C=O")]) - ch4 = Species(index=15, label="CH4", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[4.20541,-0.00535556,2.51123e-05,-2.13762e-08,5.97522e-12,-10161.9,-0.921275], Tmin=(100,'K'), Tmax=(1084.12,'K')), NASAPolynomial(coeffs=[0.908272,0.0114541,-4.57173e-06,8.2919e-10,-5.66314e-14,-9719.98,13.9931], Tmin=(1084.12,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), comment=""" + ch4 = 
Species(index=15, label="CH4", thermo=NASA(polynomials=[ + NASAPolynomial(coeffs=[4.20541, -0.00535556, 2.51123e-05, -2.13762e-08, 5.97522e-12, -10161.9, -0.921275], + Tmin=(100, 'K'), Tmax=(1084.12, 'K')), + NASAPolynomial(coeffs=[0.908272, 0.0114541, -4.57173e-06, 8.2919e-10, -5.66314e-14, -9719.98, 13.9931], + Tmin=(1084.12, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), comment=""" Thermo library: primaryThermoLibrary """), molecule=[Molecule(SMILES="C")]) - - h2o = Species(index=27, label="H2O", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[4.05764,-0.000787933,2.90876e-06,-1.47518e-09,2.12838e-13,-30281.6,-0.311363], Tmin=(100,'K'), Tmax=(1130.24,'K')), NASAPolynomial(coeffs=[2.84325,0.00275108,-7.8103e-07,1.07243e-10,-5.79389e-15,-29958.6,5.91041], Tmin=(1130.24,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), comment=""" + + h2o = Species(index=27, label="H2O", thermo=NASA(polynomials=[ + NASAPolynomial(coeffs=[4.05764, -0.000787933, 2.90876e-06, -1.47518e-09, 2.12838e-13, -30281.6, -0.311363], + Tmin=(100, 'K'), Tmax=(1130.24, 'K')), + NASAPolynomial(coeffs=[2.84325, 0.00275108, -7.8103e-07, 1.07243e-10, -5.79389e-15, -29958.6, 5.91041], + Tmin=(1130.24, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), comment=""" Thermo library: primaryThermoLibrary """), molecule=[Molecule(SMILES="O")]) - - ar = Species(label="Ar", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,4.37967], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,4.37967], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), comment=""" + + ar = Species(label="Ar", thermo=NASA( + polynomials=[NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, -745.375, 4.37967], Tmin=(200, 'K'), Tmax=(1000, 'K')), + NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, -745.375, 4.37967], Tmin=(1000, 'K'), + Tmax=(6000, 'K'))], Tmin=(200, 'K'), Tmax=(6000, 'K'), comment=""" Thermo library: primaryThermoLibrary """), molecule=[Molecule(SMILES="[Ar]")]) - - h2 = Species(index=2, label="H2", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[3.43536,0.00021271,-2.78625e-07,3.40267e-10,-7.76031e-14,-1031.36,-3.90842], Tmin=(100,'K'), Tmax=(1959.08,'K')), NASAPolynomial(coeffs=[2.78816,0.000587644,1.59009e-07,-5.52736e-11,4.34309e-15,-596.143,0.112747], Tmin=(1959.08,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), comment=""" + + h2 = Species(index=2, label="H2", thermo=NASA(polynomials=[ + NASAPolynomial(coeffs=[3.43536, 0.00021271, -2.78625e-07, 3.40267e-10, -7.76031e-14, -1031.36, -3.90842], + Tmin=(100, 'K'), Tmax=(1959.08, 'K')), + NASAPolynomial(coeffs=[2.78816, 0.000587644, 1.59009e-07, -5.52736e-11, 4.34309e-15, -596.143, 0.112747], + Tmin=(1959.08, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), comment=""" Thermo library: primaryThermoLibrary """), molecule=[Molecule(SMILES="[H][H]")]) - - h = Species(index=3, label="H", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[2.5,-1.91243e-12,2.45329e-15,-1.02377e-18,1.31369e-22,25474.2,-0.444973], Tmin=(100,'K'), Tmax=(4563.27,'K')), NASAPolynomial(coeffs=[2.50167,-1.43051e-06,4.6025e-10,-6.57826e-14,3.52412e-18,25472.7,-0.455578], Tmin=(4563.27,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), comment=""" + + h = Species(index=3, label="H", thermo=NASA(polynomials=[ + NASAPolynomial(coeffs=[2.5, -1.91243e-12, 2.45329e-15, -1.02377e-18, 1.31369e-22, 25474.2, -0.444973], + Tmin=(100, 'K'), Tmax=(4563.27, 'K')), + NASAPolynomial(coeffs=[2.50167, -1.43051e-06, 4.6025e-10, -6.57826e-14, 
3.52412e-18, 25472.7, -0.455578], + Tmin=(4563.27, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), comment=""" Thermo library: primaryThermoLibrary """), molecule=[Molecule(SMILES="[H]")]) - - oh = Species(index=4, label="OH", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[3.51457,2.92773e-05,-5.32163e-07,1.01949e-09,-3.85945e-13,3414.25,2.10435], Tmin=(100,'K'), Tmax=(1145.75,'K')), NASAPolynomial(coeffs=[3.07194,0.000604016,-1.39783e-08,-2.13446e-11,2.48066e-15,3579.39,4.578], Tmin=(1145.75,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), comment=""" + + oh = Species(index=4, label="OH", thermo=NASA(polynomials=[ + NASAPolynomial(coeffs=[3.51457, 2.92773e-05, -5.32163e-07, 1.01949e-09, -3.85945e-13, 3414.25, 2.10435], + Tmin=(100, 'K'), Tmax=(1145.75, 'K')), + NASAPolynomial(coeffs=[3.07194, 0.000604016, -1.39783e-08, -2.13446e-11, 2.48066e-15, 3579.39, 4.578], + Tmin=(1145.75, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), comment=""" Thermo library: primaryThermoLibrary """), molecule=[Molecule(SMILES="[OH]")]) - - ho2 = Species(index=5, label="HO2", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[4.04594,-0.00173464,1.03766e-05,-1.02202e-08,3.34908e-12,-986.754,4.63581], Tmin=(100,'K'), Tmax=(932.15,'K')), NASAPolynomial(coeffs=[3.21024,0.00367942,-1.27701e-06,2.18045e-10,-1.46338e-14,-910.369,8.18291], Tmin=(932.15,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), comment=""" + + ho2 = Species(index=5, label="HO2", thermo=NASA(polynomials=[ + NASAPolynomial(coeffs=[4.04594, -0.00173464, 1.03766e-05, -1.02202e-08, 3.34908e-12, -986.754, 4.63581], + Tmin=(100, 'K'), Tmax=(932.15, 'K')), + NASAPolynomial(coeffs=[3.21024, 0.00367942, -1.27701e-06, 2.18045e-10, -1.46338e-14, -910.369, 8.18291], + Tmin=(932.15, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), comment=""" Thermo group additivity estimation: group(O2s-OsH) + gauche(O2s(RR)) + other(R) + group(O2s-OsH) + gauche(O2s(RR)) + other(R) + radical(HOOJ) """), molecule=[Molecule(SMILES="[O]O")]) - - o2 = Species(index=6, label="O2", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[3.53732,-0.00121572,5.3162e-06,-4.89446e-09,1.45846e-12,-1038.59,4.68368], Tmin=(100,'K'), Tmax=(1074.55,'K')), NASAPolynomial(coeffs=[3.15382,0.00167804,-7.69974e-07,1.51275e-10,-1.08782e-14,-1040.82,6.16756], Tmin=(1074.55,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), comment=""" + + o2 = Species(index=6, label="O2", thermo=NASA(polynomials=[ + NASAPolynomial(coeffs=[3.53732, -0.00121572, 5.3162e-06, -4.89446e-09, 1.45846e-12, -1038.59, 4.68368], + Tmin=(100, 'K'), Tmax=(1074.55, 'K')), + NASAPolynomial(coeffs=[3.15382, 0.00167804, -7.69974e-07, 1.51275e-10, -1.08782e-14, -1040.82, 6.16756], + Tmin=(1074.55, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), comment=""" Thermo library: primaryThermoLibrary """), molecule=[Molecule(SMILES="[O][O]")]) - - co = Species(index=9, label="CO", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[3.66965,-0.00550953,2.00538e-05,-2.08391e-08,7.43738e-12,1200.77,-12.4224], Tmin=(100,'K'), Tmax=(884.77,'K')), NASAPolynomial(coeffs=[2.8813,0.00231665,-4.40151e-07,4.75633e-11,-2.78282e-15,1173.45,-9.65831], Tmin=(884.77,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), comment=""" + + co = Species(index=9, label="CO", thermo=NASA(polynomials=[ + NASAPolynomial(coeffs=[3.66965, -0.00550953, 2.00538e-05, -2.08391e-08, 7.43738e-12, 1200.77, -12.4224], + Tmin=(100, 'K'), Tmax=(884.77, 'K')), + NASAPolynomial(coeffs=[2.8813, 0.00231665, 
-4.40151e-07, 4.75633e-11, -2.78282e-15, 1173.45, -9.65831], + Tmin=(884.77, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), comment=""" Thermo group additivity estimation: group(Ct-CtCs) + other(R) + group(O2s-CsCs) + other(R) """), molecule=[Molecule(SMILES="[C-]#[O+]")]) - - h2o2 = Species(index=7, label="H2O2", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[3.73136,0.00335071,9.35033e-06,-1.521e-08,6.41585e-12,-17721.2,5.45911], Tmin=(100,'K'), Tmax=(908.87,'K')), NASAPolynomial(coeffs=[5.41579,0.00261008,-4.39892e-07,4.91087e-11,-3.35188e-15,-18303,-4.02248], Tmin=(908.87,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), comment=""" + + h2o2 = Species(index=7, label="H2O2", thermo=NASA(polynomials=[ + NASAPolynomial(coeffs=[3.73136, 0.00335071, 9.35033e-06, -1.521e-08, 6.41585e-12, -17721.2, 5.45911], + Tmin=(100, 'K'), Tmax=(908.87, 'K')), + NASAPolynomial(coeffs=[5.41579, 0.00261008, -4.39892e-07, 4.91087e-11, -3.35188e-15, -18303, -4.02248], + Tmin=(908.87, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), comment=""" Thermo group additivity estimation: group(O2s-OsH) + gauche(O2s(RR)) + other(R) + group(O2s-OsH) + gauche(O2s(RR)) + other(R) """), molecule=[Molecule(SMILES="OO")]) - - self.speciesList = [ch3,ethane,co2,ch4,h2o,ar,h2, h, oh, ho2, o2, co, h2o2] - - self.troe = Reaction(index=1, reactants=[ch3,ch3], products=[ethane], - kinetics=Troe(arrheniusHigh=Arrhenius(A=(6.77e+16,'cm^3/(mol*s)'), n=-1.18, Ea=(0.654,'kcal/mol'), T0=(1,'K')), arrheniusLow=Arrhenius(A=(3.4e+41,'cm^6/(mol^2*s)'), n=-7.03, Ea=(2.762,'kcal/mol'), T0=(1,'K')), alpha=0.619, T3=(73.2,'K'), T1=(1180,'K'), T2=(10000,'K'), - efficiencies={Molecule(SMILES="O=C=O"): 2.0, Molecule(SMILES="[H][H]"): 2.0, Molecule(SMILES="O"): 6.0, Molecule(SMILES="[Ar]"): 0.7, Molecule(SMILES="C"): 2.0, Molecule(SMILES="CC"): 3.0})) - + + self.speciesList = [ch3, ethane, co2, ch4, h2o, ar, h2, h, oh, ho2, o2, co, h2o2] + + self.troe = Reaction(index=1, reactants=[ch3, ch3], products=[ethane], + kinetics=Troe( + arrheniusHigh=Arrhenius(A=(6.77e+16, 'cm^3/(mol*s)'), n=-1.18, Ea=(0.654, 'kcal/mol'), + T0=(1, 'K')), + arrheniusLow=Arrhenius(A=(3.4e+41, 'cm^6/(mol^2*s)'), n=-7.03, Ea=(2.762, 'kcal/mol'), + T0=(1, 'K')), alpha=0.619, T3=(73.2, 'K'), T1=(1180, 'K'), + T2=(10000, 'K'), + efficiencies={Molecule(SMILES="O=C=O"): 2.0, Molecule(SMILES="[H][H]"): 2.0, + Molecule(SMILES="O"): 6.0, Molecule(SMILES="[Ar]"): 0.7, + Molecule(SMILES="C"): 2.0, Molecule(SMILES="CC"): 3.0})) + self.ct_troe = ct.Reaction.fromCti('''falloff_reaction('CH3(13) + CH3(13) (+ M) <=> ethane (+ M)', kf=[(6.770000e+16,'cm3/mol/s'), -1.18, (0.654,'kcal/mol')], kf0=[(3.400000e+41,'cm6/mol2/s'), -7.03, (2.762,'kcal/mol')], efficiencies='ethane:3.0 CO2(16):2.0 CH4(15):2.0 Ar:0.7 H2O(27):6.0 H2(2):2.0', falloff=Troe(A=0.619, T3=73.2, T1=1180.0, T2=10000.0))''') - - self.arrheniusBi = Reaction(index=2, reactants=[h,ch4], products=[h2,ch3],kinetics=Arrhenius(A=(6.6e+08,'cm^3/(mol*s)'), n=1.62, Ea=(10.84,'kcal/mol'), T0=(1,'K'))) - - self.ct_arrheniusBi = ct.Reaction.fromCti('''reaction('H(3) + CH4(15) <=> H2(2) + CH3(13)', [(6.600000e+08,'cm3/mol/s'), 1.62, (10.84,'kcal/mol')])''') - - self.arrheniusBi_irreversible = Reaction(index=10, reactants=[h,ch4], products=[h2,ch3],kinetics=Arrhenius(A=(6.6e+08,'cm^3/(mol*s)'), n=1.62, Ea=(10.84,'kcal/mol'), T0=(1,'K')),reversible=False) - - self.ct_arrheniusBi_irreversible = ct.Reaction.fromCti('''reaction('H(3) + CH4(15) => H2(2) + CH3(13)', [(6.600000e+08,'cm3/mol/s'), 1.62, 
(10.84,'kcal/mol')])''') - - self.arrheniusMono = Reaction(index=15, reactants=[h2o2], products=[h2,o2],kinetics=Arrhenius(A=(6.6e+03,'1/s'), n=1.62, Ea=(10.84,'kcal/mol'), T0=(1,'K'))) - - self.ct_arrheniusMono = ct.Reaction.fromCti('''reaction('H2O2(7) <=> H2(2) + O2(6)', [(6.600000e+03,'1/s'), 1.62, (10.84,'kcal/mol')])''') - - self.arrheniusTri = Reaction(index=20, reactants=[h,h,o2], products=[h2o2],kinetics=Arrhenius(A=(6.6e+08,'cm^6/(mol^2*s)'), n=1.62, Ea=(10.84,'kcal/mol'), T0=(1,'K'))) - self.ct_arrheniusTri = ct.Reaction.fromCti('''reaction('H(3) + H(3) + O2(6) <=> H2O2(7)', [(6.6e+08, 'cm6/mol2/s'), 1.62, (10.84,'kcal/mol')])''') - - self.multiArrhenius = Reaction(index=3, reactants=[oh,ho2], products=[h2o,o2], - kinetics=MultiArrhenius(arrhenius=[Arrhenius(A=(1.45e+13,'cm^3/(mol*s)'), n=0, Ea=(-0.5,'kcal/mol'), T0=(1,'K')), Arrhenius(A=(5e+15,'cm^3/(mol*s)'), n=0, Ea=(17.33,'kcal/mol'), T0=(1,'K'))])) - - self.ct_multiArrhenius = [ ct.Reaction.fromCti('''reaction('OH(4) + HO2(5) <=> H2O(27) + O2(6)', [(1.450000e+13,'cm3/mol/s'), 0.0, (-0.5,'kcal/mol')], + + self.arrheniusBi = Reaction(index=2, reactants=[h, ch4], products=[h2, ch3], + kinetics=Arrhenius(A=(6.6e+08, 'cm^3/(mol*s)'), n=1.62, Ea=(10.84, 'kcal/mol'), + T0=(1, 'K'))) + + self.ct_arrheniusBi = ct.Reaction.fromCti( + '''reaction('H(3) + CH4(15) <=> H2(2) + CH3(13)', [(6.600000e+08,'cm3/mol/s'), 1.62, (10.84,'kcal/mol')])''') + + self.arrheniusBi_irreversible = Reaction(index=10, reactants=[h, ch4], products=[h2, ch3], + kinetics=Arrhenius(A=(6.6e+08, 'cm^3/(mol*s)'), n=1.62, + Ea=(10.84, 'kcal/mol'), T0=(1, 'K')), + reversible=False) + + self.ct_arrheniusBi_irreversible = ct.Reaction.fromCti( + '''reaction('H(3) + CH4(15) => H2(2) + CH3(13)', [(6.600000e+08,'cm3/mol/s'), 1.62, (10.84,'kcal/mol')])''') + + self.arrheniusMono = Reaction(index=15, reactants=[h2o2], products=[h2, o2], + kinetics=Arrhenius(A=(6.6e+03, '1/s'), n=1.62, Ea=(10.84, 'kcal/mol'), + T0=(1, 'K'))) + + self.ct_arrheniusMono = ct.Reaction.fromCti( + '''reaction('H2O2(7) <=> H2(2) + O2(6)', [(6.600000e+03,'1/s'), 1.62, (10.84,'kcal/mol')])''') + + self.arrheniusTri = Reaction(index=20, reactants=[h, h, o2], products=[h2o2], + kinetics=Arrhenius(A=(6.6e+08, 'cm^6/(mol^2*s)'), n=1.62, Ea=(10.84, 'kcal/mol'), + T0=(1, 'K'))) + self.ct_arrheniusTri = ct.Reaction.fromCti( + '''reaction('H(3) + H(3) + O2(6) <=> H2O2(7)', [(6.6e+08, 'cm6/mol2/s'), 1.62, (10.84,'kcal/mol')])''') + + self.multiArrhenius = Reaction(index=3, reactants=[oh, ho2], products=[h2o, o2], + kinetics=MultiArrhenius(arrhenius=[ + Arrhenius(A=(1.45e+13, 'cm^3/(mol*s)'), n=0, Ea=(-0.5, 'kcal/mol'), + T0=(1, 'K')), + Arrhenius(A=(5e+15, 'cm^3/(mol*s)'), n=0, Ea=(17.33, 'kcal/mol'), + T0=(1, 'K'))])) + + self.ct_multiArrhenius = [ct.Reaction.fromCti('''reaction('OH(4) + HO2(5) <=> H2O(27) + O2(6)', [(1.450000e+13,'cm3/mol/s'), 0.0, (-0.5,'kcal/mol')], options='duplicate')'''), ct.Reaction.fromCti('''reaction('OH(4) + HO2(5) <=> H2O(27) + O2(6)', [(5.000000e+15,'cm3/mol/s'), 0.0, (17.33,'kcal/mol')], options='duplicate')''')] - - self.pdepArrhenius = Reaction(index=4, reactants=[ho2,ho2], products=[o2,h2o2], - kinetics = PDepArrhenius( - pressures = ([0.1, 1, 10], 'atm'), - arrhenius = [ - Arrhenius( - A = (8.8e+16, 'cm^3/(mol*s)'), - n = -1.05, - Ea = (6461, 'cal/mol'), - T0 = (1, 'K'), - ), - Arrhenius( - A = (8e+21, 'cm^3/(mol*s)'), - n = -2.39, - Ea = (11180, 'cal/mol'), - T0 = (1, 'K'), - ), - Arrhenius( - A = (3.3e+24, 'cm^3/(mol*s)'), - n = -3.04, - Ea = (15610, 'cal/mol'), - T0 = 
(1, 'K'), - ), - ], - ), - ) - + + self.pdepArrhenius = Reaction(index=4, reactants=[ho2, ho2], products=[o2, h2o2], + kinetics=PDepArrhenius( + pressures=([0.1, 1, 10], 'atm'), + arrhenius=[ + Arrhenius( + A=(8.8e+16, 'cm^3/(mol*s)'), + n=-1.05, + Ea=(6461, 'cal/mol'), + T0=(1, 'K'), + ), + Arrhenius( + A=(8e+21, 'cm^3/(mol*s)'), + n=-2.39, + Ea=(11180, 'cal/mol'), + T0=(1, 'K'), + ), + Arrhenius( + A=(3.3e+24, 'cm^3/(mol*s)'), + n=-3.04, + Ea=(15610, 'cal/mol'), + T0=(1, 'K'), + ), + ], + ), + ) + self.ct_pdepArrhenius = ct.Reaction.fromCti('''pdep_arrhenius('HO2(5) + HO2(5) <=> O2(6) + H2O2(7)', [(0.1, 'atm'), (8.800000e+16, 'cm3/mol/s'), -1.05, (6.461,'kcal/mol')], [(1.0, 'atm'), (8.000000e+21,'cm3/mol/s'), -2.39, (11.18,'kcal/mol')], [(10.0, 'atm'), (3.300000e+24,'cm3/mol/s'), -3.04, (15.61,'kcal/mol')])''') - - self.multiPdepArrhenius = Reaction(index=5, reactants=[ho2,ch3], products=[o2,ch4], - kinetics = MultiPDepArrhenius( - arrhenius = [ - PDepArrhenius( - pressures = ([0.001, 1, 3], 'atm'), - arrhenius = [ - Arrhenius(A=(9.3e+10, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')), - Arrhenius(A=(8e+10, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')), - Arrhenius(A=(7e+10, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), T0=(1, 'K')), - ], - ), - PDepArrhenius( - pressures = ([0.001, 1, 3], 'atm'), - arrhenius = [ - Arrhenius(A=(710000, 'cm^3/(mol*s)'), n=1.8, Ea=(1133, 'cal/mol'), T0=(1, 'K')), - Arrhenius(A=(880000, 'cm^3/(mol*s)'), n=1.77, Ea=(954, 'cal/mol'), T0=(1, 'K')), - Arrhenius(A=(290000, 'cm^3/(mol*s)'), n=1.9, Ea=(397, 'cal/mol'), T0=(1, 'K')), - ], - ), - ], - ), - ) - + + self.multiPdepArrhenius = Reaction(index=5, reactants=[ho2, ch3], products=[o2, ch4], + kinetics=MultiPDepArrhenius( + arrhenius=[ + PDepArrhenius( + pressures=([0.001, 1, 3], 'atm'), + arrhenius=[ + Arrhenius(A=(9.3e+10, 'cm^3/(mol*s)'), n=0, + Ea=(0, 'cal/mol'), T0=(1, 'K')), + Arrhenius(A=(8e+10, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), + T0=(1, 'K')), + Arrhenius(A=(7e+10, 'cm^3/(mol*s)'), n=0, Ea=(0, 'cal/mol'), + T0=(1, 'K')), + ], + ), + PDepArrhenius( + pressures=([0.001, 1, 3], 'atm'), + arrhenius=[ + Arrhenius(A=(710000, 'cm^3/(mol*s)'), n=1.8, + Ea=(1133, 'cal/mol'), T0=(1, 'K')), + Arrhenius(A=(880000, 'cm^3/(mol*s)'), n=1.77, + Ea=(954, 'cal/mol'), T0=(1, 'K')), + Arrhenius(A=(290000, 'cm^3/(mol*s)'), n=1.9, + Ea=(397, 'cal/mol'), T0=(1, 'K')), + ], + ), + ], + ), + ) + self.ct_multiPdepArrhenius = [ct.Reaction.fromCti('''pdep_arrhenius('HO2(5) + CH3(13) <=> O2(6) + CH4(15)', [(0.001, 'atm'), (9.300000e+10, 'cm3/mol/s'), 0.0, (0.0,'kcal/mol')], [(1.0, 'atm'), (8.000000e+10, 'cm3/mol/s'), 0.0, (0.0,'kcal/mol')], [(3.0, 'atm'), (7.000000e+10, 'cm3/mol/s'), 0.0, (0.0,'kcal/mol')], options='duplicate')'''), - ct.Reaction.fromCti('''pdep_arrhenius('HO2(5) + CH3(13) <=> O2(6) + CH4(15)', + ct.Reaction.fromCti('''pdep_arrhenius('HO2(5) + CH3(13) <=> O2(6) + CH4(15)', [(0.001, 'atm'), (7.100000e+05, 'cm3/mol/s'), 1.8, (1.133,'kcal/mol')], [(1.0, 'atm'), (8.800000e+05, 'cm3/mol/s'), 1.77, (0.954,'kcal/mol')], [(3.0, 'atm'), (2.900000e+05, 'cm3/mol/s'), 1.9, (0.397,'kcal/mol')], options='duplicate')''')] - - self.chebyshev = Reaction(index=6, reactants=[h,ch3], products=[ch4], kinetics=Chebyshev(coeffs=[[12.68,0.3961,-0.05481,-0.003606],[-0.7128,0.731,-0.0941,-0.008587],[-0.5806,0.57,-0.05539,-0.01115],[-0.4074,0.3653,-0.0118,-0.01171],[-0.2403,0.1779,0.01946,-0.008505],[-0.1133,0.0485,0.03121,-0.002955]], kunits='cm^3/(mol*s)', Tmin=(300,'K'), Tmax=(3000,'K'), Pmin=(0.001,'atm'), 
Pmax=(98.692,'atm'))) - + + self.chebyshev = Reaction(index=6, reactants=[h, ch3], products=[ch4], kinetics=Chebyshev( + coeffs=[[12.68, 0.3961, -0.05481, -0.003606], [-0.7128, 0.731, -0.0941, -0.008587], + [-0.5806, 0.57, -0.05539, -0.01115], [-0.4074, 0.3653, -0.0118, -0.01171], + [-0.2403, 0.1779, 0.01946, -0.008505], [-0.1133, 0.0485, 0.03121, -0.002955]], + kunits='cm^3/(mol*s)', Tmin=(300, 'K'), Tmax=(3000, 'K'), Pmin=(0.001, 'atm'), Pmax=(98.692, 'atm'))) + self.ct_chebyshev = ct.Reaction.fromCti('''chebyshev_reaction('H(3) + CH3(13) (+ M) <=> CH4(15) (+ M)', Tmin=300.0, Tmax=3000.0, Pmin=(0.001, 'atm'), Pmax=(98.692, 'atm'), @@ -1396,38 +1530,48 @@ def setUp(self): [-4.07400e-01, 3.65300e-01, -1.18000e-02, -1.17100e-02], [-2.40300e-01, 1.77900e-01, 1.94600e-02, -8.50500e-03], [-1.13300e-01, 4.85000e-02, 3.12100e-02, -2.95500e-03]])''') - - - self.thirdBody = Reaction(index=7, reactants=[h,h], products=[h2], - kinetics=ThirdBody(arrheniusLow=Arrhenius(A=(1e+18,'cm^6/(mol^2*s)'), n=-1, Ea=(0,'kcal/mol'), T0=(1,'K')), - efficiencies={Molecule(SMILES="O=C=O"): 0.0, Molecule(SMILES="[H][H]"): 0.0, Molecule(SMILES="O"): 0.0, - Molecule(SMILES="[Ar]"): 0.63, Molecule(SMILES="C"): 2.0, Molecule(SMILES="CC"): 3.0})) - - + + self.thirdBody = Reaction(index=7, reactants=[h, h], products=[h2], + kinetics=ThirdBody( + arrheniusLow=Arrhenius(A=(1e+18, 'cm^6/(mol^2*s)'), n=-1, Ea=(0, 'kcal/mol'), + T0=(1, 'K')), + efficiencies={Molecule(SMILES="O=C=O"): 0.0, Molecule(SMILES="[H][H]"): 0.0, + Molecule(SMILES="O"): 0.0, + Molecule(SMILES="[Ar]"): 0.63, Molecule(SMILES="C"): 2.0, + Molecule(SMILES="CC"): 3.0})) + self.ct_thirdBody = ct.Reaction.fromCti('''three_body_reaction('H(3) + H(3) + M <=> H2(2) + M', [(1.000000e+18,'cm6/mol2/s'), -1.0, (0.0,'kcal/mol')], efficiencies='CO2(16):0.0 CH4(15):2.0 ethane:3.0 H2O(27):0.0 H2(2):0.0 Ar:0.63')''') - - self.lindemann = Reaction(index=8, reactants=[h,o2], products=[ho2], - kinetics=Lindemann(arrheniusHigh=Arrhenius(A=(1.8e+10,'cm^3/(mol*s)'), n=0, Ea=(2.385,'kcal/mol'), T0=(1,'K')), arrheniusLow=Arrhenius(A=(6.02e+14,'cm^6/(mol^2*s)'), n=0, Ea=(3,'kcal/mol'), T0=(1,'K')), efficiencies={Molecule(SMILES="O=C=O"): 3.5, Molecule(SMILES="[H][H]"): 2.0, Molecule(SMILES="O"): 6.0, Molecule(SMILES="[Ar]"): 0.5, Molecule(SMILES="C"): 2.0, Molecule(SMILES="CC"): 3.0, Molecule(SMILES="[O][O]"): 6.0})) - - self.ct_lindemann =ct.Reaction.fromCti('''falloff_reaction('H(3) + O2(6) (+ M) <=> HO2(5) (+ M)', + + self.lindemann = Reaction(index=8, reactants=[h, o2], products=[ho2], + kinetics=Lindemann( + arrheniusHigh=Arrhenius(A=(1.8e+10, 'cm^3/(mol*s)'), n=0, Ea=(2.385, 'kcal/mol'), + T0=(1, 'K')), + arrheniusLow=Arrhenius(A=(6.02e+14, 'cm^6/(mol^2*s)'), n=0, Ea=(3, 'kcal/mol'), + T0=(1, 'K')), + efficiencies={Molecule(SMILES="O=C=O"): 3.5, Molecule(SMILES="[H][H]"): 2.0, + Molecule(SMILES="O"): 6.0, Molecule(SMILES="[Ar]"): 0.5, + Molecule(SMILES="C"): 2.0, Molecule(SMILES="CC"): 3.0, + Molecule(SMILES="[O][O]"): 6.0})) + + self.ct_lindemann = ct.Reaction.fromCti('''falloff_reaction('H(3) + O2(6) (+ M) <=> HO2(5) (+ M)', kf=[(1.800000e+10,'cm3/mol/s'), 0.0, (2.385,'kcal/mol')], kf0=[(6.020000e+14,'cm6/mol2/s'), 0.0, (3.0,'kcal/mol')], efficiencies='CO2(16):3.5 CH4(15):2.0 ethane:3.0 H2O(27):6.0 O2(6):6.0 H2(2):2.0 Ar:0.5')''') - + def testArrhenius(self): """ Tests formation of cantera reactions with Arrhenius or kinetics. 
""" - - rmgObjects = [self.arrheniusBi, self.arrheniusBi_irreversible, self.arrheniusMono, self.arrheniusTri] - - ctObjects = [self.ct_arrheniusBi, self.ct_arrheniusBi_irreversible, self.ct_arrheniusMono, self.ct_arrheniusTri] - converted_ctObjects = [obj.toCantera(self.speciesList, useChemkinIdentifier = True) for obj in rmgObjects] - - for converted_obj, ct_obj in zip(converted_ctObjects, ctObjects): + + rmg_objects = [self.arrheniusBi, self.arrheniusBi_irreversible, self.arrheniusMono, self.arrheniusTri] + + ct_objects = [self.ct_arrheniusBi, self.ct_arrheniusBi_irreversible, self.ct_arrheniusMono, self.ct_arrheniusTri] + converted_ct_objects = [obj.toCantera(self.speciesList, useChemkinIdentifier=True) for obj in rmg_objects] + + for converted_obj, ct_obj in zip(converted_ct_objects, ct_objects): # Check that the reaction class is the same - self.assertEqual(type(converted_obj), type(ct_obj)) + self.assertEqual(type(converted_obj), type(ct_obj)) # Check that the reaction string is the same self.assertEqual(repr(converted_obj), repr(ct_obj)) # Check that the Arrhenius string is identical @@ -1437,14 +1581,14 @@ def testMultiArrhenius(self): """ Tests formation of cantera reactions with MultiArrhenius kinetics. """ - rmgObjects = [self.multiArrhenius] - ctObjects = [self.ct_multiArrhenius] - converted_ctObjects = [obj.toCantera(self.speciesList, useChemkinIdentifier = True) for obj in rmgObjects] - - for converted_obj, ct_obj in zip(converted_ctObjects, ctObjects): + rmg_objects = [self.multiArrhenius] + ct_objects = [self.ct_multiArrhenius] + converted_ct_objects = [obj.toCantera(self.speciesList, useChemkinIdentifier=True) for obj in rmg_objects] + + for converted_obj, ct_obj in zip(converted_ct_objects, ct_objects): # Check that the same number of reactions are produced - self.assertEqual(len(converted_obj), len(ct_obj)) - + self.assertEqual(len(converted_obj), len(ct_obj)) + for converted_rxn, ct_rxn in zip(converted_obj, ct_obj): # Check that the reaction has the same type self.assertEqual(type(converted_rxn), type(ct_rxn)) @@ -1452,36 +1596,36 @@ def testMultiArrhenius(self): self.assertEqual(repr(converted_rxn), repr(ct_rxn)) # Check that the Arrhenius rates are identical self.assertEqual(str(converted_rxn.rate), str(ct_rxn.rate)) - + def testPDepArrhenius(self): """ Tests formation of cantera reactions with PDepArrhenius kinetics. """ - rmgObjects = [self.pdepArrhenius] - ctObjects = [self.ct_pdepArrhenius] - converted_ctObjects = [obj.toCantera(self.speciesList, useChemkinIdentifier = True) for obj in rmgObjects] - - for converted_obj, ct_obj in zip(converted_ctObjects, ctObjects): + rmg_objects = [self.pdepArrhenius] + ct_objects = [self.ct_pdepArrhenius] + converted_ct_objects = [obj.toCantera(self.speciesList, useChemkinIdentifier=True) for obj in rmg_objects] + + for converted_obj, ct_obj in zip(converted_ct_objects, ct_objects): # Check that the reaction class is the same - self.assertEqual(type(converted_obj), type(ct_obj)) + self.assertEqual(type(converted_obj), type(ct_obj)) # Check that the reaction string is the same self.assertEqual(repr(converted_obj), repr(ct_obj)) # Check that the Arrhenius rates are identical self.assertEqual(str(converted_obj.rates), str(ct_obj.rates)) - + def testMultiPdepArrhenius(self): """ Tests formation of cantera reactions with MultiPDepArrhenius kinetics. 
""" - - rmgObjects = [self.multiPdepArrhenius] - ctObjects = [self.ct_multiPdepArrhenius] - converted_ctObjects = [obj.toCantera(self.speciesList, useChemkinIdentifier = True) for obj in rmgObjects] - - for converted_obj, ct_obj in zip(converted_ctObjects, ctObjects): + + rmg_objects = [self.multiPdepArrhenius] + ct_objects = [self.ct_multiPdepArrhenius] + converted_ct_objects = [obj.toCantera(self.speciesList, useChemkinIdentifier=True) for obj in rmg_objects] + + for converted_obj, ct_obj in zip(converted_ct_objects, ct_objects): # Check that the same number of reactions are produced - self.assertEqual(len(converted_obj), len(ct_obj)) - + self.assertEqual(len(converted_obj), len(ct_obj)) + for converted_rxn, ct_rxn in zip(converted_obj, ct_obj): # Check that the reaction has the same type self.assertEqual(type(converted_rxn), type(ct_rxn)) @@ -1489,51 +1633,49 @@ def testMultiPdepArrhenius(self): self.assertEqual(repr(converted_rxn), repr(ct_rxn)) # Check that the Arrhenius rates are identical self.assertEqual(str(converted_rxn.rates), str(ct_rxn.rates)) - - + def testChebyshev(self): """ Tests formation of cantera reactions with Chebyshev kinetics. """ - ct_chebyshev = self.chebyshev.toCantera(self.speciesList, useChemkinIdentifier = True) - self.assertEqual(type(ct_chebyshev),type(self.ct_chebyshev)) - self.assertEqual(repr(ct_chebyshev),repr(self.ct_chebyshev)) - + ct_chebyshev = self.chebyshev.toCantera(self.speciesList, useChemkinIdentifier=True) + self.assertEqual(type(ct_chebyshev), type(self.ct_chebyshev)) + self.assertEqual(repr(ct_chebyshev), repr(self.ct_chebyshev)) + self.assertEqual(ct_chebyshev.Tmax, self.ct_chebyshev.Tmax) self.assertEqual(ct_chebyshev.Tmin, self.ct_chebyshev.Tmin) self.assertEqual(ct_chebyshev.Pmax, self.ct_chebyshev.Pmax) self.assertEqual(ct_chebyshev.Pmin, self.ct_chebyshev.Pmin) self.assertTrue((ct_chebyshev.coeffs == self.ct_chebyshev.coeffs).all()) - - + def testFalloff(self): """ Tests formation of cantera reactions with Falloff kinetics. 
""" - ct_thirdBody = self.thirdBody.toCantera(self.speciesList, useChemkinIdentifier = True) - self.assertEqual(type(ct_thirdBody),type(self.ct_thirdBody)) - self.assertEqual(repr(ct_thirdBody),repr(self.ct_thirdBody)) - self.assertEqual(str(ct_thirdBody.rate), str(self.ct_thirdBody.rate)) - self.assertEqual(ct_thirdBody.efficiencies, self.ct_thirdBody.efficiencies) - - ct_lindemann = self.lindemann.toCantera(self.speciesList, useChemkinIdentifier = True) - self.assertEqual(type(ct_lindemann),type(self.ct_lindemann)) + ct_third_body = self.thirdBody.toCantera(self.speciesList, useChemkinIdentifier=True) + self.assertEqual(type(ct_third_body), type(self.ct_thirdBody)) + self.assertEqual(repr(ct_third_body), repr(self.ct_thirdBody)) + self.assertEqual(str(ct_third_body.rate), str(self.ct_thirdBody.rate)) + self.assertEqual(ct_third_body.efficiencies, self.ct_thirdBody.efficiencies) + + ct_lindemann = self.lindemann.toCantera(self.speciesList, useChemkinIdentifier=True) + self.assertEqual(type(ct_lindemann), type(self.ct_lindemann)) self.assertEqual(repr(ct_lindemann), repr(self.ct_lindemann)) self.assertEqual(ct_lindemann.efficiencies, self.ct_lindemann.efficiencies) self.assertEqual(str(ct_lindemann.low_rate), str(self.ct_lindemann.low_rate)) self.assertEqual(str(ct_lindemann.high_rate), str(self.ct_lindemann.high_rate)) self.assertEqual(str(ct_lindemann.falloff), str(self.ct_lindemann.falloff)) - - - ct_troe = self.troe.toCantera(self.speciesList, useChemkinIdentifier = True) - self.assertEqual(type(ct_troe),type(self.ct_troe)) + + ct_troe = self.troe.toCantera(self.speciesList, useChemkinIdentifier=True) + self.assertEqual(type(ct_troe), type(self.ct_troe)) self.assertEqual(repr(ct_troe), repr(self.ct_troe)) self.assertEqual(ct_troe.efficiencies, self.ct_troe.efficiencies) - + self.assertEqual(str(ct_troe.low_rate), str(self.ct_troe.low_rate)) self.assertEqual(str(ct_troe.high_rate), str(self.ct_troe.high_rate)) self.assertEqual(str(ct_troe.falloff), str(self.ct_troe.falloff)) - + + ################################################################################ if __name__ == '__main__': diff --git a/rmgpy/speciesTest.py b/rmgpy/speciesTest.py index 4a10930921..c386ec5dd5 100644 --- a/rmgpy/speciesTest.py +++ b/rmgpy/speciesTest.py @@ -32,6 +32,8 @@ This module contains unit tests of the rmgpy.species module. """ +from __future__ import division + import unittest from rmgpy.species import Species @@ -40,13 +42,14 @@ from rmgpy.thermo import ThermoData from rmgpy.statmech import Conformer, IdealGasTranslation, NonlinearRotor, HarmonicOscillator + ################################################################################ class TestSpecies(unittest.TestCase): """ Contains unit tests for the Species class. """ - + def setUp(self): """ A method that is run before each unit test in this class. 
@@ -55,26 +58,28 @@ def setUp(self): index=1, label='C2H4', thermo=ThermoData( - Tdata=([300.0,400.0,500.0,600.0,800.0,1000.0,1500.0],'K'), - Cpdata=([3.0,4.0,5.0,6.0,8.0,10.0,15.0],'cal/(mol*K)'), - H298=(-20.0,'kcal/mol'), - S298=(50.0,'cal/(mol*K)'), - Tmin=(300.0,'K'), - Tmax=(2000.0,'K'), + Tdata=([300.0, 400.0, 500.0, 600.0, 800.0, 1000.0, 1500.0], 'K'), + Cpdata=([3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 15.0], 'cal/(mol*K)'), + H298=(-20.0, 'kcal/mol'), + S298=(50.0, 'cal/(mol*K)'), + Tmin=(300.0, 'K'), + Tmax=(2000.0, 'K'), ), conformer=Conformer( - E0=(0.0,'kJ/mol'), + E0=(0.0, 'kJ/mol'), modes=[ - IdealGasTranslation(mass=(28.03,'amu')), - NonlinearRotor(inertia=([5.6952e-47, 2.7758e-46, 3.3454e-46],'kg*m^2'), symmetry=1), - HarmonicOscillator(frequencies=([834.50, 973.31, 975.37, 1067.1, 1238.5, 1379.5, 1472.3, 1691.3, 3121.6, 3136.7, 3192.5, 3221.0],'cm^-1')), + IdealGasTranslation(mass=(28.03, 'amu')), + NonlinearRotor(inertia=([5.6952e-47, 2.7758e-46, 3.3454e-46], 'kg*m^2'), symmetry=1), + HarmonicOscillator(frequencies=( + [834.50, 973.31, 975.37, 1067.1, 1238.5, 1379.5, 1472.3, 1691.3, 3121.6, 3136.7, 3192.5, 3221.0], + 'cm^-1')), ], spinMultiplicity=1, opticalIsomers=1, ), molecule=[Molecule().fromSMILES('C=C')], transportData=TransportData(sigma=(1, 'angstrom'), epsilon=(100, 'K')), - molecularWeight=(28.03,'amu'), + molecularWeight=(28.03, 'amu'), reactive=True, ) @@ -100,8 +105,8 @@ def testPickle(self): ...with no loss of information. """ - import cPickle - species = cPickle.loads(cPickle.dumps(self.species,-1)) + import pickle + species = pickle.loads(pickle.dumps(self.species, -1)) self.assertEqual(self.species.index, species.index) self.assertEqual(self.species.label, species.label) self.assertEqual(self.species.molecule[0].multiplicity, species.molecule[0].multiplicity) @@ -114,7 +119,8 @@ def testPickle(self): self.assertEqual(self.species.conformer.E0.units, species.conformer.E0.units) self.assertEqual(self.species.transportData.sigma.value_si, species.transportData.sigma.value_si) self.assertEqual(self.species.transportData.sigma.units, species.transportData.sigma.units) - self.assertAlmostEqual(self.species.transportData.epsilon.value_si / 1.381e-23, species.transportData.epsilon.value_si / 1.381e-23, 4) + self.assertAlmostEqual(self.species.transportData.epsilon.value_si / 1.381e-23, + species.transportData.epsilon.value_si / 1.381e-23, 4) self.assertEqual(self.species.transportData.epsilon.units, species.transportData.epsilon.units) self.assertEqual(self.species.molecularWeight.value_si, species.molecularWeight.value_si) self.assertEqual(self.species.molecularWeight.units, species.molecularWeight.units) @@ -145,14 +151,16 @@ def testOutput(self): self.assertEqual(self.species.molecularWeight.value_si, species.molecularWeight.value_si) self.assertEqual(self.species.molecularWeight.units, species.molecularWeight.units) self.assertEqual(self.species.reactive, species.reactive) - + def testToAdjacencyList(self): """ Test that toAdjacencyList() works as expected. """ string = self.species.toAdjacencyList() - self.assertTrue(string.startswith(self.species.molecule[0].toAdjacencyList(label=self.species.label,removeH=False)),string) - + self.assertTrue( + string.startswith(self.species.molecule[0].toAdjacencyList(label=self.species.label, removeH=False)), + string) + def testSpeciesProps(self): """ Test a key-value pair is added to the props attribute of Species. 
@@ -160,7 +168,7 @@ def testSpeciesProps(self): self.species.props['foo'] = 'bar' self.assertIsInstance(self.species.props, dict) self.assertEquals(self.species.props['foo'], 'bar') - + def testSpeciesProps_object_attribute(self): """ Test that Species's props dictionaries are independent of each other. @@ -177,20 +185,22 @@ def testSpeciesProps_object_attribute(self): self.assertDictEqual(spc3.props, {'foo': 'bla'}) def testResonanceIsomersGenerated(self): - "Test that 1-penten-3-yl makes 2-penten-1-yl resonance isomer" + """Test that 1-penten-3-yl makes 2-penten-1-yl resonance isomer""" spec = Species().fromSMILES('C=C[CH]CC') spec.generate_resonance_structures() self.assertEquals(len(spec.molecule), 2) self.assertEquals(spec.molecule[1].toSMILES(), "[CH2]C=CCC") def testResonaceIsomersRepresented(self): - "Test that both resonance forms of 1-penten-3-yl are printed by __repr__" + """Test that both resonance forms of 1-penten-3-yl are printed by __repr__""" spec = Species().fromSMILES('C=C[CH]CC') spec.generate_resonance_structures() exec('spec2 = {0!r}'.format(spec)) self.assertEqual(len(spec.molecule), len(spec2.molecule)) for i, j in zip(spec.molecule, spec2.molecule): - self.assertTrue(j.isIsomorphic(i), msg='i is not isomorphic with j, where i is {} and j is {}'.format(i.toSMILES(), j.toSMILES())) + self.assertTrue(j.isIsomorphic(i), + msg='i is not isomorphic with j, where i is {} and j is {}'.format(i.toSMILES(), + j.toSMILES())) def test_is_isomorphic_to_filtered_resonance_structure(self): """ @@ -262,25 +272,26 @@ def testGetResonanceHybrid(self): C~C~CC, where '~' is a hybrid bond of order 1.5. """ spec = Species().fromSMILES('C=C[CH]CC') - hybridMol = spec.getResonanceHybrid() - - self.assertTrue(hybridMol.toSingleBonds().isIsomorphic(spec.molecule[0].toSingleBonds())) - + hybrid_mol = spec.getResonanceHybrid() + + self.assertTrue(hybrid_mol.toSingleBonds().isIsomorphic(spec.molecule[0].toSingleBonds())) + # a rough check for intermediate bond orders - expected_orders = [1,1.5] + expected_orders = [1, 1.5] bonds = [] # ensure all bond orders are expected - for atom in hybridMol.atoms: + for atom in hybrid_mol.atoms: for atom2 in atom.bonds: - bond = hybridMol.getBond(atom,atom2) - self.assertTrue(any([bond.isOrder(otherOrder) for otherOrder in expected_orders]), 'Unexpected bond order {}'.format(bond.getOrderNum())) + bond = hybrid_mol.getBond(atom, atom2) + self.assertTrue(any([bond.isOrder(otherOrder) for otherOrder in expected_orders]), + 'Unexpected bond order {}'.format(bond.getOrderNum())) bonds.append(bond) - + # ensure all expected orders are present for expected_order in expected_orders: - self.assertTrue(any([bond.isOrder(expected_order) for bond in bonds]),'No bond of order {} found'.format(expected_order)) - - + self.assertTrue(any([bond.isOrder(expected_order) for bond in bonds]), + 'No bond of order {} found'.format(expected_order)) + def testCopy(self): """Test that we can make a copy of a Species object.""" @@ -293,20 +304,26 @@ def testCopy(self): self.assertTrue(self.species.molecularWeight.equals(spc_cp.molecularWeight)) self.assertEquals(self.species.reactive, spc_cp.reactive) - + def testCantera(self): """ Test that a Cantera Species object is created correctly. 
""" from rmgpy.thermo import NASA, NASAPolynomial import cantera as ct - rmgSpecies = Species(label="Ar", thermo=NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,4.37967], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,4.37967], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), comment=""" + rmg_species = Species(label="Ar", thermo=NASA( + polynomials=[NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, -745.375, 4.37967], Tmin=(200, 'K'), Tmax=(1000, 'K')), + NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, -745.375, 4.37967], Tmin=(1000, 'K'), + Tmax=(6000, 'K'))], Tmin=(200, 'K'), Tmax=(6000, 'K'), comment=""" Thermo library: primaryThermoLibrary -"""), molecule=[Molecule(SMILES="[Ar]")], transportData=TransportData(shapeIndex=0, epsilon=(1134.93,'J/mol'), sigma=(3.33,'angstrom'), dipoleMoment=(2,'De'), polarizability=(1,'angstrom^3'), rotrelaxcollnum=15.0, comment="""GRI-Mech""")) - - rmg_ctSpecies = rmgSpecies.toCantera(useChemkinIdentifier = True) - - ctSpecies = ct.Species.fromCti("""species(name=u'Ar', +"""), molecule=[Molecule(SMILES="[Ar]")], transportData=TransportData(shapeIndex=0, epsilon=(1134.93, 'J/mol'), + sigma=(3.33, 'angstrom'), dipoleMoment=(2, 'De'), + polarizability=(1, 'angstrom^3'), + rotrelaxcollnum=15.0, comment="""GRI-Mech""")) + + rmg_ct_species = rmg_species.toCantera(useChemkinIdentifier=True) + + ct_species = ct.Species.fromCti("""species(name=u'Ar', atoms='Ar:1', thermo=(NASA([200.00, 1000.00], [ 2.50000000E+00, 0.00000000E+00, 0.00000000E+00, @@ -322,19 +339,22 @@ def testCantera(self): dipole=2.0, polar=1.0, rot_relax=15.0))""") - self.assertEqual(type(rmg_ctSpecies),type(ctSpecies)) - self.assertEqual(rmg_ctSpecies.name, ctSpecies.name) - self.assertEqual(rmg_ctSpecies.composition, ctSpecies.composition) - self.assertEqual(rmg_ctSpecies.size, ctSpecies.size) - self.assertEqual(type(rmg_ctSpecies.thermo), type(ctSpecies.thermo)) - self.assertEqual(type(rmg_ctSpecies.transport), type(ctSpecies.transport)) + self.assertEqual(type(rmg_ct_species), type(ct_species)) + self.assertEqual(rmg_ct_species.name, ct_species.name) + self.assertEqual(rmg_ct_species.composition, ct_species.composition) + self.assertEqual(rmg_ct_species.size, ct_species.size) + self.assertEqual(type(rmg_ct_species.thermo), type(ct_species.thermo)) + self.assertEqual(type(rmg_ct_species.transport), type(ct_species.transport)) def testGetTransportData(self): """ Test that transport data can be retrieved correctly via the getTransportData method. 
""" - spc = Species(label="Ar", molecule=[Molecule(SMILES="[Ar]")], transportData=TransportData(shapeIndex=0, epsilon=(1134.93,'J/mol'), sigma=(3.33,'angstrom'), dipoleMoment=(2,'De'), polarizability=(1,'angstrom^3'), rotrelaxcollnum=15.0, comment="""GRI-Mech""")) + spc = Species(label="Ar", molecule=[Molecule(SMILES="[Ar]")], + transportData=TransportData(shapeIndex=0, epsilon=(1134.93, 'J/mol'), sigma=(3.33, 'angstrom'), + dipoleMoment=(2, 'De'), polarizability=(1, 'angstrom^3'), + rotrelaxcollnum=15.0, comment="""GRI-Mech""")) self.assertTrue(spc.getTransportData() is spc.transportData) diff --git a/rmgpy/stats.py b/rmgpy/stats.py index dfc26ceea7..a7aa894ee8 100644 --- a/rmgpy/stats.py +++ b/rmgpy/stats.py @@ -28,20 +28,21 @@ # # ############################################################################### -import os.path +from __future__ import division + import logging +import os.path + +import matplotlib.pyplot as plt try: import xlwt except ImportError: - logging.warning( - 'Optional package dependency "xlwt" not loaded;\ - Some output features will not work.' - ) - -import matplotlib.pyplot as plt + logging.warning('Optional package dependency "xlwt" not loaded. Some output features will not work.') + xlwt = None from rmgpy.util import makeOutputSubdirectory + class ExecutionStatsWriter(object): """ This class listens to a RMG subject @@ -69,6 +70,7 @@ class ExecutionStatsWriter(object): rmg.detach(listener) """ + def __init__(self, outputDirectory): super(ExecutionStatsWriter, self).__init__() makeOutputSubdirectory(outputDirectory, 'plot') @@ -79,7 +81,7 @@ def __init__(self, outputDirectory): self.edgeSpeciesCount = [] self.edgeReactionCount = [] self.memoryUse = [] - + def update(self, rmg): self.update_execution(rmg) @@ -87,18 +89,18 @@ def update_execution(self, rmg): # Update RMG execution statistics logging.info('Updating RMG execution statistics...') - coreSpec, coreReac, edgeSpec, edgeReac = rmg.reactionModel.getModelSize() - self.coreSpeciesCount.append(coreSpec) - self.coreReactionCount.append(coreReac) - self.edgeSpeciesCount.append(edgeSpec) - self.edgeReactionCount.append(edgeReac) + core_spec, core_reac, edge_spec, edge_reac = rmg.reactionModel.getModelSize() + self.coreSpeciesCount.append(core_spec) + self.coreReactionCount.append(core_reac) + self.edgeSpeciesCount.append(edge_spec) + self.edgeReactionCount.append(edge_reac) elapsed = rmg.execTime[-1] seconds = elapsed % 60 minutes = (elapsed - seconds) % 3600 / 60 hours = (elapsed - seconds - minutes * 60) % (3600 * 24) / 3600 days = (elapsed - seconds - minutes * 60 - hours * 3600) / (3600 * 24) logging.info(' Execution time (DD:HH:MM:SS): ' - '{0:02}:{1:02}:{2:02}:{3:02}'.format(int(days), int(hours), int(minutes), int(seconds))) + '{0:02}:{1:02}:{2:02}:{3:02}'.format(int(days), int(hours), int(minutes), int(seconds))) try: import psutil process = psutil.Process(os.getpid()) @@ -125,9 +127,7 @@ def saveExecutionStatistics(self, rmg): """ # Attempt to import the xlwt package; return if not installed - try: - xlwt - except NameError: + if xlwt is None: logging.warning('Package xlwt not loaded. 
Unable to save execution statistics.') return @@ -136,34 +136,34 @@ def saveExecutionStatistics(self, rmg): sheet = workbook.add_sheet('Statistics') # First column is execution time - sheet.write(0,0,'Execution time (s)') + sheet.write(0, 0, 'Execution time (s)') for i, etime in enumerate(rmg.execTime): - sheet.write(i+1,0,etime) + sheet.write(i + 1, 0, etime) # Second column is number of core species - sheet.write(0,1,'Core species') + sheet.write(0, 1, 'Core species') for i, count in enumerate(self.coreSpeciesCount): - sheet.write(i+1,1,count) + sheet.write(i + 1, 1, count) # Third column is number of core reactions - sheet.write(0,2,'Core reactions') + sheet.write(0, 2, 'Core reactions') for i, count in enumerate(self.coreReactionCount): - sheet.write(i+1,2,count) + sheet.write(i + 1, 2, count) # Fourth column is number of edge species - sheet.write(0,3,'Edge species') + sheet.write(0, 3, 'Edge species') for i, count in enumerate(self.edgeSpeciesCount): - sheet.write(i+1,3,count) + sheet.write(i + 1, 3, count) # Fifth column is number of edge reactions - sheet.write(0,4,'Edge reactions') + sheet.write(0, 4, 'Edge reactions') for i, count in enumerate(self.edgeReactionCount): - sheet.write(i+1,4,count) + sheet.write(i + 1, 4, count) # Sixth column is memory used - sheet.write(0,5,'Memory used (MB)') + sheet.write(0, 5, 'Memory used (MB)') for i, memory in enumerate(self.memoryUse): - sheet.write(i+1,5,memory) + sheet.write(i + 1, 5, memory) # Save workbook to file fstr = os.path.join(rmg.outputDirectory, 'statistics.xls') diff --git a/rmgpy/statsTest.py b/rmgpy/statsTest.py index 0777b34516..9197d83cb3 100644 --- a/rmgpy/statsTest.py +++ b/rmgpy/statsTest.py @@ -32,14 +32,14 @@ This script contains unit tests of the :mod:`rmgpy.stats` module. """ -import unittest import os import os.path import shutil +import unittest from rmgpy.rmg.main import RMG, CoreEdgeReactionModel +from rmgpy.stats import ExecutionStatsWriter -from rmgpy.stats import * ################################################################################ @@ -53,7 +53,7 @@ def setUp(self): Set up an RMG object """ - folder = os.path.join(os.getcwd(),'rmgpy/output') + folder = os.path.join(os.getcwd(), 'rmgpy/output') if not os.path.isdir(folder): os.mkdir(folder) @@ -66,13 +66,13 @@ def test_save(self): """ Tests if the statistics output file can be found. 
""" - + folder = self.rmg.outputDirectory - + writer = ExecutionStatsWriter(folder) writer.update(self.rmg) - statsfile = os.path.join(folder,'statistics.xls') + statsfile = os.path.join(folder, 'statistics.xls') self.assertTrue(os.path.isfile(statsfile)) diff --git a/rmgpy/util.py b/rmgpy/util.py index e055a17176..9b261c50c2 100644 --- a/rmgpy/util.py +++ b/rmgpy/util.py @@ -28,15 +28,16 @@ # # ############################################################################### +import logging import os.path import shutil -from functools import wraps import time -import logging +from functools import wraps class Subject(object): """Subject in Observer Pattern""" + def __init__(self): self._observers = [] @@ -50,11 +51,11 @@ def __init__(self): listener = YourOwnListener() subject.attach(listener) """ + def attach(self, observer): if not observer in self._observers: self._observers.append(observer) - """ Call this method when your (self-implemented) observer class should stop listening to the Subject @@ -69,6 +70,7 @@ def attach(self, observer): subject.detach(listener) """ + def detach(self, observer): try: self._observers.remove(observer) @@ -104,21 +106,24 @@ def update(self, subject): self.data.append(subject.data) """ + def notify(self, modifier=None): for observer in self._observers: if modifier != observer: observer.update(self) + def makeOutputSubdirectory(outputDirectory, folder): """ Create a subdirectory `folder` in the output directory. If the folder already exists (e.g. from a previous job) its contents are deleted. """ - dir = os.path.join(outputDirectory, folder) - if os.path.exists(dir): + dirname = os.path.join(outputDirectory, folder) + if os.path.exists(dirname): # The directory already exists, so delete it (and all its content!) - shutil.rmtree(dir) - os.mkdir(dir) + shutil.rmtree(dirname) + os.mkdir(dirname) + def timefn(fn): @wraps(fn) @@ -126,6 +131,7 @@ def measure_time(*args, **kwargs): t1 = time.time() result = fn(*args, **kwargs) t2 = time.time() - logging.info ("@timefn: {} took {:.2f} seconds".format(fn.func_name, t2 - t1)) + logging.info("@timefn: {} took {:.2f} seconds".format(fn.__name__, t2 - t1)) return result + return measure_time From 31efff30797d2bf4c644bb7ea8fb0d08fcf87dfb Mon Sep 17 00:00:00 2001 From: Max Liu Date: Mon, 12 Aug 2019 15:15:47 -0400 Subject: [PATCH 032/155] Py3 and PEP-8 changes to pure python part of molecule module --- rmgpy/molecule/__init__.py | 8 +- rmgpy/molecule/adjlist.py | 960 +++++++++++---------- rmgpy/molecule/adjlistTest.py | 294 +++---- rmgpy/molecule/atomtypeTest.py | 181 ++-- rmgpy/molecule/atomtypedatabase.py | 125 +-- rmgpy/molecule/converterTest.py | 40 +- rmgpy/molecule/draw.py | 1283 +++++++++++++++------------- rmgpy/molecule/drawTest.py | 13 +- rmgpy/molecule/elementTest.py | 18 +- rmgpy/molecule/filtration.py | 30 +- rmgpy/molecule/filtrationTest.py | 8 +- rmgpy/molecule/graphTest.py | 503 ++++++----- rmgpy/molecule/groupTest.py | 598 ++++++------- rmgpy/molecule/inchiTest.py | 82 +- rmgpy/molecule/isomorphismTest.py | 331 +++---- rmgpy/molecule/kekulizeTest.py | 7 +- rmgpy/molecule/moleculeTest.py | 847 +++++++++--------- rmgpy/molecule/pathfinderTest.py | 138 +-- rmgpy/molecule/resonanceTest.py | 230 ++--- rmgpy/molecule/symmetryTest.py | 312 +++---- rmgpy/molecule/translatorTest.py | 30 - rmgpy/molecule/util.py | 34 +- rmgpy/molecule/utilTest.py | 50 +- rmgpy/molecule/vf2Test.py | 20 +- 24 files changed, 3222 insertions(+), 2920 deletions(-) diff --git a/rmgpy/molecule/__init__.py b/rmgpy/molecule/__init__.py index 
0dd76cd220..6de272043f 100644 --- a/rmgpy/molecule/__init__.py +++ b/rmgpy/molecule/__init__.py @@ -28,7 +28,7 @@ # # ############################################################################### -from .atomtype import * -from .element import * -from .molecule import * -from .group import * +from rmgpy.molecule.atomtype import AtomType, atomTypes +from rmgpy.molecule.element import Element, PeriodicSystem, getElement +from rmgpy.molecule.molecule import Atom, Bond, Molecule +from rmgpy.molecule.group import GroupAtom, GroupBond, Group diff --git a/rmgpy/molecule/adjlist.py b/rmgpy/molecule/adjlist.py index 791daadfb0..043cd63992 100644 --- a/rmgpy/molecule/adjlist.py +++ b/rmgpy/molecule/adjlist.py @@ -33,115 +33,132 @@ adjacency list format used by Reaction Mechanism Generator (RMG). """ import logging -import warnings import re -import numpy as np -from .molecule import Atom, Bond, getAtomType -from .group import GroupAtom, GroupBond -from .element import getElement, PeriodicSystem +import warnings + from rmgpy.exceptions import InvalidAdjacencyListError +from rmgpy.molecule.atomtype import getAtomType +from rmgpy.molecule.element import getElement, PeriodicSystem +from rmgpy.molecule.group import GroupAtom, GroupBond +from rmgpy.molecule.molecule import Atom, Bond + class Saturator(object): @staticmethod def saturate(atoms): - ''' - Returns a list of atoms that is extended - (and bond attributes) by saturating the valency of the non-hydrogen atoms with an - appropriate number of hydrogen atoms. - - The required number of hydrogen atoms per heavy atom is determined as follows: - H's = max number of valence electrons - atom.radicalElectrons - - 2* atom.lonePairs - order - atom.charge - - ''' - newAtoms = [] - for atom in atoms: - try: - max_number_of_valence_electrons = PeriodicSystem.valence_electrons[atom.symbol] - except KeyError: - raise InvalidAdjacencyListError('Cannot add hydrogens to adjacency list: Unknown orbital for atom "{0}".'.format(atom.symbol)) - - order = atom.getBondOrdersForAtom() - - number_of_H_to_be_added = max_number_of_valence_electrons - atom.radicalElectrons - 2* atom.lonePairs - int(order) - atom.charge - - if number_of_H_to_be_added < 0: - raise InvalidAdjacencyListError('Incorrect electron configuration on atom.') - - for _ in range(number_of_H_to_be_added): - a = Atom(element='H', radicalElectrons=0, charge=0, label='', lonePairs=0) - b = Bond(atom, a, 'S') - newAtoms.append(a) - atom.bonds[a] = b - a.bonds[atom] = b - atoms.extend(newAtoms) + """ + Returns a list of atoms that is extended + (and bond attributes) by saturating the valency of the non-hydrogen atoms with an + appropriate number of hydrogen atoms. 
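# --- Illustration only, not part of this patch: a sketch of the hydrogen-count rule that the
# --- docstring spells out just below, written as a hypothetical standalone helper.
# H's = valence electrons - unpaired electrons - 2 * lone pairs - total bond order - charge
def hydrogens_to_add(valence_electrons, radicals, lone_pairs, bond_order, charge):
    return valence_electrons - radicals - 2 * lone_pairs - bond_order - charge

# Carbon of a methyl radical with no explicit H atoms: 4 - 1 - 2*0 - 0 - 0 = 3 hydrogens added.
assert hydrogens_to_add(4, 1, 0, 0, 0) == 3
# Oxygen of water with no explicit H atoms: 6 - 0 - 2*2 - 0 - 0 = 2 hydrogens added.
assert hydrogens_to_add(6, 0, 2, 0, 0) == 2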
+ + The required number of hydrogen atoms per heavy atom is determined as follows: + H's = max number of valence electrons - atom.radicalElectrons + - 2* atom.lonePairs - order - atom.charge + + """ + new_atoms = [] + for atom in atoms: + try: + max_number_of_valence_electrons = PeriodicSystem.valence_electrons[atom.symbol] + except KeyError: + raise InvalidAdjacencyListError( + 'Cannot add hydrogens to adjacency list: Unknown orbital for atom "{0}".'.format(atom.symbol)) + + order = atom.getBondOrdersForAtom() + + number_of_h_to_be_added = max_number_of_valence_electrons - atom.radicalElectrons - 2 * atom.lonePairs - int( + order) - atom.charge + + if number_of_h_to_be_added < 0: + raise InvalidAdjacencyListError('Incorrect electron configuration on atom.') + + for _ in range(number_of_h_to_be_added): + a = Atom(element='H', radicalElectrons=0, charge=0, label='', lonePairs=0) + b = Bond(atom, a, 'S') + new_atoms.append(a) + atom.bonds[a] = b + a.bonds[atom] = b + atoms.extend(new_atoms) + class ConsistencyChecker(object): - + @staticmethod def check_partial_charge(atom): - ''' - Checks whether the partial charge attribute of the atom checks out with - the theoretical one: - - ''' - if atom.symbol == 'X': - return # because we can't check it. - - valence = PeriodicSystem.valence_electrons[atom.symbol] - order = atom.getBondOrdersForAtom() - - theoretical = valence - order - atom.radicalElectrons - 2*atom.lonePairs + """ + Checks whether the partial charge attribute of the atom checks out with + the theoretical one: - if not (-0.301 < atom.charge - theoretical < 0.301): - # It should be 0, but -0.1 is caused by a Hydrogen bond - raise InvalidAdjacencyListError( - ('Invalid valency for atom {symbol} ({type}) with {radicals} unpaired electrons, ' - '{lonePairs} pairs of electrons, {charge} charge, and bonds [{bonds}].' - ).format(symbol=atom.symbol, - type=getAtomType(atom, atom.edges).label, - radicals=atom.radicalElectrons, - lonePairs=atom.lonePairs, - charge=atom.charge, - bonds=','.join([str(bond.order) for bond in atom.bonds.values()]) - )) + """ + if atom.symbol == 'X': + return # because we can't check it. + + valence = PeriodicSystem.valence_electrons[atom.symbol] + order = atom.getBondOrdersForAtom() + + theoretical = valence - order - atom.radicalElectrons - 2 * atom.lonePairs + + if not (-0.301 < atom.charge - theoretical < 0.301): + # It should be 0, but -0.1 is caused by a Hydrogen bond + raise InvalidAdjacencyListError( + 'Invalid valency for atom {symbol} ({type}) with {radicals} unpaired electrons, ' + '{lonePairs} pairs of electrons, {charge} charge, and bonds [{bonds}].'.format( + symbol=atom.symbol, + type=getAtomType(atom, atom.edges).label, + radicals=atom.radicalElectrons, + lonePairs=atom.lonePairs, + charge=atom.charge, + bonds=','.join([str(bond.order) for bond in atom.bonds.values()]) + ) + ) @staticmethod - def check_multiplicity(nRad, multiplicity): - ''' + def check_multiplicity(n_rad, multiplicity): + """ Check that the multiplicity complies with the formula: m = 2s + 1, where s is the sum of the spin [+/- (1/2) ] of the unpaired electrons - - For a simple radical (nRad = 1): + + For a simple radical (n_rad = 1): s = +1/2 , m = 2 (doublet) - - For a biradical, s can be either 0 [+0.5 + (-0.5) ] or 1 [+0.5 + (+0.5) ] + + For a biradical, s can be either 0 [+0.5 + (-0.5) ] or 1 [+0.5 + (+0.5) ] and m = 1 (singlet) or m = 3 (triplet). 
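# --- Illustration only, not part of this patch: the m = 2s + 1 bookkeeping that check_multiplicity
# --- in this hunk enforces, as a hypothetical helper. Pairing the n unpaired spins up or down gives
# --- total spins s = n/2, n/2 - 1, ..., so the allowed multiplicities are n + 1, n - 1, ...
def allowed_multiplicities(n_rad):
    return [n_rad + 1 - 2 * k for k in range(n_rad // 2 + 1)]

assert allowed_multiplicities(1) == [2]         # doublet
assert allowed_multiplicities(2) == [3, 1]      # triplet or singlet
assert allowed_multiplicities(3) == [4, 2]      # quartet or doublet
assert allowed_multiplicities(4) == [5, 3, 1]   # quintet, triplet or singlet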
- ''' - if nRad in [0,1]: - if multiplicity != (nRad + 1): - raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of radicals {1}.'.format(multiplicity, nRad)) - elif nRad == 2: - if not int(multiplicity) in [1,3]: raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of radicals {1}.'.format(multiplicity, nRad)) - elif nRad == 3: - if not int(multiplicity) in [4,2]: raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of radicals {1}.'.format(multiplicity, nRad)) - elif nRad == 4: - if not int(multiplicity) in [5,3,1]: raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of radicals {1}.'.format(multiplicity, nRad)) - else: logging.warning("Consistency checking of multiplicity of molecules with more than 4 unpaired electrons is not implemented yet!") - + """ + if n_rad in [0, 1]: + if multiplicity != (n_rad + 1): + raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of ' + 'radicals {1}.'.format(multiplicity, n_rad)) + elif n_rad == 2: + if not int(multiplicity) in [1, 3]: + raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of ' + 'radicals {1}.'.format(multiplicity, n_rad)) + elif n_rad == 3: + if not int(multiplicity) in [4, 2]: + raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of ' + 'radicals {1}.'.format(multiplicity, n_rad)) + elif n_rad == 4: + if not int(multiplicity) in [5, 3, 1]: + raise InvalidAdjacencyListError('Multiplicity {0} not in agreement with total number of ' + 'radicals {1}.'.format(multiplicity, n_rad)) + else: + logging.warning("Consistency checking of multiplicity of molecules with " + "more than 4 unpaired electrons is not implemented yet!") + @staticmethod def check_hund_rule(atom, multiplicity): - ''' + """ It is checked whether atoms with 2 unpaired electrons on the same atom - result in a multiplicity of 3, and not 1. - + result in a multiplicity of 3, and not 1. + Unpaired electrons in 2 different orbitals belonging to the same atom - should have the same spin, and hence, should result in a multiplicity of 3. - ''' + should have the same spin, and hence, should result in a multiplicity of 3. + """ if atom.radicalElectrons == 2 and multiplicity == 1: - raise InvalidAdjacencyListError("Violation of hund's rule. Invalid multiplicity of {0} because there is an atom with {1} unpaired electrons" - .format(multiplicity, atom.radicalElectrons)) - + raise InvalidAdjacencyListError( + "Violation of hund's rule. 
Invalid multiplicity of {0} because there is an " + "atom with {1} unpaired electrons".format(multiplicity, atom.radicalElectrons)) + + ################################################################################ def fromOldAdjacencyList(adjlist, group=False, saturateH=False): @@ -154,7 +171,7 @@ def fromOldAdjacencyList(adjlist, group=False, saturateH=False): atoms = [] atomdict = {} bonds = {} - + try: adjlist = adjlist.strip() lines = adjlist.splitlines() @@ -166,27 +183,27 @@ def fromOldAdjacencyList(adjlist, group=False, saturateH=False): label = lines.pop(0) if len(lines) == 0: raise InvalidAdjacencyListError("""Error in adjacency list\n{0}\nNo atoms specified.""".format(adjlist)) - - mistake1 = re.compile('\{[^}]*\s+[^}]*\}') - atomicMultiplicities = {} # these are no longer stored on atoms, so we make a separate dictionary + + mistake1 = re.compile(r'\{[^}]*\s+[^}]*\}') + atomic_multiplicities = {} # these are no longer stored on atoms, so we make a separate dictionary # Iterate over the remaining lines, generating Atom or GroupAtom objects for line in lines: # Sometimes people put spaces after commas, which messes up the # parse-by-whitespace. Examples include '{Cd, Ct}'. if mistake1.search(line): - raise InvalidAdjacencyListError( - "Error in adjacency list: \n{1}\nspecies shouldn't have spaces inside braces: {0}".format(mistake1.search(line).group(), adjlist) - ) + raise InvalidAdjacencyListError("Error in adjacency list: \n{1}\nspecies shouldn't have spaces inside " + "braces: {0}".format(mistake1.search(line).group(), adjlist)) # Sometimes commas are used to delimit bonds in the bond list, # so replace them just in case line = line.replace('},{', '} {') - + data = line.split() # Skip if blank line - if len(data) == 0: continue + if len(data) == 0: + continue # First item is index for atom # Sometimes these have a trailing period (as if in a numbered list), @@ -194,125 +211,139 @@ def fromOldAdjacencyList(adjlist, group=False, saturateH=False): aid = int(data[0].strip('.')) # If second item starts with '*', then atom is labeled - label = ''; index = 1 + label = '' + index = 1 if data[1][0] == '*': label = data[1] index += 1 # Next is the element or functional group element # A list can be specified with the {,} syntax - atomType = data[index] - if atomType[0] == '{': - atomType = atomType[1:-1].split(',') + atom_type = data[index] + if atom_type[0] == '{': + atom_type = atom_type[1:-1].split(',') else: - atomType = [atomType] + atom_type = [atom_type] index += 1 - + # Next is the electron state - radicalElectrons = []; - additionalLonePairs = [] - elecState = data[index].upper() - if elecState[0] == '{': - elecState = elecState[1:-1].split(',') + radical_electrons = [] + additional_lone_pairs = [] + elec_state = data[index].upper() + if elec_state[0] == '{': + elec_state = elec_state[1:-1].split(',') else: - elecState = [elecState] - if len(elecState) == 0: - raise InvalidAdjacencyListError("Error in adjacency list:\n{0}\nThere must be some electronic state defined for an old adjlist".format(adjlist)) - for e in elecState: + elec_state = [elec_state] + if len(elec_state) == 0: + raise InvalidAdjacencyListError( + "Error in adjacency list:\n{0}\nThere must be some electronic state defined for an " + "old adjlist".format(adjlist)) + for e in elec_state: if e == '0': - radicalElectrons.append(0); additionalLonePairs.append(0) + radical_electrons.append(0) + additional_lone_pairs.append(0) elif e == '1': - radicalElectrons.append(1); additionalLonePairs.append(0) + 
radical_electrons.append(1) + additional_lone_pairs.append(0) elif e == '2': if not group: - raise InvalidAdjacencyListError("Error in adjacency list:\n{0}\nNumber of radical electrons = 2 is not specific enough. Please use 2S or 2T.".format(adjlist)) + raise InvalidAdjacencyListError( + "Error in adjacency list:\n{0}\nNumber of radical electrons = 2 is not specific enough. " + "Please use 2S or 2T.".format(adjlist)) # includes 2S and 2T - radicalElectrons.append(0); additionalLonePairs.append(1) - radicalElectrons.append(2); additionalLonePairs.append(0) + radical_electrons.append(0); additional_lone_pairs.append(1) + radical_electrons.append(2); additional_lone_pairs.append(0) elif e == '2S': - radicalElectrons.append(0); additionalLonePairs.append(1) + radical_electrons.append(0); additional_lone_pairs.append(1) elif e == '2T': - radicalElectrons.append(2); additionalLonePairs.append(0) + radical_electrons.append(2); additional_lone_pairs.append(0) elif e == '3': if not group: - raise InvalidAdjacencyListError("Error in adjacency list:\n{0}\nNumber of radical electrons = 3 is not specific enough. Please use 3D or 3Q.".format(adjlist)) + raise InvalidAdjacencyListError( + "Error in adjacency list:\n{0}\nNumber of radical electrons = 3 is not specific enough. " + "Please use 3D or 3Q.".format(adjlist)) # includes 3D and 3Q - radicalElectrons.append(1); additionalLonePairs.append(1) - radicalElectrons.append(3); additionalLonePairs.append(0) + radical_electrons.append(1); additional_lone_pairs.append(1) + radical_electrons.append(3); additional_lone_pairs.append(0) elif e == '3D': - radicalElectrons.append(1); additionalLonePairs.append(1) + radical_electrons.append(1); additional_lone_pairs.append(1) elif e == '3Q': - radicalElectrons.append(3); additionalLonePairs.append(0) + radical_electrons.append(3); additional_lone_pairs.append(0) elif e == '4': if not group: - raise InvalidAdjacencyListError("Error in adjacency list:\n{0}\nNumber of radical electrons = 4 is not specific enough. Please use 4S, 4T, or 4V.".format(adjlist)) + raise InvalidAdjacencyListError( + "Error in adjacency list:\n{0}\nNumber of radical electrons = 4 is not specific enough. " + "Please use 4S, 4T, or 4V.".format(adjlist)) # includes 4S, 4T, and 4V - radicalElectrons.append(0); additionalLonePairs.append(2) - radicalElectrons.append(2); additionalLonePairs.append(1) - radicalElectrons.append(4); additionalLonePairs.append(0) + radical_electrons.append(0); additional_lone_pairs.append(2) + radical_electrons.append(2); additional_lone_pairs.append(1) + radical_electrons.append(4); additional_lone_pairs.append(0) elif e == '4S': - radicalElectrons.append(0); additionalLonePairs.append(2) + radical_electrons.append(0); additional_lone_pairs.append(2) elif e == '4T': - radicalElectrons.append(2); additionalLonePairs.append(1) + radical_electrons.append(2); additional_lone_pairs.append(1) elif e == '4V': - radicalElectrons.append(4); additionalLonePairs.append(0) + radical_electrons.append(4); additional_lone_pairs.append(0) elif e == 'X': if not group: - raise InvalidAdjacencyListError("Error in adjacency list:\n{0}\nNumber of radical electrons = X is not specific enough. Wildcards should only be used for groups.".format(adjlist)) - radicalElectrons = [] + raise InvalidAdjacencyListError( + "Error in adjacency list:\n{0}\nNumber of radical electrons = X is not specific enough. 
" + "Wildcards should only be used for groups.".format(adjlist)) + radical_electrons = [] index += 1 - + # Next number defines the number of lone electron pairs (if provided) - lonePairsOfElectrons = None + lone_pairs_of_electrons = None if len(data) > index: - lpState = data[index] - if lpState[0] == '{': + lp_state = data[index] + if lp_state[0] == '{': # this is the start of the chemical bonds - no lone pair info was provided - lonePairsOfElectrons = None + lone_pairs_of_electrons = None else: - if lpState == '0': - lonePairsOfElectrons = 0 - if lpState == '1': - lonePairsOfElectrons = 1 - if lpState == '2': - lonePairsOfElectrons = 2 - if lpState == '3': - lonePairsOfElectrons = 3 - if lpState == '4': - lonePairsOfElectrons = 4 + if lp_state == '0': + lone_pairs_of_electrons = 0 + if lp_state == '1': + lone_pairs_of_electrons = 1 + if lp_state == '2': + lone_pairs_of_electrons = 2 + if lp_state == '3': + lone_pairs_of_electrons = 3 + if lp_state == '4': + lone_pairs_of_electrons = 4 index += 1 - else: # no bonds or lone pair info provided. - lonePairsOfElectrons = None + else: # no bonds or lone pair info provided. + lone_pairs_of_electrons = None # Create a new atom based on the above information if group: - if lonePairsOfElectrons is not None: - lonePairsOfElectrons = [additional + lonePairsOfElectrons for additional in additionalLonePairs] - atom = GroupAtom(atomType=atomType, - radicalElectrons=sorted(set(radicalElectrons)), + if lone_pairs_of_electrons is not None: + lone_pairs_of_electrons = [additional + lone_pairs_of_electrons for additional in additional_lone_pairs] + atom = GroupAtom(atomType=atom_type, + radicalElectrons=sorted(set(radical_electrons)), charge=None, label=label, - lonePairs=lonePairsOfElectrons, # Assign lonePairsOfElectrons as None if it is not explicitly provided + lonePairs=lone_pairs_of_electrons, + # Assign lone_pairs_of_electrons as None if it is not explicitly provided ) - + else: - if lonePairsOfElectrons is not None: + if lone_pairs_of_electrons is not None: # Intermediate adjlist representation - lonePairsOfElectrons = lonePairsOfElectrons + additionalLonePairs[0] + lone_pairs_of_electrons = lone_pairs_of_electrons + additional_lone_pairs[0] else: # Add the standard number of lone pairs with the additional lone pairs - lonePairsOfElectrons = PeriodicSystem.lone_pairs[atomType[0]] + additionalLonePairs[0] - - atom = Atom(element=atomType[0], - radicalElectrons=radicalElectrons[0], - charge=0, - label=label, - lonePairs=lonePairsOfElectrons, - ) + lone_pairs_of_electrons = PeriodicSystem.lone_pairs[atom_type[0]] + additional_lone_pairs[0] + + atom = Atom(element=atom_type[0], + radicalElectrons=radical_electrons[0], + charge=0, + label=label, + lonePairs=lone_pairs_of_electrons, + ) # Add the atom to the list atoms.append(atom) atomdict[aid] = atom - + # Process list of bonds bonds[aid] = {} for datum in data[index:]: @@ -320,12 +351,13 @@ def fromOldAdjacencyList(adjlist, group=False, saturateH=False): # Sometimes commas are used to delimit bonds in the bond list, # so strip them just in case datum = datum.strip(',') - + aid2, comma, order = datum[1:-1].partition(',') aid2 = int(aid2) if aid == aid2: - raise InvalidAdjacencyListError('Error in adjacency list:\n{1}\nAttempted to create a bond between atom {0:d} and itself.'.format(aid,adjlist)) - + raise InvalidAdjacencyListError('Error in adjacency list:\n{1}\nAttempted to create a bond between ' + 'atom {0:d} and itself.'.format(aid, adjlist)) + if order[0] == '{': order = order[1:-1].split(',') 
else: @@ -337,17 +369,22 @@ def fromOldAdjacencyList(adjlist, group=False, saturateH=False): for atom1 in bonds: for atom2 in bonds[atom1]: if atom2 not in bonds: - raise InvalidAdjacencyListError('Error in adjacency list:\n{1}\nAtom {0:d} not in bond dictionary.'.format(atom2,adjlist)) + raise InvalidAdjacencyListError( + 'Error in adjacency list:\n{1}\nAtom {0:d} not in bond dictionary.'.format(atom2, adjlist)) elif atom1 not in bonds[atom2]: - raise InvalidAdjacencyListError('Error in adjacency list:\n{2}\nFound bond between {0:d} and {1:d}, but not the reverse'.format(atom1, atom2, adjlist)) + raise InvalidAdjacencyListError( + 'Error in adjacency list:\n{2}\nFound bond between {0:d} and {1:d}, ' + 'but not the reverse'.format(atom1, atom2, adjlist)) elif bonds[atom1][atom2] != bonds[atom2][atom1]: - raise InvalidAdjacencyListError('Error in adjacency list: \n{4}\nFound bonds between {0:d} and {1:d}, but of different orders "{2}" and "{3}".'.format(atom1, atom2, bonds[atom1][atom2], bonds[atom2][atom1], adjlist)) + raise InvalidAdjacencyListError( + 'Error in adjacency list: \n{4}\nFound bonds between {0:d} and {1:d}, but of different orders ' + '"{2}" and "{3}".'.format(atom1, atom2, bonds[atom1][atom2], bonds[atom2][atom1], adjlist)) # Convert bonddict to use Atom[group] and Bond[group] objects - atomkeys = atomdict.keys() + atomkeys = list(atomdict.keys()) atomkeys.sort() for aid1 in atomkeys: - atomkeys2 = bonds[aid1].keys() + atomkeys2 = list(bonds[aid1].keys()) atomkeys2.sort() for aid2 in atomkeys2: if aid1 < aid2: @@ -359,62 +396,68 @@ def fromOldAdjacencyList(adjlist, group=False, saturateH=False): elif len(order) == 1: bond = Bond(atom1, atom2, order[0]) else: - raise InvalidAdjacencyListError('Error in adjacency list:\n{0}\nMultiple bond orders specified for an atom.'.format(adjlist)) + raise InvalidAdjacencyListError('Error in adjacency list:\n{0}\nMultiple bond orders specified ' + 'for an atom.'.format(adjlist)) atom1.edges[atom2] = bond atom2.edges[atom1] = bond - + if not group: if saturateH: # Add explicit hydrogen atoms to complete structure if desired - newAtoms = [] + new_atoms = [] for atom in atoms: try: valence = PeriodicSystem.valences[atom.symbol] except KeyError: - raise InvalidAdjacencyListError('Error in adjacency list:\n{1}\nCannot add hydrogens: Unknown valence for atom "{0}".'.format(atom.symbol, adjlist)) + raise InvalidAdjacencyListError('Error in adjacency list:\n{1}\nCannot add hydrogens: Unknown ' + 'valence for atom "{0}".'.format(atom.symbol, adjlist)) radical = atom.radicalElectrons order = atom.getBondOrdersForAtom() - count = valence - radical - int(order) - 2*(atom.lonePairs-PeriodicSystem.lone_pairs[atom.symbol]) + count = valence - radical - int(order) - 2 * ( + atom.lonePairs - PeriodicSystem.lone_pairs[atom.symbol]) for i in range(count): a = Atom(element='H', radicalElectrons=0, charge=0, label='', lonePairs=0) b = Bond(atom, a, 'S') - newAtoms.append(a) + new_atoms.append(a) atom.bonds[a] = b a.bonds[atom] = b - atoms.extend(newAtoms) - + atoms.extend(new_atoms) + # Calculate the multiplicity for the molecule and update the charges on each atom - nRad = 0 # total number of radical electrons + n_rad = 0 # total number of radical electrons for atom in atoms: atom.updateCharge() - nRad += atom.radicalElectrons - multiplicity = nRad + 1 # 2 s + 1, where s is the combined spin of unpaired electrons (s = 1/2 per unpaired electron) - + n_rad += atom.radicalElectrons + multiplicity = n_rad + 1 # 2 s + 1, where s is the combined spin of unpaired 
electrons (s = 1/2 per unpaired electron) + else: # Don't set a multiplicity for groups when converting from an old adjlist multiplicity = None - + except InvalidAdjacencyListError: logging.error("Troublesome adjacency list:\n" + adjlist) raise - + return atoms, multiplicity + + ############################### -re_IntermediateAdjList = re.compile('^\s*(\d*)\s+' + # atom number digit - '(?P