From 2c961cbbb89aa8f2a2ab257dcb34322f7cc98bb9 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Thu, 17 Jan 2019 15:48:36 +0100
Subject: [PATCH] Implement `CalcJob` process class (#2389)

This commit can be summarized in three steps:

* Reimplementation of a job calculation as a `Process` called `CalcJob`
* Changing the job calculation state to be purely informational
* Removal of the old job calculation mechanics and business logic

* Reimplementation of a job calculation as a `Process` called `CalcJob`

The old way of creating a job calculation was to subclass the `JobCalculation` class, override the `_use_methods` class method to define the input nodes, and implement `_prepare_for_submission` to set up the input files for the calculation. The problem was that these methods were implemented on the `Node` class, thus mixing the responsibilities of running a calculation and introspecting the results of a completed one. Here we define the `CalcJob` class, a subclass of `Process`. This class replaces the old `JobCalculation` and allows a user to define the inputs and outputs through the `ProcessSpec`, just as they would for a `WorkChain`. Except that instead of defining an `outline`, one implements `prepare_for_submission`, which fulfills the exact same function as before, only it is now a public method of the `CalcJob` process class.

* Changing the job calculation state to be purely informational

Finally, the role of the job calculation state, stored as an attribute with the key `state` on the `CalcJobNode`, has changed significantly. The original job calculation had a calculation state that controlled the logic during its lifetime. This was already superseded a long time ago by the process wrapper, which now fully governs the progression of the calculation. Despite the calculation state no longer being authoritative during the calculation's lifetime, it was still present. Here we finally remove it and leave only a stripped-down version. The remaining state is stored as an attribute and acts as a sub-state while the `CalcJob` process is in an active state, providing a more granular state that can be queried for. This is useful because the process status, which holds similar information, is human-readable and does not lend itself to easy querying.

* Remove methods of `Node` to replace or remove links

The `replace_*link_from` methods were only implemented because they were needed by the old job calculation machinery. Since that system is now removed, this functionality is no longer needed. Likewise, the methods to remove links, either from the cache or the database, were neither used nor tested, so they are removed.

* Prevent adding links from or to sealed nodes

When a `Node` instance is sealed, it should be impossible to add links to or from it. To enforce this, the `validate_incoming` and `validate_outgoing` methods are overridden in the `Sealable` mixin to raise `ModificationNotAllowed` if the node is sealed.
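As an illustration (not part of this patch), a minimal sketch of what a `CalcJob` plugin could look like after this change. The class name `MultiplyJob`, the file names, and the assumption that the base class already declares a `code` input are purely illustrative; the exact import path and the sandbox `folder` API may differ from the implementation added in `aiida/work/calcjob.py`:

    # Hypothetical plugin sketch: inputs are declared through the `ProcessSpec`
    # and the raw input files are written in `prepare_for_submission`.
    from aiida.common.datastructures import CalcInfo, CodeInfo
    from aiida.orm.data.base import Int
    from aiida.work.calcjob import CalcJob


    class MultiplyJob(CalcJob):
        """Hypothetical job calculation writing a single integer to its input file."""

        @classmethod
        def define(cls, spec):
            # Inputs and outputs are defined just as they would be for a `WorkChain`
            super(MultiplyJob, cls).define(spec)
            spec.input('x', valid_type=Int)

        def prepare_for_submission(self, folder):
            # Write the raw input file to the sandbox folder and describe the job
            with folder.open('aiida.in', 'w') as handle:
                handle.write(u'{}\n'.format(self.inputs.x.value))

            codeinfo = CodeInfo()
            codeinfo.code_uuid = self.inputs.code.uuid  # assumes a `code` input on the base spec
            codeinfo.stdin_name = 'aiida.in'
            codeinfo.stdout_name = 'aiida.out'

            calcinfo = CalcInfo()
            calcinfo.codes_info = [codeinfo]
            calcinfo.retrieve_list = ['aiida.out']
            return calcinfo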
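The granular sub-state can then be filtered on directly with the `QueryBuilder`. A hedged sketch, assuming the attribute key is `state` (as described above) and that it stores the plain value of the `CalcJobState` enum:

    # Find all CalcJobNodes whose job is currently being parsed (assumed storage format)
    from aiida.common.datastructures import CalcJobState
    from aiida.orm.node.process import CalcJobNode
    from aiida.orm.querybuilder import QueryBuilder

    builder = QueryBuilder()
    builder.append(CalcJobNode, filters={'attributes.state': {'==': CalcJobState.PARSING.value}})
    for calcjob, in builder.iterall():
        print(calcjob.pk)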
--- .ci/polish/lib/template/workchain.tpl | 2 +- .ci/test_daemon.py | 84 +- .pre-commit-config.yaml | 4 - .pylintrc | 4 +- aiida/backends/djsite/db/__init__.py | 2 - .../migrations/0015_invalidating_node_hash.py | 4 +- .../0023_calc_job_option_attribute_keys.py | 127 ++ .../backends/djsite/db/migrations/__init__.py | 5 +- aiida/backends/djsite/db/models.py | 3 +- aiida/backends/djsite/db/subtests/generic.py | 17 +- .../backends/djsite/db/subtests/migrations.py | 56 + ...8c391c49_calc_job_option_attribute_keys.py | 98 + aiida/backends/sqlalchemy/tests/generic.py | 16 +- aiida/backends/sqlalchemy/tests/migrations.py | 82 + aiida/backends/tests/__init__.py | 2 - aiida/backends/tests/backup_script.py | 17 +- aiida/backends/tests/calculation_node.py | 26 - .../tests/cmdline/commands/test_calcjob.py | 66 +- .../cmdline/commands/test_calculation.py | 85 +- .../tests/cmdline/commands/test_process.py | 1 + .../tests/common/test_datastructures.py | 41 - aiida/backends/tests/export_and_import.py | 29 - aiida/backends/tests/nodes.py | 204 +- aiida/backends/tests/orm/mixins.py | 41 +- aiida/backends/tests/restapi.py | 17 +- aiida/backends/tests/tcodexporter.py | 12 +- aiida/backends/tests/test_plugin_loader.py | 6 +- aiida/backends/tests/work/class_loader.py | 18 +- aiida/backends/tests/work/job_processes.py | 186 -- aiida/backends/tests/work/persistence.py | 6 +- aiida/backends/tests/work/process.py | 54 +- .../backends/tests/work/test_calcfunctions.py | 2 +- aiida/backends/tests/work/test_futures.py | 4 +- .../tests/work/test_process_builder.py | 23 +- .../tests/work/test_process_function.py | 25 +- aiida/backends/tests/work/test_runners.py | 6 +- aiida/backends/tests/work/work_chain.py | 28 +- aiida/calculations/plugins/arithmetic/add.py | 138 +- .../calculations/plugins/templatereplacer.py | 104 +- aiida/cmdline/commands/cmd_calculation.py | 4 +- aiida/cmdline/params/options/__init__.py | 14 +- aiida/cmdline/utils/common.py | 12 +- aiida/common/datastructures.py | 151 +- aiida/common/extendeddicts.py | 21 +- aiida/daemon/execmanager.py | 28 +- aiida/manage/caching.py | 37 +- aiida/orm/__init__.py | 2 - aiida/orm/calculation/__init__.py | 16 - aiida/orm/calculation/inline.py | 220 -- aiida/orm/calculation/job/__init__.py | 14 - aiida/orm/data/array/trajectory.py | 2 +- aiida/orm/data/cif.py | 2 +- aiida/orm/data/code.py | 43 - aiida/orm/data/structure.py | 2 +- aiida/orm/implementation/django/node.py | 13 - aiida/orm/implementation/general/node.py | 105 +- aiida/orm/implementation/sqlalchemy/node.py | 22 - aiida/orm/importexport.py | 7 +- aiida/orm/mixins.py | 32 +- aiida/orm/node/process/calculation/calcjob.py | 1869 ++--------------- aiida/orm/node/process/process.py | 2 +- aiida/parsers/parser.py | 17 +- aiida/parsers/plugins/arithmetic/add.py | 6 +- .../plugins/templatereplacer/doubler.py | 2 +- aiida/plugins/entry_point.py | 7 - aiida/plugins/loader.py | 2 + aiida/scheduler/__init__.py | 10 +- aiida/scheduler/datastructures.py | 57 +- aiida/scheduler/plugins/direct.py | 32 +- aiida/scheduler/plugins/lsf.py | 30 +- aiida/scheduler/plugins/pbsbaseclasses.py | 32 +- aiida/scheduler/plugins/sge.py | 54 +- aiida/scheduler/plugins/slurm.py | 34 +- aiida/scheduler/plugins/test_lsf.py | 24 +- aiida/scheduler/plugins/test_pbspro.py | 42 +- aiida/scheduler/plugins/test_sge.py | 10 +- aiida/scheduler/plugins/test_slurm.py | 30 +- aiida/scheduler/plugins/test_torque.py | 38 +- .../tests/reference_results/workchain.py2.xml | 6 +- .../tests/reference_results/workchain.py3.xml | 6 +- 
aiida/tools/dbexporters/tcod.py | 8 +- aiida/tools/dbimporters/baseclasses.py | 2 - aiida/work/__init__.py | 2 - aiida/work/calcjob.py | 385 ++++ aiida/work/job_calcs.py | 4 +- aiida/work/job_processes.py | 302 +-- aiida/work/launch.py | 2 +- aiida/work/persistence.py | 33 +- aiida/work/ports.py | 2 +- aiida/work/process_builder.py | 17 +- aiida/work/process_function.py | 8 +- aiida/work/process_spec.py | 10 + aiida/work/processes.py | 232 +- aiida/work/runners.py | 6 +- aiida/work/test_utils.py | 10 +- aiida/work/utils.py | 8 +- aiida/work/workchain.py | 18 +- docs/source/concepts/processes.rst | 4 +- .../source/developer_guide/core/internals.rst | 8 - .../devel_tutorial/code_plugin_int_sum.rst | 12 +- .../devel_tutorial/sum_parser.py | 2 +- docs/source/nitpick-exceptions | 5 +- docs/source/state/calculation_state.rst | 2 +- 103 files changed, 1866 insertions(+), 3918 deletions(-) create mode 100644 aiida/backends/djsite/db/migrations/0023_calc_job_option_attribute_keys.py create mode 100644 aiida/backends/sqlalchemy/migrations/versions/7ca08c391c49_calc_job_option_attribute_keys.py delete mode 100644 aiida/backends/tests/common/test_datastructures.py delete mode 100644 aiida/backends/tests/work/job_processes.py delete mode 100644 aiida/orm/calculation/__init__.py delete mode 100644 aiida/orm/calculation/inline.py delete mode 100644 aiida/orm/calculation/job/__init__.py create mode 100644 aiida/work/calcjob.py diff --git a/.ci/polish/lib/template/workchain.tpl b/.ci/polish/lib/template/workchain.tpl index c9ea315fb5..f32e839efd 100644 --- a/.ci/polish/lib/template/workchain.tpl +++ b/.ci/polish/lib/template/workchain.tpl @@ -65,7 +65,7 @@ ${outline} 'code': self.inputs.code, 'x': self.ctx.result, 'y': Int(operand), - 'options': get_default_options(), + 'metadata': {'options': get_default_options()}, } running = self.submit(ArithmeticAddCalculation, **inputs) diff --git a/.ci/test_daemon.py b/.ci/test_daemon.py index e3b86071c2..a018f82cbe 100644 --- a/.ci/test_daemon.py +++ b/.ci/test_daemon.py @@ -177,49 +177,6 @@ def validate_cached(cached_calcs): return valid -def create_calculation(code, counter, inputval, use_cache=False): - parameters = ParameterData(dict={'value': inputval}) - template = ParameterData(dict={ - # The following line adds a significant sleep time. 
- # I set it to 1 second to speed up tests - # I keep it to a non-zero value because I want - # To test the case when AiiDA finds some calcs - # in a queued state - # 'cmdline_params': ["{}".format(counter % 3)], # Sleep time - 'cmdline_params': ["1"], - 'input_file_template': "{value}", # File just contains the value to double - 'input_file_name': 'value_to_double.txt', - 'output_file_name': 'output.txt', - 'retrieve_temporary_files': ['triple_value.tmp'] - }) - calc = code.new_calc() - calc.set_option('max_wallclock_seconds', 5 * 60) # 5 min - calc.set_option('resources', {"num_machines": 1}) - calc.set_option('withmpi', False) - calc.set_option('parser_name', 'templatereplacer.doubler') - - calc.use_parameters(parameters) - calc.use_template(template) - calc.store_all(use_cache=use_cache) - expected_result = { - 'value': 2 * inputval, - 'retrieved_temporary_files': { - 'triple_value.tmp': str(inputval * 3) - } - } - print("[{}] created calculation {}, pk={}".format(counter, calc.uuid, calc.pk)) - return calc, expected_result - - -def submit_calculation(code, counter, inputval): - calc, expected_result = create_calculation( - code=code, counter=counter, inputval=inputval - ) - calc.submit() - print("[{}] calculation submitted.".format(counter)) - return calc, expected_result - - def launch_calculation(code, counter, inputval): """ Launch calculations to the daemon through the Process layer @@ -245,8 +202,6 @@ def create_calculation_process(code, inputval): Create the process and inputs for a submitting / running a calculation. """ TemplatereplacerCalculation = CalculationFactory('templatereplacer') - process = TemplatereplacerCalculation.process() - parameters = ParameterData(dict={'value': inputval}) template = ParameterData(dict={ # The following line adds a significant sleep time. @@ -281,17 +236,11 @@ def create_calculation_process(code, inputval): 'code': code, 'parameters': parameters, 'template': template, - 'options': options, + 'metadata': { + 'options': options, + } } - return process, inputs, expected_result - - -def create_cache_calc(code, counter, inputval): - calc, expected_result = create_calculation( - code=code, counter=counter, inputval=inputval, use_cache=True - ) - print("[{}] created cached calculation.".format(counter)) - return calc, expected_result + return TemplatereplacerCalculation, inputs, expected_result def main(): @@ -299,17 +248,8 @@ def main(): expected_results_workchains = {} code = Code.get_from_string(codename) - # Submitting the Calculations the old way, creating and storing a JobCalc first and submitting it - print("Submitting {} old style calculations to the daemon".format(number_calculations)) - for counter in range(1, number_calculations + 1): - inputval = counter - calc, expected_result = submit_calculation( - code=code, counter=counter, inputval=inputval - ) - expected_results_calculations[calc.pk] = expected_result - # Submitting the Calculations the new way directly through the launchers - print("Submitting {} new style calculations to the daemon".format(number_calculations)) + print("Submitting {} calculations to the daemon".format(number_calculations)) for counter in range(1, number_calculations + 1): inputval = counter calc, expected_result = launch_calculation( @@ -416,18 +356,12 @@ def main(): print("Timeout!! 
Calculation did not complete after {} seconds".format(timeout_secs)) sys.exit(2) else: - # create cached calculations -- these should be FINISHED immediately + # Launch the same calculations but with caching enabled -- these should be FINISHED immediately cached_calcs = [] - for counter in range(1, number_calculations + 1): - calc, expected_result = create_cache_calc( - code=code, counter=counter, inputval=counter - ) - cached_calcs.append(calc) - expected_results_calculations[calc.pk] = expected_result - # new style cached calculations, with 'run' - with enable_caching(): + with enable_caching(node_class=CalcJobNode): for counter in range(1, number_calculations + 1): - calc, expected_result = run_calculation(code=code, counter=counter, inputval=counter) + inputval = counter + calc, expected_result = run_calculation(code=code, counter=counter, inputval=inputval) cached_calcs.append(calc) expected_results_calculations[calc.pk] = expected_result diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ec32100996..b6c74e5bc2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -115,7 +115,6 @@ aiida/backends/tests/nodes.py| aiida/backends/tests/orm/data/frozendict.py| aiida/backends/tests/orm/data/remote.py| - aiida/backends/tests/orm/mixins.py| aiida/backends/tests/orm/utils/loaders.py| aiida/backends/tests/parsers.py| aiida/backends/tests/query.py| @@ -147,9 +146,6 @@ aiida/orm/autogroup.py| aiida/orm/backend.py| aiida/orm/querybuilder.py| - aiida/orm/calculation/__init__.py| - aiida/orm/calculation/inline.py| - aiida/orm/calculation/job/__init__.py| aiida/orm/code.py| aiida/orm/data/array/bands.py| aiida/orm/data/array/kpoints.py| diff --git a/.pylintrc b/.pylintrc index 1c11f0da54..b1d51927bd 100644 --- a/.pylintrc +++ b/.pylintrc @@ -136,7 +136,7 @@ function-name-hint=(([a-z][a-z0-9_]{2,40})|(_[a-z0-9_]*))$ function-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ # Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_, _INPUT_FILE_NAME, _OUTPUT_FILE_NAME, pk +good-names=i,j,k,ex,Run,_,pk # Include a hint for the correct naming format with invalid-name include-naming-hint=no @@ -409,7 +409,7 @@ defining-attr-methods=__init__,__new__,setUp # List of member names, which should be excluded from the protected access # warning. -exclude-protected=_asdict,_fields,_replace,_source,_make,_get_linkname_retrieved +exclude-protected=_asdict,_fields,_replace,_source,_make # List of valid names for the first argument in a class method. valid-classmethod-first-arg=cls diff --git a/aiida/backends/djsite/db/__init__.py b/aiida/backends/djsite/db/__init__.py index ef686bb748..a7e3fad50c 100644 --- a/aiida/backends/djsite/db/__init__.py +++ b/aiida/backends/djsite/db/__init__.py @@ -7,5 +7,3 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### - - diff --git a/aiida/backends/djsite/db/migrations/0015_invalidating_node_hash.py b/aiida/backends/djsite/db/migrations/0015_invalidating_node_hash.py index 17451a5907..2366eb7a93 100644 --- a/aiida/backends/djsite/db/migrations/0015_invalidating_node_hash.py +++ b/aiida/backends/djsite/db/migrations/0015_invalidating_node_hash.py @@ -22,6 +22,7 @@ # Currently valid hash key _HASH_EXTRA_KEY = '_aiida_hash' + def notify_user(apps, schema_editor): echo_warning("Invalidating all the hashes of all the nodes. 
Please run verdi rehash", bold=True) @@ -37,7 +38,6 @@ class Migration(migrations.Migration): migrations.RunPython(notify_user, reverse_code=notify_user), migrations.RunSQL( """ DELETE FROM db_dbextra WHERE key='""" + _HASH_EXTRA_KEY + """';""", - reverse_sql=""" DELETE FROM db_dbextra - WHERE key='""" + _HASH_EXTRA_KEY + """';"""), + reverse_sql=""" DELETE FROM db_dbextra WHERE key='""" + _HASH_EXTRA_KEY + """';"""), upgrade_schema_version(REVISION, DOWN_REVISION) ] diff --git a/aiida/backends/djsite/db/migrations/0023_calc_job_option_attribute_keys.py b/aiida/backends/djsite/db/migrations/0023_calc_job_option_attribute_keys.py new file mode 100644 index 0000000000..b0ca276aa8 --- /dev/null +++ b/aiida/backends/djsite/db/migrations/0023_calc_job_option_attribute_keys.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +# pylint: disable=invalid-name,too-few-public-methods +"""Migration of CalcJobNode attributes for metadata options whose key changed.""" +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals +from __future__ import absolute_import + +# Remove when https://github.com/PyCQA/pylint/issues/1931 is fixed +# pylint: disable=no-name-in-module,import-error +from django.db import migrations + +from aiida.backends.djsite.db.migrations import upgrade_schema_version + +REVISION = '1.0.23' +DOWN_REVISION = '1.0.22' + + +class Migration(migrations.Migration): + """Migration of CalcJobNode attributes for metadata options whose key changed. 
+ + Renamed attribute keys: + + * `custom_environment_variables` -> `environment_variables` + * `jobresource_params` -> `resources` + * `_process_label` -> `process_label` + * `parser` -> `parser_name` + + Deleted attributes: + * `linkname_retrieved` (We do not actually delete it just in case some relies on it) + + """ + + dependencies = [ + ('db', '0022_dbgroup_type_string_change_content'), + ] + + operations = [ + migrations.RunSQL( + sql=r""" + UPDATE db_dbattribute AS attribute + SET key = regexp_replace(attribute.key, '^custom_environment_variables', 'environment_variables') + FROM db_dbnode AS node + WHERE + ( + attribute.key = 'custom_environment_variables' OR + attribute.key LIKE 'custom\_environment\_variables.%' + ) AND + node.type = 'node.process.calculation.calcjob.CalcJobNode.'; + -- custom_environment_variables -> environment_variables + + UPDATE db_dbattribute AS attribute + SET key = regexp_replace(attribute.key, '^jobresource_params', 'resources') + FROM db_dbnode AS node + WHERE + ( + attribute.key = 'jobresource_params' OR + attribute.key LIKE 'jobresource\_params.%' + ) AND + node.type = 'node.process.calculation.calcjob.CalcJobNode.'; + -- jobresource_params -> resources + + UPDATE db_dbattribute AS attribute + SET key = regexp_replace(attribute.key, '^_process_label', 'process_label') + FROM db_dbnode AS node + WHERE + attribute.key = '_process_label' AND + node.type LIKE 'node.process.%'; + -- _process_label -> process_label + + UPDATE db_dbattribute AS attribute + SET key = regexp_replace(attribute.key, '^parser', 'parser_name') + FROM db_dbnode AS node + WHERE + attribute.key = 'parser' AND + node.type = 'node.process.calculation.calcjob.CalcJobNode.'; + -- parser -> parser_name + """, + reverse_sql=r""" + UPDATE db_dbattribute AS attribute + SET key = regexp_replace(attribute.key, '^environment_variables', 'custom_environment_variables') + FROM db_dbnode AS node + WHERE + ( + attribute.key = 'environment_variables' OR + attribute.key LIKE 'environment\_variables.%' + ) AND + node.type = 'node.process.calculation.calcjob.CalcJobNode.'; + -- environment_variables -> custom_environment_variables + + UPDATE db_dbattribute AS attribute + SET key = regexp_replace(attribute.key, '^resources', 'jobresource_params') + FROM db_dbnode AS node + WHERE + ( + attribute.key = 'resources' OR + attribute.key LIKE 'resources.%' + ) AND + node.type = 'node.process.calculation.calcjob.CalcJobNode.'; + -- resources -> jobresource_params + + UPDATE db_dbattribute AS attribute + SET key = regexp_replace(attribute.key, '^process_label', '_process_label') + FROM db_dbnode AS node + WHERE + attribute.key = 'process_label' AND + node.type LIKE 'node.process.%'; + -- process_label -> _process_label + + UPDATE db_dbattribute AS attribute + SET key = regexp_replace(attribute.key, '^parser_name', 'parser') + FROM db_dbnode AS node + WHERE + attribute.key = 'parser_name' AND + node.type = 'node.process.calculation.calcjob.CalcJobNode.'; + -- parser_name -> parser + """), + upgrade_schema_version(REVISION, DOWN_REVISION) + ] diff --git a/aiida/backends/djsite/db/migrations/__init__.py b/aiida/backends/djsite/db/migrations/__init__.py index b2133cca8a..529b0a59bd 100644 --- a/aiida/backends/djsite/db/migrations/__init__.py +++ b/aiida/backends/djsite/db/migrations/__init__.py @@ -11,7 +11,8 @@ from __future__ import print_function from __future__ import absolute_import -LATEST_MIGRATION = '0022_dbgroup_type_string_change_content' +LATEST_MIGRATION = '0023_calc_job_option_attribute_keys' + def 
_update_schema_version(version, apps, schema_editor): from aiida.backends.djsite.utils import set_db_schema_version @@ -31,7 +32,7 @@ def current_schema_version(): # Have to use this ugly way of importing because the django migration # files start with numbers which are not a valid package name latest_migration = __import__( - "aiida.backends.djsite.db.migrations.{}".format(LATEST_MIGRATION), + 'aiida.backends.djsite.db.migrations.{}'.format(LATEST_MIGRATION), fromlist=['REVISION'] ) return latest_migration.REVISION diff --git a/aiida/backends/djsite/db/models.py b/aiida/backends/djsite/db/models.py index 2e46aef682..0ee5efa48b 100644 --- a/aiida/backends/djsite/db/models.py +++ b/aiida/backends/djsite/db/models.py @@ -841,8 +841,7 @@ def create_value(cls, key, value, subspecifier_value=None, try: jsondata = json.dumps(value) except TypeError: - raise ValueError("Unable to store the value: it must be " - "either a basic datatype, or json-serializable") + raise ValueError("Unable to store the value: it must be either a basic datatype, or json-serializable: {}".format(value)) new_entry.datatype = 'json' new_entry.tval = jsondata diff --git a/aiida/backends/djsite/db/subtests/generic.py b/aiida/backends/djsite/db/subtests/generic.py index f55ac4c5df..8e10a03897 100644 --- a/aiida/backends/djsite/db/subtests/generic.py +++ b/aiida/backends/djsite/db/subtests/generic.py @@ -10,14 +10,13 @@ """ Generic tests that need the use of the DB """ - from __future__ import division from __future__ import print_function from __future__ import absolute_import -from aiida.backends.testbase import AiidaTestCase -from aiida.common import exceptions -from aiida.orm import Node, Data + from aiida import orm +from aiida.backends.testbase import AiidaTestCase +from aiida.orm import Data class TestComputer(AiidaTestCase): @@ -36,13 +35,9 @@ def test_deletion(self): # # This should be possible, because nothing is using this computer orm.Computer.objects.delete(newcomputer.id) - calc_params = { - 'computer': self.computer, - 'resources': {'num_machines': 1, - 'num_mpiprocs_per_machine': 1} - } - - _ = CalcJobNode(**calc_params).store() + calc = CalcJobNode(computer=self.computer) + calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + calc.store() # This should fail, because there is at least a calculation # using this computer (the one created just above) diff --git a/aiida/backends/djsite/db/subtests/migrations.py b/aiida/backends/djsite/db/subtests/migrations.py index 6ab48a97e2..a8048a519e 100644 --- a/aiida/backends/djsite/db/subtests/migrations.py +++ b/aiida/backends/djsite/db/subtests/migrations.py @@ -179,6 +179,62 @@ def test_uuid_untouched(self): n = load_node(self.node_id) self.assertEqual(self.node_uuid, n.uuid) + +class TestCalcAttributeKeysMigration(TestMigrations): + + migrate_from = '0022_dbgroup_type_string_change_content' + migrate_to = '0023_calc_job_option_attribute_keys' + + KEY_RESOURCES_OLD = 'jobresource_params' + KEY_RESOURCES_NEW = 'resources' + KEY_PARSER_NAME_OLD = 'parser' + KEY_PARSER_NAME_NEW = 'parser_name' + KEY_PROCESS_LABEL_OLD = '_process_label' + KEY_PROCESS_LABEL_NEW = 'process_label' + KEY_ENVIRONMENT_VARIABLES_OLD = 'custom_environment_variables' + KEY_ENVIRONMENT_VARIABLES_NEW = 'environment_variables' + + def setUpBeforeMigration(self, apps): + from aiida.orm.node.process import WorkflowNode, CalcJobNode + + self.process_label = 'TestLabel' + self.resources = {'number_machines': 1} + self.environment_variables = {} + self.parser_name = 
'aiida.parsers:parser' + + self.node_work = WorkflowNode() + self.node_work._set_attr(self.KEY_PROCESS_LABEL_OLD, self.process_label) + self.node_work.store() + + self.node_calc = CalcJobNode(computer=self.computer) + self.node_calc._validate = lambda: True # Need to disable the validation because we cannot set `resources` + self.node_calc._set_attr(self.KEY_PROCESS_LABEL_OLD, self.process_label) + self.node_calc._set_attr(self.KEY_RESOURCES_OLD, self.resources) + self.node_calc._set_attr(self.KEY_ENVIRONMENT_VARIABLES_OLD, self.environment_variables) + self.node_calc._set_attr(self.KEY_PARSER_NAME_OLD, self.parser_name) + self.node_calc.store() + + def test_attribute_key_changes(self): + """Verify that the keys are successfully changed of the affected attributes.""" + from aiida.orm import load_node + + NOT_FOUND = tuple([0]) + + node_work = load_node(self.node_work.pk) + self.assertEqual(node_work.get_attr(self.KEY_PROCESS_LABEL_NEW), self.process_label) + self.assertEqual(node_work.get_attr(self.KEY_PROCESS_LABEL_OLD, default=NOT_FOUND), NOT_FOUND) + + node_calc = load_node(self.node_calc.pk) + self.assertEqual(node_calc.get_attr(self.KEY_PROCESS_LABEL_NEW), self.process_label) + self.assertEqual(node_calc.get_attr(self.KEY_RESOURCES_NEW), self.resources) + self.assertEqual(node_calc.get_attr(self.KEY_ENVIRONMENT_VARIABLES_NEW), self.environment_variables) + self.assertEqual(node_calc.get_attr(self.KEY_PARSER_NAME_NEW), self.parser_name) + self.assertEqual(node_calc.get_attr(self.KEY_PROCESS_LABEL_OLD, default=NOT_FOUND), NOT_FOUND) + self.assertEqual(node_calc.get_attr(self.KEY_RESOURCES_OLD, default=NOT_FOUND), NOT_FOUND) + self.assertEqual(node_calc.get_attr(self.KEY_ENVIRONMENT_VARIABLES_OLD, default=NOT_FOUND), NOT_FOUND) + self.assertEqual(node_calc.get_attr(self.KEY_PARSER_NAME_OLD, default=NOT_FOUND), NOT_FOUND) + + class TestGroupRenamingMigration(TestMigrations): migrate_from = '0021_dbgroup_name_to_label_type_to_type_string' diff --git a/aiida/backends/sqlalchemy/migrations/versions/7ca08c391c49_calc_job_option_attribute_keys.py b/aiida/backends/sqlalchemy/migrations/versions/7ca08c391c49_calc_job_option_attribute_keys.py new file mode 100644 index 0000000000..3db0e1c912 --- /dev/null +++ b/aiida/backends/sqlalchemy/migrations/versions/7ca08c391c49_calc_job_option_attribute_keys.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +# pylint: disable=invalid-name,no-member +"""Migration of CalcJobNode attributes for metadata options whose key changed. + +Revision ID: 7ca08c391c49 +Revises: e72ad251bcdb +Create Date: 2019-01-15 15:03:43.876133 + +""" +from __future__ import absolute_import +from __future__ import print_function + +# Remove when https://github.com/PyCQA/pylint/issues/1931 is fixed +# pylint: disable=no-name-in-module,import-error +from alembic import op +from sqlalchemy.sql import text + +# revision identifiers, used by Alembic. 
+revision = '7ca08c391c49' +down_revision = 'e72ad251bcdb' +branch_labels = None +depends_on = None + + +def upgrade(): + """Migration of CalcJobNode attributes for metadata options whose key changed. + + Renamed attribute keys: + + * `custom_environment_variables` -> `environment_variables` + * `jobresource_params` -> `resources` + * `_process_label` -> `process_label` + * `parser` -> `parser_name` + + Deleted attributes: + * `linkname_retrieved` (We do not actually delete it just in case some relies on it) + + """ + conn = op.get_bind() + + statement = text(""" + UPDATE db_dbnode + SET attributes = jsonb_set(attributes, '{environment_variables}', to_jsonb(attributes->>'custom_environment_variables')) + WHERE + attributes ? 'custom_environment_variables' AND + type = 'node.process.calculation.calcjob.CalcJobNode.'; + UPDATE db_dbnode SET attributes = attributes - 'custom_environment_variables' + WHERE + attributes ? 'custom_environment_variables' AND + type = 'node.process.calculation.calcjob.CalcJobNode.'; + -- custom_environment_variables -> environment_variables + + UPDATE db_dbnode + SET attributes = jsonb_set(attributes, '{resources}', to_jsonb(attributes->>'jobresource_params')) + WHERE + attributes ? 'jobresource_params' AND + type = 'node.process.calculation.calcjob.CalcJobNode.'; + UPDATE db_dbnode SET attributes = attributes - 'jobresource_params' + WHERE + attributes ? 'jobresource_params' AND + type = 'node.process.calculation.calcjob.CalcJobNode.'; + -- jobresource_params -> resources + + UPDATE db_dbnode + SET attributes = jsonb_set(attributes, '{process_label}', to_jsonb(attributes->>'_process_label')) + WHERE + attributes ? '_process_label' AND + type like 'node.process.%'; + UPDATE db_dbnode SET attributes = attributes - '_process_label' + WHERE + attributes ? '_process_label' AND + type like 'node.process.%'; + -- _process_label -> process_label + + UPDATE db_dbnode + SET attributes = jsonb_set(attributes, '{parser_name}', to_jsonb(attributes->>'parser')) + WHERE + attributes ? 'parser' AND + type = 'node.process.calculation.calcjob.CalcJobNode.'; + UPDATE db_dbnode SET attributes = attributes - 'parser' + WHERE + attributes ? 
'parser' AND + type = 'node.process.calculation.calcjob.CalcJobNode.'; + -- parser -> parser_name + """) + conn.execute(statement) + + +def downgrade(): + pass diff --git a/aiida/backends/sqlalchemy/tests/generic.py b/aiida/backends/sqlalchemy/tests/generic.py index fac8a2b91e..9a79c7f4c4 100644 --- a/aiida/backends/sqlalchemy/tests/generic.py +++ b/aiida/backends/sqlalchemy/tests/generic.py @@ -33,18 +33,16 @@ def test_deletion(self): name="testdeletioncomputer", hostname='localhost', transport_type='local', - scheduler_type='pbspro').store() + scheduler_type='pbspro') + newcomputer.store() - # # This should be possible, because nothing is using this computer + # This should be possible, because nothing is using this computer self.backend.computers.delete(newcomputer.id) - calc_params = { - 'computer': self.computer, - 'resources': {'num_machines': 1, - 'num_mpiprocs_per_machine': 1} - } - - _ = CalcJobNode(**calc_params).store() + node = CalcJobNode() + node.set_computer(self.computer) + node.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + node.store() session = aiida.backends.sqlalchemy.get_scoped_session() diff --git a/aiida/backends/sqlalchemy/tests/migrations.py b/aiida/backends/sqlalchemy/tests/migrations.py index a6c5767148..17cb99a392 100644 --- a/aiida/backends/sqlalchemy/tests/migrations.py +++ b/aiida/backends/sqlalchemy/tests/migrations.py @@ -334,3 +334,85 @@ def test_model_and_migration_schemas_are_the_same(self): # pylint: disable=inva self.assertTrue(result.is_match, "The migration database doesn't match to the one " "created by the models.\nDifferences: " + result._dump_data(result.errors)) # pylint: disable=protected-access + + +class TestCalcAttributeKeysMigration(TestMigrationsSQLA): + """Test the migration of the keys of certain attribute for ProcessNodes and CalcJobNodes.""" + + migrate_from = 'e72ad251bcdb' # e72ad251bcdb_dbgroup_class_change_type_string_values + migrate_to = '7ca08c391c49' # 7ca08c391c49_calc_job_option_attribute_keys + + KEY_RESOURCES_OLD = 'jobresource_params' + KEY_RESOURCES_NEW = 'resources' + KEY_PARSER_NAME_OLD = 'parser' + KEY_PARSER_NAME_NEW = 'parser_name' + KEY_PROCESS_LABEL_OLD = '_process_label' + KEY_PROCESS_LABEL_NEW = 'process_label' + KEY_ENVIRONMENT_VARIABLES_OLD = 'custom_environment_variables' + KEY_ENVIRONMENT_VARIABLES_NEW = 'environment_variables' + + def setUpBeforeMigration(self): + from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module + + DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name + DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name + + with sa.engine.begin() as connection: + session = Session(connection.engine) + + user = DbUser(is_superuser=False, email="{}@aiida.net".format(self.id())) + session.add(user) + session.commit() + + self.resources = {'number_machines': 1} + self.parser_name = 'aiida.parsers:parser' + self.process_label = 'TestLabel' + self.environment_variables = {} + + attributes = { + self.KEY_RESOURCES_OLD: self.resources, + self.KEY_PARSER_NAME_OLD: self.parser_name, + self.KEY_PROCESS_LABEL_OLD: self.process_label, + self.KEY_ENVIRONMENT_VARIABLES_OLD: self.environment_variables, + } + node_work = DbNode(type='node.process.workflow.WorkflowNode.', attributes=attributes, user_id=user.id) + node_calc = DbNode( + type='node.process.calculation.calcjob.CalcJobNode.', attributes=attributes, user_id=user.id) + + session.add(node_work) + session.add(node_calc) + session.commit() + + 
self.node_work_id = node_work.id + self.node_calc_id = node_calc.id + + def test_attribute_key_changes(self): + """Verify that the keys are successfully changed of the affected attributes.""" + import ast + from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module + + DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name + + not_found = tuple([0]) + + with sa.engine.begin() as connection: + session = Session(connection.engine) + + node_work = session.query(DbNode).filter(DbNode.id == self.node_work_id).one() + self.assertEqual(node_work.attributes.get(self.KEY_PROCESS_LABEL_NEW), self.process_label) + self.assertEqual(node_work.attributes.get(self.KEY_PROCESS_LABEL_OLD, not_found), not_found) + + node_calc = session.query(DbNode).filter(DbNode.id == self.node_calc_id).one() + + # The dictionaries need to be cast with ast.literal_eval, because the `get` will return a string + # representation of the dictionary that the attribute contains + self.assertEqual(node_calc.attributes.get(self.KEY_PROCESS_LABEL_NEW), self.process_label) + self.assertEqual(node_calc.attributes.get(self.KEY_PARSER_NAME_NEW), self.parser_name) + self.assertEqual(ast.literal_eval(node_calc.attributes.get(self.KEY_RESOURCES_NEW)), self.resources) + self.assertEqual( + ast.literal_eval(node_calc.attributes.get(self.KEY_ENVIRONMENT_VARIABLES_NEW)), + self.environment_variables) + self.assertEqual(node_calc.attributes.get(self.KEY_PROCESS_LABEL_OLD, not_found), not_found) + self.assertEqual(node_calc.attributes.get(self.KEY_PARSER_NAME_OLD, not_found), not_found) + self.assertEqual(node_calc.attributes.get(self.KEY_RESOURCES_OLD, not_found), not_found) + self.assertEqual(node_calc.attributes.get(self.KEY_ENVIRONMENT_VARIABLES_OLD, not_found), not_found) diff --git a/aiida/backends/tests/__init__.py b/aiida/backends/tests/__init__.py index 6faa45fda5..1bdefd7cac 100644 --- a/aiida/backends/tests/__init__.py +++ b/aiida/backends/tests/__init__.py @@ -75,7 +75,6 @@ 'cmdline.params.types.node': ['aiida.backends.tests.cmdline.params.types.test_node'], 'cmdline.params.types.plugin': ['aiida.backends.tests.cmdline.params.types.test_plugin'], 'common.archive': ['aiida.backends.tests.common.test_archive'], - 'common.datastructures': ['aiida.backends.tests.common.test_datastructures'], 'common.extendeddicts': ['aiida.backends.tests.common.test_extendeddicts'], 'common.folders': ['aiida.backends.tests.common.test_folders'], 'common.hashing': ['aiida.backends.tests.common.test_hashing'], @@ -119,7 +118,6 @@ 'work.utils': ['aiida.backends.tests.work.test_utils'], 'work.work_chain': ['aiida.backends.tests.work.work_chain'], 'work.workfunctions': ['aiida.backends.tests.work.test_workfunctions'], - 'work.job_processes': ['aiida.backends.tests.work.job_processes'], 'plugin_loader': ['aiida.backends.tests.test_plugin_loader'], 'caching_config': ['aiida.backends.tests.test_caching_config'], } diff --git a/aiida/backends/tests/backup_script.py b/aiida/backends/tests/backup_script.py index c0b91485a9..e71fa7da07 100644 --- a/aiida/backends/tests/backup_script.py +++ b/aiida/backends/tests/backup_script.py @@ -348,21 +348,15 @@ def fill_repo(self): from aiida.orm.node.process import CalcJobNode extra_name = self.__class__.__name__ + "/test_with_subclasses" - calc_params = { - 'computer': self.computer, - 'resources': {'num_machines': 1, - 'num_mpiprocs_per_machine': 1} - } + resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1} - TemplateReplacerCalc = 
CalculationFactory('templatereplacer') ParameterData = DataFactory('parameter') - a1 = CalcJobNode(**calc_params).store() + a1 = CalcJobNode(computer=self.computer) + a1.set_option('resources', resources) + a1.store() # To query only these nodes later a1.set_extra(extra_name, True) - a2 = TemplateReplacerCalc(**calc_params).store() - # To query only these nodes later - a2.set_extra(extra_name, True) a3 = Data().store() a3.set_extra(extra_name, True) a4 = ParameterData(dict={'a': 'b'}).store() @@ -371,7 +365,8 @@ def fill_repo(self): a5.set_extra(extra_name, True) # I don't set the extras, just to be sure that the filtering works # The filtering is needed because other tests will put stuff int he DB - a6 = CalcJobNode(**calc_params) + a6 = CalcJobNode(computer=self.computer) + a6.set_option('resources', resources) a6.store() a7 = Data() a7.store() diff --git a/aiida/backends/tests/calculation_node.py b/aiida/backends/tests/calculation_node.py index b62035f4f8..5946514f65 100644 --- a/aiida/backends/tests/calculation_node.py +++ b/aiida/backends/tests/calculation_node.py @@ -120,32 +120,6 @@ def test_process_node_updatable_attribute(self): with self.assertRaises(ModificationNotAllowed): node._del_attr(CalculationNode.PROCESS_STATE_KEY) - def test_calcjob_get_option(self): - """Verify that options used during process_node construction can be retrieved with `get_option`.""" - for name in self.calcjob.options: # pylint: disable=not-an-iterable - - if name in self.construction_options: - self.assertEqual(self.calcjob.get_option(name), self.construction_options[name]) - - def test_calcjob_get_options_only_set(self): - """Verify that `get_options only` returns explicitly set options if `only_actually_set=True`.""" - set_options = self.calcjob.get_options(only_actually_set=True) - self.assertEqual(set(set_options.keys()), set(self.construction_options.keys())) - - def test_calcjob_get_options_defaults(self): - """Verify that `get_options` returns all options with defaults if `only_actually_set=False`.""" - get_options = self.calcjob.get_options() - - for name, attributes in self.calcjob.options.items(): # pylint: disable=no-member - - # If the option was specified in construction options, verify that `get_options` returns same value - if name in self.construction_options: - self.assertEqual(get_options[name], self.construction_options[name]) - - # Otherwise, if the option defines a default that is not `None`, verify that that is returned correctly - elif 'default' in attributes and attributes['default'] is not None: - self.assertEqual(get_options[name], attributes['default']) - def test_get_description(self): self.assertEqual(self.calcjob.get_desc(), self.calcjob.get_state()) diff --git a/aiida/backends/tests/cmdline/commands/test_calcjob.py b/aiida/backends/tests/cmdline/commands/test_calcjob.py index e1eaaa5ace..1ce3b26bcf 100644 --- a/aiida/backends/tests/cmdline/commands/test_calcjob.py +++ b/aiida/backends/tests/cmdline/commands/test_calcjob.py @@ -19,23 +19,23 @@ from aiida.backends.testbase import AiidaTestCase from aiida.cmdline.commands import cmd_calcjob as command -from aiida.common.datastructures import calc_states -from aiida.orm.node.process.calculation.calcjob import CalcJobExitStatus +from aiida.common.datastructures import CalcJobState def get_result_lines(result): return [e for e in result.output.split('\n') if e] -@unittest.skip('reenable when issue #2342 is addressed') class TestVerdiCalculation(AiidaTestCase): """Tests for `verdi calcjob`.""" + # Note remove this when 
reenabling the tests after solving issue #2342 + # pylint: disable=no-member,unused-variable,unused-import + @classmethod def setUpClass(cls, *args, **kwargs): super(TestVerdiCalculation, cls).setUpClass(*args, **kwargs) from aiida.backends.tests.utils.fixtures import import_archive_fixture - from aiida.common.exceptions import ModificationNotAllowed from aiida.common.links import LinkType from aiida.orm import CalculationFactory, Data from aiida.orm.node.process import CalcJobNode @@ -56,35 +56,17 @@ def setUpClass(cls, *args, **kwargs): authinfo = orm.AuthInfo(computer=cls.computer, user=user) authinfo.store() - # Create 13 CalcJobNodes (one for each CalculationState) - for calcjob_state in calc_states: - - calc = CalcJobNode( - computer=cls.computer, resources={ - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }).store() + # Create 5 CalcJobNodes (one for each CalculationState) + for calculation_state in CalcJobState: - # Trying to set NEW will raise, but in this case we don't need to change the state - try: - calc._set_state(calcjob_state) - except ModificationNotAllowed: - pass - - try: - exit_status = CalcJobExitStatus[calcjob_state] - except KeyError: - if calcjob_state == 'IMPORTED': - calc._set_process_state(ProcessState.FINISHED) - else: - calc._set_process_state(ProcessState.RUNNING) - else: - calc._set_exit_status(exit_status) - calc._set_process_state(ProcessState.FINISHED) + calc = CalcJobNode(computer=cls.computer) + calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + calc.store() + calc._set_process_state(ProcessState.RUNNING) cls.calcs.append(calc) - if calcjob_state == 'PARSING': + if calculation_state == CalcJobState.PARSING: cls.KEY_ONE = 'key_one' cls.KEY_TWO = 'key_two' cls.VAL_ONE = 'val_one' @@ -103,13 +85,23 @@ def setUpClass(cls, *args, **kwargs): # Add a single calc to a group cls.group.add_nodes([calc]) - # Load the fixture containing a single ArithmeticAddCalculation node - import_archive_fixture('calcjob/arithmetic.add.aiida') + # Create a single failed CalcJobNode + cls.EXIT_STATUS = 100 + calc = CalcJobNode(computer=cls.computer) + calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + calc.store() + calc._set_exit_status(cls.EXIT_STATUS) + calc._set_process_state(ProcessState.FINISHED) + cls.calcs.append(calc) + + # Uncomment when issue 2342 is addressed + # # Load the fixture containing a single ArithmeticAddCalculation node + # import_archive_fixture('calcjob/arithmetic.add.aiida') - # Get the imported ArithmeticAddCalculation node - ArithmeticAddCalculation = CalculationFactory('arithmetic.add') - calcjobs = orm.QueryBuilder().append(ArithmeticAddCalculation).all()[0] - cls.arithmetic_job = calcjobs[0] + # # Get the imported ArithmeticAddCalculation node + # ArithmeticAddCalculation = CalculationFactory('arithmetic.add') + # calculations = orm.QueryBuilder().append(ArithmeticAddCalculation).all()[0] + # cls.arithmetic_job = calculations[0] def setUp(self): self.cli_runner = CliRunner() @@ -133,6 +125,7 @@ def test_calcjob_res(self): self.assertNotIn(self.KEY_TWO, result.output) self.assertNotIn(self.VAL_TWO, result.output) + @unittest.skip('reenable when issue #2342 is addressed') def test_calcjob_inputls(self): """Test verdi calcjob inputls""" options = [] @@ -154,6 +147,7 @@ def test_calcjob_inputls(self): self.assertIn('calcinfo.json', get_result_lines(result)) self.assertIn('job_tmpl.json', get_result_lines(result)) + @unittest.skip('reenable when issue #2342 is addressed') def 
test_calcjob_outputls(self): """Test verdi calcjob outputls""" options = [] @@ -174,6 +168,7 @@ def test_calcjob_outputls(self): self.assertEqual(len(get_result_lines(result)), 1) self.assertIn('aiida.out', get_result_lines(result)) + @unittest.skip('reenable when issue #2342 is addressed') def test_calcjob_inputcat(self): """Test verdi calcjob inputcat""" options = [] @@ -192,6 +187,7 @@ def test_calcjob_inputcat(self): self.assertEqual(len(get_result_lines(result)), 1) self.assertEqual(get_result_lines(result)[0], '2 3') + @unittest.skip('reenable when issue #2342 is addressed') def test_calcjob_outputcat(self): """Test verdi calcjob outputcat""" options = [] diff --git a/aiida/backends/tests/cmdline/commands/test_calculation.py b/aiida/backends/tests/cmdline/commands/test_calculation.py index 7318550eb3..71f2d9225e 100644 --- a/aiida/backends/tests/cmdline/commands/test_calculation.py +++ b/aiida/backends/tests/cmdline/commands/test_calculation.py @@ -19,16 +19,13 @@ from aiida.backends.testbase import AiidaTestCase from aiida.cmdline.commands import cmd_calculation as command -from aiida.common.datastructures import calc_states -from aiida.orm.node.process.calculation.calcjob import CalcJobExitStatus -from aiida.work import runners, rmq +from aiida.common.datastructures import CalcJobState def get_result_lines(result): return [e for e in result.output.split('\n') if e] -@unittest.skip('reenable when issue #2342 is addressed') class TestVerdiCalculation(AiidaTestCase): """Tests for `verdi calculation`.""" @@ -36,9 +33,8 @@ class TestVerdiCalculation(AiidaTestCase): def setUpClass(cls, *args, **kwargs): super(TestVerdiCalculation, cls).setUpClass(*args, **kwargs) from aiida.backends.tests.utils.fixtures import import_archive_fixture - from aiida.common.exceptions import ModificationNotAllowed from aiida.common.links import LinkType - from aiida.orm import Node, CalculationFactory, Data + from aiida.orm import CalculationFactory, Data from aiida.orm.node.process import CalcJobNode from aiida.orm.data.parameter import ParameterData from aiida.work.processes import ProcessState @@ -60,35 +56,17 @@ def setUpClass(cls, *args, **kwargs): authinfo = orm.AuthInfo(computer=cls.computer, user=user) authinfo.store() - # Create 13 CalcJobNodes (one for each CalculationState) - for calculation_state in calc_states: + # Create 5 CalcJobNodes (one for each CalculationState) + for calculation_state in CalcJobState: - calc = CalcJobNode( - computer=cls.computer, resources={ - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }).store() - - # Trying to set NEW will raise, but in this case we don't need to change the state - try: - calc._set_state(calculation_state) - except ModificationNotAllowed: - pass - - try: - exit_status = CalcJobExitStatus[calculation_state] - except KeyError: - if calculation_state == 'IMPORTED': - calc._set_process_state(ProcessState.FINISHED) - else: - calc._set_process_state(ProcessState.RUNNING) - else: - calc._set_exit_status(exit_status) - calc._set_process_state(ProcessState.FINISHED) + calc = CalcJobNode(computer=cls.computer) + calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + calc.store() + calc._set_process_state(ProcessState.RUNNING) cls.calcs.append(calc) - if calculation_state == 'PARSING': + if calculation_state == CalcJobState.PARSING: cls.KEY_ONE = 'key_one' cls.KEY_TWO = 'key_two' cls.VAL_ONE = 'val_one' @@ -107,13 +85,23 @@ def setUpClass(cls, *args, **kwargs): # Add a single calc to a group cls.group.add_nodes([calc]) - # 
Load the fixture containing a single ArithmeticAddCalculation node - import_archive_fixture('calcjob/arithmetic.add.aiida') + # Create a single failed CalcJobNode + cls.EXIT_STATUS = 100 + calc = CalcJobNode(computer=cls.computer) + calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + calc.store() + calc._set_exit_status(cls.EXIT_STATUS) + calc._set_process_state(ProcessState.FINISHED) + cls.calcs.append(calc) + + # Uncomment when issue 2342 is addressed + # # Load the fixture containing a single ArithmeticAddCalculation node + # import_archive_fixture('calcjob/arithmetic.add.aiida') - # Get the imported ArithmeticAddCalculation node - ArithmeticAddCalculation = CalculationFactory('arithmetic.add') - calculations = orm.QueryBuilder().append(ArithmeticAddCalculation).all()[0] - cls.arithmetic_job = calculations[0] + # # Get the imported ArithmeticAddCalculation node + # ArithmeticAddCalculation = CalculationFactory('arithmetic.add') + # calculations = orm.QueryBuilder().append(ArithmeticAddCalculation).all()[0] + # cls.arithmetic_job = calculations[0] def setUp(self): self.cli_runner = CliRunner() @@ -153,22 +141,22 @@ def test_calculation_list_all_user(self): options = ['-r', '-a', flag] result = self.cli_runner.invoke(command.calculation_list, options) self.assertIsNone(result.exception, result.output) - self.assertEqual(len(get_result_lines(result)), 14) + self.assertEqual(len(get_result_lines(result)), 6) def test_calculation_list_all(self): """Test verdi calculation list with the all option""" - # Without the flag I should only get the "active" states, which should be seven + # Without the flag I should only get the "active" states, which should be five options = ['-r'] result = self.cli_runner.invoke(command.calculation_list, options) self.assertIsNone(result.exception, result.output) - self.assertEqual(len(get_result_lines(result)), 7, result.output) + self.assertEqual(len(get_result_lines(result)), 5, result.output) for flag in ['-a', '--all']: options = ['-r', flag] result = self.cli_runner.invoke(command.calculation_list, options) self.assertIsNone(result.exception, result.output) - self.assertEqual(len(get_result_lines(result)), 14, result.output) + self.assertEqual(len(get_result_lines(result)), 6, result.output) def test_calculation_list_limit(self): """Test verdi calculation list with the limit option""" @@ -201,9 +189,9 @@ def test_calculation_list_process_state(self): self.assertIsNone(result.exception, result.output) if state == 'finished': - self.assertEqual(len(get_result_lines(result)), 7, result.output) + self.assertEqual(len(get_result_lines(result)), 1, result.output) else: - self.assertEqual(len(get_result_lines(result)), 7, result.output) + self.assertEqual(len(get_result_lines(result)), 5, result.output) def test_calculation_list_failed(self): """Test verdi calculation list with the failed filter""" @@ -212,13 +200,13 @@ def test_calculation_list_failed(self): result = self.cli_runner.invoke(command.calculation_list, options) self.assertIsNone(result.exception, result.output) - self.assertEqual(len(get_result_lines(result)), 4, result.output) + self.assertEqual(len(get_result_lines(result)), 1, result.output) def test_calculation_list_exit_status(self): """Test verdi calculation list with the exit status filter""" for flag in ['-E', '--exit-status']: - for exit_status in CalcJobExitStatus: - options = ['-r', flag, exit_status.value] + for exit_status in [self.EXIT_STATUS]: + options = ['-r', flag, exit_status] result = 
self.cli_runner.invoke(command.calculation_list, options) self.assertIsNone(result.exception, result.output) @@ -266,6 +254,7 @@ def test_calculation_logshow(self): self.assertIsNone(result.exception, result.output) self.assertTrue(len(get_result_lines(result)) > 0) + @unittest.skip('reenable when issue #2342 is addressed') def test_calculation_plugins(self): """Test verdi calculation plugins""" from aiida.plugins.entry_point import get_entry_points @@ -283,6 +272,7 @@ def test_calculation_plugins(self): self.assertIsNone(result.exception, result.output) self.assertTrue(len(get_result_lines(result)) > len(calculation_plugins)) + @unittest.skip('reenable when issue #2342 is addressed') def test_calculation_inputls(self): """Test verdi calculation inputls""" options = [] @@ -304,6 +294,7 @@ def test_calculation_inputls(self): self.assertIn('calcinfo.json', get_result_lines(result)) self.assertIn('job_tmpl.json', get_result_lines(result)) + @unittest.skip('reenable when issue #2342 is addressed') def test_calculation_outputls(self): """Test verdi calculation outputls""" options = [] @@ -324,6 +315,7 @@ def test_calculation_outputls(self): self.assertEqual(len(get_result_lines(result)), 1) self.assertIn('aiida.out', get_result_lines(result)) + @unittest.skip('reenable when issue #2342 is addressed') def test_calculation_inputcat(self): """Test verdi calculation inputcat""" options = [] @@ -342,6 +334,7 @@ def test_calculation_inputcat(self): self.assertEqual(len(get_result_lines(result)), 1) self.assertEqual(get_result_lines(result)[0], '2 3') + @unittest.skip('reenable when issue #2342 is addressed') def test_calculation_outputcat(self): """Test verdi calculation outputcat""" options = [] diff --git a/aiida/backends/tests/cmdline/commands/test_process.py b/aiida/backends/tests/cmdline/commands/test_process.py index d6cadbbb71..96c13b2743 100644 --- a/aiida/backends/tests/cmdline/commands/test_process.py +++ b/aiida/backends/tests/cmdline/commands/test_process.py @@ -64,6 +64,7 @@ def test_pause_play_kill(self): """ Test the pause/play/kill commands """ + # pylint: disable=no-member from aiida.orm import load_node calc = self.runner.submit(test_utils.WaitProcess) diff --git a/aiida/backends/tests/common/test_datastructures.py b/aiida/backends/tests/common/test_datastructures.py deleted file mode 100644 index 9b4b8bf53d..0000000000 --- a/aiida/backends/tests/common/test_datastructures.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. 
# -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### -from __future__ import division -from __future__ import print_function -from __future__ import absolute_import -from aiida.backends.testbase import AiidaTestCase -from aiida.common.datastructures import _sorted_datastates, calc_states, is_progressive_state_change - - -class TestCommonDataStructures(AiidaTestCase): - - def test_is_progressive_state_change(self): - """Test the `is_progressive_state_change` utility function by testing all possible state change permutations.""" - for i, state_one in enumerate(_sorted_datastates): - - for j, state_two in enumerate(_sorted_datastates): - - # States will be equal and should not count as progressive state change - if i == j: - self.assertFalse(is_progressive_state_change(state_one, state_two)) - elif i > j: - self.assertFalse(is_progressive_state_change(state_one, state_two)) - elif i < j: - self.assertTrue(is_progressive_state_change(state_one, state_two)) - else: - assert True, 'we broke math' - - def test_is_progressive_state_change_invalid_states(self): - """Test `is_progressive_state_change` function should raise ValueError for invalid states.""" - with self.assertRaises(ValueError): - is_progressive_state_change('NOTEXISTENT', calc_states.NEW) - - with self.assertRaises(ValueError): - is_progressive_state_change(calc_states.NEW, 'NOTEXISTENT') diff --git a/aiida/backends/tests/export_and_import.py b/aiida/backends/tests/export_and_import.py index b4d2923914..48cdb11829 100644 --- a/aiida/backends/tests/export_and_import.py +++ b/aiida/backends/tests/export_and_import.py @@ -464,7 +464,6 @@ def test_5(self): from aiida.orm.node.process import CalcJobNode from aiida.orm.data.structure import StructureData from aiida.orm.importexport import export - from aiida.common.datastructures import calc_states from aiida.common.links import LinkType from aiida.manage import get_manager @@ -490,7 +489,6 @@ def test_5(self): jc1.label = 'jc1' jc1.store() jc1.add_incoming(sd1, link_type=LinkType.INPUT_CALC, link_label='link') - jc1._set_state(calc_states.PARSING) # Create some nodes from a different user sd2 = StructureData() @@ -505,7 +503,6 @@ def test_5(self): jc2.label = 'jc2' jc2.store() jc2.add_incoming(sd2, link_type=LinkType.INPUT_CALC, link_label='l2') - jc2._set_state(calc_states.PARSING) sd3 = StructureData() sd3.label = 'sd3' @@ -548,7 +545,6 @@ def test_6(self): from aiida.orm.node.process import CalcJobNode from aiida.orm.data.structure import StructureData from aiida.orm.importexport import export - from aiida.common.datastructures import calc_states from aiida.common.links import LinkType from aiida.manage import get_manager @@ -574,7 +570,6 @@ def test_6(self): jc1.label = 'jc1' jc1.store() jc1.add_incoming(sd1, link_type=LinkType.INPUT_CALC, link_label='link') - jc1._set_state(calc_states.PARSING) # Create some nodes from a different user sd2 = StructureData() @@ -583,9 +578,6 @@ def test_6(self): sd2.store() sd2.add_incoming(jc1, link_type=LinkType.CREATE, link_label='l1') - # Set the jc1 to FINISHED - jc1._set_state(calc_states.FINISHED) - # At this point we export the generated data filename1 = os.path.join(temp_folder, "export1.tar.gz") export([sd2], outfile=filename1, silent=True) @@ -609,16 +601,12 @@ def test_6(self): jc2.label = 'jc2' 
jc2.store() jc2.add_incoming(sd2_imp, link_type=LinkType.INPUT_CALC, link_label='l2') - jc2._set_state(calc_states.PARSING) sd3 = StructureData() sd3.label = 'sd3' sd3.store() sd3.add_incoming(jc2, link_type=LinkType.CREATE, link_label='l3') - # Set the jc2 to FINISHED - jc2._set_state(calc_states.FINISHED) - # Store the UUIDs of the nodes that should be checked # if they can be imported correctly. uuids2 = [jc2.uuid, sd3.uuid] @@ -654,7 +642,6 @@ def test_7(self): from aiida.orm.node.process import CalcJobNode from aiida.orm.data.structure import StructureData from aiida.orm.importexport import export - from aiida.common.datastructures import calc_states from aiida.orm.querybuilder import QueryBuilder # Creating a folder for the import/export files @@ -678,7 +665,6 @@ def test_7(self): jc1.label = 'jc1' jc1.store() jc1.add_incoming(sd1, link_type=LinkType.INPUT_CALC, link_label='link') - jc1._set_state(calc_states.PARSING) # Create a group and add the data inside from aiida.orm.groups import Group @@ -1070,7 +1056,6 @@ def test_complex_graph_import_export(self): calc1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1}) calc1.label = "calc1" calc1.store() - calc1._set_state(u'RETRIEVING') pd1 = ParameterData() pd1.label = "pd1" @@ -1095,7 +1080,6 @@ def test_complex_graph_import_export(self): calc2.add_incoming(pd1, link_type=LinkType.INPUT_CALC, link_label='link1') calc2.add_incoming(pd2, link_type=LinkType.INPUT_CALC, link_label='link2') calc2.add_incoming(rd1, link_type=LinkType.INPUT_CALC, link_label='link3') - calc2._set_state(u'SUBMITTING') fd1 = FolderData() fd1.label = "fd1" @@ -1166,7 +1150,6 @@ def test_same_computer_import(self): "num_mpiprocs_per_machine": 1}) calc1.label = calc1_label calc1.store() - calc1._set_state(u'RETRIEVING') calc2_label = "calc2" calc2 = CalcJobNode() @@ -1175,7 +1158,6 @@ def test_same_computer_import(self): "num_mpiprocs_per_machine": 2}) calc2.label = calc2_label calc2.store() - calc2._set_state(u'RETRIEVING') # Store locally the computer name comp_name = six.text_type(self.computer.name) @@ -1290,7 +1272,6 @@ def test_same_computer_different_name_import(self): "num_mpiprocs_per_machine": 1}) calc1.label = calc1_label calc1.store() - calc1._set_state(u'RETRIEVING') # Store locally the computer name comp1_name = six.text_type(self.computer.name) @@ -1310,7 +1291,6 @@ def test_same_computer_different_name_import(self): "num_mpiprocs_per_machine": 2}) calc2.label = calc2_label calc2.store() - calc2._set_state(u'RETRIEVING') # Export the second job calculation filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz") @@ -1400,7 +1380,6 @@ def test_different_computer_same_name_import(self): "num_mpiprocs_per_machine": 1}) calc1.label = calc1_label calc1.store() - calc1._set_state(u'RETRIEVING') # Export the first job calculation filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz") @@ -1421,7 +1400,6 @@ def test_different_computer_same_name_import(self): "num_mpiprocs_per_machine": 2}) calc2.label = calc2_label calc2.store() - calc2._set_state(u'RETRIEVING') # Export the second job calculation filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz") @@ -1442,7 +1420,6 @@ def test_different_computer_same_name_import(self): "num_mpiprocs_per_machine": 2}) calc3.label = calc3_label calc3.store() - calc3._set_state(u'RETRIEVING') # Export the third job calculation filename3 = os.path.join(export_file_tmp_folder, "export3.tar.gz") @@ -1529,7 +1506,6 @@ def 
test_correct_import_of_computer_json_params(self): "num_mpiprocs_per_machine": 1}) calc1.label = calc1_label calc1.store() - calc1._set_state(u'RETRIEVING') # Export the first job calculation filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz") @@ -1661,7 +1637,6 @@ def construct_complex_graph(self, export_combination=0): from aiida.orm.data.base import Int from aiida.orm.node.process import CalcJobNode from aiida.orm.node.process import WorkChainNode - from aiida.common.datastructures import calc_states from aiida.common.links import LinkType if export_combination < 0 or export_combination > 8: @@ -1698,7 +1673,6 @@ def construct_complex_graph(self, export_combination=0): pw1.add_incoming(d1, LinkType.INPUT_CALC, 'input') pw1.add_incoming(wc2, LinkType.CALL_CALC, 'call') - pw1._set_state(calc_states.PARSING) d3.add_incoming(pw1, LinkType.CREATE, 'create1') d3.add_incoming(wc2, LinkType.RETURN, 'return1') @@ -1707,7 +1681,6 @@ def construct_complex_graph(self, export_combination=0): d4.add_incoming(wc2, LinkType.RETURN, 'return2') pw2.add_incoming(d4, LinkType.INPUT_CALC, 'input') - pw2._set_state(calc_states.PARSING) d5.add_incoming(pw2, LinkType.CREATE, 'create5') d6.add_incoming(pw2, LinkType.CREATE, 'create6') @@ -1738,7 +1711,6 @@ def test_data_create_reversed_false(self): import shutil import tempfile - from aiida.common.datastructures import calc_states from aiida.orm import Data, Group from aiida.orm.data.base import Int from aiida.orm.node.process import CalcJobNode @@ -1758,7 +1730,6 @@ def test_data_create_reversed_false(self): calc.store() calc.add_incoming(data_input, LinkType.INPUT_CALC, 'input') - calc._set_state(calc_states.PARSING) data_output.add_incoming(calc, LinkType.CREATE, 'create') group = orm.Group(label='test_group').store() diff --git a/aiida/backends/tests/nodes.py b/aiida/backends/tests/nodes.py index 86de48e275..29b4d9b6d9 100644 --- a/aiida/backends/tests/nodes.py +++ b/aiida/backends/tests/nodes.py @@ -256,12 +256,12 @@ def test_with_subclasses(self): from aiida.orm.node.process import CalcJobNode extra_name = self.__class__.__name__ + "/test_with_subclasses" - calc_params = {'computer': self.computer, 'resources': {'num_machines': 1, 'num_mpiprocs_per_machine': 1}} - TemplateReplacerCalc = CalculationFactory('templatereplacer') ParameterData = DataFactory('parameter') - a1 = CalcJobNode(**calc_params).store() + a1 = CalcJobNode(computer=self.computer) + a1.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + a1.store() # To query only these nodes later a1.set_extra(extra_name, True) a3 = Data().store() @@ -270,8 +270,9 @@ def test_with_subclasses(self): a4.set_extra(extra_name, True) # I don't set the extras, just to be sure that the filtering works # The filtering is needed because other tests will put stuff int he DB - a6 = CalcJobNode(**calc_params) - a6.store() + a6 = CalcJobNode(computer=self.computer) + a6.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + a6.store() a7 = Data() a7.store() @@ -1492,11 +1493,10 @@ def test_load_unknown_calculation_type(self): from aiida.orm import CalculationFactory from aiida.orm.node.process import CalcJobNode - ###### for calculation - calc_params = {'computer': self.computer, 'resources': {'num_machines': 1, 'num_mpiprocs_per_machine': 1}} - TemplateReplacerCalc = CalculationFactory('templatereplacer') - testcalc = TemplateReplacerCalc(**calc_params).store() + testcalc = TemplateReplacerCalc(computer=self.computer) + testcalc.set_option('resources',
{'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + testcalc.store() # compare if plugin exist obj = load_node(uuid=testcalc.uuid) @@ -1509,8 +1509,13 @@ class TestCalculation(CalcJobNode): TestCalculation._plugin_type_string = 'node.process.calculation.calcjob.notexisting.TemplatereplacerCalculation.' TestCalculation._query_type_string = 'node.process.calculation.calcjob.notexisting.TemplatereplacerCalculation' - jobcalc = CalcJobNode(**calc_params).store() - testcalc = TestCalculation(**calc_params).store() + jobcalc = CalcJobNode(computer=self.computer) + jobcalc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + jobcalc.store() + + testcalc = TestCalculation(computer=self.computer) + testcalc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + testcalc.store() # Changed node should return CalcJobNode type as its plugin does not exist obj = load_node(uuid=testcalc.uuid) @@ -1659,55 +1664,18 @@ def test_has_children_has_parents(self): self.assertFalse(n2.has_children, "It should be false since n2 doesn't have any children.") self.assertTrue(n2.has_parents, "It should be true since n1 is the parent of n2.") - def test_use_code(self): - from aiida.orm.node.process import CalcJobNode - from aiida.orm.code import Code - - computer = self.computer - - code = Code(remote_computer_exec=(computer, '/bin/true')) # .store() - - unstoredcalc = CalcJobNode(computer=computer, resources={'num_machines': 1, 'num_mpiprocs_per_machine': 1}) - calc = CalcJobNode(computer=computer, resources={'num_machines': 1, 'num_mpiprocs_per_machine': 1}).store() - - # calc is not stored, and also code is not - unstoredcalc.use_code(code) - - # calc is stored, but code is not - calc.use_code(code) - - self.assertEqual(calc.get_code().uuid, code.uuid) - self.assertEqual(unstoredcalc.get_code().uuid, code.uuid) - - # calc is not stored, but code is - code.store() - - self.assertEqual(calc.get_code().uuid, code.uuid) - self.assertEqual(unstoredcalc.get_code().uuid, code.uuid) - - unstoredcalc.store() - - self.assertEqual(calc.get_code().uuid, code.uuid) - self.assertEqual(unstoredcalc.get_code().uuid, code.uuid) - # pylint: disable=unused-variable,no-member,no-self-use def test_calculation_load(self): from aiida.orm.node.process import CalcJobNode # I check with a string, with an object and with the computer pk/id - calc = CalcJobNode( - computer=self.computer, resources={ - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }).store() + calc = CalcJobNode(computer=self.computer) + calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + calc.store() + with self.assertRaises(Exception): - # I should get an error if I ask for a computer id/pk that doesn't - # exist - _ = CalcJobNode( - computer=self.computer.id + 100000, resources={ - 'num_machines': 2, - 'num_mpiprocs_per_machine': 1 - }).store() + # I should get an error if I ask for a computer id/pk that doesn't exist + CalcJobNode(computer=self.computer.id + 100000).store() def test_links_label_constraints(self): d1 = Data().store() @@ -1777,33 +1745,6 @@ def test_link_label_autogenerator(self): """ raise NotImplementedError - @unittest.skip('remove this test once #2219 is addressed') - def test_link_replace(self): - from aiida.orm.node.process import CalculationNode - from aiida.orm import Data - - n1 = CalculationNode().store() - n2 = CalculationNode().store() - n3 = Data().store() - n4 = Data().store() - - n3.add_incoming(n1, link_type=LinkType.CREATE, link_label='the_label') - with 
self.assertRaises(ValueError): - # A link with the same name already exists - n3.add_incoming(n1, link_type=LinkType.CREATE, link_label='the_label') - - # I can replace the link and check that it was replaced - n3._replace_link_from(n2, LinkType.CREATE, link_label='the_label') - the_parent = [_.node.uuid for _ in n3.get_incoming() if _.link_label == 'the_label'] - self.assertEquals(len(the_parent), 1, "There are multiple input links with the same label (the_label)!") - self.assertEquals(n2.uuid, the_parent[0]) - - # _replace_link_from should work also if there is no previous link - n2._replace_link_from(n1, LinkType.CREATE, link_label='the_label_2') - the_parent_2 = [_.node.uuid for _ in n4.get_incoming() if _.link_label == 'the_label_2'] - self.assertEquals(len(the_parent_2), 1, "There are multiple input links with the same label (the_label_2)!") - self.assertEquals(n1.uuid, the_parent_2[0]) - def test_link_with_unstored(self): """ It is possible to store links between nodes even if they are unstored these links are cached. @@ -1867,8 +1808,6 @@ def test_valid_links(self): from aiida import orm from aiida.orm import DataFactory from aiida.orm.node.process import CalcJobNode - from aiida.orm.code import Code - from aiida.common.datastructures import calc_states SinglefileData = DataFactory('singlefile') @@ -1877,37 +1816,23 @@ def test_valid_links(self): with tempfile.NamedTemporaryFile('w+') as tmpf: d2 = SinglefileData(file=tmpf.name).store() - code = Code() - code._set_remote() - code.set_computer(self.computer) - code.set_remote_computer_exec((self.computer, '/bin/true')) - code.store() - unsavedcomputer = orm.Computer(name='localhost2', hostname='localhost') with self.assertRaises(ValueError): # I need to save the localhost entry first - _ = CalcJobNode( - computer=unsavedcomputer, resources={ - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }).store() + CalcJobNode(computer=unsavedcomputer).store() # Load calculations with two different ways - calc = CalcJobNode( - computer=self.computer, resources={ - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }).store() - calc2 = CalcJobNode( - computer=self.computer, resources={ - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }).store() + calc = CalcJobNode(computer=self.computer) + calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + calc.store() + + calc2 = CalcJobNode(computer=self.computer) + calc2.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + calc2.store() calc.add_incoming(d1, link_type=LinkType.INPUT_CALC, link_label='link') calc.add_incoming(d2, link_type=LinkType.INPUT_CALC, link_label='some_label') - calc.use_code(code) # Cannot link to itself with self.assertRaises(ValueError): @@ -1920,82 +1845,41 @@ def test_valid_links(self): with self.assertRaises(ValueError): d1.add_incoming(d2, link_type=LinkType.INPUT_CALC, link_label='link') - with self.assertRaises(ValueError): - d1.add_incoming(code, link_type=LinkType.INPUT_CALC, link_label='link') - - with self.assertRaises(ValueError): - code.add_incoming(d1, link_type=LinkType.INPUT_CALC, link_label='link') - with self.assertRaises(ValueError): calc.add_incoming(calc2, link_type=LinkType.INPUT_CALC, link_label='link') - calc_a = CalcJobNode( - computer=self.computer, resources={ - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }).store() - calc_b = CalcJobNode( - computer=self.computer, resources={ - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }).store() + calc_a = 
CalcJobNode(computer=self.computer) + calc_a.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + calc_a.store() + calc_b = CalcJobNode(computer=self.computer) + calc_b.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + calc_b.store() data_node = Data().store() - - # I do a trick to set it to a state that allows writing - calc_a._set_state(calc_states.RETRIEVING) - calc_b._set_state(calc_states.RETRIEVING) - data_node.add_incoming(calc_a, link_type=LinkType.CREATE, link_label='link') # A data cannot have two input calculations with self.assertRaises(ValueError): data_node.add_incoming(calc_b, link_type=LinkType.CREATE, link_label='link') - newdata = Data() - # Cannot add an input link if the calculation is not in status NEW - with self.assertRaises(ModificationNotAllowed): - calc_a.add_incoming(newdata, link_type=LinkType.INPUT_CALC, link_label='link') - - # Cannot replace input nodes if the calculation is not in status NEW - with self.assertRaises(ModificationNotAllowed): - calc_a._replace_link_from(d2, LinkType.INPUT_CALC, link_label='some_label') - - # Cannot (re)set the code if the calculation is not in status NEW - with self.assertRaises(ModificationNotAllowed): - calc_a.use_code(code) - calculation_inputs = calc.get_incoming().all() - # This calculation has three inputs (2 data and one code) - self.assertEquals(len(calculation_inputs), 3) + # This calculation has two data inputs + self.assertEquals(len(calculation_inputs), 2) def test_check_single_calc_source(self): """ Each data node can only have one input calculation """ from aiida.orm.node.process import CalcJobNode - from aiida.common.datastructures import calc_states d1 = Data().store() - calc = CalcJobNode( - computer=self.computer, resources={ - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }).store() - calc2 = CalcJobNode( - computer=self.computer, resources={ - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }).store() - - # I cannot, calc it is in state NEW - with self.assertRaises(ModificationNotAllowed): - d1.add_incoming(calc, link_type=LinkType.CREATE, link_label='link') - - # I do a trick to set it to a state that allows setting the link - calc._set_state(calc_states.RETRIEVING) - calc2._set_state(calc_states.RETRIEVING) + calc = CalcJobNode(computer=self.computer) + calc.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + calc.store() + calc2 = CalcJobNode(computer=self.computer) + calc2.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1}) + calc2.store() d1.add_incoming(calc, link_type=LinkType.CREATE, link_label='link') diff --git a/aiida/backends/tests/orm/mixins.py b/aiida/backends/tests/orm/mixins.py index 3a65c8d1ba..c24d840d97 100644 --- a/aiida/backends/tests/orm/mixins.py +++ b/aiida/backends/tests/orm/mixins.py @@ -7,25 +7,46 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### +"""Tests for the ORM mixin classes.""" from __future__ import division from __future__ import print_function from __future__ import absolute_import + from aiida.backends.testbase import AiidaTestCase +from aiida.common import exceptions +from aiida.common.links import LinkType +from aiida.orm.data.int import Int +from aiida.orm.node.process import CalculationNode from aiida.orm.mixins import Sealable class TestSealable(AiidaTestCase): + """Tests for the `Sealable` 
mixin class.""" - def test_change_updatable_attrs_after_store(self): - """ - Verify that a Sealable node can alter updatable attributes even after storing - """ - from aiida.orm.node.process import CalcJobNode + @staticmethod + def test_change_updatable_attrs_after_store(): + """Verify that a Sealable node can alter updatable attributes even after storing.""" - resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1} - job = CalcJobNode(computer=self.computer, resources=resources) - job.store() + node = CalculationNode().store() - for attr in CalcJobNode._updatable_attributes: + for attr in CalculationNode._updatable_attributes: # pylint: disable=protected-access,not-an-iterable if attr != Sealable.SEALED_KEY: - job._set_attr(attr, 'a') + node._set_attr(attr, 'a') # pylint: disable=protected-access + + def test_validate_incoming_sealed(self): + """Verify that trying to add a link to a sealed node will raise.""" + data = Int(1).store() + node = CalculationNode().store() + node.seal() + + with self.assertRaises(exceptions.ModificationNotAllowed): + node.validate_incoming(data, link_type=LinkType.INPUT_CALC, link_label='input') + + def test_validate_outgoing_sealed(self): + """Verify that trying to add a link from a sealed node will raise.""" + data = Int(1).store() + node = CalculationNode().store() + node.seal() + + with self.assertRaises(exceptions.ModificationNotAllowed): + node.validate_outgoing(data, link_type=LinkType.CREATE, link_label='create') diff --git a/aiida/backends/tests/restapi.py b/aiida/backends/tests/restapi.py index 5cc98b5407..d995aab70a 100644 --- a/aiida/backends/tests/restapi.py +++ b/aiida/backends/tests/restapi.py @@ -80,17 +80,18 @@ def setUpClass(cls, *args, **kwargs): resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1} - calc = CalcJobNode(computer=cls.computer, resources=resources) + calc = CalcJobNode(computer=cls.computer) + calc.set_option('resources', resources) calc._set_attr("attr1", "OK") # pylint: disable=protected-access calc._set_attr("attr2", "OK") # pylint: disable=protected-access calc.store() calc.add_incoming(structure, link_type=LinkType.INPUT_CALC, link_label='link_structure') calc.add_incoming(parameter1, link_type=LinkType.INPUT_CALC, link_label='link_parameter') - calc._set_state('PARSING') # pylint: disable=protected-access kpoint.add_incoming(calc, link_type=LinkType.CREATE, link_label='create') - calc1 = CalcJobNode(computer=cls.computer, resources=resources) + calc1 = CalcJobNode(computer=cls.computer) + calc1.set_option('resources', resources) calc1.store() dummy_computers = [{ @@ -739,13 +740,10 @@ def test_calculation_attributes(self): attributes = { 'attr1': 'OK', 'attr2': 'OK', - 'jobresource_params': { + 'resources': { 'num_machines': 1, 'num_mpiprocs_per_machine': 1 }, - 'linkname_retrieved': 'retrieved', - 'parser': None, - 'state': 'PARSING' } node_uuid = self.get_dummy_data()["calculations"][1]["uuid"] url = self.get_url_prefix() + "/calculations/" + str(node_uuid) + "/content/attributes" @@ -762,13 +760,10 @@ def test_calculation_attributes_nalist_filter(self): """ attributes = { 'attr2': 'OK', - 'jobresource_params': { + 'resources': { 'num_machines': 1, 'num_mpiprocs_per_machine': 1 }, - 'linkname_retrieved': 'retrieved', - 'parser': None, - 'state': 'PARSING' } node_uuid = self.get_dummy_data()["calculations"][1]["uuid"] url = self.get_url_prefix() + '/calculations/' + str(node_uuid) + '/content/attributes?nalist="attr1"' diff --git a/aiida/backends/tests/tcodexporter.py 
b/aiida/backends/tests/tcodexporter.py index 5a48ea236e..bdababbb49 100644 --- a/aiida/backends/tests/tcodexporter.py +++ b/aiida/backends/tests/tcodexporter.py @@ -134,7 +134,6 @@ def test_cif_structure_roundtrip(self): from aiida.orm.data.upf import UpfData from aiida.orm.data.folder import FolderData from aiida.common.folders import SandboxFolder - from aiida.common.datastructures import calc_states import tempfile with tempfile.NamedTemporaryFile(mode='w+') as tmpf: @@ -190,22 +189,19 @@ def test_cif_structure_roundtrip(self): calc.add_incoming(cif, LinkType.INPUT_CALC, "cif") calc.store() - calc._set_state(calc_states.TOSUBMIT) with SandboxFolder() as fhandle: calc._store_raw_input_folder(fhandle.abspath) fd = FolderData() - with io.open(fd._get_folder_pathsubfolder.get_abs_path( - calc._SCHED_OUTPUT_FILE), 'w', encoding='utf8') as fhandle: + subfolder = fd._get_folder_pathsubfolder + with io.open(subfolder.get_abs_path('_scheduler-stdout.txt'), 'w', encoding='utf8') as fhandle: fhandle.write(u"standard output") - with io.open(fd._get_folder_pathsubfolder.get_abs_path( - calc._SCHED_ERROR_FILE), 'w', encoding='utf8') as fhandle: + with io.open(subfolder.get_abs_path('_scheduler-stderr.txt'), 'w', encoding='utf8') as fhandle: fhandle.write(u"standard error") fd.store() - calc._set_state(calc_states.PARSING) - fd.add_incoming(calc, LinkType.CREATE, calc._get_linkname_retrieved()) + fd.add_incoming(calc, LinkType.CREATE, calc.link_label_retrieved) pd.add_incoming(calc, LinkType.CREATE, "create1") pd.store() diff --git a/aiida/backends/tests/test_plugin_loader.py b/aiida/backends/tests/test_plugin_loader.py index 6c17eec198..6ad1cc8e0c 100644 --- a/aiida/backends/tests/test_plugin_loader.py +++ b/aiida/backends/tests/test_plugin_loader.py @@ -15,11 +15,11 @@ from aiida.orm import CalculationFactory, DataFactory, WorkflowFactory from aiida.parsers import Parser, ParserFactory from aiida.orm.data import Data -from aiida.orm.node.process import CalculationNode from aiida.scheduler import Scheduler, SchedulerFactory from aiida.transport import Transport, TransportFactory from aiida.tools.dbexporters.tcod_plugins import BaseTcodtranslator, TcodExporterFactory from aiida.tools.dbimporters import DbImporter, DbImporterFactory +from aiida.work.calcjob import CalcJob from aiida.work import WorkChain @@ -41,8 +41,8 @@ def test_existing_calculations(self): for entry_point in entry_points: cls = CalculationFactory(entry_point.name) - self.assertTrue(issubclass(cls, CalculationNode), - 'Calculation plugin class {} is not subclass of {}'.format(cls, CalculationNode)) + self.assertTrue(issubclass(cls, CalcJob), + 'Calculation plugin class {} is not subclass of {}'.format(cls, CalcJob)) def test_existing_data(self): """Test listing all preinstalled data classes.""" diff --git a/aiida/backends/tests/work/class_loader.py b/aiida/backends/tests/work/class_loader.py index 8425920fdf..65ecd1d39e 100644 --- a/aiida/backends/tests/work/class_loader.py +++ b/aiida/backends/tests/work/class_loader.py @@ -10,27 +10,29 @@ from __future__ import division from __future__ import print_function from __future__ import absolute_import + import aiida -from aiida.work import Process + from aiida.backends.testbase import AiidaTestCase -from aiida.calculations.plugins.templatereplacer import TemplatereplacerCalculation +from aiida.orm import CalculationFactory +from aiida.work import Process -class TestJobProcess(AiidaTestCase): +class TestCalcJob(AiidaTestCase): def setUp(self): - super(TestJobProcess, self).setUp() + 
super(TestCalcJob, self).setUp() self.assertIsNone(Process.current()) def tearDown(self): - super(TestJobProcess, self).tearDown() + super(TestCalcJob, self).tearDown() self.assertIsNone(Process.current()) def test_class_loader(self): - templatereplacer_process = aiida.work.JobProcess.build(TemplatereplacerCalculation) + process = CalculationFactory('templatereplacer') loader = aiida.work.get_object_loader() - class_name = loader.identify_object(templatereplacer_process) + class_name = loader.identify_object(process) loaded_class = loader.load_object(class_name) - self.assertEqual(templatereplacer_process.__name__, loaded_class.__name__) + self.assertEqual(process.__name__, loaded_class.__name__) self.assertEqual(class_name, loader.identify_object(loaded_class)) diff --git a/aiida/backends/tests/work/job_processes.py b/aiida/backends/tests/work/job_processes.py deleted file mode 100644 index 112d3a1a63..0000000000 --- a/aiida/backends/tests/work/job_processes.py +++ /dev/null @@ -1,186 +0,0 @@ -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. # -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### - -from __future__ import division -from __future__ import print_function -from __future__ import absolute_import - -import six - -from aiida.backends.testbase import AiidaTestCase -from aiida.common.lang import classproperty -from aiida.orm.data.int import Int -from aiida.calculations.plugins.templatereplacer import TemplatereplacerCalculation -from aiida.work.persistence import ObjectLoader -from aiida.work.job_processes import JobProcess -from aiida.work.process_builder import JobProcessBuilder -from aiida.work import Process - - -class AdditionalParameterCalculation(TemplatereplacerCalculation): - """ - Subclass of TemplatereplacerCalculation that also defines a use method - with an additional parameter - """ - - @classproperty - def _use_methods(cls): - retdict = TemplatereplacerCalculation._use_methods - retdict.update({ - 'pseudo': { - 'valid_types': Int, - 'additional_parameter': 'kind', - 'linkname': cls._get_linkname_pseudo, - 'docstring': (''), - }, - }) - return retdict - - @classmethod - def _get_linkname_pseudo(cls, kind): - """ - Create the linkname based on the additional parameter - """ - if isinstance(kind, (tuple, list)): - suffix_string = '_'.join(kind) - elif isinstance(kind, six.string_types): - suffix_string = kind - else: - raise TypeError('invalid additional parameter type') - - return '{}_{}'.format('pseudo', suffix_string) - - -class TestJobProcess(AiidaTestCase): - - def setUp(self): - super(TestJobProcess, self).setUp() - self.assertIsNone(Process.current()) - - def tearDown(self): - super(TestJobProcess, self).tearDown() - self.assertIsNone(Process.current()) - - def test_job_calculation_process(self): - """Verify that CalcJobNode.process returns a sub class of JobProcess with correct calculation class.""" - process = TemplatereplacerCalculation.process() - self.assertTrue(issubclass(process, JobProcess)) - self.assertEqual(process._calc_class, TemplatereplacerCalculation) - - def test_job_calculation_get_builder(self): - """Verify that CalcJobNode.get_builder() returns an instance of 
JobProcessBuilder.""" - process = TemplatereplacerCalculation.process() - builder = TemplatereplacerCalculation.get_builder() - self.assertTrue(isinstance(builder, JobProcessBuilder)) - - # Class objects are actually different memory instances so can't use assertEqual on simply instances - self.assertEqual(builder.process_class.__name__, process.__name__) - - def test_job_process_set_label_and_description(self): - """ - Verify that calculation label and description get set when passed through inputs - """ - label = 'test_label' - description = 'test_description' - inputs = { - 'options': { - 'computer': self.computer, - 'resources': { - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }, - 'max_wallclock_seconds': 10, - }, - 'label': label, - 'description': description - } - process = TemplatereplacerCalculation.process() - job = process(inputs) - - self.assertEquals(job.calc.label, label) - self.assertEquals(job.calc.description, description) - - def test_job_process_label(self): - """ - Verify that the process_label attribute is set equal to the class name of the calculation from which the - JobProcess class was generated - """ - inputs = { - 'options': { - 'computer': self.computer, - 'resources': { - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }, - 'max_wallclock_seconds': 10, - }, - } - process = TemplatereplacerCalculation.process() - job = process(inputs) - - self.assertEquals(job.calc.process_label, TemplatereplacerCalculation.__name__) - - def test_job_process_set_none(self): - """ - Verify that calculation label and description can be not set. - """ - inputs = { - 'options': { - 'computer': self.computer, - 'resources': { - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }, - 'max_wallclock_seconds': 10, - } - } - - process = TemplatereplacerCalculation.process() - job = process(inputs) - - -class TestAdditionalParameterJobProcess(AiidaTestCase): - - def setUp(self): - super(TestAdditionalParameterJobProcess, self).setUp() - self.assertIsNone(Process.current()) - - def tearDown(self): - super(TestAdditionalParameterJobProcess, self).tearDown() - self.assertIsNone(Process.current()) - - def test_class_loader(self): - cl = ObjectLoader() - AdditionalParameterProcess = JobProcess.build(AdditionalParameterCalculation) - - def test_job_process_with_additional_parameter(self): - """ - Verify that the additional parameter use_method 'pseudo' is supported - """ - label = 'test_label' - description = 'test_description' - inputs = { - 'options': { - 'computer': self.computer, - 'resources': { - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 - }, - 'max_wallclock_seconds': 10, - }, - 'pseudo': { - 'a': Int(1), - 'b': Int(2), - }, - 'label': label, - 'description': description - } - process = AdditionalParameterCalculation.process() - job = process(inputs) diff --git a/aiida/backends/tests/work/persistence.py b/aiida/backends/tests/work/persistence.py index d393365390..175507ad12 100644 --- a/aiida/backends/tests/work/persistence.py +++ b/aiida/backends/tests/work/persistence.py @@ -54,7 +54,7 @@ def setUp(self): def test_save_load_checkpoint(self): process = DummyProcess() bundle_saved = self.persister.save_checkpoint(process) - bundle_loaded = self.persister.load_checkpoint(process.calc.pk) + bundle_loaded = self.persister.load_checkpoint(process.node.pk) self.assertDictEqual(bundle_saved, bundle_loaded) @@ -62,7 +62,7 @@ def test_delete_checkpoint(self): process = DummyProcess() self.persister.save_checkpoint(process) - 
self.assertTrue(isinstance(process.calc.checkpoint, six.string_types)) + self.assertTrue(isinstance(process.node.checkpoint, six.string_types)) self.persister.delete_checkpoint(process.pid) - self.assertEquals(process.calc.checkpoint, None) + self.assertEquals(process.node.checkpoint, None) diff --git a/aiida/backends/tests/work/process.py b/aiida/backends/tests/work/process.py index 0aff9aa6a2..321bbda915 100644 --- a/aiida/backends/tests/work/process.py +++ b/aiida/backends/tests/work/process.py @@ -28,7 +28,8 @@ class NameSpacedProcess(work.Process): - _calc_class = WorkflowNode + + _node_class = WorkflowNode @classmethod def define(cls, spec): @@ -65,13 +66,13 @@ def test_namespaced_process(self): self.assertEquals(input_node.value, 5) # Check that the link of the process node has the correct link name - self.assertTrue('some_name_space_a' in proc.calc.get_incoming().all_link_labels()) - self.assertEquals(proc.calc.get_incoming().get_node_by_label('some_name_space_a'), 5) + self.assertTrue('some_name_space_a' in proc.node.get_incoming().all_link_labels()) + self.assertEquals(proc.node.get_incoming().get_node_by_label('some_name_space_a'), 5) class ProcessStackTest(work.Process): - _calc_class = WorkflowNode + _node_class = WorkflowNode @override def run(self): @@ -111,10 +112,10 @@ def test_input_link_creation(self): dummy_inputs = ["1", "2", "3", "4"] inputs = {l: Int(l) for l in dummy_inputs} - inputs['store_provenance'] = True - p = test_utils.DummyProcess(inputs) + inputs['metadata'] = {'store_provenance': True} + process = test_utils.DummyProcess(inputs) - for entry in p._calc.get_incoming().all(): + for entry in process.node.get_incoming().all(): self.assertTrue(entry.link_label in inputs) self.assertEqual(int(entry.link_label), int(entry.node.value)) dummy_inputs.remove(entry.link_label) @@ -131,24 +132,24 @@ def test_seal(self): self.assertTrue(load_node(pk=pid).is_sealed) def test_description(self): - dp = test_utils.DummyProcess(inputs={'description': "Rockin' process"}) - self.assertEquals(dp.calc.description, "Rockin' process") + dp = test_utils.DummyProcess(inputs={'metadata': {'description': "Rockin' process"}}) + self.assertEquals(dp.node.description, "Rockin' process") with self.assertRaises(ValueError): - test_utils.DummyProcess(inputs={'description': 5}) + test_utils.DummyProcess(inputs={'metadata': {'description': 5}}) def test_label(self): - dp = test_utils.DummyProcess(inputs={'label': 'My label'}) - self.assertEquals(dp.calc.label, 'My label') + dp = test_utils.DummyProcess(inputs={'metadata': {'label': 'My label'}}) + self.assertEquals(dp.node.label, 'My label') with self.assertRaises(ValueError): test_utils.DummyProcess(inputs={'label': 5}) def test_work_calc_finish(self): p = test_utils.DummyProcess() - self.assertFalse(p.calc.is_finished_ok) + self.assertFalse(p.node.is_finished_ok) work.launch.run(p) - self.assertTrue(p.calc.is_finished_ok) + self.assertTrue(p.node.is_finished_ok) def test_calculation_input(self): @work.calcfunction @@ -161,7 +162,7 @@ def simple_wf(): dp = test_utils.DummyProcess(inputs={'calc': calc}) work.launch.run(dp) - input_calc = dp.calc.get_incoming().get_node_by_label('calc') + input_calc = dp.node.get_incoming().get_node_by_label('calc') self.assertTrue(isinstance(input_calc, FrozenDict)) self.assertEqual(input_calc['a'], outputs['a']) @@ -170,7 +171,7 @@ def test_save_instance_state(self): # Save the instance state bundle = plumpy.Bundle(proc) proc.close() - proc2 = bundle.unbundle() + bundle.unbundle() def 
test_process_type_with_entry_point(self): """ @@ -196,20 +197,21 @@ def test_process_type_with_entry_point(self): 'code': code, 'parameters': parameters, 'template': template, - 'options': options, + 'metadata': { + 'options': options, + } } entry_point = 'templatereplacer' - calculation = CalculationFactory(entry_point) - job_process = calculation.process() - process = job_process(inputs=inputs) + process_class = CalculationFactory(entry_point) + process = process_class(inputs=inputs) expected_process_type = 'aiida.calculations:{}'.format(entry_point) - self.assertEqual(process.calc.process_type, expected_process_type) + self.assertEqual(process.node.process_type, expected_process_type) # Verify that load_process_class on the calculation node returns the original entry point class - recovered_process = process.calc.load_process_class() - self.assertEqual(recovered_process, calculation) + recovered_process = process.node.load_process_class() + self.assertEqual(recovered_process, process_class) def test_process_type_without_entry_point(self): """ @@ -218,10 +220,10 @@ def test_process_type_without_entry_point(self): """ process = test_utils.DummyProcess() expected_process_type = '{}.{}'.format(process.__class__.__module__, process.__class__.__name__) - self.assertEqual(process.calc.process_type, expected_process_type) + self.assertEqual(process.node.process_type, expected_process_type) # Verify that load_process_class on the calculation node returns the original entry point class - recovered_process = process.calc.load_process_class() + recovered_process = process.node.load_process_class() self.assertEqual(recovered_process, process.__class__) def test_validation_error(self): @@ -235,4 +237,4 @@ def define(cls, spec): with self.assertRaises(ValueError) as context: TestProc({'a': {'b': Int(5)}}) - self.assertIn("inputs.a.b", str(context.exception)) + self.assertIn('inputs.a.b', str(context.exception)) diff --git a/aiida/backends/tests/work/test_calcfunctions.py b/aiida/backends/tests/work/test_calcfunctions.py index af4dd234e6..a045590595 100644 --- a/aiida/backends/tests/work/test_calcfunctions.py +++ b/aiida/backends/tests/work/test_calcfunctions.py @@ -100,7 +100,7 @@ def test_calcfunction(data): def test_calcfunction_do_not_store_provenance(self): """Run the function without storing the provenance.""" data = Int(1) - result, node = self.test_calcfunction.run_get_node(data, store_provenance=False) # pylint: disable=unexpected-keyword-arg + result, node = self.test_calcfunction.run_get_node(data, metadata={'store_provenance': False}) # pylint: disable=unexpected-keyword-arg self.assertFalse(result.is_stored) self.assertFalse(data.is_stored) self.assertFalse(node.is_stored) diff --git a/aiida/backends/tests/work/test_futures.py b/aiida/backends/tests/work/test_futures.py index 514b45b0f0..9e995459e8 100644 --- a/aiida/backends/tests/work/test_futures.py +++ b/aiida/backends/tests/work/test_futures.py @@ -37,7 +37,7 @@ def test_calculation_future_broadcasts(self): work.run(process) calc_node = runner.run_until_complete(gen.with_timeout(self.TIMEOUT, future)) - self.assertEqual(process.calc.pk, calc_node.pk) + self.assertEqual(process.node.pk, calc_node.pk) def test_calculation_future_polling(self): runner = get_manager().get_runner() @@ -52,4 +52,4 @@ def test_calculation_future_polling(self): runner.run(process) calc_node = runner.run_until_complete(gen.with_timeout(self.TIMEOUT, future)) - self.assertEqual(process.calc.pk, calc_node.pk) + self.assertEqual(process.node.pk, calc_node.pk) 
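The test changes above all exercise the same new calling convention: a calculation plugin is obtained directly from CalculationFactory as a CalcJob subclass, its scientific inputs are regular ports defined in define(), bookkeeping inputs such as the label, description and scheduler options live under the single `metadata` namespace, and the process exposes its calculation node as `process.node` instead of `process.calc`. A minimal usage sketch of that convention, assuming a configured profile and an already stored Code node; the code label 'doubler@localhost', the template keys and the option values are placeholders for illustration and are not taken from this patch:

    # Sketch only: running the new-style templatereplacer CalcJob.
    from aiida.orm import CalculationFactory, Code
    from aiida.orm.data.parameter import ParameterData
    from aiida.work import launch

    TemplatereplacerCalculation = CalculationFactory('templatereplacer')

    inputs = {
        'code': Code.get_from_string('doubler@localhost'),  # placeholder code label
        'template': ParameterData(dict={
            'input_file_template': '{value}',                # keys follow the plugin's template schema
            'input_file_name': 'value.in',
            'cmdline_params': ['value.in'],
            'output_file_name': 'value.out',
        }),
        'parameters': ParameterData(dict={'value': 1}),
        'metadata': {
            'label': 'example run',
            'description': 'illustrates the metadata input namespace',
            'options': {
                'resources': {'num_machines': 1, 'num_mpiprocs_per_machine': 1},
                'max_wallclock_seconds': 60,
            },
        },
    }

    # run_get_node returns the outputs and the CalcJobNode, i.e. the object that
    # the tests above access as `process.node`.
    results, node = launch.run_get_node(TemplatereplacerCalculation, **inputs)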
diff --git a/aiida/backends/tests/work/test_process_builder.py b/aiida/backends/tests/work/test_process_builder.py index 5a31b8a1fd..bec61bff88 100644 --- a/aiida/backends/tests/work/test_process_builder.py +++ b/aiida/backends/tests/work/test_process_builder.py @@ -7,10 +7,10 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### - from __future__ import division from __future__ import print_function from __future__ import absolute_import + from aiida.backends.testbase import AiidaTestCase from aiida.orm import CalculationFactory from aiida.orm.data.parameter import ParameterData @@ -23,6 +23,7 @@ class TestWorkChain(WorkChain): + @classmethod def define(cls, spec): super(TestWorkChain, cls).define(spec) @@ -37,8 +38,7 @@ class TestProcessBuilder(AiidaTestCase): def setUp(self): super(TestProcessBuilder, self).setUp() self.assertIsNone(Process.current()) - self.calculation_class = CalculationFactory('templatereplacer') - self.process_class = self.calculation_class.process() + self.process_class = CalculationFactory('templatereplacer') self.builder = self.process_class.get_builder() def tearDown(self): @@ -59,11 +59,11 @@ def test_process_builder_set_attributes(self): label = 'Test label' description = 'Test description' - self.builder.label = label - self.builder.description = description + self.builder.metadata.label = label + self.builder.metadata.description = description - self.assertEquals(self.builder.label, label) - self.assertEquals(self.builder.description, description) + self.assertEquals(self.builder.metadata.label, label) + self.assertEquals(self.builder.metadata.description, description) def test_workchain(self): """ @@ -73,7 +73,7 @@ def test_workchain(self): builder.a = Int(2) builder.b = Float(2.3) builder.c.d = Bool(True) - self.assertEquals(builder, {'a': Int(2), 'b': Float(2.3), 'c': {'d': Bool(True)}}) + self.assertEquals(builder, {'a': Int(2), 'b': Float(2.3), 'c': {'d': Bool(True)}, 'metadata': {'options': {}}}) def test_invalid_setattr_raises(self): """ @@ -125,9 +125,14 @@ def test_job_calculation_get_builder_restart(self): original.label = 'original' original.store() + # Have to set the process type manually, because usually this will be done automatically when the node is + # instantiated by the process itself. Since we hack it here and instantiate the node directly ourselves we + # have to set the process type for the restart builder to be able to recreate the process class.
+ original.dbnode.process_type = 'aiida.calculations:templatereplacer' + builder = original.get_builder_restart() - self.assertDictEqual(builder.options, original.get_options(only_actually_set=True)) + self.assertDictEqual(builder.metadata.options, original.get_options()) def test_code_get_builder(self): """ diff --git a/aiida/backends/tests/work/test_process_function.py b/aiida/backends/tests/work/test_process_function.py index 2901515644..2c484d5fba 100644 --- a/aiida/backends/tests/work/test_process_function.py +++ b/aiida/backends/tests/work/test_process_function.py @@ -76,7 +76,11 @@ def function_args_and_default(data_a, data_b=Int(DEFAULT_INT)): return {'data_a': data_a, 'data_b': data_b} @workfunction - def function_defaults(data_a=Int(DEFAULT_INT), label=DEFAULT_LABEL, description=DEFAULT_DESCRIPTION): # pylint: disable=unused-argument + def function_defaults( + data_a=Int(DEFAULT_INT), metadata={ + 'label': DEFAULT_LABEL, + 'description': DEFAULT_DESCRIPTION + }): # pylint: disable=unused-argument,dangerous-default-value,missing-docstring return data_a @workfunction @@ -231,36 +235,37 @@ def test_function_args_passing_kwargs(self): def test_function_set_label_description(self): """Verify that the label and description can be set for all process function variants.""" - _, node = self.function_args.run_get_node( - data_a=Int(DEFAULT_INT), label=CUSTOM_LABEL, description=CUSTOM_DESCRIPTION) + metadata = {'label': CUSTOM_LABEL, 'description': CUSTOM_DESCRIPTION} + + _, node = self.function_args.run_get_node(data_a=Int(DEFAULT_INT), metadata=metadata) self.assertEqual(node.label, CUSTOM_LABEL) self.assertEqual(node.description, CUSTOM_DESCRIPTION) - _, node = self.function_args_with_default.run_get_node(label=CUSTOM_LABEL, description=CUSTOM_DESCRIPTION) + _, node = self.function_args_with_default.run_get_node(metadata=metadata) self.assertEqual(node.label, CUSTOM_LABEL) self.assertEqual(node.description, CUSTOM_DESCRIPTION) - _, node = self.function_kwargs.run_get_node(label=CUSTOM_LABEL, description=CUSTOM_DESCRIPTION) + _, node = self.function_kwargs.run_get_node(metadata=metadata) self.assertEqual(node.label, CUSTOM_LABEL) self.assertEqual(node.description, CUSTOM_DESCRIPTION) - _, node = self.function_args_and_kwargs.run_get_node( - data_a=Int(DEFAULT_INT), label=CUSTOM_LABEL, description=CUSTOM_DESCRIPTION) + _, node = self.function_args_and_kwargs.run_get_node(data_a=Int(DEFAULT_INT), metadata=metadata) self.assertEqual(node.label, CUSTOM_LABEL) self.assertEqual(node.description, CUSTOM_DESCRIPTION) - _, node = self.function_args_and_default.run_get_node( - data_a=Int(DEFAULT_INT), label=CUSTOM_LABEL, description=CUSTOM_DESCRIPTION) + _, node = self.function_args_and_default.run_get_node(data_a=Int(DEFAULT_INT), metadata=metadata) self.assertEqual(node.label, CUSTOM_LABEL) self.assertEqual(node.description, CUSTOM_DESCRIPTION) def test_function_defaults(self): """Verify that a process function can define a default label and description but can be overriden.""" + metadata = {'label': CUSTOM_LABEL, 'description': CUSTOM_DESCRIPTION} + _, node = self.function_defaults.run_get_node(data_a=Int(DEFAULT_INT)) self.assertEqual(node.label, DEFAULT_LABEL) self.assertEqual(node.description, DEFAULT_DESCRIPTION) - _, node = self.function_defaults.run_get_node(label=CUSTOM_LABEL, description=CUSTOM_DESCRIPTION) + _, node = self.function_defaults.run_get_node(metadata=metadata) self.assertEqual(node.label, CUSTOM_LABEL) self.assertEqual(node.description, CUSTOM_DESCRIPTION) diff --git 
a/aiida/backends/tests/work/test_runners.py b/aiida/backends/tests/work/test_runners.py index 23fa489ca9..6607f870ca 100644 --- a/aiida/backends/tests/work/test_runners.py +++ b/aiida/backends/tests/work/test_runners.py @@ -21,7 +21,7 @@ class Proc(work.Process): - _calc_class = WorkflowNode + _node_class = WorkflowNode def run(self): pass @@ -46,11 +46,11 @@ def test_call_on_calculation_finish(self): future = plumpy.Future() def calc_done(pk): - self.assertEqual(pk, proc.calc.pk) + self.assertEqual(pk, proc.node.pk) loop.stop() future.set_result(True) - self.runner.call_on_calculation_finish(proc.calc.pk, calc_done) + self.runner.call_on_calculation_finish(proc.node.pk, calc_done) # Run the calculation self.runner.loop.add_callback(proc.step_until_terminated) diff --git a/aiida/backends/tests/work/work_chain.py b/aiida/backends/tests/work/work_chain.py index 04ba9f78f9..53d610a9f5 100644 --- a/aiida/backends/tests/work/work_chain.py +++ b/aiida/backends/tests/work/work_chain.py @@ -79,7 +79,7 @@ def run_and_check_success(process_class, **kwargs): """ process = process_class(inputs=kwargs) work.run(process) - assert process.calc.is_finished_ok is True + assert process.node.is_finished_ok is True return process @@ -782,9 +782,9 @@ def run_async(): runner.schedule(process) runner.loop.run_sync(lambda: run_async()) - self.assertEquals(process.calc.is_finished_ok, False) - self.assertEquals(process.calc.is_excepted, True) - self.assertEquals(process.calc.is_killed, False) + self.assertEquals(process.node.is_finished_ok, False) + self.assertEquals(process.node.is_excepted, True) + self.assertEquals(process.node.is_killed, False) def test_simple_kill_through_process(self): """ @@ -808,9 +808,9 @@ def run_async(): runner.schedule(process) runner.loop.run_sync(lambda: run_async()) - self.assertEquals(process.calc.is_finished_ok, False) - self.assertEquals(process.calc.is_excepted, False) - self.assertEquals(process.calc.is_killed, True) + self.assertEquals(process.node.is_finished_ok, False) + self.assertEquals(process.node.is_excepted, False) + self.assertEquals(process.node.is_killed, True) class TestWorkChainAbortChildren(AiidaTestCase): @@ -876,9 +876,9 @@ def test_simple_run(self): with self.assertRaises(RuntimeError): work.run(process) - self.assertEquals(process.calc.is_finished_ok, False) - self.assertEquals(process.calc.is_excepted, True) - self.assertEquals(process.calc.is_killed, False) + self.assertEquals(process.node.is_finished_ok, False) + self.assertEquals(process.node.is_excepted, True) + self.assertEquals(process.node.is_killed, False) def test_simple_kill_through_process(self): """ @@ -900,14 +900,14 @@ def run_async(): runner.schedule(process) runner.loop.run_sync(lambda: run_async()) - child = process.calc.get_outgoing(link_type=LinkType.CALL_WORK).first().node + child = process.node.get_outgoing(link_type=LinkType.CALL_WORK).first().node self.assertEquals(child.is_finished_ok, False) self.assertEquals(child.is_excepted, False) self.assertEquals(child.is_killed, True) - self.assertEquals(process.calc.is_finished_ok, False) - self.assertEquals(process.calc.is_excepted, False) - self.assertEquals(process.calc.is_killed, True) + self.assertEquals(process.node.is_finished_ok, False) + self.assertEquals(process.node.is_excepted, False) + self.assertEquals(process.node.is_killed, True) class TestImmutableInputWorkchain(AiidaTestCase): diff --git a/aiida/calculations/plugins/arithmetic/add.py b/aiida/calculations/plugins/arithmetic/add.py index c4eac729dc..ddb600c219 100644 --- 
a/aiida/calculations/plugins/arithmetic/add.py +++ b/aiida/calculations/plugins/arithmetic/add.py @@ -13,71 +13,27 @@ from __future__ import absolute_import import io +import six from aiida.common.datastructures import CalcInfo, CodeInfo -from aiida.common.exceptions import InputValidationError -from aiida.common.lang import classproperty from aiida.orm.data.float import Float from aiida.orm.data.int import Int -from aiida.orm.node.process import CalcJobNode +from aiida.work.calcjob import CalcJob -class ArithmeticAddCalculation(CalcJobNode): - """Implementation of CalcJobNode to add two numbers for testing and demonstration purposes.""" +class ArithmeticAddCalculation(CalcJob): + """Implementation of CalcJob to add two numbers for testing and demonstration purposes.""" - # pylint: disable=invalid-name,abstract-method + @classmethod + def define(cls, spec): + super(ArithmeticAddCalculation, cls).define(spec) + spec.input('metadata.options.input_filename', valid_type=six.string_types, default='aiida.in', non_db=True) + spec.input('metadata.options.output_filename', valid_type=six.string_types, default='aiida.out', non_db=True) + spec.input('metadata.options.parser_name', valid_type=six.string_types, default='arithmetic.add', non_db=True) + spec.input('x', valid_type=(Int, Float), help='The left operand.') + spec.input('y', valid_type=(Int, Float), help='The right operand.') - def _init_internal_params(self): - super(ArithmeticAddCalculation, self)._init_internal_params() - - self._PREFIX = 'aiida' # pylint: disable=invalid-name - self._INPUT_FILE_NAME = 'aiida.in' - self._OUTPUT_FILE_NAME = 'aiida.out' - - self._default_parser = 'arithmetic.add' - self._required_inputs = ['code', 'x', 'y'] - self._optional_inputs = [] - - self._DEFAULT_INPUT_FILE = self._INPUT_FILE_NAME - self._DEFAULT_OUTPUT_FILE = self._OUTPUT_FILE_NAME - - @classproperty - def _use_methods(cls): - """ - Define and return the available use_methods - """ - # pylint: disable=no-self-argument,no-member - methods = CalcJobNode._use_methods - methods.update({ - 'x': { - 'valid_types': (Int, Float), - 'additional_parameter': None, - 'linkname': 'x', - 'docstring': ('The left operand'), - }, - 'y': { - 'valid_types': (Int, Float), - 'additional_parameter': None, - 'linkname': 'y', - 'docstring': ('The right operand'), - }, - }) - return methods - - def _get_input_valid_types(self, key): - """Return the valid input types for the given key.""" - return self._use_methods[key]['valid_types'] # pylint: disable=unsubscriptable-object - - def _get_input_valid_type(self, key): - """Return the valid input type for the given key.""" - valid_types = self._get_input_valid_types(key) - - if isinstance(valid_types, tuple): - return valid_types[0] - - return valid_types - - def _prepare_for_submission(self, tempfolder, inputdict): + def prepare_for_submission(self, folder): """ This method is called prior to job submission with a set of calculation input nodes. The inputs will be validated and sanitized, after which the necessary input files will @@ -85,28 +41,26 @@ def _prepare_for_submission(self, tempfolder, inputdict): lists of files that need to be copied to the remote machine before job submission, as well as file lists that are to be retrieved after job completion. 
- :param tempfolder: an aiida.common.folders.Folder to temporarily write files on disk - :param inputdict: a dictionary with the raw input nodes + :param folder: an aiida.common.folders.Folder to temporarily write files on disk :returns: CalcInfo instance """ - input_nodes = self.validate_input_nodes(inputdict) - input_x = input_nodes[self.get_linkname('x')] - input_y = input_nodes[self.get_linkname('y')] - input_code = input_nodes[self.get_linkname('code')] + input_x = self.inputs.x + input_y = self.inputs.y + input_code = self.inputs.code - self.write_input_files(tempfolder, input_x, input_y) + self.write_input_files(folder, input_x, input_y) retrieve_list = self.get_retrieve_list() local_copy_list = self.get_local_copy_list() remote_copy_list = self.get_remote_copy_list() codeinfo = CodeInfo() - codeinfo.cmdline_params = ['-in', self._INPUT_FILE_NAME] - codeinfo.stdout_name = self._OUTPUT_FILE_NAME + codeinfo.cmdline_params = ['-in', self.options.input_filename] + codeinfo.stdout_name = self.options.output_filename codeinfo.code_uuid = input_code.uuid calcinfo = CalcInfo() - calcinfo.uuid = self.uuid + calcinfo.uuid = str(self.node.uuid) calcinfo.codes_info = [codeinfo] calcinfo.retrieve_list = retrieve_list calcinfo.local_copy_list = local_copy_list @@ -121,12 +75,7 @@ def get_retrieve_list(self): :returns: list of resource retrieval instructions """ - retrieve_list = [] - - # Only the output file needs to be retrieved - retrieve_list.append(self._OUTPUT_FILE_NAME) - - return retrieve_list + return [self.options.output_filename] @staticmethod def get_local_copy_list(): @@ -147,53 +96,16 @@ def get_remote_copy_list(): """ return [] - def validate_input_nodes(self, input_nodes_raw): - """ - This function will validate that all required input nodes are present and that their content is valid - - :param input_nodes_raw: a dictionary with the raw input nodes - :returns: dictionary with validated and sanitized input nodes - """ - input_nodes = {} - - # Verify that all required inputs are provided in the raw input dictionary - for input_key in self._required_inputs: - try: - input_link = self.get_linkname(input_key) - input_node = input_nodes_raw.pop(input_key) - except KeyError: - raise InputValidationError("required input '{}' was not specified".format(input_key)) - - input_nodes[input_link] = input_node - - # Check for optional inputs in the raw input dictionary, creating an instance of its valid types otherwise - for input_key in self._optional_inputs: - try: - input_link = self.get_linkname(input_key) - input_node = input_nodes_raw.pop(input_key) - except KeyError: - valid_type_class = self._get_input_valid_type(input_key) - input_node = valid_type_class() - - input_nodes[input_link] = input_node - - # Any remaining input nodes are not recognized raise an input validation exception - if input_nodes_raw: - raise InputValidationError('the following input nodes were not recognized: {}'.format( - input_nodes_raw.keys())) - - return input_nodes - - def write_input_files(self, tempfolder, input_x, input_y): + def write_input_files(self, folder, input_x, input_y): """ Take the input_parameters dictionary with the namelists and their flags and write the input file to disk in the temporary folder - :param tempfolder: an aiida.common.folders.Folder to temporarily write files on disk + :param folder: an aiida.common.folders.Folder to temporarily write files on disk :param input_x: the numeric node representing the left operand of the summation :param input_y: the numeric node representing the right 
operand of the summation """ - filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME) + filename = folder.get_abs_path(self.options.input_filename) with io.open(filename, 'w', encoding='utf8') as handle: handle.write(u'{} {}\n'.format(input_x.value, input_y.value)) diff --git a/aiida/calculations/plugins/templatereplacer.py b/aiida/calculations/plugins/templatereplacer.py index 121006e753..313d86343f 100644 --- a/aiida/calculations/plugins/templatereplacer.py +++ b/aiida/calculations/plugins/templatereplacer.py @@ -12,14 +12,17 @@ from __future__ import print_function from __future__ import absolute_import -from aiida.common.exceptions import InputValidationError +import six + +from aiida.common import exceptions from aiida.common.datastructures import CalcInfo, CodeInfo -from aiida.common.lang import classproperty -from aiida.orm.node.process import CalcJobNode from aiida.orm.data.parameter import ParameterData +from aiida.orm.data.remote import RemoteData +from aiida.orm.data.singlefile import SinglefileData +from aiida.work.calcjob import CalcJob -class TemplatereplacerCalculation(CalcJobNode): +class TemplatereplacerCalculation(CalcJob): """ Simple stub of a plugin that can be used to replace some text in a given template. Can be used for many different codes, or as a starting point to develop a new plugin. @@ -62,53 +65,36 @@ class TemplatereplacerCalculation(CalcJobNode): """ - # pylint: disable=abstract-method - - @classproperty - def _use_methods(cls): - # pylint: disable=no-self-argument,no-member - retdict = CalcJobNode._use_methods - retdict.update({ - 'template': { - 'valid_types': ParameterData, - 'additional_parameter': None, - 'linkname': 'template', - 'docstring': 'A template for the input file', - }, - 'parameters': { - 'valid_types': ParameterData, - 'additional_parameter': None, - 'linkname': 'parameters', - 'docstring': 'Parameters used to replace placeholders in the template', - }, - }) - return retdict - - def _prepare_for_submission(self, tempfolder, inputdict): + @classmethod + def define(cls, spec): + # yapf: disable + super(TemplatereplacerCalculation, cls).define(spec) + spec.input('metadata.options.parser_name', valid_type=six.string_types, default='templatereplacer.doubler', + non_db=True) + spec.input('template', valid_type=ParameterData, + help='A template for the input file.') + spec.input('parameters', valid_type=ParameterData, required=False, + help='Parameters used to replace placeholders in the template.') + spec.input_namespace('files', valid_type=(RemoteData, SinglefileData), required=False) + + def prepare_for_submission(self, folder): """ - This is the routine to be called when you want to create - the input files and related stuff with a plugin. + This is the routine to be called when you want to create the input files and related stuff with a plugin. - :param tempfolder: a aiida.common.folders.Folder subclass where - the plugin should put all its files. - :param inputdict: a dictionary with the input nodes e.g. {label1: node1, ...} (with the Code!) + :param folder: a aiida.common.folders.Folder subclass where the plugin should put all its files. 
""" # pylint: disable=too-many-locals,too-many-statements,too-many-branches from six.moves import StringIO - - from aiida.orm.data.singlefile import SinglefileData - from aiida.orm.data.remote import RemoteData from aiida.common.utils import validate_list_of_string_tuples from aiida.common.exceptions import ValidationError - parameters_node = inputdict.pop('parameters', None) - if parameters_node is None: - parameters = {} - else: - parameters = parameters_node.get_dict() + code = self.inputs.code + template = self.inputs.template.get_dict() - template_node = inputdict.pop('template', None) - template = template_node.get_dict() + try: + parameters = self.inputs.parameters.get_dict() + except AttributeError: + parameters = {} input_file_template = template.pop('input_file_template', '') input_file_name = template.pop('input_file_name', None) @@ -119,50 +105,44 @@ def _prepare_for_submission(self, tempfolder, inputdict): retrieve_temporary_files = template.pop('retrieve_temporary_files', []) if template: - raise InputValidationError('The following keys could not be used in the template node: {}'.format( - template.keys())) + raise exceptions.InputValidationError( + 'The following keys could not be used in the template node: {}'.format(template.keys())) try: validate_list_of_string_tuples(files_to_copy, tuple_length=2) except ValidationError as exc: - raise InputValidationError("invalid file_to_copy format: {}".format(exc)) + raise exceptions.InputValidationError("invalid file_to_copy format: {}".format(exc)) local_copy_list = [] remote_copy_list = [] for link_name, dest_rel_path in files_to_copy: try: - fileobj = inputdict.pop(link_name) - except KeyError: - raise InputValidationError("You are asking to copy a file link {}, " - "but there is no input link with such a name".format(link_name)) + fileobj = self.inputs.files[link_name] + except AttributeError: + raise exceptions.InputValidationError("You are asking to copy a file link {}, " + "but there is no input link with such a name".format(link_name)) if isinstance(fileobj, SinglefileData): local_copy_list.append((fileobj.get_file_abs_path(), dest_rel_path)) elif isinstance(fileobj, RemoteData): # can be a folder remote_copy_list.append((fileobj.get_computer().uuid, fileobj.get_remote_path(), dest_rel_path)) else: - raise InputValidationError( + raise exceptions.InputValidationError( "If you ask to copy a file link {}, " "it must be either a SinglefileData or a RemoteData; it is instead of type {}".format( link_name, fileobj.__class__.__name__)) - code = inputdict.pop('code', None) - if code is None: - raise InputValidationError("No code in input") - - if inputdict: - raise InputValidationError("The input nodes with the following labels could not be " - "used by the templatereplacer plugin: {}".format(inputdict.keys())) - if input_file_name is not None and not input_file_template: - raise InputValidationError("If you give an input_file_name, you must also specify a input_file_template") + raise exceptions.InputValidationError( + "If you give an input_file_name, you must also specify a input_file_template") if input_through_stdin and input_file_name is None: - raise InputValidationError("If you ask for input_through_stdin you have to specify a input_file_name") + raise exceptions.InputValidationError( + "If you ask for input_through_stdin you have to specify a input_file_name") input_file = StringIO(input_file_template.format(**parameters)) if input_file_name: - tempfolder.create_file_from_filelike(input_file, input_file_name) + 
folder.create_file_from_filelike(input_file, input_file_name) else: if input_file_template: self.logger.warning("No input file name passed, but a input file template is present") @@ -173,7 +153,7 @@ def _prepare_for_submission(self, tempfolder, inputdict): calcinfo.retrieve_list = [] calcinfo.retrieve_temporary_list = [] - calcinfo.uuid = self.uuid + calcinfo.uuid = self.node.uuid calcinfo.local_copy_list = local_copy_list calcinfo.remote_copy_list = remote_copy_list diff --git a/aiida/cmdline/commands/cmd_calculation.py b/aiida/cmdline/commands/cmd_calculation.py index 8cbee9ae96..6ccbcf29e7 100644 --- a/aiida/cmdline/commands/cmd_calculation.py +++ b/aiida/cmdline/commands/cmd_calculation.py @@ -49,7 +49,7 @@ def calculation_gotocomputer(ctx, calculation): @verdi_calculation.command('list') @arguments.CALCULATIONS() -@options.CALCULATION_STATE() +@options.CALC_JOB_STATE() @options.PROCESS_STATE() @options.EXIT_STATUS() @options.FAILED() @@ -71,7 +71,7 @@ def calculation_gotocomputer(ctx, calculation): @click.pass_context @decorators.with_dbenv() @decorators.deprecated_command("This command is deprecated. Use 'verdi process list' instead.") -def calculation_list(ctx, calculations, past_days, groups, all_entries, calculation_state, process_state, exit_status, +def calculation_list(ctx, calculations, past_days, groups, all_entries, calc_job_state, process_state, exit_status, failed, limit, order_by, project, all_users, raw, absolute_time): """Return a list of job calculations that are still running.""" # pylint: disable=unused-argument diff --git a/aiida/cmdline/params/options/__init__.py b/aiida/cmdline/params/options/__init__.py index f62dae0e5f..eb1d07fd0d 100644 --- a/aiida/cmdline/params/options/__init__.py +++ b/aiida/cmdline/params/options/__init__.py @@ -28,10 +28,10 @@ def valid_process_states(): return tuple(state.value for state in ProcessState) -def valid_calculation_states(): +def valid_calc_job_states(): """Return a list of valid values for the CalcState enum.""" - from aiida.common.datastructures import calc_states - return tuple(state for state in calc_states) + from aiida.common.datastructures import CalcJobState + return tuple(state.value for state in CalcJobState) def active_process_states(): @@ -229,10 +229,10 @@ def active_process_states(): type=types.PluginParamType(group='calculations'), help='Calculation input plugin string.') -CALCULATION_STATE = OverridableOption( - '-s', '--calculation-state', 'calculation_state', - type=types.LazyChoice(valid_calculation_states), cls=MultipleValueOption, - help='Only include entries with this calculation state.') +CALC_JOB_STATE = OverridableOption( + '-s', '--calc-job-state', 'calc_job_state', + type=types.LazyChoice(valid_calc_job_states), cls=MultipleValueOption, + help='Only include entries with this calculation job state.') PROCESS_STATE = OverridableOption( '-S', '--process-state', 'process_state', diff --git a/aiida/cmdline/utils/common.py b/aiida/cmdline/utils/common.py index e17075dfde..93755068ad 100644 --- a/aiida/cmdline/utils/common.py +++ b/aiida/cmdline/utils/common.py @@ -85,6 +85,7 @@ def get_node_summary(node): :return: a string summary of the node """ from plumpy import ProcessState + from aiida.orm.data.code import Code from aiida.orm.node.process import ProcessNode table_headers = ['Property', 'Value'] @@ -122,12 +123,11 @@ def get_node_summary(node): table.append(['computer', '[{}] {}'.format(node.get_computer().pk, node.get_computer().name)]) try: - code = node.get_code() - except AttributeError: + code = 
node.get_incoming(node_class=Code).first() + except ValueError: pass else: - if code is not None: - table.append(['code', code.label]) + table.append(['code', code.label]) return tabulate(table, headers=table_headers) @@ -211,7 +211,7 @@ def get_calcjob_report(calcjob): :return: a string representation of the log messages and scheduler output """ from aiida import orm - from aiida.common.datastructures import calc_states + from aiida.common.datastructures import CalcJobState log_messages = orm.Log.objects.get_logs_for(calcjob) scheduler_out = calcjob.get_scheduler_output() @@ -221,7 +221,7 @@ def get_calcjob_report(calcjob): report = [] - if calcjob_state == calc_states.WITHSCHEDULER: + if calcjob_state == CalcJobState.WITHSCHEDULER: state_string = '{}, scheduler state: {}'.format(calcjob_state, scheduler_state if scheduler_state else '(unknown)') else: diff --git a/aiida/common/datastructures.py b/aiida/common/datastructures.py index a5cc7fae20..116b7ee040 100644 --- a/aiida/common/datastructures.py +++ b/aiida/common/datastructures.py @@ -7,105 +7,24 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### -""" -This module defines the main data structures used by Calculations. -""" +"""Module to define commonly used data structures.""" from __future__ import division from __future__ import print_function from __future__ import absolute_import -from aiida.common.extendeddicts import DefaultFieldsAttributeDict, Enumerate +from enum import Enum, IntEnum +from aiida.common.extendeddicts import DefaultFieldsAttributeDict -class CalcState(Enumerate): - pass +class CalcJobState(Enum): + """The sub state of a CalcJobNode while its Process is in an active state (i.e. Running or Waiting).""" -_sorted_datastates = ( - 'NEW', # just created - 'TOSUBMIT', # used by the executionmanager to submit new calculations scheduled to be submitted - 'SUBMITTING', # being submitted to cluster - 'WITHSCHEDULER', # on the scheduler (on any unfinished status: QUEUED, QUEUED_HELD, SUSPENDED, RUNNING) - 'COMPUTED', # calculation finished on scheduler, not yet retrieved (both DONE and FAILED) - 'RETRIEVING', # while retrieving data - 'PARSING', # while parsing data - 'FINISHED', # final state of the calculation: data retrieved and eventually parsed - 'SUBMISSIONFAILED', # error occurred during submission phase - 'RETRIEVALFAILED', # error occurred during retrieval phase - 'PARSINGFAILED', # error occurred during parsing phase due to a problem in the parse - 'FAILED', # the parser recognized the calculation as failed - 'IMPORTED', # the calculation was imported from another DB -) - -# The order of states is not random: is the order of precedence. -# This is used to verify that calculations always procede in the correct order. -# calc_states, instead, has a random order -calc_states = CalcState(_sorted_datastates) - - -def sort_states(list_states, use_key=False): - """ - Given a list of state names, return a sorted list of states (the first - is the most recent) sorted according to their logical appearance in - the DB (i.e., NEW before of SUBMITTING before of FINISHED). - - .. note:: The order of the internal variable _sorted_datastates is - used. - - :param list_states: a list (or tuple) of state strings. - - :param use_key: if True, expects that each element is not - just a string, but a pair (someobject, string). 
- Only string is used to sort (should be the state string), - and only someobject is returned in the final list. - - :return: a sorted list of the given data states. - - :raise ValueError: if any of the given states is not a valid state. - """ - datastates_order_dict = {state: idx for idx, state in enumerate( - _sorted_datastates)} - - try: - if use_key: - list_to_sort = [(datastates_order_dict[st[1]], st[0]) - for st in list_states] - else: - list_to_sort = [(datastates_order_dict[st], st) - for st in list_states] - - except KeyError as e: - raise ValueError("At least one of the provided states is not " - "valid ({})".format(e.args[0])) - - # In-place sort - list_to_sort.sort() - - return [_[1] for _ in list_to_sort[::-1]] - - -def is_progressive_state_change(state_old, state_new): - """ - Return whether a state change from `state_old` to `state_new` would be progressive, i.e. moving forward in time - - :param state_old: the old calculation state - :param state_new: the new calculation state - :return: True if the change from `state_old` to `state_new` is progressive, False otherwise - :raise: ValueError if either `state_old` or `state_new` is not a valid calculation state - """ - states = list(_sorted_datastates) - - try: - index_old = states.index(state_old) - except ValueError: - raise ValueError('state_old {} is not a valid calculation state'.format(state_old)) - - try: - index_new = states.index(state_new) - except ValueError: - raise ValueError('state_new {} is not a valid calculation state'.format(state_new)) - - return index_new > index_old + UPLOADING = 'uploading' + SUBMITTING = 'submitting' + WITHSCHEDULER = 'withscheduler' + RETRIEVING = 'retrieving' + PARSING = 'parsing' class CalcInfo(DefaultFieldsAttributeDict): @@ -175,21 +94,6 @@ class CalcInfo(DefaultFieldsAttributeDict): ) -class CodeRunmode(Enumerate): - pass - - -# these are the possible ways to execute more than one code in the same scheduling job -# if parallel, the codes will be executed as something like: -# code1.x & -# code2.x & -# wait -# if serial, it will be: -# code1.x -# code2.x -code_run_modes = CodeRunmode(('PARALLEL', 'SERIAL')) - - class CodeInfo(DefaultFieldsAttributeDict): """ This attribute-dictionary contains the information needed to execute a code. @@ -234,14 +138,33 @@ class CodeInfo(DefaultFieldsAttributeDict): on the remote computer) * ``code_uuid``: the uuid of the code associated to the CodeInfo """ - _default_fields = ('cmdline_params', # as a list of strings - 'stdin_name', - 'stdout_name', - 'stderr_name', - 'join_files', - 'withmpi', - 'code_uuid' - ) + _default_fields = ( + 'cmdline_params', # as a list of strings + 'stdin_name', + 'stdout_name', + 'stderr_name', + 'join_files', + 'withmpi', + 'code_uuid' + ) + + +class CodeRunMode(IntEnum): + """Enum to indicate the way the codes of a calculation should be run. 
+ + For PARALLEL, the codes for a given calculation will be run in parallel by running them in the background:: + + code1.x & + code2.x & + + For the SERIAL option, codes will be executed sequentially by running for example the following:: + + code1.x + code2.x + """ + + SERIAL = 0 + PARALLEL = 1 class LazyStore(object): diff --git a/aiida/common/extendeddicts.py b/aiida/common/extendeddicts.py index d12557aaac..a349d6dbb2 100644 --- a/aiida/common/extendeddicts.py +++ b/aiida/common/extendeddicts.py @@ -12,24 +12,7 @@ from __future__ import print_function from __future__ import absolute_import -import six - -from aiida.common.exceptions import ValidationError - - -class Enumerate(frozenset): - """Custom implementation of enum.Enum.""" - - def __getattr__(self, name): - if name in self: - return six.text_type(name) # always return unicode in Python 2 - raise AttributeError("No attribute '{}' in Enumerate '{}'".format(name, self.__class__.__name__)) - - def __setattr__(self, name, value): - raise AttributeError("Cannot set attribute in Enumerate '{}'".format(self.__class__.__name__)) - - def __delattr__(self, name): - raise AttributeError("Cannot delete attribute in Enumerate '{}'".format(self.__class__.__name__)) +from aiida.common import exceptions class AttributeDict(dict): @@ -233,7 +216,7 @@ def validate(self): try: validator(self[key]) except Exception as exc: - raise ValidationError("Invalid value for key '{}' [{}]: {}".format( + raise exceptions.ValidationError("Invalid value for key '{}' [{}]: {}".format( key, exc.__class__.__name__, exc)) def __setattr__(self, attr, value): diff --git a/aiida/daemon/execmanager.py b/aiida/daemon/execmanager.py index 9464b4b7bf..b2d2e9926b 100644 --- a/aiida/daemon/execmanager.py +++ b/aiida/daemon/execmanager.py @@ -22,13 +22,13 @@ from aiida.common import AIIDA_LOGGER from aiida.common import exceptions -from aiida.common.datastructures import calc_states +from aiida.common.datastructures import CalcJobState from aiida.common.folders import SandboxFolder from aiida.common.links import LinkType from aiida.common.log import get_dblogger_extra from aiida.orm import DataFactory from aiida.orm.data.folder import FolderData -from aiida.scheduler.datastructures import JOB_STATES +from aiida.scheduler.datastructures import JobState REMOTE_WORK_DIRECTORY_LOST_FOUND = 'lost+found' @@ -261,7 +261,7 @@ def retrieve_calculation(calculation, transport, retrieved_temporary_folder): # Create the FolderData node to attach everything to retrieved_files = FolderData() - retrieved_files.add_incoming(calculation, link_type=LinkType.CREATE, link_label=calculation._get_linkname_retrieved()) + retrieved_files.add_incoming(calculation, link_type=LinkType.CREATE, link_label=calculation.link_label_retrieved) with transport: transport.chdir(workdir) @@ -321,7 +321,7 @@ def kill_calculation(calculation, transport): job = running_jobs.get(job_id, None) # If the job is returned it is still running and the kill really failed, so we raise - if job is not None and job.job_state != JOB_STATES.DONE: + if job is not None and job.job_state != JobState.DONE: raise exceptions.RemoteOperationError('scheduler.kill({}) was unsuccessful'.format(job_id)) else: execlogger.warning('scheduler.kill() failed but job<{%s}> no longer seems to be running regardless', job_id) @@ -335,10 +335,10 @@ def parse_results(job, retrieved_temporary_folder=None): :returns: integer exit code, where 0 indicates success and non-zero failure """ - from aiida.orm.node.process.calculation.calcjob import 
CalcJobExitStatus from aiida.work import ExitCode - assert job.get_state() == calc_states.PARSING, 'the job should be in the PARSING state when calling this function' + assert job.get_state() == CalcJobState.PARSING, \ + 'the job should be in the PARSING state when calling this function yet it is {}'.format(job.get_state()) Parser = job.get_parserclass() exit_code = ExitCode() @@ -366,12 +366,12 @@ def parse_results(job, retrieved_temporary_folder=None): # Some implementations of parse_from_calc may still return a plain boolean or integer for the exit_code. # In the case of a boolean: True should be mapped to the default ExitCode which corresponds to an exit - # status of 0. False values are mapped to the value that is mapped onto the FAILED calculation state - # throught the CalcJobExitStatus. Plain integers are directly used to construct an ExitCode tuple + # status of 0. False values are mapped to the value that is mapped onto the default `CalcJob` exit code + # `ERROR_FAILED`. if isinstance(exit_code, bool) and exit_code is True: exit_code = ExitCode(0) elif isinstance(exit_code, bool) and exit_code is False: - exit_code = ExitCode(CalcJobExitStatus[calc_states.FAILED].value) + exit_code = ExitCode(job.process().exit_codes.ERROR_FAILED) elif isinstance(exit_code, int): exit_code = ExitCode(exit_code) elif isinstance(exit_code, ExitCode): @@ -384,16 +384,6 @@ def parse_results(job, retrieved_temporary_folder=None): n.add_incoming(job, link_type=LinkType.CREATE, link_label=label) n.store() - try: - if exit_code.status == 0: - job._set_state(calc_states.FINISHED) - else: - job._set_state(calc_states.FAILED) - except exceptions.ModificationNotAllowed: - # I should have been the only one to set it, but - # in order to avoid useless error messages, I just ignore - pass - if exit_code.status is not 0: execlogger.error("[parsing of calc {}] " "The parser returned an error, but it should have " diff --git a/aiida/manage/caching.py b/aiida/manage/caching.py index 6d3ef2eaa0..af762dceea 100644 --- a/aiida/manage/caching.py +++ b/aiida/manage/caching.py @@ -15,6 +15,7 @@ import io import os import copy +from enum import Enum from functools import wraps from contextlib import contextmanager @@ -23,17 +24,23 @@ from aiida.backends.utils import get_current_profile from aiida.common import exceptions -from aiida.common.extendeddicts import Enumerate from aiida.common.utils import get_object_from_string __all__ = ['get_use_cache', 'enable_caching', 'disable_caching'] -CONFIG_KEYS = Enumerate(('default', 'enabled', 'disabled')) + +class ConfigKeys(Enum): + """Valid keys for caching configuration.""" + + DEFAULT = 'default' + ENABLED = 'enabled' + DISABLED = 'disabled' + DEFAULT_CONFIG = { - CONFIG_KEYS.default: False, - CONFIG_KEYS.enabled: [], - CONFIG_KEYS.disabled: [], + ConfigKeys.DEFAULT.value: False, + ConfigKeys.ENABLED.value: [], + ConfigKeys.DISABLED.value: [], } @@ -61,7 +68,7 @@ def _get_config(config_file): # load classes try: - for key in [CONFIG_KEYS.enabled, CONFIG_KEYS.disabled]: + for key in [ConfigKeys.ENABLED.value, ConfigKeys.DISABLED.value]: config[key] = [get_object_from_string(c) for c in config[key]] except (ValueError) as err: six.raise_from( @@ -109,15 +116,15 @@ def get_use_cache(node_class=None): :raises ValueError: if the configuration is invalid by defining the class both enabled and disabled """ if node_class is not None: - enabled = node_class in _CONFIG[CONFIG_KEYS.enabled] - disabled = node_class in _CONFIG[CONFIG_KEYS.disabled] + enabled = node_class in 
_CONFIG[ConfigKeys.ENABLED.value] + disabled = node_class in _CONFIG[ConfigKeys.DISABLED.value] if enabled and disabled: raise ValueError('Invalid configuration: Caching for {} is both enabled and disabled.'.format(node_class)) elif enabled: return True elif disabled: return False - return _CONFIG[CONFIG_KEYS.default] + return _CONFIG[ConfigKeys.DEFAULT.value] @contextmanager @@ -142,11 +149,11 @@ def enable_caching(node_class=None): """ with _reset_config(): if node_class is None: - _CONFIG[CONFIG_KEYS.default] = True + _CONFIG[ConfigKeys.DEFAULT.value] = True else: - _CONFIG[CONFIG_KEYS.enabled].append(node_class) + _CONFIG[ConfigKeys.ENABLED.value].append(node_class) try: - _CONFIG[CONFIG_KEYS.disabled].remove(node_class) + _CONFIG[ConfigKeys.DISABLED.value].remove(node_class) except ValueError: pass yield @@ -162,11 +169,11 @@ def disable_caching(node_class=None): """ with _reset_config(): if node_class is None: - _CONFIG[CONFIG_KEYS.default] = False + _CONFIG[ConfigKeys.DEFAULT.value] = False else: - _CONFIG[CONFIG_KEYS.disabled].append(node_class) + _CONFIG[ConfigKeys.DISABLED.value].append(node_class) try: - _CONFIG[CONFIG_KEYS.enabled].remove(node_class) + _CONFIG[ConfigKeys.ENABLED.value].remove(node_class) except ValueError: pass yield diff --git a/aiida/orm/__init__.py b/aiida/orm/__init__.py index 7d1710aeff..f829702172 100644 --- a/aiida/orm/__init__.py +++ b/aiida/orm/__init__.py @@ -18,7 +18,6 @@ from .data.code import Code from .authinfos import * -from .calculation import * from .computers import * from .entities import * from .groups import * @@ -40,7 +39,6 @@ __all__ = (_local + authinfos.__all__ + - calculation.__all__ + computers.__all__ + entities.__all__ + groups.__all__ + diff --git a/aiida/orm/calculation/__init__.py b/aiida/orm/calculation/__init__.py deleted file mode 100644 index ccacf7ce97..0000000000 --- a/aiida/orm/calculation/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. # -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### -from __future__ import division -from __future__ import print_function -from __future__ import absolute_import - -from .job import JobCalculation - -__all__ = ('JobCalculation',) diff --git a/aiida/orm/calculation/inline.py b/aiida/orm/calculation/inline.py deleted file mode 100644 index 5afd6b25b5..0000000000 --- a/aiida/orm/calculation/inline.py +++ /dev/null @@ -1,220 +0,0 @@ -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. 
# -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### - -from __future__ import division -from __future__ import print_function -from __future__ import absolute_import -import functools - -from aiida.orm.node.process import CalcFunctionNode - -__all__ = 'make_inline', 'optional_inline' - - -def make_inline(func): - """ - This make_inline wrapper/decorator takes a function with specific - requirements, runs it and stores the result as an CalcFunctionNode node. - It will also store all other nodes, including any possibly unstored - input node! The return value of the wrapped calculation will also be - slightly changed, see below. - - The wrapper: - - * checks that the function name ends with the string ``'_inline'`` - * checks that each input parameter is a valid Data node - (can be stored or unstored) - * runs the actual function - * gets the result values - * checks that the result value is a dictionary, where the - key are all strings and the values are all **unstored** - data nodes - * creates an CalcFunctionNode node, links all the kwargs - as inputs and the returned nodes as outputs, using the - keys as link labels - * stores all the nodes (including, possibly, unstored input - nodes given as kwargs) - * returns a length-two tuple, where the first element is - the CalcFunctionNode node, and the second is the dictionary - returned by the wrapped function - - To use this function, you can use it as a decorator of a - wrapped function:: - - @make_inline - def copy_inline(source): - return {copy: source.copy()} - - In this way, every time you call copy_inline, the wrapped version - is actually called, and the return value will be a tuple with - the CalcFunctionNode instance, and the returned dictionary. - For instance, if ``s`` is a valid ``Data`` node, with the following - lines:: - - c, s_copy_dict = copy_inline(source=s) - s_copy = s_copy_dict['copy'] - - ``c`` will contain the new ``CalcFunctionNode`` instance, ``s_copy`` the - (stored) copy of ``s`` (with the side effect that, if ``s`` was not stored, - after the function call it will be automatically stored). - - :note: If you use a wrapper, make sure to write explicitly in the docstrings - that the function is going to store the nodes. - - The second possibility, if you want that by default the function does not - store anything, but can be wrapped when it is necessary, is the following. - You simply define the function you want to wrap (``copy_inline`` in the - example above) without decorator:: - - def copy_inline(source): - return {copy: source.copy()} - - This is a normal function, so to call it you will normally do:: - - s_copy_dict = copy_inline(s) - - while if you want to wrap it, so that an ``CalcFunctionNode`` is created, and - everything is stored, you will run:: - - c, s_copy_dict = make_inline(f)(s=s) - - Note that, with the wrapper, all the parameters to ``f()`` have to be - passed as keyworded arguments. Moreover, the return value is different, - i.e. ``(c, s_copy_dict)`` instead of simply ``s_copy_dict``. - - .. note:: EXTREMELY IMPORTANT! The wrapped function MUST have - the following requirements in order to be reproducible. - These requirements cannot be enforced, but must be - followed when writing the wrapped function. 
- - * The function MUST NOT USE information that is not - passed in the kwargs. In particular, it cannot read - files from the hard-drive (that will not be present - in another user's computer), it cannot connect - to external databases and retrieve the current - entries in that database (that could change over - time), etc. - * The only exception to the above rule is the access - to the AiiDA database for the *parents* of the input - nodes. That is, you can take the input nodes passed - as kwargs, and use also the data given in their inputs, - the inputs of their inputs, ... but you CANNOT use - any output of any of the above-mentioned nodes (that - could change over time). - * The function MUST NOT have side effects (creating files on the disk, - adding entries to an external database, ...). - - .. note:: The function will also store: - - * in the attributes: the function name, function namespace and the starting - line number of the function in the source file - * in the repository: the full source file if it is possible to retrieve it - otherwise it will be set to None, e.g. if the function was defined - in the interactive shell). - - For this reason, try to keep, if possible, all the code to be run - within the same file, so that it is possible to keep the provenance - of the functions that were run (if you instead call a function in a - different file, you will never know in the future what that function - did). - If you call external modules and you matter about provenance, if would - be good to also return in a suitable dictionary the version of these - modules (e.g., after importing a module XXX, you can check if the - module defines a variable XXX.__version__ or XXX.VERSION or something - similar, and store it in an output node). - - :note: All nodes will be stored, including unstored input nodes!! - - :param kwargs: all kwargs are passed to the wrapped function - :return: a length-two tuple, where the first element is - the CalcFunctionNode node, and the second is the dictionary - returned by the wrapped function. All nodes are stored. - :raise TypeError: if the return value is not a dictionary, the - keys are not strings, or the values - are not data nodes. Raise also if the input values are not data nodes. - :raise ModificationNotAllowed: if the returned Data nodes are already - stored. - :raise Exception: All other exceptions from the wrapped function - are not catched. - - .. deprecated:: 1.0.0 - Use the ``@calcfunction`` decorator instead. - """ - import warnings - # If we call this DeprecationWarning, pycharm will properly strike out the function - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('this function has been deprecated, use `aiida.work.calcfunction` instead', DeprecationWarning) - - from aiida.work import workfunction - - # Note: if you pass a lambda function, the name will be ; moreover - # if you define a function f, and then do "h=f", h.__name__ will - # still return 'f'! 
- function_name = func.__name__ - if not function_name.endswith('_inline'): - raise ValueError( - "The function name that is wrapped must end " - "with '_inline', while its name is '{}'".format(function_name)) - - wf = workfunction(func, CalcFunctionNode) - - @functools.wraps(func) - def swap_result(*args, **kwargs): - result = wf.run_get_node(*args, **kwargs) - return result[1], result[0] - - return swap_result - - -def optional_inline(func): - """ - optional_inline wrapper/decorator takes a function, which can be called - either as wrapped in CalcFunctionNode or a simple function, depending - on 'store' keyworded argument (True stands for CalcFunctionNode, False - for simple function). The wrapped function has to adhere to the - requirements by make_inline wrapper/decorator. - - Usage example:: - - @optional_inline - def copy_inline(source=None): - return {'copy': source.copy()} - - Function ``copy_inline`` will be wrapped in CalcFunctionNode when - invoked in following way:: - - copy_inline(source=node,store=True) - - while it will be called as a simple function when invoked:: - - copy_inline(source=node) - - In any way the ``copy_inline`` will return the same results. - - .. deprecated:: 1.0.0 - Use the ``@calcfunction`` decorator instead - """ - import warnings - # If we call this DeprecationWarning, pycharm will properly strike out the function - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('this function has been deprecated, use `aiida.work.calcfunction` instead', DeprecationWarning) - - def wrapped_function(*args, **kwargs): - """ - This wrapper function is the actual function that is called. - """ - store = kwargs.pop('store', False) - - if store: - return make_inline(func)(*args, **kwargs)[1] - else: - return func(*args, **kwargs) - - return wrapped_function diff --git a/aiida/orm/calculation/job/__init__.py b/aiida/orm/calculation/job/__init__.py deleted file mode 100644 index 2b4648edd4..0000000000 --- a/aiida/orm/calculation/job/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. 
# -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### -from __future__ import division -from __future__ import print_function -from __future__ import absolute_import - -from aiida.orm.node.process import CalcJobNode as JobCalculation diff --git a/aiida/orm/data/array/trajectory.py b/aiida/orm/data/array/trajectory.py index a4971b248f..9baa180030 100644 --- a/aiida/orm/data/array/trajectory.py +++ b/aiida/orm/data/array/trajectory.py @@ -465,7 +465,7 @@ def _get_aiida_structure(self, store=False, **kwargs): param = ParameterData(dict=kwargs) - ret_dict = _get_aiida_structure_inline(trajectory=self, parameters=param, store_provenance=store) # pylint: disable=unexpected-keyword-arg + ret_dict = _get_aiida_structure_inline(trajectory=self, parameters=param, metadata={'store_provenance': store}) # pylint: disable=unexpected-keyword-arg return ret_dict['structure'] def _get_cif(self, index=None, **kwargs): diff --git a/aiida/orm/data/cif.py b/aiida/orm/data/cif.py index 77f0aac823..4dc03cbf21 100644 --- a/aiida/orm/data/cif.py +++ b/aiida/orm/data/cif.py @@ -900,7 +900,7 @@ def _get_aiida_structure(self, converter='pymatgen', store=False, **kwargs): except AttributeError: raise ValueError("No such converter '{}' available".format(converter)) - result = convert_function(cif=self, parameters=parameters, store_provenance=store) + result = convert_function(cif=self, parameters=parameters, metadata={'store_provenance': store}) return result['structure'] diff --git a/aiida/orm/data/code.py b/aiida/orm/data/code.py index d324e74024..65bcbddcb8 100644 --- a/aiida/orm/data/code.py +++ b/aiida/orm/data/code.py @@ -441,49 +441,6 @@ def get_execname(self): else: return self.get_remote_exec_path() - def new_calc(self, *args, **kwargs): - """ - Create and return a new Calculation object (unstored) with the correct - plugin subclass, as obtained by the self.get_input_plugin_name() method. - - Parameters are passed to the calculation __init__ method. - - :note: it also directly creates the link to this code (that will of - course be cached, since the new node is not stored yet). - - :raise MissingPluginError: if the specified plugin does not exist. - :raise ValueError: if no default plugin was specified in the code. 
- """ - import warnings - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn( - 'directly creating and submitting calculations is deprecated, use the {}\nSee:{}'.format( - 'ProcessBuilder', DEPRECATION_DOCS_URL), DeprecationWarning) - - from aiida.orm.utils import CalculationFactory - plugin_name = self.get_input_plugin_name() - if plugin_name is None: - raise ValueError("You did not specify an input plugin for this code") - - try: - C = CalculationFactory(plugin_name) - - except MissingPluginError: - raise MissingPluginError("The input_plugin name for this code is " - "'{}', but it is not an existing plugin" - "name".format(plugin_name)) - - # For remote codes, automatically set the computer, - # unless explicitly set by the user - if not self.is_local(): - if 'computer' not in kwargs: - kwargs['computer'] = self.get_remote_computer() - - new_calc = C(*args, **kwargs) - # I link to the code - new_calc.use_code(self) - return new_calc - def get_builder(self): """ Create and return a new ProcessBuilder for the default Calculation diff --git a/aiida/orm/data/structure.py b/aiida/orm/data/structure.py index 30c1ada369..8f1dd1c50d 100644 --- a/aiida/orm/data/structure.py +++ b/aiida/orm/data/structure.py @@ -1899,7 +1899,7 @@ def _get_cif(self, converter='ase', store=False, **kwargs): except AttributeError: raise ValueError( "No such converter '{}' available".format(converter)) - ret_dict = conv_f(struct=self, parameters=param, store_provenance=store) + ret_dict = conv_f(struct=self, parameters=param, metadata={'store_provenance': store}) return ret_dict['cif'] def _get_object_phonopyatoms(self): diff --git a/aiida/orm/implementation/django/node.py b/aiida/orm/implementation/django/node.py index 4e23b43a06..d018c738a2 100644 --- a/aiida/orm/implementation/django/node.py +++ b/aiida/orm/implementation/django/node.py @@ -203,19 +203,6 @@ def _update_db_description_field(self, field_value): self._dbnode.save() self._increment_version_number_db() - def _replace_dblink_from(self, src, link_type, label): - try: - self._add_dblink_from(src, link_type, label) - except UniquenessError: - # I have to replace the link; I do it within a transaction - with transaction.atomic(): - self._remove_dblink_from(label) - self._add_dblink_from(src, link_type, label) - - def _remove_dblink_from(self, label): - from aiida.backends.djsite.db.models import DbLink - DbLink.objects.filter(output=self._dbnode, label=label).delete() - def _add_dblink_from(self, src, link_type, label): from aiida.orm.querybuilder import QueryBuilder if not isinstance(src, Node): diff --git a/aiida/orm/implementation/general/node.py b/aiida/orm/implementation/general/node.py index e4e6817b68..a8a2ac7223 100644 --- a/aiida/orm/implementation/general/node.py +++ b/aiida/orm/implementation/general/node.py @@ -92,21 +92,11 @@ class _AbstractNodeMeta(ABCMeta): def __new__(cls, name, bases, attrs): newcls = ABCMeta.__new__(cls, name, bases, attrs) + newcls._logger = logging.getLogger('{}.{}'.format(attrs['__module__'], name)) - # Configure the logger by inheriting from the aiida logger - if not attrs['__module__'].startswith('aiida.'): - newcls._logger = logging.getLogger('aiida.{:s}.{:s}'.format(attrs['__module__'], name)) - else: - newcls._logger = logging.getLogger('{:s}.{:s}'.format(attrs['__module__'], name)) - - # Set the plugin type string - plugin_type_string = get_type_string_from_class(attrs['__module__'], name) - - # Set the query type string based on the 
plugin type string - query_type_string = get_query_type_from_type_string(plugin_type_string) - - newcls._plugin_type_string = plugin_type_string - newcls._query_type_string = query_type_string + # Set the plugin type string and query type string based on the plugin type string + newcls._plugin_type_string = get_type_string_from_class(attrs['__module__'], name) + newcls._query_type_string = get_query_type_from_type_string(newcls._plugin_type_string) return newcls @@ -772,93 +762,6 @@ def get_stored_link_triples(self, node_class=None, link_type=(), link_label_filt return [links.LinkTriple(entry[0], LinkType(entry[1]), entry[2]) for entry in builder.all()] - def _replace_link_from(self, source, link_type, link_label): - """ - Replace an incoming link with the given type andlabel, or create it if it does not exist. - - :note: In subclasses, change only this. Moreover, remember to call - the super() method in order to properly use the caching logic! - - :param source: the node from which the link is coming - :param link_type: the link type - :param link_label: the link label - """ - self.validate_incoming(source, link_type, link_label) - source.validate_outgoing(self, link_type, link_label) - - link_triple = links.LinkTriple(source, link_type, link_label) - - # If both are stored, write directly on the DB - if self.is_stored and source.is_stored: - self._replace_dblink_from(source, link_type, link_label) - - # If the link triple was in the local cache, remove it, which can happen if one first stores the target - # node, followed by the source node. - try: - self._incoming_cache.remove(link_triple) - except ValueError: - pass - else: - # At least one node is not stored yet so add it to the internal cache - # I insert the link directly in the cache rather than calling _add_cachelink_from - # because this latter performs an undesired check - self._incoming_cache.append(link_triple) - - def _remove_link_from(self, label): - """ - Remove from the DB the input link with the given label. - - :note: In subclasses, change only this. Moreover, remember to call - the super() method in order to properly use the caching logic! - - :note: No error is raised if the link does not exist. - - :param str label: the name of the label to set the link from src. - :param link_type: the link type, must be one of the enum values form - :class:`~aiida.common.links.LinkType` - """ - # Try to remove from the local cache, no problem if none is present - try: - del self._incoming_cache[label] - except KeyError: - pass - - # If both are stored, remove also from the DB - if self.is_stored: - self._remove_dblink_from(label) - - @abstractmethod - def _replace_dblink_from(self, src, link_type, label): - """ - Replace an input link with the given label and type, or simply creates - it if it does not exist. - - :note: this function should not be called directly; it acts directly on - the database. - - :param str src: the source object. - :param str label: the label of the link from src to the current Node - :param link_type: the link type, must be one of the enum values form - :class:`~aiida.common.links.LinkType` - """ - pass - - @abstractmethod - def _remove_dblink_from(self, label): - """ - Remove from the DB the input link with the given label. - - :note: this function should not be called directly; it acts directly on - the database. - - :note: No checks are done to verify that the link actually exists. 
- - :param str label: the label of the link from src to the current Node - :param link_type: the link type, must be one of the enum values form - :class:`~aiida.common.links.LinkType` - """ - pass - @abstractmethod def _add_dblink_from(self, src, link_type, label): """ diff --git a/aiida/orm/implementation/sqlalchemy/node.py b/aiida/orm/implementation/sqlalchemy/node.py index 79336ea6f4..e8ce48b88a 100644 --- a/aiida/orm/implementation/sqlalchemy/node.py +++ b/aiida/orm/implementation/sqlalchemy/node.py @@ -236,28 +236,6 @@ def _update_db_description_field(self, field_value): session.add(self._dbnode) self._increment_version_number_db() - def _replace_dblink_from(self, src, link_type, label): - from aiida.backends.sqlalchemy import get_scoped_session - session = get_scoped_session() - try: - self._add_dblink_from(src, link_type, label) - except UniquenessError: - # I have to replace the link; I do it within a transaction - try: - self._remove_dblink_from(label) - self._add_dblink_from(src, link_type, label) - session.commit() - except: - session.rollback() - raise - - def _remove_dblink_from(self, label): - from aiida.backends.sqlalchemy import get_scoped_session - session = get_scoped_session() - link = DbLink.query.filter_by(label=label).first() - if link is not None: - session.delete(link) - def _add_dblink_from(self, src, link_type, label): from aiida.backends.sqlalchemy import get_scoped_session from aiida.orm.querybuilder import QueryBuilder diff --git a/aiida/orm/importexport.py b/aiida/orm/importexport.py index 0f05c35615..d5e9595486 100644 --- a/aiida/orm/importexport.py +++ b/aiida/orm/importexport.py @@ -17,8 +17,7 @@ from six.moves import zip from six.moves.html_parser import HTMLParser from aiida.common import exceptions -from aiida.common.utils import (export_shard_uuid, get_class_string, - get_object_from_string, grouper) +from aiida.common.utils import export_shard_uuid, get_class_string, grouper from aiida.orm.computers import Computer from aiida.orm.groups import Group, GroupTypeString from aiida.orm.node import Node @@ -351,8 +350,7 @@ def import_data_dj(in_path, user_group=None, ignore_unknown_nodes=False, from aiida.common.links import LinkType from aiida.common.folders import SandboxFolder, RepositoryFolder from aiida.backends.djsite.db import models - from aiida.common.utils import get_class_string, get_object_from_string - from aiida.common.datastructures import calc_states + from aiida.common.utils import get_object_from_string import aiida.common.json as json # This is the export version expected by this function @@ -884,7 +882,6 @@ def import_data_sqla(in_path, user_group=None, ignore_unknown_nodes=False, silen from aiida.common.archive import extract_tree, extract_tar, extract_zip, extract_cif from aiida.common.folders import SandboxFolder, RepositoryFolder from aiida.common.utils import get_object_from_string - from aiida.common.datastructures import calc_states from aiida.common.links import LinkType import aiida.common.json as json diff --git a/aiida/orm/mixins.py b/aiida/orm/mixins.py index cae8174e5d..a72ef0fb27 100644 --- a/aiida/orm/mixins.py +++ b/aiida/orm/mixins.py @@ -146,21 +146,37 @@ class Sealable(object): # pylint: disable=useless-object-inheritance def _updatable_attributes(cls): # pylint: disable=no-self-argument return (cls.SEALED_KEY,) - def add_incoming(self, source, link_type, link_label): + def validate_incoming(self, source, link_type, link_label): """ - Add a link of the given type from a given node to ourself. 
+ Validate adding a link of the given type from a given node to ourself. + + Adding an incoming link to a sealed node is forbidden. :param source: the node from which the link is coming - :param link_type: the type of link - :param link_label: link label - :return: True if the proposed link is allowed, False otherwise - :raise TypeError: if `source` is not a Node instance or `link_type` is not a `LinkType` enum - :raise ValueError: if the proposed link is invalid + :param link_type: the link type + :param link_label: the link label + :raise ModificationNotAllowed: if the target node (self) is sealed + """ + if self.is_sealed: + raise ModificationNotAllowed('Cannot add a link to a sealed node') + + super(Sealable, self).validate_incoming(source, link_type=link_type, link_label=link_label) + + def validate_outgoing(self, target, link_type, link_label): + """ + Validate adding a link of the given type from ourself to a given node. + + Adding an outgoing link from a sealed node is forbidden. + + :param target: the node to which the link is going + :param link_type: the link type + :param link_label: the link label + :raise ModificationNotAllowed: if the source node (self) is sealed """ if self.is_sealed: raise ModificationNotAllowed('Cannot add a link from a sealed node') - super(Sealable, self).add_incoming(source, link_type=link_type, link_label=link_label) + super(Sealable, self).validate_outgoing(target, link_type=link_type, link_label=link_label) @property def is_sealed(self): diff --git a/aiida/orm/node/process/calculation/calcjob.py b/aiida/orm/node/process/calculation/calcjob.py index be3caee39b..4a273478c0 100644 --- a/aiida/orm/node/process/calculation/calcjob.py +++ b/aiida/orm/node/process/calculation/calcjob.py @@ -9,14 +9,10 @@ import copy import datetime -import enum -import io import six -import warnings -from aiida.common.datastructures import calc_states -from aiida.common.exceptions import ModificationNotAllowed, MissingPluginError -from aiida.common.links import LinkType +from aiida.common.datastructures import CalcJobState +from aiida.common.exceptions import MissingPluginError from aiida.common.lang import classproperty from aiida.common.utils import str_timedelta from aiida.plugins.loader import get_plugin_type_from_type_string @@ -29,7 +25,7 @@ __all__ = ('CalcJobNode', 'CalculationResultManager') SEALED_KEY = 'attributes.{}'.format(Sealable.SEALED_KEY) -CALCULATION_STATE_KEY = 'attributes.state' +CALC_JOB_STATE_KEY = 'attributes.state' SCHEDULER_STATE_KEY = 'attributes.scheduler_state' PROCESS_STATE_KEY = 'attributes.{}'.format(ProcessNode.PROCESS_STATE_KEY) EXIT_STATUS_KEY = 'attributes.{}'.format(ProcessNode.EXIT_STATUS_KEY) @@ -38,26 +34,14 @@ _input_subfolder = 'raw_input' -class CalcJobExitStatus(enum.Enum): - """ - This enumeration maps specific calculation states to an integer. This integer can - then be used to set the exit status of a CalcJobNode node. 
The values defined - here map directly on the failed calculation states, but the idea is that sub classes - of CalcJobNode can extend this enum with additional error codes - """ - FINISHED = 0 - SUBMISSIONFAILED = 100 - RETRIEVALFAILED = 200 - PARSINGFAILED = 300 - FAILED = 400 - - class CalcJobNode(CalculationNode): """ORM class for all nodes representing the execution of a CalcJob.""" # pylint: disable=abstract-method - JOB_STATE_KEY = 'state' - JOB_STATE_ATTRIBUTE_KEY = 'attributes.{}'.format(JOB_STATE_KEY) + CALC_JOB_STATE_KEY = 'state' + CALC_JOB_STATE_ATTRIBUTE_KEY = 'attributes.{}'.format(CALC_JOB_STATE_KEY) + + _cacheable = True # An optional entry point for a CalculationTools instance _tools = None @@ -93,174 +77,11 @@ def tools(self): return self._tools - def __dir__(self): - """ - Allow to list all valid attributes, adding also the use_* methods - """ - return sorted(dir(type(self)) + list(['use_{}'.format(k) for k in self._use_methods.keys()])) - - def __getattr__(self, name): - """ - Expand the methods with the use_* calls. Note that this method only gets called if 'name' - is not already defined as a method. Returning one will then automatically raise the - standard AttributeError exception. - """ - if name == '_use_methods': - raise AttributeError("'{0}' object has no attribute '{1}'".format(type(self), name)) - - class UseMethod(object): - """ - Generic class for the use_* methods. To know which use_* methods - exist, use the ``dir()`` function. To get help on a specific method, - for instance use_code, use:: - ``print use_code.__doc__`` - """ - - def __init__(self, node, actual_name, data): - self.node = node - self.actual_name = actual_name - self.data = data - - try: - self.__doc__ = data['docstring'] - except KeyError: - # Forgot to define the docstring! Use the default one - pass - - def __call__(self, parent_node, *args, **kwargs): - # Not really needed, will be checked in get_linkname but I do anyway in order to raise - # an exception as soon as possible, with the most intuitive caller function name - additional_parameter = _parse_single_arg( - function_name='use_{}'.format(self.actual_name), - additional_parameter=self.data['additional_parameter'], - args=args, kwargs=kwargs) - - # Type check - if not isinstance(parent_node, self.data['valid_types']): - if isinstance(self.data['valid_types'], collections.Iterable): - valid_types_string = ','.join([_.__name__ for _ in self.data['valid_types']]) - else: - valid_types_string = self.data['valid_types'].__name__ - - raise TypeError( - 'The given node is not of the valid type for use_{}.' - 'Valid types are: {}, while you provided {}'.format( - self.actual_name, valid_types_string, parent_node.__class__.__name__)) - - # Get actual link name - actual_linkname = self.node.get_linkname(actual_name, *args, **kwargs) - - # Here I do the real job - self.node._replace_link_from(parent_node, LinkType.INPUT_CALC, actual_linkname) - - prefix = 'use_' - valid_use_methods = ['{}{}'.format(prefix, k) for k in self._use_methods.keys()] - - if name in valid_use_methods: - actual_name = name[len(prefix):] - return UseMethod(node=self, actual_name=actual_name, data=self._use_methods[actual_name]) - else: - raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__.__name__, name)) - - @classproperty - def _use_methods(cls): - """ - Return the list of valid input nodes that can be set using the - use_* method. - - For each key KEY of the return dictionary, the 'use_KEY' method is - exposed. 
- Each value must be a dictionary, defining the following keys: - * valid_types: a class, or tuple of classes, that will be used to - validate the parameter using the isinstance() method - * additional_parameter: None, if no additional parameters can be passed - to the use_KEY method beside the node, or the name of the additional - parameter (a string) - * linkname: the name of the link to create (a string if - additional_parameter is None, or a callable if additional_parameter is - a string. The value of the additional parameter will be passed to the - callable, and it should return a string. - * docstring: a docstring for the function - - .. note:: in subclasses, always extend the parent class, do not - substitute it! - """ - from aiida.orm.data.code import Code - return { - 'code': { - 'valid_types': Code, - 'additional_parameter': None, - 'linkname': 'code', - 'docstring': 'Choose the code to use', - }, - } - - def get_linkname(self, link, *args, **kwargs): - """ - Return the linkname used for a given input link - Pass as parameter "NAME" if you would call the use_NAME method. - If the use_NAME method requires a further parameter, pass that - parameter as the second parameter. - """ - try: - data = self._use_methods[link] - except KeyError: - raise ValueError("No '{}' link is defined for this calculation".format(link)) - - # Raises if the wrong # of parameters is passed - additional_parameter = _parse_single_arg( - function_name='get_linkname', - additional_parameter=data['additional_parameter'], - args=args, kwargs=kwargs) - - if data['additional_parameter'] is not None: - # Call the callable to get the proper linkname - actual_linkname = data['linkname'](additional_parameter) - else: - actual_linkname = data['linkname'] - - return actual_linkname - - def get_code(self): - """ - Return the code for this calculation, or None if the code was not set. - """ - from aiida.orm.data.code import Code - from aiida.common.exceptions import NotExistent - - node = self.get_incoming(node_class=Code) - try: - return node.get_node_by_label(self._use_methods['code']['linkname']) - except NotExistent: - return None - - - @classproperty - def exit_status_enum(cls): - return CalcJobExitStatus - - @property - def exit_status_label(self): - """ - Return the label belonging to the exit status of the Calculation - - :returns: the exit status label - """ - try: - exit_status_enum = self.exit_status_enum(self.exit_status) - exit_status_label = exit_status_enum.name - except ValueError: - exit_status_label = 'UNKNOWN' - - return exit_status_label - - _cacheable = True - @classproperty def _updatable_attributes(cls): return super(CalcJobNode, cls)._updatable_attributes + ( 'job_id', 'scheduler_state', 'scheduler_lastchecktime', 'last_jobinfo', 'remote_workdir', 'retrieve_list', - 'retrieve_temporary_list', 'retrieve_singlefile_list', cls.JOB_STATE_KEY) + 'retrieve_temporary_list', 'retrieve_singlefile_list', cls.CALC_JOB_STATE_KEY) @classproperty def _hash_ignored_attributes(cls): @@ -277,31 +98,32 @@ def get_hash(self, ignore_errors=True, ignored_folder_content=('raw_input',), ** return super(CalcJobNode, self).get_hash( ignore_errors=ignore_errors, ignored_folder_content=ignored_folder_content, **kwargs) - @classmethod - def process(cls): - """ - Return the JobProcess class constructed based on this JobCalculatin class + def process(self): + """Return the CalcJob class that was used to create this node. 
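+
+        A hypothetical usage sketch, assuming the entry point of the original process class is still
+        installed, could look like::
+
+            process_class = node.process()
+            builder = process_class.get_builder()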
- :return: JobProcess class + :return: CalcJob class + :raises ValueError: if no process type is defined or it is an invalid process type string """ - from aiida.work.job_processes import JobProcess - return JobProcess.build(cls) + from aiida.common.exceptions import MultipleEntryPointError, MissingEntryPointError, LoadingEntryPointError + from aiida.plugins.entry_point import load_entry_point_from_string - @classmethod - def get_builder(cls): - """ - Return a JobProcessBuilder instance, tailored for this calculation class + if not self.process_type: + raise ValueError('no process type for CalcJobNode<{}>: cannot recreate process class'.format(self.pk)) - This builder is a mapping of the inputs of the CalcJobNode class, supports tab-completion, automatic - validation when settings values as well as automated docstrings for each input + try: + process_class = load_entry_point_from_string(self.process_type) + except ValueError: + raise ValueError('process type for CalcJobNode<{}> contains an invalid entry point string: {}'.format( + self.pk, self.process_type)) + except (MissingEntryPointError, MultipleEntryPointError, LoadingEntryPointError) as exception: + raise ValueError('could not process class for {} for CalcJobNode<{}>: {}'.format( + self.pk, self.process_type, exception)) - :return: JobProcessBuilder instance - """ - return cls.process().get_builder() + return process_class def get_builder_restart(self): """ - Return a JobProcessBuilder instance, tailored for this calculation instance + Return a CalcJobBuilder instance, tailored for this calculation instance This builder is a mapping of the inputs of the CalcJobNode class, supports tab-completion, automatic validation when settings values as well as automated docstrings for each input. @@ -313,18 +135,18 @@ def get_builder_restart(self): is that it serves as a starting point to, after changing one or more inputs, launch a similar calculation by using this already completed calculation as a starting point. - :return: JobProcessBuilder instance + :return: CalcJobBuilder instance """ - from aiida.work.job_processes import JobProcess from aiida.work.ports import PortNamespace + process = self.process() inputs = self.get_incoming() - options = self.get_options(only_actually_set=True) - builder = self.get_builder() + options = self.get_options() + builder = process.get_builder() for port_name, port in self.process().spec().inputs.items(): - if port_name == JobProcess.OPTIONS_INPUT_LABEL: - setattr(builder, port_name, options) + if port_name == process.spec().metadata_key: + builder.metadata.options = options elif isinstance(port, PortNamespace): namespace = port_name + '_' sub = {entry.link_label[len(namespace):]: entry.node for entry in inputs if entry.link_label.startswith(namespace)} @@ -336,79 +158,6 @@ def get_builder_restart(self): return builder - def _init_internal_params(self): - """ - Define here internal parameters that should be defined right after the __init__ - This function is actually called by the __init__ - - :note: if you inherit this function, ALWAYS remember to call super()._init_internal_params() - as the first thing in your inherited function. - """ - # By default, no output parser - self._default_parser = None - - # Set default for the link to the retrieved folder (after calc is done) - self._linkname_retrieved = 'retrieved' - - # Files in which the scheduler output and error will be stored. - # If they are identical, outputs will be joined. 
- self._SCHED_OUTPUT_FILE = '_scheduler-stdout.txt' - self._SCHED_ERROR_FILE = '_scheduler-stderr.txt' - - # Files that should be shown by default, set it to None if you do not have a default file - # Used, e.g., by 'verdi calculation inputshow/outputshow - self._DEFAULT_INPUT_FILE = None - self._DEFAULT_OUTPUT_FILE = None - - @property - def _set_defaults(self): - """ - Return the default parameters to set. - It is done as a property so that it can read the default parameters - defined in _init_internal_params. - - :note: It is a property because in this way, e.g. the - parser_name is taken from the actual subclass of calculation, - and not from the parent Calculation class - """ - parent_dict = super(CalcJobNode, self)._set_defaults - - parent_dict.update({"parser_name": self._default_parser, "_linkname_retrieved": self._linkname_retrieved}) - - return parent_dict - - def _set_internal(self, arguments, allow_hidden=False): - """ - Works as self.set(), but takes a dictionary as the 'arguments' variable, - instead of reading it from the ``kwargs``; moreover, it allows to specify - allow_hidden to True. In this case, if a a key starts with and - underscore, as for instance ``_state``, it will not call - the function ``set__state`` but rather ``_set_state``. - """ - for key, value in copy.copy(arguments).items(): - if key in self.options and value is not None: - arguments.pop(key) - self.set_option(key, value) - - super(CalcJobNode, self)._set_internal(arguments, allow_hidden=allow_hidden) - - def store(self, *args, **kwargs): - """ - Override the store() method to store also the calculation in the NEW - state as soon as this is stored for the first time. - """ - super(CalcJobNode, self).store(*args, **kwargs) - - if self.get_state() is None: - self._set_state(calc_states.NEW) - - return self - - def _add_outputs_from_cache(self, cache_node): - self._set_state(calc_states.PARSING) - super(CalcJobNode, self)._add_outputs_from_cache(cache_node=cache_node) - self._set_state(cache_node.get_state()) - def _validate(self): """ Verify if all the input nodes are present and valid. @@ -422,7 +171,7 @@ def _validate(self): if self.get_computer() is None: raise ValidationError("You did not specify a computer") - if self.get_state() not in calc_states: + if self.get_state() and self.get_state() not in CalcJobState: raise ValidationError("Calculation state '{}' is not valid".format(self.get_state())) try: @@ -443,47 +192,6 @@ def _validate(self): except (TypeError, ValueError) as exc: raise ValidationError("Invalid resources for the scheduler of the specified computer: {}".format(exc)) - if not isinstance(self.get_option('withmpi', only_actually_set=False), bool): - raise ValidationError("withmpi property must be boolean! It in instead {}" - "".format(str(type(self.get_option('withmpi'))))) - - def validate_incoming(self, source, link_type, link_label): - """ - Validate adding a link of the given type from a given node to ourself. 
- - :param source: the node from which the link is coming - :param link_type: the type of link - :param link_label: link label - :raise TypeError: if `source` is not a Node instance or `link_type` is not a `LinkType` enum - :raise ValueError: if the proposed link is invalid - """ - super(CalcJobNode, self).validate_incoming(source, link_type, link_label) - - state = self.get_state() - valid_states = [calc_states.NEW] - if state not in valid_states: - raise ModificationNotAllowed('invalid link: CalcJobNode has to have state in {}, but is {}'.format( - valid_states, state)) - - def validate_outgoing(self, target, link_type, link_label): - """ - Validate adding a link of the given type from ourself to a given node. - - :param target: the node to which the link is goming - :param link_type: the type of link - :param link_label: link label - :raise TypeError: if `target` is not a Node instance or `link_type` is not a `LinkType` enum - :raise ValueError: if the proposed link is invalid - """ - super(CalcJobNode, self).validate_outgoing(target, link_type, link_label) - - state = self.get_state() - valid_states = [calc_states.SUBMITTING, calc_states.RETRIEVING, calc_states.PARSING] - - if state not in valid_states: - raise ModificationNotAllowed('invalid link: CalcJobNode has to have state in {}, but is {}'.format( - valid_states, state)) - def _store_raw_input_folder(self, folder_path): """ Copy the content of the folder internally, in a subfolder called @@ -492,12 +200,6 @@ def _store_raw_input_folder(self, folder_path): :param folder_path: the path to the folder from which the content should be taken """ - # This function can be called only if the state is TOSUBMIT - if self.get_state() != calc_states.TOSUBMIT: - raise ModificationNotAllowed("The raw input folder can be stored only if the " - "state is TOSUBMIT, it is instead {}".format(self.get_state())) - - # get subfolder and replace with copy _raw_input_folder = self.folder.get_subfolder(_input_subfolder, create=True) _raw_input_folder.replace_with_folder(folder_path, move=False, overwrite=True) @@ -517,161 +219,22 @@ def _raw_input_folder(self): else: raise NotExistent("_raw_input_folder not created yet") - @classproperty - def options(cls): - from aiida import orm - - return { - 'resources': { - 'attribute_key': 'jobresource_params', - 'valid_type': dict, - 'default': {}, - 'help': 'Set the dictionary of resources to be used by the scheduler plugin, like the number of nodes, ' - 'cpus etc. This dictionary is scheduler-plugin dependent. Look at the documentation of the ' - 'scheduler for more details.' - }, - 'max_wallclock_seconds': { - 'attribute_key': 'max_wallclock_seconds', - 'valid_type': int, - 'non_db': True, - 'required': False, - 'help': 'Set the wallclock in seconds asked to the scheduler', - }, - 'custom_scheduler_commands': { - 'attribute_key': 'custom_scheduler_commands', - 'valid_type': six.string_types, - 'non_db': True, - 'required': False, - 'default': '', - 'help': 'Set a (possibly multiline) string with the commands that the user wants to manually set for the ' - 'scheduler. 
The difference of this option with respect to the `prepend_text` is the position in ' - 'the scheduler submission file where such text is inserted: with this option, the string is ' - 'inserted before any non-scheduler command', - }, - 'queue_name': { - 'attribute_key': 'queue_name', - 'valid_type': six.string_types, - 'non_db': True, - 'required': False, - 'help': 'Set the name of the queue on the remote computer', - }, - 'account': { - 'attribute_key': 'account', - 'valid_type': six.string_types, - 'non_db': True, - 'required': False, - 'help': 'Set the account to use in for the queue on the remote computer', - }, - 'qos': { - 'attribute_key': 'qos', - 'valid_type': six.string_types, - 'non_db': True, - 'required': False, - 'help': 'Set the quality of service to use in for the queue on the remote computer', - }, - 'computer': { - 'attribute_key': None, - 'valid_type': orm.Computer, - 'non_db': True, - 'required': False, - 'help': 'Set the computer to be used by the calculation', - }, - 'withmpi': { - 'attribute_key': 'withmpi', - 'valid_type': bool, - 'non_db': True, - 'required': False, - 'default': True, - 'help': 'Set the calculation to use mpi', - }, - 'mpirun_extra_params': { - 'attribute_key': 'mpirun_extra_params', - 'valid_type': (list, tuple), - 'non_db': True, - 'required': False, - 'default': [], - 'help': 'Set the extra params to pass to the mpirun (or equivalent) command after the one provided in ' - 'computer.mpirun_command. Example: mpirun -np 8 extra_params[0] extra_params[1] ... exec.x', - }, - 'import_sys_environment': { - 'attribute_key': 'import_sys_environment', - 'valid_type': bool, - 'non_db': True, - 'required': False, - 'default': True, - 'help': 'If set to true, the submission script will load the system environment variables', - }, - 'environment_variables': { - 'attribute_key': 'custom_environment_variables', - 'valid_type': dict, - 'non_db': True, - 'required': False, - 'default': {}, - 'help': 'Set a dictionary of custom environment variables for this calculation', - }, - 'priority': { - 'attribute_key': 'priority', - 'valid_type': six.string_types[0], - 'non_db': True, - 'required': False, - 'help': 'Set the priority of the job to be queued', - }, - 'max_memory_kb': { - 'attribute_key': 'max_memory_kb', - 'valid_type': int, - 'non_db': True, - 'required': False, - 'help': 'Set the maximum memory (in KiloBytes) to be asked to the scheduler', - }, - 'prepend_text': { - 'attribute_key': 'prepend_text', - 'valid_type': six.string_types[0], - 'non_db': True, - 'required': False, - 'default': '', - 'help': 'Set the calculation-specific prepend text, which is going to be prepended in the scheduler-job ' - 'script, just before the code execution', - }, - 'append_text': { - 'attribute_key': 'append_text', - 'valid_type': six.string_types[0], - 'non_db': True, - 'required': False, - 'default': '', - 'help': 'Set the calculation-specific append text, which is going to be appended in the scheduler-job ' - 'script, just after the code execution', - }, - 'parser_name': { - 'attribute_key': 'parser', - 'valid_type': six.string_types[0], - 'non_db': True, - 'required': False, - 'help': 'Set a string for the output parser. 
Can be None if no output plugin is available or needed', - } - } + @property + def options(self): + try: + return self.process().spec().inputs._ports['metadata']['options'] + except ValueError: + return {} - def get_option(self, name, only_actually_set=False): + def get_option(self, name): """ Retun the value of an option that was set for this CalcJobNode :param name: the option name - :param only_actually_set: when False will return the default value even when option had not been explicitly set :return: the option value or None :raises: ValueError for unknown option """ - if name not in self.options: - raise ValueError('unknown option {}'.format(name)) - - option = self.options[name] - attribute_key = option['attribute_key'] - default = option.get('default', None) - - attribute = self.get_attr(attribute_key, None) - - if attribute is None and only_actually_set is False and default is not None: - attribute = default - - return attribute + return self.get_attr(name, None) def set_option(self, name, value): """ @@ -682,31 +245,17 @@ def set_option(self, name, value): :raises: ValueError for unknown option :raises: TypeError for values with invalid type """ - if name not in self.options: - raise ValueError('unknown option {}'.format(name)) - - option = self.options[name] - valid_type = option['valid_type'] - attribute_key = option['attribute_key'] - - if not isinstance(value, valid_type): - raise TypeError('value is of invalid type {}'.format(type(value))) + self._set_attr(name, value) - if name == 'computer': - self.set_computer(value) - else: - self._set_attr(attribute_key, value) - - def get_options(self, only_actually_set=False): + def get_options(self): """ Return the dictionary of options set for this CalcJobNode - :param only_actually_set: when False will return the default value even when option had not been explicitly set :return: dictionary of the options and their values """ options = {} for name in self.options.keys(): - value = self.get_option(name, only_actually_set=only_actually_set) + value = self.get_option(name) if value is not None: options[name] = value @@ -721,425 +270,17 @@ def set_options(self, options): for name, value in options.items(): self.set_option(name, value) - def set_queue_name(self, val): - """ - Set the name of the queue on the remote computer. - - :param str val: the queue name - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - if val is not None: - self._set_attr('queue_name', six.text_type(val)) - - def set_account(self, val): - """ - Set the account on the remote computer. - - :param str val: the account name - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - if val is not None: - self._set_attr('account', six.text_type(val)) - - def set_qos(self, val): - """ - Set the quality of service on the remote computer. 
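With the `options` property now delegating to the `metadata.options` namespace of the process spec, and `get_option`/`set_option` mapping one-to-one onto node attributes, option handling reduces to the following sketch (the values are illustrative):

    node.set_option('resources', {'num_machines': 1})   # stored directly under the 'resources' attribute
    node.set_options({'withmpi': False, 'max_wallclock_seconds': 1800})
    assert node.get_option('withmpi') is False
    print(node.get_options())          # only the options that were actually set on this node
    print(list(node.options.keys()))   # the option ports exposed by the CalcJob process spec
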
- - :param str val: the quality of service - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - if val is not None: - self._set_attr('qos', six.text_type(val)) - - def set_import_sys_environment(self, val): - """ - If set to true, the submission script will load the system - environment variables. - - :param bool val: load the environment if True - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - self._set_attr('import_sys_environment', bool(val)) - - def get_import_sys_environment(self): - """ - To check if it's loading the system environment - on the submission script. - - :return: a boolean. If True the system environment will be load. - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - return self.get_attr('import_sys_environment', True) - - def set_environment_variables(self, env_vars_dict): - """ - Set a dictionary of custom environment variables for this calculation. - - Both keys and values must be strings. - - In the remote-computer submission script, it's going to export - variables as ``export 'keys'='values'`` - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - if not isinstance(env_vars_dict, dict): - raise ValueError("You have to pass a dictionary to set_environment_variables") - - for k, v in env_vars_dict.items(): - if not isinstance(k, six.string_types) or not isinstance(v, six.string_types): - raise ValueError("Both the keys and the values of the " - "dictionary passed to set_environment_variables must be " - "strings.") - - return self._set_attr('custom_environment_variables', env_vars_dict) - - def get_environment_variables(self): - """ - Return a dictionary of the environment variables that are set - for this calculation. - - Return an empty dictionary if no special environment variables have - to be set for this calculation. - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - return self.get_attr('custom_environment_variables', {}) - - def set_priority(self, val): - """ - Set the priority of the job to be queued. - - :param val: the values of priority as accepted by the cluster scheduler. - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - self._set_attr('priority', six.text_type(val)) - - def set_max_memory_kb(self, val): - """ - Set the maximum memory (in KiloBytes) to be asked to the scheduler. - - :param val: an integer. 
Default=None - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - self._set_attr('max_memory_kb', int(val)) - - def get_max_memory_kb(self): - """ - Get the memory (in KiloBytes) requested to the scheduler. - - :return: an integer - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - return self.get_attr('max_memory_kb', None) - - def set_max_wallclock_seconds(self, val): - """ - Set the wallclock in seconds asked to the scheduler. - - :param val: An integer. Default=None - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - self._set_attr('max_wallclock_seconds', int(val)) - - def get_max_wallclock_seconds(self): - """ - Get the max wallclock time in seconds requested to the scheduler. - - :return: an integer - :rtype: int - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - return self.get_attr('max_wallclock_seconds', None) - - def set_resources(self, resources_dict): - """ - Set the dictionary of resources to be used by the scheduler plugin, - like the number of nodes, cpus, ... - This dictionary is scheduler-plugin dependent. Look at the documentation - of the scheduler. - (scheduler type can be found with - calc.get_computer().get_scheduler_type() ) - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - # Note: for the time being, resources are only validated during the - # 'store' because here we are not sure that a Computer has been set - # yet (in particular, if both computer and resources are set together - # using the .set() method). - self._set_attr('jobresource_params', resources_dict) - - def set_withmpi(self, val): - """ - Set the calculation to use mpi. - - :param val: A boolean. Default=True - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - self._set_attr('withmpi', val) - - def get_withmpi(self): - """ - Get whether the job is set with mpi execution. - - :return: a boolean. Default=True. - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - return self.get_attr('withmpi', True) - - def get_resources(self, full=False): - """ - Returns the dictionary of the job resources set. - - :param full: if True, also add the default values, e.g. 
- ``default_mpiprocs_per_machine`` - - :return: a dictionary - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - resources_dict = self.get_attr('jobresource_params', {}) - - if full: - computer = self.get_computer() - def_cpus_machine = computer.get_default_mpiprocs_per_machine() - if def_cpus_machine is not None: - resources_dict['default_mpiprocs_per_machine'] = def_cpus_machine - - return resources_dict - - def get_queue_name(self): - """ - Get the name of the queue on cluster. - - :return: a string or None. - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - return self.get_attr('queue_name', None) - - def get_account(self): - """ - Get the account on the cluster. - - :return: a string or None. - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - return self.get_attr('account', None) - - def get_qos(self): - """ - Get the quality of service on the cluster. - - :return: a string or None. - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - return self.get_attr('qos', None) - - def get_priority(self): - """ - Get the priority, if set, of the job on the cluster. - - :return: a string or None - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - return self.get_attr('priority', None) - - def get_prepend_text(self): - """ - Get the calculation-specific prepend text, - which is going to be prepended in the scheduler-job script, just before - the code execution. - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - return self.get_attr("prepend_text", "") - - def set_prepend_text(self, val): - """ - Set the calculation-specific prepend text, - which is going to be prepended in the scheduler-job script, just before - the code execution. - - See also ``set_custom_scheduler_commands`` - - :param val: a (possibly multiline) string - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - self._set_attr("prepend_text", six.text_type(val)) - - def get_append_text(self): - """ - Get the calculation-specific append text, - which is going to be appended in the scheduler-job script, just after - the code execution. 
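Since all of the removed per-option accessors funnel into the same attribute store, migrating existing calls is mechanical; a sketch of the mapping, with placeholder values:

    # previously (deprecated accessors, removed in this commit):
    #     calc.set_prepend_text('module load mycode')
    #     calc.set_custom_scheduler_commands('#SBATCH --constraint=gpu')
    # now:
    node.set_option('prepend_text', 'module load mycode')
    node.set_option('custom_scheduler_commands', '#SBATCH --constraint=gpu')
    node.get_option('append_text')     # replaces get_append_text()
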
- """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - return self.get_attr("append_text", "") - - def set_append_text(self, val): - """ - Set the calculation-specific append text, - which is going to be appended in the scheduler-job script, just after - the code execution. - - :param val: a (possibly multiline) string - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - self._set_attr("append_text", six.text_type(val)) - - def set_custom_scheduler_commands(self, val): - """ - Set a (possibly multiline) string with the commands that the user - wants to manually set for the scheduler. - - The difference of this method with respect to the set_prepend_text - is the position in the scheduler submission file where such text is - inserted: with this method, the string is inserted before any - non-scheduler command. - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - self._set_attr("custom_scheduler_commands", six.text_type(val)) - - def get_custom_scheduler_commands(self): - """ - Return a (possibly multiline) string with the commands that the user - wants to manually set for the scheduler. - See also the documentation of the corresponding - ``set_`` method. - - :return: the custom scheduler command, or an empty string if no - custom command was defined. - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - return self.get_attr("custom_scheduler_commands", "") - - def get_mpirun_extra_params(self): - """ - Return a list of strings, that are the extra params to pass to the - mpirun (or equivalent) command after the one provided in - computer.mpirun_command. - Example: mpirun -np 8 extra_params[0] extra_params[1] ... exec.x - - Return an empty list if no parameters have been defined. - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - return self.get_attr("mpirun_extra_params", []) - - def set_mpirun_extra_params(self, extra_params): - """ - Set the extra params to pass to the - mpirun (or equivalent) command after the one provided in - computer.mpirun_command. - Example: mpirun -np 8 extra_params[0] extra_params[1] ... 
exec.x - - :param extra_params: must be a list of strings, one for each - extra parameter - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - if extra_params is None: - try: - self._del_attr("mpirun_extra_params") - except AttributeError: - # it was not saved, yet - pass - return - - if not isinstance(extra_params, (list, tuple)): - raise ValueError("You must pass a list of strings to set_mpirun_extra_params") - for param in extra_params: - if not isinstance(param, six.string_types): - raise ValueError("You must pass a list of strings to set_mpirun_extra_params") - - self._set_attr("mpirun_extra_params", list(extra_params)) - - def set_parser_name(self, parser): - """ - Set a string for the output parser - Can be None if no output plugin is available or needed. - - :param parser: a string identifying the module of the parser. - Such module must be located within the folder 'aiida/parsers/plugins' - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - self._set_attr('parser', parser) - - def get_parser_name(self): - """ - Return a string locating the module that contains - the output parser of this calculation, that will be searched - in the 'aiida/parsers/plugins' directory. None if no parser is needed/set. - - :return: a string. - """ - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('explicit option getter/setter methods are deprecated, use get_option and set_option', - DeprecationWarning) - - return self.get_attr('parser', None) - def _set_state(self, state): """ Set the state of the calculation job. - :param state: a string with the state from ``aiida.common.datastructures.calc_states``. + :param state: a string with the state from ``aiida.common.datastructures.CalcJobState``. :raise: ValueError if state is invalid """ - if state not in calc_states: - raise ValueError("'{}' is not a valid calculation status".format(state)) + if state not in CalcJobState: + raise ValueError('{} is not a valid CalcJobState'.format(state)) - self._set_attr(self.JOB_STATE_KEY, state) + self._set_attr(self.CALC_JOB_STATE_KEY, state.value) def get_state(self): """ @@ -1147,61 +288,14 @@ def get_state(self): :return: the calculation job state """ - if not self.is_stored: - return calc_states.NEW - - return self.get_attr(self.JOB_STATE_KEY, None) - - def _is_new(self): - """ - Get whether the calculation is in the NEW status. - - :return: a boolean - """ - return self.get_state() in [calc_states.NEW, None] - - def _is_running(self): - """ - Get whether the calculation is in a running state, - i.e. one of TOSUBMIT, SUBMITTING, WITHSCHEDULER, - COMPUTED, RETRIEVING or PARSING. 
- - :return: a boolean - """ - return self.get_state() in [ - calc_states.TOSUBMIT, calc_states.SUBMITTING, calc_states.WITHSCHEDULER, calc_states.COMPUTED, - calc_states.RETRIEVING, calc_states.PARSING - ] + state = self.get_attr(self.CALC_JOB_STATE_KEY, None) - @property - def finished_ok(self): - """ - Returns whether the Calculation has finished successfully, which means that it - terminated nominally and had a zero exit status indicating a successful execution - - :return: True if the calculation has finished successfully, False otherwise - :rtype: bool - """ - return self.get_state() in [calc_states.FINISHED] - - @property - def failed(self): - """ - Returns whether the Calculation has failed, which means that it terminated nominally - but it had a non-zero exit status + if state: + return CalcJobState(state) - :return: True if the calculation has failed, False otherwise - :rtype: bool - """ - return self.get_state() in [ - calc_states.SUBMISSIONFAILED, calc_states.RETRIEVALFAILED, calc_states.PARSINGFAILED, calc_states.FAILED - ] + return None def _set_remote_workdir(self, remote_workdir): - if self.get_state() != calc_states.SUBMITTING: - raise ModificationNotAllowed("Cannot set the remote workdir if you are not " - "submitting the calculation (current state is " - "{})".format(self.get_state())) self._set_attr('remote_workdir', remote_workdir) def _get_remote_workdir(self): @@ -1214,15 +308,6 @@ def _get_remote_workdir(self): return self.get_attr('remote_workdir', None) def _set_retrieve_list(self, retrieve_list): - if self.get_state() not in (calc_states.TOSUBMIT, calc_states.NEW): - raise ModificationNotAllowed("Cannot set the retrieve_list for a calculation that is neither " - "NEW nor TOSUBMIT (current state is {})".format(self.get_state())) - - # accept format of: [ 'remotename', - # ['remotepath','localpath',0] ] - # where the last number is used - # to decide the localname, see CalcInfo or execmanager - if not (isinstance(retrieve_list, (tuple, list))): raise ValueError("You should pass a list/tuple") for item in retrieve_list: @@ -1251,10 +336,6 @@ def _set_retrieve_temporary_list(self, retrieve_temporary_list): Set the list of paths that are to retrieved for parsing and be deleted as soon as the parsing has been completed. 
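A short sketch of how the stripped-down job state is stored and read back, assuming `SUBMITTING` is one of the `CalcJobState` members (the old `calc_states` values suggest the member names):

    from aiida.common.datastructures import CalcJobState

    node._set_state(CalcJobState.SUBMITTING)   # persists the enum value under CALC_JOB_STATE_KEY
    assert node.get_state() is CalcJobState.SUBMITTING
    # a node whose state attribute was never set simply returns None
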
""" - if self.get_state() not in (calc_states.TOSUBMIT, calc_states.NEW): - raise ModificationNotAllowed('Cannot set the retrieve_temporary_list for a calculation that is neither ' - 'NEW nor TOSUBMIT (current state is {})'.format(self.get_state())) - if not (isinstance(retrieve_temporary_list, (tuple, list))): raise ValueError('You should pass a list/tuple') @@ -1283,10 +364,6 @@ def _set_retrieve_singlefile_list(self, retrieve_singlefile_list): """ Set the list of information for the retrieval of singlefiles """ - if self.get_state() not in (calc_states.TOSUBMIT, calc_states.NEW): - raise ModificationNotAllowed("Cannot set the retrieve_singlefile_list for a calculation that is neither " - "NEW nor TOSUBMIT (current state is {})".format(self.get_state())) - if not isinstance(retrieve_singlefile_list, (tuple, list)): raise ValueError("You have to pass a list (or tuple) of lists of strings as retrieve_singlefile_list") for j in retrieve_singlefile_list: @@ -1310,11 +387,6 @@ def _set_job_id(self, job_id): """ Always set as a string """ - if self.get_state() != calc_states.SUBMITTING: - raise ModificationNotAllowed("Cannot set the job id if you are not " - "submitting the calculation (current state is " - "{})".format(self.get_state())) - return self._set_attr('job_id', six.text_type(job_id)) def get_job_id(self): @@ -1326,57 +398,188 @@ def get_job_id(self): return self.get_attr('job_id', None) def _set_scheduler_state(self, state): - # I don't do any test here on the possible valid values, - # I just convert it to a string from aiida.common import timezone + from aiida.scheduler.datastructures import JobState + + if not isinstance(state, JobState): + raise ValueError('scheduler state should be an instance of JobState, got: {}'.format()) - self._set_attr('scheduler_state', six.text_type(state)) + self._set_attr('scheduler_state', state.value) self._set_attr('scheduler_lastchecktime', timezone.now()) def get_scheduler_state(self): """ Return the status of the calculation according to the cluster scheduler. - :return: a string. + :return: a JobState enum instance. + """ + from aiida.scheduler.datastructures import JobState + + state = self.get_attr('scheduler_state', None) + + if state is None: + return state + + return JobState(state) + + def _get_scheduler_lastchecktime(self): + """ + Return the time of the last update of the scheduler state by the daemon, + or None if it was never set. + + :return: a datetime object. + """ + return self.get_attr('scheduler_lastchecktime', None) + + def _set_last_jobinfo(self, last_jobinfo): + self._set_attr('last_jobinfo', last_jobinfo.serialize()) + + def _get_last_jobinfo(self): + """ + Get the last information asked to the scheduler + about the status of the job. + + :return: a JobInfo object (that closely resembles a dictionary) or None. + """ + from aiida.scheduler.datastructures import JobInfo + + last_jobinfo_serialized = self.get_attr('last_jobinfo', None) + if last_jobinfo_serialized is not None: + jobinfo = JobInfo() + jobinfo.load_from_serialized(last_jobinfo_serialized) + return jobinfo + else: + return None + + def _get_authinfo(self): + from aiida.common.exceptions import NotExistent + from aiida.orm.authinfos import AuthInfo + + computer = self.get_computer() + if computer is None: + raise NotExistent("No computer has been set for this calculation") + + return AuthInfo.from_backend_entity(self.backend.authinfos.get(computer=computer, user=self.get_user())) + + def _get_transport(self): + """ + Return the transport for this calculation. 
+ """ + return self._get_authinfo().get_transport() + + def get_parserclass(self): + """ + Return the output parser object for this calculation, or None + if no parser is set. + + :return: a Parser class. + :raise: MissingPluginError from ParserFactory no plugin is found. + """ + from aiida.parsers import ParserFactory + + parser_name = self.get_option('parser_name') + + if parser_name is not None: + return ParserFactory(parser_name) + else: + return None + + @property + def link_label_retrieved(self): + """Return the link label used for the retrieved FolderData node.""" + return 'retrieved' + + def get_retrieved_node(self): + """Return the retrieved data folde. + + :return: the retrieved FolderData node + :raise MultipleObjectsError: if no or more than one retrieved node is found. + """ + from aiida.orm.data.folder import FolderData + return self.get_outgoing(node_class=FolderData, link_label_filter=self.link_label_retrieved).one().node + + @property + def res(self): + """ + To be used to get direct access to the parsed parameters. + + :return: an instance of the CalculationResultManager. + + :note: a practical example on how it is meant to be used: let's say that there is a key 'energy' + in the dictionary of the parsed results which contains a list of floats. + The command `calc.res.energy` will return such a list. + """ + return CalculationResultManager(self) + + def get_scheduler_output(self): + """ + Return the output of the scheduler output (a string) if the calculation + has finished, and output node is present, and the output of the + scheduler was retrieved. + + Return None otherwise. + """ + from aiida.common.exceptions import NotExistent + + filename = self.get_option('scheduler_stdout') + + # Shortcut if no error file is set + if filename is None: + return None + + retrieved_node = self.get_retrieved_node() + if retrieved_node is None: + return None + + try: + outfile_content = retrieved_node.get_file_content(filename) + except NotExistent: + # Return None if no file is found + return None + + return outfile_content + + def get_scheduler_error(self): + """ + Return the output of the scheduler error (a string) if the calculation + has finished, and output node is present, and the output of the + scheduler was retrieved. + + Return None otherwise. """ - return self.get_attr('scheduler_state', None) + from aiida.common.exceptions import NotExistent - def _get_scheduler_lastchecktime(self): - """ - Return the time of the last update of the scheduler state by the daemon, - or None if it was never set. + filename = self.get_option('scheduler_stderr') - :return: a datetime object. - """ - return self.get_attr('scheduler_lastchecktime', None) + # Shortcut if no error file is set + if filename is None: + return None - def _set_last_jobinfo(self, last_jobinfo): + retrieved_node = self.get_retrieved_node() + if retrieved_node is None: + return None - self._set_attr('last_jobinfo', last_jobinfo.serialize()) + try: + errfile_content = retrieved_node.get_file_content(filename) + except (NotExistent): + # Return None if no file is found + return None - def _get_last_jobinfo(self): - """ - Get the last information asked to the scheduler - about the status of the job. + return errfile_content - :return: a JobInfo object (that closely resembles a dictionary) or None. 
+ def get_desc(self): """ - from aiida.scheduler.datastructures import JobInfo - - last_jobinfo_serialized = self.get_attr('last_jobinfo', None) - if last_jobinfo_serialized is not None: - jobinfo = JobInfo() - jobinfo.load_from_serialized(last_jobinfo_serialized) - return jobinfo - else: - return None + Returns a string with infos retrieved from a CalcJobNode node's + properties. + """ + return self.get_state() projection_map = { 'pk': ('calculation', 'id'), 'ctime': ('calculation', 'ctime'), 'mtime': ('calculation', 'mtime'), 'scheduler_state': ('calculation', SCHEDULER_STATE_KEY), - 'calculation_state': ('calculation', CALCULATION_STATE_KEY), + 'calc_job_state': ('calculation', CALC_JOB_STATE_KEY), 'process_state': ('calculation', PROCESS_STATE_KEY), 'exit_status': ('calculation', EXIT_STATUS_KEY), 'sealed': ('calculation', SEALED_KEY), @@ -1390,7 +593,7 @@ def _get_last_jobinfo(self): compound_projection_map = { 'state': ('calculation', (PROCESS_STATE_KEY, EXIT_STATUS_KEY)), - 'job_state': ('calculation', (CALCULATION_STATE_KEY, SCHEDULER_STATE_KEY)) + 'job_state': ('calculation', (CALC_JOB_STATE_KEY, SCHEDULER_STATE_KEY)) } @classmethod @@ -1445,7 +648,7 @@ def _list_calculations(cls, 'ctime': 'Creation', 'mtime': 'Modification', 'job_state': 'Job state', - 'calculation_state': 'Calculation state', + 'calc_job_state': 'Calculation job state', 'scheduler_state': 'Scheduler state', 'computer': 'Computer', 'type': 'Type', @@ -1460,7 +663,7 @@ def _list_calculations(cls, # Let's check the states: if states: for state in states: - if state not in calc_states: + if state not in CalcJobState: return "Invalid state provided: {}.".format(state) # Let's check if there is something to order_by: @@ -1491,7 +694,7 @@ def _list_calculations(cls, # filter for states: if states: - calculation_filters['attributes.{}'.format(cls.JOB_STATE_KEY)] = {'in': states} + calculation_filters['attributes.{}'.format(cls.CALC_JOB_STATE_KEY)] = {'in': states} # Filter on the users, if not all users if not all_users: @@ -1633,12 +836,6 @@ def _get_calculation_info_row(cls, res, projections, times_since=None): sealed = 'True' if d['calculation'][SEALED_KEY] == 1 else 'False' d['calculation'][SEALED_KEY] = sealed - try: - if d['calculation'][CALCULATION_STATE_KEY] == calc_states.IMPORTED: - d['calculation'][CALCULATION_STATE_KEY] = d['calculation']['attributes.state'] or "UNKNOWN" - except KeyError: - pass - result = [] for projection in projections: @@ -1668,7 +865,7 @@ def _get_all_with_state(cls, Issue a warning if the state is not in the list of valid states. :param str state: The state to be used to filter (should be a string among - those defined in aiida.common.datastructures.calc_states) + those defined in aiida.common.datastructures.CalcJobState) :param computer: a Django DbComputer entry, or a Computer object, of a computer in the DbComputer table. A string for the hostname is also valid. @@ -1686,12 +883,12 @@ def _get_all_with_state(cls, :return: a list of calculation objects matching the filters. """ - # I assume that calc_states are strings. If this changes in the future, + # I assume that CalcJobState are strings. If this changes in the future, # update the filter below from dbattributes__tval to the correct field. 
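Because the granular job state is now a plain attribute, it can be filtered on directly with the `QueryBuilder`, which is exactly what `_list_calculations` does above. A standalone sketch, assuming `WITHSCHEDULER` is a `CalcJobState` member and that `CalcJobNode` is importable from the module path shown in this diff:

    from aiida.common.datastructures import CalcJobState
    from aiida.orm.node.process.calculation.calcjob import CalcJobNode
    from aiida.orm.querybuilder import QueryBuilder

    qb = QueryBuilder()
    qb.append(CalcJobNode, filters={
        'attributes.{}'.format(CalcJobNode.CALC_JOB_STATE_KEY): {'in': [CalcJobState.WITHSCHEDULER.value]},
    })
    waiting = qb.all()   # all calculation jobs currently with the scheduler
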
from aiida.orm.computers import Computer from aiida.orm.querybuilder import QueryBuilder - if state not in calc_states: + if state not in CalcJobState: cls._logger.warning("querying for calculation state='{}', but it " "is not a valid calculation state".format(state)) @@ -1740,634 +937,6 @@ def _get_all_with_state(cls, returnresult = next(zip(*returnresult)) return returnresult - def _prepare_for_submission(self, tempfolder, inputdict): - """ - This is the routine to be called when you want to create - the input files and related stuff with a plugin. - - Args: - tempfolder: a aiida.common.folders.Folder subclass where - the plugin should put all its files. - inputdict: A dictionary where - each key is an input link name and each value an AiiDA - node (with the Code!). - The advantage of having this explicitly passed is that this - allows to choose outside which nodes to use, and whether to - use also unstored nodes, e.g. in a test_submit phase. - - TODO: document what it has to return (probably a CalcInfo object) - and what is the behavior on the tempfolder - """ - raise NotImplementedError - - def _get_authinfo(self): - from aiida.common.exceptions import NotExistent - from aiida.orm.authinfos import AuthInfo - - computer = self.get_computer() - if computer is None: - raise NotExistent("No computer has been set for this calculation") - - return AuthInfo.from_backend_entity(self.backend.authinfos.get(computer=computer, user=self.get_user())) - - def _get_transport(self): - """ - Return the transport for this calculation. - """ - return self._get_authinfo().get_transport() - - def submit(self): - """ - Creates a ContinueCalcJob and submits it with the current calculation node as the database - record. This will ensure that even when this legacy entry point is called, that the calculation is - taken through the Process layer. Preferably this legacy method should not be used at all but rather - a JobProcess should be used. - """ - import warnings - from aiida.work.job_processes import ContinueCalcJob - from aiida.work.launch import submit - from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin - warnings.warn('directly creating and submitting calculations is deprecated, use the {}\nSee:{}'.format( - 'ProcessBuilder', DEPRECATION_DOCS_URL), DeprecationWarning) - - submit(ContinueCalcJob, _calc=self) - - def get_parserclass(self): - """ - Return the output parser object for this calculation, or None - if no parser is set. - - :return: a Parser class. - :raise: MissingPluginError from ParserFactory no plugin is found. - """ - from aiida.parsers import ParserFactory - - parser_name = self.get_option('parser_name') - - if parser_name is not None: - return ParserFactory(parser_name) - else: - return None - - def _set_linkname_retrieved(self, linkname): - """ - Set the linkname of the retrieved data folder object. - - :param linkname: a string. - """ - self._set_attr('linkname_retrieved', linkname) - - def _get_linkname_retrieved(self): - """ - Get the linkname of the retrieved data folder object. - - :return: a string - """ - return self.get_attr('linkname_retrieved') - - def get_retrieved_node(self): - """ - Return the retrieved data folder, if present. - - :return: the retrieved data folder object, or None if no such output - node is found. - - :raise MultipleObjectsError: if more than one output node is found. 
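The removal of the legacy `submit` method means a calculation can no longer be driven through the node itself; the supported route is the process builder, roughly as sketched below, where `MyCalcJob` stands in for an actual `CalcJob` plugin class and the inputs are placeholders:

    from aiida.work.launch import submit

    builder = MyCalcJob.get_builder()   # builder exposed by the CalcJob process class
    builder.code = code                 # a Code node, exactly as with the old 'code' input link
    builder.metadata.options = {'resources': {'num_machines': 1}, 'withmpi': False}
    submit(builder)
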
- """ - from aiida.common.exceptions import MultipleObjectsError - from aiida.orm.data.folder import FolderData - - outputs = self.get_outgoing() - - retrieved_node = None - retrieved_linkname = self._get_linkname_retrieved() - - for entry in outputs: - if entry.link_label == retrieved_linkname: - if retrieved_node is None: - retrieved_node = entry.node - else: - raise MultipleObjectsError("More than one output node " - "with label '{}' for calc with pk= {}".format( - retrieved_linkname, self.pk)) - - if retrieved_node is None: - return None - - if not isinstance(retrieved_node, FolderData): - raise TypeError("The retrieved node of calc with pk= {} is not of type FolderData".format(self.pk)) - - return retrieved_node - - def kill(self): - """ - Kill a calculation on the cluster. - - Can only be called if the calculation is in status WITHSCHEDULER. - - The command tries to run the kill command as provided by the scheduler, - and raises an exception is something goes wrong. - No changes of calculation status are done (they will be done later by - the calculation manager). - - .. Note: Deprecated - """ - raise NotImplementedError("deprecated method: to kill a calculation go through 'verdi calculation kill'") - - def _presubmit(self, folder): - """ - Prepares the calculation folder with all inputs, ready to be copied to the cluster. - - :param folder: a SandboxFolder, empty in input, that will be filled with - calculation input files and the scheduling script. - - :return calcinfo: the CalcInfo object containing the information - needed by the daemon to handle operations. - """ - import os - - from six.moves import StringIO as StringIO - - from aiida.common.exceptions import (NotExistent, PluginInternalError, ValidationError) - from aiida.scheduler.datastructures import JobTemplate - from aiida.common.utils import validate_list_of_string_tuples - from aiida.orm import DataFactory - from aiida.common.datastructures import CodeInfo, code_run_modes - from aiida.orm.code import Code - from aiida.orm.utils import load_node, CalculationFactory - import aiida.common.json as json - - computer = self.get_computer() - inputs = self.get_incoming(link_type=LinkType.INPUT_CALC) - - codes = [_ for _ in inputs.all_nodes() if isinstance(_, Code)] - - inputdict = {entry.link_label: entry.node for entry in inputs} - - # THIS IS A MASSIVE HACK: the `_prepare_for_submission` is only implemented for the sub class but this instance - # will be a plain `CalcJobNode`, so we have to recreate an instance of the actual sub class to call the method. 
- from importlib import import_module - from aiida.plugins.entry_point import is_valid_entry_point_string, load_entry_point_from_string - - if is_valid_entry_point_string(self.process_type): - calc_class = load_entry_point_from_string(self.process_type) - else: - module_name, class_name = self.process_type.rsplit('.', 1) - module = import_module(module_name) - calc_class = getattr(module, class_name) - - calc_instance = calc_class(dbnode=self._dbnode) - - calcinfo = calc_instance._prepare_for_submission(folder, inputdict) - s = computer.get_scheduler() - - for code in codes: - if code.is_local(): - if code.get_local_executable() in folder.get_content_list(): - raise PluginInternalError("The plugin created a file {} that is also " - "the executable name!".format(code.get_local_executable())) - - # I create the job template to pass to the scheduler - job_tmpl = JobTemplate() - job_tmpl.shebang = computer.get_shebang() - # TODO: in the future, allow to customize the following variables - job_tmpl.submit_as_hold = False - job_tmpl.rerunnable = False - job_tmpl.job_environment = {} - # 'email', 'email_on_started', 'email_on_terminated', - job_tmpl.job_name = 'aiida-{}'.format(self.pk) - job_tmpl.sched_output_path = self._SCHED_OUTPUT_FILE - if self._SCHED_ERROR_FILE == self._SCHED_OUTPUT_FILE: - job_tmpl.sched_join_files = True - else: - job_tmpl.sched_error_path = self._SCHED_ERROR_FILE - job_tmpl.sched_join_files = False - - # Set retrieve path, add also scheduler STDOUT and STDERR - retrieve_list = (calcinfo.retrieve_list if calcinfo.retrieve_list is not None else []) - if (job_tmpl.sched_output_path is not None and job_tmpl.sched_output_path not in retrieve_list): - retrieve_list.append(job_tmpl.sched_output_path) - if not job_tmpl.sched_join_files: - if (job_tmpl.sched_error_path is not None and job_tmpl.sched_error_path not in retrieve_list): - retrieve_list.append(job_tmpl.sched_error_path) - self._set_retrieve_list(retrieve_list) - - retrieve_singlefile_list = (calcinfo.retrieve_singlefile_list - if calcinfo.retrieve_singlefile_list is not None else []) - # a validation on the subclasses of retrieve_singlefile_list - SinglefileData = DataFactory('singlefile') - for _, subclassname, _ in retrieve_singlefile_list: - FileSubclass = DataFactory(subclassname) - if not issubclass(FileSubclass, SinglefileData): - raise PluginInternalError("[presubmission of calc {}] " - "retrieve_singlefile_list subclass problem: " - "{} is not subclass of SinglefileData".format(self.pk, FileSubclass.__name__)) - self._set_retrieve_singlefile_list(retrieve_singlefile_list) - - # Handle the retrieve_temporary_list - retrieve_temporary_list = (calcinfo.retrieve_temporary_list - if calcinfo.retrieve_temporary_list is not None else []) - self._set_retrieve_temporary_list(retrieve_temporary_list) - - # the if is done so that if the method returns None, this is - # not added. 
This has two advantages: - # - it does not add too many \n\n if most of the prepend_text are empty - # - most importantly, skips the cases in which one of the methods - # would return None, in which case the join method would raise - # an exception - job_tmpl.prepend_text = "\n\n".join( - _ for _ in [computer.get_prepend_text()] + [code.get_prepend_text() for code in codes] + - [calcinfo.prepend_text, self.get_option('prepend_text')] if _) - - job_tmpl.append_text = "\n\n".join( - _ for _ in - [self.get_option('append_text'), calcinfo.append_text, - code.get_append_text(), - computer.get_append_text()] if _) - - # Set resources, also with get_default_mpiprocs_per_machine - resources = self.get_option('resources') - def_cpus_machine = computer.get_default_mpiprocs_per_machine() - if def_cpus_machine is not None: - resources['default_mpiprocs_per_machine'] = def_cpus_machine - job_tmpl.job_resource = s.create_job_resource(**resources) - - subst_dict = {'tot_num_mpiprocs': job_tmpl.job_resource.get_tot_num_mpiprocs()} - - for k, v in job_tmpl.job_resource.items(): - subst_dict[k] = v - mpi_args = [arg.format(**subst_dict) for arg in computer.get_mpirun_command()] - extra_mpirun_params = self.get_option('mpirun_extra_params') # this is the same for all codes in the same calc - - # set the codes_info - if not isinstance(calcinfo.codes_info, (list, tuple)): - raise PluginInternalError("codes_info passed to CalcInfo must be a list of CalcInfo objects") - - codes_info = [] - for code_info in calcinfo.codes_info: - - if not isinstance(code_info, CodeInfo): - raise PluginInternalError("Invalid codes_info, must be a list of CodeInfo objects") - - if code_info.code_uuid is None: - raise PluginInternalError("CalcInfo should have " - "the information of the code " - "to be launched") - this_code = load_node(code_info.code_uuid, sub_classes=(Code,)) - - this_withmpi = code_info.withmpi # to decide better how to set the default - if this_withmpi is None: - if len(calcinfo.codes_info) > 1: - raise PluginInternalError("For more than one code, it is " - "necessary to set withmpi in " - "codes_info") - else: - this_withmpi = self.get_option('withmpi') - - if this_withmpi: - this_argv = (mpi_args + extra_mpirun_params + [this_code.get_execname()] + - (code_info.cmdline_params if code_info.cmdline_params is not None else [])) - else: - this_argv = [this_code.get_execname()] + (code_info.cmdline_params - if code_info.cmdline_params is not None else []) - - this_stdin_name = code_info.stdin_name - this_stdout_name = code_info.stdout_name - this_stderr_name = code_info.stderr_name - this_join_files = code_info.join_files - - # overwrite the old cmdline_params and add codename and mpirun stuff - code_info.cmdline_params = this_argv - - codes_info.append(code_info) - job_tmpl.codes_info = codes_info - - # set the codes execution mode - - if len(codes) > 1: - try: - job_tmpl.codes_run_mode = calcinfo.codes_run_mode - except KeyError: - raise PluginInternalError("Need to set the order of the code execution (parallel or serial?)") - else: - job_tmpl.codes_run_mode = code_run_modes.SERIAL - ######################################################################## - - custom_sched_commands = self.get_option('custom_scheduler_commands') - if custom_sched_commands: - job_tmpl.custom_scheduler_commands = custom_sched_commands - - job_tmpl.import_sys_environment = self.get_option('import_sys_environment') - - job_tmpl.job_environment = self.get_option('environment_variables') - - queue_name = self.get_option('queue_name') - 
account = self.get_option('account') - qos = self.get_option('qos') - if queue_name is not None: - job_tmpl.queue_name = queue_name - if account is not None: - job_tmpl.account = account - if qos is not None: - job_tmpl.qos = qos - priority = self.get_option('priority') - if priority is not None: - job_tmpl.priority = priority - max_memory_kb = self.get_option('max_memory_kb') - if max_memory_kb is not None: - job_tmpl.max_memory_kb = max_memory_kb - max_wallclock_seconds = self.get_option('max_wallclock_seconds') - if max_wallclock_seconds is not None: - job_tmpl.max_wallclock_seconds = max_wallclock_seconds - max_memory_kb = self.get_option('max_memory_kb') - if max_memory_kb is not None: - job_tmpl.max_memory_kb = max_memory_kb - - # TODO: give possibility to use a different name?? - script_filename = '_aiidasubmit.sh' - script_content = s.get_submit_script(job_tmpl) - folder.create_file_from_filelike(StringIO(script_content), script_filename) - - subfolder = folder.get_subfolder('.aiida', create=True) - subfolder.create_file_from_filelike(StringIO(json.dumps(job_tmpl)), 'job_tmpl.json') - subfolder.create_file_from_filelike(StringIO(json.dumps(calcinfo)), 'calcinfo.json') - - if calcinfo.local_copy_list is None: - calcinfo.local_copy_list = [] - - if calcinfo.remote_copy_list is None: - calcinfo.remote_copy_list = [] - - # Some validation - this_pk = self.pk if self.pk is not None else "[UNSTORED]" - local_copy_list = calcinfo.local_copy_list - try: - validate_list_of_string_tuples(local_copy_list, tuple_length=2) - except ValidationError as exc: - raise PluginInternalError("[presubmission of calc {}] " - "local_copy_list format problem: {}".format(this_pk, exc)) - - remote_copy_list = calcinfo.remote_copy_list - try: - validate_list_of_string_tuples(remote_copy_list, tuple_length=3) - except ValidationError as exc: - raise PluginInternalError("[presubmission of calc {}] " - "remote_copy_list format problem: {}".format(this_pk, exc)) - - for (remote_computer_uuid, remote_abs_path, dest_rel_path) in remote_copy_list: - try: - remote_computer = Computer.objects.get(uuid=remote_computer_uuid) - except NotExistent: - raise PluginInternalError("[presubmission of calc {}] " - "The remote copy requires a computer with UUID={}" - "but no such computer was found in the " - "database".format(this_pk, remote_computer_uuid)) - if os.path.isabs(dest_rel_path): - raise PluginInternalError("[presubmission of calc {}] " - "The destination path of the remote copy " - "is absolute! ({})".format(this_pk, dest_rel_path)) - - return calcinfo, script_filename - - @property - def res(self): - """ - To be used to get direct access to the parsed parameters. - - :return: an instance of the CalculationResultManager. - - :note: a practical example on how it is meant to be used: let's say that there is a key 'energy' - in the dictionary of the parsed results which contains a list of floats. - The command `calc.res.energy` will return such a list. - """ - return CalculationResultManager(self) - - def submit_test(self, folder=None, subfolder_name=None): - """ - Run a test submission by creating the files that would be generated for the real calculation in a local folder, - without actually storing the calculation nor the input nodes. This functionality therefore also does not - require any of the inputs nodes to be stored yet. - - :param folder: a Folder object, within which to create the calculation files. 
By default a folder - will be created in the current working directory - :param subfolder_name: the name of the subfolder to use within the directory of the ``folder`` object. By - default a unique string will be generated based on the current datetime with the format ``yymmdd-`` - followed by an auto incrementing index - """ - import os - import errno - from aiida.common import timezone - - from aiida.transport.plugins.local import LocalTransport - from aiida.orm.computers import Computer - from aiida.common.folders import Folder - from aiida.common.exceptions import NotExistent - - if folder is None: - folder = Folder(os.path.abspath('submit_test')) - - # In case it is not created yet - folder.create() - - if subfolder_name is None: - subfolder_basename = timezone.localtime(timezone.now()).strftime('%Y%m%d') - else: - subfolder_basename = subfolder_name - - # Find a new subfolder. - # I do not user tempfile.mkdtemp, because it puts random characters - # at the end of the directory name, therefore making difficult to - # understand the order in which directories where stored - counter = 0 - while True: - counter += 1 - subfolder_path = os.path.join(folder.abspath, "{}-{:05d}".format(subfolder_basename, counter)) - # This check just tried to avoid to try to create the folder - # (hoping that a test of existence is faster than a - # test and failure in directory creation) - # But it could be removed - if os.path.exists(subfolder_path): - continue - - try: - # Directory found, and created - os.mkdir(subfolder_path) - break - except OSError as e: - if e.errno == errno.EEXIST: - # The directory has been created in the meantime, - # retry with a new one... - continue - # Some other error: raise, so we avoid infinite loops - # e.g. if we are in a folder in which we do not have write - # permissions - raise - - subfolder = folder.get_subfolder(os.path.relpath(subfolder_path, folder.abspath), reset_limit=True) - - # I use the local transport where possible, to be as similar - # as possible to a real submission - t = LocalTransport() - with t: - t.chdir(subfolder.abspath) - - calcinfo, script_filename = self._presubmit(subfolder) - - code = self.get_code() - - if code.is_local(): - # Note: this will possibly overwrite files - for f in code.get_folder_list(): - t.put(code.get_abs_path(f), f) - t.chmod(code.get_local_executable(), 0o755) # rwxr-xr-x - - local_copy_list = calcinfo.local_copy_list - remote_copy_list = calcinfo.remote_copy_list - remote_symlink_list = calcinfo.remote_symlink_list - - for src_abs_path, dest_rel_path in local_copy_list: - t.put(src_abs_path, dest_rel_path) - - if remote_copy_list: - with io.open(os.path.join(subfolder.abspath, '_aiida_remote_copy_list.txt'), 'w', encoding='utf8') as f: - for (remote_computer_uuid, remote_abs_path, dest_rel_path) in remote_copy_list: - try: - remote_computer = Computer.objects.get(uuid=remote_computer_uuid) - except NotExistent: - remote_computer = "[unknown]" - f.write(u"* I WOULD REMOTELY COPY " - "FILES/DIRS FROM COMPUTER {} (UUID {}) " - "FROM {} TO {}\n".format(remote_computer.name, remote_computer_uuid, remote_abs_path, - dest_rel_path)) - - if remote_symlink_list: - with io.open( - os.path.join(subfolder.abspath, '_aiida_remote_symlink_list.txt'), 'w', encoding='utf8') as f: - for (remote_computer_uuid, remote_abs_path, dest_rel_path) in remote_symlink_list: - try: - remote_computer = Computer.objects.get(uuid=remote_computer_uuid) - except NotExistent: - remote_computer = "[unknown]" - f.write(u"* I WOULD PUT SYMBOLIC LINKS FOR " - 
"FILES/DIRS FROM COMPUTER {} (UUID {}) " - "FROM {} TO {}\n".format(remote_computer.name, remote_computer_uuid, remote_abs_path, - dest_rel_path)) - - return subfolder, script_filename - - def get_scheduler_output(self): - """ - Return the output of the scheduler output (a string) if the calculation - has finished, and output node is present, and the output of the - scheduler was retrieved. - - Return None otherwise. - """ - from aiida.common.exceptions import NotExistent - - # Shortcut if no error file is set - if self._SCHED_OUTPUT_FILE is None: - return None - - retrieved_node = self.get_retrieved_node() - if retrieved_node is None: - return None - - try: - outfile_content = retrieved_node.get_file_content(self._SCHED_OUTPUT_FILE) - except (NotExistent): - # Return None if no file is found - return None - - return outfile_content - - def get_scheduler_error(self): - """ - Return the output of the scheduler error (a string) if the calculation - has finished, and output node is present, and the output of the - scheduler was retrieved. - - Return None otherwise. - """ - from aiida.common.exceptions import NotExistent - - # Shortcut if no error file is set - if self._SCHED_ERROR_FILE is None: - return None - - retrieved_node = self.get_retrieved_node() - if retrieved_node is None: - return None - - try: - errfile_content = retrieved_node.get_file_content(self._SCHED_ERROR_FILE) - except (NotExistent): - # Return None if no file is found - return None - - return errfile_content - - def get_desc(self): - """ - Returns a string with infos retrieved from a CalcJobNode node's - properties. - """ - return self.get_state() - - -def _parse_single_arg(function_name, additional_parameter, args, kwargs): - """ - Verifies that a single additional argument has been given (or no - additional argument, if additional_parameter is None). Also - verifies its name. - :param function_name: the name of the caller function, used for - the output messages - :param additional_parameter: None if no additional parameters - should be passed, or a string with the name of the parameter - if one additional parameter should be passed. - :return: None, if additional_parameter is None, or the value of - the additional parameter - :raise TypeError: on wrong number of inputs - """ - # Here all the logic to check if the parameters are correct. 
- if additional_parameter is not None: - if len(args) == 1: - if kwargs: - raise TypeError("{}() received too many args".format( - function_name)) - additional_parameter_data = args[0] - elif len(args) == 0: - kwargs_copy = kwargs.copy() - try: - additional_parameter_data = kwargs_copy.pop( - additional_parameter) - except KeyError: - if kwargs_copy: - raise TypeError("{}() got an unexpected keyword " - "argument '{}'".format( - function_name, kwargs_copy.keys()[0])) - else: - raise TypeError("{}() requires more " - "arguments".format(function_name)) - if kwargs_copy: - raise TypeError("{}() got an unexpected keyword " - "argument '{}'".format( - function_name, kwargs_copy.keys()[0])) - else: - raise TypeError("{}() received too many args".format( - function_name)) - return additional_parameter_data - else: - if kwargs: - raise TypeError("{}() got an unexpected keyword " - "argument '{}'".format( - function_name, kwargs.keys()[0])) - if len(args) != 0: - raise TypeError("{}() received too many args".format( - function_name)) - - return None - class CalculationResultManager(object): """ diff --git a/aiida/orm/node/process/process.py b/aiida/orm/node/process/process.py index 841263ff26..efdb72fcf4 100644 --- a/aiida/orm/node/process/process.py +++ b/aiida/orm/node/process/process.py @@ -33,8 +33,8 @@ class ProcessNode(Sealable, Node): EXCEPTION_KEY = 'exception' EXIT_MESSAGE_KEY = 'exit_message' EXIT_STATUS_KEY = 'exit_status' - PROCESS_LABEL_KEY = '_process_label' PROCESS_PAUSED_KEY = 'paused' + PROCESS_LABEL_KEY = 'process_label' PROCESS_STATE_KEY = 'process_state' PROCESS_STATUS_KEY = 'process_status' diff --git a/aiida/parsers/parser.py b/aiida/parsers/parser.py index 6f08f09eac..c756934b74 100644 --- a/aiida/parsers/parser.py +++ b/aiida/parsers/parser.py @@ -34,20 +34,7 @@ class Parser(object): def __init__(self, calc): self._logger = AIIDA_LOGGER.getChild('parser').getChild(self.__class__.__name__) - - # THIS IS A MASSIVE HACK: the `_prepare_for_submission` is only implemented for the sub class but this instance - # will be a plain `CalcJobNode`, so we have to recreate an instance of the actual sub class to call the method. 
- from importlib import import_module - from aiida.plugins.entry_point import is_valid_entry_point_string, load_entry_point_from_string - - if is_valid_entry_point_string(calc.process_type): - calc_class = load_entry_point_from_string(calc.process_type) - else: - module_name, class_name = calc.process_type.rsplit('.', 1) - module = import_module(module_name) - calc_class = getattr(module, class_name) - - self._calc = calc_class(dbnode=calc._dbnode) + self._calc = calc @property def logger(self): @@ -96,7 +83,7 @@ def parse_from_calc(self, retrieved_temporary_folder=None): self.logger.error('No retrieved folder found') return False, () - retrieved = {self._calc._get_linkname_retrieved(): out_folder} + retrieved = {self._calc.link_label_retrieved: out_folder} if retrieved_temporary_folder is not None: key = self.retrieved_temporary_folder_key diff --git a/aiida/parsers/plugins/arithmetic/add.py b/aiida/parsers/plugins/arithmetic/add.py index b38a825a1b..57db0a5d77 100644 --- a/aiida/parsers/plugins/arithmetic/add.py +++ b/aiida/parsers/plugins/arithmetic/add.py @@ -41,15 +41,15 @@ def parse_with_retrieved(self, retrieved): output_nodes = [] try: - output_folder = retrieved[self._calc._get_linkname_retrieved()] + output_folder = retrieved[self._calc.link_label_retrieved] except KeyError: self.logger.error("no retrieved folder found") return False, () # Verify the standard output file is present, parse the value and attach as output node try: - filepath_stdout = output_folder.get_abs_path(self._calc._OUTPUT_FILE_NAME) - except OSError as exception: + filepath_stdout = output_folder.get_abs_path(self._calc.get_attr('output_filename')) + except OSError: self.logger.error("expected output file '{}' was not found".format(filepath_stdout)) return False, () diff --git a/aiida/parsers/plugins/templatereplacer/doubler.py b/aiida/parsers/plugins/templatereplacer/doubler.py index af670c777f..b43a8b521a 100644 --- a/aiida/parsers/plugins/templatereplacer/doubler.py +++ b/aiida/parsers/plugins/templatereplacer/doubler.py @@ -42,7 +42,7 @@ def parse_with_retrieved(self, retrieved): self.logger.error("the output file name 'output_file_name' was not specified in the 'template' input node") return False, () - retrieved_folder = retrieved[self._calc._get_linkname_retrieved()] + retrieved_folder = retrieved[self._calc.link_label_retrieved] try: parsed_value = int(retrieved_folder.get_file_content(output_file).strip()) except (AttributeError, IOError, ValueError) as e: diff --git a/aiida/plugins/entry_point.py b/aiida/plugins/entry_point.py index 5e2d2bf031..aa14850e56 100644 --- a/aiida/plugins/entry_point.py +++ b/aiida/plugins/entry_point.py @@ -248,13 +248,6 @@ def get_entry_point_from_class(class_module, class_name): :param class_name: name of the class :return: a tuple of the corresponding group and entry point or None if not found """ - prefix = 'JobProcess_' - - # Curiosity of the dynamically generated JobProcess classes - if class_name.startswith(prefix): - class_path = class_name[len(prefix):] - class_module, class_name = class_path.rsplit('.', 1) - for group in ENTRYPOINT_MANAGER.get_entry_map().keys(): for entry_point in ENTRYPOINT_MANAGER.iter_entry_points(group): diff --git a/aiida/plugins/loader.py b/aiida/plugins/loader.py index d4174d08e3..9d6180f162 100644 --- a/aiida/plugins/loader.py +++ b/aiida/plugins/loader.py @@ -118,6 +118,8 @@ def get_type_string_from_class(class_module, class_name): return type_string + # orm_class = '{}.{}.'.format(class_module, class_name) + # return 
get_type_string_from_class_path(orm_class) def get_type_string_from_class_path(class_path): """ diff --git a/aiida/scheduler/__init__.py b/aiida/scheduler/__init__.py index 32bbdc2e89..8296d622bb 100644 --- a/aiida/scheduler/__init__.py +++ b/aiida/scheduler/__init__.py @@ -23,7 +23,7 @@ from aiida.common.escaping import escape_for_bash from aiida.common.exceptions import AiidaException, FeatureNotAvailable from aiida.plugins.factory import BaseFactory -from aiida.scheduler.datastructures import JobTemplate, JobInfo, JOB_STATES +from aiida.scheduler.datastructures import JobTemplate, JobInfo, JobState def SchedulerFactory(entry_point): @@ -214,7 +214,7 @@ def _get_run_line(self, codes_info, codes_run_mode): cmdline_params, stdin_name, stdout_name, stderr_name, join_files. See the documentation of JobTemplate and CodeInfo :parameter codes_run_mode: contains the information on how to launch the - multiple codes. As described in aiida.common.datastructures.code_run_modes + multiple codes. As described in aiida.common.datastructures.CodeRunMode argv: an array with the executable and the command line arguments. @@ -234,7 +234,7 @@ def _get_run_line(self, codes_info, codes_run_mode): Return a string with the following format: [executable] [args] {[ < stdin ]} {[ < stdout ]} {[2>&1 | 2> stderr]} """ - from aiida.common.datastructures import code_run_modes + from aiida.common.datastructures import CodeRunMode list_of_runlines = [] @@ -258,10 +258,10 @@ def _get_run_line(self, codes_info, codes_run_mode): list_of_runlines.append(output_string) self.logger.debug('_get_run_line output: {}'.format(list_of_runlines)) - if codes_run_mode == code_run_modes.PARALLEL: + if codes_run_mode == CodeRunMode.PARALLEL: list_of_runlines.append('wait\n') return " &\n\n".join(list_of_runlines) - elif codes_run_mode == code_run_modes.SERIAL: + elif codes_run_mode == CodeRunMode.SERIAL: return "\n\n".join(list_of_runlines) else: raise NotImplementedError('Unrecognized code run mode') diff --git a/aiida/scheduler/datastructures.py b/aiida/scheduler/datastructures.py index bdcbd12f25..4cf1ae80a5 100644 --- a/aiida/scheduler/datastructures.py +++ b/aiida/scheduler/datastructures.py @@ -18,33 +18,27 @@ from __future__ import print_function from __future__ import division from __future__ import absolute_import -from aiida.common.extendeddicts import (DefaultFieldsAttributeDict, Enumerate) + +from enum import Enum from aiida.common import AIIDA_LOGGER +from aiida.common.extendeddicts import DefaultFieldsAttributeDict SCHEDULER_LOGGER = AIIDA_LOGGER.getChild('scheduler') -class JobState(Enumerate): - pass +class JobState(Enum): + """Enumeration of possible scheduler states of a CalcJob. + There is no FAILED state as every completed job is put in DONE, regardless of success. + """ -# This is the list of possible job states -# Note on names: Jobs are the entities on a -# scheduler; Calcs are the calculations in -# the AiiDA database (whose list of possible -# statuses is defined in aida.common.datastructures -# with the calc_states Enumerate). 
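As a quick usage sketch of the new enum (illustrative only; `_MAP_STATUS_EXAMPLE` is a hypothetical mapping that mirrors the per-plugin tables updated further below):

    from aiida.scheduler.datastructures import JobState

    _MAP_STATUS_EXAMPLE = {'R': JobState.RUNNING, 'Q': JobState.QUEUED}

    job_state = _MAP_STATUS_EXAMPLE.get('R', JobState.UNDETERMINED)
    assert job_state is JobState.RUNNING
    assert job_state.value == 'running'  # the string form handled by JobInfo's job_state serializer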
-# NOTE: for the moment, I don't define FAILED -# (I put everything in DONE) -JOB_STATES = JobState(( - 'UNDETERMINED', - 'QUEUED', - 'QUEUED_HELD', - 'RUNNING', - 'SUSPENDED', - 'DONE', -)) + UNDETERMINED = 'undetermined' + QUEUED = 'queued' + QUEUED_HELD = 'queued held' + RUNNING = 'running' + SUSPENDED = 'suspended' + DONE = 'done' class JobResource(DefaultFieldsAttributeDict): @@ -378,7 +372,7 @@ class JobTemplate(DefaultFieldsAttributeDict): wait The serial execution would be without the &'s. - Values are given by aiida.common.datastructures.code_run_modes. + Values are given by aiida.common.datastructures.CodeRunMode. """ # #TODO: validation key? also call the validate function in the proper @@ -401,8 +395,6 @@ class JobTemplate(DefaultFieldsAttributeDict): 'account', 'qos', 'job_resource', - # 'num_machines', - # 'num_mpiprocs_per_machine', 'priority', 'max_memory_kb', 'max_wallclock_seconds', @@ -410,11 +402,6 @@ class JobTemplate(DefaultFieldsAttributeDict): 'prepend_text', 'append_text', 'import_sys_environment', - # 'stderr_name', # this 5 5keys have been moved to codes_info - # 'join_files', - # 'argv', - # 'stdin_name', - # 'stdout_name', 'codes_run_mode', 'codes_info', ) @@ -459,7 +446,7 @@ class JobInfo(DefaultFieldsAttributeDict): * ``annotation``: human-readable description of the reason for the job being in the current state or substate. * ``job_state``: the job state (one of those defined in - ``aiida.scheduler.datastructures.JOB_STATES``) + ``aiida.scheduler.datastructures.JobState``) * ``job_substate``: a string with the implementation-specific sub-state * ``allocated_machines``: a list of machines used for the current job. This is a list of :py:class:`MachineInfo` objects. @@ -501,8 +488,22 @@ class JobInfo(DefaultFieldsAttributeDict): 'submission_time': 'date', 'dispatch_time': 'date', 'finish_time': 'date', + 'job_state': 'job_state', } + @staticmethod + def _serialize_job_state(job_state): + """Return the serialized value of the JobState instance.""" + if not isinstance(job_state, JobState): + raise TypeError('invalid type for value {}, should be an instance of `JobState`'.format(job_state)) + + return job_state.value + + @staticmethod + def _deserialize_job_state(job_state): + """Return an instance of JobState from the job_state string.""" + return JobState(job_state) + @staticmethod def _serialize_date(value): """ diff --git a/aiida/scheduler/plugins/direct.py b/aiida/scheduler/plugins/direct.py index 426b52d4d1..14e7eb08ce 100644 --- a/aiida/scheduler/plugins/direct.py +++ b/aiida/scheduler/plugins/direct.py @@ -19,7 +19,7 @@ import aiida.scheduler from aiida.common.escaping import escape_for_bash from aiida.scheduler import SchedulerError -from aiida.scheduler.datastructures import (JobInfo, JOB_STATES, NodeNumberJobResource) +from aiida.scheduler.datastructures import (JobInfo, JobState, NodeNumberJobResource) ## From the ps man page on Mac OS X 10.12 # state The state is given by a sequence of characters, for example, @@ -51,20 +51,20 @@ # its parent _MAP_STATUS_PS = { - 'D': JOB_STATES.RUNNING, - 'I': JOB_STATES.RUNNING, - 'R': JOB_STATES.RUNNING, - 'S': JOB_STATES.RUNNING, - 'T': JOB_STATES.SUSPENDED, - 'U': JOB_STATES.RUNNING, - 'W': JOB_STATES.RUNNING, - 'X': JOB_STATES.DONE, - 'Z': JOB_STATES.DONE, + 'D': JobState.RUNNING, + 'I': JobState.RUNNING, + 'R': JobState.RUNNING, + 'S': JobState.RUNNING, + 'T': JobState.SUSPENDED, + 'U': JobState.RUNNING, + 'W': JobState.RUNNING, + 'X': JobState.DONE, + 'Z': JobState.DONE, # Not sure about these three, I comment 
them out (they used to be in # here, but they don't appear neither on ubuntu nor on Mac) - # 'F': JOB_STATES.DONE, - # 'H': JOB_STATES.QUEUED_HELD, - # 'Q': JOB_STATES.QUEUED, + # 'F': JobState.DONE, + # 'H': JobState.QUEUED_HELD, + # 'Q': JobState.QUEUED, } @@ -250,7 +250,7 @@ def _parse_joblist_output(self, retval, stdout, stderr): job_state_string = job[1][0] # I just check the first character except IndexError: self.logger.debug("No 'job_state' field for job id {}".format(this_job.job_id)) - this_job.job_state = JOB_STATES.UNDETERMINED + this_job.job_state = JobState.UNDETERMINED else: try: this_job.job_state = \ @@ -258,7 +258,7 @@ def _parse_joblist_output(self, retval, stdout, stderr): except KeyError: self.logger.warning("Unrecognized job_state '{}' for job " "id {}".format(job_state_string, this_job.job_id)) - this_job.job_state = JOB_STATES.UNDETERMINED + this_job.job_state = JobState.UNDETERMINED try: # I strip the part after the @: is this always ok? @@ -298,7 +298,7 @@ def getJobs(self, jobs=None, user=None, as_dict=False): for job_id in not_found_jobs: job = JobInfo() job.job_id = job_id - job.job_state = JOB_STATES.DONE + job.job_state = JobState.DONE # Owner and wallclock time is unknown if as_dict: job_stats[job_id] = job diff --git a/aiida/scheduler/plugins/lsf.py b/aiida/scheduler/plugins/lsf.py index 35d107f57d..009a2a0e64 100644 --- a/aiida/scheduler/plugins/lsf.py +++ b/aiida/scheduler/plugins/lsf.py @@ -20,7 +20,7 @@ import aiida.scheduler from aiida.common.escaping import escape_for_bash from aiida.scheduler import SchedulerError, SchedulerParsingError -from aiida.scheduler.datastructures import (JobInfo, JOB_STATES, JobResource) +from aiida.scheduler.datastructures import (JobInfo, JobState, JobResource) # This maps LSF status codes to our own state list # @@ -73,17 +73,17 @@ # as local ZOMBI jobs. In addition, it notifies the submission cluster # that the job is in ZOMBI state and the submission cluster requeues the job. _MAP_STATUS_LSF = { - 'PEND': JOB_STATES.QUEUED, - 'PROV': JOB_STATES.QUEUED, - 'PSUSP': JOB_STATES.QUEUED_HELD, - 'USUSP': JOB_STATES.SUSPENDED, - 'SSUSP': JOB_STATES.SUSPENDED, - 'RUN': JOB_STATES.RUNNING, - 'DONE': JOB_STATES.DONE, - 'EXIT': JOB_STATES.DONE, - 'UNKWN': JOB_STATES.UNDETERMINED, - 'WAIT': JOB_STATES.QUEUED, - 'ZOMBI': JOB_STATES.UNDETERMINED, + 'PEND': JobState.QUEUED, + 'PROV': JobState.QUEUED, + 'PSUSP': JobState.QUEUED_HELD, + 'USUSP': JobState.SUSPENDED, + 'SSUSP': JobState.SUSPENDED, + 'RUN': JobState.RUNNING, + 'DONE': JobState.DONE, + 'EXIT': JobState.DONE, + 'UNKWN': JobState.UNDETERMINED, + 'WAIT': JobState.QUEUED, + 'ZOMBI': JobState.UNDETERMINED, } # Separator between fields in the output of bjobs @@ -540,7 +540,7 @@ def _parse_joblist_output(self, retval, stdout, stderr): except KeyError: self.logger.warning("Unrecognized job_state '{}' for job " "id {}".format(job_state_raw, this_job.job_id)) - job_state_string = JOB_STATES.UNDETERMINED + job_state_string = JobState.UNDETERMINED this_job.job_state = job_state_string @@ -574,7 +574,7 @@ def _parse_joblist_output(self, retval, stdout, stderr): # therefore it requires some parsing, that is unnecessary now. 
# I just store is as a raw string for the moment, and I leave # this_job.allocated_machines undefined - if this_job.job_state == JOB_STATES.RUNNING: + if this_job.job_state == JobState.RUNNING: this_job.allocated_machines_raw = allocated_machines this_job.queue_name = partition @@ -586,7 +586,7 @@ def _parse_joblist_output(self, retval, stdout, stderr): # Now get the time in seconds which has been used # Only if it is RUNNING; otherwise it is not meaningful, # and may be not set (in my test, it is set to zero) - if this_job.job_state == JOB_STATES.RUNNING: + if this_job.job_state == JobState.RUNNING: try: requested_walltime = psd_finish_time - psd_start_time # fix of a weird bug. Since the year is not parsed, it is assumed diff --git a/aiida/scheduler/plugins/pbsbaseclasses.py b/aiida/scheduler/plugins/pbsbaseclasses.py index 9c79591cfa..becacdc732 100644 --- a/aiida/scheduler/plugins/pbsbaseclasses.py +++ b/aiida/scheduler/plugins/pbsbaseclasses.py @@ -20,7 +20,7 @@ from aiida.common.escaping import escape_for_bash from aiida.scheduler import Scheduler, SchedulerError, SchedulerParsingError -from aiida.scheduler.datastructures import (JobInfo, JOB_STATES, MachineInfo, NodeNumberJobResource) +from aiida.scheduler.datastructures import (JobInfo, JobState, MachineInfo, NodeNumberJobResource) _LOGGER = logging.getLogger(__name__) @@ -53,20 +53,20 @@ # S - (Unicos only) job is suspend. [as above] _MAP_STATUS_PBS_COMMON = { - 'B': JOB_STATES.RUNNING, - 'E': JOB_STATES.RUNNING, # If exiting, for our purposes it is still running - 'F': JOB_STATES.DONE, - 'H': JOB_STATES.QUEUED_HELD, - 'M': JOB_STATES.UNDETERMINED, # TODO: check if this is ok? - 'Q': JOB_STATES.QUEUED, - 'R': JOB_STATES.RUNNING, - 'S': JOB_STATES.SUSPENDED, - 'T': JOB_STATES.QUEUED, # We assume that from the AiiDA point of view + 'B': JobState.RUNNING, + 'E': JobState.RUNNING, # If exiting, for our purposes it is still running + 'F': JobState.DONE, + 'H': JobState.QUEUED_HELD, + 'M': JobState.UNDETERMINED, # TODO: check if this is ok? 
+ 'Q': JobState.QUEUED, + 'R': JobState.RUNNING, + 'S': JobState.SUSPENDED, + 'T': JobState.QUEUED, # We assume that from the AiiDA point of view # it is still queued - 'U': JOB_STATES.SUSPENDED, - 'W': JOB_STATES.QUEUED, - 'X': JOB_STATES.DONE, - 'C': JOB_STATES.DONE, # This is the completed state of PBS/Torque + 'U': JobState.SUSPENDED, + 'W': JobState.QUEUED, + 'X': JobState.DONE, + 'C': JobState.DONE, # This is the completed state of PBS/Torque } @@ -490,10 +490,10 @@ def _parse_joblist_output(self, retval, stdout, stderr): except KeyError: _LOGGER.warning("Unrecognized job_state '{}' for job " "id {}".format(job_state_string, this_job.job_id)) - this_job.job_state = JOB_STATES.UNDETERMINED + this_job.job_state = JobState.UNDETERMINED except KeyError: _LOGGER.debug("No 'job_state' field for job id {}".format(this_job.job_id)) - this_job.job_state = JOB_STATES.UNDETERMINED + this_job.job_state = JobState.UNDETERMINED try: this_job.job_substate = raw_data['substate'] diff --git a/aiida/scheduler/plugins/sge.py b/aiida/scheduler/plugins/sge.py index f6e062736e..7fe2aa3981 100644 --- a/aiida/scheduler/plugins/sge.py +++ b/aiida/scheduler/plugins/sge.py @@ -23,7 +23,7 @@ from aiida.common.escaping import escape_for_bash import aiida.scheduler from aiida.scheduler import SchedulerError, SchedulerParsingError -from aiida.scheduler.datastructures import (JobInfo, JOB_STATES, ParEnvJobResource) +from aiida.scheduler.datastructures import (JobInfo, JobState, ParEnvJobResource) # 'http://www.loni.ucla.edu/twiki/bin/view/Infrastructure/GridComputing?skin=plain': # Jobs Status: @@ -63,28 +63,28 @@ # ds, dS, dT, dRs, # dRS, dRT _MAP_STATUS_SGE = { - 'qw': JOB_STATES.QUEUED, - 'w': JOB_STATES.QUEUED, - 'hqw': JOB_STATES.QUEUED_HELD, - 'hRwq': JOB_STATES.QUEUED_HELD, - 'r': JOB_STATES.RUNNING, - 't': JOB_STATES.RUNNING, - 'R': JOB_STATES.RUNNING, - 'Rr': JOB_STATES.RUNNING, - 'Rt': JOB_STATES.RUNNING, - 's': JOB_STATES.SUSPENDED, - 'st': JOB_STATES.SUSPENDED, - 'Rs': JOB_STATES.SUSPENDED, - 'Rts': JOB_STATES.SUSPENDED, - 'dr': JOB_STATES.UNDETERMINED, - 'dt': JOB_STATES.UNDETERMINED, - 'ds': JOB_STATES.UNDETERMINED, - 'dRr': JOB_STATES.UNDETERMINED, - 'dRt': JOB_STATES.UNDETERMINED, - 'dRs': JOB_STATES.UNDETERMINED, - 'Eqw': JOB_STATES.UNDETERMINED, - 'Ehqw': JOB_STATES.UNDETERMINED, - 'EhRqw': JOB_STATES.UNDETERMINED + 'qw': JobState.QUEUED, + 'w': JobState.QUEUED, + 'hqw': JobState.QUEUED_HELD, + 'hRwq': JobState.QUEUED_HELD, + 'r': JobState.RUNNING, + 't': JobState.RUNNING, + 'R': JobState.RUNNING, + 'Rr': JobState.RUNNING, + 'Rt': JobState.RUNNING, + 's': JobState.SUSPENDED, + 'st': JobState.SUSPENDED, + 'Rs': JobState.SUSPENDED, + 'Rts': JobState.SUSPENDED, + 'dr': JobState.UNDETERMINED, + 'dt': JobState.UNDETERMINED, + 'ds': JobState.UNDETERMINED, + 'dRr': JobState.UNDETERMINED, + 'dRt': JobState.UNDETERMINED, + 'dRs': JobState.UNDETERMINED, + 'Eqw': JobState.UNDETERMINED, + 'Ehqw': JobState.UNDETERMINED, + 'EhRqw': JobState.UNDETERMINED } @@ -383,10 +383,10 @@ def _parse_joblist_output(self, retval, stdout, stderr): except KeyError: self.logger.warning("Unrecognized job_state '{}' for job " "id {}".format(job_state_string, this_job.job_id)) - this_job.job_state = JOB_STATES.UNDETERMINED + this_job.job_state = JobState.UNDETERMINED except IndexError: self.logger.warning("No 'job_state' field for job id {} in" "stdout={}".format(this_job.job_id, stdout)) - this_job.job_state = JOB_STATES.UNDETERMINED + this_job.job_state = JobState.UNDETERMINED try: job_element = 
job.getElementsByTagName('JB_owner').pop(0) @@ -407,7 +407,7 @@ def _parse_joblist_output(self, retval, stdout, stderr): element_child = job_element.childNodes.pop(0) this_job.queue_name = str(element_child.data).strip() except IndexError: - if this_job.job_state == JOB_STATES.RUNNING: + if this_job.job_state == JobState.RUNNING: self.logger.warning("No 'queue_name' field for job id {}".format(this_job.job_id)) try: @@ -435,7 +435,7 @@ def _parse_joblist_output(self, retval, stdout, stderr): "id {}".format(this_job.job_id)) # There is also cpu_usage, mem_usage, io_usage information available: - if this_job.job_state == JOB_STATES.RUNNING: + if this_job.job_state == JobState.RUNNING: try: job_element = job.getElementsByTagName('slots').pop(0) element_child = job_element.childNodes.pop(0) diff --git a/aiida/scheduler/plugins/slurm.py b/aiida/scheduler/plugins/slurm.py index ee820f26d5..e18ed574b7 100644 --- a/aiida/scheduler/plugins/slurm.py +++ b/aiida/scheduler/plugins/slurm.py @@ -22,7 +22,7 @@ import aiida.scheduler from aiida.common.escaping import escape_for_bash from aiida.scheduler import SchedulerError -from aiida.scheduler.datastructures import (JobInfo, JOB_STATES, NodeNumberJobResource) +from aiida.scheduler.datastructures import (JobInfo, JobState, NodeNumberJobResource) # This maps SLURM state codes to our own status list @@ -47,17 +47,17 @@ ## TO TIMEOUT Job terminated upon reaching its time limit. _MAP_STATUS_SLURM = { - 'CA': JOB_STATES.DONE, - 'CD': JOB_STATES.DONE, - 'CF': JOB_STATES.QUEUED, - 'CG': JOB_STATES.RUNNING, - 'F': JOB_STATES.DONE, - 'NF': JOB_STATES.DONE, - 'PD': JOB_STATES.QUEUED, - 'PR': JOB_STATES.DONE, - 'R': JOB_STATES.RUNNING, - 'S': JOB_STATES.SUSPENDED, - 'TO': JOB_STATES.DONE, + 'CA': JobState.DONE, + 'CD': JobState.DONE, + 'CF': JobState.QUEUED, + 'CG': JobState.RUNNING, + 'F': JobState.DONE, + 'NF': JobState.DONE, + 'PD': JobState.QUEUED, + 'PR': JobState.DONE, + 'R': JobState.RUNNING, + 'S': JobState.SUSPENDED, + 'TO': JobState.DONE, } # From the manual, @@ -519,7 +519,7 @@ def _parse_joblist_output(self, retval, stdout, stderr): except KeyError: self.logger.warning("Unrecognized job_state '{}' for job " "id {}".format(job_state_raw, this_job.job_id)) - job_state_string = JOB_STATES.UNDETERMINED + job_state_string = JobState.UNDETERMINED # QUEUED_HELD states are not specific states in SLURM; # they are instead set with state QUEUED, and then the # annotation tells if the job is held. @@ -535,9 +535,9 @@ def _parse_joblist_output(self, retval, stdout, stderr): # There are actually a few others, like possible # failures, or partition-related reasons, but for the moment I # leave them in the QUEUED state. - if (job_state_string == JOB_STATES.QUEUED and + if (job_state_string == JobState.QUEUED and this_job.annotation in ['Dependency', 'JobHeldUser', 'JobHeldAdmin', 'BeginTime']): - job_state_string = JOB_STATES.QUEUED_HELD + job_state_string = JobState.QUEUED_HELD this_job.job_state = job_state_string @@ -580,7 +580,7 @@ def _parse_joblist_output(self, retval, stdout, stderr): # therefore it requires some parsing, that is unnecessary now. 
# I just store is as a raw string for the moment, and I leave # this_job.allocated_machines undefined - if this_job.job_state == JOB_STATES.RUNNING: + if this_job.job_state == JobState.RUNNING: this_job.allocated_machines_raw = thisjob_dict['allocated_machines'] this_job.queue_name = thisjob_dict['partition'] @@ -592,7 +592,7 @@ def _parse_joblist_output(self, retval, stdout, stderr): # Only if it is RUNNING; otherwise it is not meaningful, # and may be not set (in my test, it is set to zero) - if this_job.job_state == JOB_STATES.RUNNING: + if this_job.job_state == JobState.RUNNING: try: this_job.wallclock_time_seconds = (self._convert_time(thisjob_dict['time_used'])) except ValueError: diff --git a/aiida/scheduler/plugins/test_lsf.py b/aiida/scheduler/plugins/test_lsf.py index 26b80e81d2..3e127b3269 100644 --- a/aiida/scheduler/plugins/test_lsf.py +++ b/aiida/scheduler/plugins/test_lsf.py @@ -66,37 +66,37 @@ def test_parse_common_joblist_output(self): job_queued = 2 job_queue_name = ['8nm', 'test'] - job_queued_parsed = len([j for j in job_list if j.job_state and j.job_state == JOB_STATES.QUEUED]) - job_queue_name_parsed = [j.queue_name for j in job_list if j.job_state and j.job_state == JOB_STATES.QUEUED] + job_queued_parsed = len([j for j in job_list if j.job_state and j.job_state == JobState.QUEUED]) + job_queue_name_parsed = [j.queue_name for j in job_list if j.job_state and j.job_state == JobState.QUEUED] self.assertEquals(job_queued, job_queued_parsed) self.assertEquals(job_queue_name, job_queue_name_parsed) job_done = 2 job_done_title = ['aiida-1033269', 'test'] job_done_annotation = ['TERM_RUNLIMIT: job killed after reaching LSF run time limit', '-'] - job_done_parsed = len([j for j in job_list if j.job_state and j.job_state == JOB_STATES.DONE]) - job_done_title_parsed = [j.title for j in job_list if j.job_state and j.job_state == JOB_STATES.DONE] - job_done_annotation_parsed = [j.annotation for j in job_list if j.job_state and j.job_state == JOB_STATES.DONE] + job_done_parsed = len([j for j in job_list if j.job_state and j.job_state == JobState.DONE]) + job_done_title_parsed = [j.title for j in job_list if j.job_state and j.job_state == JobState.DONE] + job_done_annotation_parsed = [j.annotation for j in job_list if j.job_state and j.job_state == JobState.DONE] self.assertEquals(job_done, job_done_parsed) self.assertEquals(job_done_title, job_done_title_parsed) self.assertEquals(job_done_annotation, job_done_annotation_parsed) job_running = 3 job_running_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING]) + and j.job_state == JobState.RUNNING]) self.assertEquals(job_running, job_running_parsed) running_users = ['inewton', 'inewton', 'dbowie'] - parsed_running_users = [j.job_owner for j in job_list if j.job_state and j.job_state == JOB_STATES.RUNNING] + parsed_running_users = [j.job_owner for j in job_list if j.job_state and j.job_state == JobState.RUNNING] self.assertEquals(running_users, parsed_running_users) running_jobs = ['764254593', '764255172', '764245175'] num_machines = [1, 1, 1] allocated_machines = ['lxbsu2710', 'b68ac74822', 'b68ac74822'] - parsed_running_jobs = [j.job_id for j in job_list if j.job_state and j.job_state == JOB_STATES.RUNNING] - parsed_num_machines = [j.num_machines for j in job_list if j.job_state and j.job_state == JOB_STATES.RUNNING] + parsed_running_jobs = [j.job_id for j in job_list if j.job_state and j.job_state == JobState.RUNNING] + parsed_num_machines = [j.num_machines for j in job_list if j.job_state 
and j.job_state == JobState.RUNNING] parsed_allocated_machines = [ - j.allocated_machines_raw for j in job_list if j.job_state and j.job_state == JOB_STATES.RUNNING + j.allocated_machines_raw for j in job_list if j.job_state and j.job_state == JobState.RUNNING ] self.assertEquals(running_jobs, parsed_running_jobs) self.assertEquals(num_machines, parsed_num_machines) @@ -120,7 +120,7 @@ def test_submit_script(self): Test the creation of a simple submission script. """ from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode scheduler = LsfScheduler() @@ -133,7 +133,7 @@ def test_submit_script(self): code_info.cmdline_params = ["mpirun", "-np", "2", "pw.x", "-npool", "1"] code_info.stdin_name = 'aiida.in' job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = scheduler.get_submit_script(job_tmpl) diff --git a/aiida/scheduler/plugins/test_pbspro.py b/aiida/scheduler/plugins/test_pbspro.py index 0b25311f0e..7cf163863b 100644 --- a/aiida/scheduler/plugins/test_pbspro.py +++ b/aiida/scheduler/plugins/test_pbspro.py @@ -14,7 +14,7 @@ import unittest import uuid from aiida.scheduler.plugins.pbspro import * -from aiida.scheduler.datastructures import JOB_STATES +from aiida.scheduler.datastructures import JobState text_qstat_f_to_test = """Job Id: 68350.mycluster Job_Name = cell-Qnormal @@ -780,27 +780,27 @@ def test_parse_common_joblist_output(self): job_running = 2 job_running_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING]) + and j.job_state == JobState.RUNNING]) self.assertEquals(job_running, job_running_parsed) job_held = 2 job_held_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.QUEUED_HELD]) + and j.job_state == JobState.QUEUED_HELD]) self.assertEquals(job_held, job_held_parsed) job_queued = 2 job_queued_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.QUEUED]) + and j.job_state == JobState.QUEUED]) self.assertEquals(job_queued, job_queued_parsed) running_users = ['user02', 'user3'] parsed_running_users = [j.job_owner for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING] + and j.job_state == JobState.RUNNING] self.assertEquals(set(running_users), set(parsed_running_users)) running_jobs = ['69301.mycluster', '74164.mycluster'] parsed_running_jobs = [j.job_id for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING] + and j.job_state == JobState.RUNNING] self.assertEquals(set(running_jobs), set(parsed_running_jobs)) for j in job_list: @@ -835,27 +835,27 @@ def test_parse_with_unexpected_newlines(self): job_running = 2 job_running_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING]) + and j.job_state == JobState.RUNNING]) self.assertEquals(job_running, job_running_parsed) job_held = 1 job_held_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.QUEUED_HELD]) + and j.job_state == JobState.QUEUED_HELD]) self.assertEquals(job_held, job_held_parsed) job_queued = 5 job_queued_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.QUEUED]) + and j.job_state == JobState.QUEUED]) self.assertEquals(job_queued, job_queued_parsed) running_users = ['somebody', 'user_556491'] parsed_running_users = [j.job_owner for j in job_list if 
j.job_state \ - and j.job_state == JOB_STATES.RUNNING] + and j.job_state == JobState.RUNNING] self.assertEquals(set(running_users), set(parsed_running_users)) running_jobs = ['555716', '556491'] parsed_running_jobs = [j.job_id for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING] + and j.job_state == JobState.RUNNING] self.assertEquals(set(running_jobs), set(parsed_running_jobs)) for j in job_list: @@ -907,7 +907,7 @@ def test_submit_script(self): Test to verify if scripts works fine with default options """ from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode scheduler = PbsproScheduler() @@ -920,7 +920,7 @@ def test_submit_script(self): code_info.cmdline_params = ["mpirun", "-np", "23", "pw.x", "-npool", "1"] code_info.stdin_name = 'aiida.in' job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = scheduler.get_submit_script(job_tmpl) @@ -936,7 +936,7 @@ def test_submit_script_bad_shebang(self): Test to verify if scripts works fine with default options """ from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode scheduler = PbsproScheduler() code_info = CodeInfo() @@ -951,7 +951,7 @@ def test_submit_script_bad_shebang(self): job_tmpl.shebang = shebang job_tmpl.job_resource = scheduler.create_job_resource(num_machines=1, num_mpiprocs_per_machine=1) job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = scheduler.get_submit_script(job_tmpl) @@ -964,7 +964,7 @@ def test_submit_script_with_num_cores_per_machine(self): num_cores_per_machine value. 
""" from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode scheduler = PbsproScheduler() @@ -978,7 +978,7 @@ def test_submit_script_with_num_cores_per_machine(self): code_info.cmdline_params = ["mpirun", "-np", "23", "pw.x", "-npool", "1"] code_info.stdin_name = 'aiida.in' job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = scheduler.get_submit_script(job_tmpl) @@ -996,7 +996,7 @@ def test_submit_script_with_num_cores_per_mpiproc(self): num_cores_per_mpiproc value """ from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode scheduler = PbsproScheduler() @@ -1010,7 +1010,7 @@ def test_submit_script_with_num_cores_per_mpiproc(self): code_info.cmdline_params = ["mpirun", "-np", "23", "pw.x", "-npool", "1"] code_info.stdin_name = 'aiida.in' job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = scheduler.get_submit_script(job_tmpl) @@ -1030,7 +1030,7 @@ def test_submit_script_with_num_cores_per_machine_and_mpiproc1(self): res.num_cores_per_mpiproc * res.num_mpiprocs_per_machine = res.num_cores_per_machine """ from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode scheduler = PbsproScheduler() @@ -1044,7 +1044,7 @@ def test_submit_script_with_num_cores_per_machine_and_mpiproc1(self): code_info.cmdline_params = ["mpirun", "-np", "23", "pw.x", "-npool", "1"] code_info.stdin_name = 'aiida.in' job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = scheduler.get_submit_script(job_tmpl) diff --git a/aiida/scheduler/plugins/test_sge.py b/aiida/scheduler/plugins/test_sge.py index 52001b514f..8e0acb1d67 100644 --- a/aiida/scheduler/plugins/test_sge.py +++ b/aiida/scheduler/plugins/test_sge.py @@ -244,23 +244,23 @@ def test_parse_joblist_output(self): # Check if different job states are realized: job_running = 1 job_running_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING]) + and j.job_state == JobState.RUNNING]) self.assertEquals(job_running, job_running_parsed) job_held = 1 job_held_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.QUEUED_HELD]) + and j.job_state == JobState.QUEUED_HELD]) self.assertEquals(job_held, job_held_parsed) job_queued = 1 job_queued_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.QUEUED]) + and j.job_state == JobState.QUEUED]) self.assertEquals(job_queued, job_queued_parsed) # check if job id is recognized: running_jobs = ['1212299'] parsed_running_jobs = [j.job_id for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING] + and j.job_state == JobState.RUNNING] self.assertEquals(set(running_jobs), set(parsed_running_jobs)) dispatch_time = [self._parse_time_string('2013-06-18T12:08:23')] @@ -276,7 +276,7 @@ def test_parse_joblist_output(self): running_jobs = [test_raw_data] parsed_running_jobs = [j.raw_data for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING] 
+ and j.job_state == JobState.RUNNING] self.assertEquals(set(running_jobs), set(parsed_running_jobs)) # job_list_raise=sge._parse_joblist_output(retval, \ diff --git a/aiida/scheduler/plugins/test_slurm.py b/aiida/scheduler/plugins/test_slurm.py index 949a5b9a8a..d9561c8890 100644 --- a/aiida/scheduler/plugins/test_slurm.py +++ b/aiida/scheduler/plugins/test_slurm.py @@ -53,23 +53,23 @@ def test_parse_common_joblist_output(self): job_running = 3 job_running_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING]) + and j.job_state == JobState.RUNNING]) self.assertEquals(job_running, job_running_parsed) job_held = 2 - job_held_parsed = len([j for j in job_list if j.job_state and j.job_state == JOB_STATES.QUEUED_HELD]) + job_held_parsed = len([j for j in job_list if j.job_state and j.job_state == JobState.QUEUED_HELD]) self.assertEquals(job_held, job_held_parsed) job_queued = 2 - job_queued_parsed = len([j for j in job_list if j.job_state and j.job_state == JOB_STATES.QUEUED]) + job_queued_parsed = len([j for j in job_list if j.job_state and j.job_state == JobState.QUEUED]) self.assertEquals(job_queued, job_queued_parsed) running_users = ['user5', 'user6'] - parsed_running_users = [j.job_owner for j in job_list if j.job_state and j.job_state == JOB_STATES.RUNNING] + parsed_running_users = [j.job_owner for j in job_list if j.job_state and j.job_state == JobState.RUNNING] self.assertEquals(set(running_users), set(parsed_running_users)) running_jobs = ['862538', '861352', '863553'] - parsed_running_jobs = [j.job_id for j in job_list if j.job_state and j.job_state == JOB_STATES.RUNNING] + parsed_running_jobs = [j.job_id for j in job_list if j.job_state and j.job_state == JobState.RUNNING] self.assertEquals(set(running_jobs), set(parsed_running_jobs)) self.assertEquals([j.requested_wallclock_time_seconds for j in job_list if j.job_id == '863553'][0], 30 * 60) @@ -152,7 +152,7 @@ def test_submit_script(self): Test the creation of a simple submission script. """ from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode scheduler = SlurmScheduler() @@ -165,7 +165,7 @@ def test_submit_script(self): code_info.cmdline_params = ["mpirun", "-np", "23", "pw.x", "-npool", "1"] code_info.stdin_name = 'aiida.in' job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = scheduler.get_submit_script(job_tmpl) @@ -180,7 +180,7 @@ def test_submit_script(self): def test_submit_script_bad_shebang(self): from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode scheduler = SlurmScheduler() code_info = CodeInfo() @@ -195,7 +195,7 @@ def test_submit_script_bad_shebang(self): job_tmpl.shebang = shebang job_tmpl.job_resource = scheduler.create_job_resource(num_machines=1, num_mpiprocs_per_machine=1) job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = scheduler.get_submit_script(job_tmpl) @@ -208,7 +208,7 @@ def test_submit_script_with_num_cores_per_machine(self): num_cores_per_machine value. 
""" from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode scheduler = SlurmScheduler() @@ -222,7 +222,7 @@ def test_submit_script_with_num_cores_per_machine(self): code_info.cmdline_params = ["mpirun", "-np", "23", "pw.x", "-npool", "1"] code_info.stdin_name = 'aiida.in' job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = scheduler.get_submit_script(job_tmpl) @@ -240,7 +240,7 @@ def test_submit_script_with_num_cores_per_mpiproc(self): Test to verify if scripts works fine if we pass only num_cores_per_mpiproc value """ from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode scheduler = SlurmScheduler() @@ -254,7 +254,7 @@ def test_submit_script_with_num_cores_per_mpiproc(self): code_info.cmdline_params = ["mpirun", "-np", "23", "pw.x", "-npool", "1"] code_info.stdin_name = 'aiida.in' job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = scheduler.get_submit_script(job_tmpl) @@ -275,7 +275,7 @@ def test_submit_script_with_num_cores_per_machine_and_mpiproc1(self): res.num_cores_per_mpiproc * res.num_mpiprocs_per_machine = res.num_cores_per_machine """ from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode scheduler = SlurmScheduler() @@ -289,7 +289,7 @@ def test_submit_script_with_num_cores_per_machine_and_mpiproc1(self): code_info.cmdline_params = ["mpirun", "-np", "23", "pw.x", "-npool", "1"] code_info.stdin_name = 'aiida.in' job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = scheduler.get_submit_script(job_tmpl) diff --git a/aiida/scheduler/plugins/test_torque.py b/aiida/scheduler/plugins/test_torque.py index 20a6aedd87..711099a7ca 100644 --- a/aiida/scheduler/plugins/test_torque.py +++ b/aiida/scheduler/plugins/test_torque.py @@ -13,7 +13,7 @@ from __future__ import absolute_import import unittest import uuid -from aiida.scheduler.datastructures import JOB_STATES +from aiida.scheduler.datastructures import JobState from aiida.scheduler.plugins.torque import * text_qstat_f_to_test = """Job Id: 68350.mycluster @@ -780,27 +780,27 @@ def test_parse_common_joblist_output(self): job_running = 2 job_running_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING]) + and j.job_state == JobState.RUNNING]) self.assertEquals(job_running, job_running_parsed) job_held = 2 job_held_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.QUEUED_HELD]) + and j.job_state == JobState.QUEUED_HELD]) self.assertEquals(job_held, job_held_parsed) job_queued = 2 job_queued_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.QUEUED]) + and j.job_state == JobState.QUEUED]) self.assertEquals(job_queued, job_queued_parsed) running_users = ['user02', 'user3'] parsed_running_users = [j.job_owner for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING] + and j.job_state == JobState.RUNNING] self.assertEquals(set(running_users), 
set(parsed_running_users)) running_jobs = ['69301.mycluster', '74164.mycluster'] parsed_running_jobs = [j.job_id for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING] + and j.job_state == JobState.RUNNING] self.assertEquals(set(running_jobs), set(parsed_running_jobs)) for j in job_list: @@ -835,27 +835,27 @@ def test_parse_with_unexpected_newlines(self): job_running = 2 job_running_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING]) + and j.job_state == JobState.RUNNING]) self.assertEquals(job_running, job_running_parsed) job_held = 1 job_held_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.QUEUED_HELD]) + and j.job_state == JobState.QUEUED_HELD]) self.assertEquals(job_held, job_held_parsed) job_queued = 5 job_queued_parsed = len([j for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.QUEUED]) + and j.job_state == JobState.QUEUED]) self.assertEquals(job_queued, job_queued_parsed) running_users = ['somebody', 'user_556491'] parsed_running_users = [j.job_owner for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING] + and j.job_state == JobState.RUNNING] self.assertEquals(set(running_users), set(parsed_running_users)) running_jobs = ['555716', '556491'] parsed_running_jobs = [j.job_id for j in job_list if j.job_state \ - and j.job_state == JOB_STATES.RUNNING] + and j.job_state == JobState.RUNNING] self.assertEquals(set(running_jobs), set(parsed_running_jobs)) for j in job_list: @@ -878,7 +878,7 @@ def test_submit_script(self): Test to verify if scripts works fine with default options """ from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode s = TorqueScheduler() @@ -891,7 +891,7 @@ def test_submit_script(self): code_info.cmdline_params = ["mpirun", "-np", "23", "pw.x", "-npool", "1"] code_info.stdin_name = 'aiida.in' job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = s.get_submit_script(job_tmpl) @@ -907,7 +907,7 @@ def test_submit_script_with_num_cores_per_machine(self): num_cores_per_machine value. 
""" from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode s = TorqueScheduler() @@ -921,7 +921,7 @@ def test_submit_script_with_num_cores_per_machine(self): code_info.cmdline_params = ["mpirun", "-np", "23", "pw.x", "-npool", "1"] code_info.stdin_name = 'aiida.in' job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = s.get_submit_script(job_tmpl) @@ -936,7 +936,7 @@ def test_submit_script_with_num_cores_per_mpiproc(self): num_cores_per_mpiproc value """ from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode scheduler = TorqueScheduler() @@ -950,7 +950,7 @@ def test_submit_script_with_num_cores_per_mpiproc(self): code_info.cmdline_params = ["mpirun", "-np", "23", "pw.x", "-npool", "1"] code_info.stdin_name = 'aiida.in' job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = scheduler.get_submit_script(job_tmpl) @@ -967,7 +967,7 @@ def test_submit_script_with_num_cores_per_machine_and_mpiproc1(self): res.num_cores_per_mpiproc * res.num_mpiprocs_per_machine = res.num_cores_per_machine """ from aiida.scheduler.datastructures import JobTemplate - from aiida.common.datastructures import CodeInfo, code_run_modes + from aiida.common.datastructures import CodeInfo, CodeRunMode scheduler = TorqueScheduler() @@ -981,7 +981,7 @@ def test_submit_script_with_num_cores_per_machine_and_mpiproc1(self): code_info.cmdline_params = ["mpirun", "-np", "23", "pw.x", "-npool", "1"] code_info.stdin_name = 'aiida.in' job_tmpl.codes_info = [code_info] - job_tmpl.codes_run_mode = code_run_modes.SERIAL + job_tmpl.codes_run_mode = CodeRunMode.SERIAL submit_script_text = scheduler.get_submit_script(job_tmpl) diff --git a/aiida/sphinxext/tests/reference_results/workchain.py2.xml b/aiida/sphinxext/tests/reference_results/workchain.py2.xml index 862f9809c1..d4e050173e 100644 --- a/aiida/sphinxext/tests/reference_results/workchain.py2.xml +++ b/aiida/sphinxext/tests/reference_results/workchain.py2.xml @@ -1,7 +1,7 @@ - +
sphinx-aiida demo This is a demo documentation to show off the features of the sphinx-aiida extension. @@ -13,7 +13,7 @@ A demo workchain to show how the workchain auto-documentation works. - Inputs:description, basestring, optional, non_dblabel, basestring, optional, non_dbnsp, Namespace – A separate namespace, nsp.nsp2, Namespacestore_provenance, bool, optional, non_dbx, Float, required – First input argument.y, Namespacez, Int, required – Input in a separate namespace. + Inputs:metadata, Namespacedescription, basestring, optional, non_dblabel, basestring, optional, non_dboptions, Namespacestore_provenance, bool, optional, non_dbnsp, Namespace – A separate namespace, nsp.nsp2, Namespacex, Float, required – First input argument.y, Namespacez, Int, required – Input in a separate namespace. Outputs:z, Bool, required – Output of the demoworkchain. @@ -38,7 +38,7 @@ A demo workchain to show how the workchain auto-documentation works. - Inputs:description, basestring, optional, non_dblabel, basestring, optional, non_dbnsp, Namespace – A separate namespace, nsp.nsp2, Namespacestore_provenance, bool, optional, non_dbx, Float, required – First input argument.y, Namespacez, Int, required – Input in a separate namespace. + Inputs:metadata, Namespacedescription, basestring, optional, non_dblabel, basestring, optional, non_dboptions, Namespacestore_provenance, bool, optional, non_dbnsp, Namespace – A separate namespace, nsp.nsp2, Namespacex, Float, required – First input argument.y, Namespacez, Int, required – Input in a separate namespace. Outputs:z, Bool, required – Output of the demoworkchain. diff --git a/aiida/sphinxext/tests/reference_results/workchain.py3.xml b/aiida/sphinxext/tests/reference_results/workchain.py3.xml index a8e2a69311..676611a4c8 100644 --- a/aiida/sphinxext/tests/reference_results/workchain.py3.xml +++ b/aiida/sphinxext/tests/reference_results/workchain.py3.xml @@ -1,7 +1,7 @@ - +
sphinx-aiida demo This is a demo documentation to show off the features of the sphinx-aiida extension. @@ -13,7 +13,7 @@ A demo workchain to show how the workchain auto-documentation works. - Inputs:description, str, optional, non_dblabel, str, optional, non_dbnsp, Namespace – A separate namespace, nsp.nsp2, Namespacestore_provenance, bool, optional, non_dbx, Float, required – First input argument.y, Namespacez, Int, required – Input in a separate namespace. + Inputs:metadata, Namespacedescription, str, optional, non_dblabel, str, optional, non_dboptions, Namespacestore_provenance, bool, optional, non_dbnsp, Namespace – A separate namespace, nsp.nsp2, Namespacex, Float, required – First input argument.y, Namespacez, Int, required – Input in a separate namespace. Outputs:z, Bool, required – Output of the demoworkchain. @@ -38,7 +38,7 @@ A demo workchain to show how the workchain auto-documentation works. - Inputs:description, str, optional, non_dblabel, str, optional, non_dbnsp, Namespace – A separate namespace, nsp.nsp2, Namespacestore_provenance, bool, optional, non_dbx, Float, required – First input argument.y, Namespacez, Int, required – Input in a separate namespace. + Inputs:metadata, Namespacedescription, str, optional, non_dblabel, str, optional, non_dboptions, Namespacestore_provenance, bool, optional, non_dbnsp, Namespace – A separate namespace, nsp.nsp2, Namespacex, Float, required – First input argument.y, Namespacez, Int, required – Input in a separate namespace. Outputs:z, Bool, required – Output of the demoworkchain. diff --git a/aiida/tools/dbexporters/tcod.py b/aiida/tools/dbexporters/tcod.py index b2039b9415..3b247bd1f3 100644 --- a/aiida/tools/dbexporters/tcod.py +++ b/aiida/tools/dbexporters/tcod.py @@ -535,8 +535,8 @@ def _collect_calculation_data(calc): this_calc['files'].append(f) for f in files_out: - if os.path.basename(f['name']) != calc._SCHED_OUTPUT_FILE and \ - os.path.basename(f['name']) != calc._SCHED_ERROR_FILE: + if os.path.basename(f['name']) != calc.get_option('scheduler_stdout') and \ + os.path.basename(f['name']) != calc.get_option('scheduler_stderr'): if 'role' not in f.keys(): f['role'] = 'output' this_calc['files'].append(f) @@ -1052,13 +1052,13 @@ def export_cifnode(what, parameters=None, trajectory_index=None, if reduce_symmetry: from aiida.orm.data.cif import refine_inline - ret_dict = refine_inline(node=node, store_provenance=store) + ret_dict = refine_inline(node=node, metadata={'store_provenance': store}) node = ret_dict['cif'] # Addition of the metadata args = ParameterData(dict=kwargs) - function_args = {'what': what, 'args': args, 'store_provenance': store} + function_args = {'what': what, 'args': args, 'metadata': {'store_provenance': store}} if node != what: function_args['node'] = node if parameters is not None: diff --git a/aiida/tools/dbimporters/baseclasses.py b/aiida/tools/dbimporters/baseclasses.py index f64a0afae7..1deff726f4 100644 --- a/aiida/tools/dbimporters/baseclasses.py +++ b/aiida/tools/dbimporters/baseclasses.py @@ -15,8 +15,6 @@ import six -from aiida.orm.calculation.inline import optional_inline - class DbImporter(object): """ diff --git a/aiida/work/__init__.py b/aiida/work/__init__.py index 87b1d5a340..a2437999f1 100644 --- a/aiida/work/__init__.py +++ b/aiida/work/__init__.py @@ -14,7 +14,6 @@ from .exceptions import * from .exit_code import * from .futures import * -from .job_processes import * from .launch import * from .persistence import * from .processes import * @@ -28,7 +27,6 @@ __all__ = (exceptions.__all__ + 
exit_code.__all__ + futures.__all__ + - job_processes.__all__ + launch.__all__ + persistence.__all__ + processes.__all__ + diff --git a/aiida/work/calcjob.py b/aiida/work/calcjob.py new file mode 100644 index 0000000000..813a2536fa --- /dev/null +++ b/aiida/work/calcjob.py @@ -0,0 +1,385 @@ +# -*- coding: utf-8 -*- +"""Implementation of the CalcJob process.""" +from __future__ import absolute_import +import six + +import plumpy + +from aiida import orm +from aiida.common import exceptions +from aiida.common.lang import override +from aiida.common.links import LinkType +from aiida.orm.data.folder import FolderData +from aiida.orm.data.remote import RemoteData +from aiida.orm.data.singlefile import SinglefileData +from aiida.orm.node.process import CalcJobNode + +from .job_processes import Waiting, UPLOAD_COMMAND +from .processes import Process, ProcessState + + +class CalcJob(Process): + """Implementation of the CalcJob process.""" + + _node_class = CalcJobNode + + @classmethod + def define(cls, spec): + # yapf: disable + super(CalcJob, cls).define(spec) + spec.input('code', valid_type=orm.Code, help='The Code to use for this job.') + spec.input('metadata.options.input_filename', valid_type=six.string_types, required=False, + help='Filename to which the input for the code that is to be run will be written.') + spec.input('metadata.options.output_filename', valid_type=six.string_types, required=False, + help='Filename to which the content of stdout of the code that is to be run will be written.') + spec.input('metadata.options.scheduler_stdout', valid_type=six.string_types, default='_scheduler-stdout.txt', + help='Filename to which the content of stdout of the scheduler will be written.') + spec.input('metadata.options.scheduler_stderr', valid_type=six.string_types, default='_scheduler-stderr.txt', + help='Filename to which the content of stderr of the scheduler will be written.') + spec.input('metadata.options.resources', valid_type=dict, non_db=True, required=True, + help='Set the dictionary of resources to be used by the scheduler plugin, like the number of nodes, ' + 'cpus etc. This dictionary is scheduler-plugin dependent. Look at the documentation of the ' + 'scheduler for more details.') + spec.input('metadata.options.max_wallclock_seconds', valid_type=int, non_db=True, required=False, + help='Set the wallclock in seconds asked to the scheduler') + spec.input('metadata.options.custom_scheduler_commands', valid_type=six.string_types, non_db=True, default='', + help='Set a (possibly multiline) string with the commands that the user wants to manually set for the ' + 'scheduler. 
The difference of this option with respect to the `prepend_text` is the position in '
+                 'the scheduler submission file where such text is inserted: with this option, the string is '
+                 'inserted before any non-scheduler command')
+        spec.input('metadata.options.queue_name', valid_type=six.string_types, non_db=True, required=False,
+            help='Set the name of the queue on the remote computer')
+        spec.input('metadata.options.account', valid_type=six.string_types, non_db=True, required=False,
+            help='Set the account to use for the queue on the remote computer')
+        spec.input('metadata.options.qos', valid_type=six.string_types, non_db=True, required=False,
+            help='Set the quality of service to use for the queue on the remote computer')
+        spec.input('metadata.options.computer', valid_type=orm.Computer, non_db=True, required=False,
+            help='Set the computer to be used by the calculation')
+        spec.input('metadata.options.withmpi', valid_type=bool, non_db=True, default=True,
+            help='Set the calculation to use MPI',)
+        spec.input('metadata.options.mpirun_extra_params', valid_type=(list, tuple), non_db=True, default=[],
+            help='Set the extra params to pass to the mpirun (or equivalent) command after the one provided in '
+                 'computer.mpirun_command. Example: mpirun -np 8 extra_params[0] extra_params[1] ... exec.x',)
+        spec.input('metadata.options.import_sys_environment', valid_type=bool, non_db=True, default=True,
+            help='If set to true, the submission script will load the system environment variables',)
+        spec.input('metadata.options.environment_variables', valid_type=dict, non_db=True, default={},
+            help='Set a dictionary of custom environment variables for this calculation',)
+        spec.input('metadata.options.priority', valid_type=six.string_types[0], non_db=True, required=False,
+            help='Set the priority of the job to be queued')
+        spec.input('metadata.options.max_memory_kb', valid_type=int, non_db=True, required=False,
+            help='Set the maximum memory (in kilobytes) to be asked to the scheduler')
+        spec.input('metadata.options.prepend_text', valid_type=six.string_types[0], non_db=True, default='',
+            help='Set the calculation-specific prepend text, which is going to be prepended in the scheduler-job '
+                 'script, just before the code execution',)
+        spec.input('metadata.options.append_text', valid_type=six.string_types[0], non_db=True, default='',
+            help='Set the calculation-specific append text, which is going to be appended in the scheduler-job '
+                 'script, just after the code execution',)
+        spec.input('metadata.options.parser_name', valid_type=six.string_types[0], non_db=True, required=False,
+            help='Set a string for the output parser. 
Can be None if no output plugin is available or needed') + + spec.output('remote_folder', valid_type=RemoteData) + spec.output('retrieved', valid_type=FolderData) + + spec.exit_code(10, 'ERROR_PARSING_FAILED', message='the parsing of the job failed') + spec.exit_code(20, 'ERROR_FAILED', message='the job failed for an unspecified reason') + + @classmethod + def get_state_classes(cls): + # Overwrite the waiting state + states_map = super(CalcJob, cls).get_state_classes() + states_map[ProcessState.WAITING] = Waiting + return states_map + + @override + def run(self): + """Run the calculation, we put it in the TOSUBMIT state and then wait for it to be completed.""" + from aiida.orm import Code, load_node + from aiida.common.folders import SandboxFolder + from aiida.common.exceptions import InputValidationError + + # The following conditional is required for the caching to properly work. Even if the source node has a process + # state of `Finished` the cached process will still enter the running state. The process state will have then + # been overridden by the engine to `Running` so we cannot check that, but if the `exit_status` is anything other + # than `None`, it should mean this node was taken from the cache, so the process should not be rerun. + if self.node.exit_status is not None: + return self.node.exit_status + + with SandboxFolder() as folder: + computer = self.node.get_computer() + if self.node.has_cached_links(): + raise exceptions.InvalidOperation('calculation node has unstored links in cache') + calc_info, script_filename = self.presubmit(folder) + input_codes = [load_node(_.code_uuid, sub_classes=(Code,)) for _ in calc_info.codes_info] + + for code in input_codes: + if not code.can_run_on(computer): + raise InputValidationError( + 'The selected code {} for calculation {} cannot run on computer {}'.format( + code.pk, self.node.pk, computer.name)) + + # After this call, no modifications to the folder should be done + self.node._store_raw_input_folder(folder.abspath) # pylint: disable=protected-access + + # Launch the upload operation + return plumpy.Wait(msg='Waiting to upload', data=(UPLOAD_COMMAND, calc_info, script_filename)) + + def prepare_for_submission(self, folder): + """Docs.""" + raise NotImplementedError + + def retrieved(self, retrieved_temporary_folder=None): + """ + Parse a retrieved job calculation. This is called once it's finished waiting + for the calculation to be finished and the data has been retrieved. + """ + import shutil + from aiida.daemon import execmanager + + try: + exit_code = execmanager.parse_results(self.node, retrieved_temporary_folder) + finally: + # Delete the temporary folder + try: + shutil.rmtree(retrieved_temporary_folder) + except OSError as exception: + if exception.errno != 2: + raise + + # Finally link up the outputs and we're done + for entry in self.node.get_outgoing(): + self.out(entry.link_label, entry.node) + + return exit_code + + def presubmit(self, folder): + """ + Prepares the calculation folder with all inputs, ready to be copied to the cluster. + + :param folder: a SandboxFolder, empty in input, that will be filled with + calculation input files and the scheduling script. + + :return calcinfo: the CalcInfo object containing the information + needed by the daemon to handle operations. 
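+        :return script_filename: the name of the submission script that is written to the folder.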
+ """ + # pylint: disable=too-many-locals,too-many-statements,too-many-branches + import os + + from six.moves import StringIO + + from aiida.common.exceptions import PluginInternalError, ValidationError + from aiida.scheduler.datastructures import JobTemplate + from aiida.common.utils import validate_list_of_string_tuples + from aiida.orm import DataFactory + from aiida.common.datastructures import CodeInfo, CodeRunMode + from aiida.orm.code import Code + from aiida.orm.computers import Computer + from aiida.orm.utils import load_node + import aiida.common.json as json + + computer = self.node.get_computer() + inputs = self.node.get_incoming(link_type=LinkType.INPUT_CALC) + + codes = [_ for _ in inputs.all_nodes() if isinstance(_, Code)] + + calcinfo = self.prepare_for_submission(folder) + scheduler = computer.get_scheduler() + + for code in codes: + if code.is_local(): + if code.get_local_executable() in folder.get_content_list(): + raise PluginInternalError("The plugin created a file {} that is also " + "the executable name!".format(code.get_local_executable())) + + # I create the job template to pass to the scheduler + job_tmpl = JobTemplate() + job_tmpl.shebang = computer.get_shebang() + job_tmpl.submit_as_hold = False + job_tmpl.rerunnable = False + job_tmpl.job_environment = {} + # 'email', 'email_on_started', 'email_on_terminated', + job_tmpl.job_name = 'aiida-{}'.format(self.node.pk) + job_tmpl.sched_output_path = self.options.scheduler_stdout + if self.options.scheduler_stderr == self.options.scheduler_stdout: + job_tmpl.sched_join_files = True + else: + job_tmpl.sched_error_path = self.options.scheduler_stderr + job_tmpl.sched_join_files = False + + # Set retrieve path, add also scheduler STDOUT and STDERR + retrieve_list = (calcinfo.retrieve_list if calcinfo.retrieve_list is not None else []) + if (job_tmpl.sched_output_path is not None and job_tmpl.sched_output_path not in retrieve_list): + retrieve_list.append(job_tmpl.sched_output_path) + if not job_tmpl.sched_join_files: + if (job_tmpl.sched_error_path is not None and job_tmpl.sched_error_path not in retrieve_list): + retrieve_list.append(job_tmpl.sched_error_path) + self.node._set_retrieve_list(retrieve_list) # pylint: disable=protected-access + + retrieve_singlefile_list = (calcinfo.retrieve_singlefile_list + if calcinfo.retrieve_singlefile_list is not None else []) + # a validation on the subclasses of retrieve_singlefile_list + for _, subclassname, _ in retrieve_singlefile_list: + file_sub_class = DataFactory(subclassname) + if not issubclass(file_sub_class, SinglefileData): + raise PluginInternalError( + "[presubmission of calc {}] retrieve_singlefile_list subclass problem: {} is " + "not subclass of SinglefileData".format(self.node.pk, file_sub_class.__name__)) + self.node._set_retrieve_singlefile_list(retrieve_singlefile_list) # pylint: disable=protected-access + + # Handle the retrieve_temporary_list + retrieve_temporary_list = (calcinfo.retrieve_temporary_list + if calcinfo.retrieve_temporary_list is not None else []) + self.node._set_retrieve_temporary_list(retrieve_temporary_list) # pylint: disable=protected-access + + # the if is done so that if the method returns None, this is + # not added. 
This has two advantages: + # - it does not add too many \n\n if most of the prepend_text are empty + # - most importantly, skips the cases in which one of the methods + # would return None, in which case the join method would raise + # an exception + prepend_texts = [computer.get_prepend_text()] + \ + [code.get_prepend_text() for code in codes] + \ + [calcinfo.prepend_text, self.node.get_option('prepend_text')] + job_tmpl.prepend_text = '\n\n'.join(prepend_text for prepend_text in prepend_texts if prepend_text) + + append_texts = [self.node.get_option('append_text'), calcinfo.append_text] + \ + [code.get_append_text() for code in codes] + \ + [computer.get_append_text()] + job_tmpl.append_text = '\n\n'.join(append_text for append_text in append_texts if append_text) + + # Set resources, also with get_default_mpiprocs_per_machine + resources = self.node.get_option('resources') + def_cpus_machine = computer.get_default_mpiprocs_per_machine() + if def_cpus_machine is not None: + resources['default_mpiprocs_per_machine'] = def_cpus_machine + job_tmpl.job_resource = scheduler.create_job_resource(**resources) + + subst_dict = {'tot_num_mpiprocs': job_tmpl.job_resource.get_tot_num_mpiprocs()} + + for key, value in job_tmpl.job_resource.items(): + subst_dict[key] = value + mpi_args = [arg.format(**subst_dict) for arg in computer.get_mpirun_command()] + extra_mpirun_params = self.node.get_option('mpirun_extra_params') # same for all codes in the same calc + + # set the codes_info + if not isinstance(calcinfo.codes_info, (list, tuple)): + raise PluginInternalError("codes_info passed to CalcInfo must be a list of CalcInfo objects") + + codes_info = [] + for code_info in calcinfo.codes_info: + + if not isinstance(code_info, CodeInfo): + raise PluginInternalError("Invalid codes_info, must be a list of CodeInfo objects") + + if code_info.code_uuid is None: + raise PluginInternalError("CalcInfo should have " + "the information of the code " + "to be launched") + this_code = load_node(code_info.code_uuid, sub_classes=(Code,)) + + this_withmpi = code_info.withmpi # to decide better how to set the default + if this_withmpi is None: + if len(calcinfo.codes_info) > 1: + raise PluginInternalError("For more than one code, it is " + "necessary to set withmpi in " + "codes_info") + else: + this_withmpi = self.node.get_option('withmpi') + + if this_withmpi: + this_argv = (mpi_args + extra_mpirun_params + [this_code.get_execname()] + + (code_info.cmdline_params if code_info.cmdline_params is not None else [])) + else: + this_argv = [this_code.get_execname()] + (code_info.cmdline_params + if code_info.cmdline_params is not None else []) + + # overwrite the old cmdline_params and add codename and mpirun stuff + code_info.cmdline_params = this_argv + + codes_info.append(code_info) + job_tmpl.codes_info = codes_info + + # set the codes execution mode + + if len(codes) > 1: + try: + job_tmpl.codes_run_mode = calcinfo.codes_run_mode + except KeyError: + raise PluginInternalError("Need to set the order of the code execution (parallel or serial?)") + else: + job_tmpl.codes_run_mode = CodeRunMode.SERIAL + ######################################################################## + + custom_sched_commands = self.node.get_option('custom_scheduler_commands') + if custom_sched_commands: + job_tmpl.custom_scheduler_commands = custom_sched_commands + + job_tmpl.import_sys_environment = self.node.get_option('import_sys_environment') + + job_tmpl.job_environment = self.node.get_option('environment_variables') + + queue_name = 
self.node.get_option('queue_name')
+        account = self.node.get_option('account')
+        qos = self.node.get_option('qos')
+        if queue_name is not None:
+            job_tmpl.queue_name = queue_name
+        if account is not None:
+            job_tmpl.account = account
+        if qos is not None:
+            job_tmpl.qos = qos
+        priority = self.node.get_option('priority')
+        if priority is not None:
+            job_tmpl.priority = priority
+        max_memory_kb = self.node.get_option('max_memory_kb')
+        if max_memory_kb is not None:
+            job_tmpl.max_memory_kb = max_memory_kb
+        max_wallclock_seconds = self.node.get_option('max_wallclock_seconds')
+        if max_wallclock_seconds is not None:
+            job_tmpl.max_wallclock_seconds = max_wallclock_seconds
+
+        script_filename = '_aiidasubmit.sh'
+        script_content = scheduler.get_submit_script(job_tmpl)
+        folder.create_file_from_filelike(StringIO(script_content), script_filename)
+
+        subfolder = folder.get_subfolder('.aiida', create=True)
+        subfolder.create_file_from_filelike(StringIO(json.dumps(job_tmpl)), 'job_tmpl.json')
+        subfolder.create_file_from_filelike(StringIO(json.dumps(calcinfo)), 'calcinfo.json')
+
+        if calcinfo.local_copy_list is None:
+            calcinfo.local_copy_list = []
+
+        if calcinfo.remote_copy_list is None:
+            calcinfo.remote_copy_list = []
+
+        # Some validation
+        this_pk = self.node.pk if self.node.pk is not None else "[UNSTORED]"
+        local_copy_list = calcinfo.local_copy_list
+        try:
+            validate_list_of_string_tuples(local_copy_list, tuple_length=2)
+        except ValidationError as exc:
+            raise PluginInternalError("[presubmission of calc {}] "
+                                      "local_copy_list format problem: {}".format(this_pk, exc))
+
+        remote_copy_list = calcinfo.remote_copy_list
+        try:
+            validate_list_of_string_tuples(remote_copy_list, tuple_length=3)
+        except ValidationError as exc:
+            raise PluginInternalError("[presubmission of calc {}] "
+                                      "remote_copy_list format problem: {}".format(this_pk, exc))
+
+        for (remote_computer_uuid, _, dest_rel_path) in remote_copy_list:
+            try:
+                Computer.objects.get(uuid=remote_computer_uuid)  # pylint: disable=unused-variable
+            except exceptions.NotExistent:
+                raise PluginInternalError("[presubmission of calc {}] "
+                                          "The remote copy requires a computer with UUID={} "
+                                          "but no such computer was found in the "
+                                          "database".format(this_pk, remote_computer_uuid))
+            if os.path.isabs(dest_rel_path):
+                raise PluginInternalError("[presubmission of calc {}] "
+                                          "The destination path of the remote copy "
+                                          "is absolute! 
({})".format(this_pk, dest_rel_path)) + + return calcinfo, script_filename diff --git a/aiida/work/job_calcs.py b/aiida/work/job_calcs.py index 345fa241b6..87b84957db 100644 --- a/aiida/work/job_calcs.py +++ b/aiida/work/job_calcs.py @@ -25,7 +25,7 @@ from aiida.common import exceptions from .utils import RefObjectStore -__all__ = tuple() +__all__ = ('JobsList', 'JobManager') class JobsList(object): # pylint: disable=useless-object-inheritance @@ -93,7 +93,7 @@ def _get_jobs_from_scheduler(self): for job_id, job_info in iteritems(scheduler_response): # If the job is done then get detailed job information detailed_job_info = None - if job_info.job_state == schedulers.JOB_STATES.DONE: + if job_info.job_state == schedulers.JobState.DONE: try: detailed_job_info = scheduler.get_detailed_jobinfo(job_id) except exceptions.FeatureNotAvailable: diff --git a/aiida/work/job_processes.py b/aiida/work/job_processes.py index 9c38c45e1c..d31b433d20 100644 --- a/aiida/work/job_processes.py +++ b/aiida/work/job_processes.py @@ -12,7 +12,6 @@ from __future__ import absolute_import import functools import logging -import shutil import sys import tempfile @@ -20,21 +19,14 @@ from tornado.gen import coroutine, Return import plumpy -from plumpy.ports import PortNamespace -from aiida.common.datastructures import calc_states, is_progressive_state_change +from aiida.common.datastructures import CalcJobState from aiida.common.exceptions import TransportTaskException -from aiida.common import exceptions -from aiida.common.lang import override from aiida.daemon import execmanager -from aiida.orm.node.process import CalcJobNode -from aiida.scheduler.datastructures import JOB_STATES -from aiida.work.process_builder import JobProcessBuilder +from aiida.scheduler.datastructures import JobState from aiida.work.utils import exponential_backoff_retry, interruptable_task -from . import persistence from . 
import processes

-__all__ = ('JobProcess',)

 UPLOAD_COMMAND = 'upload'
 SUBMIT_COMMAND = 'submit'
@@ -67,18 +59,15 @@ def task_upload_job(node, transport_queue, calc_info, script_filename, cancellab
     :raises: Return if the tasks was successfully completed
     :raises: TransportTaskException if after the maximum number of retries the transport task still excepted
     """
+    if node.get_state() == CalcJobState.SUBMITTING:
+        logger.warning('calculation<{}> already marked as SUBMITTING, skipping task_upload_job'.format(node.pk))
+        raise Return(True)
+
     initial_interval = TRANSPORT_TASK_RETRY_INITIAL_INTERVAL
     max_attempts = TRANSPORT_TASK_MAXIMUM_ATTEMTPS

     authinfo = node.get_computer().get_authinfo(node.get_user())

-    state_pending = calc_states.SUBMITTING
-
-    if is_progressive_state_change(node.get_state(), state_pending):
-        node._set_state(state_pending)
-    else:
-        logger.warning('ignored invalid proposed state change: {} to {}'.format(node.get_state(), state_pending))
-
     @coroutine
     def do_upload():
         with transport_queue.request_transport(authinfo) as request:
@@ -97,6 +86,7 @@ def do_upload():
         raise TransportTaskException('upload_calculation failed {} times consecutively'.format(max_attempts))
     else:
         logger.info('uploading calculation<{}> successful'.format(node.pk))
+        node._set_state(CalcJobState.SUBMITTING)
         raise Return(result)
@@ -119,7 +109,7 @@ def task_submit_job(node, transport_queue, calc_info, script_filename, cancellab
     :raises: Return if the tasks was successfully completed
     :raises: TransportTaskException if after the maximum number of retries the transport task still excepted
     """
-    if node.get_state() == calc_states.WITHSCHEDULER:
+    if node.get_state() == CalcJobState.WITHSCHEDULER:
         assert node.get_job_id() is not None, 'job is WITHSCHEDULER, however, it does not have a job id'
         logger.warning('calculation<{}> already marked as WITHSCHEDULER, skipping task_submit_job'.format(node.pk))
         raise Return(node.get_job_id())
@@ -147,7 +137,7 @@ def do_submit():
         raise TransportTaskException('submit_calculation failed {} times consecutively'.format(max_attempts))
     else:
         logger.info('submitting calculation<{}> successful'.format(node.pk))
-        node._set_state(calc_states.WITHSCHEDULER)
+        node._set_state(CalcJobState.WITHSCHEDULER)
         raise Return(result)
@@ -162,15 +152,15 @@ def task_update_job(node, job_manager, cancellable):
     If all retries fail, the task will raise a TransportTaskException

     :param node: the node that represents the job calculation
-    :type node: :class:`aiida.orm.calculation.CalcJobNode`
+    :type node: :class:`aiida.orm.node.process.calculation.calcjob.CalcJobNode`
     :param job_manager: The job manager
     :type job_manager: :class:`aiida.work.job_calcs.JobManager`
     :param cancellable: A cancel flag
     :type cancellable: :class:`aiida.work.utils.InterruptableFuture`
     :raises: Return containing True if the tasks was successfully completed, False otherwise
     """
-    if node.get_state() == calc_states.COMPUTED:
-        logger.warning('calculation<{}> already marked as COMPUTED, skipping task_update_job'.format(node.pk))
+    if node.get_state() == CalcJobState.RETRIEVING:
+        logger.warning('calculation<{}> already marked as RETRIEVING, skipping task_update_job'.format(node.pk))
         raise Return(True)

     initial_interval = TRANSPORT_TASK_RETRY_INITIAL_INTERVAL
@@ -187,12 +177,12 @@ def do_update():

             if job_info is None:
                 # If the job is computed or not found assume it's done
-                node._set_scheduler_state(JOB_STATES.DONE)
+                node._set_scheduler_state(JobState.DONE)
                 job_done = True
             else:
                 node._set_last_jobinfo(job_info)
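                # Mirror the latest scheduler-reported job state on the calculation node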
node._set_scheduler_state(job_info.job_state) - job_done = job_info.job_state == JOB_STATES.DONE + job_done = job_info.job_state == JobState.DONE raise Return(job_done) @@ -207,7 +197,7 @@ def do_update(): else: logger.info('updating calculation<{}> successful'.format(node.pk)) if job_done: - node._set_state(calc_states.COMPUTED) + node._set_state(CalcJobState.RETRIEVING) raise Return(job_done) @@ -229,7 +219,7 @@ def task_retrieve_job(node, transport_queue, retrieved_temporary_folder, cancell :raises: Return if the tasks was successfully completed :raises: TransportTaskException if after the maximum number of retries the transport task still excepted """ - if node.get_state() == calc_states.PARSING: + if node.get_state() == CalcJobState.PARSING: logger.warning('calculation<{}> already marked as PARSING, skipping task_retrieve_job'.format(node.pk)) raise Return(True) @@ -246,13 +236,6 @@ def do_retrieve(): logger.info('retrieving calculation<{}>'.format(node.pk)) raise Return(execmanager.retrieve_calculation(node, transport, retrieved_temporary_folder)) - state_pending = calc_states.RETRIEVING - - if is_progressive_state_change(node.get_state(), state_pending): - node._set_state(state_pending) - else: - logger.warning('ignored invalid proposed state change: {} to {}'.format(node.get_state(), state_pending)) - try: result = yield exponential_backoff_retry( do_retrieve, initial_interval, max_attempts, logger=node.logger, ignore_exceptions=plumpy.Interruption) @@ -262,7 +245,7 @@ def do_retrieve(): logger.warning('retrieving calculation<{}> failed'.format(node.pk)) raise TransportTaskException('retrieve_calculation failed {} times consecutively'.format(max_attempts)) else: - node._set_state(calc_states.PARSING) + node._set_state(CalcJobState.PARSING) logger.info('retrieving calculation<{}> successful'.format(node.pk)) raise Return(result) @@ -287,7 +270,7 @@ def task_kill_job(node, transport_queue, cancellable): initial_interval = TRANSPORT_TASK_RETRY_INITIAL_INTERVAL max_attempts = TRANSPORT_TASK_MAXIMUM_ATTEMTPS - if node.get_state() in [calc_states.NEW, calc_states.TOSUBMIT, calc_states.SUBMITTING]: + if node.get_state() in [CalcJobState.UPLOADING, CalcJobState.SUBMITTING]: logger.warning('calculation<{}> killed, it was in the {} state'.format(node.pk, node.get_state())) raise Return(True) @@ -309,7 +292,7 @@ def do_kill(): raise TransportTaskException('kill_calculation failed {} times consecutively'.format(max_attempts)) else: logger.info('killing calculation<{}> successful'.format(node.pk)) - node._set_scheduler_state(JOB_STATES.DONE) + node._set_scheduler_state(JobState.DONE) raise Return(result) @@ -331,7 +314,7 @@ def load_instance_state(self, saved_state, load_context): @coroutine def execute(self): - calculation = self.process.calc + calculation = self.process.node transport_queue = self.process.runner.transport if isinstance(self.data, tuple): @@ -450,8 +433,8 @@ def retrieve(self): def retrieved(self, retrieved_temporary_folder): """ Create the next state to go to after retrieving - :param retrieved_temporary_folder: The temporary folder used in retrieving, this will - be used in parsing. + + :param retrieved_temporary_folder: The temporary folder used in retrieving, this will be used in parsing. 
:return: The appropriate RUNNING state """ return self.create_state( @@ -468,244 +451,3 @@ def interrupt(self, reason): if self._killing is None: self._killing = plumpy.Future() return self._killing - - -class JobProcess(processes.Process): - TRANSPORT_OPERATION = 'TRANSPORT_OPERATION' - CALC_NODE_LABEL = 'calc_node' - OPTIONS_INPUT_LABEL = 'options' - - _calc_class = None - - @classmethod - def get_builder(cls): - return JobProcessBuilder(cls) - - @classmethod - def build(cls, calc_class): - from aiida.orm.data import Data - - def define(cls_, spec): - super(JobProcess, cls_).define(spec) - - spec.input_namespace(cls.OPTIONS_INPUT_LABEL, help='various options') - for key, option in calc_class.options.items(): - spec.input( - '{}.{}'.format(cls.OPTIONS_INPUT_LABEL, key), - required=option.get('required', True), - valid_type=option.get('valid_type', object), # Should match everything, as in all types are valid - non_db=option.get('non_db', True), - help=option.get('help', '') - ) - - # Define the actual inputs based on the use methods of the calculation class - for key, use_method in calc_class._use_methods.items(): - - valid_type = use_method['valid_types'] - docstring = use_method.get('docstring', None) - additional_parameter = use_method.get('additional_parameter') - - if additional_parameter: - spec.input_namespace(key, help=docstring, valid_type=valid_type, required=False, dynamic=True) - else: - spec.input(key, help=docstring, valid_type=valid_type, required=False) - - # Outputs - spec.outputs.valid_type = Data - - dynamic_class_name = persistence.get_object_loader().identify_object(calc_class) - class_name = '{}_{}'.format(cls.__name__, dynamic_class_name) - - # Dynamically create the type for this Process - return type( - class_name, (cls,), - { - plumpy.Process.define.__name__: classmethod(define), - '_calc_class': calc_class - } - ) - - @classmethod - def get_state_classes(cls): - # Overwrite the waiting state - states_map = super(JobProcess, cls).get_state_classes() - states_map[processes.ProcessState.WAITING] = Waiting - return states_map - - # region Process overrides - - @override - def on_killed(self): - super(JobProcess, self).on_killed() - self.calc._set_state(calc_states.FAILED) - - @override - def update_outputs(self): - # DO NOT REMOVE: - # Don't do anything, this is taken care of by the job calculation node itself - pass - - @override - def get_or_create_db_record(self): - return self._calc_class() - - @property - def process_class(self): - """ - Return the class that represents this Process, for the JobProcess this is CalcJobNode class it wraps. - - For a standard Process or sub class of Process, this is the class itself. However, for legacy reasons, - the Process class is a wrapper around another class. This function returns that original class, i.e. the - class that really represents what was being executed. - """ - return self._calc_class - - @override - def _setup_db_inputs(self): - """ - Create the links that connect the inputs to the calculation node that represents this Process - - For a JobProcess, the inputs also need to be mapped onto the `use_` and `set_` methods of the - legacy CalcJobNode class. If a code is defined in the inputs and no computer has been set - yet for the calculation node, the computer configured for the code is used to set on the node. 
- """ - for name, input_value in self.get_provenance_inputs_iterator(): - - port = self.spec().inputs[name] - - if input_value is None or getattr(port, 'non_db', False): - continue - - # Call the 'set' attribute methods for the contents of the 'option' namespace - if name == self.OPTIONS_INPUT_LABEL: - for option_name, option_value in input_value.items(): - self.calc.set_option(option_name, option_value) - continue - - # Call the 'use' methods to set up the data-calc links - if isinstance(port, PortNamespace): - additional = self._calc_class._use_methods[name]['additional_parameter'] - - for k, v in input_value.items(): - try: - getattr(self.calc, 'use_{}'.format(name))(v, **{additional: k}) - except AttributeError: - raise AttributeError( - "You have provided for an input the key '{}' but" - "the CalcJobNode has no such use_{} method".format(name, name)) - - else: - getattr(self.calc, 'use_{}'.format(name))(input_value) - - # Get the computer from the code if necessary - if self.calc.get_computer() is None and 'code' in self.inputs: - code = self.inputs['code'] - if not code.is_local(): - self.calc.set_computer(code.get_remote_computer()) - - # endregion - - @override - def run(self): - """ - Run the calculation, we put it in the TOSUBMIT state and then wait for it - to be copied over, submitted, retrieved, etc. - """ - from aiida.orm import Code, load_node - from aiida.common.folders import SandboxFolder - from aiida.common.exceptions import InputValidationError - - # Note that the caching mechanism relies on this as it will always enter the run method, even when finished - if self.calc.get_state() == calc_states.FINISHED: - return 0 - - state_current = self.calc.get_state() - state_pending = calc_states.TOSUBMIT - - if is_progressive_state_change(state_current, state_pending): - self.calc._set_state(state_pending) - else: - logger.warning('ignored invalid proposed state change: {} to {}'.format(state_current, state_pending)) - - with SandboxFolder() as folder: - computer = self.calc.get_computer() - if self.calc.has_cached_links(): - raise exceptions.InvalidOperation("This calculation still has links in cache that " - "are not stored in database yet") - calc_info, script_filename = self.calc._presubmit(folder) - input_codes = [load_node(_.code_uuid, sub_classes=(Code,)) for _ in calc_info.codes_info] - - for code in input_codes: - if not code.can_run_on(computer): - raise InputValidationError( - 'The selected code {} for calculation {} cannot run on computer {}'.format( - code.pk, self.calc.pk, computer.name)) - - # After this call, no modifications to the folder should be done - self.calc._store_raw_input_folder(folder.abspath) - - # Launch the upload operation - return plumpy.Wait(msg='Waiting to upload', data=(UPLOAD_COMMAND, calc_info, script_filename)) - - def retrieved(self, retrieved_temporary_folder=None): - """ - Parse a retrieved job calculation. This is called once it's finished waiting - for the calculation to be finished and the data has been retrieved. 
- """ - try: - exit_code = execmanager.parse_results(self.calc, retrieved_temporary_folder) - except Exception: - try: - self.calc._set_state(calc_states.PARSINGFAILED) - except exceptions.ModificationNotAllowed: - pass - raise - finally: - # Delete the temporary folder - try: - shutil.rmtree(retrieved_temporary_folder) - except OSError as exception: - if exception.errno != 2: - raise - - # Finally link up the outputs and we're done - for entry in self.calc.get_outgoing(): - self.out(entry.link_label, entry.node) - - return exit_code - - -class ContinueCalcJob(JobProcess): - - @classmethod - def define(cls, spec): - super(ContinueCalcJob, cls).define(spec) - spec.input('_calc', valid_type=CalcJobNode, required=True, non_db=False) - - def run(self): - state = self.calc.get_state() - - if state == calc_states.NEW: - return super(ContinueCalcJob, self).run() - - if state in [calc_states.TOSUBMIT, calc_states.SUBMITTING]: - return plumpy.Wait(msg='Waiting to submit', data=SUBMIT_COMMAND) - - elif state in calc_states.WITHSCHEDULER: - return plumpy.Wait(msg='Waiting for scheduler', data=UPDATE_COMMAND) - - elif state in [calc_states.COMPUTED, calc_states.RETRIEVING]: - return plumpy.Wait(msg='Waiting to retrieve', data=RETRIEVE_COMMAND) - - elif state == calc_states.PARSING: - return self.retrieved(True) - - # Otherwise nothing to do... - - def get_or_create_db_record(self): - return self.inputs._calc - - @override - def _setup_db_record(self): - self._calc_class = self.inputs._calc.__class__ - super(ContinueCalcJob, self)._setup_db_record() diff --git a/aiida/work/launch.py b/aiida/work/launch.py index 0e3f6dab52..feeddb4f95 100644 --- a/aiida/work/launch.py +++ b/aiida/work/launch.py @@ -91,7 +91,7 @@ def submit(process, **inputs): # Do not wait for the future's result, because in the case of a single worker this would cock-block itself controller.continue_process(process.pid, nowait=False, no_reply=True) - return process.calc + return process.node # Allow one to also use run.get_node and run.get_pid as a shortcut, without having to import the functions themselves diff --git a/aiida/work/persistence.py b/aiida/work/persistence.py index 4a926e48d7..49795527a3 100644 --- a/aiida/work/persistence.py +++ b/aiida/work/persistence.py @@ -24,6 +24,8 @@ LOGGER = logging.getLogger(__name__) OBJECT_LOADER = None +ObjectLoader = plumpy.DefaultObjectLoader + def get_object_loader(): """ @@ -65,7 +67,7 @@ def save_checkpoint(self, process, tag=None): process, traceback.format_exc())) try: - process.calc.set_checkpoint(serialize.serialize(bundle)) + process.node.set_checkpoint(serialize.serialize(bundle)) except Exception: raise plumpy.PersistenceError("Failed to store a checkpoint for '{}': {}".format( process, traceback.format_exc())) @@ -143,32 +145,3 @@ def delete_process_checkpoints(self, pid): :param pid: the process id of the :class:`aiida.work.processes.Process` """ - - -class ObjectLoader(plumpy.DefaultObjectLoader): - """ - The AiiDA specific object loader. - """ - - @staticmethod - def is_wrapped_job_calculation(name): - from aiida.work.job_processes import JobProcess - return name.find(JobProcess.__name__) != -1 - - def load_object(self, identifier): - """ - Given an identifier load an object. 
- - :param identifier: The identifier - :return: The loaded object - :raises: ValueError if the object cannot be loaded - """ - from aiida.work.job_processes import JobProcess - - if self.is_wrapped_job_calculation(identifier): - idx = identifier.find(JobProcess.__name__) - wrapped_class = identifier[idx + len(JobProcess.__name__) + 1:] - # Recreate the class - return JobProcess.build(super(ObjectLoader, self).load_object(wrapped_class)) - - return super(ObjectLoader, self).load_object(identifier) diff --git a/aiida/work/ports.py b/aiida/work/ports.py index 244e8b7a16..a99ecce6d2 100644 --- a/aiida/work/ports.py +++ b/aiida/work/ports.py @@ -77,7 +77,7 @@ def get_description(self): return description -class PortNamespace(ports.PortNamespace): +class PortNamespace(WithNonDb, ports.PortNamespace): """ Sub class of plumpy.PortNamespace which implements the serialize method to support automatic recursive serialization of a given mapping onto the ports of the PortNamespace. diff --git a/aiida/work/process_builder.py b/aiida/work/process_builder.py index fa131f5d70..e8632e484f 100644 --- a/aiida/work/process_builder.py +++ b/aiida/work/process_builder.py @@ -15,7 +15,7 @@ from collections import Mapping from aiida.work.ports import PortNamespace -__all__ = ['ProcessBuilder', 'JobProcessBuilder', 'ProcessBuilderNamespace'] +__all__ = ['ProcessBuilder', 'CalcJobBuilder', 'ProcessBuilderNamespace'] class ProcessBuilderNamespace(Mapping): @@ -103,9 +103,7 @@ def __getitem__(self, item): class ProcessBuilder(ProcessBuilderNamespace): - """ - A process builder that helps creating a new calculation - """ + """A process builder that helps setting up the inputs for creating a new process.""" def __init__(self, process_class): self._process_class = process_class @@ -117,14 +115,11 @@ def process_class(self): return self._process_class -class JobProcessBuilder(ProcessBuilder): - """ - A process builder specific to CalcJobNode classes, that provides - also the submit_test functionality - """ +class CalcJobBuilder(ProcessBuilder): + """A process builder specific to CalcJob implementations that provides also the `submit_test` functionality.""" def __dir__(self): - return super(JobProcessBuilder, self).__dir__() + ['submit_test'] + return super(CalcJobBuilder, self).__dir__() + ['submit_test'] def submit_test(self, folder=None, subfolder_name=None): """ @@ -142,4 +137,4 @@ def submit_test(self, folder=None, subfolder_name=None): inputs.update(**self) process = self._process_class(inputs=inputs) - return process.calc.submit_test(folder, subfolder_name) + return process.node.submit_test(folder, subfolder_name) diff --git a/aiida/work/process_function.py b/aiida/work/process_function.py index e0859f99ff..8d4ea2fd55 100644 --- a/aiida/work/process_function.py +++ b/aiida/work/process_function.py @@ -112,13 +112,13 @@ def run_get_node(*args, **kwargs): if kwargs and not process_class.spec().inputs.dynamic: raise ValueError('{} does not support these kwargs: {}'.format(function.__name__, kwargs.keys())) - proc = process_class(inputs=inputs, runner=runner) - result = proc.execute() + process = process_class(inputs=inputs, runner=runner) + result = process.execute() # Close the runner properly runner.close() - return result, proc.calc + return result, process.node @functools.wraps(function) def decorated_function(*args, **kwargs): @@ -259,7 +259,7 @@ def execute(self): def _setup_db_record(self): """Set up the database record for the process.""" super(FunctionProcess, self)._setup_db_record() - 
self.calc.store_source_info(self._func) + self.node.store_source_info(self._func) @override def run(self): diff --git a/aiida/work/process_spec.py b/aiida/work/process_spec.py index 32114c6715..d692479454 100644 --- a/aiida/work/process_spec.py +++ b/aiida/work/process_spec.py @@ -25,6 +25,8 @@ class ProcessSpec(plumpy.ProcessSpec): with the variants implemented in AiiDA """ + METADATA_KEY = 'metadata' + METADATA_OPTIONS_KEY = 'options' INPUT_PORT_TYPE = InputPort PORT_NAMESPACE_TYPE = PortNamespace @@ -32,6 +34,14 @@ def __init__(self): super(ProcessSpec, self).__init__() self._exit_codes = ExitCodesNamespace() + @property + def metadata_key(self): + return self.METADATA_KEY + + @property + def options_key(self): + return self.METADATA_OPTIONS_KEY + @property def exit_codes(self): """ diff --git a/aiida/work/processes.py b/aiida/work/processes.py index a6e29c341d..4b374f631d 100644 --- a/aiida/work/processes.py +++ b/aiida/work/processes.py @@ -28,7 +28,8 @@ from plumpy import ProcessState from aiida.common import exceptions -from aiida.common.lang import override, protected +from aiida.common.extendeddicts import AttributeDict +from aiida.common.lang import classproperty, override, protected from aiida.common.links import LinkType from aiida.common.log import LOG_LEVEL_REPORT from aiida import orm @@ -48,7 +49,6 @@ def instantiate_process(runner, process, *args, **inputs): of the `process`: * Process instance: will simply return the instance - * CalcJobNode class: will construct the JobProcess and instantiate it * ProcessBuilder instance: will instantiate the Process from the class and inputs defined within it * Process class: will instantiate with the specified inputs @@ -57,8 +57,6 @@ def instantiate_process(runner, process, *args, **inputs): :param process: Process instance or class, CalcJobNode class or ProcessBuilder instance :param inputs: the inputs for the process to be instantiated with """ - from aiida.orm.node.process import CalcJobNode - if isinstance(process, Process): assert not args assert not inputs @@ -69,12 +67,10 @@ def instantiate_process(runner, process, *args, **inputs): builder = process process_class = builder.process_class inputs.update(**builder) - elif issubclass(process, CalcJobNode): - process_class = process.process() elif issubclass(process, Process): process_class = process else: - raise ValueError('invalid process {}, needs to be Process, CalcJobNode or ProcessBuilder'.format(type(process))) + raise ValueError('invalid process {}, needs to be Process or ProcessBuilder'.format(type(process))) process = process_class(runner=runner, inputs=inputs) @@ -90,8 +86,8 @@ class Process(plumpy.Process): """ # pylint: disable=too-many-public-methods + _node_class = ProcessNode _spec_type = ProcessSpec - _calc_class = ProcessNode SINGLE_OUTPUT_LINKNAME = 'result' @@ -105,9 +101,12 @@ class SaveKeys(enum.Enum): @classmethod def define(cls, spec): super(Process, cls).define(spec) - spec.input('store_provenance', valid_type=bool, default=True, non_db=True) - spec.input('description', valid_type=six.string_types[0], required=False, non_db=True) - spec.input('label', valid_type=six.string_types[0], required=False, non_db=True) + spec.input_namespace(spec.metadata_key, required=False, non_db=True, default={}) + spec.input_namespace('{}.{}'.format(spec.metadata_key, spec.options_key), required=False) + spec.input('{}.store_provenance'.format(spec.metadata_key), valid_type=bool, default=True, non_db=True) + spec.input( + '{}.description'.format(spec.metadata_key), 
valid_type=six.string_types[0], required=False, non_db=True) + spec.input('{}.label'.format(spec.metadata_key), valid_type=six.string_types[0], required=False, non_db=True) spec.inputs.valid_type = (orm.Data, ProcessNode) spec.outputs.valid_type = (orm.Data,) @@ -122,7 +121,7 @@ def get_or_create_db_record(cls): this process. :return: A calculation """ - return cls._calc_class() + return cls._node_class() def __init__(self, inputs=None, logger=None, runner=None, parent_pid=None, enable_persistence=True): from aiida.manage import manager @@ -135,7 +134,7 @@ def __init__(self, inputs=None, logger=None, runner=None, parent_pid=None, enabl loop=self._runner.loop, communicator=self.runner.communicator) - self._calc = None + self._node = None self._parent_pid = parent_pid self._enable_persistence = enable_persistence if self._enable_persistence and self.runner.persister is None: @@ -145,11 +144,48 @@ def __init__(self, inputs=None, logger=None, runner=None, parent_pid=None, enabl def init(self): super(Process, self).init() if self._logger is None: - self.set_logger(self._calc.logger) + self.set_logger(self.node.logger) + + @classproperty + def exit_codes(self): + """ + Return the namespace of exit codes defined for this WorkChain through its ProcessSpec. + The namespace supports getitem and getattr operations with an ExitCode label to retrieve a specific code. + Additionally, the namespace can also be called with either the exit code integer status to retrieve it. + + :returns: ExitCodesNamespace of ExitCode named tuples + """ + return self.spec().exit_codes + + @property + def node(self): + """Return the ProcessNode used by this process to represent itself in the database. + + :return: instance of sub class of ProcessNode + """ + return self._node + + @property + def metadata(self): + """Return the metadata passed when launching this process. + + :return: metadata dictionary + """ + try: + return self.inputs.metadata + except AttributeError: + return AttributeDict() @property - def calc(self): - return self._calc + def options(self): + """Return the options of the metadata passed when launching this process. 
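+        If no ``options`` were specified under the ``metadata`` input, an empty ``AttributeDict`` is returned.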
+ + :return: options dictionary + """ + try: + return self.metadata.options + except AttributeError: + return AttributeDict() def _save_checkpoint(self): """ @@ -168,8 +204,8 @@ def _save_checkpoint(self): def save_instance_state(self, out_state, save_context): super(Process, self).save_instance_state(out_state, save_context) - if self.inputs.store_provenance: - assert self.calc.is_stored + if self.metadata.store_provenance: + assert self.node.is_stored out_state[self.SaveKeys.CALC_ID.value] = self.pid @@ -189,18 +225,18 @@ def load_instance_state(self, saved_state, load_context): super(Process, self).load_instance_state(saved_state, load_context) if self.SaveKeys.CALC_ID.value in saved_state: - self._calc = orm.load_node(saved_state[self.SaveKeys.CALC_ID.value]) - self._pid = self.calc.pk + self._node = orm.load_node(saved_state[self.SaveKeys.CALC_ID.value]) + self._pid = self.node.pk else: self._pid = self._create_and_setup_db_record() - self.calc.logger.info('Loaded process<{}> from saved state'.format(self.calc.pk)) + self.node.logger.info('Loaded process<{}> from saved state'.format(self.node.pk)) def kill(self, msg=None): """ Kill the process and all the children calculations it called """ - self._calc.logger.debug('Request to kill Process<{}>'.format(self._calc.pk)) + self.node.logger.debug('Request to kill Process<{}>'.format(self.node.pk)) had_been_terminated = self.has_terminated() @@ -209,9 +245,9 @@ def kill(self, msg=None): # Only kill children if we could be killed ourselves if result is not False and not had_been_terminated: killing = [] - for child in self.calc.called: + for child in self.node.called: try: - result = self.runner.controller.kill_process(child.pk, 'Killed by parent<{}>'.format(self.calc.pk)) + result = self.runner.controller.kill_process(child.pk, 'Killed by parent<{}>'.format(self.node.pk)) if isinstance(result, plumpy.Future): killing.append(result) except ConnectionClosed: @@ -278,7 +314,7 @@ def on_terminated(self): self.logger.exception('Failed to delete checkpoint') try: - self.calc.seal() + self.node.seal() except exceptions.ModificationNotAllowed: pass @@ -291,7 +327,7 @@ def on_except(self, exc_info): :param exc_info: the sys.exc_info() object """ super(Process, self).on_except(exc_info) - self.calc._set_exception(''.join(traceback.format_exception(exc_info[0], exc_info[1], None))) # pylint: disable=protected-access + self.node._set_exception(''.join(traceback.format_exception(exc_info[0], exc_info[1], None))) # pylint: disable=protected-access self.report(''.join(traceback.format_exception(*exc_info))) @override @@ -302,10 +338,10 @@ def on_finish(self, result, successful): super(Process, self).on_finish(result, successful) if result is None or isinstance(result, int): - self.calc._set_exit_status(result) # pylint: disable=protected-access + self.node._set_exit_status(result) # pylint: disable=protected-access elif isinstance(result, ExitCode): - self.calc._set_exit_status(result.status) # pylint: disable=protected-access - self.calc._set_exit_message(result.message) # pylint: disable=protected-access + self.node._set_exit_status(result.status) # pylint: disable=protected-access + self.node._set_exit_message(result.message) # pylint: disable=protected-access else: raise ValueError('the result should be an integer, ExitCode or None, got {} {} {}'.format( type(result), result, self.pid)) @@ -317,7 +353,7 @@ def on_paused(self, msg=None): """ super(Process, self).on_paused(msg) self._save_checkpoint() - self.calc.pause() + self.node.pause() @override 
def on_playing(self): @@ -325,7 +361,7 @@ def on_playing(self): The Process was unpaused so remove the paused attribute on the process node """ super(Process, self).on_playing() - self.calc.unpause() + self.node.unpause() @override def on_output_emitting(self, output_port, value): @@ -350,7 +386,7 @@ def set_status(self, status): :param status: the status message """ super(Process, self).set_status(status) - self.calc._set_process_status(status) # pylint: disable=protected-access + self.node._set_process_status(status) # pylint: disable=protected-access def submit(self, process, *args, **kwargs): return self.runner.submit(process, *args, **kwargs) @@ -380,7 +416,7 @@ def report(self, msg, *args, **kwargs): database through the attached DbLogHandler. The class name and function name of the caller are prepended to the given message """ - message = '[{}|{}|{}]: {}'.format(self.calc.pk, self.__class__.__name__, inspect.stack()[1][3], msg) + message = '[{}|{}|{}]: {}'.format(self.node.pk, self.__class__.__name__, inspect.stack()[1][3], msg) self.logger.log(LOG_LEVEL_REPORT, message, *args, **kwargs) def _create_and_setup_db_record(self): @@ -389,29 +425,29 @@ def _create_and_setup_db_record(self): :return: the uuid of the process """ - self._calc = self.get_or_create_db_record() + self._node = self.get_or_create_db_record() self._setup_db_record() - if self.inputs.store_provenance: + if self.metadata.store_provenance: try: - self.calc.store_all() - if self.calc.is_finished_ok: + self.node.store_all() + if self.node.is_finished_ok: self._state = ProcessState.FINISHED - for entry in self.calc.get_outgoing(link_type=LinkType.RETURN): + for entry in self.node.get_outgoing(link_type=LinkType.RETURN): if entry.link_label.endswith('_{pk}'.format(pk=entry.node.pk)): continue self.out(entry.link_label, entry.node) - # This is needed for JobProcess. In that case, the outputs are + # This is needed for CalcJob. 
In that case, the outputs are # returned regardless of whether they end in '_pk' - for entry in self.calc.get_outgoing(link_type=LinkType.CREATE): + for entry in self.node.get_outgoing(link_type=LinkType.CREATE): self.out(entry.link_label, entry.node) except exceptions.ModificationNotAllowed: # The calculation was already stored pass - if self.calc.pk is not None: - return self.calc.pk + if self.node.pk is not None: + return self.node.pk - return uuid.UUID(self.calc.uuid) + return uuid.UUID(self.node.uuid) @override def encode_input_args(self, inputs): @@ -435,38 +471,27 @@ def decode_input_args(self, encoded): def update_node_state(self, state): self.update_outputs() - self.calc._set_process_state(state.LABEL) # pylint: disable=protected-access + self.node._set_process_state(state.LABEL) # pylint: disable=protected-access def update_outputs(self): """Attach any new outputs to the node since the last time this was called, if store provenance is True.""" - if self.inputs.store_provenance is False: + if self.metadata.store_provenance is False: return - outputs_stored = self.calc.get_outgoing(link_type=(LinkType.CREATE, LinkType.RETURN)).all_link_labels() + outputs_stored = self.node.get_outgoing(link_type=(LinkType.CREATE, LinkType.RETURN)).all_link_labels() outputs_new = set(self.outputs.keys()) - set(outputs_stored) for link_label in outputs_new: output = self.outputs[link_label] - if isinstance(self.calc, CalculationNode): - output.add_incoming(self.calc, LinkType.CREATE, link_label) - elif isinstance(self.calc, WorkflowNode): - output.add_incoming(self.calc, LinkType.RETURN, link_label) + if isinstance(self.node, CalculationNode): + output.add_incoming(self.node, LinkType.CREATE, link_label) + elif isinstance(self.node, WorkflowNode): + output.add_incoming(self.node, LinkType.RETURN, link_label) output.store() - @property - def process_class(self): - """ - Return the class that represents this Process. - - For a standard Process or sub class of Process, this is the class itself. However, for legacy reasons, - the Process class is a wrapper around another class. This function returns that original class, i.e. the - class that really represents what was being executed. - """ - return self.__class__ - def _setup_db_record(self): """ Create the database record for this process and the links with respect to its inputs @@ -481,12 +506,12 @@ def _setup_db_record(self): linked up as well. 
""" assert self.inputs is not None - assert not self.calc.is_sealed, 'process node cannot be sealed when setting up the database record' + assert not self.node.is_sealed, 'process node cannot be sealed when setting up the database record' # Store important process attributes in the node proxy - self.calc._set_process_state(None) # pylint: disable=protected-access - self.calc._set_process_label(self.process_class.__name__) # pylint: disable=protected-access - self.calc._set_process_type(self.process_class) # pylint: disable=protected-access + self.node._set_process_state(None) # pylint: disable=protected-access + self.node._set_process_label(self.__class__.__name__) # pylint: disable=protected-access + self.node._set_process_type(self.__class__) # pylint: disable=protected-access parent_calc = self.get_parent_calc() @@ -495,52 +520,64 @@ def _setup_db_record(self): if isinstance(parent_calc, CalculationNode): raise exceptions.InvalidOperation('calling processes from a calculation type process is forbidden.') - if isinstance(self.calc, CalculationNode): - self.calc.add_incoming(parent_calc, LinkType.CALL_CALC, 'CALL_CALC') + if isinstance(self.node, CalculationNode): + self.node.add_incoming(parent_calc, LinkType.CALL_CALC, 'CALL_CALC') + + elif isinstance(self.node, WorkflowNode): + self.node.add_incoming(parent_calc, LinkType.CALL_WORK, 'CALL_WORK') + + self._setup_metadata() + self._setup_inputs() + + def _setup_metadata(self): + """Store the metadata on the ProcessNode.""" + for name, metadata in self.metadata.items(): + if name == 'store_provenance': + continue + elif name == 'label': + self.node.label = metadata + elif name == 'description': + self.node.description = metadata + elif name == 'options': + for option_name, option_value in metadata.items(): + self.node.set_option(option_name, option_value) + else: + raise RuntimeError('unsupported metadata key: {}'.format(name)) - elif isinstance(self.calc, WorkflowNode): - self.calc.add_incoming(parent_calc, LinkType.CALL_WORK, 'CALL_WORK') + def _setup_inputs(self): + """Create the links between the input nodes and the ProcessNode that represents this process.""" + from aiida.orm.data.code import Code - self._setup_db_inputs() - self._add_description_and_label() + for name, node in self._flat_inputs().items(): - def _setup_db_inputs(self): - """ - Create the links that connect the inputs to the calculation node that represents this Process - """ - for name, input_value in self._flat_inputs().items(): + # Special exception: set computer if node is a remote Code and our node does not yet have a computer set + if isinstance(node, Code) and not node.is_local() and not self.node.get_computer(): + self.node.set_computer(node.get_remote_computer()) - if isinstance(input_value, ProcessNode): - input_value = utils.get_or_create_output_group(input_value) + if isinstance(node, ProcessNode): + node = utils.get_or_create_output_group(node) # Need this special case for tests that use ProcessNodes as classes - if isinstance(self.calc, ProcessNode) and not isinstance(self.calc, (CalculationNode, WorkflowNode)): - self.calc.add_incoming(input_value, LinkType.INPUT_WORK, name) + if isinstance(self.node, ProcessNode) and not isinstance(self.node, (CalculationNode, WorkflowNode)): + self.node.add_incoming(node, LinkType.INPUT_WORK, name) - elif isinstance(self.calc, CalculationNode): - self.calc.add_incoming(input_value, LinkType.INPUT_CALC, name) + elif isinstance(self.node, CalculationNode): + self.node.add_incoming(node, LinkType.INPUT_CALC, name) - elif 
isinstance(self.calc, WorkflowNode): - self.calc.add_incoming(input_value, LinkType.INPUT_WORK, name) - - def _add_description_and_label(self): - """Add the description and label to the calculation node""" - if self.inputs: - description = self.inputs.get('description', None) - if description is not None: - self._calc.description = description - label = self.inputs.get('label', None) - if label is not None: - self._calc.label = label + elif isinstance(self.node, WorkflowNode): + self.node.add_incoming(node, LinkType.INPUT_WORK, name) def _flat_inputs(self): """ - Return a flattened version of the parsed inputs dictionary. The eventual - keys will be a concatenation of the nested keys + Return a flattened version of the parsed inputs dictionary. + + The eventual keys will be a concatenation of the nested keys. Note that the `metadata` dictionary, if present, + is not passed, as those are dealt with separately in `_setup_metadata`. :return: flat dictionary of parsed inputs """ - return dict(self._flatten_inputs(self.spec().inputs, self.inputs)) + inputs = {key: value for key, value in self.inputs.items() if key != self.spec().metadata_key} + return dict(self._flatten_inputs(self.spec().inputs, inputs)) def _flatten_inputs(self, port, port_value, parent_name='', separator='_'): """ @@ -559,6 +596,7 @@ def _flatten_inputs(self, port, port_value, parent_name='', separator='_'): if (port is None and isinstance(port_value, collections.Mapping) or isinstance(port, PortNamespace)): items = [] for name, value in port_value.items(): + prefixed_key = parent_name + separator + name if parent_name else name try: diff --git a/aiida/work/runners.py b/aiida/work/runners.py index b86cef57da..34574f3255 100644 --- a/aiida/work/runners.py +++ b/aiida/work/runners.py @@ -157,7 +157,7 @@ def submit(self, process, *args, **inputs): else: self.loop.add_callback(process.step_until_terminated) - return process.calc + return process.node def schedule(self, process, *args, **inputs): """ @@ -172,7 +172,7 @@ def schedule(self, process, *args, **inputs): process = instantiate_process(self, process, *args, **inputs) self.loop.add_callback(process.step_until_terminated) - return process.calc + return process.node def _run(self, process, *args, **inputs): """ @@ -192,7 +192,7 @@ def _run(self, process, *args, **inputs): with utils.loop_scope(self.loop): process = instantiate_process(self, process, *args, **inputs) process.execute() - return process.outputs, process.calc + return process.outputs, process.node def run(self, process, *args, **inputs): """ diff --git a/aiida/work/test_utils.py b/aiida/work/test_utils.py index f7aabba8ec..2030a87853 100644 --- a/aiida/work/test_utils.py +++ b/aiida/work/test_utils.py @@ -20,7 +20,7 @@ class DummyProcess(Process): """A Process that does nothing when it runs.""" - _calc_class = WorkflowNode + _node_class = WorkflowNode @classmethod def define(cls, spec): @@ -35,7 +35,7 @@ def run(self): class AddProcess(Process): """A simple Process that adds two integers.""" - _calc_class = WorkflowNode + _node_class = WorkflowNode @classmethod def define(cls, spec): @@ -51,7 +51,7 @@ def run(self): class BadOutput(Process): """A Process that emits an output that isn't an AiiDA Data type.""" - _calc_class = WorkflowNode + _node_class = WorkflowNode @classmethod def define(cls, spec): @@ -65,7 +65,7 @@ def run(self): class ExceptionProcess(Process): """A Process that raises a RuntimeError when run.""" - _calc_class = WorkflowNode + _node_class = WorkflowNode def run(self): # pylint: 
disable=no-self-use raise RuntimeError('CRASH') @@ -74,7 +74,7 @@ def run(self): # pylint: disable=no-self-use class WaitProcess(Process): """A Process that waits until it is asked to continue.""" - _calc_class = WorkflowNode + _node_class = WorkflowNode def run(self): return plumpy.Wait(self.next_step) diff --git a/aiida/work/utils.py b/aiida/work/utils.py index 90a4c312b4..ed0d4f13c1 100644 --- a/aiida/work/utils.py +++ b/aiida/work/utils.py @@ -218,15 +218,15 @@ def set_process_state_change_timestamp(process): from aiida.orm.node.process import ProcessNode, CalculationNode, WorkflowNode from aiida.common import timezone - if isinstance(process.calc, CalculationNode): + if isinstance(process.node, CalculationNode): process_type = 'calculation' - elif isinstance(process.calc, WorkflowNode): + elif isinstance(process.node, WorkflowNode): process_type = 'work' - elif isinstance(process.calc, ProcessNode): + elif isinstance(process.node, ProcessNode): # This will only occur for testing, as in general users cannot launch plain Process classes return else: - raise ValueError('unsupported calculation node type {}'.format(type(process.calc))) + raise ValueError('unsupported calculation node type {}'.format(type(process.node))) key = PROCESS_STATE_CHANGE_KEY.format(process_type) description = PROCESS_STATE_CHANGE_DESCRIPTION.format(process_type) diff --git a/aiida/work/workchain.py b/aiida/work/workchain.py index bceb09c34e..205cd952dd 100644 --- a/aiida/work/workchain.py +++ b/aiida/work/workchain.py @@ -20,7 +20,6 @@ from aiida.common.exceptions import MultipleObjectsError, NotExistent from aiida.common.extendeddicts import AttributeDict from aiida.common.lang import override -from aiida.common.lang import classproperty from aiida.orm import Node from aiida.orm.node.process import WorkChainNode from aiida.orm.utils import load_node @@ -43,7 +42,7 @@ class WorkChain(Process): """ A WorkChain, the base class for AiiDA workflows. """ - _calc_class = WorkChainNode + _node_class = WorkChainNode _spec_type = _WorkChainSpec _STEPPER_STATE = 'stepper_state' _CONTEXT = 'CONTEXT' @@ -89,25 +88,14 @@ def load_instance_state(self, saved_state, load_context): if stepper_state is not None: self._stepper = self.spec().get_outline().recreate_stepper(stepper_state, self) - self.set_logger(self._calc.logger) + self.set_logger(self.node.logger) if self._awaitables: self.action_awaitables() def on_run(self): super(WorkChain, self).on_run() - self.calc.set_stepper_state_info(str(self._stepper)) - - @classproperty - def exit_codes(self): - """ - Return the namespace of exit codes defined for this WorkChain through its ProcessSpec. - The namespace supports getitem and getattr operations with an ExitCode label to retrieve a specific code. - Additionally, the namespace can also be called with either the exit code integer status to retrieve it. 
- - :returns: ExitCodesNamespace of ExitCode named tuples - """ - return self.spec().exit_codes + self.node.set_stepper_state_info(str(self._stepper)) def insert_awaitable(self, awaitable): """ diff --git a/docs/source/concepts/processes.rst b/docs/source/concepts/processes.rst index 65125f5776..f65646fee8 100644 --- a/docs/source/concepts/processes.rst +++ b/docs/source/concepts/processes.rst @@ -27,7 +27,7 @@ The following table describes which processes exist in AiiDA and what node type Process Database record Used for =================== ======================= ===================== ``WorkChain`` ``WorkChainNode`` Workchain -``JobProcess`` ``CalcJobNode`` CalcJob +``CalcJob`` ``CalcJobNode`` CalcJob ``FunctionProcess`` ``WorkFunctionNode`` Workfunction ``FunctionProcess`` ``CalcFunctionNode`` Calcfunction =================== ======================= ===================== @@ -161,7 +161,7 @@ For more details please refer to the :ref:`process builder section
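For context on the new `_setup_metadata` hook: `label`, `description` and the scheduler `options` now arrive through the reserved `metadata` input namespace instead of being ordinary inputs, are stripped out by `_flat_inputs`, and end up on the `ProcessNode` itself. Below is a hypothetical launch sketch under that convention; `SomeCalcJob`, the `x`/`y` inputs and both import paths are illustrative assumptions, not part of this patch.

    from aiida.orm.data.int import Int      # assumed import path for the integer Data class
    from aiida.work.launch import submit    # assumed high-level launch API

    inputs = {
        'x': Int(1),
        'y': Int(2),
        'metadata': {
            'label': 'my first calcjob',         # stored on the node by _setup_metadata
            'description': 'adds two integers',  # idem
            'options': {                         # forwarded one by one to node.set_option()
                'resources': {'num_machines': 1},
                'max_wallclock_seconds': 1800,
            },
        },
    }

    node = submit(SomeCalcJob, **inputs)    # SomeCalcJob is a placeholder; submit now returns process.node

Anything placed under an unsupported `metadata` key raises a `RuntimeError`, as per the new `_setup_metadata` implementation above.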
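The flattening helpers deserve a concrete example: nested port namespaces are collapsed into single link labels by joining the keys with a separator, using exactly the `prefixed_key` rule added to `_flatten_inputs`. The standalone sketch below reproduces only the dictionary-walking part (the real method also consults the `ProcessSpec` ports) and the example namespace names are invented.

    try:
        from collections.abc import Mapping   # Python 3
    except ImportError:
        from collections import Mapping       # Python 2, matching the diff above

    def flatten_inputs(namespace, parent_name='', separator='_'):
        """Collapse a nested inputs dictionary into (label, value) pairs, mirroring `_flatten_inputs`."""
        items = []
        for name, value in namespace.items():
            prefixed_key = parent_name + separator + name if parent_name else name
            if isinstance(value, Mapping):
                items.extend(flatten_inputs(value, parent_name=prefixed_key, separator=separator))
            else:
                items.append((prefixed_key, value))
        return items

    result = dict(flatten_inputs({'pseudos': {'Si': 'upf_node'}, 'structure': 'structure_node'}))
    assert result == {'pseudos_Si': 'upf_node', 'structure': 'structure_node'}

Each flattened key then becomes the label of the corresponding `INPUT_CALC` or `INPUT_WORK` link created in `_setup_inputs`.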
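The `_calc_class` to `_node_class` rename in `test_utils.py` and `workchain.py` reflects the general pattern: every `Process` subclass now declares, through `_node_class`, which `ProcessNode` subclass will represent it in the provenance graph. A minimal sketch of that pattern follows; the module path of the `Process` base class is an assumption and `NoopWorkflow` is a hypothetical example.

    from aiida.orm.node.process import WorkflowNode   # import path taken from the diff above
    from aiida.work.processes import Process          # assumed location of the Process base class

    class NoopWorkflow(Process):
        """A do-nothing process that will be stored as a WorkflowNode."""

        _node_class = WorkflowNode   # was `_calc_class` before this change

        def run(self):
            pass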
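The rename also touches `set_process_state_change_timestamp`, which picks the timestamp key from the class of `process.node`, checking the most specific node classes first and silently ignoring a bare `ProcessNode` (the test-only case). Below is a standalone sketch of that dispatch, with stand-in classes instead of the real ORM nodes so it runs on its own.

    class ProcessNode(object):
        pass

    class CalculationNode(ProcessNode):
        pass

    class WorkflowNode(ProcessNode):
        pass

    def classify(node):
        """Return the process type string used to build the timestamp key, or None to skip."""
        if isinstance(node, CalculationNode):
            return 'calculation'
        elif isinstance(node, WorkflowNode):
            return 'work'
        elif isinstance(node, ProcessNode):
            return None   # bare ProcessNode only occurs in tests, mirroring the early return
        raise ValueError('unsupported calculation node type {}'.format(type(node)))

    assert classify(CalculationNode()) == 'calculation'
    assert classify(WorkflowNode()) == 'work'
    assert classify(ProcessNode()) is None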