mobility workflow with cp2k
AndresOrtegaGuerrero committed Jan 20, 2025
1 parent 6bf08fa commit 2fba324
Showing 18 changed files with 3,555 additions and 0 deletions.
2 changes: 2 additions & 0 deletions aiida_lsmo/calcfunctions/__init__.py
@@ -7,3 +7,5 @@
from .selectivity import calc_selectivity
from .wrappers import PE_PARAMETERS_DEFAULT
from .wrappers import calc_co2_parasitic_energy
from .vac_vaccum import VACCalculation

93 changes: 93 additions & 0 deletions aiida_lsmo/calcfunctions/vac_vaccum.py
@@ -0,0 +1,93 @@
from aiida.common import datastructures
from aiida.engine import CalcJob
from aiida.orm import RemoteData, Float

class VACCalculation(CalcJob):
"""AiiDA calculation plugin wrapping the MOFVacLevel executable."""
_DEFAULT_INPUT_FILE = "vac_vacuum.py"
_DEFAULT_OUTPUT_FILE = "log.out"
_DEFAULT_PARENT_CALC_FLDR_NAME = "parent_calc/"
@classmethod
def define(cls, spec):
"""Define inputs and outputs of the calculation."""
super(VACCalculation, cls).define(spec)

# new ports
spec.input('folder', valid_type=RemoteData, help='Remote folder of the parent CP2K calculation containing the Hartree potential cube file.')
spec.output('vac_level', valid_type=Float, help='Vacuum level extracted from the Hartree potential cube file.')

spec.input('metadata.options.output_filename', valid_type=str, default='log.out' )
spec.inputs['metadata']['options']['resources'].default = {
'num_machines': 1,
'num_mpiprocs_per_machine': 1,
}
spec.inputs['metadata']['options']['parser_name'].default = 'lsmo.vac_vaccum_parser'
spec.exit_code(
200,
"ERROR_NO_RETRIEVED_FOLDER",
message="The retrieved folder data node could not be accessed.",
)
spec.exit_code(300, 'ERROR_MISSING_OUTPUT_FILES',
message='Calculation did not produce all expected output files.')
spec.exit_code(
301, "ERROR_OUTPUT_READ", message="The output file could not be read."
)
spec.exit_code(
302, "ERROR_OUTPUT_PARSE", message="The output file could not be parsed."
)


def prepare_for_submission(self, folder):
"""
Create input files.
:param folder: an `aiida.common.folders.Folder` where the plugin should temporarily place all files needed by
the calculation.
:return: `aiida.common.datastructures.CalcInfo` instance
"""
# Write a small driver script that loads the Hartree potential cube file from the
# parent calculation folder and prints the computed vacuum level to stdout.
with folder.open(self._DEFAULT_INPUT_FILE, 'w') as fobj:
fobj.write("from mof_vac_level import MOFVacLevel\n"
"mvl = MOFVacLevel('parent_calc/aiida-v_hartree-1_0.cube')\n"
"value = mvl.get_vacuum_potential(res=0.5, cube_size=[25, 25, 25])\n"
"print(value)\n")

codeinfo = datastructures.CodeInfo()
codeinfo.cmdline_params = [self._DEFAULT_INPUT_FILE]
codeinfo.stdin_name = self._DEFAULT_INPUT_FILE
codeinfo.code_uuid = self.inputs.code.uuid
codeinfo.stdout_name = self._DEFAULT_OUTPUT_FILE

# Prepare a `CalcInfo` to be returned to the engine
calcinfo = datastructures.CalcInfo()
calcinfo.cmdline_params = codeinfo.cmdline_params
calcinfo.stdin_name = self._DEFAULT_INPUT_FILE
calcinfo.stdout_name = self._DEFAULT_OUTPUT_FILE

calcinfo.codes_info = [codeinfo]
calcinfo.retrieve_list = [self.metadata.options.output_filename]

# Make the parent folder containing the cube file available to the calculation.
calcinfo.remote_symlink_list = []
calcinfo.remote_copy_list = []

comp_uuid = self.inputs.folder.computer.uuid
remote_path = self.inputs.folder.get_remote_path()
copy_info = (comp_uuid, remote_path, self._DEFAULT_PARENT_CALC_FLDR_NAME)
# If running on the same computer - make a symlink.
if self.inputs.code.computer.uuid == comp_uuid:
calcinfo.remote_symlink_list.append(copy_info)
# If not - copy the folder.
else:
calcinfo.remote_copy_list.append(copy_info)

return calcinfo
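For context, a minimal sketch of how the new plugin could be launched (not part of the diff; the code label and node pk are hypothetical, and it assumes the 'lsmo.vac_vaccum' entry point added by this commit is installed):

from aiida import engine, orm
from aiida.plugins import CalculationFactory

VACCalculation = CalculationFactory('lsmo.vac_vaccum')

builder = VACCalculation.get_builder()
builder.code = orm.load_code('python@localhost')   # hypothetical code label
builder.folder = orm.load_node(1234)               # RemoteData of the parent CP2K run (hypothetical pk)
builder.metadata.options.max_wallclock_seconds = 30 * 60

results = engine.run(builder)
print(results['vac_level'].value)                  # the parsed vacuum level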


1 change: 1 addition & 0 deletions aiida_lsmo/parsers/__init__.py
@@ -5,6 +5,7 @@
from aiida.orm import Dict, BandsData
from aiida_cp2k.parsers import Cp2kBaseParser
from .parser_functions import parse_cp2k_output_bsse, parse_cp2k_output_advanced
from .vac_vacuum_parser import VACCalculationParser


class Cp2kBsseParser(Cp2kBaseParser): # pylint: disable=too-few-public-methods
37 changes: 37 additions & 0 deletions aiida_lsmo/parsers/vac_vacuum_parser.py
@@ -0,0 +1,37 @@
from aiida.engine import ExitCode

from aiida.common import exceptions
from aiida.orm import SinglefileData, Float
from aiida.parsers.parser import Parser
from aiida.plugins import CalculationFactory

VACCalculation = CalculationFactory('lsmo.vac_vaccum')


class VACCalculationParser(Parser):
"""Parser that extracts the vacuum level printed to the log file by the VACCalculation driver script."""

def parse(self, **kwargs):
"""
Parse outputs, store results in database.
"""




output_filename = self.node.get_option('output_filename')

files_retrieved = self.retrieved.list_object_names()
files_expected = [output_filename]
# Note: set(A) <= set(B) checks whether A is a subset of B
if not set(files_expected) <= set(files_retrieved):
self.logger.error(f"Found files '{files_retrieved}', expected to find '{files_expected}'")
return self.exit_codes.ERROR_MISSING_OUTPUT_FILES
# Parse the vacuum level from the output file and expose it as a Float node.
self.logger.info(f"Parsing '{output_filename}'")
with self.retrieved.open(output_filename, 'rb') as handle:
value_file = float(handle.readline())
output_node = Float(value_file)
self.out('vac_level', output_node)

return ExitCode(0)
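Since the driver script prints only the vacuum level, the parser casts the first line of log.out to a float. A minimal illustration of that logic on a fabricated line (the value is made up):

line = b"4.8231\n"        # what print(value) in the driver script is expected to emit
vac_level = float(line)   # float() accepts bytes in Python 3, matching the 'rb' read above
print(vac_level)          # 4.8231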

123 changes: 123 additions & 0 deletions aiida_lsmo/utils/cp2k_utils_master.py
@@ -0,0 +1,123 @@
"""Utilities related to CP2K."""

from aiida_lsmo.utils import HARTREE2EV

# Functions to get inputs


def get_input_multiplicity(structure, protocol_settings):
""" Compute the total multiplicity of the structure,
by summing the atomic magnetizations:
multiplicity = 1 + sum_i ( natoms_i * magnetization_i ), for each atom_type i
"""
multiplicity = 1
all_atoms = structure.get_ase().get_chemical_symbols()
for key, value in protocol_settings['initial_magnetization'].items():
multiplicity += all_atoms.count(key) * value
multiplicity = int(round(multiplicity))
multiplicity_dict = {'FORCE_EVAL': {'DFT': {'MULTIPLICITY': multiplicity}}}
if multiplicity != 1:
multiplicity_dict['FORCE_EVAL']['DFT']['UKS'] = True
return multiplicity_dict
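As a worked example of the formula above (with a hypothetical magnetization setting): a cell containing two Fe atoms with an initial magnetization of 4 each gives multiplicity = 1 + 2 * 4 = 9 and switches on UKS.

from aiida import orm
from ase.build import bulk

structure = orm.StructureData(ase=bulk('Fe', 'bcc', a=2.87) * (1, 1, 2))  # 2 Fe atoms
protocol_settings = {'initial_magnetization': {'Fe': 4}}
print(get_input_multiplicity(structure, protocol_settings))
# {'FORCE_EVAL': {'DFT': {'MULTIPLICITY': 9, 'UKS': True}}}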


def get_kinds_section(structure, protocol_settings):
""" Write the &KIND sections given the structure and the settings_dict"""
kinds = []
all_atoms = set(structure.get_ase().get_chemical_symbols())
for atom in all_atoms:
kinds.append({
'_': atom,
'BASIS_SET': protocol_settings['basis_set'][atom],
'POTENTIAL': protocol_settings['pseudopotential'][atom],
'MAGNETIZATION': protocol_settings['initial_magnetization'][atom],
})
return {'FORCE_EVAL': {'SUBSYS': {'KIND': kinds}}}
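A short sketch of how the &KIND list is assembled for a hypothetical water molecule; the basis-set and pseudopotential names below are illustrative settings, not values taken from this commit:

from aiida import orm
from ase.build import molecule

atoms = molecule('H2O')
atoms.set_cell([10.0, 10.0, 10.0])
structure = orm.StructureData(ase=atoms)

protocol_settings = {
    'basis_set': {'O': 'DZVP-MOLOPT-SR-GTH', 'H': 'DZVP-MOLOPT-SR-GTH'},
    'pseudopotential': {'O': 'GTH-PBE-q6', 'H': 'GTH-PBE-q1'},
    'initial_magnetization': {'O': 0, 'H': 0},
}
print(get_kinds_section(structure, protocol_settings))
# {'FORCE_EVAL': {'SUBSYS': {'KIND': [{'_': 'O', 'BASIS_SET': ..., 'POTENTIAL': ..., 'MAGNETIZATION': 0}, ...]}}}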


def get_kinds_with_ghost_section(structure, protocol_settings):
"""Write the &KIND sections given the structure and the settings_dict, and add also GHOST atoms"""
kinds = []
all_atoms = set(structure.get_ase().get_chemical_symbols())
for atom in all_atoms:
kinds.append({
'_': atom,
'BASIS_SET': protocol_settings['basis_set'][atom],
'POTENTIAL': protocol_settings['pseudopotential'][atom],
'MAGNETIZATION': protocol_settings['initial_magnetization'][atom],
})
kinds.append({'_': atom + "_ghost", 'BASIS_SET': protocol_settings['basis_set'][atom], 'GHOST': True})
return {'FORCE_EVAL': {'SUBSYS': {'KIND': kinds}}}


def get_bsse_section(natoms_a, natoms_b, mult_a=1, mult_b=1, charge_a=0, charge_b=0):
"""Get the &FORCE_EVAL/&BSSE section."""
bsse_section = {
'FORCE_EVAL': {
'BSSE' : {
'FRAGMENT': [{
'LIST': '1..{}'.format(natoms_a)
},
{
'LIST': '{}..{}'.format(natoms_a + 1, natoms_a + natoms_b)
}],
'CONFIGURATION': [
{ # A fragment with basis set A
'MULTIPLICITY': mult_a,
'CHARGE': charge_a,
'GLB_CONF': '1 0',
'SUB_CONF': '1 0',
},
{ # B fragment with basis set B
'MULTIPLICITY': mult_b,
'CHARGE': charge_b,
'GLB_CONF': '0 1',
'SUB_CONF': '0 1',
},
{ # A fragment with basis set A+B
'MULTIPLICITY': mult_a,
'CHARGE': charge_a,
'GLB_CONF': '1 1',
'SUB_CONF': '1 0',
},
{ # B fragment with basis set A+B
'MULTIPLICITY': mult_b,
'CHARGE': charge_b,
'GLB_CONF': '1 1',
'SUB_CONF': '0 1',
},
{ # A+B fragments with basis set A+B
'MULTIPLICITY': mult_a + mult_b - 1,
'CHARGE': charge_a + charge_b,
'GLB_CONF': '1 1',
'SUB_CONF': '1 1',
}
]
}
}
}
return bsse_section
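A quick sketch of the resulting section for a hypothetical complex split into two three-atom fragments (fragment sizes are illustrative):

section = get_bsse_section(natoms_a=3, natoms_b=3)
print([frag['LIST'] for frag in section['FORCE_EVAL']['BSSE']['FRAGMENT']])
# ['1..3', '4..6']
print(len(section['FORCE_EVAL']['BSSE']['CONFIGURATION']))
# 5  (A, B, A in basis A+B, B in basis A+B, A+B in basis A+B)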


# Functions to parse results


def ot_has_small_bandgap(cp2k_input, cp2k_output, bandgap_thr_ev):
""" Returns True if the calculation used OT and had a smaller bandgap then the guess needed for the OT.
(NOTE: It has been observed also negative bandgap with OT in CP2K!)
cp2k_input: dict
cp2k_output: dict
bandgap_thr_ev: float [eV]
"""
list_true = [True, 'T', 't', '.TRUE.', 'True', 'true'] #add more?
try:
ot_settings = cp2k_input['FORCE_EVAL']['DFT']['SCF']['OT']
if '_' not in ot_settings.keys() or ot_settings['_'] in list_true: #pylint: disable=simplifiable-if-statement
using_ot = True
else:
using_ot = False
except KeyError:
using_ot = False
min_bandgap_ev = min(cp2k_output["bandgap_spin1_au"], cp2k_output["bandgap_spin2_au"]) * HARTREE2EV
is_bandgap_small = (min_bandgap_ev < bandgap_thr_ev)
return using_ot and is_bandgap_small
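An illustrative call with fabricated input and output dictionaries (all values made up):

cp2k_input = {'FORCE_EVAL': {'DFT': {'SCF': {'OT': {'MINIMIZER': 'CG'}}}}}
cp2k_output = {'bandgap_spin1_au': 0.005, 'bandgap_spin2_au': 0.007}
print(ot_has_small_bandgap(cp2k_input, cp2k_output, bandgap_thr_ev=0.3))
# True: 0.005 Ha is roughly 0.14 eV, below the 0.3 eV threshold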