diff --git a/deepmd/dpmodel/fitting/dipole_fitting.py b/deepmd/dpmodel/fitting/dipole_fitting.py
index f5acabf7b1..55b237e0fe 100644
--- a/deepmd/dpmodel/fitting/dipole_fitting.py
+++ b/deepmd/dpmodel/fitting/dipole_fitting.py
@@ -53,8 +53,6 @@ class DipoleFitting(GeneralFitting):
         If the weights of fitting net are trainable.
         Suppose that we have :math:`N_l` hidden layers in the fitting net,
         this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable.
-    atom_ener
-        Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set.
     activation_function
         The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN|
     precision
@@ -91,7 +89,6 @@ def __init__(
         rcond: Optional[float] = None,
         tot_ener_zero: bool = False,
         trainable: Optional[List[bool]] = None,
-        atom_ener: Optional[List[Optional[float]]] = None,
         activation_function: str = "tanh",
         precision: str = DEFAULT_PRECISION,
         layer_name: Optional[List[Optional[str]]] = None,
@@ -102,6 +99,8 @@
         r_differentiable: bool = True,
         c_differentiable: bool = True,
         old_impl=False,
+        # not used
+        seed: Optional[int] = None,
     ):
         # seed, uniform_seed are not included
         if tot_ener_zero:
@@ -112,8 +111,6 @@ def __init__(
             raise NotImplementedError("use_aparam_as_mask is not implemented")
         if layer_name is not None:
             raise NotImplementedError("layer_name is not implemented")
-        if atom_ener is not None and atom_ener != []:
-            raise NotImplementedError("atom_ener is not implemented")

         self.embedding_width = embedding_width
         self.r_differentiable = r_differentiable
@@ -129,7 +126,6 @@ def __init__(
             rcond=rcond,
             tot_ener_zero=tot_ener_zero,
             trainable=trainable,
-            atom_ener=atom_ener,
             activation_function=activation_function,
             precision=precision,
             layer_name=layer_name,
diff --git a/deepmd/dpmodel/fitting/general_fitting.py b/deepmd/dpmodel/fitting/general_fitting.py
index 890a065f15..a64fa283ec 100644
--- a/deepmd/dpmodel/fitting/general_fitting.py
+++ b/deepmd/dpmodel/fitting/general_fitting.py
@@ -55,8 +55,6 @@ class GeneralFitting(NativeOP, BaseFitting):
         If the weights of fitting net are trainable.
         Suppose that we have :math:`N_l` hidden layers in the fitting net,
         this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable.
-    atom_ener
-        Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set.
     activation_function
         The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN|
     precision
@@ -87,7 +85,6 @@ def __init__(
         rcond: Optional[float] = None,
         tot_ener_zero: bool = False,
         trainable: Optional[List[bool]] = None,
-        atom_ener: Optional[List[float]] = None,
         activation_function: str = "tanh",
         precision: str = DEFAULT_PRECISION,
         layer_name: Optional[List[Optional[str]]] = None,
@@ -110,7 +107,6 @@ def __init__(
             self.trainable = [True for ii in range(len(self.neuron) + 1)]
         if isinstance(self.trainable, bool):
             self.trainable = [self.trainable] * (len(self.neuron) + 1)
-        self.atom_ener = atom_ener
        self.activation_function = activation_function
        self.precision = precision
        self.layer_name = layer_name
@@ -236,7 +232,6 @@ def serialize(self) -> dict:
             # not supported
             "tot_ener_zero": self.tot_ener_zero,
             "trainable": self.trainable,
-            "atom_ener": self.atom_ener,
             "layer_name": self.layer_name,
             "use_aparam_as_mask": self.use_aparam_as_mask,
             "spin": self.spin,
diff --git a/deepmd/dpmodel/fitting/invar_fitting.py b/deepmd/dpmodel/fitting/invar_fitting.py
index 429565d016..3d958301ff 100644
--- a/deepmd/dpmodel/fitting/invar_fitting.py
+++ b/deepmd/dpmodel/fitting/invar_fitting.py
@@ -115,7 +115,7 @@ def __init__(
         rcond: Optional[float] = None,
         tot_ener_zero: bool = False,
         trainable: Optional[List[bool]] = None,
-        atom_ener: Optional[List[float]] = None,
+        atom_ener: Optional[List[float]] = [],
         activation_function: str = "tanh",
         precision: str = DEFAULT_PRECISION,
         layer_name: Optional[List[Optional[str]]] = None,
@@ -139,6 +139,7 @@ def __init__(
             raise NotImplementedError("atom_ener is not implemented")

         self.dim_out = dim_out
+        self.atom_ener = atom_ener
         super().__init__(
             var_name=var_name,
             ntypes=ntypes,
@@ -150,7 +151,6 @@ def __init__(
             rcond=rcond,
             tot_ener_zero=tot_ener_zero,
             trainable=trainable,
-            atom_ener=atom_ener,
             activation_function=activation_function,
             precision=precision,
             layer_name=layer_name,
@@ -163,6 +163,7 @@
     def serialize(self) -> dict:
         data = super().serialize()
         data["dim_out"] = self.dim_out
+        data["atom_ener"] = self.atom_ener
         return data

     def _net_out_dim(self):
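With this split, `atom_ener` is owned by `InvarFitting` rather than `GeneralFitting`, and it re-enters the serialized dict at the `InvarFitting` level. A minimal round-trip sketch against the dpmodel class; constructor arguments beyond those visible in this diff are assumed, and `deserialize` is assumed to be inherited from the base class:

    from deepmd.dpmodel.fitting.invar_fitting import InvarFitting

    fit = InvarFitting(
        var_name="energy",
        ntypes=2,
        dim_descrpt=20,
        dim_out=1,
        neuron=[5, 5, 5],
        atom_ener=[],  # non-empty values still raise NotImplementedError
    )
    data = fit.serialize()
    assert "atom_ener" in data  # added by InvarFitting.serialize, not the base class
    fit2 = InvarFitting.deserialize(data)
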
diff --git a/deepmd/dpmodel/fitting/polarizability_fitting.py b/deepmd/dpmodel/fitting/polarizability_fitting.py
index c3cbe7bd1a..9811e8e1c8 100644
--- a/deepmd/dpmodel/fitting/polarizability_fitting.py
+++ b/deepmd/dpmodel/fitting/polarizability_fitting.py
@@ -56,8 +56,6 @@ class PolarFitting(GeneralFitting):
         If the weights of fitting net are trainable.
         Suppose that we have :math:`N_l` hidden layers in the fitting net,
         this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable.
-    atom_ener
-        Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set.
     activation_function
         The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN|
     precision
@@ -93,7 +91,6 @@ def __init__(
         rcond: Optional[float] = None,
         tot_ener_zero: bool = False,
         trainable: Optional[List[bool]] = None,
-        atom_ener: Optional[List[Optional[float]]] = None,
         activation_function: str = "tanh",
         precision: str = DEFAULT_PRECISION,
         layer_name: Optional[List[Optional[str]]] = None,
@@ -115,8 +112,6 @@ def __init__(
             raise NotImplementedError("use_aparam_as_mask is not implemented")
         if layer_name is not None:
             raise NotImplementedError("layer_name is not implemented")
-        if atom_ener is not None and atom_ener != []:
-            raise NotImplementedError("atom_ener is not implemented")

         self.embedding_width = embedding_width
         self.fit_diag = fit_diag
@@ -142,7 +137,6 @@ def __init__(
             rcond=rcond,
             tot_ener_zero=tot_ener_zero,
             trainable=trainable,
-            atom_ener=atom_ener,
             activation_function=activation_function,
             precision=precision,
             layer_name=layer_name,
diff --git a/deepmd/pt/model/task/ener.py b/deepmd/pt/model/task/ener.py
index ed2dfbc02b..d0acc6fe2b 100644
--- a/deepmd/pt/model/task/ener.py
+++ b/deepmd/pt/model/task/ener.py
@@ -78,6 +78,8 @@ class InvarFitting(GeneralFitting):
         Random seed.
     exclude_types: List[int]
         Atomic contributions of the excluded atom types are set zero.
+    atom_ener
+        Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descriptor should be set.

     """

@@ -98,9 +100,11 @@ def __init__(
         rcond: Optional[float] = None,
         seed: Optional[int] = None,
         exclude_types: List[int] = [],
+        atom_ener: Optional[List[float]] = None,
         **kwargs,
     ):
         self.dim_out = dim_out
+        self.atom_ener = atom_ener
         super().__init__(
             var_name=var_name,
             ntypes=ntypes,
@@ -126,6 +130,7 @@ def _net_out_dim(self):
     def serialize(self) -> dict:
         data = super().serialize()
         data["dim_out"] = self.dim_out
+        data["atom_ener"] = self.atom_ener
         return data

     @property
diff --git a/deepmd/pt/model/task/fitting.py b/deepmd/pt/model/task/fitting.py
index be20aa9496..080bfb5172 100644
--- a/deepmd/pt/model/task/fitting.py
+++ b/deepmd/pt/model/task/fitting.py
@@ -428,7 +428,6 @@ def serialize(self) -> dict:
             ## NOTICE: not supported by far
             "tot_ener_zero": False,
             "trainable": [True] * (len(self.neuron) + 1),
-            "atom_ener": [],
             "layer_name": None,
             "use_aparam_as_mask": False,
             "spin": None,
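Because both backends now emit the same flat dict (minus the `atom_ener` key dropped from the base serializers), a fitting can in principle be moved across backends. A sketch, using the class aliases from the new consistency test below and assuming matching `serialize`/`deserialize` pairs on both sides:

    from deepmd.dpmodel.fitting.dipole_fitting import DipoleFitting as DipoleFittingDP
    from deepmd.pt.model.task.dipole import DipoleFittingNet as DipoleFittingPT

    # build the numpy-backed fitting, then rebuild it in PyTorch from the dict
    dp_fit = DipoleFittingDP(
        var_name="dipole",
        ntypes=2,
        dim_descrpt=20,
        embedding_width=30,
        neuron=[5, 5, 5],
        mixed_types=True,
    )
    pt_fit = DipoleFittingPT.deserialize(dp_fit.serialize())
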
diff --git a/deepmd/tf/fit/dipole.py b/deepmd/tf/fit/dipole.py
index 55da62d69b..efb94a85c0 100644
--- a/deepmd/tf/fit/dipole.py
+++ b/deepmd/tf/fit/dipole.py
@@ -38,8 +38,12 @@ class DipoleFittingSeA(Fitting):

     Parameters
     ----------
-    descrpt : tf.Tensor
-        The descrptor
+    ntypes
+        The ntypes of the descriptor :math:`\mathcal{D}`
+    dim_descrpt
+        The dimension of the descriptor :math:`\mathcal{D}`
+    embedding_width
+        The rotation matrix dimension of the descriptor :math:`\mathcal{D}`
     neuron : List[int]
         Number of neurons in each hidden layer of the fitting net
     resnet_dt : bool
@@ -59,7 +63,9 @@ class DipoleFittingSeA(Fitting):

     def __init__(
         self,
-        descrpt: tf.Tensor,
+        ntypes: int,
+        dim_descrpt: int,
+        embedding_width: int,
         neuron: List[int] = [120, 120, 120],
         resnet_dt: bool = True,
         sel_type: Optional[List[int]] = None,
@@ -70,8 +76,8 @@ def __init__(
         **kwargs,
     ) -> None:
         """Constructor."""
-        self.ntypes = descrpt.get_ntypes()
-        self.dim_descrpt = descrpt.get_dim_out()
+        self.ntypes = ntypes
+        self.dim_descrpt = dim_descrpt
         self.n_neuron = neuron
         self.resnet_dt = resnet_dt
         self.sel_type = sel_type
@@ -83,9 +89,10 @@ def __init__(
         self.seed = seed
         self.uniform_seed = uniform_seed
         self.seed_shift = one_layer_rand_seed_shift()
+        self.activation_function_name = activation_function
         self.fitting_activation_fn = get_activation_func(activation_function)
         self.fitting_precision = get_precision(precision)
-        self.dim_rot_mat_1 = descrpt.get_dim_rot_mat_1()
+        self.dim_rot_mat_1 = embedding_width
         self.dim_rot_mat = self.dim_rot_mat_1 * 3
         self.useBN = False
         self.fitting_net_variables = None
@@ -327,3 +334,61 @@ def get_loss(self, loss: dict, lr) -> Loss:
             tensor_size=3,
             label_name="dipole",
         )
+
+    def serialize(self, suffix: str) -> dict:
+        """Serialize the fitting.
+
+        Returns
+        -------
+        dict
+            The serialized data
+        """
+        data = {
+            "var_name": "dipole",
+            "ntypes": self.ntypes,
+            "dim_descrpt": self.dim_descrpt,
+            "embedding_width": self.dim_rot_mat_1,
+            # very bad design: type embedding is not passed to the class
+            # TODO: refactor the class
+            "mixed_types": False,
+            "dim_out": 3,
+            "neuron": self.n_neuron,
+            "resnet_dt": self.resnet_dt,
+            "activation_function": self.activation_function_name,
+            "precision": self.fitting_precision.name,
+            "exclude_types": [],
+            "nets": self.serialize_network(
+                ntypes=self.ntypes,
+                # TODO: consider type embeddings
+                ndim=1,
+                in_dim=self.dim_descrpt,
+                out_dim=self.dim_rot_mat_1,
+                neuron=self.n_neuron,
+                activation_function=self.activation_function_name,
+                resnet_dt=self.resnet_dt,
+                variables=self.fitting_net_variables,
+                suffix=suffix,
+            ),
+        }
+        return data
+
+    @classmethod
+    def deserialize(cls, data: dict, suffix: str):
+        """Deserialize the fitting.
+
+        Parameters
+        ----------
+        data : dict
+            The serialized data
+
+        Returns
+        -------
+        Fitting
+            The deserialized fitting
+        """
+        fitting = cls(**data)
+        fitting.fitting_net_variables = cls.deserialize_network(
+            data["nets"],
+            suffix=suffix,
+        )
+        return fitting
diff --git a/deepmd/tf/fit/ener.py b/deepmd/tf/fit/ener.py
index 19bec5cec0..59a1bfe0bd 100644
--- a/deepmd/tf/fit/ener.py
+++ b/deepmd/tf/fit/ener.py
@@ -95,8 +95,10 @@ class EnerFitting(Fitting):

     Parameters
     ----------
-    descrpt
-        The descrptor :math:`\mathcal{D}`
+    ntypes
+        The ntypes of the descriptor :math:`\mathcal{D}`
+    dim_descrpt
+        The dimension of the descriptor :math:`\mathcal{D}`
     neuron
         Number of neurons :math:`N` in each hidden layer of the fitting net
     resnet_dt
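Call sites therefore switch from handing over the descriptor object to passing its dimensions, read off via the descriptor getters; this is the same pattern the updated tests use. A sketch assuming `descrpt` is an already-built `DescrptSeA`:

    from deepmd.tf.fit.dipole import DipoleFittingSeA

    # before: DipoleFittingSeA(descrpt=descrpt, neuron=[120, 120, 120])
    fitting = DipoleFittingSeA(
        ntypes=descrpt.get_ntypes(),                  # number of atom types
        dim_descrpt=descrpt.get_dim_out(),            # descriptor output width
        embedding_width=descrpt.get_dim_rot_mat_1(),  # rotation-matrix width
        neuron=[120, 120, 120],
    )
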
diff --git a/deepmd/tf/fit/fitting.py b/deepmd/tf/fit/fitting.py
index 458765f7c1..598d020fe9 100644
--- a/deepmd/tf/fit/fitting.py
+++ b/deepmd/tf/fit/fitting.py
@@ -6,6 +6,7 @@
 from typing import (
     Callable,
     List,
+    Optional,
     Type,
 )

@@ -175,6 +176,7 @@ def serialize_network(
         activation_function: str,
         resnet_dt: bool,
         variables: dict,
+        out_dim: Optional[int] = 1,
         suffix: str = "",
     ) -> dict:
         """Serialize network.
@@ -197,6 +199,8 @@ def serialize_network(
             The input variables
         suffix : str, optional
             The suffix of the scope
+        out_dim : int, optional
+            The output dimension

         Returns
         -------
@@ -231,7 +235,7 @@ def serialize_network(
                 # initialize the network if it is not initialized
                 fittings[network_idx] = FittingNet(
                     in_dim=in_dim,
-                    out_dim=1,
+                    out_dim=out_dim,
                     neuron=neuron,
                     activation_function=activation_function,
                     resnet_dt=resnet_dt,
diff --git a/deepmd/tf/model/model.py b/deepmd/tf/model/model.py
index 65413a87c1..09b55f4a04 100644
--- a/deepmd/tf/model/model.py
+++ b/deepmd/tf/model/model.py
@@ -687,6 +687,8 @@ def __init__(
         if isinstance(fitting_net, Fitting):
             self.fitting = fitting_net
         else:
+            if fitting_net["type"] in ["dipole", "polar"]:
+                fitting_net["embedding_width"] = self.descrpt.get_dim_rot_mat_1()
             self.fitting = Fitting(
                 **fitting_net,
                 descrpt=self.descrpt,
diff --git a/deepmd/tf/model/multi.py b/deepmd/tf/model/multi.py
index 833a700ebc..2acf00fd52 100644
--- a/deepmd/tf/model/multi.py
+++ b/deepmd/tf/model/multi.py
@@ -133,6 +133,10 @@ def __init__(
             if isinstance(item_fitting_param, Fitting):
                 fitting_dict[item] = item_fitting_param
             else:
+                if item_fitting_param["type"] in ["dipole", "polar"]:
+                    item_fitting_param[
+                        "embedding_width"
+                    ] = self.descrpt.get_dim_rot_mat_1()
                 fitting_dict[item] = Fitting(
                     **item_fitting_param,
                     descrpt=self.descrpt,
diff --git a/source/tests/consistent/common.py b/source/tests/consistent/common.py
index aa1bfe6d9a..622e2ed3cf 100644
--- a/source/tests/consistent/common.py
+++ b/source/tests/consistent/common.py
@@ -252,9 +252,16 @@ def test_tf_consistent_with_ref(self):
             tf_obj = self.tf_class.deserialize(data1, suffix=self.unique_id)
             ret2, data2 = self.get_tf_ret_serialization_from_cls(tf_obj)
             ret2 = self.extract_ret(ret2, self.RefBackend.TF)
+            if tf_obj.__class__.__name__.startswith(("Polar", "Dipole")):
+                # tf, pt serialization mismatch
+                common_keys = set(data1.keys()) & set(data2.keys())
+                data1 = {k: data1[k] for k in common_keys}
+                data2 = {k: data2[k] for k in common_keys}
             np.testing.assert_equal(data1, data2)
             for rr1, rr2 in zip(ret1, ret2):
-                np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
+                np.testing.assert_allclose(
+                    rr1.ravel(), rr2.ravel(), rtol=self.rtol, atol=self.atol
+                )
                 assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}"

     def test_tf_self_consistent(self):
@@ -319,6 +326,11 @@ def test_pt_consistent_with_ref(self):
             ret2 = self.eval_pt(obj)
             ret2 = self.extract_ret(ret2, self.RefBackend.PT)
             data2 = obj.serialize()
+            if obj.__class__.__name__.startswith(("Polar", "Dipole")):
+                # tf, pt serialization mismatch
+                common_keys = set(data1.keys()) & set(data2.keys())
+                data1 = {k: data1[k] for k in common_keys}
+                data2 = {k: data2[k] for k in common_keys}
             np.testing.assert_equal(data1, data2)
             for rr1, rr2 in zip(ret1, ret2):
                 np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
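For config-driven construction, the injection happens once, before dispatching to the `Fitting` factory. A condensed sketch of the pattern shared by `model.py` and `multi.py` (the surrounding `Model.__init__` plumbing is omitted):

    fitting_net = {"type": "dipole", "neuron": [100, 100, 100]}
    if fitting_net["type"] in ["dipole", "polar"]:
        # dipole/polar fittings need the rotation-matrix width, but no longer
        # the descriptor object itself
        fitting_net["embedding_width"] = descrpt.get_dim_rot_mat_1()
    fitting = Fitting(**fitting_net, descrpt=descrpt)
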
diff --git a/source/tests/consistent/fitting/common.py b/source/tests/consistent/fitting/common.py
index 276e81dbc6..bdd4b7cf81 100644
--- a/source/tests/consistent/fitting/common.py
+++ b/source/tests/consistent/fitting/common.py
@@ -42,3 +42,37 @@ def build_tf_fitting(self, obj, inputs, natoms, atype, fparam, suffix):
             t_atype: atype,
             **feed_dict,
         }
+
+
+class DipoleFittingTest:
+    """Useful utilities for dipole fitting tests."""
+
+    def build_tf_fitting(self, obj, inputs, rot_mat, natoms, atype, fparam, suffix):
+        t_inputs = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name="i_inputs")
+        t_rot_mat = tf.placeholder(
+            GLOBAL_TF_FLOAT_PRECISION, rot_mat.shape, name="i_rot_mat"
+        )
+        t_natoms = tf.placeholder(tf.int32, natoms.shape, name="i_natoms")
+        t_atype = tf.placeholder(tf.int32, [None], name="i_atype")
+        extras = {}
+        feed_dict = {}
+        if fparam is not None:
+            t_fparam = tf.placeholder(
+                GLOBAL_TF_FLOAT_PRECISION, [None], name="i_fparam"
+            )
+            extras["fparam"] = t_fparam
+            feed_dict[t_fparam] = fparam
+        t_out = obj.build(
+            t_inputs,
+            t_rot_mat,
+            t_natoms,
+            {"atype": t_atype, **extras},
+            suffix=suffix,
+        )
+        return [t_out], {
+            t_inputs: inputs,
+            t_rot_mat: rot_mat,
+            t_natoms: natoms,
+            t_atype: atype,
+            **feed_dict,
+        }
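Roughly how the harness drives these placeholders; session handling normally lives in `CommonTest`, so this inline sketch is for illustration only:

    t_out, feed_dict = self.build_tf_fitting(
        obj, inputs, rot_mat, natoms, atype, None, suffix="_test"
    )
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        (dipole,) = sess.run(t_out, feed_dict=feed_dict)
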
diff --git a/source/tests/consistent/fitting/test_dipole.py b/source/tests/consistent/fitting/test_dipole.py
new file mode 100644
index 0000000000..7b5d4d59e8
--- /dev/null
+++ b/source/tests/consistent/fitting/test_dipole.py
@@ -0,0 +1,192 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import unittest
+from typing import (
+    Any,
+    Tuple,
+)
+
+import numpy as np
+
+from deepmd.dpmodel.fitting.dipole_fitting import DipoleFitting as DipoleFittingDP
+from deepmd.env import (
+    GLOBAL_NP_FLOAT_PRECISION,
+)
+
+from ..common import (
+    INSTALLED_PT,
+    INSTALLED_TF,
+    CommonTest,
+    parameterized,
+)
+from .common import (
+    DipoleFittingTest,
+)
+
+if INSTALLED_PT:
+    import torch
+
+    from deepmd.pt.model.task.dipole import DipoleFittingNet as DipoleFittingPT
+    from deepmd.pt.utils.env import DEVICE as PT_DEVICE
+else:
+    DipoleFittingPT = object
+if INSTALLED_TF:
+    from deepmd.tf.fit.dipole import DipoleFittingSeA as DipoleFittingTF
+else:
+    DipoleFittingTF = object
+from deepmd.utils.argcheck import (
+    fitting_dipole,
+)
+
+
+@parameterized(
+    (True, False),  # resnet_dt
+    ("float64", "float32"),  # precision
+    (True, False),  # mixed_types
+)
+class TestDipole(CommonTest, DipoleFittingTest, unittest.TestCase):
+    @property
+    def data(self) -> dict:
+        (
+            resnet_dt,
+            precision,
+            mixed_types,
+        ) = self.param
+        return {
+            "neuron": [5, 5, 5],
+            "resnet_dt": resnet_dt,
+            "precision": precision,
+            "seed": 20240217,
+        }
+
+    @property
+    def skip_tf(self) -> bool:
+        (
+            resnet_dt,
+            precision,
+            mixed_types,
+        ) = self.param
+        # TODO: mixed_types
+        return mixed_types or CommonTest.skip_pt
+
+    @property
+    def skip_pt(self) -> bool:
+        (
+            resnet_dt,
+            precision,
+            mixed_types,
+        ) = self.param
+        return CommonTest.skip_pt
+
+    tf_class = DipoleFittingTF
+    dp_class = DipoleFittingDP
+    pt_class = DipoleFittingPT
+    args = fitting_dipole()
+
+    def setUp(self):
+        CommonTest.setUp(self)
+
+        self.ntypes = 2
+        self.natoms = np.array([6, 6, 2, 4], dtype=np.int32)
+        self.inputs = np.ones((1, 6, 20), dtype=GLOBAL_NP_FLOAT_PRECISION)
+        self.gr = np.ones((1, 6, 30, 3), dtype=GLOBAL_NP_FLOAT_PRECISION)
+        self.atype = np.array([0, 1, 1, 0, 1, 1], dtype=np.int32)
+        # inconsistent if not sorted
+        self.atype.sort()
+
+    @property
+    def addtional_data(self) -> dict:
+        (
+            resnet_dt,
+            precision,
+            mixed_types,
+        ) = self.param
+        return {
+            "ntypes": self.ntypes,
+            "dim_descrpt": self.inputs.shape[-1],
+            "mixed_types": mixed_types,
+            "var_name": "dipole",
+            "embedding_width": 30,
+        }
+
+    def build_tf(self, obj: Any, suffix: str) -> Tuple[list, dict]:
+        (
+            resnet_dt,
+            precision,
+            mixed_types,
+        ) = self.param
+        return self.build_tf_fitting(
+            obj,
+            self.inputs.ravel(),
+            self.gr,
+            self.natoms,
+            self.atype,
+            None,
+            suffix,
+        )
+
+    def eval_pt(self, pt_obj: Any) -> Any:
+        (
+            resnet_dt,
+            precision,
+            mixed_types,
+        ) = self.param
+        return (
+            pt_obj(
+                torch.from_numpy(self.inputs).to(device=PT_DEVICE),
+                torch.from_numpy(self.atype.reshape(1, -1)).to(device=PT_DEVICE),
+                torch.from_numpy(self.gr).to(device=PT_DEVICE),
+                None,
+            )["dipole"]
+            .detach()
+            .cpu()
+            .numpy()
+        )
+
+    def eval_dp(self, dp_obj: Any) -> Any:
+        (
+            resnet_dt,
+            precision,
+            mixed_types,
+        ) = self.param
+        return dp_obj(
+            self.inputs,
+            self.atype.reshape(1, -1),
+            self.gr,
+            None,
+        )["dipole"]
+
+    def extract_ret(self, ret: Any, backend) -> Tuple[np.ndarray, ...]:
+        if backend == self.RefBackend.TF:
+            # shape is not the same
+            ret = ret[0].reshape(-1, self.natoms[0], 1)
+        return (ret,)
+
+    @property
+    def rtol(self) -> float:
+        """Relative tolerance for comparing the return value."""
+        (
+            resnet_dt,
+            precision,
+            mixed_types,
+        ) = self.param
+        if precision == "float64":
+            return 1e-10
+        elif precision == "float32":
+            return 1e-4
+        else:
+            raise ValueError(f"Unknown precision: {precision}")
+
+    @property
+    def atol(self) -> float:
+        """Absolute tolerance for comparing the return value."""
+        (
+            resnet_dt,
+            precision,
+            mixed_types,
+        ) = self.param
+        if precision == "float64":
+            return 1e-10
+        elif precision == "float32":
+            return 1e-4
+        else:
+            raise ValueError(f"Unknown precision: {precision}")
diff --git a/source/tests/tf/test_data_large_batch.py b/source/tests/tf/test_data_large_batch.py
index 53991fa7f2..4a142192a1 100644
--- a/source/tests/tf/test_data_large_batch.py
+++ b/source/tests/tf/test_data_large_batch.py
@@ -114,6 +114,7 @@ def test_data_mixed_type(self):
         descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         typeebd_param = jdata["model"]["type_embedding"]
         typeebd = TypeEmbedNet(
@@ -311,6 +312,7 @@ def test_stripped_data_mixed_type(self):
         descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         typeebd_param = jdata["model"]["type_embedding"]
         typeebd = TypeEmbedNet(
@@ -508,6 +510,7 @@ def test_compressible_data_mixed_type(self):
         descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         typeebd_param = jdata["model"]["type_embedding"]
         typeebd = TypeEmbedNet(
diff --git a/source/tests/tf/test_data_modifier.py b/source/tests/tf/test_data_modifier.py
index 98b6c41427..cf2c50b761 100644
--- a/source/tests/tf/test_data_modifier.py
+++ b/source/tests/tf/test_data_modifier.py
@@ -57,7 +57,6 @@ def _setUp(self):
             restart=None, init_model=None, log_path=None, log_level=30, mpi_log="master"
         )
         jdata = j_loader(INPUT)
-        # init model
         model = DPTrainer(jdata, run_opt=run_opt)
         rcut = model.model.get_rcut()

diff --git a/source/tests/tf/test_dipole_se_a.py b/source/tests/tf/test_dipole_se_a.py
index f0e495ef21..6905d94371 100644
--- a/source/tests/tf/test_dipole_se_a.py
+++ b/source/tests/tf/test_dipole_se_a.py
@@ -56,7 +56,9 @@ def test_model(self):
         descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"].pop("type", None)
         jdata["model"]["fitting_net"].pop("fit_diag", None)
-        jdata["model"]["fitting_net"]["descrpt"] = descrpt
+        jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
+        jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["embedding_width"] = descrpt.get_dim_rot_mat_1()
         fitting = DipoleFittingSeA(**jdata["model"]["fitting_net"], uniform_seed=True)
         model = DipoleModel(descrpt, fitting)

diff --git a/source/tests/tf/test_dipole_se_a_tebd.py b/source/tests/tf/test_dipole_se_a_tebd.py
index ed403bd047..57e681ff42 100644
--- a/source/tests/tf/test_dipole_se_a_tebd.py
+++ b/source/tests/tf/test_dipole_se_a_tebd.py
@@ -66,7 +66,9 @@ def test_model(self):
         descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"].pop("type", None)
         jdata["model"]["fitting_net"].pop("fit_diag", None)
-        jdata["model"]["fitting_net"]["descrpt"] = descrpt
+        jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
+        jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["embedding_width"] = descrpt.get_dim_rot_mat_1()
         fitting = DipoleFittingSeA(**jdata["model"]["fitting_net"], uniform_seed=True)
         typeebd_param = jdata["model"]["type_embedding"]
         typeebd = TypeEmbedNet(
diff --git a/source/tests/tf/test_fitting_ener_type.py b/source/tests/tf/test_fitting_ener_type.py
index f88692be74..c1c1698d4f 100644
--- a/source/tests/tf/test_fitting_ener_type.py
+++ b/source/tests/tf/test_fitting_ener_type.py
@@ -56,6 +56,7 @@ def test_fitting(self):
         descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)

         # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']])
diff --git a/source/tests/tf/test_model_multi.py b/source/tests/tf/test_model_multi.py
index b978dff1ab..66b0cce000 100644
--- a/source/tests/tf/test_model_multi.py
+++ b/source/tests/tf/test_model_multi.py
@@ -69,6 +69,7 @@ def test_model(self):
             item_fitting_param.pop("type", None)
             item_fitting_param.pop("fit_diag", None)
             item_fitting_param["descrpt"] = descrpt
+            item_fitting_param["embedding_width"] = descrpt.get_dim_rot_mat_1()
             item_fitting_param["ntypes"] = descrpt.get_ntypes()
             item_fitting_param["dim_descrpt"] = descrpt.get_dim_out()
             if item_fitting_type == "ener":
diff --git a/source/tests/tf/test_model_se_a.py b/source/tests/tf/test_model_se_a.py
index e60cb2307f..414bee2b83 100644
--- a/source/tests/tf/test_model_se_a.py
+++ b/source/tests/tf/test_model_se_a.py
@@ -76,6 +76,7 @@ def test_model_atom_ener(self):
         descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         model = EnerModel(descrpt, fitting)

@@ -157,6 +158,7 @@ def test_model(self):
         descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         model = EnerModel(descrpt, fitting)

@@ -302,6 +304,7 @@ def test_model_atom_ener_type_embedding(self):
         descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         model = EnerModel(descrpt, fitting, typeebd=typeebd)

diff --git a/source/tests/tf/test_model_se_a_aparam.py b/source/tests/tf/test_model_se_a_aparam.py
index 6bf059f8fa..00a71f9136 100644
--- a/source/tests/tf/test_model_se_a_aparam.py
+++ b/source/tests/tf/test_model_se_a_aparam.py
@@ -55,6 +55,7 @@ def test_model(self):
         descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         model = EnerModel(descrpt, fitting)

diff --git a/source/tests/tf/test_model_se_a_ebd.py b/source/tests/tf/test_model_se_a_ebd.py
index 599cce6386..e4a6d78d65 100644
--- a/source/tests/tf/test_model_se_a_ebd.py
+++ b/source/tests/tf/test_model_se_a_ebd.py
@@ -56,6 +56,7 @@ def test_model(self):
         )
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(
             **jdata["model"]["fitting_net"],
         )
diff --git a/source/tests/tf/test_model_se_a_ebd_v2.py b/source/tests/tf/test_model_se_a_ebd_v2.py
index 22b3c3389d..cab5146312 100644
--- a/source/tests/tf/test_model_se_a_ebd_v2.py
+++ b/source/tests/tf/test_model_se_a_ebd_v2.py
@@ -72,6 +72,7 @@ def test_model(self):
         )
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(
             **jdata["model"]["fitting_net"],
         )
diff --git a/source/tests/tf/test_model_se_a_fparam.py b/source/tests/tf/test_model_se_a_fparam.py
index 4f94fc1655..3045948480 100644
--- a/source/tests/tf/test_model_se_a_fparam.py
+++ b/source/tests/tf/test_model_se_a_fparam.py
@@ -54,6 +54,7 @@ def test_model(self):
         descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         # descrpt = DescrptSeA(jdata['model']['descriptor'])
         # fitting = EnerFitting(jdata['model']['fitting_net'], descrpt)
diff --git a/source/tests/tf/test_model_se_a_srtab.py b/source/tests/tf/test_model_se_a_srtab.py
index 00f59668a0..2c4d5d70f9 100644
--- a/source/tests/tf/test_model_se_a_srtab.py
+++ b/source/tests/tf/test_model_se_a_srtab.py
@@ -71,6 +71,7 @@ def test_model(self):
         descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         # descrpt = DescrptSeA(jdata['model']['descriptor'])
         # fitting = EnerFitting(jdata['model']['fitting_net'], descrpt)
diff --git a/source/tests/tf/test_model_se_a_type.py b/source/tests/tf/test_model_se_a_type.py
index 9c0a07cc98..3432bebf2a 100644
--- a/source/tests/tf/test_model_se_a_type.py
+++ b/source/tests/tf/test_model_se_a_type.py
@@ -57,6 +57,7 @@ def test_model(self):
         descrpt = DescrptSeA(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         typeebd_param = jdata["model"]["type_embedding"]
         typeebd = TypeEmbedNet(
diff --git a/source/tests/tf/test_model_se_atten.py b/source/tests/tf/test_model_se_atten.py
index 13e4c554ca..874931cb40 100644
--- a/source/tests/tf/test_model_se_atten.py
+++ b/source/tests/tf/test_model_se_atten.py
@@ -69,6 +69,7 @@ def test_model(self):
         descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         typeebd_param = jdata["model"]["type_embedding"]
         typeebd = TypeEmbedNet(
@@ -295,6 +296,7 @@ def test_compressible_model(self):
         descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         typeebd_param = jdata["model"]["type_embedding"]
         typeebd = TypeEmbedNet(
@@ -523,6 +525,7 @@ def test_stripped_type_embedding_model(self):
         descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         typeebd_param = jdata["model"]["type_embedding"]
         typeebd = TypeEmbedNet(
@@ -762,6 +765,7 @@ def test_smoothness_of_stripped_type_embedding_smooth_model(self):
         descrpt = DescrptSeAtten(**jdata["model"]["descriptor"], uniform_seed=True)
         jdata["model"]["fitting_net"]["ntypes"] = descrpt.get_ntypes()
         jdata["model"]["fitting_net"]["dim_descrpt"] = descrpt.get_dim_out()
+        jdata["model"]["fitting_net"]["dim_rot_mat_1"] = descrpt.get_dim_rot_mat_1()
         fitting = EnerFitting(**jdata["model"]["fitting_net"], uniform_seed=True)
         typeebd_param = jdata["model"]["type_embedding"]
         typeebd = TypeEmbedNet(
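With `serialize`/`deserialize` now available on the TF side, the dipole fitting can round-trip through the framework-neutral dict once a graph has populated `fitting_net_variables`. A sketch, assuming `fitting` has already been built into a graph:

    data = fitting.serialize(suffix="_original")
    restored = DipoleFittingSeA.deserialize(data, suffix="_restored")
    assert restored.dim_rot_mat_1 == fitting.dim_rot_mat_1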