Feat: add dipole consistency test (#3321)
This PR adds a cross-framework consistency test for DipoleFittingNet.

Known Limitations:

1. There are some mismatched keys between the serialized models of the different backends, so only the common keys are tested (see the sketch below).
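
For reference, the workaround amounts to intersecting the two key sets before comparing the serialized data. The snippet below is only an illustrative sketch, not the test code itself; `data_ref` and `data_new` are hypothetical names for the serialized dicts produced by two backends.

```python
# Illustrative sketch: compare two serialized fitting nets on their common
# keys only, mirroring the workaround described above.
import numpy as np


def assert_common_keys_equal(data_ref: dict, data_new: dict) -> None:
    """Check that every key present in both serialized dicts holds equal values."""
    common_keys = set(data_ref) & set(data_new)
    np.testing.assert_equal(
        {k: data_ref[k] for k in common_keys},
        {k: data_new[k] for k in common_keys},
    )
```

The actual tests in `source/tests/consistent/common.py` apply this filtering only to the `Polar*`/`Dipole*` fittings and additionally compare the flattened outputs with `np.testing.assert_allclose`.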

---------

Signed-off-by: Anyang Peng <137014849+anyangml@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
anyangml and pre-commit-ci[bot] authored Feb 23, 2024
1 parent d949bc8 commit 260ef21
Showing 28 changed files with 359 additions and 33 deletions.
8 changes: 2 additions & 6 deletions deepmd/dpmodel/fitting/dipole_fitting.py
@@ -53,8 +53,6 @@ class DipoleFitting(GeneralFitting):
If the weights of fitting net are trainable.
Suppose that we have :math:`N_l` hidden layers in the fitting net,
this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable.
atom_ener
Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descriptor should be set.
activation_function
The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN|
precision
@@ -91,7 +89,6 @@ def __init__(
rcond: Optional[float] = None,
tot_ener_zero: bool = False,
trainable: Optional[List[bool]] = None,
atom_ener: Optional[List[Optional[float]]] = None,
activation_function: str = "tanh",
precision: str = DEFAULT_PRECISION,
layer_name: Optional[List[Optional[str]]] = None,
@@ -102,6 +99,8 @@
r_differentiable: bool = True,
c_differentiable: bool = True,
old_impl=False,
# not used
seed: Optional[int] = None,
):
# seed, uniform_seed are not included
if tot_ener_zero:
@@ -112,8 +111,6 @@
raise NotImplementedError("use_aparam_as_mask is not implemented")
if layer_name is not None:
raise NotImplementedError("layer_name is not implemented")
if atom_ener is not None and atom_ener != []:
raise NotImplementedError("atom_ener is not implemented")

self.embedding_width = embedding_width
self.r_differentiable = r_differentiable
@@ -129,7 +126,6 @@ def __init__(
rcond=rcond,
tot_ener_zero=tot_ener_zero,
trainable=trainable,
atom_ener=atom_ener,
activation_function=activation_function,
precision=precision,
layer_name=layer_name,
5 changes: 0 additions & 5 deletions deepmd/dpmodel/fitting/general_fitting.py
@@ -55,8 +55,6 @@ class GeneralFitting(NativeOP, BaseFitting):
If the weights of fitting net are trainable.
Suppose that we have :math:`N_l` hidden layers in the fitting net,
this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable.
atom_ener
Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descriptor should be set.
activation_function
The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN|
precision
@@ -87,7 +85,6 @@ def __init__(
rcond: Optional[float] = None,
tot_ener_zero: bool = False,
trainable: Optional[List[bool]] = None,
atom_ener: Optional[List[float]] = None,
activation_function: str = "tanh",
precision: str = DEFAULT_PRECISION,
layer_name: Optional[List[Optional[str]]] = None,
@@ -110,7 +107,6 @@
self.trainable = [True for ii in range(len(self.neuron) + 1)]
if isinstance(self.trainable, bool):
self.trainable = [self.trainable] * (len(self.neuron) + 1)
self.atom_ener = atom_ener
self.activation_function = activation_function
self.precision = precision
self.layer_name = layer_name
@@ -236,7 +232,6 @@ def serialize(self) -> dict:
# not supported
"tot_ener_zero": self.tot_ener_zero,
"trainable": self.trainable,
"atom_ener": self.atom_ener,
"layer_name": self.layer_name,
"use_aparam_as_mask": self.use_aparam_as_mask,
"spin": self.spin,
5 changes: 3 additions & 2 deletions deepmd/dpmodel/fitting/invar_fitting.py
@@ -115,7 +115,7 @@ def __init__(
rcond: Optional[float] = None,
tot_ener_zero: bool = False,
trainable: Optional[List[bool]] = None,
atom_ener: Optional[List[float]] = None,
atom_ener: Optional[List[float]] = [],
activation_function: str = "tanh",
precision: str = DEFAULT_PRECISION,
layer_name: Optional[List[Optional[str]]] = None,
@@ -139,6 +139,7 @@
raise NotImplementedError("atom_ener is not implemented")

self.dim_out = dim_out
self.atom_ener = atom_ener
super().__init__(
var_name=var_name,
ntypes=ntypes,
@@ -150,7 +151,6 @@
rcond=rcond,
tot_ener_zero=tot_ener_zero,
trainable=trainable,
atom_ener=atom_ener,
activation_function=activation_function,
precision=precision,
layer_name=layer_name,
@@ -163,6 +163,7 @@
def serialize(self) -> dict:
data = super().serialize()
data["dim_out"] = self.dim_out
data["atom_ener"] = self.atom_ener
return data

def _net_out_dim(self):
6 changes: 0 additions & 6 deletions deepmd/dpmodel/fitting/polarizability_fitting.py
@@ -56,8 +56,6 @@ class PolarFitting(GeneralFitting):
If the weights of fitting net are trainable.
Suppose that we have :math:`N_l` hidden layers in the fitting net,
this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable.
atom_ener
Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descriptor should be set.
activation_function
The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN|
precision
@@ -93,7 +91,6 @@ def __init__(
rcond: Optional[float] = None,
tot_ener_zero: bool = False,
trainable: Optional[List[bool]] = None,
atom_ener: Optional[List[Optional[float]]] = None,
activation_function: str = "tanh",
precision: str = DEFAULT_PRECISION,
layer_name: Optional[List[Optional[str]]] = None,
@@ -115,8 +112,6 @@
raise NotImplementedError("use_aparam_as_mask is not implemented")
if layer_name is not None:
raise NotImplementedError("layer_name is not implemented")
if atom_ener is not None and atom_ener != []:
raise NotImplementedError("atom_ener is not implemented")

self.embedding_width = embedding_width
self.fit_diag = fit_diag
@@ -142,7 +137,6 @@ def __init__(
rcond=rcond,
tot_ener_zero=tot_ener_zero,
trainable=trainable,
atom_ener=atom_ener,
activation_function=activation_function,
precision=precision,
layer_name=layer_name,
5 changes: 5 additions & 0 deletions deepmd/pt/model/task/ener.py
@@ -78,6 +78,8 @@ class InvarFitting(GeneralFitting):
Random seed.
exclude_types: List[int]
Atomic contributions of the excluded atom types are set zero.
atom_ener
Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descriptor should be set.
"""

@@ -98,9 +100,11 @@ def __init__(
rcond: Optional[float] = None,
seed: Optional[int] = None,
exclude_types: List[int] = [],
atom_ener: Optional[List[float]] = None,
**kwargs,
):
self.dim_out = dim_out
self.atom_ener = atom_ener
super().__init__(
var_name=var_name,
ntypes=ntypes,
@@ -126,6 +130,7 @@ def _net_out_dim(self):
def serialize(self) -> dict:
data = super().serialize()
data["dim_out"] = self.dim_out
data["atom_ener"] = self.atom_ener
return data

@property
1 change: 0 additions & 1 deletion deepmd/pt/model/task/fitting.py
@@ -428,7 +428,6 @@ def serialize(self) -> dict:
## NOTICE: not supported by far
"tot_ener_zero": False,
"trainable": [True] * (len(self.neuron) + 1),
"atom_ener": [],
"layer_name": None,
"use_aparam_as_mask": False,
"spin": None,
77 changes: 71 additions & 6 deletions deepmd/tf/fit/dipole.py
@@ -38,8 +38,12 @@ class DipoleFittingSeA(Fitting):
Parameters
----------
descrpt : tf.Tensor
The descriptor
ntypes
The ntypes of the descriptor :math:`\mathcal{D}`
dim_descrpt
The dimension of the descriptor :math:`\mathcal{D}`
embedding_width
The rotation matrix dimension of the descriptor :math:`\mathcal{D}`
neuron : List[int]
Number of neurons in each hidden layer of the fitting net
resnet_dt : bool
@@ -59,7 +63,9 @@ class DipoleFittingSeA(Fitting):

def __init__(
self,
descrpt: tf.Tensor,
ntypes: int,
dim_descrpt: int,
embedding_width: int,
neuron: List[int] = [120, 120, 120],
resnet_dt: bool = True,
sel_type: Optional[List[int]] = None,
@@ -70,8 +76,8 @@ def __init__(
**kwargs,
) -> None:
"""Constructor."""
self.ntypes = descrpt.get_ntypes()
self.dim_descrpt = descrpt.get_dim_out()
self.ntypes = ntypes
self.dim_descrpt = dim_descrpt
self.n_neuron = neuron
self.resnet_dt = resnet_dt
self.sel_type = sel_type
@@ -83,9 +89,10 @@ def __init__(
self.seed = seed
self.uniform_seed = uniform_seed
self.seed_shift = one_layer_rand_seed_shift()
self.activation_function_name = activation_function
self.fitting_activation_fn = get_activation_func(activation_function)
self.fitting_precision = get_precision(precision)
self.dim_rot_mat_1 = descrpt.get_dim_rot_mat_1()
self.dim_rot_mat_1 = embedding_width
self.dim_rot_mat = self.dim_rot_mat_1 * 3
self.useBN = False
self.fitting_net_variables = None
@@ -327,3 +334,61 @@ def get_loss(self, loss: dict, lr) -> Loss:
tensor_size=3,
label_name="dipole",
)

def serialize(self, suffix: str) -> dict:
"""Serialize the model.
Returns
-------
dict
The serialized data
"""
data = {
"var_name": "dipole",
"ntypes": self.ntypes,
"dim_descrpt": self.dim_descrpt,
"embedding_width": self.dim_rot_mat_1,
# very bad design: type embedding is not passed to the class
# TODO: refactor the class
"mixed_types": False,
"dim_out": 3,
"neuron": self.n_neuron,
"resnet_dt": self.resnet_dt,
"activation_function": self.activation_function_name,
"precision": self.fitting_precision.name,
"exclude_types": [],
"nets": self.serialize_network(
ntypes=self.ntypes,
# TODO: consider type embeddings
ndim=1,
in_dim=self.dim_descrpt,
out_dim=self.dim_rot_mat_1,
neuron=self.n_neuron,
activation_function=self.activation_function_name,
resnet_dt=self.resnet_dt,
variables=self.fitting_net_variables,
suffix=suffix,
),
}
return data

@classmethod
def deserialize(cls, data: dict, suffix: str):
"""Deserialize the model.
Parameters
----------
data : dict
The serialized data
Returns
-------
Model
The deserialized model
"""
fitting = cls(**data)
fitting.fitting_net_variables = cls.deserialize_network(
data["nets"],
suffix=suffix,
)
return fitting
6 changes: 4 additions & 2 deletions deepmd/tf/fit/ener.py
@@ -95,8 +95,10 @@ class EnerFitting(Fitting):
Parameters
----------
descrpt
The descriptor :math:`\mathcal{D}`
ntypes
The ntypes of the descriptor :math:`\mathcal{D}`
dim_descrpt
The dimension of the descriptor :math:`\mathcal{D}`
neuron
Number of neurons :math:`N` in each hidden layer of the fitting net
resnet_dt
6 changes: 5 additions & 1 deletion deepmd/tf/fit/fitting.py
@@ -6,6 +6,7 @@
from typing import (
Callable,
List,
Optional,
Type,
)

@@ -175,6 +176,7 @@ def serialize_network(
activation_function: str,
resnet_dt: bool,
variables: dict,
out_dim: Optional[int] = 1,
suffix: str = "",
) -> dict:
"""Serialize network.
@@ -197,6 +199,8 @@
The input variables
suffix : str, optional
The suffix of the scope
out_dim : int, optional
The output dimension
Returns
-------
@@ -231,7 +235,7 @@
# initialize the network if it is not initialized
fittings[network_idx] = FittingNet(
in_dim=in_dim,
out_dim=1,
out_dim=out_dim,
neuron=neuron,
activation_function=activation_function,
resnet_dt=resnet_dt,
2 changes: 2 additions & 0 deletions deepmd/tf/model/model.py
@@ -687,6 +687,8 @@ def __init__(
if isinstance(fitting_net, Fitting):
self.fitting = fitting_net
else:
if fitting_net["type"] in ["dipole", "polar"]:
fitting_net["embedding_width"] = self.descrpt.get_dim_rot_mat_1()
self.fitting = Fitting(
**fitting_net,
descrpt=self.descrpt,
4 changes: 4 additions & 0 deletions deepmd/tf/model/multi.py
@@ -133,6 +133,10 @@ def __init__(
if isinstance(item_fitting_param, Fitting):
fitting_dict[item] = item_fitting_param
else:
if item_fitting_param["type"] in ["dipole", "polar"]:
item_fitting_param[
"embedding_width"
] = self.descrpt.get_dim_rot_mat_1()
fitting_dict[item] = Fitting(
**item_fitting_param,
descrpt=self.descrpt,
14 changes: 13 additions & 1 deletion source/tests/consistent/common.py
@@ -252,9 +252,16 @@ def test_tf_consistent_with_ref(self):
tf_obj = self.tf_class.deserialize(data1, suffix=self.unique_id)
ret2, data2 = self.get_tf_ret_serialization_from_cls(tf_obj)
ret2 = self.extract_ret(ret2, self.RefBackend.TF)
if tf_obj.__class__.__name__.startswith(("Polar", "Dipole")):
# tf, pt serialization mismatch
common_keys = set(data1.keys()) & set(data2.keys())
data1 = {k: data1[k] for k in common_keys}
data2 = {k: data2[k] for k in common_keys}
np.testing.assert_equal(data1, data2)
for rr1, rr2 in zip(ret1, ret2):
np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)
np.testing.assert_allclose(
rr1.ravel(), rr2.ravel(), rtol=self.rtol, atol=self.atol
)
assert rr1.dtype == rr2.dtype, f"{rr1.dtype} != {rr2.dtype}"

def test_tf_self_consistent(self):
@@ -319,6 +326,11 @@ def test_pt_consistent_with_ref(self):
ret2 = self.eval_pt(obj)
ret2 = self.extract_ret(ret2, self.RefBackend.PT)
data2 = obj.serialize()
if obj.__class__.__name__.startswith(("Polar", "Dipole")):
# tf, pt serialization mismatch
common_keys = set(data1.keys()) & set(data2.keys())
data1 = {k: data1[k] for k in common_keys}
data2 = {k: data2[k] for k in common_keys}
np.testing.assert_equal(data1, data2)
for rr1, rr2 in zip(ret1, ret2):
np.testing.assert_allclose(rr1, rr2, rtol=self.rtol, atol=self.atol)