diff --git a/python/fate/arch/_standalone.py b/python/fate/arch/_standalone.py
index 4c9fc90168..d03ab1f432 100644
--- a/python/fate/arch/_standalone.py
+++ b/python/fate/arch/_standalone.py
@@ -632,6 +632,7 @@ def get(self, name: str, tag: str, parties: List[PartyMeta]) -> List:
         for party in parties:
             _tagged_key = self._federation_object_key(name, tag, party, self._party)
+            results.append(self._meta.wait_status_set(_tagged_key))
 
         rtn = []
diff --git a/python/fate/arch/federation/standalone/_federation.py b/python/fate/arch/federation/standalone/_federation.py
index 454195f732..b01b764900 100644
--- a/python/fate/arch/federation/standalone/_federation.py
+++ b/python/fate/arch/federation/standalone/_federation.py
@@ -81,6 +81,7 @@ def pull(
         if (name, tag, party) in self._get_history:
             raise ValueError(f"get from {party} with duplicate tag: {name}.{tag}")
         self._get_history.add((name, tag, party))
+
         rtn = self._federation.get(name=name, tag=tag, parties=parties)
         return [Table(r) if isinstance(r, RawTable) else r for r in rtn]
diff --git a/python/fate/ml/nn/hetero/agg_layer/plaintext_agg_layer.py b/python/fate/ml/nn/hetero/agg_layer/plaintext_agg_layer.py
deleted file mode 100644
index 8db07cd538..0000000000
--- a/python/fate/ml/nn/hetero/agg_layer/plaintext_agg_layer.py
+++ /dev/null
@@ -1,128 +0,0 @@
-import torch
-import torch as t
-from fate.arch import Context
-from typing import Union, List
-from torch.autograd import grad
-from fate.ml.nn.hetero.agg_layer._base import InteractiveLayer, backward_loss
-
-
-class InteractiveLayerGuest(InteractiveLayer):
-
-    def __init__(self, ctx: Context,
-                 out_features: int,
-                 guest_in_features: int,
-                 host_in_features: Union[int, List[int]],
-                 activation: str = "relu",
-                 lr=0.01
-                 ):
-        super(InteractiveLayerGuest, self).__init__(ctx)
-        self._out_features = out_features
-        self._guest_in_features = guest_in_features
-
-        assert activation in ["relu", "sigmoid", "tanh"], "activation should be relu, sigmoid or tanh"
-        assert isinstance(guest_in_features, int), "guest_in_features should be int"
-        assert isinstance(host_in_features, (int, list)), "host_in_features should be int or list[int]"
-
-        self._guest_model = t.nn.Linear(guest_in_features, out_features)
-        self._host_model = t.nn.ModuleList()
-        if isinstance(host_in_features, int):
-            host_in_features = [host_in_features]
-        for host_in_feature in host_in_features:
-            self._host_model.append(t.nn.Linear(host_in_feature, out_features))
-
-        if activation == "relu":
-            self._activation_layer = t.nn.ReLU()
-        elif activation == "sigmoid":
-            self._activation_layer = t.nn.Sigmoid()
-        elif activation == "tanh":
-            self._activation_layer = t.nn.Tanh()
-
-        self._host_num = len(self._host_model)
-        self._guest_input_cache = None
-        self._host_input_caches = None
-
-        self._lr = lr
-
-    def _clear_state(self):
-        self._guest_input_cache = None
-        self._host_input_caches = None
-
-    def _forward(self, x_g: t.Tensor, x_h: List[t.Tensor]) -> t.Tensor:
-        guest_out = self._guest_model(x_g)
-        for h_idx in range(self._host_num):
-            host_out = self._host_model[h_idx](x_h[h_idx])
-            guest_out += host_out
-        final_out = self._activation_layer(guest_out)
-        return final_out
-
-    def forward(self, x: t.Tensor) -> t.Tensor:
-
-        # save input for backwards
-        self._guest_input_cache = t.Tensor(x.detach()).type(t.float64)
-        self._host_input_caches = []
-        host_x = self.ctx.hosts.get(self._fw_suffix.format(self._fw_count))
-        self._fw_count += 1
-        for h in range(self._host_num):
-            host_input_cache = t.Tensor(host_x[h].detach()).type(t.float64)
-            self._host_input_caches.append(host_input_cache)
-        with torch.no_grad():
-            out = self._forward(self._guest_input_cache, self._host_input_caches)
-            final_out = self._activation_layer(out)
-        return final_out.detach()
-
-    def backward(self, error) -> t.Tensor:
-
-        # compute backward grads
-        self._guest_input_cache = self._guest_input_cache.requires_grad_(True)
-        self._host_input_caches = [h.requires_grad_(True) for h in self._host_input_caches]
-        out = self._forward(self._guest_input_cache, self._host_input_caches)
-        loss = backward_loss(out, error)
-        backward_list = [self._guest_input_cache]
-        backward_list.extend(self._host_input_caches)
-        ret_error = grad(loss, backward_list)
-
-        # update model
-        self._guest_input_cache = self._guest_input_cache.requires_grad_(False)
-        self._host_input_caches = [h.requires_grad_(False) for h in self._host_input_caches]
-        out = self._forward(self._guest_input_cache, self._host_input_caches)
-        loss = backward_loss(out, error)
-        loss.backward()
-
-        self._clear_state()
-
-        # send error back to hosts
-        host_errors = ret_error[1: ]
-        idx = 0
-        for host in self.ctx.hosts:
-            host.put(self._bw_suffix.format(self._bw_count), host_errors[idx])
-            idx += 1
-        self._bw_count += 1
-        return ret_error[0]  # guest error
-
-    def predict(self, x):
-
-        # save input for backwards
-        host_x = self.ctx.hosts.get(self._pred_suffix.format(self._pred_count))
-        self._pred_count += 1
-        with torch.no_grad():
-            out = self._forward(x, host_x)
-            final_out = self._activation_layer(out)
-        return final_out.detach()
-
-
-class InteractiveLayerHost(InteractiveLayer):
-
-    def __init__(self, ctx):
-        super(InteractiveLayerHost, self).__init__(ctx)
-
-    def forward(self, x) -> None:
-        self.ctx.guest.put(self._fw_suffix.format(self._fw_count), x)
-        self._fw_count += 1
-    def backward(self, error=None) -> t.Tensor:
-        error = self.ctx.guest.get(self._bw_suffix.format(self._bw_count))
-        self._bw_count += 1
-        return error
-
-    def predict(self, x):
-        self.ctx.guest.put(self._pred_suffix.format(self._pred_count), x)
-        self._pred_count += 1
diff --git a/python/fate/ml/nn/hetero/agg_layer/__init__.py b/python/fate/ml/nn/hetero/hetero_nn.py
similarity index 100%
rename from python/fate/ml/nn/hetero/agg_layer/__init__.py
rename to python/fate/ml/nn/hetero/hetero_nn.py
diff --git a/python/fate/ml/nn/hetero/model/guest.py b/python/fate/ml/nn/hetero/model/guest.py
deleted file mode 100644
index fc54cd43a9..0000000000
--- a/python/fate/ml/nn/hetero/model/guest.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import torch
-import torch as t
-from torch.autograd import grad
-from fate.arch import Context
-from fate.ml.nn.hetero.agg_layer.plaintext_agg_layer import InteractiveLayerGuest
-
-
-def backward_loss(z, backward_error):
-    return t.sum(z * backward_error)
-
-
-class SplitNNGuest(t.nn.Module):
-
-    def __init__(self,
-                 ctx: Context,
-                 bottom_model: t.nn.Module,
-                 top_model: t.nn.Module,
-                 interactive_layer: InteractiveLayerGuest,
-                 ):
-
-        super(SplitNNGuest, self).__init__()
-        self._bottom_model = bottom_model
-        self._top_model = top_model
-        self._interactive_layer = interactive_layer
-
-        # cached variables
-        self._bottom_fw_rg = None  # for backward usage
-        self._bottom_fw = None  # for forward & error compute
-        self._interactive_fw_rg = None  # for backward usage
-        self._interactive_fw = None  # for fw & error compute
-
-        # ctx
-        self._ctx = ctx
-
-    def __repr__(self):
-        return f"HeteroNNGuest(bottom_model={self._bottom_model}\ntop_model={self._top_model})"
-
-    def __call__(self, *args, **kwargs):
-        return self.forward(*args, **kwargs)
-
-    def _clear_state(self):
-        self._bottom_fw_rg = None
-        self._bottom_fw = None
-        self._interactive_fw_rg = None
-        self._interactive_fw = None
-    def forward(self, x):
-
-        b_out = self._bottom_model(x)
-        # bottom layer
-        self._bottom_fw_rg = b_out
-        self._bottom_fw = t.Tensor(b_out.detach()).requires_grad_(False)
-        # hetero layer
-        interactive_out = self._interactive_layer.forward(b_out)
-        self._interactive_fw_rg = interactive_out.requires_grad_(True)
-        self._interactive_fw = interactive_out
-        # top layer
-        top_out = self._top_model(self._interactive_fw_rg)
-
-        return top_out
-
-    def backward(self, loss):
-
-        interactive_error = grad(loss, self._interactive_fw_rg, retain_graph=True)[0]  # compute backward error
-        loss.backward()  # update top
-        bottom_error = self._interactive_layer.backward(interactive_error)  # compute bottom error & update hetero
-        bottom_loss = backward_loss(self._bottom_fw_rg, bottom_error)
-        bottom_loss.backward()
-        self._clear_state()
-
-    def predict(self, x):
-
-        with torch.no_grad():
-            b_out = self._bottom_model(x)
-            interactive_out = self._interactive_layer.predict(b_out)
-            top_out = self._top_model(interactive_out)
-
-        return top_out
diff --git a/python/fate/ml/nn/hetero/model/host.py b/python/fate/ml/nn/hetero/model/host.py
deleted file mode 100644
index 20e7e1e90d..0000000000
--- a/python/fate/ml/nn/hetero/model/host.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import torch
-import torch as t
-from fate.arch import Context
-from fate.ml.nn.hetero.agg_layer.plaintext_agg_layer import InteractiveLayerHost
-
-
-def backward_loss(z, backward_error):
-    return t.sum(z * backward_error)
-
-
-class SplitNNHost(t.nn.Module):
-
-    def __init__(self, bottom_model: t.nn.Module,
-                 interactive_layer: InteractiveLayerHost,
-                 ctx: Context
-                 ):
-
-        super().__init__()
-        self._bottom_model = bottom_model
-        self._interactive_layer = interactive_layer
-
-        # cached variables
-        self._bottom_fw_rg = None  # for backward usage
-        self._bottom_fw = None  # for forward & error compute
-
-        # ctx
-        self._ctx = ctx
-
-    def __call__(self, *args, **kwargs):
-        return self.forward(*args, **kwargs)
-
-    def _clear_state(self):
-        self._bottom_fw_rg = None
-        self._bottom_fw = None
-
-    def forward(self, x):
-
-        b_out = self._bottom_model(x)
-        # bottom layer
-        self._bottom_fw_rg = b_out
-        self._bottom_fw = t.Tensor(b_out.detach()).requires_grad_(False)
-        # hetero layer
-        self._interactive_layer.forward(b_out)
-
-    def backward(self):
-
-        error = self._interactive_layer.backward()
-        loss = backward_loss(self._bottom_fw_rg, error)
-        loss.backward()
-        self._clear_state()
-
-    def predict(self, x):
-
-        with torch.no_grad():
-            b_out = self._bottom_model(x)
-            self._interactive_layer.predict(b_out)
\ No newline at end of file
diff --git a/python/fate/ml/nn/hetero/model/__init__.py b/python/fate/ml/nn/model_zoo/agg_layer/__init__.py
similarity index 100%
rename from python/fate/ml/nn/hetero/model/__init__.py
rename to python/fate/ml/nn/model_zoo/agg_layer/__init__.py
diff --git a/python/fate/ml/nn/hetero/agg_layer/_base.py b/python/fate/ml/nn/model_zoo/agg_layer/_base.py
similarity index 63%
rename from python/fate/ml/nn/hetero/agg_layer/_base.py
rename to python/fate/ml/nn/model_zoo/agg_layer/_base.py
index bd8b86cb87..92cea7dcd5 100644
--- a/python/fate/ml/nn/hetero/agg_layer/_base.py
+++ b/python/fate/ml/nn/model_zoo/agg_layer/_base.py
@@ -5,15 +5,16 @@ def backward_loss(z, backward_error):
     return t.sum(z * backward_error)
 
 class InteractiveLayer(t.nn.Module):
-    def __init__(self, ctx: Context):
+    def __init__(self):
         super().__init__()
-        self.ctx = ctx
+        self._ctx = None
         self._fw_suffix = "interactive_fw_{}"
         self._bw_suffix = "interactive_bw_{}"
         self._pred_suffix = "interactive_pred_{}"
         self._fw_count = 0
         self._bw_count = 0
         self._pred_count = 0
+        self._has_ctx = False
 
     def forward(self, x):
         raise NotImplementedError()
@@ -24,6 +25,17 @@ def backward(self, error):
         raise NotImplementedError()
 
     def predict(self, x):
         raise NotImplementedError()
+    def set_context(self, ctx: Context):
+        self._ctx = ctx
+        self._has_ctx = True
+    def has_context(self):
+        return self._has_ctx
+    @property
+    def ctx(self):
+        if self._ctx is None or not self._has_ctx:
+            raise ValueError("Context is not set yet, please call set_context() first")
+        return self._ctx
+
     def _clear_state(self):
         pass
diff --git a/python/fate/ml/nn/hetero/agg_layer/he_agg_layer.py b/python/fate/ml/nn/model_zoo/agg_layer/he_agg_layer.py
similarity index 100%
rename from python/fate/ml/nn/hetero/agg_layer/he_agg_layer.py
rename to python/fate/ml/nn/model_zoo/agg_layer/he_agg_layer.py
diff --git a/python/fate/ml/nn/model_zoo/agg_layer/plaintext_agg_layer.py b/python/fate/ml/nn/model_zoo/agg_layer/plaintext_agg_layer.py
new file mode 100644
index 0000000000..a38ac4435d
--- /dev/null
+++ b/python/fate/ml/nn/model_zoo/agg_layer/plaintext_agg_layer.py
@@ -0,0 +1,151 @@
+import torch
+import torch as t
+from typing import Union, List
+from fate.ml.nn.model_zoo.agg_layer._base import InteractiveLayer, backward_loss
+
+
+class InteractiveLayerGuest(InteractiveLayer):
+
+    def __init__(self,
+                 out_features: int,
+                 host_in_features: Union[int, List[int], None],
+                 guest_in_features: Union[int, None],
+                 activation: str = "relu",
+                 lr=0.01
+                 ):
+        super(InteractiveLayerGuest, self).__init__()
+        self._out_features = out_features
+        self._guest_in_features = guest_in_features
+
+        assert activation in ["relu", "sigmoid", "tanh"], "activation should be relu, sigmoid or tanh"
+
+        if guest_in_features is not None:
+            assert isinstance(guest_in_features, int), "guest_in_features should be int"
+            self._guest_model = t.nn.Linear(guest_in_features, out_features)
+        else:
+            self._guest_model = None
+
+        assert isinstance(host_in_features, (int, list)), "host_in_features should be int or list[int]"
+        self._host_model = t.nn.ModuleList()
+        if isinstance(host_in_features, int):
+            host_in_features = [host_in_features]
+        for host_in_feature in host_in_features:
+            self._host_model.append(t.nn.Linear(host_in_feature, out_features))
+
+        if activation == "relu":
+            self._activation_layer = t.nn.ReLU()
+        elif activation == "sigmoid":
+            self._activation_layer = t.nn.Sigmoid()
+        elif activation == "tanh":
+            self._activation_layer = t.nn.Tanh()
+
+        self._host_num = len(self._host_model)
+        self._guest_input_cache = None
+        self._host_input_caches = None
+        self._out_cache = None
+
+        self._lr = lr
+
+    def _clear_state(self):
+        self._guest_input_cache = None
+        self._host_input_caches = None
+        self._out_cache = None
+
+    def _forward(self, x_g: t.Tensor = None, x_h: List[t.Tensor] = None) -> t.Tensor:
+
+        if x_g is None and x_h is None:
+            raise ValueError("guest input and host inputs cannot be both None")
+
+        if x_g is not None:
+            guest_out = self._guest_model(x_g)
+        else:
+            guest_out = 0
+
+        if x_h is not None:
+            for h_idx in range(self._host_num):
+                host_out = self._host_model[h_idx](x_h[h_idx])
+                guest_out += host_out
+
+        final_out = self._activation_layer(guest_out)
+        return final_out
+
+    def forward(self, x: t.Tensor = None) -> t.Tensor:
+
+        # save inputs for backward
+        if self._has_ctx:
+            self._host_input_caches = []
+            host_x = self.ctx.hosts.get(self._fw_suffix.format(self._fw_count))
+            self._fw_count += 1
+            for h in range(self._host_num):
+                host_input_cache = t.Tensor(host_x[h].detach()).type(t.float64).requires_grad_(True)
+                self._host_input_caches.append(host_input_cache)
+        else:
+            self._host_input_caches = None
+
+        if self._guest_model is not None:
+            self._guest_input_cache = t.Tensor(x.detach()).type(t.float64).requires_grad_(True)
+        else:
+            self._guest_input_cache = None
+
+        # _forward already applies the activation, so don't apply it a second time here
+        final_out = self._forward(self._guest_input_cache, self._host_input_caches)
+        self._out_cache = final_out
+        return final_out.detach()
+
+    def backward(self, error) -> t.Tensor:
+
+        # compute backward grads
+        has_guest_error = 0
+        backward_list = []
+        if self._guest_input_cache is not None:
+            backward_list.append(self._guest_input_cache)
+            has_guest_error = 1
+        if self._host_input_caches is not None:
+            for h in self._host_input_caches:
+                backward_list.append(h)
+
+        loss = backward_loss(self._out_cache, error)
+        loss.backward()
+        ret_error = [i.grad for i in backward_list]
+
+        # send error back to hosts
+        if self._has_ctx:
+            host_errors = ret_error[has_guest_error: ]
+            idx = 0
+            for host in self.ctx.hosts:
+                host.put(self._bw_suffix.format(self._bw_count), host_errors[idx])
+                idx += 1
+            self._bw_count += 1
+
+        self._clear_state()
+        return ret_error[0] if has_guest_error else None
+
+    def predict(self, x):
+
+        # gather host inputs for prediction
+        host_x = None
+        if self._has_ctx:
+            host_x = self.ctx.hosts.get(self._pred_suffix.format(self._pred_count))
+            self._pred_count += 1
+        with torch.no_grad():
+            # _forward already applies the activation
+            final_out = self._forward(x, host_x)
+        return final_out.detach()
+
+
+class InteractiveLayerHost(InteractiveLayer):
+
+    def __init__(self):
+        super(InteractiveLayerHost, self).__init__()
+
+    def forward(self, x) -> None:
+        self.ctx.guest.put(self._fw_suffix.format(self._fw_count), x)
+        self._fw_count += 1
+    def backward(self, error=None) -> t.Tensor:
+        error = self.ctx.guest.get(self._bw_suffix.format(self._bw_count))
+        self._bw_count += 1
+        return error
+
+    def predict(self, x):
+        self.ctx.guest.put(self._pred_suffix.format(self._pred_count), x)
+        self._pred_count += 1
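
Review note: with no context attached (set_context() never called), the new InteractiveLayerGuest above degenerates into a purely local layer, which makes it easy to smoke-test outside a federation. A minimal sketch, with made-up shapes and random stand-in tensors:

    import torch as t
    from fate.ml.nn.model_zoo.agg_layer.plaintext_agg_layer import InteractiveLayerGuest

    # no set_context(): _has_ctx stays False, so all host put/get traffic is skipped
    layer = InteractiveLayerGuest(out_features=4, host_in_features=4, guest_in_features=4).double()
    x = t.randn(8, 4).double()   # stand-in for a guest bottom-model output
    out = layer(x)               # only the guest branch runs
    err = t.randn_like(out)      # stand-in for the gradient arriving from the top model
    g = layer.backward(err)      # gradient w.r.t. the guest input; nothing is sent to hosts
    print(out.shape, g.shape)
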
diff --git a/python/fate/ml/nn/model_zoo/hetero_nn_model.py b/python/fate/ml/nn/model_zoo/hetero_nn_model.py
new file mode 100644
index 0000000000..18ed0ec048
--- /dev/null
+++ b/python/fate/ml/nn/model_zoo/hetero_nn_model.py
@@ -0,0 +1,131 @@
+import torch
+import torch as t
+from fate.arch import Context
+from fate.ml.nn.model_zoo.agg_layer.plaintext_agg_layer import InteractiveLayerGuest
+from fate.ml.nn.model_zoo.agg_layer.plaintext_agg_layer import InteractiveLayerHost
+
+
+def backward_loss(z, backward_error):
+    return t.sum(z * backward_error)
+
+
+class HeteroNNModelGuest(t.nn.Module):
+
+    def __init__(self,
+                 top_model: t.nn.Module,
+                 interactive_layer: InteractiveLayerGuest,
+                 bottom_model: t.nn.Module = None,
+                 ):
+
+        super(HeteroNNModelGuest, self).__init__()
+        self._bottom_model = bottom_model
+        self._top_model = top_model
+        self._interactive_layer = interactive_layer
+
+        # cached variables
+        self._bottom_fw = None  # for backward usage
+        self._interactive_fw_rg = None  # for backward usage
+
+        # ctx
+        self._ctx = None
+
+    def __repr__(self):
+        return f"HeteroNNGuest(bottom_model={self._bottom_model}\ntop_model={self._top_model})"
+
+    def __call__(self, *args, **kwargs):
+        return self.forward(*args, **kwargs)
+
+    def _clear_state(self):
+        self._bottom_fw = None
+        self._interactive_fw_rg = None
+
+    def set_context(self, ctx: Context):
+        self._ctx = ctx
+        self._interactive_layer.set_context(ctx)
+
+    def forward(self, x=None):
+
+        if self._bottom_model is None:
+            b_out = None
+        else:
+            b_out = self._bottom_model(x)
+        # bottom layer
+        self._bottom_fw = b_out
+
+        # hetero layer
+        interactive_out = self._interactive_layer.forward(b_out)
+        self._interactive_fw_rg = interactive_out.requires_grad_(True)
+        # top layer
+        top_out = self._top_model(self._interactive_fw_rg)
+
+        return top_out
+
+    def backward(self, loss):
+
+        loss.backward()  # update top
+        interactive_error = self._interactive_fw_rg.grad
+        bottom_error = self._interactive_layer.backward(interactive_error)  # compute bottom error & update hetero
+        if bottom_error is not None:
+            bottom_loss = backward_loss(self._bottom_fw, bottom_error)
+            bottom_loss.backward()
+        self._clear_state()
+
+    def predict(self, x=None):
+
+        with torch.no_grad():
+            if self._bottom_model is None:
+                b_out = None
+            else:
+                b_out = self._bottom_model(x)
+            interactive_out = self._interactive_layer.predict(b_out)
+            top_out = self._top_model(interactive_out)
+
+        return top_out
+
+
+class HeteroNNModelHost(t.nn.Module):
+
+    def __init__(self, bottom_model: t.nn.Module,
+                 interactive_layer: InteractiveLayerHost
+                 ):
+
+        super().__init__()
+        self._bottom_model = bottom_model
+        self._interactive_layer = interactive_layer
+
+        # cached variables
+        self._bottom_fw = None  # for backward usage
+
+        # ctx
+        self._ctx = None
+
+    def __call__(self, *args, **kwargs):
+        return self.forward(*args, **kwargs)
+
+    def _clear_state(self):
+        self._bottom_fw = None
+
+    def set_context(self, ctx: Context):
+        self._ctx = ctx
+        self._interactive_layer.set_context(ctx)
+
+    def forward(self, x):
+
+        b_out = self._bottom_model(x)
+        # bottom layer
+        self._bottom_fw = b_out
+        # hetero layer
+        self._interactive_layer.forward(b_out)
+
+    def backward(self):
+
+        error = self._interactive_layer.backward()
+        loss = backward_loss(self._bottom_fw, error)
+        loss.backward()
+        self._clear_state()
+
+    def predict(self, x):
+
+        with torch.no_grad():
+            b_out = self._bottom_model(x)
+            self._interactive_layer.predict(b_out)
\ No newline at end of file
diff --git a/python/fate/ml/nn/test/test_interactive.py b/python/fate/ml/nn/test/test_interactive.py
index fba4d85322..2b25a1aa50 100644
--- a/python/fate/ml/nn/test/test_interactive.py
+++ b/python/fate/ml/nn/test/test_interactive.py
@@ -1,4 +1,4 @@
-from fate.ml.nn.hetero.agg_layer.plaintext_agg_layer import InteractiveLayerGuest, InteractiveLayerHost
+from fate.ml.nn.model_zoo.agg_layer.plaintext_agg_layer import InteractiveLayerGuest, InteractiveLayerHost
 import sys
 from datetime import datetime
diff --git a/python/fate/ml/nn/test/test_plaintext_hetero_nn.py b/python/fate/ml/nn/test/test_plaintext_hetero_nn.py
index 84909bfaec..2014e0bf6f 100644
--- a/python/fate/ml/nn/test/test_plaintext_hetero_nn.py
+++ b/python/fate/ml/nn/test/test_plaintext_hetero_nn.py
@@ -1,6 +1,5 @@
-from fate.ml.nn.hetero.model.guest import SplitNNGuest
-from fate.ml.nn.hetero.model.host import SplitNNHost
-from fate.ml.nn.hetero.agg_layer.plaintext_agg_layer import InteractiveLayerGuest, InteractiveLayerHost
+from fate.ml.nn.model_zoo.hetero_nn_model import HeteroNNModelGuest, HeteroNNModelHost
+from fate.ml.nn.model_zoo.agg_layer.plaintext_agg_layer import InteractiveLayerGuest, InteractiveLayerHost
 import sys
 from datetime import datetime
 import pandas as pd
@@ -71,8 +70,8 @@ def forward(self, x_g, x_h):
 
     set_seed(42)
 
-    batch_size = 569
-    epoch = 2
+    batch_size = 64
+    epoch = 10
     guest_bottom = t.nn.Linear(10, 4).double()
     guest_top = t.nn.Sequential(
         t.nn.Linear(4, 1),
@@ -87,18 +86,18 @@ def forward(self, x_g, x_h):
 
     ctx = create_ctx(guest, get_current_datetime_str())
 
-    df = pd.read_csv('/examples/data/breast_hetero_guest.csv')
+    df = pd.read_csv('/home/cwj/FATE/FATE-2.0/FATE/examples/data/breast_hetero_guest.csv')
     X_g = t.Tensor(df.drop(columns=['id', 'y']).values).type(t.float64)[0: sample_num]
     y = t.Tensor(df['y'].values).type(t.float64)[0: sample_num]
-    df = pd.read_csv('/examples/data/breast_hetero_host.csv')
+    df = pd.read_csv('/home/cwj/FATE/FATE-2.0/FATE/examples/data/breast_hetero_host.csv')
     X_h = t.Tensor(df.drop(columns=['id']).values).type(t.float64)[0: sample_num]
 
-    interactive_layer = InteractiveLayerGuest(ctx, 4, 4, 4)
+    interactive_layer = InteractiveLayerGuest(4, 4, 4)
     local_model = HeteroNNLocalModel(guest_bottom, guest_top, host_bottom, interactive_layer._guest_model.double(),
                                      interactive_layer._host_model[0].double())
     loss_fn = t.nn.BCELoss()
-    optimizer = t.optim.SGD(local_model.parameters(), lr=0.01)
+    optimizer = t.optim.Adam(local_model.parameters(), lr=0.01)
 
     dataset = TensorDataset(X_g, X_h, y)
 
     for i in range(epoch):
@@ -115,29 +114,31 @@ def forward(self, x_g, x_h):
         print(loss_sum / batch_idx)
 
     pred = local_model(X_g, X_h)
+    from sklearn.metrics import roc_auc_score
+    print(roc_auc_score(y, pred.detach().numpy()))
 
 if party == "guest":
 
     ctx = create_ctx(guest, get_current_datetime_str())
-    df = pd.read_csv('/examples/data/breast_hetero_guest.csv')
+    df = pd.read_csv('/home/cwj/FATE/FATE-2.0/FATE/examples/data/breast_hetero_guest.csv')
     X_g = t.Tensor(df.drop(columns=['id', 'y']).values).type(t.float64)[0: sample_num]
     y = t.Tensor(df['y'].values).type(t.float64)[0: sample_num]
 
     dataset = TensorDataset(X_g, y)
-    interactive_layer = InteractiveLayerGuest(ctx, 4,4,4)
+    interactive_layer = InteractiveLayerGuest(4,4,4)
     interactive_layer._guest_model = interactive_layer._guest_model.double()
     interactive_layer._host_model[0] = interactive_layer._host_model[0].double()
     loss_fn = t.nn.BCELoss()
 
-    model = SplitNNGuest(
+    model = HeteroNNModelGuest(
         top_model=guest_top,
         bottom_model=guest_bottom,
-        interactive_layer=interactive_layer,
-        ctx=ctx
+        interactive_layer=interactive_layer
     )
+    model.set_context(ctx)
 
-    optimizer = t.optim.SGD(model.parameters(), lr=0.01)
+    optimizer = t.optim.Adam(model.parameters(), lr=0.01)
 
     for i in range(epoch):
         loss_sum = 0
@@ -153,20 +154,22 @@ def forward(self, x_g, x_h):
         print(loss_sum / batch_idx)
 
     pred = model(X_g)
-
+    from sklearn.metrics import roc_auc_score
+    print(roc_auc_score(y, pred.detach()))
 
 elif party == "host":
 
     ctx = create_ctx(host, get_current_datetime_str())
 
-    df = pd.read_csv('/examples/data/breast_hetero_host.csv')
+    df = pd.read_csv('/home/cwj/FATE/FATE-2.0/FATE/examples/data/breast_hetero_host.csv')
     X_h = t.Tensor(df.drop(columns=['id']).values).type(t.float64)[0: sample_num]
 
     dataset = TensorDataset(X_h)
 
-    layer = InteractiveLayerHost(ctx)
-    model = SplitNNHost(host_bottom, interactive_layer=layer, ctx=ctx)
-    optimizer = t.optim.SGD(model.parameters(), lr=0.01)
+    layer = InteractiveLayerHost()
+    model = HeteroNNModelHost(host_bottom, interactive_layer=layer)
+    optimizer = t.optim.Adam(model.parameters(), lr=0.01)
+    model.set_context(ctx)
 
     for i in range(epoch):
         for x_ in DataLoader(dataset, batch_size=batch_size):
@@ -175,4 +178,4 @@ def forward(self, x_g, x_h):
             model.backward()
             optimizer.step()
 
-    pred = model(X_h)
\ No newline at end of file
+    pred = model(X_h)
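
Review note: each of these test scripts takes the party name from sys.argv[1], so a run is two processes, e.g. "python test_plaintext_hetero_nn.py guest" in one shell and "python test_plaintext_hetero_nn.py host" in another. Since create_ctx() names the standalone federation session with the current datetime string truncated to the minute, both processes apparently need to start within the same minute to join the same session.
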
diff --git a/python/fate/ml/nn/test/test_plaintext_hetero_nn_no_guest.py b/python/fate/ml/nn/test/test_plaintext_hetero_nn_no_guest.py
new file mode 100644
index 0000000000..3fa9030173
--- /dev/null
+++ b/python/fate/ml/nn/test/test_plaintext_hetero_nn_no_guest.py
@@ -0,0 +1,145 @@
+from fate.ml.nn.model_zoo.hetero_nn_model import HeteroNNModelGuest, HeteroNNModelHost
+from fate.ml.nn.model_zoo.agg_layer.plaintext_agg_layer import InteractiveLayerGuest, InteractiveLayerHost
+import sys
+from datetime import datetime
+import pandas as pd
+from torch.utils.data import TensorDataset, DataLoader
+import tqdm
+
+
+def get_current_datetime_str():
+    return datetime.now().strftime("%Y-%m-%d-%H-%M")
+
+
+guest = ("guest", "10000")
+host = ("host", "9999")
+name = get_current_datetime_str()
+
+
+def create_ctx(local, context_name):
+    from fate.arch import Context
+    from fate.arch.computing.standalone import CSession
+    from fate.arch.federation.standalone import StandaloneFederation
+    import logging
+
+    # prepare log
+    logger = logging.getLogger()
+    logger.setLevel(logging.INFO)
+    console_handler = logging.StreamHandler()
+    console_handler.setLevel(logging.DEBUG)
+    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+    console_handler.setFormatter(formatter)
+    logger.addHandler(console_handler)
+    # init fate context
+    computing = CSession()
+    return Context(
+        computing=computing, federation=StandaloneFederation(computing, context_name, local, [guest, host])
+    )
+
+
+if __name__ == "__main__":
+
+    party = sys.argv[1]
+    import torch as t
+
+
+    def set_seed(seed):
+        t.manual_seed(seed)
+        if t.cuda.is_available():
+            t.cuda.manual_seed_all(seed)
+            t.backends.cudnn.deterministic = True
+            t.backends.cudnn.benchmark = False
+
+
+    class HeteroNNLocalModel(t.nn.Module):
+
+        def __init__(self, guest_b, guest_t, host_b, guest_i, host_i):
+            super(HeteroNNLocalModel, self).__init__()
+            self._guest_b = guest_b
+            self._guest_t = guest_t
+            self._host_b = host_b
+            self._guest_i = guest_i
+            self._host_i = host_i
+        def forward(self, x_g, x_h):
+            fw_g = self._guest_i(self._guest_b(x_g))
+            fw_h = self._host_i(self._host_b(x_h))
+            fw_ = fw_g + fw_h
+            fw_ = t.nn.ReLU()(fw_)
+            fw_ = self._guest_t(fw_)
+            return fw_
+
+    set_seed(42)
+
+    batch_size = 64
+    epoch = 10
+    guest_bottom = t.nn.Linear(10, 4).double()
+    guest_top = t.nn.Sequential(
+        t.nn.Linear(4, 1),
+        t.nn.Sigmoid()
+    ).double()
+    host_bottom = t.nn.Linear(20, 4).double()
+
+    # the breast_hetero dataset has 569 samples in total
+    sample_num = 569
+
+    if party == "guest":
+
+        ctx = create_ctx(guest, get_current_datetime_str())
+        df = pd.read_csv('/home/cwj/FATE/FATE-2.0/FATE/examples/data/breast_hetero_guest.csv')
+        y = t.Tensor(df['y'].values).type(t.float64)[0: sample_num]
+
+        dataset = TensorDataset(y)
+        interactive_layer = InteractiveLayerGuest(4, 4, guest_in_features=None)
+        interactive_layer._host_model[0] = interactive_layer._host_model[0].double()
+        loss_fn = t.nn.BCELoss()
+
+        model = HeteroNNModelGuest(
+            top_model=guest_top,
+            interactive_layer=interactive_layer,
+            bottom_model=None
+        )
+        model.set_context(ctx)
+
+        optimizer = t.optim.Adam(model.parameters(), lr=0.01)
+
+        for i in range(epoch):
+            loss_sum = 0
+            batch_idx = 0
+            for y_ in tqdm.tqdm(DataLoader(dataset, batch_size=batch_size)):
+                y_ = y_[0]
+                optimizer.zero_grad()
+                fw = model(None)  # no guest X
+                loss_ = loss_fn(fw.flatten(), y_)
+                model.backward(loss_)
+                optimizer.step()
+                loss_sum += loss_.item()
+                batch_idx += 1
+            print(loss_sum / batch_idx)
+
+        pred = model.predict()
+        # compute auc
+        from sklearn.metrics import roc_auc_score
+        print(roc_auc_score(y, pred))
+
+    elif party == "host":
+
+        ctx = create_ctx(host, get_current_datetime_str())
+
+        df = pd.read_csv('/home/cwj/FATE/FATE-2.0/FATE/examples/data/breast_hetero_host.csv')
+        X_h = t.Tensor(df.drop(columns=['id']).values).type(t.float64)[0: sample_num]
+
+        dataset = TensorDataset(X_h)
+
+        layer = InteractiveLayerHost()
+        model = HeteroNNModelHost(host_bottom, interactive_layer=layer)
+        optimizer = t.optim.Adam(model.parameters(), lr=0.01)
+        model.set_context(ctx)
+
+        for i in range(epoch):
+            for x_ in DataLoader(dataset, batch_size=batch_size):
+                optimizer.zero_grad()
+                model.forward(x_[0])
+                model.backward()
+                optimizer.step()
+
+        pred = model.predict(X_h)
\ No newline at end of file
diff --git a/python/fate/ml/nn/test/test_plaintext_hetero_nn_no_host.py b/python/fate/ml/nn/test/test_plaintext_hetero_nn_no_host.py
new file mode 100644
index 0000000000..927a31ece7
--- /dev/null
+++ b/python/fate/ml/nn/test/test_plaintext_hetero_nn_no_host.py
@@ -0,0 +1,119 @@
+from fate.ml.nn.model_zoo.hetero_nn_model import HeteroNNModelGuest, HeteroNNModelHost
+from fate.ml.nn.model_zoo.agg_layer.plaintext_agg_layer import InteractiveLayerGuest, InteractiveLayerHost
+import sys
+from datetime import datetime
+import pandas as pd
+from torch.utils.data import TensorDataset, DataLoader
+import tqdm
+
+
+def get_current_datetime_str():
+    return datetime.now().strftime("%Y-%m-%d-%H-%M")
+
+
+guest = ("guest", "10000")
+host = ("host", "9999")
+name = get_current_datetime_str()
+
+
+def create_ctx(local, context_name):
+    from fate.arch import Context
+    from fate.arch.computing.standalone import CSession
+    from fate.arch.federation.standalone import StandaloneFederation
+    import logging
+
+    # prepare log
+    logger = logging.getLogger()
+    logger.setLevel(logging.INFO)
+    console_handler = logging.StreamHandler()
+    console_handler.setLevel(logging.DEBUG)
+    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+    console_handler.setFormatter(formatter)
+    logger.addHandler(console_handler)
+    # init fate context
+    computing = CSession()
+    return Context(
+        computing=computing, federation=StandaloneFederation(computing, context_name, local, [guest, host])
+    )
+
+
+if __name__ == "__main__":
+
+    party = sys.argv[1]
+    import torch as t
+
+
+    def set_seed(seed):
+        t.manual_seed(seed)
+        if t.cuda.is_available():
+            t.cuda.manual_seed_all(seed)
+            t.backends.cudnn.deterministic = True
+            t.backends.cudnn.benchmark = False
+
+
+    class HeteroNNLocalModel(t.nn.Module):
+
+        def __init__(self, guest_b, guest_t, host_b, guest_i, host_i):
+            super(HeteroNNLocalModel, self).__init__()
+            self._guest_b = guest_b
+            self._guest_t = guest_t
+            self._host_b = host_b
+            self._guest_i = guest_i
+            self._host_i = host_i
+        def forward(self, x_g, x_h):
+            fw_g = self._guest_i(self._guest_b(x_g))
+            fw_h = self._host_i(self._host_b(x_h))
+            fw_ = fw_g + fw_h
+            fw_ = t.nn.ReLU()(fw_)
+            fw_ = self._guest_t(fw_)
+            return fw_
+
+    set_seed(42)
+
+    batch_size = 64
+    epoch = 10
+    guest_bottom = t.nn.Linear(10, 4).double()
+    guest_top = t.nn.Sequential(
+        t.nn.Linear(4, 1),
+        t.nn.Sigmoid()
+    ).double()
+    host_bottom = t.nn.Linear(20, 4).double()
+
+    # the breast_hetero dataset has 569 samples in total
+    sample_num = 569
+
+    if party == "guest":
+
+        ctx = create_ctx(guest, get_current_datetime_str())
+        df = pd.read_csv('/home/cwj/FATE/FATE-2.0/FATE/examples/data/breast_hetero_guest.csv')
+        X_g = t.Tensor(df.drop(columns=['id', 'y']).values).type(t.float64)[0: sample_num]
+        y = t.Tensor(df['y'].values).type(t.float64)[0: sample_num]
+
+        dataset = TensorDataset(X_g, y)
+        interactive_layer = InteractiveLayerGuest(4, 4, guest_in_features=4)
+        interactive_layer._guest_model = interactive_layer._guest_model.double()
+        loss_fn = t.nn.BCELoss()
+        model = HeteroNNModelGuest(
+            top_model=guest_top,
+            interactive_layer=interactive_layer,
+            bottom_model=guest_bottom
+        )
+        optimizer = t.optim.Adam(model.parameters(), lr=0.01)
+
+        for i in range(epoch):
+            loss_sum = 0
+            batch_idx = 0
+            for x_, y_ in tqdm.tqdm(DataLoader(dataset, batch_size=batch_size)):
+                optimizer.zero_grad()
+                fw = model(x_)
+                loss_ = loss_fn(fw.flatten(), y_)
+                model.backward(loss_)
+                optimizer.step()
+                loss_sum += loss_.item()
+                batch_idx += 1
+            print(loss_sum / batch_idx)
+
+        pred = model.predict(X_g)
+        # compute auc
+        from sklearn.metrics import roc_auc_score
+        print(roc_auc_score(y, pred))
\ No newline at end of file
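
Review note: pulling the pieces of this diff together, the guest-side call pattern for the new API looks like the sketch below. It assumes a standalone Context named ctx built as in the test scripts, a hypothetical DataLoader yielding double-typed guest features and labels, and the matching host-side script already running in another process:

    import torch as t
    from fate.ml.nn.model_zoo.hetero_nn_model import HeteroNNModelGuest
    from fate.ml.nn.model_zoo.agg_layer.plaintext_agg_layer import InteractiveLayerGuest

    interactive_layer = InteractiveLayerGuest(out_features=4, host_in_features=4, guest_in_features=4)
    model = HeteroNNModelGuest(
        top_model=t.nn.Sequential(t.nn.Linear(4, 1), t.nn.Sigmoid()).double(),
        interactive_layer=interactive_layer.double(),
        bottom_model=t.nn.Linear(10, 4).double(),
    )
    model.set_context(ctx)  # context is injected after construction rather than via __init__

    optimizer = t.optim.Adam(model.parameters(), lr=0.01)
    loss_fn = t.nn.BCELoss()
    for x, y in dataloader:  # hypothetical DataLoader over (features, labels)
        optimizer.zero_grad()
        out = model(x)                    # pulls host activations through the interactive layer
        loss = loss_fn(out.flatten(), y)
        model.backward(loss)              # top -> interactive -> bottom; errors are sent to hosts
        optimizer.step()

The host side mirrors this with HeteroNNModelHost: model.forward(x) followed by model.backward() per batch, exactly as in the test scripts above.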