diff --git a/examples/benchmark_performance/coordinated_lr/test_lr.py b/examples/benchmark_performance/coordinated_lr/test_lr.py
index a5652d0fbc..d92d2787ae 100644
--- a/examples/benchmark_performance/coordinated_lr/test_lr.py
+++ b/examples/benchmark_performance/coordinated_lr/test_lr.py
@@ -74,7 +74,7 @@ def main(config="../../config.yaml", param="./lr_config.yaml", namespace=""):
                          input_model=lr_0.outputs["output_model"])
 
     evaluation_0 = Evaluation("evaluation_0",
-                              runtime_roles=["guest"],
+                              runtime_parties=["guest"],
                               metrics=["auc", "binary_precision", "binary_accuracy", "binary_recall"],
                               input_data=lr_0.outputs["train_output_data"])
     pipeline.add_task(reader_0)
diff --git a/examples/benchmark_performance/sshe_lr/test_lr.py b/examples/benchmark_performance/sshe_lr/test_lr.py
index 22bdc8c20f..5634db65f4 100644
--- a/examples/benchmark_performance/sshe_lr/test_lr.py
+++ b/examples/benchmark_performance/sshe_lr/test_lr.py
@@ -73,7 +73,7 @@ def main(config="../../config.yaml", param="./lr_config.yaml", namespace=""):
                          input_model=lr_0.outputs["output_model"])
 
     evaluation_0 = Evaluation("evaluation_0",
-                              runtime_roles=["guest"],
+                              runtime_parties=["guest"],
                               metrics=["auc", "binary_precision", "binary_accuracy", "binary_recall"],
                               input_data=lr_0.outputs["train_output_data"])
     pipeline.add_task(reader_0)
diff --git a/examples/benchmark_quality/linr/fate-linr.py b/examples/benchmark_quality/linr/fate-linr.py
index 397a2f6b33..b314f1a4ba 100644
--- a/examples/benchmark_quality/linr/fate-linr.py
+++ b/examples/benchmark_quality/linr/fate-linr.py
@@ -69,7 +69,7 @@ def main(config="../../config.yaml", param="./linr_config.yaml", namespace=""):
                          input_model=linr_0.outputs["output_model"])"""
 
     evaluation_0 = Evaluation("evaluation_0",
-                              runtime_roles=["guest"],
+                              runtime_parties=["guest"],
                               metrics=["r2_score", "mse", "rmse"],
diff --git a/examples/benchmark_quality/linr/fate-sshe-linr.py b/examples/benchmark_quality/linr/fate-sshe-linr.py
index fab0c78e15..f77bd29872 100644
--- a/examples/benchmark_quality/linr/fate-sshe-linr.py
+++ b/examples/benchmark_quality/linr/fate-sshe-linr.py
@@ -69,7 +69,7 @@ def main(config="../../config.yaml", param="./linr_config.yaml", namespace=""):
                          input_model=linr_0.outputs["output_model"])"""
 
     evaluation_0 = Evaluation("evaluation_0",
-                              runtime_roles=["guest"],
+                              runtime_parties=["guest"],
                               metrics=["r2_score", "mse", "rmse"],
diff --git a/examples/benchmark_quality/lr/pipeline-lr-binary.py b/examples/benchmark_quality/lr/pipeline-lr-binary.py
index 2d8e74f870..e78ddea7fe 100644
--- a/examples/benchmark_quality/lr/pipeline-lr-binary.py
+++ b/examples/benchmark_quality/lr/pipeline-lr-binary.py
@@ -71,7 +71,7 @@ def main(config="../../config.yaml", param="./breast_config.yaml", namespace="")
                          input_model=lr_0.outputs["output_model"])
 
     evaluation_0 = Evaluation("evaluation_0",
-                              runtime_roles=["guest"],
+                              runtime_parties=["guest"],
                               metrics=["auc", "binary_precision", "binary_accuracy", "binary_recall"],
                               input_data=lr_0.outputs["train_output_data"])
     pipeline.add_task(reader_0)
diff --git a/examples/benchmark_quality/lr/pipeline-lr-multi.py b/examples/benchmark_quality/lr/pipeline-lr-multi.py
index 8e26174e5e..c0914f1a69 100644
--- a/examples/benchmark_quality/lr/pipeline-lr-multi.py
+++ b/examples/benchmark_quality/lr/pipeline-lr-multi.py
@@ -70,7 +70,7 @@ def main(config="../../config.yaml", param="./vehicle_config.yaml", namespace=""
                          input_model=lr_0.outputs["output_model"])
 
     evaluation_0 = Evaluation('evaluation_0',
-                              runtime_roles=['guest'],
+                              runtime_parties=['guest'],
                               input_data=lr_0.outputs["train_output_data"],
                               predict_column_name='predict_result',
                               metrics=['multi_recall', 'multi_accuracy', 'multi_precision'])
diff --git a/examples/benchmark_quality/lr/pipeline-sshe-lr-binary.py b/examples/benchmark_quality/lr/pipeline-sshe-lr-binary.py
index ae73096e10..a36e012c47 100644
--- a/examples/benchmark_quality/lr/pipeline-sshe-lr-binary.py
+++ b/examples/benchmark_quality/lr/pipeline-sshe-lr-binary.py
@@ -73,7 +73,7 @@ def main(config="../../config.yaml", param="./breast_config.yaml", namespace="")
                          input_model=lr_0.outputs["output_model"])
 
     evaluation_0 = Evaluation("evaluation_0",
-                              runtime_roles=["guest"],
+                              runtime_parties=["guest"],
                               metrics=["auc", "binary_precision", "binary_accuracy", "binary_recall"],
                               input_data=lr_0.outputs["train_output_data"])
     pipeline.add_task(reader_0)
diff --git a/examples/benchmark_quality/lr/pipeline-sshe-lr-multi.py b/examples/benchmark_quality/lr/pipeline-sshe-lr-multi.py
index 96b79b6253..64bc413fcb 100644
--- a/examples/benchmark_quality/lr/pipeline-sshe-lr-multi.py
+++ b/examples/benchmark_quality/lr/pipeline-sshe-lr-multi.py
@@ -71,7 +71,7 @@ def main(config="../../config.yaml", param="./vehicle_config.yaml", namespace=""
                          input_model=lr_0.outputs["output_model"])
 
     evaluation_0 = Evaluation('evaluation_0',
-                              runtime_roles=['guest'],
+                              runtime_parties=['guest'],
                               input_data=lr_0.outputs["train_output_data"],
                               predict_column_name='predict_result',
                               metrics=['multi_recall', 'multi_accuracy', 'multi_precision'])
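Every hunk above applies the same rename: the Evaluation task is scoped to the guest party through runtime_parties instead of the old runtime_roles keyword. As a rough illustration only (a sketch assuming the fate_client.pipeline API these example scripts use, with reader_0, lr_0, and pipeline defined earlier in each script as in the files above, not in this snippet), the resulting task definition looks like this:

from fate_client.pipeline.components.fate import Evaluation

# Sketch of the post-change pattern; lr_0 and pipeline are assumed to come
# from the surrounding example script and are not created here.
evaluation_0 = Evaluation("evaluation_0",
                          runtime_parties=["guest"],  # formerly runtime_roles=["guest"]
                          metrics=["auc", "binary_precision", "binary_accuracy", "binary_recall"],
                          input_data=lr_0.outputs["train_output_data"])
pipeline.add_task(evaluation_0)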