diff --git a/components/ibm-components/watson/manage/monitor_fairness/Dockerfile b/components/ibm-components/watson/manage/monitor_fairness/Dockerfile
new file mode 100644
index 00000000000..365f8552058
--- /dev/null
+++ b/components/ibm-components/watson/manage/monitor_fairness/Dockerfile
@@ -0,0 +1,12 @@
+FROM python:3.6.8-stretch
+
+RUN pip install --upgrade pip
+RUN pip install --upgrade watson-machine-learning-client ibm-ai-openscale --no-cache-dir | tail -n 1
+RUN pip install psycopg2-binary | tail -n 1
+
+ENV APP_HOME /app
+COPY src $APP_HOME
+WORKDIR $APP_HOME
+
+ENTRYPOINT ["python"]
+CMD ["monitor_fairness.py"]
diff --git a/components/ibm-components/watson/manage/monitor_fairness/component.yaml b/components/ibm-components/watson/manage/monitor_fairness/component.yaml
new file mode 100644
index 00000000000..d6b74a0b3e5
--- /dev/null
+++ b/components/ibm-components/watson/manage/monitor_fairness/component.yaml
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name: Enable fairness monitoring on Watson OpenScale
+description: |
+  Enable model fairness monitoring on Watson OpenScale.
+inputs:
+  - {name: model_name, description: 'Deployed model name on OpenScale.', default: 'AIOS Spark German Risk Model - Final'}
+  - {name: fairness_threshold, description: 'Threshold value for fairness monitoring.', default: '0.95'}
+  - {name: fairness_min_records, description: 'Minimum number of records required to run a fairness evaluation.', default: '5'}
+  - {name: aios_manifest_path, description: 'Object storage file path for the aios manifest file.', default: 'aios.json'}
+  - {name: cos_bucket_name, description: 'Object storage bucket name.', default: 'bucket-name'}
+implementation:
+  container:
+    image: docker.io/aipipeline/monitor_fairness:latest
+    args: [
+      -u, monitor_fairness.py,
+      --model_name, {inputValue: model_name},
+      --fairness_threshold, {inputValue: fairness_threshold},
+      --fairness_min_records, {inputValue: fairness_min_records},
+      --aios_manifest_path, {inputValue: aios_manifest_path},
+      --cos_bucket_name, {inputValue: cos_bucket_name}
+    ]
diff --git a/components/ibm-components/watson/manage/monitor_fairness/src/monitor_fairness.py b/components/ibm-components/watson/manage/monitor_fairness/src/monitor_fairness.py
new file mode 100644
index 00000000000..214b1911621
--- /dev/null
+++ b/components/ibm-components/watson/manage/monitor_fairness/src/monitor_fairness.py
@@ -0,0 +1,84 @@
+import json
+import argparse
+import ibm_boto3
+from ibm_botocore.client import Config
+from ibm_ai_openscale import APIClient
+from ibm_ai_openscale.engines import *
+from ibm_ai_openscale.utils import *
+from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature
+from ibm_ai_openscale.supporting_classes.enums import *
+
+def get_secret_creds(path):
+    with open(path, 'r') as f:
+        cred = f.readline().strip('\'')
+        f.close()
+    return cred
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model_name', type=str, help='Deployed model name', default='AIOS Spark German Risk Model - Final')
+    parser.add_argument('--fairness_threshold', type=float, help='Threshold value for fairness monitoring', default=0.95)
+    parser.add_argument('--fairness_min_records', type=int, help='Minimum number of records required to run a fairness evaluation', default=5)
+    parser.add_argument('--aios_manifest_path', type=str, help='Object storage file path for the aios manifest file', default='aios.json')
+    parser.add_argument('--cos_bucket_name', type=str, help='Object storage bucket name', default='bucket-name')
+    args = parser.parse_args()
+
+    model_name = args.model_name
+    fairness_threshold = args.fairness_threshold
+    fairness_min_records = args.fairness_min_records
+    cos_bucket_name = args.cos_bucket_name
+    aios_manifest_path = args.aios_manifest_path
+
+    aios_guid = get_secret_creds("/app/secrets/aios_guid")
+    cloud_api_key = get_secret_creds("/app/secrets/cloud_api_key")
+    cos_url = get_secret_creds("/app/secrets/cos_url")
+    cos_apikey = get_secret_creds("/app/secrets/cos_apikey")
+    cos_resource_instance_id = get_secret_creds("/app/secrets/cos_resource_id")
+
+    ''' Download the fairness manifest from IBM Cloud Object Storage '''
+    cos = ibm_boto3.resource('s3',
+                             ibm_api_key_id=cos_apikey,
+                             ibm_service_instance_id=cos_resource_instance_id,
+                             ibm_auth_endpoint='https://iam.bluemix.net/oidc/token',
+                             config=Config(signature_version='oauth'),
+                             endpoint_url=cos_url)
+
+    cos.Bucket(cos_bucket_name).download_file(aios_manifest_path, 'aios.json')
+
+    print('Fairness definition file ' + aios_manifest_path + ' is downloaded')
+
+    """ Load manifest JSON file """
+    with open('aios.json') as f:
+        aios_manifest = json.load(f)
+
+    """ Initiate AIOS client """
+
+    AIOS_CREDENTIALS = {
+        "instance_guid": aios_guid,
+        "apikey": cloud_api_key,
+        "url": "https://api.aiopenscale.cloud.ibm.com"
+    }
+
+    ai_client = APIClient(aios_credentials=AIOS_CREDENTIALS)
+    print('AIOS client version: ' + ai_client.version)
+
+    ''' Setup fairness monitoring '''
+    subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
+    for sub in subscriptions_uids:
+        if ai_client.data_mart.subscriptions.get_details(sub)['entity']['asset']['name'] == model_name:
+            subscription = ai_client.data_mart.subscriptions.get(sub)
+
+    feature_list = []
+    for feature in aios_manifest['fairness_features']:
+        feature_list.append(Feature(feature['feature_name'], majority=feature['majority'], minority=feature['minority'], threshold=feature['threshold']))
+
+    subscription.fairness_monitoring.enable(
+        features=feature_list,
+        prediction_column='predictedLabel',
+        favourable_classes=aios_manifest['fairness_favourable_classes'],
+        unfavourable_classes=aios_manifest['fairness_unfavourable_classes'],
+        min_records=fairness_min_records
+    )
+
+    run_details = subscription.fairness_monitoring.run()
+    print('Fairness monitoring is enabled.')
diff --git a/components/ibm-components/watson/manage/monitor_quality/Dockerfile b/components/ibm-components/watson/manage/monitor_quality/Dockerfile
new file mode 100644
index 00000000000..933f4402464
--- /dev/null
+++ b/components/ibm-components/watson/manage/monitor_quality/Dockerfile
@@ -0,0 +1,12 @@
+FROM python:3.6.8-stretch
+
+RUN pip install --upgrade pip
+RUN pip install --upgrade watson-machine-learning-client ibm-ai-openscale --no-cache-dir | tail -n 1
+RUN pip install psycopg2-binary | tail -n 1
+
+ENV APP_HOME /app
+COPY src $APP_HOME
+WORKDIR $APP_HOME
+
+ENTRYPOINT ["python"]
+CMD ["monitor_quality.py"]
diff --git a/components/ibm-components/watson/manage/monitor_quality/component.yaml b/components/ibm-components/watson/manage/monitor_quality/component.yaml
new file mode 100644
index 00000000000..cbfaa8d575f
--- /dev/null
+++ b/components/ibm-components/watson/manage/monitor_quality/component.yaml
@@ -0,0 +1,30 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name: Enable quality monitoring on Watson OpenScale
+description: |
+  Enable model quality monitoring on Watson OpenScale.
+inputs:
+  - {name: model_name, description: 'Deployed model name on OpenScale.', default: 'AIOS Spark German Risk Model - Final'}
+  - {name: problem_type, description: 'Model problem type.', default: 'BINARY_CLASSIFICATION'}
+  - {name: quality_threshold, description: 'Threshold value for quality monitoring.', default: '0.7'}
+  - {name: quality_min_records, description: 'Minimum number of records required to run a quality evaluation.', default: '5'}
+implementation:
+  container:
+    image: docker.io/aipipeline/monitor_quality:latest
+    args: [
+      -u, monitor_quality.py,
+      --model_name, {inputValue: model_name},
+      --problem_type, {inputValue: problem_type},
+      --quality_threshold, {inputValue: quality_threshold},
+      --quality_min_records, {inputValue: quality_min_records}
+    ]
diff --git a/components/ibm-components/watson/manage/monitor_quality/src/monitor_quality.py b/components/ibm-components/watson/manage/monitor_quality/src/monitor_quality.py
new file mode 100644
index 00000000000..52c74ce3672
--- /dev/null
+++ b/components/ibm-components/watson/manage/monitor_quality/src/monitor_quality.py
@@ -0,0 +1,58 @@
+import json
+import argparse
+from ibm_ai_openscale import APIClient
+from ibm_ai_openscale.engines import *
+from ibm_ai_openscale.utils import *
+from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature
+from ibm_ai_openscale.supporting_classes.enums import *
+
+def get_secret_creds(path):
+    with open(path, 'r') as f:
+        cred = f.readline().strip('\'')
+        f.close()
+    return cred
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model_name', type=str, help='Deployed model name', default="AIOS Spark German Risk Model - Final")
+    parser.add_argument('--problem_type', type=str, help='Model problem type', default="BINARY_CLASSIFICATION")
+    parser.add_argument('--quality_threshold', type=float, help='Threshold value for quality monitoring', default=0.7)
+    parser.add_argument('--quality_min_records', type=int, help='Minimum number of records required to run a quality evaluation', default=5)
+    args = parser.parse_args()
+
+    model_name = args.model_name
+    problem_type = args.problem_type
+    quality_threshold = args.quality_threshold
+    quality_min_records = args.quality_min_records
+
+    aios_guid = get_secret_creds("/app/secrets/aios_guid")
+    cloud_api_key = get_secret_creds("/app/secrets/cloud_api_key")
+
+    AIOS_CREDENTIALS = {
+        "instance_guid": aios_guid,
+        "apikey": cloud_api_key,
+        "url": "https://api.aiopenscale.cloud.ibm.com"
+    }
+
+    ai_client = APIClient(aios_credentials=AIOS_CREDENTIALS)
+    print('AIOS client version: ' + ai_client.version)
+
+    ''' Setup quality monitoring '''
+    subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
+    for sub in subscriptions_uids:
+        if ai_client.data_mart.subscriptions.get_details(sub)['entity']['asset']['name'] == model_name:
+            subscription = ai_client.data_mart.subscriptions.get(sub)
+
+    PROBLEMTYPE = ProblemType.BINARY_CLASSIFICATION
+    if problem_type == 'BINARY_CLASSIFICATION':
+        PROBLEMTYPE = ProblemType.BINARY_CLASSIFICATION
+    elif problem_type == 'MULTICLASS_CLASSIFICATION':
+        PROBLEMTYPE = ProblemType.MULTICLASS_CLASSIFICATION
+    elif problem_type == 'REGRESSION':
+        PROBLEMTYPE = ProblemType.REGRESSION
+
+    subscription.quality_monitoring.enable(problem_type=PROBLEMTYPE, threshold=quality_threshold, min_records=quality_min_records)
+    # The monitoring run is triggered only after the minimum number of payload records has been posted.
+    # run_details = subscription.quality_monitoring.run()
+
+    print('Quality monitoring is enabled.')
diff --git a/components/ibm-components/watson/manage/subscribe/Dockerfile b/components/ibm-components/watson/manage/subscribe/Dockerfile
new file mode 100644
index 00000000000..2c9f66930f5
--- /dev/null
+++ b/components/ibm-components/watson/manage/subscribe/Dockerfile
@@ -0,0 +1,12 @@
+FROM python:3.6.8-stretch
+
+RUN pip install --upgrade pip
+RUN pip install --upgrade watson-machine-learning-client ibm-ai-openscale --no-cache-dir | tail -n 1
+RUN pip install psycopg2-binary | tail -n 1
+
+ENV APP_HOME /app
+COPY src $APP_HOME
+WORKDIR $APP_HOME
+
+ENTRYPOINT ["python"]
+CMD ["subscribe.py"]
diff --git a/components/ibm-components/watson/manage/subscribe/component.yaml b/components/ibm-components/watson/manage/subscribe/component.yaml
new file mode 100644
index 00000000000..c19c86745f8
--- /dev/null
+++ b/components/ibm-components/watson/manage/subscribe/component.yaml
@@ -0,0 +1,34 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name: Subscribe Watson OpenScale
+description: |
+  Bind deployed models and subscribe them to the Watson OpenScale service.
+inputs:
+  - {name: model_name, description: 'Deployed model name.', default: 'AIOS Spark German Risk Model - Final'}
+  - {name: model_uid, description: 'Deployed model uid.', default: 'dummy uid'}
+  - {name: aios_schema, description: 'OpenScale schema name.', default: 'data_mart_credit_risk'}
+  - {name: label_column, description: 'Model label column name.', default: 'Risk'}
+outputs:
+  - {name: model_name, description: 'Deployed model name.'}
+implementation:
+  container:
+    image: docker.io/aipipeline/subscribe:latest
+    args: [
+      -u, subscribe.py,
+      --model_name, {inputValue: model_name},
+      --model_uid, {inputValue: model_uid},
+      --aios_schema, {inputValue: aios_schema},
+      --label_column, {inputValue: label_column}
+    ]
+    fileOutputs:
+      model_name: /tmp/model_name.txt
diff --git a/components/ibm-components/watson/manage/subscribe/src/subscribe.py b/components/ibm-components/watson/manage/subscribe/src/subscribe.py
new file mode 100644
index 00000000000..ea59d5dbc29
--- /dev/null
+++ b/components/ibm-components/watson/manage/subscribe/src/subscribe.py
@@ -0,0 +1,124 @@
+import json
+import argparse
+from ibm_ai_openscale import APIClient
+from ibm_ai_openscale.engines import *
+from ibm_ai_openscale.utils import *
+from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature
+from ibm_ai_openscale.supporting_classes.enums import *
+from watson_machine_learning_client import WatsonMachineLearningAPIClient
+
+def get_secret_creds(path):
+    with open(path, 'r') as f:
+        cred = f.readline().strip('\'')
+        f.close()
+    return cred
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--aios_schema', type=str, help='AI OpenScale schema name', default="data_mart_credit_risk")
+    parser.add_argument('--model_name', type=str, help='Deployed model name', default="AIOS Spark German Risk Model - Final")
+    parser.add_argument('--model_uid', type=str, help='Deployed model uid', default="dummy uid")
+    parser.add_argument('--label_column', type=str, help='Model label column name', default="Risk")
+    args = parser.parse_args()
+
+    aios_schema = args.aios_schema
+    model_name = args.model_name
+    model_uid = args.model_uid
+    label_column = args.label_column
+
+    wml_creds = get_secret_creds("/app/secrets/wml_credentials")
+    aios_guid = get_secret_creds("/app/secrets/aios_guid")
+    cloud_api_key = get_secret_creds("/app/secrets/cloud_api_key")
+    postgres_uri = get_secret_creds("/app/secrets/postgres_uri")
+
+    WML_CREDENTIALS = json.loads(wml_creds)
+
+    AIOS_CREDENTIALS = {
+        "instance_guid": aios_guid,
+        "apikey": cloud_api_key,
+        "url": "https://api.aiopenscale.cloud.ibm.com"
+    }
+
+    if postgres_uri == '':
+        POSTGRES_CREDENTIALS = None
+    else:
+        POSTGRES_CREDENTIALS = {
+            "uri": postgres_uri
+        }
+
+    wml_client = WatsonMachineLearningAPIClient(WML_CREDENTIALS)
+    ai_client = APIClient(aios_credentials=AIOS_CREDENTIALS)
+    print('AIOS client version: ' + ai_client.version)
+
+    ''' Setup Postgres SQL and AIOS binding '''
+    SCHEMA_NAME = aios_schema
+    try:
+        data_mart_details = ai_client.data_mart.get_details()
+        if 'internal_database' in data_mart_details['database_configuration'] and data_mart_details['database_configuration']['internal_database']:
+            if POSTGRES_CREDENTIALS is None:
+                print('Using existing internal datamart')
+            else:
+                print('Switching to external datamart')
+                ai_client.data_mart.delete(force=True)
+                create_postgres_schema(postgres_credentials=POSTGRES_CREDENTIALS, schema_name=SCHEMA_NAME)
+                ai_client.data_mart.setup(db_credentials=POSTGRES_CREDENTIALS, schema=SCHEMA_NAME)
+        else:
+            print('Using existing external datamart')
+    except Exception:
+        if POSTGRES_CREDENTIALS is None:
+            print('Setting up internal datamart')
+            ai_client.data_mart.setup(internal_db=True)
+        else:
+            print('Setting up external datamart')
+            create_postgres_schema(postgres_credentials=POSTGRES_CREDENTIALS, schema_name=SCHEMA_NAME)
+            ai_client.data_mart.setup(db_credentials=POSTGRES_CREDENTIALS, schema=SCHEMA_NAME)
+
+    data_mart_details = ai_client.data_mart.get_details()
+    print(data_mart_details)
+
+    binding_uid = ai_client.data_mart.bindings.add('WML instance', WatsonMachineLearningInstance(WML_CREDENTIALS))
+    if binding_uid is None:
+        binding_uid = ai_client.data_mart.bindings.get_details()['service_bindings'][0]['metadata']['guid']
+    bindings_details = ai_client.data_mart.bindings.get_details()
+
+    print('\nWML binding ID is ' + binding_uid + '\n')
+
+    ''' Create subscriptions '''
+    subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
+    for subscription in subscriptions_uids:
+        sub_name = ai_client.data_mart.subscriptions.get_details(subscription)['entity']['asset']['name']
+        if sub_name == model_name:
+            ai_client.data_mart.subscriptions.delete(subscription)
+            print('Deleted existing subscription for', model_name)
+
+    subscription = ai_client.data_mart.subscriptions.add(WatsonMachineLearningAsset(
+        model_uid,
+        label_column=label_column,
+        prediction_column='predictedLabel',
+        probability_column='probability'
+    ))
+    if subscription is None:
+        print('Exists already')
+        # subscription already exists; get the existing one
+        subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
+        for sub in subscriptions_uids:
+            if ai_client.data_mart.subscriptions.get_details(sub)['entity']['asset']['name'] == model_name:
+                subscription = ai_client.data_mart.subscriptions.get(sub)
+
+    subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
+    print(subscription.get_details())
+
+    ''' Look up the scoring endpoint to make sure the subscription is set up properly '''
+    credit_risk_scoring_endpoint = None
+    deployment_uid = subscription.get_deployment_uids()[0]
+
+    print('\n' + deployment_uid + '\n')
+
+    for deployment in wml_client.deployments.get_details()['resources']:
+        if deployment_uid in deployment['metadata']['guid']:
+            credit_risk_scoring_endpoint = deployment['entity']['scoring_url']
+
+    print('Scoring endpoint is: ' + credit_risk_scoring_endpoint + '\n')
+
+    with open("/tmp/model_name.txt", "w") as report:
+        report.write(model_name)
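
For reviewers, a minimal, hypothetical sketch of how the three components added above could be wired together with the Kubeflow Pipelines v1 SDK. It is not part of this change: the component file paths are the ones added in this PR, but the pipeline name, parameter values, and the assumption that model_uid comes from an earlier deploy step are illustrative only, and each container additionally expects its credentials to be mounted under /app/secrets, which is not shown here.

import kfp
import kfp.dsl as dsl
import kfp.components as components

# Load the component definitions added in this PR (paths relative to the repo root).
subscribe_op = components.load_component_from_file(
    'components/ibm-components/watson/manage/subscribe/component.yaml')
monitor_quality_op = components.load_component_from_file(
    'components/ibm-components/watson/manage/monitor_quality/component.yaml')
monitor_fairness_op = components.load_component_from_file(
    'components/ibm-components/watson/manage/monitor_fairness/component.yaml')

@dsl.pipeline(
    name='Watson OpenScale monitoring',
    description='Subscribe a deployed model and enable quality and fairness monitoring.'
)
def openscale_monitoring_pipeline(
        model_name='AIOS Spark German Risk Model - Final',
        model_uid='dummy uid',  # assumed to be produced by an earlier deploy step
        aios_manifest_path='aios.json',
        cos_bucket_name='bucket-name'):
    # Bind the WML instance and subscribe the deployed model to OpenScale.
    subscribe = subscribe_op(
        model_name=model_name,
        model_uid=model_uid,
        aios_schema='data_mart_credit_risk',
        label_column='Risk')

    # Both monitors consume the model name written by the subscribe step,
    # so they run only after the subscription exists.
    monitor_quality_op(
        model_name=subscribe.outputs['model_name'],
        problem_type='BINARY_CLASSIFICATION',
        quality_threshold='0.7',
        quality_min_records='5')

    monitor_fairness_op(
        model_name=subscribe.outputs['model_name'],
        fairness_threshold='0.95',
        fairness_min_records='5',
        aios_manifest_path=aios_manifest_path,
        cos_bucket_name=cos_bucket_name)

if __name__ == '__main__':
    kfp.compiler.Compiler().compile(openscale_monitoring_pipeline, 'openscale_monitoring.tar.gz')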