From d9b2fcb83a750e0bed47cf0fa065215932bb7608 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 30 Oct 2024 15:55:11 +0100 Subject: [PATCH 01/21] Merged with develop --- src/connectors/aibuilder/__init__.py | 0 .../aibuilder/aibuilder_mlmodel_connector.py | 190 ++++++++++++++++++ src/database/model/platform/platform_names.py | 1 + 3 files changed, 191 insertions(+) create mode 100644 src/connectors/aibuilder/__init__.py create mode 100644 src/connectors/aibuilder/aibuilder_mlmodel_connector.py diff --git a/src/connectors/aibuilder/__init__.py b/src/connectors/aibuilder/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py new file mode 100644 index 00000000..01e4af48 --- /dev/null +++ b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py @@ -0,0 +1,190 @@ +"""AIBuilder MLModel Connector +This module knows how to load an AIBuilder object from their API and to +convert the AIBuilder response to the AIoD MLModel format. 
+""" + +#import dateutil.parser +#import requests +#import logging + +#from requests.exceptions import HTTPError +# +#from typing import Iterator, Any + +# +# +#from database.model import field_length +#from database.model.ai_resource.text import Text +#from database.model.concept.aiod_entry import AIoDEntryCreate + + +#from database.model.agent.contact import Contact +#from database.model.models_and_experiments.runnable_distribution import RunnableDistribution +# +#from database.model.resource_read_and_create import resource_create +# + +from sqlmodel import SQLModel + +from database.model.models_and_experiments.ml_model import MLModel +from database.model.platform.platform_names import PlatformName + +from connectors.abstract.resource_connector_by_id import ResourceConnectorByDate +from connectors.resource_with_relations import ResourceWithRelations +from connectors.record_error import RecordError + +class AIBuilderMLModelConnector(ResourceConnectorByDate[MLModel]): + @property + def resource_class(self) -> type[MLModel]: + return MLModel + + @property + def platform_name(self) -> PlatformName: + return PlatformName.aibuilder + + def retry(self, identifier: int) -> ResourceWithRelations[SQLModel] | RecordError: + return self.fetch_record(identifier) + + + + + + + + + def fetch_record(self, identifier: int) -> ResourceWithRelations[MLModel] | RecordError: + url_mlmodel = f"https://www.openml.org/api/v1/json/flow/{identifier}" + response = requests.get(url_mlmodel) + if not response.ok: + msg = response.json()["error"]["message"] + return RecordError( + identifier=str(identifier), + error=f"Error while fetching flow from OpenML: '{msg}'.", + ) + mlmodel_json = response.json()["flow"] + + description_or_error = _description(mlmodel_json, identifier) + if isinstance(description_or_error, RecordError): + return description_or_error + description = description_or_error + + distribution = _distributions(mlmodel_json) + + openml_creator = 
_as_list(mlmodel_json.get("creator", None)) + openml_contributor = _as_list(mlmodel_json.get("contributor", None)) + pydantic_class_contact = resource_create(Contact) + creator_names = [ + pydantic_class_contact(name=name) for name in openml_creator + openml_contributor + ] + + tags = _as_list(mlmodel_json.get("tag", None)) + + pydantic_class = resource_create(MLModel) + mlmodel = pydantic_class( + aiod_entry=AIoDEntryCreate( + status="published", + ), + platform_resource_identifier=identifier, + platform=self.platform_name, + name=mlmodel_json["name"], + same_as=url_mlmodel, + description=description, + date_published=dateutil.parser.parse(mlmodel_json["upload_date"]), + license=mlmodel_json.get("licence", None), + distribution=distribution, + is_accessible_for_free=True, + keyword=[tag for tag in tags] if tags else [], + version=mlmodel_json["version"], + ) + + return ResourceWithRelations[pydantic_class]( # type:ignore + resource=mlmodel, + resource_ORM_class=MLModel, + related_resources={"creator": creator_names}, + ) + + def fetch( + self, offset: int, from_identifier: int + ) -> Iterator[ResourceWithRelations[SQLModel] | RecordError]: + url_mlmodel = ( + "https://www.openml.org/api/v1/json/flow/list/" + f"limit/{self.limit_per_iteration}/offset/{offset}" + ) + response = requests.get(url_mlmodel) + + if not response.ok: + status_code = response.status_code + msg = response.json()["error"]["message"] + err_msg = f"Error while fetching {url_mlmodel} from OpenML: ({status_code}) {msg}" + logging.error(err_msg) + err = HTTPError(err_msg) + yield RecordError(identifier=None, error=err) + return + + try: + mlmodel_summaries = response.json()["flows"]["flow"] + except Exception as e: + yield RecordError(identifier=None, error=e) + return + + for summary in mlmodel_summaries: + identifier = None + # ToDo: discuss how to accommodate pipelines. Excluding sklearn pipelines for now. + # Note: weka doesn't have a standard method to define pipeline. 
+ # There are no mlr pipelines in OpenML. + identifier = summary["id"] + if "sklearn.pipeline" not in summary["name"]: + try: + if identifier < from_identifier: + yield RecordError(identifier=identifier, error="Id too low", ignore=True) + if from_identifier is None or identifier >= from_identifier: + yield self.fetch_record(identifier) + except Exception as e: + yield RecordError(identifier=identifier, error=e) + else: + yield RecordError(identifier=identifier, error="Sklearn pipeline not processed!") + + +def _description(mlmodel_json: dict[str, Any], identifier: int) -> Text | None | RecordError: + description = ( + mlmodel_json["full_description"] + if mlmodel_json.get("full_description", None) + else mlmodel_json.get("description", None) + ) + if isinstance(description, type(None)): + return None + if isinstance(description, list) and len(description) == 0: + return None + elif not isinstance(description, str): + return RecordError(identifier=str(identifier), error="Description of unknown format.") + if len(description) > field_length.LONG: + text_break = " [...]" + description = description[: field_length.LONG - len(text_break)] + text_break + if description: + return Text(plain=description) + return None + + +def _distributions(mlmodel_json) -> list[RunnableDistribution]: + if ( + (mlmodel_json.get("installation_notes") is None) + and (mlmodel_json.get("dependencies") is None) + and (mlmodel_json.get("binary_url") is None) + ): + return [] + return [ + RunnableDistribution( + dependency=mlmodel_json.get("dependencies", None), + installation=mlmodel_json.get("installation_notes", None), + content_url=mlmodel_json.get("binary_url", None), + ) + ] + + +def _as_list(value: Any | list[Any]) -> list[Any]: + """Wrap it with a list, if it is not a list""" + if not value: + return [] + if not isinstance(value, list): + return [value] + return value diff --git a/src/database/model/platform/platform_names.py b/src/database/model/platform/platform_names.py index 
c43fbc6e..98d65bc1 100644 --- a/src/database/model/platform/platform_names.py +++ b/src/database/model/platform/platform_names.py @@ -15,3 +15,4 @@ class PlatformName(str, enum.Enum): huggingface = "huggingface" zenodo = "zenodo" ai4experiments = "ai4experiments" + aibuilder = "aibuilder" From 23bff69458294d59a0560484baf3539f79c0497d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 12 Nov 2024 13:56:19 +0100 Subject: [PATCH 02/21] Structure with empty token --- .env | 3 + connectors/aibuilder/Dockerfile | 12 + connectors/aibuilder/cron | 1 + connectors/aibuilder/entry.sh | 7 + connectors/aibuilder/mlmodels.sh | 20 + docker-compose.yaml | 20 + .../aibuilder/aibuilder_mappings.py | 9 + .../aibuilder/aibuilder_mlmodel_connector.py | 343 +++++++++++------- 8 files changed, 282 insertions(+), 133 deletions(-) create mode 100644 connectors/aibuilder/Dockerfile create mode 100644 connectors/aibuilder/cron create mode 100755 connectors/aibuilder/entry.sh create mode 100755 connectors/aibuilder/mlmodels.sh create mode 100644 src/connectors/aibuilder/aibuilder_mappings.py diff --git a/.env b/.env index d3f0a6d5..529cbb41 100644 --- a/.env +++ b/.env @@ -17,6 +17,9 @@ AIOD_KEYCLOAK_PORT=8080 EGICHECKINALIAS= +#AIBUILDER +API_TOKEN="" + #ELASTICSEARCH ES_USER=elastic ES_PASSWORD=changeme diff --git a/connectors/aibuilder/Dockerfile b/connectors/aibuilder/Dockerfile new file mode 100644 index 00000000..18ba0ed4 --- /dev/null +++ b/connectors/aibuilder/Dockerfile @@ -0,0 +1,12 @@ +FROM aiod_metadata_catalogue + +COPY cron /etc/cron.d/aiod +COPY mlmodels.sh /opt/connectors/script/mlmodels.sh +COPY entry.sh /opt/connectors/script/entry.sh + +USER root +RUN apt -y install cron +RUN chmod +x /etc/cron.d/aiod /opt/connectors/script/mlmodels.sh +RUN crontab /etc/cron.d/aiod + +WORKDIR /app \ No newline at end of file diff --git a/connectors/aibuilder/cron b/connectors/aibuilder/cron new file mode 100644 index 00000000..0b8f3d1a --- /dev/null +++ 
b/connectors/aibuilder/cron @@ -0,0 +1 @@ +40 * * * * bash /opt/connectors/script/mlmodels.sh >> /opt/connectors/data/aibuilder/mlmodel/cron.log 2>&1 diff --git a/connectors/aibuilder/entry.sh b/connectors/aibuilder/entry.sh new file mode 100755 index 00000000..c211a6eb --- /dev/null +++ b/connectors/aibuilder/entry.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# If this directory does not exist, the cron job cannot log (and cannot run) +mkdir -p /opt/connectors/data/aibuilder/mlmodel + +# Run cron on the foreground with log level WARN +/usr/sbin/cron -f -l 4 diff --git a/connectors/aibuilder/mlmodels.sh b/connectors/aibuilder/mlmodels.sh new file mode 100755 index 00000000..2b11f74c --- /dev/null +++ b/connectors/aibuilder/mlmodels.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +WORK_DIR=/opt/connectors/data/aibuilder/mlmodel +CONNECTOR=connectors.aibuilder.aibuilder_mlmodel_connector.AIBuilderMLModelConnector + +another_instance() +{ + echo $(date -u) "This script is already running in a different thread." + exit 1 +} +exec 9< "$0" +flock -n -x 9 || another_instance + +echo $(date -u) "Starting synchronization..." +PYTHONPATH=/app /usr/local/bin/python3 /app/connectors/synchronization.py \ + -c $CONNECTOR \ + -w $WORK_DIR \ + --from-date "2020-06-21" \ + --save-every 100 >> ${WORK_DIR}/connector.log 2>&1 +echo $(date -u) "Synchronization Done." 
diff --git a/docker-compose.yaml b/docker-compose.yaml index a188d4a4..81c0fd61 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -120,6 +120,26 @@ services: app: condition: service_healthy + aibuilder-connector: + profiles: ["aibuilder"] + build: + context: connectors/aibuilder + dockerfile: Dockerfile + image: aiod_aibuilder_connector + container_name: aibuilder-connector + env_file: .env + environment: + - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + volumes: + - ./src:/app + - ${DATA_PATH}/connectors:/opt/connectors/data + - ./connectors/aibuilder/:/opt/connectors/script + command: > + /bin/bash -c "/opt/connectors/script/entry.sh" + depends_on: + app: + condition: service_healthy + sqlserver: image: mysql:8.3.0 container_name: sqlserver diff --git a/src/connectors/aibuilder/aibuilder_mappings.py b/src/connectors/aibuilder/aibuilder_mappings.py new file mode 100644 index 00000000..667d055b --- /dev/null +++ b/src/connectors/aibuilder/aibuilder_mappings.py @@ -0,0 +1,9 @@ +mlmodel_mapping = { + 'platform_resource_identifier': "fullId", + 'name': "name", + 'date_published': "created", + 'contact': "authors", + 'creator': "publisher", + 'description': "description", + 'keyword': "tags" +} diff --git a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py index 01e4af48..15e29d22 100644 --- a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py +++ b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py @@ -3,36 +3,40 @@ convert the AIBuilder response to the AIoD MLModel format. 
""" -#import dateutil.parser -#import requests -#import logging - -#from requests.exceptions import HTTPError -# -#from typing import Iterator, Any - -# -# -#from database.model import field_length -#from database.model.ai_resource.text import Text -#from database.model.concept.aiod_entry import AIoDEntryCreate - - -#from database.model.agent.contact import Contact -#from database.model.models_and_experiments.runnable_distribution import RunnableDistribution -# -#from database.model.resource_read_and_create import resource_create -# +import os +import logging +import requests +from requests.exceptions import HTTPError from sqlmodel import SQLModel +from datetime import datetime +from ratelimit import limits, sleep_and_retry +from typing import Iterator, Tuple, Any + +from config import REQUEST_TIMEOUT from database.model.models_and_experiments.ml_model import MLModel from database.model.platform.platform_names import PlatformName - -from connectors.abstract.resource_connector_by_id import ResourceConnectorByDate +from database.model.ai_resource.text import Text +from database.model import field_length +from database.model.models_and_experiments.runnable_distribution import RunnableDistribution +from database.model.resource_read_and_create import resource_create +from database.model.agent.contact import Contact +from database.model.concept.aiod_entry import AIoDEntryCreate + +from connectors.abstract.resource_connector_by_date import ResourceConnectorByDate from connectors.resource_with_relations import ResourceWithRelations from connectors.record_error import RecordError +from .aibuilder_mappings import mlmodel_mapping + +TOKEN = os.getenv("API_TOKEN", "") +API_URL = "https://aiexp-dev.ai4europe.eu/federation" +GLOBAL_MAX_CALLS_MINUTE = 60 +GLOBAL_MAX_CALLS_HOUR = 2000 +ONE_MINUTE = 60 +ONE_HOUR = 3600 + class AIBuilderMLModelConnector(ResourceConnectorByDate[MLModel]): @property def resource_class(self) -> type[MLModel]: @@ -42,144 +46,217 @@ def 
resource_class(self) -> type[MLModel]: def platform_name(self) -> PlatformName: return PlatformName.aibuilder - def retry(self, identifier: int) -> ResourceWithRelations[SQLModel] | RecordError: - return self.fetch_record(identifier) - - + def retry(self, identifier: int) -> ResourceWithRelations[MLModel] | RecordError: + raise NotImplementedError("Not implemented.") - - - - - - def fetch_record(self, identifier: int) -> ResourceWithRelations[MLModel] | RecordError: - url_mlmodel = f"https://www.openml.org/api/v1/json/flow/{identifier}" - response = requests.get(url_mlmodel) + @sleep_and_retry + @limits(calls=GLOBAL_MAX_CALLS_MINUTE, period=ONE_MINUTE) + @limits(calls=GLOBAL_MAX_CALLS_HOUR, period=ONE_HOUR) + def get_response(self, url) -> requests.Response | RecordError: + response = requests.get(url, timeout=REQUEST_TIMEOUT) if not response.ok: - msg = response.json()["error"]["message"] - return RecordError( - identifier=str(identifier), - error=f"Error while fetching flow from OpenML: '{msg}'.", - ) - mlmodel_json = response.json()["flow"] - - description_or_error = _description(mlmodel_json, identifier) - if isinstance(description_or_error, RecordError): - return description_or_error - description = description_or_error - - distribution = _distributions(mlmodel_json) - - openml_creator = _as_list(mlmodel_json.get("creator", None)) - openml_contributor = _as_list(mlmodel_json.get("contributor", None)) - pydantic_class_contact = resource_create(Contact) - creator_names = [ - pydantic_class_contact(name=name) for name in openml_creator + openml_contributor - ] - - tags = _as_list(mlmodel_json.get("tag", None)) + status_code = response.status_code + msg = response.json()['error']['message'] + err_msg = (f"Error while fetching {url} from AIBuilder: ({status_code}) {msg}") + logging.error(err_msg) + err = HTTPError(err_msg) + return RecordError(identifier=None, error=err) + return response + + def _is_aware(self, date): + return date.tzinfo is not None and 
date.tzinfo.utcoffset(date) is not None + + def _mlmodel_from_solution( + self, solution: dict, id: str, url: str + ) -> ResourceWithRelations[MLModel] | RecordError: + + if not set(mlmodel_mapping.values()) <= set(solution.keys()): + err_msg = "Bad structure on the received solution." + return RecordError(identifier=id, error=err_msg) + + identifier = "" + if 'platform_resource_identifier' in mlmodel_mapping.keys(): + identifier = solution[mlmodel_mapping['platform_resource_identifier']] + + if not identifier: + err_msg = "The platform identifier is mandatory." + return RecordError(identifier=id, error=err_msg) + + if identifier != id: + err_msg = f"The identifier {identifier} does not correspond with the fetched solution." + return RecordError(identifier=id, error=err_msg) + + name = "" + if 'name' in mlmodel_mapping.keys(): + name = solution[mlmodel_mapping['name']] + + if not name: + err_msg = "The name field is mandatory." + return RecordError(identifier=id, error=err_msg) + + date_published = "" + if 'date_published' in mlmodel_mapping.keys(): + date_published = solution[mlmodel_mapping['date_published']] + + # TODO: Review the AIBuilder schema to map version + version = "" + if 'version' in mlmodel_mapping.keys(): + version = solution[mlmodel_mapping['version']] + + description = "" + if 'description' in mlmodel_mapping.keys(): + description = _description_format(solution[mlmodel_mapping['description']]) + + # TODO: Review the AIBuilder schema to map distribution + distribution = [] + if 'distribution' in mlmodel_mapping.keys(): + distribution = _distributions_format(solution[mlmodel_mapping['distribution']]) + + tags = [] + if 'keyword' in mlmodel_mapping.keys(): + tags = solution[mlmodel_mapping['keyword']] + + # TODO: Review the AIBuilder schema to map license + license = "" + if 'license' in mlmodel_mapping.keys(): + license = solution[mlmodel_mapping['license']] + + related_resources = {} + + if 'contact' in mlmodel_mapping.keys(): + 
pydantic_class_contact = resource_create(Contact) + contact_names = [ + pydantic_class_contact(name=name) + for name in _as_list(solution[mlmodel_mapping['contact']]) + ] + related_resources['contact'] = contact_names + + if 'creator' in mlmodel_mapping.keys(): + pydantic_class_creator = resource_create(Contact) + creator_names = [ + pydantic_class_creator(name=name) + for name in _as_list(solution[mlmodel_mapping['creator']]) + ] + related_resources['creator'] = creator_names pydantic_class = resource_create(MLModel) mlmodel = pydantic_class( - aiod_entry=AIoDEntryCreate( - status="published", - ), + platform="aibuilder", platform_resource_identifier=identifier, - platform=self.platform_name, - name=mlmodel_json["name"], - same_as=url_mlmodel, + name=name, + date_published=date_published, + same_as=url, # TODO: Review the concept of having the TOKEN inside the url!!! + is_accessible_for_free=True, + version=version, + aiod_entry=AIoDEntryCreate(status="published",), description=description, - date_published=dateutil.parser.parse(mlmodel_json["upload_date"]), - license=mlmodel_json.get("licence", None), distribution=distribution, - is_accessible_for_free=True, - keyword=[tag for tag in tags] if tags else [], - version=mlmodel_json["version"], + keyword=tags, + license=license, ) return ResourceWithRelations[pydantic_class]( # type:ignore resource=mlmodel, resource_ORM_class=MLModel, - related_resources={"creator": creator_names}, + related_resources=related_resources, ) def fetch( - self, offset: int, from_identifier: int - ) -> Iterator[ResourceWithRelations[SQLModel] | RecordError]: - url_mlmodel = ( - "https://www.openml.org/api/v1/json/flow/list/" - f"limit/{self.limit_per_iteration}/offset/{offset}" - ) - response = requests.get(url_mlmodel) - - if not response.ok: - status_code = response.status_code - msg = response.json()["error"]["message"] - err_msg = f"Error while fetching {url_mlmodel} from OpenML: ({status_code}) {msg}" - logging.error(err_msg) - err = 
HTTPError(err_msg) - yield RecordError(identifier=None, error=err) + self, from_incl: datetime, to_excl: datetime + ) -> Iterator[Tuple[datetime | None, MLModel | ResourceWithRelations[MLModel] | RecordError]]: + """ + It fetches the entire list of catalogs and, for each catalog, the entire list of solutions. + Then it filters by date and fetches every solution within [`from_incl`, `to_excl`). + """ + # TODO: The AIBuilder API will soon include date search for the catalog list of solutions. + + self.is_concluded = False + + if not self._is_aware(from_incl): + from_incl = from_incl.replace(tzinfo=pytz.UTC) + if not self._is_aware(to_excl): + to_excl = to_excl.replace(tzinfo=pytz.UTC) + + url_get_catalog_list = f"{API_URL}/get_catalog_list?apiToken={TOKEN}" + response = self.get_response(url_get_catalog_list).json() + if isinstance(response, RecordError): + self.is_concluded = True + yield None, response return try: - mlmodel_summaries = response.json()["flows"]["flow"] + catalog_list = [catalog['catalogId'] for catalog in response] except Exception as e: - yield RecordError(identifier=None, error=e) + self.is_concluded = True + yield None, RecordError(identifier=None, error=e) + return + + if len(catalog_list) == 0: + self.is_concluded = True + yield None, RecordError(identifier=None, error="Empty catalog list.") return - for summary in mlmodel_summaries: - identifier = None - # ToDo: discuss how to accommodate pipelines. Excluding sklearn pipelines for now. - # Note: weka doesn't have a standard method to define pipeline. - # There are no mlr pipelines in OpenML. 
- identifier = summary["id"] - if "sklearn.pipeline" not in summary["name"]: + for num_catalog, catalog in enumerate(catalog_list): + url_get_catalog_solutions = ( + f"{API_URL}/get_catalog_solutions?catalogId={catalog}&apiToken={TOKEN}" + ) + response = self.get_response(url_get_catalog_solutions).json() + if isinstance(response, RecordError): + self.is_concluded = num_catalog == len(catalog_list) + yield None, response + return + + try: + solutions_list = [ + solution['fullId'] for solution in response + if from_incl <= datetime.fromisoformat(solution['lastModified']) < to_excl + ] + except Exception as e: + self.is_concluded = num_catalog == len(catalog_list) + yield None, RecordError(identifier=None, error=e) + return + + if len(solutions_list) == 0: + self.is_concluded = num_catalog == len(catalog_list) + yield None, RecordError(identifier=None, error="Empty solutions list.") + return + + for num_solution, solution in enumerate(solutions_list): + url_get_solution = f"{API_URL}/get_solution?fullId={solution}&apiToken={TOKEN}" + response = self.get_response(url_get_solution).json() + if isinstance(response, RecordError): + self.is_concluded = ( + num_catalog == len(catalog_list) and num_solution == len(solutions_list) + ) + yield None, response + return + try: - if identifier < from_identifier: - yield RecordError(identifier=identifier, error="Id too low", ignore=True) - if from_identifier is None or identifier >= from_identifier: - yield self.fetch_record(identifier) + self.is_concluded = ( + num_catalog == len(catalog_list) and num_solution == len(solutions_list) + ) + yield ( + datetime.fromisoformat(response['lastModified']), + self._mlmodel_from_solution(response, solution, url_get_solution) + ) except Exception as e: - yield RecordError(identifier=identifier, error=e) - else: - yield RecordError(identifier=identifier, error="Sklearn pipeline not processed!") - - -def _description(mlmodel_json: dict[str, Any], identifier: int) -> Text | None | RecordError: 
- description = ( - mlmodel_json["full_description"] - if mlmodel_json.get("full_description", None) - else mlmodel_json.get("description", None) - ) - if isinstance(description, type(None)): - return None - if isinstance(description, list) and len(description) == 0: - return None - elif not isinstance(description, str): - return RecordError(identifier=str(identifier), error="Description of unknown format.") + self.is_concluded = ( + num_catalog == len(catalog_list) and num_solution == len(solutions_list) + ) + yield None, RecordError(identifier=solution, error=e) + return + +def _description_format(description: str) -> Text: + if not description: + description = "" if len(description) > field_length.LONG: text_break = " [...]" - description = description[: field_length.LONG - len(text_break)] + text_break - if description: - return Text(plain=description) - return None - - -def _distributions(mlmodel_json) -> list[RunnableDistribution]: - if ( - (mlmodel_json.get("installation_notes") is None) - and (mlmodel_json.get("dependencies") is None) - and (mlmodel_json.get("binary_url") is None) - ): - return [] - return [ - RunnableDistribution( - dependency=mlmodel_json.get("dependencies", None), - installation=mlmodel_json.get("installation_notes", None), - content_url=mlmodel_json.get("binary_url", None), - ) - ] + description = description[:field_length.LONG-len(text_break)] + text_break + return Text(plain=description) +# TODO: Review the AIBuilder schema to map distribution +def _distribution_format(distribution) -> list[RunnableDistribution]: + return [] def _as_list(value: Any | list[Any]) -> list[Any]: """Wrap it with a list, if it is not a list""" From 9315029bb38a84e4b7336312ad16df79c97cf34e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 12 Nov 2024 14:19:30 +0100 Subject: [PATCH 03/21] Working with hidden token --- docker-compose.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/docker-compose.yaml b/docker-compose.yaml index 
a615e4cd..d42e7d8d 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -131,6 +131,7 @@ services: env_file: .env environment: - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + - API_TOKEN=$API_TOKEN volumes: - ./src:/app - ${DATA_PATH}/connectors:/opt/connectors/data From 44b2a25e7375af1bb3a842a5eb0ceec373f658b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 12 Nov 2024 16:49:03 +0100 Subject: [PATCH 04/21] Infinite loop on empty lists solved --- README.md | 4 ++++ .../aibuilder/aibuilder_mlmodel_connector.py | 22 +++++++++---------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index a4a3b97c..44d54c7a 100644 --- a/README.md +++ b/README.md @@ -137,6 +137,10 @@ docker compose --profile examples --profile huggingface-datasets --profile openm Make sure you use the same profile for `up` and `down`, otherwise some containers might keep running. +#### Configuring AIBuilder connector +To access the AIBuilder API you need to provide a valid API token through the `API_TOKEN` variable. \ +Use the `override.env` file for that as explained above. + #### Local Installation If you want to run the server locally, you need **Python 3.11**. 
diff --git a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py index 15e29d22..371f3b78 100644 --- a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py +++ b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py @@ -202,9 +202,8 @@ def fetch( ) response = self.get_response(url_get_catalog_solutions).json() if isinstance(response, RecordError): - self.is_concluded = num_catalog == len(catalog_list) + self.is_concluded = num_catalog == len(catalog_list) - 1 yield None, response - return try: solutions_list = [ @@ -212,28 +211,27 @@ def fetch( if from_incl <= datetime.fromisoformat(solution['lastModified']) < to_excl ] except Exception as e: - self.is_concluded = num_catalog == len(catalog_list) + self.is_concluded = num_catalog == len(catalog_list) - 1 yield None, RecordError(identifier=None, error=e) - return if len(solutions_list) == 0: - self.is_concluded = num_catalog == len(catalog_list) - yield None, RecordError(identifier=None, error="Empty solutions list.") - return + self.is_concluded = num_catalog == len(catalog_list) - 1 + yield None, RecordError(identifier=None, error="Empty solution list.", ignore=True) for num_solution, solution in enumerate(solutions_list): url_get_solution = f"{API_URL}/get_solution?fullId={solution}&apiToken={TOKEN}" response = self.get_response(url_get_solution).json() if isinstance(response, RecordError): self.is_concluded = ( - num_catalog == len(catalog_list) and num_solution == len(solutions_list) + num_catalog == len(catalog_list) - 1 and + num_solution == len(solutions_list) - 1 ) yield None, response - return try: self.is_concluded = ( - num_catalog == len(catalog_list) and num_solution == len(solutions_list) + num_catalog == len(catalog_list) - 1 and + num_solution == len(solutions_list) - 1 ) yield ( datetime.fromisoformat(response['lastModified']), @@ -241,10 +239,10 @@ def fetch( ) except Exception as e: self.is_concluded = ( - num_catalog == 
len(catalog_list) and num_solution == len(solutions_list) + num_catalog == len(catalog_list) - 1 and + num_solution == len(solutions_list) - 1 ) yield None, RecordError(identifier=solution, error=e) - return def _description_format(description: str) -> Text: if not description: From fbb2e8b0d0071e6e58a2eaea8acc058ac60a3130 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 25 Nov 2024 19:11:07 +0100 Subject: [PATCH 05/21] Happy path test of aibuilder connector --- src/tests/connectors/aibuilder/__init__.py | 0 .../test_aibuilder_mlmodel_connector.py | 57 +++++++++++++++++++ .../connectors/aibuilder/catalog_list.json | 12 ++++ .../aibuilder/catalog_solutions.json | 14 +++++ .../connectors/aibuilder/solution_1.json | 31 ++++++++++ .../connectors/aibuilder/solution_2.json | 31 ++++++++++ 6 files changed, 145 insertions(+) create mode 100644 src/tests/connectors/aibuilder/__init__.py create mode 100644 src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py create mode 100644 src/tests/resources/connectors/aibuilder/catalog_list.json create mode 100644 src/tests/resources/connectors/aibuilder/catalog_solutions.json create mode 100644 src/tests/resources/connectors/aibuilder/solution_1.json create mode 100644 src/tests/resources/connectors/aibuilder/solution_2.json diff --git a/src/tests/connectors/aibuilder/__init__.py b/src/tests/connectors/aibuilder/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py new file mode 100644 index 00000000..c74d7a16 --- /dev/null +++ b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py @@ -0,0 +1,57 @@ +import os +import json +import responses + +from datetime import datetime + +from connectors.aibuilder.aibuilder_mlmodel_connector import AIBuilderMLModelConnector +from connectors.aibuilder.aibuilder_mlmodel_connector import API_URL, TOKEN 
+from connectors.resource_with_relations import ResourceWithRelations +from database.model.models_and_experiments.ml_model import MLModel +from database.model.platform.platform_names import PlatformName +from tests.testutils.paths import path_test_resources +from database.model.ai_resource.text import Text + +def test_fetch_happy_path(): + connector = AIBuilderMLModelConnector() + test_resources_path = os.path.join(path_test_resources, "connectors", "aibuilder") + catalog_list_path = os.path.join(test_resources_path, "catalog_list.json") + catalog_list_url = f"{API_URL}/get_catalog_list?apiToken={TOKEN}" + catalog_solutions_path = os.path.join(test_resources_path, "catalog_solutions.json") + catalog_solutions_url = f"{API_URL}/get_catalog_solutions?catalogId=1&apiToken={TOKEN}" + solution_1_path = os.path.join(test_resources_path, "solution_1.json") + solution_1_url = f"{API_URL}/get_solution?fullId=1&apiToken={TOKEN}" + solution_2_path = os.path.join(test_resources_path, "solution_2.json") + solution_2_url = f"{API_URL}/get_solution?fullId=2&apiToken={TOKEN}" + mocked_datetime_from = datetime.fromisoformat("2023-09-01T00:00:00Z") + mocked_datetime_to = datetime.fromisoformat("2023-09-01T00:00:01Z") + expected_resources = [] + fetched_resources = [] + with responses.RequestsMock() as mocked_requests: + with open(catalog_list_path, 'r') as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_list_url, json=response, status=200) + with open(catalog_solutions_path, 'r') as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_solutions_url, json=response, status=200) + with open(solution_1_path, 'r') as f: + response = json.load(f) + expected_resources.append(response) + mocked_requests.add(responses.GET, solution_1_url, json=response, status=200) + with open(solution_2_path, 'r') as f: + response = json.load(f) + expected_resources.append(response) + mocked_requests.add(responses.GET, solution_2_url, json=response, status=200) 
+ fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) + + assert len(fetched_resources) == len(expected_resources) + for i, (datetime, mlmodel) in enumerate(fetched_resources): + assert datetime == mocked_datetime_from + assert type(mlmodel) == ResourceWithRelations[MLModel] + assert mlmodel.platform == PlatformName.aibuilder + assert mlmodel.platform_resource_identifier == str(i) + assert mlmodel.name == f"Mocking Full Solution {i}" + assert mlmodel.date_published == "2023-09-01T00:00:00Z" + assert mlmodel.description == Text(plain=f"The mocked full solution {i}.") + assert set(mlmodel.keyword) == {f"Mocked tag {i}."} + assert mlmodel.is_accessible_for_free diff --git a/src/tests/resources/connectors/aibuilder/catalog_list.json b/src/tests/resources/connectors/aibuilder/catalog_list.json new file mode 100644 index 00000000..552416d9 --- /dev/null +++ b/src/tests/resources/connectors/aibuilder/catalog_list.json @@ -0,0 +1,12 @@ +[ + { + "catalogId": "1", + "name": "Mocking Catalog", + "description": "A Mocking Catalog for AIBuilder.", + "type": "STANDARD", + "size": 2, + "accessTypeCode": "PB", + "lastModified": "2023-09-01T00:00:00Z", + "created": "2023-09-01T00:00:00Z" + } +] diff --git a/src/tests/resources/connectors/aibuilder/catalog_solutions.json b/src/tests/resources/connectors/aibuilder/catalog_solutions.json new file mode 100644 index 00000000..af68a215 --- /dev/null +++ b/src/tests/resources/connectors/aibuilder/catalog_solutions.json @@ -0,0 +1,14 @@ +[ + { + "fullId": "1", + "name": "Mocking Solution 1", + "toolkitType": "SK", + "lastModified": "2023-09-01T00:00:00Z" + }, + { + "fullId": "2", + "name": "Mocking Solution 2", + "toolkitType": "CP", + "lastModified": "2023-09-01T00:00:00Z" + }, +] diff --git a/src/tests/resources/connectors/aibuilder/solution_1.json b/src/tests/resources/connectors/aibuilder/solution_1.json new file mode 100644 index 00000000..bd8cccef --- /dev/null +++ 
b/src/tests/resources/connectors/aibuilder/solution_1.json @@ -0,0 +1,31 @@ +{ + "fullId": "1", + "name": "Mocking Full Solution 1", + "description": "The mocked full solution 1.", + "toolkitType": "SK", + "category": "PR", + "lastModified": "2023-09-01T00:00:00Z", + "created": "2023-09-01T00:00:00Z", + "publisher": "The mocked publisher 1.", + "authors": [ + "The mocker author 1" + ], + "artifacts": [ + { + "artifactId": "1", + "artifactTypeCode": "PJ", + "name": "Mocking artifact 1", + "description": "The mocked artifact 1.", + "uri": "mocked_artifact_file_1.json", + "filename": "mocked_artifact_file_1.json", + "version": "1.0.0", + "size": 1, + "metadata": null, + "lastModified": "2023-09-01T00:00:00Z", + "created": "2023-09-01T00:00:00Z" + } + ], + "tags": [ + "Mocked tag 1." + ] +} diff --git a/src/tests/resources/connectors/aibuilder/solution_2.json b/src/tests/resources/connectors/aibuilder/solution_2.json new file mode 100644 index 00000000..57d063d7 --- /dev/null +++ b/src/tests/resources/connectors/aibuilder/solution_2.json @@ -0,0 +1,31 @@ +{ + "fullId": "2", + "name": "Mocking Full Solution 2", + "description": "The mocked full solution 2.", + "toolkitType": "SK", + "category": "PR", + "lastModified": "2023-09-01T00:00:00Z", + "created": "2023-09-01T00:00:00Z", + "publisher": "The mocked publisher 2.", + "authors": [ + "The mocker author 2" + ], + "artifacts": [ + { + "artifactId": "2", + "artifactTypeCode": "PJ", + "name": "Mocking artifact 2", + "description": "The mocked artifact 2.", + "uri": "mocked_artifact_file_2.json", + "filename": "mocked_artifact_file_2.json", + "version": "1.0.0", + "size": 1, + "metadata": null, + "lastModified": "2023-09-01T00:00:00Z", + "created": "2023-09-01T00:00:00Z" + } + ], + "tags": [ + "Mocked tag 2." 
+ ] +} From a56bceb8dda81b89cda29c4fbd2262bb3661059f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 3 Dec 2024 16:14:38 +0100 Subject: [PATCH 06/21] First working test for aibuilder connector --- .../aibuilder/aibuilder_mappings.py | 14 +- .../aibuilder/aibuilder_mlmodel_connector.py | 87 +++--- .../test_aibuilder_mlmodel_connector.py | 32 +-- .../aibuilder/catalog_solutions.json | 2 +- venv_aibuilder/bin/Activate.ps1 | 247 ++++++++++++++++++ venv_aibuilder/bin/activate | 63 +++++ venv_aibuilder/bin/activate.csh | 26 ++ venv_aibuilder/bin/activate.fish | 69 +++++ venv_aibuilder/bin/dotenv | 8 + venv_aibuilder/bin/futurize | 8 + venv_aibuilder/bin/gen_symkey.py | 31 +++ venv_aibuilder/bin/ghp-import | 8 + venv_aibuilder/bin/httpx | 8 + venv_aibuilder/bin/huggingface-cli | 8 + venv_aibuilder/bin/identify-cli | 8 + venv_aibuilder/bin/jwdecrypt.py | 78 ++++++ venv_aibuilder/bin/jwenc.py | 111 ++++++++ venv_aibuilder/bin/jwk_create.py | 45 ++++ venv_aibuilder/bin/jwk_export.py | 25 ++ venv_aibuilder/bin/jwkutil.py | 154 +++++++++++ venv_aibuilder/bin/mako-render | 8 + venv_aibuilder/bin/markdown_py | 8 + venv_aibuilder/bin/mkdocs | 8 + venv_aibuilder/bin/mkdocs-get-deps | 8 + venv_aibuilder/bin/nodeenv | 8 + venv_aibuilder/bin/normalizer | 8 + venv_aibuilder/bin/oic-client-management | 8 + venv_aibuilder/bin/pasteurize | 8 + venv_aibuilder/bin/peek.py | 18 ++ venv_aibuilder/bin/pip | 8 + venv_aibuilder/bin/pip3 | 8 + venv_aibuilder/bin/pip3.11 | 8 + venv_aibuilder/bin/pre-commit | 8 + venv_aibuilder/bin/py.test | 8 + venv_aibuilder/bin/pybabel | 8 + venv_aibuilder/bin/pygmentize | 8 + venv_aibuilder/bin/pyrsa-decrypt | 8 + venv_aibuilder/bin/pyrsa-encrypt | 8 + venv_aibuilder/bin/pyrsa-keygen | 8 + venv_aibuilder/bin/pyrsa-priv2pub | 8 + venv_aibuilder/bin/pyrsa-sign | 8 + venv_aibuilder/bin/pyrsa-verify | 8 + venv_aibuilder/bin/pytest | 8 + venv_aibuilder/bin/python | 1 + venv_aibuilder/bin/python3 | 1 + venv_aibuilder/bin/python3.11 | 1 + 
venv_aibuilder/bin/tqdm | 8 + venv_aibuilder/bin/uvicorn | 8 + venv_aibuilder/bin/virtualenv | 8 + venv_aibuilder/bin/watchmedo | 8 + venv_aibuilder/pyvenv.cfg | 5 + 51 files changed, 1203 insertions(+), 63 deletions(-) create mode 100644 venv_aibuilder/bin/Activate.ps1 create mode 100644 venv_aibuilder/bin/activate create mode 100644 venv_aibuilder/bin/activate.csh create mode 100644 venv_aibuilder/bin/activate.fish create mode 100755 venv_aibuilder/bin/dotenv create mode 100755 venv_aibuilder/bin/futurize create mode 100755 venv_aibuilder/bin/gen_symkey.py create mode 100755 venv_aibuilder/bin/ghp-import create mode 100755 venv_aibuilder/bin/httpx create mode 100755 venv_aibuilder/bin/huggingface-cli create mode 100755 venv_aibuilder/bin/identify-cli create mode 100755 venv_aibuilder/bin/jwdecrypt.py create mode 100755 venv_aibuilder/bin/jwenc.py create mode 100755 venv_aibuilder/bin/jwk_create.py create mode 100755 venv_aibuilder/bin/jwk_export.py create mode 100755 venv_aibuilder/bin/jwkutil.py create mode 100755 venv_aibuilder/bin/mako-render create mode 100755 venv_aibuilder/bin/markdown_py create mode 100755 venv_aibuilder/bin/mkdocs create mode 100755 venv_aibuilder/bin/mkdocs-get-deps create mode 100755 venv_aibuilder/bin/nodeenv create mode 100755 venv_aibuilder/bin/normalizer create mode 100755 venv_aibuilder/bin/oic-client-management create mode 100755 venv_aibuilder/bin/pasteurize create mode 100755 venv_aibuilder/bin/peek.py create mode 100755 venv_aibuilder/bin/pip create mode 100755 venv_aibuilder/bin/pip3 create mode 100755 venv_aibuilder/bin/pip3.11 create mode 100755 venv_aibuilder/bin/pre-commit create mode 100755 venv_aibuilder/bin/py.test create mode 100755 venv_aibuilder/bin/pybabel create mode 100755 venv_aibuilder/bin/pygmentize create mode 100755 venv_aibuilder/bin/pyrsa-decrypt create mode 100755 venv_aibuilder/bin/pyrsa-encrypt create mode 100755 venv_aibuilder/bin/pyrsa-keygen create mode 100755 venv_aibuilder/bin/pyrsa-priv2pub create 
mode 100755 venv_aibuilder/bin/pyrsa-sign create mode 100755 venv_aibuilder/bin/pyrsa-verify create mode 100755 venv_aibuilder/bin/pytest create mode 120000 venv_aibuilder/bin/python create mode 120000 venv_aibuilder/bin/python3 create mode 120000 venv_aibuilder/bin/python3.11 create mode 100755 venv_aibuilder/bin/tqdm create mode 100755 venv_aibuilder/bin/uvicorn create mode 100755 venv_aibuilder/bin/virtualenv create mode 100755 venv_aibuilder/bin/watchmedo create mode 100644 venv_aibuilder/pyvenv.cfg diff --git a/src/connectors/aibuilder/aibuilder_mappings.py b/src/connectors/aibuilder/aibuilder_mappings.py index 667d055b..c010371a 100644 --- a/src/connectors/aibuilder/aibuilder_mappings.py +++ b/src/connectors/aibuilder/aibuilder_mappings.py @@ -1,9 +1,9 @@ mlmodel_mapping = { - 'platform_resource_identifier': "fullId", - 'name': "name", - 'date_published': "created", - 'contact': "authors", - 'creator': "publisher", - 'description': "description", - 'keyword': "tags" + "platform_resource_identifier": "fullId", + "name": "name", + "date_published": "created", + "contact": "authors", + "creator": "publisher", + "description": "description", + "keyword": "tags", } diff --git a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py index 371f3b78..704ee0b6 100644 --- a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py +++ b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py @@ -4,11 +4,11 @@ """ import os +import pytz import logging import requests from requests.exceptions import HTTPError -from sqlmodel import SQLModel from datetime import datetime from ratelimit import limits, sleep_and_retry from typing import Iterator, Tuple, Any @@ -37,6 +37,7 @@ ONE_MINUTE = 60 ONE_HOUR = 3600 + class AIBuilderMLModelConnector(ResourceConnectorByDate[MLModel]): @property def resource_class(self) -> type[MLModel]: @@ -56,8 +57,8 @@ def get_response(self, url) -> requests.Response | RecordError: response = 
requests.get(url, timeout=REQUEST_TIMEOUT) if not response.ok: status_code = response.status_code - msg = response.json()['error']['message'] - err_msg = (f"Error while fetching {url} from AIBuilder: ({status_code}) {msg}") + msg = response.json()["error"]["message"] + err_msg = f"Error while fetching {url} from AIBuilder: ({status_code}) {msg}" logging.error(err_msg) err = HTTPError(err_msg) return RecordError(identifier=None, error=err) @@ -75,8 +76,8 @@ def _mlmodel_from_solution( return RecordError(identifier=id, error=err_msg) identifier = "" - if 'platform_resource_identifier' in mlmodel_mapping.keys(): - identifier = solution[mlmodel_mapping['platform_resource_identifier']] + if "platform_resource_identifier" in mlmodel_mapping.keys(): + identifier = solution[mlmodel_mapping["platform_resource_identifier"]] if not identifier: err_msg = "The platform identifier is mandatory." @@ -87,57 +88,57 @@ def _mlmodel_from_solution( return RecordError(identifier=id, error=err_msg) name = "" - if 'name' in mlmodel_mapping.keys(): - name = solution[mlmodel_mapping['name']] + if "name" in mlmodel_mapping.keys(): + name = solution[mlmodel_mapping["name"]] if not name: err_msg = "The name field is mandatory." 
return RecordError(identifier=id, error=err_msg) date_published = "" - if 'date_published' in mlmodel_mapping.keys(): - date_published = solution[mlmodel_mapping['date_published']] + if "date_published" in mlmodel_mapping.keys(): + date_published = solution[mlmodel_mapping["date_published"]] # TODO: Review the AIBuilder schema to map version version = "" - if 'version' in mlmodel_mapping.keys(): - version = solution[mlmodel_mapping['version']] + if "version" in mlmodel_mapping.keys(): + version = solution[mlmodel_mapping["version"]] description = "" - if 'description' in mlmodel_mapping.keys(): - description = _description_format(solution[mlmodel_mapping['description']]) + if "description" in mlmodel_mapping.keys(): + description = _description_format(solution[mlmodel_mapping["description"]]) # TODO: Review the AIBuilder schema to map distribution distribution = [] - if 'distribution' in mlmodel_mapping.keys(): - distribution = _distributions_format(solution[mlmodel_mapping['distribution']]) + if "distribution" in mlmodel_mapping.keys(): + distribution = _distribution_format(solution[mlmodel_mapping["distribution"]]) tags = [] - if 'keyword' in mlmodel_mapping.keys(): - tags = solution[mlmodel_mapping['keyword']] + if "keyword" in mlmodel_mapping.keys(): + tags = solution[mlmodel_mapping["keyword"]] # TODO: Review the AIBuilder schema to map license license = "" - if 'license' in mlmodel_mapping.keys(): - license = solution[mlmodel_mapping['license']] + if "license" in mlmodel_mapping.keys(): + license = solution[mlmodel_mapping["license"]] related_resources = {} - if 'contact' in mlmodel_mapping.keys(): + if "contact" in mlmodel_mapping.keys(): pydantic_class_contact = resource_create(Contact) contact_names = [ pydantic_class_contact(name=name) - for name in _as_list(solution[mlmodel_mapping['contact']]) + for name in _as_list(solution[mlmodel_mapping["contact"]]) ] - related_resources['contact'] = contact_names + related_resources["contact"] = contact_names - if 
'creator' in mlmodel_mapping.keys(): + if "creator" in mlmodel_mapping.keys(): pydantic_class_creator = resource_create(Contact) creator_names = [ pydantic_class_creator(name=name) - for name in _as_list(solution[mlmodel_mapping['creator']]) + for name in _as_list(solution[mlmodel_mapping["creator"]]) ] - related_resources['creator'] = creator_names + related_resources["creator"] = creator_names pydantic_class = resource_create(MLModel) mlmodel = pydantic_class( @@ -145,10 +146,12 @@ def _mlmodel_from_solution( platform_resource_identifier=identifier, name=name, date_published=date_published, - same_as=url, # TODO: Review the concept of having the TOKEN inside the url!!! + same_as=url, # TODO: Review the concept of having the TOKEN inside the url!!! is_accessible_for_free=True, version=version, - aiod_entry=AIoDEntryCreate(status="published",), + aiod_entry=AIoDEntryCreate( + status="published", + ), description=description, distribution=distribution, keyword=tags, @@ -171,7 +174,7 @@ def fetch( # TODO: The AIBuilder API will soon include date search for the catalog list of solutions. 
self.is_concluded = False - + if not self._is_aware(from_incl): from_incl = from_incl.replace(tzinfo=pytz.UTC) if not self._is_aware(to_excl): @@ -185,7 +188,7 @@ def fetch( return try: - catalog_list = [catalog['catalogId'] for catalog in response] + catalog_list = [catalog["catalogId"] for catalog in response] except Exception as e: self.is_concluded = True yield None, RecordError(identifier=None, error=e) @@ -207,8 +210,9 @@ def fetch( try: solutions_list = [ - solution['fullId'] for solution in response - if from_incl <= datetime.fromisoformat(solution['lastModified']) < to_excl + solution["fullId"] + for solution in response + if from_incl <= datetime.fromisoformat(solution["lastModified"]) < to_excl ] except Exception as e: self.is_concluded = num_catalog == len(catalog_list) - 1 @@ -223,39 +227,42 @@ def fetch( response = self.get_response(url_get_solution).json() if isinstance(response, RecordError): self.is_concluded = ( - num_catalog == len(catalog_list) - 1 and - num_solution == len(solutions_list) - 1 + num_catalog == len(catalog_list) - 1 + and num_solution == len(solutions_list) - 1 ) yield None, response try: self.is_concluded = ( - num_catalog == len(catalog_list) - 1 and - num_solution == len(solutions_list) - 1 + num_catalog == len(catalog_list) - 1 + and num_solution == len(solutions_list) - 1 ) yield ( - datetime.fromisoformat(response['lastModified']), - self._mlmodel_from_solution(response, solution, url_get_solution) + datetime.fromisoformat(response["lastModified"]), + self._mlmodel_from_solution(response, solution, url_get_solution), ) except Exception as e: self.is_concluded = ( - num_catalog == len(catalog_list) - 1 and - num_solution == len(solutions_list) - 1 + num_catalog == len(catalog_list) - 1 + and num_solution == len(solutions_list) - 1 ) yield None, RecordError(identifier=solution, error=e) + def _description_format(description: str) -> Text: if not description: description = "" if len(description) > field_length.LONG: text_break 
= " [...]" - description = description[:field_length.LONG-len(text_break)] + text_break + description = description[: field_length.LONG - len(text_break)] + text_break return Text(plain=description) + # TODO: Review the AIBuilder schema to map distribution def _distribution_format(distribution) -> list[RunnableDistribution]: return [] + def _as_list(value: Any | list[Any]) -> list[Any]: """Wrap it with a list, if it is not a list""" if not value: diff --git a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py index c74d7a16..b50beef8 100644 --- a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py +++ b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py @@ -12,9 +12,10 @@ from tests.testutils.paths import path_test_resources from database.model.ai_resource.text import Text + def test_fetch_happy_path(): connector = AIBuilderMLModelConnector() - test_resources_path = os.path.join(path_test_resources, "connectors", "aibuilder") + test_resources_path = os.path.join(path_test_resources(), "connectors", "aibuilder") catalog_list_path = os.path.join(test_resources_path, "catalog_list.json") catalog_list_url = f"{API_URL}/get_catalog_list?apiToken={TOKEN}" catalog_solutions_path = os.path.join(test_resources_path, "catalog_solutions.json") @@ -28,30 +29,31 @@ def test_fetch_happy_path(): expected_resources = [] fetched_resources = [] with responses.RequestsMock() as mocked_requests: - with open(catalog_list_path, 'r') as f: + with open(catalog_list_path, "r") as f: response = json.load(f) mocked_requests.add(responses.GET, catalog_list_url, json=response, status=200) - with open(catalog_solutions_path, 'r') as f: + with open(catalog_solutions_path, "r") as f: response = json.load(f) mocked_requests.add(responses.GET, catalog_solutions_url, json=response, status=200) - with open(solution_1_path, 'r') as f: + with open(solution_1_path, "r") as f: response = 
json.load(f) expected_resources.append(response) mocked_requests.add(responses.GET, solution_1_url, json=response, status=200) - with open(solution_2_path, 'r') as f: + with open(solution_2_path, "r") as f: response = json.load(f) expected_resources.append(response) mocked_requests.add(responses.GET, solution_2_url, json=response, status=200) fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) assert len(fetched_resources) == len(expected_resources) - for i, (datetime, mlmodel) in enumerate(fetched_resources): - assert datetime == mocked_datetime_from - assert type(mlmodel) == ResourceWithRelations[MLModel] - assert mlmodel.platform == PlatformName.aibuilder - assert mlmodel.platform_resource_identifier == str(i) - assert mlmodel.name == f"Mocking Full Solution {i}" - assert mlmodel.date_published == "2023-09-01T00:00:00Z" - assert mlmodel.description == Text(plain=f"The mocked full solution {i}.") - assert set(mlmodel.keyword) == {f"Mocked tag {i}."} - assert mlmodel.is_accessible_for_free + for i, (last_modified, mlmodel) in enumerate(fetched_resources): + assert last_modified == mocked_datetime_from + assert type(mlmodel) == ResourceWithRelations + assert mlmodel.resource_ORM_class == MLModel + assert mlmodel.resource.platform == PlatformName.aibuilder + assert mlmodel.resource.platform_resource_identifier == str(i + 1) + assert mlmodel.resource.name == f"Mocking Full Solution {i + 1}" + assert mlmodel.resource.date_published == mocked_datetime_from + assert mlmodel.resource.description == Text(plain=f"The mocked full solution {i + 1}.") + assert set(mlmodel.resource.keyword) == {f"Mocked tag {i + 1}."} + assert mlmodel.resource.is_accessible_for_free diff --git a/src/tests/resources/connectors/aibuilder/catalog_solutions.json b/src/tests/resources/connectors/aibuilder/catalog_solutions.json index af68a215..d77cbac8 100644 --- a/src/tests/resources/connectors/aibuilder/catalog_solutions.json +++ 
b/src/tests/resources/connectors/aibuilder/catalog_solutions.json @@ -10,5 +10,5 @@ "name": "Mocking Solution 2", "toolkitType": "CP", "lastModified": "2023-09-01T00:00:00Z" - }, + } ] diff --git a/venv_aibuilder/bin/Activate.ps1 b/venv_aibuilder/bin/Activate.ps1 new file mode 100644 index 00000000..b49d77ba --- /dev/null +++ b/venv_aibuilder/bin/Activate.ps1 @@ -0,0 +1,247 @@ +<# +.Synopsis +Activate a Python virtual environment for the current PowerShell session. + +.Description +Pushes the python executable for a virtual environment to the front of the +$Env:PATH environment variable and sets the prompt to signify that you are +in a Python virtual environment. Makes use of the command line switches as +well as the `pyvenv.cfg` file values present in the virtual environment. + +.Parameter VenvDir +Path to the directory that contains the virtual environment to activate. The +default value for this is the parent of the directory that the Activate.ps1 +script is located within. + +.Parameter Prompt +The prompt prefix to display when this virtual environment is activated. By +default, this prompt is the name of the virtual environment folder (VenvDir) +surrounded by parentheses and followed by a single space (ie. '(.venv) '). + +.Example +Activate.ps1 +Activates the Python virtual environment that contains the Activate.ps1 script. + +.Example +Activate.ps1 -Verbose +Activates the Python virtual environment that contains the Activate.ps1 script, +and shows extra information about the activation as it executes. + +.Example +Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv +Activates the Python virtual environment located in the specified location. + +.Example +Activate.ps1 -Prompt "MyPython" +Activates the Python virtual environment that contains the Activate.ps1 script, +and prefixes the current prompt with the specified string (surrounded in +parentheses) while the virtual environment is active. 
+ +.Notes +On Windows, it may be required to enable this Activate.ps1 script by setting the +execution policy for the user. You can do this by issuing the following PowerShell +command: + +PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser + +For more information on Execution Policies: +https://go.microsoft.com/fwlink/?LinkID=135170 + +#> +Param( + [Parameter(Mandatory = $false)] + [String] + $VenvDir, + [Parameter(Mandatory = $false)] + [String] + $Prompt +) + +<# Function declarations --------------------------------------------------- #> + +<# +.Synopsis +Remove all shell session elements added by the Activate script, including the +addition of the virtual environment's Python executable from the beginning of +the PATH variable. + +.Parameter NonDestructive +If present, do not remove this function from the global namespace for the +session. + +#> +function global:deactivate ([switch]$NonDestructive) { + # Revert to original values + + # The prior prompt: + if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { + Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt + Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT + } + + # The prior PYTHONHOME: + if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { + Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME + Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME + } + + # The prior PATH: + if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { + Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH + Remove-Item -Path Env:_OLD_VIRTUAL_PATH + } + + # Just remove the VIRTUAL_ENV altogether: + if (Test-Path -Path Env:VIRTUAL_ENV) { + Remove-Item -Path env:VIRTUAL_ENV + } + + # Just remove VIRTUAL_ENV_PROMPT altogether. 
+ if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) { + Remove-Item -Path env:VIRTUAL_ENV_PROMPT + } + + # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: + if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { + Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force + } + + # Leave deactivate function in the global namespace if requested: + if (-not $NonDestructive) { + Remove-Item -Path function:deactivate + } +} + +<# +.Description +Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the +given folder, and returns them in a map. + +For each line in the pyvenv.cfg file, if that line can be parsed into exactly +two strings separated by `=` (with any amount of whitespace surrounding the =) +then it is considered a `key = value` line. The left hand string is the key, +the right hand is the value. + +If the value starts with a `'` or a `"` then the first and last character is +stripped from the value before being captured. + +.Parameter ConfigDir +Path to the directory that contains the `pyvenv.cfg` file. +#> +function Get-PyVenvConfig( + [String] + $ConfigDir +) { + Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" + + # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). + $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue + + # An empty map will be returned if no config file is found. + $pyvenvConfig = @{ } + + if ($pyvenvConfigPath) { + + Write-Verbose "File exists, parse `key = value` lines" + $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath + + $pyvenvConfigContent | ForEach-Object { + $keyval = $PSItem -split "\s*=\s*", 2 + if ($keyval[0] -and $keyval[1]) { + $val = $keyval[1] + + # Remove extraneous quotations around a string value. 
+ if ("'""".Contains($val.Substring(0, 1))) { + $val = $val.Substring(1, $val.Length - 2) + } + + $pyvenvConfig[$keyval[0]] = $val + Write-Verbose "Adding Key: '$($keyval[0])'='$val'" + } + } + } + return $pyvenvConfig +} + + +<# Begin Activate script --------------------------------------------------- #> + +# Determine the containing directory of this script +$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition +$VenvExecDir = Get-Item -Path $VenvExecPath + +Write-Verbose "Activation script is located in path: '$VenvExecPath'" +Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" +Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" + +# Set values required in priority: CmdLine, ConfigFile, Default +# First, get the location of the virtual environment, it might not be +# VenvExecDir if specified on the command line. +if ($VenvDir) { + Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" +} +else { + Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." + $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") + Write-Verbose "VenvDir=$VenvDir" +} + +# Next, read the `pyvenv.cfg` file to determine any required value such +# as `prompt`. +$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir + +# Next, set the prompt from the command line, or the config file, or +# just use the name of the virtual environment folder. +if ($Prompt) { + Write-Verbose "Prompt specified as argument, using '$Prompt'" +} +else { + Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" + if ($pyvenvCfg -and $pyvenvCfg['prompt']) { + Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" + $Prompt = $pyvenvCfg['prompt']; + } + else { + Write-Verbose " Setting prompt based on parent's directory's name. 
(Is the directory name passed to venv module when creating the virtual environment)" + Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" + $Prompt = Split-Path -Path $venvDir -Leaf + } +} + +Write-Verbose "Prompt = '$Prompt'" +Write-Verbose "VenvDir='$VenvDir'" + +# Deactivate any currently active virtual environment, but leave the +# deactivate function in place. +deactivate -nondestructive + +# Now set the environment variable VIRTUAL_ENV, used by many tools to determine +# that there is an activated venv. +$env:VIRTUAL_ENV = $VenvDir + +if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { + + Write-Verbose "Setting prompt to '$Prompt'" + + # Set the prompt to include the env name + # Make sure _OLD_VIRTUAL_PROMPT is global + function global:_OLD_VIRTUAL_PROMPT { "" } + Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT + New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt + + function global:prompt { + Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " + _OLD_VIRTUAL_PROMPT + } + $env:VIRTUAL_ENV_PROMPT = $Prompt +} + +# Clear PYTHONHOME +if (Test-Path -Path Env:PYTHONHOME) { + Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME + Remove-Item -Path Env:PYTHONHOME +} + +# Add the venv to the PATH +Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH +$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" diff --git a/venv_aibuilder/bin/activate b/venv_aibuilder/bin/activate new file mode 100644 index 00000000..92ef1888 --- /dev/null +++ b/venv_aibuilder/bin/activate @@ -0,0 +1,63 @@ +# This file must be used with "source bin/activate" *from bash* +# you cannot run it directly + +deactivate () { + # reset old environment variables + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset 
_OLD_VIRTUAL_PATH + fi + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + + # Call hash to forget past commands. Without forgetting + # past commands the $PATH changes we made may not be respected + hash -r 2> /dev/null + + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + + unset VIRTUAL_ENV + unset VIRTUAL_ENV_PROMPT + if [ ! "${1:-}" = "nondestructive" ] ; then + # Self destruct! + unset -f deactivate + fi +} + +# unset irrelevant variables +deactivate nondestructive + +VIRTUAL_ENV="/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder" +export VIRTUAL_ENV + +_OLD_VIRTUAL_PATH="$PATH" +PATH="$VIRTUAL_ENV/bin:$PATH" +export PATH + +# unset PYTHONHOME if set +# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) +# could use `if (set -u; : $PYTHONHOME) ;` in bash +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" + unset PYTHONHOME +fi + +if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then + _OLD_VIRTUAL_PS1="${PS1:-}" + PS1="(venv_aibuilder) ${PS1:-}" + export PS1 + VIRTUAL_ENV_PROMPT="(venv_aibuilder) " + export VIRTUAL_ENV_PROMPT +fi + +# Call hash to forget past commands. Without forgetting +# past commands the $PATH changes we made may not be respected +hash -r 2> /dev/null diff --git a/venv_aibuilder/bin/activate.csh b/venv_aibuilder/bin/activate.csh new file mode 100644 index 00000000..8a14298b --- /dev/null +++ b/venv_aibuilder/bin/activate.csh @@ -0,0 +1,26 @@ +# This file must be used with "source bin/activate.csh" *from csh*. +# You cannot run it directly. +# Created by Davide Di Blasi . 
+# Ported to Python 3.3 venv by Andrew Svetlov + +alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate' + +# Unset irrelevant variables. +deactivate nondestructive + +setenv VIRTUAL_ENV "/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder" + +set _OLD_VIRTUAL_PATH="$PATH" +setenv PATH "$VIRTUAL_ENV/bin:$PATH" + + +set _OLD_VIRTUAL_PROMPT="$prompt" + +if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then + set prompt = "(venv_aibuilder) $prompt" + setenv VIRTUAL_ENV_PROMPT "(venv_aibuilder) " +endif + +alias pydoc python -m pydoc + +rehash diff --git a/venv_aibuilder/bin/activate.fish b/venv_aibuilder/bin/activate.fish new file mode 100644 index 00000000..53ecf33d --- /dev/null +++ b/venv_aibuilder/bin/activate.fish @@ -0,0 +1,69 @@ +# This file must be used with "source /bin/activate.fish" *from fish* +# (https://fishshell.com/); you cannot run it directly. + +function deactivate -d "Exit virtual environment and return to normal shell environment" + # reset old environment variables + if test -n "$_OLD_VIRTUAL_PATH" + set -gx PATH $_OLD_VIRTUAL_PATH + set -e _OLD_VIRTUAL_PATH + end + if test -n "$_OLD_VIRTUAL_PYTHONHOME" + set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME + set -e _OLD_VIRTUAL_PYTHONHOME + end + + if test -n "$_OLD_FISH_PROMPT_OVERRIDE" + set -e _OLD_FISH_PROMPT_OVERRIDE + # prevents error when using nested fish instances (Issue #93858) + if functions -q _old_fish_prompt + functions -e fish_prompt + functions -c _old_fish_prompt fish_prompt + functions -e _old_fish_prompt + end + end + + set -e VIRTUAL_ENV + set -e VIRTUAL_ENV_PROMPT + if test "$argv[1]" != "nondestructive" + # Self-destruct! + functions -e deactivate + end +end + +# Unset irrelevant variables. 
+deactivate nondestructive + +set -gx VIRTUAL_ENV "/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder" + +set -gx _OLD_VIRTUAL_PATH $PATH +set -gx PATH "$VIRTUAL_ENV/bin" $PATH + +# Unset PYTHONHOME if set. +if set -q PYTHONHOME + set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME + set -e PYTHONHOME +end + +if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" + # fish uses a function instead of an env var to generate the prompt. + + # Save the current fish_prompt function as the function _old_fish_prompt. + functions -c fish_prompt _old_fish_prompt + + # With the original prompt function renamed, we can override with our own. + function fish_prompt + # Save the return status of the last command. + set -l old_status $status + + # Output the venv prompt; color taken from the blue of the Python logo. + printf "%s%s%s" (set_color 4B8BBE) "(venv_aibuilder) " (set_color normal) + + # Restore the return status of the previous command. + echo "exit $old_status" | . + # Output the original/"old" prompt. 
+ _old_fish_prompt + end + + set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" + set -gx VIRTUAL_ENV_PROMPT "(venv_aibuilder) " +end diff --git a/venv_aibuilder/bin/dotenv b/venv_aibuilder/bin/dotenv new file mode 100755 index 00000000..e6465a8a --- /dev/null +++ b/venv_aibuilder/bin/dotenv @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from dotenv.__main__ import cli +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli()) diff --git a/venv_aibuilder/bin/futurize b/venv_aibuilder/bin/futurize new file mode 100755 index 00000000..c7cc17b1 --- /dev/null +++ b/venv_aibuilder/bin/futurize @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from libfuturize.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/gen_symkey.py b/venv_aibuilder/bin/gen_symkey.py new file mode 100755 index 00000000..34a29330 --- /dev/null +++ b/venv_aibuilder/bin/gen_symkey.py @@ -0,0 +1,31 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +import json +import random +import string + +import argparse + +from jwkest.jwk import SYMKey + +__author__ = 'regu0004' + + +def rndstr(size=6, chars=string.ascii_uppercase + string.digits): + return ''.join(random.choice(chars) for _ in range(size)) + + +def main(): + parser = argparse.ArgumentParser( + description="Generate a new symmetric key and print it to stdout.") + parser.add_argument("-n", dest="key_length", default=48, type=int, + help="Length of the random string used as key.") + parser.add_argument("--kid", dest="kid", help="Key id.") + args = parser.parse_args() + + key = SYMKey(key=rndstr(args.key_length), kid=args.kid).serialize() + jwks = 
dict(keys=[key]) + print(json.dumps(jwks)) + + +if __name__ == "__main__": + main() diff --git a/venv_aibuilder/bin/ghp-import b/venv_aibuilder/bin/ghp-import new file mode 100755 index 00000000..1daaac36 --- /dev/null +++ b/venv_aibuilder/bin/ghp-import @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from ghp_import import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/httpx b/venv_aibuilder/bin/httpx new file mode 100755 index 00000000..e439d3c7 --- /dev/null +++ b/venv_aibuilder/bin/httpx @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from httpx import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/huggingface-cli b/venv_aibuilder/bin/huggingface-cli new file mode 100755 index 00000000..a3cf0a54 --- /dev/null +++ b/venv_aibuilder/bin/huggingface-cli @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from huggingface_hub.commands.huggingface_cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/identify-cli b/venv_aibuilder/bin/identify-cli new file mode 100755 index 00000000..97f262e9 --- /dev/null +++ b/venv_aibuilder/bin/identify-cli @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from identify.cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/jwdecrypt.py 
b/venv_aibuilder/bin/jwdecrypt.py new file mode 100755 index 00000000..07f6a397 --- /dev/null +++ b/venv_aibuilder/bin/jwdecrypt.py @@ -0,0 +1,78 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +import sys + +__author__ = 'rohe0002' + +import argparse +import requests +from jwkest.jwk import load_jwks_from_url, RSAKey +from jwkest.jwk import rsa_load +from jwkest.jwk import load_x509_cert +from jwkest.jwk import load_jwks +from jwkest.jwk import import_rsa_key_from_file +from jwkest.jwe import JWE + + +def assign(lst): + _keys = {} + for key in lst: + try: + _keys[key.kty].append(key) + except KeyError: + _keys[key.kty] = [key] + return _keys + + +def lrequest(url, method="GET", **kwargs): + return requests.request(method, url, **kwargs) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('-d', dest='debug', action='store_true', + help="Print debug information") + # parser.add_argument('-v', dest='verbose', action='store_true', + # help="Print runtime information") + parser.add_argument('-x', dest="x509_file", + help="File containing a X509 certificate") + parser.add_argument('-X', dest="x509_url", + help="URL pointing to a file containing a X509 " + "certificate") + parser.add_argument('-j', dest="jwk_file", + help="File containing a JWK") + parser.add_argument('-J', dest="jwk_url", + help="URL pointing to a file containing a JWK") + parser.add_argument('-r', dest="rsa_file", + help="A file containing a RSA key") + parser.add_argument("-i", dest="int", help="Integrity method") + parser.add_argument("-f", dest="file", help="File with the message") + parser.add_argument("message", nargs="?", help="The message to encrypt") + + args = parser.parse_args() + + keys = {} + if args.jwk_url: + keys = load_jwks_from_url(args.jwk_url) + elif args.jwk_file: + keys = load_jwks(open(args.jwk_file).read()) + elif args.x509_url: + keys = load_x509_cert(args.x509_url, {}) + elif args.x509_file: + keys = 
[import_rsa_key_from_file(args.x509_file)] + elif args.rsa_file: + key = rsa_load(args.rsa_file) + rsa_key = RSAKey(key=key) + rsa_key.serialize() + keys = [rsa_key] + else: + print("Needs encryption key") + exit() + + if args.file: + msg = open(args.file).read() + msg = msg.strip("\n\r") + else: + msg = args.message + + jwe = JWE() + print(jwe.decrypt(msg, keys)) diff --git a/venv_aibuilder/bin/jwenc.py b/venv_aibuilder/bin/jwenc.py new file mode 100755 index 00000000..da34d168 --- /dev/null +++ b/venv_aibuilder/bin/jwenc.py @@ -0,0 +1,111 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +from __future__ import print_function +import argparse + +import sys + +__author__ = 'rohe0002' + +import requests +from jwkest.jwk import load_jwks_from_url, RSAKey +from jwkest.jwk import rsa_load +from jwkest.jwk import load_x509_cert +from jwkest.jwk import load_jwks +from jwkest.jwe import SUPPORTED +from jwkest.jwe import JWE +from jwkest.jwk import import_rsa_key_from_file + + +def assign(lst): + _keys = {} + for key in lst: + try: + _keys[key.kty].append(key) + except KeyError: + _keys[key.kty] = [key] + return _keys + + +def lrequest(url, method="GET", **kwargs): + return requests.request(method, url, **kwargs) + + +# arg can be RSA-OAEP +# enc for instance A128CBC+HS256 + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('-d', dest='debug', action='store_true', + help="Print debug information") + parser.add_argument('-v', dest='verbose', action='store_true', + help="Print runtime information") + parser.add_argument('-x', dest="x509_file", + help="File containing a X509 certificate") + parser.add_argument('-X', dest="x509_url", + help="URL pointing to a file containing a X509 " + "certificate") + parser.add_argument('-j', dest="jwk_file", + help="File containing a JWK") + parser.add_argument('-J', dest="jwk_url", + help="URL pointing to a file containing a JWK") + parser.add_argument('-r', 
dest="rsa_file", + help="A file containing a RSA key") + parser.add_argument('-a', dest="alg", + help="The encryption algorithm") + parser.add_argument("-e", dest="enc", help="The encryption method") + parser.add_argument("-m", dest="mode", default="public", + help="Whether a public or private key should be used") + parser.add_argument("-f", dest="file", + help="File to be encrypted") + parser.add_argument("message", nargs="?", help="The message to encrypt") + + args = parser.parse_args() + + keys = {} + if args.jwk_url: + keys = load_jwks_from_url(args.jwk_url) + elif args.jwk_file: + keys = load_jwks(open(args.jwk_file).read()) + elif args.x509_url: + # load_x509_cert returns list of 2-tuples + keys = [RSAKey(key=x) for x, y in load_x509_cert(lrequest, + args.x509_url)] + for key in keys: + key.serialize() + elif args.x509_file: + # import_rsa_key_from_file returns RSA key instance + _key = RSAKey(key=import_rsa_key_from_file(args.x509_file)) + _key.serialize() + keys = [_key] + elif args.rsa_file: + _key = RSAKey(key=rsa_load(args.rsa_file)) + _key.serialize() + keys = [_key] + else: + print("Needs encryption key", file=sys.stderr) + exit() + + if not args.enc or not args.alg: + print("There are no default encryption methods", file=sys.stderr) + exit() + + if args.enc not in SUPPORTED["enc"]: + print("Encryption method %s not supported", args.enc, file=sys.stderr) + print("Methods supported: %s", SUPPORTED["enc"], file=sys.stderr) + exit() + + if args.alg not in SUPPORTED["alg"]: + print("Encryption algorithm %s not supported", args.alg, + file=sys.stderr) + print("Algorithms supported: %s", SUPPORTED["alg"], file=sys.stderr) + exit() + + if args.file: + message = open(args.file).read() + elif args.message == "-": + message = sys.stdin.read() + else: + message = args.message + + jwe = JWE(message, alg=args.alg, enc=args.enc) + print(jwe.encrypt(keys)) diff --git a/venv_aibuilder/bin/jwk_create.py b/venv_aibuilder/bin/jwk_create.py new file mode 100755 index 
00000000..9706c250 --- /dev/null +++ b/venv_aibuilder/bin/jwk_create.py @@ -0,0 +1,45 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +import json +from Cryptodome.PublicKey import RSA +import argparse +import os +from jwkest.jwk import RSAKey + +__author__ = 'rolandh' + + +def create_and_store_rsa_key_pair(name="pyoidc", path=".", size=1024): + key = RSA.generate(size) + + keyfile = os.path.join(path, name) + + f = open("%s.key" % keyfile, "w") + f.write(key.exportKey("PEM")) + f.close() + f = open("%s.pub" % keyfile, "w") + f.write(key.publickey().exportKey("PEM")) + f.close() + + rsa_key = RSAKey(key=key) + rsa_key.serialize() + # This will create JWK from the public RSA key + jwk_spec = json.dumps(rsa_key.to_dict(), "enc") + f = open(keyfile + ".jwk", "w") + f.write(str(jwk_spec)) + f.close() + + return key + +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + parser.add_argument('-n', dest="name", default="pyoidc", + help="file names") + parser.add_argument('-p', dest="path", default=".", + help="Path to the directory for the files") + parser.add_argument('-s', dest="size", default=1024, + help="Key size", type=int) + + args = parser.parse_args() + + create_and_store_rsa_key_pair(args.name, args.path, args.size) diff --git a/venv_aibuilder/bin/jwk_export.py b/venv_aibuilder/bin/jwk_export.py new file mode 100755 index 00000000..0a3f73f5 --- /dev/null +++ b/venv_aibuilder/bin/jwk_export.py @@ -0,0 +1,25 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +import os +import argparse +from jwkest.jwk import RSAKey +from jwkest.jwk import rsa_load +from jwkest.jwk import dump_jwks + +__author__ = 'rolandh' + +parser = argparse.ArgumentParser() +parser.add_argument('-n', dest="name", default="pyoidc", + help="file names") +parser.add_argument('-p', dest="path", default=".", + help="Path to the directory for the files") +parser.add_argument('-k', dest="key", help="Key file") + +args 
= parser.parse_args() + +rsa_key = RSAKey(key=rsa_load(args.key)) + +keyfile = os.path.join(args.path, args.name) + +f = open(keyfile + ".jwk", "w") +f.write(dump_jwks([rsa_key])) +f.close() diff --git a/venv_aibuilder/bin/jwkutil.py b/venv_aibuilder/bin/jwkutil.py new file mode 100755 index 00000000..b80333da --- /dev/null +++ b/venv_aibuilder/bin/jwkutil.py @@ -0,0 +1,154 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +import json +import logging +import sys +from jwkest.jwe import JWE + +__author__ = 'rohe0002' + +import argparse +import requests +from jwkest.jwk import RSAKey, KEYS +from jwkest.jwk import keyrep +from jwkest.jwk import import_rsa_key_from_file +from jwkest.jwk import SYMKey +from jwkest.jws import JWS + + +def setup_logging(log_file): + logger = logging.getLogger("") + hdlr = logging.FileHandler(log_file) + base_formatter = logging.Formatter( + "%(asctime)s %(name)s:%(levelname)s %(message)s") + hdlr.setFormatter(base_formatter) + logger.addHandler(hdlr) + logger.setLevel(logging.DEBUG) + + +def assign(lst): + keys = {} + for typ, key in lst: + try: + keys[typ].append(key) + except KeyError: + keys[typ] = [key] + return keys + + +def lrequest(url, method="GET", **kwargs): + return requests.request(method, url, **kwargs) + + +def sign(msg, key, alg="", msgtype=None): + """ + + :param msg: The message to sign + :param key: The signing key + :param alg: Which signing algorithm to use, this information may + appear in the headers dictionary + :param msgtype: The type of payload + :return: A JWS + """ + _jws = JWS(msg, alg=alg) + if msgtype: + _jws["typ"] = msgtype + + return _jws.sign_compact(key) + + +def verify(msg, keys, allow_none=False, sigalg=None): + _jws = JWS() + return _jws.verify_compact(msg, keys, allow_none, sigalg) + + +def encrypt(msg, keys, alg, enc): + _jwe = JWE(msg, alg=alg, enc=enc) + return _jwe.encrypt(keys) + + +def decrypt(msg, keys): + _jwe = JWE() + return _jwe.decrypt(msg, keys) + + 
+if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('-s', dest="sign", action='store_true') + parser.add_argument('-v', dest="verify", action='store_true') + parser.add_argument('-e', dest="encrypt", action='store_true') + parser.add_argument('-d', dest="decrypt", action='store_true') + parser.add_argument('-f', dest="msg_file", + help="File containing a message") + parser.add_argument('-r', dest="rsa_file", + help="File containing a RSA key") + parser.add_argument('-k', dest="hmac_key", + help="If using a HMAC algorithm this is the key") + parser.add_argument('-a', dest="alg", + help="The signing algorithm") + parser.add_argument('-A', dest="encalg", + help="The encryption alg algorithm") + parser.add_argument('-E', dest="encenc", + help="The encryption enc algorithm") + parser.add_argument('-j', dest="jwk", help="JSON Web Key") + parser.add_argument('-J', dest="jwks", help="JSON Web Keys") + parser.add_argument('-i', dest="kid", help="key id") + parser.add_argument('-l', dest="log", help="logfile name") + parser.add_argument('-t', dest="msgtype", help="JWT message type") + parser.add_argument('-u', dest="jwks_url", help="JSON Web Keys URL") + parser.add_argument("message", nargs="?", help="The message") + + args = parser.parse_args() + + if args.log: + setup_logging(args.log) + + _kid = args.kid + keys = [] + if args.rsa_file: + keys.append(RSAKey(key=import_rsa_key_from_file(args.rsa_file), + kid=_kid)) + if args.hmac_key: + keys.append(SYMKey(key=args.hmac_key)) + + if args.jwk: + kspec = json.loads(open(args.jwk).read()) + keys.append(keyrep(kspec)) + + if args.jwks: + _k = KEYS() + _k.load_jwks(open(args.jwks).read()) + keys.extend(_k._keys) + + if args.jwks_url: + _k = KEYS() + _k.load_from_url(args.jwks_url, False) + keys.extend(_k._keys) + + if not keys: + exit(-1) + + if args.msg_file: + message = open(args.msg_file).read().strip("\n") + elif args.message == "-": + message = sys.stdin.read() + else: + message = 
args.message + + if args.sign: + _msg = sign(message, keys, args.alg, args.msgtype) + if args.encrypt: + _msg = encrypt(_msg, keys, args.encalg, args.encenc) + print(_msg) + elif args.encrypt: + print(encrypt(message, keys, args.encalg, args.encenc)) + else: + if args.decrypt: + _msg, _res = decrypt(message, keys) + else: + _msg = message + + if args.verify: + print(verify(_msg, keys)) + +# -e -J edmund.jwks -f text.json -E "A128CBC-HS256" -A "RSA1_5" -l ju.log +# -d -r op.key -f edmund.jwe -i a0 diff --git a/venv_aibuilder/bin/mako-render b/venv_aibuilder/bin/mako-render new file mode 100755 index 00000000..6b7192e3 --- /dev/null +++ b/venv_aibuilder/bin/mako-render @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from mako.cmd import cmdline +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cmdline()) diff --git a/venv_aibuilder/bin/markdown_py b/venv_aibuilder/bin/markdown_py new file mode 100755 index 00000000..49ee0672 --- /dev/null +++ b/venv_aibuilder/bin/markdown_py @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from markdown.__main__ import run +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(run()) diff --git a/venv_aibuilder/bin/mkdocs b/venv_aibuilder/bin/mkdocs new file mode 100755 index 00000000..fac6f9e6 --- /dev/null +++ b/venv_aibuilder/bin/mkdocs @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from mkdocs.__main__ import cli +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli()) diff --git a/venv_aibuilder/bin/mkdocs-get-deps b/venv_aibuilder/bin/mkdocs-get-deps new file mode 100755 index 
00000000..abeef869 --- /dev/null +++ b/venv_aibuilder/bin/mkdocs-get-deps @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from mkdocs_get_deps.__main__ import cli +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli()) diff --git a/venv_aibuilder/bin/nodeenv b/venv_aibuilder/bin/nodeenv new file mode 100755 index 00000000..fdffa529 --- /dev/null +++ b/venv_aibuilder/bin/nodeenv @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from nodeenv import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/normalizer b/venv_aibuilder/bin/normalizer new file mode 100755 index 00000000..f1707cbc --- /dev/null +++ b/venv_aibuilder/bin/normalizer @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from charset_normalizer.cli import cli_detect +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli_detect()) diff --git a/venv_aibuilder/bin/oic-client-management b/venv_aibuilder/bin/oic-client-management new file mode 100755 index 00000000..b5309dfa --- /dev/null +++ b/venv_aibuilder/bin/oic-client-management @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from oic.utils.client_management import run +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(run()) diff --git a/venv_aibuilder/bin/pasteurize b/venv_aibuilder/bin/pasteurize new file mode 100755 index 00000000..a2056915 --- /dev/null +++ b/venv_aibuilder/bin/pasteurize @@ -0,0 +1,8 @@ 
+#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from libpasteurize.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/peek.py b/venv_aibuilder/bin/peek.py new file mode 100755 index 00000000..5e7989e3 --- /dev/null +++ b/venv_aibuilder/bin/peek.py @@ -0,0 +1,18 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +import sys +from jwkest import jwe +from jwkest import jws + +__author__ = 'roland' + +jwt = sys.argv[1] + +_jw = jwe.factory(jwt) +if _jw: + print("jwe") +else: + _jw = jws.factory(jwt) + if _jw: + print("jws") + print(_jw.jwt.headers) + print(_jw.jwt.part[1]) diff --git a/venv_aibuilder/bin/pip b/venv_aibuilder/bin/pip new file mode 100755 index 00000000..a7b1d6b0 --- /dev/null +++ b/venv_aibuilder/bin/pip @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python3.11 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/pip3 b/venv_aibuilder/bin/pip3 new file mode 100755 index 00000000..a7b1d6b0 --- /dev/null +++ b/venv_aibuilder/bin/pip3 @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python3.11 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/pip3.11 b/venv_aibuilder/bin/pip3.11 new file mode 100755 index 00000000..a7b1d6b0 --- /dev/null +++ b/venv_aibuilder/bin/pip3.11 @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python3.11 +# -*- coding: utf-8 
-*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/pre-commit b/venv_aibuilder/bin/pre-commit new file mode 100755 index 00000000..d80e4c81 --- /dev/null +++ b/venv_aibuilder/bin/pre-commit @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pre_commit.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/py.test b/venv_aibuilder/bin/py.test new file mode 100755 index 00000000..0623f734 --- /dev/null +++ b/venv_aibuilder/bin/py.test @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pytest import console_main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(console_main()) diff --git a/venv_aibuilder/bin/pybabel b/venv_aibuilder/bin/pybabel new file mode 100755 index 00000000..931a5c57 --- /dev/null +++ b/venv_aibuilder/bin/pybabel @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from babel.messages.frontend import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/pygmentize b/venv_aibuilder/bin/pygmentize new file mode 100755 index 00000000..201dcd5e --- /dev/null +++ b/venv_aibuilder/bin/pygmentize @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pygments.cmdline import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', 
'', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/pyrsa-decrypt b/venv_aibuilder/bin/pyrsa-decrypt new file mode 100755 index 00000000..522e6ff5 --- /dev/null +++ b/venv_aibuilder/bin/pyrsa-decrypt @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from rsa.cli import decrypt +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(decrypt()) diff --git a/venv_aibuilder/bin/pyrsa-encrypt b/venv_aibuilder/bin/pyrsa-encrypt new file mode 100755 index 00000000..3b074149 --- /dev/null +++ b/venv_aibuilder/bin/pyrsa-encrypt @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from rsa.cli import encrypt +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(encrypt()) diff --git a/venv_aibuilder/bin/pyrsa-keygen b/venv_aibuilder/bin/pyrsa-keygen new file mode 100755 index 00000000..0a03cc21 --- /dev/null +++ b/venv_aibuilder/bin/pyrsa-keygen @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from rsa.cli import keygen +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(keygen()) diff --git a/venv_aibuilder/bin/pyrsa-priv2pub b/venv_aibuilder/bin/pyrsa-priv2pub new file mode 100755 index 00000000..e44773f9 --- /dev/null +++ b/venv_aibuilder/bin/pyrsa-priv2pub @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from rsa.util import private_to_public +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(private_to_public()) diff --git a/venv_aibuilder/bin/pyrsa-sign 
b/venv_aibuilder/bin/pyrsa-sign new file mode 100755 index 00000000..667af884 --- /dev/null +++ b/venv_aibuilder/bin/pyrsa-sign @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from rsa.cli import sign +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(sign()) diff --git a/venv_aibuilder/bin/pyrsa-verify b/venv_aibuilder/bin/pyrsa-verify new file mode 100755 index 00000000..47d7b8fd --- /dev/null +++ b/venv_aibuilder/bin/pyrsa-verify @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from rsa.cli import verify +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(verify()) diff --git a/venv_aibuilder/bin/pytest b/venv_aibuilder/bin/pytest new file mode 100755 index 00000000..0623f734 --- /dev/null +++ b/venv_aibuilder/bin/pytest @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from pytest import console_main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(console_main()) diff --git a/venv_aibuilder/bin/python b/venv_aibuilder/bin/python new file mode 120000 index 00000000..6e7f3c7d --- /dev/null +++ b/venv_aibuilder/bin/python @@ -0,0 +1 @@ +python3.11 \ No newline at end of file diff --git a/venv_aibuilder/bin/python3 b/venv_aibuilder/bin/python3 new file mode 120000 index 00000000..6e7f3c7d --- /dev/null +++ b/venv_aibuilder/bin/python3 @@ -0,0 +1 @@ +python3.11 \ No newline at end of file diff --git a/venv_aibuilder/bin/python3.11 b/venv_aibuilder/bin/python3.11 new file mode 120000 index 00000000..3cf1fbdc --- /dev/null +++ b/venv_aibuilder/bin/python3.11 @@ -0,0 +1 @@ +/opt/homebrew/opt/python@3.11/bin/python3.11 \ 
No newline at end of file diff --git a/venv_aibuilder/bin/tqdm b/venv_aibuilder/bin/tqdm new file mode 100755 index 00000000..d0df18d1 --- /dev/null +++ b/venv_aibuilder/bin/tqdm @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from tqdm.cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/uvicorn b/venv_aibuilder/bin/uvicorn new file mode 100755 index 00000000..50beb370 --- /dev/null +++ b/venv_aibuilder/bin/uvicorn @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from uvicorn.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/bin/virtualenv b/venv_aibuilder/bin/virtualenv new file mode 100755 index 00000000..b01fd394 --- /dev/null +++ b/venv_aibuilder/bin/virtualenv @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from virtualenv.__main__ import run_with_catch +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(run_with_catch()) diff --git a/venv_aibuilder/bin/watchmedo b/venv_aibuilder/bin/watchmedo new file mode 100755 index 00000000..c2fe2c41 --- /dev/null +++ b/venv_aibuilder/bin/watchmedo @@ -0,0 +1,8 @@ +#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from watchdog.watchmedo import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv_aibuilder/pyvenv.cfg b/venv_aibuilder/pyvenv.cfg new file mode 100644 index 00000000..c18904e9 --- /dev/null +++ 
b/venv_aibuilder/pyvenv.cfg @@ -0,0 +1,5 @@ +home = /opt/homebrew/opt/python@3.11/bin +include-system-site-packages = false +version = 3.11.9 +executable = /opt/homebrew/Cellar/python@3.11/3.11.9_1/Frameworks/Python.framework/Versions/3.11/bin/python3.11 +command = /opt/homebrew/opt/python@3.11/bin/python3.11 -m venv /Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder From 4e673f44551c83e74358e077cf7bb2a8020f8535 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 4 Dec 2024 01:55:41 +0100 Subject: [PATCH 07/21] Catalog list tests done --- .../aibuilder/aibuilder_mlmodel_connector.py | 8 +- .../test_aibuilder_mlmodel_connector.py | 85 +++++++++++++++---- .../aibuilder/catalog_list_format_error.json | 11 +++ .../aibuilder/empty_catalog_list.json | 1 + venv_aibuilder/bin/dotenv | 5 +- venv_aibuilder/bin/futurize | 5 +- venv_aibuilder/bin/gen_symkey.py | 16 ++-- venv_aibuilder/bin/ghp-import | 5 +- venv_aibuilder/bin/httpx | 5 +- venv_aibuilder/bin/huggingface-cli | 5 +- venv_aibuilder/bin/identify-cli | 5 +- venv_aibuilder/bin/jwdecrypt.py | 23 ++--- venv_aibuilder/bin/jwenc.py | 43 ++++------ venv_aibuilder/bin/jwk_create.py | 12 ++- venv_aibuilder/bin/jwk_export.py | 10 +-- venv_aibuilder/bin/jwkutil.py | 46 +++++----- venv_aibuilder/bin/mako-render | 5 +- venv_aibuilder/bin/markdown_py | 5 +- venv_aibuilder/bin/mkdocs | 5 +- venv_aibuilder/bin/mkdocs-get-deps | 5 +- venv_aibuilder/bin/nodeenv | 5 +- venv_aibuilder/bin/normalizer | 5 +- venv_aibuilder/bin/oic-client-management | 5 +- venv_aibuilder/bin/pasteurize | 5 +- venv_aibuilder/bin/peek.py | 2 +- venv_aibuilder/bin/pip | 5 +- venv_aibuilder/bin/pip3 | 5 +- venv_aibuilder/bin/pip3.11 | 5 +- venv_aibuilder/bin/pre-commit | 5 +- venv_aibuilder/bin/py.test | 5 +- venv_aibuilder/bin/pybabel | 5 +- venv_aibuilder/bin/pygmentize | 5 +- venv_aibuilder/bin/pyrsa-decrypt | 5 +- venv_aibuilder/bin/pyrsa-encrypt | 5 +- venv_aibuilder/bin/pyrsa-keygen | 5 +- venv_aibuilder/bin/pyrsa-priv2pub 
| 5 +- venv_aibuilder/bin/pyrsa-sign | 5 +- venv_aibuilder/bin/pyrsa-verify | 5 +- venv_aibuilder/bin/pytest | 5 +- venv_aibuilder/bin/tqdm | 5 +- venv_aibuilder/bin/uvicorn | 5 +- venv_aibuilder/bin/virtualenv | 5 +- venv_aibuilder/bin/watchmedo | 5 +- 43 files changed, 248 insertions(+), 169 deletions(-) create mode 100644 src/tests/resources/connectors/aibuilder/catalog_list_format_error.json create mode 100644 src/tests/resources/connectors/aibuilder/empty_catalog_list.json diff --git a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py index 704ee0b6..94b68fd3 100644 --- a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py +++ b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py @@ -62,7 +62,7 @@ def get_response(self, url) -> requests.Response | RecordError: logging.error(err_msg) err = HTTPError(err_msg) return RecordError(identifier=None, error=err) - return response + return response.json() def _is_aware(self, date): return date.tzinfo is not None and date.tzinfo.utcoffset(date) is not None @@ -181,7 +181,7 @@ def fetch( to_excl = to_excl.replace(tzinfo=pytz.UTC) url_get_catalog_list = f"{API_URL}/get_catalog_list?apiToken={TOKEN}" - response = self.get_response(url_get_catalog_list).json() + response = self.get_response(url_get_catalog_list) if isinstance(response, RecordError): self.is_concluded = True yield None, response @@ -203,7 +203,7 @@ def fetch( url_get_catalog_solutions = ( f"{API_URL}/get_catalog_solutions?catalogId={catalog}&apiToken={TOKEN}" ) - response = self.get_response(url_get_catalog_solutions).json() + response = self.get_response(url_get_catalog_solutions) if isinstance(response, RecordError): self.is_concluded = num_catalog == len(catalog_list) - 1 yield None, response @@ -224,7 +224,7 @@ def fetch( for num_solution, solution in enumerate(solutions_list): url_get_solution = f"{API_URL}/get_solution?fullId={solution}&apiToken={TOKEN}" - response = 
self.get_response(url_get_solution).json() + response = self.get_response(url_get_solution) if isinstance(response, RecordError): self.is_concluded = ( num_catalog == len(catalog_list) - 1 diff --git a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py index b50beef8..5bf78943 100644 --- a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py +++ b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py @@ -3,29 +3,32 @@ import responses from datetime import datetime +from requests.exceptions import HTTPError from connectors.aibuilder.aibuilder_mlmodel_connector import AIBuilderMLModelConnector from connectors.aibuilder.aibuilder_mlmodel_connector import API_URL, TOKEN from connectors.resource_with_relations import ResourceWithRelations +from connectors.record_error import RecordError from database.model.models_and_experiments.ml_model import MLModel from database.model.platform.platform_names import PlatformName from tests.testutils.paths import path_test_resources from database.model.ai_resource.text import Text +connector = AIBuilderMLModelConnector() +test_resources_path = os.path.join(path_test_resources(), "connectors", "aibuilder") +catalog_list_url = f"{API_URL}/get_catalog_list?apiToken={TOKEN}" +mocked_datetime_from = datetime.fromisoformat("2023-09-01T00:00:00Z") +mocked_datetime_to = datetime.fromisoformat("2023-09-01T00:00:01Z") + def test_fetch_happy_path(): - connector = AIBuilderMLModelConnector() - test_resources_path = os.path.join(path_test_resources(), "connectors", "aibuilder") catalog_list_path = os.path.join(test_resources_path, "catalog_list.json") - catalog_list_url = f"{API_URL}/get_catalog_list?apiToken={TOKEN}" catalog_solutions_path = os.path.join(test_resources_path, "catalog_solutions.json") catalog_solutions_url = f"{API_URL}/get_catalog_solutions?catalogId=1&apiToken={TOKEN}" solution_1_path = 
os.path.join(test_resources_path, "solution_1.json") solution_1_url = f"{API_URL}/get_solution?fullId=1&apiToken={TOKEN}" solution_2_path = os.path.join(test_resources_path, "solution_2.json") solution_2_url = f"{API_URL}/get_solution?fullId=2&apiToken={TOKEN}" - mocked_datetime_from = datetime.fromisoformat("2023-09-01T00:00:00Z") - mocked_datetime_to = datetime.fromisoformat("2023-09-01T00:00:01Z") expected_resources = [] fetched_resources = [] with responses.RequestsMock() as mocked_requests: @@ -46,14 +49,66 @@ def test_fetch_happy_path(): fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) assert len(fetched_resources) == len(expected_resources) - for i, (last_modified, mlmodel) in enumerate(fetched_resources): + for i, (last_modified, resource) in enumerate(fetched_resources): assert last_modified == mocked_datetime_from - assert type(mlmodel) == ResourceWithRelations - assert mlmodel.resource_ORM_class == MLModel - assert mlmodel.resource.platform == PlatformName.aibuilder - assert mlmodel.resource.platform_resource_identifier == str(i + 1) - assert mlmodel.resource.name == f"Mocking Full Solution {i + 1}" - assert mlmodel.resource.date_published == mocked_datetime_from - assert mlmodel.resource.description == Text(plain=f"The mocked full solution {i + 1}.") - assert set(mlmodel.resource.keyword) == {f"Mocked tag {i + 1}."} - assert mlmodel.resource.is_accessible_for_free + assert type(resource) == ResourceWithRelations + assert resource.resource_ORM_class == MLModel + assert resource.resource.platform == PlatformName.aibuilder + assert resource.resource.platform_resource_identifier == str(i + 1) + assert resource.resource.name == f"Mocking Full Solution {i + 1}" + assert resource.resource.date_published == mocked_datetime_from + assert resource.resource.description == Text(plain=f"The mocked full solution {i + 1}.") + assert set(resource.resource.keyword) == {f"Mocked tag {i + 1}."} + assert 
resource.resource.is_accessible_for_free + + +def test_catalog_list_http_error(): + error = {"error": {"message": "HTTP Error."}} + err_msg = f"Error while fetching {catalog_list_url} from AIBuilder: (500) HTTP Error." + fetched_resources = [] + with responses.RequestsMock() as mocked_requests: + mocked_requests.add(responses.GET, catalog_list_url, json=error, status=500) + fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) + + assert len(fetched_resources) == 1 + last_modified, resource = fetched_resources[0] + assert last_modified is None + assert type(resource) == RecordError + assert resource.identifier is None + assert type(resource.error) == HTTPError + assert str(resource.error) == err_msg + + +def test_catalog_list_format_error(): + catalog_list_path = os.path.join(test_resources_path, "catalog_list_format_error.json") + fetched_resources = [] + with responses.RequestsMock() as mocked_requests: + with open(catalog_list_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_list_url, json=response, status=200) + fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) + + assert len(fetched_resources) == 1 + last_modified, resource = fetched_resources[0] + assert last_modified is None + assert type(resource) == RecordError + assert resource.identifier is None + assert type(resource.error) == KeyError + assert str(resource.error) == "'catalogId'" + + +def test_empty_catalog_list(): + catalog_list_path = os.path.join(test_resources_path, "empty_catalog_list.json") + fetched_resources = [] + with responses.RequestsMock() as mocked_requests: + with open(catalog_list_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_list_url, json=response, status=200) + fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) + + assert len(fetched_resources) == 1 + last_modified, resource = fetched_resources[0] + assert 
last_modified is None + assert type(resource) == RecordError + assert resource.identifier is None + assert resource.error == "Empty catalog list." diff --git a/src/tests/resources/connectors/aibuilder/catalog_list_format_error.json b/src/tests/resources/connectors/aibuilder/catalog_list_format_error.json new file mode 100644 index 00000000..daa179e5 --- /dev/null +++ b/src/tests/resources/connectors/aibuilder/catalog_list_format_error.json @@ -0,0 +1,11 @@ +[ + { + "name": "Mocking Catalog", + "description": "A Mocking Catalog for AIBuilder.", + "type": "STANDARD", + "size": 2, + "accessTypeCode": "PB", + "lastModified": "2023-09-01T00:00:00Z", + "created": "2023-09-01T00:00:00Z" + } +] diff --git a/src/tests/resources/connectors/aibuilder/empty_catalog_list.json b/src/tests/resources/connectors/aibuilder/empty_catalog_list.json new file mode 100644 index 00000000..fe51488c --- /dev/null +++ b/src/tests/resources/connectors/aibuilder/empty_catalog_list.json @@ -0,0 +1 @@ +[] diff --git a/venv_aibuilder/bin/dotenv b/venv_aibuilder/bin/dotenv index e6465a8a..5f165172 100755 --- a/venv_aibuilder/bin/dotenv +++ b/venv_aibuilder/bin/dotenv @@ -3,6 +3,7 @@ import re import sys from dotenv.__main__ import cli -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(cli()) diff --git a/venv_aibuilder/bin/futurize b/venv_aibuilder/bin/futurize index c7cc17b1..70bcef7d 100755 --- a/venv_aibuilder/bin/futurize +++ b/venv_aibuilder/bin/futurize @@ -3,6 +3,7 @@ import re import sys from libfuturize.main import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/gen_symkey.py b/venv_aibuilder/bin/gen_symkey.py index 34a29330..76bfe9d6 100755 --- 
a/venv_aibuilder/bin/gen_symkey.py +++ b/venv_aibuilder/bin/gen_symkey.py @@ -7,18 +7,24 @@ from jwkest.jwk import SYMKey -__author__ = 'regu0004' +__author__ = "regu0004" def rndstr(size=6, chars=string.ascii_uppercase + string.digits): - return ''.join(random.choice(chars) for _ in range(size)) + return "".join(random.choice(chars) for _ in range(size)) def main(): parser = argparse.ArgumentParser( - description="Generate a new symmetric key and print it to stdout.") - parser.add_argument("-n", dest="key_length", default=48, type=int, - help="Length of the random string used as key.") + description="Generate a new symmetric key and print it to stdout." + ) + parser.add_argument( + "-n", + dest="key_length", + default=48, + type=int, + help="Length of the random string used as key.", + ) parser.add_argument("--kid", dest="kid", help="Key id.") args = parser.parse_args() diff --git a/venv_aibuilder/bin/ghp-import b/venv_aibuilder/bin/ghp-import index 1daaac36..6a140ea3 100755 --- a/venv_aibuilder/bin/ghp-import +++ b/venv_aibuilder/bin/ghp-import @@ -3,6 +3,7 @@ import re import sys from ghp_import import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/httpx b/venv_aibuilder/bin/httpx index e439d3c7..89b4d127 100755 --- a/venv_aibuilder/bin/httpx +++ b/venv_aibuilder/bin/httpx @@ -3,6 +3,7 @@ import re import sys from httpx import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/huggingface-cli b/venv_aibuilder/bin/huggingface-cli index a3cf0a54..e7138a18 100755 --- a/venv_aibuilder/bin/huggingface-cli +++ b/venv_aibuilder/bin/huggingface-cli @@ -3,6 +3,7 @@ import re import 
sys from huggingface_hub.commands.huggingface_cli import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/identify-cli b/venv_aibuilder/bin/identify-cli index 97f262e9..881ab93a 100755 --- a/venv_aibuilder/bin/identify-cli +++ b/venv_aibuilder/bin/identify-cli @@ -3,6 +3,7 @@ import re import sys from identify.cli import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/jwdecrypt.py b/venv_aibuilder/bin/jwdecrypt.py index 07f6a397..def148fb 100755 --- a/venv_aibuilder/bin/jwdecrypt.py +++ b/venv_aibuilder/bin/jwdecrypt.py @@ -1,7 +1,7 @@ #!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python import sys -__author__ = 'rohe0002' +__author__ = "rohe0002" import argparse import requests @@ -29,21 +29,16 @@ def lrequest(url, method="GET", **kwargs): if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('-d', dest='debug', action='store_true', - help="Print debug information") + parser.add_argument("-d", dest="debug", action="store_true", help="Print debug information") # parser.add_argument('-v', dest='verbose', action='store_true', # help="Print runtime information") - parser.add_argument('-x', dest="x509_file", - help="File containing a X509 certificate") - parser.add_argument('-X', dest="x509_url", - help="URL pointing to a file containing a X509 " - "certificate") - parser.add_argument('-j', dest="jwk_file", - help="File containing a JWK") - parser.add_argument('-J', dest="jwk_url", - help="URL pointing to a file containing a JWK") - parser.add_argument('-r', dest="rsa_file", - help="A file containing a RSA key") + 
parser.add_argument("-x", dest="x509_file", help="File containing a X509 certificate") + parser.add_argument( + "-X", dest="x509_url", help="URL pointing to a file containing a X509 " "certificate" + ) + parser.add_argument("-j", dest="jwk_file", help="File containing a JWK") + parser.add_argument("-J", dest="jwk_url", help="URL pointing to a file containing a JWK") + parser.add_argument("-r", dest="rsa_file", help="A file containing a RSA key") parser.add_argument("-i", dest="int", help="Integrity method") parser.add_argument("-f", dest="file", help="File with the message") parser.add_argument("message", nargs="?", help="The message to encrypt") diff --git a/venv_aibuilder/bin/jwenc.py b/venv_aibuilder/bin/jwenc.py index da34d168..e71825ed 100755 --- a/venv_aibuilder/bin/jwenc.py +++ b/venv_aibuilder/bin/jwenc.py @@ -4,7 +4,7 @@ import sys -__author__ = 'rohe0002' +__author__ = "rohe0002" import requests from jwkest.jwk import load_jwks_from_url, RSAKey @@ -35,28 +35,21 @@ def lrequest(url, method="GET", **kwargs): if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('-d', dest='debug', action='store_true', - help="Print debug information") - parser.add_argument('-v', dest='verbose', action='store_true', - help="Print runtime information") - parser.add_argument('-x', dest="x509_file", - help="File containing a X509 certificate") - parser.add_argument('-X', dest="x509_url", - help="URL pointing to a file containing a X509 " - "certificate") - parser.add_argument('-j', dest="jwk_file", - help="File containing a JWK") - parser.add_argument('-J', dest="jwk_url", - help="URL pointing to a file containing a JWK") - parser.add_argument('-r', dest="rsa_file", - help="A file containing a RSA key") - parser.add_argument('-a', dest="alg", - help="The encryption algorithm") + parser.add_argument("-d", dest="debug", action="store_true", help="Print debug information") + parser.add_argument("-v", dest="verbose", action="store_true", help="Print 
runtime information") + parser.add_argument("-x", dest="x509_file", help="File containing a X509 certificate") + parser.add_argument( + "-X", dest="x509_url", help="URL pointing to a file containing a X509 " "certificate" + ) + parser.add_argument("-j", dest="jwk_file", help="File containing a JWK") + parser.add_argument("-J", dest="jwk_url", help="URL pointing to a file containing a JWK") + parser.add_argument("-r", dest="rsa_file", help="A file containing a RSA key") + parser.add_argument("-a", dest="alg", help="The encryption algorithm") parser.add_argument("-e", dest="enc", help="The encryption method") - parser.add_argument("-m", dest="mode", default="public", - help="Whether a public or private key should be used") - parser.add_argument("-f", dest="file", - help="File to be encrypted") + parser.add_argument( + "-m", dest="mode", default="public", help="Whether a public or private key should be used" + ) + parser.add_argument("-f", dest="file", help="File to be encrypted") parser.add_argument("message", nargs="?", help="The message to encrypt") args = parser.parse_args() @@ -68,8 +61,7 @@ def lrequest(url, method="GET", **kwargs): keys = load_jwks(open(args.jwk_file).read()) elif args.x509_url: # load_x509_cert returns list of 2-tuples - keys = [RSAKey(key=x) for x, y in load_x509_cert(lrequest, - args.x509_url)] + keys = [RSAKey(key=x) for x, y in load_x509_cert(lrequest, args.x509_url)] for key in keys: key.serialize() elif args.x509_file: @@ -95,8 +87,7 @@ def lrequest(url, method="GET", **kwargs): exit() if args.alg not in SUPPORTED["alg"]: - print("Encryption algorithm %s not supported", args.alg, - file=sys.stderr) + print("Encryption algorithm %s not supported", args.alg, file=sys.stderr) print("Algorithms supported: %s", SUPPORTED["alg"], file=sys.stderr) exit() diff --git a/venv_aibuilder/bin/jwk_create.py b/venv_aibuilder/bin/jwk_create.py index 9706c250..23f04288 100755 --- a/venv_aibuilder/bin/jwk_create.py +++ b/venv_aibuilder/bin/jwk_create.py @@ 
-5,7 +5,7 @@ import os from jwkest.jwk import RSAKey -__author__ = 'rolandh' +__author__ = "rolandh" def create_and_store_rsa_key_pair(name="pyoidc", path=".", size=1024): @@ -30,15 +30,13 @@ def create_and_store_rsa_key_pair(name="pyoidc", path=".", size=1024): return key + if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('-n', dest="name", default="pyoidc", - help="file names") - parser.add_argument('-p', dest="path", default=".", - help="Path to the directory for the files") - parser.add_argument('-s', dest="size", default=1024, - help="Key size", type=int) + parser.add_argument("-n", dest="name", default="pyoidc", help="file names") + parser.add_argument("-p", dest="path", default=".", help="Path to the directory for the files") + parser.add_argument("-s", dest="size", default=1024, help="Key size", type=int) args = parser.parse_args() diff --git a/venv_aibuilder/bin/jwk_export.py b/venv_aibuilder/bin/jwk_export.py index 0a3f73f5..e4d28340 100755 --- a/venv_aibuilder/bin/jwk_export.py +++ b/venv_aibuilder/bin/jwk_export.py @@ -5,14 +5,12 @@ from jwkest.jwk import rsa_load from jwkest.jwk import dump_jwks -__author__ = 'rolandh' +__author__ = "rolandh" parser = argparse.ArgumentParser() -parser.add_argument('-n', dest="name", default="pyoidc", - help="file names") -parser.add_argument('-p', dest="path", default=".", - help="Path to the directory for the files") -parser.add_argument('-k', dest="key", help="Key file") +parser.add_argument("-n", dest="name", default="pyoidc", help="file names") +parser.add_argument("-p", dest="path", default=".", help="Path to the directory for the files") +parser.add_argument("-k", dest="key", help="Key file") args = parser.parse_args() diff --git a/venv_aibuilder/bin/jwkutil.py b/venv_aibuilder/bin/jwkutil.py index b80333da..d95ca36c 100755 --- a/venv_aibuilder/bin/jwkutil.py +++ b/venv_aibuilder/bin/jwkutil.py @@ -4,7 +4,7 @@ import sys from jwkest.jwe import JWE -__author__ = 'rohe0002' 
+__author__ = "rohe0002" import argparse import requests @@ -18,8 +18,7 @@ def setup_logging(log_file): logger = logging.getLogger("") hdlr = logging.FileHandler(log_file) - base_formatter = logging.Formatter( - "%(asctime)s %(name)s:%(levelname)s %(message)s") + base_formatter = logging.Formatter("%(asctime)s %(name)s:%(levelname)s %(message)s") hdlr.setFormatter(base_formatter) logger.addHandler(hdlr) logger.setLevel(logging.DEBUG) @@ -73,28 +72,22 @@ def decrypt(msg, keys): if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('-s', dest="sign", action='store_true') - parser.add_argument('-v', dest="verify", action='store_true') - parser.add_argument('-e', dest="encrypt", action='store_true') - parser.add_argument('-d', dest="decrypt", action='store_true') - parser.add_argument('-f', dest="msg_file", - help="File containing a message") - parser.add_argument('-r', dest="rsa_file", - help="File containing a RSA key") - parser.add_argument('-k', dest="hmac_key", - help="If using a HMAC algorithm this is the key") - parser.add_argument('-a', dest="alg", - help="The signing algorithm") - parser.add_argument('-A', dest="encalg", - help="The encryption alg algorithm") - parser.add_argument('-E', dest="encenc", - help="The encryption enc algorithm") - parser.add_argument('-j', dest="jwk", help="JSON Web Key") - parser.add_argument('-J', dest="jwks", help="JSON Web Keys") - parser.add_argument('-i', dest="kid", help="key id") - parser.add_argument('-l', dest="log", help="logfile name") - parser.add_argument('-t', dest="msgtype", help="JWT message type") - parser.add_argument('-u', dest="jwks_url", help="JSON Web Keys URL") + parser.add_argument("-s", dest="sign", action="store_true") + parser.add_argument("-v", dest="verify", action="store_true") + parser.add_argument("-e", dest="encrypt", action="store_true") + parser.add_argument("-d", dest="decrypt", action="store_true") + parser.add_argument("-f", dest="msg_file", help="File containing a 
message") + parser.add_argument("-r", dest="rsa_file", help="File containing a RSA key") + parser.add_argument("-k", dest="hmac_key", help="If using a HMAC algorithm this is the key") + parser.add_argument("-a", dest="alg", help="The signing algorithm") + parser.add_argument("-A", dest="encalg", help="The encryption alg algorithm") + parser.add_argument("-E", dest="encenc", help="The encryption enc algorithm") + parser.add_argument("-j", dest="jwk", help="JSON Web Key") + parser.add_argument("-J", dest="jwks", help="JSON Web Keys") + parser.add_argument("-i", dest="kid", help="key id") + parser.add_argument("-l", dest="log", help="logfile name") + parser.add_argument("-t", dest="msgtype", help="JWT message type") + parser.add_argument("-u", dest="jwks_url", help="JSON Web Keys URL") parser.add_argument("message", nargs="?", help="The message") args = parser.parse_args() @@ -105,8 +98,7 @@ def decrypt(msg, keys): _kid = args.kid keys = [] if args.rsa_file: - keys.append(RSAKey(key=import_rsa_key_from_file(args.rsa_file), - kid=_kid)) + keys.append(RSAKey(key=import_rsa_key_from_file(args.rsa_file), kid=_kid)) if args.hmac_key: keys.append(SYMKey(key=args.hmac_key)) diff --git a/venv_aibuilder/bin/mako-render b/venv_aibuilder/bin/mako-render index 6b7192e3..62771c85 100755 --- a/venv_aibuilder/bin/mako-render +++ b/venv_aibuilder/bin/mako-render @@ -3,6 +3,7 @@ import re import sys from mako.cmd import cmdline -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(cmdline()) diff --git a/venv_aibuilder/bin/markdown_py b/venv_aibuilder/bin/markdown_py index 49ee0672..37506799 100755 --- a/venv_aibuilder/bin/markdown_py +++ b/venv_aibuilder/bin/markdown_py @@ -3,6 +3,7 @@ import re import sys from markdown.__main__ import run -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if 
__name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(run()) diff --git a/venv_aibuilder/bin/mkdocs b/venv_aibuilder/bin/mkdocs index fac6f9e6..f2f1aa1d 100755 --- a/venv_aibuilder/bin/mkdocs +++ b/venv_aibuilder/bin/mkdocs @@ -3,6 +3,7 @@ import re import sys from mkdocs.__main__ import cli -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(cli()) diff --git a/venv_aibuilder/bin/mkdocs-get-deps b/venv_aibuilder/bin/mkdocs-get-deps index abeef869..4403292c 100755 --- a/venv_aibuilder/bin/mkdocs-get-deps +++ b/venv_aibuilder/bin/mkdocs-get-deps @@ -3,6 +3,7 @@ import re import sys from mkdocs_get_deps.__main__ import cli -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(cli()) diff --git a/venv_aibuilder/bin/nodeenv b/venv_aibuilder/bin/nodeenv index fdffa529..28b2d983 100755 --- a/venv_aibuilder/bin/nodeenv +++ b/venv_aibuilder/bin/nodeenv @@ -3,6 +3,7 @@ import re import sys from nodeenv import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/normalizer b/venv_aibuilder/bin/normalizer index f1707cbc..8ca8e7b7 100755 --- a/venv_aibuilder/bin/normalizer +++ b/venv_aibuilder/bin/normalizer @@ -3,6 +3,7 @@ import re import sys from charset_normalizer.cli import cli_detect -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(cli_detect()) diff --git 
a/venv_aibuilder/bin/oic-client-management b/venv_aibuilder/bin/oic-client-management index b5309dfa..b3c7007e 100755 --- a/venv_aibuilder/bin/oic-client-management +++ b/venv_aibuilder/bin/oic-client-management @@ -3,6 +3,7 @@ import re import sys from oic.utils.client_management import run -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(run()) diff --git a/venv_aibuilder/bin/pasteurize b/venv_aibuilder/bin/pasteurize index a2056915..ac7f75c8 100755 --- a/venv_aibuilder/bin/pasteurize +++ b/venv_aibuilder/bin/pasteurize @@ -3,6 +3,7 @@ import re import sys from libpasteurize.main import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/peek.py b/venv_aibuilder/bin/peek.py index 5e7989e3..2382d63f 100755 --- a/venv_aibuilder/bin/peek.py +++ b/venv_aibuilder/bin/peek.py @@ -3,7 +3,7 @@ from jwkest import jwe from jwkest import jws -__author__ = 'roland' +__author__ = "roland" jwt = sys.argv[1] diff --git a/venv_aibuilder/bin/pip b/venv_aibuilder/bin/pip index a7b1d6b0..4c0174a8 100755 --- a/venv_aibuilder/bin/pip +++ b/venv_aibuilder/bin/pip @@ -3,6 +3,7 @@ import re import sys from pip._internal.cli.main import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/pip3 b/venv_aibuilder/bin/pip3 index a7b1d6b0..4c0174a8 100755 --- a/venv_aibuilder/bin/pip3 +++ b/venv_aibuilder/bin/pip3 @@ -3,6 +3,7 @@ import re import sys from pip._internal.cli.main import main -if __name__ == '__main__': - sys.argv[0] = 
re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/pip3.11 b/venv_aibuilder/bin/pip3.11 index a7b1d6b0..4c0174a8 100755 --- a/venv_aibuilder/bin/pip3.11 +++ b/venv_aibuilder/bin/pip3.11 @@ -3,6 +3,7 @@ import re import sys from pip._internal.cli.main import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/pre-commit b/venv_aibuilder/bin/pre-commit index d80e4c81..1b2fb1b7 100755 --- a/venv_aibuilder/bin/pre-commit +++ b/venv_aibuilder/bin/pre-commit @@ -3,6 +3,7 @@ import re import sys from pre_commit.main import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/py.test b/venv_aibuilder/bin/py.test index 0623f734..ba0e8dce 100755 --- a/venv_aibuilder/bin/py.test +++ b/venv_aibuilder/bin/py.test @@ -3,6 +3,7 @@ import re import sys from pytest import console_main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(console_main()) diff --git a/venv_aibuilder/bin/pybabel b/venv_aibuilder/bin/pybabel index 931a5c57..40102580 100755 --- a/venv_aibuilder/bin/pybabel +++ b/venv_aibuilder/bin/pybabel @@ -3,6 +3,7 @@ import re import sys from babel.messages.frontend import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git 
a/venv_aibuilder/bin/pygmentize b/venv_aibuilder/bin/pygmentize index 201dcd5e..dec8b2ac 100755 --- a/venv_aibuilder/bin/pygmentize +++ b/venv_aibuilder/bin/pygmentize @@ -3,6 +3,7 @@ import re import sys from pygments.cmdline import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/pyrsa-decrypt b/venv_aibuilder/bin/pyrsa-decrypt index 522e6ff5..44f5598f 100755 --- a/venv_aibuilder/bin/pyrsa-decrypt +++ b/venv_aibuilder/bin/pyrsa-decrypt @@ -3,6 +3,7 @@ import re import sys from rsa.cli import decrypt -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(decrypt()) diff --git a/venv_aibuilder/bin/pyrsa-encrypt b/venv_aibuilder/bin/pyrsa-encrypt index 3b074149..15695f7e 100755 --- a/venv_aibuilder/bin/pyrsa-encrypt +++ b/venv_aibuilder/bin/pyrsa-encrypt @@ -3,6 +3,7 @@ import re import sys from rsa.cli import encrypt -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(encrypt()) diff --git a/venv_aibuilder/bin/pyrsa-keygen b/venv_aibuilder/bin/pyrsa-keygen index 0a03cc21..dfba74a4 100755 --- a/venv_aibuilder/bin/pyrsa-keygen +++ b/venv_aibuilder/bin/pyrsa-keygen @@ -3,6 +3,7 @@ import re import sys from rsa.cli import keygen -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(keygen()) diff --git a/venv_aibuilder/bin/pyrsa-priv2pub b/venv_aibuilder/bin/pyrsa-priv2pub index e44773f9..d07fdcb2 100755 --- 
a/venv_aibuilder/bin/pyrsa-priv2pub +++ b/venv_aibuilder/bin/pyrsa-priv2pub @@ -3,6 +3,7 @@ import re import sys from rsa.util import private_to_public -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(private_to_public()) diff --git a/venv_aibuilder/bin/pyrsa-sign b/venv_aibuilder/bin/pyrsa-sign index 667af884..4c36dd56 100755 --- a/venv_aibuilder/bin/pyrsa-sign +++ b/venv_aibuilder/bin/pyrsa-sign @@ -3,6 +3,7 @@ import re import sys from rsa.cli import sign -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(sign()) diff --git a/venv_aibuilder/bin/pyrsa-verify b/venv_aibuilder/bin/pyrsa-verify index 47d7b8fd..3784e174 100755 --- a/venv_aibuilder/bin/pyrsa-verify +++ b/venv_aibuilder/bin/pyrsa-verify @@ -3,6 +3,7 @@ import re import sys from rsa.cli import verify -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(verify()) diff --git a/venv_aibuilder/bin/pytest b/venv_aibuilder/bin/pytest index 0623f734..ba0e8dce 100755 --- a/venv_aibuilder/bin/pytest +++ b/venv_aibuilder/bin/pytest @@ -3,6 +3,7 @@ import re import sys from pytest import console_main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(console_main()) diff --git a/venv_aibuilder/bin/tqdm b/venv_aibuilder/bin/tqdm index d0df18d1..4fa88c0e 100755 --- a/venv_aibuilder/bin/tqdm +++ b/venv_aibuilder/bin/tqdm @@ -3,6 +3,7 @@ import re import sys from tqdm.cli import main -if __name__ == '__main__': - sys.argv[0] = 
re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/uvicorn b/venv_aibuilder/bin/uvicorn index 50beb370..c7ffbfc0 100755 --- a/venv_aibuilder/bin/uvicorn +++ b/venv_aibuilder/bin/uvicorn @@ -3,6 +3,7 @@ import re import sys from uvicorn.main import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) diff --git a/venv_aibuilder/bin/virtualenv b/venv_aibuilder/bin/virtualenv index b01fd394..654eebe6 100755 --- a/venv_aibuilder/bin/virtualenv +++ b/venv_aibuilder/bin/virtualenv @@ -3,6 +3,7 @@ import re import sys from virtualenv.__main__ import run_with_catch -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(run_with_catch()) diff --git a/venv_aibuilder/bin/watchmedo b/venv_aibuilder/bin/watchmedo index c2fe2c41..0e181958 100755 --- a/venv_aibuilder/bin/watchmedo +++ b/venv_aibuilder/bin/watchmedo @@ -3,6 +3,7 @@ import re import sys from watchdog.watchmedo import main -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + +if __name__ == "__main__": + sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) sys.exit(main()) From b743d3cdaa0a5b0159179e28b3fb723e0ad1c64e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 4 Dec 2024 16:37:13 +0100 Subject: [PATCH 08/21] All AIBuilder Connector test passed --- .../aibuilder/aibuilder_mlmodel_connector.py | 4 + .../test_aibuilder_mlmodel_connector.py | 246 +++++++++++++++++- .../catalog_solutions_format_error.json | 7 + .../aibuilder/empty_catalog_solutions.json | 1 + 
.../aibuilder/solution_1_format_error.json | 30 +++ .../solution_1_last_modified_exception.json | 30 +++ 6 files changed, 311 insertions(+), 7 deletions(-) create mode 100644 src/tests/resources/connectors/aibuilder/catalog_solutions_format_error.json create mode 100644 src/tests/resources/connectors/aibuilder/empty_catalog_solutions.json create mode 100644 src/tests/resources/connectors/aibuilder/solution_1_format_error.json create mode 100644 src/tests/resources/connectors/aibuilder/solution_1_last_modified_exception.json diff --git a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py index 94b68fd3..94e0aaf0 100644 --- a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py +++ b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py @@ -207,6 +207,7 @@ def fetch( if isinstance(response, RecordError): self.is_concluded = num_catalog == len(catalog_list) - 1 yield None, response + continue try: solutions_list = [ @@ -217,10 +218,12 @@ def fetch( except Exception as e: self.is_concluded = num_catalog == len(catalog_list) - 1 yield None, RecordError(identifier=None, error=e) + continue if len(solutions_list) == 0: self.is_concluded = num_catalog == len(catalog_list) - 1 yield None, RecordError(identifier=None, error="Empty solution list.", ignore=True) + continue for num_solution, solution in enumerate(solutions_list): url_get_solution = f"{API_URL}/get_solution?fullId={solution}&apiToken={TOKEN}" @@ -231,6 +234,7 @@ def fetch( and num_solution == len(solutions_list) - 1 ) yield None, response + continue try: self.is_concluded = ( diff --git a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py index 5bf78943..dcaec095 100644 --- a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py +++ b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py @@ -17,6 +17,9 @@ connector = 
AIBuilderMLModelConnector() test_resources_path = os.path.join(path_test_resources(), "connectors", "aibuilder") catalog_list_url = f"{API_URL}/get_catalog_list?apiToken={TOKEN}" +catalog_solutions_url = f"{API_URL}/get_catalog_solutions?catalogId=1&apiToken={TOKEN}" +solution_1_url = f"{API_URL}/get_solution?fullId=1&apiToken={TOKEN}" +solution_2_url = f"{API_URL}/get_solution?fullId=2&apiToken={TOKEN}" mocked_datetime_from = datetime.fromisoformat("2023-09-01T00:00:00Z") mocked_datetime_to = datetime.fromisoformat("2023-09-01T00:00:01Z") @@ -24,12 +27,8 @@ def test_fetch_happy_path(): catalog_list_path = os.path.join(test_resources_path, "catalog_list.json") catalog_solutions_path = os.path.join(test_resources_path, "catalog_solutions.json") - catalog_solutions_url = f"{API_URL}/get_catalog_solutions?catalogId=1&apiToken={TOKEN}" solution_1_path = os.path.join(test_resources_path, "solution_1.json") - solution_1_url = f"{API_URL}/get_solution?fullId=1&apiToken={TOKEN}" solution_2_path = os.path.join(test_resources_path, "solution_2.json") - solution_2_url = f"{API_URL}/get_solution?fullId=2&apiToken={TOKEN}" - expected_resources = [] fetched_resources = [] with responses.RequestsMock() as mocked_requests: with open(catalog_list_path, "r") as f: @@ -40,15 +39,52 @@ def test_fetch_happy_path(): mocked_requests.add(responses.GET, catalog_solutions_url, json=response, status=200) with open(solution_1_path, "r") as f: response = json.load(f) - expected_resources.append(response) mocked_requests.add(responses.GET, solution_1_url, json=response, status=200) with open(solution_2_path, "r") as f: response = json.load(f) - expected_resources.append(response) mocked_requests.add(responses.GET, solution_2_url, json=response, status=200) fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) - assert len(fetched_resources) == len(expected_resources) + assert len(fetched_resources) == 2 + for i, (last_modified, resource) in 
enumerate(fetched_resources): + assert last_modified == mocked_datetime_from + assert type(resource) == ResourceWithRelations + assert resource.resource_ORM_class == MLModel + assert resource.resource.platform == PlatformName.aibuilder + assert resource.resource.platform_resource_identifier == str(i + 1) + assert resource.resource.name == f"Mocking Full Solution {i + 1}" + assert resource.resource.date_published == mocked_datetime_from + assert resource.resource.description == Text(plain=f"The mocked full solution {i + 1}.") + assert set(resource.resource.keyword) == {f"Mocked tag {i + 1}."} + assert resource.resource.is_accessible_for_free + + +def test_fetch_happy_path_unaware_datetime(): + catalog_list_path = os.path.join(test_resources_path, "catalog_list.json") + catalog_solutions_path = os.path.join(test_resources_path, "catalog_solutions.json") + solution_1_path = os.path.join(test_resources_path, "solution_1.json") + solution_2_path = os.path.join(test_resources_path, "solution_2.json") + mocked_unaware_datetime_from = datetime.fromisoformat("2023-09-01T00:00:00") + mocked_unaware_datetime_to = datetime.fromisoformat("2023-09-01T00:00:01") + fetched_resources = [] + with responses.RequestsMock() as mocked_requests: + with open(catalog_list_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_list_url, json=response, status=200) + with open(catalog_solutions_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_solutions_url, json=response, status=200) + with open(solution_1_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, solution_1_url, json=response, status=200) + with open(solution_2_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, solution_2_url, json=response, status=200) + fetched_resources = list( + connector.fetch(mocked_unaware_datetime_from, mocked_unaware_datetime_to) + ) + + assert len(fetched_resources) == 2 for i, 
(last_modified, resource) in enumerate(fetched_resources): assert last_modified == mocked_datetime_from assert type(resource) == ResourceWithRelations @@ -77,6 +113,7 @@ def test_catalog_list_http_error(): assert resource.identifier is None assert type(resource.error) == HTTPError assert str(resource.error) == err_msg + assert connector.is_concluded def test_catalog_list_format_error(): @@ -95,6 +132,7 @@ def test_catalog_list_format_error(): assert resource.identifier is None assert type(resource.error) == KeyError assert str(resource.error) == "'catalogId'" + assert connector.is_concluded def test_empty_catalog_list(): @@ -112,3 +150,197 @@ def test_empty_catalog_list(): assert type(resource) == RecordError assert resource.identifier is None assert resource.error == "Empty catalog list." + assert connector.is_concluded + + +def test_catalog_solutions_http_error(): + catalog_list_path = os.path.join(test_resources_path, "catalog_list.json") + error = {"error": {"message": "HTTP Error."}} + err_msg = f"Error while fetching {catalog_solutions_url} from AIBuilder: (500) HTTP Error." 
+ fetched_resources = [] + with responses.RequestsMock() as mocked_requests: + with open(catalog_list_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_list_url, json=response, status=200) + mocked_requests.add(responses.GET, catalog_solutions_url, json=error, status=500) + fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) + + assert len(fetched_resources) == 1 + last_modified, resource = fetched_resources[0] + assert last_modified is None + assert type(resource) == RecordError + assert resource.identifier is None + assert type(resource.error) == HTTPError + assert str(resource.error) == err_msg + # TODO: assert continuation when just one instance of several catalogs fails + # (not connector.is_concluded and next resources) + + +def test_catalog_solutions_format_error(): + catalog_list_path = os.path.join(test_resources_path, "catalog_list.json") + catalog_solutions_path = os.path.join( + test_resources_path, "catalog_solutions_format_error.json" + ) + fetched_resources = [] + with responses.RequestsMock() as mocked_requests: + with open(catalog_list_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_list_url, json=response, status=200) + with open(catalog_solutions_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_solutions_url, json=response, status=200) + fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) + + assert len(fetched_resources) == 1 + last_modified, resource = fetched_resources[0] + assert last_modified is None + assert type(resource) == RecordError + assert resource.identifier is None + assert type(resource.error) == KeyError + assert str(resource.error) == "'fullId'" + # TODO: assert continuation when just one instance of several catalogs fails + # (not connector.is_concluded and next resources) + + +def test_empty_catalog_solutions(): + catalog_list_path = 
os.path.join(test_resources_path, "catalog_list.json") + catalog_solutions_path = os.path.join(test_resources_path, "empty_catalog_solutions.json") + fetched_resources = [] + with responses.RequestsMock() as mocked_requests: + with open(catalog_list_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_list_url, json=response, status=200) + with open(catalog_solutions_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_solutions_url, json=response, status=200) + fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) + + assert len(fetched_resources) == 1 + last_modified, resource = fetched_resources[0] + assert last_modified is None + assert type(resource) == RecordError + assert resource.identifier is None + assert resource.error == "Empty solution list." + # TODO: assert continuation when just one instance of several catalogs fails + # (not connector.is_concluded and next resources) + + +def test_solution_http_error(): + catalog_list_path = os.path.join(test_resources_path, "catalog_list.json") + catalog_solutions_path = os.path.join(test_resources_path, "catalog_solutions.json") + error = {"error": {"message": "HTTP Error."}} + err_msg = f"Error while fetching {solution_1_url} from AIBuilder: (500) HTTP Error." 
+ solution_2_path = os.path.join(test_resources_path, "solution_2.json") + fetched_resources = [] + with responses.RequestsMock() as mocked_requests: + with open(catalog_list_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_list_url, json=response, status=200) + with open(catalog_solutions_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_solutions_url, json=response, status=200) + mocked_requests.add(responses.GET, solution_1_url, json=error, status=500) + with open(solution_2_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, solution_2_url, json=response, status=200) + fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) + + assert len(fetched_resources) == 2 + last_modified, resource = fetched_resources[0] + assert last_modified is None + assert type(resource) == RecordError + assert resource.identifier is None + assert type(resource.error) == HTTPError + assert str(resource.error) == err_msg + last_modified, resource = fetched_resources[1] + assert last_modified == mocked_datetime_from + assert type(resource) == ResourceWithRelations + assert resource.resource_ORM_class == MLModel + assert resource.resource.platform == PlatformName.aibuilder + assert resource.resource.platform_resource_identifier == "2" + assert resource.resource.name == "Mocking Full Solution 2" + assert resource.resource.date_published == mocked_datetime_from + assert resource.resource.description == Text(plain="The mocked full solution 2.") + assert set(resource.resource.keyword) == {"Mocked tag 2."} + assert resource.resource.is_accessible_for_free + + +def test_solution_format_error(): + catalog_list_path = os.path.join(test_resources_path, "catalog_list.json") + catalog_solutions_path = os.path.join(test_resources_path, "catalog_solutions.json") + solution_1_path = os.path.join(test_resources_path, "solution_1_format_error.json") + solution_2_path = 
os.path.join(test_resources_path, "solution_2.json") + fetched_resources = [] + with responses.RequestsMock() as mocked_requests: + with open(catalog_list_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_list_url, json=response, status=200) + with open(catalog_solutions_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_solutions_url, json=response, status=200) + with open(solution_1_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, solution_1_url, json=response, status=200) + with open(solution_2_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, solution_2_url, json=response, status=200) + fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) + + assert len(fetched_resources) == 2 + last_modified, resource = fetched_resources[0] + assert last_modified == mocked_datetime_from + assert type(resource) == RecordError + assert resource.identifier == "1" + assert resource.error == "Bad structure on the received solution." 
+ last_modified, resource = fetched_resources[1] + assert last_modified == mocked_datetime_from + assert type(resource) == ResourceWithRelations + assert resource.resource_ORM_class == MLModel + assert resource.resource.platform == PlatformName.aibuilder + assert resource.resource.platform_resource_identifier == "2" + assert resource.resource.name == "Mocking Full Solution 2" + assert resource.resource.date_published == mocked_datetime_from + assert resource.resource.description == Text(plain="The mocked full solution 2.") + assert set(resource.resource.keyword) == {"Mocked tag 2."} + assert resource.resource.is_accessible_for_free + + +def test_solution_last_modified_exception(): + catalog_list_path = os.path.join(test_resources_path, "catalog_list.json") + catalog_solutions_path = os.path.join(test_resources_path, "catalog_solutions.json") + solution_1_path = os.path.join(test_resources_path, "solution_1_last_modified_exception.json") + solution_2_path = os.path.join(test_resources_path, "solution_2.json") + fetched_resources = [] + with responses.RequestsMock() as mocked_requests: + with open(catalog_list_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_list_url, json=response, status=200) + with open(catalog_solutions_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, catalog_solutions_url, json=response, status=200) + with open(solution_1_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, solution_1_url, json=response, status=200) + with open(solution_2_path, "r") as f: + response = json.load(f) + mocked_requests.add(responses.GET, solution_2_url, json=response, status=200) + fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) + + assert len(fetched_resources) == 2 + last_modified, resource = fetched_resources[0] + assert last_modified is None + assert type(resource) == RecordError + assert resource.identifier == "1" + assert 
type(resource.error) == KeyError + assert str(resource.error) == "'lastModified'" + last_modified, resource = fetched_resources[1] + assert last_modified == mocked_datetime_from + assert type(resource) == ResourceWithRelations + assert resource.resource_ORM_class == MLModel + assert resource.resource.platform == PlatformName.aibuilder + assert resource.resource.platform_resource_identifier == "2" + assert resource.resource.name == "Mocking Full Solution 2" + assert resource.resource.date_published == mocked_datetime_from + assert resource.resource.description == Text(plain="The mocked full solution 2.") + assert set(resource.resource.keyword) == {"Mocked tag 2."} + assert resource.resource.is_accessible_for_free diff --git a/src/tests/resources/connectors/aibuilder/catalog_solutions_format_error.json b/src/tests/resources/connectors/aibuilder/catalog_solutions_format_error.json new file mode 100644 index 00000000..914be58c --- /dev/null +++ b/src/tests/resources/connectors/aibuilder/catalog_solutions_format_error.json @@ -0,0 +1,7 @@ +[ + { + "name": "Mocking Solution 1", + "toolkitType": "SK", + "lastModified": "2023-09-01T00:00:00Z" + } +] diff --git a/src/tests/resources/connectors/aibuilder/empty_catalog_solutions.json b/src/tests/resources/connectors/aibuilder/empty_catalog_solutions.json new file mode 100644 index 00000000..fe51488c --- /dev/null +++ b/src/tests/resources/connectors/aibuilder/empty_catalog_solutions.json @@ -0,0 +1 @@ +[] diff --git a/src/tests/resources/connectors/aibuilder/solution_1_format_error.json b/src/tests/resources/connectors/aibuilder/solution_1_format_error.json new file mode 100644 index 00000000..d8ef00b4 --- /dev/null +++ b/src/tests/resources/connectors/aibuilder/solution_1_format_error.json @@ -0,0 +1,30 @@ +{ + "name": "Mocking Full Solution 1", + "description": "The mocked full solution 1.", + "toolkitType": "SK", + "category": "PR", + "lastModified": "2023-09-01T00:00:00Z", + "created": "2023-09-01T00:00:00Z", + 
"publisher": "The mocked publisher 1.", + "authors": [ + "The mocker author 1" + ], + "artifacts": [ + { + "artifactId": "1", + "artifactTypeCode": "PJ", + "name": "Mocking artifact 1", + "description": "The mocked artifact 1.", + "uri": "mocked_artifact_file_1.json", + "filename": "mocked_artifact_file_1.json", + "version": "1.0.0", + "size": 1, + "metadata": null, + "lastModified": "2023-09-01T00:00:00Z", + "created": "2023-09-01T00:00:00Z" + } + ], + "tags": [ + "Mocked tag 1." + ] +} diff --git a/src/tests/resources/connectors/aibuilder/solution_1_last_modified_exception.json b/src/tests/resources/connectors/aibuilder/solution_1_last_modified_exception.json new file mode 100644 index 00000000..103d48cd --- /dev/null +++ b/src/tests/resources/connectors/aibuilder/solution_1_last_modified_exception.json @@ -0,0 +1,30 @@ +{ + "fullId": "1", + "name": "Mocking Full Solution 1", + "description": "The mocked full solution 1.", + "toolkitType": "SK", + "category": "PR", + "created": "2023-09-01T00:00:00Z", + "publisher": "The mocked publisher 1.", + "authors": [ + "The mocker author 1" + ], + "artifacts": [ + { + "artifactId": "1", + "artifactTypeCode": "PJ", + "name": "Mocking artifact 1", + "description": "The mocked artifact 1.", + "uri": "mocked_artifact_file_1.json", + "filename": "mocked_artifact_file_1.json", + "version": "1.0.0", + "size": 1, + "metadata": null, + "lastModified": "2023-09-01T00:00:00Z", + "created": "2023-09-01T00:00:00Z" + } + ], + "tags": [ + "Mocked tag 1." + ] +} From c45c4f7d8ef3ea72f876742076b397a5e75aed63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Thu, 5 Dec 2024 00:23:07 +0100 Subject: [PATCH 09/21] TODO removed. Continuation after catalog error is not needed. 
--- .../aibuilder/test_aibuilder_mlmodel_connector.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py index dcaec095..8f40487d 100644 --- a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py +++ b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py @@ -172,8 +172,6 @@ def test_catalog_solutions_http_error(): assert resource.identifier is None assert type(resource.error) == HTTPError assert str(resource.error) == err_msg - # TODO: assert continuation when just one instance of several catalogs fails - # (not connector.is_concluded and next resources) def test_catalog_solutions_format_error(): @@ -198,8 +196,6 @@ def test_catalog_solutions_format_error(): assert resource.identifier is None assert type(resource.error) == KeyError assert str(resource.error) == "'fullId'" - # TODO: assert continuation when just one instance of several catalogs fails - # (not connector.is_concluded and next resources) def test_empty_catalog_solutions(): @@ -221,8 +217,6 @@ def test_empty_catalog_solutions(): assert type(resource) == RecordError assert resource.identifier is None assert resource.error == "Empty solution list." 
- # TODO: assert continuation when just one instance of several catalogs fails - # (not connector.is_concluded and next resources) def test_solution_http_error(): From 6f9e5c930a087e2de10b9a6c4898bad78b48a140 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 10 Dec 2024 12:28:40 +0100 Subject: [PATCH 10/21] Changed the venv name and removed the venv_aiod directory --- venv_aibuilder/bin/Activate.ps1 | 247 ----------------------- venv_aibuilder/bin/activate | 63 ------ venv_aibuilder/bin/activate.csh | 26 --- venv_aibuilder/bin/activate.fish | 69 ------- venv_aibuilder/bin/dotenv | 9 - venv_aibuilder/bin/futurize | 9 - venv_aibuilder/bin/gen_symkey.py | 37 ---- venv_aibuilder/bin/ghp-import | 9 - venv_aibuilder/bin/httpx | 9 - venv_aibuilder/bin/huggingface-cli | 9 - venv_aibuilder/bin/identify-cli | 9 - venv_aibuilder/bin/jwdecrypt.py | 73 ------- venv_aibuilder/bin/jwenc.py | 102 ---------- venv_aibuilder/bin/jwk_create.py | 43 ---- venv_aibuilder/bin/jwk_export.py | 23 --- venv_aibuilder/bin/jwkutil.py | 146 -------------- venv_aibuilder/bin/mako-render | 9 - venv_aibuilder/bin/markdown_py | 9 - venv_aibuilder/bin/mkdocs | 9 - venv_aibuilder/bin/mkdocs-get-deps | 9 - venv_aibuilder/bin/nodeenv | 9 - venv_aibuilder/bin/normalizer | 9 - venv_aibuilder/bin/oic-client-management | 9 - venv_aibuilder/bin/pasteurize | 9 - venv_aibuilder/bin/peek.py | 18 -- venv_aibuilder/bin/pip | 9 - venv_aibuilder/bin/pip3 | 9 - venv_aibuilder/bin/pip3.11 | 9 - venv_aibuilder/bin/pre-commit | 9 - venv_aibuilder/bin/py.test | 9 - venv_aibuilder/bin/pybabel | 9 - venv_aibuilder/bin/pygmentize | 9 - venv_aibuilder/bin/pyrsa-decrypt | 9 - venv_aibuilder/bin/pyrsa-encrypt | 9 - venv_aibuilder/bin/pyrsa-keygen | 9 - venv_aibuilder/bin/pyrsa-priv2pub | 9 - venv_aibuilder/bin/pyrsa-sign | 9 - venv_aibuilder/bin/pyrsa-verify | 9 - venv_aibuilder/bin/pytest | 9 - venv_aibuilder/bin/python | 1 - venv_aibuilder/bin/python3 | 1 - venv_aibuilder/bin/python3.11 | 1 - 
venv_aibuilder/bin/tqdm | 9 - venv_aibuilder/bin/uvicorn | 9 - venv_aibuilder/bin/virtualenv | 9 - venv_aibuilder/bin/watchmedo | 9 - venv_aibuilder/pyvenv.cfg | 5 - 47 files changed, 1143 deletions(-) delete mode 100644 venv_aibuilder/bin/Activate.ps1 delete mode 100644 venv_aibuilder/bin/activate delete mode 100644 venv_aibuilder/bin/activate.csh delete mode 100644 venv_aibuilder/bin/activate.fish delete mode 100755 venv_aibuilder/bin/dotenv delete mode 100755 venv_aibuilder/bin/futurize delete mode 100755 venv_aibuilder/bin/gen_symkey.py delete mode 100755 venv_aibuilder/bin/ghp-import delete mode 100755 venv_aibuilder/bin/httpx delete mode 100755 venv_aibuilder/bin/huggingface-cli delete mode 100755 venv_aibuilder/bin/identify-cli delete mode 100755 venv_aibuilder/bin/jwdecrypt.py delete mode 100755 venv_aibuilder/bin/jwenc.py delete mode 100755 venv_aibuilder/bin/jwk_create.py delete mode 100755 venv_aibuilder/bin/jwk_export.py delete mode 100755 venv_aibuilder/bin/jwkutil.py delete mode 100755 venv_aibuilder/bin/mako-render delete mode 100755 venv_aibuilder/bin/markdown_py delete mode 100755 venv_aibuilder/bin/mkdocs delete mode 100755 venv_aibuilder/bin/mkdocs-get-deps delete mode 100755 venv_aibuilder/bin/nodeenv delete mode 100755 venv_aibuilder/bin/normalizer delete mode 100755 venv_aibuilder/bin/oic-client-management delete mode 100755 venv_aibuilder/bin/pasteurize delete mode 100755 venv_aibuilder/bin/peek.py delete mode 100755 venv_aibuilder/bin/pip delete mode 100755 venv_aibuilder/bin/pip3 delete mode 100755 venv_aibuilder/bin/pip3.11 delete mode 100755 venv_aibuilder/bin/pre-commit delete mode 100755 venv_aibuilder/bin/py.test delete mode 100755 venv_aibuilder/bin/pybabel delete mode 100755 venv_aibuilder/bin/pygmentize delete mode 100755 venv_aibuilder/bin/pyrsa-decrypt delete mode 100755 venv_aibuilder/bin/pyrsa-encrypt delete mode 100755 venv_aibuilder/bin/pyrsa-keygen delete mode 100755 venv_aibuilder/bin/pyrsa-priv2pub delete mode 100755 
venv_aibuilder/bin/pyrsa-sign delete mode 100755 venv_aibuilder/bin/pyrsa-verify delete mode 100755 venv_aibuilder/bin/pytest delete mode 120000 venv_aibuilder/bin/python delete mode 120000 venv_aibuilder/bin/python3 delete mode 120000 venv_aibuilder/bin/python3.11 delete mode 100755 venv_aibuilder/bin/tqdm delete mode 100755 venv_aibuilder/bin/uvicorn delete mode 100755 venv_aibuilder/bin/virtualenv delete mode 100755 venv_aibuilder/bin/watchmedo delete mode 100644 venv_aibuilder/pyvenv.cfg diff --git a/venv_aibuilder/bin/Activate.ps1 b/venv_aibuilder/bin/Activate.ps1 deleted file mode 100644 index b49d77ba..00000000 --- a/venv_aibuilder/bin/Activate.ps1 +++ /dev/null @@ -1,247 +0,0 @@ -<# -.Synopsis -Activate a Python virtual environment for the current PowerShell session. - -.Description -Pushes the python executable for a virtual environment to the front of the -$Env:PATH environment variable and sets the prompt to signify that you are -in a Python virtual environment. Makes use of the command line switches as -well as the `pyvenv.cfg` file values present in the virtual environment. - -.Parameter VenvDir -Path to the directory that contains the virtual environment to activate. The -default value for this is the parent of the directory that the Activate.ps1 -script is located within. - -.Parameter Prompt -The prompt prefix to display when this virtual environment is activated. By -default, this prompt is the name of the virtual environment folder (VenvDir) -surrounded by parentheses and followed by a single space (ie. '(.venv) '). - -.Example -Activate.ps1 -Activates the Python virtual environment that contains the Activate.ps1 script. - -.Example -Activate.ps1 -Verbose -Activates the Python virtual environment that contains the Activate.ps1 script, -and shows extra information about the activation as it executes. - -.Example -Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv -Activates the Python virtual environment located in the specified location. 
- -.Example -Activate.ps1 -Prompt "MyPython" -Activates the Python virtual environment that contains the Activate.ps1 script, -and prefixes the current prompt with the specified string (surrounded in -parentheses) while the virtual environment is active. - -.Notes -On Windows, it may be required to enable this Activate.ps1 script by setting the -execution policy for the user. You can do this by issuing the following PowerShell -command: - -PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser - -For more information on Execution Policies: -https://go.microsoft.com/fwlink/?LinkID=135170 - -#> -Param( - [Parameter(Mandatory = $false)] - [String] - $VenvDir, - [Parameter(Mandatory = $false)] - [String] - $Prompt -) - -<# Function declarations --------------------------------------------------- #> - -<# -.Synopsis -Remove all shell session elements added by the Activate script, including the -addition of the virtual environment's Python executable from the beginning of -the PATH variable. - -.Parameter NonDestructive -If present, do not remove this function from the global namespace for the -session. - -#> -function global:deactivate ([switch]$NonDestructive) { - # Revert to original values - - # The prior prompt: - if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { - Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt - Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT - } - - # The prior PYTHONHOME: - if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { - Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME - Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME - } - - # The prior PATH: - if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { - Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH - Remove-Item -Path Env:_OLD_VIRTUAL_PATH - } - - # Just remove the VIRTUAL_ENV altogether: - if (Test-Path -Path Env:VIRTUAL_ENV) { - Remove-Item -Path env:VIRTUAL_ENV - } - - # Just remove VIRTUAL_ENV_PROMPT altogether. 
- if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) { - Remove-Item -Path env:VIRTUAL_ENV_PROMPT - } - - # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: - if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { - Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force - } - - # Leave deactivate function in the global namespace if requested: - if (-not $NonDestructive) { - Remove-Item -Path function:deactivate - } -} - -<# -.Description -Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the -given folder, and returns them in a map. - -For each line in the pyvenv.cfg file, if that line can be parsed into exactly -two strings separated by `=` (with any amount of whitespace surrounding the =) -then it is considered a `key = value` line. The left hand string is the key, -the right hand is the value. - -If the value starts with a `'` or a `"` then the first and last character is -stripped from the value before being captured. - -.Parameter ConfigDir -Path to the directory that contains the `pyvenv.cfg` file. -#> -function Get-PyVenvConfig( - [String] - $ConfigDir -) { - Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" - - # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). - $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue - - # An empty map will be returned if no config file is found. - $pyvenvConfig = @{ } - - if ($pyvenvConfigPath) { - - Write-Verbose "File exists, parse `key = value` lines" - $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath - - $pyvenvConfigContent | ForEach-Object { - $keyval = $PSItem -split "\s*=\s*", 2 - if ($keyval[0] -and $keyval[1]) { - $val = $keyval[1] - - # Remove extraneous quotations around a string value. 
- if ("'""".Contains($val.Substring(0, 1))) { - $val = $val.Substring(1, $val.Length - 2) - } - - $pyvenvConfig[$keyval[0]] = $val - Write-Verbose "Adding Key: '$($keyval[0])'='$val'" - } - } - } - return $pyvenvConfig -} - - -<# Begin Activate script --------------------------------------------------- #> - -# Determine the containing directory of this script -$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition -$VenvExecDir = Get-Item -Path $VenvExecPath - -Write-Verbose "Activation script is located in path: '$VenvExecPath'" -Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" -Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" - -# Set values required in priority: CmdLine, ConfigFile, Default -# First, get the location of the virtual environment, it might not be -# VenvExecDir if specified on the command line. -if ($VenvDir) { - Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" -} -else { - Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." - $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") - Write-Verbose "VenvDir=$VenvDir" -} - -# Next, read the `pyvenv.cfg` file to determine any required value such -# as `prompt`. -$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir - -# Next, set the prompt from the command line, or the config file, or -# just use the name of the virtual environment folder. -if ($Prompt) { - Write-Verbose "Prompt specified as argument, using '$Prompt'" -} -else { - Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" - if ($pyvenvCfg -and $pyvenvCfg['prompt']) { - Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" - $Prompt = $pyvenvCfg['prompt']; - } - else { - Write-Verbose " Setting prompt based on parent's directory's name. 
(Is the directory name passed to venv module when creating the virtual environment)" - Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" - $Prompt = Split-Path -Path $venvDir -Leaf - } -} - -Write-Verbose "Prompt = '$Prompt'" -Write-Verbose "VenvDir='$VenvDir'" - -# Deactivate any currently active virtual environment, but leave the -# deactivate function in place. -deactivate -nondestructive - -# Now set the environment variable VIRTUAL_ENV, used by many tools to determine -# that there is an activated venv. -$env:VIRTUAL_ENV = $VenvDir - -if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { - - Write-Verbose "Setting prompt to '$Prompt'" - - # Set the prompt to include the env name - # Make sure _OLD_VIRTUAL_PROMPT is global - function global:_OLD_VIRTUAL_PROMPT { "" } - Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT - New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt - - function global:prompt { - Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " - _OLD_VIRTUAL_PROMPT - } - $env:VIRTUAL_ENV_PROMPT = $Prompt -} - -# Clear PYTHONHOME -if (Test-Path -Path Env:PYTHONHOME) { - Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME - Remove-Item -Path Env:PYTHONHOME -} - -# Add the venv to the PATH -Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH -$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" diff --git a/venv_aibuilder/bin/activate b/venv_aibuilder/bin/activate deleted file mode 100644 index 92ef1888..00000000 --- a/venv_aibuilder/bin/activate +++ /dev/null @@ -1,63 +0,0 @@ -# This file must be used with "source bin/activate" *from bash* -# you cannot run it directly - -deactivate () { - # reset old environment variables - if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then - PATH="${_OLD_VIRTUAL_PATH:-}" - export PATH - unset 
_OLD_VIRTUAL_PATH - fi - if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then - PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" - export PYTHONHOME - unset _OLD_VIRTUAL_PYTHONHOME - fi - - # Call hash to forget past commands. Without forgetting - # past commands the $PATH changes we made may not be respected - hash -r 2> /dev/null - - if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then - PS1="${_OLD_VIRTUAL_PS1:-}" - export PS1 - unset _OLD_VIRTUAL_PS1 - fi - - unset VIRTUAL_ENV - unset VIRTUAL_ENV_PROMPT - if [ ! "${1:-}" = "nondestructive" ] ; then - # Self destruct! - unset -f deactivate - fi -} - -# unset irrelevant variables -deactivate nondestructive - -VIRTUAL_ENV="/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder" -export VIRTUAL_ENV - -_OLD_VIRTUAL_PATH="$PATH" -PATH="$VIRTUAL_ENV/bin:$PATH" -export PATH - -# unset PYTHONHOME if set -# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) -# could use `if (set -u; : $PYTHONHOME) ;` in bash -if [ -n "${PYTHONHOME:-}" ] ; then - _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" - unset PYTHONHOME -fi - -if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then - _OLD_VIRTUAL_PS1="${PS1:-}" - PS1="(venv_aibuilder) ${PS1:-}" - export PS1 - VIRTUAL_ENV_PROMPT="(venv_aibuilder) " - export VIRTUAL_ENV_PROMPT -fi - -# Call hash to forget past commands. Without forgetting -# past commands the $PATH changes we made may not be respected -hash -r 2> /dev/null diff --git a/venv_aibuilder/bin/activate.csh b/venv_aibuilder/bin/activate.csh deleted file mode 100644 index 8a14298b..00000000 --- a/venv_aibuilder/bin/activate.csh +++ /dev/null @@ -1,26 +0,0 @@ -# This file must be used with "source bin/activate.csh" *from csh*. -# You cannot run it directly. -# Created by Davide Di Blasi . 
-# Ported to Python 3.3 venv by Andrew Svetlov - -alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate' - -# Unset irrelevant variables. -deactivate nondestructive - -setenv VIRTUAL_ENV "/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder" - -set _OLD_VIRTUAL_PATH="$PATH" -setenv PATH "$VIRTUAL_ENV/bin:$PATH" - - -set _OLD_VIRTUAL_PROMPT="$prompt" - -if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then - set prompt = "(venv_aibuilder) $prompt" - setenv VIRTUAL_ENV_PROMPT "(venv_aibuilder) " -endif - -alias pydoc python -m pydoc - -rehash diff --git a/venv_aibuilder/bin/activate.fish b/venv_aibuilder/bin/activate.fish deleted file mode 100644 index 53ecf33d..00000000 --- a/venv_aibuilder/bin/activate.fish +++ /dev/null @@ -1,69 +0,0 @@ -# This file must be used with "source /bin/activate.fish" *from fish* -# (https://fishshell.com/); you cannot run it directly. - -function deactivate -d "Exit virtual environment and return to normal shell environment" - # reset old environment variables - if test -n "$_OLD_VIRTUAL_PATH" - set -gx PATH $_OLD_VIRTUAL_PATH - set -e _OLD_VIRTUAL_PATH - end - if test -n "$_OLD_VIRTUAL_PYTHONHOME" - set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME - set -e _OLD_VIRTUAL_PYTHONHOME - end - - if test -n "$_OLD_FISH_PROMPT_OVERRIDE" - set -e _OLD_FISH_PROMPT_OVERRIDE - # prevents error when using nested fish instances (Issue #93858) - if functions -q _old_fish_prompt - functions -e fish_prompt - functions -c _old_fish_prompt fish_prompt - functions -e _old_fish_prompt - end - end - - set -e VIRTUAL_ENV - set -e VIRTUAL_ENV_PROMPT - if test "$argv[1]" != "nondestructive" - # Self-destruct! - functions -e deactivate - end -end - -# Unset irrelevant variables. 
-deactivate nondestructive - -set -gx VIRTUAL_ENV "/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder" - -set -gx _OLD_VIRTUAL_PATH $PATH -set -gx PATH "$VIRTUAL_ENV/bin" $PATH - -# Unset PYTHONHOME if set. -if set -q PYTHONHOME - set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME - set -e PYTHONHOME -end - -if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" - # fish uses a function instead of an env var to generate the prompt. - - # Save the current fish_prompt function as the function _old_fish_prompt. - functions -c fish_prompt _old_fish_prompt - - # With the original prompt function renamed, we can override with our own. - function fish_prompt - # Save the return status of the last command. - set -l old_status $status - - # Output the venv prompt; color taken from the blue of the Python logo. - printf "%s%s%s" (set_color 4B8BBE) "(venv_aibuilder) " (set_color normal) - - # Restore the return status of the previous command. - echo "exit $old_status" | . - # Output the original/"old" prompt. 
- _old_fish_prompt - end - - set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" - set -gx VIRTUAL_ENV_PROMPT "(venv_aibuilder) " -end diff --git a/venv_aibuilder/bin/dotenv b/venv_aibuilder/bin/dotenv deleted file mode 100755 index 5f165172..00000000 --- a/venv_aibuilder/bin/dotenv +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from dotenv.__main__ import cli - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(cli()) diff --git a/venv_aibuilder/bin/futurize b/venv_aibuilder/bin/futurize deleted file mode 100755 index 70bcef7d..00000000 --- a/venv_aibuilder/bin/futurize +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from libfuturize.main import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/gen_symkey.py b/venv_aibuilder/bin/gen_symkey.py deleted file mode 100755 index 76bfe9d6..00000000 --- a/venv_aibuilder/bin/gen_symkey.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -import json -import random -import string - -import argparse - -from jwkest.jwk import SYMKey - -__author__ = "regu0004" - - -def rndstr(size=6, chars=string.ascii_uppercase + string.digits): - return "".join(random.choice(chars) for _ in range(size)) - - -def main(): - parser = argparse.ArgumentParser( - description="Generate a new symmetric key and print it to stdout." 
- ) - parser.add_argument( - "-n", - dest="key_length", - default=48, - type=int, - help="Length of the random string used as key.", - ) - parser.add_argument("--kid", dest="kid", help="Key id.") - args = parser.parse_args() - - key = SYMKey(key=rndstr(args.key_length), kid=args.kid).serialize() - jwks = dict(keys=[key]) - print(json.dumps(jwks)) - - -if __name__ == "__main__": - main() diff --git a/venv_aibuilder/bin/ghp-import b/venv_aibuilder/bin/ghp-import deleted file mode 100755 index 6a140ea3..00000000 --- a/venv_aibuilder/bin/ghp-import +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from ghp_import import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/httpx b/venv_aibuilder/bin/httpx deleted file mode 100755 index 89b4d127..00000000 --- a/venv_aibuilder/bin/httpx +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from httpx import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/huggingface-cli b/venv_aibuilder/bin/huggingface-cli deleted file mode 100755 index e7138a18..00000000 --- a/venv_aibuilder/bin/huggingface-cli +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from huggingface_hub.commands.huggingface_cli import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/identify-cli b/venv_aibuilder/bin/identify-cli deleted file mode 100755 index 881ab93a..00000000 --- a/venv_aibuilder/bin/identify-cli +++ /dev/null @@ -1,9 
+0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from identify.cli import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/jwdecrypt.py b/venv_aibuilder/bin/jwdecrypt.py deleted file mode 100755 index def148fb..00000000 --- a/venv_aibuilder/bin/jwdecrypt.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -import sys - -__author__ = "rohe0002" - -import argparse -import requests -from jwkest.jwk import load_jwks_from_url, RSAKey -from jwkest.jwk import rsa_load -from jwkest.jwk import load_x509_cert -from jwkest.jwk import load_jwks -from jwkest.jwk import import_rsa_key_from_file -from jwkest.jwe import JWE - - -def assign(lst): - _keys = {} - for key in lst: - try: - _keys[key.kty].append(key) - except KeyError: - _keys[key.kty] = [key] - return _keys - - -def lrequest(url, method="GET", **kwargs): - return requests.request(method, url, **kwargs) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("-d", dest="debug", action="store_true", help="Print debug information") - # parser.add_argument('-v', dest='verbose', action='store_true', - # help="Print runtime information") - parser.add_argument("-x", dest="x509_file", help="File containing a X509 certificate") - parser.add_argument( - "-X", dest="x509_url", help="URL pointing to a file containing a X509 " "certificate" - ) - parser.add_argument("-j", dest="jwk_file", help="File containing a JWK") - parser.add_argument("-J", dest="jwk_url", help="URL pointing to a file containing a JWK") - parser.add_argument("-r", dest="rsa_file", help="A file containing a RSA key") - parser.add_argument("-i", dest="int", help="Integrity method") - parser.add_argument("-f", dest="file", help="File with the message") - 
parser.add_argument("message", nargs="?", help="The message to encrypt") - - args = parser.parse_args() - - keys = {} - if args.jwk_url: - keys = load_jwks_from_url(args.jwk_url) - elif args.jwk_file: - keys = load_jwks(open(args.jwk_file).read()) - elif args.x509_url: - keys = load_x509_cert(args.x509_url, {}) - elif args.x509_file: - keys = [import_rsa_key_from_file(args.x509_file)] - elif args.rsa_file: - key = rsa_load(args.rsa_file) - rsa_key = RSAKey(key=key) - rsa_key.serialize() - keys = [rsa_key] - else: - print("Needs encryption key") - exit() - - if args.file: - msg = open(args.file).read() - msg = msg.strip("\n\r") - else: - msg = args.message - - jwe = JWE() - print(jwe.decrypt(msg, keys)) diff --git a/venv_aibuilder/bin/jwenc.py b/venv_aibuilder/bin/jwenc.py deleted file mode 100755 index e71825ed..00000000 --- a/venv_aibuilder/bin/jwenc.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -from __future__ import print_function -import argparse - -import sys - -__author__ = "rohe0002" - -import requests -from jwkest.jwk import load_jwks_from_url, RSAKey -from jwkest.jwk import rsa_load -from jwkest.jwk import load_x509_cert -from jwkest.jwk import load_jwks -from jwkest.jwe import SUPPORTED -from jwkest.jwe import JWE -from jwkest.jwk import import_rsa_key_from_file - - -def assign(lst): - _keys = {} - for key in lst: - try: - _keys[key.kty].append(key) - except KeyError: - _keys[key.kty] = [key] - return _keys - - -def lrequest(url, method="GET", **kwargs): - return requests.request(method, url, **kwargs) - - -# arg can be RSA-OAEP -# enc for instance A128CBC+HS256 - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("-d", dest="debug", action="store_true", help="Print debug information") - parser.add_argument("-v", dest="verbose", action="store_true", help="Print runtime information") - parser.add_argument("-x", dest="x509_file", help="File containing 
a X509 certificate") - parser.add_argument( - "-X", dest="x509_url", help="URL pointing to a file containing a X509 " "certificate" - ) - parser.add_argument("-j", dest="jwk_file", help="File containing a JWK") - parser.add_argument("-J", dest="jwk_url", help="URL pointing to a file containing a JWK") - parser.add_argument("-r", dest="rsa_file", help="A file containing a RSA key") - parser.add_argument("-a", dest="alg", help="The encryption algorithm") - parser.add_argument("-e", dest="enc", help="The encryption method") - parser.add_argument( - "-m", dest="mode", default="public", help="Whether a public or private key should be used" - ) - parser.add_argument("-f", dest="file", help="File to be encrypted") - parser.add_argument("message", nargs="?", help="The message to encrypt") - - args = parser.parse_args() - - keys = {} - if args.jwk_url: - keys = load_jwks_from_url(args.jwk_url) - elif args.jwk_file: - keys = load_jwks(open(args.jwk_file).read()) - elif args.x509_url: - # load_x509_cert returns list of 2-tuples - keys = [RSAKey(key=x) for x, y in load_x509_cert(lrequest, args.x509_url)] - for key in keys: - key.serialize() - elif args.x509_file: - # import_rsa_key_from_file returns RSA key instance - _key = RSAKey(key=import_rsa_key_from_file(args.x509_file)) - _key.serialize() - keys = [_key] - elif args.rsa_file: - _key = RSAKey(key=rsa_load(args.rsa_file)) - _key.serialize() - keys = [_key] - else: - print("Needs encryption key", file=sys.stderr) - exit() - - if not args.enc or not args.alg: - print("There are no default encryption methods", file=sys.stderr) - exit() - - if args.enc not in SUPPORTED["enc"]: - print("Encryption method %s not supported", args.enc, file=sys.stderr) - print("Methods supported: %s", SUPPORTED["enc"], file=sys.stderr) - exit() - - if args.alg not in SUPPORTED["alg"]: - print("Encryption algorithm %s not supported", args.alg, file=sys.stderr) - print("Algorithms supported: %s", SUPPORTED["alg"], file=sys.stderr) - exit() - - if 
args.file: - message = open(args.file).read() - elif args.message == "-": - message = sys.stdin.read() - else: - message = args.message - - jwe = JWE(message, alg=args.alg, enc=args.enc) - print(jwe.encrypt(keys)) diff --git a/venv_aibuilder/bin/jwk_create.py b/venv_aibuilder/bin/jwk_create.py deleted file mode 100755 index 23f04288..00000000 --- a/venv_aibuilder/bin/jwk_create.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -import json -from Cryptodome.PublicKey import RSA -import argparse -import os -from jwkest.jwk import RSAKey - -__author__ = "rolandh" - - -def create_and_store_rsa_key_pair(name="pyoidc", path=".", size=1024): - key = RSA.generate(size) - - keyfile = os.path.join(path, name) - - f = open("%s.key" % keyfile, "w") - f.write(key.exportKey("PEM")) - f.close() - f = open("%s.pub" % keyfile, "w") - f.write(key.publickey().exportKey("PEM")) - f.close() - - rsa_key = RSAKey(key=key) - rsa_key.serialize() - # This will create JWK from the public RSA key - jwk_spec = json.dumps(rsa_key.to_dict(), "enc") - f = open(keyfile + ".jwk", "w") - f.write(str(jwk_spec)) - f.close() - - return key - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - parser.add_argument("-n", dest="name", default="pyoidc", help="file names") - parser.add_argument("-p", dest="path", default=".", help="Path to the directory for the files") - parser.add_argument("-s", dest="size", default=1024, help="Key size", type=int) - - args = parser.parse_args() - - create_and_store_rsa_key_pair(args.name, args.path, args.size) diff --git a/venv_aibuilder/bin/jwk_export.py b/venv_aibuilder/bin/jwk_export.py deleted file mode 100755 index e4d28340..00000000 --- a/venv_aibuilder/bin/jwk_export.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -import os -import argparse -from jwkest.jwk import RSAKey -from jwkest.jwk import rsa_load -from 
jwkest.jwk import dump_jwks - -__author__ = "rolandh" - -parser = argparse.ArgumentParser() -parser.add_argument("-n", dest="name", default="pyoidc", help="file names") -parser.add_argument("-p", dest="path", default=".", help="Path to the directory for the files") -parser.add_argument("-k", dest="key", help="Key file") - -args = parser.parse_args() - -rsa_key = RSAKey(key=rsa_load(args.key)) - -keyfile = os.path.join(args.path, args.name) - -f = open(keyfile + ".jwk", "w") -f.write(dump_jwks([rsa_key])) -f.close() diff --git a/venv_aibuilder/bin/jwkutil.py b/venv_aibuilder/bin/jwkutil.py deleted file mode 100755 index d95ca36c..00000000 --- a/venv_aibuilder/bin/jwkutil.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -import json -import logging -import sys -from jwkest.jwe import JWE - -__author__ = "rohe0002" - -import argparse -import requests -from jwkest.jwk import RSAKey, KEYS -from jwkest.jwk import keyrep -from jwkest.jwk import import_rsa_key_from_file -from jwkest.jwk import SYMKey -from jwkest.jws import JWS - - -def setup_logging(log_file): - logger = logging.getLogger("") - hdlr = logging.FileHandler(log_file) - base_formatter = logging.Formatter("%(asctime)s %(name)s:%(levelname)s %(message)s") - hdlr.setFormatter(base_formatter) - logger.addHandler(hdlr) - logger.setLevel(logging.DEBUG) - - -def assign(lst): - keys = {} - for typ, key in lst: - try: - keys[typ].append(key) - except KeyError: - keys[typ] = [key] - return keys - - -def lrequest(url, method="GET", **kwargs): - return requests.request(method, url, **kwargs) - - -def sign(msg, key, alg="", msgtype=None): - """ - - :param msg: The message to sign - :param key: The signing key - :param alg: Which signing algorithm to use, this information may - appear in the headers dictionary - :param msgtype: The type of payload - :return: A JWS - """ - _jws = JWS(msg, alg=alg) - if msgtype: - _jws["typ"] = msgtype - - return 
_jws.sign_compact(key) - - -def verify(msg, keys, allow_none=False, sigalg=None): - _jws = JWS() - return _jws.verify_compact(msg, keys, allow_none, sigalg) - - -def encrypt(msg, keys, alg, enc): - _jwe = JWE(msg, alg=alg, enc=enc) - return _jwe.encrypt(keys) - - -def decrypt(msg, keys): - _jwe = JWE() - return _jwe.decrypt(msg, keys) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("-s", dest="sign", action="store_true") - parser.add_argument("-v", dest="verify", action="store_true") - parser.add_argument("-e", dest="encrypt", action="store_true") - parser.add_argument("-d", dest="decrypt", action="store_true") - parser.add_argument("-f", dest="msg_file", help="File containing a message") - parser.add_argument("-r", dest="rsa_file", help="File containing a RSA key") - parser.add_argument("-k", dest="hmac_key", help="If using a HMAC algorithm this is the key") - parser.add_argument("-a", dest="alg", help="The signing algorithm") - parser.add_argument("-A", dest="encalg", help="The encryption alg algorithm") - parser.add_argument("-E", dest="encenc", help="The encryption enc algorithm") - parser.add_argument("-j", dest="jwk", help="JSON Web Key") - parser.add_argument("-J", dest="jwks", help="JSON Web Keys") - parser.add_argument("-i", dest="kid", help="key id") - parser.add_argument("-l", dest="log", help="logfile name") - parser.add_argument("-t", dest="msgtype", help="JWT message type") - parser.add_argument("-u", dest="jwks_url", help="JSON Web Keys URL") - parser.add_argument("message", nargs="?", help="The message") - - args = parser.parse_args() - - if args.log: - setup_logging(args.log) - - _kid = args.kid - keys = [] - if args.rsa_file: - keys.append(RSAKey(key=import_rsa_key_from_file(args.rsa_file), kid=_kid)) - if args.hmac_key: - keys.append(SYMKey(key=args.hmac_key)) - - if args.jwk: - kspec = json.loads(open(args.jwk).read()) - keys.append(keyrep(kspec)) - - if args.jwks: - _k = KEYS() - 
_k.load_jwks(open(args.jwks).read()) - keys.extend(_k._keys) - - if args.jwks_url: - _k = KEYS() - _k.load_from_url(args.jwks_url, False) - keys.extend(_k._keys) - - if not keys: - exit(-1) - - if args.msg_file: - message = open(args.msg_file).read().strip("\n") - elif args.message == "-": - message = sys.stdin.read() - else: - message = args.message - - if args.sign: - _msg = sign(message, keys, args.alg, args.msgtype) - if args.encrypt: - _msg = encrypt(_msg, keys, args.encalg, args.encenc) - print(_msg) - elif args.encrypt: - print(encrypt(message, keys, args.encalg, args.encenc)) - else: - if args.decrypt: - _msg, _res = decrypt(message, keys) - else: - _msg = message - - if args.verify: - print(verify(_msg, keys)) - -# -e -J edmund.jwks -f text.json -E "A128CBC-HS256" -A "RSA1_5" -l ju.log -# -d -r op.key -f edmund.jwe -i a0 diff --git a/venv_aibuilder/bin/mako-render b/venv_aibuilder/bin/mako-render deleted file mode 100755 index 62771c85..00000000 --- a/venv_aibuilder/bin/mako-render +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from mako.cmd import cmdline - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(cmdline()) diff --git a/venv_aibuilder/bin/markdown_py b/venv_aibuilder/bin/markdown_py deleted file mode 100755 index 37506799..00000000 --- a/venv_aibuilder/bin/markdown_py +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from markdown.__main__ import run - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(run()) diff --git a/venv_aibuilder/bin/mkdocs b/venv_aibuilder/bin/mkdocs deleted file mode 100755 index f2f1aa1d..00000000 --- a/venv_aibuilder/bin/mkdocs +++ /dev/null @@ -1,9 +0,0 @@ 
-#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from mkdocs.__main__ import cli - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(cli()) diff --git a/venv_aibuilder/bin/mkdocs-get-deps b/venv_aibuilder/bin/mkdocs-get-deps deleted file mode 100755 index 4403292c..00000000 --- a/venv_aibuilder/bin/mkdocs-get-deps +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from mkdocs_get_deps.__main__ import cli - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(cli()) diff --git a/venv_aibuilder/bin/nodeenv b/venv_aibuilder/bin/nodeenv deleted file mode 100755 index 28b2d983..00000000 --- a/venv_aibuilder/bin/nodeenv +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from nodeenv import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/normalizer b/venv_aibuilder/bin/normalizer deleted file mode 100755 index 8ca8e7b7..00000000 --- a/venv_aibuilder/bin/normalizer +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from charset_normalizer.cli import cli_detect - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(cli_detect()) diff --git a/venv_aibuilder/bin/oic-client-management b/venv_aibuilder/bin/oic-client-management deleted file mode 100755 index b3c7007e..00000000 --- a/venv_aibuilder/bin/oic-client-management +++ /dev/null @@ -1,9 +0,0 @@ 
-#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from oic.utils.client_management import run - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(run()) diff --git a/venv_aibuilder/bin/pasteurize b/venv_aibuilder/bin/pasteurize deleted file mode 100755 index ac7f75c8..00000000 --- a/venv_aibuilder/bin/pasteurize +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from libpasteurize.main import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/peek.py b/venv_aibuilder/bin/peek.py deleted file mode 100755 index 2382d63f..00000000 --- a/venv_aibuilder/bin/peek.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -import sys -from jwkest import jwe -from jwkest import jws - -__author__ = "roland" - -jwt = sys.argv[1] - -_jw = jwe.factory(jwt) -if _jw: - print("jwe") -else: - _jw = jws.factory(jwt) - if _jw: - print("jws") - print(_jw.jwt.headers) - print(_jw.jwt.part[1]) diff --git a/venv_aibuilder/bin/pip b/venv_aibuilder/bin/pip deleted file mode 100755 index 4c0174a8..00000000 --- a/venv_aibuilder/bin/pip +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python3.11 -# -*- coding: utf-8 -*- -import re -import sys -from pip._internal.cli.main import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/pip3 b/venv_aibuilder/bin/pip3 deleted file mode 100755 index 4c0174a8..00000000 --- a/venv_aibuilder/bin/pip3 +++ /dev/null @@ -1,9 +0,0 @@ 
-#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python3.11 -# -*- coding: utf-8 -*- -import re -import sys -from pip._internal.cli.main import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/pip3.11 b/venv_aibuilder/bin/pip3.11 deleted file mode 100755 index 4c0174a8..00000000 --- a/venv_aibuilder/bin/pip3.11 +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python3.11 -# -*- coding: utf-8 -*- -import re -import sys -from pip._internal.cli.main import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/pre-commit b/venv_aibuilder/bin/pre-commit deleted file mode 100755 index 1b2fb1b7..00000000 --- a/venv_aibuilder/bin/pre-commit +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from pre_commit.main import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/py.test b/venv_aibuilder/bin/py.test deleted file mode 100755 index ba0e8dce..00000000 --- a/venv_aibuilder/bin/py.test +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from pytest import console_main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(console_main()) diff --git a/venv_aibuilder/bin/pybabel b/venv_aibuilder/bin/pybabel deleted file mode 100755 index 40102580..00000000 --- a/venv_aibuilder/bin/pybabel +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re 
-import sys -from babel.messages.frontend import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/pygmentize b/venv_aibuilder/bin/pygmentize deleted file mode 100755 index dec8b2ac..00000000 --- a/venv_aibuilder/bin/pygmentize +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from pygments.cmdline import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/pyrsa-decrypt b/venv_aibuilder/bin/pyrsa-decrypt deleted file mode 100755 index 44f5598f..00000000 --- a/venv_aibuilder/bin/pyrsa-decrypt +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from rsa.cli import decrypt - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(decrypt()) diff --git a/venv_aibuilder/bin/pyrsa-encrypt b/venv_aibuilder/bin/pyrsa-encrypt deleted file mode 100755 index 15695f7e..00000000 --- a/venv_aibuilder/bin/pyrsa-encrypt +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from rsa.cli import encrypt - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(encrypt()) diff --git a/venv_aibuilder/bin/pyrsa-keygen b/venv_aibuilder/bin/pyrsa-keygen deleted file mode 100755 index dfba74a4..00000000 --- a/venv_aibuilder/bin/pyrsa-keygen +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from rsa.cli import keygen - -if __name__ == "__main__": - sys.argv[0] = 
re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(keygen()) diff --git a/venv_aibuilder/bin/pyrsa-priv2pub b/venv_aibuilder/bin/pyrsa-priv2pub deleted file mode 100755 index d07fdcb2..00000000 --- a/venv_aibuilder/bin/pyrsa-priv2pub +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from rsa.util import private_to_public - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(private_to_public()) diff --git a/venv_aibuilder/bin/pyrsa-sign b/venv_aibuilder/bin/pyrsa-sign deleted file mode 100755 index 4c36dd56..00000000 --- a/venv_aibuilder/bin/pyrsa-sign +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from rsa.cli import sign - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(sign()) diff --git a/venv_aibuilder/bin/pyrsa-verify b/venv_aibuilder/bin/pyrsa-verify deleted file mode 100755 index 3784e174..00000000 --- a/venv_aibuilder/bin/pyrsa-verify +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from rsa.cli import verify - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(verify()) diff --git a/venv_aibuilder/bin/pytest b/venv_aibuilder/bin/pytest deleted file mode 100755 index ba0e8dce..00000000 --- a/venv_aibuilder/bin/pytest +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from pytest import console_main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(console_main()) diff --git 
a/venv_aibuilder/bin/python b/venv_aibuilder/bin/python deleted file mode 120000 index 6e7f3c7d..00000000 --- a/venv_aibuilder/bin/python +++ /dev/null @@ -1 +0,0 @@ -python3.11 \ No newline at end of file diff --git a/venv_aibuilder/bin/python3 b/venv_aibuilder/bin/python3 deleted file mode 120000 index 6e7f3c7d..00000000 --- a/venv_aibuilder/bin/python3 +++ /dev/null @@ -1 +0,0 @@ -python3.11 \ No newline at end of file diff --git a/venv_aibuilder/bin/python3.11 b/venv_aibuilder/bin/python3.11 deleted file mode 120000 index 3cf1fbdc..00000000 --- a/venv_aibuilder/bin/python3.11 +++ /dev/null @@ -1 +0,0 @@ -/opt/homebrew/opt/python@3.11/bin/python3.11 \ No newline at end of file diff --git a/venv_aibuilder/bin/tqdm b/venv_aibuilder/bin/tqdm deleted file mode 100755 index 4fa88c0e..00000000 --- a/venv_aibuilder/bin/tqdm +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from tqdm.cli import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/uvicorn b/venv_aibuilder/bin/uvicorn deleted file mode 100755 index c7ffbfc0..00000000 --- a/venv_aibuilder/bin/uvicorn +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from uvicorn.main import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/bin/virtualenv b/venv_aibuilder/bin/virtualenv deleted file mode 100755 index 654eebe6..00000000 --- a/venv_aibuilder/bin/virtualenv +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from virtualenv.__main__ import run_with_catch - -if __name__ == "__main__": - 
sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(run_with_catch()) diff --git a/venv_aibuilder/bin/watchmedo b/venv_aibuilder/bin/watchmedo deleted file mode 100755 index 0e181958..00000000 --- a/venv_aibuilder/bin/watchmedo +++ /dev/null @@ -1,9 +0,0 @@ -#!/Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder/bin/python -# -*- coding: utf-8 -*- -import re -import sys -from watchdog.watchmedo import main - -if __name__ == "__main__": - sys.argv[0] = re.sub(r"(-script\.pyw|\.exe)?$", "", sys.argv[0]) - sys.exit(main()) diff --git a/venv_aibuilder/pyvenv.cfg b/venv_aibuilder/pyvenv.cfg deleted file mode 100644 index c18904e9..00000000 --- a/venv_aibuilder/pyvenv.cfg +++ /dev/null @@ -1,5 +0,0 @@ -home = /opt/homebrew/opt/python@3.11/bin -include-system-site-packages = false -version = 3.11.9 -executable = /opt/homebrew/Cellar/python@3.11/3.11.9_1/Frameworks/Python.framework/Versions/3.11/bin/python3.11 -command = /opt/homebrew/opt/python@3.11/bin/python3.11 -m venv /Users/adrian/Documents/Repositorios/AIOD_aibuilder/venv_aibuilder From f6fe3302f269c283a310205c0688626da2015da1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 10 Dec 2024 12:39:49 +0100 Subject: [PATCH 11/21] Added aibuilder to the example for usin connectors on the README --- docs/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/README.md b/docs/README.md index 77f6b8f7..2f9d4233 100644 --- a/docs/README.md +++ b/docs/README.md @@ -132,13 +132,17 @@ It follows the same structure as the default file, but you only need to specify You can specify different connectors using ```bash -docker compose --profile examples --profile huggingface-datasets --profile openml --profile zenodo-datasets up -d -docker compose --profile examples --profile huggingface-datasets --profile openml --profile zenodo-datasets down +docker compose --profile aibuilder --profile examples --profile huggingface-datasets --profile 
openml --profile zenodo-datasets up -d +docker compose --profile aibuilder --profile examples --profile huggingface-datasets --profile openml --profile zenodo-datasets down ``` Make sure you use the same profile for `up` and `down`, or use `./scripts/down.sh` (see below), otherwise some containers might keep running. +##### Configuring AIBuilder connector +To access the AIBuilder API you need to provide a valid API token though the `API_TOKEN` variable. \ +Use the `override.env` file for that as explained above. + ### Shorthands We provide two auxiliary scripts for launching docker containers and bringing them down. The first, `./scripts/up.sh` invokes `docker compose up -d` and takes any number of profiles to launch as parameters. @@ -150,10 +154,6 @@ E.g., with `USE_LOCAL_DEV` set to `true`, `./scripts/up.sh` resolves to: The second script is a convenience for bringing down all services, including all profiles: `./scripts/down.sh` -#### Configuring AIBuilder connector -To access the AIBuilder API you need to provide a valid API token though the `API_TOKEN` variable. \ -Use the `override.env` file for that as explained above. - #### Local Installation If you want to run the server locally, you need **Python 3.11**. 
From 4d7eb434488b0a6eae1a69b5080ac9686c6e5a6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 10 Dec 2024 13:03:33 +0100 Subject: [PATCH 12/21] Documentation texts added to the aibuilder connector class and methods --- .../aibuilder/aibuilder_mlmodel_connector.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py index 94e0aaf0..abd85198 100644 --- a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py +++ b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py @@ -39,6 +39,11 @@ class AIBuilderMLModelConnector(ResourceConnectorByDate[MLModel]): + """The class that implements the AIBuilder MLModel Connector. + It inheritates from `ResourceConnectorByDate` because the AIBuilder + platform entities have a `lastModified` field. + """ + @property def resource_class(self) -> type[MLModel]: return MLModel @@ -53,7 +58,11 @@ def retry(self, identifier: int) -> ResourceWithRelations[MLModel] | RecordError @sleep_and_retry @limits(calls=GLOBAL_MAX_CALLS_MINUTE, period=ONE_MINUTE) @limits(calls=GLOBAL_MAX_CALLS_HOUR, period=ONE_HOUR) - def get_response(self, url) -> requests.Response | RecordError: + def get_response(self, url) -> dict | list | RecordError: + """ + Performs the `url` request checking for correctness and returns the + `list` or `dict`structure received or a `RecordError`. 
+ """ response = requests.get(url, timeout=REQUEST_TIMEOUT) if not response.ok: status_code = response.status_code @@ -65,11 +74,16 @@ def get_response(self, url) -> requests.Response | RecordError: return response.json() def _is_aware(self, date): + """Returns True if `date` is a timezone-aware `datetime`.""" return date.tzinfo is not None and date.tzinfo.utcoffset(date) is not None def _mlmodel_from_solution( self, solution: dict, id: str, url: str ) -> ResourceWithRelations[MLModel] | RecordError: + """ + Generates an fills a `ResourceWithRelations` object with the `MlModel` + attributes received in a `dict`. + """ if not set(mlmodel_mapping.values()) <= set(solution.keys()): err_msg = "Bad structure on the received solution." @@ -254,6 +268,7 @@ def fetch( def _description_format(description: str) -> Text: + """Generates a `Text` class with a plain text description from a `str`.""" if not description: description = "" if len(description) > field_length.LONG: From 393a4ebf9ca55bf84b641337dce33a59f3321c4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 10 Dec 2024 16:03:47 +0100 Subject: [PATCH 13/21] Modified same_as field for security reasons --- src/connectors/aibuilder/aibuilder_mlmodel_connector.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py index abd85198..1711b339 100644 --- a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py +++ b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py @@ -241,6 +241,7 @@ def fetch( for num_solution, solution in enumerate(solutions_list): url_get_solution = f"{API_URL}/get_solution?fullId={solution}&apiToken={TOKEN}" + url_to_show = f"{API_URL}/get_solution?fullId={solution}&apiToken=API_TOKEN" response = self.get_response(url_get_solution) if isinstance(response, RecordError): self.is_concluded = ( @@ -257,7 +258,7 @@ def fetch( ) yield ( 
datetime.fromisoformat(response["lastModified"]), - self._mlmodel_from_solution(response, solution, url_get_solution), + self._mlmodel_from_solution(response, solution, url_to_show), ) except Exception as e: self.is_concluded = ( From 06de5cd7420f065b01a645012d747f20a8298f38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 10 Dec 2024 16:08:39 +0100 Subject: [PATCH 14/21] Comment about same_as field aded to the README file --- docs/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/README.md b/docs/README.md index 2f9d4233..54a012e3 100644 --- a/docs/README.md +++ b/docs/README.md @@ -140,8 +140,10 @@ Make sure you use the same profile for `up` and `down`, or use `./scripts/down.s otherwise some containers might keep running. ##### Configuring AIBuilder connector -To access the AIBuilder API you need to provide a valid API token though the `API_TOKEN` variable. \ +To access the AIBuilder API you need to provide a valid API token trhough the `API_TOKEN` variable. \ Use the `override.env` file for that as explained above. +Please note that for using the url of the `same_as` field of the AIBuilder models, you will need to \ +substitute `API_TOKEN` on the url for your actual API token value. ### Shorthands We provide two auxiliary scripts for launching docker containers and bringing them down. From 9648c0919bee63f8daf9615a0536ce98e00538a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 10 Dec 2024 16:10:10 +0100 Subject: [PATCH 15/21] Comment about same_as field aded to the README file --- docs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/README.md b/docs/README.md index 54a012e3..308fceae 100644 --- a/docs/README.md +++ b/docs/README.md @@ -140,7 +140,7 @@ Make sure you use the same profile for `up` and `down`, or use `./scripts/down.s otherwise some containers might keep running. 
##### Configuring AIBuilder connector -To access the AIBuilder API you need to provide a valid API token trhough the `API_TOKEN` variable. \ +To access the AIBuilder API you need to provide a valid API token through the `API_TOKEN` variable. \ Use the `override.env` file for that as explained above. Please note that for using the url of the `same_as` field of the AIBuilder models, you will need to \ substitute `API_TOKEN` on the url for your actual API token value. From 9d4749e31d1c5dc3903fd31d8aec54dd550bfa82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 10 Dec 2024 16:11:42 +0100 Subject: [PATCH 16/21] Comment about same_as field aded to the README file --- docs/README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/README.md b/docs/README.md index 308fceae..67f692b1 100644 --- a/docs/README.md +++ b/docs/README.md @@ -141,9 +141,8 @@ otherwise some containers might keep running. ##### Configuring AIBuilder connector To access the AIBuilder API you need to provide a valid API token through the `API_TOKEN` variable. \ -Use the `override.env` file for that as explained above. -Please note that for using the url of the `same_as` field of the AIBuilder models, you will need to \ -substitute `API_TOKEN` on the url for your actual API token value. +Use the `override.env` file for that as explained above. \ +Please note that for using the url of the `same_as` field of the AIBuilder models, you will need to substitute `API_TOKEN` on the url for your actual API token value. ### Shorthands We provide two auxiliary scripts for launching docker containers and bringing them down. 
From 4c6a2e7cc8b2da71e731888b7836d9057259a3a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 18 Dec 2024 17:53:37 +0100 Subject: [PATCH 17/21] Modifications for the Pull Request --- .env | 2 +- connectors/aibuilder/Dockerfile | 6 +++-- docker-compose.yaml | 3 +-- docs/README.md | 4 +-- .../aibuilder/aibuilder_mlmodel_connector.py | 27 ++++++++++++++----- .../test_aibuilder_mlmodel_connector.py | 5 ++-- 6 files changed, 32 insertions(+), 15 deletions(-) diff --git a/.env b/.env index 91dfb558..91fd51c5 100644 --- a/.env +++ b/.env @@ -21,7 +21,7 @@ AIOD_KEYCLOAK_PORT=8080 EGICHECKINALIAS= #AIBUILDER -API_TOKEN="" +AIBUILDER_API_TOKEN="" #ELASTICSEARCH ES_USER=elastic diff --git a/connectors/aibuilder/Dockerfile b/connectors/aibuilder/Dockerfile index 18ba0ed4..c410b2fa 100644 --- a/connectors/aibuilder/Dockerfile +++ b/connectors/aibuilder/Dockerfile @@ -1,11 +1,13 @@ FROM aiod_metadata_catalogue +USER root +RUN apt-get update +RUN apt -y install cron + COPY cron /etc/cron.d/aiod COPY mlmodels.sh /opt/connectors/script/mlmodels.sh COPY entry.sh /opt/connectors/script/entry.sh -USER root -RUN apt -y install cron RUN chmod +x /etc/cron.d/aiod /opt/connectors/script/mlmodels.sh RUN crontab /etc/cron.d/aiod diff --git a/docker-compose.yaml b/docker-compose.yaml index 4e62cc98..ae26a10a 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -119,11 +119,10 @@ services: env_file: .env environment: - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - - API_TOKEN=$API_TOKEN + - AIBUILDER_API_TOKEN=$AIBUILDER_API_TOKEN volumes: - ./src:/app - ${DATA_PATH}/connectors:/opt/connectors/data - - ./connectors/aibuilder/:/opt/connectors/script command: > /bin/bash -c "/opt/connectors/script/entry.sh" depends_on: diff --git a/docs/README.md b/docs/README.md index 67f692b1..33a4cd65 100644 --- a/docs/README.md +++ b/docs/README.md @@ -140,9 +140,9 @@ Make sure you use the same profile for `up` and `down`, or use `./scripts/down.s otherwise some containers 
might keep running. ##### Configuring AIBuilder connector -To access the AIBuilder API you need to provide a valid API token through the `API_TOKEN` variable. \ +To access the AIBuilder API you need to provide a valid API token through the `AIBUILDER_API_TOKEN` variable. \ Use the `override.env` file for that as explained above. \ -Please note that for using the url of the `same_as` field of the AIBuilder models, you will need to substitute `API_TOKEN` on the url for your actual API token value. +Please note that for using the url of the `same_as` field of the AIBuilder models, you will need to substitute `AIBUILDER_API_TOKEN` on the url for your actual API token value. ### Shorthands We provide two auxiliary scripts for launching docker containers and bringing them down. diff --git a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py index 1711b339..c5ea9843 100644 --- a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py +++ b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py @@ -30,7 +30,7 @@ from .aibuilder_mappings import mlmodel_mapping -TOKEN = os.getenv("API_TOKEN", "") +TOKEN = os.getenv("AIBUILDER_API_TOKEN", "") API_URL = "https://aiexp-dev.ai4europe.eu/federation" GLOBAL_MAX_CALLS_MINUTE = 60 GLOBAL_MAX_CALLS_HOUR = 2000 @@ -44,6 +44,13 @@ class AIBuilderMLModelConnector(ResourceConnectorByDate[MLModel]): platform entities have a `lastModified` field. """ + def __init__(self, token=TOKEN): + self.token = token + if self.token == "": + raise ValueError( + "You need to asign a value to AIBUILDER_API_TOKEN environment variable." 
+ ) + @property def resource_class(self) -> type[MLModel]: return MLModel @@ -55,6 +62,9 @@ def platform_name(self) -> PlatformName: def retry(self, identifier: int) -> ResourceWithRelations[MLModel] | RecordError: raise NotImplementedError("Not implemented.") + def set_token(self, token: str): + token = token + @sleep_and_retry @limits(calls=GLOBAL_MAX_CALLS_MINUTE, period=ONE_MINUTE) @limits(calls=GLOBAL_MAX_CALLS_HOUR, period=ONE_HOUR) @@ -63,7 +73,10 @@ def get_response(self, url) -> dict | list | RecordError: Performs the `url` request checking for correctness and returns the `list` or `dict`structure received or a `RecordError`. """ - response = requests.get(url, timeout=REQUEST_TIMEOUT) + try: + response = requests.get(url, timeout=REQUEST_TIMEOUT) + except Exception as e: + return RecordError(identifier=None, error=e) if not response.ok: status_code = response.status_code msg = response.json()["error"]["message"] @@ -194,7 +207,7 @@ def fetch( if not self._is_aware(to_excl): to_excl = to_excl.replace(tzinfo=pytz.UTC) - url_get_catalog_list = f"{API_URL}/get_catalog_list?apiToken={TOKEN}" + url_get_catalog_list = f"{API_URL}/get_catalog_list?apiToken={self.token}" response = self.get_response(url_get_catalog_list) if isinstance(response, RecordError): self.is_concluded = True @@ -215,7 +228,7 @@ def fetch( for num_catalog, catalog in enumerate(catalog_list): url_get_catalog_solutions = ( - f"{API_URL}/get_catalog_solutions?catalogId={catalog}&apiToken={TOKEN}" + f"{API_URL}/get_catalog_solutions?catalogId={catalog}&apiToken={self.token}" ) response = self.get_response(url_get_catalog_solutions) if isinstance(response, RecordError): @@ -240,8 +253,10 @@ def fetch( continue for num_solution, solution in enumerate(solutions_list): - url_get_solution = f"{API_URL}/get_solution?fullId={solution}&apiToken={TOKEN}" - url_to_show = f"{API_URL}/get_solution?fullId={solution}&apiToken=API_TOKEN" + url_get_solution = 
f"{API_URL}/get_solution?fullId={solution}&apiToken={self.token}" + url_to_show = ( + f"{API_URL}/get_solution?fullId={solution}&apiToken=AIBUILDER_API_TOKEN" + ) response = self.get_response(url_get_solution) if isinstance(response, RecordError): self.is_concluded = ( diff --git a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py index 8f40487d..8875707e 100644 --- a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py +++ b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py @@ -6,7 +6,7 @@ from requests.exceptions import HTTPError from connectors.aibuilder.aibuilder_mlmodel_connector import AIBuilderMLModelConnector -from connectors.aibuilder.aibuilder_mlmodel_connector import API_URL, TOKEN +from connectors.aibuilder.aibuilder_mlmodel_connector import API_URL from connectors.resource_with_relations import ResourceWithRelations from connectors.record_error import RecordError from database.model.models_and_experiments.ml_model import MLModel @@ -14,7 +14,8 @@ from tests.testutils.paths import path_test_resources from database.model.ai_resource.text import Text -connector = AIBuilderMLModelConnector() +TOKEN = "TEST_AIBUILDER_API_TOKEN" +connector = AIBuilderMLModelConnector(f"{TOKEN}") test_resources_path = os.path.join(path_test_resources(), "connectors", "aibuilder") catalog_list_url = f"{API_URL}/get_catalog_list?apiToken={TOKEN}" catalog_solutions_url = f"{API_URL}/get_catalog_solutions?catalogId=1&apiToken={TOKEN}" From cf691b9eda88e91c405e499948a902adbfbba933 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 18 Dec 2024 17:58:37 +0100 Subject: [PATCH 18/21] Removed extra set_token method --- src/connectors/aibuilder/aibuilder_mlmodel_connector.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py index 
c5ea9843..0da98884 100644 --- a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py +++ b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py @@ -62,9 +62,6 @@ def platform_name(self) -> PlatformName: def retry(self, identifier: int) -> ResourceWithRelations[MLModel] | RecordError: raise NotImplementedError("Not implemented.") - def set_token(self, token: str): - token = token - @sleep_and_retry @limits(calls=GLOBAL_MAX_CALLS_MINUTE, period=ONE_MINUTE) @limits(calls=GLOBAL_MAX_CALLS_HOUR, period=ONE_HOUR) From a8a17c992dd28564b667e9adac4382f7a812e2e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Thu, 19 Dec 2024 12:37:50 +0100 Subject: [PATCH 19/21] Handle successfull requests without json response. Also specific message for 401 unauthorized. --- .../aibuilder/aibuilder_mlmodel_connector.py | 11 ++++++++--- .../aibuilder/test_aibuilder_mlmodel_connector.py | 8 +++++--- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py index 0da98884..4eafed09 100644 --- a/src/connectors/aibuilder/aibuilder_mlmodel_connector.py +++ b/src/connectors/aibuilder/aibuilder_mlmodel_connector.py @@ -9,6 +9,7 @@ import requests from requests.exceptions import HTTPError +from starlette import status from datetime import datetime from ratelimit import limits, sleep_and_retry from typing import Iterator, Tuple, Any @@ -74,14 +75,18 @@ def get_response(self, url) -> dict | list | RecordError: response = requests.get(url, timeout=REQUEST_TIMEOUT) except Exception as e: return RecordError(identifier=None, error=e) - if not response.ok: + if response.status_code == status.HTTP_200_OK: + return response.json() + else: status_code = response.status_code - msg = response.json()["error"]["message"] + if status_code == status.HTTP_401_UNAUTHORIZED: + msg = "Unauthorized token." 
+ else: + msg = response.reason err_msg = f"Error while fetching {url} from AIBuilder: ({status_code}) {msg}" logging.error(err_msg) err = HTTPError(err_msg) return RecordError(identifier=None, error=err) - return response.json() def _is_aware(self, date): """Returns True if `date` is a timezone-aware `datetime`.""" diff --git a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py index 8875707e..e0d2083e 100644 --- a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py +++ b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py @@ -101,7 +101,7 @@ def test_fetch_happy_path_unaware_datetime(): def test_catalog_list_http_error(): error = {"error": {"message": "HTTP Error."}} - err_msg = f"Error while fetching {catalog_list_url} from AIBuilder: (500) HTTP Error." + err_msg = f"Error while fetching {catalog_list_url} from AIBuilder: (500) Internal Server Error" fetched_resources = [] with responses.RequestsMock() as mocked_requests: mocked_requests.add(responses.GET, catalog_list_url, json=error, status=500) @@ -157,7 +157,9 @@ def test_empty_catalog_list(): def test_catalog_solutions_http_error(): catalog_list_path = os.path.join(test_resources_path, "catalog_list.json") error = {"error": {"message": "HTTP Error."}} - err_msg = f"Error while fetching {catalog_solutions_url} from AIBuilder: (500) HTTP Error." 
+ err_msg = ( + f"Error while fetching {catalog_solutions_url} from AIBuilder: (500) Internal Server Error" + ) fetched_resources = [] with responses.RequestsMock() as mocked_requests: with open(catalog_list_path, "r") as f: @@ -224,7 +226,7 @@ def test_solution_http_error(): catalog_list_path = os.path.join(test_resources_path, "catalog_list.json") catalog_solutions_path = os.path.join(test_resources_path, "catalog_solutions.json") error = {"error": {"message": "HTTP Error."}} - err_msg = f"Error while fetching {solution_1_url} from AIBuilder: (500) HTTP Error." + err_msg = f"Error while fetching {solution_1_url} from AIBuilder: (500) Internal Server Error" solution_2_path = os.path.join(test_resources_path, "solution_2.json") fetched_resources = [] with responses.RequestsMock() as mocked_requests: From 16b63d3d2b63805905ff81f57c19a4af2597a680 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Thu, 19 Dec 2024 12:42:20 +0100 Subject: [PATCH 20/21] Added unitary test for Unauthorized token error. --- .../test_aibuilder_mlmodel_connector.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py index e0d2083e..f55d821c 100644 --- a/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py +++ b/src/tests/connectors/aibuilder/test_aibuilder_mlmodel_connector.py @@ -99,6 +99,24 @@ def test_fetch_happy_path_unaware_datetime(): assert resource.resource.is_accessible_for_free +def test_unautorized_token_error(): + error = {"error": {"message": "Unauthorized token."}} + err_msg = f"Error while fetching {catalog_list_url} from AIBuilder: (401) Unauthorized token." 
+ fetched_resources = [] + with responses.RequestsMock() as mocked_requests: + mocked_requests.add(responses.GET, catalog_list_url, json=error, status=401) + fetched_resources = list(connector.fetch(mocked_datetime_from, mocked_datetime_to)) + + assert len(fetched_resources) == 1 + last_modified, resource = fetched_resources[0] + assert last_modified is None + assert type(resource) == RecordError + assert resource.identifier is None + assert type(resource.error) == HTTPError + assert str(resource.error) == err_msg + assert connector.is_concluded + + def test_catalog_list_http_error(): error = {"error": {"message": "HTTP Error."}} err_msg = f"Error while fetching {catalog_list_url} from AIBuilder: (500) Internal Server Error" From f92b15e9435cac7861ce1bb8f79af940be5c37c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Thu, 19 Dec 2024 12:43:53 +0100 Subject: [PATCH 21/21] Removed .env file from docker_compose --- docker-compose.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index ae26a10a..edc2baaf 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -116,7 +116,6 @@ services: dockerfile: Dockerfile image: aiod_aibuilder_connector container_name: aibuilder-connector - env_file: .env environment: - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - AIBUILDER_API_TOKEN=$AIBUILDER_API_TOKEN