diff --git a/lightly/openapi_generated/swagger_client/__init__.py b/lightly/openapi_generated/swagger_client/__init__.py index b4113ed1c..b7e6c3ed8 100644 --- a/lightly/openapi_generated/swagger_client/__init__.py +++ b/lightly/openapi_generated/swagger_client/__init__.py @@ -52,9 +52,13 @@ from lightly.openapi_generated.swagger_client.models.active_learning_score_data import ActiveLearningScoreData from lightly.openapi_generated.swagger_client.models.active_learning_score_types_v2_data import ActiveLearningScoreTypesV2Data from lightly.openapi_generated.swagger_client.models.active_learning_score_v2_data import ActiveLearningScoreV2Data +from lightly.openapi_generated.swagger_client.models.annotation_savings import AnnotationSavings from lightly.openapi_generated.swagger_client.models.api_error_code import ApiErrorCode from lightly.openapi_generated.swagger_client.models.api_error_response import ApiErrorResponse from lightly.openapi_generated.swagger_client.models.async_task_data import AsyncTaskData +from lightly.openapi_generated.swagger_client.models.categorical_distribution import CategoricalDistribution +from lightly.openapi_generated.swagger_client.models.categorical_distribution_metrics import CategoricalDistributionMetrics +from lightly.openapi_generated.swagger_client.models.categorical_distribution_per_set import CategoricalDistributionPerSet from lightly.openapi_generated.swagger_client.models.configuration_data import ConfigurationData from lightly.openapi_generated.swagger_client.models.configuration_entry import ConfigurationEntry from lightly.openapi_generated.swagger_client.models.configuration_set_request import ConfigurationSetRequest @@ -66,11 +70,14 @@ from lightly.openapi_generated.swagger_client.models.create_team_membership_request import CreateTeamMembershipRequest from lightly.openapi_generated.swagger_client.models.creator import Creator from lightly.openapi_generated.swagger_client.models.crop_data import CropData +from 
lightly.openapi_generated.swagger_client.models.dataset_analysis import DatasetAnalysis from lightly.openapi_generated.swagger_client.models.dataset_create_request import DatasetCreateRequest from lightly.openapi_generated.swagger_client.models.dataset_creator import DatasetCreator from lightly.openapi_generated.swagger_client.models.dataset_data import DatasetData from lightly.openapi_generated.swagger_client.models.dataset_data_enriched import DatasetDataEnriched from lightly.openapi_generated.swagger_client.models.dataset_embedding_data import DatasetEmbeddingData +from lightly.openapi_generated.swagger_client.models.dataset_information import DatasetInformation +from lightly.openapi_generated.swagger_client.models.dataset_sizes import DatasetSizes from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType from lightly.openapi_generated.swagger_client.models.dataset_update_request import DatasetUpdateRequest from lightly.openapi_generated.swagger_client.models.datasource_config import DatasourceConfig @@ -101,6 +108,8 @@ from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_predictions_data import DatasourceRawSamplesPredictionsData from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_predictions_data_row import DatasourceRawSamplesPredictionsDataRow from lightly.openapi_generated.swagger_client.models.delegated_access_external_ids_inner import DelegatedAccessExternalIdsInner +from lightly.openapi_generated.swagger_client.models.detection_frame_prediction import DetectionFramePrediction +from lightly.openapi_generated.swagger_client.models.detection_task_information import DetectionTaskInformation from lightly.openapi_generated.swagger_client.models.dimensionality_reduction_method import DimensionalityReductionMethod from lightly.openapi_generated.swagger_client.models.docker_license_information import DockerLicenseInformation from 
lightly.openapi_generated.swagger_client.models.docker_run_artifact_create_request import DockerRunArtifactCreateRequest @@ -178,10 +187,13 @@ from lightly.openapi_generated.swagger_client.models.embedding2d_create_request import Embedding2dCreateRequest from lightly.openapi_generated.swagger_client.models.embedding2d_data import Embedding2dData from lightly.openapi_generated.swagger_client.models.embedding_data import EmbeddingData +from lightly.openapi_generated.swagger_client.models.embedding_data2_d import EmbeddingData2D +from lightly.openapi_generated.swagger_client.models.embedding_information import EmbeddingInformation from lightly.openapi_generated.swagger_client.models.expiry_handling_strategy_v3 import ExpiryHandlingStrategyV3 from lightly.openapi_generated.swagger_client.models.file_name_format import FileNameFormat from lightly.openapi_generated.swagger_client.models.file_output_format import FileOutputFormat from lightly.openapi_generated.swagger_client.models.filename_and_read_url import FilenameAndReadUrl +from lightly.openapi_generated.swagger_client.models.general_information import GeneralInformation from lightly.openapi_generated.swagger_client.models.image_type import ImageType from lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest from lightly.openapi_generated.swagger_client.models.job_result_type import JobResultType @@ -200,6 +212,11 @@ from lightly.openapi_generated.swagger_client.models.lightly_model_v3 import LightlyModelV3 from lightly.openapi_generated.swagger_client.models.lightly_trainer_precision_v2 import LightlyTrainerPrecisionV2 from lightly.openapi_generated.swagger_client.models.lightly_trainer_precision_v3 import LightlyTrainerPrecisionV3 +from lightly.openapi_generated.swagger_client.models.metadata_information import MetadataInformation +from lightly.openapi_generated.swagger_client.models.numeric_distribution import NumericDistribution +from 
lightly.openapi_generated.swagger_client.models.numeric_distribution_metrics import NumericDistributionMetrics +from lightly.openapi_generated.swagger_client.models.numeric_distribution_per_set import NumericDistributionPerSet +from lightly.openapi_generated.swagger_client.models.object_detection_prediction import ObjectDetectionPrediction from lightly.openapi_generated.swagger_client.models.prediction_singleton import PredictionSingleton from lightly.openapi_generated.swagger_client.models.prediction_singleton_base import PredictionSingletonBase from lightly.openapi_generated.swagger_client.models.prediction_singleton_classification import PredictionSingletonClassification @@ -212,6 +229,7 @@ from lightly.openapi_generated.swagger_client.models.prediction_singleton_object_detection_all_of import PredictionSingletonObjectDetectionAllOf from lightly.openapi_generated.swagger_client.models.prediction_singleton_semantic_segmentation import PredictionSingletonSemanticSegmentation from lightly.openapi_generated.swagger_client.models.prediction_singleton_semantic_segmentation_all_of import PredictionSingletonSemanticSegmentationAllOf +from lightly.openapi_generated.swagger_client.models.prediction_task_information import PredictionTaskInformation from lightly.openapi_generated.swagger_client.models.prediction_task_schema import PredictionTaskSchema from lightly.openapi_generated.swagger_client.models.prediction_task_schema_base import PredictionTaskSchemaBase from lightly.openapi_generated.swagger_client.models.prediction_task_schema_category import PredictionTaskSchemaCategory @@ -223,6 +241,8 @@ from lightly.openapi_generated.swagger_client.models.prediction_task_schema_simple_all_of import PredictionTaskSchemaSimpleAllOf from lightly.openapi_generated.swagger_client.models.prediction_task_schemas import PredictionTaskSchemas from lightly.openapi_generated.swagger_client.models.questionnaire_data import QuestionnaireData +from 
lightly.openapi_generated.swagger_client.models.report_v2 import ReportV2 +from lightly.openapi_generated.swagger_client.models.run_information import RunInformation from lightly.openapi_generated.swagger_client.models.s3_region import S3Region from lightly.openapi_generated.swagger_client.models.sama_task import SamaTask from lightly.openapi_generated.swagger_client.models.sama_task_data import SamaTaskData @@ -239,7 +259,10 @@ from lightly.openapi_generated.swagger_client.models.sampling_config_stopping_condition import SamplingConfigStoppingCondition from lightly.openapi_generated.swagger_client.models.sampling_create_request import SamplingCreateRequest from lightly.openapi_generated.swagger_client.models.sampling_method import SamplingMethod +from lightly.openapi_generated.swagger_client.models.scatter_plot_data import ScatterPlotData +from lightly.openapi_generated.swagger_client.models.scatter_plot_example_image import ScatterPlotExampleImage from lightly.openapi_generated.swagger_client.models.sector import Sector +from lightly.openapi_generated.swagger_client.models.selected_and_removed_image_pair import SelectedAndRemovedImagePair from lightly.openapi_generated.swagger_client.models.selection_config import SelectionConfig from lightly.openapi_generated.swagger_client.models.selection_config_all_of import SelectionConfigAllOf from lightly.openapi_generated.swagger_client.models.selection_config_base import SelectionConfigBase @@ -290,6 +313,7 @@ from lightly.openapi_generated.swagger_client.models.tag_data import TagData from lightly.openapi_generated.swagger_client.models.tag_update_request import TagUpdateRequest from lightly.openapi_generated.swagger_client.models.tag_upsize_request import TagUpsizeRequest +from lightly.openapi_generated.swagger_client.models.task_annotation_savings import TaskAnnotationSavings from lightly.openapi_generated.swagger_client.models.task_type import TaskType from 
lightly.openapi_generated.swagger_client.models.team_basic_data import TeamBasicData from lightly.openapi_generated.swagger_client.models.team_data import TeamData @@ -299,4 +323,5 @@ from lightly.openapi_generated.swagger_client.models.update_team_membership_request import UpdateTeamMembershipRequest from lightly.openapi_generated.swagger_client.models.user_type import UserType from lightly.openapi_generated.swagger_client.models.video_frame_data import VideoFrameData +from lightly.openapi_generated.swagger_client.models.worker_information import WorkerInformation from lightly.openapi_generated.swagger_client.models.write_csv_url_data import WriteCSVUrlData diff --git a/lightly/openapi_generated/swagger_client/api/datasources_api.py b/lightly/openapi_generated/swagger_client/api/datasources_api.py index 63d82eac1..e21ec5d4f 100644 --- a/lightly/openapi_generated/swagger_client/api/datasources_api.py +++ b/lightly/openapi_generated/swagger_client/api/datasources_api.py @@ -1222,7 +1222,7 @@ def get_list_of_raw_samples_metadata_from_datasource_by_dataset_id_with_http_inf _request_auth=_params.get('_request_auth')) @validate_arguments - def get_list_of_raw_samples_predictions_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], var_from : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, to : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified. 
")] = None, cursor : Annotated[Optional[StrictStr], Field(description="Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list. ")] = None, use_redirected_read_url : Annotated[Optional[StrictBool], Field(description="By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file ")] = None, relevant_filenames_file_name : Annotated[Optional[constr(strict=True, min_length=4)], Field(description="The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details ")] = None, relevant_filenames_run_id : Annotated[Optional[constr(strict=True)], Field(description="The run id of the run which generated an artifact to be used as the relevant filenames file. (see DatasourceRelevantFilenamesArtifactIdParam) ")] = None, relevant_filenames_artifact_id : Annotated[Optional[constr(strict=True)], Field(description="The artifact id of the run provided by DatasourceRelevantFilenamesRunIdParam to be used as the relevant filenames file. ")] = None, **kwargs) -> DatasourceRawSamplesPredictionsData: # noqa: E501 + def get_list_of_raw_samples_predictions_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True), Field(..., description="The prediction task name for which one wants to list the predictions")], var_from : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified. 
")] = None, to : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, cursor : Annotated[Optional[StrictStr], Field(description="Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list. ")] = None, use_redirected_read_url : Annotated[Optional[StrictBool], Field(description="By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file ")] = None, relevant_filenames_file_name : Annotated[Optional[constr(strict=True, min_length=4)], Field(description="The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details ")] = None, relevant_filenames_run_id : Annotated[Optional[constr(strict=True)], Field(description="The run id of the run which generated an artifact to be used as the relevant filenames file. (see DatasourceRelevantFilenamesArtifactIdParam) ")] = None, relevant_filenames_artifact_id : Annotated[Optional[constr(strict=True)], Field(description="The artifact id of the run provided by DatasourceRelevantFilenamesRunIdParam to be used as the relevant filenames file. 
")] = None, **kwargs) -> DatasourceRawSamplesPredictionsData: # noqa: E501 """get_list_of_raw_samples_predictions_from_datasource_by_dataset_id # noqa: E501 Get list of the raw samples predictions from datasource for a specific taskName # noqa: E501 @@ -1267,7 +1267,7 @@ def get_list_of_raw_samples_predictions_from_datasource_by_dataset_id(self, data return self.get_list_of_raw_samples_predictions_from_datasource_by_dataset_id_with_http_info(dataset_id, task_name, var_from, to, cursor, use_redirected_read_url, relevant_filenames_file_name, relevant_filenames_run_id, relevant_filenames_artifact_id, **kwargs) # noqa: E501 @validate_arguments - def get_list_of_raw_samples_predictions_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], var_from : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, to : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, cursor : Annotated[Optional[StrictStr], Field(description="Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list. ")] = None, use_redirected_read_url : Annotated[Optional[StrictBool], Field(description="By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. 
When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file ")] = None, relevant_filenames_file_name : Annotated[Optional[constr(strict=True, min_length=4)], Field(description="The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details ")] = None, relevant_filenames_run_id : Annotated[Optional[constr(strict=True)], Field(description="The run id of the run which generated an artifact to be used as the relevant filenames file. (see DatasourceRelevantFilenamesArtifactIdParam) ")] = None, relevant_filenames_artifact_id : Annotated[Optional[constr(strict=True)], Field(description="The artifact id of the run provided by DatasourceRelevantFilenamesRunIdParam to be used as the relevant filenames file. ")] = None, **kwargs) -> ApiResponse: # noqa: E501 + def get_list_of_raw_samples_predictions_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True), Field(..., description="The prediction task name for which one wants to list the predictions")], var_from : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, to : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, cursor : Annotated[Optional[StrictStr], Field(description="Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list. 
")] = None, use_redirected_read_url : Annotated[Optional[StrictBool], Field(description="By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file ")] = None, relevant_filenames_file_name : Annotated[Optional[constr(strict=True, min_length=4)], Field(description="The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details ")] = None, relevant_filenames_run_id : Annotated[Optional[constr(strict=True)], Field(description="The run id of the run which generated an artifact to be used as the relevant filenames file. (see DatasourceRelevantFilenamesArtifactIdParam) ")] = None, relevant_filenames_artifact_id : Annotated[Optional[constr(strict=True)], Field(description="The artifact id of the run provided by DatasourceRelevantFilenamesRunIdParam to be used as the relevant filenames file. 
")] = None, **kwargs) -> ApiResponse: # noqa: E501 """get_list_of_raw_samples_predictions_from_datasource_by_dataset_id # noqa: E501 Get list of the raw samples predictions from datasource for a specific taskName # noqa: E501 diff --git a/lightly/openapi_generated/swagger_client/api/predictions_api.py b/lightly/openapi_generated/swagger_client/api/predictions_api.py index ce7abcd7c..23b3674a6 100644 --- a/lightly/openapi_generated/swagger_client/api/predictions_api.py +++ b/lightly/openapi_generated/swagger_client/api/predictions_api.py @@ -395,7 +395,7 @@ def create_or_update_prediction_task_schema_by_dataset_id_with_http_info(self, d _request_auth=_params.get('_request_auth')) @validate_arguments - def get_prediction_task_schema_by_task_name(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> PredictionTaskSchema: # noqa: E501 + def get_prediction_task_schema_by_task_name(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True), Field(..., description="The prediction task name for which one wants to list the predictions")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. 
E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> PredictionTaskSchema: # noqa: E501 """get_prediction_task_schema_by_task_name # noqa: E501 Get a prediction task schemas named taskName for a datasetId # noqa: E501 @@ -428,7 +428,7 @@ def get_prediction_task_schema_by_task_name(self, dataset_id : Annotated[constr( return self.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, task_name, prediction_uuid_timestamp, **kwargs) # noqa: E501 @validate_arguments - def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> ApiResponse: # noqa: E501 + def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True), Field(..., description="The prediction task name for which one wants to list the predictions")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. 
E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> ApiResponse: # noqa: E501 """get_prediction_task_schema_by_task_name # noqa: E501 Get a prediction task schemas named taskName for a datasetId # noqa: E501 @@ -711,7 +711,7 @@ def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : _request_auth=_params.get('_request_auth')) @validate_arguments - def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, lean : Annotated[Optional[StrictBool], Field(description="if lean is set to true, all prediction singletons are returned without their \"heavy\" part. This is useful for large datasets where the full prediction singletons are not needed. 
e.g SEGMENTATION does not need to return the RLE ")] = None, task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> List[List]: # noqa: E501 + def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, lean : Annotated[Optional[StrictBool], Field(description="if lean is set to true, all prediction singletons are returned without their \"heavy\" part. This is useful for large datasets where the full prediction singletons are not needed. 
e.g SEGMENTATION does not need to return the RLE ")] = None, task_name : Annotated[Optional[constr(strict=True)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> List[List]: # noqa: E501 """get_predictions_by_dataset_id # noqa: E501 Get all prediction singletons of all samples of a dataset ordered by the sample mapping # noqa: E501 @@ -750,7 +750,7 @@ def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=Tru return self.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, page_size, page_offset, lean, task_name, **kwargs) # noqa: E501 @validate_arguments - def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, lean : Annotated[Optional[StrictBool], Field(description="if lean is set to true, all prediction singletons are returned without their \"heavy\" part. This is useful for large datasets where the full prediction singletons are not needed. 
e.g SEGMENTATION does not need to return the RLE ")] = None, task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> ApiResponse: # noqa: E501 + def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, lean : Annotated[Optional[StrictBool], Field(description="if lean is set to true, all prediction singletons are returned without their \"heavy\" part. This is useful for large datasets where the full prediction singletons are not needed. 
e.g SEGMENTATION does not need to return the RLE ")] = None, task_name : Annotated[Optional[constr(strict=True)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> ApiResponse: # noqa: E501 """get_predictions_by_dataset_id # noqa: E501 Get all prediction singletons of all samples of a dataset ordered by the sample mapping # noqa: E501 diff --git a/lightly/openapi_generated/swagger_client/api/scores_api.py b/lightly/openapi_generated/swagger_client/api/scores_api.py index fadd3efcc..650b558b8 100644 --- a/lightly/openapi_generated/swagger_client/api/scores_api.py +++ b/lightly/openapi_generated/swagger_client/api/scores_api.py @@ -220,7 +220,7 @@ def create_or_update_active_learning_score_by_tag_id_with_http_info(self, datase _request_auth=_params.get('_request_auth')) @validate_arguments - def create_or_update_active_learning_v2_score_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], active_learning_score_create_request : ActiveLearningScoreCreateRequest, prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. 
")] = None, **kwargs) -> CreateEntityResponse: # noqa: E501 + def create_or_update_active_learning_v2_score_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True), Field(..., description="The prediction task name for which one wants to list the predictions")], active_learning_score_create_request : ActiveLearningScoreCreateRequest, prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> CreateEntityResponse: # noqa: E501 """create_or_update_active_learning_v2_score_by_dataset_id # noqa: E501 Create or update active learning score object for a dataset, taskName, predictionUUIDTimestamp # noqa: E501 @@ -255,7 +255,7 @@ def create_or_update_active_learning_v2_score_by_dataset_id(self, dataset_id : A return self.create_or_update_active_learning_v2_score_by_dataset_id_with_http_info(dataset_id, task_name, active_learning_score_create_request, prediction_uuid_timestamp, **kwargs) # noqa: E501 @validate_arguments - def create_or_update_active_learning_v2_score_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], active_learning_score_create_request : ActiveLearningScoreCreateRequest, prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. 
This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> ApiResponse: # noqa: E501 + def create_or_update_active_learning_v2_score_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True), Field(..., description="The prediction task name for which one wants to list the predictions")], active_learning_score_create_request : ActiveLearningScoreCreateRequest, prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Deprecated, currently ignored. The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. 
")] = None, **kwargs) -> ApiResponse: # noqa: E501 """create_or_update_active_learning_v2_score_by_dataset_id # noqa: E501 Create or update active learning score object for a dataset, taskName, predictionUUIDTimestamp # noqa: E501 diff --git a/lightly/openapi_generated/swagger_client/configuration.py b/lightly/openapi_generated/swagger_client/configuration.py index 5f6e4b662..4284ffc0f 100644 --- a/lightly/openapi_generated/swagger_client/configuration.py +++ b/lightly/openapi_generated/swagger_client/configuration.py @@ -464,7 +464,7 @@ def get_host_settings(self): 'description': "No description provided", }, { - 'url': "http://localhost:5000", + 'url': "http://localhost:5001", 'description': "No description provided", } ] diff --git a/lightly/openapi_generated/swagger_client/models/__init__.py b/lightly/openapi_generated/swagger_client/models/__init__.py index eeb543079..9f8015b83 100644 --- a/lightly/openapi_generated/swagger_client/models/__init__.py +++ b/lightly/openapi_generated/swagger_client/models/__init__.py @@ -19,9 +19,13 @@ from lightly.openapi_generated.swagger_client.models.active_learning_score_data import ActiveLearningScoreData from lightly.openapi_generated.swagger_client.models.active_learning_score_types_v2_data import ActiveLearningScoreTypesV2Data from lightly.openapi_generated.swagger_client.models.active_learning_score_v2_data import ActiveLearningScoreV2Data +from lightly.openapi_generated.swagger_client.models.annotation_savings import AnnotationSavings from lightly.openapi_generated.swagger_client.models.api_error_code import ApiErrorCode from lightly.openapi_generated.swagger_client.models.api_error_response import ApiErrorResponse from lightly.openapi_generated.swagger_client.models.async_task_data import AsyncTaskData +from lightly.openapi_generated.swagger_client.models.categorical_distribution import CategoricalDistribution +from lightly.openapi_generated.swagger_client.models.categorical_distribution_metrics import 
CategoricalDistributionMetrics +from lightly.openapi_generated.swagger_client.models.categorical_distribution_per_set import CategoricalDistributionPerSet from lightly.openapi_generated.swagger_client.models.configuration_data import ConfigurationData from lightly.openapi_generated.swagger_client.models.configuration_entry import ConfigurationEntry from lightly.openapi_generated.swagger_client.models.configuration_set_request import ConfigurationSetRequest @@ -33,11 +37,14 @@ from lightly.openapi_generated.swagger_client.models.create_team_membership_request import CreateTeamMembershipRequest from lightly.openapi_generated.swagger_client.models.creator import Creator from lightly.openapi_generated.swagger_client.models.crop_data import CropData +from lightly.openapi_generated.swagger_client.models.dataset_analysis import DatasetAnalysis from lightly.openapi_generated.swagger_client.models.dataset_create_request import DatasetCreateRequest from lightly.openapi_generated.swagger_client.models.dataset_creator import DatasetCreator from lightly.openapi_generated.swagger_client.models.dataset_data import DatasetData from lightly.openapi_generated.swagger_client.models.dataset_data_enriched import DatasetDataEnriched from lightly.openapi_generated.swagger_client.models.dataset_embedding_data import DatasetEmbeddingData +from lightly.openapi_generated.swagger_client.models.dataset_information import DatasetInformation +from lightly.openapi_generated.swagger_client.models.dataset_sizes import DatasetSizes from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType from lightly.openapi_generated.swagger_client.models.dataset_update_request import DatasetUpdateRequest from lightly.openapi_generated.swagger_client.models.datasource_config import DatasourceConfig @@ -68,6 +75,8 @@ from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_predictions_data import DatasourceRawSamplesPredictionsData from 
lightly.openapi_generated.swagger_client.models.datasource_raw_samples_predictions_data_row import DatasourceRawSamplesPredictionsDataRow from lightly.openapi_generated.swagger_client.models.delegated_access_external_ids_inner import DelegatedAccessExternalIdsInner +from lightly.openapi_generated.swagger_client.models.detection_frame_prediction import DetectionFramePrediction +from lightly.openapi_generated.swagger_client.models.detection_task_information import DetectionTaskInformation from lightly.openapi_generated.swagger_client.models.dimensionality_reduction_method import DimensionalityReductionMethod from lightly.openapi_generated.swagger_client.models.docker_license_information import DockerLicenseInformation from lightly.openapi_generated.swagger_client.models.docker_run_artifact_create_request import DockerRunArtifactCreateRequest @@ -145,10 +154,13 @@ from lightly.openapi_generated.swagger_client.models.embedding2d_create_request import Embedding2dCreateRequest from lightly.openapi_generated.swagger_client.models.embedding2d_data import Embedding2dData from lightly.openapi_generated.swagger_client.models.embedding_data import EmbeddingData +from lightly.openapi_generated.swagger_client.models.embedding_data2_d import EmbeddingData2D +from lightly.openapi_generated.swagger_client.models.embedding_information import EmbeddingInformation from lightly.openapi_generated.swagger_client.models.expiry_handling_strategy_v3 import ExpiryHandlingStrategyV3 from lightly.openapi_generated.swagger_client.models.file_name_format import FileNameFormat from lightly.openapi_generated.swagger_client.models.file_output_format import FileOutputFormat from lightly.openapi_generated.swagger_client.models.filename_and_read_url import FilenameAndReadUrl +from lightly.openapi_generated.swagger_client.models.general_information import GeneralInformation from lightly.openapi_generated.swagger_client.models.image_type import ImageType from 
lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest from lightly.openapi_generated.swagger_client.models.job_result_type import JobResultType @@ -167,6 +179,11 @@ from lightly.openapi_generated.swagger_client.models.lightly_model_v3 import LightlyModelV3 from lightly.openapi_generated.swagger_client.models.lightly_trainer_precision_v2 import LightlyTrainerPrecisionV2 from lightly.openapi_generated.swagger_client.models.lightly_trainer_precision_v3 import LightlyTrainerPrecisionV3 +from lightly.openapi_generated.swagger_client.models.metadata_information import MetadataInformation +from lightly.openapi_generated.swagger_client.models.numeric_distribution import NumericDistribution +from lightly.openapi_generated.swagger_client.models.numeric_distribution_metrics import NumericDistributionMetrics +from lightly.openapi_generated.swagger_client.models.numeric_distribution_per_set import NumericDistributionPerSet +from lightly.openapi_generated.swagger_client.models.object_detection_prediction import ObjectDetectionPrediction from lightly.openapi_generated.swagger_client.models.prediction_singleton import PredictionSingleton from lightly.openapi_generated.swagger_client.models.prediction_singleton_base import PredictionSingletonBase from lightly.openapi_generated.swagger_client.models.prediction_singleton_classification import PredictionSingletonClassification @@ -179,6 +196,7 @@ from lightly.openapi_generated.swagger_client.models.prediction_singleton_object_detection_all_of import PredictionSingletonObjectDetectionAllOf from lightly.openapi_generated.swagger_client.models.prediction_singleton_semantic_segmentation import PredictionSingletonSemanticSegmentation from lightly.openapi_generated.swagger_client.models.prediction_singleton_semantic_segmentation_all_of import PredictionSingletonSemanticSegmentationAllOf +from lightly.openapi_generated.swagger_client.models.prediction_task_information import 
PredictionTaskInformation from lightly.openapi_generated.swagger_client.models.prediction_task_schema import PredictionTaskSchema from lightly.openapi_generated.swagger_client.models.prediction_task_schema_base import PredictionTaskSchemaBase from lightly.openapi_generated.swagger_client.models.prediction_task_schema_category import PredictionTaskSchemaCategory @@ -190,6 +208,8 @@ from lightly.openapi_generated.swagger_client.models.prediction_task_schema_simple_all_of import PredictionTaskSchemaSimpleAllOf from lightly.openapi_generated.swagger_client.models.prediction_task_schemas import PredictionTaskSchemas from lightly.openapi_generated.swagger_client.models.questionnaire_data import QuestionnaireData +from lightly.openapi_generated.swagger_client.models.report_v2 import ReportV2 +from lightly.openapi_generated.swagger_client.models.run_information import RunInformation from lightly.openapi_generated.swagger_client.models.s3_region import S3Region from lightly.openapi_generated.swagger_client.models.sama_task import SamaTask from lightly.openapi_generated.swagger_client.models.sama_task_data import SamaTaskData @@ -206,7 +226,10 @@ from lightly.openapi_generated.swagger_client.models.sampling_config_stopping_condition import SamplingConfigStoppingCondition from lightly.openapi_generated.swagger_client.models.sampling_create_request import SamplingCreateRequest from lightly.openapi_generated.swagger_client.models.sampling_method import SamplingMethod +from lightly.openapi_generated.swagger_client.models.scatter_plot_data import ScatterPlotData +from lightly.openapi_generated.swagger_client.models.scatter_plot_example_image import ScatterPlotExampleImage from lightly.openapi_generated.swagger_client.models.sector import Sector +from lightly.openapi_generated.swagger_client.models.selected_and_removed_image_pair import SelectedAndRemovedImagePair from lightly.openapi_generated.swagger_client.models.selection_config import SelectionConfig from 
lightly.openapi_generated.swagger_client.models.selection_config_all_of import SelectionConfigAllOf from lightly.openapi_generated.swagger_client.models.selection_config_base import SelectionConfigBase @@ -257,6 +280,7 @@ from lightly.openapi_generated.swagger_client.models.tag_data import TagData from lightly.openapi_generated.swagger_client.models.tag_update_request import TagUpdateRequest from lightly.openapi_generated.swagger_client.models.tag_upsize_request import TagUpsizeRequest +from lightly.openapi_generated.swagger_client.models.task_annotation_savings import TaskAnnotationSavings from lightly.openapi_generated.swagger_client.models.task_type import TaskType from lightly.openapi_generated.swagger_client.models.team_basic_data import TeamBasicData from lightly.openapi_generated.swagger_client.models.team_data import TeamData @@ -266,4 +290,5 @@ from lightly.openapi_generated.swagger_client.models.update_team_membership_request import UpdateTeamMembershipRequest from lightly.openapi_generated.swagger_client.models.user_type import UserType from lightly.openapi_generated.swagger_client.models.video_frame_data import VideoFrameData +from lightly.openapi_generated.swagger_client.models.worker_information import WorkerInformation from lightly.openapi_generated.swagger_client.models.write_csv_url_data import WriteCSVUrlData diff --git a/lightly/openapi_generated/swagger_client/models/active_learning_score_types_v2_data.py b/lightly/openapi_generated/swagger_client/models/active_learning_score_types_v2_data.py index 5455a44d3..050582f64 100644 --- a/lightly/openapi_generated/swagger_client/models/active_learning_score_types_v2_data.py +++ b/lightly/openapi_generated/swagger_client/models/active_learning_score_types_v2_data.py @@ -29,7 +29,7 @@ class ActiveLearningScoreTypesV2Data(BaseModel): id: constr(strict=True) = Field(..., description="MongoDB ObjectId") dataset_id: constr(strict=True) = Field(..., alias="datasetId", description="MongoDB ObjectId") 
prediction_uuid_timestamp: conint(strict=True, ge=0) = Field(..., alias="predictionUUIDTimestamp", description="unix timestamp in milliseconds") - task_name: constr(strict=True, min_length=1) = Field(..., alias="taskName", description="A name which is safe to have as a file/folder name in a file system") + task_name: constr(strict=True) = Field(..., alias="taskName", description="Since we sometimes stitch together SelectionInputTask+ActiveLearningScoreType, they need to follow the same specs of ActiveLearningScoreType. However, this can be an empty string due to internal logic (no minLength). Also v2config.filespecs.ts has this pattern for predictionTaskJSONSchema as well. ") score_type: constr(strict=True, min_length=1) = Field(..., alias="scoreType", description="Type of active learning score") created_at: conint(strict=True, ge=0) = Field(..., alias="createdAt", description="unix timestamp in milliseconds") __properties = ["id", "datasetId", "predictionUUIDTimestamp", "taskName", "scoreType", "createdAt"] @@ -51,8 +51,8 @@ def dataset_id_validate_regular_expression(cls, value): @validator('task_name') def task_name_validate_regular_expression(cls, value): """Validates the regular expression""" - if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$", value): - raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$/") + if not re.match(r"^[a-zA-Z0-9_+=,.@:\/-]*$", value): + raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9_+=,.@:\/-]*$/") return value @validator('score_type') diff --git a/lightly/openapi_generated/swagger_client/models/active_learning_score_v2_data.py b/lightly/openapi_generated/swagger_client/models/active_learning_score_v2_data.py index 1970b7c58..290ddee31 100644 --- a/lightly/openapi_generated/swagger_client/models/active_learning_score_v2_data.py +++ b/lightly/openapi_generated/swagger_client/models/active_learning_score_v2_data.py @@ -29,7 +29,7 @@ class ActiveLearningScoreV2Data(BaseModel): id: 
constr(strict=True) = Field(..., description="MongoDB ObjectId") dataset_id: constr(strict=True) = Field(..., alias="datasetId", description="MongoDB ObjectId") prediction_uuid_timestamp: conint(strict=True, ge=0) = Field(..., alias="predictionUUIDTimestamp", description="unix timestamp in milliseconds") - task_name: constr(strict=True, min_length=1) = Field(..., alias="taskName", description="A name which is safe to have as a file/folder name in a file system") + task_name: constr(strict=True) = Field(..., alias="taskName", description="Since we sometimes stitch together SelectionInputTask+ActiveLearningScoreType, they need to follow the same specs of ActiveLearningScoreType. However, this can be an empty string due to internal logic (no minLength). Also v2config.filespecs.ts has this pattern for predictionTaskJSONSchema as well. ") score_type: constr(strict=True, min_length=1) = Field(..., alias="scoreType", description="Type of active learning score") scores: conlist(Union[StrictFloat, StrictInt], min_items=1) = Field(..., description="Array of active learning scores") created_at: conint(strict=True, ge=0) = Field(..., alias="createdAt", description="unix timestamp in milliseconds") @@ -52,8 +52,8 @@ def dataset_id_validate_regular_expression(cls, value): @validator('task_name') def task_name_validate_regular_expression(cls, value): """Validates the regular expression""" - if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$", value): - raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$/") + if not re.match(r"^[a-zA-Z0-9_+=,.@:\/-]*$", value): + raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9_+=,.@:\/-]*$/") return value @validator('score_type') diff --git a/lightly/openapi_generated/swagger_client/models/annotation_savings.py b/lightly/openapi_generated/swagger_client/models/annotation_savings.py new file mode 100644 index 000000000..d7bcc4fb2 --- /dev/null +++ 
class AnnotationSavings(BaseModel):
    """
    AnnotationSavings

    Estimated annotation savings broken down by task type; every field is a
    required nested ``TaskAnnotationSavings``.

    NOTE: auto-generated OpenAPI model — keep the public interface in sync
    with the API spec.
    """
    image_classification: TaskAnnotationSavings = Field(..., alias="imageClassification")
    object_detection: TaskAnnotationSavings = Field(..., alias="objectDetection")
    semantic_segmentation: TaskAnnotationSavings = Field(..., alias="semanticSegmentation")
    # Wire-format (camelCase) property names accepted by from_dict().
    __properties = ["imageClassification", "objectDetection", "semanticSegmentation"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> AnnotationSavings:
        """Create an instance of AnnotationSavings from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model"""
        serialized = self.dict(by_alias=by_alias,
                               exclude={},
                               exclude_none=True)
        # Re-serialize each nested model through its own to_dict() so alias
        # handling is applied recursively, overriding pydantic's default.
        for field_name, alias_name in (
            ("image_classification", "imageClassification"),
            ("object_detection", "objectDetection"),
            ("semantic_segmentation", "semanticSegmentation"),
        ):
            nested = getattr(self, field_name)
            if nested:
                serialized[alias_name if by_alias else field_name] = nested.to_dict(by_alias=by_alias)
        return serialized

    @classmethod
    def from_dict(cls, obj: dict) -> AnnotationSavings:
        """Create an instance of AnnotationSavings from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return AnnotationSavings.parse_obj(obj)

        # Reject any key that is not part of the declared wire format.
        if any(key not in cls.__properties for key in obj.keys()):
            raise ValueError("Error due to additional fields (not defined in AnnotationSavings) in the input: " + str(obj))

        def _nested(key: str):
            # Parse a nested TaskAnnotationSavings, passing None through so
            # pydantic reports the missing required field itself.
            raw = obj.get(key)
            return TaskAnnotationSavings.from_dict(raw) if raw is not None else None

        return AnnotationSavings.parse_obj({
            "image_classification": _nested("imageClassification"),
            "object_detection": _nested("objectDetection"),
            "semantic_segmentation": _nested("semanticSegmentation"),
        })
class CategoricalDistribution(BaseModel):
    """
    CategoricalDistribution

    Distribution of a categorical value: raw per-category ``counts`` plus
    summary ``metrics`` computed from them.

    NOTE: auto-generated OpenAPI model — keep the public interface in sync
    with the API spec.
    """
    counts: Dict[str, StrictInt] = Field(...)
    metrics: CategoricalDistributionMetrics = Field(...)
    # Wire-format property names accepted by from_dict().
    __properties = ["counts", "metrics"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> CategoricalDistribution:
        """Create an instance of CategoricalDistribution from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model"""
        serialized = self.dict(by_alias=by_alias,
                               exclude={},
                               exclude_none=True)
        # Re-serialize the nested metrics model through its own to_dict();
        # the field has no alias, so the output key is "metrics" either way.
        if self.metrics:
            serialized["metrics"] = self.metrics.to_dict(by_alias=by_alias)
        return serialized

    @classmethod
    def from_dict(cls, obj: dict) -> CategoricalDistribution:
        """Create an instance of CategoricalDistribution from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return CategoricalDistribution.parse_obj(obj)

        # Reject any key that is not part of the declared wire format.
        if any(key not in cls.__properties for key in obj.keys()):
            raise ValueError("Error due to additional fields (not defined in CategoricalDistribution) in the input: " + str(obj))

        raw_metrics = obj.get("metrics")
        return CategoricalDistribution.parse_obj({
            "counts": obj.get("counts"),
            "metrics": CategoricalDistributionMetrics.from_dict(raw_metrics) if raw_metrics is not None else None,
        })
class CategoricalDistributionMetrics(BaseModel):
    """
    CategoricalDistributionMetrics

    Imbalance summary statistics of a categorical distribution.

    NOTE: auto-generated OpenAPI model — keep the public interface in sync
    with the API spec.
    """
    gini_imbalance: Union[StrictFloat, StrictInt] = Field(..., alias="giniImbalance")
    normalized_entropy: Union[StrictFloat, StrictInt] = Field(..., alias="normalizedEntropy")
    # Wire-format (camelCase) property names accepted by from_dict().
    __properties = ["giniImbalance", "normalizedEntropy"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> CategoricalDistributionMetrics:
        """Create an instance of CategoricalDistributionMetrics from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model"""
        # No nested models here, so pydantic's own serialization suffices.
        return self.dict(by_alias=by_alias,
                         exclude={},
                         exclude_none=True)

    @classmethod
    def from_dict(cls, obj: dict) -> CategoricalDistributionMetrics:
        """Create an instance of CategoricalDistributionMetrics from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return CategoricalDistributionMetrics.parse_obj(obj)

        # Reject any key that is not part of the declared wire format.
        if any(key not in cls.__properties for key in obj.keys()):
            raise ValueError("Error due to additional fields (not defined in CategoricalDistributionMetrics) in the input: " + str(obj))

        return CategoricalDistributionMetrics.parse_obj({
            "gini_imbalance": obj.get("giniImbalance"),
            "normalized_entropy": obj.get("normalizedEntropy"),
        })
class CategoricalDistributionPerSet(BaseModel):
    """
    CategoricalDistributionPerSet

    The same categorical distribution evaluated over several sample sets:
    ``input``, ``selected`` and ``random`` are always present; the two
    datapool-related sets are optional.

    NOTE: auto-generated OpenAPI model — keep the public interface in sync
    with the API spec.
    """
    input: CategoricalDistribution = Field(...)
    selected: CategoricalDistribution = Field(...)
    random: CategoricalDistribution = Field(...)
    preselected_datapool: Optional[CategoricalDistribution] = Field(None, alias="preselectedDatapool")
    selected_with_datapool: Optional[CategoricalDistribution] = Field(None, alias="selectedWithDatapool")
    # Wire-format property names accepted by from_dict().
    __properties = ["input", "selected", "random", "preselectedDatapool", "selectedWithDatapool"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> CategoricalDistributionPerSet:
        """Create an instance of CategoricalDistributionPerSet from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model"""
        serialized = self.dict(by_alias=by_alias,
                               exclude={},
                               exclude_none=True)
        # Re-serialize each nested model through its own to_dict() so alias
        # handling is applied recursively; unset optionals are skipped.
        for field_name, alias_name in (
            ("input", "input"),
            ("selected", "selected"),
            ("random", "random"),
            ("preselected_datapool", "preselectedDatapool"),
            ("selected_with_datapool", "selectedWithDatapool"),
        ):
            nested = getattr(self, field_name)
            if nested:
                serialized[alias_name if by_alias else field_name] = nested.to_dict(by_alias=by_alias)
        return serialized

    @classmethod
    def from_dict(cls, obj: dict) -> CategoricalDistributionPerSet:
        """Create an instance of CategoricalDistributionPerSet from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return CategoricalDistributionPerSet.parse_obj(obj)

        # Reject any key that is not part of the declared wire format.
        if any(key not in cls.__properties for key in obj.keys()):
            raise ValueError("Error due to additional fields (not defined in CategoricalDistributionPerSet) in the input: " + str(obj))

        def _nested(key: str):
            # Parse a nested CategoricalDistribution, passing None through.
            raw = obj.get(key)
            return CategoricalDistribution.from_dict(raw) if raw is not None else None

        return CategoricalDistributionPerSet.parse_obj({
            "input": _nested("input"),
            "selected": _nested("selected"),
            "random": _nested("random"),
            "preselected_datapool": _nested("preselectedDatapool"),
            "selected_with_datapool": _nested("selectedWithDatapool"),
        })
Needs to be a positive integer but can be any integer (gaps are allowed, does not need to be sequential)") prediction_task_score: Union[confloat(le=1, ge=0, strict=True), conint(le=1, ge=0, strict=True)] = Field(..., alias="predictionTaskScore", description="the score for the prediction task which yielded this crop") __properties = ["parentId", "predictionUUIDTimestamp", "predictionIndex", "predictionTaskName", "predictionTaskCategoryId", "predictionTaskScore"] @@ -44,8 +44,8 @@ def parent_id_validate_regular_expression(cls, value): @validator('prediction_task_name') def prediction_task_name_validate_regular_expression(cls, value): """Validates the regular expression""" - if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$", value): - raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$/") + if not re.match(r"^[a-zA-Z0-9_+=,.@:\/-]*$", value): + raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9_+=,.@:\/-]*$/") return value class Config: diff --git a/lightly/openapi_generated/swagger_client/models/dataset_analysis.py b/lightly/openapi_generated/swagger_client/models/dataset_analysis.py new file mode 100644 index 000000000..88ed53c0a --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/dataset_analysis.py @@ -0,0 +1,88 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + + +from pydantic import Extra, BaseModel, Field +from lightly.openapi_generated.swagger_client.models.embedding_information import EmbeddingInformation +from lightly.openapi_generated.swagger_client.models.metadata_information import MetadataInformation + +class DatasetAnalysis(BaseModel): + """ + DatasetAnalysis + """ + embedding_information: EmbeddingInformation = Field(..., alias="embeddingInformation") + metadata_information: MetadataInformation = Field(..., alias="metadataInformation") + __properties = ["embeddingInformation", "metadataInformation"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, json_str: str) -> DatasetAnalysis: + """Create an instance of DatasetAnalysis from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + # override the default output from pydantic by calling `to_dict()` of embedding_information + if self.embedding_information: + _dict['embeddingInformation' if by_alias else 'embedding_information'] = self.embedding_information.to_dict(by_alias=by_alias) + # override the default output from pydantic by calling `to_dict()` of metadata_information + if self.metadata_information: + _dict['metadataInformation' if by_alias else 'metadata_information'] = 
self.metadata_information.to_dict(by_alias=by_alias) + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> DatasetAnalysis: + """Create an instance of DatasetAnalysis from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return DatasetAnalysis.parse_obj(obj) + + # raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise ValueError("Error due to additional fields (not defined in DatasetAnalysis) in the input: " + str(obj)) + + _obj = DatasetAnalysis.parse_obj({ + "embedding_information": EmbeddingInformation.from_dict(obj.get("embeddingInformation")) if obj.get("embeddingInformation") is not None else None, + "metadata_information": MetadataInformation.from_dict(obj.get("metadataInformation")) if obj.get("metadataInformation") is not None else None + }) + return _obj + diff --git a/lightly/openapi_generated/swagger_client/models/dataset_information.py b/lightly/openapi_generated/swagger_client/models/dataset_information.py new file mode 100644 index 000000000..af08274c5 --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/dataset_information.py @@ -0,0 +1,93 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + +from typing import Optional +from pydantic import Extra, BaseModel, Field, StrictStr +from lightly.openapi_generated.swagger_client.models.dataset_sizes import DatasetSizes + +class DatasetInformation(BaseModel): + """ + DatasetInformation + """ + dataset_sizes: DatasetSizes = Field(..., alias="datasetSizes") + video_level_sizes: Optional[DatasetSizes] = Field(None, alias="videoLevelSizes") + dataset_id: StrictStr = Field(..., alias="datasetId") + dataset_url: StrictStr = Field(..., alias="datasetUrl") + dataset_name: StrictStr = Field(..., alias="datasetName") + __properties = ["datasetSizes", "videoLevelSizes", "datasetId", "datasetUrl", "datasetName"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, json_str: str) -> DatasetInformation: + """Create an instance of DatasetInformation from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + # override the default output from pydantic by calling `to_dict()` of dataset_sizes + if self.dataset_sizes: + _dict['datasetSizes' if by_alias else 'dataset_sizes'] = self.dataset_sizes.to_dict(by_alias=by_alias) + # override the default output from pydantic by calling `to_dict()` of video_level_sizes + if self.video_level_sizes: + _dict['videoLevelSizes' if by_alias 
else 'video_level_sizes'] = self.video_level_sizes.to_dict(by_alias=by_alias) + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> DatasetInformation: + """Create an instance of DatasetInformation from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return DatasetInformation.parse_obj(obj) + + # raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise ValueError("Error due to additional fields (not defined in DatasetInformation) in the input: " + str(obj)) + + _obj = DatasetInformation.parse_obj({ + "dataset_sizes": DatasetSizes.from_dict(obj.get("datasetSizes")) if obj.get("datasetSizes") is not None else None, + "video_level_sizes": DatasetSizes.from_dict(obj.get("videoLevelSizes")) if obj.get("videoLevelSizes") is not None else None, + "dataset_id": obj.get("datasetId"), + "dataset_url": obj.get("datasetUrl"), + "dataset_name": obj.get("datasetName") + }) + return _obj + diff --git a/lightly/openapi_generated/swagger_client/models/dataset_sizes.py b/lightly/openapi_generated/swagger_client/models/dataset_sizes.py new file mode 100644 index 000000000..348b0c325 --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/dataset_sizes.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + + +from pydantic import Extra, BaseModel, Field, StrictInt + +class DatasetSizes(BaseModel): + """ + DatasetSizes + """ + n_input: StrictInt = Field(..., alias="nInput") + n_corrupt: StrictInt = Field(..., alias="nCorrupt") + n_duplicate: StrictInt = Field(..., alias="nDuplicate") + n_removed: StrictInt = Field(..., alias="nRemoved") + n_selected: StrictInt = Field(..., alias="nSelected") + n_preselected_datapool: StrictInt = Field(..., alias="nPreselectedDatapool") + n_selected_with_datapool: StrictInt = Field(..., alias="nSelectedWithDatapool") + __properties = ["nInput", "nCorrupt", "nDuplicate", "nRemoved", "nSelected", "nPreselectedDatapool", "nSelectedWithDatapool"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, json_str: str) -> DatasetSizes: + """Create an instance of DatasetSizes from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> DatasetSizes: + """Create an instance of DatasetSizes from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return DatasetSizes.parse_obj(obj) + + # raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise 
ValueError("Error due to additional fields (not defined in DatasetSizes) in the input: " + str(obj)) + + _obj = DatasetSizes.parse_obj({ + "n_input": obj.get("nInput"), + "n_corrupt": obj.get("nCorrupt"), + "n_duplicate": obj.get("nDuplicate"), + "n_removed": obj.get("nRemoved"), + "n_selected": obj.get("nSelected"), + "n_preselected_datapool": obj.get("nPreselectedDatapool"), + "n_selected_with_datapool": obj.get("nSelectedWithDatapool") + }) + return _obj + diff --git a/lightly/openapi_generated/swagger_client/models/detection_frame_prediction.py b/lightly/openapi_generated/swagger_client/models/detection_frame_prediction.py new file mode 100644 index 000000000..1dd6b4915 --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/detection_frame_prediction.py @@ -0,0 +1,88 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + +from typing import List, Optional +from pydantic import Extra, BaseModel, StrictStr, conlist +from lightly.openapi_generated.swagger_client.models.object_detection_prediction import ObjectDetectionPrediction + +class DetectionFramePrediction(BaseModel): + """ + DetectionFramePrediction + """ + filename: Optional[StrictStr] = None + predictions: Optional[conlist(ObjectDetectionPrediction)] = None + __properties = ["filename", "predictions"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, json_str: str) -> DetectionFramePrediction: + """Create an instance of DetectionFramePrediction from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + # override the default output from pydantic by calling `to_dict()` of each item in predictions (list) + _items = [] + if self.predictions: + for _item in self.predictions: + if _item: + _items.append(_item.to_dict(by_alias=by_alias)) + _dict['predictions' if by_alias else 'predictions'] = _items + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> DetectionFramePrediction: + """Create an instance of DetectionFramePrediction from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return DetectionFramePrediction.parse_obj(obj) + + # 
raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise ValueError("Error due to additional fields (not defined in DetectionFramePrediction) in the input: " + str(obj)) + + _obj = DetectionFramePrediction.parse_obj({ + "filename": obj.get("filename"), + "predictions": [ObjectDetectionPrediction.from_dict(_item) for _item in obj.get("predictions")] if obj.get("predictions") is not None else None + }) + return _obj + diff --git a/lightly/openapi_generated/swagger_client/models/detection_task_information.py b/lightly/openapi_generated/swagger_client/models/detection_task_information.py new file mode 100644 index 000000000..c2b5fa30d --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/detection_task_information.py @@ -0,0 +1,92 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + +from typing import List, Optional +from pydantic import Extra, BaseModel, Field, conlist +from lightly.openapi_generated.swagger_client.models.detection_frame_prediction import DetectionFramePrediction +from lightly.openapi_generated.swagger_client.models.embedding_information import EmbeddingInformation + +class DetectionTaskInformation(BaseModel): + """ + DetectionTaskInformation + """ + bbox_examples: conlist(DetectionFramePrediction) = Field(..., alias="bboxExamples") + crop_embedding_information: Optional[EmbeddingInformation] = Field(None, alias="cropEmbeddingInformation") + __properties = ["bboxExamples", "cropEmbeddingInformation"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, json_str: str) -> DetectionTaskInformation: + """Create an instance of DetectionTaskInformation from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + # override the default output from pydantic by calling `to_dict()` of each item in bbox_examples (list) + _items = [] + if self.bbox_examples: + for _item in self.bbox_examples: + if _item: + _items.append(_item.to_dict(by_alias=by_alias)) + _dict['bboxExamples' if by_alias else 'bbox_examples'] = _items + # override the default output from pydantic by calling `to_dict()` 
of crop_embedding_information + if self.crop_embedding_information: + _dict['cropEmbeddingInformation' if by_alias else 'crop_embedding_information'] = self.crop_embedding_information.to_dict(by_alias=by_alias) + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> DetectionTaskInformation: + """Create an instance of DetectionTaskInformation from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return DetectionTaskInformation.parse_obj(obj) + + # raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise ValueError("Error due to additional fields (not defined in DetectionTaskInformation) in the input: " + str(obj)) + + _obj = DetectionTaskInformation.parse_obj({ + "bbox_examples": [DetectionFramePrediction.from_dict(_item) for _item in obj.get("bboxExamples")] if obj.get("bboxExamples") is not None else None, + "crop_embedding_information": EmbeddingInformation.from_dict(obj.get("cropEmbeddingInformation")) if obj.get("cropEmbeddingInformation") is not None else None + }) + return _obj + diff --git a/lightly/openapi_generated/swagger_client/models/docker_worker_config_v2_docker_object_level.py b/lightly/openapi_generated/swagger_client/models/docker_worker_config_v2_docker_object_level.py index 9122bcb62..75057dc4f 100644 --- a/lightly/openapi_generated/swagger_client/models/docker_worker_config_v2_docker_object_level.py +++ b/lightly/openapi_generated/swagger_client/models/docker_worker_config_v2_docker_object_level.py @@ -28,7 +28,7 @@ class DockerWorkerConfigV2DockerObjectLevel(BaseModel): """ crop_dataset_name: Optional[constr(strict=True)] = Field(None, alias="cropDatasetName", description="Identical limitations than DatasetName however it can be empty") padding: Optional[Union[StrictFloat, StrictInt]] = None - task_name: Optional[constr(strict=True)] = Field(None, alias="taskName", description="Since we sometimes stitch together 
SelectionInputTask+ActiveLearningScoreType, they need to follow the same specs of ActiveLearningScoreType. However, this can be an empty string due to internal logic. ") + task_name: Optional[constr(strict=True)] = Field(None, alias="taskName", description="Since we sometimes stitch together SelectionInputTask+ActiveLearningScoreType, they need to follow the same specs of ActiveLearningScoreType. However, this can be an empty string due to internal logic (no minLength). Also v2config.filespecs.ts has this pattern for predictionTaskJSONSchema as well. ") __properties = ["cropDatasetName", "padding", "taskName"] @validator('crop_dataset_name') diff --git a/lightly/openapi_generated/swagger_client/models/docker_worker_config_v3_docker_training.py b/lightly/openapi_generated/swagger_client/models/docker_worker_config_v3_docker_training.py index 2fba227c8..e52cdb2ff 100644 --- a/lightly/openapi_generated/swagger_client/models/docker_worker_config_v3_docker_training.py +++ b/lightly/openapi_generated/swagger_client/models/docker_worker_config_v3_docker_training.py @@ -26,7 +26,7 @@ class DockerWorkerConfigV3DockerTraining(BaseModel): """ DockerWorkerConfigV3DockerTraining """ - task_name: Optional[constr(strict=True)] = Field(None, alias="taskName", description="Since we sometimes stitch together SelectionInputTask+ActiveLearningScoreType, they need to follow the same specs of ActiveLearningScoreType. However, this can be an empty string due to internal logic. ") + task_name: Optional[constr(strict=True)] = Field(None, alias="taskName", description="Since we sometimes stitch together SelectionInputTask+ActiveLearningScoreType, they need to follow the same specs of ActiveLearningScoreType. However, this can be an empty string due to internal logic (no minLength). Also v2config.filespecs.ts has this pattern for predictionTaskJSONSchema as well. 
") __properties = ["taskName"] @validator('task_name') diff --git a/lightly/openapi_generated/swagger_client/models/docker_worker_registry_entry_data.py b/lightly/openapi_generated/swagger_client/models/docker_worker_registry_entry_data.py index ca386c607..54861d933 100644 --- a/lightly/openapi_generated/swagger_client/models/docker_worker_registry_entry_data.py +++ b/lightly/openapi_generated/swagger_client/models/docker_worker_registry_entry_data.py @@ -36,7 +36,7 @@ class DockerWorkerRegistryEntryData(BaseModel): created_at: conint(strict=True, ge=0) = Field(..., alias="createdAt", description="unix timestamp in milliseconds") last_modified_at: conint(strict=True, ge=0) = Field(..., alias="lastModifiedAt", description="unix timestamp in milliseconds") labels: conlist(StrictStr) = Field(..., description="The labels used for specifying the run-worker-relationship") - is_default: StrictBool = Field(..., alias="isDefault", description="If true, this worker was created by the API/System and not by a user. ") + is_default: Optional[StrictBool] = Field(None, alias="isDefault", description="If true, this worker was created by the API/System and not by a user. 
") docker_version: Optional[StrictStr] = Field(None, alias="dockerVersion") __properties = ["id", "userId", "name", "workerType", "state", "createdAt", "lastModifiedAt", "labels", "isDefault", "dockerVersion"] @@ -105,7 +105,7 @@ def from_dict(cls, obj: dict) -> DockerWorkerRegistryEntryData: "created_at": obj.get("createdAt"), "last_modified_at": obj.get("lastModifiedAt"), "labels": obj.get("labels"), - "is_default": obj.get("isDefault") if obj.get("isDefault") is not None else False, + "is_default": obj.get("isDefault"), "docker_version": obj.get("dockerVersion") }) return _obj diff --git a/lightly/openapi_generated/swagger_client/models/embedding_data2_d.py b/lightly/openapi_generated/swagger_client/models/embedding_data2_d.py new file mode 100644 index 000000000..e097a3f3d --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/embedding_data2_d.py @@ -0,0 +1,92 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + +from typing import List, Optional, Union +from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, conlist +from lightly.openapi_generated.swagger_client.models.scatter_plot_example_image import ScatterPlotExampleImage + +class EmbeddingData2D(BaseModel): + """ + EmbeddingData2D + """ + input2d_embeddings: conlist(conlist(Union[StrictFloat, StrictInt], max_items=2, min_items=2)) = Field(..., alias="input2dEmbeddings") + datapool2d_embeddings: Optional[conlist(conlist(Union[StrictFloat, StrictInt], max_items=2, min_items=2))] = Field(None, alias="datapool2dEmbeddings") + selected_excluding_datapool2d_embeddings: conlist(conlist(Union[StrictFloat, StrictInt], max_items=2, min_items=2)) = Field(..., alias="selectedExcludingDatapool2dEmbeddings") + example_images: conlist(ScatterPlotExampleImage) = Field(..., alias="exampleImages") + __properties = ["input2dEmbeddings", "datapool2dEmbeddings", "selectedExcludingDatapool2dEmbeddings", "exampleImages"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, json_str: str) -> EmbeddingData2D: + """Create an instance of EmbeddingData2D from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + # override the default output from pydantic by calling `to_dict()` 
of each item in example_images (list) + _items = [] + if self.example_images: + for _item in self.example_images: + if _item: + _items.append(_item.to_dict(by_alias=by_alias)) + _dict['exampleImages' if by_alias else 'example_images'] = _items + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> EmbeddingData2D: + """Create an instance of EmbeddingData2D from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return EmbeddingData2D.parse_obj(obj) + + # raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise ValueError("Error due to additional fields (not defined in EmbeddingData2D) in the input: " + str(obj)) + + _obj = EmbeddingData2D.parse_obj({ + "input2d_embeddings": obj.get("input2dEmbeddings"), + "datapool2d_embeddings": obj.get("datapool2dEmbeddings"), + "selected_excluding_datapool2d_embeddings": obj.get("selectedExcludingDatapool2dEmbeddings"), + "example_images": [ScatterPlotExampleImage.from_dict(_item) for _item in obj.get("exampleImages")] if obj.get("exampleImages") is not None else None + }) + return _obj + diff --git a/lightly/openapi_generated/swagger_client/models/embedding_information.py b/lightly/openapi_generated/swagger_client/models/embedding_information.py new file mode 100644 index 000000000..cf3c1f0b6 --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/embedding_information.py @@ -0,0 +1,93 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + + +from pydantic import Extra, BaseModel, Field +from lightly.openapi_generated.swagger_client.models.numeric_distribution_per_set import NumericDistributionPerSet +from lightly.openapi_generated.swagger_client.models.scatter_plot_data import ScatterPlotData + +class EmbeddingInformation(BaseModel): + """ + EmbeddingInformation + """ + distance_to_nearest_neighbor_in_set: NumericDistributionPerSet = Field(..., alias="distanceToNearestNeighborInSet") + distance_to_cover_input: NumericDistributionPerSet = Field(..., alias="distanceToCoverInput") + scatter_plot_data: ScatterPlotData = Field(..., alias="scatterPlotData") + __properties = ["distanceToNearestNeighborInSet", "distanceToCoverInput", "scatterPlotData"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, json_str: str) -> EmbeddingInformation: + """Create an instance of EmbeddingInformation from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + # override the default output from pydantic by calling `to_dict()` of distance_to_nearest_neighbor_in_set + if self.distance_to_nearest_neighbor_in_set: + _dict['distanceToNearestNeighborInSet' if by_alias else 'distance_to_nearest_neighbor_in_set'] = 
self.distance_to_nearest_neighbor_in_set.to_dict(by_alias=by_alias) + # override the default output from pydantic by calling `to_dict()` of distance_to_cover_input + if self.distance_to_cover_input: + _dict['distanceToCoverInput' if by_alias else 'distance_to_cover_input'] = self.distance_to_cover_input.to_dict(by_alias=by_alias) + # override the default output from pydantic by calling `to_dict()` of scatter_plot_data + if self.scatter_plot_data: + _dict['scatterPlotData' if by_alias else 'scatter_plot_data'] = self.scatter_plot_data.to_dict(by_alias=by_alias) + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> EmbeddingInformation: + """Create an instance of EmbeddingInformation from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return EmbeddingInformation.parse_obj(obj) + + # raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise ValueError("Error due to additional fields (not defined in EmbeddingInformation) in the input: " + str(obj)) + + _obj = EmbeddingInformation.parse_obj({ + "distance_to_nearest_neighbor_in_set": NumericDistributionPerSet.from_dict(obj.get("distanceToNearestNeighborInSet")) if obj.get("distanceToNearestNeighborInSet") is not None else None, + "distance_to_cover_input": NumericDistributionPerSet.from_dict(obj.get("distanceToCoverInput")) if obj.get("distanceToCoverInput") is not None else None, + "scatter_plot_data": ScatterPlotData.from_dict(obj.get("scatterPlotData")) if obj.get("scatterPlotData") is not None else None + }) + return _obj + diff --git a/lightly/openapi_generated/swagger_client/models/general_information.py b/lightly/openapi_generated/swagger_client/models/general_information.py new file mode 100644 index 000000000..dced0fd80 --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/general_information.py @@ -0,0 +1,110 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do 
# lightly/openapi_generated/swagger_client/models/general_information.py
# Auto-generated by OpenAPI Generator ("Do not edit the class manually");
# only comments/docstrings were added in this review.

from __future__ import annotations
import pprint
import re  # noqa: F401
import json


from typing import List
from pydantic import Extra, BaseModel, Field, conlist
from lightly.openapi_generated.swagger_client.models.annotation_savings import AnnotationSavings
from lightly.openapi_generated.swagger_client.models.dataset_information import DatasetInformation
from lightly.openapi_generated.swagger_client.models.run_information import RunInformation
from lightly.openapi_generated.swagger_client.models.selected_and_removed_image_pair import SelectedAndRemovedImagePair
from lightly.openapi_generated.swagger_client.models.worker_information import WorkerInformation

class GeneralInformation(BaseModel):
    """
    GeneralInformation

    "General information" section of a report: worker, run and dataset
    metadata plus annotation savings and example selected/removed image
    pairs. All five fields are required (``Field(...)``); wire format uses
    the camelCase aliases.
    """
    worker_information: WorkerInformation = Field(..., alias="workerInformation")
    run_information: RunInformation = Field(..., alias="runInformation")
    dataset_information: DatasetInformation = Field(..., alias="datasetInformation")
    annotation_savings: AnnotationSavings = Field(..., alias="annotationSavings")
    selected_and_removed_image_examples: conlist(SelectedAndRemovedImagePair) = Field(..., alias="selectedAndRemovedImageExamples")
    # Wire-format (camelCase) key list; `from_dict` uses it to reject unknown
    # input keys. Name-mangled to `_GeneralInformation__properties`.
    __properties = ["workerInformation", "runInformation", "datasetInformation", "annotationSavings", "selectedAndRemovedImageExamples"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model.

        ``by_alias=True`` prints camelCase wire names instead of snake_case.
        """
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> GeneralInformation:
        """Create an instance of GeneralInformation from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False) -> dict:
        """Returns the dictionary representation of the model.

        Nested models are serialized through their own ``to_dict`` so the
        ``by_alias`` choice propagates recursively.
        """
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        # override the default output from pydantic by calling `to_dict()` of worker_information
        if self.worker_information:
            _dict['workerInformation' if by_alias else 'worker_information'] = self.worker_information.to_dict(by_alias=by_alias)
        # override the default output from pydantic by calling `to_dict()` of run_information
        if self.run_information:
            _dict['runInformation' if by_alias else 'run_information'] = self.run_information.to_dict(by_alias=by_alias)
        # override the default output from pydantic by calling `to_dict()` of dataset_information
        if self.dataset_information:
            _dict['datasetInformation' if by_alias else 'dataset_information'] = self.dataset_information.to_dict(by_alias=by_alias)
        # override the default output from pydantic by calling `to_dict()` of annotation_savings
        if self.annotation_savings:
            _dict['annotationSavings' if by_alias else 'annotation_savings'] = self.annotation_savings.to_dict(by_alias=by_alias)
        # override the default output from pydantic by calling `to_dict()` of each item in selected_and_removed_image_examples (list)
        _items = []
        if self.selected_and_removed_image_examples:
            for _item in self.selected_and_removed_image_examples:
                if _item:
                    _items.append(_item.to_dict(by_alias=by_alias))
            _dict['selectedAndRemovedImageExamples' if by_alias else 'selected_and_removed_image_examples'] = _items
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> GeneralInformation:
        """Create an instance of GeneralInformation from a dict.

        Expects camelCase (wire-format) keys; unknown keys raise ValueError.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return GeneralInformation.parse_obj(obj)

        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in GeneralInformation) in the input: " + str(obj))

        _obj = GeneralInformation.parse_obj({
            "worker_information": WorkerInformation.from_dict(obj.get("workerInformation")) if obj.get("workerInformation") is not None else None,
            "run_information": RunInformation.from_dict(obj.get("runInformation")) if obj.get("runInformation") is not None else None,
            "dataset_information": DatasetInformation.from_dict(obj.get("datasetInformation")) if obj.get("datasetInformation") is not None else None,
            "annotation_savings": AnnotationSavings.from_dict(obj.get("annotationSavings")) if obj.get("annotationSavings") is not None else None,
            "selected_and_removed_image_examples": [SelectedAndRemovedImagePair.from_dict(_item) for _item in obj.get("selectedAndRemovedImageExamples")] if obj.get("selectedAndRemovedImageExamples") is not None else None
        })
        return _obj
# lightly/openapi_generated/swagger_client/models/metadata_information.py
# Auto-generated by OpenAPI Generator ("Do not edit the class manually");
# only comments/docstrings were added in this review.

from __future__ import annotations
import pprint
import re  # noqa: F401
import json


from typing import Dict
from pydantic import Extra, BaseModel, Field
from lightly.openapi_generated.swagger_client.models.categorical_distribution_per_set import CategoricalDistributionPerSet
from lightly.openapi_generated.swagger_client.models.numeric_distribution_per_set import NumericDistributionPerSet

class MetadataInformation(BaseModel):
    """
    MetadataInformation

    Per-metadata-key distributions of a report: Lightly-computed numeric
    metadata plus custom numeric and custom categorical metadata, each keyed
    by metadata name. All three dicts are required.
    """
    lightly_numeric: Dict[str, NumericDistributionPerSet] = Field(..., alias="lightlyNumeric")
    custom_numeric: Dict[str, NumericDistributionPerSet] = Field(..., alias="customNumeric")
    custom_categorical: Dict[str, CategoricalDistributionPerSet] = Field(..., alias="customCategorical")
    # Wire-format (camelCase) key list; `from_dict` uses it to reject unknown
    # input keys. Name-mangled to `_MetadataInformation__properties`.
    __properties = ["lightlyNumeric", "customNumeric", "customCategorical"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> MetadataInformation:
        """Create an instance of MetadataInformation from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False) -> dict:
        """Returns the dictionary representation of the model.

        Dict values are serialized via their own ``to_dict`` so ``by_alias``
        propagates; dict keys (metadata names) are passed through unchanged.
        """
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        # override the default output from pydantic by calling `to_dict()` of each value in lightly_numeric (dict)
        _field_dict = {}
        if self.lightly_numeric:
            for _key in self.lightly_numeric:
                if self.lightly_numeric[_key]:
                    _field_dict[_key] = self.lightly_numeric[_key].to_dict(by_alias=by_alias)
            _dict['lightlyNumeric' if by_alias else 'lightly_numeric'] = _field_dict
        # override the default output from pydantic by calling `to_dict()` of each value in custom_numeric (dict)
        _field_dict = {}
        if self.custom_numeric:
            for _key in self.custom_numeric:
                if self.custom_numeric[_key]:
                    _field_dict[_key] = self.custom_numeric[_key].to_dict(by_alias=by_alias)
            _dict['customNumeric' if by_alias else 'custom_numeric'] = _field_dict
        # override the default output from pydantic by calling `to_dict()` of each value in custom_categorical (dict)
        _field_dict = {}
        if self.custom_categorical:
            for _key in self.custom_categorical:
                if self.custom_categorical[_key]:
                    _field_dict[_key] = self.custom_categorical[_key].to_dict(by_alias=by_alias)
            _dict['customCategorical' if by_alias else 'custom_categorical'] = _field_dict
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> MetadataInformation:
        """Create an instance of MetadataInformation from a dict.

        Expects camelCase (wire-format) keys; unknown keys raise ValueError.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return MetadataInformation.parse_obj(obj)

        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in MetadataInformation) in the input: " + str(obj))

        _obj = MetadataInformation.parse_obj({
            "lightly_numeric": dict(
                (_k, NumericDistributionPerSet.from_dict(_v))
                for _k, _v in obj.get("lightlyNumeric").items()
            )
            if obj.get("lightlyNumeric") is not None
            else None,
            "custom_numeric": dict(
                (_k, NumericDistributionPerSet.from_dict(_v))
                for _k, _v in obj.get("customNumeric").items()
            )
            if obj.get("customNumeric") is not None
            else None,
            "custom_categorical": dict(
                (_k, CategoricalDistributionPerSet.from_dict(_v))
                for _k, _v in obj.get("customCategorical").items()
            )
            if obj.get("customCategorical") is not None
            else None
        })
        return _obj
# lightly/openapi_generated/swagger_client/models/numeric_distribution.py
# Auto-generated by OpenAPI Generator ("Do not edit the class manually");
# only comments/docstrings were added in this review.

from __future__ import annotations
import pprint
import re  # noqa: F401
import json


from typing import List, Union
from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, conlist
from lightly.openapi_generated.swagger_client.models.numeric_distribution_metrics import NumericDistributionMetrics

class NumericDistribution(BaseModel):
    """
    NumericDistribution

    Histogram counts of a numeric quantity together with its summary
    metrics (mean/std/min/median/max). Both fields are required.
    """
    counts: conlist(Union[StrictFloat, StrictInt]) = Field(...)
    metrics: NumericDistributionMetrics = Field(...)
    # Wire-format key list; `from_dict` uses it to reject unknown input keys.
    # Name-mangled to `_NumericDistribution__properties`.
    __properties = ["counts", "metrics"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> NumericDistribution:
        """Create an instance of NumericDistribution from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False) -> dict:
        """Returns the dictionary representation of the model"""
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        # override the default output from pydantic by calling `to_dict()` of metrics
        if self.metrics:
            # 'metrics' has no alias, so both branches use the same key.
            _dict['metrics' if by_alias else 'metrics'] = self.metrics.to_dict(by_alias=by_alias)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> NumericDistribution:
        """Create an instance of NumericDistribution from a dict.

        Unknown keys raise ValueError (mirrors ``Extra.forbid``).
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return NumericDistribution.parse_obj(obj)

        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in NumericDistribution) in the input: " + str(obj))

        _obj = NumericDistribution.parse_obj({
            "counts": obj.get("counts"),
            "metrics": NumericDistributionMetrics.from_dict(obj.get("metrics")) if obj.get("metrics") is not None else None
        })
        return _obj
# lightly/openapi_generated/swagger_client/models/numeric_distribution_metrics.py
# Auto-generated by OpenAPI Generator ("Do not edit the class manually");
# only comments/docstrings were added in this review.

from __future__ import annotations
import pprint
import re  # noqa: F401
import json


from typing import Union
from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, confloat, conint

class NumericDistributionMetrics(BaseModel):
    """
    NumericDistributionMetrics

    Summary statistics of a numeric distribution. All fields are required;
    ``std`` is constrained to be non-negative.
    """
    mean: Union[StrictFloat, StrictInt] = Field(...)
    # std must be >= 0 (ge=0 constraint on both float and int branches).
    std: Union[confloat(ge=0, strict=True), conint(ge=0, strict=True)] = Field(...)
    min: Union[StrictFloat, StrictInt] = Field(...)
    median: Union[StrictFloat, StrictInt] = Field(...)
    max: Union[StrictFloat, StrictInt] = Field(...)
    # Wire-format key list; `from_dict` uses it to reject unknown input keys.
    # Name-mangled to `_NumericDistributionMetrics__properties`.
    __properties = ["mean", "std", "min", "median", "max"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> NumericDistributionMetrics:
        """Create an instance of NumericDistributionMetrics from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False) -> dict:
        """Returns the dictionary representation of the model"""
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> NumericDistributionMetrics:
        """Create an instance of NumericDistributionMetrics from a dict.

        Unknown keys raise ValueError (mirrors ``Extra.forbid``).
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return NumericDistributionMetrics.parse_obj(obj)

        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in NumericDistributionMetrics) in the input: " + str(obj))

        _obj = NumericDistributionMetrics.parse_obj({
            "mean": obj.get("mean"),
            "std": obj.get("std"),
            "min": obj.get("min"),
            "median": obj.get("median"),
            "max": obj.get("max")
        })
        return _obj
# lightly/openapi_generated/swagger_client/models/numeric_distribution_per_set.py
# Auto-generated by OpenAPI Generator ("Do not edit the class manually");
# only comments/docstrings were added in this review.

from __future__ import annotations
import pprint
import re  # noqa: F401
import json


from typing import List, Optional, Union
from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, conlist
from lightly.openapi_generated.swagger_client.models.numeric_distribution import NumericDistribution

class NumericDistributionPerSet(BaseModel):
    """
    NumericDistributionPerSet

    The same numeric distribution computed over several sample sets
    (input / selected / random / datapool variants) sharing one binning.
    Only ``bins``, ``range`` and ``selected`` are required.
    """
    bins: StrictInt = Field(...)
    range: conlist(Union[StrictFloat, StrictInt], max_items=2, min_items=2) = Field(..., description="Tuple representing the range, converted to an array of two floats.")
    input: Optional[NumericDistribution] = None
    selected: NumericDistribution = Field(...)
    random: Optional[NumericDistribution] = None
    preselected_datapool: Optional[NumericDistribution] = Field(None, alias="preselectedDatapool")
    selected_with_datapool: Optional[NumericDistribution] = Field(None, alias="selectedWithDatapool")
    # Wire-format (camelCase) key list; `from_dict` uses it to reject unknown
    # input keys. Name-mangled to `_NumericDistributionPerSet__properties`.
    __properties = ["bins", "range", "input", "selected", "random", "preselectedDatapool", "selectedWithDatapool"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> NumericDistributionPerSet:
        """Create an instance of NumericDistributionPerSet from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False) -> dict:
        """Returns the dictionary representation of the model.

        Each nested NumericDistribution is serialized via its own ``to_dict``
        so ``by_alias`` propagates.
        """
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        # override the default output from pydantic by calling `to_dict()` of input
        if self.input:
            _dict['input' if by_alias else 'input'] = self.input.to_dict(by_alias=by_alias)
        # override the default output from pydantic by calling `to_dict()` of selected
        if self.selected:
            _dict['selected' if by_alias else 'selected'] = self.selected.to_dict(by_alias=by_alias)
        # override the default output from pydantic by calling `to_dict()` of random
        if self.random:
            _dict['random' if by_alias else 'random'] = self.random.to_dict(by_alias=by_alias)
        # override the default output from pydantic by calling `to_dict()` of preselected_datapool
        if self.preselected_datapool:
            _dict['preselectedDatapool' if by_alias else 'preselected_datapool'] = self.preselected_datapool.to_dict(by_alias=by_alias)
        # override the default output from pydantic by calling `to_dict()` of selected_with_datapool
        if self.selected_with_datapool:
            _dict['selectedWithDatapool' if by_alias else 'selected_with_datapool'] = self.selected_with_datapool.to_dict(by_alias=by_alias)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> NumericDistributionPerSet:
        """Create an instance of NumericDistributionPerSet from a dict.

        Expects camelCase (wire-format) keys; unknown keys raise ValueError.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return NumericDistributionPerSet.parse_obj(obj)

        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in NumericDistributionPerSet) in the input: " + str(obj))

        _obj = NumericDistributionPerSet.parse_obj({
            "bins": obj.get("bins"),
            "range": obj.get("range"),
            "input": NumericDistribution.from_dict(obj.get("input")) if obj.get("input") is not None else None,
            "selected": NumericDistribution.from_dict(obj.get("selected")) if obj.get("selected") is not None else None,
            "random": NumericDistribution.from_dict(obj.get("random")) if obj.get("random") is not None else None,
            "preselected_datapool": NumericDistribution.from_dict(obj.get("preselectedDatapool")) if obj.get("preselectedDatapool") is not None else None,
            "selected_with_datapool": NumericDistribution.from_dict(obj.get("selectedWithDatapool")) if obj.get("selectedWithDatapool") is not None else None
        })
        return _obj
# lightly/openapi_generated/swagger_client/models/object_detection_prediction.py
# Auto-generated by OpenAPI Generator ("Do not edit the class manually");
# only comments/docstrings were added in this review.

from __future__ import annotations
import pprint
import re  # noqa: F401
import json


from typing import List, Optional, Union
from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, confloat, conint, conlist

class ObjectDetectionPrediction(BaseModel):
    """
    ObjectDetectionPrediction

    A single object-detection prediction: category, bounding box, confidence
    score, optional (nullable) per-category probabilities, and epsilon.
    """
    category_id: StrictInt = Field(..., alias="categoryId", description="Category id of the prediction.")
    bbox: conlist(Union[StrictFloat, StrictInt]) = Field(..., description="Bounding box in (x, y, width, height) format where (x=0, y=0) is the top-left corner of the image.")
    # score is constrained to [0, 1] via confloat/conint bounds.
    score: Union[confloat(le=1, ge=0, strict=True), conint(le=1, ge=0, strict=True)] = Field(..., description="Detection confidence, range [0, 1].")
    probabilities: Optional[conlist(Union[confloat(ge=0, strict=True), conint(ge=0, strict=True)])] = Field(None, description="List with probability for each possible category (must sum to 1).")
    epsilon: Union[confloat(ge=0, strict=True), conint(ge=0, strict=True)] = Field(...)
    # Wire-format (camelCase) key list; `from_dict` uses it to reject unknown
    # input keys. Name-mangled to `_ObjectDetectionPrediction__properties`.
    __properties = ["categoryId", "bbox", "score", "probabilities", "epsilon"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> ObjectDetectionPrediction:
        """Create an instance of ObjectDetectionPrediction from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False) -> dict:
        """Returns the dictionary representation of the model.

        ``probabilities`` is nullable: if it was explicitly set to None it is
        re-inserted as None even though ``exclude_none=True`` dropped it.
        """
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        # set to None if probabilities (nullable) is None
        # and __fields_set__ contains the field
        if self.probabilities is None and "probabilities" in self.__fields_set__:
            _dict['probabilities' if by_alias else 'probabilities'] = None

        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> ObjectDetectionPrediction:
        """Create an instance of ObjectDetectionPrediction from a dict.

        Expects camelCase (wire-format) keys; unknown keys raise ValueError.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return ObjectDetectionPrediction.parse_obj(obj)

        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in ObjectDetectionPrediction) in the input: " + str(obj))

        _obj = ObjectDetectionPrediction.parse_obj({
            "category_id": obj.get("categoryId"),
            "bbox": obj.get("bbox"),
            "score": obj.get("score"),
            "probabilities": obj.get("probabilities"),
            "epsilon": obj.get("epsilon")
        })
        return _obj
a/lightly/openapi_generated/swagger_client/models/prediction_singleton_base.py b/lightly/openapi_generated/swagger_client/models/prediction_singleton_base.py index caff2751a..668c1280c 100644 --- a/lightly/openapi_generated/swagger_client/models/prediction_singleton_base.py +++ b/lightly/openapi_generated/swagger_client/models/prediction_singleton_base.py @@ -28,7 +28,7 @@ class PredictionSingletonBase(BaseModel): PredictionSingletonBase """ type: StrictStr = Field(...) - task_name: constr(strict=True, min_length=1) = Field(..., alias="taskName", description="A name which is safe to have as a file/folder name in a file system") + task_name: constr(strict=True) = Field(..., alias="taskName", description="Since we sometimes stitch together SelectionInputTask+ActiveLearningScoreType, they need to follow the same specs of ActiveLearningScoreType. However, this can be an empty string due to internal logic (no minLength). Also v2config.filespecs.ts has this pattern for predictionTaskJSONSchema as well. ") crop_dataset_id: Optional[constr(strict=True)] = Field(None, alias="cropDatasetId", description="MongoDB ObjectId") crop_sample_id: Optional[constr(strict=True)] = Field(None, alias="cropSampleId", description="MongoDB ObjectId") category_id: conint(strict=True, ge=0) = Field(..., alias="categoryId", description="The id of the category. 
Needs to be a positive integer but can be any integer (gaps are allowed, does not need to be sequential)") @@ -38,8 +38,8 @@ class PredictionSingletonBase(BaseModel): @validator('task_name') def task_name_validate_regular_expression(cls, value): """Validates the regular expression""" - if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$", value): - raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$/") + if not re.match(r"^[a-zA-Z0-9_+=,.@:\/-]*$", value): + raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9_+=,.@:\/-]*$/") return value @validator('crop_dataset_id') diff --git a/lightly/openapi_generated/swagger_client/models/prediction_task_information.py b/lightly/openapi_generated/swagger_client/models/prediction_task_information.py new file mode 100644 index 000000000..ebfc742c3 --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/prediction_task_information.py @@ -0,0 +1,131 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
# lightly/openapi_generated/swagger_client/models/prediction_task_information.py
# Auto-generated by OpenAPI Generator ("Do not edit the class manually");
# only comments/docstrings were added in this review.

from __future__ import annotations
import pprint
import re  # noqa: F401
import json


from typing import Dict, Optional
from pydantic import Extra, BaseModel, Field, StrictStr
from lightly.openapi_generated.swagger_client.models.categorical_distribution_per_set import CategoricalDistributionPerSet
from lightly.openapi_generated.swagger_client.models.detection_task_information import DetectionTaskInformation
from lightly.openapi_generated.swagger_client.models.numeric_distribution_per_set import NumericDistributionPerSet

class PredictionTaskInformation(BaseModel):
    """
    PredictionTaskInformation

    Report section for a single prediction task: task type, category-id to
    name mapping, active-learning score distributions (keyed by score name),
    category distributions and per-sample counts, plus optional
    detection-specific information.
    """
    task_type: StrictStr = Field(..., alias="taskType")
    category_id_to_name: Dict[str, StrictStr] = Field(..., alias="categoryIdToName")
    active_learning_score_distributions: Dict[str, NumericDistributionPerSet] = Field(..., alias="activeLearningScoreDistributions")
    category_distribution: CategoricalDistributionPerSet = Field(..., alias="categoryDistribution")
    category_to_counts_per_sample: Dict[str, NumericDistributionPerSet] = Field(..., alias="categoryToCountsPerSample")
    prediction_score_distribution: Optional[NumericDistributionPerSet] = Field(None, alias="predictionScoreDistribution")
    detection_task_information: Optional[DetectionTaskInformation] = Field(None, alias="detectionTaskInformation")
    category_to_image_with_category_count: Optional[CategoricalDistributionPerSet] = Field(None, alias="categoryToImageWithCategoryCount")
    # Wire-format (camelCase) key list; `from_dict` uses it to reject unknown
    # input keys. Name-mangled to `_PredictionTaskInformation__properties`.
    __properties = ["taskType", "categoryIdToName", "activeLearningScoreDistributions", "categoryDistribution", "categoryToCountsPerSample", "predictionScoreDistribution", "detectionTaskInformation", "categoryToImageWithCategoryCount"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> PredictionTaskInformation:
        """Create an instance of PredictionTaskInformation from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False) -> dict:
        """Returns the dictionary representation of the model.

        Nested models (and dict values of nested models) are serialized via
        their own ``to_dict`` so ``by_alias`` propagates.
        """
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        # override the default output from pydantic by calling `to_dict()` of each value in active_learning_score_distributions (dict)
        _field_dict = {}
        if self.active_learning_score_distributions:
            for _key in self.active_learning_score_distributions:
                if self.active_learning_score_distributions[_key]:
                    _field_dict[_key] = self.active_learning_score_distributions[_key].to_dict(by_alias=by_alias)
            _dict['activeLearningScoreDistributions' if by_alias else 'active_learning_score_distributions'] = _field_dict
        # override the default output from pydantic by calling `to_dict()` of category_distribution
        if self.category_distribution:
            _dict['categoryDistribution' if by_alias else 'category_distribution'] = self.category_distribution.to_dict(by_alias=by_alias)
        # override the default output from pydantic by calling `to_dict()` of each value in category_to_counts_per_sample (dict)
        _field_dict = {}
        if self.category_to_counts_per_sample:
            for _key in self.category_to_counts_per_sample:
                if self.category_to_counts_per_sample[_key]:
                    _field_dict[_key] = self.category_to_counts_per_sample[_key].to_dict(by_alias=by_alias)
            _dict['categoryToCountsPerSample' if by_alias else 'category_to_counts_per_sample'] = _field_dict
        # override the default output from pydantic by calling `to_dict()` of prediction_score_distribution
        if self.prediction_score_distribution:
            _dict['predictionScoreDistribution' if by_alias else 'prediction_score_distribution'] = self.prediction_score_distribution.to_dict(by_alias=by_alias)
        # override the default output from pydantic by calling `to_dict()` of detection_task_information
        if self.detection_task_information:
            _dict['detectionTaskInformation' if by_alias else 'detection_task_information'] = self.detection_task_information.to_dict(by_alias=by_alias)
        # override the default output from pydantic by calling `to_dict()` of category_to_image_with_category_count
        if self.category_to_image_with_category_count:
            _dict['categoryToImageWithCategoryCount' if by_alias else 'category_to_image_with_category_count'] = self.category_to_image_with_category_count.to_dict(by_alias=by_alias)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> PredictionTaskInformation:
        """Create an instance of PredictionTaskInformation from a dict.

        Expects camelCase (wire-format) keys; unknown keys raise ValueError.
        """
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return PredictionTaskInformation.parse_obj(obj)

        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in PredictionTaskInformation) in the input: " + str(obj))

        _obj = PredictionTaskInformation.parse_obj({
            "task_type": obj.get("taskType"),
            "category_id_to_name": obj.get("categoryIdToName"),
            "active_learning_score_distributions": dict(
                (_k, NumericDistributionPerSet.from_dict(_v))
                for _k, _v in obj.get("activeLearningScoreDistributions").items()
            )
            if obj.get("activeLearningScoreDistributions") is not None
            else None,
            "category_distribution": CategoricalDistributionPerSet.from_dict(obj.get("categoryDistribution")) if obj.get("categoryDistribution") is not None else None,
            "category_to_counts_per_sample": dict(
                (_k, NumericDistributionPerSet.from_dict(_v))
                for _k, _v in obj.get("categoryToCountsPerSample").items()
            )
            if obj.get("categoryToCountsPerSample") is not None
            else None,
            "prediction_score_distribution": NumericDistributionPerSet.from_dict(obj.get("predictionScoreDistribution")) if obj.get("predictionScoreDistribution") is not None else None,
            "detection_task_information": DetectionTaskInformation.from_dict(obj.get("detectionTaskInformation")) if obj.get("detectionTaskInformation") is not None else None,
            "category_to_image_with_category_count": CategoricalDistributionPerSet.from_dict(obj.get("categoryToImageWithCategoryCount")) if obj.get("categoryToImageWithCategoryCount") is not None else None
        })
        return _obj
Due to openapi.oneOf fuckery with discriminators, this needs to be a string") __properties = ["name", "type"] @validator('name') def name_validate_regular_expression(cls, value): """Validates the regular expression""" - if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$", value): - raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$/") + if not re.match(r"^[a-zA-Z0-9_+=,.@:\/-]*$", value): + raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9_+=,.@:\/-]*$/") return value class Config: diff --git a/lightly/openapi_generated/swagger_client/models/report_v2.py b/lightly/openapi_generated/swagger_client/models/report_v2.py new file mode 100644 index 000000000..b87758ce3 --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/report_v2.py @@ -0,0 +1,103 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + +from typing import Dict, Optional +from pydantic import Extra, BaseModel, Field +from lightly.openapi_generated.swagger_client.models.dataset_analysis import DatasetAnalysis +from lightly.openapi_generated.swagger_client.models.general_information import GeneralInformation +from lightly.openapi_generated.swagger_client.models.prediction_task_information import PredictionTaskInformation + +class ReportV2(BaseModel): + """ + ReportV2 + """ + general_information: Optional[GeneralInformation] = Field(None, alias="generalInformation") + dataset_analysis: Optional[DatasetAnalysis] = Field(None, alias="datasetAnalysis") + prediction_task_information: Optional[Dict[str, PredictionTaskInformation]] = Field(None, alias="predictionTaskInformation") + __properties = ["generalInformation", "datasetAnalysis", "predictionTaskInformation"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, json_str: str) -> ReportV2: + """Create an instance of ReportV2 from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + # override the default output from pydantic by calling `to_dict()` of general_information + if self.general_information: + _dict['generalInformation' if by_alias else 'general_information'] = 
self.general_information.to_dict(by_alias=by_alias) + # override the default output from pydantic by calling `to_dict()` of dataset_analysis + if self.dataset_analysis: + _dict['datasetAnalysis' if by_alias else 'dataset_analysis'] = self.dataset_analysis.to_dict(by_alias=by_alias) + # override the default output from pydantic by calling `to_dict()` of each value in prediction_task_information (dict) + _field_dict = {} + if self.prediction_task_information: + for _key in self.prediction_task_information: + if self.prediction_task_information[_key]: + _field_dict[_key] = self.prediction_task_information[_key].to_dict(by_alias=by_alias) + _dict['predictionTaskInformation' if by_alias else 'prediction_task_information'] = _field_dict + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> ReportV2: + """Create an instance of ReportV2 from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return ReportV2.parse_obj(obj) + + # raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise ValueError("Error due to additional fields (not defined in ReportV2) in the input: " + str(obj)) + + _obj = ReportV2.parse_obj({ + "general_information": GeneralInformation.from_dict(obj.get("generalInformation")) if obj.get("generalInformation") is not None else None, + "dataset_analysis": DatasetAnalysis.from_dict(obj.get("datasetAnalysis")) if obj.get("datasetAnalysis") is not None else None, + "prediction_task_information": dict( + (_k, PredictionTaskInformation.from_dict(_v)) + for _k, _v in obj.get("predictionTaskInformation").items() + ) + if obj.get("predictionTaskInformation") is not None + else None + }) + return _obj + diff --git a/lightly/openapi_generated/swagger_client/models/run_information.py b/lightly/openapi_generated/swagger_client/models/run_information.py new file mode 100644 index 000000000..85fd67344 --- /dev/null +++ 
b/lightly/openapi_generated/swagger_client/models/run_information.py @@ -0,0 +1,86 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + +from typing import Union +from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, StrictStr + +class RunInformation(BaseModel): + """ + RunInformation + """ + start_timestamp: Union[StrictFloat, StrictInt] = Field(..., alias="startTimestamp") + end_timestamp: Union[StrictFloat, StrictInt] = Field(..., alias="endTimestamp") + total_processing_time_s: Union[StrictFloat, StrictInt] = Field(..., alias="totalProcessingTimeS") + run_id: StrictStr = Field(..., alias="runId") + run_url: StrictStr = Field(..., alias="runUrl") + __properties = ["startTimestamp", "endTimestamp", "totalProcessingTimeS", "runId", "runUrl"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, json_str: str) -> RunInformation: + """Create an instance of RunInformation from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the 
dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> RunInformation: + """Create an instance of RunInformation from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return RunInformation.parse_obj(obj) + + # raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise ValueError("Error due to additional fields (not defined in RunInformation) in the input: " + str(obj)) + + _obj = RunInformation.parse_obj({ + "start_timestamp": obj.get("startTimestamp"), + "end_timestamp": obj.get("endTimestamp"), + "total_processing_time_s": obj.get("totalProcessingTimeS"), + "run_id": obj.get("runId"), + "run_url": obj.get("runUrl") + }) + return _obj + diff --git a/lightly/openapi_generated/swagger_client/models/scatter_plot_data.py b/lightly/openapi_generated/swagger_client/models/scatter_plot_data.py new file mode 100644 index 000000000..78b352b40 --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/scatter_plot_data.py @@ -0,0 +1,87 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + + +from pydantic import Extra, BaseModel, Field +from lightly.openapi_generated.swagger_client.models.embedding_data2_d import EmbeddingData2D + +class ScatterPlotData(BaseModel): + """ + ScatterPlotData + """ + pca_embedding_data: EmbeddingData2D = Field(..., alias="pcaEmbeddingData") + umap_embedding_data: EmbeddingData2D = Field(..., alias="umapEmbeddingData") + __properties = ["pcaEmbeddingData", "umapEmbeddingData"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, json_str: str) -> ScatterPlotData: + """Create an instance of ScatterPlotData from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + # override the default output from pydantic by calling `to_dict()` of pca_embedding_data + if self.pca_embedding_data: + _dict['pcaEmbeddingData' if by_alias else 'pca_embedding_data'] = self.pca_embedding_data.to_dict(by_alias=by_alias) + # override the default output from pydantic by calling `to_dict()` of umap_embedding_data + if self.umap_embedding_data: + _dict['umapEmbeddingData' if by_alias else 'umap_embedding_data'] = self.umap_embedding_data.to_dict(by_alias=by_alias) + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> ScatterPlotData: + """Create an instance of ScatterPlotData from a 
dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return ScatterPlotData.parse_obj(obj) + + # raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise ValueError("Error due to additional fields (not defined in ScatterPlotData) in the input: " + str(obj)) + + _obj = ScatterPlotData.parse_obj({ + "pca_embedding_data": EmbeddingData2D.from_dict(obj.get("pcaEmbeddingData")) if obj.get("pcaEmbeddingData") is not None else None, + "umap_embedding_data": EmbeddingData2D.from_dict(obj.get("umapEmbeddingData")) if obj.get("umapEmbeddingData") is not None else None + }) + return _obj + diff --git a/lightly/openapi_generated/swagger_client/models/scatter_plot_example_image.py b/lightly/openapi_generated/swagger_client/models/scatter_plot_example_image.py new file mode 100644 index 000000000..d520c2528 --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/scatter_plot_example_image.py @@ -0,0 +1,80 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + +from typing import List, Union +from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, StrictStr, conlist + +class ScatterPlotExampleImage(BaseModel): + """ + ScatterPlotExampleImage + """ + location: conlist(Union[StrictFloat, StrictInt], max_items=2, min_items=2) = Field(...) + filename: StrictStr = Field(...) 
+ __properties = ["location", "filename"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, json_str: str) -> ScatterPlotExampleImage: + """Create an instance of ScatterPlotExampleImage from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> ScatterPlotExampleImage: + """Create an instance of ScatterPlotExampleImage from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return ScatterPlotExampleImage.parse_obj(obj) + + # raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise ValueError("Error due to additional fields (not defined in ScatterPlotExampleImage) in the input: " + str(obj)) + + _obj = ScatterPlotExampleImage.parse_obj({ + "location": obj.get("location"), + "filename": obj.get("filename") + }) + return _obj + diff --git a/lightly/openapi_generated/swagger_client/models/selected_and_removed_image_pair.py b/lightly/openapi_generated/swagger_client/models/selected_and_removed_image_pair.py new file mode 100644 index 000000000..43864dfa3 --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/selected_and_removed_image_pair.py @@ -0,0 +1,82 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised 
learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + +from typing import Union +from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, StrictStr + +class SelectedAndRemovedImagePair(BaseModel): + """ + SelectedAndRemovedImagePair + """ + selected_image: StrictStr = Field(..., alias="selectedImage") + removed_image: StrictStr = Field(..., alias="removedImage") + distance: Union[StrictFloat, StrictInt] = Field(...) + __properties = ["selectedImage", "removedImage", "distance"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, json_str: str) -> SelectedAndRemovedImagePair: + """Create an instance of SelectedAndRemovedImagePair from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> SelectedAndRemovedImagePair: + """Create an instance of SelectedAndRemovedImagePair from a dict""" + if obj is None: + return None + + if not isinstance(obj, 
dict): + return SelectedAndRemovedImagePair.parse_obj(obj) + + # raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise ValueError("Error due to additional fields (not defined in SelectedAndRemovedImagePair) in the input: " + str(obj)) + + _obj = SelectedAndRemovedImagePair.parse_obj({ + "selected_image": obj.get("selectedImage"), + "removed_image": obj.get("removedImage"), + "distance": obj.get("distance") + }) + return _obj + diff --git a/lightly/openapi_generated/swagger_client/models/selection_config_entry_input.py b/lightly/openapi_generated/swagger_client/models/selection_config_entry_input.py index 28b2d923b..65cf32e69 100644 --- a/lightly/openapi_generated/swagger_client/models/selection_config_entry_input.py +++ b/lightly/openapi_generated/swagger_client/models/selection_config_entry_input.py @@ -29,7 +29,7 @@ class SelectionConfigEntryInput(BaseModel): SelectionConfigEntryInput """ type: SelectionInputType = Field(...) - task: Optional[constr(strict=True)] = Field(None, description="Since we sometimes stitch together SelectionInputTask+ActiveLearningScoreType, they need to follow the same specs of ActiveLearningScoreType. However, this can be an empty string due to internal logic. ") + task: Optional[constr(strict=True)] = Field(None, description="Since we sometimes stitch together SelectionInputTask+ActiveLearningScoreType, they need to follow the same specs of ActiveLearningScoreType. However, this can be an empty string due to internal logic (no minLength). Also v2config.filespecs.ts has this pattern for predictionTaskJSONSchema as well. 
") score: Optional[constr(strict=True, min_length=1)] = Field(None, description="Type of active learning score") key: Optional[constr(strict=True, min_length=1)] = None name: Optional[SelectionInputPredictionsName] = None diff --git a/lightly/openapi_generated/swagger_client/models/selection_config_v3_entry_input.py b/lightly/openapi_generated/swagger_client/models/selection_config_v3_entry_input.py index aac4c59da..539a0e7be 100644 --- a/lightly/openapi_generated/swagger_client/models/selection_config_v3_entry_input.py +++ b/lightly/openapi_generated/swagger_client/models/selection_config_v3_entry_input.py @@ -29,7 +29,7 @@ class SelectionConfigV3EntryInput(BaseModel): SelectionConfigV3EntryInput """ type: SelectionInputType = Field(...) - task: Optional[constr(strict=True)] = Field(None, description="Since we sometimes stitch together SelectionInputTask+ActiveLearningScoreType, they need to follow the same specs of ActiveLearningScoreType. However, this can be an empty string due to internal logic. ") + task: Optional[constr(strict=True)] = Field(None, description="Since we sometimes stitch together SelectionInputTask+ActiveLearningScoreType, they need to follow the same specs of ActiveLearningScoreType. However, this can be an empty string due to internal logic (no minLength). Also v2config.filespecs.ts has this pattern for predictionTaskJSONSchema as well. 
") score: Optional[constr(strict=True, min_length=1)] = Field(None, description="Type of active learning score") key: Optional[constr(strict=True, min_length=1)] = None name: Optional[SelectionInputPredictionsName] = None diff --git a/lightly/openapi_generated/swagger_client/models/selection_config_v4_entry_input.py b/lightly/openapi_generated/swagger_client/models/selection_config_v4_entry_input.py index 73e239a05..e7770d3f9 100644 --- a/lightly/openapi_generated/swagger_client/models/selection_config_v4_entry_input.py +++ b/lightly/openapi_generated/swagger_client/models/selection_config_v4_entry_input.py @@ -29,7 +29,7 @@ class SelectionConfigV4EntryInput(BaseModel): SelectionConfigV4EntryInput """ type: SelectionInputType = Field(...) - task: Optional[constr(strict=True)] = Field(None, description="Since we sometimes stitch together SelectionInputTask+ActiveLearningScoreType, they need to follow the same specs of ActiveLearningScoreType. However, this can be an empty string due to internal logic. ") + task: Optional[constr(strict=True)] = Field(None, description="Since we sometimes stitch together SelectionInputTask+ActiveLearningScoreType, they need to follow the same specs of ActiveLearningScoreType. However, this can be an empty string due to internal logic (no minLength). Also v2config.filespecs.ts has this pattern for predictionTaskJSONSchema as well. 
") score: Optional[constr(strict=True, min_length=1)] = Field(None, description="Type of active learning score") key: Optional[constr(strict=True, min_length=1)] = None name: Optional[SelectionInputPredictionsName] = None diff --git a/lightly/openapi_generated/swagger_client/models/task_annotation_savings.py b/lightly/openapi_generated/swagger_client/models/task_annotation_savings.py new file mode 100644 index 000000000..f0f1f6073 --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/task_annotation_savings.py @@ -0,0 +1,80 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + +from typing import Union +from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt + +class TaskAnnotationSavings(BaseModel): + """ + TaskAnnotationSavings + """ + cost_savings: Union[StrictFloat, StrictInt] = Field(..., alias="costSavings") + co2_savings: Union[StrictFloat, StrictInt] = Field(..., alias="co2Savings") + __properties = ["costSavings", "co2Savings"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, 
json_str: str) -> TaskAnnotationSavings: + """Create an instance of TaskAnnotationSavings from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> TaskAnnotationSavings: + """Create an instance of TaskAnnotationSavings from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return TaskAnnotationSavings.parse_obj(obj) + + # raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise ValueError("Error due to additional fields (not defined in TaskAnnotationSavings) in the input: " + str(obj)) + + _obj = TaskAnnotationSavings.parse_obj({ + "cost_savings": obj.get("costSavings"), + "co2_savings": obj.get("co2Savings") + }) + return _obj + diff --git a/lightly/openapi_generated/swagger_client/models/worker_information.py b/lightly/openapi_generated/swagger_client/models/worker_information.py new file mode 100644 index 000000000..a4130b5c8 --- /dev/null +++ b/lightly/openapi_generated/swagger_client/models/worker_information.py @@ -0,0 +1,80 @@ +# coding: utf-8 + +""" + Lightly API + + Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Contact: support@lightly.ai + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + + + +from pydantic import Extra, BaseModel, Field, StrictStr + +class WorkerInformation(BaseModel): + """ + WorkerInformation + """ + build_time: StrictStr = Field(..., alias="buildTime") + build_version: StrictStr = Field(..., alias="buildVersion") + __properties = ["buildTime", "buildVersion"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + use_enum_values = True + extra = Extra.forbid + + def to_str(self, by_alias: bool = False) -> str: + """Returns the string representation of the model""" + return pprint.pformat(self.dict(by_alias=by_alias)) + + def to_json(self, by_alias: bool = False) -> str: + """Returns the JSON representation of the model""" + return json.dumps(self.to_dict(by_alias=by_alias)) + + @classmethod + def from_json(cls, json_str: str) -> WorkerInformation: + """Create an instance of WorkerInformation from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self, by_alias: bool = False): + """Returns the dictionary representation of the model""" + _dict = self.dict(by_alias=by_alias, + exclude={ + }, + exclude_none=True) + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> WorkerInformation: + """Create an instance of WorkerInformation from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return WorkerInformation.parse_obj(obj) + + # raise errors for additional fields in the input + for _key in obj.keys(): + if _key not in cls.__properties: + raise ValueError("Error due to additional fields (not defined in WorkerInformation) in the input: " + str(obj)) + + _obj = WorkerInformation.parse_obj({ + "build_time": obj.get("buildTime"), + "build_version": obj.get("buildVersion") + }) + return _obj +