diff --git a/evalai/submissions.py b/evalai/submissions.py
index d1ddd8ae..ffe6bc2c 100644
--- a/evalai/submissions.py
+++ b/evalai/submissions.py
@@ -20,6 +20,7 @@
     display_submission_details,
     display_submission_result,
     convert_bytes_to,
+    get_submission_meta_attributes,
 )
 from evalai.utils.urls import URLS
 from evalai.utils.config import (
@@ -126,6 +127,147 @@ def push(image, phase, url, public, private):
     max_docker_image_size = response.get("max_docker_image_size")
     docker_image_size = docker_image.__dict__.get("attrs").get("VirtualSize")

+    # Prompt for submission details
+    if click.confirm("Do you want to include the Submission Details?"):
+        submission_metadata["method_name"] = click.prompt(
+            style("Method Name", fg="yellow"), type=str, default=""
+        )
+        submission_metadata["method_description"] = click.prompt(
+            style("Method Description", fg="yellow"),
+            type=str,
+            default="",
+        )
+        submission_metadata["project_url"] = click.prompt(
+            style("Project URL", fg="yellow"), type=str, default=""
+        )
+        submission_metadata["publication_url"] = click.prompt(
+            style("Publication URL", fg="yellow"), type=str, default=""
+        )
+
+    submission_meta_attributes = get_submission_meta_attributes(
+        challenge_pk, phase_pk
+    )
+
+    submission_attribute_metadata = []
+
+    if submission_meta_attributes:
+        if click.confirm("Do you want to include the Submission Metadata?"):
+            for attribute in submission_meta_attributes:
+                attribute_type = attribute["type"]
+                attribute_name = attribute["name"]
+                attribute_description = attribute["description"]
+                attribute_required = attribute.get("required")
+                attribute_data = {
+                    'name': attribute_name,
+                    'type': attribute_type,
+                    'description': attribute_description,
+                    'required': attribute_required,
+                }
+                if attribute_required:
+                    attribute_name = attribute_name + '*'
+                value = None
+                message = "{} ({})".format(
+                    attribute_name, attribute_description
+                )
+                if attribute_type == "text":
+                    while True:
+                        value = click.prompt(
+                            style(message, fg="yellow"),
+                            type=str,
+                            default="",
+                        )
+                        if not attribute_required or value != "":
+                            break
+                        echo(
+                            "Error: {} is a required field".format(
+                                attribute["name"]
+                            )
+                        )
+                    attribute_data['value'] = value
+                if attribute_type == "boolean":
+                    while True:
+                        value = click.prompt(
+                            style(message, fg="yellow"), type=bool, default=""
+                        )
+                        if not attribute_required or value != "":
+                            break
+                        echo(
+                            "Error: {} is a required field".format(
+                                attribute["name"]
+                            )
+                        )
+                    attribute_data['value'] = value
+                if attribute_type == "radio":
+                    while True:
+                        value = click.prompt(
+                            style(
+                                "{}:\nChoices:{}".format(
+                                    message, attribute["options"]
+                                ),
+                                fg="yellow",
+                            ),
+                            type=click.Choice(attribute["options"]),
+                            default="",
+                        )
+                        if not attribute_required or value != "":
+                            break
+                        echo(
+                            "Error: {} is a required field".format(
+                                attribute["name"]
+                            )
+                        )
+                    attribute_data['options'] = attribute['options']
+                    attribute_data['value'] = value
+                if attribute_type == "checkbox":
+                    option_chosen = True
+                    while option_chosen:
+                        value = []
+                        choices = click.prompt(
+                            style(
+                                "{}:\nChoices(separated by comma):{}".format(
+                                    message, attribute["options"]
+                                ),
+                                fg="yellow",
+                            ),
+                            type=str,
+                            show_default=False,
+                            default="",
+                        )
+                        if choices != "":
+                            choices = [
+                                choice.strip(" ")
+                                for choice in choices.split(",")
+                            ]
+                        else:
+                            choices = []
+                            option_chosen = False
+                        if attribute_required and len(choices) == 0:
+                            echo(
+                                "Error: {} is a required field. Please select at least one option".format(
+                                    attribute["name"]
+                                )
+                            )
+                            option_chosen = True
+                        for choice in choices:
+                            if choice in attribute["options"]:
+                                value.append(choice)
+                                option_chosen = False
+                            else:
+                                echo(
+                                    "Error: Choose correct value(s) from the given options only"
+                                )
+                                option_chosen = True
+                                break
+                    attribute_data['options'] = attribute['options']
+                    attribute_data['values'] = value
+                submission_attribute_metadata.append(attribute_data)
+
+    # Attach the collected attributes to the submission metadata payload
+    if submission_attribute_metadata:
+        submission_metadata["submission_meta_attributes"] = submission_attribute_metadata
+
     if docker_image_size > max_docker_image_size:
         max_docker_image_size = convert_bytes_to(max_docker_image_size, "gb")
         message = "\nError: Image is too large. The maximum image size allowed is {} GB".format(
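# Illustrative sketch (not part of the patch): the shape of the attribute
# dicts the prompt loop above consumes, and the payload it appends to
# submission_metadata. The shape mirrors the fixture added in
# tests/data/challenge_response.py below; the answer values are made up.
submission_meta_attributes = [
    {"name": "TextAttribute", "type": "text",
     "required": "True", "description": "Sample"},
    {"name": "SingleOptionAttribute", "type": "radio",
     "options": ["A", "B", "C"], "required": "True", "description": "Sample"},
]
# After the prompts run with answers "Test" and "A", submission_metadata
# would carry an entry like:
submission_metadata = {
    "submission_meta_attributes": [
        {"name": "TextAttribute", "type": "text", "description": "Sample",
         "required": "True", "value": "Test"},
        {"name": "SingleOptionAttribute", "type": "radio",
         "description": "Sample", "required": "True",
         "options": ["A", "B", "C"], "value": "A"},
    ],
}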
diff --git a/evalai/utils/challenges.py b/evalai/utils/challenges.py
index 0abb6f34..5db668fc 100644
--- a/evalai/utils/challenges.py
+++ b/evalai/utils/challenges.py
@@ -442,7 +442,7 @@ def pretty_print_challenge_phase_data(phase):
     title = "{} {} {}".format(phase_title, challenge_id, phase_id)

-    cleaned_desc = BeautifulSoup(phase["description"], "lxml").text
+    cleaned_desc = BeautifulSoup(phase["description"], "html.parser").text
     description = "{}\n".format(cleaned_desc)

     start_date = "Start Date : {}".format(
diff --git a/evalai/utils/common.py b/evalai/utils/common.py
index d398f389..88fd45a2 100644
--- a/evalai/utils/common.py
+++ b/evalai/utils/common.py
@@ -135,7 +135,7 @@ def clean_data(data):
     """
     Strip HTML and clean spaces
     """
-    data = BeautifulSoup(data, "lxml").text.strip()
+    data = BeautifulSoup(data, "html.parser").text.strip()
     data = " ".join(data.split()).encode("utf-8")
     return data
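# Note on the parser swap above: "html.parser" is Python's built-in HTML
# parser, so BeautifulSoup no longer needs the third-party lxml package at
# runtime. For the simple markup in challenge descriptions the extracted
# text should be identical; a quick sanity check:
from bs4 import BeautifulSoup

sample = "<p>Phase <b>description</b></p>"
assert BeautifulSoup(sample, "html.parser").text == "Phase description"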
diff --git a/tests/data/challenge_response.py b/tests/data/challenge_response.py
index 762840e2..0231e215 100644
--- a/tests/data/challenge_response.py
+++ b/tests/data/challenge_response.py
@@ -314,7 +314,43 @@
     "is_public": true,
     "is_active": true,
     "codename": "test2019",
-    "slug": "philip-phase-2019"
+    "slug": "philip-phase-2019",
+    "submission_meta_attributes": [
+        {
+            "name": "TextAttribute",
+            "type": "text",
+            "required": "True",
+            "description": "Sample"
+        },
+        {
+            "name": "SingleOptionAttribute",
+            "type": "radio",
+            "options": [
+                "A",
+                "B",
+                "C"
+            ],
+            "required": "True",
+            "description": "Sample"
+        },
+        {
+            "name": "MultipleChoiceAttribute",
+            "type": "checkbox",
+            "options": [
+                "alpha",
+                "beta",
+                "gamma"
+            ],
+            "required": "True",
+            "description": "Sample"
+        },
+        {
+            "name": "TrueFalseField",
+            "type": "boolean",
+            "required": "True",
+            "description": "Sample"
+        }
+    ]
 }
 """
diff --git a/tests/test_submissions.py b/tests/test_submissions.py
index 3443f68d..696fc266 100644
--- a/tests/test_submissions.py
+++ b/tests/test_submissions.py
@@ -8,6 +8,7 @@
 from click.testing import CliRunner
 from datetime import datetime
 from dateutil import tz
+from unittest.mock import patch

 from evalai.challenges import challenge
 from evalai.submissions import submission, push
@@ -371,25 +372,57 @@ def test_make_submission_for_docker_based_challenge_teardown():
         return (registry_port, image_tag)

     @responses.activate
-    def test_make_submission_for_docker_based_challenge(
+    def test_make_submission_for_docker_based_challenge_without_submission_metadata(
         self, test_make_submission_for_docker_based_challenge_setup
     ):
         registry_port, image_tag = (
             test_make_submission_for_docker_based_challenge_setup
         )
-        runner = CliRunner()
-        with runner.isolated_filesystem():
-            result = runner.invoke(
-                push,
-                [
-                    image_tag,
-                    "-p",
-                    "philip-phase-2019",
-                    "-u",
-                    "localhost:{0}".format(registry_port),
-                ],
-            )
-        assert result.exit_code == 0
+        with patch(
+            'evalai.submissions.get_submission_meta_attributes',
+            return_value=json.loads(
+                challenge_response.challenge_phase_details_slug
+            )["submission_meta_attributes"],
+        ):
+            runner = CliRunner()
+            with runner.isolated_filesystem():
+                result = runner.invoke(
+                    push,
+                    [
+                        image_tag,
+                        "-p",
+                        "philip-phase-2019",
+                        "-u",
+                        "localhost:{0}".format(registry_port),
+                    ],
+                    input="N\nN\n",
+                )
+            assert result.exit_code == 0
+
+    @responses.activate
+    def test_make_submission_for_docker_based_challenge_with_submission_metadata(
+        self, test_make_submission_for_docker_based_challenge_setup
+    ):
+        registry_port, image_tag = (
+            test_make_submission_for_docker_based_challenge_setup
+        )
+        with patch(
+            'evalai.submissions.get_submission_meta_attributes',
+            return_value=json.loads(
+                challenge_response.challenge_phase_details_slug
+            )["submission_meta_attributes"],
+        ):
+            runner = CliRunner()
+            with runner.isolated_filesystem():
+                result = runner.invoke(
+                    push,
+                    [
+                        image_tag,
+                        "-p",
+                        "philip-phase-2019",
+                        "-u",
+                        "localhost:{0}".format(registry_port),
+                        "--public",
+                    ],
+                    input="\nY\nTest\nTest\nTest\nTest\nY\nTest\nA\nalpha\nTrue\n",
+                )
+            assert result.exit_code == 0

     @responses.activate
     def test_make_submission_using_presigned_url(self, request):
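# How the CliRunner `input` strings above drive the new prompts: each
# newline-separated token answers one prompt in order. In the metadata test,
# "\nY\nTest\nTest\nTest\nTest\nY\nTest\nA\nalpha\nTrue\n" is assumed to map
# to: a default accepted for the first prompt, "Y" for the submission-details
# confirm, four detail fields, "Y" for the metadata confirm, then one answer
# per fixture attribute (text, radio, checkbox, boolean). A minimal,
# self-contained illustration of the mechanism:
import click
from click.testing import CliRunner


@click.command()
def ask():
    if click.confirm("Do you want to include the Submission Details?"):
        name = click.prompt("Method Name", type=str, default="")
        click.echo("method_name={}".format(name))


result = CliRunner().invoke(ask, input="Y\nTest\n")
assert result.exit_code == 0
assert "method_name=Test" in result.output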