Add Docker-based submission fix #379

Merged
142 changes: 142 additions & 0 deletions evalai/submissions.py
@@ -20,6 +20,7 @@
display_submission_details,
display_submission_result,
convert_bytes_to,
get_submission_meta_attributes,
)
from evalai.utils.urls import URLS
from evalai.utils.config import (
@@ -126,6 +127,147 @@ def push(image, phase, url, public, private):
max_docker_image_size = response.get("max_docker_image_size")

docker_image_size = docker_image.__dict__.get("attrs").get("VirtualSize")
# Prompt for submission details
if click.confirm("Do you want to include the Submission Details?"):
Collaborator: Is this based on the submit method?

Contributor Author: No.

submission_metadata["method_name"] = click.prompt(
style("Method Name", fg="yellow"), type=str, default=""
)
submission_metadata["method_description"] = click.prompt(
style("Method Description", fg="yellow"),
type=str,
default="",
)
submission_metadata["project_url"] = click.prompt(
style("Project URL", fg="yellow"), type=str, default=""
)
submission_metadata["publication_url"] = click.prompt(
style("Publication URL", fg="yellow"), type=str, default=""
)

submission_meta_attributes = get_submission_meta_attributes(
challenge_pk, phase_pk
)

submission_attribute_metadata = []

if (submission_meta_attributes and len(submission_meta_attributes) > 0):
if click.confirm(
"Do you want to include the Submission Metadata?"
):
for attribute in submission_meta_attributes:
attribute_type = attribute["type"]
attribute_name = attribute["name"]
attribute_description = attribute["description"]
attribute_required = attribute.get("required")
attribute_data = {
'name': attribute_name,
'type': attribute_type,
'description': attribute_description,
'required': attribute_required,
}
if attribute_required:
attribute_name = attribute_name + '*'
value = None
message = "{} ({})".format(
attribute_name, attribute_description
)
if attribute_type == "text":
while True:
value = click.prompt(
style(message, fg="yellow"),
type=str,
default="",
)
if not attribute_required or value != "":
break
echo(
"Error: {} is a required field".format(
attribute["name"]
)
)
attribute_data['value'] = value
if attribute_type == "boolean":
while True:
value = click.prompt(
style(message, fg="yellow"), type=bool, default=""
)
if not attribute_required or value != "":
break
echo(
"Error: {} is a required field".format(
attribute["name"]
)
)
attribute_data['value'] = value
if attribute_type == "radio":
while True:
value = click.prompt(
style(
"{}:\nChoices:{}".format(
message, attribute["options"]
),
fg="yellow",
),
type=click.Choice(attribute["options"]),
default=""
)
if not attribute_required or value != "":
break
echo(
"Error: {} is a required field".format(
attribute["name"]
)
)
attribute_data['options'] = attribute['options']
attribute_data['value'] = value
if attribute_type == "checkbox":
option_chosen = True
while option_chosen:
value = []
choices = click.prompt(
style(
"{}:\nChoices(separated by comma):{}".format(
message, attribute["options"]
),
fg="yellow",
),
type=str,
show_default=False,
default=""
)
if choices != "":
choices = [
choice.strip(" ")
for choice in choices.split(",")
]
else:
choices = []
option_chosen = False
if attribute_required and len(choices) == 0:
echo(
"Error: {} is a required field. Please select atleast one option".format(
attribute["name"]
)
)
option_chosen = True
for choice in choices:
if choice in attribute["options"]:
value.append(choice)
option_chosen = False
else:
echo(
"Error: Choose correct value(s) from the given options only"
)
option_chosen = True
break
attribute_data['options'] = attribute['options']
attribute_data['values'] = value
submission_attribute_metadata.append(attribute_data)

# After collecting submission_attribute_metadata
if submission_attribute_metadata:
submission_metadata["submission_meta_attributes"] = submission_attribute_metadata
Collaborator: Where is this submission metadata pushed to EvalAI?

Collaborator: I see it on line 352. Nvm.

if docker_image_size > max_docker_image_size:
max_docker_image_size = convert_bytes_to(max_docker_image_size, "gb")
message = "\nError: Image is too large. The maximum image size allowed is {} GB".format(
2 changes: 1 addition & 1 deletion evalai/utils/challenges.py
@@ -442,7 +442,7 @@ def pretty_print_challenge_phase_data(phase):

title = "{} {} {}".format(phase_title, challenge_id, phase_id)

cleaned_desc = BeautifulSoup(phase["description"], "lxml").text
cleaned_desc = BeautifulSoup(phase["description"], "html.parser").text
Collaborator: Why are we making this change?

Contributor Author: When I ran the CLI and fetched a challenge by ID, it raised a parsing error, so I made this change to fix it.

Collaborator: Are you sure it doesn't break any of the old functionality?

Contributor Author: @gchhablani I think the lxml / html.parser argument here only affects how the fetched text is cleaned up for display in the terminal (CLI). I tested this in my local environment by fetching challenge details and similar commands, and I don't see it breaking any existing functionality.
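For context on the parser switch: "html.parser" is Python's built-in HTML parser and is always available, while "lxml" requires the third-party lxml package and makes BeautifulSoup raise bs4.FeatureNotFound when that package is missing. A minimal sketch comparing the two (the sample HTML is made up):

from bs4 import BeautifulSoup

html = "<p>Challenge <b>description</b> text</p>"  # hypothetical sample input

# Built-in parser: ships with the Python standard library, no extra dependency.
print(BeautifulSoup(html, "html.parser").text)  # Challenge description text

# lxml parser: generally faster, but raises bs4.FeatureNotFound if the lxml
# package is not installed in the environment.
print(BeautifulSoup(html, "lxml").text)         # Challenge description text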

description = "{}\n".format(cleaned_desc)

start_date = "Start Date : {}".format(
2 changes: 1 addition & 1 deletion evalai/utils/common.py
@@ -135,7 +135,7 @@ def clean_data(data):
"""
Strip HTML and clean spaces
"""
data = BeautifulSoup(data, "lxml").text.strip()
data = BeautifulSoup(data, "html.parser").text.strip()
Collaborator: Same here, why this change?

Contributor Author: This fixes the same parsing error on the CLI.

data = " ".join(data.split()).encode("utf-8")
return data
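For illustration, assuming clean_data's body is essentially the two lines shown in this hunk, a hypothetical input/output pair (the sample string is made up):

from bs4 import BeautifulSoup

raw = "<p>A   sample\nchallenge   description</p>"     # hypothetical input
text = BeautifulSoup(raw, "html.parser").text.strip()  # strips the HTML tags
data = " ".join(text.split()).encode("utf-8")           # b"A sample challenge description"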

38 changes: 37 additions & 1 deletion tests/data/challenge_response.py
@@ -314,7 +314,43 @@
"is_public": true,
"is_active": true,
"codename": "test2019",
"slug": "philip-phase-2019"
"slug": "philip-phase-2019",
"submission_meta_attributes": [
{
"name": "TextAttribute",
"type": "text",
"required": "True",
"description": "Sample"
},
{
"name": "SingleOptionAttribute",
"type": "radio",
"options": [
"A",
"B",
"C"
],
"required": "True",
"description": "Sample"
},
{
"name": "MultipleChoiceAttribute",
"type": "checkbox",
"options": [
"alpha",
"beta",
"gamma"
],
"required": "True",
"description": "Sample"
},
{
"name": "TrueFalseField",
"type": "boolean",
"required": "True",
"description": "Sample"
}
]
}
"""

61 changes: 47 additions & 14 deletions tests/test_submissions.py
@@ -8,6 +8,7 @@
from click.testing import CliRunner
from datetime import datetime
from dateutil import tz
from unittest.mock import patch

from evalai.challenges import challenge
from evalai.submissions import submission, push
@@ -371,25 +372,57 @@ def test_make_submission_for_docker_based_challenge_teardown():
return (registry_port, image_tag)

@responses.activate
def test_make_submission_for_docker_based_challenge(
def test_make_submission_for_docker_based_challenge_without_submission_metadata(
self, test_make_submission_for_docker_based_challenge_setup
):
registry_port, image_tag = (
test_make_submission_for_docker_based_challenge_setup
)
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(
push,
[
image_tag,
"-p",
"philip-phase-2019",
"-u",
"localhost:{0}".format(registry_port),
],
)
assert result.exit_code == 0
with patch(
'evalai.submissions.get_submission_meta_attributes',
return_value=json.loads(challenge_response.challenge_phase_details_slug)["submission_meta_attributes"]
):
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(
push,
[
image_tag,
"-p",
"philip-phase-2019",
"-u",
"localhost:{0}".format(registry_port),
],
input="N\nN\n",
)
assert result.exit_code == 0

@responses.activate
def test_make_submission_for_docker_based_challenge_with_submission_metadata(
self, test_make_submission_for_docker_based_challenge_setup
):
registry_port, image_tag = (
test_make_submission_for_docker_based_challenge_setup
)
with patch(
'evalai.submissions.get_submission_meta_attributes',
return_value=json.loads(challenge_response.challenge_phase_details_slug)["submission_meta_attributes"]
):
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(
push,
[
image_tag,
"-p",
"philip-phase-2019",
"-u",
"localhost:{0}".format(registry_port),
"--public"
],
input="\nY\nTest\nTest\nTest\nTest\nY\nTest\nA\nalpha\nTrue\n",
)
assert result.exit_code == 0

@responses.activate
def test_make_submission_using_presigned_url(self, request):