
Commit

Merge branch 'romang/develop-doc' into azure-webapp-dev
himatsus committed Sep 10, 2024
2 parents 35ce577 + 4140264 commit 44cc7b0
Showing 26 changed files with 6,303 additions and 3,694 deletions.
2 changes: 1 addition & 1 deletion Appraise/settings.py
@@ -194,7 +194,7 @@
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
-STATIC_ROOT = os.path.join(BASE_DIR, 'static')
+STATIC_ROOT = os.environ.get('APPRAISE_STATIC_ROOT', os.path.join(BASE_DIR, 'static'))

# TODO: This is a temporary hack for running Appraise locally for regression
# testing and development as WhiteNoise staticfiles app does not work.
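The settings.py change above lets a deployment override where collected static files land, falling back to the old BASE_DIR location when the variable is unset. A minimal sketch of how that override could be driven, assuming a hypothetical deployment path (the directory below is only an example, loosely modelled on an Azure Web App layout, matching the azure-webapp-dev branch name):

import os

# Example only: choose a writable static directory before Django settings load;
# when APPRAISE_STATIC_ROOT is unset, settings.py falls back to BASE_DIR/static.
os.environ.setdefault('APPRAISE_STATIC_ROOT', '/home/site/wwwroot/static')

# `python manage.py collectstatic` would then write into this directory,
# since Django copies files to whatever STATIC_ROOT resolves to.
print(os.environ['APPRAISE_STATIC_ROOT'])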
51 changes: 27 additions & 24 deletions Campaign/views.py
@@ -102,14 +102,14 @@ def campaign_status(request, campaign_name, sort_key=2):
'item__itemID',
'item__targetID',
'item__itemType',
-'item__sourceText',
+'item__id',
'item__documentID',
)
# compute time override based on document times
import collections
_time_pairs = collections.defaultdict(list)
for x in _data:
-_time_pairs[x[7]].append((x[0], x[1]))
+_time_pairs[x[7]+ " ||| " +x[4]].append((x[0], x[1]))
_time_pairs = [
(min([x[0] for x in doc_v]), max([x[1] for x in doc_v]))
for doc, doc_v in _time_pairs.items()
@@ -127,14 +127,14 @@ def campaign_status(request, campaign_name, sort_key=2):
'item__itemID',
'item__targetID',
'item__itemType',
-'item__sourceText',
+'item__id',
'item__documentID',
)
# compute time override based on document times
import collections
_time_pairs = collections.defaultdict(list)
for x in _data:
-_time_pairs[x[7]].append((x[0], x[1]))
+_time_pairs[x[7]+ " ||| " +x[4]].append((x[0], x[1]))
_time_pairs = [
(min([x[0] for x in doc_v]), max([x[1] for x in doc_v]))
for doc, doc_v in _time_pairs.items()
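Both hunks above switch the grouping key from the document ID alone to the document ID plus a second field (x[4] in the diff, presumably the target/system ID) before taking the earliest start and latest end per group. A self-contained sketch of that grouping pattern with invented sample rows:

import collections

# (start_time, end_time, documentID, targetID) rows; values invented for illustration
rows = [
    (10.0, 25.0, 'doc1', 'systemA'),
    (30.0, 42.0, 'doc1', 'systemA'),
    (12.0, 20.0, 'doc1', 'systemB'),
]

groups = collections.defaultdict(list)
for start, end, doc, target in rows:
    # compound key, same idea as x[7] + " ||| " + x[4] in the diff
    groups[doc + " ||| " + target].append((start, end))

# one (earliest start, latest end) window per document/system group
time_pairs = [
    (min(s for s, _ in pairs), max(e for _, e in pairs))
    for pairs in groups.values()
]
print(time_pairs)  # [(10.0, 42.0), (12.0, 20.0)]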
@@ -163,41 +163,50 @@ def campaign_status(request, campaign_name, sort_key=2):
_end_times = [x[1] for x in _data]

# Compute first modified time
-_first_modified = (
+_first_modified_raw = (
seconds_to_timedelta(min(_start_times)) if _start_times else None
)
-if _first_modified:
-_date_modified = datetime(1970, 1, 1) + _first_modified
+if _first_modified_raw:
+_date_modified = datetime(1970, 1, 1) + _first_modified_raw
_first_modified = str(_date_modified).split('.')[0]
else:
_first_modified = 'Never'

# Compute last modified time
-_last_modified = (
+_last_modified_raw = (
seconds_to_timedelta(max(_end_times)) if _end_times else None
)
-if _last_modified:
-_date_modified = datetime(1970, 1, 1) + _last_modified
+if _last_modified_raw:
+_date_modified = datetime(1970, 1, 1) + _last_modified_raw
_last_modified = str(_date_modified).split('.')[0]
else:
_last_modified = 'Never'

# Compute total annotation time
-if is_mqm_or_esa:
-# if MQM or ESA, then let's use the manually computed times
-pass
+if is_mqm_or_esa and _first_modified_raw and _last_modified_raw:
+# for MQM and ESA compute the lower and upper annotation times
+# use only the end times
+_annotation_time_upper = (_last_modified_raw-_first_modified_raw).seconds
+_hours = int(floor(_annotation_time_upper / 3600))
+_minutes = int(floor((_annotation_time_upper % 3600) / 60))
+_annotation_time_upper = f'{_hours:0>2d}h{_minutes:0>2d}m'
else:
_time_pairs = list(zip(_start_times, _end_times))
+_annotation_time_upper = None
_annotation_time = _compute_user_total_annotation_time(_time_pairs)

# Format total annotation time
if _annotation_time:
_hours = int(floor(_annotation_time / 3600))
_minutes = int(floor((_annotation_time % 3600) / 60))
-_annotation_time = '{0:0>2d}h{1:0>2d}m'.format(_hours, _minutes)
+_annotation_time = f'{_hours:0>2d}h{_minutes:0>2d}m'
+# for MQM and ESA join it together
+if is_mqm_or_esa and _annotation_time_upper:
+_annotation_time = f'{_annotation_time}--{_annotation_time_upper}'
else:
_annotation_time = 'n/a'


_item = (
user.username,
user.is_active,
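For MQM and ESA campaigns the hunk above now pairs the summed annotation time with an upper bound taken from the span between the first and last recorded activity, both printed as zero-padded HHhMMm and joined with '--'. A standalone sketch of that formatting with invented durations:

from math import floor

def format_hm(total_seconds):
    # render a duration in seconds as zero-padded "HHhMMm", as in the hunk above
    hours = int(floor(total_seconds / 3600))
    minutes = int(floor((total_seconds % 3600) / 60))
    return f'{hours:0>2d}h{minutes:0>2d}m'

annotation_seconds = 4 * 3600 + 20 * 60   # invented: summed per-item time
span_seconds = 6 * 3600 + 5 * 60          # invented: last end minus first start

lower = format_hm(annotation_seconds)     # '04h20m'
upper = format_hm(span_seconds)           # '06h05m'
print(f'{lower}--{upper}')                # '04h20m--06h05m', the MQM/ESA display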
@@ -263,12 +272,10 @@ def stat_reliable_testing(_data, campaign_opts, result_type):
# Script generating batches for data assessment task does not
# keep equal itemIDs for respective TGT and BAD items, so it
# cannot be used as a key.
if "esa" in campaign_opts or "mqm" in campaign_opts:
_key = str(_x[6]) + " ||| " + _x[4]
elif result_type is DataAssessmentResult:
_key = str(_x[4])
if result_type is DataAssessmentResult:
_key = f"{_x[4]}"
else:
-_key = '{0}-{1}'.format(_x[3], _x[4])
+_key = f'{_x[3]}-{_x[4]}'
_dst[_key].append(_z_score)

_x = []
@@ -285,10 +292,6 @@ def stat_reliable_testing(_data, campaign_opts, result_type):
_t, pvalue = mannwhitneyu(_x, _y, alternative='less')
_reliable = pvalue

-except ImportError:
-print("scipy is not installed")
-pass

# Possible for mannwhitneyu() to throw in some scenarios
except ValueError:
pass
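The reliability statistic above comes from a one-sided Mann-Whitney U test on two sets of z-scores; the old ImportError fallback for a missing scipy is dropped, and only ValueError from the test itself is still caught. A minimal sketch of the call with invented samples:

from scipy.stats import mannwhitneyu

# invented z-scores for two groups of items
x = [0.1, -0.3, 0.4, 0.2, -0.1]
y = [0.5, 0.7, 0.2, 0.9, 0.6]

try:
    # one-sided test: is x stochastically smaller than y?
    _, pvalue = mannwhitneyu(x, y, alternative='less')
    print(f'p-value: {pvalue:.4f}')
except ValueError:
    # mannwhitneyu() can raise, e.g. when all values are identical
    pass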
4 changes: 2 additions & 2 deletions Dashboard/templates/Dashboard/dashboard.html
@@ -5,12 +5,12 @@
<div class="jumbotron">
<!--
<div class="alert alert-info" role="alert">
-Accounts and tasks are live now &mdash; check <a href="https://github.com/AppraiseDev/WMT21SrcDA/">github.com/AppraiseDev/WMT21SrcDA/</a> for latest updates.
+Accounts and tasks are live now.
</div>
-->

<h1>Dashboard</h1>
-<h4>Evaluation campaign for shared tasks hosted at <a href="https://statmt.org/wmt23">the 8th Conference on Machine Translation</a> (WMT23)</h4>
+<h4>Evaluation campaign for shared tasks hosted at <a href="https://statmt.org/wmt24">the 9th Conference on Machine Translation</a> (WMT24)</h4>

<div class="panel panel-primary" style="margin-top: 20px;">
<div class="panel-heading">
7 changes: 1 addition & 6 deletions Dashboard/utils.py
@@ -84,7 +84,6 @@ def run_quality_control(username):
'item__target1ID',
'item__itemType',
'item__id',
-'item__sourceText',
)
else:
_data = _data.values_list(
@@ -95,7 +94,6 @@ def run_quality_control(username):
'item__targetID',
'item__itemType',
'item__id',
-'item__sourceText',
)

_annotations = len(set([x[6] for x in _data]))
@@ -124,10 +122,7 @@ def run_quality_control(username):

_z_score = (_x[2] - _user_mean) / _user_stdev

if "esa" in campaign_opts:
_key = f"{_x[7]} ||| {_x[4]}"
else:
_key = f'{_x[3]}-{_x[4]}'
_key = f'{_x[3]}-{_x[4]}'

_dst[_key].append(_z_score)

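run_quality_control now builds the same itemID-targetID key for every campaign type when binning a user's z-scores (the index meanings below are an assumption based on the visible field list). A small sketch of that normalisation and binning step with invented data:

import collections
import statistics

# invented (score, itemID, targetID) triples for one user
rows = [(78, '12', 'systemA'), (55, '12', 'systemB'), (91, '40', 'systemA')]

scores = [score for score, _, _ in rows]
user_mean = statistics.mean(scores)
user_stdev = statistics.stdev(scores)

dst = collections.defaultdict(list)
for score, item_id, target_id in rows:
    z_score = (score - user_mean) / user_stdev
    dst[f'{item_id}-{target_id}'].append(z_score)  # same f'{itemID}-{targetID}' key shape

print(dict(dst))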
5 changes: 5 additions & 0 deletions Dashboard/views.py
@@ -83,6 +83,11 @@ def sso_login(request, username, password):
logout(request)

user = authenticate(username=username, password=password)

+# login failed
+if user is None:
+return redirect('dashboard')

login(request, user)

LOGGER.info(
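The added check above stops sso_login from handing None to Django's login() when authentication fails and sends the visitor back to the dashboard instead. A minimal standalone sketch of the same pattern (the view name is illustrative; 'dashboard' matches the URL name used in the diff):

from django.contrib.auth import authenticate, login, logout
from django.shortcuts import redirect

def sso_login_sketch(request, username, password):
    # drop any existing session first
    logout(request)

    user = authenticate(username=username, password=password)

    # authenticate() returns None on bad credentials; bail out rather than
    # calling login(request, None)
    if user is None:
        return redirect('dashboard')

    login(request, user)
    return redirect('dashboard')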