This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Changes to mxnet.metric #18083

Merged: 27 commits, May 14, 2020

Changes from 23 commits

Commits:
f07d35e  finish 5 changes (Apr 15, 2020)
575f23b  move metric.py to gluon, replace mx.metric with mx.gluon.metric in py… (acphile, Apr 16, 2020)
8992995  fix importError (acphile, Apr 16, 2020)
1b8f521  replace mx.metric with mx.gluon.metric in tests/python (acphile, Apr 16, 2020)
2ff2e38  remove global support (acphile, Apr 20, 2020)
c06f363  remove macro support (acphile, Apr 20, 2020)
6beba21  rewrite BinaryAccuracy (acphile, Apr 20, 2020)
b1fc42b  extend F1 to multiclass/multilabel (acphile, Apr 21, 2020)
4b091b0  add tests for new F1, remove global tests (acphile, Apr 21, 2020)
1dfe0e0  use mxnet.numpy instead of numpy (acphile, Apr 22, 2020)
083e85b  Merge remote-tracking branch 'upstream/master' (acphile, Apr 24, 2020)
59d98b3  fix sanity (acphile, Apr 25, 2020)
40e87e3  rewrite ce and ppl, improve some details (acphile, Apr 27, 2020)
5e153e1  use mxnet.numpy.float64 (acphile, Apr 27, 2020)
bf68c6d  remove sklearn (acphile, Apr 28, 2020)
56b846e  remove reset_local() and get_global in other files (acphile, Apr 29, 2020)
8a437e9  fix test_mlp (acphile, Apr 29, 2020)
b7c2b3b  replace mx.metric with mx.gluon.metric in example (acphile, Apr 29, 2020)
ec615a5  fix context difference (acphile, Apr 29, 2020)
c4a3b67  Disable -DUSE_TVM_OP on GPU builds (leezu, Apr 30, 2020)
0456416  Fix disable tvm op for gpu runs (leezu, Apr 30, 2020)
2a80a0a  resolve conflicts (acphile, May 6, 2020)
8163fbb  use label.ctx in metric.py; remove gluoncv dependency in test_cvnets (acphile, May 7, 2020)
d53e6ef  fix sanity (acphile, May 7, 2020)
3adfa5e  Merge branch 'master' into master (leezu, May 7, 2020)
a2b0ffe  fix importError (acphile, May 8, 2020)
ef3058a  remove nose (acphile, May 9, 2020)
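The thrust of the PR: the metric module moves from mxnet.metric to mxnet.gluon.metric, and the file-by-file diff below is almost entirely that one-line namespace substitution at each call site. A minimal before/after sketch with toy data (it assumes the call signatures are unchanged by the move, which is what the diff shows for these call sites):

import mxnet as mx

# before this PR:  acc = mx.metric.Accuracy()
# after this PR, the same class is reached through the Gluon namespace:
acc = mx.gluon.metric.Accuracy()

# update() takes lists of label and prediction arrays
acc.update(labels=[mx.nd.array([0, 1, 1])],
           preds=[mx.nd.array([[0.7, 0.3], [0.2, 0.8], [0.6, 0.4]])])
print(acc.get())  # ('accuracy', 0.666...): 2 of 3 argmax predictions match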
2 changes: 1 addition & 1 deletion benchmark/python/sparse/sparse_end2end.py
@@ -225,7 +225,7 @@ def row_sparse_pull(kv, key, data, slices, weight_array, priority):
learning_rate=0.1, rescale_grad=1.0/batch_size/num_worker)
mod.init_optimizer(optimizer=sgd, kvstore=kv)
# use accuracy as the metric
-metric = mx.metric.create('acc')
+metric = mx.gluon.metric.create('acc')

index = mod._exec_group.param_names.index('w')
# weight_array bound to executors of the contexts
6 changes: 3 additions & 3 deletions example/adversary/adversary_generation.ipynb
@@ -168,7 +168,7 @@
"epoch = 3\n",
"for e in range(epoch):\n",
" train_loss = 0.\n",
" acc = mx.metric.Accuracy()\n",
" acc = mx.gluon.metric.Accuracy()\n",
" for i, (data, label) in enumerate(train_data):\n",
" data = data.as_in_context(ctx)\n",
" label = label.as_in_context(ctx)\n",
@@ -223,7 +223,7 @@
" l = loss(output, label)\n",
"l.backward()\n",
"\n",
"acc = mx.metric.Accuracy()\n",
"acc = mx.gluon.metric.Accuracy()\n",
"acc.update(label, output)\n",
"\n",
"print(\"Validation batch accuracy {}\".format(acc.get()[1]))"
@@ -256,7 +256,7 @@
"\n",
"output = net(data_perturbated) \n",
"\n",
"acc = mx.metric.Accuracy()\n",
"acc = mx.gluon.metric.Accuracy()\n",
"acc.update(label, output)\n",
"\n",
"print(\"Validation batch accuracy after perturbation {}\".format(acc.get()[1]))"
@@ -610,7 +610,7 @@
],
"source": [
"# calculate the ELBO which is minus the loss for test set\n",
"metric = mx.metric.Loss()\n",
"metric = mx.gluon.metric.Loss()\n",
"model.score(nd_iter_test, metric)"
]
},
2 changes: 1 addition & 1 deletion example/caffe/caffe_net.py
@@ -140,6 +140,6 @@ def parse_args():

# train
if use_caffe_loss:
-train_model.fit(args, net, get_iterator(data_shape, use_caffe_data), mx.metric.Caffe())
+train_model.fit(args, net, get_iterator(data_shape, use_caffe_data), mx.gluon.metric.Caffe())
else:
train_model.fit(args, net, get_iterator(data_shape, use_caffe_data))
2 changes: 1 addition & 1 deletion example/caffe/train_model.py
@@ -93,7 +93,7 @@ def fit(args, network, data_loader, eval_metrics=None, batch_end_callback=None):
eval_metrics = ['accuracy']
# TopKAccuracy only allows top_k > 1
for top_k in [5, 10, 20]:
-eval_metrics.append(mx.metric.create('top_k_accuracy', top_k=top_k))
+eval_metrics.append(mx.gluon.metric.create('top_k_accuracy', top_k=top_k))

if batch_end_callback is not None:
if not isinstance(batch_end_callback, list):
2 changes: 1 addition & 1 deletion example/capsnet/capsulenet.py
@@ -122,7 +122,7 @@ def to4d(img):
return img.reshape(img.shape[0], 1, 28, 28).astype(np.float32)/255


-class LossMetric(mx.metric.EvalMetric):
+class LossMetric(mx.gluon.metric.EvalMetric):
"""Evaluate the loss function"""
def __init__(self, batch_size, num_gpus):
super(LossMetric, self).__init__('LossMetric')
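Custom metrics defined as subclasses, like LossMetric above, only need the base-class path updated. A sketch of the pattern (the MeanError class and its arithmetic are illustrative, not from this PR, and it assumes EvalMetric keeps its sum_metric/num_inst accumulators after the move):

from mxnet.gluon import metric

class MeanError(metric.EvalMetric):
    """Illustrative subclass: running mean of |label - pred| over batches."""
    def __init__(self, name='mean_error'):
        super(MeanError, self).__init__(name)

    def update(self, labels, preds):
        for label, pred in zip(labels, preds):
            # accumulate one batch-mean error per (label, pred) pair;
            # get() then reports sum_metric / num_inst
            self.sum_metric += (label - pred).abs().mean().asscalar()
            self.num_inst += 1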
2 changes: 1 addition & 1 deletion example/ctc/lstm_ocr_train.py
@@ -103,7 +103,7 @@ def main():
module.fit(train_data=data_train,
eval_data=data_val,
# use metrics.accuracy or metrics.accuracy_lcs
-eval_metric=mx.metric.np(metrics.accuracy, allow_extra_outputs=True),
+eval_metric=mx.gluon.metric.np(metrics.accuracy, allow_extra_outputs=True),
optimizer='sgd',
optimizer_params={'learning_rate': hp.learning_rate,
'momentum': hp.momentum,
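The np() helper wraps a plain NumPy function into a metric object, as this example does with metrics.accuracy. A sketch with a hypothetical scorer (assuming mx.gluon.metric.np keeps the pre-move signature: a numpy_feval plus an allow_extra_outputs flag that tolerates networks emitting auxiliary outputs):

import numpy as np
import mxnet as mx

def batch_accuracy(label, pred):
    # plain NumPy feval: label and pred arrive as numpy arrays
    return float((pred.argmax(axis=1) == label).mean())

eval_metric = mx.gluon.metric.np(batch_accuracy, allow_extra_outputs=True)
eval_metric.update([mx.nd.array([0, 1])], [mx.nd.array([[0.9, 0.1], [0.2, 0.8]])])
print(eval_metric.get())  # ('batch_accuracy', 1.0)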
4 changes: 2 additions & 2 deletions example/deep-embedded-clustering/autoencoder.py
@@ -165,7 +165,7 @@ def l2_norm(label, pred):
return np.mean(np.square(label-pred))/2.0
solver = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
lr_scheduler=lr_scheduler)
-solver.set_metric(mx.metric.CustomMetric(l2_norm))
+solver.set_metric(mx.gluon.metric.CustomMetric(l2_norm))
solver.set_monitor(Monitor(print_every))
data_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
last_batch_handle='roll_over')
@@ -188,7 +188,7 @@ def l2_norm(label, pred):
return np.mean(np.square(label-pred))/2.0
solver = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
lr_scheduler=lr_scheduler)
-solver.set_metric(mx.metric.CustomMetric(l2_norm))
+solver.set_metric(mx.gluon.metric.CustomMetric(l2_norm))
solver.set_monitor(Monitor(print_every))
data_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
last_batch_handle='roll_over')
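CustomMetric is the same idea without the wrapper helper: pass a feval(label, pred) callable directly. A sketch reusing this example's l2_norm with made-up arrays:

import numpy as np
import mxnet as mx

def l2_norm(label, pred):
    return np.mean(np.square(label - pred)) / 2.0

metric = mx.gluon.metric.CustomMetric(l2_norm)
metric.update([mx.nd.array([1.0, 2.0])], [mx.nd.array([1.5, 2.5])])
print(metric.get())  # ('l2_norm', 0.125): mean squared error 0.25, halved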
2 changes: 1 addition & 1 deletion example/deep-embedded-clustering/dec.py
@@ -122,7 +122,7 @@ def cluster(self, X, y=None, update_interval=None):

def ce(label, pred):
return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
-solver.set_metric(mx.metric.CustomMetric(ce))
+solver.set_metric(mx.gluon.metric.CustomMetric(ce))

label_buff = np.zeros((X.shape[0], self.num_centers))
train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
4 changes: 2 additions & 2 deletions example/distributed_training-horovod/gluon_mnist.py
@@ -104,7 +104,7 @@ def conv_nets():
# Function to evaluate accuracy for a model
def evaluate(model, data_iter, context):
data_iter.reset()
-metric = mx.metric.Accuracy()
+metric = mx.gluon.metric.Accuracy()
for _, batch in enumerate(data_iter):
data = batch.data[0].as_in_context(context)
label = batch.label[0].as_in_context(context)
@@ -149,7 +149,7 @@ def evaluate(model, data_iter, context):

# Create loss function and train metric
loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
-metric = mx.metric.Accuracy()
+metric = mx.gluon.metric.Accuracy()

# Train model
for epoch in range(args.epochs):
2 changes: 1 addition & 1 deletion example/distributed_training-horovod/module_mnist.py
@@ -157,7 +157,7 @@ def conv_net():
num_epoch=args.epochs) # train for at most 10 dataset passes

# Step 7: evaluate model accuracy
-acc = mx.metric.Accuracy()
+acc = mx.gluon.metric.Accuracy()
model.score(val_iter, acc)

if hvd.rank() == 0:
10 changes: 5 additions & 5 deletions example/distributed_training-horovod/resnet50_imagenet.py
@@ -286,8 +286,8 @@ def evaluate(epoch):
return

val_data.reset()
-acc_top1 = mx.metric.Accuracy()
-acc_top5 = mx.metric.TopKAccuracy(5)
+acc_top1 = mx.gluon.metric.Accuracy()
+acc_top5 = mx.gluon.metric.TopKAccuracy(5)
for _, batch in enumerate(val_data):
data, label = batch_fn(batch, context)
output = net(data.astype(args.dtype, copy=False))
@@ -321,7 +321,7 @@ def evaluate(epoch):

# Create loss function and train metric
loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
-metric = mx.metric.Accuracy()
+metric = mx.gluon.metric.Accuracy()

# Train model
for epoch in range(args.num_epochs):
@@ -450,8 +450,8 @@ def train_module():

# Evaluate performance if not using synthetic data
if args.use_rec:
-acc_top1 = mx.metric.Accuracy()
-acc_top5 = mx.metric.TopKAccuracy(5)
+acc_top1 = mx.gluon.metric.Accuracy()
+acc_top5 = mx.gluon.metric.TopKAccuracy(5)
res = mod.score(val_data, [acc_top1, acc_top5])
for name, val in res:
logging.info('Epoch[%d] Rank[%d] Validation-%s=%f',
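The validation pattern above is worth condensing: one pass over the data feeds several metrics at once. A sketch (net, val_data, and ctx are placeholders for the example's network, iterator, and context):

import mxnet as mx

def evaluate(net, val_data, ctx):
    acc_top1 = mx.gluon.metric.Accuracy()
    acc_top5 = mx.gluon.metric.TopKAccuracy(top_k=5)
    for data, label in val_data:
        output = net(data.as_in_context(ctx))
        acc_top1.update([label], [output])   # exact argmax match
        acc_top5.update([label], [output])   # label within the 5 highest scores
    return acc_top1.get(), acc_top5.get()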
2 changes: 1 addition & 1 deletion example/distributed_training/cifar10_dist.py
@@ -121,7 +121,7 @@ def evaluate_accuracy(data_iterator, network):
----------
tuple of array element
"""
-acc = mx.metric.Accuracy()
+acc = mx.gluon.metric.Accuracy()

# Iterate through data and label
for i, (data, label) in enumerate(data_iterator):
4 changes: 2 additions & 2 deletions example/distributed_training/cifar10_kvstore_hvd.py
@@ -123,7 +123,7 @@ def evaluate(data_iterator, network, context):
----------
tuple of array element
"""
-acc = mx.metric.Accuracy()
+acc = mx.gluon.metric.Accuracy()

# Iterate through data and label
for i, (data, label) in enumerate(data_iterator):
@@ -208,7 +208,7 @@ def __len__(self):
optimizer_params={'learning_rate': args.lr},
kvstore=store)

-train_metric = mx.metric.Accuracy()
+train_metric = mx.gluon.metric.Accuracy()

# Run as many epochs as required
for epoch in range(args.epochs):
2 changes: 1 addition & 1 deletion example/fcn-xs/solver.py
@@ -23,7 +23,7 @@
from collections import namedtuple
from mxnet import optimizer as opt
from mxnet.optimizer import get_updater
-from mxnet import metric
+from mxnet.gluon import metric

# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams', ['epoch', 'nbatch', 'eval_metric'])
2 changes: 1 addition & 1 deletion example/gluon/audio/urban_sounds/train.py
@@ -28,7 +28,7 @@

def evaluate_accuracy(data_iterator, net):
"""Function to evaluate accuracy of any data iterator passed to it as an argument"""
-acc = mx.metric.Accuracy()
+acc = mx.gluon.metric.Accuracy()
for data, label in data_iterator:
output = net(data)
predictions = nd.argmax(output, axis=1)
2 changes: 1 addition & 1 deletion example/gluon/dc_gan/dcgan.py
@@ -259,7 +259,7 @@ def main():
real_label = mx.nd.ones((opt.batch_size,), ctx=ctx)
fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx)

-metric = mx.metric.Accuracy()
+metric = mx.gluon.metric.Accuracy()
print('Training... ')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')

2 changes: 1 addition & 1 deletion example/gluon/image_classification.py
@@ -27,7 +27,7 @@
from mxnet.gluon.model_zoo import vision as models
from mxnet import autograd as ag
from mxnet.test_utils import get_mnist_iterator
-from mxnet.metric import Accuracy, TopKAccuracy, CompositeEvalMetric
+from mxnet.gluon.metric import Accuracy, TopKAccuracy, CompositeEvalMetric
import numpy as np

from data import (get_cifar10_iterator, get_imagenet_iterator,
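The rename applies to direct imports as well. A sketch of composing the imported classes by hand (assuming CompositeEvalMetric keeps its add()/update()/get() interface after the move, where get() returns parallel name and value lists):

from mxnet import nd
from mxnet.gluon.metric import Accuracy, TopKAccuracy, CompositeEvalMetric

composite = CompositeEvalMetric()
composite.add(Accuracy())
composite.add(TopKAccuracy(top_k=3))

labels = [nd.array([1, 2])]
preds = [nd.array([[0.1, 0.8, 0.1, 0.0], [0.3, 0.3, 0.2, 0.2]])]
composite.update(labels, preds)  # one update feeds every child metric
for name, value in zip(*composite.get()):
    print(name, value)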
4 changes: 2 additions & 2 deletions example/gluon/mnist/mnist.py
@@ -70,7 +70,7 @@ def transformer(data, label):
# train

def test(ctx):
-metric = mx.metric.Accuracy()
+metric = mx.gluon.metric.Accuracy()
for data, label in val_data:
data = data.as_in_context(ctx)
label = label.as_in_context(ctx)
@@ -86,7 +86,7 @@ def train(epochs, ctx):
# Trainer is for updating parameters with gradient.
trainer = gluon.Trainer(net.collect_params(), 'sgd',
{'learning_rate': opt.lr, 'momentum': opt.momentum})
-metric = mx.metric.Accuracy()
+metric = mx.gluon.metric.Accuracy()
loss = gluon.loss.SoftmaxCrossEntropyLoss()

for epoch in range(epochs):
2 changes: 1 addition & 1 deletion example/gluon/sn_gan/train.py
@@ -102,7 +102,7 @@ def facc(label, pred):
g_net.collect_params().zero_grad()
d_net.collect_params().zero_grad()
# define evaluation metric
-metric = mx.metric.CustomMetric(facc)
+metric = mx.gluon.metric.CustomMetric(facc)
# initialize labels
real_label = nd.ones(BATCH_SIZE, CTX)
fake_label = nd.zeros(BATCH_SIZE, CTX)
2 changes: 1 addition & 1 deletion example/gluon/super_resolution/super_resolution.py
@@ -156,7 +156,7 @@ def hybrid_forward(self, F, x):
return x

net = SuperResolutionNet(upscale_factor)
-metric = mx.metric.MSE()
+metric = mx.gluon.metric.MSE()

def test(ctx):
val_data.reset()
2 changes: 1 addition & 1 deletion example/gluon/tree_lstm/main.py
@@ -96,7 +96,7 @@
net = SimilarityTreeLSTM(sim_hidden_size, rnn_hidden_size, vocab.size, vocab.embed.shape[1], num_classes)

# use pearson correlation and mean-square error for evaluation
-metric = mx.metric.create(['pearsonr', 'mse'])
+metric = mx.gluon.metric.create(['pearsonr', 'mse'])

def to_target(x):
target = np.zeros((1, num_classes))
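create() also accepts a list of registered metric names and returns a composite, which is what this example relies on. A sketch with toy regression data:

import mxnet as mx

metric = mx.gluon.metric.create(['pearsonr', 'mse'])  # resolved by registry name

label = mx.nd.array([0.0, 1.0, 2.0, 3.0])
pred = mx.nd.array([0.1, 0.9, 2.2, 2.8])
metric.update([label], [pred])
print(metric.get())  # roughly (['pearsonr', 'mse'], [0.99, 0.025])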
4 changes: 2 additions & 2 deletions example/image-classification/common/fit.py
@@ -290,7 +290,7 @@ def fit(args, network, data_loader, **kwargs):
# evaluation metrices
eval_metrics = ['accuracy']
if args.top_k > 0:
-eval_metrics.append(mx.metric.create(
+eval_metrics.append(mx.gluon.metric.create(
'top_k_accuracy', top_k=args.top_k))

supported_loss = ['ce', 'nll_loss']
@@ -306,7 +306,7 @@
logging.warning(loss_type + ' is not an valid loss type, only cross-entropy or ' \
'negative likelihood loss is supported!')
else:
-eval_metrics.append(mx.metric.create(loss_type))
+eval_metrics.append(mx.gluon.metric.create(loss_type))
else:
logging.warning("The output is not softmax_output, loss argument will be skipped!")

4 changes: 2 additions & 2 deletions example/image-classification/score.py
@@ -97,8 +97,8 @@ def score(model, data_val, metrics, gpus, batch_size, rgb_mean=None, mean_img=No
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

-metrics = [mx.metric.create('acc'),
-mx.metric.create('top_k_accuracy', top_k = 5)]
+metrics = [mx.gluon.metric.create('acc'),
+mx.gluon.metric.create('top_k_accuracy', top_k = 5)]

(speed,) = score(metrics = metrics, **vars(args))
logging.info('Finished with %f images per second', speed)
4 changes: 2 additions & 2 deletions example/image-classification/test_score.py
@@ -43,7 +43,7 @@ def test_imagenet1k_resnet(imagenet_val_5k_settings):
models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152']
accs = [.77, .78]
for (m, g) in zip(models, accs):
-acc = mx.metric.create('acc')
+acc = mx.gluon.metric.create('acc')
(speed,) = score(model=m, data_val=imagenet_val_5k,
rgb_mean='0,0,0', metrics=acc, **kwargs)
r = acc.get()[1]
@@ -52,7 +52,7 @@

def test_imagenet1k_inception_bn(imagenet_val_5k_settings):
imagenet_val_5k, kwargs = imagenet_val_5k_settings
-acc = mx.metric.create('acc')
+acc = mx.gluon.metric.create('acc')
m = 'imagenet1k-inception-bn'
g = 0.75
(speed,) = score(model=m,
4 changes: 2 additions & 2 deletions example/kaggle-ndsb2/Train.py
@@ -111,7 +111,7 @@ def encode_csv(label_csv, systole_csv, diastole_csv):
wd = 0.00001,
momentum = 0.9)

-systole_model.fit(X=data_train, eval_metric = mx.metric.np(CRPS))
+systole_model.fit(X=data_train, eval_metric = mx.gluon.metric.np(CRPS))


# # Predict systole
@@ -139,7 +139,7 @@ def encode_csv(label_csv, systole_csv, diastole_csv):
wd = 0.00001,
momentum = 0.9)

-diastole_model.fit(X=data_train, eval_metric = mx.metric.np(CRPS))
+diastole_model.fit(X=data_train, eval_metric = mx.gluon.metric.np(CRPS))


# # Predict diastole
2 changes: 1 addition & 1 deletion example/model-parallel/matrix_factorization/train.py
@@ -94,7 +94,7 @@
'rescale_grad': 1.0/batch_size}

# use MSE as the metric
-metric = mx.metric.create(['MSE'])
+metric = mx.gluon.metric.create(['MSE'])

speedometer = mx.callback.Speedometer(batch_size, print_every)

2 changes: 1 addition & 1 deletion example/module/mnist_mlp.py
@@ -55,7 +55,7 @@
mod.init_params()

mod.init_optimizer(optimizer_params={'learning_rate':0.01, 'momentum': 0.9})
-metric = mx.metric.create('acc')
+metric = mx.gluon.metric.create('acc')

for i_epoch in range(n_epoch):
for i_iter, batch in enumerate(train_dataiter):
8 changes: 4 additions & 4 deletions example/multi-task/multi-task-learning.ipynb
@@ -267,8 +267,8 @@
"outputs": [],
"source": [
"def evaluate_accuracy(net, data_iterator):\n",
" acc_digits = mx.metric.Accuracy(name='digits')\n",
" acc_odd_even = mx.metric.Accuracy(name='odd_even')\n",
" acc_digits = mx.gluon.metric.Accuracy(name='digits')\n",
" acc_odd_even = mx.gluon.metric.Accuracy(name='odd_even')\n",
" \n",
" for i, (data, label_digit, label_odd_even) in enumerate(data_iterator):\n",
" data = data.as_in_context(ctx)\n",
@@ -335,8 +335,8 @@
"source": [
"for e in range(epochs):\n",
" # Accuracies for each task\n",
" acc_digits = mx.metric.Accuracy(name='digits')\n",
" acc_odd_even = mx.metric.Accuracy(name='odd_even')\n",
" acc_digits = mx.gluon.metric.Accuracy(name='digits')\n",
" acc_odd_even = mx.gluon.metric.Accuracy(name='odd_even')\n",
" # Accumulative losses\n",
" l_digits_ = 0.\n",
" l_odd_even_ = 0. \n",
8 changes: 4 additions & 4 deletions example/multivariate_time_series/src/metrics.py
@@ -46,10 +46,10 @@ def get_custom_metrics():
"""
:return: mxnet metric object
"""
-_rse = mx.metric.create(rse)
-_rae = mx.metric.create(rae)
-_corr = mx.metric.create(corr)
-return mx.metric.create([_rae, _rse, _corr])
+_rse = mx.gluon.metric.create(rse)
+_rae = mx.gluon.metric.create(rae)
+_corr = mx.gluon.metric.create(corr)
+return mx.gluon.metric.create([_rae, _rse, _corr])

def evaluate(pred, label):
return {"RAE":rae(label, pred), "RSE":rse(label,pred),"CORR": corr(label,pred)}