This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Changes to mxnet.metric #18083

Merged: 27 commits (May 14, 2020)
Changes from 20 commits
Commits (27)
f07d35e  finish 5 changes (Apr 15, 2020)
575f23b  move metric.py to gluon, replace mx.metric with mx.gluon.metric in py… (acphile, Apr 16, 2020)
8992995  fix importError (acphile, Apr 16, 2020)
1b8f521  replace mx.metric with mx.gluon.metric in tests/python (acphile, Apr 16, 2020)
2ff2e38  remove global support (acphile, Apr 20, 2020)
c06f363  remove macro support (acphile, Apr 20, 2020)
6beba21  rewrite BinaryAccuracy (acphile, Apr 20, 2020)
b1fc42b  extend F1 to multiclass/multilabel (acphile, Apr 21, 2020)
4b091b0  add tests for new F1, remove global tests (acphile, Apr 21, 2020)
1dfe0e0  use mxnet.numpy instead of numpy (acphile, Apr 22, 2020)
083e85b  Merge remote-tracking branch 'upstream/master' (acphile, Apr 24, 2020)
59d98b3  fix sanity (acphile, Apr 25, 2020)
40e87e3  rewrite ce and ppl, improve some details (acphile, Apr 27, 2020)
5e153e1  use mxnet.numpy.float64 (acphile, Apr 27, 2020)
bf68c6d  remove sklearn (acphile, Apr 28, 2020)
56b846e  remove reset_local() and get_global in other files (acphile, Apr 29, 2020)
8a437e9  fix test_mlp (acphile, Apr 29, 2020)
b7c2b3b  replace mx.metric with mx.gluon.metric in example (acphile, Apr 29, 2020)
ec615a5  fix context difference (acphile, Apr 29, 2020)
c4a3b67  Disable -DUSE_TVM_OP on GPU builds (leezu, Apr 30, 2020)
0456416  Fix disable tvm op for gpu runs (leezu, Apr 30, 2020)
2a80a0a  resolve conflicts (acphile, May 6, 2020)
8163fbb  use label.ctx in metric.py; remove gluoncv dependency in test_cvnets (acphile, May 7, 2020)
d53e6ef  fix sanity (acphile, May 7, 2020)
3adfa5e  Merge branch 'master' into master (leezu, May 7, 2020)
a2b0ffe  fix importError (acphile, May 8, 2020)
ef3058a  remove nose (acphile, May 9, 2020)
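
Taken together, these commits move the metric API from the top-level mx.metric namespace into mx.gluon.metric, dropping the global/local bookkeeping, the macro averaging support, and the sklearn and nose dependencies along the way. A minimal before/after sketch of the rename, assuming the Accuracy update/get surface is otherwise unchanged (which is what the diffs below show):

import mxnet as mx

labels = mx.nd.array([1, 0])
preds = mx.nd.array([[0.3, 0.7], [0.6, 0.4]])  # argmax picks classes [1, 0]

# Before this PR:  acc = mx.metric.Accuracy()
# After this PR:
acc = mx.gluon.metric.Accuracy()
acc.update(labels, preds)
print(acc.get())  # ('accuracy', 1.0)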
2 changes: 1 addition & 1 deletion benchmark/python/sparse/sparse_end2end.py
@@ -225,7 +225,7 @@ def row_sparse_pull(kv, key, data, slices, weight_array, priority):
learning_rate=0.1, rescale_grad=1.0/batch_size/num_worker)
mod.init_optimizer(optimizer=sgd, kvstore=kv)
# use accuracy as the metric
metric = mx.metric.create('acc')
metric = mx.gluon.metric.create('acc')

index = mod._exec_group.param_names.index('w')
# weight_array bound to executors of the contexts
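
The create factory above survives the move under the new namespace. A short usage sketch, on the assumption that the 'acc' alias and the update/get surface carry over from mx.metric unchanged:

import mxnet as mx

metric = mx.gluon.metric.create('acc')  # same factory, new namespace
labels = mx.nd.array([0, 1, 1])
preds = mx.nd.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]])  # argmax -> [0, 1, 0]
metric.update(labels, preds)
print(metric.get())  # ('accuracy', 0.666...)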
49 changes: 2 additions & 47 deletions ci/docker/runtime_functions.sh
@@ -723,7 +723,6 @@ build_ubuntu_gpu_mkldnn() {
CC=gcc-7 CXX=g++-7 cmake \
-DCMAKE_BUILD_TYPE="RelWithDebInfo" \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_TVM_OP=ON \
-DUSE_CUDA=ON \
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-DUSE_CPP_PACKAGE=ON \
@@ -737,7 +736,6 @@ build_ubuntu_gpu_mkldnn_nocudnn() {
CC=gcc-7 CXX=g++-7 cmake \
-DCMAKE_BUILD_TYPE="RelWithDebInfo" \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_TVM_OP=ON \
-DUSE_CUDA=ON \
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-DUSE_CUDNN=OFF \
@@ -752,7 +750,6 @@ build_ubuntu_gpu_cuda101_cudnn7() {
CC=gcc-7 CXX=g++-7 cmake \
-DCMAKE_BUILD_TYPE="RelWithDebInfo" \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_TVM_OP=ON \
-DUSE_CUDA=ON \
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-DUSE_CUDNN=ON \
@@ -775,7 +772,6 @@ build_ubuntu_gpu_cuda101_cudnn7_make() {
USE_CUDA=1 \
USE_CUDA_PATH=/usr/local/cuda \
USE_CUDNN=1 \
USE_TVM_OP=1 \
USE_CPP_PACKAGE=1 \
USE_DIST_KVSTORE=1 \
CUDA_ARCH="$CI_CUDA_COMPUTE_CAPABILITIES" \
@@ -795,7 +791,6 @@ build_ubuntu_gpu_cuda101_cudnn7_mkldnn_cpp_test() {
USE_CUDA=1 \
USE_CUDA_PATH=/usr/local/cuda \
USE_CUDNN=1 \
USE_TVM_OP=0 \
USE_CPP_PACKAGE=1 \
USE_DIST_KVSTORE=1 \
CUDA_ARCH="$CI_CUDA_COMPUTE_CAPABILITIES" \
@@ -805,23 +800,6 @@ build_ubuntu_gpu_cuda101_cudnn7_mkldnn_cpp_test() {
make cython PYTHON=python3
}

build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op() {
set -ex
cd /work/build
CC=gcc-7 CXX=g++-7 cmake \
-DCMAKE_BUILD_TYPE="RelWithDebInfo" \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_TVM_OP=OFF \
-DUSE_CUDA=ON \
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-DUSE_CUDNN=ON \
-DUSE_MKLDNN=OFF \
-DBUILD_CYTHON_MODULES=ON \
-DUSE_DIST_KVSTORE=ON \
-G Ninja /work/mxnet
ninja
}

build_ubuntu_amalgamation() {
set -ex
# Amalgamation can not be run with -j nproc
@@ -852,7 +830,6 @@ build_ubuntu_gpu_cmake() {
-DUSE_SIGNAL_HANDLER=ON \
-DUSE_CUDA=ON \
-DUSE_CUDNN=ON \
-DUSE_TVM_OP=ON \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_MKLML_MKL=OFF \
-DUSE_MKLDNN=OFF \
@@ -873,7 +850,6 @@ build_ubuntu_gpu_cmake_no_rtc() {
-DUSE_SIGNAL_HANDLER=ON \
-DUSE_CUDA=ON \
-DUSE_CUDNN=ON \
-DUSE_TVM_OP=ON \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_MKLML_MKL=OFF \
-DUSE_MKLDNN=ON \
@@ -888,27 +864,6 @@ build_ubuntu_gpu_cmake_no_rtc() {
ninja
}

build_ubuntu_gpu_cmake_no_tvm_op() {
set -ex
cd /work/build
CC=gcc-7 CXX=g++-7 cmake \
-DUSE_SIGNAL_HANDLER=ON \
-DUSE_CUDA=ON \
-DUSE_CUDNN=ON \
-DUSE_TVM_OP=OFF \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_MKLML_MKL=OFF \
-DUSE_MKLDNN=OFF \
-DUSE_DIST_KVSTORE=ON \
-DCMAKE_BUILD_TYPE=Release \
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-DBUILD_CYTHON_MODULES=1 \
-G Ninja \
/work/mxnet

ninja
}

build_ubuntu_cpu_large_tensor() {
set -ex
cd /work/build
@@ -931,7 +886,6 @@ build_ubuntu_gpu_large_tensor() {
-DUSE_SIGNAL_HANDLER=ON \
-DUSE_CUDA=ON \
-DUSE_CUDNN=ON \
-DUSE_TVM_OP=ON \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_MKLML_MKL=OFF \
-DUSE_MKLDNN=OFF \
@@ -989,7 +943,8 @@ cd_unittest_ubuntu() {

# Adding these here as CI doesn't test all CUDA environments
pytest example/image-classification/test_score.py
integrationtest_ubuntu_gpu_dist_kvstore
# TODO(szha): fix and reenable the hanging issue. tracked in #18098
# integrationtest_ubuntu_gpu_dist_kvstore
fi

if [[ ${mxnet_variant} = *mkl ]]; then
27 changes: 0 additions & 27 deletions ci/jenkins/Jenkins_steps.groovy
@@ -289,20 +289,6 @@ def compile_unix_full_gpu_mkldnn_cpp_test() {
}]
}

def compile_unix_full_gpu_no_tvm_op() {
return ['GPU: CUDA10.1+cuDNN7 TVM_OP OFF': {
node(NODE_LINUX_CPU) {
ws('workspace/build-gpu-no-tvm-op') {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_build_cuda', 'build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op', false)
utils.pack_lib('gpu_no_tvm_op', mx_lib_cpp_examples_no_tvm_op)
}
}
}
}]
}

def compile_unix_cmake_gpu() {
return ['GPU: CMake': {
node(NODE_LINUX_CPU) {
@@ -317,19 +303,6 @@ def compile_unix_cmake_gpu() {
}]
}

def compile_unix_cmake_gpu_no_tvm_op() {
return ['GPU: CMake TVM_OP OFF': {
node(NODE_LINUX_CPU) {
ws('workspace/build-cmake-gpu-no-tvm-op') {
timeout(time: max_time, unit: 'MINUTES') {
utils.init_git()
utils.docker_run('ubuntu_gpu_cu101', 'build_ubuntu_gpu_cmake_no_tvm_op', false)
}
}
}
}]
}

def compile_unix_cmake_gpu_no_rtc() {
return ['GPU: CMake CUDA RTC OFF': {
node(NODE_LINUX_CPU) {
2 changes: 0 additions & 2 deletions ci/jenkins/Jenkinsfile_unix_gpu
@@ -41,8 +41,6 @@ core_logic: {
custom_steps.compile_unix_cmake_gpu(),
custom_steps.compile_unix_tensorrt_gpu(),
custom_steps.compile_unix_int64_gpu(),
custom_steps.compile_unix_full_gpu_no_tvm_op(),
custom_steps.compile_unix_cmake_gpu_no_tvm_op(),
custom_steps.compile_unix_cmake_gpu_no_rtc(),
custom_steps.compile_unix_full_gpu_mkldnn_cpp_test()
])
6 changes: 3 additions & 3 deletions example/adversary/adversary_generation.ipynb
@@ -168,7 +168,7 @@
"epoch = 3\n",
"for e in range(epoch):\n",
" train_loss = 0.\n",
" acc = mx.metric.Accuracy()\n",
" acc = mx.gluon.metric.Accuracy()\n",
" for i, (data, label) in enumerate(train_data):\n",
" data = data.as_in_context(ctx)\n",
" label = label.as_in_context(ctx)\n",
@@ -223,7 +223,7 @@
" l = loss(output, label)\n",
"l.backward()\n",
"\n",
"acc = mx.metric.Accuracy()\n",
"acc = mx.gluon.metric.Accuracy()\n",
"acc.update(label, output)\n",
"\n",
"print(\"Validation batch accuracy {}\".format(acc.get()[1]))"
@@ -256,7 +256,7 @@
"\n",
"output = net(data_perturbated) \n",
"\n",
"acc = mx.metric.Accuracy()\n",
"acc = mx.gluon.metric.Accuracy()\n",
"acc.update(label, output)\n",
"\n",
"print(\"Validation batch accuracy after perturbation {}\".format(acc.get()[1]))"
@@ -610,7 +610,7 @@
],
"source": [
"# calculate the ELBO which is minus the loss for test set\n",
"metric = mx.metric.Loss()\n",
"metric = mx.gluon.metric.Loss()\n",
"model.score(nd_iter_test, metric)"
]
},
2 changes: 1 addition & 1 deletion example/caffe/caffe_net.py
@@ -140,6 +140,6 @@ def parse_args():

# train
if use_caffe_loss:
train_model.fit(args, net, get_iterator(data_shape, use_caffe_data), mx.metric.Caffe())
train_model.fit(args, net, get_iterator(data_shape, use_caffe_data), mx.gluon.metric.Caffe())
else:
train_model.fit(args, net, get_iterator(data_shape, use_caffe_data))
2 changes: 1 addition & 1 deletion example/caffe/train_model.py
@@ -93,7 +93,7 @@ def fit(args, network, data_loader, eval_metrics=None, batch_end_callback=None):
eval_metrics = ['accuracy']
# TopKAccuracy only allows top_k > 1
for top_k in [5, 10, 20]:
eval_metrics.append(mx.metric.create('top_k_accuracy', top_k=top_k))
eval_metrics.append(mx.gluon.metric.create('top_k_accuracy', top_k=top_k))

if batch_end_callback is not None:
if not isinstance(batch_end_callback, list):
2 changes: 1 addition & 1 deletion example/capsnet/capsulenet.py
@@ -122,7 +122,7 @@ def to4d(img):
return img.reshape(img.shape[0], 1, 28, 28).astype(np.float32)/255


class LossMetric(mx.metric.EvalMetric):
class LossMetric(mx.gluon.metric.EvalMetric):
"""Evaluate the loss function"""
def __init__(self, batch_size, num_gpus):
super(LossMetric, self).__init__('LossMetric')
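
Custom metrics such as LossMetric subclass EvalMetric and override update. A schematic example, assuming the classic sum_metric/num_inst accounting still backs get() after the move; the MeanAbsError class is hypothetical and not part of this PR:

import mxnet as mx

class MeanAbsError(mx.gluon.metric.EvalMetric):
    """Hypothetical metric: mean absolute error |label - pred|."""
    def __init__(self):
        super(MeanAbsError, self).__init__('mae')

    def update(self, labels, preds):
        # Accumulate a running sum and sample count; get() reports sum/count.
        for label, pred in zip(labels, preds):
            self.sum_metric += float(mx.nd.abs(label - pred).sum().asscalar())
            self.num_inst += label.size

m = MeanAbsError()
m.update([mx.nd.array([0., 1.])], [mx.nd.array([0.5, 0.5])])
print(m.get())  # ('mae', 0.5)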
2 changes: 1 addition & 1 deletion example/ctc/lstm_ocr_train.py
@@ -103,7 +103,7 @@ def main():
module.fit(train_data=data_train,
eval_data=data_val,
# use metrics.accuracy or metrics.accuracy_lcs
eval_metric=mx.metric.np(metrics.accuracy, allow_extra_outputs=True),
eval_metric=mx.gluon.metric.np(metrics.accuracy, allow_extra_outputs=True),
optimizer='sgd',
optimizer_params={'learning_rate': hp.learning_rate,
'momentum': hp.momentum,
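
For context, np() wraps a plain-NumPy feval function into a metric object, which is what the call above relies on. A small sketch, assuming the wrapper keeps its pre-move signature apart from the namespace:

import numpy as np
import mxnet as mx

def rmse(label, pred):
    # feval receives NumPy arrays and returns a scalar
    return np.sqrt(np.mean((label - pred) ** 2))

eval_metric = mx.gluon.metric.np(rmse)  # wraps the feval in a CustomMetric
eval_metric.update([mx.nd.array([0., 2.])], [mx.nd.array([1., 1.])])
print(eval_metric.get())  # ('rmse', 1.0)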
4 changes: 2 additions & 2 deletions example/deep-embedded-clustering/autoencoder.py
@@ -165,7 +165,7 @@ def l2_norm(label, pred):
return np.mean(np.square(label-pred))/2.0
solver = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
lr_scheduler=lr_scheduler)
solver.set_metric(mx.metric.CustomMetric(l2_norm))
solver.set_metric(mx.gluon.metric.CustomMetric(l2_norm))
solver.set_monitor(Monitor(print_every))
data_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
last_batch_handle='roll_over')
@@ -188,7 +188,7 @@ def l2_norm(label, pred):
return np.mean(np.square(label-pred))/2.0
solver = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
lr_scheduler=lr_scheduler)
solver.set_metric(mx.metric.CustomMetric(l2_norm))
solver.set_metric(mx.gluon.metric.CustomMetric(l2_norm))
solver.set_monitor(Monitor(print_every))
data_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
last_batch_handle='roll_over')
2 changes: 1 addition & 1 deletion example/deep-embedded-clustering/dec.py
@@ -122,7 +122,7 @@ def cluster(self, X, y=None, update_interval=None):

def ce(label, pred):
return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
solver.set_metric(mx.metric.CustomMetric(ce))
solver.set_metric(mx.gluon.metric.CustomMetric(ce))

label_buff = np.zeros((X.shape[0], self.num_centers))
train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
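
CustomMetric, used here for the clustering KL term, turns any feval(label, pred) into an EvalMetric. A compact sketch reusing the ce function from the diff above, assuming the constructor is unchanged apart from the namespace:

import numpy as np
import mxnet as mx

def ce(label, pred):
    # same feval as in dec.py above: averaged KL-style divergence
    return np.sum(label * np.log(label / (pred + 0.000001))) / label.shape[0]

metric = mx.gluon.metric.CustomMetric(ce)
metric.update([mx.nd.array([[0.5, 0.5]])], [mx.nd.array([[0.25, 0.75]])])
print(metric.get())  # ('ce', ~0.144)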
4 changes: 2 additions & 2 deletions example/distributed_training-horovod/gluon_mnist.py
@@ -104,7 +104,7 @@ def conv_nets():
# Function to evaluate accuracy for a model
def evaluate(model, data_iter, context):
data_iter.reset()
metric = mx.metric.Accuracy()
metric = mx.gluon.metric.Accuracy()
for _, batch in enumerate(data_iter):
data = batch.data[0].as_in_context(context)
label = batch.label[0].as_in_context(context)
@@ -149,7 +149,7 @@ def evaluate(model, data_iter, context):

# Create loss function and train metric
loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
metric = mx.metric.Accuracy()
metric = mx.gluon.metric.Accuracy()

# Train model
for epoch in range(args.epochs):
2 changes: 1 addition & 1 deletion example/distributed_training-horovod/module_mnist.py
@@ -157,7 +157,7 @@ def conv_net():
num_epoch=args.epochs) # train for at most 10 dataset passes

# Step 7: evaluate model accuracy
acc = mx.metric.Accuracy()
acc = mx.gluon.metric.Accuracy()
model.score(val_iter, acc)

if hvd.rank() == 0:
10 changes: 5 additions & 5 deletions example/distributed_training-horovod/resnet50_imagenet.py
@@ -286,8 +286,8 @@ def evaluate(epoch):
return

val_data.reset()
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(5)
acc_top1 = mx.gluon.metric.Accuracy()
acc_top5 = mx.gluon.metric.TopKAccuracy(5)
for _, batch in enumerate(val_data):
data, label = batch_fn(batch, context)
output = net(data.astype(args.dtype, copy=False))
@@ -321,7 +321,7 @@ def evaluate(epoch):

# Create loss function and train metric
loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
metric = mx.metric.Accuracy()
metric = mx.gluon.metric.Accuracy()

# Train model
for epoch in range(args.num_epochs):
@@ -450,8 +450,8 @@ def train_module():

# Evaluate performance if not using synthetic data
if args.use_rec:
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(5)
acc_top1 = mx.gluon.metric.Accuracy()
acc_top5 = mx.gluon.metric.TopKAccuracy(5)
res = mod.score(val_data, [acc_top1, acc_top5])
for name, val in res:
logging.info('Epoch[%d] Rank[%d] Validation-%s=%f',
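
TopKAccuracy(5), paired with plain Accuracy above, counts a sample as correct whenever the true label lands among the five highest-scoring classes. A toy illustration of the difference (values invented for the example):

import mxnet as mx

acc_top1 = mx.gluon.metric.Accuracy()
acc_top5 = mx.gluon.metric.TopKAccuracy(5)
labels = mx.nd.array([2])
preds = mx.nd.array([[0.10, 0.20, 0.15, 0.25, 0.30, 0.00]])  # class 2 ranks 4th
for m in (acc_top1, acc_top5):
    m.update(labels, preds)
print(acc_top1.get())  # ('accuracy', 0.0): the top-1 pick is class 4
print(acc_top5.get())  # ('top_k_accuracy_5', 1.0): class 2 is in the top 5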
2 changes: 1 addition & 1 deletion example/distributed_training/cifar10_dist.py
@@ -121,7 +121,7 @@ def evaluate_accuracy(data_iterator, network):
----------
tuple of array element
"""
acc = mx.metric.Accuracy()
acc = mx.gluon.metric.Accuracy()

# Iterate through data and label
for i, (data, label) in enumerate(data_iterator):
4 changes: 2 additions & 2 deletions example/distributed_training/cifar10_kvstore_hvd.py
@@ -123,7 +123,7 @@ def evaluate(data_iterator, network, context):
----------
tuple of array element
"""
acc = mx.metric.Accuracy()
acc = mx.gluon.metric.Accuracy()

# Iterate through data and label
for i, (data, label) in enumerate(data_iterator):
@@ -208,7 +208,7 @@ def __len__(self):
optimizer_params={'learning_rate': args.lr},
kvstore=store)

train_metric = mx.metric.Accuracy()
train_metric = mx.gluon.metric.Accuracy()

# Run as many epochs as required
for epoch in range(args.epochs):
2 changes: 1 addition & 1 deletion example/fcn-xs/solver.py
@@ -23,7 +23,7 @@
from collections import namedtuple
from mxnet import optimizer as opt
from mxnet.optimizer import get_updater
from mxnet import metric
from mxnet.gluon import metric

# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams', ['epoch', 'nbatch', 'eval_metric'])
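
Downstream code that has to run on both sides of this change can guard the import. A small compatibility shim, shown as an illustration rather than anything this PR ships:

# Prefer the new location introduced by this PR; fall back for older
# MXNet releases where metric still lives at the top level.
try:
    from mxnet.gluon import metric
except ImportError:
    from mxnet import metric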
2 changes: 1 addition & 1 deletion example/gluon/audio/urban_sounds/train.py
@@ -28,7 +28,7 @@

def evaluate_accuracy(data_iterator, net):
"""Function to evaluate accuracy of any data iterator passed to it as an argument"""
acc = mx.metric.Accuracy()
acc = mx.gluon.metric.Accuracy()
for data, label in data_iterator:
output = net(data)
predictions = nd.argmax(output, axis=1)
Expand Down
2 changes: 1 addition & 1 deletion example/gluon/dc_gan/dcgan.py
Original file line number Diff line number Diff line change
Expand Up @@ -259,7 +259,7 @@ def main():
real_label = mx.nd.ones((opt.batch_size,), ctx=ctx)
fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx)

metric = mx.metric.Accuracy()
metric = mx.gluon.metric.Accuracy()
print('Training... ')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
