Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
Implement all miscellaneous ops (#17511)
Browse files Browse the repository at this point in the history
* Initial commit - added first batch of misc ops

* Initial commit - added first batch of misc ops

* Added remaining misc ops, including Custom op logic

* Added more test cases, fixed lint errors

* Update documentation

* Added run_backward=True for ops supporting backwards runs

* Added issue link for bilinear UpSampling

* Added remaining misc ops, including Custom op logic

* Update documentation

* Updated alias map

* Fixed missing and incorrect alias issues

* Added remaining missing aliases

* Fixed Custom profile dump parsing and alias

* Switched to using sets for O(1) op membership checks

* Added fix for dtype issue in master
  • Loading branch information
connorgoggins authored Feb 14, 2020
1 parent d004c2b commit 8438d98
Show file tree
Hide file tree
Showing 6 changed files with 217 additions and 30 deletions.
124 changes: 124 additions & 0 deletions benchmark/opperf/nd_operations/misc_operators.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,124 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""Performance benchmark tests for MXNet NDArray Miscellaneous Operations.
Below 16 Miscellaneous Operators are covered:
['reset_arrays', 'multi_all_finite', 'multi_sum_sq', 'add_n', 'UpSampling', 'Custom', 'squeeze',
'all_finite', 'clip', 'multi_lars', 'SequenceReverse', 'SequenceLast', 'SequenceMask', 'cast_storage',
'cumsum', 'fill_element_0index']
"""

import mxnet as mx

from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks
from benchmark.opperf.utils.op_registry_utils import get_remaining_miscellaneous_operators

from benchmark.opperf.utils.benchmark_utils import run_performance_test
from benchmark.opperf.utils.common_utils import merge_map_list
from benchmark.opperf.rules.default_params import MX_OP_MODULE

from benchmark.opperf.custom_operations.custom_operations import CustomAddOneProp


def run_mx_misc_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', warmup=25, runs=100):
    """Runs benchmarks with the given context and precision (dtype) for all the miscellaneous
    operators in MXNet.
    Parameters
    ----------
    ctx: mx.ctx
        Context to run benchmarks
    dtype: str, default 'float32'
        Precision to use for benchmarks
    profiler: str, default 'native'
        Type of Profiler to use (native/python)
    warmup: int, default 25
        Number of times to run for warmup
    runs: int, default 100
        Number of runs to capture benchmark results
    Returns
    -------
    Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
    """
    # Keyword arguments shared by every run_performance_test invocation below.
    shared_kwargs = {"dtype": dtype, "ctx": ctx, "profiler": profiler,
                     "warmup": warmup, "runs": runs}

    # Individual tests for ops with positional args
    array_ops_benchmark = run_performance_test(
        [getattr(MX_OP_MODULE, op_name)
         for op_name in ("reset_arrays", "multi_all_finite", "multi_sum_sq")],
        run_backward=False,
        inputs=[{"args": [(1024, 1024)], "num_arrays": 1},
                {"args": [(10000, 1)], "num_arrays": 1},
                {"args": [(10000, 10)], "num_arrays": 1}],
        **shared_kwargs)

    add_n_benchmark = run_performance_test(
        [getattr(MX_OP_MODULE, "add_n")],
        run_backward=True,
        inputs=[{"args": [(1024, 1024)]},
                {"args": [(10000, 1)]},
                {"args": [(10000, 10)]}],
        **shared_kwargs)

    # There are currently issues with UpSampling with bilinear interpolation.
    # track issue here: https://github.com/apache/incubator-mxnet/issues/9138
    upsampling_benchmark = run_performance_test(
        [getattr(MX_OP_MODULE, "UpSampling")],
        run_backward=True,
        inputs=[{"args": (32, 3, 256, 256),
                 "scale": 2,
                 "sample_type": "nearest"},
                {"args": (32, 3, 10000, 1),
                 "scale": 4,
                 "sample_type": "nearest"}],
        **shared_kwargs)

    # Create and register CustomAddOne operator for use in Custom op testing
    custom_op_prop = CustomAddOneProp()
    custom_op_prop.create_operator(ctx, [(1024, 1024)], [dtype])
    custom_benchmark = run_performance_test(
        [getattr(MX_OP_MODULE, "Custom")],
        run_backward=True,
        inputs=[{"args": [(1024, 1024)], "op_type": "CustomAddOne"},
                {"args": [(10000, 1)], "op_type": "CustomAddOne"},
                {"args": [(10000, 10)], "op_type": "CustomAddOne"}],
        **shared_kwargs)

    # Fetch remaining Miscellaneous Operators
    mx_misc_ops = get_remaining_miscellaneous_operators()
    # Run benchmarks
    mx_misc_op_results = run_op_benchmarks(mx_misc_ops, dtype, ctx, profiler, warmup, runs)

    # Flatten the individually-collected result lists plus the bulk results
    # into a single {op_name: results} mapping.
    individual_results = (array_ops_benchmark + add_n_benchmark
                          + upsampling_benchmark + custom_benchmark)
    return merge_map_list(individual_results + [mx_misc_op_results])
4 changes: 4 additions & 0 deletions benchmark/opperf/opperf.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@
from benchmark.opperf.nd_operations.indexing_routines import run_indexing_routines_benchmarks
from benchmark.opperf.nd_operations.nn_loss_operators import run_loss_operators_benchmarks
from benchmark.opperf.nd_operations.linalg_operators import run_linalg_operators_benchmarks
from benchmark.opperf.nd_operations.misc_operators import run_mx_misc_operators_benchmarks

from benchmark.opperf.utils.common_utils import merge_map_list, save_to_file
from benchmark.opperf.utils.op_registry_utils import get_operators_with_no_benchmark, \
Expand Down Expand Up @@ -114,6 +115,9 @@ def run_all_mxnet_operator_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='n

# Run all NN loss operations benchmarks with default input values
mxnet_operator_benchmark_results.append(run_loss_operators_benchmarks(ctx=ctx, dtype=dtype, profiler=profiler, warmup=warmup, runs=runs))

# Run all Miscellaneous operations benchmarks with default input values
mxnet_operator_benchmark_results.append(run_mx_misc_operators_benchmarks(ctx=ctx, dtype=dtype, profiler=profiler, warmup=warmup, runs=runs))

# Run all Linear Algebra operations benchmarks with default input values
mxnet_operator_benchmark_results.append(run_linalg_operators_benchmarks(ctx=ctx, dtype=dtype, profiler=profiler, warmup=warmup, runs=runs))
Expand Down
36 changes: 34 additions & 2 deletions benchmark/opperf/rules/default_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,22 @@
DEFAULT_DATA_4d = [(1, 4, 2, 4), (10, 25, 10, 100)]
DEFAULT_BLOCK_SIZE = [2, 5]

# For miscellaneous operators
DEFAULT_DATA_SQUEEZE = [(1, 1024, 1024), (32, 1, 256, 256)]
DEFAULT_AXIS_SQUEEZE = [0, 1]
DEFAULT_A_MIN = [0.1]
DEFAULT_A_MAX = [0.9]
DEFAULT_LRS = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_WSS = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_GSS = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_WDS = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_ETA = [.5]
DEFAULT_STYPE = ['default', 'csr', 'row_sparse']
DEFAULT_A = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_LHS_FEI = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_MHS = [(1024,), (10000,), (10000,)]
DEFAULT_RHS_FEI = [(1024,), (10000,), (10000,)]

# For swapaxis operator
DEFAULT_DIM_1 = [0]
DEFAULT_DIM_2 = [1]
Expand Down Expand Up @@ -236,7 +252,22 @@
"axes": DEFAULT_AXES,
"act_type_leakyrelu": DEFAULT_ACT_TYPE_LR,
"label_softmax": DEFAULT_LABEL_SOFTMAX,
"act_type_activation": DEFAULT_ACT_TYPE_ACTIVATION}
"act_type_activation": DEFAULT_ACT_TYPE_ACTIVATION,
"data_squeeze": DEFAULT_DATA_SQUEEZE,
"axis_squeeze": DEFAULT_AXIS_SQUEEZE,
"a_min": DEFAULT_A_MIN,
"a_max": DEFAULT_A_MAX,
"lrs": DEFAULT_LRS,
"weights_sum_sq": DEFAULT_WSS,
"grads_sum_sq": DEFAULT_GSS,
"wds": DEFAULT_WDS,
"eta": DEFAULT_ETA,
"eps": DEFAULT_EPSILON,
"stype": DEFAULT_STYPE,
"a": DEFAULT_A,
"lhs_fill_element_0index": DEFAULT_LHS_FEI,
"rhs_fill_element_0index": DEFAULT_RHS_FEI,
"mhs": DEFAULT_MHS}


# These are names of MXNet operator parameters that is of type NDArray.
Expand All @@ -250,4 +281,5 @@
"weight", "weight32", "grad", "mean", "var", "mom", "n", "d",
"v", "z", "g", "delta", "args", "indices", "shape_like", "y",
"x", "condition", "a", "index", "raveL_data", "label", "grid",
"A", "B", "C", "r1", "r2", "rois"]
"A", "B", "C", "r1", "r2", "rois", "lrs", "wds", "weights_sum_sq",
"grads_sum_sq", "mhs"]
2 changes: 1 addition & 1 deletion benchmark/opperf/utils/benchmark_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
from benchmark.opperf.rules.default_params import PARAMS_OF_TYPE_NDARRAY
from .profiler_utils import cpp_profile, python_profile

no_backward = ['gather_nd', 'softmax_cross_entropy', 'linalg_gelqf', 'linalg_slogdet', 'moments']
no_backward = ['gather_nd', 'softmax_cross_entropy', 'linalg_gelqf', 'linalg_slogdet', 'moments', 'SequenceLast']

def _prepare_op_inputs(inputs, run_backward, dtype, ctx):
mx.random.seed(41)
Expand Down
Loading

0 comments on commit 8438d98

Please sign in to comment.