
[Opperf] Add optimizer update operator benchmarks to opperf #15522

Merged: 12 commits, Jul 28, 2019
17 changes: 1 addition & 16 deletions benchmark/opperf/nd_operations/README.md
@@ -22,23 +22,18 @@
0. LogisticRegressionOutput
1. broadcast_axes
2. ravel_multi_index
3. multi_sgd_mom_update
4. smooth_l1
5. scatter_nd
6. reshape
7. one_hot
8. linalg_potri
9. mp_sgd_update
10. multi_sgd_update
11. signum_update
12. Convolution_v1
13. repeat
14. Custom
15. softmax_cross_entropy
16. SwapAxis
17. norm
18. Softmax
19. rmspropalex_update
20. fill_element_0index
21. cast
22. UpSampling
@@ -52,20 +47,17 @@
30. Activation
31. LinearRegressionOutput
32. Pooling_v1
33. ftml_update
34. Crop
35. ElementWiseSum
36. diag
37. Reshape
38. Pad
39. linalg_gemm2
40. crop
41. rmsprop_update
43. RNN
44. argmin
45. SoftmaxOutput
46. linalg_extractdiag
47. sgd_mom_update
48. SequenceLast
49. Deconvolution
50. flip
@@ -75,13 +67,11 @@
54. linalg_trsm
55. where
56. SoftmaxActivation
57. signsgd_update
58. slice
59. linalg_gelqf
60. softmin
61. linalg_gemm
62. BilinearSampler
63. mp_sgd_mom_update
64. choose_element_0index
65. tile
66. space_to_depth
@@ -93,7 +83,6 @@
72. stack
73. topk
74. khatri_rao
75. multi_mp_sgd_update
76. linalg_sumlogdiag
77. broadcast_to
78. IdentityAttachKLSparseReg
@@ -103,7 +92,6 @@
82. uniform
83. InstanceNorm
84. expand_dims
85. multi_mp_sgd_mom_update
86. reverse
87. add_n
88. clip
@@ -119,7 +107,6 @@
98. linalg_syrk
99. squeeze
101. ROIPooling
102. ftrl_update
103. SliceChannel
104. slice_like
105. depth_to_space
@@ -138,6 +125,4 @@
119. normal
120. take
121. MakeLoss
122. sgd_update
123. adam_update
124. concat
68 changes: 68 additions & 0 deletions benchmark/opperf/nd_operations/nn_optimizer_operators.py
@@ -0,0 +1,68 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import mxnet as mx
from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks
from benchmark.opperf.utils.op_registry_utils import get_all_optimizer_operators

"""Performance benchmark tests for MXNet NDArray Optimizer Operators.

1. Stochastic Gradient Descent (SGD)
1.1 multi_sgd_mom_update
1.2 mp_sgd_update
1.3 multi_sgd_update
1.4 sgd_mom_update
1.5 signsgd_update
1.6 mp_sgd_mom_update
1.7 multi_mp_sgd_update
1.8 multi_mp_sgd_mom_update
1.9 sgd_update
2. signum_update
3. rmspropalex_update
4. ftml_update
5. rmsprop_update
6. ftrl_update
7. adam_update
"""


def run_optimizer_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype) for all the neural network
optimizer operators in MXNet.

Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
Number of runs to capture benchmark results

Returns
-------
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.

"""
# Fetch all optimizer operators
mx_optimizer_ops = get_all_optimizer_operators()

# Run benchmarks
mx_optimizer_op_results = run_op_benchmarks(mx_optimizer_ops, dtype, ctx, warmup, runs)
return mx_optimizer_op_results
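
A minimal usage sketch of the new entry point, assuming the repository root is on the PYTHONPATH so that the benchmark.opperf package is importable, and that MXNet is installed; the loop over the returned dictionary is illustrative only and not part of the PR.

# Minimal usage sketch (assumption: run from the repository root with MXNet installed).
import mxnet as mx

from benchmark.opperf.nd_operations.nn_optimizer_operators import run_optimizer_operators_benchmarks

# Run the optimizer-operator benchmarks on CPU with the default float32 precision.
results = run_optimizer_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100)

# Per the function docstring, `results` maps each operator name
# (e.g. 'adam_update') to its benchmark measurements.
for op_name, op_results in results.items():
    print(op_name, op_results)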
23 changes: 23 additions & 0 deletions benchmark/opperf/utils/op_registry_utils.py
@@ -240,6 +240,29 @@ def get_all_reduction_operators():
return reduction_mx_operators


def get_all_optimizer_operators():
"""Gets all Optimizer operators registered with MXNet.

Returns
-------
{"operator_name": {"has_backward", "nd_op_handle", "params"}}
"""
optimizer_ops = ['multi_sgd_mom_update', 'mp_sgd_update', 'multi_sgd_update', 'signum_update',
'rmspropalex_update', 'ftml_update', 'rmsprop_update', 'sgd_mom_update', 'signsgd_update',
'mp_sgd_mom_update', 'multi_mp_sgd_update', 'multi_mp_sgd_mom_update', 'ftrl_update', 'sgd_update',
'adam_update']

# Get all mxnet operators
mx_operators = _get_all_mxnet_operators()

# Filter for Optimizer operators
optimizer_mx_operators = {}
for op_name, op_params in mx_operators.items():
if op_name in optimizer_ops and op_name not in unique_ops:
optimizer_mx_operators[op_name] = mx_operators[op_name]
return optimizer_mx_operators


def get_operators_with_no_benchmark(operators_with_benchmark):
"""Gets all MXNet operators with not benchmark.

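
For completeness, a small sketch of how the registry helper added in this diff could be checked in isolation. The expected metadata keys follow the docstring of get_all_optimizer_operators above; the assertion and print are illustrative and not part of the PR, and this assumes the same import environment as the sketch earlier.

# Illustrative check (assumption: executed from the repository root with MXNet installed).
from benchmark.opperf.utils.op_registry_utils import get_all_optimizer_operators

optimizer_ops = get_all_optimizer_operators()

# Per the docstring, every entry is keyed by the registered operator name and
# carries metadata such as "has_backward", "nd_op_handle", and "params".
for op_name, op_metadata in optimizer_ops.items():
    assert 'params' in op_metadata, "expected operator parameter metadata"
    print(op_name, sorted(op_metadata.keys()))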