From b1f53cdccf9a5e591090315ec1871f5885470d31 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Fri, 2 Aug 2019 08:59:53 -0700 Subject: [PATCH 1/9] handle args --- benchmark/opperf/rules/default_params.py | 10 ++++++++-- benchmark/opperf/utils/benchmark_utils.py | 20 +++++++++++++------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/benchmark/opperf/rules/default_params.py b/benchmark/opperf/rules/default_params.py index c864c7d829b6..ba8260e71e4c 100644 --- a/benchmark/opperf/rules/default_params.py +++ b/benchmark/opperf/rules/default_params.py @@ -25,6 +25,10 @@ """Default Input Tensor shapes to use for benchmarking""" +# For operators like concat, Elementwisesum, squeeze, stack +# argument data is passed as variable arg (*args) +DEFAULT_ARGS = [(1024, 1024)] + # For Unary operators like abs, arccos, arcsin etc.. DEFAULT_DATA = [(1024, 1024), (10000, 1), (10000, 100)] @@ -146,7 +150,8 @@ "data_4d": DEFAULT_DATA_4d, "dim1": DEFAULT_DIM_1, "dim2": DEFAULT_DIM_2, - "block_size": DEFAULT_BLOCK_SIZE} + "block_size": DEFAULT_BLOCK_SIZE, + "args": DEFAULT_ARGS} # These are names of MXNet operator parameters that is of type NDArray. @@ -157,4 +162,5 @@ PARAMS_OF_TYPE_NDARRAY = ["lhs", "rhs", "data", "base", "exp", "sample", "mu", "sigma", "lam", "alpha", "beta", "gamma", "k", "p", "low", "high", "weight", "bias", "moving_mean", "moving_var", - "weight", "weight32", "grad", "mean", "var", "mom", "n", "d", "v", "z", "g", "delta"] + "weight", "weight32", "grad", "mean", "var", "mom", "n", "d", + "v", "z", "g", "delta", "args"] diff --git a/benchmark/opperf/utils/benchmark_utils.py b/benchmark/opperf/utils/benchmark_utils.py index adf5d533ff52..ceefc550b05a 100644 --- a/benchmark/opperf/utils/benchmark_utils.py +++ b/benchmark/opperf/utils/benchmark_utils.py @@ -28,11 +28,17 @@ def _prepare_op_inputs(inputs, run_backward, dtype, ctx): kwargs_list = [] + args_list = [] for inp in inputs: kwargs = {} for key, value in inp.items(): - if key in PARAMS_OF_TYPE_NDARRAY: + if key in PARAMS_OF_TYPE_NDARRAY and key=='args': + args_list.append(get_mx_ndarray(ctx=ctx, in_tensor=value, + dtype=dtype, + initializer=nd.normal, + attach_grad=run_backward)) + elif key in PARAMS_OF_TYPE_NDARRAY: kwargs[key] = get_mx_ndarray(ctx=ctx, in_tensor=value, dtype=dtype, initializer=nd.normal, @@ -41,23 +47,23 @@ def _prepare_op_inputs(inputs, run_backward, dtype, ctx): kwargs[key] = value kwargs_list.append(kwargs) - return kwargs_list + return args_list, kwargs_list -def _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, kwargs_list): +def _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, args_list, kwargs_list): if run_backward: benchmark_helper_func = nd_forward_backward_and_profile else: benchmark_helper_func = nd_forward_and_profile # Warm up, ignore the profiler output - _, _ = benchmark_helper_func(op, warmup, **kwargs_list[0]) + _, _ = benchmark_helper_func(op, warmup, args_list, **kwargs_list[0]) # Run Benchmarks op_benchmark_result = {op.__name__: []} logging.info("Begin Benchmark - {name}".format(name=op.__name__)) for idx, kwargs in enumerate(kwargs_list): - _, profiler_output = benchmark_helper_func(op, runs, **kwargs) + _, profiler_output = benchmark_helper_func(op, runs, args_list, **kwargs) # Add inputs used for profiling this operator into result profiler_output["inputs"] = inputs[idx] @@ -98,7 +104,7 @@ def run_performance_test(ops, inputs, run_backward=True, List of dictionary of benchmark results. 
key -> name of the operator, Value is benchmark results. """ - kwargs_list = _prepare_op_inputs(inputs, run_backward, dtype, ctx) + args_list, kwargs_list = _prepare_op_inputs(inputs, run_backward, dtype, ctx) if not isinstance(ops, list): ops = [ops] @@ -106,7 +112,7 @@ def run_performance_test(ops, inputs, run_backward=True, op_benchmark_result = [] for op in ops: if hasattr(mx.nd, op.__name__): - benchmark_result = _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, kwargs_list) + benchmark_result = _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, args_list, kwargs_list) else: raise ValueError("Unknown NDArray operator provided to benchmark. - ", op.__name__) op_benchmark_result.append(benchmark_result) From 9a30e613de6fc1a3510368612a06fa6808148063 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Fri, 2 Aug 2019 22:37:56 +0000 Subject: [PATCH 2/9] intermediate, error with getting 2 values for data param for other ops --- benchmark/opperf/utils/benchmark_utils.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/benchmark/opperf/utils/benchmark_utils.py b/benchmark/opperf/utils/benchmark_utils.py index ceefc550b05a..51a6935ce7d8 100644 --- a/benchmark/opperf/utils/benchmark_utils.py +++ b/benchmark/opperf/utils/benchmark_utils.py @@ -46,7 +46,7 @@ def _prepare_op_inputs(inputs, run_backward, dtype, ctx): else: kwargs[key] = value kwargs_list.append(kwargs) - + print(args_list, kwargs_list) return args_list, kwargs_list @@ -56,14 +56,17 @@ def _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, ar else: benchmark_helper_func = nd_forward_and_profile + if not args_list: + _, _ = benchmark_helper_func(op, warmup, [], **kwargs_list[0]) + else: # Warm up, ignore the profiler output - _, _ = benchmark_helper_func(op, warmup, args_list, **kwargs_list[0]) + _, _ = benchmark_helper_func(op, warmup, args_list[0], **kwargs_list[0]) # Run Benchmarks op_benchmark_result = {op.__name__: []} logging.info("Begin Benchmark - {name}".format(name=op.__name__)) - for idx, kwargs in enumerate(kwargs_list): - _, profiler_output = benchmark_helper_func(op, runs, args_list, **kwargs) + for idx, (args,kwargs) in enumerate(zip(args_list,kwargs_list)): + _, profiler_output = benchmark_helper_func(op, runs, args, **kwargs) # Add inputs used for profiling this operator into result profiler_output["inputs"] = inputs[idx] From 71a41280017d91a55c5bfa0d23fd0e2eaab0f8b0 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Mon, 5 Aug 2019 16:39:07 -0700 Subject: [PATCH 3/9] handle args --- benchmark/opperf/utils/ndarray_utils.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/benchmark/opperf/utils/ndarray_utils.py b/benchmark/opperf/utils/ndarray_utils.py index 7ed2fa107066..54ab0527ea05 100644 --- a/benchmark/opperf/utils/ndarray_utils.py +++ b/benchmark/opperf/utils/ndarray_utils.py @@ -47,7 +47,10 @@ def nd_forward_backward_and_profile(op, runs, *args, **kwargs): """ for _ in range(runs): with mx.autograd.record(): - res = op(*args, **kwargs) + if(args==[]): + res = op(**kwargs) + else: + res = op(*args, **kwargs) res.backward() nd.waitall() return res @@ -76,7 +79,10 @@ def nd_forward_and_profile(op, runs, *args, **kwargs): any results from NDArray operation execution """ for _ in range(runs): - res = op(*args, **kwargs) + if(args==[]): + res = op(**kwargs) + else: + res = op(*args, **kwargs) nd.waitall() return res From 2f0a1635b601f2f12f9e4a05bba445dacb495099 Mon Sep 17 00:00:00 2001 From: 
ChaiBapchya Date: Mon, 5 Aug 2019 17:18:06 -0700 Subject: [PATCH 4/9] None type issue --- benchmark/opperf/utils/benchmark_utils.py | 21 ++++++++++++++------- benchmark/opperf/utils/ndarray_utils.py | 4 ++-- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/benchmark/opperf/utils/benchmark_utils.py b/benchmark/opperf/utils/benchmark_utils.py index 51a6935ce7d8..6bc0996b8c0c 100644 --- a/benchmark/opperf/utils/benchmark_utils.py +++ b/benchmark/opperf/utils/benchmark_utils.py @@ -46,7 +46,6 @@ def _prepare_op_inputs(inputs, run_backward, dtype, ctx): else: kwargs[key] = value kwargs_list.append(kwargs) - print(args_list, kwargs_list) return args_list, kwargs_list @@ -57,7 +56,7 @@ def _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, ar benchmark_helper_func = nd_forward_and_profile if not args_list: - _, _ = benchmark_helper_func(op, warmup, [], **kwargs_list[0]) + _, _ = benchmark_helper_func(op, warmup, None, **kwargs_list[0]) else: # Warm up, ignore the profiler output _, _ = benchmark_helper_func(op, warmup, args_list[0], **kwargs_list[0]) @@ -65,12 +64,20 @@ def _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, ar # Run Benchmarks op_benchmark_result = {op.__name__: []} logging.info("Begin Benchmark - {name}".format(name=op.__name__)) - for idx, (args,kwargs) in enumerate(zip(args_list,kwargs_list)): - _, profiler_output = benchmark_helper_func(op, runs, args, **kwargs) + if not args_list: + for idx, kwargs in enumerate(kwargs_list): + _, profiler_output = benchmark_helper_func(op, runs, None, **kwargs) + + # Add inputs used for profiling this operator into result + profiler_output["inputs"] = inputs[idx] + op_benchmark_result[op.__name__].append(profiler_output) + else: + for idx, (args,kwargs) in enumerate(zip(args_list,kwargs_list)): + _, profiler_output = benchmark_helper_func(op, runs, args, **kwargs) - # Add inputs used for profiling this operator into result - profiler_output["inputs"] = inputs[idx] - op_benchmark_result[op.__name__].append(profiler_output) + # Add inputs used for profiling this operator into result + profiler_output["inputs"] = inputs[idx] + op_benchmark_result[op.__name__].append(profiler_output) logging.info("Complete Benchmark - {name}".format(name=op.__name__)) return op_benchmark_result diff --git a/benchmark/opperf/utils/ndarray_utils.py b/benchmark/opperf/utils/ndarray_utils.py index 54ab0527ea05..ca272ef00913 100644 --- a/benchmark/opperf/utils/ndarray_utils.py +++ b/benchmark/opperf/utils/ndarray_utils.py @@ -47,7 +47,7 @@ def nd_forward_backward_and_profile(op, runs, *args, **kwargs): """ for _ in range(runs): with mx.autograd.record(): - if(args==[]): + if(args==(None,)): res = op(**kwargs) else: res = op(*args, **kwargs) @@ -79,7 +79,7 @@ def nd_forward_and_profile(op, runs, *args, **kwargs): any results from NDArray operation execution """ for _ in range(runs): - if(args==[]): + if(args==(None,)): res = op(**kwargs) else: res = op(*args, **kwargs) From 7d3725783c7644c5eb6ba1d649101dfe540658fc Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Mon, 5 Aug 2019 17:50:27 -0700 Subject: [PATCH 5/9] try --- benchmark/opperf/utils/benchmark_utils.py | 4 ++-- benchmark/opperf/utils/ndarray_utils.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/benchmark/opperf/utils/benchmark_utils.py b/benchmark/opperf/utils/benchmark_utils.py index 6bc0996b8c0c..16b8d488b8be 100644 --- a/benchmark/opperf/utils/benchmark_utils.py +++ b/benchmark/opperf/utils/benchmark_utils.py @@ -56,7 
+56,7 @@ def _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, ar benchmark_helper_func = nd_forward_and_profile if not args_list: - _, _ = benchmark_helper_func(op, warmup, None, **kwargs_list[0]) + _, _ = benchmark_helper_func(op, warmup, [], **kwargs_list[0]) else: # Warm up, ignore the profiler output _, _ = benchmark_helper_func(op, warmup, args_list[0], **kwargs_list[0]) @@ -66,7 +66,7 @@ def _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, ar logging.info("Begin Benchmark - {name}".format(name=op.__name__)) if not args_list: for idx, kwargs in enumerate(kwargs_list): - _, profiler_output = benchmark_helper_func(op, runs, None, **kwargs) + _, profiler_output = benchmark_helper_func(op, runs, [], **kwargs) # Add inputs used for profiling this operator into result profiler_output["inputs"] = inputs[idx] diff --git a/benchmark/opperf/utils/ndarray_utils.py b/benchmark/opperf/utils/ndarray_utils.py index ca272ef00913..b37a46e5f0fc 100644 --- a/benchmark/opperf/utils/ndarray_utils.py +++ b/benchmark/opperf/utils/ndarray_utils.py @@ -47,7 +47,7 @@ def nd_forward_backward_and_profile(op, runs, *args, **kwargs): """ for _ in range(runs): with mx.autograd.record(): - if(args==(None,)): + if not isinstance(args[0],nd.NDArray): res = op(**kwargs) else: res = op(*args, **kwargs) @@ -79,7 +79,7 @@ def nd_forward_and_profile(op, runs, *args, **kwargs): any results from NDArray operation execution """ for _ in range(runs): - if(args==(None,)): + if not isinstance(args[0],nd.NDArray): res = op(**kwargs) else: res = op(*args, **kwargs) From 9878e9a3a3a1490c95a1d48182ab6ac0ae55cb53 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Tue, 13 Aug 2019 13:13:16 -0700 Subject: [PATCH 6/9] indent fix --- benchmark/opperf/utils/benchmark_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmark/opperf/utils/benchmark_utils.py b/benchmark/opperf/utils/benchmark_utils.py index 16b8d488b8be..5de9c69184b2 100644 --- a/benchmark/opperf/utils/benchmark_utils.py +++ b/benchmark/opperf/utils/benchmark_utils.py @@ -35,9 +35,9 @@ def _prepare_op_inputs(inputs, run_backward, dtype, ctx): for key, value in inp.items(): if key in PARAMS_OF_TYPE_NDARRAY and key=='args': args_list.append(get_mx_ndarray(ctx=ctx, in_tensor=value, - dtype=dtype, - initializer=nd.normal, - attach_grad=run_backward)) + dtype=dtype, + initializer=nd.normal, + attach_grad=run_backward)) elif key in PARAMS_OF_TYPE_NDARRAY: kwargs[key] = get_mx_ndarray(ctx=ctx, in_tensor=value, dtype=dtype, From 970e34622a6bb619a932ed60185a07e02ece024a Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Fri, 16 Aug 2019 08:33:38 -0700 Subject: [PATCH 7/9] lint fix --- benchmark/opperf/rules/default_params.py | 2 +- benchmark/opperf/utils/benchmark_utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmark/opperf/rules/default_params.py b/benchmark/opperf/rules/default_params.py index 335024df0aee..3b00a324314f 100644 --- a/benchmark/opperf/rules/default_params.py +++ b/benchmark/opperf/rules/default_params.py @@ -25,7 +25,7 @@ """Default Input Tensor shapes to use for benchmarking""" -# For operators like concat, Elementwisesum, squeeze, stack +# For operators like concat, ElementWiseSum, squeeze, stack # argument data is passed as variable arg (*args) DEFAULT_ARGS = [(1024, 1024)] diff --git a/benchmark/opperf/utils/benchmark_utils.py b/benchmark/opperf/utils/benchmark_utils.py index 1432f0f846d4..adede8cfb062 100644 --- 
a/benchmark/opperf/utils/benchmark_utils.py +++ b/benchmark/opperf/utils/benchmark_utils.py @@ -134,7 +134,7 @@ def run_performance_test(ops, inputs, run_backward=True, op_benchmark_result = [] for op in ops: if hasattr(mx.nd, op.__name__): - benchmark_result = _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs,args_list, kwargs_list, profiler) + benchmark_result = _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, args_list, kwargs_list, profiler) else: raise ValueError("Unknown NDArray operator provided to benchmark. - ", op.__name__) op_benchmark_result.append(benchmark_result) From 885a2c8c503be99b171cb442d451c792c7e51700 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Fri, 16 Aug 2019 14:50:24 -0700 Subject: [PATCH 8/9] Trigger notification From 9569183fc452801fbef400edc58d24ea44862cfa Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Sat, 17 Aug 2019 11:25:51 -0700 Subject: [PATCH 9/9] Trigger notification bcoz validation/edge passed but shows pending
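
Usage note (not part of the patch): the sketch below shows how the "args" handling introduced in this series could be exercised end to end. It is a minimal sketch, assuming the repository root is on PYTHONPATH, that mx.nd.add_n stands in as a representative variadic (ElementWiseSum-style) operator, and that run_performance_test exposes the keyword parameters (dtype, ctx, warmup, runs) visible in the hunks above; adjust the names if the actual signature differs.

import mxnet as mx
from benchmark.opperf.utils.benchmark_utils import run_performance_test

# "args" is listed in PARAMS_OF_TYPE_NDARRAY, so _prepare_op_inputs treats the
# shape below as a tensor shape, materializes it as a single NDArray, and
# forwards it positionally (*args) to the operator instead of as a keyword,
# mirroring DEFAULT_ARGS = [(1024, 1024)] in default_params.py.
results = run_performance_test(
    [mx.nd.add_n],                    # variadic operator (ElementWiseSum)
    inputs=[{"args": (1024, 1024)}],  # consumed via *args, not **kwargs
    run_backward=True,                # attaches gradients so backward() can run
    dtype="float32",
    ctx=mx.cpu(),
    warmup=10,
    runs=25,
)
print(results)

With run_backward=True the helper records the forward pass under mx.autograd and calls backward(), which is why the patch attaches gradients to the generated positional NDArray as well as to the keyword inputs.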