diff --git a/.lintrunner.toml b/.lintrunner.toml
index 86be8d0d0bd38..c44a66200ad1b 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -97,33 +97,6 @@ init_command = [
]
is_formatter = true
-[[linter]]
-code = 'PYLINT'
-include_patterns = [
- # TODO: Opt in to pylint by adding paths here
-]
-exclude_patterns = [
-]
-command = [
- 'python',
- '-m',
- 'lintrunner_adapters',
- 'run',
- 'pylint_linter',
- '--rcfile=pyproject.toml',
- '--',
- '@{{PATHSFILE}}'
-]
-init_command = [
- 'python',
- '-m',
- 'lintrunner_adapters',
- 'run',
- 'pip_init',
- '--dry-run={{DRYRUN}}',
- '--requirement=requirements-lintrunner.txt',
-]
-
[[linter]]
code = 'RUSTFMT'
include_patterns = ['**/*.rs']
diff --git a/VERSION_NUMBER b/VERSION_NUMBER
index 4a02d2c3170bd..c807441cfed77 100644
--- a/VERSION_NUMBER
+++ b/VERSION_NUMBER
@@ -1 +1 @@
-1.16.2
+1.16.3
diff --git a/docs/python/README.rst b/docs/python/README.rst
index bcf7c635afd82..bcc9ef2124130 100644
--- a/docs/python/README.rst
+++ b/docs/python/README.rst
@@ -8,6 +8,11 @@ For more information on ONNX Runtime, please see `aka.ms/onnxruntime <https://aka.ms/onnxruntime/>`_
or the `Github project <https://github.com/microsoft/onnxruntime/>`_.
"""
-__version__ = "1.16.2"
+__version__ = "1.16.3"
__author__ = "Microsoft"
# we need to do device version validation (for example to check Cuda version for an onnxruntime-training package).
diff --git a/onnxruntime/core/framework/session_state_utils.cc b/onnxruntime/core/framework/session_state_utils.cc
index df3a7afebc176..df11fe8302aef 100644
--- a/onnxruntime/core/framework/session_state_utils.cc
+++ b/onnxruntime/core/framework/session_state_utils.cc
@@ -455,11 +455,10 @@ common::Status SaveInputOutputNamesToNodeMapping(const onnxruntime::GraphViewer&
// utils::CopyOneInputAcrossDevices is happy.
auto& input_map = session_state.GetInputNodeInfoMap();
- auto end_map = input_map.cend();
for (const auto& graph_input : graph_inputs) {
const auto& name = graph_input->Name();
- if (input_map.find(name) == end_map) {
+ if (input_map.find(name) == input_map.cend()) {
// dummy entry for an input that we didn't find a use of in the graph. log it in case that's a bug.
// utils::CopyOneInputAcrossDevices will use the input OrtValue as is given we don't believe it's used anywhere.
LOGS(session_state.Logger(), INFO) << (graph.IsSubgraph() ? "Subgraph" : "Graph") << " input with name "
diff --git a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc
index ac92d46ca87fc..6f4c144421104 100644
--- a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc
+++ b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc
@@ -1126,6 +1126,11 @@ TensorrtExecutionProvider::~TensorrtExecutionProvider() {
}
}
+ if (external_stream_) {
+ ORT_IGNORE_RETURN_VALUE(CUBLAS_CALL(cublasDestroy(external_cublas_handle_)));
+ ORT_IGNORE_RETURN_VALUE(CUDNN_CALL(cudnnDestroy(external_cudnn_handle_)));
+ }
+
if (!external_stream_ && stream_) {
ORT_IGNORE_RETURN_VALUE(CUDA_CALL(cudaStreamDestroy(stream_)));
}
diff --git a/onnxruntime/core/session/onnxruntime_c_api.cc b/onnxruntime/core/session/onnxruntime_c_api.cc
index 70d2d0fe5d511..85dd517ccf304 100644
--- a/onnxruntime/core/session/onnxruntime_c_api.cc
+++ b/onnxruntime/core/session/onnxruntime_c_api.cc
@@ -2744,7 +2744,7 @@ static_assert(offsetof(OrtApi, GetBuildInfoString) / sizeof(void*) == 254, "Size
static_assert(offsetof(OrtApi, KernelContext_GetResource) / sizeof(void*) == 265, "Size of version 16 API cannot change");
// So that nobody forgets to finish an API version, this check will serve as a reminder:
-static_assert(std::string_view(ORT_VERSION) == "1.16.2",
+static_assert(std::string_view(ORT_VERSION) == "1.16.3",
"ORT_Version change detected, please follow below steps to ensure OrtApi is updated properly");
// 1. Update the hardcoded version string in above static_assert to silence it
// 2. If there were any APIs added to ort_api_1_to_16 above:
diff --git a/onnxruntime/python/backend/backend.py b/onnxruntime/python/backend/backend.py
index 1edae383e93e6..97b7358f2a223 100644
--- a/onnxruntime/python/backend/backend.py
+++ b/onnxruntime/python/backend/backend.py
@@ -63,7 +63,7 @@ def is_opset_supported(cls, model):
error_message = (
"Skipping this test as only released onnx opsets are supported."
"To run this test set env variable ALLOW_RELEASED_ONNX_OPSET_ONLY to 0."
- " Got Domain '{}' version '{}'.".format(domain, opset.version)
+ f" Got Domain '{domain}' version '{opset.version}'."
)
return False, error_message
except AttributeError:
@@ -74,7 +74,7 @@ def is_opset_supported(cls, model):
error_message = (
"Skipping this test as only released onnx opsets are supported."
"To run this test set env variable ALLOW_RELEASED_ONNX_OPSET_ONLY to 0."
- " Got Domain '{}' version '{}'.".format(domain, opset.version)
+ f" Got Domain '{domain}' version '{opset.version}'."
)
return False, error_message
return True, ""
diff --git a/onnxruntime/python/tools/onnxruntime_test.py b/onnxruntime/python/tools/onnxruntime_test.py
index c20e055d72720..5605568edaccc 100644
--- a/onnxruntime/python/tools/onnxruntime_test.py
+++ b/onnxruntime/python/tools/onnxruntime_test.py
@@ -40,7 +40,7 @@ def generate_feeds(sess, symbolic_dims: dict | None = None):
if not dim:
# unknown dim
shape.append(1)
- elif type(dim) == str:
+ elif isinstance(dim, str):
# symbolic dim. see if we have a value otherwise use 1
if dim in symbolic_dims:
shape.append(int(symbolic_dims[dim]))
diff --git a/onnxruntime/python/tools/profile_explorer/profile_explorer.py b/onnxruntime/python/tools/profile_explorer/profile_explorer.py
index 78f8805a89076..6e0747883989f 100644
--- a/onnxruntime/python/tools/profile_explorer/profile_explorer.py
+++ b/onnxruntime/python/tools/profile_explorer/profile_explorer.py
@@ -82,8 +82,8 @@ def _shape_to_string(shape):
for dict_obj in shape:
if len(dict_obj) > 1:
raise ValueError("Unhandled type in _shape_to_string()")
- key = list(dict_obj.keys())[0]
- value = list(dict_obj.values())[0]
+ key = next(iter(dict_obj.keys()))
+ value = next(iter(dict_obj.values()))
if len(res) != 0:
res += ","
res += f'{key}({"x".join(str(v) for v in value)})'
diff --git a/onnxruntime/python/tools/quantization/calibrate.py b/onnxruntime/python/tools/quantization/calibrate.py
index 26e74a6dfbac9..c8aa610d07e2a 100644
--- a/onnxruntime/python/tools/quantization/calibrate.py
+++ b/onnxruntime/python/tools/quantization/calibrate.py
@@ -363,9 +363,9 @@ def compute_data(self) -> TensorsData:
else:
min_value_array = min(merged_added_output_dict[added_output_names[i]])
max_value_array = max(merged_added_output_dict[added_output_names[i + 1]])
- if type(min_value_array) == int or min_value_array.size > 0:
+ if isinstance(min_value_array, int) or min_value_array.size > 0:
min_value = float(min_value_array)
- if type(max_value_array) == int or max_value_array.size > 0:
+ if isinstance(max_value_array, int) or max_value_array.size > 0:
max_value = float(max_value_array)
if self.symmetric:
diff --git a/onnxruntime/python/tools/quantization/onnx_quantizer.py b/onnxruntime/python/tools/quantization/onnx_quantizer.py
index bb968d660c30c..3a66051a8e3d0 100644
--- a/onnxruntime/python/tools/quantization/onnx_quantizer.py
+++ b/onnxruntime/python/tools/quantization/onnx_quantizer.py
@@ -597,7 +597,7 @@ def _get_quantization_params(self, param_name, use_scale=None, use_zeropoint=Non
if params is None or len(params) != 2:
raise ValueError(
"Quantization parameters should contain zero point and scale. "
- "Specified values for output {}: {}".format(param_name, params)
+ f"Specified values for output {param_name}: {params}"
)
zero_point_values = [params["zero_point"]]
diff --git a/onnxruntime/python/tools/symbolic_shape_infer.py b/onnxruntime/python/tools/symbolic_shape_infer.py
index 69f8530dff39a..963ae799dadc5 100755
--- a/onnxruntime/python/tools/symbolic_shape_infer.py
+++ b/onnxruntime/python/tools/symbolic_shape_infer.py
@@ -24,7 +24,7 @@ def get_attribute(node, attr_name, default_value=None):
def get_dim_from_proto(dim):
- return getattr(dim, dim.WhichOneof("value")) if type(dim.WhichOneof("value")) == str else None
+ return getattr(dim, dim.WhichOneof("value")) if type(dim.WhichOneof("value")) is str else None # noqa: E721
def is_sequence(type_proto):
@@ -82,7 +82,7 @@ def handle_negative_axis(axis, rank):
def get_opset(mp, domain=None):
domain = domain or ["", "onnx", "ai.onnx"]
- if type(domain) != list:
+ if type(domain) != list: # noqa: E721
domain = [domain]
for opset in mp.opset_import:
if opset.domain in domain:
@@ -92,7 +92,7 @@ def get_opset(mp, domain=None):
def as_scalar(x):
- if type(x) == list:
+ if type(x) == list: # noqa: E721
assert len(x) == 1
return x[0]
elif type(x) == np.ndarray:
@@ -102,7 +102,7 @@ def as_scalar(x):
def as_list(x, keep_none):
- if type(x) == list:
+ if type(x) == list: # noqa: E721
return x
elif type(x) == np.ndarray:
return list(x)
@@ -113,7 +113,7 @@ def as_list(x, keep_none):
def sympy_reduce_product(x):
- if type(x) == list:
+ if type(x) == list: # noqa: E721
value = sympy.Integer(1)
for v in x:
value = value * v
@@ -249,7 +249,7 @@ def __init__(self, int_max, auto_merge, guess_output_rank, verbose, prefix=""):
self.prefix_ = prefix
def _add_suggested_merge(self, symbols, apply=False):
- assert all([(type(s) == str and s in self.symbolic_dims_) or is_literal(s) for s in symbols])
+ assert all([(type(s) == str and s in self.symbolic_dims_) or is_literal(s) for s in symbols]) # noqa: E721
symbols = set(symbols)
for k, v in self.suggested_merge_.items():
if k in symbols:
@@ -319,7 +319,7 @@ def _preprocess(self, in_mp):
)
def _merge_symbols(self, dims):
- if not all([type(d) == str for d in dims]):
+ if not all([type(d) == str for d in dims]): # noqa: E721
if self.auto_merge_:
unique_dims = list(set(dims))
is_int = [is_literal(d) for d in unique_dims]
@@ -402,7 +402,7 @@ def _get_shape_rank(self, node, idx):
def _get_sympy_shape(self, node, idx):
sympy_shape = []
for d in self._get_shape(node, idx):
- if type(d) == str:
+ if type(d) == str: # noqa: E721
sympy_shape.append(
self.symbolic_dims_[d]
if d in self.symbolic_dims_
@@ -428,7 +428,7 @@ def _try_get_value(self, node, idx):
def _update_computed_dims(self, new_sympy_shape):
for i, new_dim in enumerate(new_sympy_shape):
- if not is_literal(new_dim) and type(new_dim) != str:
+ if not is_literal(new_dim) and type(new_dim) != str: # noqa: E721
str_dim = str(new_dim)
if str_dim in self.suggested_merge_:
if is_literal(self.suggested_merge_[str_dim]):
@@ -556,7 +556,7 @@ def _onnx_infer_subgraph(self, node, subgraph, use_node_input=True, inc_subgraph
# for new symbolic dims from subgraph output, add to main graph symbolic dims
subgraph_shapes = [get_shape_from_value_info(o) for o in symbolic_shape_inference.out_mp_.graph.output]
subgraph_new_symbolic_dims = {
- d for s in subgraph_shapes if s for d in s if type(d) == str and d not in self.symbolic_dims_
+ d for s in subgraph_shapes if s for d in s if type(d) == str and d not in self.symbolic_dims_ # noqa: E721
}
new_dims = {}
for d in subgraph_new_symbolic_dims:
@@ -586,14 +586,14 @@ def int_or_float(value, allow_float_values):
assert len(v.shape) == 1
new_v = [int_or_float(vv, allow_float_values) for vv in v]
values[i] = new_v
- values_len = [len(v) if type(v) == list else 0 for v in values]
+ values_len = [len(v) if isinstance(v, list) else 0 for v in values]
max_len = max(values_len)
if max_len >= 1 and broadcast:
# broadcast
for i, v in enumerate(values):
if v is None:
continue # don't broadcast if value is unknown
- if type(v) == list:
+ if isinstance(v, list):
if len(v) < max_len:
values[i] = v * max_len
else:
@@ -614,7 +614,7 @@ def _compute_on_sympy_data(self, node, op_func):
values = self._get_int_or_float_values(node, broadcast=True)
if all([v is not None for v in values]):
- is_list = [type(v) == list for v in values]
+ is_list = [isinstance(v, list) for v in values]
as_list = any(is_list)
if as_list:
self.sympy_data_[node.output[0]] = [op_func(vs) for vs in zip(*values)]
@@ -871,7 +871,7 @@ def _infer_Concat(self, node): # noqa: N802
self.sympy_data_[node.output[0]] = []
for i in range(len(node.input)):
value = values[i]
- if type(value) == list:
+ if isinstance(value, list):
self.sympy_data_[node.output[0]].extend(value)
else:
self.sympy_data_[node.output[0]].append(value)
@@ -891,7 +891,7 @@ def _infer_Concat(self, node): # noqa: N802
if all([d == dims[0] for d in dims]):
continue
merged = self._merge_symbols(dims)
- if type(merged) == str:
+ if type(merged) == str: # noqa: E721
sympy_shape[d] = self.symbolic_dims_[merged] if merged else None
else:
sympy_shape[d] = merged
@@ -931,7 +931,7 @@ def _infer_ConstantOfShape(self, node): # noqa: N802
sympy_shape = self._get_int_or_float_values(node)[0]
vi = self.known_vi_[node.output[0]]
if sympy_shape is not None:
- if type(sympy_shape) != list:
+ if type(sympy_shape) != list: # noqa: E721
sympy_shape = [sympy_shape]
self._update_computed_dims(sympy_shape)
# update sympy data if output type is int, and shape is known
@@ -1002,7 +1002,7 @@ def _infer_Einsum(self, node): # noqa: N802
letter = term[-i]
if letter != 46: # letter != b'.'
dim = shape[-i]
- if letter not in letter_to_dim.keys():
+ if letter not in letter_to_dim:
letter_to_dim[letter] = dim
elif type(dim) != sympy.Symbol:
letter_to_dim[letter] = dim
@@ -1071,7 +1071,7 @@ def _infer_Gather(self, node): # noqa: N802
idx = self._try_get_value(node, 1)
if idx is not None:
data = self.sympy_data_[node.input[0]]
- if type(data) == list:
+ if type(data) == list: # noqa: E721
if type(idx) == np.ndarray and len(idx.shape) == 1:
self.sympy_data_[node.output[0]] = [data[int(i)] for i in idx]
else:
@@ -1563,12 +1563,12 @@ def _infer_Reshape(self, node): # noqa: N802
)
else:
input_sympy_shape = self._get_sympy_shape(node, 0)
- total = int(1)
+ total = 1
for d in input_sympy_shape:
total = total * d
new_sympy_shape = []
deferred_dim_idx = -1
- non_deferred_size = int(1)
+ non_deferred_size = 1
for i, d in enumerate(shape_value):
if type(d) == sympy.Symbol:
new_sympy_shape.append(d)
@@ -1874,7 +1874,7 @@ def handle_negative_index(index, bound):
and len(steps) == 1
):
input_sympy_data = self.sympy_data_[node.input[0]]
- if type(input_sympy_data) == list or (
+ if type(input_sympy_data) == list or ( # noqa: E721
type(input_sympy_data) == np.array and len(input_sympy_data.shape) == 1
):
self.sympy_data_[node.output[0]] = input_sympy_data[starts[0] : ends[0] : steps[0]]
@@ -1942,7 +1942,7 @@ def _infer_Squeeze(self, node): # noqa: N802
# For symbolic dimensions we guess they are !=1.
output_shape = [s for s in input_shape if s != 1]
if self.verbose_ > 0:
- symbolic_dimensions = [s for s in input_shape if type(s) != int]
+ symbolic_dimensions = [s for s in input_shape if type(s) != int] # noqa: E721
if len(symbolic_dimensions) > 0:
logger.debug(
f"Symbolic dimensions in input shape of op: '{node.op_type}' node: '{node.name}'. "
@@ -1955,8 +1955,8 @@ def _infer_Squeeze(self, node): # noqa: N802
if i not in axes:
output_shape.append(input_shape[i])
else:
- assert input_shape[i] == 1 or type(input_shape[i]) != int
- if self.verbose_ > 0 and type(input_shape[i]) != int:
+ assert input_shape[i] == 1 or type(input_shape[i]) != int # noqa: E721
+ if self.verbose_ > 0 and type(input_shape[i]) != int: # noqa: E721
logger.debug(
f"Symbolic dimensions in input shape of op: '{node.op_type}' node: '{node.name}'. "
f"Assuming the dimension '{input_shape[i]}' at index {i} of the input to be equal to 1."
@@ -2458,7 +2458,7 @@ def _propagate_shape_and_type(self, node, input_index=0, output_index=0):
vi.CopyFrom(helper.make_tensor_value_info(node.output[output_index], output_dtype, shape))
def _is_none_dim(self, dim_value):
- if type(dim_value) != str:
+ if type(dim_value) != str: # noqa: E721
return False
if "unk__" not in dim_value:
return False
@@ -2492,7 +2492,7 @@ def _infer_impl(self, start_sympy_data=None):
# some models use None for symbolic dim in input, replace it with a string
input_dims[i_dim].dim_param = str(self._new_symbolic_dim(i.name, i_dim))
- self.input_symbols_.update([d for d in input_shape if type(d) == str])
+ self.input_symbols_.update([d for d in input_shape if type(d) == str]) # noqa: E721
for s in self.input_symbols_:
if s in self.suggested_merge_:
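[Reviewer note, not part of the patch] The symbolic_shape_infer.py edits either switch type(x) == T to isinstance(x, T) or keep the exact-type comparison and silence Ruff's E721. The distinction matters because isinstance accepts subclasses while type(x) == T does not; a minimal illustrative sketch:

    # isinstance() matches subclasses; type(x) == T is an exact-type check,
    # which is why some call sites keep the comparison and add "# noqa: E721".
    class MyList(list):
        pass

    x = MyList([1, 2, 3])
    print(isinstance(x, list))  # True  - subclasses count
    print(type(x) == list)      # False - exact type only
    print(type(x) is list)      # False - same check, preferred spelling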
diff --git a/onnxruntime/python/tools/tensorrt/perf/benchmark.py b/onnxruntime/python/tools/tensorrt/perf/benchmark.py
index d440cafb23236..9c0ae2386b918 100644
--- a/onnxruntime/python/tools/tensorrt/perf/benchmark.py
+++ b/onnxruntime/python/tools/tensorrt/perf/benchmark.py
@@ -812,7 +812,7 @@ def write_map_to_file(result, file_name):
if os.path.exists(file_name):
existed_result = read_map_from_file(file_name)
- for model, _ep_list in result.items():
+ for model in result:
if model in existed_result:
existed_result[model] = {**existed_result[model], **result[model]}
else:
@@ -1122,7 +1122,7 @@ def calculate_gain(value, ep1, ep2):
def add_improvement_information(model_to_latency):
- for _key, value in model_to_latency.items():
+ for value in model_to_latency.values():
if trt in value and cuda in value:
gain = calculate_gain(value, trt, cuda)
value[trt_cuda_gain] = f"{gain:.2f} %"
@@ -1209,13 +1209,13 @@ def add_status_dict(status_dict, model_name, ep, status):
def build_status(status_dict, results, is_fail):
if is_fail:
for model, model_info in results.items():
- for ep, _ep_info in model_info.items():
+ for ep in model_info:
model_name = model
status = "Fail"
add_status_dict(status_dict, model_name, ep, status)
else:
for model, value in results.items():
- for ep, _ep_info in value.items():
+ for ep in value:
model_name = model
status = "Pass"
add_status_dict(status_dict, model_name, ep, status)
@@ -2270,7 +2270,7 @@ def main():
logger.info(f"\nTotal models: {len(models)}")
fail_model_cnt = 0
- for key, _value in models.items():
+ for key in models:
if key in model_to_fail_ep:
fail_model_cnt += 1
logger.info(f"Fail models: {fail_model_cnt}")
diff --git a/onnxruntime/python/tools/tensorrt/perf/perf_utils.py b/onnxruntime/python/tools/tensorrt/perf/perf_utils.py
index c46cadc2c1752..c639c6c73c82b 100644
--- a/onnxruntime/python/tools/tensorrt/perf/perf_utils.py
+++ b/onnxruntime/python/tools/tensorrt/perf/perf_utils.py
@@ -279,7 +279,7 @@ def calculate_trt_latency_percentage(trt_op_map):
op_map = trt_op_map[ep]
total_time = 0
- for _key, value in op_map.items():
+ for value in op_map.values():
total_time += int(value)
if ep == "TensorrtExecutionProvider":
diff --git a/onnxruntime/python/tools/transformers/float16.py b/onnxruntime/python/tools/transformers/float16.py
index 95e7437493bc8..f680a15fc2c1b 100644
--- a/onnxruntime/python/tools/transformers/float16.py
+++ b/onnxruntime/python/tools/transformers/float16.py
@@ -403,7 +403,7 @@ def convert_float_to_float16(
queue = next_level
- for _key, value in fp32_initializers.items():
+ for value in fp32_initializers.values():
# By default, to avoid precision loss, do not convert an initializer to fp16 when it is used only by fp32 nodes.
if force_fp16_initializers or value.fp16_nodes:
value.initializer = convert_tensor_float_to_float16(value.initializer, min_positive_val, max_finite_val)
diff --git a/onnxruntime/python/tools/transformers/fusion_attention.py b/onnxruntime/python/tools/transformers/fusion_attention.py
index edaf78edb2021..c1b241aa1a5ec 100644
--- a/onnxruntime/python/tools/transformers/fusion_attention.py
+++ b/onnxruntime/python/tools/transformers/fusion_attention.py
@@ -673,8 +673,8 @@ def create_multihead_attention_node(
else:
mha_inputs.extend([q_matmul.output[0], k_matmul.output[0], v_matmul.output[0]])
elif (
- type(k_matmul) == str
- and type(v_matmul) == str
+ type(k_matmul) == str # noqa: E721
+ and type(v_matmul) == str # noqa: E721
and k_matmul in graph_input_names
and v_matmul in graph_input_names
):
diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/diffusion_models.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/diffusion_models.py
index 4a2e9eb3443da..8b7579653d1b5 100644
--- a/onnxruntime/python/tools/transformers/models/stable_diffusion/diffusion_models.py
+++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/diffusion_models.py
@@ -448,7 +448,7 @@ def add_hidden_states_graph_output(self, model: ModelProto, optimized_onnx_path,
assert self.clip_skip >= 0 and self.clip_skip < hidden_layers
- node_output_name = "/text_model/encoder/layers.{}/Add_1_output_0".format(hidden_layers - 1 - self.clip_skip)
+ node_output_name = f"/text_model/encoder/layers.{hidden_layers - 1 - self.clip_skip}/Add_1_output_0"
# search the name in outputs of all node
found = False
diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_tensorrt.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_tensorrt.py
index 4a924abfb8600..61a9c0d2c8fa9 100644
--- a/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_tensorrt.py
+++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder_tensorrt.py
@@ -490,7 +490,7 @@ def load_engines(
def max_device_memory(self):
max_device_memory = 0
- for _model_name, engine in self.engines.items():
+ for engine in self.engines.values():
max_device_memory = max(max_device_memory, engine.engine.device_memory_size)
return max_device_memory
diff --git a/onnxruntime/python/tools/transformers/onnx_model.py b/onnxruntime/python/tools/transformers/onnx_model.py
index 5fda3e6d84c1b..7bdbc08cf733a 100644
--- a/onnxruntime/python/tools/transformers/onnx_model.py
+++ b/onnxruntime/python/tools/transformers/onnx_model.py
@@ -675,7 +675,7 @@ def convert_float_to_float16(self, use_symbolic_shape_infer=True, **kwargs):
if vi.name in name_vi:
del name_vi[vi.name]
for vi in name_vi.values():
- model.graph.value_info.append(vi) # noqa: PERF402
+ model.graph.value_info.append(vi)
except Exception:
logger.warning(
"Failed to run symbolic shape inference. Please file an issue in https://github.com/microsoft/onnxruntime."
diff --git a/onnxruntime/python/tools/transformers/profiler.py b/onnxruntime/python/tools/transformers/profiler.py
index fc2417ea897c3..8e45b149eaf03 100644
--- a/onnxruntime/python/tools/transformers/profiler.py
+++ b/onnxruntime/python/tools/transformers/profiler.py
@@ -454,7 +454,7 @@ def group_node_results(sess_time, kernel_time_only, use_gpu):
def get_dim_from_type_proto(dim):
- return getattr(dim, dim.WhichOneof("value")) if type(dim.WhichOneof("value")) == str else None
+ return getattr(dim, dim.WhichOneof("value")) if type(dim.WhichOneof("value")) == str else None # noqa: E721
def get_shape_from_type_proto(type_proto):
@@ -573,7 +573,7 @@ def create_gpt2_inputs(onnx_model, batch_size, sequence_length, past_sequence_le
shape = get_shape_from_type_proto(graph_input.type)
for i, dim in enumerate(shape):
if isinstance(dim, str):
- if dim not in symbols.keys():
+ if dim not in symbols:
raise RuntimeError(f"symbol is not supported: {dim}")
else:
shape[i] = symbols[dim]
@@ -615,7 +615,7 @@ def create_longformer_inputs(onnx_model, batch_size, sequence_length, global_len
shape = get_shape_from_type_proto(graph_input.type)
for i, dim in enumerate(shape):
if isinstance(dim, str):
- if dim not in symbols.keys():
+ if dim not in symbols:
raise RuntimeError(f"symbol is not supported: {dim}")
else:
shape[i] = symbols[dim]
diff --git a/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc b/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
index 257ce977700a6..5e746ed0c62d4 100644
--- a/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
+++ b/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
@@ -1238,7 +1238,7 @@ TEST(MathOpTest, Sum_8_Test1) {
// This test runs fine on CPU Plugin
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});
#else
- test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: Expected output shape [{3,3,3}] did not match run output shape [{3,1,1}] for sum
+ test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: Expected output shape [{3,3,3}] did not match run output shape [{3,1,1}] for sum
#endif
}
@@ -1264,7 +1264,7 @@ TEST(MathOpTest, Sum_8_Test1_double) {
// This test runs fine on CPU Plugin
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});
#else
- test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: Expected output shape [{3,3,3}] did not match run output shape [{3,1,1}] for sum
+ test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: Expected output shape [{3,3,3}] did not match run output shape [{3,1,1}] for sum
#endif
}
TEST(MathOpTest, Sum_8_Test2) {
diff --git a/onnxruntime/test/providers/cpu/reduction/reduction_ops_test.cc b/onnxruntime/test/providers/cpu/reduction/reduction_ops_test.cc
index 1dfaf9b10ee2c..655f2a84d6dda 100644
--- a/onnxruntime/test/providers/cpu/reduction/reduction_ops_test.cc
+++ b/onnxruntime/test/providers/cpu/reduction/reduction_ops_test.cc
@@ -1086,7 +1086,7 @@ TEST(ReductionOpTest, ReduceMax_int32) {
#if defined(OPENVINO_CONFIG_GPU_FP32) || defined(OPENVINO_CONFIG_GPU_FP16)
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider}); // OpenVINO: Disabled temporarily
#else
- test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: axis must be 0
+ test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: axis must be 0
#endif
}
@@ -1107,7 +1107,7 @@ TEST(ReductionOpTest, ReduceMax_int64) {
#if defined(OPENVINO_CONFIG_GPU_FP32) || defined(OPENVINO_CONFIG_GPU_FP16)
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider}); // OpenVINO: Disabled temporarily
#else
- test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: axis must be 0
+ test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); // TensorRT: axis must be 0
#endif
}
diff --git a/onnxruntime/test/providers/nnapi/nnapi_basic_test.cc b/onnxruntime/test/providers/nnapi/nnapi_basic_test.cc
index 0e783a94c5479..b3e1025e7367c 100644
--- a/onnxruntime/test/providers/nnapi/nnapi_basic_test.cc
+++ b/onnxruntime/test/providers/nnapi/nnapi_basic_test.cc
@@ -556,10 +556,11 @@ TEST(NnapiExecutionProviderTest, ActivationOutsideOfPartition) {
constexpr auto* model_file_name = ORT_TSTR("testdata/mnist.basic.ort");
// stop NNAPI partitioning at Relu so NNAPI EP only takes first Conv
const auto nnapi_partitioning_stop_ops = "Relu";
- TestModelLoad(model_file_name, std::make_unique<NnapiExecutionProvider>(0, nnapi_partitioning_stop_ops),
- // expect one NNAPI partition
- [](const Graph& graph) { ASSERT_EQ(CountAssignedNodes(graph, kNnapiExecutionProvider), 1)
- << "Exactly one node should have been taken by the NNAPI EP"; });
+ TestModelLoad(
+ model_file_name, std::make_unique<NnapiExecutionProvider>(0, nnapi_partitioning_stop_ops),
+ // expect one NNAPI partition
+ [](const Graph& graph) { ASSERT_EQ(CountAssignedNodes(graph, kNnapiExecutionProvider), 1)
+ << "Exactly one node should have been taken by the NNAPI EP"; });
}
} // namespace test
diff --git a/onnxruntime/test/python/onnx_backend_test_series.py b/onnxruntime/test/python/onnx_backend_test_series.py
index e8dc93049e18e..ecf4b001eec68 100644
--- a/onnxruntime/test/python/onnx_backend_test_series.py
+++ b/onnxruntime/test/python/onnx_backend_test_series.py
@@ -73,7 +73,7 @@ def apply_filters(filters, category):
opset_version = f"opset{onnx.defs.onnx_opset_version()}"
validated_filters = []
for f in filters[category]:
- if type(f) is list:
+ if type(f) is list: # noqa: E721
opset_regex = f[0]
filter_regex = f[1]
opset_match = re.match(opset_regex, opset_version)
diff --git a/onnxruntime/test/python/quantization/test_symmetric_flag.py b/onnxruntime/test/python/quantization/test_symmetric_flag.py
index f24daddbbcf83..701da80d543d3 100644
--- a/onnxruntime/test/python/quantization/test_symmetric_flag.py
+++ b/onnxruntime/test/python/quantization/test_symmetric_flag.py
@@ -74,10 +74,10 @@ def get_next(self):
# Extract quantization parameters: scales and zero points for activations, weights, and results
model = onnx.load("quantized-model.onnx")
- act_zp = [init for init in model.graph.initializer if init.name == "ACT_zero_point"][0].int32_data[0]
- act_sc = [init for init in model.graph.initializer if init.name == "ACT_scale"][0].float_data[0]
- wgt_zp = [init for init in model.graph.initializer if init.name == "WGT_zero_point"][0].int32_data[0]
- wgt_sc = [init for init in model.graph.initializer if init.name == "WGT_scale"][0].float_data[0]
+ act_zp = next(init for init in model.graph.initializer if init.name == "ACT_zero_point").int32_data[0]
+ act_sc = next(init for init in model.graph.initializer if init.name == "ACT_scale").float_data[0]
+ wgt_zp = next(init for init in model.graph.initializer if init.name == "WGT_zero_point").int32_data[0]
+ wgt_sc = next(init for init in model.graph.initializer if init.name == "WGT_scale").float_data[0]
# Return quantization parameters
return act_zp, act_sc, wgt_zp, wgt_sc
diff --git a/onnxruntime/test/python/transformers/test_optimizer.py b/onnxruntime/test/python/transformers/test_optimizer.py
index eedadfd8d4448..c7db636a2f11f 100644
--- a/onnxruntime/test/python/transformers/test_optimizer.py
+++ b/onnxruntime/test/python/transformers/test_optimizer.py
@@ -122,7 +122,7 @@ def _test_optimizer_on_huggingface_model(
"SkipLayerNormalization": expected_fusion_result_list[6],
}
- for _onnx_path, value in model_fusion_statistics.items():
+ for value in model_fusion_statistics.values():
actual_node_count = value
for op_type, count in expected_node_count.items():
@@ -354,7 +354,7 @@ def _test_optimizer_on_tf_model(self, model_name, expected_fusion_result_list, i
fusion_options,
)
- onnx_model = list(model_fusion_statistics.keys())[0]
+ onnx_model = next(iter(model_fusion_statistics.keys()))
fusion_result_list = list(model_fusion_statistics[onnx_model].values())
if validate_model:
diff --git a/onnxruntime/test/testdata/CNTK/gen.py b/onnxruntime/test/testdata/CNTK/gen.py
index 51ad5e781c243..37241a46808b5 100644
--- a/onnxruntime/test/testdata/CNTK/gen.py
+++ b/onnxruntime/test/testdata/CNTK/gen.py
@@ -48,10 +48,10 @@ def Save(dir, func, feed, outputs): # noqa: N802
if actual_input_name.startswith(cntk_name):
cntk_to_actual_names[cntk_name] = actual_input_name
- if type(feed) is not dict:
+ if type(feed) is not dict: # noqa: E721
feed = {func.arguments[0]: feed}
- if type(outputs) is not dict:
+ if type(outputs) is not dict: # noqa: E721
outputs = {func.outputs[0]: outputs}
test_data_dir = os.path.join(dir, data_dir)
@@ -213,7 +213,7 @@ def GenScan(): # noqa: N802
for n in out_mp.graph.node:
if n.op_type == "Scan":
- body = [attr for attr in n.attribute if attr.name == "body"][0]
+ body = next(attr for attr in n.attribute if attr.name == "body")
for vi in list(body.g.input) + list(body.g.output) + list(body.g.value_info):
dim = vi.type.tensor_type.shape.dim
dim[0].dim_param = "batch"
diff --git a/orttraining/orttraining/python/ort_trainer.py b/orttraining/orttraining/python/ort_trainer.py
index 7c90054a85dc5..5286c087cfb64 100644
--- a/orttraining/orttraining/python/ort_trainer.py
+++ b/orttraining/orttraining/python/ort_trainer.py
@@ -65,7 +65,7 @@ def generate_sample(desc, device=None):
def get_device_index(device):
- if type(device) == str:
+ if type(device) == str: # noqa: E721
# could be 'cuda:0', 'cuda:1', or 'cpu'. with cpu, set index=0
device = torch.device(device)
return 0 if device.index is None else device.index
@@ -580,10 +580,10 @@ def _load_single_checkpoint(model, checkpoint_dir, checkpoint_prefix, is_partiti
if is_partitioned:
assert_msg = (
- "Couldn't find checkpoint file {}."
+ f"Couldn't find checkpoint file {checkpoint_file}."
"Optimizer partitioning is enabled using ZeRO. Please make sure that the "
- "checkpoint file exists for rank {} of {}."
- ).format(checkpoint_file, model.world_rank, model.world_size)
+ f"checkpoint file exists for rank {model.world_rank} of {model.world_size}."
+ )
else:
assert_msg = f"Couldn't find checkpoint file {checkpoint_file}."
@@ -1005,7 +1005,7 @@ def _prepare_input_and_fetches(
self, input_desc_with_, internal_learning_rate, internal_loss_scale, *args, **kwargs
):
fetches = None
- if type(args) == tuple and len(args) == 1 and type(args[0]) == list:
+ if type(args) == tuple and len(args) == 1 and type(args[0]) == list: # noqa: E721
input = tuple(args[0])
else:
input = args
@@ -1174,7 +1174,7 @@ def eval_step(self, *args, **kwargs):
)
if len(session_run_results) == 1:
- return session_run_results[list(session_run_results.keys())[0]]
+ return session_run_results[next(iter(session_run_results.keys()))]
else:
return [session_run_results[output_desc.name_] for output_desc in output_desc]
diff --git a/orttraining/orttraining/python/training/onnxblock/model_accessor.py b/orttraining/orttraining/python/training/onnxblock/model_accessor.py
index cff435c5626c4..ac7a53a554e0a 100644
--- a/orttraining/orttraining/python/training/onnxblock/model_accessor.py
+++ b/orttraining/orttraining/python/training/onnxblock/model_accessor.py
@@ -69,7 +69,7 @@ def base(model: onnx.ModelProto):
"model from scratch."
)
- _GLOBAL_ACCESSOR = ModelAccessor(model_clone) # noqa: PLW0603
+ _GLOBAL_ACCESSOR = ModelAccessor(model_clone)
try:
yield _GLOBAL_ACCESSOR
finally:
@@ -112,7 +112,7 @@ def empty_base(opset_version: int | None = None):
)
)
- _GLOBAL_ACCESSOR = ModelAccessor(model) # noqa: PLW0603
+ _GLOBAL_ACCESSOR = ModelAccessor(model)
try:
yield _GLOBAL_ACCESSOR
finally:
@@ -144,7 +144,7 @@ def custom_op_library(custom_op_library_path: os.PathLike):
if not os.path.exists(custom_op_library_path):
raise RuntimeError(f"Custom op library path {custom_op_library_path} does not exist.")
- _GLOBAL_CUSTOM_OP_LIBRARY = copy.copy(custom_op_library_path) # noqa: PLW0603
+ _GLOBAL_CUSTOM_OP_LIBRARY = copy.copy(custom_op_library_path)
try:
yield _GLOBAL_CUSTOM_OP_LIBRARY
finally:
diff --git a/orttraining/orttraining/python/training/optim/_ds_modifier.py b/orttraining/orttraining/python/training/optim/_ds_modifier.py
index 6a52550882c25..6b1c98cc02a52 100644
--- a/orttraining/orttraining/python/training/optim/_ds_modifier.py
+++ b/orttraining/orttraining/python/training/optim/_ds_modifier.py
@@ -55,7 +55,7 @@ def can_be_modified(self):
if not get_accelerator().device_name().startswith("cuda"):
warnings.warn(
"Skip modifying optimizer as device is not supported, "
- "device name: {}".format(get_accelerator().device_name()),
+ f"device name: {get_accelerator().device_name()}",
UserWarning,
)
return False
diff --git a/orttraining/orttraining/python/training/ortmodule/_custom_op_symbolic_registry.py b/orttraining/orttraining/python/training/ortmodule/_custom_op_symbolic_registry.py
index 938bc568b6b32..d5bcf16a9ca05 100644
--- a/orttraining/orttraining/python/training/ortmodule/_custom_op_symbolic_registry.py
+++ b/orttraining/orttraining/python/training/ortmodule/_custom_op_symbolic_registry.py
@@ -156,7 +156,7 @@ def cross_entropy_loss(g, node, logits, target, weight, reduction, ignore_index,
output_type = logits_casted.type()
else:
# For higher version torch we can get node output types
- loss_output = list(node.outputs())[0]
+ loss_output = next(iter(node.outputs()))
output_type = loss_output.type()
##################################
diff --git a/orttraining/orttraining/python/training/ortmodule/_fallback.py b/orttraining/orttraining/python/training/ortmodule/_fallback.py
index 44f96dcff7fb0..56bb45d064d8a 100644
--- a/orttraining/orttraining/python/training/ortmodule/_fallback.py
+++ b/orttraining/orttraining/python/training/ortmodule/_fallback.py
@@ -175,9 +175,9 @@ def fallback(self, log_level: _logger.LogLevel, *inputs, **kwargs):
# This warning will not be raised again if retry is not enabled
self._logger.warning(
- "Fallback to PyTorch due to exception {} was triggered. "
+ f"Fallback to PyTorch due to exception {exception_type} was triggered. "
"Report this issue with a minimal repro at https://www.github.com/microsoft/onnxruntime. "
- "See details below:\n\n{}".format(exception_type, exception_string)
+ f"See details below:\n\n{exception_string}"
)
self._raised_fallback_exception = True
diff --git a/orttraining/orttraining/python/training/ortmodule/_training_manager.py b/orttraining/orttraining/python/training/ortmodule/_training_manager.py
index 3be4c05797978..e9f383abc888d 100644
--- a/orttraining/orttraining/python/training/ortmodule/_training_manager.py
+++ b/orttraining/orttraining/python/training/ortmodule/_training_manager.py
@@ -403,7 +403,7 @@ def _create_execution_agent(self):
session_options, providers, provider_options = self._get_session_config()
fw_feed_names = [input.name for input in self._onnx_models.optimized_model.graph.input]
- device_type = self._device if type(self._device) is str else self._device.type.lower()
+ device_type = self._device if type(self._device) is str else self._device.type.lower() # noqa: E721
if device_type == "ort":
fw_outputs_device_info = [C.get_ort_device(self._device.index)] * (
len(self._graph_info.user_output_names) + len(self._graph_info.frontier_node_arg_map)
diff --git a/orttraining/orttraining/python/training/orttrainer.py b/orttraining/orttraining/python/training/orttrainer.py
index a6c6c8af2723b..d5a488c436a1d 100644
--- a/orttraining/orttraining/python/training/orttrainer.py
+++ b/orttraining/orttraining/python/training/orttrainer.py
@@ -306,8 +306,8 @@ def _check_model_export(self, input):
# Mute the dropout nodes
dropout_nodes = [n for n in onnx_model_copy.graph.node if n.op_type == "Dropout"]
for node in dropout_nodes:
- ratio_node = [n for n in onnx_model_copy.graph.node if node.input[1] in n.output][0]
- training_mode_node = [n for n in onnx_model_copy.graph.node if node.input[2] in n.output][0]
+ ratio_node = next(n for n in onnx_model_copy.graph.node if node.input[1] in n.output)
+ training_mode_node = next(n for n in onnx_model_copy.graph.node if node.input[2] in n.output)
training_mode_node.attribute.pop()
ratio_node.attribute.pop()
@@ -844,7 +844,7 @@ def _init_session(self, optimizer_state_dict={}, session_options=None, provider_
def _prepare_model_input(self, inputs_desc, lr, loss_scale, *inputs, **kwargs):
# Normalize input to tuple of samples
- if type(inputs) == tuple and len(inputs) == 1 and type(inputs[0]) == list:
+ if type(inputs) == tuple and len(inputs) == 1 and type(inputs[0]) == list: # noqa: E721
input = tuple(inputs[0])
else:
input = inputs
diff --git a/orttraining/orttraining/test/python/orttraining_run_frontend_batch_size_test.py b/orttraining/orttraining/test/python/orttraining_run_frontend_batch_size_test.py
index e96b90138c3d5..3e2d1a7154bfd 100644
--- a/orttraining/orttraining/test/python/orttraining_run_frontend_batch_size_test.py
+++ b/orttraining/orttraining/test/python/orttraining_run_frontend_batch_size_test.py
@@ -60,7 +60,7 @@ def run_with_config(config):
cmds.append("--transformer_layer_recompute")
# access to azure storage shared disk is much slower so we need a longer timeout.
- subprocess.run(cmds, timeout=1200).check_returncode()
+ subprocess.run(cmds, timeout=1200).check_returncode() # noqa: PLW1510
for config in configs:
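[Reviewer note, not part of the patch] The subprocess.run(...) call above keeps its explicit check_returncode() and silences PLW1510, which flags run() calls that omit check=. A minimal illustrative sketch of the two equivalent patterns, with a hypothetical command line:

    import subprocess

    cmds = ["python", "--version"]  # hypothetical command line

    # Pattern kept by the patch: raise on a non-zero exit via check_returncode().
    subprocess.run(cmds, timeout=1200).check_returncode()  # noqa: PLW1510
    # Equivalent alternative that satisfies PLW1510 directly.
    subprocess.run(cmds, timeout=1200, check=True)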
diff --git a/orttraining/orttraining/test/python/orttraining_test_checkpoint_storage.py b/orttraining/orttraining/test/python/orttraining_test_checkpoint_storage.py
index 71d13fdcfd290..21372caaf6779 100644
--- a/orttraining/orttraining/test/python/orttraining_test_checkpoint_storage.py
+++ b/orttraining/orttraining/test/python/orttraining_test_checkpoint_storage.py
@@ -79,7 +79,7 @@ def _get_dict(separated_key):
"int2": 2,
"int_list": [1, 2, 3, 5, 6],
"dict1": {"np_array": np.arange(100), "dict2": {"int3": 3, "int4": 4}, "str1": "onnxruntime"},
- "bool1": bool(True),
+ "bool1": True,
"int5": 5,
"float1": 2.345,
"np_array_float": np.array([1.234, 2.345, 3.456]),
diff --git a/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py b/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py
index 0e88ce8e6c2d1..fcc1e9ff668d5 100644
--- a/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py
+++ b/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py
@@ -3904,9 +3904,9 @@ def forward(self, input1, bool_argument, int_argument, float_argument):
out = self.relu(out)
return out
- assert type(bool_argument) is bool
- assert type(int_argument) is int
- assert type(float_argument) is float
+ assert type(bool_argument) is bool # noqa: E721
+ assert type(int_argument) is int # noqa: E721
+ assert type(float_argument) is float # noqa: E721
device = "cuda"
N, D_in, H, D_out = 32, 784, 500, 10 # noqa: N806
@@ -3942,8 +3942,8 @@ def forward(self, input1, bool_argument):
out = self.relu(out)
return out
- assert type(bool_arguments[0]) is bool
- assert type(bool_arguments[1]) is bool
+ assert type(bool_arguments[0]) is bool # noqa: E721
+ assert type(bool_arguments[1]) is bool # noqa: E721
device = "cuda"
N, D_in, H, D_out = 32, 784, 500, 10 # noqa: N806
@@ -5893,7 +5893,7 @@ def generate_inputs(batch_size, max_seq_length, vocab_size):
assert len([node.op_type for node in training_model.graph.node if node.op_type == "ShrunkenGather"]) == 2
else:
assert len([node.op_type for node in training_model.graph.node if node.op_type == "ShrunkenGather"]) == 1
- gathergrad_node = [node for node in training_model.graph.node if node.op_type == "PadAndUnflatten"][0]
+ gathergrad_node = next(node for node in training_model.graph.node if node.op_type == "PadAndUnflatten")
def find_input_node_type(model, arg):
result = []
diff --git a/orttraining/orttraining/test/python/utils_multiple_choice.py b/orttraining/orttraining/test/python/utils_multiple_choice.py
index f425cf3d61545..e0febaf2d6334 100644
--- a/orttraining/orttraining/test/python/utils_multiple_choice.py
+++ b/orttraining/orttraining/test/python/utils_multiple_choice.py
@@ -98,8 +98,6 @@ def __init__(
overwrite_cache=False,
mode: Split = Split.train,
):
- processor = processor
-
cached_features_file = os.path.join(
data_dir,
"cached_{}_{}_{}_{}".format(
diff --git a/orttraining/tools/ci_test/run_batch_size_test.py b/orttraining/tools/ci_test/run_batch_size_test.py
index ba2be03618197..348d490678e9a 100755
--- a/orttraining/tools/ci_test/run_batch_size_test.py
+++ b/orttraining/tools/ci_test/run_batch_size_test.py
@@ -108,7 +108,7 @@ def main():
if config.enable_mixed_precision:
cmds.append("--use_mixed_precision"),
- subprocess.run(cmds, timeout=120).check_returncode()
+ subprocess.run(cmds, timeout=120).check_returncode() # noqa: PLW1510
return 0
diff --git a/orttraining/tools/ci_test/run_bert_perf_test.py b/orttraining/tools/ci_test/run_bert_perf_test.py
index fbc1403583ba0..bb15d6f5965b6 100644
--- a/orttraining/tools/ci_test/run_bert_perf_test.py
+++ b/orttraining/tools/ci_test/run_bert_perf_test.py
@@ -97,7 +97,7 @@ def main():
cmds.append("--use_mixed_precision"),
cmds.append("--allreduce_in_fp16"),
- subprocess.run(cmds).check_returncode()
+ subprocess.run(cmds).check_returncode() # noqa: PLW1510
if c.expected_perf > 0.0:
json_filename = "onnxruntime_perf_metrics_{}.onnx_bert_{}_{}_Lamb.json".format(
model, precision_prefix, c.max_seq_length
diff --git a/orttraining/tools/ci_test/run_convergence_test.py b/orttraining/tools/ci_test/run_convergence_test.py
index 58250e7f8ae8c..2ec32bca77640 100755
--- a/orttraining/tools/ci_test/run_convergence_test.py
+++ b/orttraining/tools/ci_test/run_convergence_test.py
@@ -35,7 +35,7 @@ def main():
convergence_test_output_path = os.path.join(output_dir, "convergence_test_out.csv")
# run BERT training
- subprocess.run(
+ subprocess.run( # noqa: PLW1510
[
os.path.join(args.binary_dir, "onnxruntime_training_bert"),
"--model_name",
diff --git a/orttraining/tools/ci_test/run_gpt2_perf_test.py b/orttraining/tools/ci_test/run_gpt2_perf_test.py
index e64fc3c7812e3..18e59d275b6b5 100644
--- a/orttraining/tools/ci_test/run_gpt2_perf_test.py
+++ b/orttraining/tools/ci_test/run_gpt2_perf_test.py
@@ -62,7 +62,7 @@ def main():
if c.use_mixed_precision:
cmds.append("--use_mixed_precision"),
- subprocess.run(cmds).check_returncode()
+ subprocess.run(cmds).check_returncode() # noqa: PLW1510
return 0
diff --git a/requirements-lintrunner.txt b/requirements-lintrunner.txt
index 2068040443a20..abd6c03f684a3 100644
--- a/requirements-lintrunner.txt
+++ b/requirements-lintrunner.txt
@@ -1,11 +1,9 @@
# This file is auto updated by dependabot
-lintrunner-adapters>=0.8.0
-# RUFF, RUFF-FIX
-ruff==0.0.278
+lintrunner-adapters>=0.11.0
+# RUFF
+ruff==0.1.4
# BLACK-ISORT
-black==23.7.0
+black==23.10.1
isort==5.12.0
-# PYLINT
-pylint==2.17.2
# CLANGFORMAT
-clang-format==16.0.6
+clang-format==17.0.5
diff --git a/samples/python/training/orttrainer/pytorch_transformer/pt_model.py b/samples/python/training/orttrainer/pytorch_transformer/pt_model.py
index 07752f52d7a84..4f2e03192c6cf 100644
--- a/samples/python/training/orttrainer/pytorch_transformer/pt_model.py
+++ b/samples/python/training/orttrainer/pytorch_transformer/pt_model.py
@@ -22,7 +22,7 @@ def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
- mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, float(0.0))
+ mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, 0.0)
return mask
def init_weights(self):
diff --git a/tools/android_custom_build/build_custom_android_package.py b/tools/android_custom_build/build_custom_android_package.py
index aa57cf341942c..35adb41690e98 100755
--- a/tools/android_custom_build/build_custom_android_package.py
+++ b/tools/android_custom_build/build_custom_android_package.py
@@ -22,7 +22,7 @@ def is_windows():
def run(cmd_arg_list, **kwargs):
print(f"Running command:\n {shlex.join(cmd_arg_list)}")
kwargs.update({"check": True})
- return subprocess.run(cmd_arg_list, **kwargs)
+ return subprocess.run(cmd_arg_list, **kwargs) # noqa: PLW1510
def parse_args():
diff --git a/tools/ci_build/build.py b/tools/ci_build/build.py
index 638196e73a77b..0b1cd205d8ca0 100644
--- a/tools/ci_build/build.py
+++ b/tools/ci_build/build.py
@@ -1264,8 +1264,8 @@ def generate_build_tree(
args.apple_deploy_target,
]
arg_names = [
- "--ios_sysroot " + "", # noqa: ISC003
- "--apple_deploy_target " + "", # noqa: ISC003
+ "--ios_sysroot " + "",
+ "--apple_deploy_target " + "",
]
if not all(needed_args):
raise BuildError(
@@ -1634,9 +1634,7 @@ def run_adb_shell(cmd):
# GCOV_PREFIX specifies the root directory
# for creating the runtime code coverage files.
if args.code_coverage:
- adb_shell(
- "cd {0} && GCOV_PREFIX={0} GCOV_PREFIX_STRIP={1} {2}".format(device_dir, cwd.count(os.sep) + 1, cmd)
- )
+ adb_shell(f"cd {device_dir} && GCOV_PREFIX={device_dir} GCOV_PREFIX_STRIP={cwd.count(os.sep) + 1} {cmd}")
else:
adb_shell(f"cd {device_dir} && {cmd}")
@@ -1682,9 +1680,9 @@ def run_adb_shell(cmd):
)
if args.use_nnapi:
- run_adb_shell("{0}/onnx_test_runner -e nnapi {0}/test".format(device_dir))
+ run_adb_shell(f"{device_dir}/onnx_test_runner -e nnapi {device_dir}/test")
else:
- run_adb_shell("{0}/onnx_test_runner {0}/test".format(device_dir))
+ run_adb_shell(f"{device_dir}/onnx_test_runner {device_dir}/test")
# run shared_lib_test if necessary
if args.build_shared_lib:
@@ -1695,9 +1693,9 @@ def run_adb_shell(cmd):
adb_push("onnxruntime_customopregistration_test", device_dir, cwd=cwd)
adb_shell(f"chmod +x {device_dir}/onnxruntime_shared_lib_test")
adb_shell(f"chmod +x {device_dir}/onnxruntime_customopregistration_test")
- run_adb_shell("LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{0} {0}/onnxruntime_shared_lib_test".format(device_dir))
+ run_adb_shell(f"LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{device_dir} {device_dir}/onnxruntime_shared_lib_test")
run_adb_shell(
- "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{0} {0}/onnxruntime_customopregistration_test".format(device_dir)
+ f"LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{device_dir} {device_dir}/onnxruntime_customopregistration_test"
)
diff --git a/tools/ci_build/github/linux/ort_minimal/readelf_utils.py b/tools/ci_build/github/linux/ort_minimal/readelf_utils.py
index 43bc107df401b..dec070e3f5c75 100644
--- a/tools/ci_build/github/linux/ort_minimal/readelf_utils.py
+++ b/tools/ci_build/github/linux/ort_minimal/readelf_utils.py
@@ -23,7 +23,7 @@ def get_section_sizes(binary_path, readelf_path, dump_to_file=None):
"""
cmd = [readelf_path, "--sections", "--wide", binary_path]
- result = subprocess.run(cmd, stdout=subprocess.PIPE)
+ result = subprocess.run(cmd, stdout=subprocess.PIPE) # noqa: PLW1510
result.check_returncode()
output = result.stdout.decode("utf-8")
diff --git a/tools/ci_build/upload_python_package_to_azure_storage.py b/tools/ci_build/upload_python_package_to_azure_storage.py
index 365cb67381ce7..b7969f02e518e 100755
--- a/tools/ci_build/upload_python_package_to_azure_storage.py
+++ b/tools/ci_build/upload_python_package_to_azure_storage.py
@@ -55,7 +55,7 @@ def upload_whl(python_wheel_path, final_storage=False):
with open(download_path_to_html) as f:
lines = f.read().splitlines()
- new_line = '<a href="{blobname}">{blobname}</a><br>\n'.format(blobname=blob_name_plus_replaced)
+ new_line = f'<a href="{blob_name_plus_replaced}">{blob_name_plus_replaced}</a><br>\n'
if new_line not in lines:
lines.append(new_line)
lines.sort()
diff --git a/tools/python/dump_subgraphs.py b/tools/python/dump_subgraphs.py
index a1b9782374ca7..529d798d50149 100644
--- a/tools/python/dump_subgraphs.py
+++ b/tools/python/dump_subgraphs.py
@@ -19,11 +19,11 @@ def dump_subgraph(model, output_dir, level=0):
for node in graph.node:
if node.op_type == "Scan" or node.op_type == "Loop":
- body_attribute = list(filter(lambda attr: attr.name == "body", node.attribute))[0]
+ body_attribute = next(iter(filter(lambda attr: attr.name == "body", node.attribute)))
export_and_recurse(node, body_attribute, output_dir, level)
if node.op_type == "If":
- then_attribute = list(filter(lambda attr: attr.name == "then_branch", node.attribute))[0]
- else_attribute = list(filter(lambda attr: attr.name == "else_branch", node.attribute))[0]
+ then_attribute = next(iter(filter(lambda attr: attr.name == "then_branch", node.attribute)))
+ else_attribute = next(iter(filter(lambda attr: attr.name == "else_branch", node.attribute)))
export_and_recurse(node, then_attribute, output_dir, level)
export_and_recurse(node, else_attribute, output_dir, level)
diff --git a/tools/python/find_optimizer_opset_version_updates_required.py b/tools/python/find_optimizer_opset_version_updates_required.py
index 0076d27fe950e..8a5e57b51e38d 100644
--- a/tools/python/find_optimizer_opset_version_updates_required.py
+++ b/tools/python/find_optimizer_opset_version_updates_required.py
@@ -54,7 +54,7 @@ def get_call_args_from_file(filename: str, function_or_declaration: str) -> typi
# TODO: handle automatically by merging lines
log.error(
"Call/Declaration is split over multiple lines. Please check manually."
- "File:{} Line:{}".format(filename, line_num)
+ f"File:{filename} Line:{line_num}"
)
continue
diff --git a/tools/python/gen_opkernel_doc.py b/tools/python/gen_opkernel_doc.py
index 2d0d16cf9a0de..1075ed8192fdd 100644
--- a/tools/python/gen_opkernel_doc.py
+++ b/tools/python/gen_opkernel_doc.py
@@ -150,7 +150,7 @@ def main(output_path: pathlib.Path, provider_filter: [str]):
tnameindex += 1
tclist = []
for tc in sorted(tcset):
- tclist.append(tc) # noqa: PERF402
+ tclist.append(tc)
fout.write("**" + tname + "** = " + format_type_constraints(tclist))
if tnameindex < len(typemap):
fout.write("
")
diff --git a/tools/python/ort_test_dir_utils.py b/tools/python/ort_test_dir_utils.py
index 2fc4921a7bb67..cd1f5022af526 100644
--- a/tools/python/ort_test_dir_utils.py
+++ b/tools/python/ort_test_dir_utils.py
@@ -212,8 +212,8 @@ def run_test_dir(model_or_dir):
models = onnx_models + ort_models
if len(models) > 1:
raise ValueError(
- "'Multiple .onnx and/or .ort files found in {}. '"
- "'Please provide specific .onnx or .ort file as input.".format(model_dir)
+ f"'Multiple .onnx and/or .ort files found in {model_dir}. '"
+ "'Please provide specific .onnx or .ort file as input."
)
elif len(models) == 0:
raise ValueError(f"'No .onnx or .ort files found in {model_dir}.")
diff --git a/tools/python/sparsify_initializers.py b/tools/python/sparsify_initializers.py
index 8f5034c4ef5cc..f9cc8db38ecff 100644
--- a/tools/python/sparsify_initializers.py
+++ b/tools/python/sparsify_initializers.py
@@ -78,7 +78,7 @@ def convert_tensor_to_sparse(
indices.append(index)
nnz_count += 1
- sparsity = float(1.0) - float(nnz_count) / data_len
+ sparsity = 1.0 - float(nnz_count) / data_len
ind_data_type = TensorProto.INT8
ind_dtype = np.int8
@@ -126,7 +126,7 @@ def convert_tensor_to_sparse(
# int32 indices are often selected, thus we really want to guard against loosing
# rather than winning.
if tensor_data_bytes <= total_sparse_bytes:
- sparsity = float(1.0) - float(tensor_data_bytes) / total_sparse_bytes
+ sparsity = 1.0 - float(tensor_data_bytes) / total_sparse_bytes
logger.debug(f"initializer={tensor.name}, adjusted_sparsity={sparsity}")
return (object(), sparsity)
diff --git a/tools/python/util/get_azcopy.py b/tools/python/util/get_azcopy.py
index 76c75ad8c60eb..bfcf228a956eb 100644
--- a/tools/python/util/get_azcopy.py
+++ b/tools/python/util/get_azcopy.py
@@ -27,7 +27,7 @@
def _check_version(azcopy_path):
- proc = subprocess.run([azcopy_path, "--version"], stdout=subprocess.PIPE, text=True)
+ proc = subprocess.run([azcopy_path, "--version"], stdout=subprocess.PIPE, text=True) # noqa: PLW1510
match = re.search(r"\d+(?:\.\d+)+", proc.stdout)
if not match:
diff --git a/tools/python/util/ort_format_model/operator_type_usage_processors.py b/tools/python/util/ort_format_model/operator_type_usage_processors.py
index 5905000a14972..22d7dff3e13b2 100644
--- a/tools/python/util/ort_format_model/operator_type_usage_processors.py
+++ b/tools/python/util/ort_format_model/operator_type_usage_processors.py
@@ -193,7 +193,7 @@ def process_node(self, node: fbs.Node, value_name_to_typeinfo: dict):
def is_typed_registration_needed(
self, type_in_registration: str, globally_allowed_types: typing.Optional[typing.Set[str]]
):
- if 0 not in self._input_types.keys():
+ if 0 not in self._input_types:
# currently all standard typed registrations are for input 0.
# custom registrations can be handled by operator specific processors (e.g. OneHotProcessor below).
raise RuntimeError(f"Expected typed registration to use type from input 0. Node:{self.name}")
diff --git a/winml/lib/Api.Image/inc/ImageConversionHelpers.h b/winml/lib/Api.Image/inc/ImageConversionHelpers.h
index 8e3dca2ae11e8..5a9c8f21255b5 100644
--- a/winml/lib/Api.Image/inc/ImageConversionHelpers.h
+++ b/winml/lib/Api.Image/inc/ImageConversionHelpers.h
@@ -52,5 +52,6 @@ bool VideoFramesHaveSameDevice(const wm::IVideoFrame& video_frame_1, const wm::I
wgdx::Direct3D11::IDirect3DDevice GetDeviceFromDirect3DSurface(const wgdx::Direct3D11::IDirect3DSurface& d3dSurface);
constexpr std::array<DXGI_FORMAT, 3> supportedWinMLFormats = {
- DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8X8_UNORM};
+ DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8X8_UNORM
+};
} // namespace _winml::Imaging
diff --git a/winml/lib/Api.Image/inc/TensorToVideoFrameConverter.h b/winml/lib/Api.Image/inc/TensorToVideoFrameConverter.h
index 12f676459293b..b09046471bdf6 100644
--- a/winml/lib/Api.Image/inc/TensorToVideoFrameConverter.h
+++ b/winml/lib/Api.Image/inc/TensorToVideoFrameConverter.h
@@ -40,10 +40,7 @@ class TensorToVideoFrameConverter : public ImageConverter {
private:
GUID _d3d11TextureGUID = {
- 0x14bf1054,
- 0x6ce7,
- 0x4c00,
- {0xa1, 0x32, 0xb0, 0xf2, 0x11, 0x5D, 0xE0, 0x7f}
+ 0x14bf1054, 0x6ce7, 0x4c00, {0xa1, 0x32, 0xb0, 0xf2, 0x11, 0x5D, 0xE0, 0x7f}
}; // {14BF1054-6CE7-4C00-A132-B0F2115DE07F}
GUID _handleGUID = {
0x700148fc, 0xc0cb, 0x4a7e, {0xa7, 0xc0, 0xe7, 0x43, 0xc1, 0x9, 0x9d, 0x62}
diff --git a/winml/lib/Api.Image/inc/VideoFrameToTensorConverter.h b/winml/lib/Api.Image/inc/VideoFrameToTensorConverter.h
index e34030bbd6833..3e57af42f4d09 100644
--- a/winml/lib/Api.Image/inc/VideoFrameToTensorConverter.h
+++ b/winml/lib/Api.Image/inc/VideoFrameToTensorConverter.h
@@ -50,10 +50,7 @@ class VideoFrameToTensorConverter : public ImageConverter {
private:
GUID d3d11_texture_GUID_ = {
- 0x485e4bb3,
- 0x3fe8,
- 0x497b,
- {0x85, 0x9e, 0xc7, 0x5, 0x18, 0xdb, 0x11, 0x2a}
+ 0x485e4bb3, 0x3fe8, 0x497b, {0x85, 0x9e, 0xc7, 0x5, 0x18, 0xdb, 0x11, 0x2a}
}; // {485E4BB3-3FE8-497B-859E-C70518DB112A}
GUID handle_GUID_ = {
0xce43264e, 0x41f7, 0x4882, {0x9e, 0x20, 0xfa, 0xa5, 0x1e, 0x37, 0x64, 0xfc}
diff --git a/winml/lib/Api.Ort/OnnxruntimeModel.cpp b/winml/lib/Api.Ort/OnnxruntimeModel.cpp
index fb8413a897e75..24eb44b73dd3c 100644
--- a/winml/lib/Api.Ort/OnnxruntimeModel.cpp
+++ b/winml/lib/Api.Ort/OnnxruntimeModel.cpp
@@ -81,7 +81,8 @@ HRESULT ModelInfo::RuntimeClassInitialize(_In_ OnnxruntimeEngineFactory* engine_
winml_adapter_api->ModelGetInputCount,
winml_adapter_api->ModelGetInputName,
winml_adapter_api->ModelGetInputDescription,
- winml_adapter_api->ModelGetInputTypeInfo};
+ winml_adapter_api->ModelGetInputTypeInfo
+ };
// Create inputs
std::vector inputs;
@@ -93,7 +94,8 @@ HRESULT ModelInfo::RuntimeClassInitialize(_In_ OnnxruntimeEngineFactory* engine_
winml_adapter_api->ModelGetOutputCount,
winml_adapter_api->ModelGetOutputName,
winml_adapter_api->ModelGetOutputDescription,
- winml_adapter_api->ModelGetOutputTypeInfo};
+ winml_adapter_api->ModelGetOutputTypeInfo
+ };
std::vector outputs;
RETURN_IF_FAILED(CreateFeatureDescriptors(engine_factory, &output_helpers, ort_model, outputs));
diff --git a/winml/lib/Api/impl/TensorBase.h b/winml/lib/Api/impl/TensorBase.h
index c9299a00ddaa2..00176038d574f 100644
--- a/winml/lib/Api/impl/TensorBase.h
+++ b/winml/lib/Api/impl/TensorBase.h
@@ -217,7 +217,8 @@ struct TensorBase : TBase {
}
D3D12_HEAP_PROPERTIES heapProperties = {
- D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0};
+ D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0
+ };
D3D12_RESOURCE_DESC resourceDesc = {
D3D12_RESOURCE_DIMENSION_BUFFER,
0,
diff --git a/winml/test/adapter/AdapterDmlEpTest.cpp b/winml/test/adapter/AdapterDmlEpTest.cpp
index 81437f9db2de3..b4220650abb9c 100644
--- a/winml/test/adapter/AdapterDmlEpTest.cpp
+++ b/winml/test/adapter/AdapterDmlEpTest.cpp
@@ -116,7 +116,8 @@ std::array tensor_values = {};
winrt::com_ptr<ID3D12Resource> CreateD3D12Resource(ID3D12Device& device) {
constexpr uint64_t buffer_size = tensor_size * sizeof(float);
constexpr D3D12_HEAP_PROPERTIES heap_properties = {
- D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0};
+ D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0
+ };
constexpr D3D12_RESOURCE_DESC resource_desc = {
D3D12_RESOURCE_DIMENSION_BUFFER,
0,
@@ -365,6 +366,7 @@ const AdapterDmlEpTestApi& getapi() {
DmlCopyTensor,
CreateCustomRegistry,
ValueGetDeviceId,
- SessionGetInputRequiredDeviceId};
+ SessionGetInputRequiredDeviceId
+ };
return api;
}
diff --git a/winml/test/adapter/AdapterSessionTest.cpp b/winml/test/adapter/AdapterSessionTest.cpp
index 1b1a36004264c..8c9124b2ff4ae 100644
--- a/winml/test/adapter/AdapterSessionTest.cpp
+++ b/winml/test/adapter/AdapterSessionTest.cpp
@@ -368,7 +368,8 @@ const AdapterSessionTestAPI& getapi() {
Profiling,
CopyInputAcrossDevices,
CopyInputAcrossDevices_DML,
- GetNumberOfIntraOpThreads};
+ GetNumberOfIntraOpThreads
+ };
if (SkipGpuTests()) {
api.AppendExecutionProvider_DML = SkipTest;
diff --git a/winml/test/api/LearningModelAPITest.cpp b/winml/test/api/LearningModelAPITest.cpp
index ab45e2414854d..01ca2b8930506 100644
--- a/winml/test/api/LearningModelAPITest.cpp
+++ b/winml/test/api/LearningModelAPITest.cpp
@@ -247,9 +247,11 @@ static void CheckLearningModelPixelRange() {
// Normalized_0_1 and image output
L"Add_ImageNet1920WithImageMetadataBgr8_SRGB_0_1.onnx",
// Normalized_1_1 and image output
- L"Add_ImageNet1920WithImageMetadataBgr8_SRGB_1_1.onnx"};
+ L"Add_ImageNet1920WithImageMetadataBgr8_SRGB_1_1.onnx"
+ };
  std::vector<LearningModelPixelRange> pixelRanges = {
- LearningModelPixelRange::ZeroTo255, LearningModelPixelRange::ZeroToOne, LearningModelPixelRange::MinusOneToOne};
+ LearningModelPixelRange::ZeroTo255, LearningModelPixelRange::ZeroToOne, LearningModelPixelRange::MinusOneToOne
+ };
for (uint32_t model_i = 0; model_i < modelPaths.size(); model_i++) {
LearningModel learningModel = nullptr;
WINML_EXPECT_NO_THROW(APITest::LoadModel(modelPaths[model_i], learningModel));
@@ -329,7 +331,8 @@ const LearningModelApiTestsApi& getapi() {
CloseModelCheckEval,
CloseModelNoNewSessions,
CheckMetadataCaseInsensitive,
- CreateCorruptModel};
+ CreateCorruptModel
+ };
if (RuntimeParameterExists(L"noVideoFrameTests")) {
api.CloseModelCheckEval = SkipTest;
diff --git a/winml/test/api/LearningModelBindingAPITest.cpp b/winml/test/api/LearningModelBindingAPITest.cpp
index b77421e191020..8279f4f89f0ed 100644
--- a/winml/test/api/LearningModelBindingAPITest.cpp
+++ b/winml/test/api/LearningModelBindingAPITest.cpp
@@ -669,7 +669,8 @@ const LearningModelBindingAPITestsApi& getapi() {
VerifyOutputAfterEvaluateAsyncCalledTwice,
VerifyOutputAfterImageBindCalledTwice,
SequenceLengthTensorFloat,
- SequenceConstructTensorString};
+ SequenceConstructTensorString
+ };
if (SkipGpuTests()) {
api.GpuSqueezeNet = SkipTest;
diff --git a/winml/test/api/LearningModelSessionAPITest.cpp b/winml/test/api/LearningModelSessionAPITest.cpp
index 21cdaa62bc898..4ec79b8a0f4c6 100644
--- a/winml/test/api/LearningModelSessionAPITest.cpp
+++ b/winml/test/api/LearningModelSessionAPITest.cpp
@@ -793,7 +793,8 @@ static void STFT(
auto n_dfts = static_cast(1 + floor((signal_size - dft_size) / hop_size));
  auto input_shape = std::vector<int64_t>{1, INT64(signal_size)};
  auto output_shape = std::vector<int64_t>{
- INT64(batch_size), INT64(n_dfts), is_onesided ? ((INT64(dft_size) >> 1) + 1) : INT64(dft_size), 2};
+ INT64(batch_size), INT64(n_dfts), is_onesided ? ((INT64(dft_size) >> 1) + 1) : INT64(dft_size), 2
+ };
auto dft_length = TensorInt64Bit::CreateFromArray({}, {INT64(dft_size)});
auto model =
@@ -1372,7 +1373,8 @@ static void ModelBuilding_GridSample_Internal(LearningModelDeviceKind kind) {
5.0000f,
5.0000f,
10.0000f,
- 10.0000f};
+ 10.0000f
+ };
input_dims = {1, 1, 3, 2};
grid_dims = {1, 2, 4, 2};
@@ -2312,7 +2314,8 @@ const LearningModelSessionAPITestsApi& getapi() {
ModelBuilding_STFT,
ModelBuilding_MelSpectrogramOnThreeToneSignal,
ModelBuilding_MelWeightMatrix,
- SetName};
+ SetName
+ };
if (SkipGpuTests()) {
api.CreateSessionDeviceDirectX = SkipTest;
diff --git a/winml/test/api/RawApiHelpers.cpp b/winml/test/api/RawApiHelpers.cpp
index b6f39f8e88224..e84af6d239799 100644
--- a/winml/test/api/RawApiHelpers.cpp
+++ b/winml/test/api/RawApiHelpers.cpp
@@ -38,7 +38,8 @@ void RunOnDevice(ml::learning_model& model, ml::learning_model_device& device, I
auto channel_buffers_pointers = std::vector{
&input_data.at(0),
&input_data.at(0) + channel_buffers_sizes[0],
- &input_data.at(0) + channel_buffers_sizes[0] + +channel_buffers_sizes[1]};
+ &input_data.at(0) + channel_buffers_sizes[0] + +channel_buffers_sizes[1]
+ };
WINML_EXPECT_HRESULT_SUCCEEDED(binding->bind_as_references(
input_name,
diff --git a/winml/test/api/RawApiTestsGpu.cpp b/winml/test/api/RawApiTestsGpu.cpp
index 9c1c06a01603f..f12ba0f36cebf 100644
--- a/winml/test/api/RawApiTestsGpu.cpp
+++ b/winml/test/api/RawApiTestsGpu.cpp
@@ -165,7 +165,8 @@ const RawApiTestsGpuApi& getapi() {
CreateDirectXMinPowerDevice,
Evaluate,
EvaluateNoInputCopy,
- EvaluateManyBuffers};
+ EvaluateManyBuffers
+ };
if (SkipGpuTests()) {
api.CreateDirectXDevice = SkipTest;
diff --git a/winml/test/concurrency/ConcurrencyTests.cpp b/winml/test/concurrency/ConcurrencyTests.cpp
index 46528ef70d377..404afbf67ea1c 100644
--- a/winml/test/concurrency/ConcurrencyTests.cpp
+++ b/winml/test/concurrency/ConcurrencyTests.cpp
@@ -141,7 +141,8 @@ void EvalAsyncDifferentBindings() {
  std::vector<EvaluationUnit> evaluation_units(num_units, EvaluationUnit());
  std::vector<ImageFeatureValue> ifvs = {
- FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")};
+ FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")
+ };
// same session, different binding
auto model = LearningModel::LoadFromFilePath(FileHelpers::GetModulePath() + L"model.onnx");
@@ -191,7 +192,8 @@ void MultiThreadMultiSessionOnDevice(const LearningModelDevice& device) {
auto path = FileHelpers::GetModulePath() + L"model.onnx";
auto model = LearningModel::LoadFromFilePath(path);
  std::vector<ImageFeatureValue> ivfs = {
- FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")};
+ FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")
+ };
std::vector max_indices = {
281, // tabby, tabby cat
0 // tench, Tinca tinca
@@ -257,7 +259,8 @@ void MultiThreadSingleSessionOnDevice(const LearningModelDevice& device) {
LearningModelSession model_session = nullptr;
WINML_EXPECT_NO_THROW(model_session = LearningModelSession(model, device));
  std::vector<ImageFeatureValue> ivfs = {
- FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")};
+ FileHelpers::LoadImageFeatureValue(L"kitten_224.png"), FileHelpers::LoadImageFeatureValue(L"fish.png")
+ };
std::vector max_indices = {
281, // tabby, tabby cat
0 // tench, Tinca tinca
@@ -322,7 +325,8 @@ const ConcurrencyTestsApi& getapi() {
MultiThreadSingleSessionGpu,
EvalAsyncDifferentModels,
EvalAsyncDifferentSessions,
- EvalAsyncDifferentBindings};
+ EvalAsyncDifferentBindings
+ };
if (SkipGpuTests()) {
api.MultiThreadMultiSessionGpu = SkipTest;
diff --git a/winml/test/image/imageTestHelper.cpp b/winml/test/image/imageTestHelper.cpp
index b7c1eb42965f8..91eed2a807782 100644
--- a/winml/test/image/imageTestHelper.cpp
+++ b/winml/test/image/imageTestHelper.cpp
@@ -148,7 +148,8 @@ TensorFloat LoadInputImageFromGPU(SoftwareBitmap softwareBitmap, const std::wstr
// 3 is number of channels we use. R G B without alpha.
UINT64 bufferbytesize = 3 * sizeof(float) * softwareBitmap.PixelWidth() * softwareBitmap.PixelHeight();
D3D12_HEAP_PROPERTIES heapProperties = {
- D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0};
+ D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0
+ };
D3D12_RESOURCE_DESC resourceDesc = {
D3D12_RESOURCE_DIMENSION_BUFFER,
0,
diff --git a/winml/test/image/imagetests.cpp b/winml/test/image/imagetests.cpp
index 6157520ca96a3..2251954c59e4c 100644
--- a/winml/test/image/imagetests.cpp
+++ b/winml/test/image/imagetests.cpp
@@ -939,7 +939,8 @@ TEST_F(ImageTests, ImageBindingAsGPUTensor) {
UINT64 buffer_byte_size =
static_cast(software_bitmap.PixelWidth()) * software_bitmap.PixelHeight() * 3 * sizeof(float);
D3D12_HEAP_PROPERTIES heap_properties = {
- D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0};
+ D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0
+ };
D3D12_RESOURCE_DESC resource_desc = {
D3D12_RESOURCE_DIMENSION_BUFFER,
0,
diff --git a/winml/test/model/model_tests.cpp b/winml/test/model/model_tests.cpp
index b2dd331ccef2c..d4cb664c9e564 100644
--- a/winml/test/model/model_tests.cpp
+++ b/winml/test/model/model_tests.cpp
@@ -231,7 +231,8 @@ static std::vector GetAllTestCases() {
ORT_TSTR("tf_resnet_v2_152"),
ORT_TSTR("vgg19"),
ORT_TSTR("yolov3"),
- ORT_TSTR("zfnet512")};
+ ORT_TSTR("zfnet512")
+ };
allDisabledTests.insert(std::begin(x86DisabledTests), std::end(x86DisabledTests));
#endif
// Bad onnx test output caused by previously wrong SAME_UPPER/SAME_LOWER for ConvTranspose
diff --git a/winml/test/scenario/cppwinrt/CustomNullOp.h b/winml/test/scenario/cppwinrt/CustomNullOp.h
index 33709c5f72d3c..b50909548a6bf 100644
--- a/winml/test/scenario/cppwinrt/CustomNullOp.h
+++ b/winml/test/scenario/cppwinrt/CustomNullOp.h
@@ -69,7 +69,8 @@ struct NullOperatorFactory : winrt::implements
  std::vector<MLOperatorEdgeDescription> allowedEdges{
CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Double),
CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float),
- CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float16)};
+ CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float16)
+ };
typeConstraint.allowedTypes = allowedEdges.data();
typeConstraint.allowedTypeCount = static_cast(allowedEdges.size());
diff --git a/winml/test/scenario/cppwinrt/CustomOps.cpp b/winml/test/scenario/cppwinrt/CustomOps.cpp
index 075bf5ed877a3..58d0fe6e64efc 100644
--- a/winml/test/scenario/cppwinrt/CustomOps.cpp
+++ b/winml/test/scenario/cppwinrt/CustomOps.cpp
@@ -305,7 +305,8 @@ static void CustomKernelWithBuiltInSchema() {
// Register the kernel
MLOperatorEdgeDescription floatTensorType = {
- MLOperatorEdgeType::Tensor, static_cast(MLOperatorTensorDataType::Float)};
+ MLOperatorEdgeType::Tensor, static_cast(MLOperatorTensorDataType::Float)
+ };
MLOperatorEdgeTypeConstrant constraint = {"T", &floatTensorType, 1};
@@ -318,7 +319,8 @@ static void CustomKernelWithBuiltInSchema() {
1,
nullptr,
0,
- MLOperatorKernelOptions::AllowDynamicInputShapes};
+ MLOperatorKernelOptions::AllowDynamicInputShapes
+ };
Microsoft::WRL::ComPtr factory =
wil::MakeOrThrow(CreateABIFooKernel);
@@ -614,7 +616,8 @@ static void CustomKernelWithCustomSchema() {
MLOperatorEdgeTypeConstrant kernelConstraint = {"T1", &floatTensorEdgeDesc, 1};
MLOperatorKernelDescription kernelDesc = {
- "", "Foo", 7, MLOperatorExecutionType::Cpu, &kernelConstraint, testCases[caseIndex].useTypeLabel ? 1u : 0u};
+ "", "Foo", 7, MLOperatorExecutionType::Cpu, &kernelConstraint, testCases[caseIndex].useTypeLabel ? 1u : 0u
+ };
if (!testCases[caseIndex].attributeDefaultsInSchema) {
kernelDesc.defaultAttributes = defaultAttributes;
@@ -693,10 +696,8 @@ static void CustomKernelWithCustomSchema() {
const CustomOpsTestsApi& getapi() {
static CustomOpsTestsApi api = {
- CustomOpsScenarioTestsClassSetup,
- CustomOperatorFusion,
- CustomKernelWithBuiltInSchema,
- CustomKernelWithCustomSchema};
+ CustomOpsScenarioTestsClassSetup, CustomOperatorFusion, CustomKernelWithBuiltInSchema, CustomKernelWithCustomSchema
+ };
if (SkipGpuTests()) {
api.CustomOperatorFusion = SkipTest;
diff --git a/winml/test/scenario/cppwinrt/NoisyReluCpu.h b/winml/test/scenario/cppwinrt/NoisyReluCpu.h
index 5f89b20beebb9..5cccbae67407c 100644
--- a/winml/test/scenario/cppwinrt/NoisyReluCpu.h
+++ b/winml/test/scenario/cppwinrt/NoisyReluCpu.h
@@ -157,7 +157,8 @@ struct NoisyReluOperatorFactory : winrt::implements
  std::vector<MLOperatorEdgeDescription> allowedEdges{
CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Double),
CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float),
- CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float16)};
+ CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float16)
+ };
typeConstraint.allowedTypes = allowedEdges.data();
typeConstraint.allowedTypeCount = static_cast(allowedEdges.size());
@@ -194,7 +195,8 @@ struct NoisyReluOperatorFactory : winrt::implements
  std::vector<MLOperatorAttributeNameValue> attributeDefaultValues{
- noisyReluMeanAttributeValue, noisyReluVarianceAttributeValue};
+ noisyReluMeanAttributeValue, noisyReluVarianceAttributeValue
+ };
noisyReluSchema.defaultAttributes = attributeDefaultValues.data();
noisyReluSchema.defaultAttributeCount = static_cast(attributeDefaultValues.size());
@@ -216,7 +218,8 @@ struct NoisyReluOperatorFactory : winrt::implements
  std::vector<MLOperatorEdgeDescription> allowedEdges{
CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Double),
CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float),
- CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float16)};
+ CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float16)
+ };
typeConstraint.allowedTypes = allowedEdges.data();
typeConstraint.allowedTypeCount = static_cast(allowedEdges.size());
@@ -239,7 +242,8 @@ struct NoisyReluOperatorFactory : winrt::implements
  std::vector<MLOperatorAttributeNameValue> attributeDefaultValues{
- noisyReluMeanAttributeValue, noisyReluVarianceAttributeValue};
+ noisyReluMeanAttributeValue, noisyReluVarianceAttributeValue
+ };
kernelDescription.defaultAttributes = attributeDefaultValues.data();
kernelDescription.defaultAttributeCount = static_cast(attributeDefaultValues.size());
kernelDescription.options = MLOperatorKernelOptions::None;
diff --git a/winml/test/scenario/cppwinrt/ReluCpu.h b/winml/test/scenario/cppwinrt/ReluCpu.h
index c72285a4de7fb..7bb275f7b399b 100644
--- a/winml/test/scenario/cppwinrt/ReluCpu.h
+++ b/winml/test/scenario/cppwinrt/ReluCpu.h
@@ -114,7 +114,8 @@ struct ReluOperatorFactory : winrt::implements
  std::vector<MLOperatorEdgeDescription> allowedEdges{
CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Double),
CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float),
- CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float16)};
+ CreateEdgeDescriptor(MLOperatorEdgeType::Tensor, MLOperatorTensorDataType::Float16)
+ };
typeConstraint.allowedTypes = allowedEdges.data();
typeConstraint.allowedTypeCount = static_cast(allowedEdges.size());
diff --git a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp
index 9b389d014c953..9a03172340bf7 100644
--- a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp
+++ b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp
@@ -510,7 +510,8 @@ static void Scenario9LoadBindEvalInputTensorGPU() {
UINT64 bufferbytesize = 720 * 720 * 3 * sizeof(float);
D3D12_HEAP_PROPERTIES heapProperties = {
- D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0};
+ D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0
+ };
D3D12_RESOURCE_DESC resourceDesc = {
D3D12_RESOURCE_DIMENSION_BUFFER,
0,
@@ -983,7 +984,8 @@ static void Scenario22ImageBindingAsGPUTensor() {
// 3 is number of channels we use. R G B without alpha.
UINT64 bufferbytesize = 3 * sizeof(float) * softwareBitmap.PixelWidth() * softwareBitmap.PixelHeight();
D3D12_HEAP_PROPERTIES heapProperties = {
- D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0};
+ D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0, 0
+ };
D3D12_RESOURCE_DESC resourceDesc = {
D3D12_RESOURCE_DIMENSION_BUFFER,
0,
@@ -1085,7 +1087,8 @@ static void Scenario23NominalPixelRange() {
  std::vector<std::wstring> modelPaths = {// Normalized_0_1 and image output
modulePath + L"Add_ImageNet1920WithImageMetadataBgr8_SRGB_0_1.onnx",
// Normalized_1_1 and image output
- modulePath + L"Add_ImageNet1920WithImageMetadataBgr8_SRGB_1_1.onnx"};
+ modulePath + L"Add_ImageNet1920WithImageMetadataBgr8_SRGB_1_1.onnx"
+ };
for (uint32_t model_i = 0; model_i < modelPaths.size(); model_i++) {
// load model and create session