Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add full iOS job in package pipeline #9036

Merged
merged 2 commits into from
Sep 13, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions onnxruntime/core/framework/allocation_planner.cc
Original file line number Diff line number Diff line change
Expand Up @@ -324,8 +324,8 @@ class PlannerImpl {

const optional<std::pair<int, int>>& variadic_alias_offsets = ci.kernel_def->VariadicAlias();
if (variadic_alias_offsets.has_value()) {
int input_offset = variadic_alias_offsets.value().first;
int output_offset = variadic_alias_offsets.value().second;
int input_offset = variadic_alias_offsets->first;
int output_offset = variadic_alias_offsets->second;
// we _must_ reuse this input to satisfy aliasing requirement: (e.g., for AllReduce)
int alias_input_index = output_arg_num - output_offset + input_offset;
if (alias_input_index >= 0 && static_cast<size_t>(alias_input_index) < input_args.size()) {
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/core/framework/config_options.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ bool ConfigOptions::TryGetConfigEntry(const std::string& config_key, std::string
auto entry = GetConfigEntry(config_key);
const bool found = entry.has_value();
if (found) {
config_value = std::move(entry.value());
config_value = std::move(*entry);
}
return found;
}
Expand Down
12 changes: 6 additions & 6 deletions onnxruntime/core/optimizer/matmul_scale_fusion.cc
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ optional<std::pair<float, int>> GetScaleFromNode(

if (!divisor.has_value()) return {};

return {std::make_pair(1.0f / divisor.value(), scale_reciprocal_arg_index)};
return {std::make_pair(1.0f / *divisor, scale_reciprocal_arg_index)};
}

if (graph_utils::IsSupportedOptypeVersionAndDomain(scale_node, "Mul", {7, 13, 14})) {
Expand All @@ -93,7 +93,7 @@ optional<std::pair<float, int>> GetScaleFromNode(

if (!multiplier.has_value()) continue;

return {std::make_pair(multiplier.value(), scale_arg_index)};
return {std::make_pair(*multiplier, scale_arg_index)};
}

return {};
Expand Down Expand Up @@ -128,12 +128,12 @@ std::vector<ScaleMergeInfo> GetInputNodeMerges(
if (!scale_and_index.has_value()) continue;

// assume scale nodes have 2 input defs, so to_scale_index == 1 - scale_index
ORT_ENFORCE(input_node.InputDefs().size() == 2 && scale_and_index.value().second < 2);
const int to_scale_index = 1 - scale_and_index.value().second;
ORT_ENFORCE(input_node.InputDefs().size() == 2 && scale_and_index->second < 2);
const int to_scale_index = 1 - scale_and_index->second;

input_node_merges.push_back(
{input_edge,
scale_and_index.value().first,
scale_and_index->first,
to_scale_index,
input_edge->GetDstArgIndex()});
}
Expand All @@ -160,7 +160,7 @@ std::vector<ScaleMergeInfo> GetOutputNodeMerges(

output_node_merges.push_back(
{output_edge,
scale_and_index.value().first,
scale_and_index->first,
scaled_index,
output_edge->GetSrcArgIndex()});
}
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/core/platform/env_var_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ template <typename T>
T ParseEnvironmentVariableWithDefault(const std::string& name, const T& default_value) {
const auto parsed = ParseEnvironmentVariable<T>(name);
if (parsed.has_value()) {
return parsed.value();
return *parsed;
}

return default_value;
Expand Down
6 changes: 3 additions & 3 deletions onnxruntime/core/providers/cpu/reduction/reduction_ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,13 +43,13 @@ TensorOpCost ParallelReduceFastCost(int64_t n_row, int64_t n_col, int64_t elemen
This only improves the reduce function when the reduced axes are contiguous:
if len(shape) == 4, any single axis is ok, axes=(0, 1) or (1, 2) or (2, 3) is ok,
axes=(0, 2) is not covered by this change, former implementation prevails.
In that case, the shape can be compressed into three cases:
In that case, the shape can be compressed into three cases:
(K = axis not reduced, R = reduced axis):

* KR - reduction on the last dimensions
* RK - reduction on the first dimensions
* KRK - reduction on the middle dimensions.

For these three configurations, the reduction may be optimized
with vector operations. Method WhichFastReduce() returns which
case can be optimized for which aggregator.
Expand Down Expand Up @@ -630,7 +630,7 @@ class ReduceKernelBase {
}
int64_t keepdims = 1;
if (keepdims_override.has_value()) {
keepdims = keepdims_override.value();
keepdims = *keepdims_override;
} else {
ORT_ENFORCE(info.GetAttr("keepdims", &keepdims).IsOK());
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,6 @@ ApplicableMatrixReduction get_applicable_matrix_reduction(
return ApplicableMatrixReduction::None;
}


// Remove all dims with value 1. This can help to optimize case like:
// dims=[2,3,1,4,1,5] and axes=[0,2,4], which is same as dims=[2,3,4,5] and axes=[0].
std::vector<int64_t> new_dims;
Expand Down Expand Up @@ -136,8 +135,8 @@ ApplicableMatrixReduction get_applicable_matrix_reduction(
return ApplicableMatrixReduction::None;
}

const auto& min_axis = min_and_max_axes.value().first;
const auto& max_axis = min_and_max_axes.value().second;
const auto& min_axis = min_and_max_axes->first;
const auto& max_axis = min_and_max_axes->second;

// axes from beginning means row reduction, axes to end means column reduction
// for axes from beginning to end, either works and we do row reduction
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/test/common/path_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -229,7 +229,7 @@ TEST(PathTest, Concat) {
[](const optional<std::string>& a, const std::string& b, const std::string& expected_a, bool expect_throw = false) {
Path p_a{}, p_expected_a{};
if (a.has_value()) {
ASSERT_STATUS_OK(Path::Parse(ToPathString(a.value()), p_a));
ASSERT_STATUS_OK(Path::Parse(ToPathString(*a), p_a));
}
ASSERT_STATUS_OK(Path::Parse(ToPathString(expected_a), p_expected_a));

Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/test/common/tensor_op_test_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ namespace test {

RandomValueGenerator::RandomValueGenerator(optional<RandomSeedType> seed)
: random_seed_{
seed.has_value() ? seed.value() : static_cast<RandomSeedType>(GetTestRandomSeed())},
seed.has_value() ? *seed : static_cast<RandomSeedType>(GetTestRandomSeed())},
generator_{random_seed_},
output_trace_{__FILE__, __LINE__, "ORT test random seed: " + std::to_string(random_seed_)} {
}
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/test/contrib_ops/layer_norm_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ static void TestLayerNorm(const std::vector<int64_t>& x_dims,
test.AddAttribute("axis", axis);
test.AddAttribute("keep_dims", keep_dims);
if (epsilon.has_value()) {
test.AddAttribute("epsilon", epsilon.value());
test.AddAttribute("epsilon", *epsilon);
}

// create rand inputs
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/test/providers/cpu/nn/batch_norm_op_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ void TestBatchNorm(const unordered_map<string, vector<T>>& input_data_map,
int opset_version = 9) {
OpTester test("BatchNormalization", opset_version);
if (epsilon.has_value()) {
test.AddAttribute("epsilon", epsilon.value());
test.AddAttribute("epsilon", *epsilon);
}
if (opset_version < 9) { // spatial is only defined for opset-8 and below in the spec
test.AddAttribute("spatial", spatial_mode);
Expand Down
12 changes: 6 additions & 6 deletions onnxruntime/test/providers/provider_test_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -117,13 +117,13 @@ struct TensorCheck<uint8_t> {
// For any other EPs, we still expect an exact match for the results
if (provider_type == kNnapiExecutionProvider && (has_abs_err || has_rel_err)) {
double threshold = has_abs_err
? params.absolute_error_.value()
? *(params.absolute_error_)
: 0.0;

for (int i = 0; i < size; ++i) {
if (has_rel_err) {
EXPECT_NEAR(expected[i], output[i],
params.relative_error_.value() * expected[i]) // expected[i] is unsigned, can't be negative
*(params.relative_error_) * expected[i]) // expected[i] is unsigned, can't be negative
<< "i:" << i << ", provider_type: " << provider_type;
} else { // has_abs_err
EXPECT_NEAR(expected[i], output[i], threshold)
Expand Down Expand Up @@ -184,12 +184,12 @@ struct TensorCheck<double> {
} else {
if (has_abs_err) {
ASSERT_NEAR(expected[i], output[i],
params.absolute_error_.value())
*(params.absolute_error_))
<< "i:" << i << ", provider_type: " << provider_type;
}
if (has_rel_err) {
ASSERT_NEAR(expected[i], output[i],
params.relative_error_.value() *
*(params.relative_error_) *
std::abs(expected[i]))
<< "i:" << i << ", provider_type: " << provider_type;
}
Expand Down Expand Up @@ -243,12 +243,12 @@ void InternalNumericalCheck(const Tensor& expected_tensor,
} else {
if (has_abs_err) {
ASSERT_NEAR(expected[i], output[i],
params.absolute_error_.value())
*(params.absolute_error_))
<< "i:" << i << ", provider_type: " << provider_type;
}
if (has_rel_err) {
ASSERT_NEAR(expected[i], output[i],
params.relative_error_.value() *
*(params.relative_error_) *
std::abs(expected[i]))
<< "i:" << i << ", provider_type: " << provider_type;
}
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/test/util/scoped_env_vars.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ namespace {
Status SetEnvironmentVar(const std::string& name, const optional<std::string>& value) {
if (value.has_value()) {
ORT_RETURN_IF_NOT(
setenv(name.c_str(), value.value().c_str(), 1) == 0,
setenv(name.c_str(), value->c_str(), 1) == 0,
"setenv() failed: ", errno);
} else {
ORT_RETURN_IF_NOT(
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/test/util/test_random_seed.cc
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ RandomSeedType GetTestRandomSeed() {
ParseEnvironmentVariable<RandomSeedType>(test_random_seed_env_vars::kValue);
if (fixed_random_seed.has_value()) {
// use fixed value
return fixed_random_seed.value();
return *fixed_random_seed;
}

auto generate_from_time = []() {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
{
"build_osx_archs": {
"iphoneos": [
"arm64"
],
"iphonesimulator": [
"arm64",
"x86_64"
]
},
"build_params": [
"--ios",
"--parallel",
"--use_xcode",
"--build_apple_framework",
"--use_coreml",
"--skip_tests",
"--apple_deploy_target=11.0"
]
}
4 changes: 2 additions & 2 deletions tools/ci_build/github/azure-pipelines/mac-ios-ci-pipeline.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ jobs:
--ios \
--ios_sysroot iphonesimulator \
--osx_arch x86_64 \
--apple_deploy_target 12.1 \
--apple_deploy_target 11.0 \
--use_xcode \
--config RelWithDebInfo \
--build_apple_framework \
Expand All @@ -25,7 +25,7 @@ jobs:
--ios \
--ios_sysroot iphonesimulator \
--osx_arch x86_64 \
--apple_deploy_target 12.1 \
--apple_deploy_target 11.0 \
--use_xcode \
--config RelWithDebInfo \
--build_apple_framework \
Expand Down
30 changes: 30 additions & 0 deletions tools/ci_build/github/azure-pipelines/templates/c-api-cpu.yml
Original file line number Diff line number Diff line change
Expand Up @@ -183,6 +183,36 @@ jobs:
artifactName: 'onnxruntime-android-full-aar'
job_name_suffix: 'Full'

- job: iOS_Full_xcframework
workspace:
clean: all
pool:
vmImage: 'macOS-10.15'
timeoutInMinutes: 180
steps:
- script: |
set -e -x
python3 tools/ci_build/github/apple/build_ios_framework.py \
--build_dir "$(Build.BinariesDirectory)/ios_framework" \
tools/ci_build/github/apple/default_full_ios_framework_build_settings.json
mkdir $(Build.BinariesDirectory)/artifacts
pushd $(Build.BinariesDirectory)/ios_framework/framework_out/
zip -vr $(Build.BinariesDirectory)/artifacts/onnxruntime_xcframework.zip onnxruntime.xcframework
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: is it possible to add a top level folder to the zip called something like onnxruntime-ios-xcframework? the extraction script that sets things up for insertion into the nuget package expects a directory containing the binaries with a trailing '-xyz' that it trims off.

$dirname = $dirname.SubString(0,$dirname.LastIndexOf('-'))

Copy link
Contributor Author

@guoyu-wang guoyu-wang Sep 13, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can this be done in the download artifacts step? You can add whatever folder structure you want in the step.
Or is the folder structure need to be inside the zip?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Will address this in a follow up PR

popd
displayName: "Build iOS xcframework"

- script: |
python3 tools/ci_build/github/apple/test_ios_packages.py \
--fail_if_cocoapods_missing \
--framework_info_file "$(Build.BinariesDirectory)/ios_framework/framework_info.json" \
--c_framework_dir "$(Build.BinariesDirectory)/ios_framework/framework_out"
displayName: "Test iOS framework"

- task: PublishBuildArtifacts@1
inputs:
pathtoPublish: '$(Build.BinariesDirectory)/artifacts'
artifactName: 'onnxruntime-ios-full-xcframework'

- template: win-ci.yml
parameters:
DoCompliance: ${{ parameters.DoCompliance }}
Expand Down