Skip to content

Commit

Permalink
Migrate backends/xnnpack to the new namespace (#5865)
Browse files Browse the repository at this point in the history
Summary:
Pull Request resolved: #5865

Move the XNNPACK backend out of the `torch::` namespace, and update it to avoid using the `torch::` or `exec_aten::` namespaces.

Also update utils.h to use a namespace that matches the rest of the code (`xnnpack::utils` instead of `qnnpack_utils`).

Reviewed By: mcr229

Differential Revision: D63876576

fbshipit-source-id: 42624fdaf35ee8c45c71e57bc9768613367ac44b
  • Loading branch information
dbort authored and facebook-github-bot committed Oct 4, 2024
1 parent a6d67c7 commit 34e7ad8
Show file tree
Hide file tree
Showing 14 changed files with 142 additions and 103 deletions.
12 changes: 8 additions & 4 deletions backends/xnnpack/runtime/XNNCompiler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,15 @@
#pragma clang diagnostic ignored "-Wmissing-prototypes"
#pragma clang diagnostic ignored "-Wglobal-constructors"

namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace xnnpack {
namespace delegate {

using executorch::runtime::Error;
using executorch::runtime::MemoryAllocator;
using executorch::runtime::Result;

/*
* Provide compile-time allocation.
*/
Expand Down Expand Up @@ -1811,5 +1815,5 @@ ET_NODISCARD Error XNNCompiler::compileModel(

} // namespace delegate
} // namespace xnnpack
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
12 changes: 6 additions & 6 deletions backends/xnnpack/runtime/XNNCompiler.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@
#include <memory>
#include <vector>

namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace xnnpack {
namespace delegate {

Expand All @@ -25,15 +25,15 @@ class XNNCompiler {
// Takes Flatbuffer Serialized XNNPACK Model and rebuilds the xnn-subgraph
// returns an executor object that holds the xnn runtime object which we
// can then use to set inputs and run inference using the xnn graph.
ET_NODISCARD static Error compileModel(
ET_NODISCARD static executorch::runtime::Error compileModel(
const void* buffer_pointer,
size_t num_bytes,
XNNExecutor* executor,
MemoryAllocator* runtime_allocator,
executorch::runtime::MemoryAllocator* runtime_allocator,
xnn_workspace_t workspace);
};

} // namespace delegate
} // namespace xnnpack
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
21 changes: 13 additions & 8 deletions backends/xnnpack/runtime/XNNExecutor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,19 @@

#include <executorch/backends/xnnpack/runtime/XNNExecutor.h>

namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace xnnpack {
namespace delegate {

using Tensor = exec_aten::Tensor;
using ScalarType = exec_aten::ScalarType;
using SizesType = exec_aten::SizesType;
using executorch::aten::ScalarType;
using executorch::aten::SizesType;
using executorch::aten::Tensor;
using executorch::runtime::BackendExecutionContext;
using executorch::runtime::Error;
using executorch::runtime::EValue;
using executorch::runtime::is_contiguous_dim_order;
using executorch::runtime::kTensorDimensionLimit;

/**
* Initializes the XNNExecutor with the runtime and given number of
Expand Down Expand Up @@ -204,7 +209,7 @@ ET_NODISCARD Error XNNExecutor::resize_outputs(EValue** args) const {
expected_output_size[d] = static_cast<SizesType>(dims[d]);
}

exec_aten::ArrayRef<SizesType> output_size{
executorch::aten::ArrayRef<SizesType> output_size{
expected_output_size, static_cast<size_t>(num_dim)};

ET_LOG(Debug, "Resizing output tensor to a new shape");
Expand All @@ -231,5 +236,5 @@ ET_NODISCARD Error XNNExecutor::resize_outputs(EValue** args) const {

} // namespace delegate
} // namespace xnnpack
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
19 changes: 11 additions & 8 deletions backends/xnnpack/runtime/XNNExecutor.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@
#include <memory>
#include <vector>

namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace xnnpack {
namespace delegate {

Expand Down Expand Up @@ -51,7 +51,7 @@ class XNNExecutor {
* The input/output ids are expected to be sorted in order of their
* flatbuffer id_outs
*/
ET_NODISCARD Error initialize(
ET_NODISCARD executorch::runtime::Error initialize(
xnn_runtime_t runtime,
std::vector<uint32_t>&& input_ids,
std::vector<uint32_t>&& output_ids);
Expand All @@ -62,24 +62,27 @@ class XNNExecutor {
* input shapes will be propagated through the runtime, and perform
* any additional memory planning as needed
*/
ET_NODISCARD Error prepare_args(EValue** args);
ET_NODISCARD executorch::runtime::Error prepare_args(
executorch::runtime::EValue** args);

/**
* Executes the graph using the args prepared at prepare_args().
*/
ET_NODISCARD Error forward(BackendExecutionContext& context);
ET_NODISCARD executorch::runtime::Error forward(
executorch::runtime::BackendExecutionContext& context);

/**
* Prepares the outputs to be returned by the delegate
*
* Performs any post processing of outputs like tensor resizing
*/
ET_NODISCARD Error resize_outputs(EValue** args) const;
ET_NODISCARD executorch::runtime::Error resize_outputs(
executorch::runtime::EValue** args) const;

friend class XNNCompiler;
};

} // namespace delegate
} // namespace xnnpack
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
11 changes: 7 additions & 4 deletions backends/xnnpack/runtime/XNNHeader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,14 @@

#pragma clang diagnostic ignored "-Wdeprecated"

namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace xnnpack {
namespace delegate {

using executorch::runtime::Error;
using executorch::runtime::Result;

namespace {
/// Interprets the 8 bytes at `data` as a little-endian uint64_t.
uint64_t GetUInt64LE(const uint8_t* data) {
Expand Down Expand Up @@ -73,5 +76,5 @@ constexpr char XNNHeader::kMagic[kMagicSize];

} // namespace delegate
} // namespace xnnpack
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
12 changes: 7 additions & 5 deletions backends/xnnpack/runtime/XNNHeader.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@

#include <executorch/runtime/core/result.h>

namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace xnnpack {
namespace delegate {

Expand Down Expand Up @@ -98,7 +98,9 @@ struct XNNHeader {
* error if size was too short, if the header was not found, or if the
* header appeared to be corrupt.
*/
static Result<XNNHeader> Parse(const void* data, size_t size);
static executorch::runtime::Result<XNNHeader> Parse(
const void* data,
size_t size);

/**
* The offset in bytes to the beginning of the flatbuffer data.
Expand All @@ -121,5 +123,5 @@ struct XNNHeader {

} // namespace delegate
} // namespace xnnpack
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
19 changes: 15 additions & 4 deletions backends/xnnpack/runtime/XNNPACKBackend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,19 @@

#pragma clang diagnostic ignored "-Wglobal-constructors"

namespace torch {
namespace executor {
namespace executorch {
namespace backends {

using executorch::runtime::ArrayRef;
using executorch::runtime::Backend;
using executorch::runtime::BackendExecutionContext;
using executorch::runtime::BackendInitContext;
using executorch::runtime::CompileSpec;
using executorch::runtime::DelegateHandle;
using executorch::runtime::Error;
using executorch::runtime::EValue;
using executorch::runtime::FreeableBuffer;
using executorch::runtime::Result;

class XnnpackBackend final : public ::executorch::runtime::BackendInterface {
public:
Expand Down Expand Up @@ -145,5 +156,5 @@ Backend backend{"XnnpackBackend", &cls};
static auto success_with_compiler = register_backend(backend);
} // namespace

} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
8 changes: 4 additions & 4 deletions backends/xnnpack/runtime/XNNStatus.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@
#include <assert.h>
#include <xnnpack.h>

namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace xnnpack {
namespace delegate {

Expand All @@ -34,5 +34,5 @@ inline const char* xnn_status_to_string(enum xnn_status type) {

} // namespace delegate
} // namespace xnnpack
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
12 changes: 8 additions & 4 deletions backends/xnnpack/runtime/profiling/XNNProfiler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,13 @@
#include <unordered_map>
// NOLINTEND

namespace torch::executor::xnnpack::delegate::profiling {
namespace executorch::backends::xnnpack::delegate::profiling {

using executorch::runtime::Error;
using executorch::runtime::EventTracer;

#if defined(ET_EVENT_TRACER_ENABLED) || defined(ENABLE_XNNPACK_PROFILING)

XNNProfiler::XNNProfiler()
: state_(XNNProfilerState::Uninitialized), run_count_(0) {}

Expand Down Expand Up @@ -210,10 +214,10 @@ void XNNProfiler::submit_trace() {

auto end_time = time + interval_ticks;

torch::executor::event_tracer_log_profiling_delegate(
executorch::runtime::event_tracer_log_profiling_delegate(
event_tracer_,
name_formatted.c_str(),
/*delegate_debug_id=*/static_cast<torch::executor::DebugHandle>(-1),
/*delegate_debug_id=*/static_cast<executorch::runtime::DebugHandle>(-1),
time,
end_time);

Expand Down Expand Up @@ -246,4 +250,4 @@ Error XNNProfiler::end() {

#endif

} // namespace torch::executor::xnnpack::delegate::profiling
} // namespace executorch::backends::xnnpack::delegate::profiling
23 changes: 12 additions & 11 deletions backends/xnnpack/runtime/profiling/XNNProfiler.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,8 @@
#include <xnnpack.h>
#include <vector>

namespace torch {
namespace executor {
namespace executorch {
namespace backends {
namespace xnnpack {
namespace delegate {
namespace profiling {
Expand All @@ -30,24 +30,25 @@ class XNNProfiler {
* Initialize the profiler. This must be called after model is
* compiled and before calling begin_execution.
*/
Error initialize(xnn_runtime_t runtime);
executorch::runtime::Error initialize(xnn_runtime_t runtime);

/**
* Start a new profiling session. This is typically invoked
* immediately before invoking the XNNPACK runtime as part
* of a forward pass.
*/
Error start(EventTracer* event_tracer);
executorch::runtime::Error start(
executorch::runtime::EventTracer* event_tracer);

/**
* End a profiling session. This is typically invoked immediately
* after the XNNPACK runtime invocation completes.
*/
Error end();
executorch::runtime::Error end();

private:
#if defined(ET_EVENT_TRACER_ENABLED) || defined(ENABLE_XNNPACK_PROFILING)
EventTracer* event_tracer_;
executorch::runtime::EventTracer* event_tracer_;
xnn_runtime_t runtime_;
XNNProfilerState state_;

Expand All @@ -64,9 +65,9 @@ class XNNProfiler {
std::vector<uint64_t> op_timings_sum_;
#endif

Error get_runtime_operator_names();
Error get_runtime_num_operators();
Error get_runtime_operator_timings();
executorch::runtime::Error get_runtime_operator_names();
executorch::runtime::Error get_runtime_num_operators();
executorch::runtime::Error get_runtime_operator_timings();

void log_operator_timings();

Expand All @@ -80,5 +81,5 @@ class XNNProfiler {
} // namespace profiling
} // namespace delegate
} // namespace xnnpack
} // namespace executor
} // namespace torch
} // namespace backends
} // namespace executorch
20 changes: 12 additions & 8 deletions backends/xnnpack/runtime/utils/utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,14 @@
#include <executorch/runtime/platform/assert.h>
#include <cinttypes>

namespace torch {
namespace executor {
namespace qnnpack_utils {
namespace executorch {
namespace backends {
namespace xnnpack {
namespace utils {

using Tensor = exec_aten::Tensor;
using ScalarType = exec_aten::ScalarType;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::runtime::Error;

constexpr float SMALL_SCALE_THRESHOLD = 6.1e-5f;

Expand Down Expand Up @@ -222,6 +224,8 @@ void quantize_tensor_arm64_q8_wrapper<int8_t>(
quantize_tensor_arm64_q8<int8_t, int8x8_t>(in, out, N, scale, zero_point);
}
#endif
} // namespace qnnpack_utils
} // namespace executor
} // namespace torch

} // namespace utils
} // namespace xnnpack
} // namespace backends
} // namespace executorch
Loading

0 comments on commit 34e7ad8

Please sign in to comment.