2024-06-14 nightly release (089858b)
pytorchbot committed Jun 14, 2024
1 parent (5bb3505) · commit 1d2fb27
Showing 119 changed files with 130,413 additions and 1,402 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/trunk.yml
@@ -152,7 +152,7 @@ jobs:
       sudo sysctl fs.inotify.max_user_watches=1048576 # 1024 * 1024
       # Test ethos-u delegate examples with run.sh
-      PYTHON_EXECUTABLE=python bash examples/arm/run.sh examples/arm/ethos-u-scratch/ buck2
+      PYTHON_EXECUTABLE=python bash examples/arm/run.sh examples/arm/ethos-u-scratch/
   test-arm-reference-delegation:
     name: test-arm-reference-delegation
3 changes: 3 additions & 0 deletions .gitmodules
@@ -64,3 +64,6 @@
 [submodule "third-party/ios-cmake"]
 	path = third-party/ios-cmake
 	url = https://github.com/leetal/ios-cmake
+[submodule "examples/models/phi-3-mini/third-party/sentencepiece"]
+	path = examples/models/phi-3-mini/third-party/sentencepiece
+	url = https://github.com/google/sentencepiece.git
2 changes: 1 addition & 1 deletion backends/apple/coreml/scripts/install_requirements.sh
@@ -24,7 +24,7 @@ rm -rf "$COREML_DIR_PATH/third-party"
 mkdir "$COREML_DIR_PATH/third-party"
 
 echo "${green}ExecuTorch: Cloning coremltools."
-git clone --depth 1 --branch 7.2 "https://github.com/apple/coremltools.git" $COREMLTOOLS_DIR_PATH
+git clone --depth 1 --branch 8.0b1 "https://github.com/apple/coremltools.git" $COREMLTOOLS_DIR_PATH
 cd $COREMLTOOLS_DIR_PATH
 
 STATUS=$?
88 changes: 88 additions & 0 deletions backends/apple/mps/TARGETS
@@ -6,8 +6,96 @@
 # Any targets that should be shared between fbcode and xplat must be defined in
 # targets.bzl. This file can contain fbcode-only targets.
 
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
 load(":targets.bzl", "define_common_targets")
 
 oncall("executorch")
 
 define_common_targets()
+
+runtime.python_library(
+    name = "backend",
+    srcs = [
+        "__init__.py",
+        "mps_preprocess.py",
+    ],
+    visibility = [
+        "@EXECUTORCH_CLIENTS",
+    ],
+    deps = [
+        ":operators",
+        ":serialization",
+        ":utils",
+        "//caffe2:torch",
+        "//executorch/exir/backend:backend_details",
+        "//executorch/exir/backend:compile_spec_schema",
+    ],
+)
+
+runtime.python_library(
+    name = "operators",
+    srcs = glob([
+        "operators/*.py",
+    ]),
+    deps = [
+        ":serialization",
+        ":utils",
+        "//executorch/backends/transforms:lib",
+    ],
+)
+
+runtime.python_library(
+    name = "partitioner",
+    srcs = glob([
+        "partition/*.py",
+    ]),
+    visibility = [
+        "@EXECUTORCH_CLIENTS",
+    ],
+    deps = [
+        ":backend",
+        "//caffe2:torch",
+        "//executorch/exir/backend:compile_spec_schema",
+        "//executorch/exir/backend:partitioner",
+        "//executorch/exir/backend/canonical_partitioners:canonical_partitioner_lib",
+    ],
+)
+
+runtime.python_library(
+    name = "serialization",
+    srcs = glob([
+        "serialization/*.py",
+    ]),
+    resources = [
+        "serialization/schema.fbs",
+    ],
+)
+
+runtime.python_library(
+    name = "utils",
+    srcs = glob([
+        "utils/*.py",
+    ]),
+    deps = [
+        ":serialization",
+        "//caffe2:torch",
+        "//executorch/exir:lib",
+    ],
+)
+
+runtime.python_test(
+    name = "test",
+    srcs = glob([
+        "test/*.py",
+    ]),
+    deps = [
+        ":backend",
+        ":partitioner",
+        "//caffe2:torch",
+        "//executorch/examples/models:models",
+        "//executorch/exir/tests:models",
+        "//executorch/sdk:lib",
+        "//executorch/sdk/bundled_program/serialize:lib",
+        "fbsource//third-party/pypi/pytest:pytest",
+    ],
+)
67 changes: 55 additions & 12 deletions backends/apple/mps/test/test_mps.py
@@ -10,14 +10,14 @@
 from enum import Enum
 
 import torch
-from examples.models import MODEL_NAME_TO_MODEL
-from examples.models.model_factory import EagerModelFactory
 from executorch.backends.apple.mps.test.test_mps_models import MPS_MODEL_NAME_TO_MODEL
 from executorch.backends.apple.mps.test.test_mps_utils import (
     OpSequencesAddConv2d,
     randomize_bn,
     TestMPS,
 )
+from executorch.examples.models import MODEL_NAME_TO_MODEL
+from executorch.examples.models.model_factory import EagerModelFactory
 
 from executorch.exir.tests.models import (
     BasicSinMax,
@@ -757,6 +757,7 @@ def forward(self, x):
             module, model_inputs, func_name=inspect.stack()[0].function[5:]
         )
 
+    @unittest.skip
     def test_mps_backend_min_dim(self):
         class MinModule(torch.nn.Module):
             def __init__(self):
@@ -1231,6 +1232,7 @@ def forward(self, x):
             module, model_inputs, func_name=inspect.stack()[0].function[5:]
         )
 
+    @unittest.skip
     def test_boolean_array_indexing(self):
         class IndexGet(torch.nn.Module):
             def __init__(self):
@@ -1818,27 +1820,43 @@ def forward(self, x, y):
         )
 
     def test_mps_backend_clone(self):
+        class Clone(torch.nn.Module):
+            def forward(self, x):
+                return torch.clone(x)
+
         model_inputs = (torch.randn(1, 3, 3),)
         self.lower_and_test_with_partitioner(
-            torch.clone, model_inputs, func_name=inspect.stack()[0].function[5:]
+            Clone(), model_inputs, func_name=inspect.stack()[0].function[5:]
         )
 
     def test_mps_backend_floor(self):
+        class Floor(torch.nn.Module):
+            def forward(self, x):
+                return torch.floor(x)
+
         model_inputs = (torch.randn(1, 3, 3),)
         self.lower_and_test_with_partitioner(
-            torch.floor, model_inputs, func_name=inspect.stack()[0].function[5:]
+            Floor(), model_inputs, func_name=inspect.stack()[0].function[5:]
         )
 
     def test_mps_backend_sqrt(self):
+        class Sqrt(torch.nn.Module):
+            def forward(self, x):
+                return torch.sqrt(x)
+
         model_inputs = (torch.randn(1, 3, 3).abs(),)
         self.lower_and_test_with_partitioner(
-            torch.sqrt, model_inputs, func_name=inspect.stack()[0].function[5:]
+            Sqrt(), model_inputs, func_name=inspect.stack()[0].function[5:]
         )
 
     def test_mps_backend_ceil(self):
+        class Ceil(torch.nn.Module):
+            def forward(self, x):
+                return torch.ceil(x)
+
         model_inputs = (torch.randn(1, 3, 3),)
         self.lower_and_test_with_partitioner(
-            torch.ceil, model_inputs, func_name=inspect.stack()[0].function[5:]
+            Ceil(), model_inputs, func_name=inspect.stack()[0].function[5:]
         )
 
     def test_mps_backend_hardswish(self):
@@ -1884,6 +1902,7 @@ def forward(self, x):
             LeakyReLUModule(), model_inputs, func_name=inspect.stack()[0].function[5:]
         )
 
+    @unittest.skip
     def test_mps_channels_last_tagged_reshape_pass_output(self):
         op_sequences = OpSequencesAddConv2d(2, 2)
         op_sequences.eval()
@@ -2317,42 +2336,62 @@ def forward(self, x):
         )
 
     def test_mps_backend_bitwise_and(self):
+        class BitwiseAnd(torch.nn.Module):
+            def forward(self, x, y):
+                return torch.bitwise_and(x, y)
+
         model_inputs = (
             torch.tensor([-1, -2, 3], dtype=torch.int8),
             torch.tensor([1, 0, 3], dtype=torch.int8),
         )
         self.lower_and_test_with_partitioner(
-            torch.bitwise_and, model_inputs, func_name=inspect.stack()[0].function[5:]
+            BitwiseAnd(), model_inputs, func_name=inspect.stack()[0].function[5:]
         )
 
     def test_mps_backend_bitwise_or(self):
+        class BitwiseOr(torch.nn.Module):
+            def forward(self, x, y):
+                return torch.bitwise_or(x, y)
+
         model_inputs = (
             torch.tensor([-1, -2, 3], dtype=torch.int8),
             torch.tensor([1, 0, 3], dtype=torch.int8),
         )
         self.lower_and_test_with_partitioner(
-            torch.bitwise_or, model_inputs, func_name=inspect.stack()[0].function[5:]
+            BitwiseOr(), model_inputs, func_name=inspect.stack()[0].function[5:]
         )
 
     def test_mps_backend_bitwise_xor(self):
+        class BitwiseXor(torch.nn.Module):
+            def forward(self, x, y):
+                return torch.bitwise_xor(x, y)
+
         model_inputs = (
             torch.tensor([True, True, False]),
             torch.tensor([False, True, False]),
         )
         self.lower_and_test_with_partitioner(
-            torch.bitwise_xor, model_inputs, func_name=inspect.stack()[0].function[5:]
+            BitwiseXor(), model_inputs, func_name=inspect.stack()[0].function[5:]
        )
 
     def test_mps_backend_bitwise_not(self):
+        class BitwiseNot(torch.nn.Module):
+            def forward(self, x):
+                return torch.bitwise_not(x)
+
         model_inputs = (torch.tensor([-1, -2, 3], dtype=torch.int8),)
         self.lower_and_test_with_partitioner(
-            torch.bitwise_not, model_inputs, func_name=inspect.stack()[0].function[5:]
+            BitwiseNot(), model_inputs, func_name=inspect.stack()[0].function[5:]
         )
 
     def test_mps_backend_bitwise_not_with_bool(self):
+        class BitwiseNot(torch.nn.Module):
+            def forward(self, x):
+                return torch.bitwise_not(x)
+
         model_inputs = (torch.tensor([True, True, False]),)
         self.lower_and_test_with_partitioner(
-            torch.bitwise_not, model_inputs, func_name=inspect.stack()[0].function[5:]
+            BitwiseNot(), model_inputs, func_name=inspect.stack()[0].function[5:]
         )
 
     def test_mps_backend_bitwise_with_scalar(self):
@@ -2390,12 +2429,16 @@ def forward(self):
         )
 
     def test_mps_backend_where(self):
+        class Where(torch.nn.Module):
+            def forward(self, cond, x, y):
+                return torch.where(cond, x, y)
+
         x = torch.randn(3, 2)
         y = torch.ones(3, 2)
         cond = x > 0
         module_inputs = (cond, x, y)
         self.lower_and_test_with_partitioner(
-            torch.where, module_inputs, func_name=inspect.stack()[0].function[5:]
+            Where(), module_inputs, func_name=inspect.stack()[0].function[5:]
         )
 
     def test_mps_backend_scalar_tensor(self):
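Throughout this file, the commit replaces bare torch functions (torch.clone, torch.floor, torch.where, ...) passed to lower_and_test_with_partitioner with small torch.nn.Module wrappers, presumably because the export/lowering path expects an nn.Module rather than an arbitrary callable. A minimal standalone sketch of the wrapper pattern (names are illustrative; the test harness itself is not reproduced here):

```python
import torch

# Lift a bare torch function into an nn.Module so it can be exported and
# lowered like any other model. Mirrors the wrappers added in these tests.
class Clone(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.clone(x)

module = Clone().eval()
example_inputs = (torch.randn(1, 3, 3),)
print(module(*example_inputs).shape)  # torch.Size([1, 3, 3])
```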
2 changes: 1 addition & 1 deletion backends/arm/tosa_utils.py
@@ -33,7 +33,7 @@ def dbg_node(node):
     logger.info("  node.meta = ")
     for k, v in node.meta.items():
         logger.info(f"    '{k}' = {v}")
-        if type([]) == type(v):
+        if isinstance(v, list):
             for i in v:
                 logger.info(f"      {i} ")

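This hunk, like several that follow in backends/qualcomm, swaps an exact type comparison for isinstance. Beyond being the idiomatic spelling, isinstance also matches subclasses, which type(x) == T does not. A quick standalone illustration (not from the repository):

```python
class TaggedList(list):
    """Hypothetical list subclass, e.g. one returned by a helper."""

v = TaggedList([1, 2, 3])

print(type(v) == list)      # False: exact type comparison rejects subclasses
print(isinstance(v, list))  # True: isinstance accepts them
```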
6 changes: 5 additions & 1 deletion backends/qualcomm/passes/convert_to_linear.py
@@ -192,7 +192,11 @@ def _convert(self, graph_module: torch.fx.GraphModule):
         for _, src_partitions in partitions.items():
             for src_partition in src_partitions:
                 op_cnt = Counter(
-                    [n.target for n in src_partition.nodes if type(n.target) == edge_op]
+                    [
+                        n.target
+                        for n in src_partition.nodes
+                        if isinstance(n.target, edge_op)
+                    ]
                 )
                 if self.linear in op_cnt:
                     continue
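For context: the Counter tallies how often each edge-op target occurs in a matched source partition, and the pass skips partitions that already contain a linear op. A small self-contained illustration using string stand-ins for the real op targets (hypothetical values, not from the diff):

```python
from collections import Counter

# Hypothetical stand-ins for node targets within one source partition.
node_targets = ["aten.permute_copy", "aten.addmm", "aten.linear"]
op_cnt = Counter(node_targets)

linear = "aten.linear"
if linear in op_cnt:
    # Mirrors the `continue` in the pass: nothing to convert here.
    print("partition already contains linear; skipping")
```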
2 changes: 1 addition & 1 deletion backends/qualcomm/passes/fold_qdq.py
@@ -45,7 +45,7 @@ def _fold(self, graph_module: torch.fx.GraphModule) -> torch.fx.GraphModule:

         # collecting quant nodes to be removed
         for i in range(1, len(n.args)):
-            if type(n.args[i]) == torch.fx.node.Node:
+            if isinstance(n.args[i], torch.fx.node.Node):
                 to_be_removed.append(n.args[i])
                 # could be a commonly shared attribute between q & dq
                 if n.args[i].target == exir_ops.edge.aten._to_copy.default:
4 changes: 3 additions & 1 deletion backends/qualcomm/passes/insert_io_qdq.py
@@ -47,7 +47,9 @@ def _ceate_args(self, target: torch.fx.node.Target, quant_attrs: Dict):
             if name == "out_dtype":
                 continue
             value = quant_attrs[name]
-            if type(arg_schema.type) == torch.tensor and type(value) in [int, float]:
+            if isinstance(arg_schema.type, torch.tensor) and (
+                isinstance(value, int) or isinstance(value, float)
+            ):
                 value = torch.tensor(value)
             ret.append(value)
         return ret
2 changes: 1 addition & 1 deletion backends/qualcomm/passes/utils.py
@@ -32,7 +32,7 @@ def get_quant_attrs(
         attr_n = quant_node.args[i]
 
         value = attr_n
-        if type(attr_n) == torch.fx.node.Node:
+        if isinstance(attr_n, torch.fx.node.Node):
             # could be a commonly shared attribute between q & dq
             if attr_n.target == exir_ops.edge.aten._to_copy.default:
                 value = get_parameter(attr_n.args[0], edge_program)
2 changes: 1 addition & 1 deletion backends/qualcomm/quantizer/quantizer.py
@@ -295,7 +295,7 @@ def _get_quant_config(self, op: str | OpOverload) -> Optional[QuantizationConfig]:
         1. is one of use_per_channel_weight_quant_ops
         2. int8 / int16 config
         """
-        if type(op) == str:
+        if isinstance(op, str):
             return
 
         if op in self.use_per_channel_weight_quant_ops:
12 changes: 10 additions & 2 deletions backends/transforms/i64_to_i32.py
@@ -14,6 +14,14 @@

 class I64toI32(ExportPass):
 
+    def __init__(self, _skip_dim_order=False):
+        super(I64toI32, self).__init__()
+        self.copy_op = (
+            exir_ops.edge.aten._to_copy.default
+            if _skip_dim_order
+            else exir_ops.edge.dim_order_ops._to_dim_order_copy.default
+        )
+
     def _is_i64_tensor(self, node_val):
         return isinstance(node_val, FakeTensor) and node_val.dtype == torch.int64

@@ -34,7 +42,7 @@ def _apply_to_i32(self, graph: torch.fx.Graph):
                 args = (node,)
                 node_i32 = graph.create_node(
                     "call_function",
-                    exir_ops.edge.dim_order_ops._to_dim_order_copy.default,
+                    self.copy_op,
                     args,
                     {"dtype": torch.int32},
                 )
@@ -62,7 +70,7 @@ def _apply_to_i32(self, graph: torch.fx.Graph):
                 args = (node.args[0][i],)
                 node_i64 = graph.create_node(
                     "call_function",
-                    exir_ops.edge.dim_order_ops._to_dim_order_copy.default,
+                    self.copy_op,
                     args,
                     {"dtype": torch.int64},
                 )
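The new constructor parameter makes the inserted cast op configurable: by default the pass emits dim-order-aware copies, and with _skip_dim_order=True it falls back to plain aten._to_copy. A minimal usage sketch, assuming the import path mirrors the file location (backends/transforms/i64_to_i32.py):

```python
# Assumed import path; mirrors the file's location in the tree.
from executorch.backends.transforms.i64_to_i32 import I64toI32

# Default: i64 <-> i32 casts are emitted as dim_order_ops._to_dim_order_copy.
dim_order_pass = I64toI32()

# Opt out of dim-order ops: casts are emitted as aten._to_copy instead.
legacy_pass = I64toI32(_skip_dim_order=True)
```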
[Diff truncated: the remaining changed files are not shown.]
