diff --git a/core/shark_turbine/aot/builtins/jittable.py b/core/shark_turbine/aot/builtins/jittable.py
index d2c85b73f..58c9fa790 100644
--- a/core/shark_turbine/aot/builtins/jittable.py
+++ b/core/shark_turbine/aot/builtins/jittable.py
@@ -7,7 +7,17 @@
 """Tracing builtins."""
 
-from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Union
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    List,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    Union,
+)
 
 import torch
 from torch._decomp import get_decompositions
@@ -206,6 +216,12 @@ def flat_wrapped_f(*args):
         if "functorch_functionalize" in self._passes:
             transformed_f = functorch_functionalize(transformed_f, *flat_pytorch_args)
 
+        for node in transformed_f.graph.nodes:
+            if node.op == "call_function":
+                if node.target == torch._ops.ops.aten.lift_fresh_copy.default:
+                    node.target = torch._ops.ops.aten.clone.default
+        transformed_f.recompile()
+
         # Ask dynamo to give us an aten graph.
         # TODO: Cache this for repeated calls.
         logger.debug("Performing dynamo.export(constraints=%r)", constraints)
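
For reference, below is a minimal standalone sketch of the same FX graph rewrite: walk the graph's call_function nodes, retarget any aten.lift_fresh_copy call to aten.clone, and recompile the GraphModule. The toy function and the use of make_fx to obtain an aten-level graph are illustrative assumptions for demonstration, not part of this patch.

    # Sketch only: demonstrates the node-retargeting pattern used in the
    # jittable.py change above on a standalone torch.fx GraphModule.
    import torch
    from torch.fx.experimental.proxy_tensor import make_fx


    def f(x):
        # A tensor constant created inside the traced region typically shows
        # up as aten.lift_fresh_copy in the captured aten graph (assumption
        # for this toy example).
        return x + torch.tensor([1.0, 2.0])


    gm = make_fx(f)(torch.zeros(2))

    # Retarget aten.lift_fresh_copy -> aten.clone, mirroring the loop added
    # in jittable.py, then regenerate the module's Python code.
    for node in gm.graph.nodes:
        if node.op == "call_function":
            if node.target == torch._ops.ops.aten.lift_fresh_copy.default:
                node.target = torch._ops.ops.aten.clone.default
    gm.recompile()

    print(gm.code)  # the emitted forward() now calls aten.clone

Note that the rewrite only swaps each node's target in place, so no graph surgery (erasing or inserting nodes) is needed; recompile() regenerates the GraphModule's code from the mutated graph.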