Commit ef48cc2

Revert "Milestone2.1: Partition to_dim_order_copy op in XNN delegate (#12220)" (#12542)
This reverts commit dd6caa3. The imported diff broke an internal test: [D78368033](https://www.internalfb.com/diff/D78368033); see that diff for more details.
1 parent: 07c8f0f · commit: ef48cc2

File tree: 4 files changed, +1 -164 lines changed


backends/xnnpack/partition/config/__init__.py

Lines changed: 0 additions & 2 deletions
@@ -50,7 +50,6 @@
     SquareRootConfig,
     SubConfig,
     TanhConfig,
-    ToDimOrderCopyConfig,
     UpsampleBilinear2dConfig,
 )
 from executorch.backends.xnnpack.partition.config.node_configs import (
@@ -103,7 +102,6 @@
     ReciprocalSquareRootConfig,
     ReLUConfig,
     TanhConfig,
-    ToDimOrderCopyConfig,
     SigmoidConfig,
     SliceCopyConfig,
     SoftmaxConfig,

backends/xnnpack/partition/config/generic_node_configs.py

Lines changed: 0 additions & 29 deletions
@@ -425,35 +425,6 @@ def supported_precision_types(self) -> List[ConfigPrecisionType]:
         return [ConfigPrecisionType.FP32]
 
 
-class ToDimOrderCopyConfig(GenericNodePartitionerConfig):
-    target_name = "_to_dim_order_copy.default"
-
-    def check_constraints(self, node: torch.fx.Node, ep: ExportedProgram) -> bool:
-        """
-        Only support dim order conversion partitioning, not DType conversions
-        """
-        if not self.check_common_constraints(node, ep):
-            return False
-
-        # Get input node and compare dtypes
-        input_node = get_input_node(node, 0)
-        input_dtype = input_node.meta["val"].dtype
-        output_dtype = node.meta["val"].dtype
-
-        # Return False if doing dtype conversion
-        if input_dtype != output_dtype:
-            why(
-                node,
-                reason=f"dtype conversion from {input_dtype} to {output_dtype} is not supported",
-            )
-            return False
-
-        return True
-
-    def supported_precision_types(self) -> List[ConfigPrecisionType]:
-        return [ConfigPrecisionType.FP32, ConfigPrecisionType.STATIC_QUANT]
-
-
 class MeanDimConfig(GenericNodePartitionerConfig):
     target_name = "mean.dim"
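
For context: the reverted config partitioned `_to_dim_order_copy` only when the op is a pure memory-layout permutation, i.e. when input and output dtypes match; a dtype mismatch means the op is also a cast, which the XNNPACK delegate does not handle. Below is a minimal, self-contained sketch of that dtype check, not the actual implementation (the real code uses the partitioner helpers `get_input_node` and `why`, which are assumed unavailable here):

import torch

def is_pure_dim_order_copy(node: torch.fx.Node) -> bool:
    # The exporter stores a FakeTensor under meta["val"] for each node.
    # Comparing the dtype of the first input against the output tells a
    # pure dim-order permutation (e.g. contiguous -> channels_last) apart
    # from a copy that also casts.
    input_node = node.args[0]
    assert isinstance(input_node, torch.fx.Node)
    return input_node.meta["val"].dtype == node.meta["val"].dtype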

backends/xnnpack/test/ops/test_to_copy.py

Lines changed: 0 additions & 113 deletions
This file was deleted.

backends/xnnpack/test/passes/test_channels_last_tagged_reshape.py

Lines changed: 1 addition & 20 deletions
@@ -54,9 +54,7 @@ def run_tester(self, module, inputs):
             module.eval(),
             inputs,
         )
-        tester.export().to_edge_transform_and_lower().check_not(
-            ["executorch_exir_dialects_edge__ops_aten__to_copy_default"]
-        ).to_executorch().serialize().run_method_and_compare_outputs()
+        tester.export().to_edge_transform_and_lower().to_executorch().serialize().run_method_and_compare_outputs()
 
     class LinearConv(torch.nn.Module):
         def __init__(self):
@@ -181,23 +179,6 @@ def test_fp32_channels_last_tagged_reshape_pass(self):
             .run_method_and_compare_outputs()
         )
 
-    class LinearConvDimSwap(torch.nn.Module):
-        def __init__(self):
-            super().__init__()
-            self.conv1 = torch.nn.Conv2d(3, 3, 3)
-            self.linear1 = torch.nn.Linear(4, 3)
-
-        def forward(self, x):
-            y = self.linear1(x)
-            y = y.to(memory_format=torch.channels_last)
-            y = y.to(memory_format=torch.contiguous_format)
-            return self.conv1(y)
-
-    LinearConvDimSwapModule = LinearConvDimSwap()
-
-    def test_conv_linear_dim_order_swap_partitioner(self):
-        self.run_tester(self.LinearConvDimSwapModule, (torch.randn(1, 3, 6, 4),))
-
     def test_qs8_channels_last_tagged_reshape_pass(self):
         for module, num_reshape in self.modules.items():
             (
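
The deleted `test_conv_linear_dim_order_swap_partitioner` covered exactly the case the reverted config enabled: a Linear output round-tripped through channels_last before a Conv2d, so lowering produces standalone dim-order copies with unchanged dtype. A hedged sketch of how one might reproduce that graph outside the Tester pipeline (module and input shape mirror the deleted test; the edge lowering step is omitted):

import torch

class DimSwap(torch.nn.Module):
    # Stand-in for the deleted LinearConvDimSwap test module.
    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(3, 3, 3)
        self.linear1 = torch.nn.Linear(4, 3)

    def forward(self, x):
        y = self.linear1(x)
        y = y.to(memory_format=torch.channels_last)      # layout change only
        y = y.to(memory_format=torch.contiguous_format)  # and back
        return self.conv1(y)

ep = torch.export.export(DimSwap().eval(), (torch.randn(1, 3, 6, 4),))
# At the ATen level the .to(memory_format=...) calls appear as copy/clone
# nodes; ExecuTorch's edge dialect (with dim order enabled) rewrites them
# into dim-order copies, which the reverted config could partition.
print(ep.graph_module.code)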
