Skip to content

Commit cf15e34

Browse files
pytorchbot and Zonglin Peng
authored
jarvis-nightly-operators-test-aten-where-out (#15574)
This PR was created by the merge bot to help merge the original PR into the main branch.
ghstack PR number: #15500 by @zonglinpeng — please use this as the source of truth for the PR details, comments, and reviews.
ghstack PR base: https://github.com/pytorch/executorch/tree/gh/zonglinpeng/12/base
ghstack PR head: https://github.com/pytorch/executorch/tree/gh/zonglinpeng/12/head
Merge bot PR base: https://github.com/pytorch/executorch/tree/gh/zonglinpeng/11/orig
Merge bot PR head: https://github.com/pytorch/executorch/tree/gh/zonglinpeng/12/orig
Differential Revision: [D85364554](https://our.internmc.facebook.com/intern/diff/D85364554/)
@diff-train-skip-merge
Co-authored-by: Zonglin Peng <zonglinpeng@fb.com>
1 parent de1d149 commit cf15e34

File tree

1 file changed

+42
-23
lines changed

1 file changed

+42
-23
lines changed

backends/cadence/utils/facto_util.py

Lines changed: 42 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -189,47 +189,37 @@ def random_size_constraint(deps: object, r: int, d: int) -> int:
189189
if index == 0: # condition
190190
tensor_constraints = [
191191
cp.Dtype.In(lambda deps: [torch.bool]),
192-
cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
193-
cp.Value.Le(lambda deps, dtype, struct: 2**4),
192+
cp.Value.Ge(lambda deps, dtype, struct: 0),
193+
cp.Value.Le(lambda deps, dtype, struct: 1),
194194
cp.Rank.Ge(lambda deps: 1),
195195
cp.Size.Ge(lambda deps, r, d: 1),
196196
max_size_constraint,
197197
]
198198
elif index == 1: # input tensor(a)
199199
tensor_constraints = [
200-
cp.Dtype.In(
201-
lambda deps: [
202-
torch.int8,
203-
torch.int16,
204-
torch.uint8,
205-
torch.uint16,
206-
torch.int32,
207-
torch.float32,
208-
]
209-
),
200+
cp.Dtype.In(lambda deps: [torch.float32]),
210201
cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
211202
cp.Value.Le(lambda deps, dtype, struct: 2**4),
212203
cp.Rank.Ge(lambda deps: 1),
213204
cp.Size.Ge(lambda deps, r, d: 1),
205+
cp.Size.In(
206+
lambda deps, r, d: fn.broadcast_with(deps[0].shape, r, d)
207+
),
214208
max_size_constraint,
215209
]
216210
else: # input tensor(b)
217211
tensor_constraints = [
218-
cp.Dtype.In(
219-
lambda deps: [
220-
torch.int8,
221-
torch.int16,
222-
torch.uint8,
223-
torch.uint16,
224-
torch.int32,
225-
torch.float32,
226-
]
227-
),
212+
cp.Dtype.In(lambda deps: [torch.float32]),
228213
cp.Dtype.Eq(lambda deps: deps[1].dtype),
229214
cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
230215
cp.Value.Le(lambda deps, dtype, struct: 2**4),
231216
cp.Rank.Ge(lambda deps: 1),
232217
cp.Size.Ge(lambda deps, r, d: 1),
218+
cp.Size.In(
219+
lambda deps, r, d: fn.broadcast_with(
220+
fn.broadcasted_shape(deps[0].shape, deps[1].shape), r, d
221+
)
222+
),
233223
max_size_constraint,
234224
]
235225
case "embedding.default":
@@ -276,6 +266,9 @@ def random_size_constraint(deps: object, r: int, d: int) -> int:
276266
tensor_constraints.extend(
277267
[
278268
cp.Dtype.In(lambda deps: [torch.float32, torch.int32]),
269+
# Avoid NaN/Inf values that expose clamp NaN handling bugs
270+
cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
271+
cp.Value.Le(lambda deps, dtype, struct: 2**4),
279272
]
280273
)
281274
case "rsqrt.default":
@@ -466,6 +459,7 @@ def apply_scalar_contraints(op_name: str) -> list[ScalarDtype]:
466459
| "mul.Scalar"
467460
| "div.Scalar"
468461
| "constant_pad_nd.default"
462+
| "clamp.default"
469463
):
470464
return [ScalarDtype.int]
471465
case "full.default":
@@ -493,7 +487,32 @@ def facto_testcase_gen( # noqa: C901
493487
cp.Size.Le(lambda deps, r, d: 2**2),
494488
]
495489
)
496-
if in_spec.name == "max_val": # hardtanh
490+
# Special handling for clamp.default to ensure min < max with sufficient gap (at least 2) and never None
491+
if op_name == "clamp.default":
492+
if in_spec.name == "min":
493+
# min must always be provided (not None) and bounded, leave room for max
494+
spec.inspec[index].constraints.extend(
495+
[
496+
cp.Optional.Eq(lambda deps: False), # Never None
497+
cp.Value.Ge(lambda deps, dtype: -(2**4)),
498+
cp.Value.Le(
499+
lambda deps, dtype: 2**4 - 2
500+
), # Leave room for max (at least 2 units)
501+
]
502+
)
503+
elif in_spec.name == "max":
504+
# max must always be provided (not None), be >= min + 2 (sufficient gap), and bounded
505+
spec.inspec[index].deps = [0, 1] # deps on input tensor and min
506+
spec.inspec[index].constraints.extend(
507+
[
508+
cp.Optional.Eq(lambda deps: False), # Never None
509+
cp.Value.Ge(
510+
lambda deps, dtype: deps[1] + 2
511+
), # max >= min + 2 (sufficient gap)
512+
cp.Value.Le(lambda deps, dtype: 2**4),
513+
]
514+
)
515+
elif in_spec.name == "max_val": # hardtanh
497516
spec.inspec[index].deps = [0, 1]
498517
spec.inspec[index].constraints.extend(
499518
[cp.Value.Ge(lambda deps, _: deps[1])]

0 commit comments

Comments (0)