@@ -68,6 +68,15 @@ TEST(BroadcastIndexesRangeTest, ScalarBroadcastToOneD) {
  EXPECT_EQ(expected, actual);
}

+template <typename Range>
+void test_operator_plus(const Range& range) {
+  size_t idx = 0;
+  for (const auto indexes : range) {
+    EXPECT_EQ(*(range.begin() + idx), indexes);
+    idx++;
+  }
+}
+
// [1] -> [H, W]
// [W] -> [H, W]
// [1, 1] -> [H, W]
@@ -87,14 +96,15 @@ TEST(BroadcastIndexesRangeTest, OneAndTwoDExhaustive) {

  Tensor in_not_broadcast = tf.zeros({3, 4});

-  auto actual = range_to_vec(BroadcastIndexesRange<6>(
+  const auto range = BroadcastIndexesRange<6>(
      out,
      in_0d_scalar,
      in_1d_scalar,
      in_2d_scalar,
      in_row,
      in_col,
-      in_not_broadcast));
+      in_not_broadcast);
+  auto actual = range_to_vec(range);
  decltype(actual) expected = {
      {0, 0, 0, 0, 0, 0, 0},
      {1, 0, 0, 0, 1, 0, 1},
@@ -110,6 +120,8 @@ TEST(BroadcastIndexesRangeTest, OneAndTwoDExhaustive) {
      {11, 0, 0, 0, 3, 2, 11},
  };
  EXPECT_EQ(expected, actual);
+
+  test_operator_plus(range);
}

// Make sure nothing is thrown off by a size-1 dim in the output:
@@ -138,20 +150,20 @@ TEST(BroadcastIndexesRangeTest, OneAndTwoDWith1InOutputShapeExhaustive) {
  Tensor in_col = tf.zeros({H, 1});

  size_t idx = 0;
+  const auto range_row = BroadcastIndexesRange<5>(
+      out_row,
+      in_0d_scalar,
+      in_1d_scalar,
+      in_2d_scalar,
+      in_row,
+      in_leading_one_row);
  for (const auto
           [out_idx,
            in_0d_idx,
            in_1d_idx,
            in_2d_idx,
            in_row_idx,
-           in_leading_one_row_idx] :
-       BroadcastIndexesRange<5>(
-           out_row,
-           in_0d_scalar,
-           in_1d_scalar,
-           in_2d_scalar,
-           in_row,
-           in_leading_one_row)) {
+           in_leading_one_row_idx] : range_row) {
    EXPECT_EQ(out_idx, idx++);
    EXPECT_EQ(in_0d_idx, 0);
    EXPECT_EQ(in_1d_idx, 0);
@@ -160,16 +172,21 @@ TEST(BroadcastIndexesRangeTest, OneAndTwoDWith1InOutputShapeExhaustive) {
    EXPECT_EQ(in_leading_one_row_idx, out_idx);
  }

+  test_operator_plus(range_row);
+
  idx = 0;
+  const auto range_col = BroadcastIndexesRange<4>(
+      out_col, in_0d_scalar, in_1d_scalar, in_2d_scalar, in_col);
  for (const auto [out_idx, in_0d_idx, in_1d_idx, in_2d_idx, in_col_idx] :
-       BroadcastIndexesRange<4>(
-           out_col, in_0d_scalar, in_1d_scalar, in_2d_scalar, in_col)) {
+       range_col) {
    EXPECT_EQ(out_idx, idx++);
    EXPECT_EQ(in_0d_idx, 0);
    EXPECT_EQ(in_1d_idx, 0);
    EXPECT_EQ(in_2d_idx, 0);
    EXPECT_EQ(in_col_idx, out_idx);
  }
+
+  test_operator_plus(range_col);
}

// [1, 1, 1] -> [C, H, W]
@@ -197,16 +214,17 @@ TEST(BroadcastIndexesRangeTest, ThreeDBroadcasting) {
  // take the opportunity to mutation test against delinearize_index
  // and linearize_access_indexes.
  int idx = 0;
-  for (const auto indexes : BroadcastIndexesRange<8>(
-           out,
-           input_tensors[0],
-           input_tensors[1],
-           input_tensors[2],
-           input_tensors[3],
-           input_tensors[4],
-           input_tensors[5],
-           input_tensors[6],
-           input_tensors[7])) {
+  const auto range = BroadcastIndexesRange<8>(
+      out,
+      input_tensors[0],
+      input_tensors[1],
+      input_tensors[2],
+      input_tensors[3],
+      input_tensors[4],
+      input_tensors[5],
+      input_tensors[6],
+      input_tensors[7]);
+  for (const auto indexes : range) {
    const auto out_idx = indexes[0];
    EXPECT_EQ(out_idx, idx++);
    size_t out_indexes[executorch::runtime::kTensorDimensionLimit];
@@ -219,6 +237,7 @@ TEST(BroadcastIndexesRangeTest, ThreeDBroadcasting) {
              out_indexes, out.dim(), input_tensors[tensor_idx]));
    }
  }
+  test_operator_plus(range);
}

// 4-D should generalize, but we will go ahead and test:
@@ -235,8 +254,9 @@ void four_d_broadcasting_test() {
  // take the opportunity to mutation test against delinearize_index
  // and linearize_access_indexes.
  int idx = 0;
-  for (const auto [out_idx, in_cw_idx, in_nh_idx] :
-       BroadcastIndexesRange<2>(out, in_broadcast_cw, in_broadcast_nh)) {
+  const auto range =
+      BroadcastIndexesRange<2>(out, in_broadcast_cw, in_broadcast_nh);
+  for (const auto [out_idx, in_cw_idx, in_nh_idx] : range) {
    EXPECT_EQ(out_idx, idx++);
    size_t out_indexes[executorch::runtime::kTensorDimensionLimit];
    delinearize_index(
@@ -248,6 +268,8 @@ void four_d_broadcasting_test() {
        in_nh_idx,
        linearize_access_indexes(out_indexes, out.dim(), in_broadcast_nh));
  }
+
+  test_operator_plus(range);
}

TEST(BroadcastIndexesRangeTest, FourDBroadcasting) {
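
Note: the helper added at the top of this diff asserts one contract: the element reached by range-for iteration at step idx must equal *(range.begin() + idx), i.e. the iterator's operator+ must agree with repeated increment. The sketch below illustrates that contract in isolation; it is not part of the patch, and it uses std::vector as a stand-in for BroadcastIndexesRange (the test and fixture names here are hypothetical).

#include <cstddef>
#include <vector>
#include <gtest/gtest.h>

// Same shape as the helper in the patch: walk the range with a range-for
// and check that operator+ on begin() reaches the same element.
template <typename Range>
void test_operator_plus_sketch(const Range& range) {
  size_t idx = 0;
  for (const auto element : range) {
    EXPECT_EQ(*(range.begin() + idx), element);
    idx++;
  }
}

TEST(OperatorPlusContractSketch, VectorStandIn) {
  // std::vector iterators are random-access, so they trivially satisfy
  // the operator+ contract the helper checks.
  const std::vector<int> range = {2, 4, 6, 8};
  test_operator_plus_sketch(range);
}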