@@ -742,36 +742,36 @@ inline Tensor tile(const Tensor& x,
  Array<Expr> reps_shape;
  Array<Expr> new_shape;
  if (ndim == rdim) {
-    for (size_t i = 0; i < ndim; ++i) {
+    for (size_t i = 0; i < static_cast<size_t>(ndim); ++i) {
      data_shape.push_back(x->shape[i]);
      reps_shape.push_back(reps[i]);
    }
  } else if (ndim > rdim) {
-    for (size_t i = 0; i < ndim; ++i)
+    for (size_t i = 0; i < static_cast<size_t>(ndim); ++i)
      data_shape.push_back(x->shape[i]);
-    for (size_t i = 0; i < ndim - rdim; ++i)
+    for (size_t i = 0; i < static_cast<size_t>(ndim - rdim); ++i)
      reps_shape.push_back(1);
-    for (size_t i = 0; i < rdim; ++i)
+    for (size_t i = 0; i < static_cast<size_t>(rdim); ++i)
      reps_shape.push_back(reps[i]);
  } else {
-    for (size_t i = 0; i < rdim - ndim; ++i)
+    for (size_t i = 0; i < static_cast<size_t>(rdim - ndim); ++i)
      data_shape.push_back(1);
-    for (size_t i = 0; i < ndim; ++i)
+    for (size_t i = 0; i < static_cast<size_t>(ndim); ++i)
      data_shape.push_back(x->shape[i]);
-    for (size_t i = 0; i < rdim; ++i)
+    for (size_t i = 0; i < static_cast<size_t>(rdim); ++i)
      reps_shape.push_back(reps[i]);
  }
-  for (size_t i = 0; i < tdim; ++i)
+  for (size_t i = 0; i < static_cast<size_t>(tdim); ++i)
    new_shape.push_back(data_shape[i] * reps_shape[i]);

  return compute(
    new_shape, [&](const Array<Var>& indices) {
      Array<Expr> idx;
      if (ndim >= rdim) {
-        for (size_t i = 0; i < ndim; ++i)
+        for (size_t i = 0; i < static_cast<size_t>(ndim); ++i)
          idx.push_back(indices[i] % x->shape[i]);
      } else {
-        for (size_t i = 0; i < ndim; ++i)
+        for (size_t i = 0; i < static_cast<size_t>(ndim); ++i)
          idx.push_back(indices[rdim - ndim + i] % x->shape[i]);
      }
      return x(idx);
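For context, this hunk only wraps the loop bounds in static_cast<size_t>, presumably so the size_t loop counters are compared against an unsigned bound and signed/unsigned comparison warnings go away; the tiling logic itself is unchanged. Below is a minimal, self-contained sketch (plain C++, hypothetical shapes and reps, no TVM types) of that logic: the output shape is data_shape[i] * reps_shape[i], and each output index maps back to the input through indices[i] % shape[i].

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // Hypothetical 2x3 input, row-major, tiled twice along each axis.
  std::vector<int> data = {1, 2, 3, 4, 5, 6};
  std::vector<std::size_t> shape = {2, 3};
  std::vector<std::size_t> reps = {2, 2};

  // new_shape[i] = data_shape[i] * reps_shape[i], as in the diff above.
  std::vector<std::size_t> new_shape(shape.size());
  for (std::size_t i = 0; i < shape.size(); ++i)
    new_shape[i] = shape[i] * reps[i];  // {4, 6}

  // Each output element reads the input at indices[i] % shape[i].
  for (std::size_t r = 0; r < new_shape[0]; ++r) {
    for (std::size_t c = 0; c < new_shape[1]; ++c) {
      std::size_t src = (r % shape[0]) * shape[1] + (c % shape[1]);
      std::cout << data[src] << (c + 1 == new_shape[1] ? '\n' : ' ');
    }
  }
  return 0;
}

With these values the sketch prints a 4x6 grid in which the 2x3 input is repeated twice along each axis, mirroring what the ndim == rdim branch of tile computes.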