Commit 9f681e4

ops/declarable compiler warnings (deeplearning4j#10183)
1 parent: 4d53d52

79 files changed (+231, -237 lines)
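
Most of the hunks below silence -Wsign-compare warnings: loop counters declared as `int`, `auto`, or `LongType` were compared against unsigned quantities such as `std::vector::size()` (and, judging by the hunks, `block.width()`). The fix is either to declare the counter as `size_t` or to wrap the signed operand in `static_cast<size_t>`. A minimal sketch of the pattern, with illustrative names that are not from the codebase:

#include <cstddef>
#include <vector>

// Before: with -Wall -Wextra, GCC and Clang warn that a signed 'int' is being
// compared against the unsigned result of std::vector::size() [-Wsign-compare].
int sumBefore(const std::vector<int>& values) {
  int total = 0;
  for (int i = 0; i < values.size(); ++i) total += values[i];
  return total;
}

// After: the counter is declared as size_t, matching the signedness of the
// container size. This mirrors the loop-index changes in the hunks below.
int sumAfter(const std::vector<int>& values) {
  int total = 0;
  for (size_t i = 0; i < values.size(); ++i) total += values[i];
  return total;
}

Where the signed value keeps its original type (for example batchSize in batched_gemm.cpp or input->sizeAt(-1) in bitcast.cpp), the commit instead inserts a static_cast at the comparison site.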

libnd4j/include/memory/impl/MemoryTracker.cpp

Lines changed: 1 addition & 1 deletion
@@ -106,7 +106,7 @@ void MemoryTracker::countIn(MemoryType type, Pointer ptr, LongType numBytes) {
 
   std::string stack("");
   messages = backtrace_symbols(array, size);
-  for (int i = 1; i < size && messages != NULL; ++i) {
+  for (size_t i = 1; i < size && messages != NULL; ++i) {
     stack += demangle(messages[i]) + "\n";
   }
 
libnd4j/include/ops/declarable/generic/bitwise/toggle_bits.cpp

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@
 namespace sd {
 namespace ops {
 OP_IMPL(toggle_bits, -1, -1, true) {
-  for (int i = 0; i < block.width(); i++) {
+  for (size_t i = 0; i < block.width(); i++) {
     auto x = INPUT_VARIABLE(i);
     auto z = OUTPUT_VARIABLE(i);
 

libnd4j/include/ops/declarable/generic/blas/batched_gemm.cpp

Lines changed: 2 additions & 2 deletions
@@ -139,7 +139,7 @@ CUSTOM_OP_IMPL(batched_gemm, -1, -1, false, 0, 9) {
     }
   }
 
-  REQUIRE_TRUE(vA.size() == vB.size() && vA.size() == vC.size() && vA.size() == batchSize, 0,
+  REQUIRE_TRUE(vA.size() == vB.size() && vA.size() == vC.size() && vA.size() == static_cast<size_t>(batchSize), 0,
               "BatchedGemm: mismatched numbers of A, B, C for unknown reason");
 
   helpers::bgemm(vA,
@@ -175,7 +175,7 @@ DECLARE_SHAPE_FN(batched_gemm) {
   auto firstInput = inputShape->at(2);
   auto secondInput = inputShape->at(batchSize + 2);
   auto firstType = ArrayOptions::dataType(inputShape->at(0));
-  for (int e = 1; e < block.width(); e++) {
+  for (size_t e = 1; e < block.width(); e++) {
     REQUIRE_TRUE(firstType == ArrayOptions::dataType(inputShape->at(1)), 0,
                  "BatchedGemm: all inputs must have same data type");
   }

libnd4j/include/ops/declarable/generic/blas/tensormmul.cpp

Lines changed: 5 additions & 5 deletions
@@ -110,7 +110,7 @@ struct IndexComparator
 std::vector<LongType> argsort(const std::vector<LongType>& array)
 {
   std::vector<LongType> indices(array.size());
-  for (LongType i = 0; i < array.size(); ++i) indices[i] = i;
+  for (size_t i = 0; i < array.size(); ++i) indices[i] = i;
 
   std::sort(indices.begin(), indices.end(), IndexComparator(array));
 
@@ -155,7 +155,7 @@ CUSTOM_OP_IMPL(tensormmul_bp, 4, 2, false, 0, -1) {
   for (LongType i = 0; i < Arank; ++i)
     axes_a_grad.push_back(i);
 
-  for (LongType i = 0; i < axes0Sum.size(); ++i)
+  for (size_t i = 0; i < axes0Sum.size(); ++i)
     axes_a_grad.erase(std::remove(axes_a_grad.begin(), axes_a_grad.end(), axes0Sum[i]), axes_a_grad.end());
 
 
@@ -165,7 +165,7 @@ CUSTOM_OP_IMPL(tensormmul_bp, 4, 2, false, 0, -1) {
   for (LongType i = 0; i < Brank; ++i)
     axes_b_grad.push_back(i);
 
-  for (LongType i = 0; i < axes1Sum.size(); ++i)
+  for (size_t i = 0; i < axes1Sum.size(); ++i)
     axes_b_grad.erase(std::remove(axes_b_grad.begin(), axes_b_grad.end(), axes1Sum[i]), axes_b_grad.end());
 
   //used for post result permute to reshape result to be expected output
@@ -185,12 +185,12 @@ CUSTOM_OP_IMPL(tensormmul_bp, 4, 2, false, 0, -1) {
   }
 
   std::vector<LongType> axes_b_gradA;
-  for (LongType i = 0; i < axes_b_grad.size(); i++) {
+  for (size_t i = 0; i < axes_b_grad.size(); i++) {
     axes_b_gradA.push_back(i);
   }
 
   std::vector<LongType> axes_a_gradB;
-  for (LongType i = 0; i < axes_a_grad.size(); i++) {
+  for (size_t i = 0; i < axes_a_grad.size(); i++) {
     axes_a_gradB.push_back(i);
   }
 

libnd4j/include/ops/declarable/generic/boolean/choose.cpp

Lines changed: 1 addition & 11 deletions
@@ -59,26 +59,16 @@ DECLARE_TYPES(choose) {
 }
 
 DECLARE_SHAPE_FN(choose) {
-  LongType const* shape;
-  int rank;
   int mode = INT_ARG(0);
   auto numResults = NDArrayFactory::create<LongType>(0L);
   if (block.width() > 1) {
     auto first = INPUT_VARIABLE(0);
     auto second = INPUT_VARIABLE(1);
-    if (first->lengthOf() > second->lengthOf()) {
-      shape = first->shapeInfo();
-      rank = first->rankOf();
-    } else {
-      shape = second->shapeInfo();
-      rank = second->rankOf();
-    }
+
 
     helpers::chooseFunctorArray(block.launchContext(), first, second, mode, nullptr, &numResults);
   } else {
     auto first = INPUT_VARIABLE(0);
-    shape = first->shapeInfo();
-    rank = first->rankOf();
     double scalar = T_ARG(0);
 
     helpers::chooseFunctorScalar(block.launchContext(), first, scalar, mode, nullptr, &numResults);
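
The choose.cpp hunk above takes a different route: the locals shape and rank were assigned on every branch but never read afterwards, so the dead code is simply deleted. A minimal reproduction of that warning, with hypothetical names:

#include <vector>

// Under -Wall -Wextra, GCC reports "variable 'rank' set but not used"
// [-Wunused-but-set-variable]: every branch writes 'rank', nothing reads it.
// Deleting the variable and its assignments, as the hunk above does, removes
// the warning without changing behaviour.
bool firstIsLarger(const std::vector<int>& a, const std::vector<int>& b) {
  int rank;  // dead local: delete it to silence the warning
  if (a.size() > b.size())
    rank = static_cast<int>(a.size());
  else
    rank = static_cast<int>(b.size());
  return a.size() > b.size();
}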

libnd4j/include/ops/declarable/generic/boolean/where.cpp

Lines changed: 1 addition & 1 deletion
@@ -108,7 +108,7 @@ DECLARE_SHAPE_FN(Where) {
   for (LongType i = 0; i < condition->lengthOf(); i++)
     if (condition->e<bool>(i)) numOfTrue++;
 
-  LongType const* theNewShape;
+  LongType * theNewShape;
   if (numOfTrue > 0) {
     LongType* newShape;
     ALLOCATE(newShape, block.getWorkspace(), shape::shapeInfoLength(2), sd::LongType);

libnd4j/include/ops/declarable/generic/broadcastable/multiply.cpp

Lines changed: 2 additions & 2 deletions
@@ -36,7 +36,7 @@ BROADCASTABLE_OP_IMPL(multiply, 0, 0) {
 
   BROADCAST_CHECK_EMPTY(x, y, z);
 
-  const LongType* zShapeInfo = nullptr;
+  LongType* zShapeInfo = nullptr;
   const bool areShapesBroadcastable =
       ShapeUtils::evalBroadcastShapeInfo(x->shapeInfo(), y->shapeInfo(), true, zShapeInfo, block.getWorkspace());
   REQUIRE_TRUE(areShapesBroadcastable, 0, "MULTIPLY OP: the shapes of x %s and y %s are not suitable for broadcast !",
@@ -72,7 +72,7 @@ CUSTOM_OP_IMPL(multiply_bp, 3, 2, false, 0, 0) {
   auto dLdx = OUTPUT_VARIABLE(0);
   auto dLdy = OUTPUT_VARIABLE(1);
 
-  const LongType* dLdzShapeInfo = nullptr;
+  LongType* dLdzShapeInfo = nullptr;
   const bool areShapesBroadcastable =
       ShapeUtils::evalBroadcastShapeInfo(x->shapeInfo(), y->shapeInfo(), true, dLdzShapeInfo, block.getWorkspace());
   REQUIRE_TRUE(areShapesBroadcastable, 0,

libnd4j/include/ops/declarable/generic/broadcastable/pow.cpp

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ CUSTOM_OP_IMPL(Pow_bp, 3, 2, false, 0, 0) {
   auto dLdx = OUTPUT_VARIABLE(0);
   auto dLdy = OUTPUT_VARIABLE(1);
 
-  const LongType* dLdzShapeInfo = nullptr;
+  LongType* dLdzShapeInfo = nullptr;
   const bool areShapesBroadcastable =
       ShapeUtils::evalBroadcastShapeInfo(x->shapeInfo(), y->shapeInfo(), true, dLdzShapeInfo, block.getWorkspace());
   REQUIRE_TRUE(areShapesBroadcastable, 0,
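
The where.cpp, multiply.cpp, and pow.cpp hunks all drop const from a shape-info pointer that is then handed to ShapeUtils::evalBroadcastShapeInfo to be filled in. That function's declaration is not shown in this diff; one plausible reading, sketched below with purely hypothetical names, is that the out parameter is a non-const pointer reference, to which a const-qualified pointer lvalue cannot bind.

#include <cstdint>

using LongType = int64_t;

// Hypothetical stand-in for an out-parameter API such as
// ShapeUtils::evalBroadcastShapeInfo (real signature not shown in this diff):
// the callee writes a pointer to the computed shape info through 'out'.
bool computeShapeInfo(LongType*& out) {
  static LongType shapeInfo[] = {2, 3, 4, 0, 0, 0, 0, 99};  // dummy buffer
  out = shapeInfo;
  return true;
}

int main() {
  // const LongType* info = nullptr;  // would not compile: a 'LongType*&'
  //                                  // parameter cannot bind to a
  //                                  // 'const LongType*' argument
  LongType* info = nullptr;           // mirrors the declarations after the change
  return computeShapeInfo(info) ? 0 : 1;
}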

libnd4j/include/ops/declarable/generic/datatypes/bitcast.cpp

Lines changed: 4 additions & 3 deletions
@@ -39,7 +39,7 @@ CUSTOM_OP_IMPL(bitcast, 1, 1, false, 0, 1) {
   auto outputSize = DataTypeUtils::sizeOf(newType);
   auto lastSize = outputSize / inputSize;
   if (inputSize < outputSize) {
-    REQUIRE_TRUE(input->sizeAt(-1) == lastSize, 0,
+    REQUIRE_TRUE(static_cast<size_t>(input->sizeAt(-1)) == lastSize, 0,
                 "BITCAST: %llu > %llu. So last dimension should be %i, but %i given.", inputSize, outputSize, lastSize,
                 input->sizeAt(-1));
   }
@@ -84,12 +84,13 @@ DECLARE_SHAPE_FN(bitcast) {
     auto outputShape = ConstantShapeHelper::getInstance().bufferForShapeInfo(newType, shape::order(inShape), shapeOf)->primary();
     return SHAPELIST(outputShape);
   }
-  REQUIRE_TRUE(shape::sizeAt(inShape, static_cast<sd::LongType>(-1)) == outputSize / inputSize, 0,
+  REQUIRE_TRUE(shape::sizeAt(inShape, static_cast<size_t>(static_cast<sd::LongType>(-1))) ==
+                   static_cast<sd::LongType>(outputSize / inputSize), 0,
               "BITCAST: %llu > %llu. So last dimension should be %i, but %i given.", inputSize, outputSize,
               outputSize / inputSize, shape::sizeAt(inShape, static_cast<sd::LongType>(-1)));
   std::vector<sd::LongType> shapeOf(inputRank - 1);
 
-  for (auto i = 0; i < shapeOf.size(); ++i) {
+  for (size_t i = 0; i < shapeOf.size(); ++i) {
     shapeOf[i] = inShape[i + 1];
   }
 
libnd4j/include/ops/declarable/generic/images/adjust_contrast.cpp

Lines changed: 1 addition & 1 deletion
@@ -53,7 +53,7 @@ CONFIGURABLE_OP_IMPL(adjust_contrast, 1, 1, true, 0, 0) {
 
   // fill up axes vector first
   std::vector<LongType> axes(input->rankOf() - 1);
-  for (auto i = 0; i < axes.size(); ++i) axes[i] = i;
+  for (size_t i = 0; i < axes.size(); ++i) axes[i] = i;
   // mean as reduction for last dimension set
   auto mean = input->reduceAlongDimension(reduce::Mean, &axes);
   auto part1 = (*input - mean);
