Commit a94c617
feat: Add additional condition for loading batches
When the first layer is Dense, the batch is only one matrix, where
each row is one example.
IlievskiV authored and lmoneta committed Apr 8, 2018
1 parent d13e778 commit a94c617
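
For context on the new condition: when the first hidden layer is dense, fBatchDepth is 1 and the whole batch is a single fBatchHeight x fBatchWidth matrix with one example per row (the dense branch advances the sample iterator once per row); otherwise one fBatchHeight x fBatchWidth matrix is copied per example. Below is a minimal standalone sketch of the two column-major flattening schemes, using plain std::vector in place of TMVA's TCpuBuffer and TMatrixT; the function names and the examples containers are illustrative only, not TMVA API.

   // Sketch only: how the two batch layouts are flattened into a column-major buffer.
   #include <cstddef>
   #include <vector>

   // Dense first layer: the batch is one matrix (batchHeight x batchWidth),
   // one example per row, stored column-major.
   std::vector<double> FlattenDenseBatch(const std::vector<std::vector<double>> &examples,
                                         std::size_t batchHeight, std::size_t batchWidth)
   {
      std::vector<double> buffer(batchHeight * batchWidth);
      for (std::size_t i = 0; i < batchHeight; i++) {       // example (row) index
         for (std::size_t j = 0; j < batchWidth; j++) {      // feature (column) index
            buffer[j * batchHeight + i] = examples[i][j];    // column-major index
         }
      }
      return buffer;
   }

   // General case: one (batchHeight x batchWidth) matrix per example, batchDepth of them,
   // each matrix flattened column-major and concatenated.
   std::vector<double> FlattenTensorBatch(const std::vector<std::vector<std::vector<double>>> &examples,
                                          std::size_t batchDepth, std::size_t batchHeight,
                                          std::size_t batchWidth)
   {
      std::vector<double> buffer(batchDepth * batchHeight * batchWidth);
      for (std::size_t i = 0; i < batchDepth; i++) {
         for (std::size_t j = 0; j < batchHeight; j++) {
            for (std::size_t k = 0; k < batchWidth; k++) {
               buffer[i * batchHeight * batchWidth + k * batchHeight + j] = examples[i][j][k];
            }
         }
      }
      return buffer;
   }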
Showing 2 changed files with 142 additions and 60 deletions.
108 changes: 76 additions & 32 deletions tmva/tmva/src/DNN/Architectures/Cpu/CpuBuffer.cxx
@@ -301,15 +301,26 @@ void TTensorDataLoader<TensorInput, TCpu<Real_t>>::CopyTensorInput(TCpuBuffer<Re
 {
    const std::vector<TMatrixT<Double_t>> &inputTensor = std::get<0>(fData);
 
-   for (size_t i = 0; i < fBatchSize; i++) {
-      size_t sampleIndex = *sampleIterator;
-      for (size_t j = 0; j < fBatchHeight; j++) {
-         for (size_t k = 0; k < fBatchWidth; k++) {
-            size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
-            buffer[bufferIndex] = static_cast<Real_t>(inputTensor[sampleIndex](j, k));
-         }
-      }
-      sampleIterator++;
+   if (fBatchDepth == 1) {
+      for (size_t i = 0; i < fBatchHeight; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchWidth; j++) {
+            size_t bufferIndex = j * fBatchHeight + i;
+            buffer[bufferIndex] = static_cast<Real_t>(inputTensor[0](sampleIndex, j));
+         }
+         sampleIterator++;
+      }
+   } else {
+      for (size_t i = 0; i < fBatchDepth; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchHeight; j++) {
+            for (size_t k = 0; k < fBatchWidth; k++) {
+               size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
+               buffer[bufferIndex] = static_cast<Real_t>(inputTensor[sampleIndex](j, k));
+            }
+         }
+         sampleIterator++;
+      }
    }
 }

@@ -352,16 +363,26 @@ void TTensorDataLoader<TensorInput, TCpu<Double_t>>::CopyTensorInput(TCpuBuffer<
 {
    const std::vector<TMatrixT<Double_t>> &inputTensor = std::get<0>(fData);
 
-   for (size_t i = 0; i < fBatchSize; i++) {
-      size_t sampleIndex = *sampleIterator;
-      for (size_t j = 0; j < fBatchHeight; j++) {
-         for (size_t k = 0; k < fBatchWidth; k++) {
-            // because of the column-major ordering
-            size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
-            buffer[bufferIndex] = inputTensor[sampleIndex](j, k);
-         }
-      }
-      sampleIterator++;
+   if (fBatchDepth == 1) {
+      for (size_t i = 0; i < fBatchHeight; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchWidth; j++) {
+            size_t bufferIndex = j * fBatchHeight + i;
+            buffer[bufferIndex] = inputTensor[0](sampleIndex, j);
+         }
+         sampleIterator++;
+      }
+   } else {
+      for (size_t i = 0; i < fBatchDepth; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchHeight; j++) {
+            for (size_t k = 0; k < fBatchWidth; k++) {
+               size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
+               buffer[bufferIndex] = inputTensor[sampleIndex](j, k);
+            }
+         }
+         sampleIterator++;
+      }
    }
 }

@@ -405,17 +426,29 @@ void TTensorDataLoader<TMVAInput_t, TCpu<Double_t>>::CopyTensorInput(TCpuBuffer<
    // one event, one example in the batch
    Event *event = fData.front();
 
-   for (size_t i = 0; i < fBatchSize; i++) {
-      size_t sampleIndex = *sampleIterator;
-      for (size_t j = 0; j < fBatchHeight; j++) {
-         for (size_t k = 0; k < fBatchWidth; k++) {
-            event = fData[sampleIndex];
-            // because of the column-major ordering
-            size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
-            buffer[bufferIndex] = event->GetValue(j * fBatchHeight + k);
-         }
-      }
-      sampleIterator++;
+   if (fBatchDepth == 1) {
+      for (size_t i = 0; i < fBatchHeight; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchWidth; j++) {
+            event = fData[sampleIndex];
+            size_t bufferIndex = j * fBatchHeight + i;
+            buffer[bufferIndex] = event->GetValue(j);
+         }
+         sampleIterator++;
+      }
+   } else {
+      for (size_t i = 0; i < fBatchDepth; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchHeight; j++) {
+            for (size_t k = 0; k < fBatchWidth; k++) {
+               event = fData[sampleIndex];
+               // because of the column-major ordering
+               size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
+               buffer[bufferIndex] = event->GetValue(j * fBatchHeight + k);
+            }
+         }
+         sampleIterator++;
+      }
    }
 }

@@ -473,20 +506,31 @@ template <>
 void TTensorDataLoader<TMVAInput_t, TCpu<Real_t>>::CopyTensorInput(TCpuBuffer<Real_t> &buffer,
                                                                    IndexIterator_t sampleIterator)
 {
 
    Event *event = fData.front();
 
-   for (size_t i = 0; i < fBatchSize; i++) {
-      size_t sampleIndex = *sampleIterator;
-      for (size_t j = 0; j < fBatchHeight; j++) {
-         for (size_t k = 0; k < fBatchWidth; k++) {
-            event = fData[sampleIndex];
-            // because of the column-major ordering
-            size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
-            buffer[bufferIndex] = static_cast<Real_t>(event->GetValue(j * fBatchHeight + k));
-         }
-      }
-      sampleIterator++;
+   if (fBatchDepth == 1) {
+      for (size_t i = 0; i < fBatchHeight; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchWidth; j++) {
+            event = fData[sampleIndex];
+            size_t bufferIndex = j * fBatchHeight + i;
+            buffer[bufferIndex] = static_cast<Real_t>(event->GetValue(j));
+         }
+         sampleIterator++;
+      }
+   } else {
+      for (size_t i = 0; i < fBatchDepth; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchHeight; j++) {
+            for (size_t k = 0; k < fBatchWidth; k++) {
+               event = fData[sampleIndex];
+               // because of the column-major ordering
+               size_t bufferIndex = i * fBatchHeight * fBatchWidth + k * fBatchHeight + j;
+               buffer[bufferIndex] = static_cast<Real_t>(event->GetValue(j * fBatchHeight + k));
+            }
+         }
+         sampleIterator++;
+      }
    }
 }

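In the TMVAInput_t overloads above, the dense branch selects event sampleIndex and reads its j-th input variable directly with event->GetValue(j), while the general branch keeps the flattened access GetValue(j * fBatchHeight + k). A rough sketch of the dense copy only, with a hypothetical MockEvent type standing in for TMVA::Event and a plain std::vector standing in for TCpuBuffer:

   // Sketch only: dense-case copy from event-like objects into a column-major buffer.
   #include <cstddef>
   #include <vector>

   struct MockEvent {
      std::vector<double> values;                                  // one entry per input variable
      double GetValue(std::size_t j) const { return values[j]; }
   };

   std::vector<double> CopyDenseBatch(const std::vector<MockEvent> &data,
                                      const std::vector<std::size_t> &sampleIndices,
                                      std::size_t batchWidth)
   {
      const std::size_t batchHeight = sampleIndices.size();        // number of examples in the batch
      std::vector<double> buffer(batchHeight * batchWidth);
      for (std::size_t i = 0; i < batchHeight; i++) {
         const MockEvent &event = data[sampleIndices[i]];
         for (std::size_t j = 0; j < batchWidth; j++) {
            buffer[j * batchHeight + i] = event.GetValue(j);       // column-major: row i = example i
         }
      }
      return buffer;
   }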
94 changes: 66 additions & 28 deletions tmva/tmva/src/DNN/Architectures/Reference/TensorDataLoader.cxx
@@ -41,15 +41,24 @@ void TTensorDataLoader<TensorInput, TReference<Real_t>>::CopyTensorInput(std::ve
 {
    const std::vector<TMatrixT<Double_t>> &inputTensor = std::get<0>(fData);
 
-   for (size_t i = 0; i < fBatchSize; i++) {
-      size_t sampleIndex = *sampleIterator;
-      for (size_t j = 0; j < fBatchHeight; j++) {
-         for (size_t k = 0; k < fBatchWidth; k++) {
-            tensor[i](j, k) = static_cast<Real_t>(inputTensor[sampleIndex](j, k));
-         }
-      }
-
-      sampleIterator++;
+   if (fBatchDepth == 1) {
+      for (size_t i = 0; i < fBatchHeight; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchWidth; j++) {
+            tensor[0](i, j) = static_cast<Real_t>(inputTensor[0](sampleIndex, j));
+         }
+         sampleIterator++;
+      }
+   } else {
+      for (size_t i = 0; i < fBatchDepth; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchHeight; j++) {
+            for (size_t k = 0; k < fBatchWidth; k++) {
+               tensor[i](j, k) = static_cast<Real_t>(inputTensor[sampleIndex](j, k));
+            }
+         }
+         sampleIterator++;
+      }
    }
 }

@@ -92,15 +101,24 @@ void TTensorDataLoader<TensorInput, TReference<Double_t>>::CopyTensorInput(std::
 {
    const std::vector<TMatrixT<Double_t>> &inputTensor = std::get<0>(fData);
 
-   for (size_t i = 0; i < fBatchSize; i++) {
-      size_t sampleIndex = *sampleIterator;
-      for (size_t j = 0; j < fBatchHeight; j++) {
-         for (size_t k = 0; k < fBatchWidth; k++) {
-            tensor[i](j, k) = inputTensor[sampleIndex](j, k);
-         }
-      }
-
-      sampleIterator++;
+   if (fBatchDepth == 1) {
+      for (size_t i = 0; i < fBatchHeight; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchWidth; j++) {
+            tensor[0](i, j) = inputTensor[0](sampleIndex, j);
+         }
+         sampleIterator++;
+      }
+   } else {
+      for (size_t i = 0; i < fBatchDepth; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchHeight; j++) {
+            for (size_t k = 0; k < fBatchWidth; k++) {
+               tensor[i](j, k) = inputTensor[sampleIndex](j, k);
+            }
+         }
+         sampleIterator++;
+      }
    }
 }

@@ -144,16 +162,26 @@ void TTensorDataLoader<TMVAInput_t, TReference<Real_t>>::CopyTensorInput(std::ve
    // one event, one example in the batch
    Event *event = fData.front();
 
-   for (size_t i = 0; i < fBatchSize; i++) {
-      size_t sampleIndex = *sampleIterator;
-      for (size_t j = 0; j < fBatchHeight; j++) {
-         for (size_t k = 0; k < fBatchWidth; k++) {
-            event = fData[sampleIndex];
-            tensor[i](j, k) = static_cast<Real_t>(event->GetValue(j * fBatchHeight + k));
-         }
-      }
-
-      sampleIterator++;
+   if (fBatchDepth == 1) {
+      for (size_t i = 0; i < fBatchHeight; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchWidth; j++) {
+            event = fData[sampleIndex];
+            tensor[0](i, j) = static_cast<Real_t>(event->GetValue(j));
+         }
+         sampleIterator++;
+      }
+   } else {
+      for (size_t i = 0; i < fBatchDepth; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchHeight; j++) {
+            for (size_t k = 0; k < fBatchWidth; k++) {
+               event = fData[sampleIndex];
+               tensor[i](j, k) = static_cast<Real_t>(event->GetValue(j * fBatchHeight + k));
+            }
+         }
+         sampleIterator++;
+      }
    }
 }

@@ -208,16 +236,26 @@ void TTensorDataLoader<TMVAInput_t, TReference<Double_t>>::CopyTensorInput(std::
    // one event, one example in the batch
    Event *event = fData.front();
 
-   for (size_t i = 0; i < fBatchSize; i++) {
-      size_t sampleIndex = *sampleIterator;
-      for (size_t j = 0; j < fBatchHeight; j++) {
-         for (size_t k = 0; k < fBatchWidth; k++) {
-            event = fData[sampleIndex];
-            tensor[i](j, k) = event->GetValue(j * fBatchHeight + k);
-         }
-      }
-
-      sampleIterator++;
+   if (fBatchDepth == 1) {
+      for (size_t i = 0; i < fBatchHeight; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchWidth; j++) {
+            event = fData[sampleIndex];
+            tensor[0](i, j) = event->GetValue(j);
+         }
+         sampleIterator++;
+      }
+   } else {
+      for (size_t i = 0; i < fBatchDepth; i++) {
+         size_t sampleIndex = *sampleIterator;
+         for (size_t j = 0; j < fBatchHeight; j++) {
+            for (size_t k = 0; k < fBatchWidth; k++) {
+               event = fData[sampleIndex];
+               tensor[i](j, k) = event->GetValue(j * fBatchHeight + k);
+            }
+         }
+         sampleIterator++;
+      }
    }
 }

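For the Reference architecture, the dense branch of the TensorInput overloads above fills a single matrix, tensor[0], whose row i is row sampleIndex of the input matrix. A small self-contained sketch of that row-wise copy (std::vector-of-vector matrices stand in for TMatrixT<Double_t>; MakeDenseBatch and the toy data in main are illustrative only):

   // Sketch only: dense-case batch assembly as a row-wise copy, with a tiny check.
   #include <cassert>
   #include <cstddef>
   #include <vector>

   using Matrix = std::vector<std::vector<double>>;   // row-major stand-in for TMatrixT<Double_t>

   Matrix MakeDenseBatch(const Matrix &input, const std::vector<std::size_t> &sampleIndices,
                         std::size_t batchWidth)
   {
      Matrix batch(sampleIndices.size(), std::vector<double>(batchWidth));
      for (std::size_t i = 0; i < sampleIndices.size(); i++) {     // row i = example i
         for (std::size_t j = 0; j < batchWidth; j++) {
            batch[i][j] = input[sampleIndices[i]][j];
         }
      }
      return batch;
   }

   int main()
   {
      Matrix input = {{1, 2}, {3, 4}, {5, 6}};                     // 3 events, 2 variables
      Matrix batch = MakeDenseBatch(input, {2, 0}, 2);             // batch of 2 examples
      assert(batch[0][0] == 5 && batch[0][1] == 6);                // row 0 = event 2
      assert(batch[1][0] == 1 && batch[1][1] == 2);                // row 1 = event 0
      return 0;
   }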
