Skip to content

Commit

Permalink
Merge pull request #11 from boingoing/ref2
Browse files Browse the repository at this point in the history
Replace pointer parameters with references
  • Loading branch information
boingoing authored Oct 17, 2023
2 parents d073713 + 6ba2369 commit 644c7b6
Show file tree
Hide file tree
Showing 8 changed files with 106 additions and 106 deletions.
12 changes: 6 additions & 6 deletions src/FeedForwardNeuralNetwork.cc
Original file line number Diff line number Diff line change
Expand Up @@ -315,7 +315,7 @@ void FeedForwardNeuralNetwork::UpdateSlopes() {
}
}

void FeedForwardNeuralNetwork::TrainOffline(TrainingData* training_data,
void FeedForwardNeuralNetwork::TrainOffline(TrainingData& training_data,
size_t epoch_count) {
ResetPreviousSlopes();
ResetWeightSteps();
Expand All @@ -327,7 +327,7 @@ void FeedForwardNeuralNetwork::TrainOffline(TrainingData* training_data,
GetRandom().ShuffleVector(training_data);

// Train the network using offline weight updates - batching.
for (const auto& example : *training_data) {
for (const auto& example : training_data) {
// Run the network forward to get values in the output neurons.
RunForward(example.input);

Expand All @@ -339,11 +339,11 @@ void FeedForwardNeuralNetwork::TrainOffline(TrainingData* training_data,
}

// Update weights.
UpdateWeightsOffline(i, training_data->size());
UpdateWeightsOffline(i, training_data.size());
}
}

void FeedForwardNeuralNetwork::TrainOnline(TrainingData* training_data,
void FeedForwardNeuralNetwork::TrainOnline(TrainingData& training_data,
size_t epoch_count) {
ResetWeightSteps();

Expand All @@ -352,7 +352,7 @@ void FeedForwardNeuralNetwork::TrainOnline(TrainingData* training_data,
GetRandom().ShuffleVector(training_data);

// Train the network using online weight updates - no batching.
for (const auto& example : *training_data) {
for (const auto& example : training_data) {
// Run the network forward to get values in the output neurons.
RunForward(example.input);

Expand All @@ -365,7 +365,7 @@ void FeedForwardNeuralNetwork::TrainOnline(TrainingData* training_data,
}
}

void FeedForwardNeuralNetwork::Train(TrainingData* training_data,
void FeedForwardNeuralNetwork::Train(TrainingData& training_data,
size_t epoch_count) {
switch (training_algorithm_type_) {
case TrainingAlgorithmType::Backpropagation:
Expand Down
6 changes: 3 additions & 3 deletions src/FeedForwardNeuralNetwork.h
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ class FeedForwardNeuralNetwork : public Perceptron {
* @see TrainingAlgorithmType
* @see TrainingData
*/
void Train(TrainingData* training_data, size_t epoch_count);
void Train(TrainingData& training_data, size_t epoch_count);

protected:
void UpdateSlopes();
Expand All @@ -241,8 +241,8 @@ class FeedForwardNeuralNetwork : public Perceptron {
void ResetSlopes();
void ResetPreviousSlopes();

void TrainOffline(TrainingData* training_data, size_t epoch_count);
void TrainOnline(TrainingData* training_data, size_t epoch_count);
void TrainOffline(TrainingData& training_data, size_t epoch_count);
void TrainOnline(TrainingData& training_data, size_t epoch_count);

private:
static constexpr double DefaultLearningRate = 0.7;
Expand Down
6 changes: 3 additions & 3 deletions src/Perceptron.cc
Original file line number Diff line number Diff line change
Expand Up @@ -295,11 +295,11 @@ void Perceptron::SetWeights(const std::vector<double>& weights) {
weights_.assign(weights.cbegin(), weights.cend());
}

void Perceptron::GetOutput(std::vector<double>* output) const {
output->resize(GetOutputNeuronCount());
void Perceptron::GetOutput(std::vector<double>& output) const {
output.resize(GetOutputNeuronCount());
for (size_t i = 0; i < GetOutputNeuronCount(); i++) {
const auto& neuron = GetOutputNeuron(i);
output->at(i) = neuron.value;
output[i] = neuron.value;
}
}

Expand Down
2 changes: 1 addition & 1 deletion src/Perceptron.h
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ class Perceptron : public MultiLayerNeuralTopology {
* Writes all of the output neuron values into |output|.<br/>
* Existing values in |output| will be discarded.
*/
void GetOutput(std::vector<double>* output) const;
void GetOutput(std::vector<double>& output) const;

/**
* Build the neural network.<br/>
Expand Down
4 changes: 2 additions & 2 deletions src/RandomWrapper.h
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,8 @@ class RandomWrapper {
* Shuffle the elements of |vec| into random positions.
*/
template <typename T>
void ShuffleVector(std::vector<T>& vec) {
  // Engine() supplies the wrapper-owned random engine.
  std::shuffle(vec.begin(), vec.end(), Engine());
}

protected:
Expand Down
56 changes: 28 additions & 28 deletions src/TrainingData.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,44 +14,44 @@

namespace {

// Linearly map each element of |vec|: shift by |old_min|, multiply by the
// precomputed |factor|, then re-base at |new_min|. Inverse of SimpleDescale.
void SimpleScale(std::vector<double>& vec, double old_min, double factor,
                 double new_min) {
  for (auto& val : vec) {
    val = (val - old_min) * factor + new_min;
  }
}

// Undo SimpleScale on each element of |vec|: shift by |new_min|, divide by
// |factor|, then re-base at |old_min|.
void SimpleDescale(std::vector<double>& vec, double old_min, double factor,
                   double new_min) {
  for (auto& val : vec) {
    val = (val - new_min) / factor + old_min;
  }
}

// Center each element of |vec| on |mean| and divide by |stdev| * |multiplier|.
// Inverse of StdevDescale.
void StdevScale(std::vector<double>& vec, double mean, double stdev,
                double multiplier) {
  for (auto& val : vec) {
    val = (val - mean) / (stdev * multiplier);
  }
}

// Undo StdevScale on each element of |vec|: multiply by
// |stdev| * |multiplier| and re-add |mean|.
void StdevDescale(std::vector<double>& vec, double mean, double stdev,
                  double multiplier) {
  for (auto& val : vec) {
    val = (val * stdev * multiplier) + mean;
  }
}

// Center each element of |vec| on |mean| and divide by
// |uniform_norm| * |multiplier|. Inverse of UniformNormDescale.
void UniformNormScale(std::vector<double>& vec, double mean,
                      double uniform_norm, double multiplier) {
  for (auto& val : vec) {
    val = (val - mean) / (uniform_norm * multiplier);
  }
}

// Undo UniformNormScale on each element of |vec|: multiply by
// |uniform_norm| * |multiplier| and re-add |mean|.
void UniformNormDescale(std::vector<double>& vec, double mean,
                        double uniform_norm, double multiplier) {
  for (auto& val : vec) {
    val = (val * uniform_norm * multiplier) + mean;
  }
}
Expand Down Expand Up @@ -193,9 +193,9 @@ void TrainingData::ScaleSimple() {
CalculateMinMax();

for (auto& example : *this) {
SimpleScale(&example.input, input_old_min_, input_factor_,
SimpleScale(example.input, input_old_min_, input_factor_,
simple_scaling_new_min_);
SimpleScale(&example.output, output_old_min_, output_factor_,
SimpleScale(example.output, output_old_min_, output_factor_,
simple_scaling_new_min_);
}
}
Expand All @@ -205,9 +205,9 @@ void TrainingData::ScaleStandardDeviation() {
CalculateStdev();

for (auto& example : *this) {
StdevScale(&example.input, input_mean_, input_standard_deviation_,
StdevScale(example.input, input_mean_, input_standard_deviation_,
standard_deviation_multiplier_);
StdevScale(&example.output, output_mean_, output_standard_deviation_,
StdevScale(example.output, output_mean_, output_standard_deviation_,
standard_deviation_multiplier_);
}
}
Expand All @@ -217,36 +217,36 @@ void TrainingData::ScaleUniformNorm() {
CalculateUniformNorm();

for (auto& example : *this) {
UniformNormScale(&example.input, input_mean_, input_uniform_norm_,
UniformNormScale(example.input, input_mean_, input_uniform_norm_,
uniform_norm_multiplier_);
UniformNormScale(&example.output, output_mean_, output_uniform_norm_,
UniformNormScale(example.output, output_mean_, output_uniform_norm_,
uniform_norm_multiplier_);
}
}

void TrainingData::DescaleSimple() {
for (auto& example : *this) {
SimpleDescale(&example.input, input_old_min_, input_factor_,
SimpleDescale(example.input, input_old_min_, input_factor_,
simple_scaling_new_min_);
SimpleDescale(&example.output, output_old_min_, output_factor_,
SimpleDescale(example.output, output_old_min_, output_factor_,
simple_scaling_new_min_);
}
}

void TrainingData::DescaleStandardDeviation() {
for (auto& example : *this) {
StdevDescale(&example.input, input_mean_, input_standard_deviation_,
StdevDescale(example.input, input_mean_, input_standard_deviation_,
standard_deviation_multiplier_);
StdevDescale(&example.output, output_mean_, output_standard_deviation_,
StdevDescale(example.output, output_mean_, output_standard_deviation_,
standard_deviation_multiplier_);
}
}

void TrainingData::DescaleUniformNorm() {
for (auto& example : *this) {
UniformNormDescale(&example.input, input_mean_, input_uniform_norm_,
UniformNormDescale(example.input, input_mean_, input_uniform_norm_,
uniform_norm_multiplier_);
UniformNormDescale(&example.output, output_mean_, output_uniform_norm_,
UniformNormDescale(example.output, output_mean_, output_uniform_norm_,
uniform_norm_multiplier_);
}
}
Expand Down Expand Up @@ -281,7 +281,7 @@ void TrainingData::Descale() {
}
}

void TrainingData::ScaleInput(std::vector<double>* vec) const {
void TrainingData::ScaleInput(std::vector<double>& vec) const {
switch (scaling_algorithm_) {
case ScalingAlgorithm::None:
return;
Expand All @@ -299,7 +299,7 @@ void TrainingData::ScaleInput(std::vector<double>* vec) const {
}
}

void TrainingData::ScaleOutput(std::vector<double>* vec) const {
void TrainingData::ScaleOutput(std::vector<double>& vec) const {
switch (scaling_algorithm_) {
case ScalingAlgorithm::None:
return;
Expand All @@ -317,7 +317,7 @@ void TrainingData::ScaleOutput(std::vector<double>* vec) const {
}
}

void TrainingData::DescaleInput(std::vector<double>* vec) const {
void TrainingData::DescaleInput(std::vector<double>& vec) const {
switch (scaling_algorithm_) {
case ScalingAlgorithm::None:
return;
Expand All @@ -335,7 +335,7 @@ void TrainingData::DescaleInput(std::vector<double>* vec) const {
}
}

void TrainingData::DescaleOutput(std::vector<double>* vec) const {
void TrainingData::DescaleOutput(std::vector<double>& vec) const {
switch (scaling_algorithm_) {
case ScalingAlgorithm::None:
return;
Expand Down
8 changes: 4 additions & 4 deletions src/TrainingData.h
Original file line number Diff line number Diff line change
Expand Up @@ -135,28 +135,28 @@ class TrainingData : public std::vector<Example> {
* Uses the scaling parameters calculated during a previous call to Scale.
* @see Scale
*/
void ScaleInput(std::vector<double>* vec) const;
void ScaleInput(std::vector<double>& vec) const;

/**
* Scale one vector of output.<br/>
* Uses the scaling parameters calculated during a previous call to Scale.
* @see Scale
*/
void ScaleOutput(std::vector<double>* vec) const;
void ScaleOutput(std::vector<double>& vec) const;

/**
* Descale one vector of input.<br/>
* Uses the scaling parameters calculated during a previous call to Scale.
* @see Scale
*/
void DescaleInput(std::vector<double>* vec) const;
void DescaleInput(std::vector<double>& vec) const;

/**
* Descale one vector of output.<br/>
* Uses the scaling parameters calculated during a previous call to Scale.
* @see Scale
*/
void DescaleOutput(std::vector<double>* vec) const;
void DescaleOutput(std::vector<double>& vec) const;

/**
* Convert sequential data into examples.<br/>
Expand Down
Loading

0 comments on commit 644c7b6

Please sign in to comment.