82 changes: 65 additions & 17 deletions in ml_library_include/ml/regression/MultiLinearRegression.hpp
@@ -8,11 +8,11 @@

 /**
  * @file MultilinearRegression.hpp
- * @brief A simple implementation of Multi Linear Regression.
+ * @brief A simple implementation of Multilinear Regression with improvements.
  */
 
 /**
- * @class Multilinear Regression
+ * @class MultilinearRegression
  * @brief A class that implements Multilinear Regression for predicting values
  *        based on multiple features.
  */
@@ -23,9 +23,10 @@ class MultilinearRegression {
      *
      * @param learningRate The rate at which the model learns (default 0.01).
      * @param iterations The number of iterations for the gradient descent (default 1000).
+     * @param regularizationParameter The regularization parameter lambda (default 0.0, no regularization).
      */
-    MultilinearRegression(double learningRate = 0.01, int iterations = 1000)
-        : learningRate(learningRate), iterations(iterations) {}
+    MultilinearRegression(double learningRate = 0.01, int iterations = 1000, double regularizationParameter = 0.0)
+        : learningRate_(learningRate), iterations_(iterations), lambda_(regularizationParameter) {}
 
     /**
      * @brief Trains the Multilinear Regression model on the provided data.
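The constructor in the hunk above now takes an L2-style regularization strength alongside the learning rate and iteration count. A minimal construction sketch with illustrative hyperparameter values (the include path assumes `ml_library_include` is on the compiler's include path):

```cpp
#include "ml/regression/MultiLinearRegression.hpp"

// Illustrative values: step size 0.05, 5000 gradient descent steps,
// ridge penalty 0.1. Passing 0.0 (the default) disables regularization.
MultilinearRegression model(0.05, 5000, 0.1);
```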
@@ -39,10 +40,23 @@
             throw std::invalid_argument("Features and target data sizes do not match.");
         }
 
-        int numFeatures = features[0].size();
-        weights.resize(numFeatures, 0.0); // Initialize weights
+        size_t numSamples = features.size();
+        size_t numFeatures = features[0].size();
 
-        for (int i = 0; i < iterations; ++i) {
+        // Validate that all feature vectors have the same size
+        for (const auto& feature : features) {
+            if (feature.size() != numFeatures) {
+                throw std::invalid_argument("All feature vectors must have the same number of elements.");
+            }
+        }
+
+        // Initialize weights and bias if they haven't been initialized yet
+        if (weights_.empty()) {
+            weights_.resize(numFeatures, 0.0);
+            bias_ = 0.0;
+        }
+
+        for (int iter = 0; iter < iterations_; ++iter) {
             gradientDescentStep(features, target);
         }
     }
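With this change, `train()` validates the feature matrix shape and initializes the weights only on the first call, so repeated calls continue training from the current parameters instead of restarting from zero. A small usage sketch continuing the construction example above, with made-up data (three samples, two features):

```cpp
std::vector<std::vector<double>> X = {{1.0, 2.0}, {2.0, 1.0}, {3.0, 4.0}};
std::vector<double> y = {8.0, 7.0, 18.0}; // made-up targets: y = 2*x1 + 3*x2
model.train(X, y); // runs the configured number of gradient descent steps
```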
@@ -54,13 +68,38 @@
      * @return The predicted value.
      */
     double predict(const std::vector<double>& features) const {
-        return std::inner_product(weights.begin(), weights.end(), features.begin(), 0.0);
+        if (features.size() != weights_.size()) {
+            throw std::invalid_argument("Feature vector size does not match the number of weights.");
+        }
+        double result = std::inner_product(weights_.begin(), weights_.end(), features.begin(), 0.0);
+        result += bias_;
+        return result;
     }
 
+    /**
+     * @brief Gets the current weights of the model.
+     *
+     * @return A vector containing the weights.
+     */
+    std::vector<double> getWeights() const {
+        return weights_;
+    }
+
+    /**
+     * @brief Gets the current bias of the model.
+     *
+     * @return The bias term.
+     */
+    double getBias() const {
+        return bias_;
+    }
+
 private:
-    double learningRate;          ///< The learning rate for gradient descent.
-    int iterations;               ///< The number of iterations for training.
-    std::vector<double> weights;  ///< The weights for the model.
+    double learningRate_;         ///< The learning rate for gradient descent.
+    int iterations_;              ///< The number of iterations for training.
+    double lambda_;               ///< Regularization parameter (lambda).
+    std::vector<double> weights_; ///< The weights for the model.
+    double bias_ = 0.0;           ///< Bias term.
 
     /**
      * @brief Performs a single iteration of gradient descent to update the model weights.
@@ -69,20 +108,29 @@
      * @param target A vector containing the target values.
      */
     void gradientDescentStep(const std::vector<std::vector<double>>& features, const std::vector<double>& target) {
-        std::vector<double> gradients(weights.size(), 0.0);
+        size_t numSamples = features.size();
+        size_t numFeatures = weights_.size();
+
+        std::vector<double> gradients(numFeatures, 0.0);
+        double biasGradient = 0.0;
 
-        for (size_t i = 0; i < features.size(); ++i) {
+        for (size_t i = 0; i < numSamples; ++i) {
             double prediction = predict(features[i]);
             double error = prediction - target[i];
 
-            for (size_t j = 0; j < weights.size(); ++j) {
-                gradients[j] += error * features[i][j];
+            for (size_t j = 0; j < numFeatures; ++j) {
+                gradients[j] += (error * features[i][j]) + (lambda_ * weights_[j]);
             }
+
+            biasGradient += error;
         }
 
-        for (size_t j = 0; j < weights.size(); ++j) {
-            weights[j] -= (learningRate / features.size()) * gradients[j];
+        // Update weights and bias
+        for (size_t j = 0; j < numFeatures; ++j) {
+            weights_[j] -= (learningRate_ / numSamples) * gradients[j];
         }
+
+        bias_ -= (learningRate_ / numSamples) * biasGradient;
     }
 };
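For reference, the batch update this step implements, as I read the code above (with α the learning rate, λ the regularization parameter, N the number of samples, and ŷᵢ = w·xᵢ + b):

$$
w_j \leftarrow w_j - \alpha \left( \frac{1}{N} \sum_{i=1}^{N} (\hat{y}_i - y_i)\,x_{ij} + \lambda w_j \right),
\qquad
b \leftarrow b - \frac{\alpha}{N} \sum_{i=1}^{N} (\hat{y}_i - y_i).
$$

The bias gradient carries no λ term, so the intercept stays unregularized, which is the usual convention for ridge regression. Also, because `lambda_ * weights_[j]` is accumulated once per sample before the division by `numSamples`, the per-step shrinkage works out to αλwⱼ independent of the dataset size.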

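Putting the pieces together, a self-contained sketch of the new API (hypothetical include path and made-up data; the exact fitted values depend on the hyperparameters):

```cpp
#include <iostream>
#include <vector>
#include "ml/regression/MultiLinearRegression.hpp"

int main() {
    // Illustrative hyperparameters: learning rate, iterations, ridge lambda.
    MultilinearRegression model(0.05, 5000, 0.01);

    // Made-up training data approximately following y = 2*x1 + 3*x2 + 1.
    std::vector<std::vector<double>> X = {
        {1.0, 2.0}, {2.0, 1.0}, {3.0, 4.0}, {4.0, 2.0}, {0.5, 1.5},
    };
    std::vector<double> y = {9.0, 8.0, 19.0, 15.0, 6.5};

    model.train(X, y);

    // Inspect the fitted parameters through the new accessors.
    for (double w : model.getWeights()) std::cout << "w = " << w << '\n';
    std::cout << "b = " << model.getBias() << '\n';

    // predict() now throws if the feature count does not match the weights.
    std::cout << "prediction for {2.0, 3.0}: " << model.predict({2.0, 3.0}) << '\n';
    return 0;
}
```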