/**
 * @file MultilinearRegression.hpp
 * @brief A simple implementation of Multilinear Regression with optional L2 regularization.
 */

/**
 * @class MultilinearRegression
 * @brief Implements multilinear regression trained with batch gradient
 * descent, for predicting a scalar value from multiple features.
 *
 * The model is y = dot(weights, x) + bias. An optional L2 penalty (lambda)
 * is applied to the weights only; the bias is conventionally not regularized.
 */
class MultilinearRegression {
public:
    /**
     * @brief Constructs the model.
     *
     * @param learningRate The rate at which the model learns (default 0.01).
     * @param iterations The number of iterations for the gradient descent (default 1000).
     * @param regularizationParameter The L2 regularization parameter lambda
     *        (default 0.0, i.e. no regularization).
     */
    MultilinearRegression(double learningRate = 0.01, int iterations = 1000,
                          double regularizationParameter = 0.0)
        : learningRate_(learningRate),
          iterations_(iterations),
          lambda_(regularizationParameter) {}

    /**
     * @brief Trains the model on the provided data using batch gradient descent.
     *
     * Parameters are initialized lazily on the first call, so repeated calls
     * to train() continue learning from the current weights.
     *
     * @param features One feature vector per sample; all vectors must have the same size.
     * @param target One target value per sample (same length as @p features).
     * @throws std::invalid_argument if the data is empty, the sample counts
     *         differ, or the feature vectors have inconsistent sizes.
     */
    void train(const std::vector<std::vector<double>>& features,
               const std::vector<double>& target) {
        // Empty-input guard: without it, features.front() below would be UB
        // when both containers are empty (their sizes match).
        if (features.empty() || features.size() != target.size()) {
            throw std::invalid_argument("Features and target data sizes do not match.");
        }

        const size_t numFeatures = features.front().size();

        // Every sample must have the same dimensionality.
        for (const auto& sample : features) {
            if (sample.size() != numFeatures) {
                throw std::invalid_argument("All feature vectors must have the same number of elements.");
            }
        }

        // Initialize weights and bias only if they haven't been initialized yet.
        if (weights_.empty()) {
            weights_.resize(numFeatures, 0.0);
            bias_ = 0.0;
        }

        for (int iter = 0; iter < iterations_; ++iter) {
            gradientDescentStep(features, target);
        }
    }

    /**
     * @brief Predicts the target value for a single feature vector.
     *
     * @param features Feature vector; must match the trained dimensionality.
     * @return The predicted value, dot(weights, features) + bias.
     * @throws std::invalid_argument if the size does not match the number of weights.
     */
    double predict(const std::vector<double>& features) const {
        if (features.size() != weights_.size()) {
            throw std::invalid_argument("Feature vector size does not match the number of weights.");
        }
        return std::inner_product(weights_.begin(), weights_.end(),
                                  features.begin(), 0.0) + bias_;
    }

    /**
     * @brief Gets the current weights of the model.
     *
     * @return A vector containing the weights.
     */
    std::vector<double> getWeights() const {
        return weights_;
    }

    /**
     * @brief Gets the current bias of the model.
     *
     * @return The bias term.
     */
    double getBias() const {
        return bias_;
    }

private:
    double learningRate_;            ///< The learning rate for gradient descent.
    int iterations_;                 ///< The number of iterations for training.
    double lambda_;                  ///< Regularization parameter (lambda).
    std::vector<double> weights_;    ///< The weights for the model.
    double bias_ = 0.0;              ///< Bias term.

    /**
     * @brief Performs a single batch gradient-descent update of weights and bias.
     *
     * Computes the mean-squared-error gradient over all samples, then applies
     * the L2 penalty once per weight. The penalty term lambda * w[j] is
     * loop-invariant within a step (weights only change at the end), so it is
     * hoisted out of the per-sample loop; the resulting update is
     * mathematically identical to accumulating it per sample and dividing by
     * the sample count.
     *
     * @param features A vector of feature vectors (one per sample).
     * @param target A vector containing the target values.
     */
    void gradientDescentStep(const std::vector<std::vector<double>>& features,
                             const std::vector<double>& target) {
        const size_t numSamples = features.size();
        const size_t numFeatures = weights_.size();

        std::vector<double> gradients(numFeatures, 0.0);
        double biasGradient = 0.0;

        for (size_t i = 0; i < numSamples; ++i) {
            const double error = predict(features[i]) - target[i];

            for (size_t j = 0; j < numFeatures; ++j) {
                gradients[j] += error * features[i][j];
            }

            biasGradient += error;
        }

        const double scale = learningRate_ / static_cast<double>(numSamples);

        // Update weights (averaged data gradient + L2 penalty) and bias.
        for (size_t j = 0; j < numFeatures; ++j) {
            weights_[j] -= scale * gradients[j] + learningRate_ * lambda_ * weights_[j];
        }
        bias_ -= scale * biasGradient;
    }
};