Add gradient descent step.
trekhleb committed Dec 6, 2018
1 parent ec55e79 commit 00690fa
Showing 1 changed file with 35 additions and 40 deletions.
75 changes: 35 additions & 40 deletions homemade/logistic_regression/logistic_regression.py
@@ -32,74 +32,69 @@ def train(self, lambda_param=0):
lambda_param
)

# def rosen(x):
# """The Rosenbrock function"""
# return sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0)
#
# def rosen_der(x):
# xm = x[1:-1]
# xm_m1 = x[:-2]
# xm_p1 = x[2:]
# der = np.zeros_like(x)
# der[1:-1] = 200 * (xm - xm_m1 ** 2) - 400 * (xm_p1 - xm ** 2) * xm - 2 * (1 - xm)
# der[0] = -400 * x[0] * (x[1] - x[0] ** 2) - 2 * (1 - x[0])
# der[-1] = 200 * (x[-1] - x[-2] ** 2)
# return der
#
# x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
#
# res = minimize(
# rosen,
# x0,
# method='CG',
# jac=rosen_der,
# options={
# 'maxiter': 500
# }
# )
#
# print(res.x)
# print(res.fun)
# print(res.jac)
# print(res.success)

# cost_history = self.gradient_descent(lambda_param)

# return self.theta, cost_history
pass
return optimized_theta, cost_history
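
For orientation, the visible tail of `train` in this hunk suggests the method now delegates to `gradient_descent` and passes its results through. A minimal sketch of that wiring, assuming `self.data`, `self.labels`, and `self.theta` attribute names that do not appear in this diff:

```python
# Hypothetical sketch of the train() body implied by this hunk; the
# self.data / self.labels / self.theta attribute names are assumptions.
def train(self, lambda_param=0):
    (optimized_theta, cost_history) = LogisticRegression.gradient_descent(
        self.data,             # feature matrix, shape (num_examples, num_features)
        self.labels,           # class labels (0 or 1), shape (num_examples, 1)
        self.theta.flatten(),  # initial parameters; minimize() expects a 1-D x0
        lambda_param           # regularization strength
    )

    # Keep the optimized parameters on the instance and return both results.
    self.theta = optimized_theta
    return optimized_theta, cost_history
```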

@staticmethod
def gradient_descent(data, labels, initial_theta, lambda_param, max_iteration=500):
"""GRADIENT DESCENT function.
Iteratively optimizes theta model parameters.
:param data: the set of training or test data.
:param labels: training set outputs (0 or 1, defining the class of each example).
:param initial_theta: initial model parameters.
:param lambda_param: regularization parameter.
:param max_iteration: maximum number of gradient descent steps.
"""

# Initialize the list that will record the cost value at each iteration.
cost_history = []

# Calculate the number of features.
num_features = data.shape[1]

# Launch gradient descent.
minimization_result = minimize(
# Function that we're going to minimize.
lambda current_theta: LogisticRegression.cost_function(
data, labels, current_theta.reshape((num_features, 1)), lambda_param
),
# Initial values of the model parameters.
initial_theta,
# We will use the conjugate gradient algorithm.
method='CG',
# jac=lambda current_theta: LogisticRegression.gradient_step(
# data, labels, current_theta.reshape((num_features, 1)), lambda_param
# ),
# Function that computes the gradient direction at each step.
jac=lambda current_theta: LogisticRegression.gradient_step(
data, labels, current_theta.reshape((num_features, 1)), lambda_param
),
# Record gradient descent progress for debugging.
callback=lambda current_theta: cost_history.append(LogisticRegression.cost_function(
data, labels, current_theta.reshape((num_features, 1)), lambda_param
)),
options={'maxiter': max_iteration}
)

# Raise an error if gradient descent failed to converge.
if not minimization_result.success:
raise ArithmeticError('Cannot minimize cost function')

# Reshape the final version of model parameters.
optimized_theta = minimization_result.x.reshape((num_features, 1))

return optimized_theta, cost_history
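
As a side note for readers of the diff, SciPy's `minimize` returns the optimized parameter vector in `result.x`, the final objective value in `result.fun`, and the gradient at the solution in `result.jac`, and it invokes `callback` once per iteration with the current parameter vector. A small self-contained toy example (not part of this repository) that exercises the same pattern:

```python
import numpy as np
from scipy.optimize import minimize

# Toy objective: f(x) = ||x - 3||^2, minimized at x = [3, 3].
cost = lambda x: np.sum((x - 3.0) ** 2)
gradient = lambda x: 2.0 * (x - 3.0)

cost_history = []

result = minimize(
    cost,
    np.zeros(2),       # initial guess
    method='CG',       # conjugate gradient, as in gradient_descent() above
    jac=gradient,      # analytic gradient of the objective
    # Record the cost once per iteration, mirroring the callback above.
    callback=lambda x: cost_history.append(cost(x)),
    options={'maxiter': 500},
)

print(result.x)      # optimized parameters, close to [3., 3.]
print(result.fun)    # final cost, close to 0
print(result.jac)    # gradient at the solution, close to [0., 0.]
print(cost_history)  # cost recorded at each iteration
```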

@staticmethod
def gradient_step(data, labels, theta, lambda_param):
"""GRADIENT STEP function.
It computes one gradient step: the partial derivatives of the cost function with respect to the theta parameters.
:param data: the set of training or test data.
:param labels: training set outputs (0 or 1, defining the class of each example).
:param theta: model parameters.
:param lambda_param: regularization parameter.
"""

# Calculate the number of training examples.
num_examples = labels.shape[0]

@@ -117,7 +112,7 @@ def gradient_step(data, labels, theta, lambda_param):
# We should NOT regularize the parameter theta_zero.
regularized_gradients[0] = (1 / num_examples) * (data[:, [0]].T @ label_diff)

return regularized_gradients
return regularized_gradients.T.flatten()
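
Most of `gradient_step` sits outside this hunk. A hedged sketch of the vectorized computation that its visible tail implies, assuming a sigmoid hypothesis; the intermediate names below are illustrative rather than the committed code:

```python
import numpy as np

def gradient_step_sketch(data, labels, theta, lambda_param):
    # Number of training examples.
    num_examples = labels.shape[0]

    # Sigmoid hypothesis for every example, shape (num_examples, 1).
    predictions = 1 / (1 + np.exp(-data @ theta))

    # Difference between predictions and true labels.
    label_diff = predictions - labels

    # Plain gradient plus the L2 penalty applied to every parameter.
    gradients = (1 / num_examples) * (data.T @ label_diff)
    regularized_gradients = gradients + (lambda_param / num_examples) * theta

    # The bias parameter theta_zero is NOT regularized, matching the hunk above.
    regularized_gradients[0] = (1 / num_examples) * (data[:, [0]].T @ label_diff)

    # minimize() expects the gradient as a flat 1-D array.
    return regularized_gradients.T.flatten()
```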

@staticmethod
def cost_function(data, labels, theta, lambda_param):
@@ -128,7 +123,7 @@ def cost_function(data, labels, theta, lambda_param):
:param data: the set of training or test data.
:param labels: training set outputs (0 or 1, defining the class of each example).
:param theta: model parameters.
:param lambda_param: regularization parameter
:param lambda_param: regularization parameter.
"""

# Calculate the number of training examples and features.
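
The body of `cost_function` is likewise elided here. A hedged sketch of the regularized logistic cost it presumably computes; the intermediate names are illustrative rather than the committed code:

```python
import numpy as np

def cost_function_sketch(data, labels, theta, lambda_param):
    # Number of training examples.
    num_examples = labels.shape[0]

    # Sigmoid hypothesis for every example, shape (num_examples, 1).
    predictions = 1 / (1 + np.exp(-data @ theta))

    # Cross-entropy terms for positive and negative examples.
    positive_cost = labels.T @ np.log(predictions)
    negative_cost = (1 - labels).T @ np.log(1 - predictions)

    # L2 penalty that skips the bias parameter theta_zero.
    regularization = (lambda_param / (2 * num_examples)) * np.sum(theta[1:] ** 2)

    cost = (-1 / num_examples) * (positive_cost + negative_cost) + regularization

    # Return a plain scalar so minimize() receives a scalar objective.
    return cost.item()
```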
