Skip to content

Develop optimization #83

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 6 commits into from
Sep 17, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 15 additions & 14 deletions bayesml/autoregressive/_autoregressive.py
Original file line number Diff line number Diff line change
Expand Up @@ -475,31 +475,32 @@ def update_posterior(self,x,padding=None):
If \"zeros\" is given, the zero vector is used as an initial value.
"""
_check.float_vec(x,'x',DataFormatError)
if x.shape[0] <= self.c_degree:
n = x.shape[0]
if n <= self.c_degree:
raise(DataFormatError("The length of x must greater than self.c_degree"))
x_mat = np.zeros((x.shape[0],self.c_degree+1))
x_mat = np.zeros((n,self.c_degree+1))
x_mat[:,0] = 1.0
for n in range(1,self.c_degree+1):
x_mat[n,-n:] = x[:n]
for n in range(self.c_degree+1,x.shape[0]):
x_mat[n,1:] = x[n-self.c_degree:n]
for i in range(1,self.c_degree+1):
x_mat[i,-i:] = x[:i]
for i in range(self.c_degree+1,n):
x_mat[i,1:] = x[i-self.c_degree:i]

mu_tmp = np.array(self.hn_mu_vec)
lambda_tmp = np.array(self.hn_lambda_mat)
if padding == "zeros":
self.hn_lambda_mat += x_mat.T @ x_mat
self.hn_mu_vec[:] = np.linalg.solve(self.hn_lambda_mat,
lambda_tmp @ mu_tmp[:,np.newaxis] + x_mat.T @ x[:,np.newaxis])[:,0]
self.hn_alpha += x.shape[0] / 2.0
self.hn_beta += (-self.hn_mu_vec[np.newaxis,:] @ self.hn_lambda_mat @ self.hn_mu_vec[:,np.newaxis]
+ x @ x + mu_tmp[np.newaxis,:] @ lambda_tmp @ mu_tmp[:,np.newaxis])[0,0] / 2.0
lambda_tmp @ mu_tmp + x_mat.T @ x)
self.hn_alpha += n / 2.0
self.hn_beta += (-self.hn_mu_vec @ self.hn_lambda_mat @ self.hn_mu_vec
+ x @ x + mu_tmp @ lambda_tmp @ mu_tmp) / 2.0
else:
self.hn_lambda_mat += x_mat[self.c_degree:].T @ x_mat[self.c_degree:]
self.hn_mu_vec[:] = np.linalg.solve(self.hn_lambda_mat,
lambda_tmp @ mu_tmp[:,np.newaxis] + x_mat[self.c_degree:].T @ x[self.c_degree:,np.newaxis])[:,0]
self.hn_alpha += (x.shape[0]-self.c_degree) / 2.0
self.hn_beta += (-self.hn_mu_vec[np.newaxis,:] @ self.hn_lambda_mat @ self.hn_mu_vec[:,np.newaxis]
+ x[self.c_degree:] @ x[self.c_degree:] + mu_tmp[np.newaxis,:] @ lambda_tmp @ mu_tmp[:,np.newaxis])[0,0] / 2.0
lambda_tmp @ mu_tmp + x_mat[self.c_degree:].T @ x[self.c_degree:])
self.hn_alpha += (n-self.c_degree) / 2.0
self.hn_beta += (-self.hn_mu_vec @ self.hn_lambda_mat @ self.hn_mu_vec
+ x[self.c_degree:] @ x[self.c_degree:] + mu_tmp @ lambda_tmp @ mu_tmp) / 2.0
return self

def estimate_params(self,loss="squared"):
Expand Down
36 changes: 14 additions & 22 deletions bayesml/gaussianmixture/_gaussianmixture.py
Original file line number Diff line number Diff line change
Expand Up @@ -724,19 +724,12 @@ def _calc_vl(self):

def _calc_n_x_bar_s(self,x):
self.ns[:] = self.r_vecs.sum(axis=0)
indices = self.ns.astype(bool)
if np.all(indices):
self.x_bar_vecs[:] = (self.r_vecs[:,:,np.newaxis] * x[:,np.newaxis,:]).sum(axis=0) / self.ns[:,np.newaxis]
self.s_mats[:] = np.sum(self.r_vecs[:,:,np.newaxis,np.newaxis]
* ((x[:,np.newaxis,:] - self.x_bar_vecs)[:,:,:,np.newaxis]
@ (x[:,np.newaxis,:] - self.x_bar_vecs)[:,:,np.newaxis,:]),
axis=0) / self.ns[:,np.newaxis,np.newaxis]
else:
self.x_bar_vecs[indices] = (self.r_vecs[:,indices,np.newaxis] * x[:,np.newaxis,:]).sum(axis=0) / self.ns[indices,np.newaxis]
self.s_mats[indices] = np.sum(self.r_vecs[:,indices,np.newaxis,np.newaxis]
* ((x[:,np.newaxis,:] - self.x_bar_vecs[indices])[:,:,:,np.newaxis]
@ (x[:,np.newaxis,:] - self.x_bar_vecs[indices])[:,:,np.newaxis,:]),
axis=0) / self.ns[indices,np.newaxis,np.newaxis]
self.x_bar_vecs[:] = self.r_vecs.T @ x
for k in range(self.c_num_classes):
if self.ns[k] > 0:
self.x_bar_vecs[k] /= self.ns[k]
diff = x - self.x_bar_vecs[k]
self.s_mats[k] = ((self.r_vecs[:,k]*diff.T) @ diff) / self.ns[k]

def _init_random_responsibility(self,x):
self.r_vecs[:] = self.rng.dirichlet(np.ones(self.c_num_classes),self.r_vecs.shape[0])
Expand Down Expand Up @@ -778,15 +771,14 @@ def _update_q_mu_lambda(self):

def _update_q_z(self,x):
self._ln_rho[:] = (self._e_ln_pi_vec
+ (self._e_ln_lambda_dets
- self.c_degree * np.log(2*np.pi)
- self.c_degree / self.hn_kappas
- ((x[:,np.newaxis,:]-self.hn_m_vecs)[:,:,np.newaxis,:]
@ self._e_lambda_mats
@ (x[:,np.newaxis,:]-self.hn_m_vecs)[:,:,:,np.newaxis]
)[:,:,0,0]
) / 2.0
)
+ (self._e_ln_lambda_dets
- self.c_degree * np.log(2*np.pi)
- self.c_degree / self.hn_kappas
) / 2.0
)
for k in range(self.c_num_classes):
diff = x-self.hn_m_vecs[k]
self._ln_rho[:,k] -= np.sum((diff @ self._e_lambda_mats[k]) * diff,axis=1) / 2.0
self.r_vecs[:] = np.exp(self._ln_rho - self._ln_rho.max(axis=1,keepdims=True))
self.r_vecs[:] /= self.r_vecs.sum(axis=1,keepdims=True)
self._calc_n_x_bar_s(x)
Expand Down
26 changes: 9 additions & 17 deletions bayesml/hiddenmarkovnormal/_hiddenmarkovnormal.py
Original file line number Diff line number Diff line change
Expand Up @@ -837,19 +837,12 @@ def _calc_prior_features(self):
def _calc_n_m_x_bar_s(self,x):
self.ns[:] = self.gamma_vecs.sum(axis=0)
self.ms[:] = self.xi_mats.sum(axis=0) # xi must be initialized as a zero matrix
indices = self.ns.astype(bool)
if np.all(indices):
self.x_bar_vecs[:] = (self.gamma_vecs[:,:,np.newaxis] * x[:,np.newaxis,:]).sum(axis=0) / self.ns[:,np.newaxis]
self.s_mats[:] = np.sum(self.gamma_vecs[:,:,np.newaxis,np.newaxis]
* ((x[:,np.newaxis,:] - self.x_bar_vecs)[:,:,:,np.newaxis]
@ (x[:,np.newaxis,:] - self.x_bar_vecs)[:,:,np.newaxis,:]),
axis=0) / self.ns[:,np.newaxis,np.newaxis]
else:
self.x_bar_vecs[indices] = (self.gamma_vecs[:,indices,np.newaxis] * x[:,np.newaxis,:]).sum(axis=0) / self.ns[indices,np.newaxis]
self.s_mats[indices] = np.sum(self.gamma_vecs[:,indices,np.newaxis,np.newaxis]
* ((x[:,np.newaxis,:] - self.x_bar_vecs[indices])[:,:,:,np.newaxis]
@ (x[:,np.newaxis,:] - self.x_bar_vecs[indices])[:,:,np.newaxis,:]),
axis=0) / self.ns[indices,np.newaxis,np.newaxis]
self.x_bar_vecs[:] = self.gamma_vecs.T @ x
for k in range(self.c_num_classes):
if self.ns[k] > 0:
self.x_bar_vecs[k] /= self.ns[k]
diff = x - self.x_bar_vecs[k]
self.s_mats[k] = ((self.gamma_vecs[:,k]*diff.T) @ diff) / self.ns[k]

def _calc_q_pi_features(self):
self._ln_pi_tilde_vec[:] = digamma(self.hn_eta_vec) - digamma(self.hn_eta_vec.sum())
Expand Down Expand Up @@ -996,12 +989,11 @@ def _calc_rho(self,x):
self._ln_rho[:] = ((self._e_ln_lambda_dets
- self.c_degree * np.log(2*np.pi)
- self.c_degree / self.hn_kappas
- ((x[:,np.newaxis,:]-self.hn_m_vecs)[:,:,np.newaxis,:]
@ self._e_lambda_mats
@ (x[:,np.newaxis,:]-self.hn_m_vecs)[:,:,:,np.newaxis]
)[:,:,0,0]
) / 2.0
)
for k in range(self.c_num_classes):
diff = x-self.hn_m_vecs[k]
self._ln_rho[:,k] -= np.sum((diff @ self._e_lambda_mats[k]) * diff,axis=1) / 2.0
self._rho[:] = np.exp(self._ln_rho)

def _forward(self):
Expand Down
24 changes: 12 additions & 12 deletions bayesml/linearregression/_linearregression.py
Original file line number Diff line number Diff line change
Expand Up @@ -536,29 +536,29 @@ def update_posterior(self, x, y):
float array.
"""
x,y = self._check_sample(x,y)
n = x.shape[0]

hn1_Lambda = np.array(self.hn_lambda_mat)
hn1_mu = np.array(self.hn_mu_vec)
self.hn_lambda_mat += x.T @ x
self.hn_mu_vec[:] = np.linalg.solve(self.hn_lambda_mat, x.T @ y[:,np.newaxis] + hn1_Lambda @ hn1_mu[:,np.newaxis])[:,0]
self.hn_alpha += x.shape[0]/2.0
self.hn_beta += (-self.hn_mu_vec[np.newaxis,:] @ self.hn_lambda_mat @ self.hn_mu_vec[:,np.newaxis]
+ y @ y + hn1_mu[np.newaxis,:] @ hn1_Lambda @ hn1_mu[:,np.newaxis])[0,0] /2.0
self._n += x.shape[0]
self.hn_mu_vec[:] = np.linalg.solve(self.hn_lambda_mat, x.T @ y + hn1_Lambda @ hn1_mu)
self.hn_alpha += n/2.0
self.hn_beta += (-self.hn_mu_vec @ self.hn_lambda_mat @ self.hn_mu_vec
+ y @ y + hn1_mu @ hn1_Lambda @ hn1_mu) /2.0
self._n += n
return self

def _update_posterior(self, x, y):
"""Update posterior without input check."""
x = x.reshape(-1,self.c_degree)
y = np.ravel(y)
n = x.shape[0]
hn1_Lambda = np.array(self.hn_lambda_mat)
hn1_mu = np.array(self.hn_mu_vec)
self.hn_lambda_mat += x.T @ x
self.hn_mu_vec[:] = np.linalg.solve(self.hn_lambda_mat, x.T @ y[:,np.newaxis] + hn1_Lambda @ hn1_mu[:,np.newaxis])[:,0]
self.hn_alpha += x.shape[0]/2.0
self.hn_beta += (-self.hn_mu_vec[np.newaxis,:] @ self.hn_lambda_mat @ self.hn_mu_vec[:,np.newaxis]
+ y @ y + hn1_mu[np.newaxis,:] @ hn1_Lambda @ hn1_mu[:,np.newaxis])[0,0] /2.0
self._n += x.shape[0]
self.hn_mu_vec[:] = np.linalg.solve(self.hn_lambda_mat, x.T @ y + hn1_Lambda @ hn1_mu)
self.hn_alpha += n/2.0
self.hn_beta += (-self.hn_mu_vec @ self.hn_lambda_mat @ self.hn_mu_vec
+ y @ y + hn1_mu @ hn1_Lambda @ hn1_mu) /2.0
self._n += n
return self

def estimate_params(self,loss="squared",dict_out=False):
Expand Down
6 changes: 4 additions & 2 deletions bayesml/multivariate_normal/_multivariatenormal.py
Original file line number Diff line number Diff line change
Expand Up @@ -510,9 +510,11 @@ def update_posterior(self,x):

n = x.shape[0]
x_bar = x.sum(axis=0)/n
diff1 = x-x_bar
diff2 = x_bar-self.hn_m_vec

self.hn_w_mat_inv[:] = (self.hn_w_mat_inv + (x-x_bar).T @ (x-x_bar)
+ (x_bar - self.hn_m_vec)[:,np.newaxis] @ (x_bar - self.hn_m_vec)[np.newaxis,:]
self.hn_w_mat_inv[:] = (self.hn_w_mat_inv + diff1.T @ diff1
+ diff2[:,np.newaxis] @ diff2[np.newaxis,:]
* self.hn_kappa * n / (self.hn_kappa + n))
self.hn_m_vec[:] = (self.hn_kappa*self.hn_m_vec + n*x_bar) / (self.hn_kappa+n)
self.hn_kappa += n
Expand Down