Merge pull request #8 from JoKra1/main

v0.1.24

JoKra1 authored Dec 8, 2023
2 parents 3fbb15a + f539872 commit 17993e4

Showing 8 changed files with 912 additions and 89 deletions.
674 changes: 674 additions & 0 deletions LICENSE

Large diffs are not rendered by default.

10 changes: 7 additions & 3 deletions README.md
@@ -2,7 +2,7 @@

## Description

- ``mssm`` is a toolbox to estimate Generalized Additive Mixed Models (GAMMs) semi Markov-switching GAMMs (sMs-GAMMs) and sMs Impulse Response GAMMs (sMs-IR-GAMMs). The ``main`` branch is updated frequently to reflect new developments. The ``stable`` branch should reflect the latest releases. if you don't need the newest functionality, you should install from the ``stable`` branch (see below for instructions).
+ ``mssm`` is a toolbox to estimate Generalized Additive Mixed Models (GAMMs) semi Markov-switching GAMMs (sMs-GAMMs) and sMs Impulse Response GAMMs (sMs-IR-GAMMs). The ``main`` branch is updated frequently to reflect new developments. The ``stable`` branch should reflect the latest releases. If you don't need the newest functionality, you should install from the ``stable`` branch (see below for instructions).

## Installation

@@ -28,8 +28,12 @@ pip install -i https://test.pypi.org/simple/ mssm

### Building from source

- You can also build directly from source. This requires ``conda`` or an installation of [eigen](https://eigen.tuxfamily.org/index.php?title=Main_Page)(``setup.py`` then expects ``eigen`` in "usr/local/include/eigen3". This will probably not work on windows.). Once you have ``conda`` installed,
- [install eigen from conda-forge](https://anaconda.org/conda-forge/eigen). After cloning and navigating into the downloaded repository you can then install via ``pip install . ``.
+ You can also build directly from source. This requires ``conda`` or an installation of [eigen](https://eigen.tuxfamily.org/index.php?title=Main_Page) (``setup.py`` then expects ``eigen`` in "usr/local/include/eigen3". This will probably not work on windows - the ``conda`` strategy should.). Once you have ``conda`` installed,
+ [install eigen from conda-forge](https://anaconda.org/conda-forge/eigen). After cloning and navigating into the downloaded repository you can then install via:

+ ```
+ pip install .
+ ```

## To get started

4 changes: 3 additions & 1 deletion pyproject.toml
@@ -25,4 +25,6 @@ classifiers = [
# https://pypi.org/project/setuptools-scm/ - automatic version number based on tag
dynamic = ["version"]

- [tool.setuptools_scm]
+ [tool.setuptools_scm]
+ # https://github.com/pypa/setuptools_scm/issues/342
+ local_scheme = "no-local-version"
12 changes: 10 additions & 2 deletions src/mssm/models.py
@@ -602,7 +602,11 @@ def fit(self,burn_in=100,maxiter_inner=30,m_avg=15,conv_tol=1e-7,extend_lambda=T
temp = temp_schedule[iter]

durs,states,llks = self.__propose_all_states(pool,cov,temp,n_pi,n_TR,s_log_o_probs,dur_log_probs,var_map)
- states_flat = np.array([st for s in states for st in s],dtype=int)

+ if not self.mvar_by is None:
+     states_flat = np.array([st for s in states for _ in range(len(factor_levels[self.mvar_by])) for st in s],dtype=int)
+ else:
+     states_flat = np.array([st for s in states for st in s],dtype=int)

### Convergence control ###

@@ -764,7 +768,11 @@ def fit(self,burn_in=100,maxiter_inner=30,m_avg=15,conv_tol=1e-7,extend_lambda=T
# Now we can decode.
with mp.Pool(processes=self.cpus) as pool:
_,states_max,_ = self.__decode_all_states(pool,cov,self.__pi,self.__TR,s_log_o_probs,dur_log_probs,var_map)
- max_states_flat = np.array([st for s in states_max for st in s],dtype=int)

+ if not self.mvar_by is None:
+     max_states_flat = np.array([st for s in states_max for _ in range(len(factor_levels[self.mvar_by])) for st in s],dtype=int)
+ else:
+     max_states_flat = np.array([st for s in states_max for st in s],dtype=int)

return llk_hist,max_states_flat

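Both hunks above implement the same flattening. For readability, here is a small stand-alone sketch (toy data and hypothetical variable names, not mssm's actual arrays) of what the multivariate branch does: when ``mvar_by`` is set, each series' state sequence is repeated once per level of the multivariate factor so that the flattened vector lines up with the stacked observation rows.

```python
import numpy as np

# Toy stand-in for the decoded state sequences of two series.
states = [np.array([0, 0, 1]), np.array([1, 2])]
n_by_mvar = 3  # stand-in for len(factor_levels[self.mvar_by])

# Without a multivariate factor: one state per time point of every series.
flat_simple = np.array([st for s in states for st in s], dtype=int)

# With a multivariate factor: every series' sequence is repeated once per
# factor level so the flat vector matches the stacked observation rows.
flat_mvar = np.array(
    [st for s in states for _ in range(n_by_mvar) for st in s], dtype=int
)

print(flat_simple)  # [0 0 1 1 2]
print(flat_mvar)    # [0 0 1 0 0 1 0 0 1 1 2 1 2 1 2]
```
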
15 changes: 10 additions & 5 deletions src/mssm/src/python/gamm_solvers.py
@@ -28,15 +28,20 @@ def step_fellner_schall_sparse(gInv,emb_SJ,Bps,cCoef,cLam,scale,verbose=False):
# equal to zero: every coefficient of a term is penalized away. In that
# case num /denom is not defined so we return directly.
if denom <= 0: # Prevent overflow
-     return 1e+7
- nLam = scale * (num / denom) * cLam
- nLam = max(nLam,1e-7) # Prevent Lambda going to zero
- nLam = min(nLam,1e+7) # Prevent overflow
+     nLam = 1e+7
+ else:
+     nLam = scale * (num / denom) * cLam

+ nLam = max(nLam,1e-7) # Prevent Lambda going to zero
+ nLam = min(nLam,1e+7) # Prevent overflow

if verbose:
print(f"Num = {(gInv @ emb_SJ).trace()} - {Bps} == {num}\nDenom = {denom}; Lambda = {nLam}")

- return nLam-cLam
+ # Compute lambda delta
+ delta_lam = nLam-cLam

+ return delta_lam

def grad_lambda(gInv,emb_SJ,Bps,cCoef,scale):
# P. Deriv of restricted likelihood with respect to lambda.
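
For reference, a stand-alone sketch of the restructured update-and-clamp logic in this hunk, with scalar stand-ins replacing the trace-based ``num``/``denom`` terms that mssm computes:

```python
def fellner_schall_delta(num, denom, c_lam, scale=1.0):
    # Sketch of the step above: propose a new lambda, clamp it, return the change.
    if denom <= 0:
        # Every coefficient of the term is penalized away; num/denom is undefined.
        n_lam = 1e+7
    else:
        n_lam = scale * (num / denom) * c_lam

    n_lam = max(n_lam, 1e-7)  # prevent lambda from collapsing to zero
    n_lam = min(n_lam, 1e+7)  # prevent overflow
    return n_lam - c_lam

print(fellner_schall_delta(num=2.0, denom=0.5, c_lam=1.0))  # 3.0
```
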
2 changes: 1 addition & 1 deletion src/mssm/src/python/sem.py
@@ -218,7 +218,7 @@ def compute_log_probs(n_j,n_obs,has_scale_split,
if not mvar_by is None:
# We need to split the s_log_o_probs from every series by the multivariate factor
# and then sum the log-probs together. This is a strong independence assumption (see Langrock, 2021)
- n_by_mvar = factor_levels[mvar_by]
+ n_by_mvar = len(factor_levels[mvar_by])
s_log_o_probs = [s_prob.reshape(n_j,n_by_mvar,-1).sum(axis=1) for s_prob in s_log_o_probs]

return s_log_o_probs,dur_log_probs
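
A minimal numpy sketch (toy shapes, not mssm's actual arrays) of the split-and-sum that the comment above describes: one series' per-state log observation probabilities are split by the levels of the multivariate factor and the per-level log-probabilities are added, which corresponds to assuming conditional independence across the factor levels.

```python
import numpy as np

n_j, n_by_mvar, n_t = 2, 3, 4  # states, factor levels, time points (toy sizes)

# One series' per-state log observation probabilities, stacked over factor levels.
rng = np.random.default_rng(0)
s_prob = np.log(rng.uniform(0.1, 1.0, size=(n_j * n_by_mvar, n_t)))

# Split by the factor and sum the log-probs over its levels.
summed = s_prob.reshape(n_j, n_by_mvar, -1).sum(axis=1)
print(summed.shape)  # (2, 4): one combined log-probability per state and time point
```
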
7 changes: 0 additions & 7 deletions src/mssm/src/python/smooths.py
@@ -53,7 +53,6 @@ def B_spline_basis(i, cov, state_est, nk, drop_outer_k=False, convolve=False, mi
# Setup basis with even knot locations.
# Code based on Eilers, P., & Marx, B. (2010). Splines, knots, and penalties. https://doi.org/10.1002/WICS.125
# See also: Eilers, P. H. C., & Marx, B. D. (1996). Flexible smoothing with B-splines and penalties. https://doi.org/10.1214/ss/1038425655
- # However, knot location calculation was adapted to match mgcv (Wood, 2017)

xl = min(cov)
xr = max(cov)
@@ -64,12 +63,6 @@
if not min_c is None:
xl = min_c

- rg = xr - xl

- # MGCV adjustment.
- xr += 0.001*rg
- xl -= 0.001*rg

# ndx is equal to n in Eilers & Marx (2011)
# So there will be n-1 knots (without expansion)
# n + 1 + 2*deg knots with expansion
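
With the mgcv-style range expansion removed, knots are placed evenly over the observed covariate range in the Eilers & Marx style described by the comments above. A rough stand-alone sketch of that even-knot construction (hypothetical helper, not mssm's actual function):

```python
import numpy as np

def even_knots(x, ndx, deg=3):
    # Evenly spaced knots over [min(x), max(x)], expanded by deg knots on
    # each side, giving ndx + 1 + 2*deg knots in total (Eilers & Marx style).
    xl, xr = np.min(x), np.max(x)
    dx = (xr - xl) / ndx
    return np.arange(xl - deg * dx, xr + deg * dx + dx / 2, dx)

x = np.linspace(0, 1, 50)
knots = even_knots(x, ndx=7)
print(len(knots))  # 14 == 7 + 1 + 2*3
```
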
277 changes: 207 additions & 70 deletions tutorials/1) GAMMs.ipynb

Large diffs are not rendered by default.
