
Updated docstrings in pymc.model.core.Model #7118

Merged · 5 commits · Mar 4, 2024
Changes from all commits
114 changes: 72 additions & 42 deletions pymc/model/core.py
@@ -404,10 +404,10 @@ class Model(WithMemoization, metaclass=ContextMeta):

Parameters
----------
name: str
name : str
name that will be used as prefix for names of all random
variables defined within model
check_bounds: bool
check_bounds : bool
Ensure that input parameters to distributions are in a valid
range. If your model is built in a way where you know your
parameters can only take on valid values you can set this to
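For illustration (not part of the diff), a minimal sketch of how these two arguments are used; the prefixed name shown in the comment assumes the current name separator:

```python
import pymc as pm

# check_bounds=False drops parameter-validity checks from the logp graph,
# which can speed up models whose parameters are valid by construction.
with pm.Model(name="sub", check_bounds=False) as model:
    x = pm.Normal("x", 0, 1)  # stored under the prefixed name, e.g. "sub::x"
```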
@@ -586,10 +586,10 @@ def logp_dlogp_function(self, grad_vars=None, tempered=False, **kwargs):

Parameters
----------
grad_vars: list of random variables, optional
grad_vars : list of random variables, optional
Compute the gradient with respect to those variables. If None,
use all free random variables of this model.
tempered: bool
tempered : bool
Compute the tempered logp `free_logp + alpha * observed_logp`.
`alpha` can be changed using `ValueGradFunction.set_weights([alpha])`.
"""
@@ -625,12 +625,12 @@ def compile_logp(

Parameters
----------
vars: list of random variables or potential terms, optional
vars : list of random variables or potential terms, optional
Compute the log-probability of those variables. If None, use all
free and observed random variables, as well as potential terms in model.
jacobian:
jacobian : bool
Whether to include jacobian terms in logprob graph. Defaults to True.
sum:
sum : bool
Whether to sum all logp terms or return elemwise logp for each variable.
Defaults to True.
"""
@@ -645,10 +645,10 @@ def compile_dlogp(

Parameters
----------
vars: list of random variables or potential terms, optional
vars : list of random variables or potential terms, optional
Compute the gradient with respect to those variables. If None, use all
free and observed random variables, as well as potential terms in model.
jacobian:
jacobian : bool
Whether to include jacobian terms in logprob graph. Defaults to True.
"""
return self.compile_fn(self.dlogp(vars=vars, jacobian=jacobian))
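Continuing the sketch above, the compiled gradient is evaluated the same way:

```python
dlogp_fn = model.compile_dlogp()
dlogp_fn(model.initial_point())  # gradient w.r.t. all free value variables
```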
@@ -662,10 +662,10 @@ def compile_d2logp(

Parameters
----------
vars: list of random variables or potential terms, optional
vars : list of random variables or potential terms, optional
Compute the gradient with respect to those variables. If None, use all
free and observed random variables, as well as potential terms in model.
jacobian:
jacobian : bool
Whether to include jacobian terms in logprob graph. Defaults to True.
"""
return self.compile_fn(self.d2logp(vars=vars, jacobian=jacobian))
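And likewise for the compiled Hessian:

```python
d2logp_fn = model.compile_d2logp()
d2logp_fn(model.initial_point())  # Hessian of the model logp
```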
@@ -680,12 +680,12 @@ def logp(

Parameters
----------
vars: list of random variables or potential terms, optional
vars : list of random variables or potential terms, optional
Compute the log-probability of those variables. If None, use all
free and observed random variables, as well as potential terms in model.
jacobian:
jacobian : bool
Whether to include jacobian terms in logprob graph. Defaults to True.
sum:
sum : bool
Whether to sum all logp terms or return elemwise logp for each variable.
Defaults to True.

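Unlike compile_logp, this returns a symbolic graph; a short sketch (assuming the model from the earlier example):

```python
elemwise = model.logp(sum=False)      # list of elemwise logp tensors, one per variable
logp_fn = model.compile_fn(elemwise)  # compile when numeric values are needed
logp_fn(model.initial_point())
```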
@@ -758,10 +758,10 @@ def dlogp(

Parameters
----------
vars: list of random variables or potential terms, optional
vars : list of random variables or potential terms, optional
Compute the gradient with respect to those variables. If None, use all
free and observed random variables, as well as potential terms in model.
jacobian:
jacobian : bool
Whether to include jacobian terms in logprob graph. Defaults to True.

Returns
@@ -797,10 +797,10 @@ def d2logp(

Parameters
----------
vars: list of random variables or potential terms, optional
vars : list of random variables or potential terms, optional
Compute the gradient with respect to those variables. If None, use all
free and observed random variables, as well as potential terms in model.
jacobian:
jacobian : bool
Whether to include jacobian terms in logprob graph. Defaults to True.

Returns
@@ -967,7 +967,7 @@ def add_coord(
name : str
Name of the dimension.
Forbidden: {"chain", "draw", "__sample__"}
values : optional, array-like
values : array_like, optional
Coordinate values or ``None`` (for auto-numbering).
If ``None`` is passed, a ``length`` must be specified.
mutable : bool
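A small sketch of coordinate registration, not part of the diff (the mutable flag is assumed to be accepted in this PyMC version):

```python
with pm.Model() as model:
    model.add_coord("city", ["Berlin", "Paris"], mutable=True)
    temp = pm.Normal("temp", dims="city")  # one entry per coordinate value
```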
@@ -1026,11 +1026,11 @@ def set_dim(self, name: str, new_length: int, coord_values: Optional[Sequence] =

Parameters
----------
name
name : str
Name of the dimension.
new_length
new_length : int
New length of the dimension.
coord_values
coord_values : array_like, optional
Optional sequence of coordinate values.
"""
if not isinstance(self.dim_lengths[name], ScalarSharedVariable):
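For example, resizing the hypothetical "city" dimension from the sketch above (only valid because it was created as mutable):

```python
model.set_dim("city", new_length=3, coord_values=["Berlin", "Paris", "Rome"])
```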
@@ -1090,7 +1090,7 @@ def set_data(
----------
name : str
Name of a shared variable in the model.
values : array-like
values : array_like
New values for the shared variable.
coords : dict, optional
New coordinate values for dimensions of the shared variable.
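A minimal sketch, assuming pm.MutableData is available in this PyMC version:

```python
with pm.Model() as model:
    x = pm.MutableData("x", [1.0, 2.0, 3.0])
    y = pm.Normal("y", mu=x, sigma=1.0)

model.set_data("x", [4.0, 5.0, 6.0])  # swap in new values without rebuilding the graph
```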
@@ -1218,14 +1218,14 @@ def register_rv(

Parameters
----------
rv_var: TensorVariable
name: str
rv_var : TensorVariable
name : str
Intended name for the model variable.
observed: array_like (optional)
observed : array_like, optional
Data values for observed variables.
total_size: scalar
total_size : scalar
upscales logp of variable with ``coef = total_size/var.shape[0]``
dims: tuple
dims : tuple
Dimension names for the variable.
transform
A transform for the random variable in log-likelihood space.
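register_rv is the low-level hook that distributions use internally; a hedged sketch of calling it directly:

```python
with pm.Model() as model:
    raw = pm.Normal.dist(0, 1)            # unnamed RV, not yet attached to the model
    x = model.register_rv(raw, name="x")  # now a named free variable of the model
```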
@@ -1292,16 +1292,19 @@ def make_obs_var(

Parameters
----------
rv_var
rv_var : TensorVariable
The random variable that is observed.
Its dimensionality must be compatible with the data already.
data
data : array_like
The observed data.
dims: tuple
dims : tuple
Dimension names for the variable.
transform
transform : Transform, optional
A transform for the random variable in log-likelihood space.

Returns
-------
TensorVariable
"""
name = rv_var.name
data = convert_observed_data(data).astype(rv_var.dtype)
@@ -1376,6 +1379,17 @@ def create_value_var(
observed data. That's why value variables are only referenced in
this branch of the conditional.

Parameters
----------
rv_var : TensorVariable
The random variable for which a value variable is created.
transform : Any
Transform applied when creating the value variable (e.g. a log transform for a positive-only variable).
value_var : Variable, optional
An existing variable to use as the value variable instead of creating a new one.

Returns
-------
TensorVariable
"""

# Make the value variable a transformed value variable,
@@ -1419,6 +1433,13 @@ def add_named_variable(self, var, dims: Optional[Tuple[Union[str, None], ...]] =

This can include several types of variables such as basic_RVs, Data, Deterministics,
and Potentials.

Parameters
----------
var : Variable
The named variable to register; its ``name`` attribute must be set.
dims : tuple of str, optional
Dimension names for the variable.

"""
if var.name is None:
raise ValueError("Variable is unnamed.")
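In practice this is usually reached via pm.Deterministic rather than called directly; a hedged sketch:

```python
import pytensor.tensor as pt

with pm.Model() as model:
    x = pm.Normal("x")
    det = pt.exp(x)
    det.name = "det"  # the variable must carry a name
    model.add_named_variable(det)
```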
@@ -1491,8 +1512,12 @@ def replace_rvs_by_values(

Parameters
----------
graphs
graphs : array_like
The graphs in which to perform the replacements.

Returns
-------
array_like
"""
return replace_rvs_by_values(
graphs,
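A sketch of the effect, using a throwaway model (not part of the diff):

```python
with pm.Model() as model:
    x = pm.Normal("x")
    y = x + 1.0

[value_graph] = model.replace_rvs_by_values([y])
# value_graph depends on x's value variable rather than on the random draw
```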
@@ -1513,9 +1538,9 @@ def compile_fn(

Parameters
----------
outs
outs : Variable or sequence of Variables
PyTensor variable or iterable of PyTensor variables.
inputs
inputs : sequence of Variables, optional
PyTensor input variables, defaults to pytensorf.inputvars(outs).
mode
PyTensor compilation mode, default=None.
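For illustration, compiling an arbitrary expression into a point function (the default point_fn=True behavior is assumed):

```python
with pm.Model() as model:
    x = pm.Normal("x")
    sq = x**2

sq_fn = model.compile_fn(sq)  # callable on a point dict by default
sq_fn({"x": 3.0})             # -> array(9.)
```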
@@ -1550,12 +1575,12 @@ def profile(self, outs, *, n=1000, point=None, profile=True, **kwargs):

Parameters
----------
outs: PyTensor variable or iterable of PyTensor variables
n: int, default 1000
outs : PyTensor variable or iterable of PyTensor variables
n : int, default 1000
Number of iterations to run
point: point
point : Point
Point to pass to the function
profile: True or ProfileStats
profile : True or ProfileStats
args, kwargs
Compilation args

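A typical invocation, not part of the diff:

```python
stats = model.profile(model.logp(), n=1000)  # time 1000 evaluations of the logp function
stats.summary()                              # per-Op timing breakdown
```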
@@ -1580,6 +1605,11 @@ def update_start_vals(self, a: Dict[str, np.ndarray], b: Dict[str, np.ndarray]):
Values specified for transformed variables in `a` will be recomputed
conditional on the values of `b` and stored in `b`.

Parameters
----------
a : dict
Dictionary of starting values whose transformed entries are recomputed.
b : dict
Dictionary of conditioning values; the recomputed values are stored here.
"""
raise FutureWarning(
"The `Model.update_start_vals` method was removed."
@@ -1667,10 +1697,10 @@ def point_logps(self, point=None, round_vals=2):

Parameters
----------
point: Point, optional
point : Point, optional
Point to be evaluated. If ``None``, then ``model.initial_point``
is used.
round_vals: int, default 2
round_vals : int, default 2
Number of decimals to round log-probabilities.

Returns
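For example:

```python
model.point_logps()              # logp of each variable at model.initial_point
model.point_logps(round_vals=4)  # same, rounded to four decimals
```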
@@ -1709,7 +1739,7 @@ def debug(

Parameters
----------
point : Point
point : Point, optional
Point at which model function should be evaluated
fn : str, default "logp"
Function to be used for debugging. Can be one of [logp, dlogp, random].
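For illustration (not part of the diff):

```python
model.debug()             # check each variable's logp at the initial point
model.debug(fn="random")  # check forward sampling instead
```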