Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
81 changes: 81 additions & 0 deletions src/pysatl_core/families/builtins/continuous/exponential.py
Original file line number Diff line number Diff line change
Expand Up @@ -223,6 +223,63 @@ def _support(_: Parametrization) -> ContinuousSupport:
"""Support of exponential distribution"""
return ContinuousSupport(left=0.0)

def _base_score(parameters: Parametrization, x: NumericArray) -> NumericArray:
    """
    Score (gradient of the log-PDF) in the base (rate) parametrization.

    For an exponential distribution with rate lambda > 0 the log-density is
    log f(x) = log(lambda) - lambda * x for x >= 0 and -inf otherwise, so

        d(log f)/d(lambda) = 1/lambda - x   (x >= 0).

    Where the density vanishes (x < 0) the score is undefined; zero is
    returned there for numerical stability.

    Parameters
    ----------
    parameters : Parametrization
        Base parametrization instance (_Rate) carrying the field ``lambda_``.
    x : NumericArray
        Points at which to evaluate the gradient.

    Returns
    -------
    NumericArray
        Array of shape (..., 1); the trailing axis holds d(log f)/d(lambda_).
    """
    rate = cast(_Rate, parameters).lambda_
    support_mask = x >= 0
    gradient = np.where(support_mask, 1.0 / rate - x, 0.0)
    # Append a trailing parameter axis so the result has shape (..., 1).
    return gradient[..., np.newaxis]

def score(parameters: Parametrization, x: NumericArray) -> NumericArray:
    """
    Score (gradient of the log-PDF) for any exponential parametrization.

    The base ("rate") parametrization is evaluated directly; every other
    parametrization is first mapped to the base representation, the base
    score is computed there, and the result is pushed through the
    parametrization's ``gradient_transform``.

    Parameters
    ----------
    parameters : Parametrization
        Parametrization instance (rate or scale).
    x : NumericArray
        Points at which the gradient is evaluated.

    Returns
    -------
    NumericArray
        Gradient with respect to the parameters of the given parametrization.
        Shape is (..., 1) for both rate and scale.
    """
    points = np.atleast_1d(x)
    if parameters.name == "rate":
        return _base_score(parameters, points)
    in_base = parameters.transform_to_base_parametrization()
    return parameters.gradient_transform(_base_score(in_base, points))

Exponential = ParametricFamily(
name=FamilyName.EXPONENTIAL,
distr_type=UnivariateContinuous,
Expand All @@ -239,6 +296,7 @@ def _support(_: Parametrization) -> ContinuousSupport:
CharacteristicName.KURT: kurt_func,
},
support_by_parametrization=_support,
score=score,
)
Exponential.__doc__ = EXPONENTIAL_DOC

Expand Down Expand Up @@ -289,4 +347,27 @@ def transform_to_base_parametrization(self) -> Parametrization:
"""
return _Rate(lambda_=1.0 / self.beta)

def gradient_transform(self, grad_base: NumericArray) -> NumericArray:
"""
Transform gradient from base parameter (rate λ) to scale β = 1/λ.

The transformation is: β = 1/λ.
Derivative: dβ/dλ = -1/λ².
Hence: grad_β = grad_λ * (-1/λ²) = -grad_λ / λ².

Parameters
----------
grad_base : NumericArray
Gradient with respect to λ, shape (..., 1).

Returns
-------
NumericArray
Gradient with respect to β, shape (..., 1).
"""
lam = 1.0 / self.beta # λ = 1/β
grad_lam = grad_base[..., 0]
grad_beta = -grad_lam / (lam * lam)
return grad_beta[..., np.newaxis].astype(np.float64)

ParametricFamilyRegister.register(Exponential)
143 changes: 142 additions & 1 deletion src/pysatl_core/families/builtins/continuous/normal.py
Original file line number Diff line number Diff line change
Expand Up @@ -232,6 +232,67 @@ def _support(_: Parametrization) -> ContinuousSupport:
"""Support of normal distribution"""
return ContinuousSupport()

def _base_score(parameters: Parametrization, x: NumericArray) -> NumericArray:
    """
    Score (gradient of the log-PDF) in the base (mu, sigma) parametrization.

    With z = (x - mu) / sigma, the normal log-density

        log f(x) = -log(sigma) - 0.5*log(2*pi) - z^2 / 2

    has partial derivatives

        d(log f)/d(mu)    = z / sigma
        d(log f)/d(sigma) = (z^2 - 1) / sigma.

    Parameters
    ----------
    parameters : Parametrization
        Base parametrization instance (must be _MeanStd).
    x : NumericArray
        Points at which the gradient is evaluated.

    Returns
    -------
    NumericArray
        Array of shape (..., 2); the trailing axis holds
        [d(log f)/d(mu), d(log f)/d(sigma)].
    """
    base = cast(_MeanStd, parameters)
    standardized = (x - base.mu) / base.sigma
    d_mu = standardized / base.sigma
    d_sigma = (standardized * standardized - 1) / base.sigma
    return np.stack([d_mu, d_sigma], axis=-1)

def score(parameters: Parametrization, x: NumericArray) -> NumericArray:
    """
    Score (gradient of the log-PDF) for any normal parametrization.

    The base ("meanStd") parametrization is evaluated directly; every other
    parametrization is first mapped to the base representation, the base
    score is computed there, and the result is pushed through the
    parametrization's ``gradient_transform``.

    Parameters
    ----------
    parameters : Parametrization
        Parametrization instance (any of the normal's parametrizations).
    x : NumericArray
        Points at which the gradient is evaluated.

    Returns
    -------
    NumericArray
        Gradient with respect to the parameters of the given parametrization.
        Shape is (..., d) where d is the number of parameters of that class.
    """
    points = np.atleast_1d(x)
    if parameters.name == "meanStd":
        return _base_score(parameters, points)
    in_base = parameters.transform_to_base_parametrization()
    return parameters.gradient_transform(_base_score(in_base, points))

Normal = ParametricFamily(
name=FamilyName.NORMAL,
distr_type=UnivariateContinuous,
Expand All @@ -248,6 +309,7 @@ def _support(_: Parametrization) -> ContinuousSupport:
CharacteristicName.KURT: kurt_func,
},
support_by_parametrization=_support,
score=score,
)
Normal.__doc__ = NORMAL_DOC

Expand Down Expand Up @@ -305,6 +367,29 @@ def transform_to_base_parametrization(self) -> Parametrization:
sigma = math.sqrt(self.var)
return _MeanStd(mu=self.mu, sigma=sigma)

def gradient_transform(self, grad_base: NumericArray) -> NumericArray:
"""
Transform gradient from base parameters (mu, sigma) to (mu, var).

The transformation is: var = sigma^2 → d(var) = 2 sigma d(sigma).

Parameters
----------
grad_base : NumericArray
Gradient with respect to (mu, sigma), shape (..., 2).

Returns
-------
NumericArray
Gradient with respect to (mu, var), shape (..., 2).
"""
sigma = np.sqrt(self.var)

grad_mu = grad_base[..., 0]
grad_sigma = grad_base[..., 1]
grad_var = grad_sigma / (2 * sigma)
return np.stack([grad_mu, grad_var], axis=-1)

@parametrization(family=Normal, name="meanPrec")
class _MeanPrec(Parametrization):
"""
Expand Down Expand Up @@ -338,6 +423,30 @@ def transform_to_base_parametrization(self) -> Parametrization:
sigma = math.sqrt(1 / self.tau)
return _MeanStd(mu=self.mu, sigma=sigma)

def gradient_transform(self, grad_base: NumericArray) -> NumericArray:
"""
Transform gradient from base parameters (mu, sigma) to (mu, tau).

The transformation is: tau = 1/sigma^2 → d(tau) = -2 / sigma^3 d(sigma).

Parameters
----------
grad_base : NumericArray
Gradient with respect to (mu, sigma), shape (..., 2).

Returns
-------
NumericArray
Gradient with respect to (mu, tau), shape (..., 2).
"""

sigma = 1.0 / np.sqrt(self.tau)

grad_mu = grad_base[..., 0]
grad_sigma = grad_base[..., 1]
grad_tau = -grad_sigma * sigma**3 / 2.0
return np.stack([grad_mu, grad_tau], axis=-1)

@parametrization(family=Normal, name="exponential")
class _Exp(Parametrization):
"""
Expand All @@ -363,7 +472,7 @@ def c(self) -> float:
Returns
-------
float
Normalization constant
Normalization constant
"""
return (self.b**2) / (4 * self.a) - (1 / 2) * math.log(math.pi / (-self.a))

Expand All @@ -384,4 +493,36 @@ def transform_to_base_parametrization(self) -> Parametrization:
sigma = math.sqrt(-1 / (2 * self.a))
return _MeanStd(mu=mu, sigma=sigma)

def gradient_transform(self, grad_base: NumericArray) -> NumericArray:
"""
Transform gradient from base parameters (mu, sigma) to (a, b).

The transformation is:
mu = -b/(2a), sigma = sqrt(-1/(2a)).
The Jacobian is applied via the chain rule.

Parameters
----------
grad_base : NumericArray
Gradient with respect to (mu, sigma), shape (..., 2).

Returns
-------
NumericArray
Gradient with respect to (a, b), shape (..., 2).
"""
a = self.a
b = self.b

dmu_da = b / (2 * a * a)
dmu_db = -1 / (2 * a)
dsigma_da = 1 / (2 * np.sqrt(-2 * a**3))
dsigma_db = 0.0

grad_mu = grad_base[..., 0]
grad_sigma = grad_base[..., 1]
grad_a = grad_mu * dmu_da + grad_sigma * dsigma_da
grad_b = grad_mu * dmu_db + grad_sigma * dsigma_db
return np.stack([grad_a, grad_b], axis=-1)

ParametricFamilyRegister.register(Normal)
Loading
Loading