Skip to content

Commit

Permalink
Merge branch 'sktime:main' into main
Browse files Browse the repository at this point in the history
  • Loading branch information
bhavikar04 authored Jun 15, 2024
2 parents b7d044c + 70252b5 commit 8e229d9
Show file tree
Hide file tree
Showing 7 changed files with 190 additions and 36 deletions.
2 changes: 2 additions & 0 deletions docs/source/api_reference/distributions.rst
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,11 @@ Continuous support
Fisk
Gamma
HalfCauchy
HalfLogistic
HalfNormal
Laplace
Logistic
LogLaplace
Normal
TDistribution
Weibull
Expand Down
4 changes: 4 additions & 0 deletions skpro/distributions/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,12 @@
"Fisk",
"Gamma",
"HalfCauchy",
"HalfLogistic",
"HalfNormal",
"IID",
"Laplace",
"Logistic",
"LogLaplace",
"LogNormal",
"Mixture",
"Normal",
Expand All @@ -42,9 +44,11 @@
from skpro.distributions.fisk import Fisk
from skpro.distributions.gamma import Gamma
from skpro.distributions.halfcauchy import HalfCauchy
from skpro.distributions.halflogistic import HalfLogistic
from skpro.distributions.halfnormal import HalfNormal
from skpro.distributions.laplace import Laplace
from skpro.distributions.logistic import Logistic
from skpro.distributions.loglaplace import LogLaplace
from skpro.distributions.lognormal import LogNormal
from skpro.distributions.mixture import Mixture
from skpro.distributions.multivariate_normal import MultivariateNormal
Expand Down
80 changes: 80 additions & 0 deletions skpro/distributions/halflogistic.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
# copyright: skpro developers, BSD-3-Clause License (see LICENSE file)
"""Half-Logistic probability distribution."""

__author__ = ["SaiRevanth25"]

import pandas as pd
from scipy.stats import halflogistic, rv_continuous

from skpro.distributions.adapters.scipy import _ScipyAdapter


class HalfLogistic(_ScipyAdapter):
    r"""Half-Logistic distribution.

    This distribution is univariate, without correlation between dimensions
    for the array-valued case.

    The half-logistic distribution is a continuous probability distribution
    derived from the logistic distribution by taking only the positive half.
    It is particularly useful in reliability analysis, lifetime modeling, and
    other applications where non-negative values are required.

    The half-logistic distribution is parametrized by the scale parameter
    :math:`\beta`, such that the pdf is

    .. math::

        f(x) = \frac{2 \exp\left(-\frac{x}{\beta}\right)}
        {\beta \left(1 + \exp\left(-\frac{x}{\beta}\right)\right)^2},
        \quad x > 0,

    and :math:`f(x) = 0` otherwise.

    The scale parameter :math:`\beta` is represented by the parameter ``beta``.

    Parameters
    ----------
    beta : float or array of float (1D or 2D), must be positive
        scale parameter of the half-logistic distribution
    index : pd.Index, optional, default = RangeIndex
    columns : pd.Index, optional, default = RangeIndex

    Examples
    --------
    >>> from skpro.distributions.halflogistic import HalfLogistic
    >>> hl = HalfLogistic(beta=1)
    """

    _tags = {
        "capabilities:approx": ["pdfnorm"],
        "capabilities:exact": ["mean", "var", "pdf", "log_pdf", "cdf", "ppf"],
        "distr:measuretype": "continuous",
        "distr:paramtype": "parametric",
        "broadcast_init": "on",
    }

    def __init__(self, beta, index=None, columns=None):
        self.beta = beta

        super().__init__(index=index, columns=columns)

    def _get_scipy_object(self) -> rv_continuous:
        """Return the scipy distribution object adapted by this class."""
        return halflogistic

    def _get_scipy_param(self):
        """Return positional and keyword parameters for the scipy object.

        ``scipy.stats.halflogistic`` has no shape parameters, so its first
        positional argument after ``x`` is ``loc``, not ``scale``. ``beta``
        is therefore passed by keyword as ``scale``; passing it positionally
        would shift the distribution instead of scaling it, contradicting
        the pdf documented on this class.
        """
        beta = self._bc_params["beta"]
        return [], {"scale": beta}

    @classmethod
    def get_test_params(cls, parameter_set="default"):
        """Return testing parameter settings for the estimator.

        Parameters
        ----------
        parameter_set : str, default="default"
            Name of the parameter set to return; only one set is defined.

        Returns
        -------
        list of dict
            Keyword argument dicts, each valid to construct a test instance.
        """
        # array case examples
        params1 = {"beta": [[1, 2], [3, 4]]}
        params2 = {
            "beta": 1,
            "index": pd.Index([1, 2, 5]),
            "columns": pd.Index(["a", "b"]),
        }
        # scalar case example
        params3 = {"beta": 2}
        return [params1, params2, params3]
79 changes: 79 additions & 0 deletions skpro/distributions/loglaplace.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
# copyright: skpro developers, BSD-3-Clause License (see LICENSE file)
"""Log-Laplace probability distribution."""

__author__ = ["SaiRevanth25"]

import pandas as pd
from scipy.stats import loglaplace, rv_continuous

from skpro.distributions.adapters.scipy import _ScipyAdapter


class LogLaplace(_ScipyAdapter):
    r"""Log-Laplace distribution.

    This distribution is univariate, without correlation between dimensions
    for the array-valued case.

    The log-Laplace distribution is a continuous probability distribution
    obtained by taking the logarithm of the Laplace distribution, commonly
    used in finance and hydrology due to its heavy tails and asymmetry.

    The log-Laplace distribution is parametrized by the shape parameter
    :math:`c`, such that the pdf is

    .. math:: f(x) = \frac{c}{2} x^{c-1}, \quad 0 < x < 1,

    and

    .. math:: f(x) = \frac{c}{2} x^{-c-1}, \quad x \geq 1

    The shape parameter :math:`c` is represented by the constructor
    parameter ``scale`` (the name is kept for backward compatibility).

    Parameters
    ----------
    scale : float or array of float (1D or 2D), must be positive
        shape parameter :math:`c` of the log-Laplace distribution
    index : pd.Index, optional, default = RangeIndex
    columns : pd.Index, optional, default = RangeIndex

    Examples
    --------
    >>> from skpro.distributions.loglaplace import LogLaplace
    >>> ll = LogLaplace(scale=1)
    """

    _tags = {
        "capabilities:approx": ["pdfnorm"],
        "capabilities:exact": ["mean", "var", "pdf", "log_pdf", "cdf", "ppf"],
        "distr:measuretype": "continuous",
        "distr:paramtype": "parametric",
        "broadcast_init": "on",
    }

    def __init__(self, scale, index=None, columns=None):
        self.scale = scale

        super().__init__(index=index, columns=columns)

    def _get_scipy_object(self) -> rv_continuous:
        """Return the scipy distribution object adapted by this class."""
        return loglaplace

    def _get_scipy_param(self):
        """Return positional and keyword parameters for the scipy object.

        ``scipy.stats.loglaplace`` takes the shape parameter ``c`` as its
        first positional argument, so ``scale`` is forwarded positionally
        as ``c`` (matching the pdf documented on this class).
        """
        scale = self._bc_params["scale"]
        return [scale], {}

    @classmethod
    def get_test_params(cls, parameter_set="default"):
        """Return testing parameter settings for the estimator.

        Parameters
        ----------
        parameter_set : str, default="default"
            Name of the parameter set to return; only one set is defined.

        Returns
        -------
        list of dict
            Keyword argument dicts, each valid to construct a test instance.
        """
        # array case examples
        params1 = {"scale": [[1, 2], [3, 4]]}
        params2 = {
            "scale": 1,
            "index": pd.Index([1, 2, 5]),
            "columns": pd.Index(["a", "b"]),
        }
        # scalar case example
        params3 = {"scale": 2}
        return [params1, params2, params3]
2 changes: 1 addition & 1 deletion skpro/distributions/tests/test_all_distrs.py
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ def test_methods_p(self, object_instance, method, shuffled):
else:
p = np_unif

res = getattr(object_instance, method)(p)
res = getattr(d, method)(p)

_check_output_format(res, d, method)

Expand Down
46 changes: 18 additions & 28 deletions skpro/model_selection/_tuning.py
Original file line number Diff line number Diff line change
Expand Up @@ -488,8 +488,7 @@ def get_test_params(cls, parameter_set="default"):

from skpro.metrics import CRPS, PinballLoss
from skpro.regression.residual import ResidualDouble
from skpro.survival.coxph import CoxPH
from skpro.utils.validation._dependencies import _check_estimator_deps
from skpro.survival.compose._reduce_cond_unc import ConditionUncensored

linreg1 = LinearRegression()
linreg2 = LinearRegression(fit_intercept=False)
Expand All @@ -510,18 +509,14 @@ def get_test_params(cls, parameter_set="default"):
"error_score": "raise",
}

params = [param1, param2]

# testing with survival predictor
if _check_estimator_deps(CoxPH, severity="none"):
param3 = {
"estimator": CoxPH(alpha=0.05),
"cv": KFold(n_splits=4),
"param_grid": {"method": ["lpl", "elastic_net"]},
"scoring": PinballLoss(),
"error_score": "raise",
}
params.append(param3)
params3 = {
"estimator": ConditionUncensored(ResidualDouble(LinearRegression())),
"cv": KFold(n_splits=4),
"param_grid": {"estimator__fit_intercept": [True, False]},
"scoring": PinballLoss(),
"error_score": "raise",
}
params = [param1, param2, params3]

return params

Expand Down Expand Up @@ -747,8 +742,7 @@ def get_test_params(cls, parameter_set="default"):

from skpro.metrics import CRPS, PinballLoss
from skpro.regression.residual import ResidualDouble
from skpro.survival.coxph import CoxPH
from skpro.utils.validation._dependencies import _check_estimator_deps
from skpro.survival.compose._reduce_cond_unc import ConditionUncensored

linreg1 = LinearRegression()
linreg2 = LinearRegression(fit_intercept=False)
Expand All @@ -769,17 +763,13 @@ def get_test_params(cls, parameter_set="default"):
"error_score": "raise",
}

params = [param1, param2]

# testing with survival predictor
if _check_estimator_deps(CoxPH, severity="none"):
param3 = {
"estimator": CoxPH(alpha=0.05),
"cv": KFold(n_splits=4),
"param_distributions": {"method": ["lpl", "elastic_net"]},
"scoring": PinballLoss(),
"error_score": "raise",
}
params += [param3]
params3 = {
"estimator": ConditionUncensored(ResidualDouble(LinearRegression())),
"cv": KFold(n_splits=4),
"param_distributions": {"estimator__fit_intercept": [True, False]},
"scoring": PinballLoss(),
"error_score": "raise",
}
params = [param1, param2, params3]

return params
13 changes: 6 additions & 7 deletions skpro/regression/linear/_sklearn.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@ class ARDRegression(_DelegateWithFittedParamForwarding):
Parameters
----------
max_iter : int, default=None
Maximum number of iterations. If `None`, it corresponds to `max_iter=300`.
max_iter : int, default=300
Maximum number of iterations.
tol : float, default=1e-3
Stop the algorithm if w has converged.
Expand Down Expand Up @@ -90,7 +90,7 @@ class ARDRegression(_DelegateWithFittedParamForwarding):

def __init__(
self,
max_iter=None,
max_iter=300,
tol=1e-3,
alpha_1=1e-6,
alpha_2=1e-6,
Expand Down Expand Up @@ -188,10 +188,9 @@ class BayesianRidge(_DelegateWithFittedParamForwarding):
Parameters
----------
max_iter : int, default=None
max_iter : int, default=300
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion. If `None`, it
corresponds to `max_iter=300`.
stopping independently of any early stopping criterion.
tol : float, default=1e-3
Stop the algorithm if w has converged.
Expand Down Expand Up @@ -272,7 +271,7 @@ class BayesianRidge(_DelegateWithFittedParamForwarding):

def __init__(
self,
max_iter=None,
max_iter=300,
tol=1e-3,
alpha_1=1e-6,
alpha_2=1e-6,
Expand Down

0 comments on commit 8e229d9

Please sign in to comment.