diff --git a/docs/source/_templates/custom-module-template.rst b/docs/source/_templates/custom-module-template.rst
index 86174798..508b70b1 100644
--- a/docs/source/_templates/custom-module-template.rst
+++ b/docs/source/_templates/custom-module-template.rst
@@ -53,14 +53,14 @@
 {% endblock %}
 
 {% block modules %}
-{% if modules %}
+{% if all_modules %}
 .. rubric:: Modules
 
 .. autosummary::
    :toctree:
    :template: custom-module-template.rst
    :recursive:
-{% for item in modules %}
+{% for item in all_modules %}
    {{ item }}
 {%- endfor %}
 {% endif %}
diff --git a/docs/source/conf.py b/docs/source/conf.py
index f6ce9562..ab45288e 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -176,3 +176,7 @@ def setup(app: Sphinx):
 intersphinx_mapping = {
     "sklearn": ("https://scikit-learn.org/stable/", None),
 }
+
+suppress_warnings = [
+    'autosummary.import_cycle',
+]
diff --git a/pytorch_forecasting/metrics/_mqf2_utils.py b/pytorch_forecasting/metrics/_mqf2_utils.py
index 927ee56b..d5080ee2 100644
--- a/pytorch_forecasting/metrics/_mqf2_utils.py
+++ b/pytorch_forecasting/metrics/_mqf2_utils.py
@@ -12,12 +12,13 @@ class DeepConvexNet(DeepConvexFlow):
     r"""
     Class that takes a partially input convex neural network (picnn)
     as input and equips it with functions of logdet
-    computation (both estimation and exact computation)
+    computation (both estimation and exact computation).
     This class is based on DeepConvexFlow of the CP-Flow repo
     (https://github.com/CW-Huang/CP-Flow)
     For details of the logdet estimator, see
     ``Convex potential flows: Universal probability
     distributions with optimal transport and convex optimization``
+
     Parameters
     ----------
     picnn
@@ -94,6 +95,7 @@ class SequentialNet(SequentialFlow):
     layers and provides energy score computation
     This class is based on SequentialFlow of the CP-Flow repo
     (https://github.com/CW-Huang/CP-Flow)
+
     Parameters
     ----------
     networks
@@ -116,6 +118,7 @@ def es_sample(self, hidden_state: torch.Tensor, dimension: int) -> torch.Tensor:
         """
         Auxiliary function for energy score computation
         Drawing samples conditioned on the hidden state
+
         Parameters
         ----------
         hidden_state
@@ -159,6 +162,7 @@ def energy_score(
         h_i is the hidden state associated with z_i,
         and es_num_samples is the number of samples drawn
         for each of w, w', w'' in energy score approximation
+
         Parameters
         ----------
         z
@@ -224,6 +228,7 @@ class MQF2Distribution(Distribution):
     Distribution class for the model MQF2 proposed in the paper
     ``Multivariate Quantile Function Forecaster``
     by Kan, Aubet, Januschowski, Park, Benidis, Ruthotto, Gasthaus
+
     Parameters
     ----------
     picnn
@@ -290,6 +295,7 @@ def stack_sliding_view(self, z: torch.Tensor) -> torch.Tensor:
         over the observations z
         Then, reshapes the observations into a 2-dimensional
         tensor for further computation
+
         Parameters
         ----------
         z
@@ -317,6 +323,7 @@ def log_prob(self, z: torch.Tensor) -> torch.Tensor:
         """
         Computes the log likelihood log(g(z)) + logdet(dg(z)/dz),
         where g is the gradient of the picnn
+
         Parameters
         ----------
         z
@@ -346,6 +353,7 @@ def energy_score(self, z: torch.Tensor) -> torch.Tensor:
         h_i is the hidden state associated with z_i,
         and es_num_samples is the number of samples drawn
         for each of w, w', w'' in energy score approximation
+
         Parameters
         ----------
         z
@@ -370,6 +378,7 @@ def energy_score(self, z: torch.Tensor) -> torch.Tensor:
     def rsample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor:
         """
         Generates the sample paths
+
         Parameters
         ----------
         sample_shape
@@ -377,7 +386,7 @@ def rsample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor:
         Returns
         -------
         sample_paths
-            Tesnor of shape (batch_size, *sample_shape, prediction_length)
+            Tensor of shape (batch_size, * sample_shape, prediction_length)
         """
 
         numel_batch = self.numel_batch
@@ -407,6 +416,7 @@ def rsample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor:
     def quantile(self, alpha: torch.Tensor, hidden_state: Optional[torch.Tensor] = None) -> torch.Tensor:
         """
         Generates the predicted paths associated with the quantile levels alpha
+
         Parameters
         ----------
         alpha
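
Nearly all of the _mqf2_utils.py hunks above make the same mechanical fix: a blank line is inserted between the docstring summary and the numpydoc "Parameters" header, which Sphinx/numpydoc needs in order to recognize the section boundary; without it the header can be folded into the summary paragraph and trigger build warnings. A minimal sketch of the resulting layout, using a hypothetical toy function (toy_sample and its parameters are not taken from the module):

import torch


def toy_sample(hidden_state: torch.Tensor, num_samples: int) -> torch.Tensor:
    """
    Draw samples conditioned on the hidden state

    Parameters
    ----------
    hidden_state
        Conditioning tensor of shape (batch_size, hidden_size)
    num_samples
        Number of samples to draw per batch element

    Returns
    -------
    torch.Tensor
        Tensor of shape (batch_size, num_samples, hidden_size)
    """
    # Toy body only; the real sampling logic lives in
    # pytorch_forecasting/metrics/_mqf2_utils.py.
    batch_size, hidden_size = hidden_state.shape
    noise = torch.randn(batch_size, num_samples, hidden_size)
    return hidden_state.unsqueeze(1) + noise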