From b8e33a9e75572c436d0213e3a9dfac496b8210e5 Mon Sep 17 00:00:00 2001
From: nyLiao <39255546+nyLiao@users.noreply.github.com>
Date: Sun, 14 Jul 2024 19:23:32 +0800
Subject: [PATCH] Format conv docs
---
README.md | 5 +-
benchmark/dataset_process/linkx.py | 4 +-
benchmark/dataset_process/yandex.py | 4 +-
docs/source/_tutorial/configure.rst | 4 +-
docs/source/_tutorial/installation.rst | 26 ++++----
docs/source/_tutorial/reproduce.rst | 22 +++----
docs/source/conf.py | 2 +-
pyg_spectral/nn/conv/acm_conv.py | 25 +++++---
pyg_spectral/nn/conv/adj_conv.py | 26 ++++----
pyg_spectral/nn/conv/adji_conv.py | 89 ++++++++++++++------------
pyg_spectral/nn/conv/base_mp.py | 50 +++++++--------
pyg_spectral/nn/conv/bern_conv.py | 18 +++---
pyg_spectral/nn/conv/cheb_conv.py | 15 +++--
pyg_spectral/nn/conv/chebii_conv.py | 15 +++--
pyg_spectral/nn/conv/clenshaw_conv.py | 23 +++----
pyg_spectral/nn/conv/favard_conv.py | 19 +++---
pyg_spectral/nn/conv/horner_conv.py | 21 +++---
pyg_spectral/nn/conv/jacobi_conv.py | 13 ++--
pyg_spectral/nn/conv/lapi_conv.py | 13 ++--
pyg_spectral/nn/conv/legendre_conv.py | 17 ++---
pyg_spectral/nn/conv/optbasis_conv.py | 13 ++--
pyg_spectral/transforms/gen_norm.py | 9 ++-
setup.py | 2 +-
23 files changed, 231 insertions(+), 204 deletions(-)
diff --git a/README.md b/README.md
index 1572294..3961638 100755
--- a/README.md
+++ b/README.md
@@ -3,8 +3,9 @@
diff --git a/benchmark/dataset_process/linkx.py b/benchmark/dataset_process/linkx.py
index ae0d24f..d57fc65 100644
--- a/benchmark/dataset_process/linkx.py
+++ b/benchmark/dataset_process/linkx.py
@@ -19,8 +19,8 @@
class LINKX(InMemoryDataset):
r"""
- paper: Large Scale Learning on Non-Homophilous Graphs: New Benchmarks and Strong Simple Methods
- ref: https://github.com/CUAI/Non-Homophily-Large-Scale/
+ :paper: Large Scale Learning on Non-Homophilous Graphs: New Benchmarks and Strong Simple Methods
+ :ref: https://github.com/CUAI/Non-Homophily-Large-Scale/
"""
_dataset_drive_url = {
'snap-patents.mat' : '1ldh23TSY1PwXia6dU0MYcpyEgX-w3Hia',
diff --git a/benchmark/dataset_process/yandex.py b/benchmark/dataset_process/yandex.py
index 5ac61a8..685411c 100644
--- a/benchmark/dataset_process/yandex.py
+++ b/benchmark/dataset_process/yandex.py
@@ -10,8 +10,8 @@
class Yandex(InMemoryDataset):
r"""
- paper: A critical look at the evaluation of GNNs under heterophily: are we really making progress?
- ref: https://github.com/yandex-research/heterophilous-graphs
+ :paper: A critical look at the evaluation of GNNs under heterophily: are we really making progress?
+ :ref: https://github.com/yandex-research/heterophilous-graphs
"""
def __init__(
self,
diff --git a/docs/source/_tutorial/configure.rst b/docs/source/_tutorial/configure.rst
index a84e046..62e61b2 100644
--- a/docs/source/_tutorial/configure.rst
+++ b/docs/source/_tutorial/configure.rst
@@ -6,9 +6,9 @@ Experiment Parameters
Refer to the help text by:
-.. code-block:: bash
+.. code-block:: console
- python benchmark/run_single.py --help
+ $ python benchmark/run_single.py --help
--help show this help message and exit
diff --git a/docs/source/_tutorial/installation.rst b/docs/source/_tutorial/installation.rst
index 96438cd..6e72ebe 100644
--- a/docs/source/_tutorial/installation.rst
+++ b/docs/source/_tutorial/installation.rst
@@ -3,10 +3,10 @@ Installation
This package can be easily installed by running `pip `__ at package root path:
-.. code-block:: bash
+.. code-block:: console
- pip install -r requirements.txt
- pip install -e .[benchmark]
+ $ pip install -r requirements.txt
+ $ pip install -e .[benchmark]
The installation script already covers the following core dependencies:
@@ -26,41 +26,41 @@ Only ``pyg_spectral`` Package
Install without any options:
-.. code-block:: bash
+.. code-block:: console
- pip install -e .
+ $ pip install -e .
Benchmark Experiments
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Install with ``[benchmark]`` option:
-.. code-block:: bash
+.. code-block:: console
- pip install -e .[benchmark]
+ $ pip install -e .[benchmark]
Docs Development
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Install with ``[docs]`` option:
-.. code-block:: bash
+.. code-block:: console
- pip install -e .[docs]
+ $ pip install -e .[docs]
C++ Backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1. Ensure C++ 11 is installed.
-.. code-block:: bash
+.. code-block:: console
- gcc --version
+ $ gcc --version
2. Install with ``[cpp]`` option and environment variable ``PSFLAG_CPP=1``:
-.. code-block:: bash
+.. code-block:: console
- export PSFLAG_CPP=1; pip install -e .[cpp]
+ $ export PSFLAG_CPP=1; pip install -e .[cpp]
.. [1] Please refer to the `official guide `__ if a specific CUDA version is required for PyTorch.
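After any of the installs above, a minimal smoke test (assuming only the core dependencies) is simply importing both packages:

.. code-block:: python

    import torch_geometric
    import pyg_spectral  # raises ImportError if the editable install failed
    print(torch_geometric.__version__)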
diff --git a/docs/source/_tutorial/reproduce.rst b/docs/source/_tutorial/reproduce.rst
index b498114..7001a70 100644
--- a/docs/source/_tutorial/reproduce.rst
+++ b/docs/source/_tutorial/reproduce.rst
@@ -8,38 +8,38 @@ Datasets will be automatically downloaded and processed by the code.
**Run full-batch models** (*Table 2, 8, 9*):
-.. code-block:: bash
+.. code-block:: console
- cd benchmark
- bash scripts/runfb.sh
+ $ cd benchmark
+ $ bash scripts/runfb.sh
**Run mini-batch models** (*Table 3, 10, 11*):
-.. code-block:: bash
+.. code-block:: console
- bash scripts/runmb.sh
+ $ bash scripts/runmb.sh
Additional Experiments
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**Effect of graph normalization** (*Figure 3, 9*):
-.. code-block:: bash
+.. code-block:: console
- bash scripts/eval_degree.sh
+ $ bash scripts/eval_degree.sh
Figures can be plotted by: `benchmark/notebook/fig_degng.ipynb `_.
**Effect of propagation hops** (*Figure 7, 8*):
-.. code-block:: bash
+.. code-block:: console
- bash scripts/eval_hop.sh
+ $ bash scripts/eval_hop.sh
Figures can be plotted by: `benchmark/notebook/fig_hop.ipynb `_.
**Frequency response** (*Table 12*):
-.. code-block:: bash
+.. code-block:: console
- bash scripts/exp_filter.sh
+ $ bash scripts/exp_filter.sh
diff --git a/docs/source/conf.py b/docs/source/conf.py
index faa8aa3..d737290 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -65,7 +65,7 @@
napoleon_preprocess_types = True
autodoc_type_aliases = {
"Tensor": ":external:class:`Tensor `",
- # "SparseTensor": ":class:`torch_sparse.SparseTensor`",
+ "SparseTensor": ":external:func:`SparseTensor `",
"pyg": "torch_geometric",
}
napoleon_type_aliases = autodoc_type_aliases
diff --git a/pyg_spectral/nn/conv/acm_conv.py b/pyg_spectral/nn/conv/acm_conv.py
index 42116ea..10ea1cc 100644
--- a/pyg_spectral/nn/conv/acm_conv.py
+++ b/pyg_spectral/nn/conv/acm_conv.py
@@ -13,16 +13,15 @@
class ACMConv(BaseMP):
r"""Convolutional layer of FBGNN & ACMGNN(I & II).
- paper: Revisiting Heterophily For Graph Neural Networks
-
- paper: Complete the Missing Half: Augmenting Aggregation Filtering with Diversification for Graph Convolutional Networks
-
- ref: https://github.com/SitaoLuan/ACM-GNN/blob/main/ACM-Geometric/layers.py
+ :paper: Revisiting Heterophily For Graph Neural Networks
+ :paper: Complete the Missing Half: Augmenting Aggregation Filtering with Diversification for Graph Convolutional Networks
+ :ref: https://github.com/SitaoLuan/ACM-GNN/blob/main/ACM-Geometric/layers.py
Args:
- num_hops (int), hop (int): total and current number of propagation hops.
- hop=0 explicitly handles x without propagation.
- alpha (int): variant I (propagate first) or II (act first)
+        alpha: variant I (propagate first) or II (act first).
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
+ ``hop=0`` explicitly handles :obj:`x` without propagation.
cached: whether cache the propagation matrix.
"""
supports_batch: bool = False
@@ -41,7 +40,9 @@ def __init__(self,
self.out_channels = out_channels
def _init_with_theta(self):
- """theta (nn.ModuleDict): Linear transformation for each scheme.
+ r"""
+ Attributes:
+ theta (torch.nn.ModuleDict): Linear transformation for each scheme.
"""
self.schemes = self.theta.keys()
self.n_scheme = len(self.schemes)
@@ -72,6 +73,10 @@ def _get_convolute_mat(self, x: Tensor, edge_index: Adj) -> dict:
return {'out': x}
def _forward_theta(self, x, scheme):
+ r"""
+ Attributes:
+ theta (torch.nn.ModuleDict): Linear transformation for each scheme.
+ """
if callable(self.theta[scheme]):
return self.theta[scheme](x)
return self.theta[scheme] * x
@@ -83,7 +88,7 @@ def forward(self,
) -> dict:
r"""
Returns:
- out (:math:`(|\mathcal{V}|, F)` Tensor): current propagation result
+ out (Tensor): current propagation result (shape: :math:`(|\mathcal{V}|, F)`)
prop_0, prop_1 (SparseTensor): propagation matrices
"""
h, a = {}, {}
diff --git a/pyg_spectral/nn/conv/adj_conv.py b/pyg_spectral/nn/conv/adj_conv.py
index 231744d..0c76932 100644
--- a/pyg_spectral/nn/conv/adj_conv.py
+++ b/pyg_spectral/nn/conv/adj_conv.py
@@ -8,10 +8,11 @@ class AdjConv(BaseMP):
r"""Linear filter using the normalized adjacency matrix for propagation.
Args:
- alpha (float): additional scaling for self-loop in adjacency matrix
- :math:`\mathbf{A} + \alpha\mathbf{I}`, i.e. `improved` in PyG GCNConv.
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+ alpha: additional scaling for self-loop in adjacency matrix
+ :math:`\mathbf{A} + \alpha\mathbf{I}`, i.e. :obj:`improved` in
+ :class:`torch_geometric.nn.conv.GCNConv`.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
def __init__(self,
@@ -31,7 +32,7 @@ def _forward(self,
) -> tuple:
r"""
Returns:
- x (:math:`(|\mathcal{V}|, F)` Tensor): current propagation result
+ x (Tensor): current propagation result (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
if self.hop == 0 and not callable(self.theta):
@@ -47,15 +48,16 @@ def _forward(self,
class AdjDiffConv(AdjConv):
r"""Linear filter using the normalized adjacency matrix for propagation.
- Preprocess the feature by distinguish matrix :math:`\beta\mathbf{L} + \mathbf{I}`.
+    Preprocess the feature by the distinguish matrix :math:`\beta\mathbf{L} + \mathbf{I}`.
Args:
- alpha (float): additional scaling for self-loop in adjacency matrix
- :math:`\mathbf{A} + \alpha\mathbf{I}`, i.e. `improved` in PyG GCNConv.
- beta (float): scaling for self-loop in distinguish matrix
+ alpha: additional scaling for self-loop in adjacency matrix
+ :math:`\mathbf{A} + \alpha\mathbf{I}`, i.e. :obj:`improved` in
+ :class:`torch_geometric.nn.conv.GCNConv`.
+ beta: scaling for self-loop in distinguish matrix
:math:`\beta\mathbf{L} + \mathbf{I}`
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
def __init__(self,
@@ -75,7 +77,7 @@ def _forward(self,
) -> dict:
r"""
Returns:
- x (:math:`(|\mathcal{V}|, F)` Tensor): current propagation result
+ x (Tensor): current propagation result (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
if self.hop == 0:
diff --git a/pyg_spectral/nn/conv/adji_conv.py b/pyg_spectral/nn/conv/adji_conv.py
index a54aa0e..6d5b045 100644
--- a/pyg_spectral/nn/conv/adji_conv.py
+++ b/pyg_spectral/nn/conv/adji_conv.py
@@ -11,14 +11,15 @@ class AdjiConv(BaseMP):
r"""Iterative linear filter using the normalized adjacency matrix for augmented propagation.
Args:
- alpha (float): decay factor :math:`\alpha(\mathbf{A} + \beta\mathbf{I})`.
+ alpha: decay factor :math:`\alpha(\mathbf{A} + \beta\mathbf{I})`.
Can be :math:`\alpha < 0`.
- beta (float): scaling for skip connection, i.e., self-loop in adjacency
- matrix, i.e. `improved` in PyG GCNConv and `eps` in GINConv.
+ beta: scaling for skip connection, i.e., self-loop in adjacency
+ matrix, i.e. :obj:`improved` in :class:`torch_geometric.nn.conv.GCNConv`
+ and :obj:`eps` in :class:`torch_geometric.nn.conv.GINConv`.
Can be :math:`\beta < 0`.
- beta = 'var' for learnable beta as parameter.
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+            ``beta = 'var'`` makes beta a learnable parameter.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
# For similar convs supporting batching, use AdjConv or AdjSkipConv
@@ -60,8 +61,8 @@ def forward(self,
r"""Overwrite forward method.
Returns:
- out (:math:`(|\mathcal{V}|, F)` Tensor): output tensor for
- accumulating propagation results
+ out (Tensor): output tensor for accumulating propagation results
+ (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
# propagate_type: (x: Tensor)
@@ -76,21 +77,24 @@ def __repr__(self) -> str:
class Adji2Conv(AdjiConv):
r"""Iterative linear filter using the 2-hop normalized adjacency matrix for
- augmented propagation.
+ augmented propagation.
Args:
- num_hops (int): total number of propagation hops. NOTE that there are
+ num_hops: total number of propagation hops. NOTE that there are
only :math:`\text{num_hops} / 2` conv layers.
- alpha (float): decay factor :math:`\alpha(\mathbf{A} + \beta\mathbf{I})`.
+ alpha: decay factor :math:`\alpha(\mathbf{A} + \beta\mathbf{I})`.
Can be :math:`\alpha < 0`.
- beta (float): scaling for self-loop in adjacency matrix, i.e.
- `improved` in PyG GCNConv and `eps` in GINConv. Can be :math:`\beta < 0`.
- beta = 'var' for learnable beta as parameter.
- --- BaseMP Args ---
- hop (int): current number of propagation hops.
+ beta: scaling for skip connection, i.e., self-loop in adjacency
+ matrix, i.e. :obj:`improved` in :class:`torch_geometric.nn.conv.GCNConv`
+ and :obj:`eps` in :class:`torch_geometric.nn.conv.GINConv`.
+ Can be :math:`\beta < 0`.
+            ``beta = 'var'`` makes beta a learnable parameter.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
def message_and_aggregate(self, adj_t: Adj, x: Tensor) -> Tensor:
+ r""" Perform 2-hop propagation.
+ """
# return spmm(adj_t, spmm(adj_t, x, reduce=self.aggr), reduce=self.aggr)
return torch.spmm(adj_t, torch.spmm(adj_t, x))
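A small dense sanity check of the 2-hop step above (toy tensors; ``adj_dense`` is only a stand-in for the cached propagation matrix):

.. code-block:: python

    import torch

    adj_dense = torch.rand(5, 5)
    adj = adj_dense.to_sparse()      # sparse COO, as torch.spmm expects
    x = torch.randn(5, 3)

    two_hop = torch.spmm(adj, torch.spmm(adj, x))
    # propagating twice equals one propagation with the squared matrix
    assert torch.allclose(two_hop, adj_dense @ adj_dense @ x, atol=1e-5)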
@@ -101,12 +105,13 @@ class AdjSkipConv(BaseMP):
Args:
alpha (float): decay factor :math:`\alpha(\mathbf{A} + \beta\mathbf{I})`.
Can be :math:`\alpha < 0`.
- beta (float): scaling for skip connection, i.e., self-loop in adjacency
- matrix, i.e. `improved` in PyG GCNConv and `eps` in GINConv.
+ beta: scaling for skip connection, i.e., self-loop in adjacency
+ matrix, i.e. :obj:`improved` in :class:`torch_geometric.nn.conv.GCNConv`
+ and :obj:`eps` in :class:`torch_geometric.nn.conv.GINConv`.
Can be :math:`\beta < 0`.
- beta = 'var' for learnable beta as parameter.
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+            ``beta = 'var'`` makes beta a learnable parameter.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
def __init__(self,
@@ -141,8 +146,8 @@ def _get_convolute_mat(self, x: Tensor, edge_index: Adj) -> dict:
def _forward_out(self, **kwargs) -> Tensor:
r"""
Returns:
- out (:math:`(|\mathcal{V}|, F)` Tensor): output tensor for
- accumulating propagation results
+ out (Tensor): output tensor for accumulating propagation results
+ (shape: :math:`(|\mathcal{V}|, F)`)
"""
out, h = kwargs['out'], kwargs['h']
out = h + self.beta * out
@@ -156,8 +161,8 @@ def _forward(self,
) -> dict:
r"""
Returns:
- out (:math:`(|\mathcal{V}|, F)` Tensor): output tensor for
- accumulating propagation results
+ out (Tensor): output tensor for accumulating propagation results
+ (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
# propagate_type: (x: Tensor)
@@ -174,15 +179,18 @@ class AdjSkip2Conv(AdjSkipConv):
Args:
alpha (float): decay factor :math:`\alpha(\mathbf{A} + \beta\mathbf{I})`.
Can be :math:`\alpha < 0`.
- beta (float): scaling for skip connection, i.e., self-loop in adjacency
- matrix, i.e. `improved` in PyG GCNConv and `eps` in GINConv.
+ beta: scaling for skip connection, i.e., self-loop in adjacency
+ matrix, i.e. :obj:`improved` in :class:`torch_geometric.nn.conv.GCNConv`
+ and :obj:`eps` in :class:`torch_geometric.nn.conv.GINConv`.
Can be :math:`\beta < 0`.
- beta = 'var' for learnable beta as parameter.
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+            ``beta = 'var'`` makes beta a learnable parameter.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
def message_and_aggregate(self, adj_t: Adj, x: Tensor) -> Tensor:
+ r""" Perform 2-hop propagation.
+ """
# return spmm(adj_t, spmm(adj_t, x, reduce=self.aggr), reduce=self.aggr)
return torch.spmm(adj_t, torch.spmm(adj_t, x))
@@ -193,12 +201,13 @@ class AdjResConv(BaseMP):
Args:
alpha (float): decay factor :math:`\alpha(\mathbf{A} + \beta\mathbf{I})`.
Can be :math:`\alpha < 0`.
- beta (float): scaling for skip connection, i.e., self-loop in adjacency
- matrix, i.e. `improved` in PyG GCNConv and `eps` in GINConv.
+ beta: scaling for skip connection, i.e., self-loop in adjacency
+ matrix, i.e. :obj:`improved` in :class:`torch_geometric.nn.conv.GCNConv`
+ and :obj:`eps` in :class:`torch_geometric.nn.conv.GINConv`.
Can be :math:`\beta < 0`.
- beta = 'var' for learnable beta as parameter.
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+            ``beta = 'var'`` makes beta a learnable parameter.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
def __init__(self,
@@ -233,8 +242,8 @@ def _get_convolute_mat(self, x: Tensor, edge_index: Adj) -> dict:
def _forward_out(self, **kwargs) -> Tensor:
r"""
Returns:
- out (:math:`(|\mathcal{V}|, F)` Tensor): output tensor for
- accumulating propagation results
+ out (Tensor): output tensor for accumulating propagation results
+ (shape: :math:`(|\mathcal{V}|, F)`)
"""
out, x_0 = kwargs['out'], kwargs['x_0']
out = x_0 + self.beta * self._forward_theta(x=out)
@@ -248,9 +257,9 @@ def _forward(self,
) -> dict:
r"""
Returns:
- out (:math:`(|\mathcal{V}|, F)` Tensor): output tensor for
- accumulating propagation results
- x_0 (:math:`(|\mathcal{V}|, F)` Tensor): initial input
+ out (Tensor): output tensor for accumulating propagation results
+ (shape: :math:`(|\mathcal{V}|, F)`)
+ x_0 (Tensor): initial input (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
if self.hop > 0:
diff --git a/pyg_spectral/nn/conv/base_mp.py b/pyg_spectral/nn/conv/base_mp.py
index 28195d2..7f044ef 100644
--- a/pyg_spectral/nn/conv/base_mp.py
+++ b/pyg_spectral/nn/conv/base_mp.py
@@ -15,12 +15,10 @@ class BaseMP(MessagePassing):
r"""Base filter layer structure.
Args:
- num_hops (int): total number of propagation hops.
- hop (int): current number of propagation hops of this layer.
- alpha (float): additional scaling for self-loop in adjacency matrix
- :math:`\mathbf{A} + \alpha\mathbf{I}`, i.e. `improved` in PyG GCNConv.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
- **kwargs: Additional arguments of :class:`pyg.nn.conv.MessagePassing`.
+ **kwargs: Additional arguments of :class:`torch_geometric.nn.conv.MessagePassing`.
"""
supports_batch: bool = True
supports_norm_batch: bool = True
@@ -54,15 +52,17 @@ def get_propagate_mat(self,
x: Tensor,
edge_index: Adj
) -> Adj:
- r"""Get matrices for self.propagate(). Called before each forward() with same input.
+ r"""Get matrices for :meth:`propagate()`. Called before each
+ :meth:`forward()` with same input.
Args:
- x (Tensor), edge_index (Adj): from pyg.data.Data
- Requires:
- :obj:`self.propagate_mat` (str): propagation schemes, separated by ','.
- Each scheme starts with 'A' or 'L' for adjacency or Laplacian,
- optionally following '+[p]*I' or '-[p]*I' for scaling the
- diagonal, where `p` can be float or attribute name.
+ x: from :class:`torch_geometric.data.Data`
+ edge_index: from :class:`torch_geometric.data.Data`
+ Attributes:
+ propagate_mat (str): propagation schemes, separated by ``,``.
+ Each scheme starts with ``A`` or ``L`` for adjacency or Laplacian,
+ optionally following ``+[p*]I`` or ``-[p*]I`` for scaling the
+                diagonal, where ``p`` can be a float or an attribute name.
Returns:
prop (SparseTensor): propagation matrix
"""
@@ -80,9 +80,6 @@ def _get_propagate_mat(self,
edge_index: Adj
) -> Adj:
""" Shadow function for :meth:`get_propagate_mat()`.
-
- Args:
- edge_index (SparseTensor or torch.sparse_csr_tensor)
"""
def _get_adj(mat: Adj, diag: float):
if diag != 0:
@@ -138,7 +135,7 @@ def _get_forward_mat(self, x: Tensor, edge_index: Adj) -> dict:
``self.comp_scheme == 'forward'``.
Returns:
- out (:math:`(|\mathcal{V}|, F)` Tensor: initial output tensor
+ out (Tensor): initial output tensor (shape: :math:`(|\mathcal{V}|, F)`)
"""
return {'out': torch.zeros_like(x),}
@@ -156,9 +153,10 @@ def get_forward_mat(self,
r"""Get matrices for :meth:`forward()`. Called during :meth:`forward()`.
Args:
- x (Tensor), edge_index (Adj): from pyg.data.Data
+ x: from :class:`torch_geometric.data.Data`
+ edge_index: from :class:`torch_geometric.data.Data`
Returns:
- out (:math:`(|\mathcal{V}|, F)` Tensor): output tensor
+ out (Tensor): output tensor (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
comp_scheme = comp_scheme or self.comp_scheme
@@ -175,9 +173,9 @@ def get_forward_mat(self,
# ==========
def _forward_theta(self, **kwargs):
r"""
- Requires:
- :obj:`self.theta` (nn.Parameter or nn.Module): transformation of propagation result
- before applying to the output.
+ Attributes:
+ theta (nn.Parameter | nn.Module): transformation of propagation
+ result before applying to the output.
"""
x = kwargs['x'] if 'x' in kwargs else kwargs['out']
if callable(self.theta):
@@ -186,11 +184,11 @@ def _forward_theta(self, **kwargs):
return self.theta * x
def _forward_out(self, **kwargs) -> Tensor:
- r"""
+ r""" Shadow function for calling :meth:`_forward_theta()` and accumulating results.
+
Returns:
out (Tensor): output tensor for accumulating propagation results
- Shape:
- out: :math:`(|\mathcal{V}|, F)`
+ (shape: :math:`(|\mathcal{V}|, F)`)
"""
if self.out_scale == 1:
res = self._forward_theta(**kwargs)
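A minimal standalone sketch of the dispatch in ``_forward_theta`` above: a module-valued ``theta`` is applied as a function, while a parameter-valued ``theta`` simply scales the features (toy shapes, unrelated to any particular conv):

.. code-block:: python

    import torch
    import torch.nn as nn

    x = torch.randn(4, 8)
    theta_module = nn.Linear(8, 8)                  # callable -> applied to x
    theta_scalar = nn.Parameter(torch.tensor(0.5))  # tensor -> scales x

    for theta in (theta_module, theta_scalar):
        out = theta(x) if callable(theta) else theta * x
        print(out.shape)                            # torch.Size([4, 8]) both times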
@@ -200,7 +198,7 @@ def _forward_out(self, **kwargs) -> Tensor:
def forward(self, **kwargs) -> dict:
r""" Wrapper for distinguishing precomputed outputs.
- Args & Returns (dct): same with output of :meth:`get_forward_mat()`
+ Args & Returns should match the output of :meth:`get_forward_mat()`
"""
if self.comp_scheme is None or self.comp_scheme == 'convolute':
fwd_kwargs, keys = {}, list(kwargs.keys())
@@ -221,7 +219,7 @@ def _forward(self,
Dicts of Args & Returns should be matched.
Returns:
- x (Tensor): tensor for calculating `out`
+ x (Tensor): tensor for calculating :obj:`out`
"""
raise NotImplementedError
diff --git a/pyg_spectral/nn/conv/bern_conv.py b/pyg_spectral/nn/conv/bern_conv.py
index e5c6b2d..824469a 100644
--- a/pyg_spectral/nn/conv/bern_conv.py
+++ b/pyg_spectral/nn/conv/bern_conv.py
@@ -8,13 +8,15 @@
class BernConv(BaseMP):
r"""Convolutional layer with Bernstein Polynomials.
- We propose a new implementation reducing memory from O(KFn) to O(3Fn).
- paper: BernNet: Learning Arbitrary Graph Spectral Filters via Bernstein Approximation
- ref: https://github.com/ivam-he/BernNet/blob/main/NodeClassification/Bernpro.py
+ We propose a new implementation reducing memory overhead from
+ :math:`O(KFn)` to :math:`O(3Fn)`.
+
+ :paper: BernNet: Learning Arbitrary Graph Spectral Filters via Bernstein Approximation
+ :ref: https://github.com/ivam-he/BernNet/blob/main/NodeClassification/Bernpro.py
Args:
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
def __init__(self,
@@ -39,9 +41,9 @@ def _forward(self,
) -> dict:
r"""
Returns:
- x (:math:`(|\mathcal{V}|, F)` Tensor): propagation result through (2I-L)
- prop_0 (SparseTensor): L
- prop_1 (SparseTensor): 2I - L
+ x (Tensor): propagation result through :math:`2I-L` (shape: :math:`(|\mathcal{V}|, F)`)
+ prop_0 (SparseTensor): :math:`L`
+ prop_1 (SparseTensor): :math:`2I-L`
"""
if self.hop > 0:
# propagate_type: (x: Tensor)
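For orientation, in the standard BernNet formulation (restated from the cited paper, not quoted from this file) the two matrices returned above combine into the order-:math:`K` Bernstein filter

.. math::
    \sum_{k=0}^{K} \frac{\theta_k}{2^K} \binom{K}{k}
    (2\mathbf{I} - \mathbf{L})^{K-k} \mathbf{L}^{k} \mathbf{x}.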
diff --git a/pyg_spectral/nn/conv/cheb_conv.py b/pyg_spectral/nn/conv/cheb_conv.py
index 8ca011d..fc09270 100644
--- a/pyg_spectral/nn/conv/cheb_conv.py
+++ b/pyg_spectral/nn/conv/cheb_conv.py
@@ -6,13 +6,14 @@
class ChebConv(BaseMP):
r"""Convolutional layer with Chebyshev Polynomials.
- paper: Convolutional Neural Networks on Graphs with Chebyshev Approximation, Revisited
- ref: https://github.com/ivam-he/ChebNetII/blob/main/main/Chebbase_pro.py
+
+ :paper: Convolutional Neural Networks on Graphs with Chebyshev Approximation, Revisited
+ :ref: https://github.com/ivam-he/ChebNetII/blob/main/main/Chebbase_pro.py
Args:
- alpha (float): decay factor for each hop :math:`1/hop^\alpha`.
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+ alpha: decay factor for each hop :math:`1/k^\alpha`.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
def __init__(self,
@@ -36,8 +37,8 @@ def _forward(self,
) -> dict:
r"""
Returns:
- x (:math:`(|\mathcal{V}|, F)` Tensor): propagation result of k-1
- x_1 (:math:`(|\mathcal{V}|, F)` Tensor): propagation result of k-2
+ x (Tensor): propagation result of :math:`k-1` (shape: :math:`(|\mathcal{V}|, F)`)
+ x_1 (Tensor): propagation result of :math:`k-2` (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
if self.hop == 0:
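For reference, the :math:`k-1` and :math:`k-2` results above follow the usual three-term Chebyshev recurrence (standard formulation, with :math:`\hat{\mathbf{A}}` denoting the propagation matrix):

.. math::
    \mathbf{x}^{(k)} = 2\hat{\mathbf{A}}\mathbf{x}^{(k-1)} - \mathbf{x}^{(k-2)},
    \qquad \mathbf{x}^{(0)} = \mathbf{x}, \quad \mathbf{x}^{(1)} = \hat{\mathbf{A}}\mathbf{x}.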
diff --git a/pyg_spectral/nn/conv/chebii_conv.py b/pyg_spectral/nn/conv/chebii_conv.py
index 1573207..da759fb 100644
--- a/pyg_spectral/nn/conv/chebii_conv.py
+++ b/pyg_spectral/nn/conv/chebii_conv.py
@@ -24,13 +24,13 @@ def cheby(i, x):
class ChebIIConv(BaseMP):
r"""Convolutional layer with Chebyshev-II Polynomials.
- paper: Convolutional Neural Networks on Graphs with Chebyshev Approximation, Revisited
- ref: https://github.com/ivam-he/ChebNetII/blob/main/main/ChebnetII_pro.py
+
+ :paper: Convolutional Neural Networks on Graphs with Chebyshev Approximation, Revisited
+ :ref: https://github.com/ivam-he/ChebNetII/blob/main/main/ChebnetII_pro.py
Args:
- alpha (float): decay factor for each hop :math:`1/hop^\alpha`.
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
coeffs_data = None
@@ -63,6 +63,7 @@ def _get_convolute_mat(self, x: Tensor, edge_index: Adj) -> dict:
def _get_forward_mat(self, x: Tensor, edge_index: Adj) -> dict:
r"""
+ Attributes:
thetas (Tensor): learnable/fixed (wrt decoupled/iterative model)
scalar parameters representing cheb(x)
"""
@@ -92,8 +93,8 @@ def _forward(self,
) -> dict:
r"""
Returns:
- x (:math:`(|\mathcal{V}|, F)` Tensor): propagation result of k-1
- x_1 (:math:`(|\mathcal{V}|, F)` Tensor): propagation result of k-2
+ x (Tensor): propagation result of :math:`k-1` (shape: :math:`(|\mathcal{V}|, F)`)
+ x_1 (Tensor): propagation result of :math:`k-2` (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
if self.hop == 0:
diff --git a/pyg_spectral/nn/conv/clenshaw_conv.py b/pyg_spectral/nn/conv/clenshaw_conv.py
index e5ffec0..fcebdc8 100644
--- a/pyg_spectral/nn/conv/clenshaw_conv.py
+++ b/pyg_spectral/nn/conv/clenshaw_conv.py
@@ -9,13 +9,14 @@
class ClenshawConv(BaseMP):
r"""Convolutional layer with Chebyshev Polynomials and explicit residual.
- paper: Clenshaw Graph Neural Networks
- ref: https://github.com/yuziGuo/ClenshawGNN/blob/master/models/ChebClenshawNN.py
+
+ :paper: Clenshaw Graph Neural Networks
+ :ref: https://github.com/yuziGuo/ClenshawGNN/blob/master/models/ChebClenshawNN.py
Args:
- alpha (float): transformation strength.
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+ alpha: transformation strength.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
def __init__(self,
@@ -50,8 +51,8 @@ def _get_convolute_mat(self, x: Tensor, edge_index: Adj) -> dict:
def _forward_out(self, **kwargs) -> Tensor:
r"""
Returns:
- out (:math:`(|\mathcal{V}|, F)` Tensor): output tensor for
- accumulating propagation results
+ out (Tensor): output tensor for accumulating propagation results
+ (shape: :math:`(|\mathcal{V}|, F)`)
"""
out, x_0 = kwargs['out'], kwargs['x_0']
out = out + self.beta * x_0
@@ -68,10 +69,10 @@ def _forward(self,
) -> dict:
r"""
Returns:
- out (:math:`(|\mathcal{V}|, F)` Tensor): output tensor for
- accumulating propagation results
- out_1 (:math:`(|\mathcal{V}|, F)` Tensor): propagation result of k-2
- x_0 (:math:`(|\mathcal{V}|, F)` Tensor): initial input
+ out (Tensor): output tensor for accumulating propagation results
+ (shape: :math:`(|\mathcal{V}|, F)`)
+ out_1 (Tensor): propagation result of :math:`k-2` (shape: :math:`(|\mathcal{V}|, F)`)
+ x_0 (Tensor): initial input (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
# propagate_type: (x: Tensor)
diff --git a/pyg_spectral/nn/conv/favard_conv.py b/pyg_spectral/nn/conv/favard_conv.py
index de15aba..e4b1ad9 100644
--- a/pyg_spectral/nn/conv/favard_conv.py
+++ b/pyg_spectral/nn/conv/favard_conv.py
@@ -12,12 +12,13 @@
class FavardConv(BaseMP):
r"""Convolutional layer with basis in Favard's Theorem.
- paper: Graph Neural Networks with Learnable and Optimal Polynomial Bases
- ref: https://github.com/yuziGuo/FarOptBasis/blob/master/layers/FavardNormalConv.py
+
+ :paper: Graph Neural Networks with Learnable and Optimal Polynomial Bases
+ :ref: https://github.com/yuziGuo/FarOptBasis/blob/master/layers/FavardNormalConv.py
Args:
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
supports_batch: bool = False
@@ -55,7 +56,9 @@ def _get_convolute_mat(self, x: Tensor, edge_index: Adj) -> dict:
def _get_forward_mat(self, x: Tensor, edge_index: Adj) -> dict:
r"""
- alpha_1: parameter for k-1
+ Returns:
+ out (Tensor): initial output tensor (shape: :math:`(|\mathcal{V}|, F)`)
+ alpha_1: parameter for :math:`k-1`
"""
return {'out': torch.zeros_like(x), 'alpha_1': None}
@@ -85,10 +88,10 @@ def _forward(self,
) -> dict:
r"""
Returns:
- x (:math:`(|\mathcal{V}|, F)` Tensor): propagation result of k-1
- x_1 (:math:`(|\mathcal{V}|, F)` Tensor): propagation result of k-2
+ x (Tensor): propagation result of :math:`k-1` (shape: :math:`(|\mathcal{V}|, F)`)
+ x_1 (Tensor): propagation result of :math:`k-2` (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
- alpha_1: parameter for k-1
+ alpha_1: parameter for :math:`k-1`
"""
if self.hop == 0:
h = self._div_coeff(x, self.alpha, pos=True)
diff --git a/pyg_spectral/nn/conv/horner_conv.py b/pyg_spectral/nn/conv/horner_conv.py
index 14f2f64..9e67f1d 100644
--- a/pyg_spectral/nn/conv/horner_conv.py
+++ b/pyg_spectral/nn/conv/horner_conv.py
@@ -9,13 +9,14 @@
class HornerConv(BaseMP):
r"""Convolutional layer with adjacency propagation and explicit residual.
- paper: Clenshaw Graph Neural Networks
- ref: https://github.com/yuziGuo/ClenshawGNN/blob/master/layers/HornerConv.py
+
+ :paper: Clenshaw Graph Neural Networks
+ :ref: https://github.com/yuziGuo/ClenshawGNN/blob/master/layers/HornerConv.py
Args:
- alpha (float): transformation strength.
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+ alpha: transformation strength.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
def __init__(self,
@@ -50,8 +51,8 @@ def _get_convolute_mat(self, x: Tensor, edge_index: Adj) -> dict:
def _forward_out(self, **kwargs) -> Tensor:
r"""
Returns:
- out (:math:`(|\mathcal{V}|, F)` Tensor): output tensor for
- accumulating propagation results
+ out (Tensor): output tensor for accumulating propagation results
+ (shape: :math:`(|\mathcal{V}|, F)`)
"""
out, x_0 = kwargs['out'], kwargs['x_0']
out = out + self.beta * x_0
@@ -67,9 +68,9 @@ def _forward(self,
) -> dict:
r"""
Returns:
- out (:math:`(|\mathcal{V}|, F)` Tensor): output tensor for
- accumulating propagation results
- x_0 (:math:`(|\mathcal{V}|, F)` Tensor): initial input
+ out (Tensor): output tensor for accumulating propagation results
+ (shape: :math:`(|\mathcal{V}|, F)`)
+ x_0 (Tensor): initial input (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
if self.hop > 0:
diff --git a/pyg_spectral/nn/conv/jacobi_conv.py b/pyg_spectral/nn/conv/jacobi_conv.py
index c3a95cc..46b72c9 100644
--- a/pyg_spectral/nn/conv/jacobi_conv.py
+++ b/pyg_spectral/nn/conv/jacobi_conv.py
@@ -6,13 +6,14 @@
class JacobiConv(BaseMP):
r"""Convolutional layer with Jacobi Polynomials.
- paper: How Powerful are Spectral Graph Neural Networks
- ref: https://github.com/GraphPKU/JacobiConv
+
+ :paper: How Powerful are Spectral Graph Neural Networks
+ :ref: https://github.com/GraphPKU/JacobiConv
Args:
alpha, beta (float): hyperparameters in Jacobi polynomials.
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
def __init__(self,
@@ -41,8 +42,8 @@ def _forward(self,
) -> dict:
r"""
Returns:
- x (:math:`(|\mathcal{V}|, F)` Tensor): propagation result of k-1
- x_1 (:math:`(|\mathcal{V}|, F)` Tensor): propagation result of k-2
+ x (Tensor): propagation result of :math:`k-1` (shape: :math:`(|\mathcal{V}|, F)`)
+ x_1 (Tensor): propagation result of :math:`k-2` (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
a, b, l, r, k = self.alpha, self.beta, self.l, self.r, self.hop
diff --git a/pyg_spectral/nn/conv/lapi_conv.py b/pyg_spectral/nn/conv/lapi_conv.py
index 536acf3..b4084b3 100644
--- a/pyg_spectral/nn/conv/lapi_conv.py
+++ b/pyg_spectral/nn/conv/lapi_conv.py
@@ -7,12 +7,13 @@
class LapiConv(BaseMP):
r"""Iterative linear filter using the normalized adjacency matrix.
Used in AdaGNN.
- paper: AdaGNN: Graph Neural Networks with Adaptive Frequency Response Filter
- ref: https://github.com/yushundong/AdaGNN/blob/main/layers.py
+
+ :paper: AdaGNN: Graph Neural Networks with Adaptive Frequency Response Filter
+ :ref: https://github.com/yushundong/AdaGNN/blob/main/layers.py
Args:
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
# For similar convs supporting batching, use LapSkipConv
@@ -39,8 +40,8 @@ def forward(self,
) -> dict:
r"""
Returns:
- out (:math:`(|\mathcal{V}|, F)` Tensor): output tensor for
- accumulating propagation results
+            out (Tensor): output tensor for accumulating propagation results
+                (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
# propagate_type: (x: Tensor)
diff --git a/pyg_spectral/nn/conv/legendre_conv.py b/pyg_spectral/nn/conv/legendre_conv.py
index afa6276..37ddfa6 100644
--- a/pyg_spectral/nn/conv/legendre_conv.py
+++ b/pyg_spectral/nn/conv/legendre_conv.py
@@ -6,14 +6,15 @@
class LegendreConv(BaseMP):
r"""Convolutional layer with Legendre Polynomials.
- paper: How Powerful are Spectral Graph Neural Networks
- ref: https://github.com/GraphPKU/JacobiConv
- Alternative paper: Improved Modeling and Generalization Capabilities of Graph Neural Networks With Legendre Polynomials
- code: https://github.com/12chen20/LegendreNet
+
+ :paper: How Powerful are Spectral Graph Neural Networks
+ :ref: https://github.com/GraphPKU/JacobiConv
+ :paper: Improved Modeling and Generalization Capabilities of Graph Neural Networks With Legendre Polynomials
+ :ref: https://github.com/12chen20/LegendreNet
Args:
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
def __init__(self,
@@ -35,8 +36,8 @@ def _forward(self,
) -> dict:
r"""
Returns:
- x (:math:`(|\mathcal{V}|, F)` Tensor): propagation result of k-1
- x_1 (:math:`(|\mathcal{V}|, F)` Tensor): propagation result of k-2
+ x (Tensor): propagation result of :math:`k-1` (shape: :math:`(|\mathcal{V}|, F)`)
+ x_1 (Tensor): propagation result of :math:`k-2` (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
if self.hop == 0:
diff --git a/pyg_spectral/nn/conv/optbasis_conv.py b/pyg_spectral/nn/conv/optbasis_conv.py
index bf1ce45..dcc41a2 100644
--- a/pyg_spectral/nn/conv/optbasis_conv.py
+++ b/pyg_spectral/nn/conv/optbasis_conv.py
@@ -7,12 +7,13 @@
class OptBasisConv(BaseMP):
r"""Convolutional layer with optimal adaptive basis.
- paper: Graph Neural Networks with Learnable and Optimal Polynomial Bases
- ref: https://github.com/yuziGuo/FarOptBasis/blob/master/layers/NormalBasisConv.py
+
+ :paper: Graph Neural Networks with Learnable and Optimal Polynomial Bases
+ :ref: https://github.com/yuziGuo/FarOptBasis/blob/master/layers/NormalBasisConv.py
Args:
- --- BaseMP Args ---
- num_hops (int), hop (int): total and current number of propagation hops.
+ num_hops: total number of propagation hops.
+ hop: current number of propagation hops of this layer.
cached: whether cache the propagation matrix.
"""
def __init__(self,
@@ -36,8 +37,8 @@ def _forward(self,
) -> dict:
r"""
Returns:
- x (:math:`(|\mathcal{V}|, F)` Tensor): propagation result of k-1
- x_1 (:math:`(|\mathcal{V}|, F)` Tensor): propagation result of k-2
+ x (Tensor): propagation result of :math:`k-1` (shape: :math:`(|\mathcal{V}|, F)`)
+ x_1 (Tensor): propagation result of :math:`k-2` (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
"""
# dim_node = tuple(range(x.dim() - 1))
diff --git a/pyg_spectral/transforms/gen_norm.py b/pyg_spectral/transforms/gen_norm.py
index a6aa991..d54dc70 100755
--- a/pyg_spectral/transforms/gen_norm.py
+++ b/pyg_spectral/transforms/gen_norm.py
@@ -17,18 +17,17 @@ def pow_with_pinv(x: Tensor, p: float) -> Tensor:
@functional_transform('gen_norm')
class GenNorm(BaseTransform):
- r"""Generalized graph normalization from GBP/AGP.
+ r"""Generalized graph normalization.
.. math::
\mathbf{\hat{A}} = \mathbf{\hat{D}}^{-a} (\mathbf{A} + \mathbf{I})
\mathbf{\hat{D}}^{-b}
- where :math:`\hat{D}_{ii} = \sum_{j=0} \hat{A}_{ij} + 1` and
- :math:`a,b \in [0,1]`.
+ where :math:`\hat{D}_{ii} = \sum_{j=0} \hat{A}_{ij} + 1` and :math:`a,b \in [0,1]`.
Args:
- left (float): left (row) normalization :math:`a`.
- right (float): right (col) normalization :math:`b`. Default to :math:`1-a`.
+ left: left (row) normalization :math:`a`.
+        right: right (col) normalization :math:`b`. Defaults to :math:`1-a`.
"""
def __init__(self, left: float, right: float = None,
dtype: torch.dtype = torch.float32):
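A dense toy rendering of the formula above (illustration only; the transform itself operates on the graph's sparse connectivity and uses a pseudo-inverse power for zero degrees):

.. code-block:: python

    import torch

    A = torch.tensor([[0., 1., 0.],
                      [1., 0., 1.],
                      [0., 1., 0.]])
    a, b = 0.5, 0.5

    A_loop = A + torch.eye(3)        # A + I
    deg = A_loop.sum(dim=1)          # D_hat_ii = sum_j A_ij + 1
    A_hat = torch.diag(deg.pow(-a)) @ A_loop @ torch.diag(deg.pow(-b))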
diff --git a/setup.py b/setup.py
index 9110551..7a771cc 100755
--- a/setup.py
+++ b/setup.py
@@ -32,4 +32,4 @@
packages=find_packages(),
ext_modules=ext_modules,
)
-# FEATURE: [optional benckmark](https://setuptools.pypa.io/en/latest/userguide/dependency_management.html#optional-dependencies)
+#FEATURE: [optional benchmark](https://setuptools.pypa.io/en/latest/userguide/dependency_management.html#optional-dependencies)