Add structures documentation #41

Merged · 6 commits · May 18, 2022

2 changes: 1 addition & 1 deletion docs/_templates/class.rst
@@ -8,4 +8,4 @@
.. autoclass:: {{ name }}
   :members:
   :special-members:
-   :exclude-members: __weakref__
+   :exclude-members: __weakref__, __init__
112 changes: 100 additions & 12 deletions torchhd/embeddings.py
@@ -15,8 +15,25 @@


class Identity(nn.Embedding):
"""Embedding wrapper around functional.identity_hv"""
"""Embedding wrapper around :func:`~torchhd.functional.identity_hv`.

Class inherits from `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html>`_ and supports the same keyword arguments.

Args:
num_embeddings (int): the number of hypervectors to generate.
embedding_dim (int): the dimensionality of the hypervectors.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: ``False``.

Examples::

>>> emb = embeddings.Identity(5, 3)
>>> idx = torch.LongTensor([0, 1, 4])
>>> emb(idx)
tensor([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])

"""
def __init__(self, num_embeddings, embedding_dim, requires_grad=False, **kwargs):
super(Identity, self).__init__(num_embeddings, embedding_dim, **kwargs)
self.weight.requires_grad = requires_grad
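
Note: as a quick intuition for the identity embedding, a minimal sketch (not part of the diff; it assumes binding is element-wise multiplication for bipolar hypervectors, as in the MAP model, and an arbitrary dimensionality of 10,000)::

    import torch
    from torchhd import functional

    identity = functional.identity_hv(1, 10000)[0]  # all-ones hypervector
    x = functional.random_hv(1, 10000)[0]           # random bipolar hypervector

    # binding with the identity hypervector leaves x unchanged
    assert torch.equal(x * identity, x)
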
@@ -37,22 +54,32 @@ def reset_parameters(self):


class Random(nn.Embedding):
"""Embedding wrapper around functional.random_hv"""
"""Embedding wrapper around :func:`~torchhd.functional.random_hv`.

Class inherits from `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html>`_ and supports the same keyword arguments.

Args:
num_embeddings (int): the number of hypervectors to generate.
embedding_dim (int): the dimensionality of the hypervectors.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: ``False``.

Examples::

>>> emb = embeddings.Random(5, 3)
>>> idx = torch.LongTensor([0, 1, 4])
>>> emb(idx)
tensor([[ 1., -1., 1.],
[ 1., -1., 1.],
[ 1., 1., 1.]])

"""
def __init__(
self,
num_embeddings,
embedding_dim,
low=0.0,
high=1.0,
randomness=0.0,
requires_grad=False,
**kwargs
):
self.low_value = low
self.high_value = high
self.randomness = randomness

super(Random, self).__init__(num_embeddings, embedding_dim, **kwargs)
self.weight.requires_grad = requires_grad
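
Note: the property these random hypervectors are used for is quasi-orthogonality. A small sketch of how one might check it (the dimensionality of 10,000 is an arbitrary choice)::

    import torch
    from torchhd import functional

    hv = functional.random_hv(2, 10000)

    # independent random bipolar hypervectors are nearly orthogonal:
    # their normalized dot product concentrates around zero
    sim = torch.sum(hv[0] * hv[1]).item() / 10000
    print(sim)  # close to 0.0
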

@@ -72,8 +99,28 @@ def reset_parameters(self):


class Level(nn.Embedding):
"""Embedding wrapper around functional.level_hv"""
"""Embedding wrapper around :func:`~torchhd.functional.level_hv`.

Class inherits from `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html>`_ and supports the same keyword arguments.

Args:
num_embeddings (int): the number of hypervectors to generate.
embedding_dim (int): the dimensionality of the hypervectors.
low (float, optional): The lower bound of the real number range that the levels represent. Default: ``0.0``
high (float, optional): The upper bound of the real number range that the levels represent. Default: ``1.0``
randomness (float, optional): r-value to interpolate between level at ``0.0`` and random-hypervectors at ``1.0``. Default: ``0.0``.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: ``False``.

Examples::

>>> emb = embeddings.Level(5, 10, low=-1, high=2)
>>> x = torch.FloatTensor([0.3, 1.9, -0.8])
>>> emb(x)
tensor([[-1., 1., 1., -1., -1., -1., -1., 1., 1., 1.],
[-1., 1., -1., 1., -1., -1., -1., 1., 1., -1.],
[ 1., -1., 1., -1., -1., -1., 1., 1., -1., 1.]])

"""
def __init__(
self,
num_embeddings,
@@ -119,8 +166,28 @@ def forward(self, input: torch.Tensor) -> torch.Tensor:


class Circular(nn.Embedding):
"""Embedding wrapper around functional.circular_hv"""
"""Embedding wrapper around :func:`~torchhd.functional.circular_hv`.

Class inherits from `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html>`_ and supports the same keyword arguments.

Args:
num_embeddings (int): the number of hypervectors to generate.
embedding_dim (int): the dimensionality of the hypervectors.
low (float, optional): The lower bound of the real number range that the circular levels represent. Default: ``0.0``
high (float, optional): The upper bound of the real number range that the circular levels represent. Default: ``2 * pi``
randomness (float, optional): r-value to interpolate between circular at ``0.0`` and random-hypervectors at ``1.0``. Default: ``0.0``.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: ``False``.

Examples::

>>> emb = embeddings.Circular(5, 10)
>>> x = torch.FloatTensor([0.0, 3.14, 6.28])
>>> emb(x)
tensor([[ 1., -1., 1., -1., 1., 1., 1., 1., -1., 1.],
[ 1., -1., -1., 1., 1., 1., 1., -1., -1., -1.],
[ 1., -1., -1., -1., 1., 1., 1., 1., 1., -1.]])

"""
def __init__(
self,
num_embeddings,
@@ -167,7 +234,28 @@ def forward(self, input: torch.Tensor) -> torch.Tensor:


class Projection(nn.Module):
"""Embedding using a random projection matrix"""
r"""Embedding using a random projection matrix.

Implemented based on `A Theoretical Perspective on Hyperdimensional Computing <https://arxiv.org/abs/2010.07426>`_.
:math:`\Phi x` where :math:`\Phi \in \mathbb{R}^{d \times m}` is a matrix whose rows are uniformly sampled at random from the surface of an :math:`m`-dimensional unit sphere.
This encoding ensures that similarities in the input space are preserved in the hyperspace.

Args:
in_features (int): the dimensionality of the input feature vector.
out_features (int): the dimensionality of the hypervectors.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: ``False``.
dtype (``torch.dtype``, optional): the desired data type of returned tensor. Default: if ``None``, uses a global default (see ``torch.set_default_tensor_type()``).
device (``torch.device``, optional): the desired device of returned tensor. Default: if ``None``, uses the current device for the default tensor type (see torch.set_default_tensor_type()). ``device`` will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.

Examples::

>>> emb = embeddings.Projection(5, 3)
>>> x = torch.rand(2, 5)
>>> emb(x)
tensor([[ 0.2747, -0.8804, -0.6810],
[ 0.5610, -0.9227, 0.1671]])

"""

__constants__ = ["in_features", "out_features"]
in_features: int
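Note: for intuition about the construction the Projection docstring describes, a minimal sketch (not the module's actual implementation): normalizing Gaussian rows samples them uniformly from the surface of the unit sphere, and for a large output dimensionality the projection approximately preserves cosine similarity::

    import torch

    def random_projection_matrix(out_features, in_features):
        # rows of a Gaussian matrix, normalized to unit length, are
        # uniform on the in_features-dimensional unit sphere
        phi = torch.randn(out_features, in_features)
        return phi / phi.norm(dim=1, keepdim=True)

    phi = random_projection_matrix(10000, 5)
    x1, x2 = torch.rand(5), torch.rand(5)
    hv1, hv2 = phi @ x1, phi @ x2

    cos = torch.nn.functional.cosine_similarity
    # the two printed values should be approximately equal
    print(cos(x1, x2, dim=0).item(), cos(hv1, hv2, dim=0).item())
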
2 changes: 1 addition & 1 deletion torchhd/functional.py
@@ -231,7 +231,7 @@ def circular_hv(
    Args:
        num_embeddings (int): the number of hypervectors to generate.
        embedding_dim (int): the dimensionality of the hypervectors.
-       randomness (float, optional): r-value to interpolate between level at ``0.0`` and random-hypervectors at ``1.0``. Default: ``0.0``.
+       randomness (float, optional): r-value to interpolate between circular-hypervectors at ``0.0`` and random-hypervectors at ``1.0``. Default: ``0.0``.
        generator (``torch.Generator``, optional): a pseudorandom number generator for sampling.
        out (Tensor, optional): the output tensor.
        dtype (``torch.dtype``, optional): the desired data type of returned tensor. Default: if ``None``, uses a global default (see ``torch.set_default_tensor_type()``).
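Note: a small usage sketch of the corrected parameter description, assuming the signature shown in the hunk above and an arbitrary dimensionality of 10,000::

    import torch
    from torchhd import functional

    hv = functional.circular_hv(8, 10000)  # 8 circularly-arranged hypervectors

    for i in range(8):
        # normalized dot-product similarity between level 0 and level i:
        # it decreases towards the opposite side of the circle (i = 4)
        # and increases again as the index wraps around
        sim = torch.sum(hv[0] * hv[i]).item() / 10000
        print(i, round(sim, 2))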