
ENH: rename library #5


Open · wants to merge 7 commits into base: master
2 changes: 1 addition & 1 deletion .gitattributes
@@ -1 +1 @@
-toydiff/_version.py export-subst
+avagrad/_version.py export-subst
1 change: 1 addition & 0 deletions ALTERNATIVES.md
@@ -6,3 +6,4 @@ Alternatives that do same or very similar stuff:
 * [TinyGrad](https://github.com/tinygrad/tinygrad)
 * [JAX](https://github.com/google/jax)
 * [MyGrad](https://github.com/rsokl/MyGrad)
+* [MXNet](https://github.com/apache/mxnet)
4 changes: 2 additions & 2 deletions MANIFEST.in
@@ -1,7 +1,7 @@
 include pyproject.toml
 include versioneer.py
-include toydiff/_version.py
-include src/toydiff/_version.py
+include avagrad/_version.py
+include src/avagrad/_version.py
 
 # Include the README
 # include *.md
43 changes: 23 additions & 20 deletions README.md
@@ -1,6 +1,8 @@
-# Toydiff
+<p align="center">
+  <img src="img/logo.png" width="500">
+</p>
 
-`toydiff` is a simple automatic differentiation library that I created to wrap
+`avagrad` is a simple automatic differentiation library that I created to wrap
 my head around how autodiff works. It is built using NumPy and SciPy and it has
 been tested using PyTorch as a reference.
@@ -10,35 +12,36 @@ networks (WIP, only linear layers for now).
 ## Installation
 Normal user:
 ```bash
-git clone https://github.com/Xylambda/toydiff.git
-pip install toydiff/.
+git clone https://github.com/Xylambda/avagrad.git
+pip install avagrad/.
 ```
 
 Developer:
 ```bash
-git clone https://github.com/Xylambda/toydiff.git
-pip install -e toydiff/. -r toydiff/requirements-dev.txt
+git clone https://github.com/Xylambda/avagrad.git
+
+pip install -e avagrad/. -r avagrad/requirements-dev.txt
 ```
 
 ## Tests
-To run test, you must install the library as a `developer`.
+To run tests you must install the library as a `developer`.
 
 ```bash
-cd toydiff/
+cd avagrad/
 pytest -v tests/
 ```
 
 ## Differentiable operations
 The use is almost the same as the one you would expect from PyTorch:
 
 ```python
->>> import toydiff as tdf
+>>> import avagrad as ag
 >>> # use `track_gradient=True` to allow backward to fill the gradients
->>> a = tdf.random.rand((3,3), track_gradient=True)
->>> b = tdf.random.rand((3,3), track_gradient=True)
->>> c = tdf.matmul(a, b)
->>> d = tdf.log(c)
->>> e = tdf.sum(d)
+>>> a = ag.random.rand((3,3), track_gradient=True)
+>>> b = ag.random.rand((3,3), track_gradient=True)
+>>> c = ag.matmul(a, b)
+>>> d = ag.log(c)
+>>> e = ag.sum(d)
 ```
 
 Variable `e` is a Tensor that allows backpropagation:
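
The README is truncated here, so this is a minimal sketch of the step that presumably follows, assuming a PyTorch-style API: that `backward()` fills the gradients is confirmed by the comment in the snippet above, while the `gradient` attribute name is only a guess.

```python
>>> e.backward()  # backpropagate from the scalar output
>>> a.gradient    # hypothetical attribute name; gradient of e w.r.t. a
```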
@@ -69,10 +72,10 @@ basic neural networks:
 
 ```python
 import numpy as np
-import toydiff as tdf
-from toydiff.nn.blocks import Linear
-from toydiff.nn.optim import SGD
-from toydiff.nn.functional import mse_loss
+import avagrad as ag
+from avagrad.nn.blocks import Linear
+from avagrad.nn.optim import SGD
+from avagrad.nn.functional import mse_loss
 
 # generate data
 x = np.arange(-1, 1, 0.01).reshape(-1,1)
@@ -82,8 +85,8 @@ y = 2 * x + np.random.normal(size=(len(x), 1), scale=0.3)
 model = Linear(1, 1, bias=False)
 
 # wrap your data in Tensors with `track_gradient=True`
-feat = tdf.Tensor(X, track_gradient=True)
-labels = tdf.Tensor(y, track_gradient=True)
+feat = ag.Tensor(X, track_gradient=True)
+labels = ag.Tensor(y, track_gradient=True)
 
 # pass model to optimizer
 optimizer = SGD(model)
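
The training loop itself is cut off in this view. A plausible continuation under the same PyTorch-like assumptions; only `SGD(model)` and `mse_loss` appear above, so the `zero_grad` and `step` method names are guesses:

```python
# hypothetical training loop; the optimizer method names are assumptions
for epoch in range(100):
    optimizer.zero_grad()         # reset accumulated gradients (assumed method)
    out = model(feat)             # forward pass through the linear layer
    loss = mse_loss(out, labels)  # mean squared error against the labels
    loss.backward()               # backpropagate through the graph
    optimizer.step()              # apply the gradient descent update (assumed method)
```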
144 changes: 130 additions & 14 deletions examples/LinearRegression.ipynb

Large diffs are not rendered by default.

94 changes: 94 additions & 0 deletions examples/fma.ipynb
@@ -0,0 +1,94 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "c3b837b4",
"metadata": {},
"outputs": [],
"source": [
"import avagrad as ag"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "d3456753",
"metadata": {},
"outputs": [],
"source": [
"a = ag.random.rand((3,3), track_gradient=True)\n",
"b = ag.random.rand((3,3), track_gradient=True)\n",
"c = ag.random.rand((3,3), track_gradient=True)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "8041ad56",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Tensor([[1.8777063 , 0.9567863 , 1.2879769 ],\n",
" [0.62437826, 1.2251703 , 0.8385898 ],\n",
" [0.835547 , 1.1380192 , 1.1682112 ]], dtype=float32, backward_fn=<FusedMatMulAdd(TernaryOp).Backward>)"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ag.fma(a, b, c)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bf6bbf6a",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "65b5a9c5",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "a6f37935",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
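
The notebook above exercises the new fused multiply-add operation. Assuming `ag.fma(a, b, c)` computes `matmul(a, b) + c` in a single fused step, as the `FusedMatMulAdd(TernaryOp).Backward` name suggests, the unfused NumPy reference it should match is:

```python
import numpy as np

# Unfused reference: a matmul followed by an elementwise add, which
# materializes the intermediate product before adding c. Fusing avoids
# that temporary and the second pass over the data.
rng = np.random.default_rng(0)
a = rng.random((3, 3)).astype(np.float32)
b = rng.random((3, 3)).astype(np.float32)
c = rng.random((3, 3)).astype(np.float32)

expected = a @ b + c  # what ag.fma(a, b, c) should return, up to float rounding
print(expected)
```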
Binary file added img/logo.png
16 changes: 8 additions & 8 deletions profiling/ops.py
@@ -1,6 +1,6 @@
 import argparse
 import cProfile
-import toydiff as tdf
+import avagrad as ag
 from functools import partial
 from pathlib import Path
 
@@ -13,7 +13,7 @@ def prof_func(func: callable, name: str, size: int):
     profiler.enable()
     func()
     profiler.disable()
-    version = tdf.__version__
+    version = ag.__version__
     folder = PROFS_PATH / version
     folder.mkdir(exist_ok=True)
     profiler.dump_stats(folder / f"n={name}_s={size}.prof")
@@ -22,10 +22,10 @@ def prof_func(func: callable, name: str, size: int):
 # -----------------------------------------------------------------------------
 def test_matmul(size: int):
     def exec(a, b):
-        tdf.matmul(a, b)
+        ag.matmul(a, b)
 
-    a = tdf.rand((size, size))
-    b = tdf.rand((size, size))
+    a = ag.rand((size, size))
+    b = ag.rand((size, size))
     func = partial(exec, a=a, b=b)
     prof_func(func, "MatMul", size)
 
@@ -34,9 +34,9 @@ def test_matmul_backward(size: int):
     def exec(c):
         c.backward()
 
-    a = tdf.rand((size, size), track_gradient=True)
-    b = tdf.rand((size, size), track_gradient=True)
-    c = tdf.matmul(a, b)
+    a = ag.rand((size, size), track_gradient=True)
+    b = ag.rand((size, size), track_gradient=True)
+    c = ag.matmul(a, b)
     func = partial(exec, c=c)
    prof_func(func, "MatMul.Backward", size)
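
Each profiled run dumps a cProfile stats file into a per-version folder. One way to inspect a dump afterwards using only the standard library; the exact location depends on `PROFS_PATH` and the installed version, so the path below is illustrative:

```python
import pstats

# Load a stats file written by prof_func (illustrative path).
stats = pstats.Stats("profs/0.1.0/n=MatMul_s=1000.prof")
stats.sort_stats("cumulative").print_stats(10)  # show the 10 heaviest entries
```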
8 changes: 4 additions & 4 deletions setup.cfg
@@ -6,14 +6,14 @@ license_files = LICENSE.txt
 [versioneer]
 VCS = git
 style = pep440
-versionfile_source = src/toydiff/_version.py
-versionfile_build = toydiff/_version.py
+versionfile_source = src/avagrad/_version.py
+versionfile_build = avagrad/_version.py
 tag_prefix = v
-parentdir_prefix = toydiff-
+parentdir_prefix = avagrad-
 
 [tool:pytest]
 minversion = 4.0.2
-testpaths = toydiff
+testpaths = avagrad
 
 
 [coverage:run]
9 changes: 6 additions & 3 deletions setup.py
@@ -8,13 +8,13 @@
     long_description = f.read()
 
 setup(
-    name='toydiff',
+    name='avagrad',
     version=versioneer.get_version(),
     cmdclass=versioneer.get_cmdclass(),
     description="Tensor automatic differentiation and neural networks library",
     long_description=long_description,
     long_description_content_type='text/markdown',
-    url='https://github.com/Xylambda/toydiff',
+    url='https://github.com/Xylambda/avagrad',
     author='Alejandro Pérez-Sanjuán',
     classifiers=[
         'Development Status :: 3 - Alpha',
@@ -34,7 +34,6 @@
     install_requires=[
         "numpy",
         "scipy",
-        #"pyfma", # fused multiply-add
     ],
     extras_require={
         "test": [
@@ -44,6 +43,10 @@
         "profile": [
             "snakeviz",
             "perfplot",
+        ],
+        "docs": [
+            "sphinx",
+            "furo",
         ]
     }
 )
4 changes: 2 additions & 2 deletions src/toydiff/__init__.py → src/avagrad/__init__.py
@@ -1,4 +1,4 @@
-""" Small automatic differentiation package for scalars. """
+"""Tensor automatic differentiation and neural networks package."""
 
 # relative subpackages import
 from . import exceptions, nn, random, utils
@@ -7,6 +7,6 @@
 __version__ = get_versions()["version"]
 del get_versions
 
-from toydiff.core import *
+from avagrad.core import *
 
 __all__ = ["exceptions", "utils", "nn", "Tensor", "random"]
2 changes: 1 addition & 1 deletion src/toydiff/_version.py → src/avagrad/_version.py
@@ -42,7 +42,7 @@ def get_config():
     cfg.style = "pep440"
     cfg.tag_prefix = "v"
     cfg.parentdir_prefix = "None"
-    cfg.versionfile_source = "toydiff/_version.py"
+    cfg.versionfile_source = "avagrad/_version.py"
     cfg.verbose = False
     return cfg