Merge branch 'master' of github.com:jurajHasik/tn-torch_dev
jurajHasik committed Sep 13, 2023
2 parents b25f49b + e17857f commit 3e22249
Showing 149 changed files with 110,771 additions and 1,974 deletions.
9 changes: 5 additions & 4 deletions .github/workflows/main.yml
@@ -21,9 +21,9 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python_v: [3.7, 3.8, 3.9]
pytorch_v: [1.11.0]
fail-fast: false
python_v: [ "3.8", "3.9", "3.10", "3.11"]
pytorch_v: [2.0]
fail-fast: false

# Steps represent a sequence of tasks that will be executed as part of the job
steps:
@@ -47,7 +47,7 @@ jobs:
then
$CONDA/bin/conda install -c pytorch-lts -c anaconda pytorch cpuonly scipy pytest
else
$CONDA/bin/conda install -c pytorch -c anaconda pytorch==${{ matrix.pytorch_v }} cpuonly scipy pytest
$CONDA/bin/conda install -c pytorch -c anaconda -c conda-forge pytorch==${{ matrix.pytorch_v }} cpuonly scipy pytest opt_einsum
fi
git submodule update --init --recursive
@@ -85,6 +85,7 @@ jobs:
run: |
$CONDA/bin/python -m pytest examples/ladders/abelian/ctmrg_*.py
$CONDA/bin/python -m pytest examples/ladders/abelian/optim_*.py
$CONDA/bin/python -m pytest examples/ladders/abelian/SU_ladders_u1.py
- name: test kagome
run: |
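The install step above now adds opt_einsum (and the conda-forge channel) next to PyTorch 2.0. As a hedged aside that is not part of the commit itself, opt_einsum provides path-optimized einsum-style contractions and operates directly on torch tensors, roughly along these lines:

import torch
import opt_einsum as oe

# two small test tensors; opt_einsum dispatches to the torch backend automatically
a = torch.rand(4, 8)
b = torch.rand(8, 4)

# contract over the shared index j; an efficient contraction path is chosen internally
c = oe.contract('ij,jk->ik', a, b)
print(c.shape)  # torch.Size([4, 4])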
6 changes: 3 additions & 3 deletions .gitmodules
@@ -1,3 +1,3 @@
[submodule "yast"]
path = yast
url = https://gitlab.com/marekrams/yast.git
[submodule "yastn"]
path = yastn
url = https://github.com/yastn/yastn.git
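A hedged note on this rename: because the submodule's name, path, and URL all change (yast on GitLab becomes yastn on GitHub), existing checkouts will likely need `git submodule sync --recursive` followed by `git submodule update --init --recursive` after pulling this commit; a fresh clone picks up the new .gitmodules entry automatically.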
8 changes: 5 additions & 3 deletions README.md
@@ -11,7 +11,7 @@ by the corner-transfer matrix (CTM) algorithm. Afterwards, the gradients are computed by
automatic differentiation (AD).

#### Now supporting
* **abelian symmetries, with implementation powered by [YAST](https://gitlab.com/marekrams/yast)**
* **abelian symmetries, with implementation powered by [YASTN](https://github.com/yastn/yastn)**

Allows definition of abelian-symmetric iPEPS in terms of block-sparse tensors, computing their
symmetric environments, and their optimization with gradient-based methods.
@@ -129,10 +129,12 @@ python examples/j1j2/abelian/ctmrg_j1j2_u1.py --tiling BIPARTITE --chi 48 --j2 0

#### Dependencies
- PyTorch 1.11+ (see https://pytorch.org/)
- (optional) YAST (see https://gitlab.com/marekrams/yast)
- (optional) YASTN (see https://github.com/yastn/yastn)
- (optional) scipy 1.3+
- (optional) opt_einsum
- (optional) ArrayFire (see https://github.com/arrayfire/arrayfire)

YAST is linked to **peps-torch** as a [git submodule](https://git-scm.com/book/en/v2/Git-Tools-Submodules).
YASTN is linked to **peps-torch** as a [git submodule](https://git-scm.com/book/en/v2/Git-Tools-Submodules).
To obtain it, you can use git:

`git submodule update --init --recursive`
14 changes: 14 additions & 0 deletions benchmarks/conftest.py
@@ -0,0 +1,14 @@
import pytest
import context
import config as cfg

def pytest_addoption(parser):
parser.addoption(
"--device",
default="cpu",
help="choose device",
)

def pytest_configure(config):
device= config.getoption("--device")
cfg.global_args.device= device
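A hedged usage note (the exact invocation is an assumption, not part of the diff): with this conftest.py, the benchmarks below can presumably be run on an accelerator via something like `pytest benchmarks/ --device cuda:0`; the option value is simply copied into `cfg.global_args.device`, which the benchmark files read when allocating their random test tensors.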
3 changes: 3 additions & 0 deletions benchmarks/context.py
@@ -0,0 +1,3 @@
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
68 changes: 68 additions & 0 deletions benchmarks/test_energy_j1j2trgl.py
@@ -0,0 +1,68 @@
import context
import pytest
import torch
import config as cfg
from ipeps.ipeps import IPEPS
from ctm.generic.env import ENV, init_random
from models.spin_triangular import J1J2J4_1SITE, eval_nn_per_site, eval_nnn_per_site, eval_nn_and_chirality_per_site

import logging
logging.basicConfig(filename=f"{__file__}.log", filemode='w', level=logging.INFO)

test_dims=[(3,27), (3,54), (4,32)]


@pytest.mark.parametrize("dims",test_dims)
@pytest.mark.parametrize("unroll",[True,False])
def test_profile_j1j2_loop_oe_semimanual(dims, unroll, benchmark):
D,X= dims

state= IPEPS({(0,0): torch.rand((2,)+(D,)*4,\
dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)-0.5}, lX=1, lY=1)
env= ENV(X, state)
init_random(env)

model= J1J2J4_1SITE(phys_dim=2, j1=1.0, j2=1.0, j4=0, jchi=0, global_args=cfg.global_args)

def test_f():
nn_h_v,nn_diag= eval_nn_per_site((0,0),state,env,model.R,model.R@model.R,model.SS,model.SS)
nnn= eval_nnn_per_site((0,0),state,env,None,None,model.SS,looped=unroll,use_checkpoint=False)

benchmark.pedantic(test_f, args=(),\
iterations=1, rounds=2, warmup_rounds=1)

@pytest.mark.parametrize("dims",test_dims)
@pytest.mark.parametrize("unroll",[True,False])
def test_profile_j1j2jX_loop_oe_semimanual(dims, unroll, benchmark):
D,X= dims

state= IPEPS({(0,0): torch.rand((2,)+(D,)*4,\
dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)-0.5}, lX=1, lY=1)
env= ENV(X, state)
init_random(env)

model= J1J2J4_1SITE(phys_dim=2, j1=1.0, j2=1.0, j4=0, jchi=0, global_args=cfg.global_args)

def test_f():
nnn= eval_nnn_per_site((0,0),state,env,None,None,model.SS,looped=unroll,use_checkpoint=False)
nn_h_v,nn_diag,chi= eval_nn_and_chirality_per_site((0,0),state,env,\
model.R,model.R@model.R,model.SS,model.SS,model.h_chi,looped=unroll,use_checkpoint=False)

benchmark.pedantic(test_f, args=(),\
iterations=1, rounds=2, warmup_rounds=1)

@pytest.mark.parametrize("dims",test_dims)
@pytest.mark.parametrize("unroll",[True,False])
def test_profile_rdm2x3_loop_manual(dims, unroll, benchmark):
D,X= dims

state= IPEPS({(0,0): torch.rand((2,)+(D,)*4,\
dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)-0.5}, lX=1, lY=1)
env= ENV(X, state)
init_random(env)

model= J1J2J4_1SITE(phys_dim=2, j1=1.0, j2=1.0, j4=0, jchi=0, global_args=cfg.global_args)

benchmark.pedantic(model.energy_1x3, args=(state,env,-1,unroll,\
cfg.ctm_args,cfg.global_args),\
iterations=1, rounds=2, warmup_rounds=1)
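For context: the benchmark files added in this commit use the `benchmark` fixture from the pytest-benchmark plugin, which is not installed by the workflow shown above as far as this diff goes. `benchmark.pedantic(...)` runs the target callable with explicit control over `iterations`, `rounds`, and `warmup_rounds` instead of relying on the plugin's automatic calibration.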
Empty file added benchmarks/test_rdm_2x2
56 changes: 56 additions & 0 deletions benchmarks/test_rdm_2x3.py
@@ -0,0 +1,56 @@
import context
import pytest
import torch
import config as cfg
from ipeps.ipeps import IPEPS
from ctm.generic.env import ENV, init_random
from ctm.generic import rdm_mc

import logging
logging.basicConfig(filename=f"{__file__}.log", filemode='w', level=logging.INFO)

test_dims=[(3,27), (3,54), (4,32)]


@pytest.mark.parametrize("dims",test_dims)
@pytest.mark.parametrize("open_inds",[[0,1,2,3,4,5]])
@pytest.mark.parametrize("unroll",[True,False])
def test_profile_rdm2x3_loop_oe(dims, open_inds, unroll, benchmark):
D,X= dims

state= IPEPS({(0,0): torch.rand((2,)+(D,)*4,\
dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)-0.5}, lX=1, lY=1)
env= ENV(X, state)
init_random(env)

print(f"{dims} {unroll}")
benchmark.pedantic(rdm_mc.rdm2x3_loop_oe, args=((0,0), state, env, open_inds, unroll),\
iterations=1, rounds=2, warmup_rounds=1)

@pytest.mark.parametrize("dims",test_dims)
@pytest.mark.parametrize("open_inds",[[0,1,2,3,4,5]])
@pytest.mark.parametrize("unroll",[True,False])
def test_profile_rdm2x3_loop_oe_semimanual(dims, open_inds, unroll, benchmark):
D,X= dims

state= IPEPS({(0,0): torch.rand((2,)+(D,)*4,\
dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)-0.5}, lX=1, lY=1)
env= ENV(X, state)
init_random(env)

print(f"{dims} {unroll}")
benchmark.pedantic(rdm_mc.rdm2x3_loop_oe_semimanual, args=((0,0), state, env, open_inds, unroll),\
iterations=1, rounds=2, warmup_rounds=1)


@pytest.mark.parametrize("dims",test_dims)
def test_profile_rdm2x3_loop_manual(dims, benchmark):
D,X= dims

state= IPEPS({(0,0): torch.rand((2,)+(D,)*4,\
dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)-0.5}, lX=1, lY=1)
env= ENV(X, state)
init_random(env)

benchmark.pedantic(rdm_mc.rdm2x3_loop, args=((0,0), state, env),\
iterations=1, rounds=2, warmup_rounds=1)
39 changes: 39 additions & 0 deletions benchmarks/test_rdm_2x3_trglnnn.py
@@ -0,0 +1,39 @@
import context
import pytest
import torch
import config as cfg
from ipeps.ipeps import IPEPS
from ctm.generic.env import ENV, init_random
from ctm.generic import rdm_mc

test_dims=[(3,27), (3,54), (4,32), (4,64)]

@pytest.mark.parametrize("dims",test_dims)
@pytest.mark.parametrize("open_inds",[[2,3]])
@pytest.mark.parametrize("unroll",[True,False])
def test_profile_rdm2x3_loop_oe(dims, open_inds, unroll, benchmark):
D,X= dims

state= IPEPS({(0,0): torch.rand((2,)+(D,)*4,\
dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)-0.5}, lX=1, lY=1)
env= ENV(X, state)
init_random(env)

print(f"{dims} {unroll}")
benchmark.pedantic(rdm_mc.rdm2x3_loop_oe, args=((0,0), state, env, open_inds, unroll),\
iterations=1, rounds=2, warmup_rounds=1)

@pytest.mark.parametrize("dims",test_dims)
@pytest.mark.parametrize("open_inds",[[2,3]])
@pytest.mark.parametrize("unroll",[True,False])
def test_profile_rdm2x3_loop_oe_semimanual(dims, open_inds, unroll, benchmark):
D,X= dims

state= IPEPS({(0,0): torch.rand((2,)+(D,)*4,\
dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)-0.5}, lX=1, lY=1)
env= ENV(X, state)
init_random(env)

print(f"{dims} {unroll}")
benchmark.pedantic(rdm_mc.rdm2x3_loop_oe_semimanual, args=((0,0), state, env, open_inds, unroll),\
iterations=1, rounds=2, warmup_rounds=1)
84 changes: 84 additions & 0 deletions benchmarks/test_rdm_2x3_trglringex.py
@@ -0,0 +1,84 @@
import context
import pytest
import torch
import functools
from time import perf_counter
import config as cfg
from ipeps.ipeps import IPEPS
from ctm.generic.env import ENV, init_random
from ctm.generic import rdm_mc

import logging
logging.basicConfig(filename=f"{__file__}.log", filemode='w', level=logging.INFO)

test_dims=[(3,27), (3,54), (4,32), (4,64)]

def optional_cuda_measure(tag):
def _inner_optional_cuda_measure(f):
@functools.wraps(f)
def _wrap(*args,**kwargs):
if not cfg.global_args.device=='cpu': torch.cuda.synchronize()
t0= perf_counter()
res= f(*args,**kwargs)
if not cfg.global_args.device=='cpu': torch.cuda.synchronize()
t1= perf_counter()
logging.info(f"{tag} {t1-t0} [s]")
return res
return _wrap
return _inner_optional_cuda_measure

@pytest.mark.parametrize("dims",test_dims)
@pytest.mark.parametrize("open_inds",[[1,2,3,4]])
@pytest.mark.parametrize("unroll",[True,False])
def test_profile_rdm2x3_loop_oe(dims, open_inds, unroll, benchmark):
D,X= dims

state= IPEPS({(0,0): torch.rand((2,)+(D,)*4,\
dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)-0.5}, lX=1, lY=1)
env= ENV(X, state)
init_random(env)

@optional_cuda_measure(f"rdm2x3_loop_oe{dims} {open_inds} {unroll}")
def test_f():
rdm_mc.rdm2x3_loop_oe((0,0), state, env, open_inds, unroll)

print(f"{dims} {unroll}")
benchmark.pedantic(test_f, args=(),\
iterations=1, rounds=2, warmup_rounds=1)

@pytest.mark.parametrize("dims",test_dims)
@pytest.mark.parametrize("open_inds",[[1,2,3,4]])
@pytest.mark.parametrize("unroll",[True,False])
def test_profile_rdm2x3_loop_oe_semimanual(dims, open_inds, unroll, benchmark):
D,X= dims

state= IPEPS({(0,0): torch.rand((2,)+(D,)*4,\
dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)-0.5}, lX=1, lY=1)
env= ENV(X, state)
init_random(env)

@optional_cuda_measure(f"rdm2x3_loop_oe_semimanual{dims} {open_inds} {unroll}")
def test_f():
rdm_mc.rdm2x3_loop_oe_semimanual((0,0), state, env, open_inds, unroll)

print(f"{dims} {unroll}")
benchmark.pedantic(test_f, args=(),\
iterations=1, rounds=2, warmup_rounds=1)


@pytest.mark.parametrize("dims",test_dims)
def test_profile_rdm2x3_loop_trglringex_manual(dims, benchmark):
D,X= dims

state= IPEPS({(0,0): torch.rand((2,)+(D,)*4,\
dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)-0.5}, lX=1, lY=1)
env= ENV(X, state)
init_random(env)

@optional_cuda_measure(f"rdm2x3_loop_trglringex_manual_{dims}")
def test_f():
rdm_mc.rdm2x3_loop_trglringex_manual((0,0), state, env)


benchmark.pedantic(test_f, args=(),\
iterations=1, rounds=2, warmup_rounds=1)
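The `optional_cuda_measure` decorator defined above calls `torch.cuda.synchronize()` immediately before and after the wrapped call whenever the configured device is not the CPU. This matters because CUDA kernels are launched asynchronously: without the synchronization, `perf_counter` would time only the kernel launches rather than their actual execution.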
7 changes: 7 additions & 0 deletions config.py
@@ -199,6 +199,8 @@ class PEPSARGS():
def __init__(self):
self.build_dl= True
self.build_dl_open= False
self.quasi_gauge_max_iter= 10**6
self.quasi_gauge_tol= 1.0e-8

def __str__(self):
res=type(self).__name__+"\n"
@@ -256,6 +258,8 @@ class CTMARGS():
:vartype projector_eps_multiplet: float
:ivar projector_multiplet_abstol: absolute threshold for spectral values to be considered in multiplets
:vartype projector_multiplet_abstol: float
:ivar randomize_ctm_move_sequence: If ``True``, then ``ctm_move_sequence`` is randomized in each optimization step
:vartype randomize_ctm_move_sequence: bool
:ivar ctm_move_sequence: sequence of directional moves within single CTM iteration. The possible
directions are encoded as tuples (int,int)
@@ -329,6 +333,7 @@ def __init__(self):
self.projector_multiplet_abstol = 1.0e-14
self.ad_decomp_reg= 1.0e-12
self.ctm_move_sequence = [(0,-1), (-1,0), (0,1), (1,0)]
self.randomize_ctm_move_sequence = False
self.ctm_force_dl = False
self.ctm_logging = False
self.verbosity_initialization = 0
@@ -342,6 +347,7 @@ def __init__(self):
self.fwd_checkpoint_projectors = False
self.fwd_checkpoint_absorb = False
self.fwd_checkpoint_move = False
self.fwd_checkpoint_loop_rdm = False

def __str__(self):
res=type(self).__name__+"\n"
@@ -419,6 +425,7 @@ def __init__(self):
self.tolerance_grad= 1e-5
self.tolerance_change= 1e-9
self.opt_ctm_reinit= True
self.env_sens_scale= 10.0
self.line_search= "default"
self.line_search_ctm_reinit= True
self.line_search_svd_method= 'DEFAULT'
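A minimal sketch of how the new `randomize_ctm_move_sequence` flag could be consumed, assuming (as the docstring above states) that it only shuffles `ctm_move_sequence` once per optimization step; the actual peps-torch code path may differ:

import random

def get_move_sequence(ctm_args):
    # copy so the configured default order is never mutated in place
    seq = list(ctm_args.ctm_move_sequence)
    if getattr(ctm_args, "randomize_ctm_move_sequence", False):
        random.shuffle(seq)  # fresh directional order for this CTM run
    return seq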
(diffs for the remaining changed files are not shown)
