diff --git a/.github/workflows/full_testing.yml b/.github/workflows/full_testing.yml
index a0b90d2724d3..ad4093faffa3 100644
--- a/.github/workflows/full_testing.yml
+++ b/.github/workflows/full_testing.yml
@@ -7,13 +7,14 @@ on:  # yamllint disable-line rule:truthy
 jobs:
 
   pytest:
+    if: github.repository == 'pyg-team/pytorch_geometric'
     runs-on: ${{ matrix.os }}
 
     strategy:
       fail-fast: false
       matrix:
         os: [ubuntu-latest, windows-latest]
-        python-version: [3.7, 3.9]
+        python-version: [3.7, 3.8, 3.9, '3.10']
         torch-version: [1.10.0]
         include:
           - torch-version: 1.10.0
@@ -44,6 +45,8 @@ jobs:
       - name: Run tests
         run: |
           FULL_TEST=1 pytest --cov --cov-report=xml
+        shell:
+          bash
 
       - name: Upload coverage
         uses: codecov/codecov-action@v2
diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml
index eeb8341a6644..a08037c3e99a 100644
--- a/.github/workflows/testing.yml
+++ b/.github/workflows/testing.yml
@@ -14,8 +14,8 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest, windows-latest]
-        python-version: [3.7, 3.9]
+        os: [ubuntu-latest]
+        python-version: [3.9]
         torch-version: [1.10.0, 1.11.0]
         include:
           - torch-version: 1.10.0
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index bca1b9bf2b1b..5d6c49c9d592 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -80,12 +80,22 @@ Everytime you send a Pull Request, your commit will be built and checked against
 
    If you do not want to format your code manually, we recommend to use [`yapf`](https://github.com/google/yapf).
 
-2. Ensure that the entire test suite passes and that code coverage roughly stays the same. Please feel encouraged to provide a test with your submitted code.
+2. Ensure that the entire test suite passes and that code coverage roughly stays the same.
+   Please feel encouraged to provide a test with your submitted code.
+   To test, either run
 
    ```bash
   pytest --cov
   ```
 
+   or
+
+   ```bash
+   FULL_TEST=1 pytest --cov
+   ```
+
+   (which runs a set of additional but time-consuming tests) depending on your needs.
+
 ## Building Documentation
 
 To build the documentation:
diff --git a/test/conftest.py b/test/conftest.py
new file mode 100644
index 000000000000..b2fde9cd1d1f
--- /dev/null
+++ b/test/conftest.py
@@ -0,0 +1,47 @@
+import functools
+import os.path as osp
+import shutil
+
+import pytest
+
+from torch_geometric.data import Dataset
+
+
+def load_dataset(root: str, name: str, *args, **kwargs) -> Dataset:
+    r"""Returns a variety of datasets according to :obj:`name`."""
+    if 'karate' in name.lower():
+        from torch_geometric.datasets import KarateClub
+        return KarateClub(*args, **kwargs)
+    if name.lower() in ['cora', 'citeseer', 'pubmed']:
+        from torch_geometric.datasets import Planetoid
+        path = osp.join(root, 'Planetoid', name)
+        return Planetoid(path, name, *args, **kwargs)
+    if name in ['BZR', 'ENZYMES', 'IMDB-BINARY', 'MUTAG']:
+        from torch_geometric.datasets import TUDataset
+        path = osp.join(root, 'TUDataset')
+        return TUDataset(path, name, *args, **kwargs)
+    if name in ['ego-facebook', 'soc-Slashdot0811', 'wiki-vote']:
+        from torch_geometric.datasets import SNAPDataset
+        path = osp.join(root, 'SNAPDataset')
+        return SNAPDataset(path, name, *args, **kwargs)
+    if name.lower() in ['bashapes']:
+        from torch_geometric.datasets import BAShapes
+        return BAShapes(*args, **kwargs)
+    if name.lower() in ['dblp']:
+        from torch_geometric.datasets import DBLP
+        path = osp.join(root, 'DBLP')
+        return DBLP(path, *args, **kwargs)
+    if name in ['citationCiteseer', 'illc1850']:
+        from torch_geometric.datasets import SuiteSparseMatrixCollection
+        path = osp.join(root, 'SuiteSparseMatrixCollection')
+        return SuiteSparseMatrixCollection(path, name=name, *args, **kwargs)
+
+    raise NotImplementedError
+
+
+@pytest.fixture(scope='session')
+def get_dataset():
+    root = osp.join('/', 'tmp', 'pyg_test_datasets')
+    yield functools.partial(load_dataset, root)
+    if osp.exists(root):
+        shutil.rmtree(root)
diff --git a/test/data/test_lightning_datamodule.py b/test/data/test_lightning_datamodule.py
index ad142977d4a4..0b52c3579fcb 100644
--- a/test/data/test_lightning_datamodule.py
+++ b/test/data/test_lightning_datamodule.py
@@ -1,8 +1,4 @@
 import math
-import os.path as osp
-import random
-import shutil
-import sys
 import warnings
 
 import pytest
@@ -10,7 +6,6 @@ import torch.nn.functional as F
 
 from torch_geometric.data import LightningDataset, LightningNodeData
-from torch_geometric.datasets import DBLP, Planetoid, TUDataset
 from torch_geometric.nn import global_mean_pool
 
 try:
@@ -61,15 +56,13 @@ def configure_optimizers(self):
 @pytest.mark.skipif(no_pytorch_lightning, reason='PL not available')
 @pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA not available')
 @pytest.mark.parametrize('strategy', [None, 'ddp_spawn'])
-def test_lightning_dataset(strategy):
+def test_lightning_dataset(get_dataset, strategy):
     import pytorch_lightning as pl
 
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    dataset = TUDataset(root, name='MUTAG').shuffle()
+    dataset = get_dataset(name='MUTAG').shuffle()
     train_dataset = dataset[:50]
     val_dataset = dataset[50:80]
     test_dataset = dataset[80:90]
-    shutil.rmtree(root)
 
     gpus = 1 if strategy is None else torch.cuda.device_count()
     if strategy == 'ddp_spawn':
@@ -146,15 +139,13 @@ def configure_optimizers(self):
 @pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA not available')
 @pytest.mark.parametrize('loader', ['full', 'neighbor'])
 @pytest.mark.parametrize('strategy', [None, 'ddp_spawn'])
-def test_lightning_node_data(strategy, loader):
+def test_lightning_node_data(get_dataset, strategy, loader):
     import pytorch_lightning as pl
 
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    dataset = Planetoid(root, name='Cora')
+    dataset = get_dataset(name='Cora')
     data = dataset[0]
     data_repr = ('Data(x=[2708, 1433], edge_index=[2, 10556], y=[2708], '
                  'train_mask=[2708], val_mask=[2708], test_mask=[2708])')
-    shutil.rmtree(root)
 
     model = LinearNodeModule(dataset.num_features, dataset.num_classes)
@@ -237,13 +228,11 @@ def configure_optimizers(self):
 
 @pytest.mark.skipif(no_pytorch_lightning, reason='PL not available')
 @pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA not available')
-def test_lightning_hetero_node_data():
+def test_lightning_hetero_node_data(get_dataset):
     import pytorch_lightning as pl
 
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    dataset = DBLP(root)
+    dataset = get_dataset(name='DBLP')
     data = dataset[0]
-    shutil.rmtree(root)
 
     model = LinearHeteroNodeModule(data['author'].num_features,
                                    int(data['author'].y.max()) + 1)
diff --git a/test/datasets/test_ba_shapes.py b/test/datasets/test_ba_shapes.py
index 2e7836dd9cee..00168abb4b2a 100644
--- a/test/datasets/test_ba_shapes.py
+++ b/test/datasets/test_ba_shapes.py
@@ -1,10 +1,6 @@
-from torch_geometric.datasets import BAShapes
-
-
-def test_ba_shapes():
-    dataset = BAShapes()
+def test_ba_shapes(get_dataset):
+    dataset = get_dataset(name='BAShapes')
     assert str(dataset) == 'BAShapes()'
-    assert len(dataset) == 1
     assert dataset.num_features == 10
     assert dataset.num_classes == 4
diff --git a/test/datasets/test_bzr.py b/test/datasets/test_bzr.py
index 54f55f5fb365..14bbbeda4f9c 100644
--- a/test/datasets/test_bzr.py
+++ b/test/datasets/test_bzr.py
@@ -1,26 +1,21 @@
-import os.path as osp
-import random
-import shutil
-import sys
+from torch_geometric.testing import onlyFullTest
 
-from torch_geometric.datasets import TUDataset
-
-
-def test_bzr():
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    dataset = TUDataset(root, 'BZR')
+@onlyFullTest
+def test_bzr(get_dataset):
+    dataset = get_dataset(name='BZR')
     assert len(dataset) == 405
     assert dataset.num_features == 53
     assert dataset.num_node_labels == 53
     assert dataset.num_node_attributes == 0
     assert dataset.num_classes == 2
-    assert dataset.__repr__() == 'BZR(405)'
+    assert str(dataset) == 'BZR(405)'
     assert len(dataset[0]) == 3
 
-    dataset = TUDataset(root, 'BZR', use_node_attr=True)
+
+@onlyFullTest
+def test_bzr_with_node_attr(get_dataset):
+    dataset = get_dataset(name='BZR', use_node_attr=True)
     assert dataset.num_features == 56
     assert dataset.num_node_labels == 53
     assert dataset.num_node_attributes == 3
-
-    shutil.rmtree(root)
diff --git a/test/datasets/test_enzymes.py b/test/datasets/test_enzymes.py
index 43ddc83f156b..7b7116a1ec37 100644
--- a/test/datasets/test_enzymes.py
+++ b/test/datasets/test_enzymes.py
@@ -1,24 +1,16 @@
-import os.path as osp
-import random
-import shutil
-import sys
-
 import pytest
 import torch
 
-from torch_geometric.datasets import TUDataset
 from torch_geometric.loader import DataListLoader, DataLoader, DenseDataLoader
 from torch_geometric.transforms import ToDense
 
 
-def test_enzymes():
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    dataset = TUDataset(root, 'ENZYMES')
-
+def test_enzymes(get_dataset):
+    dataset = get_dataset(name='ENZYMES')
     assert len(dataset) == 600
     assert dataset.num_features == 3
     assert dataset.num_classes == 6
-    assert dataset.__repr__() == 'ENZYMES(600)'
+    assert str(dataset) == 'ENZYMES(600)'
     assert len(dataset[0]) == 3
 
     assert len(dataset.shuffle()) == 600
@@ -63,18 +55,14 @@ def test_enzymes():
     assert list(data.mask.size()) == [600, 126]
     assert list(data.y.size()) == [600, 1]
 
-    dataset = TUDataset(root, 'ENZYMES', use_node_attr=True)
+
+def test_enzymes_with_node_attr(get_dataset):
+    dataset = get_dataset(name='ENZYMES', use_node_attr=True)
     assert dataset.num_node_features == 21
     assert dataset.num_features == 21
     assert dataset.num_edge_features == 0
 
-    shutil.rmtree(root)
-
-
-def test_cleaned_enzymes():
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    dataset = TUDataset(root, 'ENZYMES', cleaned=True)
+def test_cleaned_enzymes(get_dataset):
+    dataset = get_dataset(name='ENZYMES', cleaned=True)
     assert len(dataset) == 595
-
-    shutil.rmtree(root)
diff --git a/test/datasets/test_imdb_binary.py b/test/datasets/test_imdb_binary.py
index 774b2529aad1..56976a13e4a8 100644
--- a/test/datasets/test_imdb_binary.py
+++ b/test/datasets/test_imdb_binary.py
@@ -1,15 +1,9 @@
-import os.path as osp
-import random
-import shutil
-import sys
+from torch_geometric.testing import onlyFullTest
 
-from torch_geometric.datasets import TUDataset
-
-
-def test_imdb_binary():
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    dataset = TUDataset(root, 'IMDB-BINARY')
+@onlyFullTest
+def test_imdb_binary(get_dataset):
+    dataset = get_dataset(name='IMDB-BINARY')
     assert len(dataset) == 1000
     assert dataset.num_features == 0
     assert dataset.num_classes == 2
@@ -20,5 +14,3 @@ def test_imdb_binary():
     assert data.edge_index.size() == (2, 146)
     assert data.y.size() == (1, )
     assert data.num_nodes == 20
-
-    shutil.rmtree(root)
diff --git a/test/datasets/test_karate.py b/test/datasets/test_karate.py
index 5cb7b0068898..f6fc70d1b895 100644
--- a/test/datasets/test_karate.py
+++ b/test/datasets/test_karate.py
@@ -1,13 +1,9 @@
-from torch_geometric.datasets import KarateClub
-
-
-def test_karate():
-    dataset = KarateClub()
-
+def test_karate(get_dataset):
+    dataset = get_dataset(name='KarateClub')
+    assert str(dataset) == 'KarateClub()'
     assert len(dataset) == 1
     assert dataset.num_features == 34
     assert dataset.num_classes == 4
-    assert dataset.__repr__() == 'KarateClub()'
 
     assert len(dataset[0]) == 4
     assert dataset[0].edge_index.size() == (2, 156)
diff --git a/test/datasets/test_mutag.py b/test/datasets/test_mutag.py
index 89dcb59557a7..d7d556101f20 100644
--- a/test/datasets/test_mutag.py
+++ b/test/datasets/test_mutag.py
@@ -1,24 +1,14 @@
-import os.path as osp
-import random
-import shutil
-import sys
-
-from torch_geometric.datasets import TUDataset
-
-
-def test_mutag():
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    dataset = TUDataset(root, 'MUTAG')
-
+def test_mutag(get_dataset):
+    dataset = get_dataset(name='MUTAG')
     assert len(dataset) == 188
     assert dataset.num_features == 7
     assert dataset.num_classes == 2
-    assert dataset.__repr__() == 'MUTAG(188)'
+    assert str(dataset) == 'MUTAG(188)'
     assert len(dataset[0]) == 4
     assert dataset[0].edge_attr.size(1) == 4
 
-    dataset = TUDataset(root, 'MUTAG', use_node_attr=True)
-    assert dataset.num_features == 7
 
-    shutil.rmtree(root)
+def test_mutag_with_node_attr(get_dataset):
+    dataset = get_dataset(name='MUTAG', use_node_attr=True)
+    assert dataset.num_features == 7
diff --git a/test/datasets/test_planetoid.py b/test/datasets/test_planetoid.py
index 1dc5369ce3e9..3cc2634f442c 100644
--- a/test/datasets/test_planetoid.py
+++ b/test/datasets/test_planetoid.py
@@ -1,19 +1,12 @@
-import os.path as osp
-import random
-import shutil
-import sys
-
-from torch_geometric.datasets import Planetoid
 from torch_geometric.loader import DataLoader
 
 
-def test_citeseer():
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    dataset = Planetoid(root, 'Citeseer')
+def test_citeseer(get_dataset):
+    dataset = get_dataset(name='CiteSeer')
     loader = DataLoader(dataset, batch_size=len(dataset))
 
     assert len(dataset) == 1
-    assert dataset.__repr__() == 'Citeseer()'
+    assert dataset.__repr__() == 'CiteSeer()'
 
     for data in loader:
         assert data.num_graphs == 1
@@ -35,19 +28,21 @@ def test_citeseer():
         assert not data.has_self_loops()
         assert data.is_undirected()
 
-    dataset = Planetoid(root, 'Citeseer', split='full')
+
+def test_citeseer_with_full_split(get_dataset):
+    dataset = get_dataset(name='CiteSeer', split='full')
     data = dataset[0]
     assert data.val_mask.sum() == 500
     assert data.test_mask.sum() == 1000
     assert data.train_mask.sum() == data.num_nodes - 1500
     assert (data.train_mask & data.val_mask & data.test_mask).sum() == 0
 
-    dataset = Planetoid(root, 'Citeseer', split='random',
-                        num_train_per_class=11, num_val=29, num_test=41)
+
+def test_citeseer_with_random_split(get_dataset):
+    dataset = get_dataset(name='CiteSeer', split='random',
+                          num_train_per_class=11, num_val=29, num_test=41)
     data = dataset[0]
     assert data.train_mask.sum() == dataset.num_classes * 11
     assert data.val_mask.sum() == 29
     assert data.test_mask.sum() == 41
     assert (data.train_mask & data.val_mask & data.test_mask).sum() == 0
-
-    shutil.rmtree(root)
diff --git a/test/datasets/test_snap_dataset.py b/test/datasets/test_snap_dataset.py
index f1409820b5e0..f360ae660843 100644
--- a/test/datasets/test_snap_dataset.py
+++ b/test/datasets/test_snap_dataset.py
@@ -1,18 +1,22 @@
-import os.path as osp
-import random
-import shutil
-import sys
+from torch_geometric.testing import onlyFullTest
 
-import pytest
 
-from torch_geometric.datasets import SNAPDataset
+@onlyFullTest
+def test_ego_facebook_snap_dataset(get_dataset):
+    dataset = get_dataset(name='ego-facebook')
+    assert str(dataset) == 'SNAP-ego-facebook(10)'
+    assert len(dataset) == 10
 
-@pytest.mark.skipif(True, reason="'https://snap.stanford.edu' not available")
-def test_snap_dataset():
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
+@onlyFullTest
+def test_soc_slashdot_snap_dataset(get_dataset):
+    dataset = get_dataset(name='soc-Slashdot0811')
+    assert str(dataset) == 'SNAP-soc-slashdot0811(1)'
+    assert len(dataset) == 1
 
-    for name in ['ego-facebook', 'soc-Slashdot0811', 'wiki-vote']:
-        SNAPDataset(root, name)
-        shutil.rmtree(root)
+
+@onlyFullTest
+def test_wiki_vote_snap_dataset(get_dataset):
+    dataset = get_dataset(name='wiki-vote')
+    assert str(dataset) == 'SNAP-wiki-vote(1)'
+    assert len(dataset) == 1
diff --git a/test/datasets/test_suite_sparse.py b/test/datasets/test_suite_sparse.py
index 4c5a7b1d89b3..95b89ea4c129 100644
--- a/test/datasets/test_suite_sparse.py
+++ b/test/datasets/test_suite_sparse.py
@@ -1,16 +1,17 @@
-import os.path as osp
-import random
-import shutil
-import sys
+from torch_geometric.testing import onlyFullTest
 
-from torch_geometric.datasets import SuiteSparseMatrixCollection
 
+@onlyFullTest
+def test_suite_sparse_dataset(get_dataset):
+    dataset = get_dataset(group='DIMACS10', name='citationCiteseer')
+    assert str(dataset) == ('SuiteSparseMatrixCollection('
+                            'group=DIMACS10, name=citationCiteseer)')
+    assert len(dataset) == 1
 
-def test_suite_sparse_dataset():
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    for group, name in [('DIMACS10', 'citationCiteseer'), ('HB', 'illc1850')]:
-        dataset = SuiteSparseMatrixCollection(root, group, name)
-        assert len(dataset) == 1
-
-    shutil.rmtree(root)
+
+@onlyFullTest
+def test_illc1850_suite_sparse_dataset(get_dataset):
+    dataset = get_dataset(group='HB', name='illc1850')
+    assert str(dataset) == ('SuiteSparseMatrixCollection('
+                            'group=HB, name=illc1850)')
+    assert len(dataset) == 1
diff --git a/test/graphgym/test_graphgym.py b/test/graphgym/test_graphgym.py
index 447290209fb2..b1593df8c289 100644
--- a/test/graphgym/test_graphgym.py
+++ b/test/graphgym/test_graphgym.py
@@ -41,7 +41,8 @@ def test_run_single_graphgym(skip_train_eval, use_trivial_metric):
     load_cfg(cfg, args)
     cfg.out_dir = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
     cfg.run_dir = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    cfg.dataset.dir = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
+    cfg.dataset.dir = osp.join('/', 'tmp', 'pyg_test_datasets', 'Planetoid')
+
     dump_cfg(cfg)
 
     set_printing()
@@ -95,4 +96,3 @@ def test_run_single_graphgym(skip_train_eval, use_trivial_metric):
     agg_runs(set_agg_dir(cfg.out_dir, args.cfg_file), cfg.metric_best)
 
     shutil.rmtree(cfg.out_dir)
-    shutil.rmtree(cfg.dataset.dir)
diff --git a/test/loader/test_cluster.py b/test/loader/test_cluster.py
index c4e01b19ae6b..1e9e0bfd8155 100644
--- a/test/loader/test_cluster.py
+++ b/test/loader/test_cluster.py
@@ -8,7 +8,7 @@
 try:
     rowptr = torch.tensor([0, 1])
     col = torch.tensor([0])
-    torch.ops.torch_sparse.partition(rowptr, col, None, 1)
+    torch.ops.torch_sparse.partition(rowptr, col, None, 1, True)
     with_metis = True
 except RuntimeError:
     with_metis = False
diff --git a/test/loader/test_hgt_loader.py b/test/loader/test_hgt_loader.py
index 50c862449348..79b1cec612f6 100644
--- a/test/loader/test_hgt_loader.py
+++ b/test/loader/test_hgt_loader.py
@@ -1,14 +1,8 @@
-import os.path as osp
-import random
-import shutil
-import sys
-
 import numpy as np
 import torch
 from torch_sparse import SparseTensor
 
 from torch_geometric.data import HeteroData
-from torch_geometric.datasets import Planetoid
 from torch_geometric.loader import HGTLoader
 from torch_geometric.nn import GraphConv, to_hetero
 from torch_geometric.utils import k_hop_subgraph
@@ -129,12 +123,11 @@ def test_hgt_loader():
     # Test for isolated nodes (there shouldn't exist any):
     n_id = torch.cat([batch['paper'].x, batch['author'].x])
     row, col, _ = full_adj[n_id, n_id].coo()
-    assert torch.cat([row, col]).unique().numel() == 60
+    assert torch.cat([row, col]).unique().numel() >= 59
 
 
-def test_hgt_loader_on_cora():
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    dataset = Planetoid(root, 'Cora')
+def test_hgt_loader_on_cora(get_dataset):
+    dataset = get_dataset(name='Cora')
     data = dataset[0]
     data.edge_weight = torch.rand(data.num_edges)
 
@@ -181,8 +174,3 @@ def forward(self, x, edge_index, edge_weight):
     out2 = hetero_model(hetero_batch.x_dict, hetero_batch.edge_index_dict,
                         hetero_batch.edge_weight_dict)['paper'][:batch_size]
     assert torch.allclose(out1, out2, atol=1e-6)
-
-    try:
-        shutil.rmtree(root)
-    except PermissionError:
-        pass
diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py
index 250c71fca1ff..a8034619463e 100644
--- a/test/loader/test_neighbor_loader.py
+++ b/test/loader/test_neighbor_loader.py
@@ -1,15 +1,9 @@
-import os.path as osp
-import random
-import shutil
-import sys
-
 import numpy as np
 import pytest
 import torch
 from torch_sparse import SparseTensor
 
 from torch_geometric.data import Data, HeteroData
-from torch_geometric.datasets import Planetoid
 from torch_geometric.loader import NeighborLoader
 from torch_geometric.nn import GraphConv, to_hetero
 from torch_geometric.utils import k_hop_subgraph
@@ -172,9 +166,8 @@ def test_heterogeneous_neighbor_loader(directed):
 
 
 @pytest.mark.parametrize('directed', [True, False])
-def test_homogeneous_neighbor_loader_on_cora(directed):
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    dataset = Planetoid(root, 'Cora')
+def test_homogeneous_neighbor_loader_on_cora(get_dataset, directed):
+    dataset = get_dataset(name='Cora')
     data = dataset[0]
     data.n_id = torch.arange(data.num_nodes)
     data.edge_weight = torch.rand(data.num_edges)
@@ -214,13 +207,10 @@ def forward(self, x, edge_index, edge_weight):
     out2 = model(batch.x, batch.edge_index, batch.edge_weight)[:batch_size]
     assert torch.allclose(out1, out2, atol=1e-6)
 
-    shutil.rmtree(root)
-
 
 @pytest.mark.parametrize('directed', [True, False])
-def test_heterogeneous_neighbor_loader_on_cora(directed):
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    dataset = Planetoid(root, 'Cora')
+def test_heterogeneous_neighbor_loader_on_cora(get_dataset, directed):
+    dataset = get_dataset(name='Cora')
     data = dataset[0]
     data.edge_weight = torch.rand(data.num_edges)
 
@@ -268,8 +258,3 @@ def forward(self, x, edge_index, edge_weight):
     out2 = hetero_model(hetero_batch.x_dict, hetero_batch.edge_index_dict,
                         hetero_batch.edge_weight_dict)['paper'][:batch_size]
     assert torch.allclose(out1, out2, atol=1e-6)
-
-    try:
-        shutil.rmtree(root)
-    except PermissionError:
-        pass
diff --git a/test/loader/test_neighbor_sampler.py b/test/loader/test_neighbor_sampler.py
index 2bb07f5d4f20..ad89422b6f4e 100644
--- a/test/loader/test_neighbor_sampler.py
+++ b/test/loader/test_neighbor_sampler.py
@@ -1,12 +1,6 @@
-import os.path as osp
-import random
-import shutil
-import sys
-
 import numpy as np
 import torch
 
-from torch_geometric.datasets import Planetoid
 from torch_geometric.loader import NeighborSampler
 from torch_geometric.nn.conv import GATConv, SAGEConv
 from torch_geometric.utils import erdos_renyi_graph
@@ -36,9 +30,8 @@ def test_neighbor_sampler():
     assert len(out) == 3
 
 
-def test_neighbor_sampler_on_cora():
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-    dataset = Planetoid(root, 'Cora')
+def test_neighbor_sampler_on_cora(get_dataset):
+    dataset = get_dataset(name='Cora')
     data = dataset[0]
 
     batch = torch.arange(10)
@@ -96,8 +89,3 @@ def full(self, x, edge_index):
     out1 = model.batch(data.x[n_id], adjs)
     out2 = model.full(data.x, data.edge_index)[batch]
     assert torch.allclose(out1, out2)
-
-    try:
-        shutil.rmtree(root)
-    except PermissionError:
-        pass
diff --git a/test/nn/conv/test_wl_conv.py b/test/nn/conv/test_wl_conv.py
new file mode 100644
index 000000000000..ee7b9c25c897
--- /dev/null
+++ b/test/nn/conv/test_wl_conv.py
@@ -0,0 +1,25 @@
+import torch
+import torch.nn.functional as F
+from torch_sparse import SparseTensor
+
+from torch_geometric.nn import WLConv
+
+
+def test_wl_conv():
+    x1 = torch.tensor([1, 0, 0, 1])
+    x2 = F.one_hot(x1).to(torch.float)
+    edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]])
+    adj_t = SparseTensor.from_edge_index(edge_index).t()
+
+    conv = WLConv()
+    assert str(conv) == 'WLConv()'
+
+    out = conv(x1, edge_index)
+    assert out.tolist() == [0, 1, 1, 0]
+    assert conv(x2, edge_index).tolist() == out.tolist()
+    assert conv(x1, adj_t).tolist() == out.tolist()
+    assert conv(x2, adj_t).tolist() == out.tolist()
+
+    assert conv.histogram(out).tolist() == [[2, 2]]
+    assert torch.allclose(conv.histogram(out, norm=True),
+                          torch.tensor([[0.7071, 0.7071]]))
diff --git a/test/profile/test_profile.py b/test/profile/test_profile.py
index 391b6e74a21c..330edf10d83f 100644
--- a/test/profile/test_profile.py
+++ b/test/profile/test_profile.py
@@ -1,22 +1,14 @@
-import os.path as osp
-import random
-import shutil
-import sys
-
 import pytest
 import torch
 import torch.nn.functional as F
 
-from torch_geometric.datasets import Planetoid
 from torch_geometric.nn import GraphSAGE
 from torch_geometric.profile import get_stats_summary, profileit, timeit
 
 
 @pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA not available')
-def test_profile():
-    root = osp.join('/', 'tmp', str(random.randrange(sys.maxsize)))
-
-    dataset = Planetoid(root, 'PubMed')
+def test_profile(get_dataset):
+    dataset = get_dataset(name='PubMed')
     data = dataset[0].cuda()
     model = GraphSAGE(dataset.num_features, hidden_channels=64, num_layers=3,
                       out_channels=dataset.num_classes).cuda()
@@ -64,5 +56,3 @@ def test(model, x, edge_index, y):
     assert stats_summary.max_active_cuda > 0
     assert stats_summary.min_nvidia_smi_free_cuda > 0
     assert stats_summary.max_nvidia_smi_used_cuda > 0
-
-    shutil.rmtree(root)
diff --git a/torch_geometric/datasets/snap_dataset.py b/torch_geometric/datasets/snap_dataset.py
index 38533e417e1e..ac1dbce5103a 100644
--- a/torch_geometric/datasets/snap_dataset.py
+++ b/torch_geometric/datasets/snap_dataset.py
@@ -66,7 +66,7 @@ def read_ego(files, name):
         x = x_all
 
     idx = pd.read_csv(feat_file, sep=' ', header=None, dtype=str,
-                      usecols=[0], squeeze=True)
+                      usecols=[0]).squeeze()
 
     idx_assoc = {}
     for i, j in enumerate(idx):
@@ -84,9 +84,9 @@ def read_ego(files, name):
 
     try:
         row = pd.read_csv(edges_file, sep=' ', header=None, dtype=str,
-                          usecols=[0], squeeze=True)
+                          usecols=[0]).squeeze()
         col = pd.read_csv(edges_file, sep=' ', header=None, dtype=str,
-                          usecols=[1], squeeze=True)
+                          usecols=[1]).squeeze()
     except:  # noqa
         continue
diff --git a/torch_geometric/testing.py b/torch_geometric/testing.py
index 89b9d70f32dc..def0c6d16b04 100644
--- a/torch_geometric/testing.py
+++ b/torch_geometric/testing.py
@@ -2,10 +2,13 @@
 from typing import Callable
 
 
-def is_full_test():
+def is_full_test() -> bool:
+    r"""Whether to run the full but time-consuming test suite."""
    return os.getenv('FULL_TEST', '0') == '1'
 
 
-def onlyFullTest(func: Callable):
+def onlyFullTest(func: Callable) -> Callable:
+    r"""A decorator to specify that this function belongs to the full test
+    suite."""
     import pytest
     return pytest.mark.skipif(not is_full_test(), reason="Fast test run")(func)
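Usage note (not part of the patch): the sketch below shows how a test module under `test/` would consume the session-scoped `get_dataset` fixture from `test/conftest.py` together with the `onlyFullTest` decorator from `torch_geometric.testing`. The test names are hypothetical; the asserted values correspond to the standard Cora split and are included for illustration only.

```python
# Illustrative sketch only -- not included in this diff.
# `get_dataset` is auto-discovered by pytest from test/conftest.py;
# `onlyFullTest` comes from the updated torch_geometric/testing.py.
from torch_geometric.testing import onlyFullTest


def test_cora_smoke(get_dataset):  # hypothetical test name
    # The fixture downloads/caches under /tmp/pyg_test_datasets and removes
    # that directory once at the end of the pytest session.
    dataset = get_dataset(name='Cora')
    assert len(dataset) == 1


@onlyFullTest  # skipped unless the environment sets FULL_TEST=1
def test_cora_full(get_dataset):  # hypothetical test name
    dataset = get_dataset(name='Cora')
    assert dataset[0].num_nodes == 2708
```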