diff --git a/.gitignore b/.gitignore
index 99b3eab8a..073679e24 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,4 +25,13 @@ data/TSP/*.pkl
data/TSP/*.zip
data/TSP/pyconcorde/
+# COLLAB
+data/COLLAB/*
+dataset/
+
+# CSL
+data/CSL/*.pkl
+data/CSL/*.zip
+data/CSL/*.pt
+
diff --git a/README.md b/README.md
index 63feb5aeb..83b00d1fe 100644
--- a/README.md
+++ b/README.md
@@ -32,17 +32,22 @@
-## 5. Adding a new graph neural network
+## 5. Adding a Message-passing GCN
-[Step-by-step directions](./docs/05_add_gnn.md) to add a GNN to the benchmark.
+[Step-by-step directions](./docs/05_add_mpgcn.md) to add an MP-GCN to the benchmark.
+
+
+## 6. Adding a Weisfeiler-Lehman GNN
+
+[Step-by-step directions](./docs/06_add_wlgnn.md) to add a WL-GNN to the benchmark.
-## 6. Reference
+## 7. Reference
```
@article{dwivedi2020benchmarkgnns,
diff --git a/configs/COLLAB_edge_classification_GAT_40k.json b/configs/COLLAB_edge_classification_GAT_40k.json
new file mode 100644
index 000000000..b95048c51
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GAT_40k.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 1
+ },
+
+ "model": "GAT",
+ "dataset": "OGBL-COLLAB",
+
+ "out_dir": "out/COLLAB_edge_classification/GAT/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 19,
+ "out_dim": 76,
+ "residual": true,
+ "readout": "mean",
+ "n_heads": 4,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false,
+ "layer_type": "dgl"
+ }
+}
\ No newline at end of file
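
Across the GAT configs added in this PR, `out_dim` is always `hidden_dim * n_heads` (19×4=76 here; 20×3=60, 13×3=39, 18×8=144 below), which matches heads being concatenated. A minimal sanity-check sketch in Python — the config path is the file above, and the concatenation convention is inferred from these numbers rather than stated anywhere in the diff:

```python
import json

# Check the inferred GAT convention: concatenating n_heads attention heads
# of width hidden_dim must produce exactly out_dim features.
with open("configs/COLLAB_edge_classification_GAT_40k.json") as f:
    cfg = json.load(f)

net = cfg["net_params"]
assert net["out_dim"] == net["hidden_dim"] * net["n_heads"], \
    "out_dim should equal hidden_dim * n_heads for concatenated heads"
print(cfg["model"], "on", cfg["dataset"], "-> dims consistent")
```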
diff --git a/configs/COLLAB_edge_classification_GAT_edgefeat.json b/configs/COLLAB_edge_classification_GAT_edgefeat.json
new file mode 100644
index 000000000..94f3e854d
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GAT_edgefeat.json
@@ -0,0 +1,39 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 1
+ },
+
+ "model": "GAT",
+ "dataset": "OGBL-COLLAB",
+
+ "out_dir": "out/COLLAB_edge_classification/GAT/edgefeat/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 24
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 20,
+ "out_dim": 60,
+ "residual": true,
+ "readout": "mean",
+ "n_heads": 3,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false,
+ "edge_feat": false,
+ "layer_type": "edgefeat"
+ }
+}
\ No newline at end of file
diff --git a/configs/COLLAB_edge_classification_GAT_edgereprfeat.json b/configs/COLLAB_edge_classification_GAT_edgereprfeat.json
new file mode 100644
index 000000000..6152c5493
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GAT_edgereprfeat.json
@@ -0,0 +1,39 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 1
+ },
+
+ "model": "GAT",
+ "dataset": "OGBL-COLLAB",
+
+ "out_dir": "out/COLLAB_edge_classification/GAT/edgereprfeat/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 24
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 13,
+ "out_dim": 39,
+ "residual": true,
+ "readout": "mean",
+ "n_heads": 3,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false,
+ "edge_feat": false,
+ "layer_type": "edgereprfeat"
+ }
+}
\ No newline at end of file
diff --git a/configs/COLLAB_edge_classification_GAT_isotropic.json b/configs/COLLAB_edge_classification_GAT_isotropic.json
new file mode 100644
index 000000000..f3b07a88e
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GAT_isotropic.json
@@ -0,0 +1,39 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 1
+ },
+
+ "model": "GAT",
+ "dataset": "OGBL-COLLAB",
+
+ "out_dir": "out/COLLAB_edge_classification/GAT/isotropic/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 24
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 20,
+ "out_dim": 60,
+ "residual": true,
+ "readout": "mean",
+ "n_heads": 3,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false,
+ "edge_feat": false,
+ "layer_type": "isotropic"
+ }
+}
\ No newline at end of file
diff --git a/configs/COLLAB_edge_classification_GCN_40k.json b/configs/COLLAB_edge_classification_GCN_40k.json
new file mode 100644
index 000000000..4e4a30ea8
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GCN_40k.json
@@ -0,0 +1,36 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 1
+ },
+
+ "model": "GCN",
+ "dataset": "OGBL-COLLAB",
+
+ "out_dir": "out/COLLAB_edge_classification/GCN/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 74,
+ "out_dim": 74,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false
+ }
+}
\ No newline at end of file
diff --git a/configs/COLLAB_edge_classification_GIN_40k.json b/configs/COLLAB_edge_classification_GIN_40k.json
new file mode 100644
index 000000000..d8a38031e
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GIN_40k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 1
+ },
+
+ "model": "GIN",
+ "dataset": "OGBL-COLLAB",
+
+ "out_dir": "out/COLLAB_edge_classification/GIN/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 60,
+ "residual": true,
+ "readout": "sum",
+ "n_mlp_GIN": 2,
+ "learn_eps_GIN": true,
+ "neighbor_aggr_GIN": "sum",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true
+ }
+}
\ No newline at end of file
diff --git a/configs/COLLAB_edge_classification_GatedGCN_40k.json b/configs/COLLAB_edge_classification_GatedGCN_40k.json
new file mode 100644
index 000000000..b83cd3b87
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GatedGCN_40k.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 3
+ },
+
+ "model": "GatedGCN",
+ "dataset": "OGBL-COLLAB",
+
+ "out_dir": "out/COLLAB_edge_classification/GatedGCN/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 44,
+ "out_dim": 44,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "layer_type": "edgereprfeat",
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/COLLAB_edge_classification_GatedGCN_PE_40k.json b/configs/COLLAB_edge_classification_GatedGCN_PE_40k.json
new file mode 100644
index 000000000..2aa857f93
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GatedGCN_PE_40k.json
@@ -0,0 +1,39 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 3
+ },
+
+ "model": "GatedGCN",
+ "dataset": "OGBL-COLLAB",
+
+ "out_dir": "out/COLLAB_edge_classification/GatedGCN/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 44,
+ "out_dim": 44,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "layer_type": "edgereprfeat",
+ "pos_enc": true,
+ "pos_enc_dim": 40
+ }
+}
\ No newline at end of file
diff --git a/configs/COLLAB_edge_classification_GatedGCN_edgefeat.json b/configs/COLLAB_edge_classification_GatedGCN_edgefeat.json
new file mode 100644
index 000000000..ae8a62064
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GatedGCN_edgefeat.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 3
+ },
+
+ "model": "GatedGCN",
+ "dataset": "OGBL-COLLAB",
+
+ "out_dir": "out/COLLAB_edge_classification/GatedGCN/edgefeat/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 24
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 38,
+ "out_dim": 38,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "layer_type": "edgefeat",
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/COLLAB_edge_classification_GatedGCN_edgereprfeat.json b/configs/COLLAB_edge_classification_GatedGCN_edgereprfeat.json
new file mode 100644
index 000000000..cd5eb3c39
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GatedGCN_edgereprfeat.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 3
+ },
+
+ "model": "GatedGCN",
+ "dataset": "OGBL-COLLAB",
+
+ "out_dir": "out/COLLAB_edge_classification/GatedGCN/edgereprfeat/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 24
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 35,
+ "out_dim": 35,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "layer_type": "edgereprfeat",
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/COLLAB_edge_classification_GatedGCN_isotropic.json b/configs/COLLAB_edge_classification_GatedGCN_isotropic.json
new file mode 100644
index 000000000..c53238263
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GatedGCN_isotropic.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 3
+ },
+
+ "model": "GatedGCN",
+ "dataset": "OGBL-COLLAB",
+
+ "out_dir": "out/COLLAB_edge_classification/GatedGCN/isotropic/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 24
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 48,
+ "out_dim": 48,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "layer_type": "isotropic",
+ "pos_enc": false
+ }
+}
\ No newline at end of file
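
The four GatedGCN COLLAB configs ablate `layer_type`: `isotropic` removes gating so every neighbour contributes equally, `edgefeat` gates each message with a learned per-edge weight, and `edgereprfeat` is the full GatedGCN that also maintains explicit edge representations. A toy dense-matrix contrast of the first two — a deliberate simplification on our part, not the repo's DGL layers (which add residuals, batch norm, and the edge-representation stream):

```python
import torch

def isotropic_step(A, H, W):
    # isotropic aggregation: plain neighbourhood sum, H' = relu(A H W)
    return torch.relu(A @ H @ W)

def edge_gated_step(A, H, W, U, V):
    # anisotropic aggregation: a sigmoid gate per edge rescales each message
    gates = torch.sigmoid((H @ U) @ (H @ V).T) * A
    return torch.relu(gates @ H @ W)

n, d = 5, 8
A = (torch.rand(n, n) < 0.4).float()   # toy adjacency
H = torch.randn(n, d)                  # toy node features
W, U, V = (torch.randn(d, d) for _ in range(3))
print(isotropic_step(A, H, W).shape, edge_gated_step(A, H, W, U, V).shape)
```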
diff --git a/configs/COLLAB_edge_classification_GraphSage_40k.json b/configs/COLLAB_edge_classification_GraphSage_40k.json
new file mode 100644
index 000000000..c4ef77af0
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GraphSage_40k.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 2
+ },
+
+ "model": "GraphSage",
+ "dataset": "OGBL-COLLAB",
+
+
+ "out_dir": "out/COLLAB_edge_classification/GraphSage/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 53,
+ "out_dim": 53,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "sage_aggregator": "maxpool",
+ "layer_type": "isotropic"
+ }
+}
\ No newline at end of file
diff --git a/configs/COLLAB_edge_classification_GraphSage_edgefeat.json b/configs/COLLAB_edge_classification_GraphSage_edgefeat.json
new file mode 100644
index 000000000..75028cfe1
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GraphSage_edgefeat.json
@@ -0,0 +1,39 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 2
+ },
+
+ "model": "GraphSage",
+ "dataset": "OGBL-COLLAB",
+
+
+ "out_dir": "out/COLLAB_edge_classification/GraphSage/edgefeat/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 24
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 38,
+ "out_dim": 38,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "sage_aggregator": "maxpool",
+ "edge_feat": false,
+ "layer_type": "edgefeat"
+ }
+}
\ No newline at end of file
diff --git a/configs/COLLAB_edge_classification_GraphSage_edgereprfeat.json b/configs/COLLAB_edge_classification_GraphSage_edgereprfeat.json
new file mode 100644
index 000000000..636ad44ad
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GraphSage_edgereprfeat.json
@@ -0,0 +1,39 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 2
+ },
+
+ "model": "GraphSage",
+ "dataset": "OGBL-COLLAB",
+
+
+ "out_dir": "out/COLLAB_edge_classification/GraphSage/edgereprfeat/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 24
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 35,
+ "out_dim": 35,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "sage_aggregator": "maxpool",
+ "edge_feat": false,
+ "layer_type": "edgereprfeat"
+ }
+}
\ No newline at end of file
diff --git a/configs/COLLAB_edge_classification_GraphSage_isotropic.json b/configs/COLLAB_edge_classification_GraphSage_isotropic.json
new file mode 100644
index 000000000..8ff174624
--- /dev/null
+++ b/configs/COLLAB_edge_classification_GraphSage_isotropic.json
@@ -0,0 +1,39 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 2
+ },
+
+ "model": "GraphSage",
+ "dataset": "OGBL-COLLAB",
+
+
+ "out_dir": "out/COLLAB_edge_classification/GraphSage/isotropic/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 24
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 42,
+ "out_dim": 42,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "sage_aggregator": "maxpool",
+ "edge_feat": false,
+ "layer_type": "isotropic"
+ }
+}
\ No newline at end of file
diff --git a/configs/COLLAB_edge_classification_MF.json b/configs/COLLAB_edge_classification_MF.json
new file mode 100644
index 000000000..51f2f48cf
--- /dev/null
+++ b/configs/COLLAB_edge_classification_MF.json
@@ -0,0 +1,31 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "MF",
+ "dataset": "OGBL-COLLAB",
+
+ "out_dir": "out/COLLAB_edge_classification/MF/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.01,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 0,
+ "hidden_dim": 256,
+ "out_dim": 256,
+ "num_embs": 235868
+ }
+}
\ No newline at end of file
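
The MF baseline is structure-free: with `"L": 0` it learns one free 256-wide vector per node (`num_embs` = 235868 is the node count of ogbl-collab) and scores candidate links straight from the embeddings. A hypothetical sketch of that shape; the dot-product scorer is our assumption, and the repo's MF net may use an MLP head instead:

```python
import torch
import torch.nn as nn

class MFBaseline(nn.Module):
    """One learned embedding per node, no message passing at all."""
    def __init__(self, num_embs=235868, hidden_dim=256):
        super().__init__()
        self.emb = nn.Embedding(num_embs, hidden_dim)

    def forward(self, src, dst):
        # score a batch of candidate edges given source/destination node ids
        return (self.emb(src) * self.emb(dst)).sum(dim=-1)

model = MFBaseline()
scores = model(torch.tensor([0, 1]), torch.tensor([2, 3]))
print(scores.shape)  # torch.Size([2])
```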
diff --git a/configs/TSP_edge_classification_MLP_GATED.json b/configs/COLLAB_edge_classification_MLP_40k.json
similarity index 69%
rename from configs/TSP_edge_classification_MLP_GATED.json
rename to configs/COLLAB_edge_classification_MLP_40k.json
index a9b919eb2..0ccb88fc5 100644
--- a/configs/TSP_edge_classification_MLP_GATED.json
+++ b/configs/COLLAB_edge_classification_MLP_40k.json
@@ -5,29 +5,29 @@
},
"model": "MLP",
- "dataset": "TSP",
+ "dataset": "COLLAB",
- "out_dir": "out/TSP_edge_classification/",
+ "out_dir": "out/COLLAB_edge_classification/MLP/",
"params": {
"seed": 41,
"epochs": 500,
- "batch_size": 64,
+ "batch_size": 32768,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
"lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 1,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 3,
- "hidden_dim": 144,
- "out_dim": 144,
+ "hidden_dim": 80,
+ "out_dim": 80,
"readout": "mean",
- "gated": true,
+ "gated": false,
"in_feat_dropout": 0.0,
"dropout": 0.0
}
diff --git a/configs/COLLAB_edge_classification_MoNet_40k.json b/configs/COLLAB_edge_classification_MoNet_40k.json
new file mode 100644
index 000000000..39715e355
--- /dev/null
+++ b/configs/COLLAB_edge_classification_MoNet_40k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 1
+ },
+
+ "model": "MoNet",
+ "dataset": "COLLAB",
+
+ "out_dir": "out/COLLAB_edge_classification/MoNet/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 32768,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 53,
+ "out_dim": 53,
+ "residual": true,
+ "readout": "mean",
+ "kernel": 3,
+ "pseudo_dim_MoNet": 2,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true
+ }
+}
\ No newline at end of file
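
In the MoNet config, `"kernel": 3` with `"pseudo_dim_MoNet": 2` means each message is weighted by K=3 learnable Gaussian kernels evaluated on a 2-d pseudo-coordinate per edge (degree-derived in this benchmark). A sketch of just the kernel-weight computation, assuming diagonal covariances:

```python
import torch

K, pseudo_dim = 3, 2                        # "kernel", "pseudo_dim_MoNet"
mu = torch.randn(K, pseudo_dim)             # learnable kernel means
inv_sigma = torch.rand(K, pseudo_dim) + 0.5 # learnable diagonal scales

def kernel_weights(u):
    # u: (num_edges, pseudo_dim) pseudo-coordinates
    diff = u.unsqueeze(1) - mu              # (num_edges, K, pseudo_dim)
    return torch.exp(-0.5 * ((diff * inv_sigma) ** 2).sum(-1))

u = torch.randn(7, pseudo_dim)
print(kernel_weights(u).shape)  # torch.Size([7, 3]) -> one weight per kernel
```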
diff --git a/configs/CSL_graph_classification_3WLGNN_CSL_100k.json b/configs/CSL_graph_classification_3WLGNN_CSL_100k.json
new file mode 100644
index 000000000..b580d2cff
--- /dev/null
+++ b/configs/CSL_graph_classification_3WLGNN_CSL_100k.json
@@ -0,0 +1,34 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 80,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false,
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_3WLGNN_CSL_PE_100k.json b/configs/CSL_graph_classification_3WLGNN_CSL_PE_100k.json
new file mode 100644
index 000000000..80d1d208f
--- /dev/null
+++ b/configs/CSL_graph_classification_3WLGNN_CSL_PE_100k.json
@@ -0,0 +1,35 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 78,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false,
+ "pos_enc": true,
+ "pos_enc_dim": 20
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_GAT_CSL_100k.json b/configs/CSL_graph_classification_GAT_CSL_100k.json
new file mode 100644
index 000000000..755801f4f
--- /dev/null
+++ b/configs/CSL_graph_classification_GAT_CSL_100k.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GAT",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 18,
+ "out_dim": 144,
+ "residual": true,
+ "readout": "mean",
+ "n_heads": 8,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false,
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_GAT_CSL_PE_100k.json b/configs/CSL_graph_classification_GAT_CSL_PE_100k.json
new file mode 100644
index 000000000..7b222a358
--- /dev/null
+++ b/configs/CSL_graph_classification_GAT_CSL_PE_100k.json
@@ -0,0 +1,39 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GAT",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 18,
+ "out_dim": 144,
+ "residual": true,
+ "readout": "mean",
+ "n_heads": 8,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false,
+ "pos_enc": true,
+ "pos_enc_dim": 20
+ }
+}
\ No newline at end of file
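
CSL graphs carry no discriminative node features, which is why every CSL model here comes in a plain and a `_PE_` variant; `pos_enc`/`pos_enc_dim` toggle Laplacian positional encodings. Assuming the benchmark's usual recipe — the `pos_enc_dim` smallest non-trivial eigenvectors of the symmetric normalized Laplacian — a sketch:

```python
import numpy as np
import scipy.sparse as sp

def laplacian_pos_enc(adj, pos_enc_dim=20):
    deg = np.asarray(adj.sum(axis=1)).ravel()
    d_inv_sqrt = sp.diags(deg ** -0.5)
    lap = sp.eye(adj.shape[0]) - d_inv_sqrt @ adj @ d_inv_sqrt
    # CSL graphs are tiny (41 nodes each), so dense eigendecomposition is fine
    eigval, eigvec = np.linalg.eigh(lap.toarray())
    return eigvec[:, 1 : pos_enc_dim + 1]  # drop the trivial eigenvector

# a 4-cycle as a toy stand-in for a CSL skip-circulant graph
A = sp.csr_matrix(np.array([[0,1,0,1],[1,0,1,0],[0,1,0,1],[1,0,1,0]], float))
print(laplacian_pos_enc(A, pos_enc_dim=2).shape)  # (4, 2)
```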
diff --git a/configs/CSL_graph_classification_GCN_CSL_100k.json b/configs/CSL_graph_classification_GCN_CSL_100k.json
new file mode 100644
index 000000000..d8b57cb22
--- /dev/null
+++ b/configs/CSL_graph_classification_GCN_CSL_100k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GCN",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 146,
+ "out_dim": 146,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false,
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_GCN_CSL_PE_100k.json b/configs/CSL_graph_classification_GCN_CSL_PE_100k.json
new file mode 100644
index 000000000..6513396d6
--- /dev/null
+++ b/configs/CSL_graph_classification_GCN_CSL_PE_100k.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GCN",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 146,
+ "out_dim": 146,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false,
+ "pos_enc": true,
+ "pos_enc_dim": 20
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_GIN_CSL_100k.json b/configs/CSL_graph_classification_GIN_CSL_100k.json
new file mode 100644
index 000000000..d8cc7170e
--- /dev/null
+++ b/configs/CSL_graph_classification_GIN_CSL_100k.json
@@ -0,0 +1,39 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GIN",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 110,
+ "out_dim": 110,
+ "residual": true,
+ "readout": "sum",
+ "n_mlp_GIN": 2,
+ "learn_eps_GIN": true,
+ "neighbor_aggr_GIN": "sum",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_GIN_CSL_PE_100k.json b/configs/CSL_graph_classification_GIN_CSL_PE_100k.json
new file mode 100644
index 000000000..dbe6c138e
--- /dev/null
+++ b/configs/CSL_graph_classification_GIN_CSL_PE_100k.json
@@ -0,0 +1,40 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GIN",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 110,
+ "out_dim": 110,
+ "residual": true,
+ "readout": "sum",
+ "n_mlp_GIN": 2,
+ "learn_eps_GIN": true,
+ "neighbor_aggr_GIN": "sum",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "pos_enc": true,
+ "pos_enc_dim": 20
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_GatedGCN_CSL_100k.json b/configs/CSL_graph_classification_GatedGCN_CSL_100k.json
new file mode 100644
index 000000000..1f4487db5
--- /dev/null
+++ b/configs/CSL_graph_classification_GatedGCN_CSL_100k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GatedGCN",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 70,
+ "out_dim": 70,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_GatedGCN_CSL_PE_100k.json b/configs/CSL_graph_classification_GatedGCN_CSL_PE_100k.json
new file mode 100644
index 000000000..cfc1cb7da
--- /dev/null
+++ b/configs/CSL_graph_classification_GatedGCN_CSL_PE_100k.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GatedGCN",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 70,
+ "out_dim": 70,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "pos_enc": true,
+ "pos_enc_dim": 20
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_GraphSage_CSL_100k.json b/configs/CSL_graph_classification_GraphSage_CSL_100k.json
new file mode 100644
index 000000000..6c07538fa
--- /dev/null
+++ b/configs/CSL_graph_classification_GraphSage_CSL_100k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GraphSage",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 90,
+ "out_dim": 90,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "sage_aggregator": "maxpool",
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_GraphSage_CSL_PE_100k.json b/configs/CSL_graph_classification_GraphSage_CSL_PE_100k.json
new file mode 100644
index 000000000..430400aad
--- /dev/null
+++ b/configs/CSL_graph_classification_GraphSage_CSL_PE_100k.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GraphSage",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 90,
+ "out_dim": 90,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "sage_aggregator": "maxpool",
+ "pos_enc": true,
+ "pos_enc_dim": 20
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_MLP_CSL_100k.json b/configs/CSL_graph_classification_MLP_CSL_100k.json
new file mode 100644
index 000000000..f9fd132e0
--- /dev/null
+++ b/configs/CSL_graph_classification_MLP_CSL_100k.json
@@ -0,0 +1,35 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "MLP",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 145,
+ "out_dim": 145,
+ "readout": "mean",
+ "gated": false,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_MLP_CSL_PE_100k.json b/configs/CSL_graph_classification_MLP_CSL_PE_100k.json
new file mode 100644
index 000000000..e0f2c5b8c
--- /dev/null
+++ b/configs/CSL_graph_classification_MLP_CSL_PE_100k.json
@@ -0,0 +1,36 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "MLP",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 145,
+ "out_dim": 145,
+ "readout": "mean",
+ "gated": false,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "pos_enc": true,
+ "pos_enc_dim": 20
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_MoNet_CSL_100k.json b/configs/CSL_graph_classification_MoNet_CSL_100k.json
new file mode 100644
index 000000000..6044ff8ae
--- /dev/null
+++ b/configs/CSL_graph_classification_MoNet_CSL_100k.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "MoNet",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 90,
+ "out_dim": 90,
+ "residual": true,
+ "readout": "mean",
+ "kernel": 3,
+ "pseudo_dim_MoNet": 2,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_MoNet_CSL_PE_100k.json b/configs/CSL_graph_classification_MoNet_CSL_PE_100k.json
new file mode 100644
index 000000000..d2f705eb0
--- /dev/null
+++ b/configs/CSL_graph_classification_MoNet_CSL_PE_100k.json
@@ -0,0 +1,39 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "MoNet",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 90,
+ "out_dim": 90,
+ "residual": true,
+ "readout": "mean",
+ "kernel": 3,
+ "pseudo_dim_MoNet": 2,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "pos_enc": true,
+ "pos_enc_dim": 20
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_RingGNN_CSL_100k.json b/configs/CSL_graph_classification_RingGNN_CSL_100k.json
new file mode 100644
index 000000000..9acb8eb75
--- /dev/null
+++ b/configs/CSL_graph_classification_RingGNN_CSL_100k.json
@@ -0,0 +1,34 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 2,
+ "hidden_dim": 45,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false,
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/CSL_graph_classification_RingGNN_CSL_PE_100k.json b/configs/CSL_graph_classification_RingGNN_CSL_PE_100k.json
new file mode 100644
index 000000000..5ff70683d
--- /dev/null
+++ b/configs/CSL_graph_classification_RingGNN_CSL_PE_100k.json
@@ -0,0 +1,35 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "CSL",
+
+ "out_dir": "out/CSL_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 5,
+ "init_lr": 5e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 2,
+ "hidden_dim": 37,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false,
+ "pos_enc": true,
+ "pos_enc_dim": 20
+ }
+}
\ No newline at end of file
diff --git a/configs/CitationGraphs_node_classification_GAT.json b/configs/CitationGraphs_node_classification_GAT.json
deleted file mode 100644
index e72561e44..000000000
--- a/configs/CitationGraphs_node_classification_GAT.json
+++ /dev/null
@@ -1,37 +0,0 @@
-{
- "gpu": {
- "use": true,
- "id": 0
- },
-
- "model": "GAT",
-
- "out_dir": "out/CitationGraphs_node_classification/",
-
- "params": {
- "seed": 41,
- "epochs": 300,
- "batch_size": 128,
- "init_lr": 0.005,
- "lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
- "min_lr": 1e-5,
- "weight_decay": 5e-4,
- "print_epoch_interval": 5,
- "max_time": 48
- },
-
- "net_params": {
- "builtin": true,
- "L": 1,
- "n_heads": 8,
- "hidden_dim": 8,
- "out_dim": 8,
- "residual": false,
- "in_feat_dropout": 0.6,
- "dropout": 0.6,
- "graph_norm": false,
- "batch_norm": false,
- "self_loop": true
- }
-}
diff --git a/configs/CitationGraphs_node_classification_GCN.json b/configs/CitationGraphs_node_classification_GCN.json
deleted file mode 100644
index d14a36fec..000000000
--- a/configs/CitationGraphs_node_classification_GCN.json
+++ /dev/null
@@ -1,36 +0,0 @@
-{
- "gpu": {
- "use": true,
- "id": 0
- },
-
- "model": "GCN",
-
- "out_dir": "out/CitationGraphs_node_classification/",
-
- "params": {
- "seed": 41,
- "epochs": 300,
- "batch_size": 128,
- "init_lr": 1e-2,
- "lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
- "min_lr": 1e-5,
- "weight_decay": 5e-4,
- "print_epoch_interval": 5,
- "max_time": 48
- },
-
- "net_params": {
- "builtin": true,
- "L": 1,
- "hidden_dim": 16,
- "out_dim": 16,
- "residual": false,
- "in_feat_dropout": 0.5,
- "dropout": 0.5,
- "graph_norm": false,
- "batch_norm": false,
- "self_loop": true
- }
-}
diff --git a/configs/CitationGraphs_node_classification_GraphSage.json b/configs/CitationGraphs_node_classification_GraphSage.json
deleted file mode 100644
index 0d918dc0d..000000000
--- a/configs/CitationGraphs_node_classification_GraphSage.json
+++ /dev/null
@@ -1,36 +0,0 @@
-{
- "gpu": {
- "use": true,
- "id": 0
- },
-
- "model": "GraphSage",
-
- "out_dir": "out/CitationGraphs_node_classification/",
-
- "params": {
- "seed": 41,
- "epochs": 300,
- "batch_size": 20,
- "init_lr": 1e-2,
- "lr_reduce_factor": 0.5,
- "lr_schedule_patience": 25,
- "min_lr": 1e-6,
- "weight_decay": 5e-4,
- "print_epoch_interval": 5,
- "max_time": 48
- },
-
- "net_params": {
- "builtin": true,
- "L": 1,
- "hidden_dim": 16,
- "out_dim": 16,
- "residual": false,
- "in_feat_dropout": 0.5,
- "dropout": 0.5,
- "graph_norm": false,
- "batch_norm": false,
- "sage_aggregator": "mean"
- }
-}
diff --git a/configs/CitationGraphs_node_classification_MLP.json b/configs/CitationGraphs_node_classification_MLP.json
deleted file mode 100644
index 2103b347c..000000000
--- a/configs/CitationGraphs_node_classification_MLP.json
+++ /dev/null
@@ -1,33 +0,0 @@
-{
- "gpu": {
- "use": true,
- "id": 0
- },
-
- "model": "MLP",
-
- "out_dir": "out/CitationGraphs_node_classification/",
-
- "params": {
- "seed": 41,
- "epochs": 300,
- "batch_size": 20,
- "init_lr": 0.005,
- "lr_reduce_factor": 0.5,
- "lr_schedule_patience": 25,
- "min_lr": 1e-5,
- "weight_decay": 5e-4,
- "print_epoch_interval": 5,
- "max_time": 48
- },
-
- "net_params": {
- "L": 4,
- "hidden_dim": 16,
- "out_dim": 16,
- "readout": "mean",
- "gated": false,
- "in_feat_dropout": 0.6,
- "dropout": 0.6
- }
-}
diff --git a/configs/CitationGraphs_node_classification_MLP_GATED.json b/configs/CitationGraphs_node_classification_MLP_GATED.json
deleted file mode 100644
index 1f6698153..000000000
--- a/configs/CitationGraphs_node_classification_MLP_GATED.json
+++ /dev/null
@@ -1,33 +0,0 @@
-{
- "gpu": {
- "use": true,
- "id": 0
- },
-
- "model": "MLP",
-
- "out_dir": "out/CitationGraphs_node_classification/",
-
- "params": {
- "seed": 41,
- "epochs": 300,
- "batch_size": 20,
- "init_lr": 0.005,
- "lr_reduce_factor": 0.5,
- "lr_schedule_patience": 25,
- "min_lr": 1e-5,
- "weight_decay": 5e-4,
- "print_epoch_interval": 5,
- "max_time": 48
- },
-
- "net_params": {
- "L": 4,
- "hidden_dim": 16,
- "out_dim": 16,
- "readout": "mean",
- "gated": true,
- "in_feat_dropout": 0.6,
- "dropout": 0.6
- }
-}
diff --git a/configs/SBMs_node_clustering_3WLGNN_CLUSTER_100k.json b/configs/SBMs_node_clustering_3WLGNN_CLUSTER_100k.json
new file mode 100644
index 000000000..8524ece45
--- /dev/null
+++ b/configs/SBMs_node_clustering_3WLGNN_CLUSTER_100k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "SBM_CLUSTER",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 82,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_3WLGNN_CLUSTER_500k.json b/configs/SBMs_node_clustering_3WLGNN_CLUSTER_500k.json
new file mode 100644
index 000000000..0ff7e7b72
--- /dev/null
+++ b/configs/SBMs_node_clustering_3WLGNN_CLUSTER_500k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "SBM_CLUSTER",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 182,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_3WLGNN_CLUSTER_L8_500k.json b/configs/SBMs_node_clustering_3WLGNN_CLUSTER_L8_500k.json
new file mode 100644
index 000000000..8f6aef46f
--- /dev/null
+++ b/configs/SBMs_node_clustering_3WLGNN_CLUSTER_L8_500k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "SBM_CLUSTER",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 8,
+ "hidden_dim": 105,
+ "depth_of_mlp": 2,
+ "residual": true,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_3WLGNN_PATTERN_100k.json b/configs/SBMs_node_clustering_3WLGNN_PATTERN_100k.json
new file mode 100644
index 000000000..0e31d1e2e
--- /dev/null
+++ b/configs/SBMs_node_clustering_3WLGNN_PATTERN_100k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "SBM_PATTERN",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 82,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_3WLGNN_PATTERN_500k.json b/configs/SBMs_node_clustering_3WLGNN_PATTERN_500k.json
new file mode 100644
index 000000000..a02e78edd
--- /dev/null
+++ b/configs/SBMs_node_clustering_3WLGNN_PATTERN_500k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "SBM_PATTERN",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 182,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_3WLGNN_PATTERN_L8_500k.json b/configs/SBMs_node_clustering_3WLGNN_PATTERN_L8_500k.json
new file mode 100644
index 000000000..6d5ed6ae2
--- /dev/null
+++ b/configs/SBMs_node_clustering_3WLGNN_PATTERN_L8_500k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "SBM_PATTERN",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 8,
+ "hidden_dim": 105,
+ "depth_of_mlp": 2,
+ "residual": true,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
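
The `_100k`/`_500k` suffixes throughout these configs are parameter budgets, which is why `hidden_dim` drops from 182 to 105 when `L` grows from 3 to 8 within the same 500k budget. A generic counter for checking any built model against its budget — the toy module below is a placeholder, not the repo's model builder:

```python
import torch.nn as nn

def count_params(model: nn.Module) -> int:
    # trainable parameters only, matching how the budgets are reported
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

toy = nn.Sequential(nn.Linear(182, 182), nn.ReLU(), nn.Linear(182, 6))
print(f"{count_params(toy):,} trainable parameters")
```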
diff --git a/configs/SBMs_node_clustering_GAT_CLUSTER.json b/configs/SBMs_node_clustering_GAT_CLUSTER_100k.json
similarity index 93%
rename from configs/SBMs_node_clustering_GAT_CLUSTER.json
rename to configs/SBMs_node_clustering_GAT_CLUSTER_100k.json
index 0ac21adb2..85ca989fc 100644
--- a/configs/SBMs_node_clustering_GAT_CLUSTER.json
+++ b/configs/SBMs_node_clustering_GAT_CLUSTER_100k.json
@@ -19,7 +19,7 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -31,7 +31,6 @@
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/SBMs_node_clustering_GAT_CLUSTER_500k.json b/configs/SBMs_node_clustering_GAT_CLUSTER_500k.json
new file mode 100644
index 000000000..e929caabf
--- /dev/null
+++ b/configs/SBMs_node_clustering_GAT_CLUSTER_500k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GAT",
+ "dataset": "SBM_CLUSTER",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "n_heads": 8,
+ "hidden_dim": 22,
+ "out_dim": 176,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GAT_PATTERN.json b/configs/SBMs_node_clustering_GAT_PATTERN_100k.json
similarity index 93%
rename from configs/SBMs_node_clustering_GAT_PATTERN.json
rename to configs/SBMs_node_clustering_GAT_PATTERN_100k.json
index aa0497482..510c2866c 100644
--- a/configs/SBMs_node_clustering_GAT_PATTERN.json
+++ b/configs/SBMs_node_clustering_GAT_PATTERN_100k.json
@@ -19,7 +19,7 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -31,7 +31,6 @@
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/SBMs_node_clustering_GAT_PATTERN_500k.json b/configs/SBMs_node_clustering_GAT_PATTERN_500k.json
new file mode 100644
index 000000000..17c35b933
--- /dev/null
+++ b/configs/SBMs_node_clustering_GAT_PATTERN_500k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GAT",
+ "dataset": "SBM_PATTERN",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "n_heads": 8,
+ "hidden_dim": 22,
+ "out_dim": 176,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GCN_CLUSTER.json b/configs/SBMs_node_clustering_GCN_CLUSTER_100k.json
similarity index 93%
rename from configs/SBMs_node_clustering_GCN_CLUSTER.json
rename to configs/SBMs_node_clustering_GCN_CLUSTER_100k.json
index a945b2de7..1c75a44d1 100644
--- a/configs/SBMs_node_clustering_GCN_CLUSTER.json
+++ b/configs/SBMs_node_clustering_GCN_CLUSTER_100k.json
@@ -19,7 +19,7 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -30,7 +30,6 @@
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/SBMs_node_clustering_GCN_CLUSTER_500k.json b/configs/SBMs_node_clustering_GCN_CLUSTER_500k.json
new file mode 100644
index 000000000..1219381fa
--- /dev/null
+++ b/configs/SBMs_node_clustering_GCN_CLUSTER_500k.json
@@ -0,0 +1,36 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GCN",
+ "dataset": "SBM_CLUSTER",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 64,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 172,
+ "out_dim": 172,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GCN_PATTERN.json b/configs/SBMs_node_clustering_GCN_PATTERN_100k.json
similarity index 93%
rename from configs/SBMs_node_clustering_GCN_PATTERN.json
rename to configs/SBMs_node_clustering_GCN_PATTERN_100k.json
index 2cbedd9e2..28b195069 100644
--- a/configs/SBMs_node_clustering_GCN_PATTERN.json
+++ b/configs/SBMs_node_clustering_GCN_PATTERN_100k.json
@@ -19,7 +19,7 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -30,7 +30,6 @@
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/SBMs_node_clustering_GCN_PATTERN_500k.json b/configs/SBMs_node_clustering_GCN_PATTERN_500k.json
new file mode 100644
index 000000000..8c81ac560
--- /dev/null
+++ b/configs/SBMs_node_clustering_GCN_PATTERN_500k.json
@@ -0,0 +1,36 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GCN",
+ "dataset": "SBM_PATTERN",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 64,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 172,
+ "out_dim": 172,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GIN_CLUSTER.json b/configs/SBMs_node_clustering_GIN_CLUSTER_100k.json
similarity index 93%
rename from configs/SBMs_node_clustering_GIN_CLUSTER.json
rename to configs/SBMs_node_clustering_GIN_CLUSTER_100k.json
index 20711b0dc..3198da178 100644
--- a/configs/SBMs_node_clustering_GIN_CLUSTER.json
+++ b/configs/SBMs_node_clustering_GIN_CLUSTER_100k.json
@@ -19,7 +19,7 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -32,7 +32,6 @@
"neighbor_aggr_GIN": "sum",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GIN_CLUSTER_500k.json b/configs/SBMs_node_clustering_GIN_CLUSTER_500k.json
new file mode 100644
index 000000000..e6b815c9a
--- /dev/null
+++ b/configs/SBMs_node_clustering_GIN_CLUSTER_500k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GIN",
+ "dataset": "SBM_CLUSTER",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 124,
+ "residual": true,
+ "readout": "sum",
+ "n_mlp_GIN": 2,
+ "learn_eps_GIN": true,
+ "neighbor_aggr_GIN": "sum",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GIN_PATTERN.json b/configs/SBMs_node_clustering_GIN_PATTERN_100k.json
similarity index 93%
rename from configs/SBMs_node_clustering_GIN_PATTERN.json
rename to configs/SBMs_node_clustering_GIN_PATTERN_100k.json
index a6f4c6c86..860f1b92b 100644
--- a/configs/SBMs_node_clustering_GIN_PATTERN.json
+++ b/configs/SBMs_node_clustering_GIN_PATTERN_100k.json
@@ -19,7 +19,7 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -32,7 +32,6 @@
"neighbor_aggr_GIN": "sum",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GIN_PATTERN_500k.json b/configs/SBMs_node_clustering_GIN_PATTERN_500k.json
new file mode 100644
index 000000000..1cd28a469
--- /dev/null
+++ b/configs/SBMs_node_clustering_GIN_PATTERN_500k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GIN",
+ "dataset": "SBM_PATTERN",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 124,
+ "residual": true,
+ "readout": "sum",
+ "n_mlp_GIN": 2,
+ "learn_eps_GIN": true,
+ "neighbor_aggr_GIN": "sum",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GatedGCN_CLUSTER.json b/configs/SBMs_node_clustering_GatedGCN_CLUSTER_100k.json
similarity index 89%
rename from configs/SBMs_node_clustering_GatedGCN_CLUSTER.json
rename to configs/SBMs_node_clustering_GatedGCN_CLUSTER_100k.json
index c34bef5fd..ea13c865a 100644
--- a/configs/SBMs_node_clustering_GatedGCN_CLUSTER.json
+++ b/configs/SBMs_node_clustering_GatedGCN_CLUSTER_100k.json
@@ -19,7 +19,7 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -31,7 +31,7 @@
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
- "batch_norm": true
+ "batch_norm": true,
+ "pos_enc": false
}
}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GatedGCN_CLUSTER_500k.json b/configs/SBMs_node_clustering_GatedGCN_CLUSTER_500k.json
new file mode 100644
index 000000000..f9ba5ae4f
--- /dev/null
+++ b/configs/SBMs_node_clustering_GatedGCN_CLUSTER_500k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GatedGCN",
+ "dataset": "SBM_CLUSTER",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 32,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 78,
+ "out_dim": 70,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GatedGCN_CLUSTER_PE_500k.json b/configs/SBMs_node_clustering_GatedGCN_CLUSTER_PE_500k.json
new file mode 100644
index 000000000..f805e17a5
--- /dev/null
+++ b/configs/SBMs_node_clustering_GatedGCN_CLUSTER_PE_500k.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GatedGCN",
+ "dataset": "SBM_CLUSTER",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 32,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 78,
+ "out_dim": 70,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "pos_enc": true,
+ "pos_enc_dim": 40
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GatedGCN_PATTERN.json b/configs/SBMs_node_clustering_GatedGCN_PATTERN_100k.json
similarity index 89%
rename from configs/SBMs_node_clustering_GatedGCN_PATTERN.json
rename to configs/SBMs_node_clustering_GatedGCN_PATTERN_100k.json
index 00d96a9e4..782185aba 100644
--- a/configs/SBMs_node_clustering_GatedGCN_PATTERN.json
+++ b/configs/SBMs_node_clustering_GatedGCN_PATTERN_100k.json
@@ -19,7 +19,7 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -31,7 +31,7 @@
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
- "batch_norm": true
+ "batch_norm": true,
+ "pos_enc": false
}
}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GatedGCN_PATTERN_500k.json b/configs/SBMs_node_clustering_GatedGCN_PATTERN_500k.json
new file mode 100644
index 000000000..2294f6a69
--- /dev/null
+++ b/configs/SBMs_node_clustering_GatedGCN_PATTERN_500k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GatedGCN",
+ "dataset": "SBM_PATTERN",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 32,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 78,
+ "out_dim": 78,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GatedGCN_PATTERN_PE_500k.json b/configs/SBMs_node_clustering_GatedGCN_PATTERN_PE_500k.json
new file mode 100644
index 000000000..a729226b1
--- /dev/null
+++ b/configs/SBMs_node_clustering_GatedGCN_PATTERN_PE_500k.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GatedGCN",
+ "dataset": "SBM_PATTERN",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 32,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 78,
+ "out_dim": 78,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "pos_enc": true,
+ "pos_enc_dim": 40
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GraphSage_CLUSTER.json b/configs/SBMs_node_clustering_GraphSage_CLUSTER_100k.json
similarity index 82%
rename from configs/SBMs_node_clustering_GraphSage_CLUSTER.json
rename to configs/SBMs_node_clustering_GraphSage_CLUSTER_100k.json
index 48a566d10..f1e73c78a 100644
--- a/configs/SBMs_node_clustering_GraphSage_CLUSTER.json
+++ b/configs/SBMs_node_clustering_GraphSage_CLUSTER_100k.json
@@ -19,19 +19,18 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 106,
- "out_dim": 106,
+ "hidden_dim": 89,
+ "out_dim": 89,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
- "sage_aggregator": "meanpool"
+ "sage_aggregator": "maxpool"
}
}
\ No newline at end of file
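Alongside the budget rename, the GraphSage configs swap `sage_aggregator` from `meanpool` to `maxpool` — the GraphSAGE pooling aggregator with an element-wise max instead of a mean over transformed neighbour features. A minimal DGL-style sketch of what that aggregator computes (an illustration, not the repo's exact layer):

```python
import torch
import torch.nn as nn
import dgl.function as fn

class MaxPoolAggregator(nn.Module):
    """Sketch of GraphSAGE 'maxpool': h_N(i) = max_{j in N(i)} ReLU(W_pool h_j),
    concatenated with h_i and projected."""
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.pool_fc = nn.Linear(in_dim, in_dim)
        self.out_fc = nn.Linear(2 * in_dim, out_dim)

    def forward(self, g, h):
        with g.local_scope():
            g.ndata['h_proj'] = torch.relu(self.pool_fc(h))
            g.update_all(fn.copy_u('h_proj', 'm'), fn.max('m', 'h_neigh'))
            h_cat = torch.cat([h, g.ndata['h_neigh']], dim=1)
            return torch.relu(self.out_fc(h_cat))
```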
diff --git a/configs/SBMs_node_clustering_GraphSage_CLUSTER_500k.json b/configs/SBMs_node_clustering_GraphSage_CLUSTER_500k.json
new file mode 100644
index 000000000..9027358bb
--- /dev/null
+++ b/configs/SBMs_node_clustering_GraphSage_CLUSTER_500k.json
@@ -0,0 +1,36 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GraphSage",
+ "dataset": "SBM_CLUSTER",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 64,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 101,
+ "out_dim": 101,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "sage_aggregator": "maxpool"
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GraphSage_PATTERN.json b/configs/SBMs_node_clustering_GraphSage_PATTERN_100k.json
similarity index 82%
rename from configs/SBMs_node_clustering_GraphSage_PATTERN.json
rename to configs/SBMs_node_clustering_GraphSage_PATTERN_100k.json
index 74a7bcf02..57722fba9 100644
--- a/configs/SBMs_node_clustering_GraphSage_PATTERN.json
+++ b/configs/SBMs_node_clustering_GraphSage_PATTERN_100k.json
@@ -19,19 +19,18 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 106,
- "out_dim": 106,
+ "hidden_dim": 89,
+ "out_dim": 89,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
- "sage_aggregator": "meanpool"
+ "sage_aggregator": "maxpool"
}
}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_GraphSage_PATTERN_500k.json b/configs/SBMs_node_clustering_GraphSage_PATTERN_500k.json
new file mode 100644
index 000000000..d012db5fb
--- /dev/null
+++ b/configs/SBMs_node_clustering_GraphSage_PATTERN_500k.json
@@ -0,0 +1,36 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GraphSage",
+ "dataset": "SBM_PATTERN",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 64,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 101,
+ "out_dim": 101,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "sage_aggregator": "maxpool"
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_MLP_CLUSTER.json b/configs/SBMs_node_clustering_MLP_CLUSTER_100k.json
similarity index 96%
rename from configs/SBMs_node_clustering_MLP_CLUSTER.json
rename to configs/SBMs_node_clustering_MLP_CLUSTER_100k.json
index 0f99a0b8f..a31a4b733 100644
--- a/configs/SBMs_node_clustering_MLP_CLUSTER.json
+++ b/configs/SBMs_node_clustering_MLP_CLUSTER_100k.json
@@ -19,7 +19,7 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
diff --git a/configs/SBMs_node_clustering_MLP_GATED_CLUSTER.json b/configs/SBMs_node_clustering_MLP_CLUSTER_500k.json
similarity index 83%
rename from configs/SBMs_node_clustering_MLP_GATED_CLUSTER.json
rename to configs/SBMs_node_clustering_MLP_CLUSTER_500k.json
index 431812d3c..7a7e5aa0d 100644
--- a/configs/SBMs_node_clustering_MLP_GATED_CLUSTER.json
+++ b/configs/SBMs_node_clustering_MLP_CLUSTER_500k.json
@@ -19,15 +19,15 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
- "L": 4,
- "hidden_dim": 135,
- "out_dim": 135,
+ "L": 16,
+ "hidden_dim": 174,
+ "out_dim": 174,
"readout": "mean",
- "gated": true,
+ "gated": false,
"in_feat_dropout": 0.0,
"dropout": 0.0
}
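This rename also folds the old `MLP_GATED` config into the 500k budget with `gated: false`, i.e. the graph-agnostic baseline: every node passes through the same MLP with no message passing, so whatever accuracy it reaches comes from node features alone. A minimal sketch of what the baseline computes per node, using the dimensions from the config above:

```python
import torch.nn as nn

def make_mlp(in_dim, hidden_dim=174, out_dim=174, L=16, dropout=0.0):
    """Graph-agnostic baseline: L feedforward layers applied
    independently to each node; no neighbour aggregation."""
    layers, dim = [], in_dim
    for _ in range(L):
        layers += [nn.Linear(dim, hidden_dim), nn.ReLU(), nn.Dropout(dropout)]
        dim = hidden_dim
    layers.append(nn.Linear(dim, out_dim))
    return nn.Sequential(*layers)
```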
diff --git a/configs/SBMs_node_clustering_MLP_PATTERN.json b/configs/SBMs_node_clustering_MLP_PATTERN_100k.json
similarity index 96%
rename from configs/SBMs_node_clustering_MLP_PATTERN.json
rename to configs/SBMs_node_clustering_MLP_PATTERN_100k.json
index 0dfd525e8..0b7da581e 100644
--- a/configs/SBMs_node_clustering_MLP_PATTERN.json
+++ b/configs/SBMs_node_clustering_MLP_PATTERN_100k.json
@@ -19,7 +19,7 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
diff --git a/configs/SBMs_node_clustering_MLP_GATED_PATTERN.json b/configs/SBMs_node_clustering_MLP_PATTERN_500k.json
similarity index 83%
rename from configs/SBMs_node_clustering_MLP_GATED_PATTERN.json
rename to configs/SBMs_node_clustering_MLP_PATTERN_500k.json
index 1dac03c14..e229a8697 100644
--- a/configs/SBMs_node_clustering_MLP_GATED_PATTERN.json
+++ b/configs/SBMs_node_clustering_MLP_PATTERN_500k.json
@@ -19,15 +19,15 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
- "L": 4,
- "hidden_dim": 135,
- "out_dim": 135,
+ "L": 16,
+ "hidden_dim": 174,
+ "out_dim": 174,
"readout": "mean",
- "gated": true,
+ "gated": false,
"in_feat_dropout": 0.0,
"dropout": 0.0
}
diff --git a/configs/SBMs_node_clustering_MoNet_CLUSTER.json b/configs/SBMs_node_clustering_MoNet_CLUSTER_100k.json
similarity index 93%
rename from configs/SBMs_node_clustering_MoNet_CLUSTER.json
rename to configs/SBMs_node_clustering_MoNet_CLUSTER_100k.json
index b052291a3..52c1ce1d4 100644
--- a/configs/SBMs_node_clustering_MoNet_CLUSTER.json
+++ b/configs/SBMs_node_clustering_MoNet_CLUSTER_100k.json
@@ -19,7 +19,7 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -32,7 +32,6 @@
"pseudo_dim_MoNet": 2,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_MoNet_CLUSTER_500k.json b/configs/SBMs_node_clustering_MoNet_CLUSTER_500k.json
new file mode 100644
index 000000000..0d4061258
--- /dev/null
+++ b/configs/SBMs_node_clustering_MoNet_CLUSTER_500k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "MoNet",
+ "dataset": "SBM_CLUSTER",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 102,
+ "out_dim": 102,
+ "residual": true,
+ "readout": "mean",
+ "kernel": 3,
+ "pseudo_dim_MoNet": 2,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true
+ }
+}
\ No newline at end of file
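The MoNet configs keep `kernel: 3` and `pseudo_dim_MoNet: 2`: the number of Gaussian mixture kernels K and the dimension of the pseudo-coordinates u_ij. A sketch of the standard MoNet update these hyperparameters plug into (mu_k and Sigma_k are learned; the pseudo-coordinates are commonly degree-based, e.g. u_ij = (deg_i^{-1/2}, deg_j^{-1/2}), which gives the dimension 2 used here):

```latex
w_k(u_{ij}) = \exp\!\Big(-\tfrac{1}{2}\,(u_{ij}-\mu_k)^\top \Sigma_k^{-1}(u_{ij}-\mu_k)\Big),
\qquad
h_i^{\ell+1} = \sum_{k=1}^{K} \sum_{j \in \mathcal{N}(i)} w_k(u_{ij})\, W_k^{\ell}\, h_j^{\ell}
```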
diff --git a/configs/SBMs_node_clustering_MoNet_PATTERN.json b/configs/SBMs_node_clustering_MoNet_PATTERN_100k.json
similarity index 93%
rename from configs/SBMs_node_clustering_MoNet_PATTERN.json
rename to configs/SBMs_node_clustering_MoNet_PATTERN_100k.json
index 17e21993a..980af6905 100644
--- a/configs/SBMs_node_clustering_MoNet_PATTERN.json
+++ b/configs/SBMs_node_clustering_MoNet_PATTERN_100k.json
@@ -19,7 +19,7 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -32,7 +32,6 @@
"pseudo_dim_MoNet": 2,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_MoNet_PATTERN_500k.json b/configs/SBMs_node_clustering_MoNet_PATTERN_500k.json
new file mode 100644
index 000000000..968a77f80
--- /dev/null
+++ b/configs/SBMs_node_clustering_MoNet_PATTERN_500k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "MoNet",
+ "dataset": "SBM_PATTERN",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 102,
+ "out_dim": 102,
+ "residual": true,
+ "readout": "mean",
+ "kernel": 3,
+ "pseudo_dim_MoNet": 2,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_RingGNN_CLUSTER_100k.json b/configs/SBMs_node_clustering_RingGNN_CLUSTER_100k.json
new file mode 100644
index 000000000..8fdd174a1
--- /dev/null
+++ b/configs/SBMs_node_clustering_RingGNN_CLUSTER_100k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "SBM_CLUSTER",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 2,
+ "hidden_dim": 43,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_RingGNN_CLUSTER_500k.json b/configs/SBMs_node_clustering_RingGNN_CLUSTER_500k.json
new file mode 100644
index 000000000..8d0d9721b
--- /dev/null
+++ b/configs/SBMs_node_clustering_RingGNN_CLUSTER_500k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "SBM_CLUSTER",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 2,
+ "hidden_dim": 101,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
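The `_100k` / `_500k` suffixes introduced throughout this diff name a parameter budget, not a dataset variant: `hidden_dim` is tuned per model so the total trainable parameter count lands near 100k or 500k (hence RingGNN at `hidden_dim` 43 vs 101 in the two configs above). A quick way to check a config against its budget — a hedged sketch, with `model` standing in for whichever net the config instantiates:

```python
def view_model_param(model) -> int:
    """Count trainable parameters, as implied by each config's suffix."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

# e.g.: assert abs(view_model_param(model) - 100_000) / 100_000 < 0.05
```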
diff --git a/configs/SBMs_node_clustering_RingGNN_CLUSTER_L8_500k.json b/configs/SBMs_node_clustering_RingGNN_CLUSTER_L8_500k.json
new file mode 100644
index 000000000..67fa49a50
--- /dev/null
+++ b/configs/SBMs_node_clustering_RingGNN_CLUSTER_L8_500k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "SBM_CLUSTER",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 8,
+ "hidden_dim": 37,
+ "radius": 2,
+ "residual": true,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_RingGNN_PATTERN_100k.json b/configs/SBMs_node_clustering_RingGNN_PATTERN_100k.json
new file mode 100644
index 000000000..5b0fdbf14
--- /dev/null
+++ b/configs/SBMs_node_clustering_RingGNN_PATTERN_100k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "SBM_PATTERN",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 2,
+ "hidden_dim": 45,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_RingGNN_PATTERN_500k.json b/configs/SBMs_node_clustering_RingGNN_PATTERN_500k.json
new file mode 100644
index 000000000..518854989
--- /dev/null
+++ b/configs/SBMs_node_clustering_RingGNN_PATTERN_500k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "SBM_PATTERN",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 2,
+ "hidden_dim": 101,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/SBMs_node_clustering_RingGNN_PATTERN_L8_500k.json b/configs/SBMs_node_clustering_RingGNN_PATTERN_L8_500k.json
new file mode 100644
index 000000000..a637ef166
--- /dev/null
+++ b/configs/SBMs_node_clustering_RingGNN_PATTERN_L8_500k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "SBM_PATTERN",
+
+ "out_dir": "out/SBMs_node_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 8,
+ "hidden_dim": 37,
+ "radius": 2,
+ "residual": true,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/TSP_edge_classification_3WLGNN_100k.json b/configs/TSP_edge_classification_3WLGNN_100k.json
new file mode 100644
index 000000000..a575abe42
--- /dev/null
+++ b/configs/TSP_edge_classification_3WLGNN_100k.json
@@ -0,0 +1,34 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 1
+ },
+
+ "model": "3WLGNN",
+ "dataset": "TSP",
+
+ "out_dir": "out/TSP_edge_classification/3WLGNN/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 52,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false,
+ "edge_feat": true
+ }
+}
\ No newline at end of file
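Note the much smaller `batch_size: 4` for 3WLGNN (and for RingGNN below), versus 64 for the message-passing nets: WL-GNNs of this kind operate on dense tensors rather than sparse graphs, so memory grows with N² per graph. A heavily hedged sketch of the kind of dense input such a layer consumes — assuming node features on the diagonal and the adjacency as an extra channel, which is one common packing, not necessarily the repo's exact one:

```python
import torch

def to_dense_input(adj: torch.Tensor, node_feat: torch.Tensor) -> torch.Tensor:
    """Pack one graph into an (F+1, N, N) dense tensor: node features
    on the diagonal, adjacency as the last channel."""
    n, f = node_feat.shape
    x = torch.zeros(f + 1, n, n)
    x[:f, torch.arange(n), torch.arange(n)] = node_feat.t()
    x[f] = adj
    return x  # later stacked to (B, F+1, N, N), zero-padded to the max N
```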
diff --git a/configs/TSP_edge_classification_GAT.json b/configs/TSP_edge_classification_GAT_100k.json
similarity index 76%
rename from configs/TSP_edge_classification_GAT.json
rename to configs/TSP_edge_classification_GAT_100k.json
index e15b7d61c..bc0158f56 100644
--- a/configs/TSP_edge_classification_GAT.json
+++ b/configs/TSP_edge_classification_GAT_100k.json
@@ -7,7 +7,7 @@
"model": "GAT",
"dataset": "TSP",
- "out_dir": "out/TSP_edge_classification/",
+ "out_dir": "out/TSP_edge_classification/GAT/",
"params": {
"seed": 41,
@@ -19,20 +19,20 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 1,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 16,
- "out_dim": 128,
+ "hidden_dim": 15,
+ "out_dim": 120,
"residual": true,
"readout": "mean",
"n_heads": 8,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
- "self_loop": false
+ "self_loop": false,
+ "layer_type": "dgl"
}
}
\ No newline at end of file
diff --git a/configs/TSP_edge_classification_GAT_edgefeat.json b/configs/TSP_edge_classification_GAT_edgefeat.json
new file mode 100644
index 000000000..cb4592111
--- /dev/null
+++ b/configs/TSP_edge_classification_GAT_edgefeat.json
@@ -0,0 +1,39 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 1
+ },
+
+ "model": "GAT",
+ "dataset": "TSP",
+
+ "out_dir": "out/TSP_edge_classification/GAT/edgefeat/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 64,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 15,
+ "out_dim": 120,
+ "residual": true,
+ "readout": "mean",
+ "n_heads": 8,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false,
+ "layer_type": "edgefeat",
+ "edge_feat": false
+ }
+}
\ No newline at end of file
diff --git a/configs/TSP_edge_classification_GAT_edgereprfeat.json b/configs/TSP_edge_classification_GAT_edgereprfeat.json
new file mode 100644
index 000000000..b3220ae1a
--- /dev/null
+++ b/configs/TSP_edge_classification_GAT_edgereprfeat.json
@@ -0,0 +1,39 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 1
+ },
+
+ "model": "GAT",
+ "dataset": "TSP",
+
+ "out_dir": "out/TSP_edge_classification/GAT/edgereprfeat/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 64,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 10,
+ "out_dim": 80,
+ "residual": true,
+ "readout": "mean",
+ "n_heads": 8,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false,
+ "layer_type": "edgereprfeat",
+ "edge_feat": false
+ }
+}
\ No newline at end of file
diff --git a/configs/TSP_edge_classification_GAT_isotropic.json b/configs/TSP_edge_classification_GAT_isotropic.json
new file mode 100644
index 000000000..302839357
--- /dev/null
+++ b/configs/TSP_edge_classification_GAT_isotropic.json
@@ -0,0 +1,39 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 1
+ },
+
+ "model": "GAT",
+ "dataset": "TSP",
+
+ "out_dir": "out/TSP_edge_classification/GAT/isotropic/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 64,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 15,
+ "out_dim": 120,
+ "residual": true,
+ "readout": "mean",
+ "n_heads": 8,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false,
+ "layer_type": "isotropic",
+ "edge_feat": false
+ }
+}
\ No newline at end of file
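The three new GAT variants above differ only in `layer_type`: `isotropic` drops attention entirely, `edgefeat` lets edge features enter the attention logits, and `edgereprfeat` additionally carries explicit edge representations across layers (which is why its `hidden_dim` shrinks to 10 to stay inside the 100k budget). Roughly, for the `edgefeat`-style score — a hedged sketch of one standard formulation, not necessarily the repo's exact layer:

```latex
\alpha_{ij} = \operatorname{softmax}_{j \in \mathcal{N}(i)}
\Big( \operatorname{LeakyReLU}\big( a^\top [\, W h_i \,\Vert\, W h_j \,\Vert\, W_e\, e_{ij} \,] \big) \Big),
\qquad
h_i' = \big\Vert_{k=1}^{K} \sum_{j \in \mathcal{N}(i)} \alpha_{ij}^{(k)}\, W^{(k)} h_j
```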
diff --git a/configs/TSP_edge_classification_GCN.json b/configs/TSP_edge_classification_GCN_100k.json
similarity index 80%
rename from configs/TSP_edge_classification_GCN.json
rename to configs/TSP_edge_classification_GCN_100k.json
index 46febd1fe..e71728a8e 100644
--- a/configs/TSP_edge_classification_GCN.json
+++ b/configs/TSP_edge_classification_GCN_100k.json
@@ -7,7 +7,7 @@
"model": "GCN",
"dataset": "TSP",
- "out_dir": "out/TSP_edge_classification/",
+ "out_dir": "out/TSP_edge_classification/GCN/",
"params": {
"seed": 41,
@@ -19,18 +19,17 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 1,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 128,
- "out_dim": 128,
+ "hidden_dim": 120,
+ "out_dim": 120,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/TSP_edge_classification_GIN.json b/configs/TSP_edge_classification_GIN_100k.json
similarity index 80%
rename from configs/TSP_edge_classification_GIN.json
rename to configs/TSP_edge_classification_GIN_100k.json
index fc462080d..502544b50 100644
--- a/configs/TSP_edge_classification_GIN.json
+++ b/configs/TSP_edge_classification_GIN_100k.json
@@ -7,7 +7,7 @@
"model": "GIN",
"dataset": "TSP",
- "out_dir": "out/TSP_edge_classification/",
+ "out_dir": "out/TSP_edge_classification/GIN/",
"params": {
"seed": 41,
@@ -19,20 +19,19 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 1,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 80,
- "residual": false,
+ "hidden_dim": 73,
+ "residual": true,
"readout": "sum",
"n_mlp_GIN": 2,
"learn_eps_GIN": true,
"neighbor_aggr_GIN": "sum",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/TSP_edge_classification_GatedGCN.json b/configs/TSP_edge_classification_GatedGCN_100k.json
similarity index 75%
rename from configs/TSP_edge_classification_GatedGCN.json
rename to configs/TSP_edge_classification_GatedGCN_100k.json
index 53865f804..355acea2d 100644
--- a/configs/TSP_edge_classification_GatedGCN.json
+++ b/configs/TSP_edge_classification_GatedGCN_100k.json
@@ -7,7 +7,7 @@
"model": "GatedGCN",
"dataset": "TSP",
- "out_dir": "out/TSP_edge_classification/",
+ "out_dir": "out/TSP_edge_classification/GatedGCN/",
"params": {
"seed": 41,
@@ -19,19 +19,19 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 1,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 64,
- "out_dim": 64,
+ "hidden_dim": 65,
+ "out_dim": 65,
"residual": true,
"edge_feat": false,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
- "batch_norm": true
+ "batch_norm": true,
+ "layer_type": "edgereprfeat"
}
}
\ No newline at end of file
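The GatedGCN rename also pins `layer_type: edgereprfeat`, the anisotropic variant that maintains per-edge representations. For reference, its usual update — a sketch following the standard GatedGCN formulation, with BN = batch norm, σ = sigmoid, ⊙ element-wise product, and A, B, C, U, V learned matrices:

```latex
e_{ij}^{\ell+1} = e_{ij}^{\ell} + \mathrm{ReLU}\Big(\mathrm{BN}\big(A^{\ell} h_i^{\ell} + B^{\ell} h_j^{\ell} + C^{\ell} e_{ij}^{\ell}\big)\Big),
\qquad
h_i^{\ell+1} = h_i^{\ell} + \mathrm{ReLU}\Big(\mathrm{BN}\big(U^{\ell} h_i^{\ell} + \textstyle\sum_{j \in \mathcal{N}(i)} \sigma(e_{ij}^{\ell+1}) \odot V^{\ell} h_j^{\ell}\big)\Big)
```

The `edgefeat` and `isotropic` variants added below simplify this by, respectively, dropping the persistent edge representation and dropping the gating altogether.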
diff --git a/configs/TSP_edge_classification_GatedGCN_edgefeat.json b/configs/TSP_edge_classification_GatedGCN_edgefeat.json
new file mode 100644
index 000000000..6b0b18a65
--- /dev/null
+++ b/configs/TSP_edge_classification_GatedGCN_edgefeat.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 3
+ },
+
+ "model": "GatedGCN",
+ "dataset": "TSP",
+
+ "out_dir": "out/TSP_edge_classification/GatedGCN/edgefeat/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 64,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 72,
+ "out_dim": 72,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "layer_type": "edgefeat"
+ }
+}
\ No newline at end of file
diff --git a/configs/TSP_edge_classification_GatedGCN_edgereprfeat.json b/configs/TSP_edge_classification_GatedGCN_edgereprfeat.json
new file mode 100644
index 000000000..a1a77ed1f
--- /dev/null
+++ b/configs/TSP_edge_classification_GatedGCN_edgereprfeat.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 3
+ },
+
+ "model": "GatedGCN",
+ "dataset": "TSP",
+
+ "out_dir": "out/TSP_edge_classification/GatedGCN/edgereprfeat/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 64,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 65,
+ "out_dim": 65,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "layer_type": "edgereprfeat"
+ }
+}
\ No newline at end of file
diff --git a/configs/TSP_edge_classification_GatedGCN_isotropic.json b/configs/TSP_edge_classification_GatedGCN_isotropic.json
new file mode 100644
index 000000000..42c2122ca
--- /dev/null
+++ b/configs/TSP_edge_classification_GatedGCN_isotropic.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 3
+ },
+
+ "model": "GatedGCN",
+ "dataset": "TSP",
+
+ "out_dir": "out/TSP_edge_classification/GatedGCN/isotropic/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 64,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 96,
+ "out_dim": 96,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "layer_type": "isotropic"
+ }
+}
\ No newline at end of file
diff --git a/configs/TSP_edge_classification_GraphSage.json b/configs/TSP_edge_classification_GraphSage_100k.json
similarity index 75%
rename from configs/TSP_edge_classification_GraphSage.json
rename to configs/TSP_edge_classification_GraphSage_100k.json
index ec195e6e1..ad07da5d9 100644
--- a/configs/TSP_edge_classification_GraphSage.json
+++ b/configs/TSP_edge_classification_GraphSage_100k.json
@@ -7,7 +7,8 @@
"model": "GraphSage",
"dataset": "TSP",
- "out_dir": "out/TSP_edge_classification/",
+
+ "out_dir": "out/TSP_edge_classification/GraphSage/",
"params": {
"seed": 41,
@@ -19,19 +20,18 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 1,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 96,
- "out_dim": 96,
+ "hidden_dim": 82,
+ "out_dim": 82,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
- "sage_aggregator": "meanpool"
+ "sage_aggregator": "maxpool"
}
}
\ No newline at end of file
diff --git a/configs/TSP_edge_classification_MLP.json b/configs/TSP_edge_classification_MLP_100k.json
similarity index 78%
rename from configs/TSP_edge_classification_MLP.json
rename to configs/TSP_edge_classification_MLP_100k.json
index 279717432..520fc2293 100644
--- a/configs/TSP_edge_classification_MLP.json
+++ b/configs/TSP_edge_classification_MLP_100k.json
@@ -7,7 +7,7 @@
"model": "MLP",
"dataset": "TSP",
- "out_dir": "out/TSP_edge_classification/",
+ "out_dir": "out/TSP_edge_classification/MLP/",
"params": {
"seed": 41,
@@ -19,13 +19,13 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 1,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
- "L": 3,
- "hidden_dim": 144,
- "out_dim": 144,
+ "L": 4,
+ "hidden_dim": 132,
+ "out_dim": 132,
"readout": "mean",
"gated": false,
"in_feat_dropout": 0.0,
diff --git a/configs/TSP_edge_classification_MoNet.json b/configs/TSP_edge_classification_MoNet_100k.json
similarity index 81%
rename from configs/TSP_edge_classification_MoNet.json
rename to configs/TSP_edge_classification_MoNet_100k.json
index e243b627e..ea154100d 100644
--- a/configs/TSP_edge_classification_MoNet.json
+++ b/configs/TSP_edge_classification_MoNet_100k.json
@@ -7,7 +7,7 @@
"model": "MoNet",
"dataset": "TSP",
- "out_dir": "out/TSP_edge_classification/",
+ "out_dir": "out/TSP_edge_classification/MoNet/",
"params": {
"seed": 41,
@@ -19,20 +19,19 @@
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 1,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 80,
- "out_dim": 80,
+ "hidden_dim": 82,
+ "out_dim": 82,
"residual": true,
"readout": "mean",
"kernel": 3,
"pseudo_dim_MoNet": 2,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/TSP_edge_classification_RingGNN_100k.json b/configs/TSP_edge_classification_RingGNN_100k.json
new file mode 100644
index 000000000..d3c8e6266
--- /dev/null
+++ b/configs/TSP_edge_classification_RingGNN_100k.json
@@ -0,0 +1,34 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 1
+ },
+
+ "model": "RingGNN",
+ "dataset": "TSP",
+
+ "out_dir": "out/TSP_edge_classification/RingGNN/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 500,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 1,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 2,
+ "hidden_dim": 42,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false,
+ "edge_feat": true
+ }
+}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_MLP_GATED_DD.json b/configs/TUs_graph_classification_3WLGNN_DD_100k.json
similarity index 63%
rename from configs/TUs_graph_classification_MLP_GATED_DD.json
rename to configs/TUs_graph_classification_3WLGNN_DD_100k.json
index 8cd33c7d1..96eb9c48c 100644
--- a/configs/TUs_graph_classification_MLP_GATED_DD.json
+++ b/configs/TUs_graph_classification_3WLGNN_DD_100k.json
@@ -4,7 +4,7 @@
"id": 0
},
- "model": "MLP",
+ "model": "3WLGNN",
"dataset": "DD",
"out_dir": "out/TUs_graph_classification/",
@@ -12,23 +12,22 @@
"params": {
"seed": 41,
"epochs": 1000,
- "batch_size": 20,
+ "batch_size": 4,
"init_lr": 1e-4,
"lr_reduce_factor": 0.5,
"lr_schedule_patience": 25,
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
- "L": 4,
- "hidden_dim": 128,
- "out_dim": 128,
- "readout": "mean",
- "gated": true,
- "in_feat_dropout": 0.0,
- "dropout": 0.0
+ "L": 3,
+ "hidden_dim": 74,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
}
}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_MLP_GATED_ENZYMES.json b/configs/TUs_graph_classification_3WLGNN_ENZYMES_100k.json
similarity index 63%
rename from configs/TUs_graph_classification_MLP_GATED_ENZYMES.json
rename to configs/TUs_graph_classification_3WLGNN_ENZYMES_100k.json
index 88c115180..f4e37df00 100644
--- a/configs/TUs_graph_classification_MLP_GATED_ENZYMES.json
+++ b/configs/TUs_graph_classification_3WLGNN_ENZYMES_100k.json
@@ -4,7 +4,7 @@
"id": 0
},
- "model": "MLP",
+ "model": "3WLGNN",
"dataset": "ENZYMES",
"out_dir": "out/TUs_graph_classification/",
@@ -12,23 +12,22 @@
"params": {
"seed": 41,
"epochs": 1000,
- "batch_size": 20,
+ "batch_size": 4,
"init_lr": 1e-3,
"lr_reduce_factor": 0.5,
"lr_schedule_patience": 25,
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
- "L": 4,
- "hidden_dim": 128,
- "out_dim": 128,
- "readout": "mean",
- "gated": true,
- "in_feat_dropout": 0.0,
- "dropout": 0.0
+ "L": 3,
+ "hidden_dim": 80,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
}
}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_3WLGNN_PROTEINS_full_100k.json b/configs/TUs_graph_classification_3WLGNN_PROTEINS_full_100k.json
new file mode 100644
index 000000000..3604a5dac
--- /dev/null
+++ b/configs/TUs_graph_classification_3WLGNN_PROTEINS_full_100k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "PROTEINS_full",
+
+ "out_dir": "out/TUs_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 1e-3,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 25,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 80,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_DiffPool_DD.json b/configs/TUs_graph_classification_DiffPool_DD.json
deleted file mode 100644
index e2372611b..000000000
--- a/configs/TUs_graph_classification_DiffPool_DD.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
- "gpu": {
- "use": true,
- "id": 0
- },
-
- "model": "DiffPool",
- "dataset": "DD",
-
- "out_dir": "out/TUs_graph_classification/",
-
- "params": {
- "seed": 41,
- "epochs": 1000,
- "batch_size": 10,
- "init_lr": 5e-4,
- "lr_reduce_factor": 0.5,
- "lr_schedule_patience": 25,
- "min_lr": 1e-6,
- "weight_decay": 0.0,
- "print_epoch_interval": 5,
- "max_time": 48
- },
-
- "net_params": {
- "L": 3,
- "hidden_dim": 8,
- "embedding_dim": 8,
- "num_pool": 1,
- "pool_ratio": 0.15,
- "residual": true,
- "readout": "mean",
- "in_feat_dropout": 0.0,
- "dropout": 0.0,
- "graph_norm": true,
- "batch_norm": true,
- "sage_aggregator": "meanpool",
- "data_mode": "default",
- "linkpred": true,
- "cat": false
- }
-}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_DiffPool_ENZYMES.json b/configs/TUs_graph_classification_DiffPool_ENZYMES.json
deleted file mode 100644
index 6b06722bf..000000000
--- a/configs/TUs_graph_classification_DiffPool_ENZYMES.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
- "gpu": {
- "use": true,
- "id": 0
- },
-
- "model": "DiffPool",
- "dataset": "ENZYMES",
-
- "out_dir": "out/TUs_graph_classification/",
-
- "params": {
- "seed": 41,
- "epochs": 1000,
- "batch_size": 20,
- "init_lr": 7e-3,
- "lr_reduce_factor": 0.5,
- "lr_schedule_patience": 25,
- "min_lr": 1e-6,
- "weight_decay": 0.0,
- "print_epoch_interval": 5,
- "max_time": 48
- },
-
- "net_params": {
- "L": 3,
- "hidden_dim": 64,
- "embedding_dim": 64,
- "num_pool": 1,
- "pool_ratio": 0.15,
- "residual": true,
- "readout": "mean",
- "in_feat_dropout": 0.0,
- "dropout": 0.0,
- "graph_norm": true,
- "batch_norm": true,
- "sage_aggregator": "meanpool",
- "data_mode": "default",
- "linkpred": true,
- "cat": false
- }
-}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_DiffPool_PROTEINS_full.json b/configs/TUs_graph_classification_DiffPool_PROTEINS_full.json
deleted file mode 100644
index 165872e15..000000000
--- a/configs/TUs_graph_classification_DiffPool_PROTEINS_full.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
- "gpu": {
- "use": true,
- "id": 0
- },
-
- "model": "DiffPool",
- "dataset": "PROTEINS_full",
-
- "out_dir": "out/TUs_graph_classification/",
-
- "params": {
- "seed": 41,
- "epochs": 1000,
- "batch_size": 20,
- "init_lr": 1e-3,
- "lr_reduce_factor": 0.5,
- "lr_schedule_patience": 25,
- "min_lr": 1e-6,
- "weight_decay": 0.0,
- "print_epoch_interval": 5,
- "max_time": 48
- },
-
- "net_params": {
- "L": 3,
- "hidden_dim": 22,
- "embedding_dim": 22,
- "num_pool": 1,
- "pool_ratio": 0.15,
- "residual": true,
- "readout": "mean",
- "in_feat_dropout": 0.0,
- "dropout": 0.0,
- "graph_norm": true,
- "batch_norm": true,
- "sage_aggregator": "meanpool",
- "data_mode": "default",
- "linkpred": true,
- "cat": false
- }
-}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_GAT_DD.json b/configs/TUs_graph_classification_GAT_DD_100k.json
similarity index 86%
rename from configs/TUs_graph_classification_GAT_DD.json
rename to configs/TUs_graph_classification_GAT_DD_100k.json
index 95b65b89f..91de8932f 100644
--- a/configs/TUs_graph_classification_GAT_DD.json
+++ b/configs/TUs_graph_classification_GAT_DD_100k.json
@@ -19,19 +19,18 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 16,
- "out_dim": 128,
+ "hidden_dim": 17,
+ "out_dim": 136,
"residual": true,
"readout": "mean",
"n_heads": 8,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/TUs_graph_classification_GAT_ENZYMES.json b/configs/TUs_graph_classification_GAT_ENZYMES_100k.json
similarity index 87%
rename from configs/TUs_graph_classification_GAT_ENZYMES.json
rename to configs/TUs_graph_classification_GAT_ENZYMES_100k.json
index b3171c4e9..b78e46c6a 100644
--- a/configs/TUs_graph_classification_GAT_ENZYMES.json
+++ b/configs/TUs_graph_classification_GAT_ENZYMES_100k.json
@@ -19,19 +19,18 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 16,
- "out_dim": 128,
+ "hidden_dim": 18,
+ "out_dim": 144,
"residual": true,
"readout": "mean",
"n_heads": 8,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/TUs_graph_classification_GAT_PROTEINS_full.json b/configs/TUs_graph_classification_GAT_PROTEINS_full_100k.json
similarity index 87%
rename from configs/TUs_graph_classification_GAT_PROTEINS_full.json
rename to configs/TUs_graph_classification_GAT_PROTEINS_full_100k.json
index aaa1af3e0..f59f0e9b8 100644
--- a/configs/TUs_graph_classification_GAT_PROTEINS_full.json
+++ b/configs/TUs_graph_classification_GAT_PROTEINS_full_100k.json
@@ -19,19 +19,18 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 16,
- "out_dim": 128,
+ "hidden_dim": 18,
+ "out_dim": 144,
"residual": true,
"readout": "mean",
"n_heads": 8,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/TUs_graph_classification_GCN_DD.json b/configs/TUs_graph_classification_GCN_DD_100k.json
similarity index 83%
rename from configs/TUs_graph_classification_GCN_DD.json
rename to configs/TUs_graph_classification_GCN_DD_100k.json
index e4bb3f344..2360df932 100644
--- a/configs/TUs_graph_classification_GCN_DD.json
+++ b/configs/TUs_graph_classification_GCN_DD_100k.json
@@ -13,24 +13,23 @@
"seed": 41,
"epochs": 1000,
"batch_size": 20,
- "init_lr": 1e-5,
+ "init_lr": 7e-4,
"lr_reduce_factor": 0.5,
"lr_schedule_patience": 25,
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 128,
- "out_dim": 128,
+ "hidden_dim": 138,
+ "out_dim": 138,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/TUs_graph_classification_GCN_ENZYMES.json b/configs/TUs_graph_classification_GCN_ENZYMES_100k.json
similarity index 86%
rename from configs/TUs_graph_classification_GCN_ENZYMES.json
rename to configs/TUs_graph_classification_GCN_ENZYMES_100k.json
index 225f7b180..99c18beef 100644
--- a/configs/TUs_graph_classification_GCN_ENZYMES.json
+++ b/configs/TUs_graph_classification_GCN_ENZYMES_100k.json
@@ -19,18 +19,17 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 128,
- "out_dim": 128,
+ "hidden_dim": 146,
+ "out_dim": 146,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/TUs_graph_classification_GCN_PROTEINS_full.json b/configs/TUs_graph_classification_GCN_PROTEINS_full_100k.json
similarity index 86%
rename from configs/TUs_graph_classification_GCN_PROTEINS_full.json
rename to configs/TUs_graph_classification_GCN_PROTEINS_full_100k.json
index 5569d8633..45c8a4e7e 100644
--- a/configs/TUs_graph_classification_GCN_PROTEINS_full.json
+++ b/configs/TUs_graph_classification_GCN_PROTEINS_full_100k.json
@@ -19,18 +19,17 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 128,
- "out_dim": 128,
+ "hidden_dim": 146,
+ "out_dim": 146,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/TUs_graph_classification_GIN_DD.json b/configs/TUs_graph_classification_GIN_DD_100k.json
similarity index 90%
rename from configs/TUs_graph_classification_GIN_DD.json
rename to configs/TUs_graph_classification_GIN_DD_100k.json
index 1fecbe103..0f60ea4e8 100644
--- a/configs/TUs_graph_classification_GIN_DD.json
+++ b/configs/TUs_graph_classification_GIN_DD_100k.json
@@ -19,12 +19,12 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 96,
+ "hidden_dim": 106,
"residual": true,
"readout": "sum",
"n_mlp_GIN": 2,
@@ -32,7 +32,6 @@
"neighbor_aggr_GIN": "sum",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_GIN_ENZYMES.json b/configs/TUs_graph_classification_GIN_ENZYMES_100k.json
similarity index 90%
rename from configs/TUs_graph_classification_GIN_ENZYMES.json
rename to configs/TUs_graph_classification_GIN_ENZYMES_100k.json
index a101819ac..0503644f6 100644
--- a/configs/TUs_graph_classification_GIN_ENZYMES.json
+++ b/configs/TUs_graph_classification_GIN_ENZYMES_100k.json
@@ -19,12 +19,12 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 96,
+ "hidden_dim": 110,
"residual": true,
"readout": "sum",
"n_mlp_GIN": 2,
@@ -32,7 +32,6 @@
"neighbor_aggr_GIN": "sum",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_GIN_PROTEINS_full.json b/configs/TUs_graph_classification_GIN_PROTEINS_full_100k.json
similarity index 87%
rename from configs/TUs_graph_classification_GIN_PROTEINS_full.json
rename to configs/TUs_graph_classification_GIN_PROTEINS_full_100k.json
index ff4a52011..155f19e9e 100644
--- a/configs/TUs_graph_classification_GIN_PROTEINS_full.json
+++ b/configs/TUs_graph_classification_GIN_PROTEINS_full_100k.json
@@ -13,18 +13,18 @@
"seed": 41,
"epochs": 1000,
"batch_size": 20,
- "init_lr": 7e-3,
+ "init_lr": 1e-4,
"lr_reduce_factor": 0.5,
"lr_schedule_patience": 25,
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 96,
+ "hidden_dim": 110,
"residual": true,
"readout": "sum",
"n_mlp_GIN": 2,
@@ -32,7 +32,6 @@
"neighbor_aggr_GIN": "sum",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_GatedGCN_DD.json b/configs/TUs_graph_classification_GatedGCN_DD_100k.json
similarity index 86%
rename from configs/TUs_graph_classification_GatedGCN_DD.json
rename to configs/TUs_graph_classification_GatedGCN_DD_100k.json
index 5a67dc1d6..b38625710 100644
--- a/configs/TUs_graph_classification_GatedGCN_DD.json
+++ b/configs/TUs_graph_classification_GatedGCN_DD_100k.json
@@ -19,18 +19,17 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 64,
- "out_dim": 64,
+ "hidden_dim": 66,
+ "out_dim": 66,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"edge_feat": false
}
diff --git a/configs/TUs_graph_classification_GatedGCN_ENZYMES.json b/configs/TUs_graph_classification_GatedGCN_ENZYMES_100k.json
similarity index 86%
rename from configs/TUs_graph_classification_GatedGCN_ENZYMES.json
rename to configs/TUs_graph_classification_GatedGCN_ENZYMES_100k.json
index 4d7ca540d..1602ef8a0 100644
--- a/configs/TUs_graph_classification_GatedGCN_ENZYMES.json
+++ b/configs/TUs_graph_classification_GatedGCN_ENZYMES_100k.json
@@ -19,18 +19,17 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 64,
- "out_dim": 64,
+ "hidden_dim": 69,
+ "out_dim": 69,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"edge_feat": false
}
diff --git a/configs/TUs_graph_classification_GatedGCN_PROTEINS_full.json b/configs/TUs_graph_classification_GatedGCN_PROTEINS_full_100k.json
similarity index 86%
rename from configs/TUs_graph_classification_GatedGCN_PROTEINS_full.json
rename to configs/TUs_graph_classification_GatedGCN_PROTEINS_full_100k.json
index 4ab9624de..fe6452081 100644
--- a/configs/TUs_graph_classification_GatedGCN_PROTEINS_full.json
+++ b/configs/TUs_graph_classification_GatedGCN_PROTEINS_full_100k.json
@@ -19,18 +19,17 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 64,
- "out_dim": 64,
+ "hidden_dim": 69,
+ "out_dim": 69,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"edge_feat": false
}
diff --git a/configs/TUs_graph_classification_GraphSage_DD.json b/configs/TUs_graph_classification_GraphSage_DD_100k.json
similarity index 78%
rename from configs/TUs_graph_classification_GraphSage_DD.json
rename to configs/TUs_graph_classification_GraphSage_DD_100k.json
index ed63f7b83..c32736a91 100644
--- a/configs/TUs_graph_classification_GraphSage_DD.json
+++ b/configs/TUs_graph_classification_GraphSage_DD_100k.json
@@ -13,25 +13,24 @@
"seed": 41,
"epochs": 1000,
"batch_size": 20,
- "init_lr": 1e-5,
+ "init_lr": 1e-3,
"lr_reduce_factor": 0.5,
"lr_schedule_patience": 25,
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 96,
- "out_dim": 96,
+ "hidden_dim": 86,
+ "out_dim": 86,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
- "sage_aggregator": "meanpool"
+ "sage_aggregator": "maxpool"
}
}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_GraphSage_ENZYMES.json b/configs/TUs_graph_classification_GraphSage_ENZYMES_100k.json
similarity index 82%
rename from configs/TUs_graph_classification_GraphSage_ENZYMES.json
rename to configs/TUs_graph_classification_GraphSage_ENZYMES_100k.json
index 9a14e47cf..0e9d7a0b9 100644
--- a/configs/TUs_graph_classification_GraphSage_ENZYMES.json
+++ b/configs/TUs_graph_classification_GraphSage_ENZYMES_100k.json
@@ -19,19 +19,18 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 96,
- "out_dim": 96,
+ "hidden_dim": 90,
+ "out_dim": 90,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
- "sage_aggregator": "meanpool"
+ "sage_aggregator": "maxpool"
}
}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_GraphSage_PROTEINS_full.json b/configs/TUs_graph_classification_GraphSage_PROTEINS_full_100k.json
similarity index 82%
rename from configs/TUs_graph_classification_GraphSage_PROTEINS_full.json
rename to configs/TUs_graph_classification_GraphSage_PROTEINS_full_100k.json
index d8d445056..4abfe04c0 100644
--- a/configs/TUs_graph_classification_GraphSage_PROTEINS_full.json
+++ b/configs/TUs_graph_classification_GraphSage_PROTEINS_full_100k.json
@@ -19,19 +19,18 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 96,
- "out_dim": 96,
+ "hidden_dim": 88,
+ "out_dim": 88,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
- "sage_aggregator": "meanpool"
+ "sage_aggregator": "maxpool"
}
}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_MLP_DD.json b/configs/TUs_graph_classification_MLP_DD_100k.json
similarity index 88%
rename from configs/TUs_graph_classification_MLP_DD.json
rename to configs/TUs_graph_classification_MLP_DD_100k.json
index 807d14413..17d9a5e39 100644
--- a/configs/TUs_graph_classification_MLP_DD.json
+++ b/configs/TUs_graph_classification_MLP_DD_100k.json
@@ -19,13 +19,13 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 128,
- "out_dim": 128,
+ "hidden_dim": 154,
+ "out_dim": 154,
"readout": "mean",
"gated": false,
"in_feat_dropout": 0.0,
diff --git a/configs/TUs_graph_classification_MLP_ENZYMES.json b/configs/TUs_graph_classification_MLP_ENZYMES_100k.json
similarity index 88%
rename from configs/TUs_graph_classification_MLP_ENZYMES.json
rename to configs/TUs_graph_classification_MLP_ENZYMES_100k.json
index 4ebe5b639..6a352c5af 100644
--- a/configs/TUs_graph_classification_MLP_ENZYMES.json
+++ b/configs/TUs_graph_classification_MLP_ENZYMES_100k.json
@@ -19,13 +19,13 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 128,
- "out_dim": 128,
+ "hidden_dim": 164,
+ "out_dim": 164,
"readout": "mean",
"gated": false,
"in_feat_dropout": 0.0,
diff --git a/configs/TUs_graph_classification_MLP_PROTEINS_full.json b/configs/TUs_graph_classification_MLP_PROTEINS_full_100k.json
similarity index 89%
rename from configs/TUs_graph_classification_MLP_PROTEINS_full.json
rename to configs/TUs_graph_classification_MLP_PROTEINS_full_100k.json
index a36c97828..e3f3a8406 100644
--- a/configs/TUs_graph_classification_MLP_PROTEINS_full.json
+++ b/configs/TUs_graph_classification_MLP_PROTEINS_full_100k.json
@@ -19,13 +19,13 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 128,
- "out_dim": 128,
+ "hidden_dim": 162,
+ "out_dim": 162,
"readout": "mean",
"gated": false,
"in_feat_dropout": 0.0,
diff --git a/configs/TUs_graph_classification_MoNet_DD.json b/configs/TUs_graph_classification_MoNet_DD_100k.json
similarity index 83%
rename from configs/TUs_graph_classification_MoNet_DD.json
rename to configs/TUs_graph_classification_MoNet_DD_100k.json
index 938686b4f..90944650e 100644
--- a/configs/TUs_graph_classification_MoNet_DD.json
+++ b/configs/TUs_graph_classification_MoNet_DD_100k.json
@@ -13,26 +13,25 @@
"seed": 41,
"epochs": 1000,
"batch_size": 20,
- "init_lr": 7e-5,
+ "init_lr": 1e-5,
"lr_reduce_factor": 0.5,
"lr_schedule_patience": 25,
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 80,
- "out_dim": 80,
+ "hidden_dim": 86,
+ "out_dim": 86,
"residual": true,
"readout": "mean",
"kernel": 3,
"pseudo_dim_MoNet": 2,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_MoNet_ENZYMES.json b/configs/TUs_graph_classification_MoNet_ENZYMES_100k.json
similarity index 87%
rename from configs/TUs_graph_classification_MoNet_ENZYMES.json
rename to configs/TUs_graph_classification_MoNet_ENZYMES_100k.json
index 7c58dffca..83df5c18d 100644
--- a/configs/TUs_graph_classification_MoNet_ENZYMES.json
+++ b/configs/TUs_graph_classification_MoNet_ENZYMES_100k.json
@@ -19,20 +19,19 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 80,
- "out_dim": 80,
+ "hidden_dim": 90,
+ "out_dim": 90,
"residual": true,
"readout": "mean",
"kernel": 3,
"pseudo_dim_MoNet": 2,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_MoNet_PROTEINS_full.json b/configs/TUs_graph_classification_MoNet_PROTEINS_full_100k.json
similarity index 87%
rename from configs/TUs_graph_classification_MoNet_PROTEINS_full.json
rename to configs/TUs_graph_classification_MoNet_PROTEINS_full_100k.json
index b41f47afd..3715efc61 100644
--- a/configs/TUs_graph_classification_MoNet_PROTEINS_full.json
+++ b/configs/TUs_graph_classification_MoNet_PROTEINS_full_100k.json
@@ -19,20 +19,19 @@
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 80,
- "out_dim": 80,
+ "hidden_dim": 89,
+ "out_dim": 89,
"residual": true,
"readout": "mean",
"kernel": 3,
"pseudo_dim_MoNet": 2,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_RingGNN_DD_100k.json b/configs/TUs_graph_classification_RingGNN_DD_100k.json
new file mode 100644
index 000000000..fc965ac67
--- /dev/null
+++ b/configs/TUs_graph_classification_RingGNN_DD_100k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "DD",
+
+ "out_dir": "out/TUs_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 1e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 25,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 2,
+ "hidden_dim": 20,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_RingGNN_ENZYMES_100k.json b/configs/TUs_graph_classification_RingGNN_ENZYMES_100k.json
new file mode 100644
index 000000000..063374c02
--- /dev/null
+++ b/configs/TUs_graph_classification_RingGNN_ENZYMES_100k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "ENZYMES",
+
+ "out_dir": "out/TUs_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 1e-4,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 25,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 2,
+ "hidden_dim": 38,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/TUs_graph_classification_MLP_GATED_PROTEINS_full.json b/configs/TUs_graph_classification_RingGNN_PROTEINS_full_100k.json
similarity index 60%
rename from configs/TUs_graph_classification_MLP_GATED_PROTEINS_full.json
rename to configs/TUs_graph_classification_RingGNN_PROTEINS_full_100k.json
index c75812fc2..6b975f363 100644
--- a/configs/TUs_graph_classification_MLP_GATED_PROTEINS_full.json
+++ b/configs/TUs_graph_classification_RingGNN_PROTEINS_full_100k.json
@@ -4,7 +4,7 @@
"id": 0
},
- "model": "MLP",
+ "model": "RingGNN",
"dataset": "PROTEINS_full",
"out_dir": "out/TUs_graph_classification/",
@@ -12,23 +12,22 @@
"params": {
"seed": 41,
"epochs": 1000,
- "batch_size": 20,
- "init_lr": 1e-4,
+ "batch_size": 4,
+ "init_lr": 7e-5,
"lr_reduce_factor": 0.5,
"lr_schedule_patience": 25,
"min_lr": 1e-6,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
- "L": 4,
- "hidden_dim": 128,
- "out_dim": 128,
- "readout": "mean",
- "gated": true,
- "in_feat_dropout": 0.0,
- "dropout": 0.0
+ "L": 2,
+ "hidden_dim": 35,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
}
}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_3WLGNN_ZINC_100k.json b/configs/molecules_graph_regression_3WLGNN_ZINC_100k.json
new file mode 100644
index 000000000..d8fec2e36
--- /dev/null
+++ b/configs/molecules_graph_regression_3WLGNN_ZINC_100k.json
@@ -0,0 +1,34 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "ZINC",
+
+ "out_dir": "out/molecules_graph_regression/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 79,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false,
+ "edge_feat": false
+ }
+}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_3WLGNN_ZINC_500k.json b/configs/molecules_graph_regression_3WLGNN_ZINC_500k.json
new file mode 100644
index 000000000..3a3073fce
--- /dev/null
+++ b/configs/molecules_graph_regression_3WLGNN_ZINC_500k.json
@@ -0,0 +1,34 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "ZINC",
+
+ "out_dir": "out/molecules_graph_regression/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 3,
+ "hidden_dim": 180,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false,
+ "edge_feat": false
+ }
+}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_3WLGNN_ZINC_L8_500k.json b/configs/molecules_graph_regression_3WLGNN_ZINC_L8_500k.json
new file mode 100644
index 000000000..e173a03d5
--- /dev/null
+++ b/configs/molecules_graph_regression_3WLGNN_ZINC_L8_500k.json
@@ -0,0 +1,34 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "ZINC",
+
+ "out_dir": "out/molecules_graph_regression/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 8,
+ "hidden_dim": 104,
+ "depth_of_mlp": 2,
+ "residual": true,
+ "dropout": 0.0,
+ "layer_norm": false,
+ "edge_feat": false
+ }
+}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_DiffPool_ZINC.json b/configs/molecules_graph_regression_DiffPool_ZINC.json
deleted file mode 100644
index c63577559..000000000
--- a/configs/molecules_graph_regression_DiffPool_ZINC.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
- "gpu": {
- "use": true,
- "id": 0
- },
-
- "model": "DiffPool",
- "dataset": "ZINC",
-
- "out_dir": "out/molecules_graph_regression/",
-
- "params": {
- "seed": 41,
- "epochs": 1000,
- "batch_size": 128,
- "init_lr": 0.001,
- "lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
- "min_lr": 1e-5,
- "weight_decay": 0.0,
- "print_epoch_interval": 5,
- "max_time": 48
- },
-
- "net_params": {
- "L": 3,
- "hidden_dim": 56,
- "embedding_dim": 56,
- "num_pool": 1,
- "pool_ratio": 0.15,
- "sage_aggregator": "meanpool",
- "residual": true,
- "readout": "mean",
- "in_feat_dropout": 0.0,
- "dropout": 0.0,
- "graph_norm": true,
- "batch_norm": true,
- "data_mode": "default",
- "linkpred": true,
- "cat": false
- }
-}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_GAT_ZINC.json b/configs/molecules_graph_regression_GAT_ZINC_100k.json
similarity index 88%
rename from configs/molecules_graph_regression_GAT_ZINC.json
rename to configs/molecules_graph_regression_GAT_ZINC_100k.json
index 282791370..16b933ffe 100644
--- a/configs/molecules_graph_regression_GAT_ZINC.json
+++ b/configs/molecules_graph_regression_GAT_ZINC_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -31,7 +31,6 @@
"n_heads": 8,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/molecules_graph_regression_GAT_ZINC_500k.json b/configs/molecules_graph_regression_GAT_ZINC_500k.json
new file mode 100644
index 000000000..698d98cf2
--- /dev/null
+++ b/configs/molecules_graph_regression_GAT_ZINC_500k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GAT",
+ "dataset": "ZINC",
+
+ "out_dir": "out/molecules_graph_regression/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 22,
+ "out_dim": 176,
+ "residual": true,
+ "readout": "mean",
+ "n_heads": 8,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false
+ }
+}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_GCN_ZINC.json b/configs/molecules_graph_regression_GCN_ZINC_100k.json
similarity index 88%
rename from configs/molecules_graph_regression_GCN_ZINC.json
rename to configs/molecules_graph_regression_GCN_ZINC_100k.json
index 6128997f3..395addc80 100644
--- a/configs/molecules_graph_regression_GCN_ZINC.json
+++ b/configs/molecules_graph_regression_GCN_ZINC_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -30,7 +30,6 @@
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/molecules_graph_regression_GCN_ZINC_500k.json b/configs/molecules_graph_regression_GCN_ZINC_500k.json
new file mode 100644
index 000000000..01eb80363
--- /dev/null
+++ b/configs/molecules_graph_regression_GCN_ZINC_500k.json
@@ -0,0 +1,36 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GCN",
+ "dataset": "ZINC",
+
+ "out_dir": "out/molecules_graph_regression/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 172,
+ "out_dim": 172,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "self_loop": false
+ }
+}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_GIN_ZINC.json b/configs/molecules_graph_regression_GIN_ZINC_100k.json
similarity index 89%
rename from configs/molecules_graph_regression_GIN_ZINC.json
rename to configs/molecules_graph_regression_GIN_ZINC_100k.json
index fbd4e4ec7..7fbcaa4b8 100644
--- a/configs/molecules_graph_regression_GIN_ZINC.json
+++ b/configs/molecules_graph_regression_GIN_ZINC_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -32,7 +32,6 @@
"neighbor_aggr_GIN": "sum",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_GIN_ZINC_500k.json b/configs/molecules_graph_regression_GIN_ZINC_500k.json
new file mode 100644
index 000000000..e9612a51b
--- /dev/null
+++ b/configs/molecules_graph_regression_GIN_ZINC_500k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GIN",
+ "dataset": "ZINC",
+
+ "out_dir": "out/molecules_graph_regression/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 124,
+ "residual": true,
+ "readout": "sum",
+ "n_mlp_GIN": 2,
+ "learn_eps_GIN": true,
+ "neighbor_aggr_GIN": "sum",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true
+ }
+}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_GatedGCN_ZINC.json b/configs/molecules_graph_regression_GatedGCN_ZINC_100k.json
similarity index 81%
rename from configs/molecules_graph_regression_GatedGCN_ZINC.json
rename to configs/molecules_graph_regression_GatedGCN_ZINC_100k.json
index 2897dd28f..bff1d4775 100644
--- a/configs/molecules_graph_regression_GatedGCN_ZINC.json
+++ b/configs/molecules_graph_regression_GatedGCN_ZINC_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -27,11 +27,11 @@
"hidden_dim": 70,
"out_dim": 70,
"residual": true,
- "edge_feat": true,
+ "edge_feat": false,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
- "batch_norm": true
+ "batch_norm": true,
+ "pos_enc": false
}
}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_GatedGCN_ZINC_500k.json b/configs/molecules_graph_regression_GatedGCN_ZINC_500k.json
new file mode 100644
index 000000000..21149dbff
--- /dev/null
+++ b/configs/molecules_graph_regression_GatedGCN_ZINC_500k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GatedGCN",
+ "dataset": "ZINC",
+
+ "out_dir": "out/molecules_graph_regression/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 78,
+ "out_dim": 78,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "pos_enc": false
+ }
+}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_GatedGCN_ZINC_PE_500k.json b/configs/molecules_graph_regression_GatedGCN_ZINC_PE_500k.json
new file mode 100644
index 000000000..801f141a5
--- /dev/null
+++ b/configs/molecules_graph_regression_GatedGCN_ZINC_PE_500k.json
@@ -0,0 +1,38 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GatedGCN",
+ "dataset": "ZINC",
+
+ "out_dir": "out/molecules_graph_regression/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 78,
+ "out_dim": 78,
+ "residual": true,
+ "edge_feat": false,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "pos_enc": true,
+ "pos_enc_dim": 8
+ }
+}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_GraphSage_ZINC.json b/configs/molecules_graph_regression_GraphSage_ZINC_100k.json
similarity index 77%
rename from configs/molecules_graph_regression_GraphSage_ZINC.json
rename to configs/molecules_graph_regression_GraphSage_ZINC_100k.json
index b8a10cf0f..4be7039e1 100644
--- a/configs/molecules_graph_regression_GraphSage_ZINC.json
+++ b/configs/molecules_graph_regression_GraphSage_ZINC_100k.json
@@ -15,23 +15,22 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 108,
- "out_dim": 108,
+ "hidden_dim": 90,
+ "out_dim": 90,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
- "sage_aggregator": "meanpool"
+ "sage_aggregator": "maxpool"
}
}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_GraphSage_ZINC_500k.json b/configs/molecules_graph_regression_GraphSage_ZINC_500k.json
new file mode 100644
index 000000000..fce699d0e
--- /dev/null
+++ b/configs/molecules_graph_regression_GraphSage_ZINC_500k.json
@@ -0,0 +1,36 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "GraphSage",
+ "dataset": "ZINC",
+
+ "out_dir": "out/molecules_graph_regression/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 101,
+ "out_dim": 101,
+ "residual": true,
+ "readout": "mean",
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true,
+ "sage_aggregator": "maxpool"
+ }
+}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_MLP_ZINC.json b/configs/molecules_graph_regression_MLP_ZINC_100k.json
similarity index 91%
rename from configs/molecules_graph_regression_MLP_ZINC.json
rename to configs/molecules_graph_regression_MLP_ZINC_100k.json
index efc25b8c8..bd0c1ce9b 100644
--- a/configs/molecules_graph_regression_MLP_ZINC.json
+++ b/configs/molecules_graph_regression_MLP_ZINC_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
diff --git a/configs/molecules_graph_regression_MLP_GATED_ZINC.json b/configs/molecules_graph_regression_MLP_ZINC_500k.json
similarity index 77%
rename from configs/molecules_graph_regression_MLP_GATED_ZINC.json
rename to configs/molecules_graph_regression_MLP_ZINC_500k.json
index fb03dc065..90536176c 100644
--- a/configs/molecules_graph_regression_MLP_GATED_ZINC.json
+++ b/configs/molecules_graph_regression_MLP_ZINC_500k.json
@@ -15,19 +15,19 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
- "L": 4,
- "hidden_dim": 135,
- "out_dim": 135,
+ "L": 16,
+ "hidden_dim": 173,
+ "out_dim": 173,
"readout": "mean",
- "gated": true,
+ "gated": false,
"in_feat_dropout": 0.0,
"dropout": 0.0
}
diff --git a/configs/molecules_graph_regression_MoNet_ZINC.json b/configs/molecules_graph_regression_MoNet_ZINC_100k.json
similarity index 88%
rename from configs/molecules_graph_regression_MoNet_ZINC.json
rename to configs/molecules_graph_regression_MoNet_ZINC_100k.json
index e760b6c49..8cfc507ab 100644
--- a/configs/molecules_graph_regression_MoNet_ZINC.json
+++ b/configs/molecules_graph_regression_MoNet_ZINC_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -32,7 +32,6 @@
"pseudo_dim_MoNet": 2,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_MoNet_ZINC_500k.json b/configs/molecules_graph_regression_MoNet_ZINC_500k.json
new file mode 100644
index 000000000..966f269b7
--- /dev/null
+++ b/configs/molecules_graph_regression_MoNet_ZINC_500k.json
@@ -0,0 +1,37 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "MoNet",
+ "dataset": "ZINC",
+
+ "out_dir": "out/molecules_graph_regression/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 10,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 16,
+ "hidden_dim": 101,
+ "out_dim": 101,
+ "residual": true,
+ "readout": "mean",
+ "kernel": 3,
+ "pseudo_dim_MoNet": 2,
+ "in_feat_dropout": 0.0,
+ "dropout": 0.0,
+ "batch_norm": true
+ }
+}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_RingGNN_ZINC_100k.json b/configs/molecules_graph_regression_RingGNN_ZINC_100k.json
new file mode 100644
index 000000000..e3211a675
--- /dev/null
+++ b/configs/molecules_graph_regression_RingGNN_ZINC_100k.json
@@ -0,0 +1,34 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "ZINC",
+
+ "out_dir": "out/molecules_graph_regression/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 2,
+ "hidden_dim": 34,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false,
+ "edge_feat": false
+ }
+}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_RingGNN_ZINC_500k.json b/configs/molecules_graph_regression_RingGNN_ZINC_500k.json
new file mode 100644
index 000000000..c948ed29e
--- /dev/null
+++ b/configs/molecules_graph_regression_RingGNN_ZINC_500k.json
@@ -0,0 +1,34 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "ZINC",
+
+ "out_dir": "out/molecules_graph_regression/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 2,
+ "hidden_dim": 90,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false,
+ "edge_feat": false
+ }
+}
\ No newline at end of file
diff --git a/configs/molecules_graph_regression_RingGNN_ZINC_L8_500k.json b/configs/molecules_graph_regression_RingGNN_ZINC_L8_500k.json
new file mode 100644
index 000000000..763cbdc2e
--- /dev/null
+++ b/configs/molecules_graph_regression_RingGNN_ZINC_L8_500k.json
@@ -0,0 +1,34 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "ZINC",
+
+ "out_dir": "out/molecules_graph_regression/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 8,
+ "hidden_dim": 35,
+ "radius": 2,
+ "residual": true,
+ "dropout": 0.0,
+ "layer_norm": false,
+ "edge_feat": false
+ }
+}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_MLP_GATED_CIFAR10.json b/configs/superpixels_graph_classification_3WLGNN_CIFAR10_100k.json
similarity index 64%
rename from configs/superpixels_graph_classification_MLP_GATED_CIFAR10.json
rename to configs/superpixels_graph_classification_3WLGNN_CIFAR10_100k.json
index fab9b6c72..ba29689db 100644
--- a/configs/superpixels_graph_classification_MLP_GATED_CIFAR10.json
+++ b/configs/superpixels_graph_classification_3WLGNN_CIFAR10_100k.json
@@ -4,7 +4,7 @@
"id": 0
},
- "model": "MLP",
+ "model": "3WLGNN",
"dataset": "CIFAR10",
"out_dir": "out/superpixels_graph_classification/",
@@ -12,23 +12,22 @@
"params": {
"seed": 41,
"epochs": 1000,
- "batch_size": 128,
+ "batch_size": 4,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
"lr_schedule_patience": 5,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
- "L": 4,
- "hidden_dim": 150,
- "out_dim": 150,
- "readout": "mean",
- "gated": true,
- "in_feat_dropout": 0.0,
- "dropout": 0.0
+ "L": 3,
+ "hidden_dim": 82,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
}
}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_3WLGNN_CIFAR10_L8_500k.json b/configs/superpixels_graph_classification_3WLGNN_CIFAR10_L8_500k.json
new file mode 100644
index 000000000..4a4e9e761
--- /dev/null
+++ b/configs/superpixels_graph_classification_3WLGNN_CIFAR10_L8_500k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "CIFAR10",
+
+ "out_dir": "out/superpixels_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 8,
+ "hidden_dim": 96,
+ "depth_of_mlp": 2,
+ "residual": true,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_MLP_GATED_MNIST.json b/configs/superpixels_graph_classification_3WLGNN_MNIST_100k.json
similarity index 64%
rename from configs/superpixels_graph_classification_MLP_GATED_MNIST.json
rename to configs/superpixels_graph_classification_3WLGNN_MNIST_100k.json
index a1eab1c9b..b428b20ec 100644
--- a/configs/superpixels_graph_classification_MLP_GATED_MNIST.json
+++ b/configs/superpixels_graph_classification_3WLGNN_MNIST_100k.json
@@ -4,7 +4,7 @@
"id": 0
},
- "model": "MLP",
+ "model": "3WLGNN",
"dataset": "MNIST",
"out_dir": "out/superpixels_graph_classification/",
@@ -12,23 +12,22 @@
"params": {
"seed": 41,
"epochs": 1000,
- "batch_size": 128,
+ "batch_size": 4,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
"lr_schedule_patience": 5,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
- "L": 4,
- "hidden_dim": 150,
- "out_dim": 150,
- "readout": "mean",
- "gated": true,
- "in_feat_dropout": 0.0,
- "dropout": 0.0
+ "L": 3,
+ "hidden_dim": 82,
+ "depth_of_mlp": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
}
}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_3WLGNN_MNIST_L8_500k.json b/configs/superpixels_graph_classification_3WLGNN_MNIST_L8_500k.json
new file mode 100644
index 000000000..43a897051
--- /dev/null
+++ b/configs/superpixels_graph_classification_3WLGNN_MNIST_L8_500k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "3WLGNN",
+ "dataset": "MNIST",
+
+ "out_dir": "out/superpixels_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-5,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 8,
+ "hidden_dim": 96,
+ "depth_of_mlp": 2,
+ "residual": true,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_DiffPool_CIFAR10.json b/configs/superpixels_graph_classification_DiffPool_CIFAR10.json
deleted file mode 100644
index 643647fe5..000000000
--- a/configs/superpixels_graph_classification_DiffPool_CIFAR10.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
- "gpu": {
- "use": true,
- "id": 0
- },
-
- "model": "DiffPool",
- "dataset": "CIFAR10",
-
- "out_dir": "out/superpixels_graph_classification/",
-
- "params": {
- "seed": 41,
- "epochs": 1000,
- "batch_size": 128,
- "init_lr": 0.001,
- "lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
- "min_lr": 1e-5,
- "weight_decay": 0.0,
- "print_epoch_interval": 5,
- "max_time": 48
- },
-
- "net_params": {
- "L": 3,
- "hidden_dim": 32,
- "embedding_dim": 16,
- "num_pool": 1,
- "pool_ratio": 0.15,
- "residual": true,
- "readout": "mean",
- "in_feat_dropout": 0.0,
- "dropout": 0.0,
- "graph_norm": true,
- "batch_norm": true,
- "sage_aggregator": "meanpool",
- "data_mode": "default",
- "linkpred": true,
- "cat": false
- }
-}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_DiffPool_MNIST.json b/configs/superpixels_graph_classification_DiffPool_MNIST.json
deleted file mode 100644
index f2b20c7da..000000000
--- a/configs/superpixels_graph_classification_DiffPool_MNIST.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
- "gpu": {
- "use": true,
- "id": 0
- },
-
- "model": "DiffPool",
- "dataset": "MNIST",
-
- "out_dir": "out/superpixels_graph_classification/",
-
- "params": {
- "seed": 41,
- "epochs": 1000,
- "batch_size": 128,
- "init_lr": 0.001,
- "lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
- "min_lr": 1e-5,
- "weight_decay": 0.0,
- "print_epoch_interval": 5,
- "max_time": 48
- },
-
- "net_params": {
- "L": 3,
- "hidden_dim": 32,
- "embedding_dim": 32,
- "num_pool": 1,
- "pool_ratio": 0.15,
- "residual": true,
- "readout": "mean",
- "in_feat_dropout": 0.0,
- "dropout": 0.0,
- "graph_norm": true,
- "batch_norm": true,
- "sage_aggregator": "meanpool",
- "data_mode": "default",
- "linkpred": true,
- "cat": false
- }
-}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_GAT_CIFAR10.json b/configs/superpixels_graph_classification_GAT_CIFAR10_100k.json
similarity index 89%
rename from configs/superpixels_graph_classification_GAT_CIFAR10.json
rename to configs/superpixels_graph_classification_GAT_CIFAR10_100k.json
index 48aec55b3..0e84c3c7c 100644
--- a/configs/superpixels_graph_classification_GAT_CIFAR10.json
+++ b/configs/superpixels_graph_classification_GAT_CIFAR10_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -31,7 +31,6 @@
"n_heads": 8,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/superpixels_graph_classification_GAT_MNIST.json b/configs/superpixels_graph_classification_GAT_MNIST_100k.json
similarity index 89%
rename from configs/superpixels_graph_classification_GAT_MNIST.json
rename to configs/superpixels_graph_classification_GAT_MNIST_100k.json
index c4318836a..387b1d380 100644
--- a/configs/superpixels_graph_classification_GAT_MNIST.json
+++ b/configs/superpixels_graph_classification_GAT_MNIST_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -31,7 +31,6 @@
"n_heads": 8,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/superpixels_graph_classification_GCN_CIFAR10.json b/configs/superpixels_graph_classification_GCN_CIFAR10_100k.json
similarity index 88%
rename from configs/superpixels_graph_classification_GCN_CIFAR10.json
rename to configs/superpixels_graph_classification_GCN_CIFAR10_100k.json
index 01f1985e6..7fc077f7e 100644
--- a/configs/superpixels_graph_classification_GCN_CIFAR10.json
+++ b/configs/superpixels_graph_classification_GCN_CIFAR10_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -30,7 +30,6 @@
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/superpixels_graph_classification_GCN_MNIST.json b/configs/superpixels_graph_classification_GCN_MNIST_100k.json
similarity index 88%
rename from configs/superpixels_graph_classification_GCN_MNIST.json
rename to configs/superpixels_graph_classification_GCN_MNIST_100k.json
index 70c8971f8..dfccf4968 100644
--- a/configs/superpixels_graph_classification_GCN_MNIST.json
+++ b/configs/superpixels_graph_classification_GCN_MNIST_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -30,7 +30,6 @@
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
"self_loop": false
}
diff --git a/configs/superpixels_graph_classification_GIN_CIFAR10.json b/configs/superpixels_graph_classification_GIN_CIFAR10_100k.json
similarity index 89%
rename from configs/superpixels_graph_classification_GIN_CIFAR10.json
rename to configs/superpixels_graph_classification_GIN_CIFAR10_100k.json
index 047b7ba66..f5e0ed8bd 100644
--- a/configs/superpixels_graph_classification_GIN_CIFAR10.json
+++ b/configs/superpixels_graph_classification_GIN_CIFAR10_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -32,7 +32,6 @@
"neighbor_aggr_GIN": "sum",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_GIN_MNIST.json b/configs/superpixels_graph_classification_GIN_MNIST_100k.json
similarity index 89%
rename from configs/superpixels_graph_classification_GIN_MNIST.json
rename to configs/superpixels_graph_classification_GIN_MNIST_100k.json
index 3891cd3f2..3e0e22e1b 100644
--- a/configs/superpixels_graph_classification_GIN_MNIST.json
+++ b/configs/superpixels_graph_classification_GIN_MNIST_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -33,7 +33,6 @@
"neighbor_aggr_GIN": "sum",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_GatedGCN_CIFAR10.json b/configs/superpixels_graph_classification_GatedGCN_CIFAR10_100k.json
similarity index 88%
rename from configs/superpixels_graph_classification_GatedGCN_CIFAR10.json
rename to configs/superpixels_graph_classification_GatedGCN_CIFAR10_100k.json
index a7fb32622..19885332f 100644
--- a/configs/superpixels_graph_classification_GatedGCN_CIFAR10.json
+++ b/configs/superpixels_graph_classification_GatedGCN_CIFAR10_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -31,7 +31,6 @@
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_GatedGCN_MNIST.json b/configs/superpixels_graph_classification_GatedGCN_MNIST_100k.json
similarity index 88%
rename from configs/superpixels_graph_classification_GatedGCN_MNIST.json
rename to configs/superpixels_graph_classification_GatedGCN_MNIST_100k.json
index 540be901a..b21152cfd 100644
--- a/configs/superpixels_graph_classification_GatedGCN_MNIST.json
+++ b/configs/superpixels_graph_classification_GatedGCN_MNIST_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -31,7 +31,6 @@
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_GraphSage_CIFAR10.json b/configs/superpixels_graph_classification_GraphSage_CIFAR10_100k.json
similarity index 77%
rename from configs/superpixels_graph_classification_GraphSage_CIFAR10.json
rename to configs/superpixels_graph_classification_GraphSage_CIFAR10_100k.json
index f1b778374..f4e2d4d21 100644
--- a/configs/superpixels_graph_classification_GraphSage_CIFAR10.json
+++ b/configs/superpixels_graph_classification_GraphSage_CIFAR10_100k.json
@@ -15,23 +15,22 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 108,
- "out_dim": 108,
+ "hidden_dim": 90,
+ "out_dim": 90,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
- "sage_aggregator": "meanpool"
+ "sage_aggregator": "maxpool"
}
}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_GraphSage_MNIST.json b/configs/superpixels_graph_classification_GraphSage_MNIST_100k.json
similarity index 77%
rename from configs/superpixels_graph_classification_GraphSage_MNIST.json
rename to configs/superpixels_graph_classification_GraphSage_MNIST_100k.json
index d5cd23831..4ec52f9db 100644
--- a/configs/superpixels_graph_classification_GraphSage_MNIST.json
+++ b/configs/superpixels_graph_classification_GraphSage_MNIST_100k.json
@@ -15,23 +15,22 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
"L": 4,
- "hidden_dim": 108,
- "out_dim": 108,
+ "hidden_dim": 90,
+ "out_dim": 90,
"residual": true,
"readout": "mean",
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true,
- "sage_aggregator": "meanpool"
+ "sage_aggregator": "maxpool"
}
}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_MLP_CIFAR10.json b/configs/superpixels_graph_classification_MLP_CIFAR10_100k.json
similarity index 91%
rename from configs/superpixels_graph_classification_MLP_CIFAR10.json
rename to configs/superpixels_graph_classification_MLP_CIFAR10_100k.json
index bf0f19f54..8a346cab5 100644
--- a/configs/superpixels_graph_classification_MLP_CIFAR10.json
+++ b/configs/superpixels_graph_classification_MLP_CIFAR10_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
diff --git a/configs/superpixels_graph_classification_MLP_MNIST.json b/configs/superpixels_graph_classification_MLP_MNIST_100k.json
similarity index 91%
rename from configs/superpixels_graph_classification_MLP_MNIST.json
rename to configs/superpixels_graph_classification_MLP_MNIST_100k.json
index b45997d6e..7f60a43fe 100644
--- a/configs/superpixels_graph_classification_MLP_MNIST.json
+++ b/configs/superpixels_graph_classification_MLP_MNIST_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
diff --git a/configs/superpixels_graph_classification_MoNet_CIFAR10.json b/configs/superpixels_graph_classification_MoNet_CIFAR10_100k.json
similarity index 89%
rename from configs/superpixels_graph_classification_MoNet_CIFAR10.json
rename to configs/superpixels_graph_classification_MoNet_CIFAR10_100k.json
index e5dfa8598..800cff758 100644
--- a/configs/superpixels_graph_classification_MoNet_CIFAR10.json
+++ b/configs/superpixels_graph_classification_MoNet_CIFAR10_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -32,7 +32,6 @@
"pseudo_dim_MoNet": 2,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_MoNet_MNIST.json b/configs/superpixels_graph_classification_MoNet_MNIST_100k.json
similarity index 89%
rename from configs/superpixels_graph_classification_MoNet_MNIST.json
rename to configs/superpixels_graph_classification_MoNet_MNIST_100k.json
index e3d8bb8ac..75b7718ba 100644
--- a/configs/superpixels_graph_classification_MoNet_MNIST.json
+++ b/configs/superpixels_graph_classification_MoNet_MNIST_100k.json
@@ -15,11 +15,11 @@
"batch_size": 128,
"init_lr": 0.001,
"lr_reduce_factor": 0.5,
- "lr_schedule_patience": 5,
+ "lr_schedule_patience": 10,
"min_lr": 1e-5,
"weight_decay": 0.0,
"print_epoch_interval": 5,
- "max_time": 48
+ "max_time": 12
},
"net_params": {
@@ -32,7 +32,6 @@
"pseudo_dim_MoNet": 2,
"in_feat_dropout": 0.0,
"dropout": 0.0,
- "graph_norm": true,
"batch_norm": true
}
}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_RingGNN_CIFAR10_100k.json b/configs/superpixels_graph_classification_RingGNN_CIFAR10_100k.json
new file mode 100644
index 000000000..67e9254ba
--- /dev/null
+++ b/configs/superpixels_graph_classification_RingGNN_CIFAR10_100k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "CIFAR10",
+
+ "out_dir": "out/superpixels_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.0001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 2,
+ "hidden_dim": 44,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_RingGNN_CIFAR10_L8_500k.json b/configs/superpixels_graph_classification_RingGNN_CIFAR10_L8_500k.json
new file mode 100644
index 000000000..310843d03
--- /dev/null
+++ b/configs/superpixels_graph_classification_RingGNN_CIFAR10_L8_500k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "CIFAR10",
+
+ "out_dir": "out/superpixels_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.0001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 8,
+ "hidden_dim": 37,
+ "radius": 2,
+ "residual": true,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_RingGNN_MNIST_100k.json b/configs/superpixels_graph_classification_RingGNN_MNIST_100k.json
new file mode 100644
index 000000000..61c742ecf
--- /dev/null
+++ b/configs/superpixels_graph_classification_RingGNN_MNIST_100k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "MNIST",
+
+ "out_dir": "out/superpixels_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.0001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 2,
+ "hidden_dim": 45,
+ "radius": 2,
+ "residual": false,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/configs/superpixels_graph_classification_RingGNN_MNIST_L8_500k.json b/configs/superpixels_graph_classification_RingGNN_MNIST_L8_500k.json
new file mode 100644
index 000000000..12066f451
--- /dev/null
+++ b/configs/superpixels_graph_classification_RingGNN_MNIST_L8_500k.json
@@ -0,0 +1,33 @@
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": "RingGNN",
+ "dataset": "MNIST",
+
+ "out_dir": "out/superpixels_graph_classification/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 4,
+ "init_lr": 0.0001,
+ "lr_reduce_factor": 0.5,
+ "lr_schedule_patience": 5,
+ "min_lr": 1e-6,
+ "weight_decay": 0.0,
+ "print_epoch_interval": 5,
+ "max_time": 12
+ },
+
+ "net_params": {
+ "L": 8,
+ "hidden_dim": 37,
+ "radius": 2,
+ "residual": true,
+ "dropout": 0.0,
+ "layer_norm": false
+ }
+}
\ No newline at end of file
diff --git a/data/COLLAB.py b/data/COLLAB.py
new file mode 100644
index 000000000..7595ea3c3
--- /dev/null
+++ b/data/COLLAB.py
@@ -0,0 +1,67 @@
+import time
+import dgl
+import torch
+from torch.utils.data import Dataset
+
+from ogb.linkproppred import DglLinkPropPredDataset, Evaluator
+
+from scipy import sparse as sp
+import numpy as np
+
+
+def positional_encoding(g, pos_enc_dim):
+ """
+ Graph positional encoding via Laplacian eigenvectors
+ """
+
+ # Laplacian
+ A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
+ N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
+ L = sp.eye(g.number_of_nodes()) - N * A * N
+
+ # # Eigenvectors with numpy
+ # EigVal, EigVec = np.linalg.eig(L.toarray())
+ # idx = EigVal.argsort() # increasing order
+ # EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx])
+ # g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
+
+ # Eigenvectors with scipy
+ #EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
+ EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR', tol=1e-2)
+ EigVec = EigVec[:, EigVal.argsort()] # increasing order
+ g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
+
+ return g
+
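The helper above builds the symmetric normalized Laplacian L = I - D^{-1/2} A D^{-1/2} and uses the eigenvectors of its smallest non-trivial eigenvalues as node positional features. A self-contained sketch of the same computation on a toy cycle graph, without DGL (illustrative only; the loose tolerance and the abs() on the eigenvectors mirror the code above):

```python
# Self-contained sketch of the Laplacian positional encoding above, on a toy
# 20-node cycle graph and without DGL. Illustrative only.
import numpy as np
from scipy import sparse as sp
from scipy.sparse.linalg import eigs

n, pos_enc_dim = 20, 4
idx = np.arange(n)
# adjacency matrix of an n-node cycle
A = sp.coo_matrix((np.ones(2 * n),
                   (np.r_[idx, (idx + 1) % n], np.r_[(idx + 1) % n, idx])),
                  shape=(n, n)).tocsr()

deg = np.asarray(A.sum(axis=1)).ravel()
N = sp.diags(deg.clip(1) ** -0.5)
L = sp.eye(n) - N @ A @ N  # symmetric normalized Laplacian

# k+1 smallest-real-part eigenpairs; drop the trivial constant eigenvector
EigVal, EigVec = eigs(L, k=pos_enc_dim + 1, which='SR', tol=1e-2)
EigVec = np.real(EigVec[:, EigVal.argsort()])
pos_enc = np.abs(EigVec[:, 1:pos_enc_dim + 1])
print(pos_enc.shape)  # (20, 4): one pos_enc_dim-dim feature vector per node
```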
+
+class COLLABDataset(Dataset):
+ def __init__(self, name):
+ start = time.time()
+ print("[I] Loading dataset %s..." % (name))
+ self.name = name
+ self.dataset = DglLinkPropPredDataset(name='ogbl-collab')
+
+ self.graph = self.dataset[0] # single DGL graph
+
+ # Create edge feat by concatenating weight and year
+ self.graph.edata['feat'] = torch.cat(
+ [self.graph.edata['edge_weight'], self.graph.edata['edge_year']],
+ dim=1
+ )
+
+ self.split_edge = self.dataset.get_edge_split()
+ self.train_edges = self.split_edge['train']['edge'] # positive train edges
+ self.val_edges = self.split_edge['valid']['edge'] # positive val edges
+ self.val_edges_neg = self.split_edge['valid']['edge_neg'] # negative val edges
+ self.test_edges = self.split_edge['test']['edge'] # positive test edges
+ self.test_edges_neg = self.split_edge['test']['edge_neg'] # negative test edges
+
+ self.evaluator = Evaluator(name='ogbl-collab')
+
+ print("[I] Finished loading.")
+ print("[I] Data load time: {:.4f}s".format(time.time()-start))
+
+ def _add_positional_encodings(self, pos_enc_dim):
+
+ # Graph positional encoding via Laplacian eigenvectors
+ self.graph = positional_encoding(self.graph, pos_enc_dim)
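COLLABDataset stores the positive/negative edge splits and an OGB Evaluator. A hedged sketch of how such an evaluator is typically driven for ogbl-collab, whose leaderboard metric is Hits@50 (the scores below are random stand-ins, not model output; only Evaluator(name='ogbl-collab') itself appears in this diff):

```python
# Hedged sketch of driving the OGB link-prediction evaluator stored above.
import torch
from ogb.linkproppred import Evaluator

evaluator = Evaluator(name='ogbl-collab')
pos_scores = torch.rand(1000)  # scores for positive (true) edges
neg_scores = torch.rand(1000)  # scores for sampled negative edges

result = evaluator.eval({'y_pred_pos': pos_scores, 'y_pred_neg': neg_scores})
print(result)  # e.g. {'hits@50': 0.05} for random scores
```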
diff --git a/data/CSL.py b/data/CSL.py
new file mode 100644
index 000000000..e107d37cf
--- /dev/null
+++ b/data/CSL.py
@@ -0,0 +1,334 @@
+import numpy as np, time, pickle, random, csv
+import torch
+from torch.utils.data import DataLoader, Dataset
+
+import os
+import pickle
+import numpy as np
+
+import dgl
+
+from sklearn.model_selection import StratifiedKFold, train_test_split
+
+random.seed(42)
+
+from scipy import sparse as sp
+
+
+class DGLFormDataset(torch.utils.data.Dataset):
+ """
+ DGLFormDataset wraps a graph list and a label list as a PyTorch Dataset.
+ *lists (list): lists of 'graphs' and 'labels' with the same len().
+ """
+ def __init__(self, *lists):
+ assert all(len(lists[0]) == len(li) for li in lists)
+ self.lists = lists
+ self.graph_lists = lists[0]
+ self.graph_labels = lists[1]
+
+ def __getitem__(self, index):
+ return tuple(li[index] for li in self.lists)
+
+ def __len__(self):
+ return len(self.lists[0])
+
+def format_dataset(dataset):
+ """
+ Utility function to convert the data
+ into a DGL/PyTorch-compatible format.
+ """
+ graphs = [data[0] for data in dataset]
+ labels = [data[1] for data in dataset]
+
+ for graph in graphs:
+ #graph.ndata['feat'] = torch.FloatTensor(graph.ndata['feat'])
+ graph.ndata['feat'] = graph.ndata['feat'].float() # dgl 4.0
+ # adding edge features for Residual Gated ConvNet, if not there
+ if 'feat' not in graph.edata.keys():
+ edge_feat_dim = graph.ndata['feat'].shape[1] # dim same as node feature dim
+ graph.edata['feat'] = torch.ones(graph.number_of_edges(), edge_feat_dim)
+
+ return DGLFormDataset(graphs, labels)
+
+
+def get_all_split_idx(dataset):
+ """
+ - Splits the graphs into train, val and test sets in a 3:1:1 ratio
+ - Stratified split, proportionate to the original class distribution
+ - Uses sklearn to perform the split, then saves the indices to file
+ - Prepares 5 such index splits for use in the graph NNs
+ - As with KFold, each of the 5 folds has a unique test set.
+ """
+ root_idx_dir = './data/CSL/'
+ if not os.path.exists(root_idx_dir):
+ os.makedirs(root_idx_dir)
+ all_idx = {}
+
+ # If there are no idx files, do the split and store the files
+ if not (os.path.exists(root_idx_dir + dataset.name + '_train.index')):
+ print("[!] Splitting the data into train/val/test ...")
+
+ # Using 5-fold cross val as used in RP-GNN paper
+ k_splits = 5
+
+ cross_val_fold = StratifiedKFold(n_splits=k_splits, shuffle=True)
+ k_data_splits = []
+
+ # this is a temporary index assignment, to be used below for val splitting
+ for i in range(len(dataset.graph_lists)):
+ dataset[i][0].a = lambda: None
+ setattr(dataset[i][0].a, 'index', i)
+
+ for indexes in cross_val_fold.split(dataset.graph_lists, dataset.graph_labels):
+ remain_index, test_index = indexes[0], indexes[1]
+
+ remain_set = format_dataset([dataset[index] for index in remain_index])
+
+ # Gets final 'train' and 'val'
+ train, val, _, __ = train_test_split(remain_set,
+ range(len(remain_set.graph_lists)),
+ test_size=0.25,
+ stratify=remain_set.graph_labels)
+
+ train, val = format_dataset(train), format_dataset(val)
+ test = format_dataset([dataset[index] for index in test_index])
+
+ # Extracting only idxs
+ idx_train = [item[0].a.index for item in train]
+ idx_val = [item[0].a.index for item in val]
+ idx_test = [item[0].a.index for item in test]
+
+ f_train_w = csv.writer(open(root_idx_dir + dataset.name + '_train.index', 'a+'))
+ f_val_w = csv.writer(open(root_idx_dir + dataset.name + '_val.index', 'a+'))
+ f_test_w = csv.writer(open(root_idx_dir + dataset.name + '_test.index', 'a+'))
+
+ f_train_w.writerow(idx_train)
+ f_val_w.writerow(idx_val)
+ f_test_w.writerow(idx_test)
+
+ print("[!] Splitting done!")
+
+ # reading idx from the files
+ for section in ['train', 'val', 'test']:
+ with open(root_idx_dir + dataset.name + '_'+ section + '.index', 'r') as f:
+ reader = csv.reader(f)
+ all_idx[section] = [list(map(int, idx)) for idx in reader]
+ return all_idx
+
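The procedure above is a standard nested stratified split. A compact sketch of the same scheme on toy labels sized like CSL (150 graphs, 10 classes; illustrative only, and the real code also persists the resulting indices to .index files):

```python
# Compact sketch of the nested stratified split implemented above: an outer
# stratified 5-fold (unique test set per fold), then a stratified 75/25
# split of the remainder into train/val. Toy labels; illustrative only.
import numpy as np
from sklearn.model_selection import StratifiedKFold, train_test_split

labels = np.repeat(np.arange(10), 15)  # 150 samples, 15 per class
indices = np.arange(len(labels))

skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for remain_idx, test_idx in skf.split(indices, labels):
    train_idx, val_idx = train_test_split(
        remain_idx, test_size=0.25, stratify=labels[remain_idx])
    print(len(train_idx), len(val_idx), len(test_idx))  # 90 30 30
```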
+
+
+class CSL(torch.utils.data.Dataset):
+ """
+ Circular Skip Link Graphs:
+ Source: https://github.com/PurdueMINDS/RelationalPooling/
+ """
+
+ def __init__(self, path="data/CSL/"):
+ self.name = "CSL"
+ self.adj_list = pickle.load(open(os.path.join(path, 'graphs_Kary_Deterministic_Graphs.pkl'), 'rb'))
+ self.graph_labels = torch.load(os.path.join(path, 'y_Kary_Deterministic_Graphs.pt'))
+ self.graph_lists = []
+
+ self.n_samples = len(self.graph_labels)
+ self.num_node_type = 1 #41
+ self.num_edge_type = 1 #164
+ self._prepare()
+
+ def _prepare(self):
+ t0 = time.time()
+ print("[I] Preparing Circular Skip Link Graphs v4 ...")
+ for sample in self.adj_list:
+ _g = dgl.DGLGraph()
+ _g.from_scipy_sparse_matrix(sample)
+ g = dgl.transform.remove_self_loop(_g)
+ g.ndata['feat'] = torch.zeros(g.number_of_nodes()).long()
+ #g.ndata['feat'] = torch.arange(0, g.number_of_nodes()).long() # v1
+ #g.ndata['feat'] = torch.randperm(g.number_of_nodes()).long() # v3
+
+ # adding edge features as generic requirement
+ g.edata['feat'] = torch.zeros(g.number_of_edges()).long()
+ #g.edata['feat'] = torch.arange(0, g.number_of_edges()).long() # v1
+ #g.edata['feat'] = torch.ones(g.number_of_edges()).long() # v2
+
+ # NOTE: come back here, to define edge features as distance between the indices of the edges
+ ###################################################################
+ # srcs, dsts = new_g.edges()
+ # edge_feat = []
+ # for edge in range(len(srcs)):
+ # a = srcs[edge].item()
+ # b = dsts[edge].item()
+ # edge_feat.append(abs(a-b))
+ # g.edata['feat'] = torch.tensor(edge_feat, dtype=torch.int).long()
+ ###################################################################
+
+ self.graph_lists.append(g)
+ self.num_node_type = self.graph_lists[0].ndata['feat'].size(0)
+ self.num_edge_type = self.graph_lists[0].edata['feat'].size(0)
+ print("[I] Finished preparation after {:.4f}s".format(time.time()-t0))
+
+ def __len__(self):
+ return self.n_samples
+
+ def __getitem__(self, idx):
+ return self.graph_lists[idx], self.graph_labels[idx]
+
+
+
+def self_loop(g):
+ """
+ Utility function, to be used only when the user sets the self_loop flag.
+ Re-implements dgl.transform.add_self_loop() so that ndata['feat'] and edata['feat'] are not lost.
+
+ This function is called inside the CSLDataset class.
+ """
+ new_g = dgl.DGLGraph()
+ new_g.add_nodes(g.number_of_nodes())
+ new_g.ndata['feat'] = g.ndata['feat']
+
+ src, dst = g.all_edges(order="eid")
+ src = dgl.backend.zerocopy_to_numpy(src)
+ dst = dgl.backend.zerocopy_to_numpy(dst)
+ non_self_edges_idx = src != dst
+ nodes = np.arange(g.number_of_nodes())
+ new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
+ new_g.add_edges(nodes, nodes)
+
+ # This new edata is not used since this function gets called only for GCN, GAT
+ # However, we need this for the generic requirement of ndata and edata
+ new_g.edata['feat'] = torch.zeros(new_g.number_of_edges())
+ return new_g
+
+
+
+
+
+def positional_encoding(g, pos_enc_dim):
+ """
+ Graph positional encoding via Laplacian eigenvectors
+ """
+ n = g.number_of_nodes()
+ # Laplacian
+ A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
+ N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
+ L = sp.eye(n) - N * A * N
+ # Eigenvectors
+ EigVal, EigVec = np.linalg.eig(L.toarray())
+ idx = EigVal.argsort() # increasing order
+ EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx])
+ #g.ndata['pos_enc'] = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float() # v1
+ g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float() # v2
+ return g
+
+
+class CSLDataset(torch.utils.data.Dataset):
+ def __init__(self, name='CSL'):
+ t0 = time.time()
+ self.name = name
+
+ dataset = CSL()
+
+ print("[!] Dataset: ", self.name)
+
+ # this function splits data into train/val/test and returns the indices
+ self.all_idx = get_all_split_idx(dataset)
+
+ self.all = dataset
+ self.train = [self.format_dataset([dataset[idx] for idx in self.all_idx['train'][split_num]]) for split_num in range(5)]
+ self.val = [self.format_dataset([dataset[idx] for idx in self.all_idx['val'][split_num]]) for split_num in range(5)]
+ self.test = [self.format_dataset([dataset[idx] for idx in self.all_idx['test'][split_num]]) for split_num in range(5)]
+
+ print("Time taken: {:.4f}s".format(time.time()-t0))
+
+ def format_dataset(self, dataset):
+ """
+ Utility function to convert the data
+ into a DGL/PyTorch-compatible format.
+ """
+ graphs = [data[0] for data in dataset]
+ labels = [data[1] for data in dataset]
+
+ return DGLFormDataset(graphs, labels)
+
+
+ # form a mini batch from a given list of samples = [(graph, label) pairs]
+ def collate(self, samples):
+ # The input 'samples' is a list of (graph, label) pairs.
+ graphs, labels = map(list, zip(*samples))
+ labels = torch.tensor(np.array(labels))
+ batched_graph = dgl.batch(graphs)
+ return batched_graph, labels
+
+
+ # prepare dense tensors for GNNs that use them, such as RingGNN and 3WLGNN
+ def collate_dense_gnn(self, samples, pos_enc):
+ # The input 'samples' is a list of (graph, label) pairs.
+ graphs, labels = map(list, zip(*samples))
+ labels = torch.tensor(np.array(labels))
+ g = graphs[0]
+ adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
+ """
+ Adapted from https://github.com/leichen2018/Ring-GNN/
+ Assigning node and edge feats:
+ we have the adjacency matrix in R^{n x n}, node features in R^{d_n} and edge features in R^{d_e}.
+ We build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
+ The diagonal entries T[1:1+d_n, i, i], i = 0 to n-1, store the node features of node i.
+ The off-diagonal entries T[1+d_n:, i, j] store the edge features of edge (i, j).
+ """
+ zero_adj = torch.zeros_like(adj)
+ if pos_enc:
+ in_dim = g.ndata['pos_enc'].shape[1]
+ # use node feats to prepare adj
+ adj_node_feat = torch.stack([zero_adj for j in range(in_dim)])
+ adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
+ for node, node_feat in enumerate(g.ndata['pos_enc']):
+ adj_node_feat[1:, node, node] = node_feat
+ x_node_feat = adj_node_feat.unsqueeze(0)
+ return x_node_feat, labels
+ else: # no node features here
+ in_dim = 1
+ # use node feats to prepare adj
+ adj_node_feat = torch.stack([zero_adj for j in range(in_dim)])
+ adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
+ for node, node_feat in enumerate(g.ndata['feat']):
+ adj_node_feat[1:, node, node] = node_feat
+ x_no_node_feat = adj_node_feat.unsqueeze(0)
+ return x_no_node_feat, labels
+
+ def _sym_normalize_adj(self, adj):
+ deg = torch.sum(adj, dim = 0)#.squeeze()
+ deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
+ deg_inv = torch.diag(deg_inv)
+ return torch.mm(deg_inv, torch.mm(adj, deg_inv))
+
+
+
+
+
+ def _add_self_loops(self):
+
+ # function for adding self loops
+ # this function will be called only if self_loop flag is True
+ for split_num in range(5):
+ self.train[split_num].graph_lists = [self_loop(g) for g in self.train[split_num].graph_lists]
+ self.val[split_num].graph_lists = [self_loop(g) for g in self.val[split_num].graph_lists]
+ self.test[split_num].graph_lists = [self_loop(g) for g in self.test[split_num].graph_lists]
+
+ for split_num in range(5):
+ self.train[split_num] = DGLFormDataset(self.train[split_num].graph_lists, self.train[split_num].graph_labels)
+ self.val[split_num] = DGLFormDataset(self.val[split_num].graph_lists, self.val[split_num].graph_labels)
+ self.test[split_num] = DGLFormDataset(self.test[split_num].graph_lists, self.test[split_num].graph_labels)
+
+
+ def _add_positional_encodings(self, pos_enc_dim):
+
+ # Graph positional encoding via Laplacian eigenvectors
+ for split_num in range(5):
+ self.train[split_num].graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train[split_num].graph_lists]
+ self.val[split_num].graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.val[split_num].graph_lists]
+ self.test[split_num].graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.test[split_num].graph_lists]
+
+
+
+
+
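The collate_dense_gnn docstring above describes the dense input layout used by RingGNN and 3WLGNN. A worked toy example of that layout in plain torch (node channels only, matching the code, which leaves the edge-feature channels unused):

```python
# Worked toy example of the dense layout described in collate_dense_gnn:
# T has shape (1 + d_n, n, n); channel 0 is the adjacency matrix and
# channels 1..d_n carry node features on their diagonals. Illustrative only.
import torch

n, d_n = 3, 2
adj = torch.tensor([[0., 1., 0.],
                    [1., 0., 1.],
                    [0., 1., 0.]])
node_feat = torch.tensor([[0.1, 0.2],
                          [0.3, 0.4],
                          [0.5, 0.6]])

T = torch.zeros(1 + d_n, n, n)
T[0] = adj
for i in range(n):
    T[1:, i, i] = node_feat[i]

x = T.unsqueeze(0)  # add batch dim, as collate_dense_gnn does
print(x.shape)      # torch.Size([1, 3, 3, 3])
```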
diff --git a/data/CSL/CSL.zip b/data/CSL/CSL.zip
new file mode 100644
index 000000000..01d6ec6bc
Binary files /dev/null and b/data/CSL/CSL.zip differ
diff --git a/data/CSL/CSL_test.index b/data/CSL/CSL_test.index
new file mode 100644
index 000000000..c7d96e2f9
--- /dev/null
+++ b/data/CSL/CSL_test.index
@@ -0,0 +1,5 @@
+9,10,14,23,26,27,35,39,40,47,50,57,66,70,71,75,82,83,91,93,94,107,110,117,130,132,133,136,140,142
+1,4,5,16,21,24,32,33,38,46,48,52,62,72,73,79,80,85,97,101,102,108,109,111,123,127,129,143,147,149
+7,11,12,17,20,25,34,43,44,45,54,59,64,65,69,76,77,86,95,99,100,105,116,118,124,126,128,138,139,144
+0,2,13,15,19,22,31,36,41,55,56,58,61,67,68,78,81,88,92,96,104,106,113,115,121,125,131,137,145,146
+3,6,8,18,28,29,30,37,42,49,51,53,60,63,74,84,87,89,90,98,103,112,114,119,120,122,134,135,141,148
diff --git a/data/CSL/CSL_train.index b/data/CSL/CSL_train.index
new file mode 100644
index 000000000..4da3ebf04
--- /dev/null
+++ b/data/CSL/CSL_train.index
@@ -0,0 +1,5 @@
+62,69,88,115,43,128,11,55,108,84,137,74,48,104,72,81,24,34,90,12,134,37,99,58,63,25,22,7,4,30,103,21,87,28,148,86,68,44,118,145,36,114,95,54,42,144,18,20,124,8,85,126,122,138,49,73,5,92,141,113,45,109,120,146,51,59,149,89,41,15,46,139,2,116,123,78,61,97,121,112,125,76,101,119,6,1,67,100,31,29
+63,65,95,49,55,141,135,105,134,122,61,66,132,89,118,37,99,92,91,142,18,0,93,139,15,116,9,148,3,30,53,43,138,35,84,67,12,51,117,22,125,25,69,74,81,39,120,70,10,17,45,137,124,75,11,87,94,131,140,29,107,58,2,20,82,96,119,83,23,28,112,36,8,130,60,56,115,42,50,59,31,76,100,128,78,44,114,103,145,6
+42,137,47,57,10,87,148,83,61,23,107,32,109,73,14,123,46,72,38,37,51,48,24,127,21,92,141,36,132,6,13,136,84,19,98,91,63,120,134,16,106,18,60,50,112,80,125,66,22,135,131,94,4,3,1,79,70,102,110,145,96,33,56,146,108,93,58,143,119,97,15,129,27,8,81,40,149,115,55,75,9,30,78,74,104,89,35,62,133,114
+70,90,65,46,44,87,128,114,98,136,33,80,116,149,84,118,7,47,5,103,21,34,57,37,134,25,100,142,3,60,79,97,69,135,8,66,144,45,139,12,148,102,107,35,95,1,108,124,18,82,16,59,143,76,111,10,126,43,77,29,109,9,26,94,73,120,49,6,140,20,83,74,48,110,133,51,72,32,39,130,24,127,91,54,123,85,30,27,62,112
+55,26,13,23,46,16,101,136,91,125,111,80,25,9,14,128,56,15,109,100,133,20,40,82,143,21,88,113,38,77,45,139,62,67,130,124,93,10,35,110,73,144,116,44,58,142,107,85,36,83,131,106,64,7,70,138,105,127,108,66,97,79,34,0,147,5,52,61,47,72,92,48,17,1,33,68,24,129,32,78,123,102,95,149,145,11,41,76,57,99
diff --git a/data/CSL/CSL_val.index b/data/CSL/CSL_val.index
new file mode 100644
index 000000000..4120b0eb8
--- /dev/null
+++ b/data/CSL/CSL_val.index
@@ -0,0 +1,5 @@
+111,79,135,129,80,52,98,53,3,127,16,17,32,102,13,105,106,0,38,64,65,96,19,77,143,147,60,33,56,131
+126,136,90,26,98,54,104,77,71,113,106,40,110,144,14,41,146,47,13,86,121,133,88,34,57,27,64,19,68,7
+39,88,68,26,29,67,90,0,2,111,113,142,117,122,71,53,41,101,103,49,31,52,140,130,82,5,28,85,121,147
+50,71,119,141,23,138,42,64,40,105,75,63,89,53,4,129,38,117,101,147,122,52,17,14,11,86,28,132,93,99
+86,121,146,2,12,39,96,19,117,94,71,81,104,65,4,54,118,50,59,69,75,132,140,31,43,22,137,115,126,27
diff --git a/data/CSL/X_eye_list_Kary_Deterministic_Graphs.pkl b/data/CSL/X_eye_list_Kary_Deterministic_Graphs.pkl
new file mode 100644
index 000000000..ae67d80a6
Binary files /dev/null and b/data/CSL/X_eye_list_Kary_Deterministic_Graphs.pkl differ
diff --git a/data/CSL/X_unity_list_Kary_Deterministic_Graphs.pkl b/data/CSL/X_unity_list_Kary_Deterministic_Graphs.pkl
new file mode 100644
index 000000000..928cb8187
Binary files /dev/null and b/data/CSL/X_unity_list_Kary_Deterministic_Graphs.pkl differ
diff --git a/data/CSL/graphs_Kary_Deterministic_Graphs.pkl b/data/CSL/graphs_Kary_Deterministic_Graphs.pkl
new file mode 100644
index 000000000..b8feace49
Binary files /dev/null and b/data/CSL/graphs_Kary_Deterministic_Graphs.pkl differ
diff --git a/data/CSL/y_Kary_Deterministic_Graphs.pt b/data/CSL/y_Kary_Deterministic_Graphs.pt
new file mode 100644
index 000000000..7753e4f2d
Binary files /dev/null and b/data/CSL/y_Kary_Deterministic_Graphs.pt differ
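The binary files register the CSL dataset itself: pickled graphs, two node-feature variants (`X_eye` and `X_unity`), and a label tensor. A hedged loading sketch using stock `pickle`/`torch` calls; what exactly the pickles contain (e.g. a list of adjacency matrices and a list of feature arrays) is an assumption from the file names:

```python
import pickle
import torch

with open("data/CSL/graphs_Kary_Deterministic_Graphs.pkl", "rb") as f:
    graphs = pickle.load(f)
with open("data/CSL/X_eye_list_Kary_Deterministic_Graphs.pkl", "rb") as f:
    X_eye = pickle.load(f)
y = torch.load("data/CSL/y_Kary_Deterministic_Graphs.pt")

print(len(graphs), len(X_eye), y.shape)
```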
diff --git a/data/CitationGraphs.py b/data/CitationGraphs.py
deleted file mode 100644
index 6b973bb5b..000000000
--- a/data/CitationGraphs.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import torch
-import pickle
-import torch.utils.data
-import time
-import os
-import numpy as np
-
-import csv
-
-import dgl
-from dgl.data import CoraDataset
-from dgl.data import CitationGraphDataset
-import networkx as nx
-
-import random
-random.seed(42)
-
-
-def self_loop(g):
- """
- Utility function only, to be used only when necessary as per user self_loop flag
- : Overwriting the function dgl.transform.add_self_loop() to not miss ndata['feat'] and edata['feat']
-
-
- This function is called inside a function in CitationGraphsDataset class.
- """
- new_g = dgl.DGLGraph()
- new_g.add_nodes(g.number_of_nodes())
- new_g.ndata['feat'] = g.ndata['feat']
-
- src, dst = g.all_edges(order="eid")
- src = dgl.backend.zerocopy_to_numpy(src)
- dst = dgl.backend.zerocopy_to_numpy(dst)
- non_self_edges_idx = src != dst
- nodes = np.arange(g.number_of_nodes())
- new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
- new_g.add_edges(nodes, nodes)
-
- # This new edata is not used since this function gets called only for GCN, GAT
- # However, we need this for the generic requirement of ndata and edata
- new_g.edata['feat'] = torch.zeros(new_g.number_of_edges())
- return new_g
-
-
-
-
-class CitationGraphsDataset(torch.utils.data.Dataset):
- def __init__(self, name):
- t0 = time.time()
- self.name = name.lower()
-
- if self.name == 'cora':
- dataset = CoraDataset()
- else:
- dataset = CitationGraphDataset(self.name)
- dataset.graph.remove_edges_from(nx.selfloop_edges(dataset.graph))
- graph = dgl.DGLGraph(dataset.graph)
- E = graph.number_of_edges()
- N = graph.number_of_nodes()
- D = dataset.features.shape[1]
- graph.ndata['feat'] = torch.Tensor(dataset.features)
- graph.edata['feat'] = torch.zeros((E, D))
- graph.batch_num_nodes = [N]
-
-
- self.norm_n = torch.FloatTensor(N,1).fill_(1./float(N)).sqrt()
- self.norm_e = torch.FloatTensor(E,1).fill_(1./float(E)).sqrt()
- self.graph = graph
- self.train_mask = torch.BoolTensor(dataset.train_mask)
- self.val_mask = torch.BoolTensor(dataset.val_mask)
- self.test_mask = torch.BoolTensor(dataset.test_mask)
- self.labels = torch.LongTensor(dataset.labels)
- self.num_classes = dataset.num_labels
- self.num_dims = D
-
-
-
- print("[!] Dataset: ", self.name)
-
-
- print("Time taken: {:.4f}s".format(time.time()-t0))
-
-
- def _add_self_loops(self):
- # function for adding self loops
- # this function will be called only if self_loop flag is True
- self.graph = self_loop(self.graph)
- norm = torch.pow(self.graph.in_degrees().float().clamp(min=1), -0.5)
- shp = norm.shape + (1,) * (self.graph.ndata['feat'].dim() - 1)
- self.norm_n = torch.reshape(norm, shp)
-
diff --git a/data/SBMs.py b/data/SBMs.py
index f403be6b8..ce224e44e 100644
--- a/data/SBMs.py
+++ b/data/SBMs.py
@@ -7,6 +7,8 @@
import dgl
import torch
+from scipy import sparse as sp
+import numpy as np
@@ -118,6 +120,32 @@ def self_loop(g):
+def positional_encoding(g, pos_enc_dim):
+ """
+ Graph positional encoding via Laplacian eigenvectors
+ """
+
+ # Laplacian
+ A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
+ N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
+ L = sp.eye(g.number_of_nodes()) - N * A * N
+
+ # # Eigenvectors with numpy
+ # EigVal, EigVec = np.linalg.eig(L.toarray())
+ # idx = EigVal.argsort() # increasing order
+ # EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx])
+ # g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
+
+ # Eigenvectors with scipy
+ #EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
+ EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR', tol=1e-2) # for 40 PEs
+ EigVec = EigVec[:, EigVal.argsort()] # increasing order
+ g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
+
+ return g
+
+
+
class SBMsDataset(torch.utils.data.Dataset):
def __init__(self, name):
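For reference, the `positional_encoding` helper added in the hunk above implements the standard Laplacian-eigenvector positional encoding. With A the adjacency matrix and D the diagonal degree matrix (degrees clipped to at least 1, as in the code):

```latex
L = I - D^{-1/2} A D^{-1/2}, \qquad L\,\phi_k = \lambda_k \phi_k, \qquad 0 = \lambda_1 \le \lambda_2 \le \dots \le \lambda_n
```

Each node i then receives the vector (|phi_2(i)|, ..., |phi_{pos_enc_dim+1}(i)|), stored in `g.ndata['pos_enc']`; the slice `EigVec[:, 1:pos_enc_dim+1]` skips phi_1, the trivial constant eigenvector belonging to lambda_1 = 0.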
@@ -143,16 +171,63 @@ def collate(self, samples):
# The input samples is a list of pairs (graph, label).
graphs, labels = map(list, zip(*samples))
labels = torch.cat(labels).long()
- tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
- tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
- snorm_n = torch.cat(tab_snorm_n).sqrt()
- tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
- tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
- snorm_e = torch.cat(tab_snorm_e).sqrt()
+ #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
+ #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
+ #snorm_n = torch.cat(tab_snorm_n).sqrt()
+ #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
+ #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
+ #snorm_e = torch.cat(tab_snorm_e).sqrt()
batched_graph = dgl.batch(graphs)
- return batched_graph, labels, snorm_n, snorm_e
+ return batched_graph, labels
+
+ # prepare dense tensors for GNNs that use them, such as RingGNN and 3WLGNN
+ def collate_dense_gnn(self, samples):
+ # The input samples is a list of pairs (graph, label).
+ graphs, labels = map(list, zip(*samples))
+ labels = torch.cat(labels).long()
+ #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
+ #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
+ #snorm_n = tab_snorm_n[0][0].sqrt()
+
+ #batched_graph = dgl.batch(graphs)
+
+ g = graphs[0]
+ adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
+ """
+ Adapted from https://github.com/leichen2018/Ring-GNN/
+ Assigning node and edge feats:
+ We have the adjacency matrix in R^{n x n}, the node features in R^{d_n}, and the edge features in R^{d_e}.
+ We then build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}, where T[0, :, :] is the adjacency matrix.
+ The diagonal entries T[1:1+d_n, i, i], i = 0 to n-1, store the node features of node i.
+ The off-diagonal entries T[1+d_n:, i, j] store the edge features of edge (i, j).
+ """
+
+ zero_adj = torch.zeros_like(adj)
+
+ if self.name == 'SBM_CLUSTER':
+ self.num_node_type = 7
+ elif self.name == 'SBM_PATTERN':
+ self.num_node_type = 3
+
+ # use node feats to prepare adj
+ adj_node_feat = torch.stack([zero_adj for j in range(self.num_node_type)])
+ adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
+
+ for node, node_label in enumerate(g.ndata['feat']):
+ adj_node_feat[node_label.item()+1][node][node] = 1
+ x_node_feat = adj_node_feat.unsqueeze(0)
+
+ return x_node_feat, labels
+
+ def _sym_normalize_adj(self, adj):
+ deg = torch.sum(adj, dim = 0)#.squeeze()
+ deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
+ deg_inv = torch.diag(deg_inv)
+ return torch.mm(deg_inv, torch.mm(adj, deg_inv))
+
+
def _add_self_loops(self):
# function for adding self loops
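The dense collation above packs each graph into a single (1 + num_node_type) x n x n tensor: channel 0 is the symmetrically normalized adjacency, and channel 1 + c carries a diagonal one-hot mask for the nodes whose label is c. A self-contained toy illustration (tensor names here are mine, not the repository's):

```python
# Toy illustration of the dense layout built by collate_dense_gnn above.
import torch

n, num_node_type = 4, 3                       # 4 nodes, 3 possible node labels
adj = torch.tensor([[0., 1., 0., 0.],
                    [1., 0., 1., 0.],
                    [0., 1., 0., 1.],
                    [0., 0., 1., 0.]])
node_labels = torch.tensor([0, 2, 1, 2])

# Symmetric normalization D^{-1/2} A D^{-1/2}, mirroring _sym_normalize_adj.
deg = adj.sum(dim=0)
deg_inv = torch.where(deg > 0, deg.rsqrt(), torch.zeros_like(deg))
adj_norm = torch.diag(deg_inv) @ adj @ torch.diag(deg_inv)

# Channel 0 holds the normalized adjacency; channel 1 + c holds a diagonal
# one-hot mask marking the nodes of label c.
T = torch.zeros(1 + num_node_type, n, n)
T[0] = adj_norm
for node, c in enumerate(node_labels):
    T[1 + int(c), node, node] = 1.0

x = T.unsqueeze(0)                            # leading batch axis, as in the diff
print(x.shape)                                # torch.Size([1, 4, 4, 4])
```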
@@ -163,5 +238,13 @@ def _add_self_loops(self):
self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]
+ def _add_positional_encodings(self, pos_enc_dim):
+
+ # Graph positional encoding via Laplacian eigenvectors
+ self.train.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists]
+ self.val.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.val.graph_lists]
+ self.test.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.test.graph_lists]
+
+
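Taken together, the SBMs.py changes expose two preprocessing paths on the dataset object. A rough usage sketch; that `dataset.train[i]` yields a (graph, label) pair is an assumption taken from the collate docstrings, not shown in this diff:

```python
from data.SBMs import SBMsDataset

dataset = SBMsDataset('SBM_PATTERN')

# New hook: attach Laplacian positional encodings to all three splits.
dataset._add_positional_encodings(pos_enc_dim=2)

# Sparse message-passing models batch with collate (the snorm tensors are
# no longer returned).
batched_graph, labels = dataset.collate([dataset.train[0], dataset.train[1]])

# Dense models (RingGNN, 3WLGNN) take one graph at a time as a
# (1, 1 + num_node_type, n, n) tensor.
x_dense, label = dataset.collate_dense_gnn([dataset.train[0]])
```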
diff --git a/data/SBMs/generate_SBM_PATTERN.ipynb b/data/SBMs/generate_SBM_PATTERN.ipynb
index b65d681b3..ff7b137ef 100644
--- a/data/SBMs/generate_SBM_PATTERN.ipynb
+++ b/data/SBMs/generate_SBM_PATTERN.ipynb
@@ -39,17 +39,17 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "{'nb_clusters': 10, 'size_min': 5, 'size_max': 15, 'p': 0.5, 'q': 0.25, 'p_pattern': 0.5, 'q_pattern': 0.25, 'vocab_size': 3, 'size_subgraph': 10, 'W0': array([[0., 1., 1., 1., 0., 1., 0., 0., 0., 0.],\n",
- " [1., 0., 0., 0., 1., 0., 1., 0., 1., 1.],\n",
- " [1., 0., 0., 0., 0., 1., 0., 0., 0., 1.],\n",
- " [1., 0., 0., 0., 0., 0., 1., 0., 0., 1.],\n",
- " [0., 1., 0., 0., 0., 1., 0., 1., 1., 1.],\n",
- " [1., 0., 1., 0., 1., 0., 0., 0., 0., 0.],\n",
- " [0., 1., 0., 1., 0., 0., 0., 0., 1., 1.],\n",
- " [0., 0., 0., 0., 1., 0., 0., 0., 0., 1.],\n",
- " [0., 1., 0., 0., 1., 0., 1., 0., 0., 0.],\n",
- " [0., 1., 1., 1., 1., 0., 1., 1., 0., 0.]]), 'u0': array([0, 2, 2, 2, 0, 0, 2, 2, 0, 0])}\n",
- "<__main__.generate_SBM_graph object at 0x12ce33890>\n"
+ "{'nb_clusters': 10, 'size_min': 5, 'size_max': 15, 'p': 0.5, 'q': 0.25, 'p_pattern': 0.5, 'q_pattern': 0.25, 'vocab_size': 3, 'size_subgraph': 10, 'W0': array([[0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],\n",
+ " [0., 0., 0., 1., 1., 0., 1., 0., 0., 1.],\n",
+ " [0., 0., 0., 1., 0., 0., 1., 1., 1., 1.],\n",
+ " [0., 1., 1., 0., 0., 1., 0., 0., 1., 0.],\n",
+ " [0., 1., 0., 0., 0., 1., 1., 1., 0., 0.],\n",
+ " [0., 0., 0., 1., 1., 0., 1., 1., 1., 0.],\n",
+ " [1., 1., 1., 0., 1., 1., 0., 0., 1., 1.],\n",
+ " [0., 0., 1., 0., 1., 1., 0., 0., 1., 0.],\n",
+ " [0., 0., 1., 1., 0., 1., 1., 1., 0., 1.],\n",
+ " [0., 1., 1., 0., 0., 0., 1., 0., 1., 0.]]), 'u0': array([1, 1, 2, 0, 2, 1, 2, 0, 2, 2])}\n",
+ "<__main__.generate_SBM_graph object at 0x12247fd90>\n"
]
}
],
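The printed dict in the hunk above is the random pattern used by the generator: W0 is a symmetric 0/1 adjacency over size_subgraph nodes drawn with edge probability p_pattern, and u0 assigns each pattern node a signal from a vocab_size-ary vocabulary; the output changed simply because the notebook was re-run under a different random state. An illustrative re-sampling sketch, not the notebook's code:

```python
# Sample a W0/u0 pair like the one printed above: a symmetric 0/1 matrix with
# zero diagonal, plus per-node signal values.
import numpy as np

rng = np.random.default_rng(0)
size_subgraph, p_pattern, vocab_size = 10, 0.5, 3

upper = np.triu(rng.random((size_subgraph, size_subgraph)) < p_pattern, k=1)
W0 = (upper | upper.T).astype(float)   # symmetric, zero diagonal
u0 = rng.integers(0, vocab_size, size_subgraph)

print(W0)
print(u0)
```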
@@ -203,7 +203,7 @@
"outputs": [
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD8CAYAAACxd9IeAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAAgAElEQVR4nO19f9CfVXXn5xISSSKEJEgMhEiEUNdKVUhFSmdHS1m7bKf+A7S7U8dl7cTObFuLO1NwO9aXP9ZR24KMbrWpWN1Wt7LEWR3sKAy7/qHd4hLaoVULuIaJCeGHCYVCqC+Yu3+8732573nPOfece+/z/T6a75nJfPM+z73nnnvu8zz33nM+59wQY8SMZjSjH386adoCzGhGM5oMzV72Gc3oBKHZyz6jGZ0gNHvZZzSjE4RmL/uMZnSC0Oxln9GMThCa2MseQviFEMIDIYTvhBBumFS7VgohnBNC+N8hhG+HEL4ZQnjX4vVNIYS7QggPLf5unLasOYUQVoUQ/iaEcMfi36OVN4Rwegjh9hDCPyzq+dKRy3vd4rPw9yGE/x5COGXM8pZoIi97CGEVgP8K4F8DeDWAfxtCePUk2nbQCwD+U4zxXwB4I4D/uCjjDQDujjHuBHD34t9joncB+Hb295jlvQXAl2OMrwLwWizIPUp5QwhnA/gtALtijK8BsArAr2Ck8pooxjj4PwCXAvhK9vd7ALxnEm03yPwFAFcAeADA1sVrWwE8MG3ZMhm3YeGB+zkAdyxeG6W8AE4DsB9AINfHKu/ZAL4HYBOAkwHcAeBfjVVey79JLeOT4hIdXLw2SgohnAvg9QDuAbAlxngYABZ/z5yeZCvowwB+B8Dx7NpY5X0lgCcA/OnituMTIYT1GKm8McZDAP4AwAEAhwE8FWO8EyOV10KTetkDc22UON0QwksB7AXw2zHGp6ctj0QhhF8E8HiMcd+0ZTHSyQAuAvCxGOPrATyLES+BF/fibwWwA8BZANaHEH51ulK10aRe9oMAzsn+3gbgkQm1baYQwmosvOifiTF+fvHyYyGErYv3twJ4fFryEboMwC+FEB4G8BcAfi6E8OcYr7wHARyMMd6z+PftWHj5xyrvzwPYH2N8Isb4PIDPA/gZjFfeIk3qZf+/AHaGEHaEENZgwdDxxQm1baIQQgBwK4Bvxxhvym59EcDbF///dizs5adOMcb3xBi3xRjPxYI+/1eM8VcxXnkfBfC9EMJPLF66HMC3MFJ5sbB8f2MIYd3is3E5FgyKY5W3TBM0eFwJ4EEA/w/A707bWMHI97NY2FrcD+BvF/9dCWAzFoxgDy3+bpq2rIzsb8KLBrrRygvgdQDuXdTx/wSwceTy3gjgHwD8PYA/A/CSMctb+hcWOzWjGc3ox5xmCLoZzegEodnLPqMZnSA0e9lnNKMThGYv+4xmdILQ7GWf0YxOEGp62Wsi2UIIu1vanDTN5B2WZvJOjqpf9oZIth81Zc3kHZZm8k6IWmb2NwD4TozxuzHGeSxANt/aR6wZzWhGvakaVBNCuArAL8QYf23x77cBuCTG+BtSnTPOOCOuX78eL3vZy1bcm3/hOJ48No+N69Zgzckrv0Gl+1by8nniiSewYeNmsQ7lx/GX2kzXX/qSk/HMD17AxnVrAEAtq8k9/8JxHPr+P+LsM05364iTpSS/pgcrPfHEE0vPQ+sYt9S31s3l7S2DRT46PrS9ffv2fT/GyAvYACW8GsAnsr/fBuAjTLndWIBI3rt9+/Yo0U13PhBfcf0d8aY7H6i6b6UaPlodeo8rK9VP16/5+F8t3S+V1eRu0REny6Ta7sWjR/+n8Xx5+NLxoe0BuDcK7+zJDR8bUyRbjHEPgD0AsGvXLnEZcdn5m/HX3z2CnVvW4+a7HsRVF2/DOZvWLd2/6uJty36t9L2jx3D7voNL/FI7l52/uVg2/b1zy3pcsmMTW4fKxcmZ/n/Z+Ztx810P4rLzN+Pr33lRhvQ3VyfJkMrmZai8UjtUl6W6kiyJL8fPMz607UTc2EhlOdLGlvICsKTXfCyo3vN2LbL00IPGd+eW9Ut9tfQ5p5aXfSmSDcAhLERe/btaZl//zhHcs/8oACz9XnfFBUv3z9m0btnfVrp930HccvdDS/xSO1//zhG8YcdmtWz6+5Idm8Q6VC5OznTt5rsexC13P4S//u6RFX3M+eb1qUyavJZ2SnWpLJz8HD/P+Eh94sZG6z8lbWwpLwCqjrh2LbL00IPG9+a7HlzWx1Kfc6p+2WOML4QQfgPAV7CQn+uTMcZv1vKTvlwW0r6Qlpm3VDbJxK06ar7O3OxZqsOVpV91uhJ526Xb8cZXbmZXA3S1YpnJNFl6zMDcdalNrr3SLEfv//V3j+DKC7cs3cv5cispzyyqySn1TVpZ5isROm6elUTLzI4Y418C+MsWHomkL5eFtC+kZeYtlU0yAStXHTVfZ2Dl7GmpQ4l+1elK5I2v3CzO6HS1YpnJNFl6zMDcdalNrr3SLJffB7BsTDU9lOTWyPNsSivLRNy4eVYSTS/7EFSzN6/dz3v5a/vZodq2yEV/tZWDVEazNbSuQKxlW3mU6nP3LXqoka+mjmUV6lkVUppoPPtPve6ieO3v/4VqOPJQvuRKRpb8N1/+0La0ZavEj5Pbs3yt6VtvvjUySP2vNWJR/lpZaWnb+xnSng8A1W1qS3PJaGppR+ITQtgXY9zF1ZnozP7ksfmi4chDaZmT+NHfRNwySlu2Svw4uT3L15q+9eZbI4PU/1ojFuWvlZWWtr2fIcrHY8zz8LcYXC3t1DwfE33ZN65bg2sv36kavBJZv4CJD4Alg1Ru5Nu6YS0A2ZWVu5OSASbxSV9Yyt+ydGz9gtcsGVtWA5xMdMlPjaeWpTTHF8CyMdDcXaWtiuYOtJCk59wYl56hmiW0tjSXDKAWgzD3/Jb6P9GXfc3JJxUNXok8LqfELxmkciPfdVdcoM7ouTspyZMbtt6wY/MK/jl5DEj0nvYFr3E1tqwGJJk4PWjGIcnoVLva0gysSaaWFZCk59wYR/XQwj//m8peYxD29H/V3NycS/gW2rNnz9zu3buxbeNanHbKavzyT5+Ds05fi6su3oYNa1cDWPhyffJr+3HZ+Ztx1ulrcdn5m3HbvQexbePapTKrTgIOHD2GX/7pc3D2xnVL/BKf/P7xCHzya/uX6tOy+d8b169exjcRrWOhvM7Tzz3PypD6z/UxUdIHd48S1QvHg8pC61554RY8N398BY+SHiQ5ubF+9Vmn4bRTVi/1e9VJwG33LriVjjwzz7ZNiT4nmkyJv0e/XF89Y2EhabxS229+1ctW6EN7fjesXY0bb7zx8Nzc3B6uvalY4zUXlOXLRV0g9OtJXSya+yj/+/Z9B1nXSs0sa/mCp/5rX+ceLi1u/0n5UVCTBUBkkVMaaw78o4GXrO1xZTzgIq2vve0o0nhpbmiPK5nSaF1vOeQzv86VofuVfF90+KnnzECIHm40y77T064HyCHtl3MgRr7/zHVncdtp5IGqUr1IXhS
tPqczah/QwEUlnU3CjlKyb3DPvof/CpJA80P8u/jii93A/9bgi6ECEybV3jSDW3rKOYkgF9pfrt892vGQh582Xlb+UAJhJrpn/+gffXzun897s2nPY9knpzJ0z5vvrdL+MPGx7OMksuzZeu/1KD+NF72n2UYs+/sSf03PtA63t5ba4q57noe0133bpdux88xTq20tpToWXWl2CPosJh1x42WVV9uzT/Rlf/9NH5n7xikX4bRTVuPS8/Rl6Ya1C2W0zqYyt927sJdKfD/5tf1Lf7/lNS9fxifdO3D0GPbed8gkS6Kcr1SHk9tSr9RHKj/Hi95Ldc/euG6FTLfdexB77zuEs05fu4yPh7+mZ1rnrNPX4rorLjDphbvueR6+dP+j2HvfIew889QVbZbI0k5J/vzekWfmcc/+oyv0nJdJz2LSETdeVnlHY6BLfvaWPTFHFpghLdvDZzp0PS8vTztSWQ9/S3s18rbqq6e+a9uxPGetdhI3Sev7If5Z9uwHjjwbb7rzgXjPd78fb7rzgXjgyLPFOi380n2tHUuZFn4e/q1te+pK/IaWV+Pfu48txD1b0vNm0W9Lv9N9rFp9fxwgecUgNBQc0hO3LPGwyNICAbXwb23bU9cCJR0aqtrT/dXbdaYBhmqgxS39TvdXrdtwhiTvVEA1HFFDjgVwYiHNSJXftxh+LAYTyfDFlXnzq16GL93/6DIjDgUBUdKMQhZjXglEkutbMrpJhlGONHmpHqihitN3DbCHtmcB7WgkPas5YIg+b1zb9Br9O++PBPiiffvmHbc+8r73/u5HOblHM7NzX67ecEgOqGEBJXiAC5aYZwpgycEkAA96SdQSH63Vl7LRcDJYMtdY5KV6sKzmWmLqa+LRre1wmYZKsfr0mpSfIJElVv+kl6w7TZJ7qjM79+VKX3nqPqqd4TV3UU+XWyLuC15ysXAzg7SK0GC4WtmSy02rI1HrqoiuujwuJ45a+uZ5TrRnUuLDueA0mDeFgZeei9HP7NyXK/+lwQE1M3yP/XErZFWzG6QyUg46ShoMVyuryVeqY5Glpgy36mqZcVv65nlOtGdS4sNBgTWYNwcD13Q9+pmdC0KRgBA1wShcPfpV5sA1EjhFa1sDj6SvLu2btqdOcmngn9KqxwNO8YBptP5bZLDW5e55bBaUh0WXUoAJJxM31hIfLegryaPt2a2rrC9/6uaH537vvR9jC0lm+iH+aa63acFae0FJPbnltes1kM+WXPMt8veSoTUvf4lqYKgWfr359OgrxgKX1azxkmW2Za8NyDODBk2ssdpqM4H0xU77uNxOQa25ln0s1Z1nRVIjP6dbCZasWdaltjW+NZ6ApGcLfNay2qJ9qn02S6sJj30myfCVz/7xEWlmH401vmSZrfWRWvZidH9XY7VtyQKb+pxk8u5jpZz7vfbfltBZarn3jFfJi2DxGnBE9cwlH5Hq5Hx7YDUs/fYkupDk1vzso3nZS+GOvaCqrRDPGpLatoR2evgPBbvsAa3t0V5tmdY8/SU+k4DnltpI19/9B099X2Qire+H+Jf27B7oIAdzLcEMW+GWFsijB9ZbA9NsqZNk0mRr0dk04bIWHp5naWg5e0N0S4SBznqrJg900JLRtSfsMC+jbSUAe8bRod19ktyeDC0enU0TLmvh0TsjbYucY8gSnGiqOehqXRalHHQWIIfFYETb9rgKOX5a3H2NnCW5NeNeSYeaLJJBEKgDIFE+Wg7BRB6wCo1zz/l6XJwe46NUxqMfS+5AWnZ0BjpLVlIN5loCH3iOeCqVkXLRpXx1FqOPZFTyQF8t5AGplHSoyaIdwlkDQKJ8tByCiTxgFS6nW+JrWQVZYMiUeq9aJD3QsqMJhEmZamqyfVrcPNyMWfoiWtrWZPDAeOkMb3FLeeTtkYnW0h7ntkx6tvSp5P7kYKJUzx6wSiKOr2UVVAOyksBbnmy4+arlkh2bTTDq0YBqtu38ySK4oQacoYERegFLpPu9ATgeagFhTAo4NIm2e/Afip8H0FMDqKKEsRjoUqYazoVBT7qgLgYte2nuGillpNVIOgL5qovljKb0miXbp8VV4+Gj9VuiGncRbS+XzZMFt6TnnLgMudz9JJv2DGmkyVBzVDOtqx2fndqUTiTq5dKbyokwQDlfPCUN6FICWtSGp1KZPCegaO1a9uMePjVBQi02AUsuf41Kes5J25vn9xMNkdiiJTQ21dWOz05tcmVbAoMojQYuW4JBWqCDrckJPFZ9Sh54qCdLrcWC3xoGXJKtBBcFfLqn8moZWOneXNq7W8JAtf5qJxDVBA9R+S1eDss4lmxamjV+NIEwdN8ydOBHbxpqT+nZl/fovyU4p1fbNQEqQ+/Hp2kbsPAtPfurTj3jkTimQBht9uCCQ6w+aE8oak3eeI003zMlS/gknbk0C75nVSGRJWxTSxU2dIAK1Z0lcURNfzn+0krSU5a2W7OqA8rnw2nW+NEh6KTgkJyk/aZnL9wroWUizffskVOyE/Q6D04iq89fShU2dIAKlcWSOKKmvxx/gLcFeMqW2rX2sXQ+3Lt/+Py81Ofiyx5COAfAfwPwcgDHAeyJMd4SQtgE4HMAzgXwMIBrYoxPlvgBtmCDoYM6evPvxXfSQSc1PFpl6aEri2fEW9/Kv3dZj4xNYy2t79M/AFsBXLT4/1MBPAjg1QA+BOCGxes3APhgiZe2Z+8RMKDl8e7B1xOY4Qm68ORsr2nLEsDTO/jEUke6VyvLNMapB1lkseoILX72GONhAIcX//9PIYRvAzgbwFsBvGmx2KcBfBXA9f7PzQL1XIpyebx78LXwqNkmeLLA1rRlCeDpHXxiqSPdGwpSytVrHaceZJGlRzCSa88eQjgXwOsB3ANgy+KHADHGwyGEM618OABDC3AhEbc8lI5s9hx1W7P0qo2hLoGLatrK+Sd9XHnhlqW66ZfqiR4h7AEXcTyk/tC6mix0vGhZL7CH6sFSZ+eW9exR4RpZjm62HhXlBVAlMr/sIYSXAtgL4LdjjE+HEKz1dgPYDQDbt28HwH8he+T05gJBUsBKKcuKlW+NDJ46HoCMtS0ugAdYnofckxXXAi5KVAMy0mShfGjQjBfY01JH6xMljwGUo9Ysy4DxZQ8hrMbCi/6ZGOPnFy8/FkLYujirbwXwOFc3xrgHwB4A+KnXXRRvvuvBJehj/oX0fDXpV9Jz4H3pOsefuw7ALIOHWmYPiXLZkuw7t6xfag+wG03TrJLP9HkbllUAJ1feR8/KIZ/Zt25YK7Yl6aFmBUZ1p7WV606TTZKT6leCDVvIYo0PAG4F8O0Y403ZrS8CeDuADyz+fqHE68lj86J7Lf+/NxlETahozcki2v6w136uZfaQiMvplod7SiGukstNSyhiWQVIcnHtateA5TN76ptHD6lvFuJCZaW6LS5ei2u6ZgVsmdkvA/A2AH8XQvjbxWv/GQsv+W0hhHcAOADg6hIjGghDv5BSwADAf5WlX6medQam/Lh9Z9r7phmYfrlrVwHSjCPNnBaq2WN795aeWY/jJ60YSuTZ31tsL5agHG2sUx3Lyq
HGJkJXWR5dWazxXwMgbdAvN7WySDQQhiYT0AAW0lcZ6HfSh8RP+tJqM3DtKkDav7XMFL322Nre0jPrcXVq++bZ37cGIHkSUlj24bU2kdozEEdzIoznjDMPFJTCWGsgpRwUVjqtk9bJEw9IkFdLsg1PsoYWSHBN8I8laESrJ52YUytvzbNigRxLMFYL/JkbCy5dVn6uQF6HPhcSbPbGG288PDc3t4fr41RTSXvOsqoJyQT67IElHtqXm9tLSl9ji69Ymymk1UuvVYBEpRnNWk8LA62Rt+ZZaYGxWlaW3FhIqyELXqTGezWavPFDUY3VtQcPzz5ZK+uRj/NXDwE5ltqtrVfLpyd5ZKgpa/GhW8avSWcStG6Ifxe+9vUi5G9oKGKPnOe1EFZP3vESnNWSR7+GWvmX+tgLwlwDLbXwmzSc2pLnvkYmjCUtVXK9ASsNVUA7rJWjHjBDiZdWxgLZ1VwsEpzVkke/hlr5l4xXvSDMNdBSC78aWXq11/MZ1WgqOei0pUjLUk5zgUi/mouM8vVAWHMXVAJ7UBdTDlVNbhRpCcflMqNy1bhjEmnwUQnoo7lDqT4sEGaOJHAK5+IrucY4116NC1IDzJRcu5yrkF7jZOoB2ppaDjrAZ6CzkMVgpLnVAPsMLBFndKEGOjorUYMPZ4jjjFi0XguUUoOPSgZKzR1K9ZH3R4IwcyTN5JyLz7O6KBkULS44jl9pDDjDmiWHf4/Zfip54z2uIEtONEtObolvXied8uI5+tiSo5xmGaEuJkt+Pc7tIx1nXOM2k9xAVrlrjipOfdFchFLbmjtUco1xfbOMHx0TzQ1a0gnH15KdSTr2m+pOc71N9GV//00fmfvGKRfhtFNW49LzbO6CT35tP265+6FlddK1A0ePYe99h5Z+zzp9La674gLTg5d45HVuu/cg9t53CEeemcfe+w4ttblh7Yu/FvkAsHUS/51nnrpMzrxsid9t9x5cuv+W17x8WRuanCW5U90v3f/oki5T+xa5PUT7ksaPey6ktrmxSvrIdVTqm2X8JF2dvXHdiv6XdMLxPfLMPO7Zf3TFc8fpIclO34FUZzQv+6du/cTcr7/znS7AhQaasJzmIZHGl5sBqGyer7LUJjerlLK01s6mtA9SRldNL3Qm5mZkC8CH6o7O2pbTf9KKzDO71gJwSuOmrT6lLLAcMEt67jgZpNXFaF72Wz/xJ3N/+sH3qA+p9BXlvp7py8p9YUuk8eVmACrbtw4/bf4qS21yswr9glvk9hCdTWg7ml7oTMzNyLRP3AxEdUdn7ZwHXb3QFZlndtX6pumzNG7a6pM+D1zfUh+k546TQVpdjOZlt+zZPfnSE0l7eEu2T00GCwQ2fY1roJ7cLO7JUkv7b8loKsF8PfniU5+vvHALnps/bjpBNW+PykDHmjvFVbLLaFBjC2y4JiMtXRVp9hm6x87r0hNqW87fs+zZp+pn56jmxFNqHW316XrCKZNVuAbqqVlmATu8t2SptYR2enz+qc9JRssJqrQ9LaiDO8VV8qtbAo56++RpmCn3vEin3uR1qbw1EFiP/FP1s2vkgVRy4X8SzLCFPBDYVn6t0FzPfa8sveG4lrGW2rKMiQeqapHT0u9JQWBddSRo3RD/UnbZ3tDYFjhnrzYnlVW2NUsrlVPLMjsUlLSku9osqz2h0K18LRDpmmemRBgLXDZRbzhgC5yzV5s1kWYW6KSnHxbYpbblGQLC7IGHSjJZ+9gib2++nmxHvQ8skWgq8eyaIcJiMCkdNOg5/LDGQMO5hGpisi2uIIsrUuKnGY6ScU0DykhurxZjFicnlSk3/FEDnWYco6AaTzw/fSZzvhLYinsuSi5UzsirHavlpdEY6BJphojaYJMSvLXG8Ce1m8rm5WtismuOgNZip0vQ4FxOgM8MpEGYexizuDY4o6RkoCsZxzRYskYaZNWTlShdl55xKWdebfYZD03F9abBWi3uh1KGD242KR3kVzsTS3w5l5BlprEcjyyVtcwI1BWmzYI1K6jSGHFEV0V5ne2b1i27p8ktgXUsM6bm8pR0xq22qAwW0BK9x8lrda9qRzaPzvVmcT+UMnxws0lLXjKtbO+9mWfvX2Mn6JExttT3vB1LTjq6KirlJpTkBrBs/HN+1ucutZeXLeV/4/L+S1lgLSuzmkMr0/1V6zacwTLHhGf2P9mzZ+7it1xtmrW1r3EJ3MDNJi1BIlqZNNtpX/tSvjqu/zVla+wRGg8P+EUKLLEEEWmrLQ9cNu2t6YxuCZDy6F1bfZZWnZbZ2gPrpfrQjmyeqOtt286f7HLQfbp3zcf/Sj2Y3tKOt22pTGubPaiHTBa9a/w9bU5Dvh7U6/noIR/lAcX1NrqZPZFlbyN9PWsyyALybM3tZ6WstdpekvLxWLc1eaVZ1LKakTK9eoIxPDMQtxf2WPste+s0BtK+OZdHsndY7BHa6rNGV1qAjfQMUR6jwcZ/8MMfnXtk68+KQR45aQEKUsAKF7bqIVpfC8GkIZZanRT4QflIwRJeeaWgIUuQB+WhBeJY+Ettcn0tBZhwJIUh5/zSGEjBPnlbUnitJgttRyvj0ZUWYCM9Q5THaFxvHrishTyQ2hZ+GsTRAyWVyvaStwePHjwt7bSWsZ54Wluml7weaoENm0ha3w/xL8FlOZoUNNPSnifbZ4/Mq95stV7+vTKxevjWkIdvTdba3plde+n3xzK7rEZDZfn0HNkj3as5EshSpgeAyMK/V9SXh28NtbodAR3W2juzay/9/lhml01kyQJbc5Ahl7lTWmrlyziakdWztK1Z7lmy1XKZY+mBfsDCgyId48stVanuJdk0fVv40n7k17nsrNbMsYkPl223lLU276s0Bp6luZaRtyYjsSZDy8GeiUYTCCPBRFsgjxzfRBqAwQI7tdyTytSuBiigA1gOItEAHOl66egii2wWvhq/UuCLJXMsB08uZa3lwC9UXg/ISsvIW5ORWJOhx2pqdHDZRB6QA63jcblxUEdLxhcJUuqBrNYANzSwjgUWWmq7RodaPc3lJOUQtLipPNlhNNeppSylljx4GvWAH2vW+NGCaqZBJZCDBoyQAD5DydKrzo8TSUAWbmw8ZUvtDCV/TRmMBVRjyS4rkQduWMu3dGy0ljPOEnThkdcC3LDIJ7XbQ3ceqs3AWrO6oDBZbuWToLUWqLXUDgeGoeAXD5CqBqzjyUF3klmTHSidCFNzfE3as9y+76B6rYVv2itJ8tHcaPm1hx57dmkvKcnkkTfJovGzyCe120N3HvLIUCsb1VkKI33Djs0rdEl1leo+9NizrA65dtJzksub/v+Hdz647Df1ReubZcy1tks0VWu8x7LIWSg1i3qpbckazZXxyKBZXy2Wb8nqrJ2LluqUrPIUnMHxtZxxVmMJ1nTn8VLUtMXpJ52/5wHT0P5r8qZnm3oWWsE60jlz6fnDqtVrRMbS+n6IfwlU03uf2zv4YlL7ZUtwhEdejz4lvp5Ak9409F649XlrGZvethzal3R91alnPBJbQTUhhFUA7gVwKMb4iyGETQA+B+BcAA8DuCbG+KSFl
wXO6CGPb3QoGGSvOh4fP63TIyNtK6aghYbi3+t5axmbXn2S+pJ+3/0HT31frCx9Beg/AO8G8FkAdyz+/SEANyz+/wYAHyzx0OCyPagGbqllXm2FsWptadet/K2wSktG01YIbI9suxb+Wjbckkwt/akt42nL0zeJ0AqXDSFsA/BvAPyXxZceAN4K4E2L//80gK8CuN7CbyiqgVvmZYbKEtMC2e0BdbVkzWkFbUiQz16QWolfK5TZ2p/aMp62PH2rIesy/sMAfgfAqdm1LTHGwwAQYzwcQjizxGT+hePLDGkWKCVXFoBqQMuNIiWjSm7c8yz3aFnNSCgZEjUDo8Q/l81qZJMMdD0PfpCWthw/aUw0o2AyrlFoqmXbwem3ZGzsvUTnnmsA5r61GEcTFV/2EMIvAng8xrgvhPAmbwMhhN0AdgPAprN3FAH/lq8cwAc8cHnPaFnaJoVNUhioRKW8ZKgzqHUAACAASURBVFxZCapaguxqOdc4eCjto5QxtgSt9ZD12CdOPsvsnCDBtM9D5XSrgUFrpAXuWPrWI/OsZWa/DMAvhRCuBHAKgNNCCH8O4LEQwtbFWX0rgMe5yjHGPQD2AMBPve6ieO3lO5e+tJyrqGSA4IxZlPKyNDiixsjCkWX21OSyXOfKaLH1La4rbeZpmU24GSmtSHZuWb/MXWld4ZRWUlyfKP8a16Onj5S4oJnk/qN986xS6Lh1c71hYY+eDHS/j+UGug+V6g/letNomtDGSVOLTNyY9OijB2I8tAs1/7vG9dhDlqHcf4lvF9cbQx8AcFsI4R0ADgC42lqRC0+UyAPy0GYRDZRSQzV8a8JAPTNMzR6Srkxye0c+81hBS5S/ZhuhgBM642v6qFlJSbYLzo7i3YdbZbGEBWtUso10cb31+Edn9tYvuAXkMKaZ3TObTAvA0kuWmrHRZr8xjWMvmYboE8aWqcby9SxBQDk+2r6+Zv+qrRh6zgDSjGPdU3pmCI8s2uxcWmXRPSbXnseC74ENS3poHUdKratGzVtA5QWwAibrTWIxlZfdYsWkVlguIYEl6N9jMaVtA7K1uMYSm9cvySidjKN5MCT+tbJQC7BmEZY8ABYvBfc3HWvLSUGlvrWOIyWLTBqVdKThI2rwC6PJQUep1e/bo+1Wi73Er6VO6+qlpaxWt8XTYKEWb0dvWXrz8+jOYrkXSVrfD/GPwmV7ZW2dZpbTEo9aOK7Gx1umNZMulbtV3yU9TAIC64Ew12Sc7Q0btmagxdj27IlaoY69IZk9+bbCcTU+3jIePVvkbtV3SQ+TgMACPDCr9zap1zPqASJJNNV4dovxjYOJSoALbkkjZTL1ACA4V1BN37RspBLRpRxntJH0aHENeYycniWkZiyTXK818nqMk1rfLGUkQItm8JPcjJrsHr6eZfxUs8tajG/akcIWiGfNrMQd3+udibm+adlIJdKyvwJ6dlnL199r5Ez8S3JrbXOZYWvl9cxwEmxYK5Nfo4Y0i8GPg3CXMtB6+Hpoojno9uzZM7d79+6mY4m1spb6NW3WZLjl6ljy1ZWynHqOguYynHryu7XkqdOyq1oy51r1oR1EqfXHk0k4laUHRVra1o72lnIeWo7Cltr5ymf/+Ih0ZPPUXW/egJMebdW0aW3b4j5KM5oWoCGtQCxBLVLZnD/ly1HL/libeSSXVU2+dI/7y7Jn18rSFVSNO1AKvCrpx2qXWbVuwxlS/6eSN17LtGnJ3+3JSlozk0mZQS11NHnT1z19ybkveM1KRjvqt5SdVKPaHPKabIB+7LIkQ8r6muokfeez7fEIdaxzfV+yY7N6tHTON5WV8tt7ssF6zgzI9SOtAuiz9eVP3fzwKGb2J4/NV4e45uTZ69TMZJ79fYu8pRNsrKsJj6XWs9drAZxIsgG8TcQSpprXSfrOZ1tAn61zfUuzKmdzyctyK5Fa4JBEkn4snpZ3//D5eYnvRGf2P9mzZ+7it1y9Ilc3t6+98sIteG7++IoTO/KvHP2iWk4SocTxlfbq2iyV5OVmK2kG4K7TvaQ2S3OzUL6X5GRp2YdbZJFsDNxe1ZKjnfKhY5OPUSnvv+X0H2pX0WZgacWm6cOif251J8lLn28tb/xEX/YPfvijc49s/Vmcdfpa9iB5YGFptve+Q3hu/jju2X90qWx+MP1bXvNyXHrewsOTrkn8uGs5cXzP3rhu2W+qm5e99LzNrLxHnpnH3vsOLSuTZJDkza9/6/DTy8pwbVLZjzwzv0xXSSZOFo1fiSyy0HtcH5Oev3T/o9h736EluTVKfOjY5GNUGuv8viRv0t3OM0/FdVdcoH4QE4+zTl+7oqzE36J/2ldNXtpn7WWf6DJ+47o1uPbynSbIoyd5RQtcsRVq6vFBt0BgNX6eFFMtOquBy2r3e8NXPdQD3jtJ+HAXXUnQuiH+aXDZFkjiUDTG7KGe9obKwMr1xwPZndT4tcgwSXk9kOWSDPhRgMsCunFFqtcDHjt0O5a8ej37UZPjzSODlk9N4lM71j2p1fUmle0pV8lw3SLDVF1vuXGBHrSnkWR049xgNccwJ/KAJjR3nWT08QBCPCQdcMgZelqOufYYxyxur0Qe8ItUjzNucoAkyfjKldVcbJIMlvt0DDSXrPTMWw52nOjL/v6bPjL3jVMuYg1qyTDiMdZQZSYjxoGjx5YMU9TgRUkzmFhkom3mbVMjHjX6ePrsIWoUyw1ItL8lo5bG32Mcy/uaDHQWo2lp/KR6nHEzl5HKaylLDawWGSz3qSzcc1F65hO/0bzs6chm7gtZAlrUuixKX3BtdvWAJpIbiYPCSjJwEErPS1fSCSd/aQXVG07Luack92JaFeVlpVmVW8VJxzBzKwitzdLzpc2upVWABcJsgctKbr/RWOPTkc0cCKEEtPAGSeTgB42PBnn0gCYS6EOCwmoy1OYEt2adyfkDehBObzgtV0YKKJGASVoQipZHXwOw1ISeSvxoH7UxtUCYLXDZmj38VANhOPCLJaijZh9Hv+Dafpbuuy17a23fRWGcFgilZyVT2m97bCMePVtmTm7GlHSlAVksdTyrgFIQEUfW2TWX17Jq02wsnn09oPvZp+p6q6Wa7LSWHOU98plzMg6Vi7yGeudmn1Y7NVmHx35kc4+yUFxvU5nZOfJYMUv7Iq4shWZ6VheWmVfbd23ftI79CmtBNENb6i0zGZXBs5+3zGyS7YLTh2ePLc2UlllcC6aSApm0gB7POHoChKSxGV2IK0fe1EravkgqW9rPSXt+yz5L23cB+pls3L6xNXOpRJ7gFipDTaIIT0ZaLVGJZ4+ttV2TdKMUyATIAT2ecfQECEljo4W4juZl7wm3nAQ0U4KzWuC99O/esNZeZOmTl4eFv6YPT1bVXvKWnrMaiLTWtqWPku5GcyLMha99vRvGOVQmVi1T6tBwUw8NnV1WKztpWGtveHILn2lAens8d1D27CcVPzcdKcWz377vIIAXl0fp75zSvT+880GxjFRHK0v5cvwpH4ucFvlqyNMnqYxHxrzs0H3T2p42n0n3nWuz93M31ai3/Fc7PqjHso07yohG
qUnZTbUDB6U2a45k4spaloE1WyAPr5atBHcUND22Kh8La3uW7LUtxzHXHjfVcuSz9NxZjj5LPLQjm6cCqkmkHTVUk7HFAqJI/HO+6VcyJmmGEyvQQqPWzDKlMi3ZaXplqmk1ukl8uTqeo7Mkqj1uqiWwiNaxAL4oj9HkoNNcbzUBGRJZgg08MpQAP97+aEEXNX2vyYoyVMYaShpgxpNt1wJkSSTpU3OD1YyJ5r6l0GguUCqBliTXowViTOUdTQ46jXrkO0tkybvtkWHoVUZr3z05+Sz3atvjSMoRz93Tsu2ma17YNNceN1PWjInlIFEtryGwPHRWcz2WctandkeTg06b2RO1ZI61fPU95An08MibcsZZwBMWGSSAhRZoI81AnqAWS957zwxsCe30rJgoaXU1fUgwaku7dFWoBWlpK0irHkYTCGOhlsyxvZNB1AZ6lORNmUuBMnjCIkMJ/MLNmDUnq1j41oBfpH5wZbx2Ga097R6XvKLmbLrSGQRS32qOJy/R6F52aoHUzv/qAfrgyHLmmCSvdp16GDhPg+SVoFZXzSOQt5POySv1g9ZJf2vWck7f0jluGmnn43moNP4e67nUt6SH3CujeZVo25azBq2ya9c5Gt3L7oFo9vjacdTDamvNCe/dO1rsERaoKiUpHFab0Th9a3t0ieiKpxYiXBp/j/VcO3lH8xyVINy1p7nW2GUomV72EMLpAD4B4DUAIoD/AOABAJ8DcC6AhwFcE2N80iy9QEPN1i0y1JStlXton3lNnUlBVD3t1JCGIWjVUeleax97PGdWBN0tAL4cY3wVgNcC+DaAGwDcHWPcCeDuxb9N9L2jx3DzXQ/iG/uP4Oa7HsT3jh4Ty6avpmfJU9t2ugeg2GYqe/ip5wAAh596rtiXVMfTX1pH0wftG9cPWob+cnW2bli7bOYaerx66ZKWTWOVXwPKY01J66N0L13fumGtuR0PJZ01gWpCCKcB+JcA/j0AxBjnAcyHEN4K4E2LxT4N4KsArrcIZgFa5Nd6Uq+2NZeKZCzsCbjw9q0kt0UfQ41NSZdaHYssnF6A6We47bmMT31rjXp7JYAnAPxpCOG1APYBeBeALTHGwwAQYzwcQjjTKji3pOHgrNLB96mTFmOHBMnkjC0emKVkbEv8OGNTDbRWMlhq0NqdW9Yv/VJ4LzWgJbm5OqUtlWbw8hiiqC6TLJedv3lFG9pzAkA1tmnRdDVUA4n2GCFz/qVlfNLZgR8ce1riZ3nZTwZwEYDfjDHeE0K4BY4lewhhN4DdALB9+3YAvKGKGjY0EAGgG45y0mY7amwB9LjzvK5kbEv8OGNTDbS2xmBJD0HMy3IGtCR36TBBi7GwxhBFdZnnHgCgri44gyJt05KbsIZqVhceIyTlr7kTk85Oesm60yR+lpf9IICDMcZ7kgxYeNkfCyFsXZzVtwJ4nKscY9wDYA8A7Nq1K0qNcG6jfEbjvmz5DADoM7BUNv/Spv2U5FqxzCKc64lbXdBf2jfKT9NDIinYx2KQshiQLIErkuuNW5lJM6LFkFZy/yWXI3Vx1tgSPKsLyp+Wpc+hRlzfpNVE0nvTzB5jfDSE8L0Qwk/EGB8AcDmAby3+ezuADyz+fqEovUIlgAXnCqHZZ7QZWCrLHc0ruVYsswg3c1IYpGdvWtJDTlqwj6Rn7nop8EOTW3K9cSszqd9aME7J3ZqPk0fPGpVsF95sPNaZ3ZIhKVHSe+vMDgC/CeAzIYQ1AL4L4FosWPJvCyG8A8ABAFeXmMy/cLz4xfXsbTyznjQTeGa/vL20CrC4Qujq4soLtwAACzzxhH1qYcG5nrUZxzPbcft9DoDDyUDHsxQ+WkP02aF2CU3Pmh6k50yzz0jPcasrWbKbJP5aphrTyx5j/FsAu5hbl3sETckrLBZgy97GM+tpM4J19svbK+2huGt0X8wBTzx7XgmAowVUSHUtpAWuAHpQRz6eqf+98+vRZ4faJbiyLasrzT4jPcetwK+S3WQ0gTDpRBguKKCU4ZULTJCCLDxBDBxJWV89oaicDCkA5soLt+C5+eNsIAw9YYaW4TLnUnkoD+5kEU+wUCm7Knd+mxYAknLXWwKBOH16TgSyluWCemhZSc95e3SstVz4UiCXFg4rBX0l+UeTXTZPXiHBRaUZ17Pnq7Xc0/q0juerzMnABcBIyTAk63iNhVY7WcTbF8muolnu6Vjfvu+gORCoJAPX77wtiTgPgBWWrbVHZ3sNNizZAizhsJLd4Eciu2yJtL2wpWwN5LMFvqnJoAXClGTw7PksVu3avtTyyssPDcOtkamVv6dvJTuSxZ5Ef0eTXTadCNNy2LxGnmycvbOHatlwpbY9mXM9Mnj49cheO0m+tG5vHXpkG0MGWkoYS3bZRGkJcvs+OaNrL77SvR7tcfw92Wo9mXM9Mnj4WeqMiS+t21uHHtl6P0O95JJodAc7asa2lrxv9EigZEDRjuXpnWsttWU5yNAjA82AQzPWaMZJy5FD0lFOlgwtFiNZzZh7jHGeXHycMVIy8tYcC2aRS6tTGq/RZaqx5O5K5DEoWVxvmrukpW1LrrXUlifm2yKD5ObxgGAA2VgmxcdboLsatYy5xxin8bME+0iuTAvoxdO2RV7PEVGURpODTvqaWb6eJR459XC5cHJTtxTnIpNWFZybzuPuS1/75NKjh1dyriHKn3PTSbrjXG+lbK2a7ixjbqkrua4sefDS2HBla1x6Gj86bnSW1vpPx5rWHd3MzpH0NfNABmtmhBqXS40s2qpCcytaZKBfew7AIQUcaW46TXd5u5ZsrR7d1epdc11ZV3xc2RqXniVLkDRLa/3n6lhn+KnO7PnXOQEtLMcDl0AO3GxtmUXorGch+nXWZhNpVZGXpYATCxiox2rFcrSwJXOsBbTjAfZ4MghTPUjAJM84cqstSTYrv5oVQyINrDTqmZ3OaLW55+h1S/5xjpcnZ1si+qXVZhPLqoICTrxZWmtXK54TUFphuDWhoams9bnQwnY50sYR4EFcnr01pzPPikHqo6fuVF/2UngivVfDVwpIKCWk4AJ2uKAJCoTgQhiloAsuWQYNmrFkaS1lpNXqSIEqHEnhq5LuJaJ6rglCsfYNsIWVegAtWl+lMGOLDFIorSdYSaOpvuxaeGL6u5WvZZ/MQVWtIala4gUKUaX1tQy0iU+Nxd6z5+UCVUqBR5pMHiu8Z1/rCSSpCSstzZgluKzUduJjkcGziqmh0RjoEvWGRdbATqWvfA0EslWGoaCklpmsRzsWGXq3WYKUDkkt0OKh5Z4ogi7Fs9OMpnmG0FJ21ZwsWWoTvzfs2LyML0dcFlLgxeyqGo+WbKVaNtV0z9JHLQ+fpOfUJy3rqaQXC9Vkga19HmgdAMUxkfhZ2vH0jcpkGUdtXKUswaM5stkSz07JAkrodeRvjeumRU5N/hZZPLK1yN/ads2ytcW45ylTW7dUr9fSXHo+RnNksyWefYWACjzQ4rpocd1oMfWSq4lze0muMS7mmcbqU1k0t5pELdBVTX4N0GIBiNRAmKVDLGtBUZLL0QLmssC+LXUoWWD
OFAyVQDbfvOPWR9733t/9KMtYipAZ4l+KevPQTXc+EF9x/R3xmo//VXzF9XfEm+58oKp+qkf/buFVy1/rU6l+i/y9SNNDzTh56vYcT2/9mrZa6nj0kcquOvWMR6Lw/o0GLkuJBndYgkZoXS6biyfbDCXLl9wyI9BAmHympOAi2icNyGIJvqghaXbK5U569gBEEnkCg+hKjwtYsYBfSsAero4FeERJA3qV9EHhz5xcdNX15U/d/PAoMtV4SMsnZq0LrMzm0pIDzBLAY3HHlHK5WXPXS/d6u2wk1x51nXkBIok8gUFaIEjpUMX8es3e2gI8ouRxM0p91DINUZehloNutC+7BcBBiQPQ9AYmlNrm2illJ+UARLS/HABHyl/eAkjiiLqAaLvedqiuuL5JdShQRjvBRsvvXnJraYAnWsfyjGntSRmFOXcolcvzfI/2Zfd87RNxoZ7AZM70skAnpeyktI4VzsqBR4boq+UkGA9JKx0P4MRygk1JP5rsGuCpJBtHlnz/0qlFmlyesZjKnt1jbdQs7LQ+t/eryWRKybIHtngNpMAdixeB2/vR/b0lyYSUpZQbC6nfltBWyk8LeqrxFlg8MVq4aSnoyWPbaQ2m6hlCO7pAGIvftuZLyK0GaGAJ15ZVXq2utpe0ZCcttSmdSmNZMWghtBZfP+23JbRVO9VWklsjT95/WoZb8ZRWQR7bTmsw1VAhtJSm8rKnfUc6GcV7MogHfkoDS6z7QutZbCWZvKTtw2nACj2VxnIKyeGnnlu256O2kXxfK+0PNX1YxobKQPvey77SsmevlaUG4io9d3nbpWCndH80CLpEmrXRQtKX0HIaiyeEs+Uk0lqS9plcwIq0R9VOIUkrHSlFlrZyaEkXpckg9b2VWvbstbLUeHssz10p2CndH13e+F6zYE2bntl5kvJRGeivRSaLXkvBFtrqqFdgRkuwSEs7Hv7TCJ6xhNdK8qTro80bn+f8nlRed8pPy2FvyUlukdvTl5750Celw151PXn/tfpD5eOnfC3PcY92PAQlb/xoDHSAfrxNrzYpP0tAjCaDBdBSE7zResywt92h+fUKxrHU76E7C1/Lc9yjnV40UdfbR//o43P/fN6bVwR5XHXxtiZYq8c1RgMoarOJSvU1SG1yl3AH95XaroHAtkCDLX211PEEgGjQUkmHrTndSnJrfLnrEtzZQpr81vHXXG8TXcZv2/mTgwRxeIJPphFAQeukoAVP0Mg0A2AmFQBiqdsaGNXS9iTqt/KFsoyfSoirZ7by5ILX+EqgDE+AghZgYyEp+6kl77rl5BaLzko597UZ0xIsQvtaM7tqwURpdWHJc+8hT1ZczwqyRgZNn6XnVZvZJ5qpJh3ZnHyHaY9y+76287RoRg+ujpQF5OvfOVLkz8lSyg6jyZmywzz02LPLYL1av5OL7M/+z4EmnUn30nXu7DSpr1o7NfrR6qa2kstR010NUf6c3JZnMQcc1cqg6dPzvFKaKlyWgy96kh8kqtkfJvJAHT3w1lo5S7YAy37OkjCCroK0RBoWfUjj1xpuK+mDm0GlUF8LFLgGsmtZQXr6mMZAW72VVg6jhcsCK+GLlvO0SvyGgjp64K21ckp8NNCOp/8SGIMLJfboQxq/3kEziSyBQR4ocA1kl5OtBVSTxgCQod01YbaJTC97COE6AL8GIAL4OwDXAlgH4HMAzgXwMIBrYoxPWvhxAIGakFaJXw4tBLAMZijlcM9l8MJ387a1XOiJfx6WScvUgDk8daTwUk7vJX3kvCRAjzd8VcvLn+vMEoKqgYws41UiTe8euG2qnyDdCUbOPSctz2jxZQ8hnA3gtwC8Osb4XAjhNgC/AuDVAO6OMX4ghHADgBsAXG9plPv61YS0SvxqAz9avpqWAAXPiak1bVtICi+1nD5b4sWtPFpPmpH0aglB1VZDLQElHH+pTxa+FNINyOe4DT6zL5ZbG0J4Hgsz+iMA3gPgTYv3Pw3gqzC+7Bz1hCdaIJ81kMTatum9acBwqQwWCKkVommBIHtgyjUw31oaChbbskJrgc2qJPnk8n8A3gXgGQBPAPjM4rV/JGWeLPGZFFy2FSZagkd6+Q4FBZ4UNLNF/mn0dVJ902Tr0e8aHmiBy4YQNgJ4K4AdAP4RwP8IIfyq9WMSQtgNYDcAbN++HcDwcNleRqFexiYLpLaGeuioFyR4CBlr+Uyqb5psPfrdG+5sWcb/PID9McYnACCE8HkAPwPgsRDC1hjj4RDCVgCPc5VjjHsA7AGAXbt2RcC3lNaWK9JBeFy8dYvBJBlKcr6lQ/jyvy056KhhqtRXq45Kfaw59snTrsUQ1prDTWtzyL5psg29JZSeNy2e3bKEvwTAN7GwVw9Y2J//JoDfB3DDYpkbAHyoxKsmb7xGHghs7/zdpbbzv6W2W/Kl/6hR77E50Ul63rS88cWZPcZ4TwjhdgD3AXgBwN9gYaZ+KYDbQgjvAHAAwNXGD1YVce40S1bVUnYVbVbxrECk2Zuro/H3ZMvRXFal+9I9yyxr4Uvrc/rQMsmU+HHXa7LLePh7yFO/JVMNffa1eHaTNT7G+D4A7yOXfwDgckv9HqS50zT3SWnfo923uG4SlY4WLoFSEn/puGitb1IZC6ikxi7hAatI+uLKcuMo8eu1T/bw91CLjcEDAqI6+5HMG08pnxloDjM6a3BgD4v7qPQ11mYTKovGy7KaoAALLk+4tPLgZkwNVJJmk1LOeSl3uwZssa6cbr7rwRW58zh+2nWprCenv8arxu6j6UEaJzomdKzz1Q+93rRn7/mv1559qPPQaviW9uO1e1RpP18TzmuhocKEW/rai3rZC3qH7Q7xvI3mrLeUvKL1DLLSSZ614ZWlIANLYgpPMIoWrigFvlhCKGsCdix1LIk6LPnuS33lypZCc7mc+BZ909Dh2rMMSqQ9OxI/bqylOqmsdorrRF/299/0kblvnHIRTjtlNS49rz4D6233HsTe+w7hrNPX4tLzNuOTX9uPW+5+aInvhrUv/rbwpcTxTdduu/egWQapDlfm7I3rlvEpySjJmYjqylPnrNPX4rorLlgqw9WRxsLTV66sJHd+/VuHn1b7xvFN+jzyzDz23ncIB44ew977Dqly1kxU2rMj8ePGWqqTyv7T33zp+d97z+/8Icdvonv2jevW4NrLdzb5HoFxQid7+WeHqDPJNj17bA+Pmr27h+80MwpLVKOz0WWXraEcOtgL0jhkXU7eBN+Ufi1ZVXtnjC21Z73Xs22LPnpnkK2Ra5qwbOm5wKrV98daP/tYSHO9eetPAn7LyUtdKx73WrrXG0Kpye11Y/Zqu8XdOAT1hk/34Cs9F9ohEVPJVJPIYlyhWTx++afPwSU7NhczhtD6ybikZSeVyJIVhVJuSEn56pIBSvrlDF1ahpbjEe4sMFI2lxrDoqZDT4YaLVtOTQ661uw4lErZZWsMdhzfpFf6rFqMhenvL3/q5ofnfu+9H+Pam+rMbpmtaRYP7dgjWpe7J2VU0ciSFYWSBKpJ9aXfUp9ojrPWFU5NlhuLDmtAJVy2HImfdpxX7x
WI57ixFr4UIKOBxaTY/dGCanpBSjkwhtSGJyOOBrX1ZAypAevUGKRK8Nlcbi64x0oSmIdrhwMDlUAlHD9pHLlsLhIAZSiqgcZagFkWYI+n7am+7L0gpfQgQ60NT0YcbYbwZAypgexKs4amM8t+1pItp0SWQyypfqzw1tLxy5Z+9MhC46HWkN8WiLGn7anu2XPy7LPoXsdyAkhNXvCW/WvOK50SIgE3ep1+k/bzV164Bc/NH2f3syUAC8e/Zg9M7RycrcGSTVUibh9N7T69TsSxyuJ9/pK80jNpyV5L9Ty67LIceb5Q3MzSOrtyZVr2r9Qekc9CNRl0Lf2wHIVd2if2CjChMztna7BkU5VIW/ElPq17aivVZEAGytl7LSs+zwpzqnnjPWd5cZZ7T47yHjm/LeeAUYvqVRe/eP5XmnGptblmVcPJmL7yib9mza05q84zQ9IZx+Kd0PSh5YC36GZIsqyGOMhu0oM0TjUQ5tHN7NoXyzpTev2TPXJ+l+pwM2Wql89g1F5Qu6qhRO0RNaG/HP+aGdIa8sutzLTVhWaPmNRMTsmyGpLsCNo49cpem2gqL7sGA5QsvVwdS5kaGVrqWCCZLVBSj3wWPQ8FC+0NPbZ4AKZFveC9Q0GYl0iC1g3xrxdcdmiS2vJAYFvhpj2zqU4yK+404byeOiU+rX216G4IXaElu+xYaGiopqUtDwSWk7PVRVMqW8pYY+GlyVBj5OxNvbLRtGQwspSxGS0xMQAABbxJREFU6G6SzzTwIwSX5VwWyVhDfy2HK2plpZhxi5EpwThzdxJ1sdQYzlpcbrWGKwmSyRlRPX0sUauhlRI3ntIR2Bw8W4Ila7kFqK4491qLQVF6jkdnoEvkgcvm16U8bZ484RbgCXVnWIxM3BE+tG81hrMWl1ut4UqCZHJGVE8fS9RqaKXEjacEyuHg2bRvGl8qpwfy6qGagKCpzuz5l40CT9KXSsvWYQkooWRxT1lmD+nLyoE90ipAmiE1F6Ql44sUqKERlV871piW4YJRaHCSJaOORJ7AIy1bDpU3LyOthhK/vG/bN61j+6L10QLiaiFpzEc7s+dfNuqeAha+VKUD/EoBJZQs7inL7CF9WSUoqTZDcm1LAJGSvNbD/jwZTaUyeTBKciNRPdccQOgJPNIgpR7YMHUR5n2jZSx9HHo/XjPmo9mzp9mP7nk98FZLqGu+F6Ngj1IOM25m4MIxraTNSlK/tTrSvtUCBuJCRkuwZE532mxqkSvXrSX3GsejBjas6VladXKrrZLtojb81vqsj3Zmp1+//IsKlCGUHmsxtxejYA8PuKHliGmNrzd01lLHCgbyrKDo/ZrQYUsYr3Qcs8ajBjZMybLq5FZbgG67mKRnhNJUs8ta9uPajC7N0loWUW0mluprswfNcOtJiuHhq/HT+k15UsuylNyD85BYrOSWAJtSAIhnv6/Bey3PUE3fqC0n74dkn9HgyJbno8Q3kTazTzW7rJY9k2ZVzSllFD3yzDzu2X90qX6qq2UR3XnmqbjuigtYvlJ9LrOplOFWy04q9cPCV+On9ZvypBlYaebYnIdUtiZDLJcFlmar5eQt6TC1m/PwPEM1fUv8uKy7VJ7S3xrlMr3lNS838RnNMp5ml62FVNbAT1ugtBZ+NXBOD98WCHBNO5ayHlk0vtZ+9KQWPUs8elN3/hK0boh/F7729UVo6SRhsbQ9CeI4jUymFhkssF6t3FjIAju1wJFLdWvrDA2trSGJH8YCl33y2LzZ3ZNfG5I0YM80M5laZLACcCatUy9ZDKse4JRUt7YOoOf6mwaMePQGuk/d+om5X3/nO1lXTsloMxRxEFgKceSMTiV4bw1MlMojgXQkY57EozUjLe0bB8TxGPNKfdZcspo7TYoh146VkmTRQFGWjEY1ZSxGw1K25K989o+PjCK77JqTTxJdOZPKF0aJg8BystBYdcAO722RR3MRWWG9rRlpE2mzLOXbEqNvcclagCyWXAOaLBIoypLRqKZMTfAMdXmOJm98cr1x7imP22toktwa3CrA4jaqAb1IpLljJH4WuT2597mVWck1VBv0RJ8PLV98Kbil16qx5OrMy9D+588+1ZmW3Ud6Tyh0eXQHO3LuKY/ba2iS3Br5dakM5zYq9cHTR80dI/GzyK3JQF1MyQWVu6JKLibO9Ubb4lxZVPdfuv9R0S1HD2mU3Iutk0bJ1an1P3/26fjRZ0dzK6a2U5+SXrSDHae6Z9eALRp4oBTE4cmQOtTpIZZQxpoZxwLv9YBSLDJ4oKqWgBUp227NSSg5lYJbPBBjj844W44lQMrTN0p0FZNkGc2JMNyenZK2Z01UstB6EgUMeXqIdq22bQu8t2dOe6/8loAVCj/lAmxqwkJLwS3efksk7ZsTeQKkrH2jJIXoaifChAXX3GQohPAEgGcByMfKWmjV6jWr1m044/gPjj190kvWnZZ+f3jsqe/jh8/Pp/tLf2d1ll3Trr9IZzTLW+iH0nZN/TOwavXTZr4tMnj0zNQ7/oNjT5+0Zu3Lj88/92hx/BzyrHge+vZ7pX5zXgDMMrSQ3NdXxBhfxlWZ6MsOACGEe2OMuybaaAPN5B2WZvJOjk6atgAzmtGMJkOzl31GMzpBaBovOxuRM2KayTsszeSdEE18zz6jGc1oOjRbxs9oRicIzV72Gc3oBKHZyz6jGZ0gNHvZZzSjE4RmL/uMZnSC0P8H6RjKkx6drbIAAAAASUVORK5CYII=\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQIAAAD8CAYAAACcoKqNAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAAgAElEQVR4nO19e9CmRXXnrxkGmEGHyyA4XEZGGZL1kkSZqCypLQ1hTVgr/oPEbGkZ19RgVS4GsxVgU9Hxj6TIrsFQcRN3AgazuUlharU0pbjsWluSDS4Yi/WSEdahhsFRcCAQZMKA0/vH9/bY3/nOr885/TzvfM+n76miPt7n6cvp7me6T5/L76ScMxa0oAX9YNNxq83Agha0oNWnxUawoAUtaLERLGhBC1psBAta0IKw2AgWtKAFYbERLGhBC8JENoKU0k+nlPaklO5PKV272vwAQErpvJTS/0wpfTWl9OWU0jtnz09PKX0mpXTf7O9pE+B1XUrp71NKn5gwj6emlG5LKf3DbE4vnhqfKaWrZ2v9pZTSX6aUTpoCjymlD6WUHk4pfal6RvlKKV03+7e0J6X0Ok8fq74RpJTWAfjPAH4GwIsB/HxK6cWryxUA4FkAv55z/hcAXg3gl2Z8XQvgjpzzdgB3zH6vNr0TwFer31Pk8UYAn8o5/zCAH8USv5PhM6V0DoBfBbAj5/xSAOsAvGkiPN4C4KfFM5Wv2Tf6JgAvmdX5w9m/sTblnFf1PwAXA/h09fs6ANetNl8Knx8DcBmAPQC2zJ5tAbBnlfk6d/Yh/CSAT8yeTY3HTQD2Akji+WT4BHAOgAcBnA7geACfAPCvp8IjgPMBfMmaO/nvB8CnAVxstb/qEgG+twCF9s+eTYZSSucDeDmAuwCclXM+AACzv2euHmcAgN8H8BsAjlTPpsbjCwE8AuBPZleYm1JKJ2NCfOacHwLwPgD7ABwA8HjO+fYp8SiI8dX172kKG0FSnk3G7zml9BwAHwXwaznnJ1abn5pSSq8H8HDO+Z7V5sWg4wG8AsAf5ZxfDuA7mMZ15SjN7thvALANwNkATk4pvXl1ueqirn9PU9gI9gM4r/p9LoBvrBIvyyiltB5Lm8Cf55z/evb4WymlLbP3WwA8vFr8AbgEwM+mlB4A8FcAfjKl9GeYFo/A0hrvzznfNft9G5Y2hinx+VMA9uacH8k5PwPgrwH8y4nxWBPjq+vf0xQ2gv8DYHtKaVtK6QQsKTo+vso8IaWUANwM4Ks55xuqVx8H8NbZ/78VS7qDVaGc83U553Nzzudjad7+R875zZgQjwCQc/4mgAdTSj80e3QpgK9gWnzuA/DqlNLG2dpfiiWF5pR4rInx9XEAb0opnZhS2gZgO4DPm62tlnJGKEIuB/A1AP8PwG+uNj8znn4CSyLVvQC+OPvvcgCbsaScu2/29/TV5nXG72vwPWXh5HgE8GMA7p7N538DcNrU+ATwXgD/AOBLAP4rgBOnwCOAv8SS3uIZLJ34b2/xBeA3Z/+W9gD4GU8faVZxQQta0A8wTeFqsKAFLWiVabERLGhBC1psBAta0IIWG8GCFrQgLDaCBS1oQZjjRhCNKEwp7ZwXL2PSWuBzLfAIrA0+f1B4nMtG0BlROPkJn9Fa4HMt8AisDT5/IHicl0TwSgD355y/nnM+jCX31zfMqa8FLWhBA2kuDkUppSsA/HTO+Rdnv98C4FU551/Wyp9xxhn55JNPxvOe9zx3H4efPYLHnjqM0zaegBOOH76fae1pzx555JGjfJb3zznxeDz59LNH/0qeZDtDeGd16+ePP3bQNZdD51COn/HE3tdzadXxzmFkTJ6yLR4lL2yc86TDzx7BQ9/+R5xzxqkr+pR83nPPPd/OOesfxpxcIt8I4Kbq91sA/IEosxNLLqd3b926NUfphtv35Bdc84l8w+17wnW97Vl9lPdXfvBvl/2V5WU7Q3hndXvaHDqHcvyMJ/Y+0qZ3DiNj6h0/4yUyzrGoNQb5DsDdmfybPX5OG5UZAZVz3g1gNwDs2LHDLZY8+OhTuO2e/dh+1sl41bbTsf2sk/H+z3wNV1x0Ls47faNZ75ILNuPO+w+uKH/FRecu+6s9K22UupdcsBl/9/WDeMvFW/HqF25e1nZNsh2tL8af7LPUueSCzXj/Z752tPwlF2xu8qqRxoe3bl1PjrseCwBsP+vko+UsknNqzZ01163xlL6sb8hag/ItSp5bfY9FbB6sd5LmtREcjSgE8BCWIuP+7RgN33bPftx4x3141bbTcdfeRwHg6N+rL7vQrPd3Xz+olj/v9I0r6stnpY1S9877l9p69Qs3Hy33ym0rP3bZjtYX40/2Weq+/zNfa45H1tNI48NbV9avxy3rv/8zX8Ndex/FnfcfVOenJm1ONV4Z7625leMpfQHtb8hag/ItSp5bfY9FbB6sd5LmshHknJ9NKf0ylmCS1gH4UM75yz1tyZNS7r7ltLF2dc/pVT/XdnHrVIru/nX5cjpd/rKzjvKp9SF5keOX8+Q5hSX1jktKa6XvMrZLLthsSj4W34yXFo9MimInOZMAmMTDJEE59h6y5l573yOFzEsiQM75bwD8zdB25Ekpd99y2gDtXd06veqTFYC6i1unUnT3r8vX/Nenp3XyyfHLefKcwqzt6LiktFb6LifvnfcfBICm5GPxzXhp8cikKHaSMwmg1Reb43rs0XWwxsXe90ghc9sIhhLbdeu/ZXcHYvfQmuTJeskFm7HllA3L3jHe2J2z8GDpJerypU+PxKJJKdr8eNuR42G6EOuU1taqXqMrLjoXBx4/tEKvUpdpna5yvLUE6LkPS/0Dkyattlp6Gjm3QyUCixet/YhuoNBkN4LWrvvKbZuP7u7lfeQeWpM8We+8/yCuvuxC18lX8was3P0tvURdvu7TkliYVFLPT6QdOR6mC4mc0toalbJSovOervV4ZT3PfVjqH5g0abXFJAxtbodKBBYvWvsR3UChyW4EbCfVTp9acxu9WxaK7KLszigtGa0TUtP0t/pibUSsJXU7V1x0Lr2XezX0hdhaac/ls+jpxda/ZW0B0PxmNG2/h5g0FrE8eccrx+X5hiJ9TXYjYDupppEecrcsFNlF2Z2xZcmoT0h2ult99bTRkqzY3Hk19IXYWmnP5bPo6cXWv2VtAdD8ZjRtv4da0phWRuM/Ol5LQmzVbdGkNgJNi67dtwF7V2fWhshdzbqfM3u5dv+td295uke0u5b1w7J01OWYtMF0BR6erLs2K+vVjjNJQuqImP6lpVeydCWSlx7fAK8EZElrLZ0Ym6sWrdu1a5d7EPOi3bt379q5cyc+9Lm9uPGO+7DppPV48NFD+OgXHsLZp27AxS/afPTd2aduwNWXXYhb717a7Q4+eRh37X0U2898Lq6+7EKcsmE9ABwtv+/Rp/DRLzx0tFxpz0OyjU0nrV9WV/L0yXu/iY9+4aGjvBQeS/1S7pzTNuLiF20++l6226JTNiyVLW3I8cq2Snmt3Ote+vxlbUl+WJuMJ1lf9t0
qa/Ul57q0Wdorc1/W99a79x/9XcZZ+pTrJb8xiyfvvLTmqp4TjUof8rtl4/XM1Xvf+94Du3bt2q31N4mN4AN/+MFd//yi12L7WSfj4JOH8XM/fh5etW0zNp20HpdcsLSAl1ywGWefumHF75/78fNw9qkbcMVF5+KUDevx4KNP4UOf27vivSznoXXHAfsefQpvuXgrtp/53KN9n3vaBpyyYf3R96/94efhk/d+82ifpQ+r/rmnbcCmk9arPJVxlLLy+brjoLYl+5D16jk+57SNy95J/hl/jLfWeGRd1lfhn42PtS3fa+Wt9bLaYLy35oXNFZsX+W2Vb0f2Vd7X6yjflXGWNlsbwSSuBo89dXjFXbVo0TXrQEvLbFkbIqRpmTVtOqD7MFj1W/djy14u74hMmy7raboU1lfU89Bz37fqsju/V5Pf4sVar6F+IkNs+h6vVW0smk7M6zFZ0yQ2gtM2noC3Xbp92b0nah0Y6pmmkWYvZr8jMQaaboDZ7pmNu+UnUPcl50V6Lmr8aZrpWkfSY6e2+pJts/t7yzpg3ePZ3DGKaOjl2Kz1aPmDeKxgLf+E6DiBiVwNbr7pj3f9ye9et+ze85UDTzR1AvJuxO5UkiL3O3mflbzIO78U/eR9kN2PNb7KHffgk4eX6ScsHYG8F8p5OXT4yIr5YXqEotuQOhLvPbc1p0y3we7ATF8TucezuWMk+5Rz2xqbtR5Ml2N954VqHYh8z8Y5eR1BURbWd7LTTl6/7J5T7rbyziTvba/94eetuAPXZN2la5JtS/2EV98g738aD2W8hW9Lv2DxyO6ata7kiUPPNPUNln7F0mNY9+J6PWTb7L5eeLHWwqOvYPwyPZO1BvVzdoe3+PKue2R8hSavIyhU38mKBxqAkF+45WFo3aVrYjbrVh1POxoPZZzSP7/lgefhsRUh6bmPt/QrPX7/krxRhMyfgvUR8U+wPCutPrXy7A7v9cmw1r3He7BFk9oIavJ4bdXlvF5wrJ6nTO/dmNVr2dm9fXvLazz33CV7xzU2jdlH71y3no/1rRyLuQQwjSSoF110URiZZd/B7+Qbbt+T7/r6t5f93XfwO2o59pzVa7XB2ozW87bjaTv6vi5jzZ01xz3t9/I/ZM68fTD+o+OP9DUmMb6xbv29+RgjFM2dpBktCtAxxFWz1yTU246n7ej7uow1d9YcWzx6Ap28/A+ZM28fjP/o+CN9jUmM73UbTzmD1ZmUstBDUYchSxnlcTTyOKtoPErnHalA0hRK0mFIKvIkT5YySb6vFVsvPnuTqoArfUoF7OUvOwuHDh9ZoQBjPDOnL21OmUONdIphjket+WLKS9aHnMMyfkuB1/pe5bcQdTRi34FGTNn7qVve/8Cud//WH2l11pxEEHUY8iijLEejaBCOB6BD+13XtU4hS+nJ3sv58wQ0yXBdFgjW4jkajMOcYqwQ4Ij04XUwKuO3FHgasW8hKlUOCVQrf9/13WcOszprbiNoOVIAfiiriGNRNPhEU3RqTiItJZOlJPUGlkTCfpkzDxsPcwLSnJYsYsqxlmOR5KkEl5V6hUfmyFXmxgpD9irwrNBn7VsYCrHXckqL0JrbCHpAMrTnkTtb9L7KTF1WuK9Wt/7r4UlSJOxXA2mRdTQzGoNNi4BxMKlL8uIBrNFOea9pNcKbpFbos5w7L/iuBbFX1xuih5jURtDa0bwuxC13We29xyzDynqlExb2qpVlEosFqOnlvXWSeF1p2bjLcykRaCel99SKSDT18/oEZd8Ac+OOAoZq3yaDu5P8DYXYq9tn4/HQpDaC1o7WCpqpyRsoMgSIpFBUOmk5O0UtDF7+PfMRvYdaQCQA6CkO6MCw3r4iwUfScUr2yXQEUYuG9m3W+pcWf0Mh9mrqCTYqNKmNoCeQYkwIskKWTsA6Ob1SiAYSwk4rKQl5g3A8koTnPq4FHdXBV/V8MB2D1qdF3vVrfQfRAC6v1KGtmZRCot+QFfDVGre1jli3/gQ2f5PaCFonbOSuxOp5aajvgVcKYRp8YOXuzrTOVkIUjyRh3cetkGdmZWCneGQ9vOvX+g4sCwSw/JsaEvJsSSFj+25o3xBbxzXjR9ATSGEBWlj2Ws1OW+zrlu8Bs9HLQJgIkIe0O2vBQq0gnFKPgW/UvJSgIwb+wfooc8UCwVqgGV6KBC5pc9nyl5DrOkbwFONHfiPSN4H5SXh9N7SgKzn/pc2WH8GkNoKe8FYZvukNUy2khbcWaCsWVmxBe0VDT+t+ZNhwgT2TIaWMl1KvhC+3eCmh3iy8mPUh4d8kTFwrRNZLUTiwVnizhGST6zpPSDIG31bCrFmYeXmvhY3XvGmh0XL+S5vvfc+796+J6MOaojZRywZu3eNbgTdeYFDWV8uSIe9/7N4aHT+zjUfu6xZoSLEOyHRr9Zxa2mvv3HrrFar1TVE9UhRUpNW+1Kcw/YSlb4mktOvRi012I4jaRD02cKt9prX13vm9IbWte2LE47E1fmYbj9zXmX5Bs9Vr99meUG/Gp7deoVrfBLQtFV5LU0/8A/PwZL4ZllenJ6Vdj15sEleDAl6qATswP3BGXj1D5B7r8duPXGcicQ7M19wCMY3oWWRfFiBqmbtyf5V/mX6iNResDItj0EBYWbtM58OI6Ruk/iUy51aMBAPWYfEOPWC8kwcmKeClwEpgByBmE/XuhpYPgNam5bfvpcipb1kHoiCfnr5kWx7QVvbX4sV78kstetSfZIilQvoJyPF55tyKkShtMYlWk/R6UqgxmsRGUMBLNS+p3nRUjHqSPzAgVe8dkrXn0X9Ytm5vWnhPn14dB/OOk8lFh3gWRmMK5Hs5Xz0++BYPPd+kpcvx6ita/DIdxuT9CE44/jhq++1NR8Wo5xTXPASHxMpHeLBs3d608J4+h8CGyei8oZ6F0ZiCVhSit88oDz1k6XK8+govv/XzyfsRaDoCK7lG1F+gUM8d2kqMIdu2dAmRxCbsjuyJ+R86bsaLFdcv50e7r7OYfiuJiByPjLnX8CmkjsDrN2BhJnjmzsKTsHQjZR7YN1W3LwFwZV+TxyPQdASWtt/yqGPUc4f26iu8uoQWD8wz0BN370knHiFvxGY0ccgYUlUrSlM7tXu0/2Oczp509q36pR77pqTko+lNSl9zwSNIKZ0H4E8BPB/AEQC7c843ppROB/ARAOcDeADAlTnnx1ptaToCRlJ3IO/r0l4bjSSrn2l2cc/dULtLawldWZ9a3Xq8HpyCiK27dxyyXOTuLO3rHvu4Nk7mi++x7Ufv4cwnQOuTfadePq2oS02HduDxQzRx8J33H5ybjuBZAL+ec/5CSum5AO5JKX0GwC8AuCPnfH1K6VoA1wK4ptWQpiNgZME9RzX70Wg8z91Q7vbSQtHqU/LZgiOfF8S4dxxauSj+gNSiW9YAOZ4oVqU2rqgU4pFmALhQjax1Z/4w2ndx2z37m7EiLR1B90aQcz4A4MDs//8ppfRVAOcAeAOA18yKfRjAZ2FsBIUimm0mCUQi/rRIuisu4lFcjF8rAlDu7trJGj2lrMhAWb4V2cnGZZ1SVj0Pig
6TeCyPQktK0zwLmfTA5szr9dhaK0ti9Vpq2Pjr9+ybL9/xvqefekKZUgAj6QhSSucDeDmAuwCcNdskkHM+kFI6k9TZCWAnAGzduhVATLPtSY5aly/ktZVr2m9JXn2FB6swekpF/QgifhPeU8qq55F8WhKPlzetvOZZGL2ve70eWTnPd+q11Fh9aXWkheK4EzduWtHgjAZvBCml5wD4KIBfyzk/kVJy1cs57wawGwB27NiRAX0X9OACtnZcSdY994qLOKKNJHk37pVSNGJ6BeaL7pFaWJ9e9COL75a/gZS2WmVaOgy2fi0prKAFeefKmgdZvvXtsfFaehSvxBeRot/1vse/TTtkCQ88/wFYD+DTAN5VPdsDYMvs/7cA2GO100pwcsPte/ILrvlEvuH2Pa730fK9ZcbgpaetKz/4t5MZr5fG5IGV88z9GHMV7bOX2HoP4RnA3XnsBCdp6ei/GcBXc843VK8+DuCtAK6f/f2Y1dbhZ48s0xy3dlQL4cVbvmVdiN4ZCw097SS/rC0Pmo7ntGJ8e8drjYHpSDSyTjpLspFzrVkfohKNHI91OmtSJbMSePVKzLNWm4/e9QLQLxEA+AkAGcC9AL44++9yAJsB3AHgvtnf0622zt3+EtfuF9kprfI9u/lYksJYZSPt9LS7mpJRbxvR7yPCS6RNS3IZ+j32SDyYh0SQc/4cAKYQuDTSVvEj8Nih2ckYLd9zd/Pe74bU6+3Daqen3bHGO0bf3jai30eElx4/iZbkMuR71N4P+nbYDnEs/3vZj758bolIx6Ro0sxIAs/exKrWe0+fkl/reTQJak/yU29i1sj6R+fYm/y051tkfQxJuGrNDeYhEYxJmouxJK+76zyp16TnqT9WwFJknizzZ68bd4vXseawJyFpdI57AUQjvDDX8SHj66k7iY3gOScejx8Rih2pVJEpojxhmS1qKVa8JkumyJJin0dx53FDrnmImvC08kwhFVVMynlruQtbSkHLNbfUY6bAiGsxI8mD1VfPtyjH0etCrim5e65Fk9gInnz62RWOKnJ386aI8lLP6RwBKGlBrmvOTx435JoHRl74tLrPVihsBOabrVnEccpyIWeQdKy91tww0niI9mWRB3Ckx2kr6updaBJhyLfcfNOud1x1Fa64aGXa7BJOKqGa6rIaWaGml1zAw1ytcGJZ14I986RVZxDUkj9vCC0LX9bg4BhUthV+LPso8yJDgeU8tuZaQpBZsGEyzXhrXa05ZOslw6zleFvjYxTht8VbJLx88lBlWtCRBX5h0ZDgmyg0meV66zmlvam9vOPy3HMZ3149gyc4S0ua2pprK1Fs6bM3zXhrDiVPhWSYdQ9Ia5QHRhGJL0KTkAhKXgONGEADA3wY48QoxHZfeQIMAQxlfVnzIMsxiUID6rDAX1hbbNyybXniapIHKyMlPm+5yBqw8VjlmVTaSlQTBSaJAuJq5VkbLYlg1U2HOehibDl39LriRmjeLrk9fR6L8fW6AXucX4a4Do89zjHK9zo2jcETawMN8+EkJAINqqyQPE2sk0PeU8uuL09Sz87L7tnsbmi12XofvbdK8sK/e8YdlXwsiC9NIpB9MIlPPmcSUfQk1Xiw4O8ikoSUIizYfCbx9XwXbJ1aEsEkNoLfueEPdn3+pFeoqaVk+qbyW6bZkimgZJoubxqymmQ6tPK3tMVSnlnp1bT3Vl2Z0ktSmReZQivCg+yLpeuyUobJcloKNNmHnFM211qKL++4rHFa6fLYvLTaLqnLrPRv0XR5sp96Ptg6TX4j0KwGhayTg+3WMhHGkKQU7J7NeGOnlcdS0WsN8SZNad3Xo8lSrFOslYyE6XLYOCyJZ0jiVdmntZ49+girLON/jIQ1ZR0+/Rf/5eCkwUtbUGVeIBIL+groT0oB6LZdC7qqUMRSYZHHv6BlQ9YsBVGQE4sXjz8B0/6zcVgAqRHgFUmahSpiVfC23SIPDFyUIlBlk5AIWjqCQr2a7JbWHPBpXb0aYHZ39OzqXv2CZfWIaKtLWnSpV/Hev+WaMD1Ny2LB0oNLsiSelkQQ1R8MgTG31sGS6IZYnmTfch3WJJy5JGZnt5JLan9r8vjDWz7clmTg2dWjMQRWOx5YLo3fCCCqxxuSzX00QYsl8bQkgqjNfgiMuazjjdcYyx+A8fvKbZubcOaTkAg0HQHzWvPqBiw/g5aNm/kPzCOJCKvr4VMjaVXxePX1+i70eLn1+g8wYroGbZxeH5Qx9A/sm7GkR49HqEWM3zXpWch2VK9uwBsB6AES9d6/h+zqXh986w5s+epH+I164PVIPizeISoBedKEs9TjVp89+gcrXsOCNx8ShdjD7yQkguJZWO/IJX2T5cUlTwSWNpqdbmPe39ku7vXdr9uS1g6vX/sYpzRrixEbfyu+oTd9nOyT6Te0MTHrgCVdWtagnlObfcc98RpsTmTbk7caFKp3ZADqSWH5njNY7CF3s2gMvXUv9EQ+MmtHVKM9ZHxRS4Ynpn5o+rgWz9Z6W9YBr+TAxs3KaeTRQ3jjNVpt1M8nbzUoEkF9t9l6+kbXvczr796jCbbuyF4tueeUZm1JrfqQiLe6H+2kjJ78zFJTeL78ZWfh0OEjyywCJSGppW+R62vpJ1q+C0cyQhJPVCekndrFImNJgd5YmojfC5N0Jm81KCSTUnjuOewuJnfHHk1wjya/5W/AfrfaYlr1nog31k/vyV/aYJGiNc91eY++pZA3AUjLdwHQ06J72o58e/WayD69UaNMGon4vTBJZy5JUOdBGoKMFw1HwkMzxJ8Weo7FjwWlHoWTrsszyG8vWpC3by9KT6uuhZ5T5vbyl50FAMsQkFoIOyxNGpuzunzps5U6rQXp3upDPpffWt2+TKrSglev51T+baFcMRh0AN2ISZPaCOSuF7GXyx2U2bgjyTatU0fyOATTD2intAbaUoa377EsGy1/A6nfqPU2FsZDVArz6ogiUlQP/gKwXKItkg/DWdDmtDXHHr8EQJd8PDQpHUEktlp6pknrAvNEZOVbfTMEniE+6LJ8sZJ4owdZRGSvx6FGljVAtlXmvOgGNDQppi+xYknYHHvu815djzWXlo9GxGuz19tR6oxqXUjRq7FvaPJ+BIU8Xn6FWphvQNsT0Wu39iDw1BQ9bevyt92z36VN77Uz92i4LWsAk8LKGDQ0KXYC9ia19dzno1YfVs6LpwjYXpu93o5S/yJ1Ib24npOSCLQT1YsHaMXOWx5oWj9jeb1Z3mO1Nt2KX5c8eeMD5InSssZYvhmWdcRrEdDW0ev/EPHNsCwMlqQn27GiE7WyUR8NJsG11l/63si+14xE4MHyY3WsdNOWB5rWz1heb7IcO4HqHZ+N15tGnFlNrPa1uWoh7Hr0GR6K+j9EdAsRXY/HSuDRNQyNP2E8tta/SJUer1JJk5AIWtGHbGe1/AQkWadVRBphUXeSN+bhpZ1A8k5pnV5e+3sPviKThCRFPS4jeolojIh1AtdtlDn0ogdJ6unLi4Ik+7BiFTSpkunJJg9M0kIoOmWDjgpjoeJIKu8LcpEsp9XXkHW05
5IXC21GoiidsmG9WUci9lj/oNi8WfNUj0+iQEnyIjJJ5J8WsTblc7kGbLxa22UOvehBknr68qIgyT7k9yr7rtt73Uuf30SGmvzVoCRBbdk9mb21xx7uJdaHxYu3XqSvnuw1Vp9D61jlenjvnXMPT2N/Qz19Wf4g0b5bcxtqm6GaHsv/CopxJEkoK2uV89aL1B2TppDs1eKp93mk7bHeD6kTTbg65lpZSVJ7+kADxfi40DY0ZyoKktvu2W++Y2Wtct56kbpj0mr0GeWp93mk7bHeD6lTyv3e7V8LlR9jrWTf8u/Y38MkdARa0JEMFLHMS0w5I5VrHgVg6ZvVtZx+hpAclxcUowdOq5cnFirNFHk9bbPQYDk+S3Hr6dPijYUC9ypya2KKZmm6lY5w2nxYEHtzDUNOKa0DcDeAh3LOr08pnQ7gIwDOB/AAgCtzzo952pJBRy1zoOViy0wnzBzZ6to0tW0AACAASURBVFuWAcZJxKoRS7dlBV9FHVSG8GSFSkeAMax1tMxw3uSxrT49vGmhwEPCkNl4melWOsJp88Ha8oCXjqEsfCeArwLYNPt9LYA7cs7Xp5Sunf2+ptXA4WePrEh1fuDxQ2oQDgs6kYEWHuWblm671bdX+WUFRHmIjctbzhPMYwXXsOcywKek9i4p62UK+8i4Peuj8VL6KvPQCjBqfUOtdfIqAFtzKN/LdSuBSyxVfev7lnMn1+ld73v822zeB20EKaVzAfwbAL8N4F2zx28A8JrZ/38YwGdhbAQaeGlxjuhN0GmFmLYckFjfXoeZXjdgrQ2rjuW0VJ5H3Let5+y0Apa7vZbfkXH3OojJvlrONNbJyfiNuDdHg6cKeVLVa7+1uZPrNM8w5N8H8BsAnls9OyvnfAAAcs4HUkpnWo1o5kNPWK73BKiJnSR1PXb6WLu7PL3qkOfyuy7fOoGGmvDYPEXm2DJlyrbLOKWkULfvlULYGsgQcNmnPFm1vmVIcK+JtiX5MclGSgJeyadXqqrXCevWn8DqdG8EKaXXA3g453xPSuk1HfV3AtgJAFu3bnW7FkeDVCRZJ0ndh6zD2mb3WmA5UAcLidba7bnHas9b8xJNqmHdmbV7rGzfK4V43YK9QWd1G96kKhZZkp82/5YuawwodbZO89IRXALgZ1NKlwM4CcCmlNKfAfhWSmnLTBrYAuBhrXLOeTeA3QCwY8eODPiAOgq17mstyaCUYyeJp69C3vu5xqN2D2xRVCqxeJf8lJO9Ptmid2a5ZtqaMH7YejOwE6YT0njTTuH6pIycthpPLTAU7xyxb6CUr3UgDJiEjaG0se/pp55gY+reCHLO1wG4DgBmEsG/zzm/OaX0nwC8FcD1s78f87ZZ735AG6rMG87K6rGTRCN2Qvaebuwe2KLeO2dLsmBgnV7dhmXhYGuitcWkEybBaac5442FBA+Fe7PAUCJzxEhaqrwJU7Q2jjtx4ya1AObjYnw9gFtTSm8HsA/AG70VW5pQL3yUvHuxHbQHzknrs+6r9OGVZiInklWnp02tbq3TqE86T5uWZl+7r8t103QCGm/shJTtt6QQJhFZ45QS3RjraEl0mrTGdD+M37lIBDXlnD+LJesAcs4HAVza004Lqsw6fVkilJ4d1CLrntcbOt0ib1hrzynHdBrRcNZI6LelJ9HWSQNG9db36FG81p1IEhmrz1LHI9FF4c0lv8daIhiVrHu4vKdZNl6PNt6yM2tSh+yzJREU6uElqq/oIY/NukXsNK/v62zdrHGwE7Klc7D8Oti34tW7tOaHtSG/Ee83w3ho9XnMJIJ5UtSmXYhpgj3aeK+dOeq9KKmHl6i+ooeiICEeXso8MBu3dxzshJSkeYoy3Qf7Vrx6l9b8WP4C5RuJpimL9OmRCCYVa6AR8yUvvtgFKFP6tVtQVhFoKwskovxm9SMAHQyAw5s2XPLSIgZOOhRopBVrYCWkKf76mi+9ZyxavMOrtm1e1mcU3s2a0xbwbRkPGyf7lobEjjBItTWT4EQj5kvOtMiFLM+61snj9SDzaKxrXjx3ULabl3GyutFTW+NrrBRfrdPNe/pGdR1s3mpo8brPCLxbtG/2rPV8DP8B2Zacw8mnRY9AlZXdjSWJZJFgvVF9HrJOViuKTRuvJY1YPFineg1tJVOVWUCxrJwFENsiBj3mPSE9wKKWVGhFOlqAohrwrZyDaORjRDKS/Ml1+L6CKpMwX+WvhG6SkF4SoovBkPWQhOSScFQSdqoFcSX5tCDWGA8WfFgNbVXatqC7Ci+sXGn74JOHcdfeR0Nzy6DHJGQbGxf7Tur5YpBycp3Y+rA51r4l9n15oOK08URg3gp/ch0mvxHccvNNu95x1VXqLslOGXYn9OoMeu7UrK0oxLjntIqSV+poSUJeKcnCgvBIPtZ6WKebFcfvGZ+l45H8M4lIa8cr0Vjrbq1JS8KT3+HkMQtPOP44884sbdHWXdrSGfTcqVlbvRDj3ncesuIACrXu773WjpbN3zteS69irX/LZ4GNrzdCVcZQtCIBW3PUes54lsT8JVq6EI0mIRG0rAbspGM6AHYaWdr3yIkcTcZRyKNN75FQIn1pSExDEHZavLfGxN4x6UKebux0b+lSovfuqFWohRbE2pLfpUf/oM2TRxcyV4SieZP3pLPKWdr3yInca2f3atPHoAgS0xCEHcBvPfG8a/kgtBLSeCIIo96XUatQ3Z5XArViCawYDG0MTFpqRR9OSiLo2VnZSS53WnnCRE6vQt57HtMuj6GXYFYBr/9BzYO8U/bYyz08tur16m6G+Gq0koSMta5e6UPql1pJTjU/Gk3CZdLSmvEj6NlZLc8tYKUE0JvA1HvPY/b2MfQSXt96Nn4Wz+G5S3ojICPxHb26myG+Gt4YgyHr6pU+tLgFDe3J8qOp22fS0uT9CFpJUC0JgHkeMgkg4mHolUbkru3V4Hv0EgzV1pJovGnLPNS6j9bE0oa36rHT2evtyPr2WGQsvQLzTbD8KLTTWZ7cloehpRtpWajYnK4ZHUGP9tWDYtuDeaiV9UbXefUaHr2EFVPB5qkH84CRV3/SE5U3FBOB9e3h39IrtNCwvPd2JplFow1b42T6Jjmna0ZHUJP3PiYlAss6EEmead1XLb95S5LwaOl7U7P3eKZJsvi0bP0RnQi7K3slmh4LhZeYvolZAGpJwWv18lqtPLEnTCpeMzqCmrz3MRbFBejWAS+KDmDrDbx+80P8yNlJ543S60Xh8fRh2fojOhF2V/ZKND0WCi954z00Df+Y0qGXFyYVzxPFeG7EYsRZOQuHgJW3ng/h3ctzT1veNoaMyzsOCxugp68x12Mo9X6LkTLR9Wzx0jV3LCnisfyvJEEdg7yJKz0JK1kiynkkA2V1ogk7xyg3hcSrQ6keQ+942FoMSURq9eF97uFBtoFGEtTJSgS95DUjRRR1luKq13201Wep421jzHJjgpysFtVjAPQUdpE2WubDsfiMfFM9Ie0tWrMbQQTsUQOmtGCiWVvzvm5EIaxaAKFW+z0Q6MeKIinDNMBRbQzWeBjElwZF13sFkiT5tKD5
NPg3RvIbn0uCk9Umr/mFOY1Yrp1aW/XfmnpcbBlFIaw8wTasfUA/Kcd2d+6hXuelQi0gXG+fPW7MURoSnGTxIL/xyZsPW8AkjLyuxsx8I6HOWPhyD1muyC0nGcs8ykx2rXTZrP3iYjyWa3Fk/bxgLmw8XtOtp29m5h1igo2GHVumWu/3XpMMzf/yJ27+xnt+6zc/oJadwkbQAiZhdMoGH3BFKSdBQQpow6HDR5b9HROoxAts0RqXBLhg4BPbz3wurr7sQhPIom5f9hUdj/d9q00LzIWNR66r9R20+pZ9lrYsHnrmzHquAarUvETGWb6d8m3/099/8pl3X/cbv6eVncRG8Me7d++66HVvVE9jbzBJNBhJOq7UEoEMz40640TdnutxspOPuaDKIJWeUOJecAwLcFRbuycOPaO6TDO3Xa8zUAuYxAKSsVzAe6DXrBBmBnoSdRhrfUNynJN3KHry6Wfp/darJY0GI7EgjzHCc6Nuzy1+LSBVyX+U11bfbDysHkvaUc8jANVlmrntRgFEW8lUCl9eV+io/qWmXtATNree9q1xTt6hSEuLzrSk3vRSzCrAkpHUyTW3nLIBwPcSpZY6EU123QfTDNftSA21pRUvVNr0pgLz8Cv7ZhaJeg61RC8tLbt8JtfLWl85BpYktcUn41urN5aVQPuuWynsZJ+t72KIU9YkNgINqqwH/qomj1VAK1fDX7N05qzvIVDVTIrwSkLeVGAefi3XYa3vSFCP9szrxsvG0IK3bwUPeU/dsawEXth2C7jFsnK12tBoEhtBofqkZPZza6eUNuByQpS/TEKoT//ek52d6uU9a7f0W5+IrROublue2ixFt3aSsDkuz1vpvj1r0iLJT8Q+LuvX9SKut95vyeLBK73UfTHpxCvpsu+il79JbQTSGyxyZ/QAc7ROzPr0B3z29WhIdEuikPx6E7jIU5uFp7L7egvOzAJjZfPiodbJ1guSYgF2ek/Msb05NR6i0kkhC6S3l79JWA2KH0Gt8S42bgtOqhADkZAADywxita31PhaCTGsMFwPlLj0a2Dhx3J80nrAADBqi4WcY68NX1KPH4EFnDJWMpIW7B37bVleImHk3m/HCvVmloBW3/L7mzyc+WNPHVZ3rnqnjNyVrZPCk17ac1LIPq0wXA+UOLBccvGEH7esB5bFoqXB9p5SPSejBZxiSVMWjxpPVpvzlASi/LN6EcCZiKQ2CYmg+BHUp1mxN7ds7xpZNm/LTu8pK6UUyaN8z4A7aiptW4AcViot5kHZsoEPAU5p8dQiq47Xy89rl9f69EpsFgCLh7x+BVEgngjU3twkgpTSqQBuAvBSABnAvwOwB8BHAJwP4AEAV+acH2u1U/wIACy7v0Z9qwG/zbuQxx4ry7akFC2+wdK+y7ZbOz4bH4ux8NjAhybgtOIheuqw+7xXetHWtRU70OLJa0VpkdevIArEE4Haa9HQq8GNAD6Vc74ipXQCgI0A/gOAO3LO16eUrgVwLYBrWo0UP4Ja43vg8UPN6MBo5JwVxecp6/VtYFGLTJNffg+xL0d5bFHUChCZW6uPlqXF49Og8cwiNGVfrA25ntJnw+OrYc2712ehZfnwjkej7o0gpbQJwL8C8AsAkHM+DOBwSukNAF4zK/ZhAJ+FsRHUfgS1N1hLyx854YHYqRWRKjQ9QytqkbUHLJeAovblKI8tiloBeiQCr8Y+6tOgEfMQZPf2Fq9Rb07vqez1WWhZPjxSEaMhEsELATwC4E9SSj8K4B4A7wRwVs75AADknA+klM7saZzZsi1vsDFi7KWt13tieE83rb0iATHfBS8xX4cWXkGPPbzVtuYjYHk5Mv6l3bz+HqISjjxt2TqzcUp/kcJLa40s6VL22bsW9Xh6PCCHbATHA3gFgF/JOd+VUroRS9cAF6WUdgLYCQBbt25d8Z7ZsqNa1kKR3TFy72y17dV8l7K9wJ8tXjz+8j1a8Fbb8hSPxGywOzGw3IrSK+FEPCPlOMv7iOY+qrPqXQs5zigN2Qj2A9ifc75r9vs2LG0E30opbZlJA1sAPKxVzjnvBrAbAHbs2JHle++pPORepNVv8WCdbIyXUr+cnJrewxpXhO+aPKdXtG+r7ahnoncOa4mQSTgtX3wmobXWpYVUFJEuLSmy5XXqmSdr7BZ1bwQ552+mlB5MKf1QznkPgEsBfGX231sBXD/7+7Ge9nujtKK7YmsH9lgJWm0xPwNA13tETgbvyeE5vXqtBqztqGdia1xRa0jLY5FJaNa6WNaGFnmlyKgk0PKT6ImaHWo1+BUAfz6zGHwdwNsAHAfg1pTS2wHsA/DGnoatyDhLAohqaz33POt+yiQIqcEvWmfPKT00Oq01fwCa+hRLEpLxEazPCFnrYUk4LV98pn/Q9FCaLkE7vdnJG5XovFJGS3Io/x+xahQatBHknL8IYIfy6tIh7QL+O9RQf3HP7u6NX4/4GXhP6TGj0+S8AO2YCksS8kZ4RshaD0vCafniM/2DJrm0LBKekzcq0XmlDK8EG43knIRnYYk1qL2qTjt5fdODzvKDH5rmCuAINcwTz4sj6OGNefd5ffAZll8rpiLq187Qkloedxb/FrG586BAWW15cQaZF+eQ5KdjxUHUfct1WzOxBp7IOK+md4gGtRDTilsnhIa+E939vRKRdae22mu1bZ2+zL4eiYiL3o0jEqCll/De0716i7oew1foXdfIPEWsGoUmIRGUWIN6B7MQdq0d14pW9PhoW6eLlBhYqupo/APgx7bzRuGVsWjl2GljzbEVt6GVY/z3Sm6Rk9IrTTHJh82x9r58v97YDysaMTJPbB1aadEnsRH87u9/YNc3tvzEUSReD8JuQWg9+OThZUi0HqRgoI0CKxFlJVKuLCeRhGV5iUTs4YEh6EaRduVYtHIMQdeaY4Ys3ELaZfz3Xt8s9F+tLBsH48WaY+396176/CZ6tvwWZBtyPJF5Yuvw5Bc/dYShGE/iaqBhFlrk8RbrRdXxanCH9uXpJ8qL1YdWzqpradGH8j6Ehtj0o/xZ5T3f1JDvspdKW+963+PfpoVYUsRj+d+YSVALeROYyvJDElv2JjDt6d8qP0bfY/Yh60QTiY6dDLaHt3kmtR0j6anFA9atvzf/oCRBLRR1cx3i2snaiLQ5hkNJb3u97q5DADqipsZ5goZ4eRsDkCQ610OchGQb31cpz7zmJ2niscxJPeAalrLJAp2oKZrIgyn/Ikk5rLIWT9Z7bW0kJJs135a5Tc6pZx293wxTGkYUm0wJKNetJymONWfyu2wlOJnERhBJeWYpfAoVhUlR1jCFXyGm0IvwYinyIqnOrD6Z8k8qMFvjscpaPFnvtbWRabis+WbKQKYU9qyj95thSsOIYpMpAeW6sdR2lsLaM77Cw3vf8+79zI9gEhvBLTfftOsdV121bLfzOlRY0E1eh5XISSIhytipLMt5HG5kX5aJjkFZlRNXmv5qsk7AoaSZ1YqjmHS2YtKSZdJrgdBaZuGxxuv5xrwwaSx1nacPa3yTdyjyJDhhd6ghEOI1ecA12H3Nk+qrdv30AHh
4w6mtsN36L3NJBcZL4KG1X89P4Uc6W3nTmzPePWHJY+iBetv1wqQB+np5+hgyvklIBJqOwAv3bN3BrN1fuyeXJKgeSHBtN5cORh6nEEvfYDlKWTDnEUnHkp6s06k1FgmhzubK22fklPc6X3md0yL6GMs12poHj37CkmonLxFocOZRV+LehBGaGzEAU6popevSko3UvLb4KH1EA11aEGW9ko41Z6ycNZZ6fdhcefuMnIJRCHTvnPcAxLI2PO7cjHog4wpNYiNoORTJkFgZUsnCMr2Ampqjh4QNs9qSbXiBOOpQVBbqKsNUNci2GtRThp565sHr7FLIgt+q542FwVp9MngwDQ6tbs9DbK4Z8AjjMTpvsi8tRH2II5H3m9doEhuBpiMoFN3FC3l3R+2eLGHDItDbVtqtmiTvPYEuMtAJWH7H9MxDVFfghd9qSWVWnywAygPjbZE32MjisUfH0pIyhupphkgEk9AR7N69e9fOnTuXPWMhsVILzu5OPYE+rK43XJX1Ie+arUQZ7N5phcKyxCge3qN3Y9YmC9e2Qsa1uWNBN17dT4Si6+sha81ZKrto8FxrHJImryPQiJ0IwPITLwpfPoaGN3pHZl6O2t2S3Ts9EoJHL9GCuGL8ynF6AVK9ep7W3Mk+I215aShsmEbWmlup7Ho8CoeE3k9CImhZDeSJEE195pUgtLpWOi7mP8BOM5aIteaFgUpE+2RUj7+klZMnuTfRppQEmLegx1oifS6YtWSIJBD1So1IMmyereS78ru0rCkeSYGNa/ISQctqUKj3Hgv4JAitrqUTYP4D9TgY70yXwEAlon0y0hJiyJPcC2zBwD97ko9YmvkxYNC8PiZjSB/Wmlup2yzJYQhMmkaTkAg0z0JJbCe0dnVmR2/trMxb0brjDwHZ8Nqko/EMjHcNPMPyd7D0FAwezaOPsaSJMbwfmTcf+za80kdEp2DFijCPSfZe88Yt35CUhicvEbSsBoXYTujZ1aOw10xDbd3xh9zRvDZpJikxDz0rClOTsrxwYJa1JKKPAWxpYqhWnXnz9eiRahoS6Wmly/Ok05NtMwtSiyaxERRqQUAzGGsG5MBs2tJ+W6Cfa9urtMeWtiUMea/tVxun7EPay7U5qanlZ6DxHrF5Wz4JjEdtbSz/iHmCmEhiPipee3zLl8GaE7k+zOciAlTCvlNP3ePMEseQyo522z37V7wru/d93/rO0Z0R+N4uL/+RlLbuvP/gUavDjXfch9+7/WvLnt/3re8s8yis+yrPSh+lbOHhlds2q333jFP2UfpuzUlNpX7hqYxXjlPOX4RPOS+t8dQ81X3JspLv6FwOodJ3mStrnJLkN9YaJ+tTfp+eObTG07PWk9ARFD8CTaPNIv0s+yqzO0udgXYftnwTvGCejFpaacte7vVhsKI067JefwErEtCKwqyjD5l/hByH97n3vVZGzo3XB6WlE7L0RZavikXa3JZ/M+ybaYGXTupqoGm0WaSfpUVuxSBY93rLNwHwQVRb49S00r2elNZ77T4c9RewIgE9UZhl7iwPwXmiJllei17dQUsn5E3U0qtfYnPbmrMWQtGkNoKapE6A3Xm9sQSWTkEj733WihOwxqiNwfKHl3fK8p4lB9X6YnoFi285P1Lv4vHVt9YjOn5Wj/Fbt8F+yzZYvINcg1KPvYukOGOkza2MkZFl1yR46Q2378kvuOYT+Ybb97h+z4N6+/DWi7TvnY8rP/i3apuevqLjtfock8ZY/6HfVKT+FL9XAHdn8m9wEjqCVsozGePPEFwKMbt5xGe7UK9fgNczTdOJsLIWgo3X/qzZnZl3mzVX0gOR6Qh64hq8eJDW+kf0L16PUsZDXf9V2zaHErl4sQ9acyu/EdlWy49gEhtBwSys8ecefPTQsuQaBVev/GYJLSSWnfzrwUUsdMqGvuQbpR5LiKG130p2AqxMNsISYMiEGq3kHQzbzuJb8lQSuzD8RC2ZipWQxosHaa2/llyFJRGx8A4tHur6JcGJxa+H79a8aXiQ5RuRbU3eoajgEdT3/i2nbADgt93L+5+s12ufj5atidmpW2U9/hCe+H1P36zPHiwH7W6ttS/rFJ2G1z9EkkePY+kyrPmwSPMnkH0yXQH7biWuBJuXen01nY/bN4PdGY7lf0VHoN1xeu/bY+oYht7vhtQfqm8Y4y49dvm6zlD9QkT3MS9dRuu7lX0O1fV4+maEqesIND8CZtPvRff13jFrYjb5qN/AmNgI0T4slGeNvPdZy57uua9Lvw5vhGAPVqHUn/TojbTxa3PP9CdMLyF1PyxuQ/Ic0WNN/mpQyGPT90T49fzWaAycPGBcbIRoHz3IPtbceO3pLT8FFgvijRCMrIGF7uRpozV+be5lBCfzC2mhF3tiY4bEt9Q0aCNIKV0N4BcBZAD/F8DbAGwE8BEA5wN4AMCVOefHom1H/Ndb5a33rXo9Nu76PijLy/t63Y51d7TueV5eW316ybpLe/Ua5Z0W11HPWcs2znw3NJu+1BH0xjdYPGvPPH4RLV+OUs+j62JzgXXrT6CDYncG6z8A5wDYC2DD7PetAH4BwH8EcO3s2bUAftdqy+NHYFHUBtzbT6tt617X6muKdudj0W7vulnlNJv+WDqCMb+Z6Pfo+U5YmXXPPeMbeU5JUI8HsCGl9AyWJIFvALgOwGtm7z8M4LMArok2HNXgWuW9J2YPaZrqKI/WCTIPmlcfkXZ7180qp9UbK8JxzG8m+j16xsnezc2zEMA7ATwJ4BEAfz579o+izGNWO5G06Faa6THSZ/emrh6SPnzstOiR8tHU3TJF9xgp5aPUuxbz7HtqbUpCw2rQHYacUjoNwBsAbANwNoCTU0pvDtTfmVK6O6V09yOPPOLuV4Z3Wr+97XjeRZ9730f4nkd57xyW5yWUOzrnY1LvWsyz76m1GaEhV4OfArA35/wIAKSU/hrAvwTwrZTSlpzzgZTSFgAPa5VzzrsB7AaAHTt2ZMDnuGMpk8pv6ZBhtdNKNmLVkclFpNLQo1yMiptepx/Ge+udVG4yJZt0CmK8tBRalsNNdPzzvFKxwK4epavXyWkInxFl4ZCNYB+AV6eUNgI4BOBSAHcD+A6AtwK4fvb3Y94Ge0xbLIQUaMM0WWbJSB0JDWWZeJgpKGIGiiaziITMWvBnwHIznAWvpq0rmwOvWZCNfyxzmkZyvYckW7FCwMfgs7Q51zDknPNdKaXbAHwBwLMA/h5LJ/xzANyaUno7ljaLN3rbjOzmLKSUuaxa7WinmrXTa6HSJf0YsNJNVI6zpbzqNUlGyDKpet1eLSWcJwTakkbY+CMnqDWnnnoaz1HpTI4zosBsfZPM5Fz42/f0U0+wdgdZDXLO7wHwHvH4aSxJB2GK7OZy14sm1ZTttJKNlD4kabDfdbIRQJdKPICc1okxBuS211mHjccLLKqd3qyuRxqR9aPjjZ7C2jy1nLY81AvK2vom2bvC33EnbtzE2p2Ei7GW4CTqWmqF4fbAnlthuRakGQvP9ZC3jR73XtmHhF5jbq+eFOs1aRDtJeW8N+x4DBhzOVesTS
9sPWu3h0dv+LHkRQs1LvD0ko/C36duef8DDKpsEhtBCUNuhasWKs+jYbisvVLvk/d+c0UIqhWWy0JLrfBcD3nb6AnHlX2UsNVShoU8a3PUosJDCSE/+9QN+MqBJ0Jhx2U9h24C9VyxNlkIe82/Jzw5Qt7wY8mLFmosQ58lf+99z7v3TxqPoCQ4qU+9srt5ADY0igYZRYAoe6WVFs8WSEhp05J05CncSlvGTkjruTX3ch7qdgrgjOTLG1zF5itS1pIOPanptPY1INFon+w0l0luvbzV/LXAS1c9BDmPFIZ8LGke7sreNscOY50HRVypPXWifVhlx/6mtDkfu88xvq2Wi/EkJIKiI9DuYt6Tohfmeki6KikhsFPbQ2ycEXgsLYFpS4LwnlpR6oH59sKkaXoH7buoxyLvzkzC897T5XxpOhQGVeb9NmSfPaHwkr81oyPQ7mIWfFQhC+rL0jl47u9MPyGfD9EJyHFG4LHq+70HPqzc1y09SpRad2b2zguTpukdrPW2YMOi93Q5X5oOhfXp/TZkn+yba7Uj+Zu8juCPd+/eddHr3qjeZ4cmm7BOkB6AEktv0aNFlieGBZrBpBMrcUb9e+hJOQ/yWmqsu3FrDdhpa93T5fsWoKxM0MMS9XitJj26MlmmBUyy6vqBnDPO3f6Sud0px7gzH8sQ4aEhzWPytpr6hinM8ZB25LsoZBnra8h8YOpQZa206N47JTu1htj0mTQRvc9G9BLenk7VVgAADNlJREFUO76lp7A03B4dgdfuPoakYN2JvfU1Xhi0mJzjqOTTWqsibRW9QUn7bsHzR9e7xbMc9+Shylpp0b1ptpi3mBcySiPmdej16uuB2bL4HQrdVZcD0Gzb8n7r8bG3+BoLPqz1js1xtG9rrZhX5l17H6Wpy6Pr3eI5sj6TkAgKeKl2WlmaXAvskdn0PZKBdTpb9zTLc0/TeFv+A+yktMalzQfTEXhPyB5diAXCaYGZMvLMqWV5aflcaNSy4BQPSvl9Sn8AKykv09t4vD3leq6ZJKjaacV2PMv/2wI59UgG1o7vTXQJLN/1W77preStLX57k6dGow61PiNkgXBaYKaMPHPKfmsxIx5qRcICKxOS1nEo9TfVSoZqRcnK+hp/ZT1b0YeTkgg0jbalyY1aCWT9lmTATufoiRHx0Buql7B0Kq0+rdM5Stpp7vWkYxKRpQvy8Gp5cXp1BC1Y862nb3TBy1vfMbOaRL7JUqflR7DqFoMchCqzKKoR9mhjj6V3o8X/PHiZlwViTE/LMedlXt6N9e+xrF1jWhEwdatBkQha5NXAW/bXntOAlenVmrfqWSemdfJ5veS0U7pX72DNbY8OgWnVmS5ISlCeOY5aXCS1TmeZxNdrDbGsBEPmdvJWAw95teUsmUV5z8q37uCsTK/WvFXP0tRbse+WNlnr22uZsdpqrYWXGOpT3bemC4qgBnmT5ljUmp/b7tnfRK2yxi/LjTG3LVozEkH07stOCo8mv2h82f2MeZZ5JYXInTTqQSlPTnm3jpx61klp3ecj42L8M8mukCe1mGWBYhLCkPWUUZZeDASv9yqbR63N8nfNWA1aZJ1akthJ4dHkAys1vlpfrC3rhIlYLrwnAbOiDEnv1TuOnnG1rECaZFfI4ycSRTvy8GbNAwAVvYrNg+TNa6Hy4EGWv3PBLJw6Fbw2ibcncfhqHLqCO1dSspc2GE6c1YeFQ6fh7sm+ZBmGUCuxGyO4ghqiroasa81DjTOo8dIau4VByOZB8ih5ueKilenCZR8WorDEJLTm4YqLzsWBxw810bQtHERrPgppeInl2eUvOwsAjmJqthKcfN9uBNIuDaz04NIkhTvvP3jUK6yQxyfBg5zMeKzJkgCs06p1knjxESP6BTkPXskgmpQ2mgxX8/Jjc2DpVbyJZGsepY5AlrX8YLw6gFYC1tJ3kUre9d1nDrN21uxG4MWSZ6cBO8W0HZjtzhIBmSEne9Bz2Qln8WL99fQtJRs5DsYbk14ikgGTcDSJp5ZWrNwC9TxE0Zplm5aEoI3FkkJYG5Ks+dG+TSapziuvwaqSV9PrRctttePVQzDPtDH9wqPp3j19S8lGjiOqyY5IBpaEw6QVy0pQz4PX6uH1avSmuR9i/fHOj/ZtMkn1+1JH4L1DzbMdz/3bW26s8fTy2OJh6PPW2LySjaa/8N6pe/kcaz408paN6lK0umWu5pYEdaz/hngWssScVnmZwNNb39Om9bz1vje56RD+o22NkYC1N3HsGAlnZdl5JHUdK0nrmN8Q5pEEdSpURCWZmNMqX8pF63vatJ633lt1WBtD+I+21ctjZJxjzqnF1zySuvby722n9b6H/zV3NZDKo2iKs6KkkaY+TXRmijbGg1T8WGJcy/TTUkTVPPSmePMoDRkPXrNiax4sBSzrgylT5XxYSrjWeKPXNG38lrmzpUDVeKwVsK0Euz38A2twI5DKo2iKs1aSVKm0YYo2xkM0IWfL9GMporyKSjZ/9bglvyy01as0lNRSaHn5s/qQ8+FJP8bGG3Xf1XjzhA9HlMPMKSoyty2arIuxN3FHNMVZcUmV0FGaiyYLdJFurd4EGJ7QWeZKasGWl/JW0FHdPgPYjAK9RkNpW9QbEDQEki4awNMKPy7zZbmGs3Fa6zYkBdyaDDqKnICe4KJCmkQgJQPL2YMlP/WORbbDkoPWxNK/M1Ofx51WnjK9Ek70dG9Rb0DQEEi6oRKAto6WmS9q5u1NmuqlyUoEbAf1ApVYCUMkdFQtGbB0a6wvizwnqRdIVI5PSjQRUBE5F9EUX9ZpOyQMmZ22FmAHO3E9fUVh0VrAoywhqcW/FWRljaFVZs2kPBuTxoA7Xw1Akii89bEEMFmNtucJJd/bNpv7MUFuxixXykw+5dkQYBJG3tOoBVxphacO5bHFrzx15N3RAtyMJhXVxiF1B1Yos1XPc2rJtqMnfWQNvFKWbJslmNV4i4aRt/QOnjG0yrSgykwdQUrpQwBeD+DhnPNLZ89OB/ARAOcDeADAlTnnx2bvrgPwdgDfBfCrOedPW314KHrfjAZtaFr36H21507M+pK6DHl3tAA3vS6sGjG3XiuUubdeXVeWiQYnRdbAG1Yu22aWmp4gMlbOu36e77yUGRp0dAuADwD40+rZtQDuyDlfn1K6dvb7mpTSiwG8CcBLAJwN4L+nlC7MOX/X0U+TNPu/dDXV7LFWwEnL7myFp3oCQHpJ2riLn4A3oEfany17dT0e6ZtQeChhrSzQh4W/ekOga757qRXM4w3gYfwyvwPtG4v6DQxxJR6DzI0g5/y/Ukrni8dvAPCa2f9/GMBnAVwze/5XOeenAexNKd0P4JUA/vdQRtkJ6T2l5HuP3dnS5Hp3+x5igUDegJ5IUJUcjzzxZPAKC8ph4a+AHQLt4c9DrZM0EsCj8RuBXI/6DUQln7Gp13x4Vs75AADknA+klM6cPT8HwN9V5fbPnjXp8LNHVuyW7DS2gk8keTy8WP1yuvSG5XpDpTViEos88aIeeq2+WDguOynZX1nO48041snXao/NnTytrXXzrAHzY
NW+Ic1rlUmwPd+Sh8b2I0jKs6wWTGkngJ0AcPo520y4pfpdDWFV/9XIc89n9S1bfd2G9nzIacckFnnijWHD104v1kfLr4CV83gzjkWt9tjcRUFpPWvg1Tcwr1VWnvE0lHo3gm+llLbMpIEtAB6ePd8P4Lyq3LkAvqE1kHPeDWA3APzIj70iv+3S7aq/tCd8tkXWPb9Vp5zGEm6K+X+zE8LDu3Xftk4WpiOISAK999NIrMFQqYkBlXhO0JaeyTP+iJTl1Td4w6nnrTPo3Qg+DuCtAK6f/f1Y9fwvUko3YElZuB3A563GtCSoY3lSWff8Vh0GqW35f2tj8PJZ2vB4QGr1In0PqetpR2tvKCQ8s0zI+lq7liXGGn+vRaKlb5Dvo96bY5HHfPiXWFIMnpFS2g/gPVjaAG5NKb0dwD4AbwSAnPOXU0q3AvgKgGcB/FKvxcADL+U5CaSmV54GrXurpYdgWuSeMWhSRt02iy70WgdaJ6QGzeW5t0oePKeVpk/QTunWnJV62tpouhVW17tuY1s25Hgicz0v8lgNfp68upSU/20Avz2EKcC+h0c1+VLz7YHuAtp6iKHRepIHZhVo+Qt4rQOtE1Jqv733Vm2+LIr6S7T419ZG060A7YSk3khPNv4e6p3redEkPAs/8Icf3PXPL3ptMw2X9DxjvvTeGINIFJc3wYVsyxqDxgPzpCu+59JLT3q7RZK+liQcMiGn9HeXvvSW52DEh1/GRjBPSa+nqBZr8aptm10RnayvHm9G65thUbBWlOUQakUfTmIj+J0b/mDX5096BTadtB4Xv2hpZz5lw9L/l8n40Of24sY77sPZp27A1ZddiE/e+0189AsP4exTNxytAyx9kNrz0t45p21c9tcz2aXvmr+aJK/suRyDxoOsU37fevfSSbHv0afw0S88dJSX0ubBJw/jrr2Prhi3bLe0s+mk9Xjw0UPL5krOUSlb+JU8yL+SJ/Zb46us5/Yzn4urL7vQnEtGsr2zT92A1730+Wpd61uRa8D61sZnfTOl70OHj+CuvY/i4JOHj/KijX8MmvxGcMvNN+16x1VXNXdB7y7tTRIaScdl+aB7qcWbV+qwsBGsU7s+cWSEnOXvLk/bcppZPvcRf3jJiydOwdOeNsdev3/GSwsrwUprJ9fNihYdEsdSaPJ4BJrVQJLX88ry0fbcwTwa/J77W8TrTRLTMrM4fK+/f+se3rK7Mw9Cyav2W6MeGPZIe9r4vNaBnvu8pbvR1m3odzuEUs6qv88xpZTSIwC+A4DDLXtp3foT1m085YzvPvX4t6EFWVjvtTKz30eefuqJ407Y8PzvHnpiH63by5uHLy+vGzZtPXL40DePO3HjpiNPP/VE/bdn3Cvq1PNhtdmmM8DWfLw+VrRptrW83CasW/+EykvPt8SeD/tu+TwupxfknJ+nvZjERgAAKaW7c847VpsPi9YCn2uBR2Bt8PmDwuOahzNf0IIWNJwWG8GCFrSgSW0EqjZzgrQW+FwLPAJrg88fCB4noyNY0IIWtHo0JYlgQQta0CrRYiNY0IIWtNgIFrSgBS02ggUtaEFYbAQLWtCCAPx/bzccBieIOU0AAAAASUVORK5CYII=\n",
"text/plain": [
""
]
@@ -215,7 +215,7 @@
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD8CAYAAACxd9IeAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAAgAElEQVR4nO19f7BfxXXfZxESSIB+8iMSgiCDSOoYbIMTm5Dp2CHUKc3EmQ44bccex3UGZ6ZJXdyODc0YxD8d48YQJm7iyoBNE6c1BU+dwR0HD63/MElIJeIhsR1+1GKEhAy2hCGyFD+wtn+8t8/7zjvn7Dm7e7/fq+p7Zt689+7dPXv27L13d8/5nLMhxogZzWhG///TSdMWYEYzmtFkaPayz2hGJwjNXvYZzegEodnLPqMZnSA0e9lnNKMThGYv+4xmdILQxF72EMIvhhCeCCE8HUK4cVLtWimEcF4I4X+HEL4ZQvh6COEDC9c3hhC+HEJ4auH3hmnLmlMIYUUI4a9CCA8u/D9aeUMI60MI94cQ/nZBz1eMXN4bFp6Fvwkh/NcQwqljlrdEE3nZQwgrAPwnAP8YwGsB/PMQwmsn0baDXgXwb2OM/wDAWwD8qwUZbwTwcIxxO4CHF/4fE30AwDez/8cs750AvhRj/EkAr8e83KOUN4RwLoB/DeBNMcbXAVgB4J9hpPKaKMY4+A+AKwD8afb/TQBumkTbDTJ/AcDVAJ4AsHnh2mYAT0xbtkzGrZh/4H4ewIML10YpL4C1APYACOT6WOU9F8CzADYCOBnAgwD+0VjltfxMahmfFJdo38K1UVII4QIAbwTwKIBzYowHAGDh99nTk2wZ/S6ADwE4ll0bq7yvAfAdAJ9e2HbcFUI4DSOVN8a4H8DvANgL4ACAl2KMD2Gk8lpoUi97YK6NEqcbQjgdwAMA/k2M8eVpyyNRCOGXALwQY9w9bVmMdDKAywD8QYzxjQC+jxEvgRf24u8AsA3AFgCnhRDeNV2p2mhSL/s+AOdl/28F8NyE2jZTCGEl5l/0z8YYP79w+fkQwuaF+5sBvDAt+QhdCeCXQwjPAPhvAH4+hPBHGK+8+wDsizE+uvD//Zh/+ccq7y8A2BNj/E6M8RUAnwfwsxivvEWa1Mv+fwBsDyFsCyGswryh408m1LaJQggBwN0AvhljvD279ScA3rPw93swv5efOsUYb4oxbo0xXoB5ff6vGOO7MF55vw3g2RDCTyxcugrANzBSeTG/fH9LCGHNwrNxFeYNimOVt0wTNHhcA+BJAP8XwG9P21jByPdzmN9aPA7gaws/1wDYhHkj2FMLvzdOW1ZG9rfiRwa60coL4A0Adi3o+H8A2DByeW8F8LcA/gbAHwI4Zczyln7CQqdmNKMZ/X9OMwTdjGZ0gtDsZZ/RjE4Qmr3sM5rRCUKzl31GMzpBaPayz2hGJwg1vew1kWwhhOtb2pw0zeQdlmbyTo6qX/aGSLbjTVkzeYelmbwTopaZ/WcAPB1j/FaMcQ7zkM139BFrRjOaUW+qBtWEEK4F8Isxxl9f+P/dAN4cY/xNqc6ZZ54ZTzvtNKzbsAkvHpnDhjWrsOrkpd+buVePifdqKPE7/ZSTcfgHry7+tvCfe/UY9n/3ezj3zPVdZJFk6tlXTl5NpzX3Wsco7/8LL76sygtgSVstbXN1LfzyMi+9eBBnnXXWsn54nqkaGay0e/fu78YYz2JvNkAJrwNwV/b/uwH8HlPuesxDJHedf/75McYYb3/oifjjH34w3v7QE5GSdq+GEr93fvLPlvy28O8tiyRT775SfrX6ruHnkVPqf86fttXSNlfXwq+kh9ZnqudzBmBXFN7Zkxs+IqZIthjjTgA7AeDSN1wW7/jyk7jyok0AgGsv37qM6ZUXbcJffOvgYplnDx3B/bv34drLt+K8jWvYa+n/Ky/ahEeePrikbOJ3zSXnAADefcX5eMtrNrFtU76pDFdWIou8id/2c05b/H3Hl59cvE7rW9tKfU38kj62n3Ma3rxt46JOc6J9zHlK97Txs1CSUxoLTu9XXrQJpWeHPgf0eeD4WsZYKkPHkdOv1Pe8rEUG7Rm3UsvLvhjJBmA/5iOv/oVW4cUjc7jz4acAADdcfTFb5pGnD+LRPYfwyNMH8TPbNuH+3fuW1aHX0v9/8a35unnZxA8AHt1zCG95zSaxbcr3vI1rxLISWeRNfO/48pNLZEtU0pHUFu1r0sebt21cotOcaB8pT+1eLSU5pbGgMiVdldqmzwF9HrjxtIyxVIaOI6dfqe95WYsM2jNupeqXPcb4agjhNwH8Kebzc90TY/y6VmfDmlV471XbXV/R9Dt92bkZh84UXNn8i5hTPpPRmbHm6+mZPbQZLu+DJAPVTZot6MyW9700Q3Cy1szo3AqnpI+aOlyZNNOm1Zw242pt0jLSioGbrUsy5mNhec5Kz7GJpPX9ED+XX3559V7Es7f07IG4/WHvvbSlbc+9HmWHtl1Mqo7Gx9JHz55dsvvUyj2ETQgD7dknStqXXVoNWL5+0v6w+uvpIE+favlIZT19rLFdTKqOxsfSR8+KgZvZW+Tu1V8zSV+BIX4sM/veg9+Ptz/0RNx78Pvs/5Y6Fv6Pfuu7y+pIbXNlPXytMmmyeKimbkt7Gh9OL6W2tPue56OlrKePHj75/V7PbU44nmZ2yfiW/rfUsfDnDB0ew5+Hr7fP3j5Z+A1Rx8KH0wugGyE1WTzPR0tZTx89fPL7gN8YOxUDXQ3NvXpsiUuIc0/lRiaPqyUvYzH0cO4SamjRloOS28tiFKLEGW2kPmmuF02Hkr4tbkbJ1amNH2dEpTqUjJCakTC5EXP3olRWkqV126S5hzVXZsn9R4njm28lvEbkib7syfVmmVU9rhZaRvu6au4S6hbJXSLUpVJye1ncMFQmAGK/LV92SYcWfWvuH8sMKelc0qE2xpwsiX9yIwIw60Hja7lHqeQe1p5Fzq0okcT3Z7ZtMr0flFbs2LHDVLAHfebuu3b8xvvfj1/96fOwZf1qXHv5VqxbvRIAsHXDaqw9dSWuvGgT7ts1/1XOyzx76Aju+eoebN2werHOipOAvYeO4G0/eRa++Pi3F++l67/60+fh3A0/mnny+rS9rRtW47Vb1mLtqSvVNhNJ8r7tJ8/CwcNzS9pOJPHLrycZcrlyeTndJaL9Tv+/+4rzsf3sM1R9p3ZyWV4++gru+eqexb5JY8O1reku3aN8ubJU30m/11xyDo7OHVs29nlZOo4rTsKydjQ5JaLPXa6PpDPtOZP0QeWj/eB0R5+DW2+99cCOHTt2cnJPdGZfdfJJ4kyZz7jW2ZrOpukeB1zQgC01IBKpvgawsK5EOL7aKiMR7bcGXpHk1/aU2mxiAUNJ/bXMUhoQic7wElDIssKxkPTcActXZqXnWpNPW+F45E000Zl9586dO66//nr2a1r6YuUzx7GIJWXTbJe+ntvPOW3Z7Mp9Jel16avMzaK0vjbr0T7Q2YhrxzMjSHy4FU5J/lx3529cs6S+pg96z1K
WtsnN0pK8acy5lQ7VS9JDWg3k+pDGxLKq41Zx0rhp42lZtUl6pqTN7FN52e/56h7c+fBTWHvqSlxx4fwsla5tWb8aN1x98bKO3LdrHx54bD+2rF+Nbxx4eUnZczeswRUXzivxzoefwsHDc3h0zyFsWb96kf+61fNtUb75dSqXVIern9qW5M/7cPDwHB54bL/aDuWb6ypRSd5cZ7Su1E6uu2cPHV1SX9MHvWcpS9uketHkTWOefuftUL0kPRydO7bsuZDGhHtGqQxffPzby/QrjZs2nrRPludNKjO6l92zX077mHzG3HDaSnZ/yO2b0yrAsiezzKaJSrMqV5auRLQvOJXJskenpN2n+k2za76/l+wHFqL60WwBnlla4q/pTJs5qV2DW6EleemeWloVcDKUVq5WKtkYRrNnT+TZL0v7LG1/mO+bAbsv07K/ovKlMh5rNiDvuyWZOOJsE9b7VL/Jyk3391Z9UNIs+MDyMUny1do5cqI60+wd1K7B9VXyamh791KAUS218JkqqMbi9+T8syV4rMeX6ZXLU6amrIdKfD361aClPaCvtWMitd0bWmuRU4LJ9oLl1sjrIglaN8RPgsv2hremexYIrIdqILAe6Gsv/tOiWllK+hiqbx6o6pj0rBGVE2ODy/aGt0rxy962PG17+lZairbynxbVylKzNO9BHqjqmPSskUfOqbzsFmhmvrwEli6VKBwyQV+5uPB8qcXBCy3Q2tYIMQlCyfWtRDlU0xMPnagUm63VoWU8cdw5SePP8bNAda3yWrYSrdl4SvqtGTONPMv6qRroONJmu2RckYAsHvAIbY9ep3K2GNQsxhor/9zoBtiNj7RtbTUk1aFlSgZCiaTxt4ChWgKjLFDV1hm9pN/eKwYPzHcqLzsX1AGAna25YAMu1xqgB8J4DD29v76UtBmxlF8vzye3ed3qRX7WoIjUT201RGWhwSepHYvuPDMxlyuPrsy0fHpSHU/ettrVCtc2zRZE5ZdWAR55PTTVPbsW9khhp3S/pQVDcF/PUh4xTj7KtxdZXGKpbclF9sjTB0VorUYUdmrJyScFn1h055mJuVx5dGWm5dPj+ui1idSuVmjbwFJ3Iic/AHYV0BLGqtFUXnYt9xrn1qCz94GXjrqzk3pI2mPnX9qava9FPqnfmouspr+WGYzOUpYsqtz4eduxugo9OfqGzMrDkcX2lIjqtyZMOrWHFStXiYUkM/0QP8n11ppfbah87jUy1OSjr22zJx1veeUmxbcXteT6q3mWUt0VZ5z5XBTev9HAZSk8ksI4c0iiJxTVE8IoldUCVRI0lwsjtRLXrge6a+1HThrUU+KnhR3TIKJElmAiD6U2uWCnUh0uxNXTpgTZ5fh6glsS1dShY/Olz9zxzI6bP/IHXNlRwWUBGcYJLN0vWi3qPdINaRZ2y963RJqNoQdUlSMN6mnlZ/Fb1+Te10jb35fq1O6Fa3ASrZ4cax0q2wd/+MqcVHZ0cFnNmlkD5xwa1tpjj9dLfk+dHlBPi9+6N7Xswz11uPpS/2v59iDX8yGt74f4ueT1b+ySgbUG6liTKdYD5/RkwR0K3uvtz1D1Wuta+LVk8+0lZy8ZPG2VMuViLHBZLQddiVqhjp6lXA2c0+NiGgreK1FvWOvQdS38WpfmPeTsJYOnrdRGjdwTfdlPP+VkXLptY9WxPNxyxZKdtAaaWgPn1IA9AFgZLG40D8BHA+Rw/SnxoG5AD8hD60fSRws/bgldc/ihZRlc0mvtMl6SVwOFca5oK030ZT/8g1fNGVhL8eKa8aqUrbVk/GiBc3LyA/b47ZY46B5ff1qn1kio9QPww3wtMeo1IBrPoYqJXwvcmeNrgdS2GGwTTdT19qmdO3dc/vbrWDeVJ/NLIksZKVuLxR2jyUSzrVB3VH6dugol/rV9lMpactBZ2uuRrTXnQTPg1LrEKGnZdL2kZfxNY2txHVvcn1RebdxactBNZWbn3FSlmZwjSxkJ/mjZb2kyeQNraoMuPK4rWrYG+ukJ5PHYHLSVWa89b+koaA9ZVjgW13F+zyqvNm4trsyJvuzakc00uKVXEIC0d5QCbri63J6V7t+1oBEA7J7PEtShkbSfl06G4eC+Hj1TuTnbiyeQJOlGGwNPYI00Jlofa8JhKRQ4H2tLnyR5tbZ7BGedVFWrklLeeE7Y9DX7wz/fizsffmoe59uB0pcwtZm+vCmQ5Knnv78kMIGr+8jTB5fJRENNE98//PO9y/qR7qX6tCzXtoUoX3o99ZGTX6praS/J/T//+vll8lO9aJT0q42BpDtObmlMtD5KZehzw13jnllLnyR5tbZrxovSVOCyibh9Ec0B78nwaiHtNI9S3nENskv3X3mG2zdv22TOdurpoyUXfikvvQY5lmwWSW4ub7oHhlvqR37Psl/25K7vke1VG0ep7VynNEsyJe79KMmp7dmnemSzFsxgCXRoCczQgg0o35Y6rYEakwr4qAk4sgQIDRUY1KqXaQXS5O3W6LdEGAuoJhG3Z9VCAqU9vOQjbU01ZfHp0rao37MHjLaWT00CCcselfLnfL0eCGnJ5sD5mXvpV9ova756j7yWdqXkI5x+uyRUkb4CQ/zQENf86y/NCGMKq5x2W1YaapUh8e/NZxI6LbXNrUx6yGtZQbWEdaNlZg8hnAfgvwD4MQDHAOyMMd4ZQtgI4HMALgDwDIB3xhhftHxgtK8/vVbzBe81q46tLSv1ngVL/HvzmYROS217koR45LWsoDyrLBdJX4H0A2AzgMsW/j4DwJMAXgvgYwBuXLh+I4DbSrzonr0mf7w3CGVS+cEtgQpDBdhIdaaRj77XmEplanTVayw88reMm4VfTSBM0fUWYzwQY3xs4e+/A/BNAOcCeAeAexeK3QvgV7wfGo87QStbe88rg0c+jm/JVVYjvybLxx96sot+PdRrTKUyNbrqNRYe+VvGzcKvhr/LQBdCuADAGwE8CuCcGOMBYP6DEEI4u1R/7tVjYnbSklFJK6stcSRQjaUuJS2YgwJwOOOKFLxQ27cSIMQC7EjUmlWV64u1TQ1I5dmSSGOtGRKlsp4cd5wsEsBHC9LRQEFdtmbSlE9/AJwOYDeAf7rw//fI/ReFetcD2AVg18Zzt3VxXbUYcHrV9RhrWg04tWWHarcXDeWmo/xr+t/btTcp9y1aXW8hhJUAHgDw2Rjj5xcuPx9C2BznZ/XNAF4QPiY7AewEgEvfcFkswWUlF5ZWNpHHxZTPwDXhkCnDrQR1zGeGHrMd1zeL+4jKAvDhpZYVhEVXlrI9QkRrxtr7XKTrpT55QlLz1VZpFddrtZXIYo0PAO4G8M0Y4+3ZrT8B8B4AH134/YUSrwSX5YiC/zXAfym4BSgHlrSGQ96/ex8rgxSKaA1ekHLDcX2z6iGXBfDnikv8PIk/PAFGgD9EtGasa8uWnhVOFomfdpx4j0AmjSwz+5UA3g3gr0MIX1u49u8x/5LfF0J4H4C9AK5rEaTVfSFdl77Kln2tFZzjSaBRmhmp60dLVmANpOD04klI4dFVAkppJ814ZtySTNxYU341wT
nc/p6uQCzgF23FkMtXeoZKfTSRtL4f4oe63iZBpb1Sq23AU6YlH/gYbBcWWWrypA8N9hnKHuGxz/Qax1JZKHv2qQbCcFTK0e0NjKHBCjRhghYIYQnqsOR3l2SoCRLRgiM8OpKSblgCYTRdeRJHWIJxPCTJxyWDoG3VnD3AtUfbsgTctCQoobKNJnmFhaS9mGWPpu2dgKVnbnmSCwBybnXLvlCSQWubq8v1sYeOpOv0msWO4kkc0ZKCy8KPylZ7Oqx0j2uPtuWxLdT00aOz0b3snv24tS5XxmIBrinbYnOwkMfP7Kmv7VWH7lNPHla+Hr91ix6G6lONbKPds/eGidZAM2tk6g3nrMkxP1Q+80lCi4eWZew6svKi/cCKlY/HFj/7NKhmSae5fVqWuB6ZapaD2hK6Jse8x1XmodZltsSnZaxrZRm7jqy8aD9WrFl3psRvNAY6mpVTy0giEWckKmWBzbOA0iyimkFGOsiQM+qlsvQwQs1448kKQzOd1BjLLFlRa7LVclTKKGMxFlqMblrfaGYkj4482YA92WZrMizTsR7dwY4cSV9ajzGLMxKVssDS9rQ2LdllOaMeMA+eoIcRal9uzqhHjUwS6KLGWGaZ6XqBPLQc8LksuQz0Wk0Of26sgfpDFC19qwEkpTKe7MlprEd3sKMG/UwZSxNclAMhtGQEtYA/LKAPDiyRZ9jJASgpI0m6pvVN0pUlmCNRDczSYowcythExzOX35MJqGR85EBBluwzAJY8Mx69pr5oICNJ/toMQxJN5WXXoJ8Aiu4ujytE+tKmWZab/TzuNMmVl8/E+YrBekxyqa9Sf4H2fPFSnZac5RppszZgh/dK8uVj1XKij+eY6ESeVVaNW80zJlN52S2QxN4zjJQvnguv9IQ70llDm7VLsEuNP1cW8Ae1SHrxwC9r8rRpZHFXSXnaLO1osFY6TtpKqiZgx7JKLNW1rGpNJJnph/iZBlw2kSfksCbUcChIrFa2Bwy0BTbrydPWSi3hn71grT3lH4oPxgaXrYFF5nUkS7hG1HqrWaw9ucopf4sFXPI8WOrkZUveg5IerRbfUv54TneUn2fMNdgzteDnfafPBdVZXlbK2d7qcZBguNQT49VJouMOLtvqVwX8p4DSfay2l9LCE6V2PXuzGh+vpDOvx6LG4qvVsZ5F5hnzEuw5/18L3y2F+mqejFqPg4Qh4Pb7Ne/BcQeXrdlza/v8mvo10NLekErPHnAoOKeFeljhh5LRYpW3WPBr2rbIo431pPU62j17LzhjTyhmLazV006PzLAeqK2lnRp4aw3/GkiwFxJtlaFV7qHaLPE6ruGyrXDGnlDMWlirp50eS3wP1LYG7FED46wt2xKl56GacWx9RlueHYmXBped6MtOs8tqpAEhEtVk7KR1Le4u7rqUR80D3LC4ID2Am+TeoYAhTiaLGxBYCibxAJ08/ahZ6nLjKpXVxtozjh65LVRyN2tuVknfH/ydl74rNihN+UP8bN3+UxPPjjKUi8XDd9LuqJYjtDjXXuJXo+ehqFd2l0m74DykuVkl+TAW19undu7ccfnbr1sMErEEB3CuHOpS0YIZpMwhnCvEQ5ILhHOjeY4xLgVOaJlqJPef5k7SAkzO37hmCT9Nz6WMPTWZYLQy2nNBdaf13zOOHnktVBprbixSHyR9j8b1dvgHry6Bi1r2KJwrh9ubSW4SKXNIDfRRk0vbL1oy3lA+1kymXBAOdf9p7qQSVJXyK0FpS4FHXN9rQoe154LqTut/jSurd6ivNNbcWNDsyx6360Rn9s/cfdeO33j/+xfDNblQVEtOsBLwQZv9NEAIV98KANFyunnCKVty5klfe8vqiOonB+1o8kry0zoeGSx8uTGSVjaeHG+evIMe4BQ3e5eeRctYUFm0mX0qrjfLvrZmn2W9X6Ie+8He8MjeWVvHtiftUbcnfLj1lBqPvaMnYSx79gSX5WZmCaKqzWDSV96SZEIjKotmW6iZySyrl0RJV9dccg6Ozh1j4ZZUPsvMUwOTpdfzdlv0XAOt1Z4hiZ82jnSVkScLORZhTkBB+5ZmbU+SDA+NHi7L7aFqTu9IpO3nPHsayt+SibYUVplTjb+a7vc5uCWVzwKBrYHJ0ut5u4AfwtwCrdWeIYmf55Qa7uSW2tODknw9TnWhdFzDZXvDOVsgia1+1JIsHsiu5oPuJV9JXo9MQ7TrLUPLesKmOf5D6bmFjhu4bA5vHEPG0Rq+njaHhp1aaCgd1bTX0jdL3V5lpL5MWpcajR4uq0WyTSPjaA3flmiu3rDT1r4MQRbd1fTNszRvLSP1ZdK61MgCl51qdlnNtWA5VimRFrcsGcdoXcuxRxxfCbRjMeZpMdnU1WY5OsrSxxTHbQE2eTKjSuQBRWnjJhkhc8MXNRJqri0KrqLuOk9MvQXYo1FLHdpXLbvsVF/2datX4ooLN2Hd6pVL/s7v3bdr/ou19tT5/zm656t7cOfDT2HL+tW44eqL2TqpDOXDXafXNFnu27UPDzy2H1vWr8YVF24S26H9lfr4jQMv486Hn8LeQ0cW+d5w9cU4d8Oaoj4sfXz20FE88Nh+HDw8hwce27/YjsZPK1Mi2mduvLS+STIkvqnuutUrxXHLy1AZDh6ew6N7DmH72WfghqsvXizD6VIaP46vR2ctdWhfb73l5n2jssZrVDqgniMaFKEFmGjBLdK1Ul6yv/jWwapsuJL8eZbd0nHMJfmkOinQIwXLcDnSUnkpKKQ2H5qkXxpwc+3lW5dlHbYGpnD56nIDW6lvXKCNJ9sr5asFbXn6KOnQQqN72VtyZ1MoYU4eF1lvKGxpb8fBItNvCvG0woe1/uSupQSF5VyUWlZWS7sSlTL+5joo6YPjS/si7cu1vnny0ksycDBtzjaguVetOrTQ6F52S85z+pWsyeftIctsqoUnSl9hSyZTLSspnREt/ZdmUW1F4lkxlNrl+kFdY1zOfcntpZ1BQPvmCaHNr5f0wMlDx4Rz/0kZjy3Pvnc1BYzwZbfkAKNfydaglhLVBONoASsaD8pPAwXVBPVos2jrisHSLsdLO/0m1x3XN46vttqy5syrBWZJY8KBbLgch9Zn3yILpdFll7WclZUs91xAjdVa7Amr5IIZaGZXT1ilFgBSCuWsCbDR6njCVmkfNU9GDYTXoxfpecmvtUBUOZ1Z9OAJepIyHtcE4SQZ/vSP//PBUZ31Zv3K52W1/ZbnTDOLDFLb2rlwnrBKj9++ZD8ohZ5a6lj3wLSPnrRaLfBc7V7J5lK70uN0ZtWDtW0p4zEgr7ZKkOAuaalCCCsA7AKwP8b4SyGEjQA+B+ACAM8AeGeM8UULr15Qxx5QWA9U1QNZrYXwluSq6XOLnvJ6lj62QHhb9NKbPH2rlU2q36K7LmmpAHwQwB8DeHDh/48BuHHh7xsB3FbiocFle0AFxwBb5KgFklnDo8RrEtQCI55E2z0y0XKw2R598WTbpf9DCXE9yfLVCCFsBfBPANyVXX4HgHsX/r4XwK+YP0MLlJYe9+/e5606CJ+hKMn38YeeLMop9cXDo8RrEuRpu
7ecFn4tbdK6+f89+qKNtdZ2iazL+N8F8CEAZ2TXzokxHgCAGOOBEMLZJSYpu2y+LAZkd4+UMZaSZRnVdCBeIz9uO5DrgQOySICe1oMlLEcU99CPBHCxlPWMvcX1RsE1uTvUIh9tSwJvXXv5Vhx46ag5m7Gk8yS/5ThxizswUfFlDyH8EoAXYoy7QwhvLXJcXv96ANcDwMZzt3UNTEjUavypIQ8/zrUk9dEC6Kk5hrkkd2/91OQnsOQPoORxvWlGt9q2cvlTGSsASeJXczSZhSwz+5UAfjmEcA2AUwGsDSH8EYDnQwibF2b1zQBe4CrHGHcC2AkAl77hsvjeq7arudVrZjBK3BdTmjG5Ly2dWTR+dDbSZiUKuNCgqol6GKZy+SXQUg08VMtrTvlyR2NT0oAn0li0GtI8qwqqI8tzYZGTjq025tLqIl3HipWrWOXC8LLHGG8CcBMALMzs/y7G+G+3wJgAACAASURBVK4Qwn8E8B4AH134/YUSr1UnnySCB1pmMEocL8uMKa0qNH50NrKsWhLgQoOqJqqBRWr6AOwHGpZme8pXm6VSuxIvgHeZSdDX9L/H9aaVtawqpEzFeVlp5m2FcCcqrcyGOhHmowDuCyG8D8BeANdZK3KzSGmmbN3D01mb2+ukv9PKQ4OS0r7ks7S0IpH2Yq2zd2kG1mYVrUwJuizVyWc0rs8euwznlirBqSlZ9svaPlk6ecfyXPSGcJdWA5rrzfWyxxi/AuArC38fBHCVp34iSw46OlO27uEttgAaxACUZyNuf1XKYU/3Yq2ztxfWap1NStBlKcAml4Xrs2dvTtuoOVLZsl/W9sm0rGW/3Hr0s0Sl1cAHf/jKnFR3onDZT/z+J3f8/YVvU09jobm/W7Nz1iRK8CSMaMlJbklWYDlJhcJLa05f4WCuKdGF59Qc2sckW65DCVJqgTBr8GHp7AEOfitBVbVTY1IfOH6SHixJTWqg21LZ0WSXffHIXDFwQ5spa76Qki2A+zp7yibyzMp0/1qTCslyr6UOzRjrnZ2kPTrVIbcKsngENNsLYE9tJkFVtVNjPBZwy/7e0+8aLxWlib7sG9aswnuv2q7ua3vDIj0WzxrraI0sGvzWKn+tvKU6GiTYQh5LuFU2S3vaOFr00Fs+D39P2y1eqtFkl22BFQ4NB+XaGRoOKkEma7K1erLjevramh14UjDZmvEbSrZ8bDw6s5aFApcdTXbZGgNVbyCIpx1P2zVyetyAPepoZS3bBcCfHXhSQKea8RtKNu1gjdZxKtFUE05yRhFPFljJ4NVq+KLEZYH1HHpYypSrZTJNMfupj5pxU4rj1oyIdAy0+HBa35IdWNNPzQGJmu5Khx96xs9jeLUQd+ikR2dWI+HoDnZM5DmcTytLD8+zHKJXcwjgUAdGWg6xHOqAwDEc8KhR6aDFHodwToJ6H/Qp8cFYDnZMrjcte4k0C3KrAMlNlx/KJ7mMar7cnjraSoS6YzRXDu2jpW8WuWhees0VKa2YLO6klpzzgLxa8Rz3THlde/nWZfn5PYdtSjrl8v57MhdZZNBcj6M62DG53oAyhFACZ+SuEclNZ8nlVQNkac255nHHJKJ9tPTNIhfd12uuSAnYVGM/8JIEfa3Zs3J55Tx9koizXbRkLtJk8GQ+pjQV15sG8KeBAwmayGVQ7eEq8wTEaPU8ARo17h4LRNMSyMO5/2gG1gR3TWVpjnlNfinYp5crVYKuapBbWpfrW87PC3OVYMO1mYssz4Un4+0iSev7IX7ont2yFynt2VrJYgvQ9tLp3tD7QQt/SW6LTUArW7MvntR4te7ZJz2OvUiSE2PZs2vZZaW9iHQWl5d6Zyel9bQ9mSSLZz/rsW5TuXPZjkWo59hxe+BSllmLDBbS9EL36EkWzTtBidODh5/HUyT1pcYmwJH0PIxmz57IE4JKw0F7tsm1S69Z9/yewIehEnRIctMTZzi7iQUSbAlgacnsqulFCkbx5MzX9GDhV+PH72ET4Oi4ORFmYvBAZ5uT4t+7bzWy1dg5hpbbAtmlv1vTdHn4eWxEEr+hn0ONTAkne1HKQQf8aFbpQc8eOoI7vvwknj10RLwntanV9bSZvrStfaJ8S/9rPP5yz0Hc8eUnceClo4v3qJwl/QxBpT5sXrd6EFk8Y3TgpaPLZJTqc9fTtXR8lUcGy/PsfV6BKbveNPIsdVsiimqXVS3LMc9ytQYmWXKZ1fajlzutR7Reqywlfq19tPSppk7LczdRA91n7r5rx2+8//0iIACoiz+nRhXt6B6LgY6SxaCoETXWaEZHyfBH48NzYIjUJy4XAK1jOXIoEQf5tIJpch1KcfKWmHIKLa3NcyDpmQK0vM+FdHyXlhPBU6f03I3GQJdy0CWyBiiU4s+pUYXy0AxQtZlpawA29CBKLXtoIi2Hm6VPNBeABFbi+FPSsrlIfU38avPgSXppPeJJ0rMl4xDta+ojd81iAPXUaclqNFXXm8XNY4HW0mARja+2qpBIm/0sbjQ6C3kgr9oM5nH3UX7SjJbrnUI/NWhtSd683dLMblll1Lgvtaw2ngw4tK+5PmjfJJehZbbmrkvQ2qSH4/JgR092TkvOuBaYoTb7eXLbAXY4r1SXk8sDnS3NaLneAR76ac3cI2V2vX83n1vds8qo2bNrIbmeDDi0T1Rn2irTM1tb8v5TPQyVXdZN+YkwgC8vtkYeNwyFc1rypFtcQvReCYZb6r8F/tnDjcO5nnK4KM2cK41fCWKs5a6vgdhassFK46i5ICn/Ky/atGhRl8qWXG95XzU4rmXsJfh0gv3u/cGRl9mKGKE1vjXAwbrP4sATFuCNtpesPQGEk08rw7VdSxo4CIA5v31pptX27DUAKk82WKmvWhu5HpLdRyKNryfXnScHHx2LtFo86ZQ1ayU5p2qN56h3QgMpZPSaS87B0bljS/aHnoQUJeL205a+Sftwi+XX0v9SOGVtQgrJfsKFeL552yaWr1SXk5+zy9SEvUpU66Wh17QsthJ02WJXkvh+/cG7n7vlI7/9CU7eqVrjOfJYxy2zCOXF7QtzHq0zJW2Hy43vrSf1qae/Ov2vzVKa/CX7iTZTemwvQ/mgpf5oZBkTbWYH7HYD6TmmfLWZfao56GqptEe3hBN6Mry2yOjlWwPJrOn/UPDNFphoC2TV2kZPsoyJRd8eW0CpHe1EmNGEuCbSDqK30lBZZ7Vsqr3k6yH7NPmPsayFJvHMTKIulBDXiWLjLZSWLtxB9F4eNXWtfFva0Or2kH2a/MdY1kKTeGamLdNUs8vmJMFkPXBUDYBDy2qZaCUjVg7LpUamkkw5tcAhLW1o8eeSEdJj1KuB2HoARBywxwJzrok3tzwzJR15DGkWflofSzQauKxGEky2pq7lyFst4EEyYuXuupIxrwY4lN+r6bfEn/aR4+8xfNVAbD0AIg7YI/XR0gcPrFWjGoNoi6uth2s1p6m+7FxOMAmUogEONEOGltvukacPLstBxvHTABwScTJZ8tR5KMmVjpaWQCR5HyW9ciAj2g4F03C6s+ghkUWWEqDF0pbHuGkBQ9G8
d7VGw5rx94CulpG0mR/ix5I3XrpXmxusVM+T223onN+1/Er53vJ2Jb168u316k+NLEPT2PPUHTd54+mePd+TlAITPPtwzx7Yk9utFWxTk6dO2w/ToBO6R6cBLBxQxhK2agnUkPKle/TBBdFIxzC35nCT9KwFXFmexR7kyRtPabQnwni+WJav3TRnBAsNfXpJzekxvVctnjGoWVUNtToa0zPTIhPGOrPnVJr1LF87zYLfe0bwkATjrPmCW+ok2CkXtuqdKbg2uZVUWjF4vCjSCTnayswSbqzJWeq/xythGRONJHlb4L7HhTW+ZLW1WLcB2YLfE0rppRorrmSJtVr5rQEsNVZoCa7s9aJYTsjxJPOoSV3lgajWjKNGJehybzK97CGE9QDuAvA6ABHAvwTwBIDPAbgAwDMA3hljfLFWkJJlstVyPWkopaXtGpk8sNNekFiJTyvUuEZOre0ectZYz2v123uciiSt7/MfAPcC+PWFv1cBWA/gYwBuXLh2I4DbSnwuef0bRShs6bD5VqjqmOGttW0PWTevn8aL/h6qz71kmQZUdyi4t1U+KHv24sweQlgL4B8C+LWFj8McgLkQwjsAvHWh2L0AvgLgwxqvFM/emlUV4IEWGrUskSYVaeVte8i6eX0KQOqVeXVoWTz971W2JoOOhX+P58yyjH8NgO8A+HQI4fUAdgP4AIBzYowHACDGeCCEcHaJ0emnnIxLhUwk+ZKLO2iQy45Ss/zVDgKk2Vby31LdHkuv2qw2JeKyotRkwuH0Yc0IxNWR2tX0kIA211xyDgAsyZ6T+GoAqtLhj55x1ABgnq2DNBbcuJUAVBayvOwnA7gMwG/FGB8NIdyJ+WW7iUII1wO4HgA2nrutmFWVy8DBfdG8XzdPlk9p1uid7ZO2m/Pt8SXnjJ61mXBollrvEVe1+f8pVBnAsmdIypxrGXOuryUqGfWshkppLLTjyVP/tb5IZHnZ9wHYF2N8NMmI+Zf9+RDC5oVZfTOAF7jKMcadAHYCwKVvuCxKRzYnol9jDcaZyDNbaV9I+lWmcFALBFaTRSrLzd5SnjYLX6pLC0zUQ95cadbVADc2pTEp1bfCkks59Ep1Svw5Khn88hVJDWSbUvFljzF+O4TwbAjhJ2KMTwC4CsA3Fn7eA+CjC7+/UOLlyVTjObjPM1tpX8hSFlhPtk9OFk9Zi1uq1P/SoZW1ZNG3tiqQSBobbUws9XtkP9LqWMpyVMoqy2WtbTng1Opn/y0Anw0hrALwLQDvxfw5cfeFEN4HYC+A67yNW4JbuC85/QpbZn/K1zLTaDOC9FXX6kgBJZqbR1vhlAI0aDnPbJXa5/bdSZZ8Bk1lpRVOyS5BbSMtmWqk+pbnjavrWTn1JI6/xf4gkelljzF+DcCbmFtXuVoj5AGIcPtOesJKax52raxVdq0Ona2tOd2s2XClvWmtpRkAu++m++e8bGrDsorRZlPrUdnSvZ7ArJqx7kFcPkCL/UGiicJlP/H7n9zx9xe+jQ068CQpoPDY2vO+KHlgjxLMUkuGYYFBSoEZXOZVKaiFQo49gRtcFlh6kk2C4eYn26SyCZpLz9/TxtFzQk5JX9I1qodS4A53XqCkv6GCczyBXamOdiLMRF/2/3D77+34y1Mvw9pTV+KKCzdh3eof/b7nq3tw58NPLd5LlJeh187dsGbJ71ZFSzJwROVKdfceOoIHHtu/hEe6t2X9/FHEmpxUhtTOFx//Nh54bD8OHp5b5P/21/3YEhnu27UPDzy2H1vWry7qUKKcR+Kf9Jtk2H72Gbjh6osX/8/L3rdrfhY8eHgOj+45tCiLNo45n5LeS/qSrlE9aONFeVA9W2RoIU02aSxTncNf+9Kxm2/60Mc5vhPFxm9YswqSNX5wqKCBWmTwwDhrZLDYGnro0LIn1uwTHptID7lrPQ4lOXuMWS216HA02WUTXHbSEEIrvzHyrYFfWtqZVDbVXvqo6VMLdFWDZ1v4Dp39VvofK1Y+HseQXTbBZXtl2hwqc+eY+KY6nmy7lnZa+uhpu5c+avpUozuOVw3f3vot6TX9rx3sOJXjn7T4aotxhRorWvi1Znil7XiO5rUYduhxVbkRy6Mria92zJRkNNXIktWFy9YrGeY8xk0p9p3LwuPJ7EqPYdbyJ3j6xum81Lf0XFF9pPtf+swdz4zCQHf3XZ/a8enbblo04nAGjRpDXQs/rq7HoEXbSUY4C1+LYScZzI7OHVti8PLqSuJLjWI5z28ceNlteKJtc7KkNqgRjyOPcZO2lfqYDIqcDFLfcl5UV9RAXNs3qyy5POm5ovpI92+95eZ9UvKKqbjetEP50hfRkpNc+opa3CaWAwJ7z2ilOloZbhYpuWFqMrRo+eok/VvGKJclzZTc4Zo9MvZY9OtZvXlWZNJKrCWvHMdfWjGMJlONdmSzFPDAlZXqSNBSb8aXGhgkBUBYABfeMtpx0Zz8PTK0aPJZ9MTJAiw/VNMqp6dtKwCnRDUHLgL8sdRcHY8snjz8lEZzZDMFWFi+jFJ21Xymp8fi0vZa99bSLKqBPbSyNVlaKVly8lH5OVuDlNmVy1pbAkXlwJnzN65ZspeuOf1H28OXjqXWxsKjX+3ZoSCo1j18qd+jA9WkPTunVAqwoHtUbl9D904coEPad3r27NqeSgJAaGAPrWySVwJ7WEjbU1LSbA1Ud1T+fN9Ysh/kwJlnDx1dspf2gKIse3gqi2csPPrVnh0Kgmrdw5f6PTpQjUY0qMMSXkqJAyMceOnoskQAXJ3agBgJAJH/5gJWSscZS6GcNaGunlNOaMBNrjstzLKkx/x+aUxqT/9JRENduUAhady0UFfPiT6UvzfhB+Vr1e9oQDWevPFjPYVlUjLUnMLiqWM5jWVaY9LrpBmaw96Tp57Lfz/0s9SDP1py0E2KLFDMHnynQTUylGCzHrisBVLqgfsOPSat7VhWW966PeSyyj3Ysyp9BYb4STP70BlZOZKyk3qgjrXQTwleOSnYZWtmXgvfkrxa2aH674HuttAkMh9b9Yqxzew98qvVttmSlbTWXSe5wiwush664mLUW/hpsnl0N3T/PZmBWqhVvx4XJi3j6dNUjn+qOcLHA4XliEInNXePBHKoBWlIrjDPoYr00EaPGymXrQSUsZDmBvLoTopn94CiNNcZfc44V6HF9dYCrbXwsBz4aQFBrVu9cjygmkQ1R/jkZRLV5J7jspJSKuUG89Sh9/I2OZmkuilDiZZzX5ohuYwnLVQDENHy4dFsRB5QlLYq0J4zTZ+e/tK+3b97Hwt68eQb5MgD7JFoKi97co2kHOBXXrSpKqdbbkzx5gW35GVrOvjewEfrm+Tm4Vxy2r2e/bDILRGX205yYUluy9wVxvXZk9OOewYl4vK4e8u2PsdS3zxjMIqZ/ZGnDwLg9zqePG20Llc2Ucs+yUs1OcxoHS0nn3avZz8sckuk7Ws1qLE0A3N9TnwtOe0kWCtHHoiqVLb1ObasZEo0lZfd4t7w5PH25Fin7WizKgVjUHnzNrRsqtKqQgNuUHCLJUe51k663nuWl3RH+edjtHn
d6sVrpRWZBOTR9KHllk8krYY4+bWzBiS+niywUl9qT5qRaCovu7SH5WZey5fMk2Odk4ESrc/NLoAvm6q1b9bgHkufuDot2UktJMmZj1HSh0UWya6hBe5oueUTWe0FuQxAOTjLs9os9UV6hrwBMImmml02J2qt9CRtkJIW1GQrBeRw1dyaLmV21Sy/VF7OIyBZ6C19Kukl7xsN4rBYt1vCSjVZqIXZkkiE60fSsyXRhcRH8zDQgKsaPdT0UfMeUH6aNX6ioJqt23/KDPVsgS1OAyLbC2ZZA5Pt0Y4mfw3/GmqFFbfI12OMerfD1StBgDEWUI0nu6y2RylZRz3WYu0EFIulXrL85ns2jzXXsu/WZJHqUlsDZ48AeOt20oOlHy02AW2/6z0nj9pYLOMote2R09rHXF6LziTvgedZn+jLrp31pllkJUuqtCfzWIs5K7HH9ypZfrlzurwn1lD5atFVmq2BOzVX8pBYrNItln9tv2vFX5QwCSV5PZ6dGvuH5bxAjiTvgedZH00gjIdq/LweXhaLZ8k6yvGtkdfSV6mMxetR+l26VyNviTwylPzW3vP8PPL3eg49em2xxk81b7wneKQm97fWRq+giJpgC08ZT/8nlZfeQ5MODMnvabrrIVdtcI+1bI2MGMueneag8wSP1IIJegQQePj3AuvU9L+mTz3AGhb+NTJpdVqfnR5y1Qb3SHLSsr3BUFPNQce5FqjbiB4maHGn5FQT1JLIEoQj5e/m+Hryp6WcZZ48bVLQjCXLLOdW6hEsouWNl9xHrZl5aZ+4vtVkztXcq5KbTnO9JRmkspZniY7NaFxvnkw1peuTIK3tVjdRqYzmArO2k3h4eHGuLEv9Hm6/oce41b3Wc2xa+yrJAmUZP5UQV4484IkeGUI5krKnWjLQSrzyGVLKb5/LX/raW8gTzktlyGWkWWA9M602e1uPmraQJ3e9JRsu1WEukwR48mQH5vjWPMcS6Gx0Ia4clUIjNXdMr/2mxI9ru+Ty4Fx6EgSWttcKi/SE81IZchkBFENwE5Vchhp8uCUXeu3+vuQ242SyQHcB3X3L8a15jj2BPIlG87JLpAVzaCGMJX4cgIEGXVDAiYe/FqBBs55qQBZPttNEmitHCp3lsqDSLLCe4KQaVxkFA+UgIKktC9DH4srTQkhpcFI+fo88fZDNyMs9Q1zZXAbPc+wJ0U00+pfd+lW2zgiWJALpy10DmrAEaABYtnKQgCyWoBnaVk3oLPAjHeYzjmUlwpHnhBxtVUD1Qakm8YPWJte39BzQ04osqyEK8KGrOyqL9TkebGYPIdwA4NcBRAB/DeC9ANYA+ByACwA8A+CdMcYXNT5zrx5bAi/0hKLm0MQSCMGSJ90yw3jgm4m0mUb6gue/6WzKzbwtemgBjUjhpta2S8SNUdKHFF7K9Yebpa0rEU3P+ewMYHFW1fLo0xk4L+vRkQRdpisIrFi5SmQiWe7SD4BzAewBsHrh//sA/BqAjwG4ceHajQBuK/GigTBDBR9oltReFtShvAc9g3x6W7mHbruXBbzFG2GRv8ZL0TtASvp/xRlnPhcbQTUnA1gdQngF8zP6cwBuAvDWhfv3AvgKgA9rTGggzFDQRMsezUMefq0QylL9acA5J9W2pmfPflazQ7TIUiOTVrZFn9Lv5hNhAHwAwGEA3wHw2YVr3yNlXizxoX52DxxwKHir5V4NvLVWzlKOeQ3y6albar9V/qHgydb2rGUkaK0nl33vvnHjOJG88SGEDQDeAWAbgO8B+O8hhHcVP0U/qn89gOsB4Pzzz19yzwMHHDofnHavBt5aK6fER4Nm0jKWuqX2W+Wn9XtDPz18a6C1nlz2vftmiVa09I2SZRn/CwD2xBi/AwAhhM8D+FkAz4cQNscYD4QQNgN4gascY9wJYCcAvOlNb4r5vdyYVTKqtC5Jawx0petcmVY3oMWIl4jmRpNi6j2uw17bEGrUzPm2GKY8hyuWZMrvJaNbMnhZs9bSwzBL7knNeEz7pLlBad8s42V52fcCeEsIYQ2AowCuArALwPcBvAfARxd+f8HAawnlbhMA7BcW4DOQeqkmh1vpulSmxQ0o8eFk0HKj0Zj6lnhrD0kgKC5jrEWu0izqGRsNHJXcn7nLrJS1lrrkShluuf5w17hngLpBad8sVHzZY4yPhhDuB/AYgFcB/BXmZ+rTAdwXQngf5j8I15V4UdebNFvl7g0KYPDOBoCcmUXi5zn62EOeWcnrlpSOHZb4t8htkdFi4KLycnyoK1ObpUvErS44EA3Xrga24mZgCnbhsgZbssrWZLiVyGSNjzHeAuAWcvkHmJ/lzURDXKWTStKsxAEY8nISaaAMSxaTofadnlnJuxIZMk98jT5qQTTS6oVmZmldtZSeA7rqLIGt6Ayc6lBAErcKoG1z+vBkuJVoogi60085GZcycENu/0K/mpZc4Im4rzL9enL8LLnc8uteskA7E1n2YqXTUmrsBiU7Qmll5lkFePKlW+wonpVXaVWUz8A0zz0HeaV86fNlccFpfdOeWytNNOrttt/9xI7nNv8cDh6ewwOP7cfeQ0fwwGP7sfbUlbjiwnnh7/nqHtz58FPYsn41brj6Ypy7YQ2uuHATvvj4t/HAY/uxZf3qxbIS3bdr32LZZw8dXVJv3eqVIj/adoo8kq57KZer1Ickp9ZWkovq0VKX8kh16f+5LPft2rfkHtcOV1+6XtL32lNX4u2v+zG2L562Nf2m54u2ffDwHB7dcwhb1q9elCH1P79H25GeL649aZy469pzm5MW9TaV5BU0qcC1l29dlvObhpVakiAk0k4t1RJI1OQo9yR20JJWSGQJFU0JPrhTcUtEQy41PUshurU50CV9aDJ4Tm2tSbqhnVdAE6po+pZCfnvn5ffkjZ9adlkadMFlOS3tb62BIK37TG6fCPgz0WplJLKEikoBNhaS9sScDJZTaiwejdJ+W5PBc2przdhoASbUcp/KcPw9gTaSLBzVZB9ONJqotxIM0FKnpp0WGYG6DKYtbWr7ud6w0BL/2n61jJvWR1qmZmxq2rH0w8N3sOdZgtYN8aOlpSpBB3vBFi2w1qEzrkrt9cqy66kzNPRTk88D3/XAWT3t9XiGPPq13GuBAGMs2WVryANv7QVr7QV9tVINnFOTyVNnaOinJp+ljRo4q6e9Hs9Q7dK8BzxbK0NpNDnoEnkyjVLjkiWXWSn7J1eGGgs5OTUDTMnwosktZUrVjHyeOtqhmMci1D7mffUYLC3ZY0t9SUYyS946SybadE8zxkoHc3L6pUbZxI/LN2gxRlOSnpnRGOgs5AGeUOOSN2uJtYznOKKaHOWa3FomGYk8dah+OQiz1EcuUKMmi01NXyzHMlvakzLqcMbY1CeLfumYSyAeqzGaUk3evtG97BKAwwL26AUPTaQd+scZdDhI5bWXlw92tIBtLIARC6AFWHp4pQWQI8FDOWNTjZHQQxzoxaoHD2nALK0d6ZnhoLWJPAAqaZzSfS1TzehedgkOqbl5aqCTFpK+zul/bkbkIJWAnies14GJlj0g4As4KsFDc3
lLK6YeJLkBuTK5TF7i4LK0LUtQiyVfomUmL6140/0Va9adKfGY6J79E7//yR1/f+HbTPtaunfiABgS2MNDXM5vCrDQTgBJJO39OGAPpZY9moVPfn3DaSvVnPIaUIbuO3vl6/eQx+ZC71lO+OHy57952yYWyMI9dzV2JAuV+KT7X3/w7udu+chvf4LjMZWz3iz7WsusbfnKl0gLmvEEH5SyqWpf7l57NAugJQVqSDnlLSuo1vP3Wshrc8nJ4/XInym6itOeuxo7koVKfNL9k05Zs1biMVW4bG6FTDMO/XJZLKkWy6xkSda+4B7+GkmWegs8kvZVsxJb+JVmCM8ZapxeJBm4657+9yANhptWbRqUWzqHkIPUaitSSpqXg54zVxq3L33mjmd23PyRP+DamejLfvddn9rx6dtuWgwGyAMraMBKIi2owxocACwPGkm/uYCH1JaHv0ZSwEpN4Eaus28ceLkYxEKpFIxj0bcWnFQTCGPpfw/Sgmdo4FUesELl1J4LOk6W4Cnp2cz1Yh23W2+5ed8oXW8a9LOVj1SGWpKHhrtybbfATTWdDQUxbpGlpuwkqUVnWl1P3yww3y66kqB1Q/xc8vo3DpLltDXDqLVM78yrk8paWwPZ9JYZmlp0pWXmrcnE2+vZrHneShmFsWLl41F4/06q/0z4KRno7t+9r1g2GUF6le1RxiOTpb5Hpo8/9GR121o7vXQ3NLXoiqvrKVsji6VszfMmPUOpKF/cZwAABRtJREFUH6NxvSUDnQUm6XFZUBeZJb6YI8nFZIEx1hjHPC63FF+d97F07DDXd8nYZNGL5p4rUasxzuJOo8dda0YyyYXX4uLk+umBe2vtSDkAqLFwNK63PJ69RB6XhcVFVpOF1JPh1gLksMSOW/pWyiUu9QtYHkvuzZxr7SulVqCL1Z1Wgjlz/PLnq8XFycnjgXtr7Uh9oTH2o3G9WQJhEnmCJbTsIok8mUKoG8YTsOGZIS2kBV0kd6UnM4tnlrZkAir1g8vSUwqasawCLNl/WvRtmbUtAKQWNygnl8R3dKAaD3mCJSzH19ZmCrECIWpmSC9f6UhlwJ6ZxTNLWzMBacTxKGV27RXi2arvUputEG7P6rXEd3SgmpoQVwvVhK1yM5uUr86TN8wzK7XCTel+LUFge81kpdlEgxpr/EshxK37Zko1qxeOPDaXGhuRZyVD7UoJHKbN7KN72WuAFhTsYQGEpDJaJlMJDKPJa5HfwtfT7wTu2H72GUUAh8RD0xnNKsv1nQJ8NP50LCh/SxZeTW5K2ph4njcql0V3mnwl/XJyJpBOqkPBYX/3V1985eabPvRxjsdUlvGeHOJDtCW1p53pZc0bZgE/pPBJempILZXa5EJcPeGfNUAZjaieabhqzrcmD326poUoJz1YnjcaVttyShF9liT5aFi39HxRII52ZPNUXnZLaOCQbUnXtb2k1TJbY2FvDZIotakF+7Tw9wT7cPJwQSfUSyHtUS3PkBZu6tEDDXwB6k8p0k5mLYV1S89Zbsv54A9fmZNkmMrLPknoZM2sNHQChkm1Q9trhSX3Igs8lJatgd9aVl0tcNZe2WU98jaRBK0b4idll/XAAHtlPe0B+ewN4e0lQ43uamChQ0OYPVl2a2TI/+/5PLRmIe6h59HBZRN5YIAeGKO3zSF5DAUx7aW7Gljo0BBmC7TUAxvW+Pd8HlqgzCU5vTKMBi6brPGaO0aCONYAJIC6o5d6gElaobVS2VbdSbBLy1FOJTixtf9SGU+8ucZXGnMte5Alc670PGjZZWuATh6XnAcuO7pDIm5/6In44x9+MN7+0BPFshaq4ddbhhb+vctKZWraeecn/2xQPXnl6lEn9SnvWw9+NTqqGZMVZ5z5XDxeDomwGCVK2UMtR/72kkHK0qrJmxtrcleLVtYqS6ks5eepKx2nPaSxzzN+pb5ozw11h+ZuUXpks+Zqq9GRJBeXdVgqm/iPzvWmUU2mzdL9npBJ2kbNqSAa9NETSCHx9/TNU7cFTlxLrdBc630p4CjPQWeBwNboSJKLg9OWIMya6210CDoLlcIda45GrpXBAlWV9mCW/XdNMEhNbjuLrmqORK4hCwzXc1y0pa9awFHpiGlLG9oeXtKrlr1WCuc+rk6EsVBN9tChZEihhXm2Vq1sadb2BFJ4AEOlflio5kjkGrKAgLQgFImsJ8OUjhPXqPRscjqT9Kplr01lLTIlCjFGU8EeFEL4DoDvAxD3FU20YuWqFWvWnfnDIy99F8pyxklnQpPX02bvsnyZM7Fi5csD6GGxvWM/OPLySaesWZt+N7azXL95vwCwfRlmrHla2tbaZfI6eLA6E/RqKcv0/8djjGdxIkz0ZQeAEMKuGOObJtpoA83kHZZm8k6OpgKqmdGMZjR5mr3sM5rRCULTeNlZS+GIaSbvsDSTd0I08T37jGY0o+nQbBk/oxmdIDR72Wc0oxOEZi/7jGZ0gtDsZZ/RjE4Qmr3sM5rRCUL/D+NVQ7mKgl8IAAAAAElFTkSuQmCC\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQIAAAD8CAYAAACcoKqNAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAAgAElEQVR4nO19fbCmRXXnrxkGmBkFZgaZDAwjo4zJ+pXoEJGQ2jIhrglrxaotNGZLy1imxlTlw2i2BGIJ4x9JYa1iqLj5mIgf2Wgii9ZqkSrEZdfa0qy4jLGMH0FcR2FgBJwhsAjLiNP7x3370vfc8+tzTj/P5T5X31NFXd737af7dPczfU6fj99JOWfMaU5z+vGmE1abgTnNaU6rT/ODYE5zmtP8IJjTnOY0PwjmNKc5YX4QzGlOc8L8IJjTnOaEiRwEKaVfTindnlL6Zkrp8tXmBwBSSueklP5HSunrKaWvppTeNPt+S0rp0ymlO2Z/N0+A13UppX9MKd04YR5PTyndkFL659maXjg1PlNKb57t9VdSSn+bUjplCjymlN6fUrovpfSV6jvKV0rpitm/pdtTSi/zjLHqB0FKaR2A/wTgVwA8G8Cvp5SevbpcAQAeB/AHOed/BeDFAH57xtflAG7JOe8GcMvs82rTmwB8vfo8RR6vBXBTzvmnAPw0FvidDJ8ppbMB/B6A83POzwWwDsCrJ8LjBwH8svhO5Wv2jr4awHNmz/zZ7N9Ym3LOq/ofgAsBfKr6fAWAK1abL4XPTwB4KYDbAWyffbcdwO2rzNeO2YvwiwBunH03NR5PBXAQQBLfT4ZPAGcDuAvAFgAnArgRwL+ZCo8AzgXwFWvt5L8fAJ8CcKHV/6prBHhiAwodmn03GUopnQvgBQBuBbAt53wYAGZ/z1w9zgAAfwLgrQCOV99NjcdnALgfwAdmV5j3pZQ2YUJ85pzvBvAuAHcCOAzgwZzzzVPiURDjq+vf0xQOgqR8N5m455TSUwB8DMDv55wfWm1+akopvRzAfTnnA6vNi0EnAnghgD/POb8AwPcxjevKIs3u2K8AsAvAWQA2pZRes7pcdVHXv6cpHASHAJxTfd4B4J5V4mUJpZTWY+EQ+HDO+eOzr+9NKW2f/b4dwH2rxR+AiwD8akrp2wD+DsAvppT+BtPiEVjY40M551tnn2/AwsEwJT5/CcDBnPP9OecfAPg4gJ+bGI81Mb66/j1N4SD43wB2p5R2pZROwoKh45OrzBNSSgnAdQC+nnO+pvrpkwBeN/v/12HBdrAqlHO+Iue8I+d8LhbW7b/nnF+DCfEIADnn7wK4K6X0k7OvLgbwNUyLzzsBvDiltHG29xdjwaA5JR5rYnx9EsCrU0onp5R2AdgN4Atmb6tlnBGGkEsAfAPA/wHwttXmZ8bTz2NBpfoygC/N/rsEwFYsGOfumP3dstq8zvh9CZ4wFk6ORwA/A+C22Xr+VwCbp8YngHcA+GcAXwHwnwGcPAUeAfwtFuwWP8CCxH9Diy8Ab5v9W7odwK94xkizB+c0pzn9GNMUrgZzmtOcVpnmB8Gc5jSn+UEwpznNaX4QzGlOc8L8IJjTnOaEFTwIohmFKaW9K8XLmLQW+FwLPAJrg88fFx5X5CDozCic/ILPaC3wuRZ4BNYGnz8WPK6URvAiAN/MOX8r53wMC+Gvr1ihseY0pzkNpBUJKEopXQrgl3POvzn7/FoAF+Scf0drf8YZZ+RNmzbhaU972uJ3xx4/jgceOYbNG0/CSSeeQL8vn59y8ol4+LHHl7W3+ovS/fffv4TPnjG0dt75WnTs8eO4+3v/grPPON3dT88YrTX3jvXgA0eWraXs2xpD/q59DyC0L/XY9z3w0LK19KyD9730rnXr+dY7WdOBAwe+l3NWG54Y5sxHZgbU7F6zFwB27tyJ73znO0sav+fT38C1t9yB11+8G29+6bPo9+Xz83dtwa0Hjy5rb/U3JnnH0Np559vLQ6uf3jHYmg8ZS/ZtjSF/174HENoXa2zPOnjfS4vGfG9TSt9hv63UQWBmQOWc9wPYDwDnn3/+MrXkovO24vPfOoLd2zbhPZ/+Bi46bys+982Fzxfs2rL4ffl8yfO2AcDi95fu2YFztmxc7O/SPTuW/JV019FHcMOBQ4u/l/+v+9DaamNcdN7WJTzI9hovZb4Xnbd1yVisT8YLm6dcz7ofawxJpf3ubZsW/9Z7VOagrTWbpzbf0lf5W/OmtSs81N/XPJT/9+xfPTZ7Z+Q61HOy1sj7bsn1YrwPpZU6CBYzCgHcjYXMuH8f6eBz3zyCWw8eBQDcevAoPv+thc8XzE7Y8r38XP+tT9Bztmxsnqg3HDiEa2+5Y/Fz+X/tmbqtNkY5xcvvsr3GS5nv5755BC/atdXsk/HC5inXs+7HGkNS3V7bo9bzbJ6ybwCLv79o19ZlvMl2Gu91/601s8b2rkM9p941krzJ9WK8D6UVOQhyzo+nlH4HCzBJ6wC8P+f8Vdb+2OPHl52W8nQuJ+trL9yJFz9j6+LnognIv0wzkFROWE2KWZKASVD5e0tCFvJKStmHNpYmceR6arxENYPCc9kT1nctxSzNjJH1XKRf2VZK2fqdaElvz9i9WgZ7d6z3pJtWOz0154wdu5+Tn37ZjflVf/EP+emX3Zivufn2LOmam29f8lv5XJ5hf7W+Wv1GyHo20vcQPurnvfMeg5ex260WWe/WavDN1mzIWgK4LZN/g+v27ds37snSQR+87n37fuuNb8Qv/NTTcOThY/i1nz0HZ29eegLv2LwBp56yHpfu2YHTNqxf/FyeueR52/DoseOLf1974U7sPvOpi+0l3XX0Ebz/swdx0XlbcdbpG2i7uu26E4DrbzuEHZs34LQN67HuBODOo4+o/Go8t0i2LWOWseRn9vyv/ew5zflY/UT4Lu0uOm/rknWRY9T9PfToD8zxNWJ8y72Re+QhuY/ls/UOMR7qsT3rrRHbA+s9afH1qY/85ZF9V779z9U1mMJBcN37/mrfB955Bf7+y9/Fx754N846fQMufOZS1ee0Detx4TO3Lk62fC7PPHrsOG49eHTx7+4zn4o3v/RZdPHf/9mDuPaWO3DW6Rua7eq2dx59BB/74t049ZSFsa+/7RDlV+O5RbJtGbOMJT+z58/evLE5ptVPhO/S7vrbDjV5rfvzjB/hW+6N3CMPyX0sn613iPFQj907X7YH1nvS4uvhL910/Mor3vpubbxJHAT79+/ft3fv3iUn8/GMpgSQUtnSBOTp6NEECskxiuSISPyaeqSydywmIXvmbUlhphm1eC1tf+Gnnoa///J33RLckpBFE9I0Imu9vfyzfjQNomg+Zb2l1sT2y1pr77pofN30wfd8m2kEK+U16KLaQgrolntmVQUWrLIvfsbWpjXWY9lmfJUxigXX8kQw8lh+Zd/eseQ85d/WmF4+Lct2i1fmDbL4Yn16LP3Wenv5Z/2U5+t3T3oxLK8PWweLd89aF77e8sMfHFMbYiIHQfEalJiAi87biu2nbQCw3LJ
b+4m1OAKvf7pl2S4WYm3MlsWX9WX5hiPPWu2ZJb/FO7OSM+u63APmFdH6lf71wqfl4/eS9vxYHgfmoZHroXlH2J7L/WJrHfGy9Dw7iYPggUeO4dpb7liMCfjcN4/gzS99VvNkLCesjCOI+Kdr0k5e7buWX5n1ZUnQyLPWWJp0snziTCpJacP2wOJN9lv716UGN9RPrj1vaVNebUvGW8h5yfXQtAa559p+sXl4qefZSdgINK+BtBFIC3W5e8m7Yfnda0Uud7Dd2zYt81jIMb19Ml499335m7y/Mot8GUubh0Xyns3us5bdgvGm3duZxyViFW/NxWMj8H7P7Etyveq1l++vtI1oHhXN7iDXx7IptJ59xzvecXjfvn37tXWbxEGgeQ2+dvgh1fJcLNTF2l+s5OVv+d1rRS6W1SMPH8OtB48u8QDIMb19Ml6lx0N7seVv0qLNLPJlLG0eFkmPg/QCMN4sC3bLk8E8LhGreGsu9ViWx8H6Xlrf5bunrb18f8t8jzx8bMm7w94Ftj4tD4X1bOsgmMTVQLMRAGjmGjBbgLxzaXHgrfat6DAZzciiF9lduufeK++aVi4B461lA5F8WdFrrUi8mrfWfZ/NQ9oVeu7IkqyoTE+0Zvms2Vla9idvH4znlj1C9mPtA9atP4mNN4mDQLMRAFCty3Ub7c4r71xaHHirvUYsbrz+O+Qu3SKv90DLJah589pA6r7YmnntGK27qmWZ7/FyMPJ4HLTvrfwFrR+Z3+Dtw+JBWw+vd6R8v27jaWew8SZxEDzl5BPxfMXyX0s4+deS8PJUZCdwxILP4sbZqa1Zji3NIBrnzjwbUhPS1sErKa0MRya9NO2NaQnl+/JMS0MbiyxeIhoOo6HZg5a3S2vL9vMt73rwe+zZSRwEDz/2+DLLf/l/9teS8F5ff8SCb2XGMSkWye6LSkTm2ZCakLYOXklpZTgy6aVpb5bUKs+0NLSxKKrJ9Fjjh2YPWt4u1lb7fvJxBJs3noTXX7yb3rGk9PVkb3klSat9j395KG8RCdDqM7oOPWOwdq298tzLPfMeg3ptBxHevDaeVSeWjfRk/rdnz56cc853Hvl+vubm2/OdR76/+P+3fut7i9/VJH+X7eq+VopWYgzZZ3Se0c9j8DgGRedt8dLD40qsVe8Y3rEjPKKRfTgJjaCQBg5ihV6yENqVAnBg/I41BjPEeecZ/TwGj2NQdN4WLz08rsRa9Y7hHXssHicRR1CSjurgis2b1jdTQWVChQzusJI9xiArESiaSlz3KYNUWFKVlTDTk7xkBdp4E5c8KbIssIbtrxwzGpDTouhaRebH+GJBa941jiS+teIIVv1akKurQU29oB8MZGIK4BI9QCUW0MpKzG8sUIxW+zFBXVbi+aFjRNfwyQBDwVq5GtTkdfvJwBkWDPJkuqGsBCkPzJQ0nrHAqKHuKe0Z1qe1ll4g1fo76ZprJfJEgrHq/qMuWTYfaw7e+bW+jyTFWfzKeU8+oEgjr9sPWBo4EwXiHIOse59MkGKuz5o0cM5W0soQcMuxADO9QKpyfvWzViKPl+r+Lfeudz7WHDzzk315E9s8e2DZWVoBRZOwEbz3z/5i3/975i80786RhBaA351Y0oZ2p/MkeADLwTZYklELRqwXgKIXwETrmyVJWYlP3jX32EbkGvX2Wf/+7LNO7YJx89oZhsC/sWQk+e4VW5Fs1xpDrunkgUlKiDHgP+2sQItoCGsrBNeSJC2ocMmLlQrMxvAG//SApVhalFdDiAJ6sHlocO7RPuXvnqCcSLr4kPnVxN4d+e5psP1WgJhc0zUTUNS6x1uJMBaxu5h2f5fhriysuRe4RCPLJhIFKOkhT7CPB4hE8qDd16N8RsBdWnNp9ekN/onYM7x2BmkDkslw3oA6bcw1k3R00oknmFLMezozaoXiaglPHtCTXuASjSIFWHqlbi8PXruLR1PovfNHpbVHM/KGTrPnPPYMr6YnbUA9ADNsXp6ko0nYCEocgUZR3zUjdt/TgEkv2LV1yd1KwqwzniL2B4us+yoDy5BxE147hza2fMayP3jsE72xF8xuwe7MPfd2b9wHA3LR3lGvrSoCXmPNj9kKJm8jaNFYkVMWVBTwhNSXd0p5WlsW4DGiHL2WfEsq9aT1smfGgPyKaj5WqjTgSwX38OId21NuzZqvtS9DNESm2bRsBJPVCCxNIFrYworgqi3KsggHK4AhpdGYsOcWBBkrwsGkgVY8xpL8USt7bzGPmthaM0izqNdI45uN7d23yP7K99qChxtjzEKTRyjSyHsn9sJ1R4Ae5AkvpTGTRmPCnkt+ZQwCA1RppQizlOCW5I9Yy8fQ3thaS02u12uk8c3GjoKaesiyVUmexhjTQ5M6CGprp2WxlQAWljXV6wmox9RgpjTYKel5GAKqwSzZFtBITxScB6atRVEQGA+NFRlarwcAdU1ln8wT4fV0eNpFPDORKMgWrRmvQSHm+9XaaAAWHunlgT9nVnIGOyWl9hBQDWbJtoBGeuIQPDBtLeqVoC0aKzJUy2QtfVk+fY93yBozGk/g0Vh7ac15DTyRXN77K3uu3JXL3bp1N7MyxSzbQM+d2fJujFFwlT0TidbrjWK0yOslsuwT9XoVL1DUcl/IG2HYglK3Spq1bFa9nibJ15rxGnhiBSLQTdpzsrBGS/IwicdsCMBSLWMMaCvv2gyxQxSKxiqMfU/18MDatbwslnbpjRy1Yli09bDGaP0ejUmxvBxrxmswRq68Jc2k9V2T8rKApZSYrI+ibRRvQk+xkWghjLHWB7C1CqYRRcjiw1sk1cqx0KQ402jkvkXjJ1pklVhnMQlM0/HED0jPWGm7ZrwGPdFg1vfyd2l916Q80C7A2uqj1gwi2YaFrKxKi4Yg3njjBIbc3y0+5Py9fnb5WZPilv2FjTlE87HsMFZMQsQzM8TO0H0QpJTOAfDXAH4CwHEA+3PO16aUtgD4KIBzAXwbwKtyzg/0juON67Ys1sUiLAuAMAhyrS8t/pthIbQwBFrEvARyPaQ1WXpFPFgCLCbdslRH8yI0bxDDGbDW0ANBLvdTrk0Lw6K1bz3rFc2RkTgbzCOjrf2Q3JchGsHjAP4g5/zFlNJTARxIKX0awG8AuCXnfHVK6XIAlwO4rHcQbwRdD35BS+K0pBWzL3gxBFrU8vvX6yF5ZNpKS4L0xgEMjQ5saRXWGkajOgEs8+oAXNtq7VvPekVzZCKZrJKG5L50HwQ558MADs/+//+mlL4O4GwArwDwklmzDwH4DAYcBFKSy+wsVnZMkjz1tb8sc4xpI0zCyDHHzEa0fOBSW2n54WVf3jW1JKEnvsCKE4lmBLL3RPuNaXQWb5G4CaahWaS9p624gnq9hmTojmIjSCmdC+AFAG4FsG12SCDnfDildCZ5Zi+AvQCwc+dO2jezyFsnpyTN26AVKan7sbQRK0d8yN3Siz9QyNJWPGN419R7z+8prtKbEcjeE+03a428cQYe6Ry1EzGbgQfFWZu7lwYfBCmlpwD4GIDfzzk/lFJyPZdz3g9gPwCcf/75GdAljTwhpf
SySqAxitwpmZQtd0nt91ZU2BjYAZbkjEiH6N1S9s14YRK3RVH7g3xO451JWSatmS3Aku7MFlLzo7139bvijSxlPGmFgz3rP+ggSCmtx8Ih8OGc88dnX9+bUto+0wa2A7jP258maVjcQEsCRMby3CmZBGExCZa1doyYfEtyRu6m0bulhVg0xLvQm3XXii9hUpZJa2YLsKQ7i4zV+vbWqWA2I40nwGf/0miI1yABuA7A13PO11Q/fRLA6wBcPfv7CW+ftTSwkGetk9KSxlKq16e8JUGYRdfyKljW6xZZUleuU8Q+IdtaaLheS7aHhyjCsJUP4OmnpT3UY5T98eSnWPNlWpenWG2LJy1PRGqqK+01uAjAawH8U0rpS7Pv/hALB8D1KaU3ALgTwCu9HWpINp7CopFoPvm7ZiG2JAig36GtezrLd2C8tvi2fMYR+4Q3Lz+aOx+JC/FKLysfwNOPFZ0qtU1Pfoo131bWocfuwHiS8QmapuqhIV6DzwJgBoGLe/stZJ3asp3Hz9rTXuOF8WT17e3HwzfjrcdDYY21EmPIsbx9PxnrYHmYngwvkJcnjzfIRazyyZP5n1bpKEpWMUlWRFN7vrcw5dCCpUPm2duupw+2pisxZs/+MV56+/J+b/U7Bg15Z7Bu/ZfzWqt0FKWeQBP2PNAOMbb66C1YOmSeve16xmJruhJj9uwf46W3r971WAka8s5MPg3ZU+DEIgvSigGQygISv/az52Dnlo3NxB+Z1MFSSK1iHT1JVt7En0jfVmKPHIulgnvSdb2FXMqYsgis7NsCN63bWwlA1tpZKeJWv5F1YGSBtmpjlLWZfBqyp8CJRRakFTMqygISmhum8MUMdK0U0laxjp4kK69rbgxDHRvLcum2XJaWRGMQbYAfUp7x4gViiQKX9AC89GpPkUA6Ocbk05A/eN379v3WG99oSjnAD3ct2zOJoZUbLyWyNMBPYLkEYO3GIG+psyF9S8nuBeyIQqrv2Lxhsdy9t3yaBSRjaS01z15ocQYFL+dnzduz9tYzDHDFo33IMT71kb88wjSCSRwE173vr/Z94J1X4PrbFk6wU09ZjwufqUuU93/24JI25fNZp2/Am1/6rGWLwn6//rZD+NgX78ajx47j1oNHsfvMpy7+ftqGhb7//svfxce+eDfOOn3DEn7Ks+UZ1m4MKrzIebHve/o+e/PGJX2VNbvz6COL82qtrWwn97Hes7uOPrpkreR+St7K2pa1ln2Xduz7mme2ZnIekm82P2venrW3npHrI9+91v7LMR7+0k3Hr7zire/W2k7iINCASSSkeCF5ykU1AdmPBgklAUgsW4EFQOIpMmIBUAyFCm89z+wQFlwWs7+0IMkLbBjbP0sbGQrcEpkvm1+kwIm1j5aWIvuMwsvV+9SyEUzqIKhPbEtSlFMuqgnIfqQ0rJ898vAx3HrwKI48fGyJpJA8lHZMI2ASR2ujSdGW5PRS63n5W2tttDWUGlGRWtrnlz33J5r7Z2kjpS+2J55D0jtfNj/ZvvUuWvtoaSmyT9ne816UZ95x1ZWHGELRpA6C+nQr93TrpGR3LOsO5oF8YtZ/1s7itVXMwvIo9AKNeu6xllSytJAo760xrTVl8GIRzYlZ/y17hfWuadqIFwCX8WiVS681BqZFF2pBla16MFGuAoquufn2/PTLbszX3Hw7DY6Qbaxn2O+esYbSSo7tnfer/uIfQus1Np8rRUPm17svPesxdCw2P/m9p08At2Xyb3BSGoHnnsekDbNUs/v7GFZ36+5vxTZE4gckeeHNrViG1phjgJX2knf+kfJyzPZjeUcKDbmfM83G0jIYIK7mNbDeq5ZGMKmDwHPPk22Y9VXe8+X9fQyru3X3l3flyNjW3Y/1Lcdg915tbHZ/jVjDxyLv/IvXR97fW/OTth/LO1JoyP2c2ajY2kr7ROG18K55Daz3avIHgSey0BsFJyVfT6EIi6RksaCqh9gpovdSZqX2lEWPRtSxdRmiOURtQJH9lc8UaWtFLTL7QyvKT647++v1esl4isj7XPqafBzBH1/zp/u+cMoLXfEDTPoyTcGyfPeQlCzMp2ud+i2J4pUYzNrMfOE90svSPjzz8ZK0llvzj+yvfKZIW6lVsPkwn770YNTPlnVnf71eLxlPEXmfS1+TjyMokYUe28BKSPgoWZZr1l7eLVt30t7SX3IsTyRar69bPt9T0MWSstb8e8Yo5C0+YnlDNG9QiaAs2ob8K8e01nyITavM86s3XnfPVW9/23u1NpPINTjpxBPMeOveUmcrQawwBuCL+/fEmfdmGTLIslYcvJW554UP6ynoIseKgtL2jFEoUnzE+iwBcGv+tb9yzF7QVg+VeZ5w8sZTWZtJaATFRlDfYy2faJTGuL9K8sYRRPzOlnS1LPvyDtqS0t7MPW/GYI+2xjQAbwmwyBhyPj0ajHecksEq81G82seYtGY0gpJ9WGfBATomQC+NkSsvydJSPECb7BkvsKaVEdmS0t7MPW/GIFuHFrU0AE8JsMgYLeDPMbTMepwynxq2LqJ9jElrRiMoNoJaoliRhcwC7/UqSKr7LdqIx9Leop74d2kllvYHb068FRXZ4s8bq2GtvcdjYWkTUmPwakqtKFUWg2DtTSTCko1p5aNE3zXtvWW2kMlrBLWNoD6Ze9BjogCbWr8ARkGciZzyDFQVWCopIznxGiaChz92P2UYCBZiUWsde4FEo+XINI2n9GlpBL2l4Cx8hJ4xWs8Buha9ZjSCElCkkdeCLSViNM9bO829XgFGkYzCYmWWd2VvqXWvr38In95MTpmNN8TTI/u2NAdPhGHL2t+KFxgizZktx3qvI9GrJbOT7c/kEYpa5LVgR0t+Wf1GvAJe3luf6zHqsbyl1qPFNnv59NgItLXvvYO3StXVFClHFi1FPoY074Wft7Sqmjep+cixJ49Q1IosZD54dqJ6MQI8cfRRa7jXF6zlrzMtxMJB9FrAW5LFypCzovwiEXdR6s2A9PTJovtYLIaFl1ivj4Wn4fU0WdqLNm+2ZpMPMW5FFspoNxYpJyPxLIwAK3KvbuONXrNyzeWYdVRZ+U3Glpc2hQeGJjQEE0FGDjI+vRGSrYi7KHmjFVlUZKtPFt0n+2KRla0oyK8dfqgrItLCSvAgMbE1m/xB0MIsjKLnSIuwlc+toRB5YxgsDaAnjkBGLVoSnqH8RtaNaUeWZGTZfDIHY0j2IuPN8h5pY3nxIbw2gtb6SCQm79wtzcbzjjGNbPIHQQuz0Doh5WLJrDQplaQ00FCI2GkuydIA2Mnckl5SM7AkfOFfxstH1o1pR5ZkZNl8MgdjSPYi442tbUuDkBKcSVmm4VhZpC0kJu/cLc3G844xjWzyB0ELj8Cyplrx4ywfX965WzEMK4FcI6lVZ0HTCNg8vTYSjbyWbeYVkFl9mgXfitGwPBaW3aK1J17Jbn221q+eZzRXxHo/Wu+YHFO++2sGoShCTyby0Eog17A+CvKMhkDjGbOFYNPLy5hIPtG+o59bPDyZiEvescZsNwShaPLuQ0aX7hlWVHKMscYcszzrL
bhpzX+Mgp3eYq9e3nr6jv5t8bAS7wYj71hjths0P3ZCPJn/jVEENUpDCleuRpFMq4/e4qE984n2MxWyCqX2FphtPTdG4dv6OVZg19M3fhQ1gqE0pHDlahTJtProLR7aMx+Lx575PRk0NHDK209vWw/vrTJ7Q9Z/UsZCj+HEm3Qk/1pGxUj4q9fdJtuzYCDPfL2waNLA5SnH5k0+Ggqoqs2LJSpZbsJov/V8WEESb5iv57mokTOa6h1xAxdqGQsHawQppXUAbgNwd8755SmlLQA+CuBcAN8G8Kqc8wOevoYAdrAT0wrNjBSuLOQpDqq1B+Aq4KnNt4QWR0E0ZPHX1nys7y1+I2HO3tLqvVqJtu9W8lRvmK/2nJZE1vrck+ot19hbIFejMa4GbwLwdQAls+lyALfknK9OKV0++3yZp6Pa2HHX0Udww4FDuHTPDpyzZeOyNhedtxAjXhvXyve1sZRlHbgAACAASURBVE0apUq/pb38Xo4nnyl9nbNlIy46bys+/60nxo6SNl/J9+5tm3DBri245HnbFudn8cvGkMTmZfVV1l62l+vR4rG0fe2FO/HiZ2xdxme9v/JzzaucQ1kv2a82ttw3yW/5LPtk86+fZ/PwGj29+6sR6wvr1p/Enhl0EKSUdgD4twD+CMBbZl+/AsBLZv//IQCfgfMgqE8969RmyRvA8hPTI4U99zw51pBEHzZfBiwCLE2Z7U2N9czL6ovtTQTQhGlkbAy236y8fUvTY/vGtBHZp5WOXb6PaB89cHaMWF/rNp52BntmqEbwJwDeCuCp1Xfbcs6HASDnfDildKbVybHHjy87YZl0iUpK+ZlJNU1yMolgua4s0k57KSGZZsMkDBuDSdBL9+xYHLOs4e5tm5asiSUhpUT1uvbq75ik1zQES4PwuF1bfMl3jrk6I++Q9Y5Ymq+myXq0N/kMALzlXQ9+j7XtPghSSi8HcF/O+UBK6SUdz+8FsBcAtpy9a9npx6RLVFJa0F7y+5qs+3kvvJR22nuARViKb2uM1h28rJ32t3VfZ6nQEcDNaFqupUFo68XISmXXUpm1eXreoQggjkdDiHieZN+tNOQhGsFFAH41pXQJgFMAnJpS+hsA96aUts+0ge0A7tMezjnvB7AfAJ7/My/Mr794d/MkZRJz97ZNAJ6QZtZdjPWvkSZtWvdUdkozraQe27I3SGkspbdsJ20mrfUoayg1A7aWpT3T1jxSS85HanaSLA2C9V//bu1T1OYzJIDH0q7YWC2Nx/OeMeo+CHLOVwC4AgBmGsF/yDm/JqX0HwG8DsDVs7+fsPrS4Mw9EqFIyBZ4iNcirhEDsLAs3ZI84Cpe8AupCckxNV5aUrv2LNR9ttZSeiI8Pm7vfNj8PbYhrf/69zG9HjVPPRQFUPUAxHpBfDRaiYCiqwFcn1J6A4A7AbxyjE7Zac2ktnWHilhle6Rsi3ftvmdJIzlPqQlZngxtTACqVLbsLtZdOnJPl9qIJRmZrYC1q/uzLPNerazHki/Jq21Gxh6ioYxyEOScP4MF7wByzkcAXDxGvzWx09oDk6VRxCoblbIW79p9D/BJxHqeEXBTNibTMlp2F89d2ntPl/OwJKM3/kNbB8syv1IAoxp5tc3I2EM0lElEFmoFTmR0lQUXZUX3SfLAaXnLjjGyCojUUYJR+HYWrciiAQsPdaRhAc+wioJ603I98OWSokCiPSnBFsiNtXYyotATFRiF3bcAd6IALRpNHo+gQJW1CnVacFEt6CaNPHBaEsgiiq7Dni9j1wUtGd8WuIlV9lzyUAOeFPAMqyio7NPi1VNwlc3DAgnx7q/WzgslJ9dOQpp5QFIs4BQGtccAd6IALRpN/iAoUGVaXLwXSlue4l6wy54CIF6K5iRoFIUpt4BE63WUkGyeNWmNZUl37VlWJtyKzbd40d4DS+J781FYzkLNY4GnZyAoXg2IrXF5rgagOZ7RnM/kD4ICVabBc1lS2QPdpJF1Amt9R4md4pF+vSXJC1lAovU6RmHgrLEs6a49y8qEe4FTGS/ae2BJfNm3prnV/bR4vOvoo014M68GxNZYA+ktEHtsPpM/CHqgygr13iHl86177ZhwUtGsSu98GKx5q/ALg2SzMgIt3jy2FZkdyaSuZZco5ClwEgXC9ULRSc1As/lYtgFr3pIHTVspWgjLUP2RhCorNBR+ygPtNTbs1NjP1s/LeXihvSJ9jDmXseDDxoDy6n0uMnYvhJxnH4dAlU1CI2gVOJEU1QC8OfQtXAKvHSJaTrz+3StB2LwYjLvUEFrrJaW0tCsUqcSwHloFP5iW1euRYfvYg7vQ2hfPO2ZpDnUbb2ETa34t7ZnxMfmrQavAiaSoFdmyGbB7YE1eOwSD+/bca4sF33t/ZbYACeMuC5+01qv8xoqslHupvNfL+6znXj/UIyPJY0vx2mai7xizBWltvIVNrPlpPFl8TP4gaBU4kRS1onvvea27pIVQw/qyNAmPdPaWupLSvGgGrNCLNk+5xky7kH1b8QY17xbMN/MmWBrRUA9PTZZmZ0GuR3hgRWIse0Tr3wh77z71kb88woqgTuIgaBU4kRS1olunpHayMsstK5HF+rI0CY909pa6ktK8aAas0IvGj1xjpl3Ivq14A60UGFtD5k2wNKKhHp6aLM2Ofe7RbsqzWqEd7T31/Bth793DX7rp+JVXvPXd2jOTOAg0r4FVRFLeV60ILnlat+5klrS1bABeK3vdTs7X8h8ziWBpBrUmJWMM5FpJi74VgcjIU7pbtmUFarQ+LXtMFPdQ7pO3SGyPJlCelWtraQaRsQr/X73xunuuevvb3qu1mRSK8VgIRUAc9ZfxIcf0IPBEc8wL9eAosDVkGYV1HH2JrS+/ybUqfVgZn978gFbpbtlWw2VgfVpoSNEcAZb5KnMrerABWzwNyaZtUeH/hJM3nsraTEojqMl7v7Z8wF6J4qGoj9eSVnV/0mtg9c0kBpM0LdRbKeGZt6Dlw25JJ4+m5913K15C28/e2BJrP1veEW+EIHtvGfp0RLuRfN70wfd8m9kIJqUR1GRliln52dbvPXiDLU2hlcXm1RAifQNxPIIW6m0Lj4Dx4EWA9mh6bE28KEmt/YxKUSsr0UIobvFdPls4GQx9ekjWbAuhaLIagaTeeAFvRFcPWdj1TLprdg2vJdqrCXlsISwzUUYhMh6svWjVFvD68uXasvoOPfvZG+0X8elH18yrMVg2Mq1ty2uw6lGFeaSSZytZFNQ7pjcCTeMl2lcvjxH+h0bijVGIVfIy5j729rkSxVR7ozc9vJe26556xj35R73kGUNnkUgwPegt3jEthJgWL9G+enmM8N/LwxhrzngYcx97+xxrb4b0GeG9/NZCMV51bSAHNYLeQpWR54eOMYSixUzHKLI5RpHW3rGjzw6ZL5un1Yd3DyIFZ4eudU/f+FEqgtpbqDLy/NAxhhAzMkWLn0Z46oHK9vC+Es8Oma+3zJr3Oe/vkb6G0JC+J2Es9CQdMeOZ18gWCQNlrjsvdNUQYkYmK9RYGtNkkJAnbFu6DyOQYxrvhSIGS7aW
lvHUA+HFXHPeMHRrD+p+C0gIC99mrlcPwIq1/izwbfIhxp6kIxbG6Q33jISBsnBOFmrsCfv0EgvTtUKNJVBFCVX1wIaxkOII5JjGe6FWQo03UYntn7VXdT8yDFuGRlth6NYe1P2WUGoWvs2S3DwAK9b6y/mtmRDjv9q/f9+el71ySZAMg9HyJt9YbrVCEVcXc91F0497yAvZJgOIPKXYpUZQ/noDhixAkxaclgVNZiV6eV24dVsZjGS9I5I87yJLK7c0Uha+HNE6mXtz8gFFDz/2+LJAFmBpyK23zFS0oEnrXtXquw7yiIS79pLVZwtS3ApI0YKUIgFD1t27Dv4BoLb1Fj211qUVsBQt2cbI+y5GbVOtviPhzCwkevIBRRp4aUlOsSTDUOlrBehoZAV/eKDStX607ywp5uVRzlcLfmmFI7fGkAFJMm1Zg21nWpUMb7Y0Gm/YN+C/n3uh2iJh6l44frZPY4zVwiOYhEZQSp7VYZUlOWWMwg8t8pSSkmSFi1phwC3eo4VHvTxq89W+8xYo0caQocqseGxLqyrPsnbW2rcSgqzQ6KhXIRKm7i3FLucx5lgtmoRGUEKMtZOMSeyotZmd7j0aBQvjlVqLBUulSXl2v2OWYAbgsRJpul6o9FaaMpO63kQmK8RWu4sXe5OlVbF3zWuf0tbK8jBF7RI9afSFJq8RFLLKVLVOUnaHsvzOPRpFpGS1p6CplhqspbxGCo+uRJouS8YBlicpsTRltlbeRCYrDV2bS2lraVVjlEGXa2XZQqJ2Cdl/PU/W1kOT0AhKHEHL0tt7h7K8Cp4T2QISZX5mb3vtvi77kPEBVpqxXA9tfbyWbMuHz8BOtfmxmAWv7YelWWsFP6SW5fXdM+qRyixmYaiNK5Jk5YkjmIRG8MAjx8LSynuHsrwKntPTitiKFrRs2SWYxGuVQ9fu9XI9tPUZGkHZSmNm82MeCtl3JDK0XvOWlmXZBqx16JHKUpOL9NGiiNQvY6zbeNoZrM0kNILiNdBON+sEZfftMX35VmSZ1X4IhBWDC7OkmseaXiz41nwsi7cENW1ZxqXXgAFwsHu5BRLS0rLYPjDINsuO5JHKVtSiF2hmyHvsiSOYxEFQwEu1icpoNRZ5JWG7IxFZFlmRZVb7yAbKCLoSJVYi0bxw2CxCsea9QKhb82GAsXLtJZipxhsrvMpKg0neGLBoWS9tfVjEo5wfi6hk75LVb2sebGwWUTnkPS48vOOqKw9NGs5cAybx+m4tS+8Y8Qa9RVWisfqt+Xm1kKj1uf6O2TbYWso4Acuz49H4LH5X0sbjLb/mIeudsbQpr6Ybea9XrOQZgNMB3ADgnwF8HcCFALYA+DSAO2Z/N1v9aGnIFtDI2GWrxnx2CIjG2POOkJfvaLuV5NlDq8FvFKSGlZkbUsJOElYwDflaADflnC9NKZ0EYCOAPwRwS8756pTS5QAuB3BZq5Njjx/Hez79jSUgCzXwQv3bRectqEcSiOGuo4/ghgOHcOmeHThny3IfseyPtdP6ZGOy9ru3bcIFu7bgtRfuxIufsTUEYOEBWPHyH6WLztuKz3/ryCLfbKzCy+5tmxafA5auV2uvVoLk2DXPDMRDvjPWGsv2rc9lLcsaSNLWuuZN/j42eIyk7oMgpXQqgH8N4DcAIOd8DMCxlNIrALxk1uxDAD4D4yAoXgNpZR8Scy6pB4I6atGV8QHeWH2NzzH4j5K0rnsg5VuxDSvBIyNvzognSnGMmBSgDfVuwbYzT0eP18tDQzSCZwC4H8AHUko/DeAAgDcB2JZzPgwAOefDKaUzrY42bzwJr794dxPCq0if3ds2LdMe6tOcnfayP88JGpXCUvKshPSWkoZJQjZ/7Xum+VhrJXkpny953rbFzxZZUrbFd03sPWmtvaV9yXfJWp/67+EHH3VpBIzPHklvvQtYt/4k9uyQg+BEAC8E8Ls551tTStdi4RrgopTSXgB7AWDnzp0uv3qrGIfXPxs5QaNSmMUTWM9FiEUMeuPjPfkN2nw8vMgYAU+suzfSM6LxefI8WvPrfZfqzzccOOTSCBifPZLeehdacQRDDoJDAA7lnG8tfGDhILg3pbR9pg1sB3Cf9nDOeT+A/QBw/vnnZ2sw604laay7U6svS3pZd+T6eQBNicd4Yff6YqeQEqeW4lEbiDW/iFS2+vDahtj6SPvFEJLSW7NDMD6s99PSHi1NSONT2hXK93c+9shD7NnugyDn/N2U0l0ppZ/MOd8O4GIAX5v99zoAV8/+fqJ3jJq8pbAKjXV3avU1BBdRtgMQ1joAfq9nkYi1FPeO6Z13RCp7+uixN1gFQnrIKg3X4sPiE2hrjxE7FbMreEqeDfUa/C6AD888Bt8C8HoAJwC4PqX0BgB3AnjlwDEA2Cds9K48BjHLsFcbidwpPX3UPJV7uryv1zxvP22Di89CUoprUruW3i2tbajEbHkJZP9eOwQjqWVYXpUeYuvBbELamJbNowVnPuggyDl/CcD5yk8XD+lXI+uE7UWoHULe/AZGkTulp4+aJ0AvfhopSCqJZXyyDEiAa21DJWYk/4M9K5+xeJFFYce0AbH1iGSRWjaPySMUeVCMC1m5BjIqzIqTb5Ufs2LOrai/SBk2iXEXnb/kSWYltrD8PGsCcDw9FpEX2UeLWLapzGL05Of3FMCN9hOdXzSStvAwVmTh5LMPJVkIPux+xJBhNIni1S6ifmdr7EisgsUTsDQr0SO9vFJWw9OLxk30SuVC3izG1rNjaV9aP70xKLI9w6foHYfRJDSCgmLsOaHZqSwLd1rot62MwiKdrVxy2Vc0U45l3kXmH81FaLVjefuWJtSDI+nluxBDRyq8RtaSjR1FavLkUHjnxyS9hRXh0W48eAQnUM6eRCooxsWS3aJyIhZDSTkx77j3+4tW8WtvuQPvvnlBapW7sDSslH5etGvrkuduOHBo8bc77v3+Er7k2LKvuo+at/J8Ob0LT7J/D0ke2NjsuVY7uZZljMJ3eUb2JddYtu/lpybZp+Q1spZsbA/fdTv2bvXMT+6rZ4x6HVrzXuk4gtGoRBb2+PxZdJcVZ2D1w77r4WWl48QjfbXaeSPter8fk+8x1rK3j5WYX+9znnYer8GqF0DNOeN5P/2C7sKdvUVBPcUovQUrZTtv+/p3b4HO3iKgPcVfvcVBe38fm3+LhvaxGkVeI89bz2Ld+i9n8m9wEleDYiy01KeamBonv7falSuENranjdbO277+nY1lzSeqzrbaRfse+vvY/Fs0tI8hzw+d55C19FwNJmEsLFBlmmElChPldbdYkOP1GBbkmHRleSG5698tsEtmmLNAReSYLeOV1xAZLd/VclUymG8GBy77smDdtX2y4OwsyPUIHF4USs/7XmtkvetrBqpMg3OKwkRJKCsGddaCtpJjWJBjsogoK3DZ4r18ZxXotIqAssKlFmSWxhd7xltQtgXlZRU/ZQVnGURbadcq3Mpgzhg8mOzL+7zGnxdKz/tee9ZbrvGagSprSUpPkIqn/RiwU1ZBE0s6t8awCnRaIJjMjeqFWWvx5IUe90C
1eSHILG1Mtmu5Eb1uTxacZvFcr0MpqsLeDa8m1LNv8p0oazH5gKJCWohkNHkoErrqJRbEw0JqexJmZGAUC1tmgUEs+chK147wBPigx63ApHo+ns/e5LKy9izIxxsAxoLTLJ6tJDItsMsbOt3z3kYCpyahEURCjHspcpf0QoMzaSXv+wwmuyarT+tebgVaRTQAWVTFglJnkransKxnf1p71SrkYoWGsz3oKYDCQsbH0oQ8JPtuaQQpZxMKYMXpnGc9N6/7d+/Emy7ePXpiUCGZnlv+amOWtkP5iYzp7cv7bM8chvI7ZN16x5Zjajwwvrz8jvU+rDallA7knLUkwWloBK0Q47GSN3ruktHTmEmeVlFQL0UTnHrmwBKWLFvAEKt6Icvzwsa2vCg7Nm/A5k3rm+XwvOHZQ6Qzoyj0fQ8svyfEeBIHwTv/5L377tn+86pFNFrgwbLGWoU16rbRTWdFRspYrACIhyyrv1XgJDJGWSPpLWHFZYZY1QtZnhc2tuVFOfWU9bjr6KOhIipsXVbi2so8FFb7SMGT8szDX7rp+JVXvPXdWptJHAStOIJoyqj39Pb4uC1LLvMeeO+eGnlTgXv8zHJuxbJtzcubbGT9rq2tnJe0qzBtI+IdslK8rVT1odpoi7zvBtOAtP1ka9uKI5iE1+CkE0+g1txoymgPKIgkrxWcfd8qfe0F8/QAbtTkXSfNsu2dl2V194C99sKcWem4kuTvrbZWqrr1fG97yad33yKeinqMFjDJJA6CusAJsDSBIgLVNBZpCUwaTLe3CEWEZ5Y0ZQGlFrBSC+pMS1KRY1mJW/XvGnxYixcvjLccuwdA1CIGoDq0sE0NDCvXhu0fmwdb0xaPPe/dJA6CFjBJBKppLLJAMCTsl+VvjvDMJIQlEVj8gTU3bSxLSjHJ7+ElAuOtFfwoz41RRMUjZSOaQD1vAOraRMdia8qe653HJGwELa+B5XeV5IUZa32Wd2fmF4/G+Uf86d5cAQtMxFviO0IsTyCSv8FsAYyYR8MD/MFiFCT/Vl6DxVvtibpg11YXrJnX1iHnK6M9W7Ep5Zk17TWQFlvLgsussJbFu/78tcMPqXHu0oourc5WnL/npbK8HqxMOLOye0t8R4jlCUTyN4onhZUDZ89Jj4Zl8dfWQu6r3D8rh8Ka01mnb1gs/176YLkC1rsh5114LbkLrfWT78zkvQYeqDJLohdiJyizcGsndjnNvbkDvdFgrdh0b64EkyiMJ492YmkNjAe2Tlp/bJ+iFnqWm1GPKaHnGMAtK03u9e17oMssjc2iSDyMfKblNVh1UJKcM3bsfo5Z2rm3HPQY7YaUOfeOOZUS4pKvnuesUt5jjGntidafVXK+h+8e3j38R/sZWhZ9EhpBiSNoSamoraCQ17+u9SelDosM9J7ureg/y9cdjcX3aFDRnHgrviIKKW+tf2ttLS2sHvN4RhP7gGVuem0FPVpWTy6IRp7cikKTzz4scQQtsjK/GHn961p/LPsuAo2u9Vd+j/i65RjRsTQLsuXT9/ZZeIlCytdkjdnKxmsVGdUs+NLb4S0Hz8jzLsq1Yx6nKGlr2+NZm4RGUPAIxrBoS/J6GVpWV3m37LXcs/tszQe7X0uLvGW/8GhQ0Rj6Ms9iqY5mJ/ZEVPbaEjSbD5P0LGqxF+5cm5fc+94iK5K0tZX7VPidvEZQaCViBLyl0rQxvaWu2OnOJGarIIanZJtWuMTKXx8D60H68jUNoBV30CKv5PdKa620m4XlIKMWh5ZKq+cl9763yIokbW2tWA2NJnUQaFFvjLyRW9Zz2pisTYmkk1FejG8ZJSfLhdfRcVb0oozSk5FlLAqwd508Y0Uh4zV+2FqxqMWah1YZ8VbEHdsnK7KQPd9aU2+kpBVZ6ImklHxHSsNP6iDoQc8BYqe1dU9vtZE581bcO0PPAZZHx5Xvyl9LcrQ0hTHWqdVHPVZUmnnsEq2ip+VzTxlxzz55efTMSZtXIa/U9tqfND4ipeEnZSOoybKS93oRPO3YHSuaKSbvkq2c++I18GLb9c63527K1oNRy4vSy7c3dkFbLxkpanlcmM/f215bC2/sCfMseWJUrL1eMzaCmiwrea8XwdOOndZDM8Vkv9Km0JKIrYzGyHx77qbRO2dLivXyzT4zG0m9XgCa75I1tqVFRTJZtWdaml4kc3XIXk9CI9AwC1lsuSVJLKx65gPXSpT3FCkFOH5+BMvPkoje+YyBhmtZ7i1kJq8mUZN3H5nErPdg55aNS7w6Xo2O5R5EYjQ8OQGtfe/JkWF7PXmNQMs+ZOW9LfJmeVkaR/SOVZMnYzJ6ultSis1nDDTc6P3dwhbwkHcfrTt4iSPweDfkGExziMRoWAjQkrz7rH0G0KU1AgMPgpTSmwH8JoAM4J8AvB7ARgAfBXAugG8DeFXO+YFWP085+UQ8X+SvW9ZSZg1nOeTM8tuygDPLruRN8sjGGmJdt7wdbD4eyz5ba8vCLy3g0jqt4TV4vRbMkm/hLrTeA+aJaHkmNEwI6bnQeJJzZxZ86z231lp7tyS/Hg9R90GQUjobwO8BeHbO+dGU0vUAXg3g2QBuyTlfnVK6HMDlAC5r9VWXRZfRUUNO5Zo8uPne3PmVsl/UZM0vMp9eVCSv9ZzdRbX4Cm8cAJOMFu5CK0/f0mQszY15LjSetGjFnqhUa61b0akRD9HQq8GJADaklH6ABU3gHgBXAHjJ7PcPAfgMjINAK4vO/K/y9yFS1yLGg0ebGGvslZyfHIvN0xrT65+P9Onl0cuL9lt0jS0trDVPa42871Bk/UJrzbKRPP8BeBOAhwHcD+DDs+/+RbR5wOpHK4veW0aafbbKjfdQbwnrMcaO9ukt8T5VGrKm3nLv0THkmmpry9r07sOQdweN7MMhV4PNAF4BYBeAfwHwX1JKrwk8vxfAXgDYcvYud0KMJK8xxav2Rmgo/NSQsaN9RgJTpkhD1tR7jYyO4UkE8xpxx1qHXhpyNfglAAdzzvcDQErp4wB+DsC9KaXtOefDKaXtAO7THs457wewHwCe/zMvzOxqwNQabxioV+2tDSsAmkYWy3DlBejU5sYMkZbR1DKieQx3lhE0CsDZMlZFQme1NfOEirNnWV8WWGn9LtVgu8UQqK1t2ZfyG7tOekPBvaHJ2hpj3fqT2PoOOQjuBPDilNJGAI8CuBjAbQC+D+B1AK6e/f2E1ZGWhtybLOQxomntWm4YNjYzXEUAOlnfXgniNaJ5DHeWhItKUo+U9kpGy63WWtOoK9biQQv2YmvbSsxqGSK9QWktnuUar9t42hlsbt0HQc751pTSDQC+COBxAP+IBQn/FADXp5TegIXD4pW9Y9QkT0jpRhkKc+5xw9TJQxfs2rKYICSTiLTknJbbSuPD6wbUJL22bp4kHMsIytyEUlK2pDRLjGESjmkOQ4ynljZhaStyHbR1KX2x+Vk8MMnf2m/WZ+HhzsceeYitySCvQc75KgBXia8fw4J2MCrJE3JsmHOPG0amEwNLE4WsMunl+2iSSssNaAFcRJJwLJcqcxMySelJ6LKCjpjm0OOSZT
wwDYFpK3IdtHWxgqosHpjkb+23Fdh1wskbT2VrMokQYy3pSJJMMpFhn97Q2R5wCRbuzP5aCUI9Yb5sHiwpia1ba95W2LJ3rNa8WGIMA3WJhGV7yYJFk2NaiU1aGLEEo7XAdK3QaTmWB/pfzmfy4KV79uxxu0CGgnw+GSChqzGGNWYPgOhQUM+eeUT4HZt6wU3L9/Xv0Xl59y8Cdir7xNTBS7WkI0Y9xT4ZqKU3kSiqRVhFNSMQ1l6AUUvyezQDpnVFC7u01otJPiZ1mcRcCbJ4YVJdg0lnYLRjFTrxrItMH598gZM/vuZP933hlBeGymdbpcVZIQ/v856+ojxGS2C3xj5tg174hRX8sH7X2vQWdmmtFyvfbhUZGaMwi0UWL7LISPm+FDapi8zI/Sk0VqETz7qUsUoRlDVT4ERL1YzeUwtZ9yXPnZn1JcnLo0zP9RR0kZJR/u69p2v9ybRVa428KcCt9WK/WQAcQ2xA0dR0CV9f3ksJQlu+97xTbD8t3iwwlXpdWBpyWdOWjWASB0EpeaaVb5JSVJapYhJCnrCsTJdHwlintZfH0k9dGssq08XKb3k1hVZ/rOwb459Jc6YxaOvFfmN9efegtS5srdj8SzmxR48dx60HnygrVvZXlhvzvFNsPy3eZJm21n5b+/OOq648xPAIJnEQlAInmjRgxSda1lONLK9D5A5qFatgYzBeWqAS7B7eC6dVj1nusV5PTBSyzLN2UbIkqGbBIKIU7gAADatJREFUZ/d1ZmeR5cQkoAmDt9e0LaZFyHfHgsXz2AYsrWnywCR1ZCFLBZZQ4t4y4IWYr7tQJA6BxTR4C2RovAA6qIQXrtybhi15a/Hr9W1HaGi8hxXFyIBAWmuifa7BZmVUoAVvD2DZvjG+IrB4FnjtkNiKSWgELa8Bk+TWPTt6h/bcQdk9LxonoBW9KHBacj5M22D3+siYZRzLruC1cXju4ps3rW9qFZbGYFndWaFTD3ltPZ6CKBKMlkl2b+kzy9vi0bAmrxFoUGWFrPjuKECFF9Jao578hlY/tVYD6OXULG0jmqWpaVJemG4ZqeaBWJffl3nWfyOajSe6sRVp2SIWUcgK2lgFUTwRot7SZ629GCMbcRIagcdrwKSw9w5dyHNHZRbbqF2CxQ1oZbhLWS7LniClVNSLEonQi2pP1n239q9b3oFezSei2TFvAbNHSS+BN0ZAG5vFSzAe2bpEolQnrxEUqDJguZSwpLD3Dl3II0FZFl7ULsEkjLwX1mW5JLU0otZn1g/gL0oS1Z48992av1Z5tF7Np0ezk9oSs0fJebH5t94Pb9alVwMYYheoaRIHQYEq0wAetRxwmZVXfveU/PJkrckML28OuSSZzSczy+ryZlEQS5YbH8nGs/j3/u7NuoyQnJ+Ft8B4qsdm2A0sU1BiCVgApB4etQzP1nutZUTW69KbbStpEgdB8Rpod08m+Vmmn6RI3noh7d7mtdzWZEFtA09oBACaksIq8MF4aNFQJKWWdIsULNVIzs+rjbV4YnYSlimoaQgt24iHRwYN78WdKN+PZRsoNImDoBDLdwd4QdHStoXsUz8vSZN6ltbgRbSxUGZqjWD7aRuWzM868b08tHhprbe2DlENYAhmgOTBuy5MC7N+036X71hLOwX0oqOWVsW0T6YJSK0yMlaLJnUQtO5YzMcr8QiimHA9/tiopV62s2wEHmka9cdr7a07bSRPX+N5jPsrk6D12K323t/YWC3vgOZdkOvp9YKw4ivWOxQZq0WT8BoUPIKIBdSKCvOWtbKs0BpZ2YWWpdfjO7Ys0JYfmUU/9sQPSJ4Y357+ZMSdN8JwyH4xL5DFQ7R0nTZ/ax+taE3rHWrtp9zHyXsNCkUkSE/BkpqGnJ5MknotvR7fsVdaeyMlWyg63rWx+Pb0B7RtIYyG7FcUB7KQBwWrhYOp9cHGANr4llYsgzZ+ZM0mpRG0yJvLb6HstE5U1ocky1/OfL1WDkJN0Wg/KQVkzIMnEs2Sul6e5JrXvJR4iWiBWSu+gH2u94vlDDDNzoogle3rd0wiFMl4FxlHYK0Hy/torZ9cs8njEXgOAm8uP8sok8+1cAms3HeZ581y69n3LKOwJiuLUPYts9lKhlyZH+NN65vx5+VJrnnNy8ue+xPuDExtbCsjUtu7sl8FL6Bk4zEe2DpY86/fsa8dfkjN5JTZhBLTgK2HHNuDqyHXbPJ4BJ6DwBuTbeUmeHLIvffzaO68J2PQm0XI+pC8eSLRWnkIPTzJuP86glLeZy0Jb43F4v7r+fVGSErbAONRy8GQkaLy/WXvoxfzQtsrK79m8ngEnoNAnupMmjLpW05cTw65ddqWMaK580x6e3LnvX1I3ph2UvPGtAj5u5en8rmsecnrr/v1SnhrLLlX2vy8+1FI9mlpHXK+teYj+SzvL3sf2Xvd0rKsd6fw18IjmJSxsEVRn3QrEs8q9DmG/7uHx56xrfae/rz+9eh6WP1qfY89/x6yeGJjtnjxvo8WTyzK1RrfJIZq+mT+V1CMxywOGu1La7+SBVS9NHQsbwHQ+jerQOdqzn81CsiuxhhD5s2exbr1X87k3+AJ8aNj5ai4O244cOhJ70trX757983fWPLbmHz28DXk+VZ/bL5j8xShCP9jjbESNPR9jDzPnl2RkmcrQbVqw8IlreQcFpIZLWNVP8tCTb1hvXLMSChodCzZZ0T1Zsk3rE9v4kvNG9AuMCuf8YTWDqVISHnrndPm1BsKzt611vPWs29514PfY89O6iCoAyKsQBlvcob3OU/ZKhlqahEbMxLoER1L9hkBTbHKdGntoqHWQLvArDUfT6pvlLwh5dF3zvqtNZZsPwZwzlt++INj7NlJHATHHj++JInj0j07lp1mWhpmJNFFJnd4DD29qZ/yOSlhPdIhmkAyVHPQ+rJSu71JV3V6bkmuYpqalSzmld6edZNtLa2KpSF7kuXkfBl/Q7QTxkd5ZqXKoo9GBapMnrjWqRtJdOkJj+1N/dSei4J+RiXJUM2B9RWBgWPt6vRctq/esN+xEsK0tpZEZWnI3mQ5D39DtBMr9L1lI5hEHEELzryQN/hDkhUs0yILzttTdEILOPHAgfeWTfMGv1y6ZznktjcByIKJk+08+2rBdntDyyP77AlGas27Fa7O9oEFsnnn2QqsY+9nWZOv3njdPVe9/W3v1dZiEhpBC868kCXpLRDTKMyYNqZMEOkBEfFqBtbp7r2vskQoYDnkdosfbV2s5z3waF7Ybi+0V2SfGQipd96FF09KsKVdeufZ0mzZ+1nWZM2URdfSVS0JYIVmWhDcLbLCd6WEZ+mtDByT8VzzzUKE2Xw8UNsy9JaFI3vDfb0p3x7A2KhWwVKDW8lVsm+mfbC/LKGr5s1KNGPaJisP7ykOK4FhJbR7K8TY1AhSSu8H8HIA9+Wcnzv7bguAjwI4F8C3Abwq5/zA7LcrALwBwA8B/F7O+VPWGIUi1mXvXdC633n5kdK0Bq4Alp7AzEsgoa9aZIFVeu+rLQu/BfoRuc8OTfn2WMu99govTJoFvML+Mqgzre9o8Rj5b
kQ1QO39LH0O9Rp8EMB7Afx19d3lAG7JOV+dUrp89vmylNKzAbwawHMAnAXgv6WUnpVz/qFjnGbIraSoPzlqvWX81M8ycEvpmbA8FhG+C7H5W+GvtecDWOrTb4XBRuMFWrEMkXlaxPbVC3aq9SU9GHJfLW9KPX+5hpp3pIyh8Wu9O/V7wKDkPGtqHgQ55/+ZUjpXfP0KAC+Z/f+HAHwGwGWz7/8u5/wYgIMppW8CeBGA/2VyguWnZEtqRv3JPaANFtAGA7eUp7q3iEWEB6vQB+vHU2at9UyPxd7j0eiFNWP7OtQmVNsrWOkzSdr8LfBdCyrfenfq9wDoA30B+o2F23LOhwEg53w4pXTm7PuzAXy+ando9l03RaWM14/cI4GYJsAkaTSqTD7TikaM8u/x6TPyal9D96Ruy6L4PM8BS33+3j6Hvmt1O/Yb0wi8CV8tqX/4wUeXgOKyyFCNxvYaJOW7rDZMaS+AvQCwc+dO2mFUykRtBxGy7vpDoKLkM1Y0YpR/j0+fkVf7GrondduhUGa1DQXwScqh71orMrblHWlB60UAZG84cGiZV8q7hr0Hwb0ppe0zbWA7gPtm3x8CcE7VbgeAe7QOcs77AewHgPPPP189LIB4JFmPLcDqU0rTGoa8RdE7dmmr3QnZXdMr3bTouKj2FJmH9jyL2qznoUlOz5hSa9HGlFJXzt/SfFpSXvIY1Y7YO8ciLrV8ELl2ss1KRBZ+EsDrAFw9+/uJ6vuPpJSuwYKxcDeAL3SOASAeSTamVC7PSGkK6AVLGe+RYhTsTuj13bM5aN4GwBf33zMP7XmNv1apukiRFCsmQLvbyzXyxqRIXi2PTKsP9r0VccliUlperUHZhymlv8WCYfCMlNIhAFdh4QC4PqX0BgB3AnglAOScv5pSuh7A1wA8DuC3vR4DL0Uto0O8BbX0LZ+1k3ZMe4T3TmhJTI+3oS6qEpG6dc6EvN96NQWvJ8W7dp521rtj9WF5ZHr2V35vvXP197JIjiS5xoOyD3POv05+upi0/yMAf2T120vRO+QQb4F20msnbZSHFkXuhC2J6fE2FAkSlbpl3tFiMlpflifFu3aR7DxvRKh3jCH7K7+33rmirWhFcqQWI9e4FUcwqchCjVhklbe8twX/3SIW729FgXlj9j1kRRJ6AFG1/ur5R6HTZV5ApNR6i48W9cLZt9aS5YxY+RpeXjzzsfJUCsk8gvLvoH4vjmeo/1ZKdGMLznwSuQYtsiSi9/nyXOT0ZndGKwqs5nEoWZGEY2S39VjFPRZvz7y8FM050H5nOR+Wh8a6vzNePPORz1qaXOG5POfxjhQtY/LZhy2NwIIx9xYjYdDUrX6isOQss0xSqyCGN6OR9WnlXrSIaV/RHIRIboGVRWlpgkxrae2BlfFoZZ1q0tibvyJ5sNbWylOped28af2Sfysyz2FQrsFqk3WXtCSgp2wV68d7J4ziFmgSBYiVRY+ug4eYpIvmIERyC6wYeksTjFjyC1kZj1bWab0uvWhJjE/LY9WKXSlxBHWka91Xy0aQcqYu/CeNUkr3A/g+gOVWzXXrT1q38bQzfvjIg9+DNpHo76y91c8TdMYyPr1jiPbHH3vkoRNO3njqDx9ZsOYu+67NR2uepy7jMdDHMh4659ecw7r1J63bcOrO48ce/a46X8aLdyz/flp0Btatf8i1Lj1kra1vHRbeSXufnp5zfprGxiQOAgBIKd2Wcz5/tfmwaC3wuRZ4BNYGnz8uPE4KznxOc5rT6tD8IJjTnOY0qYNArck2QVoLfK4FHoG1weePBY+TsRHMaU5zWj2akkYwpznNaZVofhDMaU5zmh8Ec5rTnOYHwZzmNCfMD4I5zWlOAP4/zKWfUvm6p98AAAAASUVORK5CYII=\n",
"text/plain": [
""
]
@@ -232,8 +232,8 @@
"tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
- " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n",
- " 1, 1, 1], dtype=torch.int16)\n"
+ " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n",
+ " 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=torch.int16)\n"
]
}
],
@@ -274,7 +274,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "{'nb_clusters': 5, 'size_min': 5, 'size_max': 35, 'p': 0.5, 'q': 0.2, 'p_pattern': 0.5, 'q_pattern': 0.5, 'vocab_size': 3, 'size_subgraph': 20}\n",
+ "{'nb_clusters': 5, 'size_min': 5, 'size_max': 35, 'p': 0.5, 'q': 0.35, 'p_pattern': 0.5, 'q_pattern': 0.5, 'vocab_size': 3, 'size_subgraph_min': 5, 'size_subgraph_max': 35}\n",
"pattern: 0\n",
"pattern: 1\n",
"pattern: 2\n",
@@ -380,7 +380,7 @@
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAEICAYAAACktLTqAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAAS/UlEQVR4nO3df5BdZ33f8fcncjC/YizXkiMktTIdQWN3BkNVlZQ2IYjGbkiR/6hn1DYZNVVGMxk3kzBpsdT0R9KpWpF00nSmIRkNkCgJwaNQqNWkSRFKXaYzYLEGA5ZtjQUy0iJhLc6QBNJRkPPtH/e4XEv3aq927929++z7NaM55zznObvfI+1+9tFzfmyqCklSW75tuQuQJI2f4S5JDTLcJalBhrskNchwl6QGGe6S1CDDXboOSX4lyb9a7jqk+cT73LWaJHkG+NGq+thy1yJNkiN3qZPkhuWuQRoXw12rRpLfAP4i8N+TfD3Ju5JUkj1JzgJ/0PX77SRfSfJHST6e5M6+j/FrSf5dt/6WJLNJfirJxSQXkvzIspycdAXDXatGVf0wcBb4e1X1SuBIt+t7ge8C7u62fw/YCqwHPg184Bof9juBVwEbgT3ALyVZO/7qpetjuEvwM1X1jar6vwBV9f6q+pOqugT8DPD6JK8acuw3gX9bVd+sqv8BfB143ZJULV2D4S7BuRdWkqxJcjDJF5L8MfBMt+vWIcc+V1WX+7b/FHjlZMqURme4a7UZdHtYf9s/BHYCb6M33bKla89ky5LGy3DXavMs8Jpr7P8O4BLwHPBy4N8vRVHSuBnuWm3+A/Avk3wN+PsD9v868CXgy8ATwCeXsDZpbHyISZIa5MhdkhpkuEtSgwx3SWqQ4S5JDZqKFyXdeuuttWXLluUuQ5JWlEcfffSrVbVu0L6pCPctW7YwMzOz3GVI0oqS5EvD9jktI0kNMtwlqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDZqKJ1SlabZl3+8ObH/m4NuXuBJpdI7cJalBhrskNchpGWmBnK7RNHPkLkkNMtwlqUGGuyQ1aKRwT3Jzkg8leSrJk0m+O8ktSY4lebpbru3rvz/J6SSnktw9ufIlSYOMOnL/z8DvV9VfAV4PPAnsA45X1VbgeLdNkjuAXcCdwD3Ae5KsGXfhkqTh5g33JDcB3wO8D6Cq/qyqvgbsBA533Q4D93brO4EHq+pSVZ0BTgPbx124JGm4UUburwHmgF9N8pkk703yCuC2qroA0C3Xd/03Auf6jp/t2iRJS2SUcL8BeCPwy1X1BuAbdFMwQ2RAW13VKdmbZCbJzNzc3EjFSpJGM0q4zwKzVfVIt/0hemH/bJINAN3yYl//zX3HbwLOX/lBq+pQVW2rqm3r1q1baP2SpAHmDfeq+gpwLsnruqYdwBPAUWB317YbeKhbPwrsSnJjktuBrcCJsVYtSbqmUV8/8OPAB5K8BPgi8CP0fjAcSbIHOAvcB1BVJ5McofcD4DJwf1U9P/bKJUlDjRTuVfUYsG3Arh1D+h8ADiyiLmlifCeMVgOfUJWkBhnuktQgw12SGmS4S1KD/GUdWtG8OCoNZrhLS8QfRFpKTstIUoMMd0lqkOEuSQ0y3CWpQYa7JDXIcJekBhnuktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWrQSOGe5Jkkn0/yWJKZru2WJMeSPN0t1/b135/kdJJTSe6eVPGSpMGuZ+T+fVV1V1Vt67b3AceraitwvNsmyR3ALuBO4B7gPUnWjLFmSdI8FjMtsxM43K0fBu7ta3+wqi5V1RngNLB9EZ9HknSdRg33Aj6a5NEke7u226rqAkC3XN+1bwTO9R0727W9SJK9SWaSzMzNzS2seknSQDeM2O/NVXU+yXrgWJKnrtE3A9rqqoaqQ8AhgG3btl21X5K0cCON3KvqfLe8CHyE3jTLs0k2AHTLi133WWBz3+GbgPPjKliSNL95wz3JK5J8xwvrwPcDjwNHgd1dt93AQ936UWBXkhuT3A5sBU6Mu3BJ0nCjTMvcBnwkyQv9f6uqfj/Jp4AjSfYAZ4H7AKrqZJIjwBPAZeD+qnp+ItVLkgaaN9yr6ovA6we0PwfsGHLMAeDAoquTJC3IqBdUJS2xLft+d2D7MwffvsSVaCXy9QOS1CDDXZIaZLhLUoMMd0lqkOEuSQ0y3CWpQYa7JDXIcJekBhnuktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQvyBbU8VfCi2NhyN3SWqQ4S5JDRo53JOsSfKZJL/Tbd+S5FiSp7vl2r6++5OcTnIqyd2TKFySNNz1zLn/BPAkcFO3vQ84XlUHk+zrth9IcgewC7gTeDXwsSSvrarnx1i3pCt4vUL9Rhq5J9kEvB14b1/zTuBwt34YuLev/cGqulRVZ4DTwPbxlCtJGsWo0zK/CLwL+PO+ttuq6gJAt1zftW8EzvX1m+3aXiTJ3iQzSWbm5uauu3BJ0nDzhnuSHwQuVtWjI37MDGirqxqqDlXVtqratm7duhE/tCRpFKPMub8ZeEeSHwBeCtyU5DeBZ5NsqKoLSTYAF7v+s8DmvuM3AefHWbQ0CcPmrKWVaN6Re1Xtr6pNVbWF3oXSP6iqHwKOAru7bruBh7r1o8CuJDcmuR3YCpwYe+WSpKEW84TqQeBIkj3AWeA+gKo6meQI8ARwGbjfO2UkaWldV7hX1cPAw936c8COIf0OAAcWWZskaYF8QlWSGmS4S1KDDHdJapDhLkkNMtwlqUH+sg41yQeStNo5cpekBjlyl1YxXxPcLkfuktQgw12SGmS4S1KDnHPXRI1rTte7X6TrY7hLY+YPIk0Dw11aZv4w0CQ45y5JDTLcJalBhrskNchwl6QGGe6S1CDDXZIaZLhLUoMMd0lqkOEuSQ2aN9yTvDTJiSSfTXIyyc927bckOZbk6W65tu+Y/UlOJzmV5O5JnoAk6WqjjNwvAW+tqtcDdwH3JHkTsA84XlVbgePdNknuAHYBdwL3AO9JsmYSxUuSBpv33TJVVcDXu81v7/4UsBN4S9d+GHgYeKBrf7CqLgFnkpwGtgOfGGfh0mrlu2g0ipHm3JOsSfIYcBE4VlWPALdV1QWAbrm+674RONd3+GzXJklaIiOFe1U9X1V3AZuA7Un+6jW6Z9CHuKpTsjfJTJKZubm50aqVJI3kuu6Wqaqv0Zt+uQd4NskGgG55ses2C2zuO2wTcH7AxzpUVduqatu6desWULokaZhR7pZZl+Tmbv1lwNuAp4CjwO6u227goW79KLAryY1Jbge2AifGXbgkabhRflnHBuBwd8fLtwFHqup3knwCOJJkD3AWuA+gqk4mOQI8AVwG7q+q5ydTvpbauH5tnqTJGuVumc8BbxjQ/hywY8gxB4ADi65OkrQgPqEqSQ0y3CWpQYa7JDXIcJekBo1yt4w
0dj5CL02WI3dJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSg3yISWqcD4ytTo7cJalBhrskNchwl6QGGe6S1CDDXZIaZLhLUoO8FVLSyIbdVvnMwbcvcSWajyN3SWqQ4S5JDTLcJalB84Z7ks1J/leSJ5OcTPITXfstSY4lebpbru07Zn+S00lOJbl7kicgSbraKCP3y8BPVdV3AW8C7k9yB7APOF5VW4Hj3Tbdvl3AncA9wHuSrJlE8ZKkwea9W6aqLgAXuvU/SfIksBHYCbyl63YYeBh4oGt/sKouAWeSnAa2A58Yd/GaHr6cSpou1zXnnmQL8AbgEeC2Lvhf+AGwvuu2ETjXd9hs13blx9qbZCbJzNzc3PVXLkkaauRwT/JK4L8CP1lVf3ytrgPa6qqGqkNVta2qtq1bt27UMiRJIxgp3JN8O71g/0BVfbhrfjbJhm7/BuBi1z4LbO47fBNwfjzlSpJGMcrdMgHeBzxZVb/Qt+sosLtb3w081Ne+K8mNSW4HtgInxleyJGk+o7x+4M3ADwOfT/JY1/YvgIPAkSR7gLPAfQBVdTLJEeAJenfa3F9Vz4+9ckkT4wXylW+Uu2X+D4Pn0QF2DDnmAHBgEXVJkhbBJ1QlqUGGuyQ1yFf+rnK+wlVqkyN3SWqQ4S5JDTLcJalBhrskNchwl6QGGe6S1CDDXZIaZLhLUoMMd0lqkE+oaiDfCiitbI7cJalBjtwlTYzvLlo+jtwlqUGGuyQ1yHCXpAYZ7pLUIC+oNsYLWJLAcJc0Bj4XMX2clpGkBhnuktQgw12SGjRvuCd5f5KLSR7va7slybEkT3fLtX379ic5neRUkrsnVbgkabhRLqj+GvBfgF/va9sHHK+qg0n2ddsPJLkD2AXcCbwa+FiS11bV8+MtW1KLvNtrfOYduVfVx4E/vKJ5J3C4Wz8M3NvX/mBVXaqqM8BpYPuYapUkjWihc+63VdUFgG65vmvfCJzr6zfbtV0lyd4kM0lm5ubmFliGJGmQcV9QzYC2GtSxqg5V1baq2rZu3boxlyFJq9tCw/3ZJBsAuuXFrn0W2NzXbxNwfuHlSZIWYqFPqB4FdgMHu+VDfe2/leQX6F1Q3QqcWGyRktriE62TN2+4J/kg8Bbg1iSzwL+hF+pHkuwBzgL3AVTVySRHgCeAy8D93ikjSUtv3nCvqn8wZNeOIf0PAAcWU5TGz5GStLr4hKokNchwl6QGGe6S1CDf5z7FfBRb0kI5cpekBjlyX4G880XSfAx3SSuWU5fDOS0jSQ0y3CWpQYa7JDXIcJekBnlBdQl58UdaGO8Qu36O3CWpQYa7JDXIaZkp4H85JY2bI3dJapAjd0nN8eYFw13SKrKaQt9pGUlqkOEuSQ1yWmYCvPtF0nJz5C5JDXLkvgiO0CVNK8N9BIa41LaFfI9P+x02E5uWSXJPklNJTifZN6nPI0m62kRG7knWAL8E/B1gFvhUkqNV9cQkPt+4OEKXtFjTci/9pKZltgOnq+qLAEkeBHYCEwl3Q1nSUrve3Fnq0J9UuG8EzvVtzwJ/o79Dkr3A3m7z60lOTaiWUdwKfHUZP/84rPRzsP7lt9LPYUXWn3f//9WF1P+Xhu2YVLhnQFu9aKPqEHBoQp//uiSZqapty13HYqz0c7D+5bfSz8H6X2xSF1Rngc1925uA8xP6XJKkK0wq3D8FbE1ye5KXALuAoxP6XJKkK0xkWqaqLif5p8D/BNYA76+qk5P4XGMyFdNDi7TSz8H6l99KPwfr75Oqmr+XJGlF8d0yktQgw12SGrQqwz3JzUk+lOSpJE8m+e4ktyQ5luTpbrl2uescJsk7k5xM8niSDyZ56bTXn+T9SS4mebyvbWjNSfZ3r644leTu5an6W4bU//Pd19Dnknwkyc19+6a+/r59/yxJJbm1r22q6ofh55Dkx7s6Tyb5ub72qTqHIV9DdyX5ZJLHkswk2d63b3H1V9Wq+wMcBn60W38JcDPwc8C+rm0f8O7lrnNI7RuBM8DLuu0jwD+e9vqB7wHeCDze1zawZuAO4LPAjcDtwBeANVNY//cDN3Tr715p9Xftm+nd+PAl4NZprf8a/wbfB3wMuLHbXj+t5zCk/o8Cf7db/wHg4XHVv+pG7kluoveX/D6AqvqzqvoavdcjHO66HQbuXZ4KR3ID8LIkNwAvp/cMwVTXX1UfB/7wiuZhNe8EHqyqS1V1BjhN75UWy2ZQ/VX10aq63G1+kt7zHLBC6u/8J+BdvPghw6mrH4aew48BB6vqUtfnYtc+decwpP4CburWX8W3ngdadP2rLtyB1wBzwK8m+UyS9yZ5BXBbVV0A6Jbrl7PIYarqy8B/BM4CF4A/qqqPskLqv8Kwmge9vmLjEtd2vf4J8Hvd+oqoP8k7gC9X1Wev2LUi6u+8FvjbSR5J8r+T/PWufaWcw08CP5/kHL3v6/1d+6LrX43hfgO9/xr9clW9AfgGvSmBFaGbl95J779qrwZekeSHlreqsZv39RXTJMlPA5eBD7zQNKDbVNWf5OXATwP/etDuAW1TVX+fG4C1wJuAfw4cSRJWzjn8GPDOqtoMvJNuRoEx1L8aw30WmK2qR7rtD9EL+2eTbADolheHHL/c3gacqaq5qvom8GHgb7Jy6u83rOYV8/qKJLuBHwT+UXWTpayM+v8yvQHCZ5M8Q6/GTyf5TlZG/S+YBT5cPSeAP6f3Aq6Vcg676X0PA/w235p6WXT9qy7cq+orwLkkr+uadtB7FfFRen/RdMuHlqG8UZwF3pTk5d0IZQfwJCun/n7Daj4K7EpyY5Lbga3AiWWo75qS3AM8ALyjqv60b9fU119Vn6+q9VW1paq20AuTN3bfH1Nff5//BrwVIMlr6d0g8VVWzjmcB763W38r8HS3vvj6l/Pq8TJetb4LmAE+R++LYy3wF4Dj3V/uceCW5a7zGvX/LPAU8DjwG/SuqE91/cAH6V0j+Ca9INlzrZrpTRl8AThFdzfBFNZ/mt686GPdn19ZSfVfsf8ZurtlprH+a/wbvAT4ze574dPAW6f1HIbU/7eAR+ndGfMI8NfGVb+vH5CkBq26aRlJWg0Md0lqkOEuSQ0y3CWpQYa7JDXIcJekBhnuktSg/wft2E8DChbyNwAAAABJRU5ErkJggg==\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAEICAYAAACktLTqAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAATGklEQVR4nO3db5Bdd33f8fcncjD/YpAryRGSqExHpLE7g6GqSkqbEERjN6TID+oZtU1GTZXRTMfNJDQtSE3/JJ2qFUmHNA/iZDRAoiaAR6FQq2mTIpS6TGfAYg0GLBuNBTLSImEtzjgJpKNYzrcP7vHkWrp396537967P71fM5pzzu+eu/tZafXZ3557zrmpKiRJbfmOSQeQJC0/y12SGmS5S1KDLHdJapDlLkkNstwlqUGWu7QISX4tyb+edA5pIfE8d11PkjwJ/ERVfXLSWaRxcuYudZLcMOkM0nKx3HXdSPKbwGuB/57kW0nenaSS7E1yDvj9br/fTvKNJH+Y5FNJbu/7GL+R5N93629NMpvkZ5JcSnIxyY9P5IuTrmK567pRVT8GnAP+blW9EjjaPfQDwPcCd3bbvwtsAzYAnwM+NM+H/W7gVcAmYC/wK0nWLn96aXEsdwl+rqq+XVX/D6CqPlhVf1xVl4GfA96Q5FVDnvss8O+q6tmq+p/At4DvWZHU0jwsdwnOP7+SZE2SQ0m+kuSPgCe7h9YNee7TVXWlb/tPgFeOJ6Y0Ostd15tBp4f1j/0DYBfwdnqHW7Z24xlvLGl5We663jwFvG6ex78LuAw8Dbwc+A8rEUpabpa7rjf/EfhXSZ4B/t6Ax/8L8DXg68BjwGdWMJu0bLyISZIa5MxdkhpkuUtSgyx3SWqQ5S5JDZqKGyWtW7eutm7dOukYkrSqPPzww9+sqvWDHpuKct+6dSszMzOTjiFJq0qSrw17zMMyktQgy12SGjRSuSd5dZKPJvlykseTfF+Sm5McT/JEt1zbt/+BJGeSnE5y53wfW5K0/Eaduf8y8HtV9ZeBNwCPA/uBE1W1DTjRbZPkNmA3cDtwF3BfkjXLHVySNNyC5Z7kJuD7gQ8AVNWfVtUz9O6cd6Tb7Qhwd7e+C7i/qi5X1VngDLBjuYNLkoYbZeb+OmAO+PUkn0/y/iSvAG6pqosA3XJDt/8m+u6PDcx2Yy+QZF+SmSQzc3NzS/oiJEkvNEq53wC8CfjVqnoj8G26QzBDDLrv9TV3J6uqw1W1vaq2r18/8DRNSdKLNEq5zwKzVfVQt/1RemX/VJKNAN3yUt/+W/qevxm4sDxxJUmjWLDcq+obwPkkz78v5E5697k+BuzpxvYAD3Trx4DdSW5Mciu9Nxo+uaypJUnzGvUK1Z8EPpTkJcBXgR+n94PhaJK99N5R/h6AqjqV5Ci9HwBXgHur6rllTy5N2Nb9/2Pg+JOH3rHCSaRrjVTuVfUIsH3AQzuH7H8QOLiEXJKkJfAKVUlqkOUuSQ2y3CWpQZa7JDXIcpekBlnuktQgy12SGmS5S1KDLHdJatBUvEG2NM2G3WZAmmbO3CWpQZa7JDXIcpekBlnuktQgy12SGmS5S1KDLHdJapDnuatJ852b7tvg6XrgzF2SGmS5S1KDPCwjLbNhh4Q8HKSV5MxdkhpkuUtSgyx3SWqQ5S5JDbLcJalBI5V7kieTfCnJI0lmurGbkxxP8kS3XNu3/4EkZ5KcTnLnuMJLkgZbzMz9B6vqjqra3m3vB05U1TbgRLdNktuA3cDtwF3AfUnWLGNmSdIClnKe+y7grd36EeBB4D3d+P1VdRk4m+QMsAP49BI+l7RsPA9d14NRZ+4FfCLJw0n2dWO3VNVFgG65oRvfBJzve+5sN/YCSfYlmUkyMzc39+LSS5IGGnXm/paqupBkA3A8yZfn2TcDxuqagarDwGGA7du3X/O4JOnFG2nmXlUXuuUl4OP0DrM8lWQjQLe81O0+C2zpe/pm4MJyBZYkLWzBck/yiiTf9fw68EPAo8AxYE+32x7ggW79GLA7yY1JbgW2ASeXO7gkabhRDsvcAnw8yfP7f7iqfi/JZ4GjSfYC54B7AKrqVJKjwGPAFeDeqnpuLOklSQMtWO5V9VXgDQPGnwZ2DnnOQeDgktNJkl4Ur1CVpAZZ7pLUIMtdkhpkuUtSgyx3SWqQ76EqTZj3utE4OHOXpAY5c9eq4OxWWhxn7pLUIMtdkhpkuUtSgyx3SWqQ5S5JDfJsGWlKeYaQlsKZuyQ1yJm7tMo4o9conLlLUoMsd0lqkOUuSQ2y3CWpQZa7JDXIcpekBlnuktQgy12SGmS5S1KDLHdJatDI5Z5kTZLPJ/mdbvvmJMeTPNEt1/bteyDJmSSnk9w5juCSpOEWc2+ZnwIeB27qtvcDJ6rqUJL93fZ7ktwG7AZuB14DfDLJ66vquWXMrVXO+6NI4zVSuSfZDLwDOAj8s254F/DWbv0I8CDwnm78/qq6DJxNcgbYAXx62VKrWcNKX9LijHpY5j8D7wb+rG/slqq6CNAtN3Tjm4DzffvNdmMvkGRfkpkkM3Nzc4sOLkkabsFyT/IjwKWqenjEj5kBY3XNQNXhqtpeVdvXr18/4oeWJI1ilMMybwHemeSHgZcCNyX5LeCpJBur6mKSjcClbv9ZYEvf8zcDF5YztCRpfgvO3KvqQFVtrqqt9F4o/f2q+lHgGLCn220P8EC3fgzYneTGJLcC24CTy55ckjTUUt6J6RBwNMle4BxwD0BVnUpyFHgMuALc65kykrSyFlXuVfUgvbNiqKqngZ1D9jtI78waSdIEeIWqJDXIcpekBlnuktQgy12SGmS5S1KDLHdJapDlLkkNstwlqUGWuyQ1yHKXpAZZ7pLUIMtdkhpkuUtSg5Zyy19Ji+D7w2olWe5Sx/JVSzwsI0kNstwlqUGWuyQ1yHKXpAZZ7pLUIM+W0armGS4Lm+/v6MlD71jBJFpJztwlqUGWuyQ1yHKXpAZZ7pLUIMtdkhq0YLkneWmSk0m+kORUkp/vxm9OcjzJE91ybd9zDiQ5k+R0kjvH+QVIkq41ysz9MvC2qnoDcAdwV5I3A/uBE1W1DTjRbZPkNmA3cDtwF3BfkjXjCC9JGmzBcq+eb3Wb39n9KWAXcKQbPwLc3a3vAu6vqstVdRY4A+xY1tSSpHmNdMw9yZokjwCXgONV9RBwS1VdBOiWG7rdNwHn+54+241d/TH3JZlJMjM3N7eUr0GSdJWRyr2qnquqO4DNwI4kf2We3TPoQwz4mIerantVbV+/fv1oaSVJI1nU2TJV9QzwIL1j6U8l2QjQLS91u80CW/qethm4sOSkkqSRLXhvmSTrgWer6pkkLwPeDrwXOAbsAQ51ywe6pxwDPpzkfcBrgG3AyTFk1xQZdv8S710iTcYoNw7bCBzpznj5DuBoVf1Okk8DR5PsBc4B9wBU1akkR4HHgCvAvVX13HjiS5IGWbDcq+qLwBsHjD8N7BzynIPAwSWnkyS9KF6hKkkN8n7uWhTvny6tDpa7dB3zhfB2eVhGkhp
kuUtSgyx3SWqQ5S5JDbLcJalBlrskNchyl6QGeZ67xsqLnqTJcOYuSQ2y3CWpQZa7JDXIcpekBlnuktQgy12SGmS5S1KDLHdJapAXMUmN8IIx9XPmLkkNstwlqUGWuyQ1yHKXpAZZ7pLUIMtdkhq0YLkn2ZLkfyd5PMmpJD/Vjd+c5HiSJ7rl2r7nHEhyJsnpJHeO8wuQJF1rlJn7FeBnqup7gTcD9ya5DdgPnKiqbcCJbpvusd3A7cBdwH1J1owjvCRpsAXLvaouVtXnuvU/Bh4HNgG7gCPdbkeAu7v1XcD9VXW5qs4CZ4Adyx1ckjTcoq5QTbIVeCPwEHBLVV2E3g+AJBu63TYBn+l72mw3dvXH2gfsA3jta1+72NySJmDYVbBPHnrHCifRQkZ+QTXJK4H/Cvx0Vf3RfLsOGKtrBqoOV9X2qtq+fv36UWNIkkYwUrkn+U56xf6hqvpYN/xUko3d4xuBS934LLCl7+mbgQvLE1eSNIoFD8skCfAB4PGqel/fQ8eAPcChbvlA3/iHk7wPeA2wDTi5nKElTRcP10yfUY65vwX4MeBLSR7pxv4lvVI/mmQvcA64B6CqTiU5CjxG70ybe6vquWVPLkkaasFyr6r/y+Dj6AA7hzznIHBwCbkkSUvgFaqS1CDLXZIaZLlLUoMsd0lqkO+hKukavh/r6ufMXZIaZLlLUoMsd0lqkMfcr3NeNi61yXLXQL6gJq1uHpaRpAZZ7pLUIA/LSBobX9OZHGfuktQgZ+7XCV8gla4vztwlqUGWuyQ1yHKXpAZZ7pLUIMtdkhpkuUtSgzwVsjGe8igJnLlLUpMsd0lqkOUuSQ2y3CWpQQuWe5IPJrmU5NG+sZuTHE/yRLdc2/fYgSRnkpxOcue4gkuShhtl5v4bwF1Xje0HTlTVNuBEt02S24DdwO3dc+5LsmbZ0kqSRrJguVfVp4A/uGp4F3CkWz8C3N03fn9VXa6qs8AZYMcyZZUkjejFHnO/paouAnTLDd34JuB8336z3ZgkaQUt9wuqGTBWA3dM9iWZSTIzNze3zDEk6fr2Yq9QfSrJxqq6mGQjcKkbnwW29O23Gbgw6ANU1WHgMMD27dsH/gCQdH3xbfmWz4uduR8D9nTre4AH+sZ3J7kxya3ANuDk0iJKkhZrwZl7ko8AbwXWJZkF/i1wCDiaZC9wDrgHoKpOJTkKPAZcAe6tqufGlF2SNMSC5V5Vf3/IQzuH7H8QOLiUUJKkpfEKVUlqkOUuSQ2y3CWpQb5Zh6Sp5ymSi2e5S1pxvmPY+HlYRpIa5Mx9Cix2FuOvopIW4sxdkhrkzH0V8nilpIU4c5ekBlnuktQgy12SGmS5S1KDLHdJapDlLkkNstwlqUGe5y6pOd5ozJm7JDXJmbukVcurtYez3MfAXwklTZqHZSSpQc7cV5C/QkpaKZb7CDzMIrXhevq/7GEZSWqQM/cl8DCLpGlluUu67s03UVuth2wsd0max2o9Tj+2ck9yF/DLwBrg/VV1aFyfa7FW6z+WJI1qLOWeZA3wK8DfBmaBzyY5VlWPjePzSdJKm/ZJ4rhm7juAM1X1VYAk9wO7gLGU+3K9sOkLpJLGZaV/GIyr3DcB5/u2Z4G/3r9Dkn3Avm7zW0lOjynLUqwDvjnpEIu02jKvtrxg5pWy2jKvA76Z9y7uSYvd/yp/cdgD4yr3DBirF2xUHQYOj+nzL4skM1W1fdI5FmO1ZV5tecHMK2W1ZZ62vOO6iGkW2NK3vRm4MKbPJUm6yrjK/bPAtiS3JnkJsBs4NqbPJUm6ylgOy1TVlST/FPhf9E6F/GBVnRrH5xqzqT5sNMRqy7za8oKZV8pqyzxVeVNVC+8lSVpVvHGYJDXIcpekBlnunSSvTvLRJF9O8niS70tyc5LjSZ7olmsnnbNfknclOZXk0SQfSfLSacuc5INJLiV5tG9saMYkB5KcSXI6yZ1TlPkXu++NLyb5eJJXT3vmvsf+eZJKsq5vbKKZh+VN8pNdplNJfmFa8nYZBn1f3JHkM0keSTKTZMfUZK4q//RedzgC/ES3/hLg1cAvAPu7sf3Aeyedsy/vJuAs8LJu+yjwj6YtM/D9wJuAR/vGBmYEbgO+ANwI3Ap8BVgzJZl/CLihW3/vasjcjW+hd2LD14B105J5yN/xDwKfBG7stjdMS955Mn8C+Dvd+g8DD05LZmfuQJKb6P3DfQCgqv60qp6hd8uEI91uR4C7J5NwqBuAlyW5AXg5vWsJpipzVX0K+IOrhodl3AXcX1WXq+oscIberSxW1KDMVfWJqrrSbX6G3rUbMMWZO78EvJsXXkQ48cxD8v4T4FBVXe72udSNTzxvl2dQ5gJu6tZfxZ9fzzPxzJZ7z+uAOeDXk3w+yfuTvAK4paouAnTLDZMM2a+qvg78J+AccBH4w6r6BFOcuc+wjINuW7FphbON4h8Dv9utT23mJO8Evl5VX7jqoWnN/HrgbyV5KMn/SfLXuvFpzQvw08AvJjlP7//jgW584pkt954b6P269atV9Ubg2/QOF0yt7jj1Lnq/8r0GeEWSH51sqiVb8LYVk5bkZ4ErwIeeHxqw28QzJ3k58LPAvxn08ICxiWem9/9wLfBm4F8AR5OE6c0Lvd823lVVW4B30f32zxRkttx7ZoHZqnqo2/4ovbJ/KslGgG55acjzJ+HtwNmqmquqZ4GPAX+D6c78vGEZp/q2FUn2AD8C/MPqDqwyvZn/Er0f/F9I8iS9XJ9L8t1Mb+ZZ4GPVcxL4M3o345rWvAB76P3fA/ht/vzQy8QzW+5AVX0DOJ/ke7qhnfRuT3yM3j8e3fKBCcQb5hzw5iQv72Y3O4HHme7MzxuW8RiwO8mNSW4FtgEnJ5DvGt2bz7wHeGdV/UnfQ1OZuaq+VFUbqmprVW2lVzZv6r7XpzIz8N+AtwEkeT29Exu+yfTmhV5h/0C3/jbgiW598plX+hXnaf0D3AHMAF+k9022FvgLwInuH+wEcPOkc16V+eeBLwOPAr9J75X5qcoMfITeawLP0iuYvfNlpHco4SvAabqzEKYk8xl6x1Af6f782rRnvurxJ+nOlpmGzEP+jl8C/Fb3/fw54G3TkneezH8TeJjemTEPAX91WjJ7+wFJapCHZSSpQZa7JDXIcpekBlnuktQgy12SGmS5S1KDLHdJatD/BxzhWxwFDVEjAAAAAElFTkSuQmCC\n",
"text/plain": [
""
]
@@ -392,7 +392,7 @@
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEICAYAAABYoZ8gAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAASN0lEQVR4nO3dfYylZXnH8e+vbEHBUKA7i8BiB81qi6ZVOqW+VEtdX1CJS5NisNVsFbOt8aWSWl1KoukfJPgSrU2rzUbRtQq4opZNrApug6ZJAQcUBVbrKrgMrOxQ0BpNkMWrf5yHeFxmdnbOy87M3t9PsjnnuZ/nzLmu3Znfufc+z3kmVYUkqR2/ttQFSJIOLYNfkhpj8EtSYwx+SWqMwS9JjTH4JakxBr80gCRnJZlZ6jqkQRj8ktQYg1+SGmPwq2lJNie5ar+xDyT5pySvSbIzyU+SfD/JXy1VndIoGfxq3RXAS5McC5DkCOAVwOXAXuAc4FjgNcD7k5yxVIVKo2Lwq2lV9QPgZuDcbuj5wM+q6vqq+nxVfa96vgJcAzx3qWqVRsXgl3qz+1d29/+82ybJS5Jcn+T+JD8CXgqsXqIapZEx+CX4NHBWkrXAnwKXJzkK+AzwXuDEqjoO+A8gS1emNBoGv5pXVbPAdcBHgTuqaidwJHAUMAvsS/IS4EVLVqQ0Qga/1HM58ILulqr6CfBmYBvwAL0loO1LVp00QvEXsUhSW5zxS1JjDH5JaozBL0mNMfglqTGrFjogyWX0Pra+t6qett++twLvASaq6r5u7CLgAuBh4M1V9aWFnmP16tU1OTm5+OolqWE33XTTfVU1sdjHLRj8wMeAfwY+3j+Y5FTghcDuvrHTgfOBpwInA19O8uSqevhATzA5Ocn09PTiKpekxiX5wSCPW3Cpp6q+Ctw/x673A28D+s8H3QBcWVUPVtUdwC7gzEEKkySNx0Br/EleDtxdVbfst+sU4K6+7ZlubK6vsSnJdJLp2dnZQcqQJA1g0cGf5GjgYuAdc+2eY2zOT4hV1ZaqmqqqqYmJRS9RSZIGdDBr/Pt7EnAacEsSgLXAzUnOpDfDP7Xv2LXAPcMWKUkanUXP+KvqW1W1pqomq2qSXtifUVU/pHctk/OTHJXkNGAdcONIK5YkDWXB4E9yBfDfwFOSzCS5YL5jq+o2ehe1uh34IvCGhc7okSQdWgsu9VTVKxfYP7nf9iXAJcOVJUkaFz+5K0mNMfglqTGDnNUjCZjc/Pk5x++89GWHuBJpcZzxS1JjDH5JaozBL0mNMfglqTEGvyQ1xuCXpMYY/JLUGINfkhpj8EtSYwx+SWqMwS9JjTH4JakxBr8kNcbgl6TGGPyS1BiDX5IaY/BLUmMMfklqjMEvSY1ZMPiTXJZkb5Jb+8bek+TbSb6Z5HNJjuvbd1GSXUm+k+TF4ypckjSYg5nxfww4e7+xa4GnVdXvAv8DXASQ5HTgfOCp3WM+mOSIkVUrSRragsFfVV8F7t9v7Jqq2tdtXg+s7e5vAK6sqger6g5gF3DmCOuVJA1pFGv8rwW+0N0/Bbirb99MN/YoSTYlmU4yPTs7O4IyJEkHY6jgT3IxsA/45CNDcxxWcz22qrZU1VRVTU1MTAxThiRpEVYN+sAkG4FzgPVV9Ui4zwCn9h22Frhn8PIkSaM2UPAnORt4O/DHVfWzvl3bgcuTvA84GVgH3Dh0ldJhYHLz5+ccv/PSlx3iStS6BYM/yRXAWcDqJDPAO+mdxXMUcG0SgOur6q+r6rYk24Db6S0BvaGqHh5X8ZKkxVsw+KvqlXMMf+QAx18CXDJMUZKk8fGTu5LUGINfkhpj8EtSYwx+SWqMwS9JjTH4JakxBr8kNcbgl6TGGPyS1BiDX5IaY/BLUmMMfklqjMEvSY0x+CWpMQa/JDVm4F+9KGm8/I1dGhdn/JLUGINfkhpj8EtSYwx+SWqMwS9JjVkw+JNclmRvklv7xk5Icm2S73a3x/ftuyjJriTfSfLicRUuSRrMwcz4Pwacvd/YZmBHVa0DdnTbJDkdOB94aveYDyY5YmTVSpKGtmDwV9VXgfv3G94AbO3ubwXO7Ru/sqoerKo7gF3AmSOqVZI0AoOu8Z9YVXsAuts13fgpwF19x810Y4+SZFOS6STTs7OzA5YhSVqsUb+5mznGaq4Dq2pLVU1V1dTExMSIy5AkzWfQ4L83yUkA3e3ebnwGOLXvuLXAPYOXJ0katUGv1bMd2Ahc2t1e3Td+eZL3AScD64Abhy1SGiWvgaPWLRj8Sa4AzgJWJ5kB3kkv8LcluQDYDZwHUFW3JdkG3A7sA95QVQ+PqXZJ0gAWDP6qeuU8u9bPc/wlwCXDFCVJGh8/uStJjTH4JakxBr8kNcbgl6TGGPyS1BiDX5IaY/BLUmMMfklqjMEvSY0x+CWpMQa/JDXG4Jekxhj8ktSYQa/HLy0LXltfWjyDXzrMzffiCL5AtsqlHklqjMEvSY1xqUfLimv20vg545ekxhj8ktQYl3qkJXags26kcXDGL0mNGWrGn+RC4HVAAd8CXgMcDXwKmATuBF5RVQ8MVaW0SM6ipfkNPONPcgrwZmCqqp4GHAGcD2wGdlTVOmBHty1JWiaGXeNfBTw2yUP0Zvr3ABcBZ3X7twLXAW8f8nmksfNUUrVi4Bl/Vd0NvBfYDewBflxV1wAnVtWe7pg9wJq5Hp9kU5LpJNOzs7ODliFJWqRhlnqOBzYApwEnA8ckedXBPr6qtlTVVFVNTUxMDFqGJGmRhjmr5wXAHVU1W1UPAZ8Fng3cm+QkgO527/BlSpJGZZjg3w08M8nRSQKsB3YC24GN3TEbgauHK1GSNEoDv7lbVTckuQq4GdgHfB3YAjwO2JbkAnovDueNolBJ0mgMdVZPVb0TeOd+ww/Sm/1LkpYhP7krSY0x+CWpMV6kTVqAl3/Q4cbgl1YYP2GsYbnUI0mNMfglqTEu9Uh6FJeTDm/O+CWpMQa/JDXG4Jekxhj8ktQYg1+SGuNZPRoJzwKRVg5n/JLUGINfkhpj8EtSYwx+SWqMb+5KhwkvH62D5YxfkhrjjF8aMWfeWu6c8UtSYwx+SWrMUMGf5LgkVyX5dpKdSZ6V5IQk1yb5bnd7/KiKlSQNb9gZ/weAL1bVbwO/B+wENgM7qmodsKPbliQtEwMHf5JjgecBHwGoqp9X1Y+ADcDW7rCtwLnDFilJGp1hZvxPBGaBjyb5epIPJzkGOLGq9gB0t2vmenCSTUmmk0zPzs4OUYYkaTGGCf5VwBnAh6rqGcBPWcSyTlVtqaqpqpqamJgYogxJ0mIME/wzwExV3dBtX0XvheDeJCcBdLd7hytRkjRKAwd/Vf0QuCvJU7qh9cDtwHZgYze2Ebh6qAolSSM17Cd33wR8MsmRwPeB19B7MdmW5AJgN3DekM8hSRqhoYK/qr4BTM2xa/0wX1eSND5eq0crgte/kUbHSzZIUmMMfklqjMEvSY0x+CWpMQa/JDXG4Jekxhj8ktQYz+PXong+vbTyGfwaq/l
eKO689GWHuBJJj3CpR5IaY/BLUmMMfklqjMEvSY0x+CWpMQa/JDXG4Jekxngev5aEHwSTlo7BLzXMF+A2udQjSY0x+CWpMUMv9SQ5ApgG7q6qc5KcAHwKmATuBF5RVQ8M+zw6tFwCkA5fo5jx/w2ws297M7CjqtYBO7ptSdIyMVTwJ1kLvAz4cN/wBmBrd38rcO4wzyFJGq1hZ/z/CLwN+EXf2IlVtQegu10z1wOTbEoynWR6dnZ2yDIkSQdr4OBPcg6wt6puGuTxVbWlqqaqampiYmLQMiRJizTMm7vPAV6e5KXAY4Bjk3wCuDfJSVW1J8lJwN5RFCpJGo2BZ/xVdVFVra2qSeB84D+r6lXAdmBjd9hG4Oqhq5Qkjcw4Prl7KbAtyQXAbuC8MTyHpGVksaf/+qs3l9ZIgr+qrgOu6+7/L7B+FF9XkjR6fnJXkhrjRdokHTQ/0X14cMYvSY0x+CWpMQa/JDXG4Jekxhj8ktQYg1+SGmPwS1JjDH5JaozBL0mNMfglqTEGvyQ1xuCXpMYY/JLUGINfkhrjZZkb52V2tRTm+77zN3MdGs74JakxBr8kNcbgl6TGGPyS1BiDX5IaM/BZPUlOBT4OPB74BbClqj6Q5ATgU8AkcCfwiqp6YPhSJR3uPNvn0Bhmxr8P+Nuq+h3gmcAbkpwObAZ2VNU6YEe3LUlaJgYO/qraU1U3d/d/AuwETgE2AFu7w7YC5w5bpCRpdEbyAa4kk8AzgBuAE6tqD/ReHJKsmecxm4BNAE94whNGUcZhx//2Sgfmz8hghn5zN8njgM8Ab6mq/zvYx1XVlqqaqqqpiYmJYcuQJB2koYI/ya/TC/1PVtVnu+F7k5zU7T8J2DtciZKkURo4+JME+Aiws6re17drO7Cxu78RuHrw8iRJozbMGv9zgFcD30ryjW7s74FLgW1JLgB2A+cNV6IkaZQGDv6q+i8g8+xeP+jX1XC82qakhXhZ5hXIcJc0DC/ZIEmNMfglqTEu9Uha9lzeHC1n/JLUGINfkhpj8EtSYwx+SWqMwS9JjTH4JakxBr8kNcbgl6TGGPyS1BiDX5Ia4yUbDiF/P6h0aPizdmDO+CWpMQa/JDXGpZ5lwCsPSjqUnPFLUmOc8UtqxiD/u57vDeGV/AaywT+ElfwPL+ngHI5LsS71SFJjxjbjT3I28AHgCODDVXXpuJ5r3A7HV3xJ7RpL8Cc5AvgX4IXADPC1JNur6vZxPJ9LLpKWu+WUU+Na6jkT2FVV36+qnwNXAhvG9FySpEVIVY3+iyZ/BpxdVa/rtl8N/GFVvbHvmE3Apm7zKcB3Rl7IwVkN3LdEzz1K9rH8HC692Mfy0t/Hb1XVxGK/wLjW+DPH2K+8wlTVFmDLmJ7/oCWZrqqppa5jWPax/BwuvdjH8jKKPsa11DMDnNq3vRa4Z0zPJUlahHEF/9eAdUlOS3IkcD6wfUzPJUlahLEs9VTVviRvBL5E73TOy6rqtnE81wgs+XLTiNjH8nO49GIfy8vQfYzlzV1J0vLlJ3clqTEGvyQ1pqngT3JckquSfDvJziTPSnJCkmuTfLe7PX6p61xIkguT3Jbk1iRXJHnMSukjyWVJ9ia5tW9s3tqTXJRkV5LvJHnx0lT9aPP08Z7ue+ubST6X5Li+fSumj759b01SSVb3jS3LPmD+XpK8qav3tiTv7htflr3M87319CTXJ/lGkukkZ/btW3wfVdXMH2Ar8Lru/pHAccC7gc3d2GbgXUtd5wI9nALcATy2294G/OVK6QN4HnAGcGvf2Jy1A6cDtwBHAacB3wOOWOoeDtDHi4BV3f13rdQ+uvFT6Z2c8QNg9XLv4wD/Jn8CfBk4qttes9x7maePa4CXdPdfClw3TB/NzPiTHEvvL/QjAFX186r6Eb1LSWztDtsKnLs0FS7KKuCxSVYBR9P7jMSK6KOqvgrcv9/wfLVvAK6sqger6g5gF73LgSy5ufqoqmuqal+3eT29z6/ACuuj837gbfzqBy+XbR8wby+vBy6tqge7Y/Z248u2l3n6KODY7v5v8MvPRQ3URzPBDzwRmAU+muTrST6c5BjgxKraA9DdrlnKIhdSVXcD7wV2A3uAH1fVNaywPvYzX+2nAHf1HTfTja0ErwW+0N1fUX0keTlwd1Xdst+uFdVH58nAc5PckOQrSf6gG19pvbwFeE+Su+j9/F/UjQ/UR0vBv4ref58+VFXPAH5Kb1lhRenWvzfQ+2/dycAxSV61tFWNzYKX/liOklwM7AM++cjQHIctyz6SHA1cDLxjrt1zjC3LPvqsAo4Hngn8HbAtSVh5vbweuLCqTgUupFu5YMA+Wgr+GWCmqm7otq+i90Jwb5KTALrbvfM8frl4AXBHVc1W1UPAZ4Fns/L66Ddf7Svu0h9JNgLnAH9R3SIsK6uPJ9GbVNyS5E56td6c5PGsrD4eMQN8tnpuBH5B7yJnK62XjfR+1gE+zS+Xcwbqo5ngr6ofAncleUo3tB64nd6lJDZ2YxuBq5egvMXYDTwzydHdzGU9sJOV10e/+WrfDpyf5KgkpwHrgBuXoL6D0v3yobcDL6+qn/XtWjF9VNW3qmpNVU1W1SS9YDmj+/lZMX30+Xfg+QBJnkzvpI77WHm93AP8cXf/+cB3u/uD9bHU72Af4nfLnw5MA9+k9w1xPPCbwI7uL3IHcMJS13kQffwD8G3gVuDf6L2jvyL6AK6g997EQ/RC5YID1U5v2eF79C7b/ZKlrn+BPnbRW2/9RvfnX1diH/vtv5PurJ7l3McB/k2OBD7R/azcDDx/ufcyTx9/BNxE7wyeG4DfH6YPL9kgSY1pZqlHktRj8EtSYwx+SWqMwS9JjTH4JakxBr8kNcbgl6TG/D9lxU1dZ4KQzgAAAABJRU5ErkJggg==\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAEICAYAAACktLTqAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAARZ0lEQVR4nO3de5DdZX3H8fenpETBoYQmoZFgg51oi51W6ZaiVkuJFwSG0JnixJZOqnTSdqwXepFQZ8r0D2biZartTLWTATRTuTQilozWFhqLTmcEXBAVCJQoCAuRrFVbqw4a/faP80PWZZdk9+zZc/bJ+zWTOb/znHP2fCbZfPbZ53c5qSokSW35iWEHkCQtPMtdkhpkuUtSgyx3SWqQ5S5JDbLcJalBlrs0D0lOTzIx7BzSbCx3SWqQ5S5JDbLcdVhLsjXJddPG/jbJ3yV5fZI9Sb6V5MtJ/nBYOaW5stx1uLsGOCvJMQBJjgBeC1wN7AfOAY4BXg+8J8kpwwoqzYXlrsNaVX0FuAM4rxs6A/hOVd1SVR+vqi9Vz6eAG4GXDSurNBeWu9Sbpb+u2/6d7j5JXpPkliRfT/JN4Cxg5ZAySnNiuUvwYeD0JGuB3wKuTrIc+AjwbuD4qjoW+Bcgw4spHTrLXYe9qpoEbgY+ADxQVXuAI4HlwCRwIMlrgFcNLaQ0R5a71HM18Irulqr6FvBmYCfwDXrLNbuGlk6ao/hhHZLUHmfuktQgy12SGmS5S1KDLHdJatCyYQcAWLlyZa1bt27YMSRpSbn99tu/VlWrZnpsJMp93bp1jI+PDzuGJC0pSb4y22Muy0hSgyx3SWqQ5S5JDbLcJalBlrskNchyl6QGWe6S1KCDlnuSK5PsT3LXlLF3Jbk3yReSfDTJsVMeuyTJ3iT3JXn1oIJLkmZ3KDP3DwJnThu7CfjFqvol4L+ASwCSnAxsAl7QveZ93QcOS5IW0UHPUK2qTydZN23sxil3bwF+u9veCFxbVY8DDyTZC5wKfGZB0kojZN3Wj884/uC2sxc5ifRUC7Hm/gbgE932CcDDUx6b6MaeIsmWJONJxicnJxcghiTpCX2Ve5K3AweAq54YmuFpM37UU1Vtr6qxqhpbtWrG695IkuZp3hcOS7IZOAfYUE9+Vt8EcOKUp60FHp1/PEnSfMxr5p7kTOBi4Nyq+s6Uh3YBm5IsT3ISsB64rf+YkqS5OOjMPck1wOnAyiQTwKX0jo5ZDtyUBOCWqvqjqro7yU7gHnrLNW+sqh8MKrw0itzRqlFwKEfLvG6G4Sue5vmXAZf1E0qS1B/PUJWkBlnuktQgy12SGmS5S1KDLHdJapDlLkkNmvcZqtJS5XHoOhw4c5ekBlnuktQgy12SGmS5S1KDLHdJapDlLkkNstwlqUGWuyQ1yJOYpIOY7aQnaZQ5c5ekBjlzV5NGcbbtZQ+0mJy5S1KDLHdJapDlLkkNstwlqUGWuyQ1yHKXpAZZ7pLUoIOWe5Irk+xPcteUseOS3JTk/u52xZTHLkmyN8l9SV49qOCSpNkdysz9g8CZ08a2Aruraj2wu7tPkpOBTcALute8L8kRC5ZWknRIDnqGalV9Osm6acMbgdO77R3AzcDF3fi1VfU48ECSvcCpwGcWJq7UHs9c1SDMd839+KraB9Ddru7GTwAenvK8iW5MkrSIFnqHamYYqxmfmGxJMp5kfHJycoFjSNLhbb7l/liSNQDd7f5ufAI4ccrz1gKPzvQFqmp7VY1V1diqVavmGUOSNJP5lvsuYHO3vRm4Ycr4piTLk5wErAdu6y+iJGmuDrpDNck19HaerkwyAVwKbAN2JrkQeAg4H6Cq7k6yE7gHOAC8sap+MKDskqRZHMrRMq+b5aENszz/MuCyfkJJkvrjGaqS1CDLXZIa5MfsaUnwRB9pbpy5S1KDLHdJapDlLkkNstwlqUGWuyQ1yHKXpAZZ7pLUIMtdkhpkuUtSgyx3SWqQlx/QkjbbZQmkw50zd0lqkOUuSQ2y3CWpQZa7JDXIcpekBlnuktQgy12SGmS5S1KDPIlJGlF+bqz64cxdkhpkuUtSgyx3SWpQX+We5KIkdye5K8k1SZ6R5LgkNyW5v7tdsVBhJUmHZt47VJOcALwZOLmqvptkJ7AJOBnYXVXbkmwFtgIXL0haSbNyB6ym6ndZZhnwzCTLgKOAR4GNwI7u8R3AeX2+hyRpjuY9c6+qR5K8G3gI+C5wY1XdmOT4qtrXPWdfktUzvT7JFmALwHOe85z5xlBjvD67tDDmPXPv1tI3AicBzwaOTnLBob6+qrZX1VhVja1atWq+MSRJM+hnWeYVwANVNVlV3weuB14CPJZkDUB3u7//mJKkuejnDNWHgNOSHEVvWWYDMA58G9gMbOtub+g3pLQYXBJSS/pZc781yXXAHcAB4HPAduBZwM4kF9L7AXD+QgSVJB26vq4tU1WXApdOG36c3ixekjQknqEqSQ2y3CWpQZa7JDXIcpekBlnuktQgy12SGmS5S1KDLHdJapDlLkkN6usMVUlLmx/w0S5n7pLUIMtdkhpkuUtSgyx3SWqQ5S5JDbLcJalBlrskNchyl6QGWe6S1CDLXZIaZLlLUoMsd0lqkOUuSQ2y3CWpQZa7JDWor3JPcmyS65Lcm2RPkhcnOS7JTUnu725XLFRYSdKh6Xfm/rfAv1bVzwO/DOwBtgK7q2o9sLu7L0laRPMu9yTHAC8HrgCoqu9V1TeBjcCO7mk7gPP6DSlJmpt+Zu7PBSaBDyT5XJLLkxwNHF9V+wC629UzvTjJliTjScYnJyf7iCFJmq6fcl8GnAK8v6peBHybOSzBVNX2qhqrqrFVq1b1EUOSNF0/5T4BTFTVrd396+iV/WNJ1gB0t/v7iyhJmqt5l3tVfRV4OMnzu6ENwD3ALmBzN7YZuKGvhJKkOVvW5+vfBFyV5Ejgy8Dr6f3A2JnkQuAh4Pw+30OSNEd9lXtV3QmMzfDQhn6+riSpP56hKkkNstwlqUGWuyQ1qN8dqpJG3LqtHx92BA2BM3dJapDlLkkNcllGAzXbksCD285e5CTS4cWZuyQ1yJm75sSdc9LS4MxdkhpkuUtSg1yWkZYYl8Z0KJy5S1KDLHdJapDlLkkNstwlqUGWuyQ1yHKXpAZ5KKSkp/CaQEufM3dJapDlLkkNstwlqUGWuyQ1yHKXpAZZ7pLUoL4PhUxyBDAOPFJV5yQ5DvgnYB3wIPDaqvpGv++jtnhlQ2mwFmLm/hZgz5T7W4HdVbUe2N3dlyQtor7KPcla4Gzg8inDG4Ed3fYO4Lx+3kOSNHf9ztzfC7wN+OGUseOrah9Ad7t6phcm2ZJkPMn45ORknzEkSVPNu9yTnAPsr6rb5/P6qtpeVWNVNbZq1ar5xpAkzaCfHaovBc5NchbwDOCYJB8CHkuypqr2JVkD7F+IoJKGz2vOLB3znrlX1SVVtbaq1gGbgE9W1QXALmBz97TNwA19p5QkzckgjnPfBrwyyf3AK7v7kqRFtCCX/K2qm4Gbu+3/BjY
sxNeVJM2PZ6hKUoMsd0lqkOUuSQ3yY/Y0I6/9Ii1tztwlqUGWuyQ1yHKXpAZZ7pLUIMtdkhpkuUtSgyx3SWqQ5S5JDbLcJalBlrskNchyl6QGeW2Zw5zXkJHa5MxdkhpkuUtSgyx3SWqQ5S5JDXKHqqS+zXXH/IPbzh5QEj3BmbskNchyl6QGWe6S1CDLXZIaNO9yT3Jikv9IsifJ3Une0o0fl+SmJPd3tysWLq4k6VD0M3M/APxZVf0CcBrwxiQnA1uB3VW1Htjd3ZckLaJ5l3tV7auqO7rtbwF7gBOAjcCO7mk7gPP6DSlJmpsFWXNPsg54EXArcHxV7YPeDwBg9UK8hyTp0PVd7kmeBXwEeGtV/e8cXrclyXiS8cnJyX5jSJKm6OsM1SQ/Sa/Yr6qq67vhx5Ksqap9SdYA+2d6bVVtB7YDjI2NVT859KTZzhT0jECNEr9PB6+fo2UCXAHsqaq/mfLQLmBzt70ZuGH+8SRJ89HPzP2lwO8BX0xyZzf2l8A2YGeSC4GHgPP7iyhJmqt5l3tV/SeQWR7eMN+vK0nqn1eFPEz4cXrS4cXLD0hSgyx3SWqQ5S5JDbLcJalBlrskNchyl6QGeSjkEuRhjZIOxpm7JDXIcpekBrksI2lkeLXIhePMXZIaZLlLUoNclhlhHhUj9bhcM3fO3CWpQc7c+7BQswln6JIWmjN3SWqQM3dJS5Zr8bNz5i5JDbLcJalBLsssInecSlosztwlqUGWuyQ1yHKXpAa55i6pOXPdv9XioZOpqmFnYGxsrMbHx4cdwx2ekg7ZKPxASHJ7VY3N9NjAlmWSnJnkviR7k2wd1PtIkp5qIMsySY4A/h54JTABfDbJrqq6ZxDv569gkkbdYp9NO6iZ+6nA3qr6clV9D7gW2Dig95IkTTOoHaonAA9PuT8B/NrUJyTZAmzp7v5fkvu67ZXA1waUq/fe71iwLzXwrAtkqeQEsw6KWRdY1yN9Z+2zj352tgcGVe6ZYezH9txW1XZg+1NemIzPtoNg1CyVrEslJ5h1UMw6GKOcdVDLMhPAiVPurwUeHdB7SZKmGVS5fxZYn+SkJEcCm4BdA3ovSdI0A1mWqaoDSf4E+DfgCODKqrr7EF/+lKWaEbZUsi6VnGDWQTHrYIxs1pE4iUmStLC8towkNchyl6QGDb3ckxyR5HNJPtbdPy7JTUnu725XDDsjQJJjk1yX5N4ke5K8eISzXpTk7iR3JbkmyTNGJWuSK5PsT3LXlLFZsyW5pLuExX1JXj0CWd/VfQ98IclHkxw7qlmnPPbnSSrJymFnnS1nkjd1We5O8s5h55wta5IXJrklyZ1JxpOcOgpZZ1RVQ/0D/ClwNfCx7v47ga3d9lbgHcPO2GXZAfxBt30kcOwoZqV3AtkDwDO7+zuB3x+VrMDLgVOAu6aMzZgNOBn4PLAcOAn4EnDEkLO+CljWbb9jlLN24yfSO7DhK8DKYWed5e/0N4F/B5Z391cPO+fTZL0ReE23fRZw8yhknenPUGfuSdYCZwOXTxneSK9I6W7PW+xc0yU5ht4/9BUAVfW9qvomI5i1swx4ZpJlwFH0zjEYiaxV9Wng69OGZ8u2Ebi2qh6vqgeAvfQubbEoZspaVTdW1YHu7i30zuEYyayd9wBv48dPIhxa1lly/jGwraoe756zf9g5nyZrAcd02z/Fk+fvDDXrTIa9LPNeet94P5wydnxV7QPoblcPI9g0zwUmgQ90S0iXJzmaEcxaVY8A7wYeAvYB/1NVNzKCWaeYLdtMl7E4YZGzPZ03AJ/otkcua5JzgUeq6vPTHhq1rM8DXpbk1iSfSvKr3fio5QR4K/CuJA/T+392STc+clmHVu5JzgH2V9Xtw8owB8vo/Xr2/qp6EfBtessHI6dbr95I71fDZwNHJ7lguKnm7aCXsRiWJG8HDgBXPTE0w9OGljXJUcDbgb+a6eEZxob597oMWAGcBvwFsDNJGL2c0Pst46KqOhG4iO63eUYw6zBn7i8Fzk3yIL2rRp6R5EPAY0nWAHS3+2f/EotmApioqlu7+9fRK/tRzPoK4IGqmqyq7wPXAy9hNLM+YbZsI3kZiySbgXOA361uwZXRy/pz9H7Af777P7YWuCPJzzB6WSeA66vnNnq/ya9k9HICbKb3fwrgwzy59DJyWYdW7lV1SVWtrap19C5P8MmquoDeZQo2d0/bDNwwpIg/UlVfBR5O8vxuaANwDyOYld5yzGlJjupmPxuAPYxm1ifMlm0XsCnJ8iQnAeuB24aQ70eSnAlcDJxbVd+Z8tBIZa2qL1bV6qpa1/0fmwBO6b6XRyor8M/AGQBJnkfvgIWvMXo5oVfYv9FtnwHc322PXtZh7s2dsgf6dJ48Wuangd30/tJ2A8cNO1+X64XAOPAFet+MK0Y4618D9wJ3Af9Ibw/+SGQFrqG3L+D79ArnwqfLRm9p4UvAfXRHKQw56156a6t3dn/+YVSzTnv8QbqjZYaZdZa/0yOBD3Xfr3cAZww759Nk/XXgdnpHxtwK/MooZJ3pj5cfkKQGDftoGUnSAFjuktQgy12SGmS5S1KDLHdJapDlLkkNstwlqUH/D0gFpL8IdAPxAAAAAElFTkSuQmCC\n",
"text/plain": [
""
]
@@ -404,7 +404,7 @@
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAEICAYAAACktLTqAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAARdklEQVR4nO3da7BdZX3H8e+vpHKzFNIEGgkY7KDWOlO1R4qXKjWiKEh4USodcYLiZLReGasE7ZTpC6fxMm190dZJFZsqA6WUlnS8EWOt43QAA4ISAgNKCoFIDrVYLzNg5N8XezFs4znknL33ueznfD8zZ/bez1p7718g55fnPHutdVJVSJLa8ksLHUCSNHqWuyQ1yHKXpAZZ7pLUIMtdkhpkuUtSgyx3SWqQ5a4lKcnuJK8c8jUuSPL1UWWSRslyl6QGWe5acpJ8BjgR+PckP0ry/iSnJvmvJA8nuTXJaX37X5Dku0l+mOSeJG9I8pvAJ4AXda/x8AL9caQpxcsPaClKsht4S1V9OcnxwLeANwJfBNYCVwLPBn4C7AVeWFV3JlkFLK+qnUku6F7jpQvxZ5CejDN3Cc4HPl9Vn6+qx6pqG7ADeG23/THguUkOr6q9VbVzwZJKM2S5S/B04NxuSebhbonlpcCqqvox8HrgrcDeJJ9L8uyFDCvNhOWupap/PfI+4DNVdXTf15FVtQmgqr5UVacDq4A7gL+f4jWkRcVy11L1IPCM7v5ngdcleXWSQ5IcluS0JKuTHJfk7CRHAo8APwJ+1vcaq5M8Zf7jS0/OctdS9RfAn3ZLMK8H1gEfACbpzeTfR+/745eA9wIPAN8HXg78cfcaXwF2At9L8tC8ppcOwqNlJKlBztwlqUGWuyQ1yHKXpAZZ7pLUoGUH2yHJZcBZwL6qem439lHgdcCjwHeAN1XVw922S4AL6R0u9q6q+tLB3mPFihW1Zs2aQf8MkrQk3XTTTQ9V1cqpth30aJkkL6N3bO8/9pX7q4CvVNX+JB8GqKqLkzwHuAI4BXga8GXgmVX1s6lfvWdiYqJ27Ngxyz+WJC1tSW6qqompth10Waaqvkbv+N7+seuqan/38HpgdXd/HXBlVT1SVfcAd9MreknSPBrFmvubgS9094+ndwLI4/Z0Y5KkeTRUuSf5ILAfuPzxoSl2m3LdJ8mGJDuS7JicnBwmhiTpAAOXe5L19D5ofUM9sXC/Bzihb7fV9E7b/gVVtbmqJqpqYuXKKT8PkCQNaKByT3IGcDFwdlX9pG/TVuC8JIcmOQk4Gbhx+JiSpNmYyaGQVwCnASuS7AEuBS4BDgW2JQG4vqre2v12mquA2+kt17z9YEfKSJJGb1FcOMxDISVp9oY6FFKSNH4sd0lq0EHX3KWlYs3Gz005vnvTmfOcRBqeM3dJapDlLkkNcllGGjGXd7QYOHOXpAY5c5cWmDN9zQVn7pLUIMtdkhpkuUtSgyx3SWqQ5S5JDbLcJalBlrskNchyl6QGWe6S1CDLXZIa5OUH1KTpTukHT+vX0uDMXZIaZLlLUoMsd0lqkOUuSQ2y3CWpQZa7JDXIcpekBlnuktSgg5Z7ksuS7EtyW9/Y8iTbktzV3R7Tt+2SJHcnuTPJq+cquCRpejOZuf8DcMYBYxuB7VV1MrC9e0yS5wDnAb/VPedvkxwysrSSpBk5aLlX1deA7x8wvA7Y0t3fApzTN35lVT1SVfcAdwOnjCirJGmGBr22zHFVtRegqvYmObYbPx64vm+/Pd3YL0iyAdgAcOKJJw4YQ0vFdNeKmY/rxCzke0uDGvUHqplirKbasao2V9VEVU2sXLlyxDEkaWkbtNwfTLIKoLvd143vAU7o22818MDg8SRJgxi03LcC67v764Fr+8bPS3JokpOAk4Ebh4soSZqtg665J7kCOA1YkWQPcCmwCbgqyYXAvcC5AFW1M8lVwO3AfuDtVfWzOcouSZrGQcu9qv5omk1rp9n/Q8CHhgklSRqOZ6hKUoMsd0lqkOUuSQ2y3CWpQZa7JDXIcpekBlnuktQgy12SGmS5S1KDBr3kr7TkTXcpYGkxcOYuSQ2y3CWpQZa7JDXIcpekBlnuktQgy12SGuShkNI88dBJzSdn7pLUIMtdkhrksoyWHJdHtBQ4c5ekBlnuktQgy12SGuSauzRmpvvMYPemM+c5iRYzZ+6S1CDLXZIaZLlLUoOGKvckFyXZmeS2JFckOSzJ8iTbktzV3R4zqrCSpJkZuNyTHA+8C5ioqucChwDnARuB7VV1MrC9eyxJmkfDLsssAw5Psgw4AngAWAds6bZvAc4Z8j0kSbM0cLlX1f3Ax4B7gb3AD6rqOuC4qtrb7bMXOHaq5yfZkGRHkh2Tk5ODxpAkTWGYZZlj6M3STwKeBhyZ5PyZPr+qNlfVRFVNrFy5ctAYkqQpDLMs80rgnqqarKqfAtcALwYeTLIKoLvdN3xMSdJsDFPu9wKnJjkiSYC1wC5gK7C+22c9cO1wESVJszXw5Qeq6oYkVwM3A/uBbwKbgacCVyW5kN4/AOeOIqgkaeaGurZMVV0KXHrA8CP0ZvGSpAXiGaqS1CDLXZIaZLlLUoMsd0lqkOUuSQ3yNzFJi9R0v3FJmgln7pLUIMtdkhpkuUtSgyx3SWqQ5S5JDbLcJalBlrskNchyl6QGWe6S1CDLXZIaZLlLUoMsd0lqkOUuSQ3yqpBS457s6pK7N505j0k0n5y5S1KDnLlrQUw3m3QmKY2GM3dJapDlLkkNcllGI7FQyyz+Kjppas7cJalBlrskNWiock9ydJKrk9yRZFeSFyVZnmRbkru622NGFVaSNDPDztw/Dnyxqp4N/DawC9gIbK+qk4Ht3WNJ0jwauNyTHAW8DPgUQFU9WlUPA+uALd1uW4Bzhg0pSZqdYWbuzwAmgU8n+WaSTyY5EjiuqvYCdLfHTvXkJBuS7EiyY3JycogYkqQDDXMo5DLgBcA7q+qGJB9nFkswVbUZ2AwwMTFRQ+RQQzy0URqNYWbue4A9VXVD9/hqemX/YJJVAN3tvuEiSpJma+CZe1V9L8l9SZ5VVXcCa4Hbu6/1wKbu9tqRJJU0crP9Sclr/4yPYc9QfSdweZKnAN8F3kTvp4GrklwI3AucO+R7SJJmaahyr6pbgIkpNq0d5nUlScPxDFVJapDlLkkNstwlqUGWuyQ1yHKXpAZZ7pLUIMtdkhpkuUtSgyx3SWqQ5S5JDbLcJalBlrskNchyl6QGDXvJX+lJ+ZuVpIXhzF2SGmS5S1KDLHdJapDlLkkNstwlqUGWuyQ1yEMhNaXpDmHcvenMeU6imfKwU/Vz5i5JDbLcJalBlrskNchyl6QGWe6S1KChj5ZJcgiwA7i/qs5Kshz4J2ANsBv4w6r632HfR9Li5dFVi88oZu7vBnb1Pd4IbK+qk4Ht3WNJ0jwaqtyTrAbOBD7ZN7wO2NLd3wKcM8x7SJJmb9iZ+18D7wce6xs7rqr2AnS3x071xCQbkuxIsmNycnLIGJKkfgOXe5KzgH1VddMgz6+qzVU
1UVUTK1euHDSGJGkKw3yg+hLg7CSvBQ4DjkryWeDBJKuqam+SVcC+UQTV4uAp7kub///Hx8Az96q6pKpWV9Ua4DzgK1V1PrAVWN/tth64duiUkqRZmYvj3DcBpye5Czi9eyxJmkcjuSpkVX0V+Gp3/3+AtaN4XUnSYDxDVZIaZLlLUoMsd0lqkOUuSQ2y3CWpQZa7JDXIcpekBlnuktQgy12SGmS5S1KDLHdJapDlLkkNstwlqUGWuyQ1aCSX/NX48jfraC5N9/dr96Yz5znJ0uPMXZIa5Mx9iXCGLi0tztwlqUGWuyQ1yHKXpAZZ7pLUIMtdkhpkuUtSgzwUUtK88+SmuefMXZIaZLlLUoMsd0lq0MDlnuSEJP+RZFeSnUne3Y0vT7ItyV3d7TGjiytJmolhPlDdD7y3qm5O8ivATUm2ARcA26tqU5KNwEbg4uGjSmqdH7SOzsAz96raW1U3d/d/COwCjgfWAVu63bYA5wwbUpI0OyM5FDLJGuD5wA3AcVW1F3r/ACQ5dprnbAA2AJx44omjiCG8+qOknqE/UE3yVOBfgPdU1f/N9HlVtbmqJqpqYuXKlcPGkCT1Garck/wyvWK/vKqu6YYfTLKq274K2DdcREnSbA1ztEyATwG7quov+zZtBdZ399cD1w4eT5I0iGHW3F8CvBH4dpJburEPAJuAq5JcCNwLnDtcREnSbA1c7lX1dSDTbF476OtKkobnGaqS1CDLXZIaZLlLUoMsd0lqkOUuSQ3yNzEtYl5ESdKgnLlLUoMsd0lqkOUuSQ2y3CWpQX6gOoa8Zrukg3HmLkkNstwlqUGWuyQ1yDV3SYueJ/TNnjN3SWqQM/d55OxDGq3Zfk8tpe9BZ+6S1CDLXZIa5LLMEJbSj3iSxoszd0lqkDN3Sc3xEh3O3CWpSc7cFwFnGZJGzZm7JDXIcpekBrksMwOzXTZxmUVqx7ge8jxnM/ckZyS5M8ndSTbO1ftIkn7RnMzckxwC/A1wOrAH+EaSrVV1+1y836hmyov9X2JJi99s+2iuemeuZu6nAHdX1Xer6lHgSmDdHL2XJOkAqarRv2jyB8AZVfWW7vEbgd+tqnf07bMB2NA9fBZw58iDDGYF8NBChxiC+RfWOOcf5+ywNPM/vapWTrVhrj5QzRRjP/evSFVtBjbP0fsPLMmOqppY6ByDMv/CGuf845wdzH+guVqW2QOc0Pd4NfDAHL2XJOkAc1Xu3wBOTnJSkqcA5wFb5+i9JEkHmJNlmaran+QdwJeAQ4DLqmrnXLzXHFh0S0WzZP6FNc75xzk7mP/nzMkHqpKkheXlBySpQZa7JDVoSZd7kqOTXJ3kjiS7krwoyfIk25Lc1d0es9A5p5PkoiQ7k9yW5Iokhy3m/EkuS7IvyW19Y9PmTXJJd/mKO5O8emFSP2Ga/B/t/v58K8m/Jjm6b9uiz9+37U+SVJIVfWNjkT/JO7uMO5N8pG980edP8rwk1ye5JcmOJKf0bRsuf1Ut2S9gC/CW7v5TgKOBjwAbu7GNwIcXOuc02Y8H7gEO7x5fBVywmPMDLwNeANzWNzZlXuA5wK3AocBJwHeAQxZh/lcBy7r7Hx63/N34CfQOfvhvYMU45Qd+H/gycGj3+Ngxy38d8Jru/muBr44q/5KduSc5it5/7E8BVNWjVfUwvcskbOl22wKcszAJZ2QZcHiSZcAR9M4lWLT5q+prwPcPGJ4u7zrgyqp6pKruAe6md1mLBTNV/qq6rqr2dw+vp3dOB4xJ/s5fAe/n5080HJf8bwM2VdUj3T77uvFxyV/AUd39X+WJ84GGzr9kyx14BjAJfDrJN5N8MsmRwHFVtReguz12IUNOp6ruBz4G3AvsBX5QVdcxJvn7TJf3eOC+vv32dGOL2ZuBL3T3xyJ/krOB+6vq1gM2jUV+4JnA7yW5Icl/JnlhNz4u+d8DfDTJffS+ny/pxofOv5TLfRm9H5H+rqqeD/yY3rLAWOjWptfR+5HtacCRSc5f2FQjddBLWCwmST4I7Acuf3xoit0WVf4kRwAfBP5sqs1TjC2q/J1lwDHAqcD7gKuShPHJ/zbgoqo6AbiIbiWBEeRfyuW+B9hTVTd0j6+mV/YPJlkF0N3um+b5C+2VwD1VNVlVPwWuAV7M+OR/3HR5x+YSFknWA2cBb6huwZTxyP8b9CYHtybZTS/jzUl+nfHID72c11TPjcBj9C7ANS7519P73gX4Z55Yehk6/5It96r6HnBfkmd1Q2uB2+ldJmF9N7YeuHYB4s3EvcCpSY7oZiprgV2MT/7HTZd3K3BekkOTnAScDNy4APmeVJIzgIuBs6vqJ32bFn3+qvp2VR1bVWuqag29QnlB972x6PN3/g14BUCSZ9I7MOIhxif/A8DLu/uvAO7q7g+ffyE/PV7oL+B5wA7gW/T+khwD/BqwvfuPvB1YvtA5nyT/nwN3ALcBn6H3yfqizQ9cQe/zgZ/SK5ILnywvvSWD79C7HPRrFmn+u+mtjd7SfX1inPIfsH033dEy45KfXpl/tvseuBl4xZjlfylwE70jY24AfmdU+b38gCQ1aMkuy0hSyyx3SWqQ5S5JDbLcJalBlrskNchyl6QGWe6S1KD/By9h8tAJ79sWAAAAAElFTkSuQmCC\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAEICAYAAACktLTqAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAAQxUlEQVR4nO3de5CddX3H8ffHpFwdStJs0kjAYCfe6kzVRouXKiVSrVjCP4x0xAktTqbWWqVWG7RTp384jZdp7R+tTorajFgYSmmhahWMtY7TEQ03JQQmKAiRSJY6WC8zKPLtH+dhPMTdJLtnz+6zv32/ZjLnPM+57Idl97Pf/Z3zPJuqQpLUlictdABJ0tyz3CWpQZa7JDXIcpekBlnuktQgy12SGmS5S1KDLHctSUnuTfKKEZ/joiRfmqtM0lyy3CWpQZa7lpwkHwdOA/4jyQ+SvCPJGUn+J8nDSW5LcubQ/S9K8s0k309yT5LXJXkW8GHgRd1zPLxA/znSlOLpB7QUJbkXeENVfS7JKcDXgNcDnwE2AVcCzwR+BBwAXlBVdyVZC6ysqj1JLuqe46UL8d8gHY6TuwQXAp+uqk9X1WNVdQOwG3h1d/tjwHOSHF9VB6pqz4IllY6S5S7BU4HzuyWZh7sllpcCa6vqh8BrgT8EDiT5VJJnLmRY6WhY7lqqhtcj7wc+XlUnD/07saq2A1TVZ6vqbGAtcCfwj1M8h9QrlruWqgeBp3XXLwd+N8krkyxLclySM5OsS7ImyblJTgQeAX4A/HToOdYlOWb+40uHZ7lrqfpr4C+6JZjXApuBdwKTDCb5tzP4/ngS8DbgAeC7wMuBP+qe4/PAHuA7SR6a1/TSEfhuGUlqkJO7JDXIcpekBlnuktQgy12SGrR8oQMArFq1qtavX7/QMSRpUbnpppseqqqJqW7rRbmvX7+e3bt3L3QMSVpUknxruttclpGkBlnuktQgy12SGmS5S1KDLHdJapDlLkkNstwlqUGWuyQ1yHKXpAb14ghVaSlbv+1TU+6/d/s585xELXFyl6QGWe6S1CDLXZIaZLlLUoN8QVU6Al/w1GLk5C5JDbLcJalBlrskNcg1dzVpunVycK1cS4OTuyQ1yHKXpAZZ7pLUIMtdkhrkC6pS53Avwi6G55eGOblLUoMsd0lqkOUuSQ1yzV2aY66tqw+c3CWpQZa7JDXIcpekBh2x3JN8NMnBJLcP7VuZ5IYk+7rLFUO3XZrk7iR3JXnluIJLkqZ3NJP7PwGvOmTfNmBXVW0AdnXbJHk2cAHwq91j/iHJsjlLK0k6Kkcs96r6IvDdQ3ZvBnZ213cC5w3tv7KqHqmqe4C7gRfOUVZJ0lGa7Zr7mqo6ANBdru72nwLcP3S//d2+n5Nka5LdSXZPTk7OMoYkaSpz/YJqpthXU92xqnZU1caq2jgxMTHHMSRpaZttuT+YZC1Ad3mw278fOHXofuuAB2YfT5I0G7Mt9+uALd31LcC1Q/svSHJsktOBDcBXRosoSZqpI55+IMkVwJnAqiT7gXcD24GrklwM3AecD1BVe5JcBdwBPAq8qap+OqbskqRpHLHcq+r3prlp0zT3fw/wnlFCSZJG4xGqktQgy12SGmS5S1KDLHdJapDlLkkN8i8xSYvMdH/p6d7t58xzEvWZk7skNchyl6QGWe6S1CDLXZIaZLlLUoMsd0lqkOUuSQ2y3CWpQZa7JDXIcpekBlnuktQgy12SGmS5S1KDLHdJapDlLkkN8nzuUuOmO/87eA74ljm5S1KDLHdJapDlLkkNstwlqUGWuyQ1aKRyT3JJkj1Jbk9yRZLjkqxMckOSfd3lirkKK0k6OrMu9ySnAH8CbKyq5wDLgAuAbcCuqtoA7Oq2JUnzaNRlmeXA8UmWAycADwCbgZ3d7TuB80b8GJKkGZp1uVfVt4EPAPcBB4DvVdX1wJqqOtDd5wCweqrHJ9maZHeS3ZOTk7ONIUmawijLMisYTOmnA08BTkxy4dE+vqp2VNXGqto4MTEx2xiSpCmMsizzCuCeqpqsqp8A1wAvBh5Mshaguzw4ekxJ0kyMUu73AWckOSFJgE3AXuA6YEt3ny3AtaNFlCTN1KxPHFZVNya5GrgZeBS4BdgBPBm4KsnFDH4AnD8XQaWpHO6kWNJSNtJZIavq3cC7D9n9CIMpXpK0QDzlr9QIf4vRME8/IEkNcnKXZslJWX3m5C5JDXJy16LglCzNjJO7JDXIcpekBlnuktQgy12SGmS5S1KDLHdJapDlLkkNstwlqUGWuyQ1yHKXpAZZ7pLUIMtdkhpkuUtSgyx3SWqQ5S5JDfJ87pJ+znTnz793+znznESz5eQuSQ1ycteCcDKUxsvJXZIaZLlLUoNclpF6yj8KrlE4uUtSgyx3SWrQSOWe5OQkVye5M8neJC9KsjLJDUn2dZcr5iqsJOnojDq5/x3wmap6JvBrwF5gG7CrqjYAu7ptSdI8mnW5JzkJeBnwEYCq+nFVPQxsBnZ2d9sJnDdqSEnSzIwyuT8NmAQ+luSWJJclORFYU1UHALrL1VM9OMnWJLuT7J6cnBwhhiTpUKOU+3Lg+cCHqup5wA+ZwRJMVe2oqo1VtXFiYmKEGJKkQ41S7vuB/VV1Y7d9NYOyfzDJWoDu8uBoESVJMzXrg5iq6jtJ7k/yjKq6C9gE3NH92wJs7y6vnZOkWhI8cEeaG6Meofpm4BNJjgG+Cfw+g98GrkpyMXAfcP6IH0OSNEMjlXtV3QpsnOKmTaM8ryRpNB6hKkkN8sRhkkbm+fn7x8ldkhpkuUtSgyx3SWqQa+6SjprHISweTu6S1CDLXZIa5LKMlhyXFrQUOLlLUoMsd0lqkOUuSQ2y3CWpQZa7JDXIcpekBlnuktQgy12SGmS5S1KDLHdJapCnH9Cc8C/xSP3i5C5JDXJy11h5ki5pYTi5S1KDLHdJapDlLkkNcs1dWsJ8TaRdTu6S1KCRyz3JsiS3JPlkt70yyQ1J9nWXK0aPKUmaibmY3N8C7B3a3gbsqqoNwK5uW5I0j0Yq9yTrgHOAy4Z2bwZ2dtd3AueN8jEkSTM36uT+QeAdwGND+9ZU1QGA7nL1VA9MsjXJ7iS7JycnR4whSRo263JP8hrgYFXdNJvHV9WOqtpYVRsnJiZmG0OSNIVR3gr5EuDcJK8GjgNOSnI58GCStVV1IMla4OBcBJUkHb1ZT+5VdWlVrauq9cAFwOer6kLgOmBLd7ctwLUjp5Qkzcg43ue+HTg7yT7g7G5bkjSP5uQI1ar6AvCF7vr/Apvm4nklSbPjEaqS1CDLXZIaZLlLUoM8K6Skeeff3B0/J3dJapCTu6TecKKfO07uktQgy12SGmS5S1KDLHdJapDlLkkNstwlqUG+FVJT8i1p0uLm5C5JDbLcJalBlrskNchyl6QGWe6S1CDfLSNpbKZ715XGz8ldkhrk5K4ZcRKTFgfLXVLveVDdzLksI0kNstwlqUGWuyQ1yHKXpAZZ7pLUoFmXe5JTk/xXkr1J9iR5S7d/ZZIbkuz
rLlfMXVxJ0tEYZXJ/FHhbVT0LOAN4U5JnA9uAXVW1AdjVbUuS5tGsy72qDlTVzd317wN7gVOAzcDO7m47gfNGDSlJmpk5OYgpyXrgecCNwJqqOgCDHwBJVk/zmK3AVoDTTjttLmJoFjziVC3yoKc5eEE1yZOBfwXeWlX/d7SPq6odVbWxqjZOTEyMGkOSNGSkck/yCwyK/RNVdU23+8Eka7vb1wIHR4soSZqpUd4tE+AjwN6q+puhm64DtnTXtwDXzj6eJGk2RllzfwnweuDrSW7t9r0T2A5cleRi4D7g/NEiSpJmatblXlVfAjLNzZtm+7ySpNF5hKokNchyl6QGWe6S1CDLXZIaZLlLUoMsd0lqkOUuSQ2akxOHSdJisJROKObkLkkNcnJfIjy1r7S0OLlLUoOc3BvjhC4JnNwlqUlO7j22lF7ZlzS3LHdJi5bLkNNzWUaSGuTk3gNOH5LmmpO7JDXIyX0RctKXdCRO7pLUIMtdkhpkuUtSg1xzl7TkHe51rMV60KCTuyQ1yMl9DDxtgNSOmb47rS/f55a7JM2hvvwwcFlGkhrk5D6Cmf6E9uAjSfNlbJN7klcluSvJ3Um2jevjSJJ+3lgm9yTLgL8Hzgb2A19Ncl1V3TGOjzfTFzDn6v6S1FfjmtxfCNxdVd+sqh8DVwKbx/SxJEmHGNea+ynA/UPb+4HfGL5Dkq3A1m7zB0numua5VgEPzSZE3jve+09j1nkX0GLLbN7xW2yZF1te6DKP2DtPne6GcZV7pthXT9io2gHsOOITJburauNcBRu3xZYXFl9m847fYsu82PLC+DOPa1lmP3Dq0PY64IExfSxJ0iHGVe5fBTYkOT3JMcAFwHVj+liSpEOMZVmmqh5N8sfAZ4FlwEeras8sn+6ISzc9s9jywuLLbN7xW2yZF1teGHPmVNWR7yVJWlQ8/YAkNchyl6QG9a7ckyxLckuST3bbK5PckGRfd7lioTM+LsnJSa5OcmeSvUle1Oe8AEkuSbInye1JrkhyXN8yJ/lokoNJbh/aN23GJJd2p7m4K8kre5L3/d3XxdeS/FuSk/ucd+i2P0tSSVYN7VvQvF2GKTMneXOXa0+S9w3t793nOMlzk3w5ya1Jdid54VjzVlWv/gF/Cvwz8Mlu+33Atu76NuC9C51xKOtO4A3d9WOAk3ue9xTgHuD4bvsq4KK+ZQZeBjwfuH1o35QZgWcDtwHHAqcD3wCW9SDvbwPLu+vv7Xvebv+pDN4E8S1gVV/yHuZz/FvA54Bju+3Vfck8Td7rgd/prr8a+MI48/Zqck+yDjgHuGxo92YGJUp3ed5855pKkpMY/A/8CEBV/biqHqaneYcsB45Pshw4gcHxB73KXFVfBL57yO7pMm4GrqyqR6rqHuBuBqe/mDdT5a2q66vq0W7zywyO9YCe5u38LfAOnnjA4YLnhWkzvxHYXlWPdPc52O1f8MzT5C3gpO76L/KzY3/GkrdX5Q58kMEX12ND+9ZU1QGA7nL1QgSbwtOASeBj3TLSZUlOpL95qapvAx8A7gMOAN+rquvpceYh02Wc6lQXp8xztiP5A+A/u+u9zJvkXODbVXXbITf1Mm/n6cBvJrkxyX8neUG3v6+Z3wq8P8n9DL4PL+32jyVvb8o9yWuAg1V100JnOUrLGfza9aGqeh7wQwbLBb3VrVNvZvCr31OAE5NcuLCpRnbEU10spCTvAh4FPvH4rinutqB5k5wAvAv4y6lunmJfXz6/y4EVwBnA24GrkoT+Zn4jcElVnQpcQvdbP2PK25tyB14CnJvkXgZnkTwryeXAg0nWAnSXB6d/inm1H9hfVTd221czKPu+5gV4BXBPVU1W1U+Aa4AX0+/Mj5suY29PdZFkC/Aa4HXVLa7Sz7y/wuAH/m3d99864OYkv0w/8z5uP3BNDXyFwW/8q+hv5i0MvucA/oWfLb2MJW9vyr2qLq2qdVW1nsHpCj5fVRcyOG3Blu5uW4BrFyjiE1TVd4D7kzyj27UJuIOe5u3cB5yR5IRuwtkE7KXfmR83XcbrgAuSHJvkdGAD8JUFyPcESV4F/DlwblX9aOim3uWtqq9X1eqqWt99/+0Hnt99jfcu75B/B84CSPJ0Bm9qeIj+Zn4AeHl3/SxgX3d9PHnn8xXkGbzSfCY/e7fMLwG7uk/ELmDlQucbyvlcYDfwNQZfaCv6nLfL/FfAncDtwMcZvELfq8zAFQxeE/gJg6K5+HAZGSwpfAO4i+7dCD3IezeDddRbu38f7nPeQ26/l+7dMn3Ie5jP8THA5d3X8s3AWX3JPE3elwI3MXhnzI3Ar48zr6cfkKQG9WZZRpI0dyx3SWqQ5S5JDbLcJalBlrskNchyl6QGWe6S1KD/B8oacEELFTjdAAAAAElFTkSuQmCC\n",
"text/plain": [
""
]
@@ -418,7 +418,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "Time (sec): 158.54225397109985\n"
+ "Time (sec): 157.89826607704163\n"
]
}
],
@@ -450,16 +450,29 @@
"nb_pattern_instances = 100 # nb of patterns\n",
"nb_train_graphs_per_pattern_instance = 100 # train per pattern\n",
"nb_test_graphs_per_pattern_instance = 20 # test, val per pattern\n",
+ "\n",
+ "# # debug\n",
+ "# nb_pattern_instances = 10 # nb of patterns\n",
+ "# nb_train_graphs_per_pattern_instance = 10 # train per pattern\n",
+ "# nb_test_graphs_per_pattern_instance = 2 # test, val per pattern\n",
+ "# # debug\n",
+ "\n",
"SBM_parameters = {}\n",
"SBM_parameters['nb_clusters'] = 5 \n",
"SBM_parameters['size_min'] = 5 \n",
"SBM_parameters['size_max'] = 35 \n",
- "SBM_parameters['p'] = 0.5 \n",
- "SBM_parameters['q'] = 0.2 \n",
+ "#SBM_parameters['p'] = 0.5 # v1\n",
+ "#SBM_parameters['q'] = 0.2 # v1\n",
+ "#SBM_parameters['p'] = 0.5 # v2\n",
+ "#SBM_parameters['q'] = 0.5 # v2\n",
+ "#SBM_parameters['p'] = 0.5; SBM_parameters['q'] = 0.25 # v3\n",
+ "SBM_parameters['p'] = 0.5; SBM_parameters['q'] = 0.35 # v4\n",
"SBM_parameters['p_pattern'] = 0.5 \n",
"SBM_parameters['q_pattern'] = 0.5 \n",
"SBM_parameters['vocab_size'] = 3 \n",
- "SBM_parameters['size_subgraph'] = 20 \n",
+ "#SBM_parameters['size_subgraph'] = 20 # v1\n",
+ "SBM_parameters['size_subgraph_min'] = 5 # v2\n",
+ "SBM_parameters['size_subgraph_max'] = 35 # v2\n",
"print(SBM_parameters)\n",
" \n",
"\n",
@@ -470,9 +483,12 @@
" \n",
" print('pattern:',idx)\n",
" \n",
- " SBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'],SBM_parameters['p'])\n",
- " SBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'],size=SBM_parameters['size_subgraph'])\n",
- "\n",
+ " #SBM_parameters['W0'] = random_pattern(SBM_parameters['size_subgraph'],SBM_parameters['p']) # v1\n",
+ " #SBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'],size=SBM_parameters['size_subgraph']) # v1\n",
+ " size_subgraph = np.random.randint(SBM_parameters['size_subgraph_min'],SBM_parameters['size_subgraph_max'],size=1)[0] # v2\n",
+ " SBM_parameters['W0'] = random_pattern(size_subgraph,SBM_parameters['p']) # v2\n",
+ " SBM_parameters['u0'] = np.random.randint(SBM_parameters['vocab_size'],size=size_subgraph) # v2\n",
+ " \n",
" for _ in range(nb_train_graphs_per_pattern_instance):\n",
" data = generate_SBM_graph(SBM_parameters)\n",
" graph = DotDict()\n",
@@ -523,6 +539,13 @@
"print('Time (sec):',time.time() - start) # 163s\n"
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -539,7 +562,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "/Users/xbresson/Documents/Dropbox/06_NTU_2017_now/03_my_codes/34_benchmark20/GITHUB_benchmark_project/benchmarking-gnn\n"
+ "/Users/xbresson/Documents/Dropbox/06_NTU_2017_now/03_my_codes/34_benchmark20/14_GITHUB_benchmark_project_apr20/benchmarking-gnns-dev_NEW_PATTERN\n"
]
}
],
@@ -582,14 +605,13 @@
"preparing 2000 graphs for the TEST set...\n",
"preparing 2000 graphs for the VAL set...\n",
"[I] Finished loading.\n",
- "[I] Data load time: 5083.2611s\n",
- "Time (sec): 5083.27174282074\n"
+ "[I] Data load time: 6211.2342s\n"
]
}
],
"source": [
"DATASET_NAME = 'SBM_PATTERN'\n",
- "dataset = SBMsDatasetDGL(DATASET_NAME) # 4424s = 73min\n"
+ "dataset = SBMsDatasetDGL(DATASET_NAME) # 6211s\n"
]
},
{
@@ -604,29 +626,26 @@
"10000\n",
"2000\n",
"2000\n",
- "(DGLGraph(num_nodes=119, num_edges=4842,\n",
+ "(DGLGraph(num_nodes=108, num_edges=4884,\n",
" ndata_schemes={'feat': Scheme(shape=(), dtype=torch.int64)}\n",
- " edata_schemes={'feat': Scheme(shape=(1,), dtype=torch.float32)}), tensor([0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
- " 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0,\n",
- " 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0,\n",
- " 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0,\n",
- " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0],\n",
- " dtype=torch.int16))\n",
- "(DGLGraph(num_nodes=134, num_edges=5956,\n",
+ " edata_schemes={'feat': Scheme(shape=(1,), dtype=torch.float32)}), tensor([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,\n",
+ " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,\n",
+ " 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n",
+ " 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0,\n",
+ " 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0], dtype=torch.int16))\n",
+ "(DGLGraph(num_nodes=108, num_edges=4738,\n",
" ndata_schemes={'feat': Scheme(shape=(), dtype=torch.int64)}\n",
- " edata_schemes={'feat': Scheme(shape=(1,), dtype=torch.float32)}), tensor([0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1,\n",
- " 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
- " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0,\n",
- " 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0,\n",
- " 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
- " 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], dtype=torch.int16))\n",
- "(DGLGraph(num_nodes=118, num_edges=4686,\n",
+ " edata_schemes={'feat': Scheme(shape=(1,), dtype=torch.float32)}), tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,\n",
+ " 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
+ " 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1,\n",
+ " 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0,\n",
+ " 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1], dtype=torch.int16))\n",
+ "(DGLGraph(num_nodes=94, num_edges=3772,\n",
" ndata_schemes={'feat': Scheme(shape=(), dtype=torch.int64)}\n",
- " edata_schemes={'feat': Scheme(shape=(1,), dtype=torch.float32)}), tensor([1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,\n",
- " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,\n",
- " 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0,\n",
- " 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0,\n",
- " 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],\n",
+ " edata_schemes={'feat': Scheme(shape=(1,), dtype=torch.float32)}), tensor([0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1,\n",
+ " 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
+ " 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1,\n",
+ " 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n",
" dtype=torch.int16))\n"
]
}
@@ -643,14 +662,14 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "Time (sec): 21.121261835098267\n"
+ "Time (sec): 24.47579312324524\n"
]
}
],
@@ -672,7 +691,7 @@
},
{
"cell_type": "code",
- "execution_count": 13,
+ "execution_count": 10,
"metadata": {},
"outputs": [
{
@@ -682,7 +701,7 @@
"[I] Loading dataset SBM_PATTERN...\n",
"train, test, val sizes : 10000 2000 2000\n",
"[I] Finished loading.\n",
- "[I] Data load time: 30.4815s\n"
+ "[I] Data load time: 47.9751s\n"
]
}
],
@@ -694,14 +713,14 @@
},
{
"cell_type": "code",
- "execution_count": 15,
+ "execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "Time (sec): 0.0003371238708496094\n"
+ "Time (sec): 0.0036211013793945312\n"
]
}
],
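
The notebook changes above replace v1's fixed pattern size of 20 with a size drawn per pattern instance, and raise the inter-block probability q from 0.2 to 0.35, presumably to make the pattern harder to separate from the background blocks. A minimal sketch of the v2/v4 sampling, assuming the notebook's existing `random_pattern(n, p)` helper (not redefined here):

```python
import numpy as np

# Sketch of the v2/v4 pattern sampling above; random_pattern(n, p) is the
# notebook's existing helper and is assumed, not redefined.
SBM_parameters = {
    'size_subgraph_min': 5,    # v2: pattern size is sampled per instance...
    'size_subgraph_max': 35,   # ...instead of fixed at 20 (v1)
    'p': 0.5,
    'q': 0.35,                 # v4: noisier background than v1's q = 0.2
    'vocab_size': 3,
}

size_subgraph = np.random.randint(SBM_parameters['size_subgraph_min'],
                                  SBM_parameters['size_subgraph_max'])
# W0 = random_pattern(size_subgraph, SBM_parameters['p'])  # pattern adjacency
u0 = np.random.randint(SBM_parameters['vocab_size'], size=size_subgraph)  # pattern node features
```
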
diff --git a/data/TSP.py b/data/TSP.py
index 5f95ed493..113fb3624 100644
--- a/data/TSP.py
+++ b/data/TSP.py
@@ -136,15 +136,77 @@ def collate(self, samples):
graphs, labels = map(list, zip(*samples))
# Edge classification labels need to be flattened to 1D lists
labels = torch.LongTensor(np.array(list(itertools.chain(*labels))))
- tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
- tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
- snorm_n = torch.cat(tab_snorm_n).sqrt()
- tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
- tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
- snorm_e = torch.cat(tab_snorm_e).sqrt()
+ #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
+ #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
+ #snorm_n = torch.cat(tab_snorm_n).sqrt()
+ #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
+ #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
+ #snorm_e = torch.cat(tab_snorm_e).sqrt()
batched_graph = dgl.batch(graphs)
- return batched_graph, labels, snorm_n, snorm_e
+ return batched_graph, labels
+
+
+ # prepare dense tensors for GNNs that use them, such as RingGNN and 3WLGNN
+ def collate_dense_gnn(self, samples, edge_feat):
+ # The input samples is a list of pairs (graph, label).
+ graphs, labels = map(list, zip(*samples))
+ # Edge classification labels need to be flattened to 1D lists
+ labels = torch.LongTensor(np.array(list(itertools.chain(*labels))))
+ #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
+ #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
+ #snorm_n = tab_snorm_n[0][0].sqrt()
+
+ #batched_graph = dgl.batch(graphs)
+
+ g = graphs[0]
+ adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
+ """
+ Adapted from https://github.com/leichen2018/Ring-GNN/
+ Assigning node and edge feats:
+ We have the adjacency matrix in R^{n x n}, node features in R^{d_n}, and edge features in R^{d_e}.
+ We then build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}; T[0, :, :] is the adjacency matrix.
+ The diagonal entries T[1:1+d_n, i, i], i = 0 to n-1, store the node features of node i.
+ The off-diagonal entries T[1+d_n:, i, j] store the edge features of edge (i, j).
+ """
+
+ zero_adj = torch.zeros_like(adj)
+
+ in_node_dim = g.ndata['feat'].shape[1]
+ in_edge_dim = g.edata['feat'].shape[1]
+
+ if edge_feat:
+ # use edge feats also to prepare adj
+ adj_with_edge_feat = torch.stack([zero_adj for j in range(in_node_dim + in_edge_dim)])
+ adj_with_edge_feat = torch.cat([adj.unsqueeze(0), adj_with_edge_feat], dim=0)
+
+ us, vs = g.edges()
+ for idx, e_feat in enumerate(g.edata['feat']): # e_feat renamed to avoid shadowing the edge_feat flag
+ adj_with_edge_feat[1+in_node_dim:, us[idx], vs[idx]] = e_feat
+
+ for node, node_feat in enumerate(g.ndata['feat']):
+ adj_with_edge_feat[1:1+in_node_dim, node, node] = node_feat
+
+ x_with_edge_feat = adj_with_edge_feat.unsqueeze(0)
+
+ return None, x_with_edge_feat, labels, g.edges()
+ else:
+ # use only node feats to prepare adj
+ adj_no_edge_feat = torch.stack([zero_adj for j in range(in_node_dim)])
+ adj_no_edge_feat = torch.cat([adj.unsqueeze(0), adj_no_edge_feat], dim=0)
+
+ for node, node_feat in enumerate(g.ndata['feat']):
+ adj_no_edge_feat[1:1+in_node_dim, node, node] = node_feat
+
+ x_no_edge_feat = adj_no_edge_feat.unsqueeze(0)
+
+ return x_no_edge_feat, None, labels, g.edges()
+
+ def _sym_normalize_adj(self, adj):
+ deg = torch.sum(adj, dim=0)
+ deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
+ deg_inv = torch.diag(deg_inv)
+ return torch.mm(deg_inv, torch.mm(adj, deg_inv))
def _add_self_loops(self):
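
Because `collate` no longer returns the snorm tensors and the dense path now goes through `collate_dense_gnn`, the training scripts' dataloaders need matching wiring. A hedged sketch of one way to hook the dense collate into a PyTorch DataLoader; the `partial` binding and `edge_feat=True` are illustrative assumptions, not taken from this diff:

```python
from functools import partial
from torch.utils.data import DataLoader

# Dense WL-GNNs (RingGNN, 3WLGNN) consume one graph at a time, so the dense
# collate is paired with batch_size=1; `dataset` is a loaded TSPDataset.
train_loader = DataLoader(
    dataset.train,
    batch_size=1,
    shuffle=True,
    collate_fn=partial(dataset.collate_dense_gnn, edge_feat=True),
)

for x_no_edge_feat, x_with_edge_feat, labels, edges in train_loader:
    # exactly one of the two tensors is not None; with edge features it has
    # shape (1, 1 + d_n + d_e, n, n), as described in the docstring above
    x = x_with_edge_feat if x_no_edge_feat is None else x_no_edge_feat
    break
```
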
diff --git a/data/TUs.py b/data/TUs.py
index 8db4a03ff..1c63e137f 100644
--- a/data/TUs.py
+++ b/data/TUs.py
@@ -181,14 +181,59 @@ def collate(self, samples):
# The input samples is a list of pairs (graph, label).
graphs, labels = map(list, zip(*samples))
labels = torch.tensor(np.array(labels))
- tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
- tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
- snorm_n = torch.cat(tab_snorm_n).sqrt()
- tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
- tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
- snorm_e = torch.cat(tab_snorm_e).sqrt()
+ #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
+ #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
+ #snorm_n = torch.cat(tab_snorm_n).sqrt()
+ #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
+ #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
+ #snorm_e = torch.cat(tab_snorm_e).sqrt()
batched_graph = dgl.batch(graphs)
- return batched_graph, labels, snorm_n, snorm_e
+
+ return batched_graph, labels
+
+
+ # prepare dense tensors for GNNs that use them, such as RingGNN and 3WLGNN
+ def collate_dense_gnn(self, samples):
+ # The input samples is a list of pairs (graph, label).
+ graphs, labels = map(list, zip(*samples))
+ labels = torch.tensor(np.array(labels))
+ #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
+ #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
+ #snorm_n = tab_snorm_n[0][0].sqrt()
+
+ #batched_graph = dgl.batch(graphs)
+
+ g = graphs[0]
+ adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
+ """
+ Adapted from https://github.com/leichen2018/Ring-GNN/
+ Assigning node and edge feats:
+ We have the adjacency matrix in R^{n x n}, node features in R^{d_n}, and edge features in R^{d_e}.
+ We then build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}; T[0, :, :] is the adjacency matrix.
+ The diagonal entries T[1:1+d_n, i, i], i = 0 to n-1, store the node features of node i.
+ The off-diagonal entries T[1+d_n:, i, j] store the edge features of edge (i, j).
+ """
+
+ zero_adj = torch.zeros_like(adj)
+
+ in_dim = g.ndata['feat'].shape[1]
+
+ # use node feats to prepare adj
+ adj_node_feat = torch.stack([zero_adj for j in range(in_dim)])
+ adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
+
+ for node, node_feat in enumerate(g.ndata['feat']):
+ adj_node_feat[1:, node, node] = node_feat
+
+ x_node_feat = adj_node_feat.unsqueeze(0)
+
+ return x_node_feat, labels
+
+ def _sym_normalize_adj(self, adj):
+ deg = torch.sum(adj, dim=0)
+ deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
+ deg_inv = torch.diag(deg_inv)
+ return torch.mm(deg_inv, torch.mm(adj, deg_inv))
def _add_self_loops(self):
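
The `_sym_normalize_adj` helper repeated across these datasets is the standard symmetric normalization D^{-1/2} A D^{-1/2}, with zero-degree nodes mapped to zero rather than dividing by zero. A tiny self-contained check of its output on a 3-node path graph:

```python
import torch

def sym_normalize_adj(adj):
    # same logic as the _sym_normalize_adj methods added in this diff
    deg = torch.sum(adj, dim=0)
    deg_inv = torch.where(deg > 0, 1. / torch.sqrt(deg), torch.zeros(deg.size()))
    deg_inv = torch.diag(deg_inv)
    return torch.mm(deg_inv, torch.mm(adj, deg_inv))

# path graph on 3 nodes: 0 - 1 - 2
adj = torch.tensor([[0., 1., 0.],
                    [1., 0., 1.],
                    [0., 1., 0.]])
print(sym_normalize_adj(adj))
# each off-diagonal entry becomes 1/sqrt(deg_i * deg_j), e.g. 1/sqrt(1*2) ~ 0.7071
```
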
diff --git a/data/data.py b/data/data.py
index 46dc097c6..125d33704 100644
--- a/data/data.py
+++ b/data/data.py
@@ -6,7 +6,9 @@
from data.TUs import TUsDataset
from data.SBMs import SBMsDataset
from data.TSP import TSPDataset
-from data.CitationGraphs import CitationGraphsDataset
+from data.COLLAB import COLLABDataset
+from data.CSL import CSLDataset
+
def LoadData(DATASET_NAME):
"""
@@ -23,7 +25,7 @@ def LoadData(DATASET_NAME):
return MoleculeDataset(DATASET_NAME)
# handling for the TU Datasets
- TU_DATASETS = ['COLLAB', 'ENZYMES', 'DD', 'PROTEINS_full']
+ TU_DATASETS = ['ENZYMES', 'DD', 'PROTEINS_full']
if DATASET_NAME in TU_DATASETS:
return TUsDataset(DATASET_NAME)
@@ -36,7 +38,11 @@ def LoadData(DATASET_NAME):
if DATASET_NAME == 'TSP':
return TSPDataset(DATASET_NAME)
- # handling for the CITATIONGRAPHS Datasets
- CITATIONGRAPHS_DATASETS = ['CORA', 'CITESEER', 'PUBMED']
- if DATASET_NAME in CITATIONGRAPHS_DATASETS:
- return CitationGraphsDataset(DATASET_NAME)
+ # handling for COLLAB dataset
+ if DATASET_NAME == 'OGBL-COLLAB':
+ return COLLABDataset(DATASET_NAME)
+
+ # handling for the CSL (Circular Skip Links) Dataset
+ if DATASET_NAME == 'CSL':
+ return CSLDataset(DATASET_NAME)
+
\ No newline at end of file
diff --git a/data/molecules.py b/data/molecules.py
index 647190a68..c03c862ef 100644
--- a/data/molecules.py
+++ b/data/molecules.py
@@ -9,6 +9,9 @@
import dgl
+from scipy import sparse as sp
+import numpy as np
+
# *NOTE
# The dataset pickle and index files are in ./zinc_molecules/ dir
# [.pickle and .index; for split 'train', 'val' and 'test']
@@ -135,6 +138,31 @@ def self_loop(g):
+def positional_encoding(g, pos_enc_dim):
+ """
+ Graph positional encoding via Laplacian eigenvectors
+ """
+
+ # Laplacian
+ A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
+ N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
+ L = sp.eye(g.number_of_nodes()) - N * A * N
+
+ # Eigenvectors with numpy
+ EigVal, EigVec = np.linalg.eig(L.toarray())
+ idx = EigVal.argsort() # increasing order
+ EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx])
+ g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
+
+ # # Eigenvectors with scipy
+ # EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
+ # EigVec = EigVec[:, EigVal.argsort()] # increasing order
+ # g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
+
+ return g
+
+
+
class MoleculeDataset(torch.utils.data.Dataset):
def __init__(self, name):
@@ -162,15 +190,73 @@ def collate(self, samples):
# The input samples is a list of pairs (graph, label).
graphs, labels = map(list, zip(*samples))
labels = torch.tensor(np.array(labels)).unsqueeze(1)
- tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
- tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
- snorm_n = torch.cat(tab_snorm_n).sqrt()
- tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
- tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
- snorm_e = torch.cat(tab_snorm_e).sqrt()
- batched_graph = dgl.batch(graphs)
- return batched_graph, labels, snorm_n, snorm_e
+ #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
+ #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
+ #snorm_n = torch.cat(tab_snorm_n).sqrt()
+ #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
+ #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
+ #snorm_e = torch.cat(tab_snorm_e).sqrt()
+ batched_graph = dgl.batch(graphs)
+
+ return batched_graph, labels
+
+ # prepare dense tensors for GNNs that use them, such as RingGNN and 3WLGNN
+ def collate_dense_gnn(self, samples, edge_feat):
+ # The input samples is a list of pairs (graph, label).
+ graphs, labels = map(list, zip(*samples))
+ labels = torch.tensor(np.array(labels)).unsqueeze(1)
+ #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
+ #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
+ #snorm_n = tab_snorm_n[0][0].sqrt()
+
+ #batched_graph = dgl.batch(graphs)
+
+ g = graphs[0]
+ adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
+ """
+ Adapted from https://github.com/leichen2018/Ring-GNN/
+ Assigning node and edge feats:
+ We have the adjacency matrix in R^{n x n}, node features in R^{d_n}, and edge features in R^{d_e}.
+ We then build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}; T[0, :, :] is the adjacency matrix.
+ The diagonal entries T[1:1+d_n, i, i], i = 0 to n-1, store the node features of node i.
+ The off-diagonal entries T[1+d_n:, i, j] store the edge features of edge (i, j).
+ """
+
+ zero_adj = torch.zeros_like(adj)
+
+ if edge_feat:
+ # use edge feats also to prepare adj
+ adj_with_edge_feat = torch.stack([zero_adj for j in range(self.num_atom_type + self.num_bond_type)])
+ adj_with_edge_feat = torch.cat([adj.unsqueeze(0), adj_with_edge_feat], dim=0)
+
+ us, vs = g.edges()
+ for idx, edge_label in enumerate(g.edata['feat']):
+ adj_with_edge_feat[edge_label.item()+1+self.num_atom_type][us[idx]][vs[idx]] = 1
+
+ for node, node_label in enumerate(g.ndata['feat']):
+ adj_with_edge_feat[node_label.item()+1][node][node] = 1
+
+ x_with_edge_feat = adj_with_edge_feat.unsqueeze(0)
+
+ return None, x_with_edge_feat, labels
+
+ else:
+ # use only node feats to prepare adj
+ adj_no_edge_feat = torch.stack([zero_adj for j in range(self.num_atom_type)])
+ adj_no_edge_feat = torch.cat([adj.unsqueeze(0), adj_no_edge_feat], dim=0)
+
+ for node, node_label in enumerate(g.ndata['feat']):
+ adj_no_edge_feat[node_label.item()+1][node][node] = 1
+
+ x_no_edge_feat = adj_no_edge_feat.unsqueeze(0)
+
+ return x_no_edge_feat, None, labels
+ def _sym_normalize_adj(self, adj):
+ deg = torch.sum(adj, dim=0)
+ deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
+ deg_inv = torch.diag(deg_inv)
+ return torch.mm(deg_inv, torch.mm(adj, deg_inv))
def _add_self_loops(self):
@@ -181,6 +267,13 @@ def _add_self_loops(self):
self.val.graph_lists = [self_loop(g) for g in self.val.graph_lists]
self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]
+ def _add_positional_encodings(self, pos_enc_dim):
+
+ # Graph positional encoding via Laplacian eigenvectors
+ self.train.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists]
+ self.val.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.val.graph_lists]
+ self.test.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.test.graph_lists]
+
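
The new positional encodings attach an n x pos_enc_dim tensor of (absolute) Laplacian eigenvector entries to each graph as `g.ndata['pos_enc']`, skipping the trivial constant eigenvector. A hedged usage sketch; `pos_enc_dim=8` is an illustrative choice, not prescribed by this diff:

```python
from data.molecules import MoleculeDataset

# Sketch: attach Laplacian positional encodings to every ZINC graph.
dataset = MoleculeDataset('ZINC')
dataset._add_positional_encodings(pos_enc_dim=8)

g, label = dataset.train[0]
# eigenvectors of L = I - D^{-1/2} A D^{-1/2} for the 8 smallest non-trivial
# eigenvalues, one row per node; sign ambiguity is collapsed by np.abs
print(g.ndata['pos_enc'].shape)  # torch.Size([num_nodes, 8])
```
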
diff --git a/data/script_download_CSL.sh b/data/script_download_CSL.sh
new file mode 100644
index 000000000..8c035db9c
--- /dev/null
+++ b/data/script_download_CSL.sh
@@ -0,0 +1,22 @@
+
+
+# Command to download dataset:
+# bash script_download_CSL.sh
+
+
+DIR=CSL/
+cd $DIR
+
+FILE=CSL.zip
+if test -f "$FILE"; then
+ echo -e "$FILE already downloaded."
+else
+ echo -e "\ndownloading $FILE..."
+ curl https://www.dropbox.com/s/rnbkp5ubgk82ocu/CSL.zip?dl=1 -o CSL.zip -J -L -k
+ unzip CSL.zip -d ./
+ rm -r __MACOSX/
+fi
+
+
+
+
diff --git a/data/script_download_SBMs.sh b/data/script_download_SBMs.sh
index 90e0d0dc7..a2de3d98c 100644
--- a/data/script_download_SBMs.sh
+++ b/data/script_download_SBMs.sh
@@ -22,7 +22,7 @@ if test -f "$FILE"; then
echo -e "$FILE already downloaded."
else
echo -e "\ndownloading $FILE..."
- curl https://www.dropbox.com/s/zf17n6x6s441s14/SBM_PATTERN.pkl?dl=1 -o SBM_PATTERN.pkl -J -L -k
+ curl https://www.dropbox.com/s/9h6crgk4argc89o/SBM_PATTERN.pkl?dl=1 -o SBM_PATTERN.pkl -J -L -k
fi
diff --git a/data/script_download_all_datasets.sh b/data/script_download_all_datasets.sh
index 8396feefe..b0f6e292e 100644
--- a/data/script_download_all_datasets.sh
+++ b/data/script_download_all_datasets.sh
@@ -69,7 +69,7 @@ if test -f "$FILE"; then
echo -e "$FILE already downloaded."
else
echo -e "\ndownloading $FILE..."
- curl https://www.dropbox.com/s/zf17n6x6s441s14/SBM_PATTERN.pkl?dl=1 -o SBM_PATTERN.pkl -J -L -k
+ curl https://www.dropbox.com/s/9h6crgk4argc89o/SBM_PATTERN.pkl?dl=1 -o SBM_PATTERN.pkl -J -L -k
fi
cd ..
@@ -93,6 +93,28 @@ fi
cd ..
+############
+# CSL
+############
+
+DIR=CSL/
+cd $DIR
+
+FILE=CSL.zip
+if test -f "$FILE"; then
+ echo -e "$FILE already downloaded."
+else
+ echo -e "\ndownloading $FILE..."
+ curl https://www.dropbox.com/s/rnbkp5ubgk82ocu/CSL.zip?dl=1 -o CSL.zip -J -L -k
+ unzip CSL.zip -d ./
+ rm -r __MACOSX/
+fi
+
+cd ..
+
+
+
+
diff --git a/data/superpixels.py b/data/superpixels.py
index 2a726df4e..204629a99 100644
--- a/data/superpixels.py
+++ b/data/superpixels.py
@@ -281,17 +281,64 @@ def collate(self, samples):
# The input samples is a list of pairs (graph, label).
graphs, labels = map(list, zip(*samples))
labels = torch.tensor(np.array(labels))
- tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
- tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
- snorm_n = torch.cat(tab_snorm_n).sqrt()
- tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
- tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
- snorm_e = torch.cat(tab_snorm_e).sqrt()
+ #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
+ #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
+ #snorm_n = torch.cat(tab_snorm_n).sqrt()
+ #tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
+ #tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
+ #snorm_e = torch.cat(tab_snorm_e).sqrt()
for idx, graph in enumerate(graphs):
graphs[idx].ndata['feat'] = graph.ndata['feat'].float()
graphs[idx].edata['feat'] = graph.edata['feat'].float()
batched_graph = dgl.batch(graphs)
- return batched_graph, labels, snorm_n, snorm_e
+
+ return batched_graph, labels
+
+
+ # prepare dense tensors for GNNs that use them, such as RingGNN and 3WLGNN
+ def collate_dense_gnn(self, samples):
+ # The input samples is a list of pairs (graph, label).
+ graphs, labels = map(list, zip(*samples))
+ labels = torch.tensor(np.array(labels))
+ #tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
+ #tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
+ #snorm_n = tab_snorm_n[0][0].sqrt()
+
+ #batched_graph = dgl.batch(graphs)
+
+ g = graphs[0]
+ adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
+ """
+ Adapted from https://github.com/leichen2018/Ring-GNN/
+ Assigning node and edge feats:
+ we have the adjacency matrix in R^{n x n}, the node features in R^{d_n}, and the edge features in R^{d_e}.
+ Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
+ The diagonal T[1:1+d_n, i, i], i = 0 to n-1, stores the feature of node i.
+ The off-diagonal T[1+d_n:, i, j] stores the features of edge (i, j).
+ """
+
+ zero_adj = torch.zeros_like(adj)
+
+ in_dim = g.ndata['feat'].shape[1]
+
+ # use node feats to prepare adj
+ adj_node_feat = torch.stack([zero_adj for j in range(in_dim)])
+ adj_node_feat = torch.cat([adj.unsqueeze(0), adj_node_feat], dim=0)
+
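+ # store the d_n-dim feature of each node on the diagonal of channels 1..d_n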
+ for node, node_feat in enumerate(g.ndata['feat']):
+ adj_node_feat[1:, node, node] = node_feat
+
+ x_node_feat = adj_node_feat.unsqueeze(0)
+
+ return x_node_feat, labels
+
+ def _sym_normalize_adj(self, adj):
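+ # symmetric normalization: D^{-1/2} A D^{-1/2}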
+ deg = torch.sum(adj, dim = 0)#.squeeze()
+ deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
+ deg_inv = torch.diag(deg_inv)
+ return torch.mm(deg_inv, torch.mm(adj, deg_inv))
+
+
def _add_self_loops(self):
diff --git a/docs/02_download_datasets.md b/docs/02_download_datasets.md
index e1467e063..049b7deb1 100644
--- a/docs/02_download_datasets.md
+++ b/docs/02_download_datasets.md
@@ -64,7 +64,26 @@ Script [script_download_TSP.sh](../data/script_download_TSP.sh) is located here.
-## 6. All datasets
+## 6. CSL dataset
+The CSL dataset is 27KB in size.
+
+```
+# At the root of the project
+cd data/
+bash script_download_CSL.sh
+```
+Script [script_download_CSL.sh](../data/script_download_CSL.sh) is located here.
+
+
+
+## 7. COLLAB dataset
+
+Nothing to do. The COLLAB dataset files will be automatically downloaded from OGB when running the experiment files for COLLAB.
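+
+For reference, a minimal sketch of the underlying OGB call (the class name and the default `dataset/` download directory follow the `ogb` version pinned in the environment files; adjust if yours differs):
+
+```
+from ogb.linkproppred import DglLinkPropPredDataset
+
+# downloads to dataset/ on first use, then loads the local copy
+dataset = DglLinkPropPredDataset(name='ogbl-collab')
+split_edge = dataset.get_edge_split()  # train/valid/test edge splits
+graph = dataset[0]
+```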
+
+
+
+
+## 8. All datasets
```
# At the root of the project
diff --git a/docs/03_run_codes.md b/docs/03_run_codes.md
index 7eb814bba..3b9ee1001 100644
--- a/docs/03_run_codes.md
+++ b/docs/03_run_codes.md
@@ -12,8 +12,8 @@
```
# Run the main file (at the root of the project)
-python main_molecules_graph_regression.py --dataset ZINC --config 'configs/molecules_graph_regression_GatedGCN_ZINC.json' # for CPU
-python main_molecules_graph_regression.py --dataset ZINC --gpu_id 0 --config 'configs/molecules_graph_regression_GatedGCN_ZINC.json' # for GPU
+python main_molecules_graph_regression.py --dataset ZINC --config 'configs/molecules_graph_regression_GatedGCN_ZINC_100k.json' # for CPU
+python main_molecules_graph_regression.py --dataset ZINC --gpu_id 0 --config 'configs/molecules_graph_regression_GatedGCN_ZINC_100k.json' # for GPU
```
The training and network parameters for each dataset and network is stored in a json file in the [`configs/`](../configs) directory.
@@ -45,7 +45,7 @@ Use [`main_molecules_graph_regression.ipynb`](../main_molecules_graph_regression
## 2. Output, checkpoints and visualizations
-Output results are located in the folder defined by the variable `out_dir` in the corresponding config file (eg. [`configs/molecules_graph_regression_GatedGCN_ZINC.json`](../configs/molecules_graph_regression_GatedGCN_ZINC.json) file).
+Output results are located in the folder defined by the variable `out_dir` in the corresponding config file (eg. [`configs/molecules_graph_regression_GatedGCN_ZINC_100k.json`](../configs/molecules_graph_regression_GatedGCN_ZINC_100k.json) file).
If `out_dir = 'out/molecules_graph_regression/'`, then
@@ -53,51 +53,70 @@ If `out_dir = 'out/molecules_graph_regression/'`, then
1. Go to`out/molecules_graph_regression/results` to view all result text files.
2. Directory `out/molecules_graph_regression/checkpoints` contains model checkpoints.
-#### 2.2 To see the training logs in Tensorboard
-1. Go to the logs directory, i.e. `out/molecules_graph_regression/logs/`
-2. Run the command `tensorboard --logdir='./'`
-3. Open `http://localhost:6006` in your browser. Note that the port information (here 6006) appears on the terminal immediately after running Step 2.
-
-
-
-
-
-
-## 3. Reproduce results
-
-
-
-### 3.1 Results (1 run)
-
+#### 2.2 To see the training logs in Tensorboard on a local machine
+1. Go to the logs directory, i.e. `out/molecules_graph_regression/logs/`.
+2. Run the commands
```
-# At the root of the project
-bash script_one_code_to_rull_them_all.sh # run all datasets and all GNNs
+source activate benchmark_gnn
+tensorboard --logdir='./' --port 6006
```
+3. Open `http://localhost:6006` in your browser. Note that the port information (here 6006 but it may change) appears on the terminal immediately after starting tensorboard.
-See script [script_one_code_to_rull_them_all.sh](../script_one_code_to_rull_them_all.sh).
+#### 2.3 To see the training logs in Tensorboard on a remote machine
+1. Go to the logs directory, i.e. `out/molecules_graph_regression/logs/`.
+2. Run the [script](../scripts/TensorBoard/script_tensorboard.sh) with `bash script_tensorboard.sh`.
+3. On your local machine, run the command `ssh -N -f -L localhost:6006:localhost:6006 user@xx.xx.xx.xx`.
+4. Open `http://localhost:6006` in your browser. Note that `user@xx.xx.xx.xx` corresponds to your user login and the IP of the remote machine.
-### 3.2 Results (4 runs, except TSP)
+## 3. Reproduce results (4 runs on all, except CSL and TUs)
+
```
-# At the root of the project
-bash script_main_TUs_graph_classification.sh # run TU datasets
-bash script_main_superpixels_graph_classification_MNIST.sh # run MNIST dataset
-bash script_main_superpixels_graph_classification_CIFAR10.sh # run CIFAR10 dataset
-bash script_main_molecules_graph_regression_ZINC.sh # run ZINC dataset
-bash script_main_SBMs_node_classification_PATTERN.sh # run PATTERN dataset
-bash script_main_SBMs_node_classification_CLUSTER.sh # run CLUSTER dataset
-bash script_main_TSP_edge_classification.sh # run TSP dataset
+# At the root of the project
+bash scripts/SuperPixels/script_main_superpixels_graph_classification_MNIST_100k.sh # run MNIST dataset for 100k params
+bash scripts/SuperPixels/script_main_superpixels_graph_classification_MNIST_500k.sh # run MNIST dataset for 500k params; WL-GNNs
+bash scripts/SuperPixels/script_main_superpixels_graph_classification_CIFAR10_100k.sh # run CIFAR10 dataset for 100k params
+bash scripts/SuperPixels/script_main_superpixels_graph_classification_CIFAR10_500k.sh # run CIFAR10 dataset for 500k params; WL-GNNs
+
+bash scripts/ZINC/script_main_molecules_graph_regression_ZINC_100k.sh # run ZINC dataset for 100k params
+bash scripts/ZINC/script_main_molecules_graph_regression_ZINC_500k.sh # run ZINC dataset for 500k params
+bash scripts/ZINC/script_main_molecules_graph_regression_ZINC_PE_GatedGCN_500k.sh # run ZINC dataset with PE for GatedGCN
+
+bash scripts/SBMs/script_main_SBMs_node_classification_PATTERN_100k.sh # run PATTERN dataset for 100k params
+bash scripts/SBMs/script_main_SBMs_node_classification_PATTERN_500k.sh # run PATTERN dataset for 500k params
+bash scripts/SBMs/script_main_SBMs_node_classification_PATTERN_PE_GatedGCN_500k.sh # run PATTERN dataset with PE for GatedGCN
+bash scripts/SBMs/script_main_SBMs_node_classification_CLUSTER_100k.sh # run CLUSTER dataset for 100k params
+bash scripts/SBMs/script_main_SBMs_node_classification_CLUSTER_500k.sh # run CLUSTER dataset for 500k params
+bash scripts/SBMs/script_main_SBMs_node_classification_CLUSTER_PE_GatedGCN_500k.sh # run CLUSTER dataset with PE for GatedGCN
+
+bash scripts/TSP/script_main_TSP_edge_classification_100k.sh # run TSP dataset for 100k params
+bash scripts/TSP/script_main_TSP_edge_classification_edge_feature_analysis.sh # run TSP dataset for edge feature analysis
+
+bash scripts/COLLAB/script_main_COLLAB_edge_classification_40k.sh # run OGBL-COLLAB dataset for 40k params
+bash scripts/COLLAB/script_main_COLLAB_edge_classification_edge_feature_analysis.sh # run OGBL-COLLAB dataset for edge feature analysis
+bash scripts/COLLAB/script_main_COLLAB_edge_classification_PE_GatedGCN_40k.sh # run OGBL-COLLAB dataset with PE for GatedGCN
+
+bash scripts/CSL/script_main_CSL_graph_classification_20_seeds.sh # run CSL dataset without node features on 20 seeds
+bash scripts/CSL/script_main_CSL_graph_classification_PE_20_seeds.sh # run CSL dataset with PE on 20 seeds
+
+bash scripts/TU/script_main_TUs_graph_classification_100k_seed1.sh # run TU datasets for 100k params on seed1
+bash scripts/TU/script_main_TUs_graph_classification_100k_seed2.sh # run TU datasets for 100k params on seed2
```
-Scripts are [located](../../../) at the root of the repository.
+Scripts are [located](../scripts/) in the `scripts/` directory of the repository.
+
+
+## 4. Generate statistics obtained over multiple runs (except CSL and TUs)
+After running a script, statistics (mean and standard deviation) can be generated from a notebook. For example, after running the script `scripts/ZINC/script_main_molecules_graph_regression_ZINC_100k.sh`, go to the results folder `out/molecules_graph_regression/results/`, and run the [notebook](../scripts/StatisticalResults/generate_statistics_molecules_graph_regression_ZINC_100k.ipynb) `scripts/StatisticalResults/generate_statistics_molecules_graph_regression_ZINC_100k.ipynb` to generate the statistics.
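+
+A minimal sketch of what such a notebook computes (the file glob and the parsing pattern below are assumptions; adapt them to the actual result files):
+
+```
+import glob, re
+import numpy as np
+
+scores = []
+for fname in glob.glob('result_*.txt'):   # hypothetical result-file pattern
+    with open(fname) as f:
+        m = re.search(r'TEST MAE: ([0-9.]+)', f.read())
+    if m:
+        scores.append(float(m.group(1)))
+
+print('{:.4f} +/- {:.4f} over {} runs'.format(np.mean(scores), np.std(scores), len(scores)))
+```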
+
diff --git a/docs/04_add_dataset.md b/docs/04_add_dataset.md
index c900ebb04..7e3b8c782 100644
--- a/docs/04_add_dataset.md
+++ b/docs/04_add_dataset.md
@@ -84,7 +84,7 @@ class NewDatasetDGL(torch.utils.data.Dataset):
### 2.3 Load your dataset
-At the next step, the user will define a class `NewDataset()` that loads the DGL dataset and define a `collate()` module to create mini-batches of graphs.
+At the next step, the user will define a class `NewDataset()` that loads the DGL dataset and defines a `collate()` module to create mini-batches of graphs. Note that the `collate()` function is for MP-GCNs, which use batches of sparse graphs, while `collate_dense_gnn()` is for WL-GNNs, which operate on dense graphs, with no batching of multiple graphs into one tensor.
```
class NewDataset(torch.utils.data.Dataset):
def __init__(self, name):
@@ -98,6 +98,18 @@ class NewDataset(torch.utils.data.Dataset):
graphs, labels = map(list, zip(*samples))
batched_graph = dgl.batch(graphs)
return batched_graph, labels
+
+ def collate_dense_gnn(self, samples):
+ """
+ we have the adjacency matrix in R^{n x n}, the node features in R^{d_n}, and the edge features in R^{d_e}.
+ Then we build a zero-initialized tensor, say X, in R^{(1 + d_n + d_e) x n x n}. X[0, :, :] is the adjacency matrix.
+ The diagonal X[1:1+d_n, i, i], i = 0 to n-1, stores the feature of node i.
+ The off-diagonal X[1+d_n:, i, j] stores the features of edge (i, j).
+ """
+ # prepare one dense tensor using the above instruction
+ # store it as x_with_all_info
+
+ return x_with_all_info, labels
```
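+
+As a concrete sketch, assuming `torch`, `numpy as np`, and `dgl` are imported as in the templates above, one-hot node labels in `g.ndata['feat']`, no edge features, and an illustrative `self.num_node_type` attribute (not part of the template):
+
+```
+def collate_dense_gnn(self, samples):
+    graphs, labels = map(list, zip(*samples))
+    labels = torch.tensor(np.array(labels))
+    g = graphs[0]  # WL-GNNs consume one dense graph at a time
+    adj = g.adjacency_matrix().to_dense()
+    n = adj.shape[0]
+    x = torch.zeros(1 + self.num_node_type, n, n)
+    x[0] = adj                                    # X[0, :, :] is the adjacency matrix
+    for node, node_label in enumerate(g.ndata['feat']):
+        x[node_label.item() + 1, node, node] = 1  # node feature on the diagonal
+    return x.unsqueeze(0), labels
+```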
@@ -114,7 +126,7 @@ def LoadData(DATASET_NAME):
-### 2.5 Create mini-batches
+### 2.5 Create mini-batches for MP-GCNs
Eventually, the user will call function `LoadData(DATASET_NAME)` to load the dataset and function `DataLoader()` to create mini-batch of graphs. For example, this code loads the ZINC dataset and prepares mini-batch of 128 train graphs:
```
@@ -127,13 +139,19 @@ dataset = LoadData(DATASET_NAME)
train_loader = DataLoader(dataset.train, batch_size=128, shuffle=True, collate_fn=MoleculeDataset.collate)
```
+**Note** that the batching approach for MP-GCNs is not applicable to WL-GNNs, which operate on dense tensors. Therefore, we simply have the following code for WL-GNNs.
+
+```
+train_loader = DataLoader(dataset.train, shuffle=True, collate_fn=MoleculeDataset.collate_dense_gnn)
+```
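+
+Since each dense tensor holds a single graph, the effective batch size for WL-GNNs is instead realized in the training loop via gradient accumulation (see [docs/06_add_wlgnn.md](./06_add_wlgnn.md)).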
+
## 3. Dataset split
-A data split for the TU dataset that preserves the class distribution across train-validation-test sets was prepared. The splits are stored in the [TUs/](../data/TUs) folder. We also store the split for the ZINC dataset in the [molecules/](../data/molecules) folder.
+A data split for the TU dataset that preserves the class distribution across train-validation-test sets was prepared. The splits are stored in the [TUs/](../data/TUs) folder. Similarly, the split indices for CSL are stored in the [CSL/](../data/CSL) folder. We also store the split for the ZINC dataset in the [molecules/](../data/molecules) folder. For COLLAB, the dataset splits are automatically fetched from the OGB library.
diff --git a/docs/05_add_gnn.md b/docs/05_add_mpgcn.md
similarity index 87%
rename from docs/05_add_gnn.md
rename to docs/05_add_mpgcn.md
index 4e3664058..b0d6962ad 100644
--- a/docs/05_add_gnn.md
+++ b/docs/05_add_mpgcn.md
@@ -1,4 +1,4 @@
-# Adding a new graph neural network
+# Adding a new graph neural network from the class of MP-GCNs
## 1. New graph layer
@@ -23,7 +23,7 @@ class MyGraphLayer(nn.Module):
return h_out, e_out
```
-Directory *layers/* contains all layer classes for all graph networks and standard layers like *MLP* for *readout* layers. Directory *layers/tensorized/* assembles the class definitions for dense graphs as used in diffpool intermediate layers.
+Directory *layers/* contains all layer classes for all graph networks and standard layers like *MLP* for *readout* layers.
As instance, the GCN class *GCNLayer()* is defined in the [layers/gcn_layer.py](../layers/gcn_layer.py) file.
@@ -93,14 +93,14 @@ For the ZINC example, *GCNNet()* in [nets/molecules_graph_regression/gcn_net.py]
Add a file `train_data_my_new_task.py` in the [`train/`](../train) directory.
```
-def train_epoch(model, optimizer, device, data_loader, nb_epochs):
+def train_epoch_sparse(model, optimizer, device, data_loader, nb_epochs):
model.train()
# write your code here
return train_loss, train_acc
-def evaluate_network(model, device, data_loader):
+def evaluate_network_sparse(model, device, data_loader):
model.eval()
# write your code here
@@ -121,7 +121,7 @@ Add a new notebook file `main_my_new_task.ipynb` or python `main_my_new_task.py`
```
from nets.load_net import gnn_model
from data.data import LoadData
-from train.train_my_network import train_epoch, evaluate_network
+from train.train_data_my_new_task import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
DATASET_NAME = 'MY_DATASET'
dataset = LoadData(DATASET_NAME)
@@ -181,9 +181,9 @@ The training and network parameters for the dataset and the network is stored in
}
```
-For ZINC, the config is [molecules_graph_regression_GCN_ZINC.json](../configs/molecules_graph_regression_GCN_ZINC.json) and the code is run with
+For ZINC, the config is [molecules_graph_regression_GCN_ZINC_100k.json](../configs/molecules_graph_regression_GCN_ZINC_100k.json) and the code is run with
```
-python main_molecules_graph_regression.py --dataset ZINC --gpu_id 0 --config 'configs/molecules_graph_regression_GCN_ZINC.json'
+python main_molecules_graph_regression.py --dataset ZINC --gpu_id 0 --config 'configs/molecules_graph_regression_GCN_ZINC_100k.json'
```
diff --git a/docs/06_add_wlgnn.md b/docs/06_add_wlgnn.md
new file mode 100644
index 000000000..9c111c71a
--- /dev/null
+++ b/docs/06_add_wlgnn.md
@@ -0,0 +1,200 @@
+# Adding a new graph neural network from the class of WL-GNNs
+
+
+## 1. New graph layer
+
+Add a class `MyGraphLayer()` in a `my_graph_layer.py` file in the `layers/` directory. A standard template is
+```
+import torch
+import torch.nn as nn
+
+class MyGraphLayer(nn.Module):
+
+ def __init__(self, in_dim, out_dim, dropout):
+ super().__init__()
+
+ # write your code here
+
+ def forward(self, x_with_all_info):
+
+ # write your code here
+ # which operates on the dense
+ # input tensor x_with_all_info
+
+ return x_out
+```
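+
+As a minimal illustration, a plain channel-mixing layer (a 1x1 convolution over the (1 + d_n + d_e) input channels; not a full equivariant layer) fits this interface:
+
+```
+import torch
+import torch.nn as nn
+
+class MyGraphLayer(nn.Module):
+
+    def __init__(self, in_dim, out_dim, dropout):
+        super().__init__()
+        # 1x1 convolution mixes channels independently at each (i, j) entry
+        self.mix = nn.Conv2d(in_dim, out_dim, kernel_size=1)
+        self.dropout = nn.Dropout(dropout)
+
+    def forward(self, x_with_all_info):
+        # x_with_all_info: (batch, in_dim, n, n)
+        x_out = torch.relu(self.mix(x_with_all_info))
+        return self.dropout(x_out)
+```
+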
+Directory *layers/* contains all layer classes for all graph networks and standard layers like *MLP* for *readout* layers.
+
+For instance, the RingGNN layer class *RingGNNEquivLayer()* is defined in the [layers/ring_gnn_equiv_layer.py](../layers/ring_gnn_equiv_layer.py) file.
+
+
+
+
+
+
+## 2. New graph network
+
+Add a class `MyGraphNetwork()` in a `my_gcn_net.py` file in the `nets/` directory. The `loss()` function of the network is also defined in the `MyGraphNetwork()` class.
+```
+import torch
+import torch.nn as nn
+
+from layers.my_graph_layer import MyGraphLayer
+
+class MyGraphNetwork(nn.Module):
+
+ def __init__(self, in_dim, out_dim, dropout):
+ super().__init__()
+
+ # write your code here
+ self.layer = MyGraphLayer()
+
+ def forward(self, x_with_all_info):
+
+ # write your code here
+ # which operates on the dense
+ # input tensor x_with_all_info
+
+ return x_out
+
+ def loss(self, pred, label):
+
+ # write your loss function here
+
+ return loss
+```
+
+Add a name `MyGNN` for the proposed new graph network class in the `load_net.py` file in the `nets/` directory.
+```
+from nets.my_gcn_net import MyGraphNetwork
+
+def MyGNN(net_params):
+ return MyGraphNetwork(net_params)
+
+def gnn_model(MODEL_NAME, net_params):
+ models = {
+ 'MyGNN': MyGNN
+ }
+ return models[MODEL_NAME](net_params)
+```
+
+
+For the ZINC example, *RingGNNNet()* in [nets/molecules_graph_regression/ring_gnn_net.py](../nets/molecules_graph_regression/ring_gnn_net.py) is given the GNN name *RingGNN* in [nets/molecules_graph_regression/load_net.py](../nets/molecules_graph_regression/load_net.py).
+
+
+
+
+
+
+
+
+
+## 3. Define the training/testing loops of the new task for the WL-GNNs
+
+Add a file `train_data_my_new_task.py` in the [`train/`](../train) directory.
+```
+def train_epoch_dense(model, optimizer, device, data_loader, nb_epochs, batch_size):
+ model.train()
+
+ # write your code here
+ # Note: we use gradient accumulation w.r.t. the
+ # batch_size during training, since the
+ # usual batching approach for MP-GCNs operating
+ # on sparse tensors does not apply to WL-GNNs
+
+ return train_loss, train_acc
+
+def evaluate_network_dense(model, device, data_loader):
+ model.eval()
+
+ # write your code here
+
+ return test_loss, test_acc
+```
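+
+A minimal sketch of the gradient-accumulation pattern (the loss call and the metric are placeholders; `nb_epochs` is unused here and kept only to match the template signature):
+
+```
+def train_epoch_dense(model, optimizer, device, data_loader, nb_epochs, batch_size):
+    model.train()
+    epoch_loss = 0
+    optimizer.zero_grad()
+    for iter, (x_with_all_info, labels) in enumerate(data_loader):
+        x = x_with_all_info.to(device)
+        labels = labels.to(device)
+        scores = model(x)
+        loss = model.loss(scores, labels)
+        loss.backward()
+        # step only every batch_size graphs; gradients accumulate in between
+        if (iter + 1) % batch_size == 0:
+            optimizer.step()
+            optimizer.zero_grad()
+        epoch_loss += loss.detach().item()
+    train_loss = epoch_loss / (iter + 1)
+    train_acc = None  # compute your task metric here
+    return train_loss, train_acc
+```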
+
+For ZINC, the loops are defined in file [train/train_molecules_graph_regression.py](../train/train_molecules_graph_regression.py).
+
+
+
+
+
+## 4. Main code
+
+Add a new notebook file `main_my_new_task.ipynb` or python `main_my_new_task.py` for the new task.
+```
+from nets.load_net import gnn_model
+from data.data import LoadData
+from train.train_data_my_new_task import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
+
+DATASET_NAME = 'MY_DATASET'
+dataset = LoadData(DATASET_NAME)
+
+MODEL_NAME = 'MyGNN'
+model = gnn_model(MODEL_NAME, net_params)
+
+optimizer = optim.Adam(model.parameters())
+train_loader = DataLoader(dataset.train, shuffle=True, collate_fn=dataset.collate_dense_gnn)
+epoch_train_loss, epoch_train_acc = train_epoch(model, optimizer, device, train_loader, epoch, batch_size)
+```
+
+Python file `main_my_new_task.py` can be generated by saving the notebook `main_my_new_task.ipynb` as a regular python file. (We actually developed a new graph network within the notebook and then converted the .ipynb to .py, but it can be done directly in .py)
+
+
+As for ZINC, the main file is [`main_molecules_graph_regression.ipynb`](../main_molecules_graph_regression.ipynb) or [`main_molecules_graph_regression.py`](../main_molecules_graph_regression.py).
+
+
+
+
+
+
+
+## 5. Run code
+
+Code can be executed in the notebook `main_my_new_task.ipynb` or in the terminal with the command
+```
+python main_my_new_task.py --dataset DATASET_NAME --gpu_id 0 --config 'configs/my_new_task_MyGNN_DATASET_NAME.json'
+```
+
+The training and network parameters for the dataset and the network are stored in a json file in the `configs/` directory.
+```
+{
+ "gpu": {
+ "use": true,
+ "id": 0
+ },
+
+ "model": MyGNN,
+ "dataset": DATASET_NAME,
+
+ "out_dir": "out/my_new_task/",
+
+ "params": {
+ "seed": 41,
+ "epochs": 1000,
+ "batch_size": 128,
+ "init_lr": 0.001
+ },
+
+ "net_params": {
+ "L": 4,
+ "hidden_dim": 70,
+ "out_dim": 70,
+ "residual": true
+ }
+}
+```
+
+For ZINC, the config is [molecules_graph_regression_RingGNN_ZINC_100k.json](../configs/molecules_graph_regression_RingGNN_ZINC_100k.json) and the code is run with
+```
+python main_molecules_graph_regression.py --dataset ZINC --gpu_id 0 --config 'configs/molecules_graph_regression_RingGNN_ZINC_100k.json'
+```
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/environment_cpu.yml b/environment_cpu.yml
index 8a69888b8..a767476ec 100644
--- a/environment_cpu.yml
+++ b/environment_cpu.yml
@@ -3,9 +3,12 @@ channels:
- pytorch
- dglteam
- conda-forge
+- anaconda
+- defaults
dependencies:
- python=3.7.4
- python-dateutil=2.8.0
+- pip=19.2.3
- pytorch=1.3
- torchvision==0.4.2
- pillow==6.1
@@ -14,6 +17,7 @@ dependencies:
- matplotlib=3.1.0
- tensorboard=1.14.0
- tensorboardx=1.8
+- future=0.18.2
- absl-py
- networkx=2.3
- scikit-learn=0.21.2
@@ -32,4 +36,9 @@ dependencies:
- plotly=4.1.1
- scikit-image=0.15.0
- requests==2.22.0
-- tqdm==4.43.0
\ No newline at end of file
+- tqdm==4.43.0
+- pip:
+ - tensorflow==2.1.0
+ - tensorflow-estimator==2.1.0
+ - tensorboard==2.1.1
+ - ogb==1.1.1
\ No newline at end of file
diff --git a/environment_gpu.yml b/environment_gpu.yml
index 81f8989b7..41cb4b899 100644
--- a/environment_gpu.yml
+++ b/environment_gpu.yml
@@ -4,12 +4,15 @@ channels:
- dglteam
- conda-forge
- fragcolor
+- anaconda
+- defaults
dependencies:
- cuda10.0
- cudatoolkit=10.0
- cudnn=7.6.5
- python=3.7.4
- python-dateutil=2.8.0
+- pip=19.2.3
- pytorch=1.3
- torchvision==0.4.2
- pillow==6.1
@@ -18,6 +21,7 @@ dependencies:
- matplotlib=3.1.0
- tensorboard=1.14.0
- tensorboardx=1.8
+- future=0.18.2
- absl-py
- networkx=2.3
- scikit-learn=0.21.2
@@ -36,4 +40,9 @@ dependencies:
- plotly=4.1.1
- scikit-image=0.15.0
- requests==2.22.0
-- tqdm==4.43.0
\ No newline at end of file
+- tqdm==4.43.0
+- pip:
+ - tensorflow-gpu==2.1.0
+ - tensorflow-estimator==2.1.0
+ - tensorboard==2.1.1
+ - ogb==1.1.1
\ No newline at end of file
diff --git a/layers/diffpool_layer.py b/layers/diffpool_layer.py
deleted file mode 100644
index e91bc4d5d..000000000
--- a/layers/diffpool_layer.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import torch
-import torch.nn as nn
-
-import numpy as np
-from scipy.linalg import block_diag
-
-from torch.autograd import Function
-
-"""
- DIFFPOOL:
- Z. Ying, J. You, C. Morris, X. Ren, W. Hamilton, and J. Leskovec,
- Hierarchical graph representation learning with differentiable pooling (NeurIPS 2018)
- https://arxiv.org/pdf/1806.08804.pdf
-
- ! code started from dgl diffpool examples dir
-"""
-
-from layers.graphsage_layer import GraphSageLayer
-
-
-def masked_softmax(matrix, mask, dim=-1, memory_efficient=True,
- mask_fill_value=-1e32):
- '''
- masked_softmax for dgl batch graph
- code snippet contributed by AllenNLP (https://github.com/allenai/allennlp)
- '''
- if mask is None:
- result = torch.nn.functional.softmax(matrix, dim=dim)
- else:
- mask = mask.float()
- while mask.dim() < matrix.dim():
- mask = mask.unsqueeze(1)
- if not memory_efficient:
- result = torch.nn.functional.softmax(matrix * mask, dim=dim)
- result = result * mask
- result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
- else:
- masked_matrix = matrix.masked_fill((1 - mask).byte(),
- mask_fill_value)
- result = torch.nn.functional.softmax(masked_matrix, dim=dim)
- return result
-
-
-class EntropyLoss(nn.Module):
- # Return Scalar
- # loss used in diffpool
- def forward(self, adj, anext, s_l):
- entropy = (torch.distributions.Categorical(
- probs=s_l).entropy()).sum(-1).mean(-1)
- assert not torch.isnan(entropy)
- return entropy
-
-
-class DiffPoolLayer(nn.Module):
-
- def __init__(self, input_dim, assign_dim, output_feat_dim,
- activation, dropout, aggregator_type, graph_norm, batch_norm, link_pred):
- super().__init__()
- self.embedding_dim = input_dim
- self.assign_dim = assign_dim
- self.hidden_dim = output_feat_dim
- self.link_pred = link_pred
- self.feat_gc = GraphSageLayer(
- input_dim,
- output_feat_dim,
- activation,
- dropout,
- aggregator_type,
- graph_norm,
- batch_norm)
- self.pool_gc = GraphSageLayer(
- input_dim,
- assign_dim,
- activation,
- dropout,
- aggregator_type,
- graph_norm,
- batch_norm)
- self.reg_loss = nn.ModuleList([])
- self.loss_log = {}
- self.reg_loss.append(EntropyLoss())
-
- def forward(self, g, h, snorm_n):
- feat = self.feat_gc(g, h, snorm_n)
- assign_tensor = self.pool_gc(g, h, snorm_n)
- device = feat.device
- assign_tensor_masks = []
- batch_size = len(g.batch_num_nodes)
- for g_n_nodes in g.batch_num_nodes:
- mask = torch.ones((g_n_nodes,
- int(assign_tensor.size()[1] / batch_size)))
- assign_tensor_masks.append(mask)
- """
- The first pooling layer is computed on batched graph.
- We first take the adjacency matrix of the batched graph, which is block-wise diagonal.
- We then compute the assignment matrix for the whole batch graph, which will also be block diagonal
- """
- mask = torch.FloatTensor(
- block_diag(
- *
- assign_tensor_masks)).to(
- device=device)
- assign_tensor = masked_softmax(assign_tensor, mask,
- memory_efficient=False)
- h = torch.matmul(torch.t(assign_tensor), feat) # equation (3) of DIFFPOOL paper
- adj = g.adjacency_matrix(ctx=device)
-
- adj_new = torch.sparse.mm(adj, assign_tensor)
- adj_new = torch.mm(torch.t(assign_tensor), adj_new) # equation (4) of DIFFPOOL paper
-
- if self.link_pred:
- current_lp_loss = torch.norm(adj.to_dense() -
- torch.mm(assign_tensor, torch.t(assign_tensor))) / np.power(g.number_of_nodes(), 2)
- self.loss_log['LinkPredLoss'] = current_lp_loss
-
- for loss_layer in self.reg_loss:
- loss_name = str(type(loss_layer).__name__)
- self.loss_log[loss_name] = loss_layer(adj, adj_new, assign_tensor)
-
- return adj_new, h
-
-
-
-
diff --git a/layers/gat_layer.py b/layers/gat_layer.py
index cadf82330..197820d35 100644
--- a/layers/gat_layer.py
+++ b/layers/gat_layer.py
@@ -10,11 +10,70 @@
https://arxiv.org/abs/1710.10903
"""
-class GATHeadLayer(nn.Module):
- def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm):
+class GATLayer(nn.Module):
+ """
+ Parameters
+ ----------
+ in_dim :
+ Number of input features.
+ out_dim :
+ Number of output features.
+ num_heads : int
+ Number of heads in Multi-Head Attention.
+ dropout :
+ Required for dropout of attn and feat in GATConv
+ batch_norm :
+ boolean flag for batch_norm layer.
+ residual :
+ If True, use residual connection inside this layer. Default: ``False``.
+ activation : callable activation function/layer or None, optional.
+ If not None, applies an activation function to the updated node features.
+
+ Using dgl builtin GATConv by default:
+ https://github.com/graphdeeplearning/benchmarking-gnns/commit/206e888ecc0f8d941c54e061d5dffcc7ae2142fc
+ """
+ def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=False, activation=F.elu):
+ super().__init__()
+ self.residual = residual
+ self.activation = activation
+ self.batch_norm = batch_norm
+
+ if in_dim != (out_dim*num_heads):
+ self.residual = False
+
+ self.gatconv = GATConv(in_dim, out_dim, num_heads, dropout, dropout)
+
+ if self.batch_norm:
+ self.batchnorm_h = nn.BatchNorm1d(out_dim * num_heads)
+
+ def forward(self, g, h):
+ h_in = h # for residual connection
+
+ h = self.gatconv(g, h).flatten(1)
+
+ if self.batch_norm:
+ h = self.batchnorm_h(h)
+
+ if self.activation:
+ h = self.activation(h)
+
+ if self.residual:
+ h = h_in + h # residual connection
+
+ return h
+
+
+##############################################################
+#
+# Additional layers for edge feature/representation analysis
+#
+##############################################################
+
+
+class CustomGATHeadLayer(nn.Module):
+ def __init__(self, in_dim, out_dim, dropout, batch_norm):
super().__init__()
self.dropout = dropout
- self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.fc = nn.Linear(in_dim, out_dim, bias=False)
@@ -35,94 +94,240 @@ def reduce_func(self, nodes):
h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
return {'h': h}
- def forward(self, g, h, snorm_n):
+ def forward(self, g, h):
z = self.fc(h)
g.ndata['z'] = z
g.apply_edges(self.edge_attention)
g.update_all(self.message_func, self.reduce_func)
h = g.ndata['h']
- if self.graph_norm:
- h = h * snorm_n
+
if self.batch_norm:
h = self.batchnorm_h(h)
+
h = F.elu(h)
+
h = F.dropout(h, self.dropout, training=self.training)
+
return h
-class GATLayer(nn.Module):
+
+class CustomGATLayer(nn.Module):
"""
Param: [in_dim, out_dim, n_heads]
"""
- def __init__(self, in_dim, out_dim, num_heads, dropout, graph_norm, batch_norm, residual=False, activation=None, dgl_builtin=False):
-
+ def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True):
super().__init__()
- self.dgl_builtin = dgl_builtin
- if dgl_builtin == False:
- self.in_channels = in_dim
- self.out_channels = out_dim
- self.num_heads = num_heads
- self.residual = residual
-
- if in_dim != (out_dim*num_heads):
- self.residual = False
-
- self.heads = nn.ModuleList()
- for i in range(num_heads):
- self.heads.append(GATHeadLayer(in_dim, out_dim, dropout, graph_norm, batch_norm))
- self.merge = 'cat'
+ self.in_channels = in_dim
+ self.out_channels = out_dim
+ self.num_heads = num_heads
+ self.residual = residual
+
+ if in_dim != (out_dim*num_heads):
+ self.residual = False
+
+ self.heads = nn.ModuleList()
+ for i in range(num_heads):
+ self.heads.append(CustomGATHeadLayer(in_dim, out_dim, dropout, batch_norm))
+ self.merge = 'cat'
+
+ def forward(self, g, h, e):
+ h_in = h # for residual connection
+
+ head_outs = [attn_head(g, h) for attn_head in self.heads]
+ if self.merge == 'cat':
+ h = torch.cat(head_outs, dim=1)
else:
- self.in_channels = in_dim
- self.out_channels = out_dim
- self.num_heads = num_heads
- self.residual = residual
- self.activation = activation
- self.graph_norm = graph_norm
- self.batch_norm = batch_norm
-
- if in_dim != (out_dim*num_heads):
- self.residual = False
+ h = torch.mean(torch.stack(head_outs))
- # Both feat and weighting dropout tied together here
- self.conv = GATConv(in_dim, out_dim, num_heads, dropout, dropout)
- self.batchnorm_h = nn.BatchNorm1d(out_dim)
+ if self.residual:
+ h = h_in + h # residual connection
+
+ return h, e
+
+ def __repr__(self):
+ return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__,
+ self.in_channels,
+ self.out_channels, self.num_heads, self.residual)
+
+##############################################################
- def forward(self, g, h, snorm_n):
- if self.dgl_builtin == False:
- h_in = h # for residual connection
- head_outs = [attn_head(g, h, snorm_n) for attn_head in self.heads]
-
- if self.merge == 'cat':
- h = torch.cat(head_outs, dim=1)
- else:
- h = torch.mean(torch.stack(head_outs))
-
- if self.residual:
- h = h_in + h # residual connection
- return h
- else:
- h_in = h # for residual connection
+class CustomGATHeadLayerEdgeReprFeat(nn.Module):
+ def __init__(self, in_dim, out_dim, dropout, batch_norm):
+ super().__init__()
+ self.dropout = dropout
+ self.batch_norm = batch_norm
+
+ self.fc_h = nn.Linear(in_dim, out_dim, bias=False)
+ self.fc_e = nn.Linear(in_dim, out_dim, bias=False)
+ self.fc_proj = nn.Linear(3* out_dim, out_dim)
+ self.attn_fc = nn.Linear(3* out_dim, 1, bias=False)
+ self.batchnorm_h = nn.BatchNorm1d(out_dim)
+ self.batchnorm_e = nn.BatchNorm1d(out_dim)
- h = self.conv(g, h).flatten(1)
+ def edge_attention(self, edges):
+ z = torch.cat([edges.data['z_e'], edges.src['z_h'], edges.dst['z_h']], dim=1)
+ e_proj = self.fc_proj(z)
+ attn = F.leaky_relu(self.attn_fc(z))
+ return {'attn': attn, 'e_proj': e_proj}
- if self.graph_norm:
- h = h * snorm_n
- if self.batch_norm:
- h = self.batchnorm_h(h)
-
- if self.residual:
- h = h_in + h # residual connection
+ def message_func(self, edges):
+ return {'z': edges.src['z_h'], 'attn': edges.data['attn']}
- if self.activation:
- h = self.activation(h)
- return h
+ def reduce_func(self, nodes):
+ alpha = F.softmax(nodes.mailbox['attn'], dim=1)
+ h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
+ return {'h': h}
+
+ def forward(self, g, h, e):
+ z_h = self.fc_h(h)
+ z_e = self.fc_e(e)
+ g.ndata['z_h'] = z_h
+ g.edata['z_e'] = z_e
+
+ g.apply_edges(self.edge_attention)
+
+ g.update_all(self.message_func, self.reduce_func)
+
+ h = g.ndata['h']
+ e = g.edata['e_proj']
+
+ if self.batch_norm:
+ h = self.batchnorm_h(h)
+ e = self.batchnorm_e(e)
+
+ h = F.elu(h)
+ e = F.elu(e)
+
+ h = F.dropout(h, self.dropout, training=self.training)
+ e = F.dropout(e, self.dropout, training=self.training)
+
+ return h, e
+
+
+class CustomGATLayerEdgeReprFeat(nn.Module):
+ """
+ Param: [in_dim, out_dim, n_heads]
+ """
+ def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True):
+ super().__init__()
+
+ self.in_channels = in_dim
+ self.out_channels = out_dim
+ self.num_heads = num_heads
+ self.residual = residual
+
+ if in_dim != (out_dim*num_heads):
+ self.residual = False
+
+ self.heads = nn.ModuleList()
+ for i in range(num_heads):
+ self.heads.append(CustomGATHeadLayerEdgeReprFeat(in_dim, out_dim, dropout, batch_norm))
+ self.merge = 'cat'
+
+ def forward(self, g, h, e):
+ h_in = h # for residual connection
+ e_in = e
+
+ head_outs_h = []
+ head_outs_e = []
+ for attn_head in self.heads:
+ h_temp, e_temp = attn_head(g, h, e)
+ head_outs_h.append(h_temp)
+ head_outs_e.append(e_temp)
+
+ if self.merge == 'cat':
+ h = torch.cat(head_outs_h, dim=1)
+ e = torch.cat(head_outs_e, dim=1)
+ else:
+ raise NotImplementedError
+
+ if self.residual:
+ h = h_in + h # residual connection
+ e = e_in + e
+
+ return h, e
def __repr__(self):
return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__,
self.in_channels,
self.out_channels, self.num_heads, self.residual)
+
+##############################################################
+
+
+class CustomGATHeadLayerIsotropic(nn.Module):
+ def __init__(self, in_dim, out_dim, dropout, batch_norm):
+ super().__init__()
+ self.dropout = dropout
+ self.batch_norm = batch_norm
+
+ self.fc = nn.Linear(in_dim, out_dim, bias=False)
+ self.batchnorm_h = nn.BatchNorm1d(out_dim)
+
+ def message_func(self, edges):
+ return {'z': edges.src['z']}
+
+ def reduce_func(self, nodes):
+ h = torch.sum(nodes.mailbox['z'], dim=1)
+ return {'h': h}
+ def forward(self, g, h):
+ z = self.fc(h)
+ g.ndata['z'] = z
+ g.update_all(self.message_func, self.reduce_func)
+ h = g.ndata['h']
+
+ if self.batch_norm:
+ h = self.batchnorm_h(h)
+
+ h = F.elu(h)
+
+ h = F.dropout(h, self.dropout, training=self.training)
+
+ return h
+
+
+class CustomGATLayerIsotropic(nn.Module):
+ """
+ Param: [in_dim, out_dim, n_heads]
+ """
+ def __init__(self, in_dim, out_dim, num_heads, dropout, batch_norm, residual=True):
+ super().__init__()
+
+ self.in_channels = in_dim
+ self.out_channels = out_dim
+ self.num_heads = num_heads
+ self.residual = residual
+
+ if in_dim != (out_dim*num_heads):
+ self.residual = False
+
+ self.heads = nn.ModuleList()
+ for i in range(num_heads):
+ self.heads.append(CustomGATHeadLayerIsotropic(in_dim, out_dim, dropout, batch_norm))
+ self.merge = 'cat'
+
+ def forward(self, g, h, e):
+ h_in = h # for residual connection
+
+ head_outs = [attn_head(g, h) for attn_head in self.heads]
+
+ if self.merge == 'cat':
+ h = torch.cat(head_outs, dim=1)
+ else:
+ h = torch.mean(torch.stack(head_outs))
+
+ if self.residual:
+ h = h_in + h # residual connection
+
+ return h, e
+
+ def __repr__(self):
+ return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__,
+ self.in_channels,
+ self.out_channels, self.num_heads, self.residual)
diff --git a/layers/gated_gcn_layer.py b/layers/gated_gcn_layer.py
index a7ba3e1ea..30a889b93 100644
--- a/layers/gated_gcn_layer.py
+++ b/layers/gated_gcn_layer.py
@@ -12,12 +12,11 @@ class GatedGCNLayer(nn.Module):
"""
Param: []
"""
- def __init__(self, input_dim, output_dim, dropout, graph_norm, batch_norm, residual=False):
+ def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
super().__init__()
self.in_channels = input_dim
self.out_channels = output_dim
self.dropout = dropout
- self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.residual = residual
@@ -47,7 +46,7 @@ def reduce_func(self, nodes):
h = Ah_i + torch.sum( sigma_ij * Bh_j, dim=1 ) / ( torch.sum( sigma_ij, dim=1 ) + 1e-6 ) # hi = Ahi + sum_j eta_ij/sum_j' eta_ij' * Bhj <= dense attention
return {'h' : h}
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h_in = h # for residual connection
e_in = e # for residual connection
@@ -63,10 +62,6 @@ def forward(self, g, h, e, snorm_n, snorm_e):
h = g.ndata['h'] # result of graph convolution
e = g.edata['e'] # result of graph convolution
- if self.graph_norm:
- h = h* snorm_n # normalize activation w.r.t. graph size
- e = e* snorm_e # normalize activation w.r.t. graph size
-
if self.batch_norm:
h = self.bn_node_h(h) # batch normalization
e = self.bn_node_e(e) # batch normalization
@@ -86,4 +81,138 @@ def forward(self, g, h, e, snorm_n, snorm_e):
def __repr__(self):
return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
self.in_channels,
- self.out_channels)
\ No newline at end of file
+ self.out_channels)
+
+
+##############################################################
+#
+# Additional layers for edge feature/representation analysis
+#
+##############################################################
+
+
+class GatedGCNLayerEdgeFeatOnly(nn.Module):
+ """
+ Param: []
+ """
+ def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
+ super().__init__()
+ self.in_channels = input_dim
+ self.out_channels = output_dim
+ self.dropout = dropout
+ self.batch_norm = batch_norm
+ self.residual = residual
+
+ if input_dim != output_dim:
+ self.residual = False
+
+ self.A = nn.Linear(input_dim, output_dim, bias=True)
+ self.B = nn.Linear(input_dim, output_dim, bias=True)
+ self.D = nn.Linear(input_dim, output_dim, bias=True)
+ self.E = nn.Linear(input_dim, output_dim, bias=True)
+ self.bn_node_h = nn.BatchNorm1d(output_dim)
+
+ def message_func(self, edges):
+ Bh_j = edges.src['Bh']
+ e_ij = edges.src['Dh'] + edges.dst['Eh'] # e_ij = Dhi + Ehj
+ edges.data['e'] = e_ij
+ return {'Bh_j' : Bh_j, 'e_ij' : e_ij}
+
+ def reduce_func(self, nodes):
+ Ah_i = nodes.data['Ah']
+ Bh_j = nodes.mailbox['Bh_j']
+ e = nodes.mailbox['e_ij']
+ sigma_ij = torch.sigmoid(e) # sigma_ij = sigmoid(e_ij)
+ h = Ah_i + torch.sum( sigma_ij * Bh_j, dim=1 ) / ( torch.sum( sigma_ij, dim=1 ) + 1e-6 ) # hi = Ahi + sum_j eta_ij/sum_j' eta_ij' * Bhj <= dense attention
+ return {'h' : h}
+
+ def forward(self, g, h, e):
+
+ h_in = h # for residual connection
+
+ g.ndata['h'] = h
+ g.ndata['Ah'] = self.A(h)
+ g.ndata['Bh'] = self.B(h)
+ g.ndata['Dh'] = self.D(h)
+ g.ndata['Eh'] = self.E(h)
+ g.update_all(self.message_func,self.reduce_func)
+ h = g.ndata['h'] # result of graph convolution
+
+ if self.batch_norm:
+ h = self.bn_node_h(h) # batch normalization
+
+ h = F.relu(h) # non-linear activation
+
+ if self.residual:
+ h = h_in + h # residual connection
+
+ h = F.dropout(h, self.dropout, training=self.training)
+
+ return h, e
+
+ def __repr__(self):
+ return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
+ self.in_channels,
+ self.out_channels)
+
+
+##############################################################
+
+
+class GatedGCNLayerIsotropic(nn.Module):
+ """
+ Param: []
+ """
+ def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
+ super().__init__()
+ self.in_channels = input_dim
+ self.out_channels = output_dim
+ self.dropout = dropout
+ self.batch_norm = batch_norm
+ self.residual = residual
+
+ if input_dim != output_dim:
+ self.residual = False
+
+ self.A = nn.Linear(input_dim, output_dim, bias=True)
+ self.B = nn.Linear(input_dim, output_dim, bias=True)
+ self.bn_node_h = nn.BatchNorm1d(output_dim)
+
+ def message_func(self, edges):
+ Bh_j = edges.src['Bh']
+ return {'Bh_j' : Bh_j}
+
+ def reduce_func(self, nodes):
+ Ah_i = nodes.data['Ah']
+ Bh_j = nodes.mailbox['Bh_j']
+ h = Ah_i + torch.sum( Bh_j, dim=1 ) # hi = Ahi + sum_j Bhj
+ return {'h' : h}
+
+ def forward(self, g, h, e):
+
+ h_in = h # for residual connection
+
+ g.ndata['h'] = h
+ g.ndata['Ah'] = self.A(h)
+ g.ndata['Bh'] = self.B(h)
+ g.update_all(self.message_func,self.reduce_func)
+ h = g.ndata['h'] # result of graph convolution
+
+ if self.batch_norm:
+ h = self.bn_node_h(h) # batch normalization
+
+ h = F.relu(h) # non-linear activation
+
+ if self.residual:
+ h = h_in + h # residual connection
+
+ h = F.dropout(h, self.dropout, training=self.training)
+
+ return h, e
+
+ def __repr__(self):
+ return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__,
+ self.in_channels,
+ self.out_channels)
+
+
diff --git a/layers/gcn_layer.py b/layers/gcn_layer.py
index 7baacdbf0..df35d72a8 100644
--- a/layers/gcn_layer.py
+++ b/layers/gcn_layer.py
@@ -33,11 +33,10 @@ class GCNLayer(nn.Module):
"""
Param: [in_dim, out_dim]
"""
- def __init__(self, in_dim, out_dim, activation, dropout, graph_norm, batch_norm, residual=False, dgl_builtin=False):
+ def __init__(self, in_dim, out_dim, activation, dropout, batch_norm, residual=False, dgl_builtin=False):
super().__init__()
self.in_channels = in_dim
self.out_channels = out_dim
- self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.residual = residual
self.dgl_builtin = dgl_builtin
@@ -54,7 +53,7 @@ def __init__(self, in_dim, out_dim, activation, dropout, graph_norm, batch_norm,
self.conv = GraphConv(in_dim, out_dim)
- def forward(self, g, feature, snorm_n):
+ def forward(self, g, feature):
h_in = feature # to be used for residual connection
if self.dgl_builtin == False:
@@ -64,10 +63,6 @@ def forward(self, g, feature, snorm_n):
h = g.ndata['h'] # result of graph convolution
else:
h = self.conv(g, feature)
-
- if self.graph_norm:
- h = h * snorm_n # normalize activation w.r.t. graph size
-
if self.batch_norm:
h = self.batchnorm_h(h) # batch normalization
diff --git a/layers/gin_layer.py b/layers/gin_layer.py
index 1803a7a1f..9fabb52f3 100644
--- a/layers/gin_layer.py
+++ b/layers/gin_layer.py
@@ -24,8 +24,6 @@ class GINLayer(nn.Module):
Rquired for batch norm layer; should match out_dim of apply_func if not None.
dropout :
Required for dropout of output features.
- graph_norm :
- boolean flag for output features normalization w.r.t. graph sizes.
batch_norm :
boolean flag for batch_norm layer.
residual :
@@ -36,10 +34,9 @@ class GINLayer(nn.Module):
If True, :math:`\epsilon` will be a learnable parameter.
"""
- def __init__(self, apply_func, aggr_type, dropout, graph_norm, batch_norm, residual=False, init_eps=0, learn_eps=False, activation=None):
+ def __init__(self, apply_func, aggr_type, dropout, batch_norm, residual=False, init_eps=0, learn_eps=False):
super().__init__()
self.apply_func = apply_func
- self.activation = activation
if aggr_type == 'sum':
self._reducer = fn.sum
@@ -50,7 +47,6 @@ def __init__(self, apply_func, aggr_type, dropout, graph_norm, batch_norm, resid
else:
raise KeyError('Aggregator type {} not recognized.'.format(aggr_type))
- self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.residual = residual
self.dropout = dropout
@@ -69,7 +65,7 @@ def __init__(self, apply_func, aggr_type, dropout, graph_norm, batch_norm, resid
self.bn_node_h = nn.BatchNorm1d(out_dim)
- def forward(self, g, h, snorm_n):
+ def forward(self, g, h):
h_in = h # for residual connection
g = g.local_var()
@@ -79,14 +75,10 @@ def forward(self, g, h, snorm_n):
if self.apply_func is not None:
h = self.apply_func(h)
- if self.graph_norm:
- h = h * snorm_n # normalize activation w.r.t. graph size
-
if self.batch_norm:
h = self.bn_node_h(h) # batch normalization
- if self.activation:
- h = F.relu(h) # non-linear activation
+ h = F.relu(h) # non-linear activation
if self.residual:
h = h_in + h # residual connection
@@ -104,7 +96,6 @@ class ApplyNodeFunc(nn.Module):
def __init__(self, mlp):
super().__init__()
self.mlp = mlp
- self.bn = nn.BatchNorm1d(self.mlp.output_dim)
def forward(self, h):
h = self.mlp(h)
diff --git a/layers/gmm_layer.py b/layers/gmm_layer.py
index ca83193ec..b92b1d1a0 100644
--- a/layers/gmm_layer.py
+++ b/layers/gmm_layer.py
@@ -28,8 +28,6 @@ class GMMLayer(nn.Module):
Aggregator type (``sum``, ``mean``, ``max``).
dropout :
Required for dropout of output features.
- graph_norm :
- boolean flag for output features normalization w.r.t. graph sizes.
batch_norm :
boolean flag for batch_norm layer.
residual :
@@ -39,14 +37,13 @@ class GMMLayer(nn.Module):
"""
def __init__(self, in_dim, out_dim, dim, kernel, aggr_type, dropout,
- graph_norm, batch_norm, residual=False, bias=True):
+ batch_norm, residual=False, bias=True):
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.dim = dim
self.kernel = kernel
- self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.residual = residual
self.dropout = dropout
@@ -84,7 +81,7 @@ def reset_parameters(self):
if self.bias is not None:
init.zeros_(self.bias.data)
- def forward(self, g, h, pseudo, snorm_n):
+ def forward(self, g, h, pseudo):
h_in = h # for residual connection
g = g.local_var()
@@ -100,9 +97,6 @@ def forward(self, g, h, pseudo, snorm_n):
g.update_all(fn.u_mul_e('h', 'w', 'm'), self._reducer('m', 'h'))
h = g.ndata['h'].sum(1)
- if self.graph_norm:
- h = h* snorm_n # normalize activation w.r.t. graph size
-
if self.batch_norm:
h = self.bn_node_h(h) # batch normalization
diff --git a/layers/graphsage_layer.py b/layers/graphsage_layer.py
index 033a240e8..d39c077f8 100644
--- a/layers/graphsage_layer.py
+++ b/layers/graphsage_layer.py
@@ -11,19 +11,15 @@
https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
"""
-from layers.sage_aggregator_layer import MaxPoolAggregator, MeanAggregator, LSTMAggregator
-from layers.node_apply_layer import NodeApply
-
class GraphSageLayer(nn.Module):
def __init__(self, in_feats, out_feats, activation, dropout,
- aggregator_type, graph_norm, batch_norm, residual=False, bias=True,
- dgl_builtin=False):
+ aggregator_type, batch_norm, residual=False,
+ bias=True, dgl_builtin=False):
super().__init__()
self.in_channels = in_feats
self.out_channels = out_feats
self.aggregator_type = aggregator_type
- self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.residual = residual
self.dgl_builtin = dgl_builtin
@@ -36,7 +32,7 @@ def __init__(self, in_feats, out_feats, activation, dropout,
if dgl_builtin == False:
self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout,
bias=bias)
- if aggregator_type == "pool":
+ if aggregator_type == "maxpool":
self.aggregator = MaxPoolAggregator(in_feats, in_feats,
activation, bias)
elif aggregator_type == "lstm":
@@ -50,21 +46,19 @@ def __init__(self, in_feats, out_feats, activation, dropout,
if self.batch_norm:
self.batchnorm_h = nn.BatchNorm1d(out_feats)
- def forward(self, g, h, snorm_n=None):
+ def forward(self, g, h):
h_in = h # for residual connection
if self.dgl_builtin == False:
h = self.dropout(h)
g.ndata['h'] = h
- g.update_all(fn.copy_src(src='h', out='m'), self.aggregator,
+ g.update_all(fn.copy_src(src='h', out='m'),
+ self.aggregator,
self.nodeapply)
h = g.ndata['h']
else:
h = self.sageconv(g, h)
- if self.graph_norm:
- h = h * snorm_n
-
if self.batch_norm:
h = self.batchnorm_h(h)
@@ -77,3 +71,290 @@ def __repr__(self):
return '{}(in_channels={}, out_channels={}, aggregator={}, residual={})'.format(self.__class__.__name__,
self.in_channels,
self.out_channels, self.aggregator_type, self.residual)
+
+
+
+"""
+ Aggregators for GraphSage
+"""
+class Aggregator(nn.Module):
+ """
+ Base Aggregator class.
+ """
+
+ def __init__(self):
+ super().__init__()
+
+ def forward(self, node):
+ neighbour = node.mailbox['m']
+ c = self.aggre(neighbour)
+ return {"c": c}
+
+ def aggre(self, neighbour):
+ # N x F
+ raise NotImplementedError
+
+
+class MeanAggregator(Aggregator):
+ """
+ Mean Aggregator for graphsage
+ """
+
+ def __init__(self):
+ super().__init__()
+
+ def aggre(self, neighbour):
+ mean_neighbour = torch.mean(neighbour, dim=1)
+ return mean_neighbour
+
+
+class MaxPoolAggregator(Aggregator):
+ """
+ Maxpooling aggregator for graphsage
+ """
+
+ def __init__(self, in_feats, out_feats, activation, bias):
+ super().__init__()
+ self.linear = nn.Linear(in_feats, out_feats, bias=bias)
+ self.activation = activation
+
+ def aggre(self, neighbour):
+ neighbour = self.linear(neighbour)
+ if self.activation:
+ neighbour = self.activation(neighbour)
+ maxpool_neighbour = torch.max(neighbour, dim=1)[0]
+ return maxpool_neighbour
+
+
+class LSTMAggregator(Aggregator):
+ """
+ LSTM aggregator for graphsage
+ """
+
+ def __init__(self, in_feats, hidden_feats):
+ super().__init__()
+ self.lstm = nn.LSTM(in_feats, hidden_feats, batch_first=True)
+ self.hidden_dim = hidden_feats
+ self.hidden = self.init_hidden()
+
+ nn.init.xavier_uniform_(self.lstm.weight,
+ gain=nn.init.calculate_gain('relu'))
+
+ def init_hidden(self):
+ """
+ Default: initialize the hidden state to all zeros
+ """
+ return (torch.zeros(1, 1, self.hidden_dim),
+ torch.zeros(1, 1, self.hidden_dim))
+
+ def aggre(self, neighbours):
+ """
+ aggregation function
+ """
+ # N X F
+ rand_order = torch.randperm(neighbours.size()[1])
+ neighbours = neighbours[:, rand_order, :]
+
+ (lstm_out, self.hidden) = self.lstm(neighbours.view(neighbours.size()[0], neighbours.size()[1], -1))
+ return lstm_out[:, -1, :]
+
+ def forward(self, node):
+ neighbour = node.mailbox['m']
+ c = self.aggre(neighbour)
+ return {"c": c}
+
+
+class NodeApply(nn.Module):
+ """
+ Implements the node_apply function in the DGL paradigm
+ """
+
+ def __init__(self, in_feats, out_feats, activation, dropout, bias=True):
+ super().__init__()
+ self.dropout = nn.Dropout(p=dropout)
+ self.linear = nn.Linear(in_feats * 2, out_feats, bias)
+ self.activation = activation
+
+ def concat(self, h, aggre_result):
+ bundle = torch.cat((h, aggre_result), 1)
+ bundle = self.linear(bundle)
+ return bundle
+
+ def forward(self, node):
+ h = node.data['h']
+ c = node.data['c']
+ bundle = self.concat(h, c)
+ bundle = F.normalize(bundle, p=2, dim=1)
+ if self.activation:
+ bundle = self.activation(bundle)
+ return {"h": bundle}
+
+
+##############################################################
+#
+# Additional layers for edge feature/representation analysis
+#
+##############################################################
+
+
+
+class GraphSageLayerEdgeFeat(nn.Module):
+
+ def __init__(self, in_feats, out_feats, activation, dropout,
+ aggregator_type, batch_norm, residual=False,
+ bias=True, dgl_builtin=False):
+ super().__init__()
+ self.in_channels = in_feats
+ self.out_channels = out_feats
+ self.batch_norm = batch_norm
+ self.residual = residual
+
+ if in_feats != out_feats:
+ self.residual = False
+
+ self.dropout = nn.Dropout(p=dropout)
+
+ self.activation = activation
+
+ self.A = nn.Linear(in_feats, out_feats, bias=bias)
+ self.B = nn.Linear(in_feats, out_feats, bias=bias)
+
+ self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout, bias=bias)
+
+ if self.batch_norm:
+ self.batchnorm_h = nn.BatchNorm1d(out_feats)
+
+ def message_func(self, edges):
+ Ah_j = edges.src['Ah']
+ e_ij = edges.src['Bh'] + edges.dst['Bh'] # e_ij = Bhi + Bhj
+ edges.data['e'] = e_ij
+ return {'Ah_j' : Ah_j, 'e_ij' : e_ij}
+
+ def reduce_func(self, nodes):
+ # Anisotropic MaxPool aggregation
+
+ Ah_j = nodes.mailbox['Ah_j']
+ e = nodes.mailbox['e_ij']
+ sigma_ij = torch.sigmoid(e) # sigma_ij = sigmoid(e_ij)
+
+ Ah_j = sigma_ij * Ah_j
+ if self.activation:
+ Ah_j = self.activation(Ah_j)
+
+ c = torch.max(Ah_j, dim=1)[0]
+ return {'c' : c}
+
+ def forward(self, g, h):
+ h_in = h # for residual connection
+ h = self.dropout(h)
+
+ g.ndata['h'] = h
+ g.ndata['Ah'] = self.A(h)
+ g.ndata['Bh'] = self.B(h)
+ g.update_all(self.message_func,
+ self.reduce_func,
+ self.nodeapply)
+ h = g.ndata['h']
+
+ if self.batch_norm:
+ h = self.batchnorm_h(h)
+
+ if self.residual:
+ h = h_in + h # residual connection
+
+ return h
+
+ def __repr__(self):
+ return '{}(in_channels={}, out_channels={}, residual={})'.format(
+ self.__class__.__name__,
+ self.in_channels,
+ self.out_channels,
+ self.residual)
+
+
+##############################################################
+
+
+class GraphSageLayerEdgeReprFeat(nn.Module):
+
+ def __init__(self, in_feats, out_feats, activation, dropout,
+ aggregator_type, batch_norm, residual=False,
+ bias=True, dgl_builtin=False):
+ super().__init__()
+ self.in_channels = in_feats
+ self.out_channels = out_feats
+ self.batch_norm = batch_norm
+ self.residual = residual
+
+ if in_feats != out_feats:
+ self.residual = False
+
+ self.dropout = nn.Dropout(p=dropout)
+
+ self.activation = activation
+
+ self.A = nn.Linear(in_feats, out_feats, bias=bias)
+ self.B = nn.Linear(in_feats, out_feats, bias=bias)
+ self.C = nn.Linear(in_feats, out_feats, bias=bias)
+
+ self.nodeapply = NodeApply(in_feats, out_feats, activation, dropout, bias=bias)
+
+ if self.batch_norm:
+ self.batchnorm_h = nn.BatchNorm1d(out_feats)
+ self.batchnorm_e = nn.BatchNorm1d(out_feats)
+
+ def message_func(self, edges):
+ Ah_j = edges.src['Ah']
+ e_ij = edges.data['Ce'] + edges.src['Bh'] + edges.dst['Bh'] # e_ij = Ce_ij + Bhi + Bhj
+ edges.data['e'] = e_ij
+ return {'Ah_j' : Ah_j, 'e_ij' : e_ij}
+
+ def reduce_func(self, nodes):
+ # Anisotropic MaxPool aggregation
+
+ Ah_j = nodes.mailbox['Ah_j']
+ e = nodes.mailbox['e_ij']
+ sigma_ij = torch.sigmoid(e) # sigma_ij = sigmoid(e_ij)
+
+ Ah_j = sigma_ij * Ah_j
+ if self.activation:
+ Ah_j = self.activation(Ah_j)
+
+ c = torch.max(Ah_j, dim=1)[0]
+ return {'c' : c}
+
+ def forward(self, g, h, e):
+ h_in = h # for residual connection
+ e_in = e
+ h = self.dropout(h)
+
+ g.ndata['h'] = h
+ g.ndata['Ah'] = self.A(h)
+ g.ndata['Bh'] = self.B(h)
+ g.edata['e'] = e
+ g.edata['Ce'] = self.C(e)
+ g.update_all(self.message_func,
+ self.reduce_func,
+ self.nodeapply)
+ h = g.ndata['h']
+ e = g.edata['e']
+
+ if self.activation:
+ e = self.activation(e) # non-linear activation
+
+ if self.batch_norm:
+ h = self.batchnorm_h(h)
+ e = self.batchnorm_e(e)
+
+ if self.residual:
+ h = h_in + h # residual connection
+ e = e_in + e # residual connection
+
+ return h, e
+
+ def __repr__(self):
+ return '{}(in_channels={}, out_channels={}, residual={})'.format(
+ self.__class__.__name__,
+ self.in_channels,
+ self.out_channels,
+ self.residual)
\ No newline at end of file
diff --git a/layers/node_apply_layer.py b/layers/node_apply_layer.py
deleted file mode 100644
index b9318be8e..000000000
--- a/layers/node_apply_layer.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-
-! Code started from dgl diffpool examples dir
-"""
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-class NodeApply(nn.Module):
- """
- Works -> the node_apply function in DGL paradigm
- """
-
- def __init__(self, in_feats, out_feats, activation, dropout, bias=True):
- super().__init__()
- self.dropout = nn.Dropout(p=dropout)
- self.linear = nn.Linear(in_feats * 2, out_feats, bias)
- self.activation = activation
-
-# nn.init.xavier_uniform_(self.linear.weight,
-# gain=nn.init.calculate_gain('relu'))
-
- def concat(self, h, aggre_result):
- bundle = torch.cat((h, aggre_result), 1)
- bundle = self.linear(bundle)
- return bundle
-
- def forward(self, node):
- h = node.data['h']
- c = node.data['c']
- bundle = self.concat(h, c)
- bundle = F.normalize(bundle, p=2, dim=1)
- if self.activation:
- bundle = self.activation(bundle)
- return {"h": bundle}
\ No newline at end of file
diff --git a/layers/ring_gnn_equiv_layer.py b/layers/ring_gnn_equiv_layer.py
new file mode 100644
index 000000000..428b8c010
--- /dev/null
+++ b/layers/ring_gnn_equiv_layer.py
@@ -0,0 +1,200 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+"""
+    Ring-GNN equivariant 2-to-2 layer
+ On the equivalence between graph isomorphism testing and function approximation with GNNs (Chen et al, 2019)
+ https://arxiv.org/pdf/1905.12560v1.pdf
+
+    CODE ADAPTED FROM https://github.com/leichen2018/Ring-GNN/
+"""
+
+class RingGNNEquivLayer(nn.Module):
+ def __init__(self, device, input_dim, output_dim, layer_norm, residual, dropout,
+ normalization='inf', normalization_val=1.0, radius=2, k2_init = 0.1):
+ super().__init__()
+ self.device = device
+ basis_dimension = 15
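+        # 15 is the number of linearly independent equivariant maps from 2nd-order
+        # to 2nd-order tensors; they are enumerated as op1..op15 in ops_2_to_2 below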
+ self.radius = radius
+ self.layer_norm = layer_norm
+ self.residual = residual
+ self.dropout = dropout
+
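+        # NB: input_dim/output_dim appear to be passed as 0-dim tensors (note the
+        # .float()/.item() calls); the scale is a Glorot-style sqrt(2/(fan_in+fan_out))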
+ coeffs_values = lambda i, j, k: torch.randn([i, j, k]) * torch.sqrt(2. / (i + j).float())
+ self.diag_bias_list = nn.ParameterList([])
+
+ for i in range(radius):
+ for j in range(i+1):
+ self.diag_bias_list.append(nn.Parameter(torch.zeros(1, output_dim, 1, 1)))
+
+ self.all_bias = nn.Parameter(torch.zeros(1, output_dim, 1, 1))
+ self.coeffs_list = nn.ParameterList([])
+
+ for i in range(radius):
+ for j in range(i+1):
+ self.coeffs_list.append(nn.Parameter(coeffs_values(input_dim, output_dim, basis_dimension)))
+
+ self.switch = nn.ParameterList([nn.Parameter(torch.FloatTensor([1])), nn.Parameter(torch.FloatTensor([k2_init]))])
+ self.output_dim = output_dim
+
+ self.normalization = normalization
+ self.normalization_val = normalization_val
+
+ if self.layer_norm:
+ self.ln_x = LayerNorm(output_dim.item())
+
+ if self.residual:
+ self.res_x = nn.Linear(input_dim.item(), output_dim.item())
+
+ def forward(self, inputs):
+ m = inputs.size()[3]
+
+ ops_out = ops_2_to_2(inputs, m, normalization=self.normalization)
+ ops_out = torch.stack(ops_out, dim = 2)
+
+
+ output_list = []
+
+ for i in range(self.radius):
+ for j in range(i+1):
+ output_i = torch.einsum('dsb,ndbij->nsij', self.coeffs_list[i*(i+1)//2 + j], ops_out)
+
+ mat_diag_bias = torch.eye(inputs.size()[3]).unsqueeze(0).unsqueeze(0).to(self.device) * self.diag_bias_list[i*(i+1)//2 + j]
+ if j == 0:
+ output = output_i + mat_diag_bias
+ else:
+ output = torch.einsum('abcd,abde->abce', output_i, output)
+ output_list.append(output)
+
+ output = 0
+ for i in range(self.radius):
+ output += output_list[i] * self.switch[i]
+
+ output = output + self.all_bias
+
+ if self.layer_norm:
+            # Now, changing shapes from [1xdxnxn] to [nxnxd] for LayerNorm
+ output = output.permute(3,2,1,0).squeeze()
+
+ # output = self.bn_x(output.reshape(m*m, self.output_dim.item())) # batch normalization
+ output = self.ln_x(output) # layer normalization
+
+ # Returning output back to original shape
+ output = output.reshape(m, m, self.output_dim.item())
+ output = output.permute(2,1,0).unsqueeze(0)
+
+ output = F.relu(output) # non-linear activation
+
+ if self.residual:
+ # Now, changing shapes from [1xdxnxn] to [nxnxd] for Linear() layer
+ inputs, output = inputs.permute(3,2,1,0).squeeze(), output.permute(3,2,1,0).squeeze()
+
+ residual_ = self.res_x(inputs)
+ output = residual_ + output # residual connection
+
+ # Returning output back to original shape
+ output = output.permute(2,1,0).unsqueeze(0)
+
+ output = F.dropout(output, self.dropout, training=self.training)
+
+ return output
+
+
+def ops_2_to_2(inputs, dim, normalization='inf', normalization_val=1.0): # N x D x m x m
+ # input: N x D x m x m
+ diag_part = torch.diagonal(inputs, dim1 = 2, dim2 = 3) # N x D x m
+ sum_diag_part = torch.sum(diag_part, dim=2, keepdim = True) # N x D x 1
+ sum_of_rows = torch.sum(inputs, dim=3) # N x D x m
+ sum_of_cols = torch.sum(inputs, dim=2) # N x D x m
+ sum_all = torch.sum(sum_of_rows, dim=2) # N x D
+
+ # op1 - (1234) - extract diag
+ op1 = torch.diag_embed(diag_part) # N x D x m x m
+
+ # op2 - (1234) + (12)(34) - place sum of diag on diag
+ op2 = torch.diag_embed(sum_diag_part.repeat(1, 1, dim))
+
+ # op3 - (1234) + (123)(4) - place sum of row i on diag ii
+ op3 = torch.diag_embed(sum_of_rows)
+
+ # op4 - (1234) + (124)(3) - place sum of col i on diag ii
+ op4 = torch.diag_embed(sum_of_cols)
+
+ # op5 - (1234) + (124)(3) + (123)(4) + (12)(34) + (12)(3)(4) - place sum of all entries on diag
+ op5 = torch.diag_embed(sum_all.unsqueeze(2).repeat(1, 1, dim))
+
+ # op6 - (14)(23) + (13)(24) + (24)(1)(3) + (124)(3) + (1234) - place sum of col i on row i
+ op6 = sum_of_cols.unsqueeze(3).repeat(1, 1, 1, dim)
+
+ # op7 - (14)(23) + (23)(1)(4) + (234)(1) + (123)(4) + (1234) - place sum of row i on row i
+ op7 = sum_of_rows.unsqueeze(3).repeat(1, 1, 1, dim)
+
+ # op8 - (14)(2)(3) + (134)(2) + (14)(23) + (124)(3) + (1234) - place sum of col i on col i
+ op8 = sum_of_cols.unsqueeze(2).repeat(1, 1, dim, 1)
+
+ # op9 - (13)(24) + (13)(2)(4) + (134)(2) + (123)(4) + (1234) - place sum of row i on col i
+ op9 = sum_of_rows.unsqueeze(2).repeat(1, 1, dim, 1)
+
+ # op10 - (1234) + (14)(23) - identity
+ op10 = inputs
+
+ # op11 - (1234) + (13)(24) - transpose
+ op11 = torch.transpose(inputs, -2, -1)
+
+ # op12 - (1234) + (234)(1) - place ii element in row i
+ op12 = diag_part.unsqueeze(3).repeat(1, 1, 1, dim)
+
+ # op13 - (1234) + (134)(2) - place ii element in col i
+ op13 = diag_part.unsqueeze(2).repeat(1, 1, dim, 1)
+
+ # op14 - (34)(1)(2) + (234)(1) + (134)(2) + (1234) + (12)(34) - place sum of diag in all entries
+ op14 = sum_diag_part.unsqueeze(3).repeat(1, 1, dim, dim)
+
+ # op15 - sum of all ops - place sum of all entries in all entries
+ op15 = sum_all.unsqueeze(2).unsqueeze(3).repeat(1, 1, dim, dim)
+
+ #A_2 = torch.einsum('abcd,abde->abce', inputs, inputs)
+ #A_4 = torch.einsum('abcd,abde->abce', A_2, A_2)
+ #op16 = torch.where(A_4>1, torch.ones(A_4.size()), A_4)
+
+ if normalization is not None:
+ float_dim = float(dim)
+        if normalization == 'inf':
+ op2 = torch.div(op2, float_dim)
+ op3 = torch.div(op3, float_dim)
+ op4 = torch.div(op4, float_dim)
+ op5 = torch.div(op5, float_dim**2)
+ op6 = torch.div(op6, float_dim)
+ op7 = torch.div(op7, float_dim)
+ op8 = torch.div(op8, float_dim)
+ op9 = torch.div(op9, float_dim)
+ op14 = torch.div(op14, float_dim)
+ op15 = torch.div(op15, float_dim**2)
+
+ #return [op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16]
+ return [op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15]
+
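+# Shape sketch for ops_2_to_2 (illustrative values, not benchmark sizes):
+#   x = torch.randn(1, 8, 5, 5)   # N x D x m x m
+#   ops = ops_2_to_2(x, 5)        # list of 15 tensors, each N x D x m x m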
+
+class LayerNorm(nn.Module):
+ def __init__(self, d):
+ super().__init__()
+ self.a = nn.Parameter(torch.ones(d).unsqueeze(0).unsqueeze(0)) # shape is 1 x 1 x d
+ self.b = nn.Parameter(torch.zeros(d).unsqueeze(0).unsqueeze(0)) # shape is 1 x 1 x d
+
+ def forward(self, x):
+ # x tensor of the shape n x n x d
+ mean = x.mean(dim=(0,1), keepdim=True)
+ var = x.var(dim=(0,1), keepdim=True, unbiased=False)
+ x = self.a * (x - mean) / torch.sqrt(var + 1e-6) + self.b # shape is n x n x d
+ return x
+
+
\ No newline at end of file
diff --git a/layers/sage_aggregator_layer.py b/layers/sage_aggregator_layer.py
deleted file mode 100644
index aaaf11aa3..000000000
--- a/layers/sage_aggregator_layer.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""
-Aggregator class(s) for the GraphSAGE example
-
-! Code started from dgl diffpool examples dir
-"""
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-class Aggregator(nn.Module):
- """
- Base Aggregator class.
- """
-
- def __init__(self):
- super().__init__()
-
- def forward(self, node):
- neighbour = node.mailbox['m']
- c = self.aggre(neighbour)
- return {"c": c}
-
- def aggre(self, neighbour):
- # N x F
- raise NotImplementedError
-
-
-class MeanAggregator(Aggregator):
- """
- Mean Aggregator for graphsage
- """
-
- def __init__(self):
- super().__init__()
-
- def aggre(self, neighbour):
- mean_neighbour = torch.mean(neighbour, dim=1)
- return mean_neighbour
-
-
-class MaxPoolAggregator(Aggregator):
- """
- Maxpooling aggregator for graphsage
- """
-
- def __init__(self, in_feats, out_feats, activation, bias):
- super().__init__()
- self.linear = nn.Linear(in_feats, out_feats, bias=bias)
- self.activation = activation
- # Xavier initialization of weight
-# nn.init.xavier_uniform_(self.linear.weight,
-# gain=nn.init.calculate_gain('relu'))
-
- def aggre(self, neighbour):
- neighbour = self.linear(neighbour)
- if self.activation:
- neighbour = self.activation(neighbour)
- maxpool_neighbour = torch.max(neighbour, dim=1)[0]
- return maxpool_neighbour
-
-
-class LSTMAggregator(Aggregator):
- """
- LSTM aggregator for graphsage
- """
-
- def __init__(self, in_feats, hidden_feats):
- super().__init__()
- self.lstm = nn.LSTM(in_feats, hidden_feats, batch_first=True)
- self.hidden_dim = hidden_feats
- self.hidden = self.init_hidden()
-
- nn.init.xavier_uniform_(self.lstm.weight,
- gain=nn.init.calculate_gain('relu'))
-
- def init_hidden(self):
- """
- Defaulted to initialite all zero
- """
- return (torch.zeros(1, 1, self.hidden_dim),
- torch.zeros(1, 1, self.hidden_dim))
-
- def aggre(self, neighbours):
- """
- aggregation function
- """
- # N X F
- rand_order = torch.randperm(neighbours.size()[1])
- neighbours = neighbours[:, rand_order, :]
-
- (lstm_out, self.hidden) = self.lstm(neighbours.view(neighbours.size()[0],
- neighbours.size()[
- 1],
- -1))
- return lstm_out[:, -1, :]
-
- def forward(self, node):
- neighbour = node.mailbox['m']
- c = self.aggre(neighbour)
- return {"c": c}
\ No newline at end of file
diff --git a/layers/tensorized/assignment_layer.py b/layers/tensorized/assignment_layer.py
deleted file mode 100644
index eae144008..000000000
--- a/layers/tensorized/assignment_layer.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import torch
-
-from torch import nn as nn
-from torch.nn import functional as F
-from torch.autograd import Variable
-
-"""
- This layer is the generating the Assignment matrix as shown in
- equation (6) of the DIFFPOOL paper.
- ! code started from dgl diffpool examples dir
-"""
-
-from .dense_graphsage_layer import DenseGraphSage
-
-class DiffPoolAssignment(nn.Module):
- def __init__(self, nfeat, nnext):
- super().__init__()
- self.assign_mat = DenseGraphSage(nfeat, nnext, use_bn=True)
-
- def forward(self, x, adj, log=False):
- s_l_init = self.assign_mat(x, adj)
- s_l = F.softmax(s_l_init, dim=-1)
- return s_l
\ No newline at end of file
diff --git a/layers/tensorized/dense_diffpool_layer.py b/layers/tensorized/dense_diffpool_layer.py
deleted file mode 100644
index 86cf38d20..000000000
--- a/layers/tensorized/dense_diffpool_layer.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import torch
-from torch import nn as nn
-
-"""
-
-
- DIFFPOOL:
- Z. Ying, J. You, C. Morris, X. Ren, W. Hamilton, and J. Leskovec,
- Hierarchical graph representation learning with differentiable pooling (NeurIPS 2018)
- https://arxiv.org/pdf/1806.08804.pdf
-
- ! code started from dgl diffpool examples dir
-"""
-
-from .assignment_layer import DiffPoolAssignment
-from .dense_graphsage_layer import DenseGraphSage
-
-
-class EntropyLoss(nn.Module):
- # Return Scalar
- # loss used in diffpool
- def forward(self, adj, anext, s_l):
- entropy = (torch.distributions.Categorical(
- probs=s_l).entropy()).sum(-1).mean(-1)
- assert not torch.isnan(entropy)
- return entropy
-
-
-class LinkPredLoss(nn.Module):
- # loss used in diffpool
- def forward(self, adj, anext, s_l):
- link_pred_loss = (
- adj - s_l.matmul(s_l.transpose(-1, -2))).norm(dim=(1, 2))
- link_pred_loss = link_pred_loss / (adj.size(1) * adj.size(2))
- return link_pred_loss.mean()
-
-
-class DenseDiffPool(nn.Module):
- def __init__(self, nfeat, nnext, nhid, link_pred=False, entropy=True):
- super().__init__()
- self.link_pred = link_pred
- self.log = {}
- self.link_pred_layer = self.LinkPredLoss()
- self.embed = DenseGraphSage(nfeat, nhid, use_bn=True)
- self.assign = DiffPoolAssignment(nfeat, nnext)
- self.reg_loss = nn.ModuleList([])
- self.loss_log = {}
- if link_pred:
- self.reg_loss.append(LinkPredLoss())
- if entropy:
- self.reg_loss.append(EntropyLoss())
-
- def forward(self, x, adj, log=False):
- z_l = self.embed(x, adj)
- s_l = self.assign(x, adj)
- if log:
- self.log['s'] = s_l.cpu().numpy()
- xnext = torch.matmul(s_l.transpose(-1, -2), z_l)
- anext = (s_l.transpose(-1, -2)).matmul(adj).matmul(s_l)
-
- for loss_layer in self.reg_loss:
- loss_name = str(type(loss_layer).__name__)
- self.loss_log[loss_name] = loss_layer(adj, anext, s_l)
- if log:
- self.log['a'] = anext.cpu().numpy()
- return xnext, anext
-
diff --git a/layers/tensorized/dense_graphsage_layer.py b/layers/tensorized/dense_graphsage_layer.py
deleted file mode 100644
index 39abf806b..000000000
--- a/layers/tensorized/dense_graphsage_layer.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import torch
-from torch import nn as nn
-from torch.nn import functional as F
-
-"""
-
-
- GraphSAGE:
- William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
- https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
-
- ! code started from the dgl diffpool examples dir
-"""
-
-class DenseGraphSage(nn.Module):
- def __init__(self, infeat, outfeat, residual=False, use_bn=True,
- mean=False, add_self=False):
- super().__init__()
- self.add_self = add_self
- self.use_bn = use_bn
- self.mean = mean
- self.residual = residual
-
- if infeat != outfeat:
- self.residual = False
-
- self.W = nn.Linear(infeat, outfeat, bias=True)
-
- nn.init.xavier_uniform_(
- self.W.weight,
- gain=nn.init.calculate_gain('relu'))
-
- def forward(self, x, adj):
- h_in = x # for residual connection
-
- if self.use_bn and not hasattr(self, 'bn'):
- self.bn = nn.BatchNorm1d(adj.size(1)).to(adj.device)
-
- if self.add_self:
- adj = adj + torch.eye(adj.size(0)).to(adj.device)
-
- if self.mean:
- adj = adj / adj.sum(1, keepdim=True)
-
- h_k_N = torch.matmul(adj, x)
- h_k = self.W(h_k_N)
- h_k = F.normalize(h_k, dim=2, p=2)
- h_k = F.relu(h_k)
-
- if self.residual:
- h_k = h_in + h_k # residual connection
-
- if self.use_bn:
- h_k = self.bn(h_k)
- return h_k
-
- def __repr__(self):
- if self.use_bn:
- return 'BN' + super(DenseGraphSage, self).__repr__()
- else:
- return super(DenseGraphSage, self).__repr__()
\ No newline at end of file
diff --git a/layers/three_wl_gnn_layers.py b/layers/three_wl_gnn_layers.py
new file mode 100644
index 000000000..b99ed4e2e
--- /dev/null
+++ b/layers/three_wl_gnn_layers.py
@@ -0,0 +1,155 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+"""
+ Layers used for
+ 3WLGNN
+ Provably Powerful Graph Networks (Maron et al., 2019)
+ https://papers.nips.cc/paper/8488-provably-powerful-graph-networks.pdf
+
+ CODE adapted from https://github.com/hadarser/ProvablyPowerfulGraphNetworks_torch/
+"""
+
+class RegularBlock(nn.Module):
+ """
+    Inputs: N x in_features x m x m
+    Takes the input through two parallel MLP routes, multiplies the results, and adds a skip-connection at the end.
+    At the skip-connection, the dimension is reduced back to out_features.
+ """
+ def __init__(self, depth_of_mlp, in_features, out_features, residual=False):
+ super().__init__()
+
+ self.residual = residual
+
+ self.mlp1 = MlpBlock(in_features, out_features, depth_of_mlp)
+ self.mlp2 = MlpBlock(in_features, out_features, depth_of_mlp)
+
+ self.skip = SkipConnection(in_features+out_features, out_features)
+
+ if self.residual:
+ self.res_x = nn.Linear(in_features, out_features)
+
+ def forward(self, inputs):
+ mlp1 = self.mlp1(inputs)
+ mlp2 = self.mlp2(inputs)
+
+ mult = torch.matmul(mlp1, mlp2)
+
+ out = self.skip(in1=inputs, in2=mult)
+
+ if self.residual:
+ # Now, changing shapes from [1xdxnxn] to [nxnxd] for Linear() layer
+ inputs, out = inputs.permute(3,2,1,0).squeeze(), out.permute(3,2,1,0).squeeze()
+
+ residual_ = self.res_x(inputs)
+ out = residual_ + out # residual connection
+
+ # Returning output back to original shape
+ out = out.permute(2,1,0).unsqueeze(0)
+
+ return out
+
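+# Hedged shape sketch for RegularBlock (illustrative sizes, not benchmark values):
+#   block = RegularBlock(depth_of_mlp=2, in_features=8, out_features=16)
+#   y = block(torch.randn(1, 8, 10, 10))   # -> 1 x 16 x 10 x 10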
+
+class MlpBlock(nn.Module):
+ """
+ Block of MLP layers with activation function after each (1x1 conv layers).
+ """
+ def __init__(self, in_features, out_features, depth_of_mlp, activation_fn=nn.functional.relu):
+ super().__init__()
+ self.activation = activation_fn
+ self.convs = nn.ModuleList()
+ for i in range(depth_of_mlp):
+ self.convs.append(nn.Conv2d(in_features, out_features, kernel_size=1, padding=0, bias=True))
+ _init_weights(self.convs[-1])
+ in_features = out_features
+
+ def forward(self, inputs):
+ out = inputs
+ for conv_layer in self.convs:
+ out = self.activation(conv_layer(out))
+
+ return out
+
+
+class SkipConnection(nn.Module):
+ """
+ Connects the two given inputs with concatenation
+ :param in1: earlier input tensor of shape N x d1 x m x m
+ :param in2: later input tensor of shape N x d2 x m x m
+ :param in_features: d1+d2
+ :param out_features: output num of features
+ :return: Tensor of shape N x output_depth x m x m
+ """
+ def __init__(self, in_features, out_features):
+ super().__init__()
+ self.conv = nn.Conv2d(in_features, out_features, kernel_size=1, padding=0, bias=True)
+ _init_weights(self.conv)
+
+ def forward(self, in1, in2):
+ # in1: N x d1 x m x m
+ # in2: N x d2 x m x m
+ out = torch.cat((in1, in2), dim=1)
+ out = self.conv(out)
+ return out
+
+
+class FullyConnected(nn.Module):
+ def __init__(self, in_features, out_features, activation_fn=nn.functional.relu):
+ super().__init__()
+
+ self.fc = nn.Linear(in_features, out_features)
+ _init_weights(self.fc)
+
+ self.activation = activation_fn
+
+ def forward(self, input):
+ out = self.fc(input)
+ if self.activation is not None:
+ out = self.activation(out)
+
+ return out
+
+
+def diag_offdiag_maxpool(input):
+ N = input.shape[-1]
+
+ max_diag = torch.max(torch.diagonal(input, dim1=-2, dim2=-1), dim=2)[0] # BxS
+
+ # with torch.no_grad():
+ max_val = torch.max(max_diag)
+ min_val = torch.max(-1 * input)
+ val = torch.abs(torch.add(max_val, min_val))
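+    # subtracting val on the diagonal (via min_mat) pushes diagonal entries to at most
+    # the global minimum, so the max below is effectively over off-diagonal entries only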
+
+ min_mat = torch.mul(val, torch.eye(N, device=input.device)).view(1, 1, N, N)
+
+ max_offdiag = torch.max(torch.max(input - min_mat, dim=3)[0], dim=2)[0] # BxS
+
+ return torch.cat((max_diag, max_offdiag), dim=1) # output Bx2S
+
+def _init_weights(layer):
+ """
+    Initialise the given layer's weights with Xavier uniform and zero its bias, if present.
+ """
+ nn.init.xavier_uniform_(layer.weight)
+ # nn.init.xavier_normal_(layer.weight)
+ if layer.bias is not None:
+ nn.init.zeros_(layer.bias)
+
+
+class LayerNorm(nn.Module):
+ def __init__(self, d):
+ super().__init__()
+ self.a = nn.Parameter(torch.ones(d).unsqueeze(0).unsqueeze(0)) # shape is 1 x 1 x d
+ self.b = nn.Parameter(torch.zeros(d).unsqueeze(0).unsqueeze(0)) # shape is 1 x 1 x d
+
+ def forward(self, x):
+ # x tensor of the shape n x n x d
+ mean = x.mean(dim=(0,1), keepdim=True)
+ var = x.var(dim=(0,1), keepdim=True, unbiased=False)
+ x = self.a * (x - mean) / torch.sqrt(var + 1e-6) + self.b # shape is n x n x d
+ return x
+
+
\ No newline at end of file
diff --git a/main_COLLAB_edge_classification.ipynb b/main_COLLAB_edge_classification.ipynb
new file mode 100644
index 000000000..5c416a388
--- /dev/null
+++ b/main_COLLAB_edge_classification.ipynb
@@ -0,0 +1,863 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Main Driver Notebook for Training Graph NNs on OGBL-COLLAB for Edge Classification"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### MODELS\n",
+ "- GatedGCN \n",
+ "- GCN \n",
+ "- GAT \n",
+ "- GraphSage \n",
+ "- GIN \n",
+ "- MoNet \n",
+ "- MLP \n",
+ "- Matrix Factorization (MF)\n",
+ "\n",
+ "### DATASET\n",
+ "- OGBL-COLLAB\n",
+ "\n",
+ "### TASK\n",
+ "- Edge Classification"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\"\"\"\n",
+ " IMPORTING LIBS\n",
+ "\"\"\"\n",
+ "import dgl\n",
+ "\n",
+ "import numpy as np\n",
+ "import os\n",
+ "import socket\n",
+ "import time\n",
+ "import random\n",
+ "import glob\n",
+ "import argparse, json\n",
+ "import pickle\n",
+ "\n",
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.nn.functional as F\n",
+ "\n",
+ "import torch.optim as optim\n",
+ "from torch.utils.data import DataLoader\n",
+ "\n",
+ "from tensorboardX import SummaryWriter\n",
+ "from tqdm import tqdm\n",
+ "\n",
+ "class DotDict(dict):\n",
+ " def __init__(self, **kwds):\n",
+ " self.update(kwds)\n",
+ " self.__dict__ = self\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "True\n"
+ ]
+ }
+ ],
+ "source": [
+ "# \"\"\"\n",
+ "# AUTORELOAD IPYTHON EXTENSION FOR RELOADING IMPORTED MODULES\n",
+ "# \"\"\"\n",
+ "\n",
+ "def in_ipynb():\n",
+ " try:\n",
+ " cfg = get_ipython().config \n",
+ " return True\n",
+ " except NameError:\n",
+ " return False\n",
+ " \n",
+ "notebook_mode = in_ipynb()\n",
+ "print(notebook_mode)\n",
+ "\n",
+ "if notebook_mode == True:\n",
+ " %load_ext autoreload\n",
+ " %autoreload 2\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\"\"\"\n",
+ " IMPORTING CUSTOM MODULES/METHODS\n",
+ "\"\"\"\n",
+ "from nets.COLLAB_edge_classification.load_net import gnn_model # import all GNNS\n",
+ "from data.data import LoadData # import dataset\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\"\"\"\n",
+ " GPU Setup\n",
+ "\"\"\"\n",
+ "def gpu_setup(use_gpu, gpu_id):\n",
+ " os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n",
+ " os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id) \n",
+ "\n",
+ " if torch.cuda.is_available() and use_gpu:\n",
+ " print('cuda available with GPU:',torch.cuda.get_device_name(0))\n",
+ " device = torch.device(\"cuda\")\n",
+ " else:\n",
+ " print('cuda not available')\n",
+ " device = torch.device(\"cpu\")\n",
+ " return device\n",
+ "\n",
+ "\n",
+ "use_gpu = True\n",
+ "gpu_id = -1\n",
+ "device = None\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[I] Loading data (notebook) ...\n",
+ "[I] Loading dataset COLLAB...\n",
+ "[I] Finished loading.\n",
+ "[I] Data load time: 0.3213s\n",
+ "[I] Finished loading.\n"
+ ]
+ }
+ ],
+ "source": [
+ "# \"\"\"\n",
+ "# USER CONTROLS\n",
+ "# \"\"\"\n",
+ "if notebook_mode == True:\n",
+ " \n",
+ " # MODEL_NAME = '3WLGNN'\n",
+ " # MODEL_NAME = 'RingGNN'\n",
+ " # MODEL_NAME = 'MF'\n",
+ " # MODEL_NAME = 'MLP'\n",
+ " # MODEL_NAME = 'MLP'\n",
+ " # MODEL_NAME = 'GAT'\n",
+ " MODEL_NAME = 'GatedGCN'\n",
+ " # MODEL_NAME = 'GAT'\n",
+ " # MODEL_NAME = 'GraphSage'\n",
+ " # MODEL_NAME = 'DiffPool'\n",
+ " # MODEL_NAME = 'GIN'\n",
+ "\n",
+ " DATASET_NAME = 'OGBL-COLLAB'\n",
+ "\n",
+ " out_dir = 'out/COLLAB_edge_classification/debug/'\n",
+ " root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
+ " root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
+ "\n",
+ " print(\"[I] Loading data (notebook) ...\")\n",
+ " dataset = LoadData(DATASET_NAME)\n",
+ " print(\"[I] Finished loading.\")\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# \"\"\"\n",
+ "# PARAMETERS\n",
+ "# \"\"\"\n",
+ "if notebook_mode == True:\n",
+ " \n",
+ "# MODEL_NAME = 'MF'\n",
+ " MODEL_NAME = 'GatedGCN'\n",
+ " \n",
+ " n_heads = -1\n",
+ " edge_feat = False\n",
+ " pseudo_dim_MoNet = -1\n",
+ " kernel = -1\n",
+ " gnn_per_block = -1\n",
+ " embedding_dim = -1\n",
+ " pool_ratio = -1\n",
+ " n_mlp_GIN = -1\n",
+ " gated = False\n",
+ " self_loop = False\n",
+ " max_time = 12\n",
+ " layer_type = 'dgl'\n",
+ " num_embs = -1\n",
+ " pos_enc = True\n",
+ " #pos_enc = False\n",
+ " pos_enc_dim = 10\n",
+ "\n",
+ " \n",
+ " if MODEL_NAME == 'MF':\n",
+ " seed=41; epochs=500; batch_size=32*1024; init_lr=0.01; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
+ " L=0; hidden_dim=256; out_dim=hidden_dim; num_embs=235868;\n",
+ " \n",
+ " if MODEL_NAME == 'MLP':\n",
+ " seed=41; epochs=500; batch_size=32*1024; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
+ " L=3; hidden_dim=80; out_dim=hidden_dim; dropout=0.0; readout='mean'; gated = False # Change gated = True for Gated MLP model\n",
+ " \n",
+ " if MODEL_NAME == 'GCN':\n",
+ " seed=41; epochs=500; batch_size=32*1024; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
+ " L=3; hidden_dim=74; out_dim=hidden_dim; dropout=0.0; readout='mean';\n",
+ " \n",
+ " if MODEL_NAME == 'GraphSage':\n",
+ " seed=41; epochs=500; batch_size=32*1024; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
+ " L=3; hidden_dim=38; out_dim=hidden_dim; dropout=0.0; readout='mean'; layer_type='edgefeat'\n",
+ "\n",
+ " if MODEL_NAME == 'GAT':\n",
+ " seed=41; epochs=500; batch_size=32*1024; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
+ " L=3; n_heads=3; hidden_dim=19; out_dim=n_heads*hidden_dim; dropout=0.0; readout='mean'; layer_type='dgl'\n",
+ " \n",
+ " if MODEL_NAME == 'GIN':\n",
+ " seed=41; epochs=500; batch_size=32*1024; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
+ " L=3; hidden_dim=60; out_dim=hidden_dim; dropout=0.0; readout='mean';\n",
+ " \n",
+ " if MODEL_NAME == 'MoNet':\n",
+ " seed=41; epochs=500; batch_size=32*1024; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
+ " L=3; hidden_dim=53; out_dim=hidden_dim; dropout=0.0; readout='mean';\n",
+ " \n",
+ " if MODEL_NAME == 'GatedGCN':\n",
+ " seed=41; epochs=500; batch_size=32*1024; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
+ " L=3; hidden_dim=35; out_dim=hidden_dim; dropout=0.0; readout='mean'; edge_feat = False; layer_type='edgereprfeat'\n",
+ " \n",
+ " # generic new_params\n",
+ " net_params = {}\n",
+ " net_params['device'] = device\n",
+ " net_params['in_dim'] = dataset.graph.ndata['feat'].shape[-1]\n",
+ " net_params['in_dim_edge'] = dataset.graph.edata['feat'].shape[-1]\n",
+ " net_params['residual'] = True\n",
+ " net_params['hidden_dim'] = hidden_dim\n",
+ " net_params['out_dim'] = out_dim\n",
+ " num_classes = 1\n",
+ " net_params['n_classes'] = num_classes\n",
+ " net_params['n_heads'] = n_heads\n",
+ " net_params['L'] = L # min L should be 2\n",
+ " net_params['readout'] = \"mean\"\n",
+ " net_params['layer_norm'] = True\n",
+ " net_params['batch_norm'] = True\n",
+ " net_params['in_feat_dropout'] = 0.0\n",
+ " net_params['dropout'] = 0.0\n",
+ " net_params['edge_feat'] = edge_feat\n",
+ " net_params['self_loop'] = self_loop\n",
+ " net_params['layer_type'] = layer_type\n",
+ " \n",
+ " # for MF\n",
+ " net_params['num_embs'] = num_embs\n",
+ " \n",
+ " # for MLPNet \n",
+ " net_params['gated'] = gated\n",
+ " \n",
+ " # specific for MoNet\n",
+ " net_params['pseudo_dim_MoNet'] = 2\n",
+ " net_params['kernel'] = 3\n",
+ " \n",
+ " # specific for GIN\n",
+ " net_params['n_mlp_GIN'] = 2\n",
+ " net_params['learn_eps_GIN'] = True\n",
+ " net_params['neighbor_aggr_GIN'] = 'sum'\n",
+ " \n",
+ " # specific for graphsage\n",
+ " net_params['sage_aggregator'] = 'maxpool' \n",
+ " \n",
+ " # specific for pos_enc_dim\n",
+ " net_params['pos_enc'] = pos_enc\n",
+ " net_params['pos_enc_dim'] = pos_enc_dim\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "MODEL DETAILS:\n",
+ "\n",
+ "GatedGCNNet(\n",
+ " (embedding_pos_enc): Linear(in_features=10, out_features=35, bias=True)\n",
+ " (embedding_h): Linear(in_features=128, out_features=35, bias=True)\n",
+ " (embedding_e): Linear(in_features=2, out_features=35, bias=True)\n",
+ " (layers): ModuleList(\n",
+ " (0): GatedGCNLayer(in_channels=35, out_channels=35)\n",
+ " (1): GatedGCNLayer(in_channels=35, out_channels=35)\n",
+ " (2): GatedGCNLayer(in_channels=35, out_channels=35)\n",
+ " )\n",
+ " (MLP_layer): MLPReadout(\n",
+ " (FC_layers): ModuleList(\n",
+ " (0): Linear(in_features=70, out_features=35, bias=True)\n",
+ " (1): Linear(in_features=35, out_features=17, bias=True)\n",
+ " (2): Linear(in_features=17, out_features=1, bias=True)\n",
+ " )\n",
+ " )\n",
+ ")\n",
+ "MODEL/Total parameters: GatedGCN 27440\n"
+ ]
+ }
+ ],
+ "source": [
+ "\"\"\"\n",
+ " VIEWING MODEL CONFIG AND PARAMS\n",
+ "\"\"\"\n",
+ "def view_model_param(MODEL_NAME, net_params):\n",
+ " model = gnn_model(MODEL_NAME, net_params)\n",
+ " total_param = 0\n",
+ " print(\"MODEL DETAILS:\\n\")\n",
+ " print(model)\n",
+ " for param in model.parameters():\n",
+ " # print(param.data.size())\n",
+ " total_param += np.prod(list(param.data.size()))\n",
+ " print('MODEL/Total parameters:', MODEL_NAME, total_param)\n",
+ " return total_param\n",
+ "\n",
+ "\n",
+ "if notebook_mode == True:\n",
+ " view_model_param(MODEL_NAME, net_params)\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\"\"\"\n",
+ " TRAINING CODE\n",
+ "\"\"\"\n",
+ "\n",
+ "def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):\n",
+ " t0 = time.time()\n",
+ " per_epoch_time = []\n",
+ " \n",
+ " DATASET_NAME = dataset.name\n",
+ " \n",
+ " #assert net_params['self_loop'] == False, \"No self-loop support for %s dataset\" % DATASET_NAME\n",
+ " \n",
+ " if MODEL_NAME in ['GatedGCN']:\n",
+ " if net_params['pos_enc']:\n",
+ " print(\"[!] Adding graph positional encoding\",net_params['pos_enc_dim'])\n",
+ " dataset._add_positional_encodings(net_params['pos_enc_dim'])\n",
+ " print('Time PE:',time.time()-t0)\n",
+ " \n",
+ " graph = dataset.graph\n",
+ " \n",
+ " evaluator = dataset.evaluator\n",
+ " \n",
+ " train_edges, val_edges, val_edges_neg, test_edges, test_edges_neg = dataset.train_edges, dataset.val_edges, dataset.val_edges_neg, dataset.test_edges, dataset.test_edges_neg\n",
+ " \n",
+ " root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs\n",
+ " device = net_params['device']\n",
+ " \n",
+ " # Write the network and optimization hyper-parameters in folder config/\n",
+ " with open(write_config_file + '.txt', 'w') as f:\n",
+ " f.write(\"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n\\nTotal Parameters: {}\\n\\n\"\"\"\\\n",
+ " .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))\n",
+ " \n",
+ " log_dir = os.path.join(root_log_dir, \"RUN_\" + str(0))\n",
+ " writer = SummaryWriter(log_dir=log_dir)\n",
+ "\n",
+ " # setting seeds\n",
+ " random.seed(params['seed'])\n",
+ " np.random.seed(params['seed'])\n",
+ " torch.manual_seed(params['seed'])\n",
+ " if device.type == 'cuda':\n",
+ " torch.cuda.manual_seed(params['seed'])\n",
+ " \n",
+ " print(\"Graph: \", graph)\n",
+ " print(\"Training Edges: \", len(train_edges))\n",
+ " print(\"Validation Edges: \", len(val_edges) + len(val_edges_neg))\n",
+ " print(\"Test Edges: \", len(test_edges) + len(test_edges_neg))\n",
+ "\n",
+ " model = gnn_model(MODEL_NAME, net_params)\n",
+ " model = model.to(device)\n",
+ "\n",
+ " optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])\n",
+ " scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max',\n",
+ " factor=params['lr_reduce_factor'],\n",
+ " patience=params['lr_schedule_patience'],\n",
+ " verbose=True)\n",
+ " \n",
+ " epoch_train_losses = []\n",
+ " epoch_train_hits, epoch_val_hits = [], [] \n",
+ " \n",
+ " if MODEL_NAME in ['RingGNN', '3WLGNN']:\n",
+ " raise NotImplementedError # gave OOM while preparing dense tensor\n",
+ " else:\n",
+ " # import train functions for all other GCNs\n",
+ " from train.train_COLLAB_edge_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network\n",
+ " \n",
+ " # At any point you can hit Ctrl + C to break out of training early.\n",
+ " try:\n",
+ " monet_pseudo = None\n",
+ " if MODEL_NAME == \"MoNet\":\n",
+ " print(\"\\nPre-computing MoNet pseudo-edges\")\n",
+ " # for MoNet: computing the 'pseudo' named tensor which depends on node degrees\n",
+ " us, vs = graph.edges()\n",
+ " # to avoid zero division in case in_degree is 0, we add constant '1' in all node degrees denoting self-loop\n",
+ " monet_pseudo = [ \n",
+ " [1/np.sqrt(graph.in_degree(us[i])+1), 1/np.sqrt(graph.in_degree(vs[i])+1)] \n",
+ " for i in range(graph.number_of_edges())\n",
+ " ]\n",
+ " monet_pseudo = torch.Tensor(monet_pseudo)\n",
+ " \n",
+ " with tqdm(range(params['epochs'])) as t:\n",
+ " for epoch in t:\n",
+ "\n",
+ " t.set_description('Epoch %d' % epoch) \n",
+ "\n",
+ " start = time.time()\n",
+ " \n",
+ " epoch_train_loss, optimizer = train_epoch(model, optimizer, device, graph, train_edges, params['batch_size'], epoch, monet_pseudo)\n",
+ " \n",
+ " epoch_train_hits, epoch_val_hits, epoch_test_hits = evaluate_network(\n",
+ " model, device, graph, train_edges, val_edges, val_edges_neg, test_edges, test_edges_neg, evaluator, params['batch_size'], epoch, monet_pseudo)\n",
+ " \n",
+ " epoch_train_losses.append(epoch_train_loss)\n",
+ " epoch_train_hits.append(epoch_train_hits)\n",
+ " epoch_val_hits.append(epoch_val_hits)\n",
+ "\n",
+ " writer.add_scalar('train/_loss', epoch_train_loss, epoch)\n",
+ " \n",
+ " writer.add_scalar('train/_hits@10', epoch_train_hits[0]*100, epoch)\n",
+ " writer.add_scalar('train/_hits@50', epoch_train_hits[1]*100, epoch)\n",
+ " writer.add_scalar('train/_hits@100', epoch_train_hits[2]*100, epoch)\n",
+ " \n",
+ " writer.add_scalar('val/_hits@10', epoch_val_hits[0]*100, epoch)\n",
+ " writer.add_scalar('val/_hits@50', epoch_val_hits[1]*100, epoch)\n",
+ " writer.add_scalar('val/_hits@100', epoch_val_hits[2]*100, epoch)\n",
+ " \n",
+ " writer.add_scalar('test/_hits@10', epoch_test_hits[0]*100, epoch)\n",
+ " writer.add_scalar('test/_hits@50', epoch_test_hits[1]*100, epoch)\n",
+ " writer.add_scalar('test/_hits@100', epoch_test_hits[2]*100, epoch)\n",
+ " \n",
+ " writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch) \n",
+ "\n",
+ " t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],\n",
+ " train_loss=epoch_train_loss, train_hits=epoch_train_hits[1], \n",
+ " val_hits=epoch_val_hits[1], test_hits=epoch_test_hits[1]) \n",
+ "\n",
+ " per_epoch_time.append(time.time()-start)\n",
+ "\n",
+ " # Saving checkpoint\n",
+ " ckpt_dir = os.path.join(root_ckpt_dir, \"RUN_\")\n",
+ " if not os.path.exists(ckpt_dir):\n",
+ " os.makedirs(ckpt_dir)\n",
+ " torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + \"/epoch_\" + str(epoch)))\n",
+ "\n",
+ " files = glob.glob(ckpt_dir + '/*.pkl')\n",
+ " for file in files:\n",
+ " epoch_nb = file.split('_')[-1]\n",
+ " epoch_nb = int(epoch_nb.split('.')[0])\n",
+ " if epoch_nb < epoch-1:\n",
+ " os.remove(file)\n",
+ "\n",
+ " scheduler.step(epoch_val_hits[1])\n",
+ "\n",
+ " if optimizer.param_groups[0]['lr'] < params['min_lr']:\n",
+ " print(\"\\n!! LR EQUAL TO MIN LR SET.\")\n",
+ " break\n",
+ " \n",
+ " # Stop training after params['max_time'] hours\n",
+ " if time.time()-t0 > params['max_time']*3600:\n",
+ " print('-' * 89)\n",
+ " print(\"Max_time for training elapsed {:.2f} hours, so stopping\".format(params['max_time']))\n",
+ " break\n",
+ " \n",
+ " except KeyboardInterrupt:\n",
+ " print('-' * 89)\n",
+ " print('Exiting from training early because of KeyboardInterrupt')\n",
+ " \n",
+ " \n",
+ " train_hits, val_hits, test_hits = evaluate_network(\n",
+ " model, device, graph, train_edges, val_edges, val_edges_neg, test_edges, test_edges_neg, evaluator, params['batch_size'], epoch, monet_pseudo)\n",
+ "\n",
+ " print(f\"Test:\\nHits@10: {test_hits[0]*100:.4f}% \\nHits@50: {test_hits[1]*100:.4f}% \\nHits@100: {test_hits[2]*100:.4f}% \\n\")\n",
+ " print(f\"Train:\\nHits@10: {train_hits[0]*100:.4f}% \\nHits@50: {train_hits[1]*100:.4f}% \\nHits@100: {train_hits[2]*100:.4f}% \\n\")\n",
+ " print(\"Convergence Time (Epochs): {:.4f}\".format(epoch))\n",
+ " print(\"TOTAL TIME TAKEN: {:.4f}s\".format(time.time()-t0))\n",
+ " print(\"AVG TIME PER EPOCH: {:.4f}s\".format(np.mean(per_epoch_time)))\n",
+ "\n",
+ " writer.close()\n",
+ "\n",
+ " \"\"\"\n",
+ " Write the results in out_dir/results folder\n",
+ " \"\"\"\n",
+ " with open(write_file_name + '.txt', 'w') as f:\n",
+ " f.write(\"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n{}\\n\\nTotal Parameters: {}\\n\\n\n",
+ " FINAL RESULTS\\nTEST HITS@10: {:.4f}\\nTEST HITS@50: {:.4f}\\nTEST HITS@100: {:.4f}\\nTRAIN HITS@10: {:.4f}\\nTRAIN HITS@50: {:.4f}\\nTRAIN HITS@100: {:.4f}\\n\\n\n",
+ " Convergence Time (Epochs): {:.4f}\\nTotal Time Taken: {:.4f}hrs\\nAverage Time Per Epoch: {:.4f}s\\n\\n\\n\"\"\"\\\n",
+ " .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],\n",
+ " test_hits[0]*100, test_hits[1]*100, test_hits[2]*100, train_hits[0]*100, train_hits[1]*100, train_hits[2]*100,\n",
+ " epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Convert main_COLLAB_edge_classification.ipynb to main_COLLAB_edge_classification.py\n",
+ "Clean main_COLLAB_edge_classification.py\n",
+ "Done. \n",
+ "[I] Loading dataset COLLAB...\n",
+ "[I] Finished loading.\n",
+ "[I] Data load time: 0.4527s\n",
+ "cuda not available\n",
+ "MODEL DETAILS:\n",
+ "\n",
+ "GatedGCNNet(\n",
+ " (embedding_pos_enc): Linear(in_features=10, out_features=35, bias=True)\n",
+ " (embedding_h): Linear(in_features=128, out_features=35, bias=True)\n",
+ " (embedding_e): Linear(in_features=2, out_features=35, bias=True)\n",
+ " (layers): ModuleList(\n",
+ " (0): GatedGCNLayer(in_channels=35, out_channels=35)\n",
+ " (1): GatedGCNLayer(in_channels=35, out_channels=35)\n",
+ " (2): GatedGCNLayer(in_channels=35, out_channels=35)\n",
+ " )\n",
+ " (MLP_layer): MLPReadout(\n",
+ " (FC_layers): ModuleList(\n",
+ " (0): Linear(in_features=70, out_features=35, bias=True)\n",
+ " (1): Linear(in_features=35, out_features=17, bias=True)\n",
+ " (2): Linear(in_features=17, out_features=1, bias=True)\n",
+ " )\n",
+ " )\n",
+ ")\n",
+ "MODEL/Total parameters: GatedGCN 27440\n",
+ "[!] Adding graph positional encoding 10\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/Users/xbresson/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/base.py:25: UserWarning: Currently adjacency_matrix() returns a matrix with destination as rows by default. In 0.5 the result will have source as rows (i.e. transpose=True)\n",
+ " warnings.warn(msg, warn_type)\n",
+ "Epoch 0: 0%| | 0/500 [00:00, ?it/s]\n",
+ " 0%| | 0/36 [00:00, ?it/s]\u001b[A"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Time PE: 60.033313035964966\n",
+ "Graph: DGLGraph(num_nodes=235868, num_edges=2358104,\n",
+ " ndata_schemes={'feat': Scheme(shape=(128,), dtype=torch.float32), 'pos_enc': Scheme(shape=(10,), dtype=torch.float32)}\n",
+ " edata_schemes={'edge_weight': Scheme(shape=(1,), dtype=torch.int64), 'edge_year': Scheme(shape=(1,), dtype=torch.int64), 'feat': Scheme(shape=(2,), dtype=torch.int64)})\n",
+ "Training Edges: 1179052\n",
+ "Validation Edges: 160084\n",
+ "Test Edges: 146329\n"
+ ]
+ }
+ ],
+ "source": [
+ "def main(notebook_mode=False,config=None):\n",
+ " \n",
+ " \"\"\"\n",
+ " USER CONTROLS\n",
+ " \"\"\"\n",
+ " \n",
+ " # terminal mode\n",
+ " if notebook_mode==False:\n",
+ " \n",
+ " parser = argparse.ArgumentParser()\n",
+ " parser.add_argument('--config', help=\"Please give a config.json file with training/model/data/param details\")\n",
+ " parser.add_argument('--gpu_id', help=\"Please give a value for gpu id\")\n",
+ " parser.add_argument('--model', help=\"Please give a value for model name\")\n",
+ " parser.add_argument('--dataset', help=\"Please give a value for dataset name\")\n",
+ " parser.add_argument('--out_dir', help=\"Please give a value for out_dir\")\n",
+ " parser.add_argument('--seed', help=\"Please give a value for seed\")\n",
+ " parser.add_argument('--epochs', help=\"Please give a value for epochs\")\n",
+ " parser.add_argument('--batch_size', help=\"Please give a value for batch_size\")\n",
+ " parser.add_argument('--init_lr', help=\"Please give a value for init_lr\")\n",
+ " parser.add_argument('--lr_reduce_factor', help=\"Please give a value for lr_reduce_factor\")\n",
+ " parser.add_argument('--lr_schedule_patience', help=\"Please give a value for lr_schedule_patience\")\n",
+ " parser.add_argument('--min_lr', help=\"Please give a value for min_lr\")\n",
+ " parser.add_argument('--weight_decay', help=\"Please give a value for weight_decay\")\n",
+ " parser.add_argument('--print_epoch_interval', help=\"Please give a value for print_epoch_interval\") \n",
+ " parser.add_argument('--L', help=\"Please give a value for L\")\n",
+ " parser.add_argument('--hidden_dim', help=\"Please give a value for hidden_dim\")\n",
+ " parser.add_argument('--out_dim', help=\"Please give a value for out_dim\")\n",
+ " parser.add_argument('--residual', help=\"Please give a value for residual\")\n",
+ " parser.add_argument('--edge_feat', help=\"Please give a value for edge_feat\")\n",
+ " parser.add_argument('--readout', help=\"Please give a value for readout\")\n",
+ " parser.add_argument('--kernel', help=\"Please give a value for kernel\")\n",
+ " parser.add_argument('--n_heads', help=\"Please give a value for n_heads\")\n",
+ " parser.add_argument('--gated', help=\"Please give a value for gated\")\n",
+ " parser.add_argument('--in_feat_dropout', help=\"Please give a value for in_feat_dropout\")\n",
+ " parser.add_argument('--dropout', help=\"Please give a value for dropout\")\n",
+ " parser.add_argument('--layer_norm', help=\"Please give a value for layer_norm\")\n",
+ " parser.add_argument('--batch_norm', help=\"Please give a value for batch_norm\")\n",
+ " parser.add_argument('--sage_aggregator', help=\"Please give a value for sage_aggregator\")\n",
+ " parser.add_argument('--data_mode', help=\"Please give a value for data_mode\")\n",
+ " parser.add_argument('--num_pool', help=\"Please give a value for num_pool\")\n",
+ " parser.add_argument('--gnn_per_block', help=\"Please give a value for gnn_per_block\")\n",
+ " parser.add_argument('--embedding_dim', help=\"Please give a value for embedding_dim\")\n",
+ " parser.add_argument('--pool_ratio', help=\"Please give a value for pool_ratio\")\n",
+ " parser.add_argument('--linkpred', help=\"Please give a value for linkpred\")\n",
+ " parser.add_argument('--cat', help=\"Please give a value for cat\")\n",
+ " parser.add_argument('--self_loop', help=\"Please give a value for self_loop\")\n",
+ " parser.add_argument('--max_time', help=\"Please give a value for max_time\")\n",
+ " parser.add_argument('--layer_type', help=\"Please give a value for layer_type (for GAT and GatedGCN only)\")\n",
+ " parser.add_argument('--pos_enc_dim', help=\"Please give a value for pos_enc_dim\")\n",
+ " parser.add_argument('--pos_enc', help=\"Please give a value for pos_enc\")\n",
+ " args = parser.parse_args()\n",
+ " with open(args.config) as f:\n",
+ " config = json.load(f)\n",
+ " \n",
+ " \n",
+ " # device\n",
+ " if args.gpu_id is not None:\n",
+ " config['gpu']['id'] = int(args.gpu_id)\n",
+ " config['gpu']['use'] = True\n",
+ " device = gpu_setup(config['gpu']['use'], config['gpu']['id'])\n",
+ "\n",
+ " # model, dataset, out_dir\n",
+ " if args.model is not None:\n",
+ " MODEL_NAME = args.model\n",
+ " else:\n",
+ " MODEL_NAME = config['model']\n",
+ " if args.dataset is not None:\n",
+ " DATASET_NAME = args.dataset\n",
+ " else:\n",
+ " DATASET_NAME = config['dataset']\n",
+ " dataset = LoadData(DATASET_NAME)\n",
+ " if args.out_dir is not None:\n",
+ " out_dir = args.out_dir\n",
+ " else:\n",
+ " out_dir = config['out_dir']\n",
+ "\n",
+ " # parameters\n",
+ " params = config['params']\n",
+ " if args.seed is not None:\n",
+ " params['seed'] = int(args.seed)\n",
+ " if args.epochs is not None:\n",
+ " params['epochs'] = int(args.epochs)\n",
+ " if args.batch_size is not None:\n",
+ " params['batch_size'] = int(args.batch_size)\n",
+ " if args.init_lr is not None:\n",
+ " params['init_lr'] = float(args.init_lr)\n",
+ " if args.lr_reduce_factor is not None:\n",
+ " params['lr_reduce_factor'] = float(args.lr_reduce_factor)\n",
+ " if args.lr_schedule_patience is not None:\n",
+ " params['lr_schedule_patience'] = int(args.lr_schedule_patience)\n",
+ " if args.min_lr is not None:\n",
+ " params['min_lr'] = float(args.min_lr)\n",
+ " if args.weight_decay is not None:\n",
+ " params['weight_decay'] = float(args.weight_decay)\n",
+ " if args.print_epoch_interval is not None:\n",
+ " params['print_epoch_interval'] = int(args.print_epoch_interval)\n",
+ " if args.max_time is not None:\n",
+ " params['max_time'] = float(args.max_time)\n",
+ "\n",
+ " # network parameters\n",
+ " net_params = config['net_params']\n",
+ " net_params['device'] = device\n",
+ " net_params['gpu_id'] = config['gpu']['id']\n",
+ " net_params['batch_size'] = params['batch_size']\n",
+ " if args.L is not None:\n",
+ " net_params['L'] = int(args.L)\n",
+ " if args.hidden_dim is not None:\n",
+ " net_params['hidden_dim'] = int(args.hidden_dim)\n",
+ " if args.out_dim is not None:\n",
+ " net_params['out_dim'] = int(args.out_dim) \n",
+ " if args.residual is not None:\n",
+ " net_params['residual'] = True if args.residual=='True' else False\n",
+ " if args.edge_feat is not None:\n",
+ " net_params['edge_feat'] = True if args.edge_feat=='True' else False\n",
+ " if args.readout is not None:\n",
+ " net_params['readout'] = args.readout\n",
+ " if args.kernel is not None:\n",
+ " net_params['kernel'] = int(args.kernel)\n",
+ " if args.n_heads is not None:\n",
+ " net_params['n_heads'] = int(args.n_heads)\n",
+ " if args.gated is not None:\n",
+ " net_params['gated'] = True if args.gated=='True' else False\n",
+ " if args.in_feat_dropout is not None:\n",
+ " net_params['in_feat_dropout'] = float(args.in_feat_dropout)\n",
+ " if args.dropout is not None:\n",
+ " net_params['dropout'] = float(args.dropout)\n",
+ " if args.layer_norm is not None:\n",
+ " net_params['layer_norm'] = True if args.layer_norm=='True' else False\n",
+ " if args.batch_norm is not None:\n",
+ " net_params['batch_norm'] = True if args.batch_norm=='True' else False\n",
+ " if args.sage_aggregator is not None:\n",
+ " net_params['sage_aggregator'] = args.sage_aggregator\n",
+ " if args.data_mode is not None:\n",
+ " net_params['data_mode'] = args.data_mode\n",
+ " if args.num_pool is not None:\n",
+ " net_params['num_pool'] = int(args.num_pool)\n",
+ " if args.gnn_per_block is not None:\n",
+ " net_params['gnn_per_block'] = int(args.gnn_per_block)\n",
+ " if args.embedding_dim is not None:\n",
+ " net_params['embedding_dim'] = int(args.embedding_dim)\n",
+ " if args.pool_ratio is not None:\n",
+ " net_params['pool_ratio'] = float(args.pool_ratio)\n",
+ " if args.linkpred is not None:\n",
+ " net_params['linkpred'] = True if args.linkpred=='True' else False\n",
+ " if args.cat is not None:\n",
+ " net_params['cat'] = True if args.cat=='True' else False\n",
+ " if args.self_loop is not None:\n",
+ " net_params['self_loop'] = True if args.self_loop=='True' else False\n",
+ " if args.layer_type is not None:\n",
+ " net_params['layer_type'] = layer_type\n",
+ " if args.pos_enc is not None:\n",
+ " net_params['pos_enc'] = True if args.pos_enc=='True' else False\n",
+ " if args.pos_enc_dim is not None:\n",
+ " net_params['pos_enc_dim'] = int(args.pos_enc_dim)\n",
+ " \n",
+ "\n",
+ " # notebook mode\n",
+ " if notebook_mode:\n",
+ " \n",
+ " # parameters\n",
+ " params = config['params']\n",
+ " \n",
+ " # dataset\n",
+ " DATASET_NAME = config['dataset']\n",
+ " dataset = LoadData(DATASET_NAME)\n",
+ " \n",
+ " # device\n",
+ " device = gpu_setup(config['gpu']['use'], config['gpu']['id'])\n",
+ " out_dir = config['out_dir']\n",
+ " \n",
+ " # GNN model\n",
+ " MODEL_NAME = config['model']\n",
+ " \n",
+ " # network parameters\n",
+ " net_params = config['net_params']\n",
+ " net_params['device'] = device\n",
+ " net_params['gpu_id'] = config['gpu']['id']\n",
+ " net_params['batch_size'] = params['batch_size']\n",
+ " \n",
+ " \n",
+ " # COLLAB\n",
+ " net_params['in_dim'] = dataset.graph.ndata['feat'].shape[-1]\n",
+ " net_params['in_dim_edge'] = dataset.graph.edata['feat'].shape[-1]\n",
+ " net_params['n_classes'] = 1 # binary prediction\n",
+ " \n",
+ " root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
+ " root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
+ " write_file_name = out_dir + 'results/result_' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
+ " write_config_file = out_dir + 'configs/config_' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
+ " dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file\n",
+ "\n",
+ " if not os.path.exists(out_dir + 'results'):\n",
+ " os.makedirs(out_dir + 'results')\n",
+ " \n",
+ " if not os.path.exists(out_dir + 'configs'):\n",
+ " os.makedirs(out_dir + 'configs')\n",
+ "\n",
+ " net_params['total_param'] = view_model_param(MODEL_NAME, net_params)\n",
+ " train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)\n",
+ "\n",
+ " \n",
+ " \n",
+ "if notebook_mode==True:\n",
+ " \n",
+ " config = {}\n",
+ " # gpu config\n",
+ " gpu = {}\n",
+ " gpu['use'] = use_gpu\n",
+ " gpu['id'] = gpu_id\n",
+ " config['gpu'] = gpu\n",
+ " # GNN model, dataset, out_dir\n",
+ " config['model'] = MODEL_NAME\n",
+ " config['dataset'] = DATASET_NAME\n",
+ " config['out_dir'] = out_dir\n",
+ " # parameters\n",
+ " params = {}\n",
+ " params['seed'] = seed\n",
+ " params['epochs'] = epochs\n",
+ " params['batch_size'] = batch_size\n",
+ " params['init_lr'] = init_lr\n",
+ " params['lr_reduce_factor'] = lr_reduce_factor \n",
+ " params['lr_schedule_patience'] = lr_schedule_patience\n",
+ " params['min_lr'] = min_lr\n",
+ " params['weight_decay'] = weight_decay\n",
+ " params['print_epoch_interval'] = 5\n",
+ " params['max_time'] = max_time\n",
+ " config['params'] = params\n",
+ " # network parameters\n",
+ " config['net_params'] = net_params\n",
+ " \n",
+ " # convert to .py format\n",
+ " from utils.cleaner_main import *\n",
+ " cleaner_main('main_COLLAB_edge_classification')\n",
+ " \n",
+ " main(True,config)\n",
+ " \n",
+ "else:\n",
+ " \n",
+ " main()\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/main_CitationGraphs_node_classification.py b/main_COLLAB_edge_classification.py
similarity index 66%
rename from main_CitationGraphs_node_classification.py
rename to main_COLLAB_edge_classification.py
index 826130611..dd0b6c075 100644
--- a/main_CitationGraphs_node_classification.py
+++ b/main_COLLAB_edge_classification.py
@@ -31,6 +31,7 @@ class DotDict(dict):
def __init__(self, **kwds):
self.update(kwds)
self.__dict__ = self
+
@@ -40,10 +41,8 @@ def __init__(self, **kwds):
"""
IMPORTING CUSTOM MODULES/METHODS
"""
-
-from nets.CitationGraphs_node_classification.load_net import gnn_model # import GNNs
+from nets.COLLAB_edge_classification.load_net import gnn_model # import all GNNS
from data.data import LoadData # import dataset
-from train.train_CitationGraphs_node_classification import train_epoch, evaluate_network # import train functions
@@ -68,10 +67,6 @@ def gpu_setup(use_gpu, gpu_id):
-
-
-
-
"""
VIEWING MODEL CONFIG AND PARAMS
"""
@@ -79,9 +74,9 @@ def view_model_param(MODEL_NAME, net_params):
model = gnn_model(MODEL_NAME, net_params)
total_param = 0
print("MODEL DETAILS:\n")
- #print(model)
+ print(model)
for param in model.parameters():
- #print(param.data.size())
+ # print(param.data.size())
total_param += np.prod(list(param.data.size()))
print('MODEL/Total parameters:', MODEL_NAME, total_param)
return total_param
@@ -92,27 +87,29 @@ def view_model_param(MODEL_NAME, net_params):
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
-
- start0 = time.time()
+ t0 = time.time()
per_epoch_time = []
-
+
DATASET_NAME = dataset.name
- if MODEL_NAME in ['GCN', 'GAT']:
- if net_params['self_loop']:
- print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
- dataset._add_self_loops()
+ #assert net_params['self_loop'] == False, "No self-loop support for %s dataset" % DATASET_NAME
-
+ if MODEL_NAME in ['GatedGCN']:
+ if net_params['pos_enc']:
+ print("[!] Adding graph positional encoding",net_params['pos_enc_dim'])
+ dataset._add_positional_encodings(net_params['pos_enc_dim'])
+ print('Time PE:',time.time()-t0)
+
+ graph = dataset.graph
+
+ evaluator = dataset.evaluator
+
+ train_edges, val_edges, val_edges_neg, test_edges, test_edges_neg = dataset.train_edges, dataset.val_edges, dataset.val_edges_neg, dataset.test_edges, dataset.test_edges_neg
+
root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
device = net_params['device']
-
- train_mask = dataset.train_mask.to(device)
- val_mask = dataset.val_mask.to(device)
- test_mask = dataset.test_mask.to(device)
- labels = dataset.labels.to(device)
- # Write network and optimization hyper-parameters in folder config/
+ # Write the network and optimization hyper-parameters in folder config/
with open(write_config_file + '.txt', 'w') as f:
f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
@@ -123,62 +120,81 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
random.seed(params['seed'])
np.random.seed(params['seed'])
torch.manual_seed(params['seed'])
- if device == 'cuda':
+ if device.type == 'cuda':
torch.cuda.manual_seed(params['seed'])
- print("Training Nodes: ", train_mask.int().sum().item())
- print("Validation Nodes: ", val_mask.int().sum().item())
- print("Test Nodes: ", test_mask.int().sum().item())
- print("Number of Classes: ", net_params['n_classes'])
+ print("Graph: ", graph)
+ print("Training Edges: ", len(train_edges))
+ print("Validation Edges: ", len(val_edges) + len(val_edges_neg))
+ print("Test Edges: ", len(test_edges) + len(test_edges_neg))
model = gnn_model(MODEL_NAME, net_params)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
- #scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
- # factor=params['lr_reduce_factor'],
- # patience=params['lr_schedule_patience'],
- # verbose=True)
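+    # mode='max': the scheduler steps on validation Hits@50 (see scheduler.step below),
+    # a metric to maximize, rather than on a validation loss to minimize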
+ scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max',
+ factor=params['lr_reduce_factor'],
+ patience=params['lr_schedule_patience'],
+ verbose=True)
- epoch_train_losses, epoch_val_losses = [], []
- epoch_train_accs, epoch_val_accs = [], []
-
- graph = dataset.graph
- nfeat = graph.ndata['feat'].to(device)
- efeat = graph.edata['feat'].to(device)
- norm_n = dataset.norm_n.to(device)
- norm_e = dataset.norm_e.to(device)
+ epoch_train_losses = []
+    epoch_train_hits_list, epoch_val_hits_list = [], []
+
+ if MODEL_NAME in ['RingGNN', '3WLGNN']:
+ raise NotImplementedError # gave OOM while preparing dense tensor
+ else:
+ # import train functions for all other GCNs
+ from train.train_COLLAB_edge_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
# At any point you can hit Ctrl + C to break out of training early.
try:
+ monet_pseudo = None
+ if MODEL_NAME == "MoNet":
+ print("\nPre-computing MoNet pseudo-edges")
+                # for MoNet: compute the 'pseudo' edge tensor, which depends on node degrees
+ us, vs = graph.edges()
+                # add a constant 1 (a self-loop) to every node degree to avoid division by zero when in_degree is 0
+ monet_pseudo = [
+ [1/np.sqrt(graph.in_degree(us[i])+1), 1/np.sqrt(graph.in_degree(vs[i])+1)]
+ for i in range(graph.number_of_edges())
+ ]
+ monet_pseudo = torch.Tensor(monet_pseudo)
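+                # a vectorized equivalent (sketch, not part of the original script):
+                #   deg = graph.in_degrees().float() + 1
+                #   monet_pseudo = torch.stack([1/deg[us].sqrt(), 1/deg[vs].sqrt()], dim=1)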
+
with tqdm(range(params['epochs'])) as t:
for epoch in t:
- t.set_description('Epoch %d' % epoch)
+ t.set_description('Epoch %d' % epoch)
start = time.time()
-
- epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, graph, nfeat, efeat, norm_n, norm_e, train_mask, labels, epoch)
-
- epoch_val_loss, epoch_val_acc = evaluate_network(model, optimizer, device, graph, nfeat, efeat, norm_n, norm_e, val_mask, labels, epoch)
- epoch_test_loss, epoch_test_acc = evaluate_network(model, optimizer, device, graph, nfeat, efeat, norm_n, norm_e, test_mask, labels, epoch)
-
+
+ epoch_train_loss, optimizer = train_epoch(model, optimizer, device, graph, train_edges, params['batch_size'], epoch, monet_pseudo)
+
+ epoch_train_hits, epoch_val_hits, epoch_test_hits = evaluate_network(
+ model, device, graph, train_edges, val_edges, val_edges_neg, test_edges, test_edges_neg, evaluator, params['batch_size'], epoch, monet_pseudo)
+
epoch_train_losses.append(epoch_train_loss)
- epoch_val_losses.append(epoch_val_loss)
- epoch_train_accs.append(epoch_train_acc)
- epoch_val_accs.append(epoch_val_acc)
+                epoch_train_hits_list.append(epoch_train_hits)
+                epoch_val_hits_list.append(epoch_val_hits)
writer.add_scalar('train/_loss', epoch_train_loss, epoch)
- writer.add_scalar('val/_loss', epoch_val_loss, epoch)
- writer.add_scalar('train/_acc', epoch_train_acc, epoch)
- writer.add_scalar('val/_acc', epoch_val_acc, epoch)
- writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
+
+ writer.add_scalar('train/_hits@10', epoch_train_hits[0]*100, epoch)
+ writer.add_scalar('train/_hits@50', epoch_train_hits[1]*100, epoch)
+ writer.add_scalar('train/_hits@100', epoch_train_hits[2]*100, epoch)
+
+ writer.add_scalar('val/_hits@10', epoch_val_hits[0]*100, epoch)
+ writer.add_scalar('val/_hits@50', epoch_val_hits[1]*100, epoch)
+ writer.add_scalar('val/_hits@100', epoch_val_hits[2]*100, epoch)
+
+ writer.add_scalar('test/_hits@10', epoch_test_hits[0]*100, epoch)
+ writer.add_scalar('test/_hits@50', epoch_test_hits[1]*100, epoch)
+ writer.add_scalar('test/_hits@100', epoch_test_hits[2]*100, epoch)
+
+ writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
- _, epoch_test_acc = evaluate_network(model, optimizer, device, graph, nfeat, efeat, norm_n, norm_e, test_mask, labels, epoch)
t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
- train_loss=epoch_train_loss, val_loss=epoch_val_loss,
- train_acc=epoch_train_acc, val_acc=epoch_val_acc,
- test_acc=epoch_test_acc)
+ train_loss=epoch_train_loss, train_hits=epoch_train_hits[1],
+ val_hits=epoch_val_hits[1], test_hits=epoch_test_hits[1])
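+                # index [1] of each hits list is Hits@50, the metric used for the scheduler and the reported results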
per_epoch_time.append(time.time()-start)
@@ -195,15 +211,14 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
if epoch_nb < epoch-1:
os.remove(file)
- #scheduler.step(epoch_val_loss)
+ scheduler.step(epoch_val_hits[1])
if optimizer.param_groups[0]['lr'] < params['min_lr']:
- optimizer.param_groups[0]['lr'] = params['min_lr']
- #print("\n!! LR SMALLER OR EQUAL TO MIN LR THRESHOLD.")
- #break
+ print("\n!! LR EQUAL TO MIN LR SET.")
+ break
# Stop training after params['max_time'] hours
- if time.time()-start0 > params['max_time']*3600:
+ if time.time()-t0 > params['max_time']*3600:
print('-' * 89)
print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
break
@@ -213,11 +228,13 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
print('Exiting from training early because of KeyboardInterrupt')
- _, test_acc = evaluate_network(model, optimizer, device, graph, nfeat, efeat, norm_n, norm_e, test_mask, labels, epoch)
- _, train_acc = evaluate_network(model, optimizer, device, graph, nfeat, efeat, norm_n, norm_e, train_mask, labels, epoch)
- print("Test Accuracy: {:.4f}".format(test_acc))
- print("Train Accuracy: {:.4f}".format(train_acc))
- print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-start0))
+ train_hits, val_hits, test_hits = evaluate_network(
+ model, device, graph, train_edges, val_edges, val_edges_neg, test_edges, test_edges_neg, evaluator, params['batch_size'], epoch, monet_pseudo)
+
+ print(f"Test:\nHits@10: {test_hits[0]*100:.4f}% \nHits@50: {test_hits[1]*100:.4f}% \nHits@100: {test_hits[2]*100:.4f}% \n")
+ print(f"Train:\nHits@10: {train_hits[0]*100:.4f}% \nHits@50: {train_hits[1]*100:.4f}% \nHits@100: {train_hits[2]*100:.4f}% \n")
+ print("Convergence Time (Epochs): {:.4f}".format(epoch))
+ print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-t0))
print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
writer.close()
@@ -227,25 +244,12 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
"""
with open(write_file_name + '.txt', 'w') as f:
f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
- FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
- Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
+ FINAL RESULTS\nTEST HITS@10: {:.4f}\nTEST HITS@50: {:.4f}\nTEST HITS@100: {:.4f}\nTRAIN HITS@10: {:.4f}\nTRAIN HITS@50: {:.4f}\nTRAIN HITS@100: {:.4f}\n\n
+ Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f}hrs\nAverage Time Per Epoch: {:.4f}s\n\n\n"""\
.format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
- test_acc, train_acc, (time.time()-start0)/3600, np.mean(per_epoch_time)))
-
-
- # send results to gmail
- try:
- from gmail import send
- subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)
- body = """Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
- FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
- Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
- .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
- test_acc, train_acc, (time.time()-start0)/3600, np.mean(per_epoch_time))
- send(subject, body)
- except:
- pass
-
+ test_hits[0]*100, test_hits[1]*100, test_hits[2]*100, train_hits[0]*100, train_hits[1]*100, train_hits[2]*100,
+ epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))
+
@@ -261,7 +265,6 @@ def main():
parser.add_argument('--gpu_id', help="Please give a value for gpu id")
parser.add_argument('--model', help="Please give a value for model name")
parser.add_argument('--dataset', help="Please give a value for dataset name")
- parser.add_argument('--builtin', help="Please give a value for builtin")
parser.add_argument('--out_dir', help="Please give a value for out_dir")
parser.add_argument('--seed', help="Please give a value for seed")
parser.add_argument('--epochs', help="Please give a value for epochs")
@@ -283,7 +286,7 @@ def main():
parser.add_argument('--gated', help="Please give a value for gated")
parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
parser.add_argument('--dropout', help="Please give a value for dropout")
- parser.add_argument('--graph_norm', help="Please give a value for graph_norm")
+ parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
parser.add_argument('--data_mode', help="Please give a value for data_mode")
@@ -295,10 +298,14 @@ def main():
parser.add_argument('--cat', help="Please give a value for cat")
parser.add_argument('--self_loop', help="Please give a value for self_loop")
parser.add_argument('--max_time', help="Please give a value for max_time")
+ parser.add_argument('--layer_type', help="Please give a value for layer_type (for GAT and GatedGCN only)")
+ parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
+ parser.add_argument('--pos_enc', help="Please give a value for pos_enc")
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
+
# device
if args.gpu_id is not None:
config['gpu']['id'] = int(args.gpu_id)
@@ -367,8 +374,8 @@ def main():
net_params['in_feat_dropout'] = float(args.in_feat_dropout)
if args.dropout is not None:
net_params['dropout'] = float(args.dropout)
- if args.graph_norm is not None:
- net_params['graph_norm'] = True if args.graph_norm=='True' else False
+ if args.layer_norm is not None:
+ net_params['layer_norm'] = True if args.layer_norm=='True' else False
if args.batch_norm is not None:
net_params['batch_norm'] = True if args.batch_norm=='True' else False
if args.sage_aggregator is not None:
@@ -389,22 +396,25 @@ def main():
net_params['cat'] = True if args.cat=='True' else False
if args.self_loop is not None:
net_params['self_loop'] = True if args.self_loop=='True' else False
- if args.builtin is not None:
- net_params['builtin'] = True if args.builtin == 'True' else False
-
- # CitationGraph
- net_params['in_dim'] = dataset.num_dims # node_dim (feat is an integer)
- net_params['n_classes'] = dataset.num_classes
-
-
- if MODEL_NAME in ['MLP', 'MLP_GATED']:
- builtin = ''
- else:
- builtin = 'DGL' if net_params['builtin'] else 'Custom'
- root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + builtin
- root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + builtin
- write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + builtin
- write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y') + builtin
+ if args.layer_type is not None:
+        net_params['layer_type'] = args.layer_type
+ if args.pos_enc is not None:
+ net_params['pos_enc'] = True if args.pos_enc=='True' else False
+ if args.pos_enc_dim is not None:
+ net_params['pos_enc_dim'] = int(args.pos_enc_dim)
+
+
+
+
+ # COLLAB
+ net_params['in_dim'] = dataset.graph.ndata['feat'].shape[-1]
+ net_params['in_dim_edge'] = dataset.graph.edata['feat'].shape[-1]
+ net_params['n_classes'] = 1 # binary prediction
+
+ root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
+ root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
+ write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
+ write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
if not os.path.exists(out_dir + 'results'):
@@ -419,9 +429,6 @@ def main():
-
-
-
main()
diff --git a/main_CSL_graph_classification.ipynb b/main_CSL_graph_classification.ipynb
new file mode 100644
index 000000000..8698e7627
--- /dev/null
+++ b/main_CSL_graph_classification.ipynb
@@ -0,0 +1,968 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Main Driver Notebook for Training Graph NNs on CSL Dataset"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### MODELS\n",
+ "- GatedGCN \n",
+ "- GCN \n",
+ "- GAT \n",
+ "- GraphSage \n",
+ "- GIN \n",
+ "- MoNet \n",
+ "- MLP \n",
+ "- RingGNN \n",
+ "- 3WLGNN\n",
+ "\n",
+ "### DATASET\n",
+ "- CSL (Circular Skip Link) Graphs || [Source](https://github.com/PurdueMINDS/RelationalPooling/) \n",
+ "\n",
+ "### TASK\n",
+ "- Graph Classification"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\"\"\"\n",
+ " IMPORTING LIBS\n",
+ "\"\"\"\n",
+ "import dgl\n",
+ "\n",
+ "import numpy as np\n",
+ "import os\n",
+ "import socket\n",
+ "import time\n",
+ "import random\n",
+ "import glob\n",
+ "import argparse, json\n",
+ "\n",
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.nn.functional as F\n",
+ "\n",
+ "import torch.optim as optim\n",
+ "from torch.utils.data import DataLoader\n",
+ "\n",
+ "from tensorboardX import SummaryWriter\n",
+ "from tqdm import tqdm\n",
+ "\n",
+ "class DotDict(dict):\n",
+ " def __init__(self, **kwds):\n",
+ " self.update(kwds)\n",
+ " self.__dict__ = self\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "True\n"
+ ]
+ }
+ ],
+ "source": [
+ "# \"\"\"\n",
+ "# AUTORELOAD IPYTHON EXTENSION FOR RELOADING IMPORTED MODULES\n",
+ "# \"\"\"\n",
+ "\n",
+ "def in_ipynb():\n",
+ " try:\n",
+ " cfg = get_ipython().config \n",
+ " return True\n",
+ " except NameError:\n",
+ " return False\n",
+ " \n",
+ "notebook_mode = in_ipynb()\n",
+ "print(notebook_mode)\n",
+ "\n",
+ "if notebook_mode == True:\n",
+ " %load_ext autoreload\n",
+ " %autoreload 2\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\"\"\"\n",
+ " IMPORTING CUSTOM MODULES/METHODS\n",
+ "\"\"\"\n",
+ "\n",
+ "from nets.CSL_graph_classification.load_net import gnn_model # import GNNs\n",
+ "from data.data import LoadData # import dataset"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\"\"\"\n",
+ " GPU Setup\n",
+ "\"\"\"\n",
+ "def gpu_setup(use_gpu, gpu_id):\n",
+ " os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n",
+ " os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id) \n",
+ "\n",
+ " if torch.cuda.is_available() and use_gpu:\n",
+ " print('cuda available with GPU:',torch.cuda.get_device_name(0))\n",
+ " device = torch.device(\"cuda\")\n",
+ " else:\n",
+ " print('cuda not available')\n",
+ " device = torch.device(\"cpu\")\n",
+ " return device\n",
+ "\n",
+ "\n",
+ "use_gpu = True\n",
+ "gpu_id = -1\n",
+ "device = None\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[I] Loading data (notebook) ...\n",
+ "[I] Preparing Circular Skip Link Graphs v4 ...\n",
+ "[I] Finished preparation after 0.1282s\n",
+ "[!] Dataset: CSL\n",
+ "Time taken: 0.1386s\n",
+ "[I] Finished loading.\n"
+ ]
+ }
+ ],
+ "source": [
+ "# \"\"\"\n",
+ "# USER CONTROLS\n",
+ "# \"\"\"\n",
+ "if notebook_mode == True:\n",
+ " \n",
+ " #MODEL_NAME = 'GatedGCN'\n",
+ " #MODEL_NAME = 'MoNet'\n",
+ " #MODEL_NAME = 'GCN'\n",
+ " #MODEL_NAME = 'GAT'\n",
+ " #MODEL_NAME = 'GraphSage'\n",
+ " #MODEL_NAME = 'DiffPool'\n",
+ " #MODEL_NAME = 'MLP'\n",
+ " MODEL_NAME = '3WLGNN'\n",
+ "\n",
+ " DATASET_NAME = 'CSL'\n",
+ " out_dir = 'out/CSL_graph_classification/'\n",
+ " root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
+ " root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
+ "\n",
+ " print(\"[I] Loading data (notebook) ...\")\n",
+ " dataset = LoadData(DATASET_NAME)\n",
+ " trainset, valset, testset = dataset.train, dataset.val, dataset.test\n",
+ " print(\"[I] Finished loading.\")\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n",
+ "# MODEL_NAME = 'GatedGCN'\n",
+ "# MODEL_NAME = 'GCN'\n",
+ "# MODEL_NAME = 'GAT' \n",
+ "# MODEL_NAME = 'GraphSage'\n",
+ "# MODEL_NAME = 'MLP'\n",
+ "# MODEL_NAME = 'GIN'\n",
+ "# MODEL_NAME = 'MoNet'\n",
+ "# MODEL_NAME = 'RingGNN'\n",
+ "MODEL_NAME = '3WLGNN'\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# \"\"\"\n",
+ "# PARAMETERS\n",
+ "# \"\"\"\n",
+ "if notebook_mode == True:\n",
+ "\n",
+ " n_heads = -1\n",
+ " edge_feat = False\n",
+ " pseudo_dim_MoNet = -1\n",
+ " kernel = -1\n",
+ " gnn_per_block = -1\n",
+ " embedding_dim = -1\n",
+ " pool_ratio = -1\n",
+ " n_mlp_GIN = -1\n",
+ " gated = False\n",
+ " self_loop = False\n",
+ " #self_loop = True\n",
+ " max_time = 48\n",
+ " residual = True \n",
+ " layer_norm = True\n",
+ " batch_norm = True\n",
+ " pos_enc_dim=20\n",
+ " \n",
+ "\n",
+ " if MODEL_NAME == 'GatedGCN':\n",
+ " seed=41; epochs=1000; batch_size=5; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " L=4; hidden_dim=70; out_dim=hidden_dim; dropout=0.0; readout='sum'\n",
+ " init_lr=5e-4; lr_reduce_factor=0.5; lr_schedule_patience=1; pos_enc_dim=20; batch_size=5; # v1\n",
+ " init_lr=5e-4; lr_reduce_factor=0.5; lr_schedule_patience=10; pos_enc_dim=20; batch_size=5; # v2\n",
+ " \n",
+ " if MODEL_NAME == 'GCN':\n",
+ " seed=41; epochs=1000; batch_size=5; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " L=4; hidden_dim=146; out_dim=hidden_dim; dropout=0.0; readout='sum'\n",
+ "\n",
+ " if MODEL_NAME == 'GAT':\n",
+ " seed=41; epochs=1000; batch_size=50; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " L=4; n_heads=8; hidden_dim=18; out_dim=n_heads*hidden_dim; dropout=0.0; readout='sum'\n",
+ " print('True hidden dim:',out_dim)\n",
+ "\n",
+ " if MODEL_NAME == 'GraphSage':\n",
+ " seed=41; epochs=1000; batch_size=50; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " L=4; hidden_dim=90; out_dim=hidden_dim; dropout=0.0; readout='sum'\n",
+ "\n",
+ " if MODEL_NAME == 'MLP':\n",
+ " seed=41; epochs=1000; batch_size=50; init_lr=5e-4; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " L=4; hidden_dim=145; out_dim=hidden_dim; dropout=0.0; readout='sum'\n",
+ " \n",
+ " if MODEL_NAME == 'GIN':\n",
+ " seed=41; epochs=1000; batch_size=50; init_lr=5e-4; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " L=4; hidden_dim=110; out_dim=hidden_dim; dropout=0.0; readout='sum'\n",
+ " n_mlp_GIN = 2; learn_eps_GIN=True; neighbor_aggr_GIN='sum'\n",
+ "\n",
+ " if MODEL_NAME == 'MoNet':\n",
+ " seed=41; epochs=1000; batch_size=50; init_lr=5e-4; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " L=4; hidden_dim=90; out_dim=hidden_dim; dropout=0.0; readout='sum'\n",
+ " pseudo_dim_MoNet=2; kernel=3;\n",
+ " \n",
+ " if MODEL_NAME == 'RingGNN':\n",
+ " seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " #L=4; hidden_dim=145; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
+ " L=2; hidden_dim=37; out_dim=hidden_dim; dropout=0.0; edge_feat=False\n",
+ " residual=False; layer_norm=False; batch_norm=False\n",
+ " \n",
+ " if MODEL_NAME == '3WLGNN':\n",
+ " seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " #L=4; hidden_dim=145; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
+ " L=3; hidden_dim=78; out_dim=hidden_dim; dropout=0.0; edge_feat=False\n",
+ " residual=False; layer_norm=False; batch_norm=False\n",
+ " \n",
+ " \n",
+ " # DEV\n",
+ " #epochs=10\n",
+ " \n",
+ " \n",
+ "\n",
+ " # generic new_params\n",
+ " net_params = {}\n",
+ " net_params['device'] = device\n",
+ " net_params['num_node_type'] = dataset.all.num_node_type\n",
+ " net_params['num_edge_type'] = dataset.all.num_edge_type\n",
+ " net_params['gated'] = False # for mlpnet baseline\n",
+ " net_params['residual'] = residual\n",
+ " net_params['hidden_dim'] = hidden_dim\n",
+ " net_params['out_dim'] = out_dim\n",
+ " num_classes = len(np.unique(dataset.all.graph_labels))\n",
+ " net_params['n_classes'] = num_classes\n",
+ " net_params['n_heads'] = n_heads\n",
+ " net_params['L'] = L # min L should be 2\n",
+ " net_params['readout'] = \"mean\"\n",
+ " net_params['layer_norm'] = layer_norm \n",
+ " net_params['batch_norm'] = batch_norm\n",
+ " net_params['in_feat_dropout'] = 0.0\n",
+ " net_params['dropout'] = 0.0\n",
+ " net_params['edge_feat'] = edge_feat\n",
+ " net_params['self_loop'] = self_loop\n",
+ "\n",
+ " # specific for MoNet\n",
+ " net_params['pseudo_dim_MoNet'] = pseudo_dim_MoNet\n",
+ " net_params['kernel'] = kernel\n",
+ " \n",
+ " # specific for GIN\n",
+ " net_params['n_mlp_GIN'] = n_mlp_GIN\n",
+ " net_params['learn_eps_GIN'] = True\n",
+ " net_params['neighbor_aggr_GIN'] = 'sum'\n",
+ " \n",
+ " # specific for graphsage\n",
+ " net_params['sage_aggregator'] = 'meanpool' \n",
+ " net_params['sage_aggregator'] = 'maxpool' \n",
+ "\n",
+ " # specific for RingGNN\n",
+ " net_params['radius'] = 2\n",
+ " run = 0\n",
+ " num_nodes_train = [trainset[run][i][0].number_of_nodes() for i in range(len(trainset))]\n",
+ " num_nodes_test = [testset[run][i][0].number_of_nodes() for i in range(len(testset))]\n",
+ " num_nodes = num_nodes_train + num_nodes_test\n",
+ " net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))\n",
+ " net_params['in_dim'] = pos_enc_dim\n",
+ " \n",
+ " # specific for 3WLGNN\n",
+ " net_params['depth_of_mlp'] = 2\n",
+ " net_params['in_dim'] = pos_enc_dim\n",
+ " \n",
+ " # specific for pos_enc_dim\n",
+ " net_params['pos_enc'] = True\n",
+ " net_params['pos_enc_dim'] = pos_enc_dim\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "MODEL DETAILS:\n",
+ "\n",
+ "MODEL/Total parameters: 3WLGNN 102054\n"
+ ]
+ }
+ ],
+ "source": [
+ "\n",
+ "\"\"\"\n",
+ " VIEWING MODEL CONFIG AND PARAMS\n",
+ "\"\"\"\n",
+ "def view_model_param(MODEL_NAME, net_params):\n",
+ " model = gnn_model(MODEL_NAME, net_params)\n",
+ " total_param = 0\n",
+ " print(\"MODEL DETAILS:\\n\")\n",
+ " #print(model)\n",
+ " for param in model.parameters():\n",
+ " # print(param.data.size())\n",
+ " total_param += np.prod(list(param.data.size()))\n",
+ " print('MODEL/Total parameters:', MODEL_NAME, total_param)\n",
+ " return total_param\n",
+ "\n",
+ "if notebook_mode == True:\n",
+ " view_model_param(MODEL_NAME, net_params)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\"\"\"\n",
+ " TRAINING CODE\n",
+ "\"\"\"\n",
+ "\n",
+ "def train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs):\n",
+ " avg_test_acc = []\n",
+ " avg_train_acc = []\n",
+ " avg_epochs = []\n",
+ "\n",
+ " t0 = time.time()\n",
+ " per_epoch_time = []\n",
+ "\n",
+ " dataset = LoadData(DATASET_NAME)\n",
+ " \n",
+ " if MODEL_NAME in ['GCN', 'GAT']:\n",
+ " if net_params['self_loop']:\n",
+ " print(\"[!] Adding graph self-loops for GCN/GAT models (central node trick).\")\n",
+ " dataset._add_self_loops()\n",
+ "\n",
+ " if net_params['pos_enc']:\n",
+ " print(\"[!] Adding graph positional encoding.\")\n",
+ " dataset._add_positional_encodings(net_params['pos_enc_dim'])\n",
+ " \n",
+ " trainset, valset, testset = dataset.train, dataset.val, dataset.test\n",
+ " \n",
+ " root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs\n",
+ " device = net_params['device']\n",
+ " \n",
+ " # Write the network and optimization hyper-parameters in folder config/\n",
+ " with open(write_config_file + '.txt', 'w') as f:\n",
+ " f.write(\"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n\\nTotal Parameters: {}\\n\\n\"\"\"\\\n",
+ " .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))\n",
+ " \n",
+ " # At any point you can hit Ctrl + C to break out of training early.\n",
+ " try:\n",
+ " for split_number in range(5):\n",
+ " \n",
+ " t0_split = time.time()\n",
+ " log_dir = os.path.join(root_log_dir, \"RUN_\" + str(split_number))\n",
+ " writer = SummaryWriter(log_dir=log_dir)\n",
+ "\n",
+ " # setting seeds\n",
+ " random.seed(params['seed'])\n",
+ " np.random.seed(params['seed'])\n",
+ " torch.manual_seed(params['seed'])\n",
+ " if device.type == 'cuda':\n",
+ " torch.cuda.manual_seed(params['seed'])\n",
+ "\n",
+ " print(\"RUN NUMBER: \", split_number)\n",
+ " trainset, valset, testset = dataset.train[split_number], dataset.val[split_number], dataset.test[split_number]\n",
+ " print(\"Training Graphs: \", len(trainset))\n",
+ " print(\"Validation Graphs: \", len(valset))\n",
+ " print(\"Test Graphs: \", len(testset))\n",
+ " print(\"Number of Classes: \", net_params['n_classes'])\n",
+ "\n",
+ " model = gnn_model(MODEL_NAME, net_params)\n",
+ " model = model.to(device)\n",
+ " optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])\n",
+ " scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',\n",
+ " factor=params['lr_reduce_factor'],\n",
+ " patience=params['lr_schedule_patience'],\n",
+ " verbose=True)\n",
+ "\n",
+ " epoch_train_losses, epoch_val_losses = [], []\n",
+ " epoch_train_accs, epoch_val_accs = [], [] \n",
+ "\n",
+ " # batching exception for Diffpool\n",
+ " drop_last = True if MODEL_NAME == 'DiffPool' else False\n",
+ " # drop_last = False\n",
+ "\n",
+ " \n",
+ " if MODEL_NAME in ['RingGNN', '3WLGNN']:\n",
+ " # import train functions specific for WL-GNNs\n",
+ " from train.train_CSL_graph_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network\n",
+ " from functools import partial # util function to pass pos_enc flag to collate function\n",
+ "\n",
+ " train_loader = DataLoader(trainset, shuffle=True, collate_fn=partial(dataset.collate_dense_gnn, pos_enc=net_params['pos_enc']))\n",
+ " val_loader = DataLoader(valset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, pos_enc=net_params['pos_enc']))\n",
+ " test_loader = DataLoader(testset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, pos_enc=net_params['pos_enc']))\n",
+ "\n",
+ " else:\n",
+ " # import train functions for all other GCNs\n",
+ " from train.train_CSL_graph_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network\n",
+ "\n",
+ " train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)\n",
+ " val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
+ " test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
+ "\n",
+ " \n",
+ " with tqdm(range(params['epochs'])) as t:\n",
+ " for epoch in t:\n",
+ "\n",
+ " t.set_description('Epoch %d' % epoch) \n",
+ "\n",
+ " start = time.time()\n",
+ " \n",
+ " if MODEL_NAME in ['RingGNN', '3WLGNN']: # since different batch training function for dense GNNs\n",
+ " epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])\n",
+ " else: # for all other models common train function\n",
+ " epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)\n",
+ " \n",
+ " #epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)\n",
+ " epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)\n",
+ "\n",
+ " _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)\n",
+ " \n",
+ " epoch_train_losses.append(epoch_train_loss)\n",
+ " epoch_val_losses.append(epoch_val_loss)\n",
+ " epoch_train_accs.append(epoch_train_acc)\n",
+ " epoch_val_accs.append(epoch_val_acc)\n",
+ "\n",
+ " writer.add_scalar('train/_loss', epoch_train_loss, epoch)\n",
+ " writer.add_scalar('val/_loss', epoch_val_loss, epoch)\n",
+ " writer.add_scalar('train/_acc', epoch_train_acc, epoch)\n",
+ " writer.add_scalar('val/_acc', epoch_val_acc, epoch)\n",
+ " writer.add_scalar('test/_acc', epoch_test_acc, epoch)\n",
+ " writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)\n",
+ " \n",
+ " epoch_train_acc = 100.* epoch_train_acc\n",
+ " epoch_test_acc = 100.* epoch_test_acc\n",
+ " \n",
+ " t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],\n",
+ " train_loss=epoch_train_loss, val_loss=epoch_val_loss,\n",
+ " train_acc=epoch_train_acc, val_acc=epoch_val_acc,\n",
+ " test_acc=epoch_test_acc) \n",
+ "\n",
+ " per_epoch_time.append(time.time()-start)\n",
+ "\n",
+ " # Saving checkpoint\n",
+ " ckpt_dir = os.path.join(root_ckpt_dir, \"RUN_\" + str(split_number))\n",
+ " if not os.path.exists(ckpt_dir):\n",
+ " os.makedirs(ckpt_dir)\n",
+ " torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + \"/epoch_\" + str(epoch)))\n",
+ "\n",
+ " files = glob.glob(ckpt_dir + '/*.pkl')\n",
+ " for file in files:\n",
+ " epoch_nb = file.split('_')[-1]\n",
+ " epoch_nb = int(epoch_nb.split('.')[0])\n",
+ " if epoch_nb < epoch-1:\n",
+ " os.remove(file)\n",
+ "\n",
+ " scheduler.step(epoch_val_loss)\n",
+ "\n",
+ " if optimizer.param_groups[0]['lr'] < params['min_lr']:\n",
+ " print(\"\\n!! LR EQUAL TO MIN LR SET.\")\n",
+ " break\n",
+ " \n",
+ " # Stop training after params['max_time'] hours\n",
+ " if time.time()-t0_split > params['max_time']*3600/10: # Dividing max_time by 10, since there are 10 runs in TUs\n",
+ " print('-' * 89)\n",
+ " print(\"Max_time for one train-val-test split experiment elapsed {:.3f} hours, so stopping\".format(params['max_time']/10))\n",
+ " break\n",
+ "\n",
+ " _, test_acc = evaluate_network(model, device, test_loader, epoch) \n",
+ " _, train_acc = evaluate_network(model, device, train_loader, epoch) \n",
+ " avg_test_acc.append(test_acc) \n",
+ " avg_train_acc.append(train_acc)\n",
+ " avg_epochs.append(epoch)\n",
+ "\n",
+ " print(\"Test Accuracy [LAST EPOCH]: {:.4f}\".format(test_acc))\n",
+ " print(\"Train Accuracy [LAST EPOCH]: {:.4f}\".format(train_acc))\n",
+ " \n",
+ " except KeyboardInterrupt:\n",
+ " print('-' * 89)\n",
+ " print('Exiting from training early because of KeyboardInterrupt')\n",
+ " \n",
+ " \n",
+ " print(\"TOTAL TIME TAKEN: {:.4f}hrs\".format((time.time()-t0)/3600))\n",
+ " print(\"AVG TIME PER EPOCH: {:.4f}s\".format(np.mean(per_epoch_time)))\n",
+ "\n",
+ " # Final test accuracy value averaged over 5-fold\n",
+ " print(\"\"\"\\n\\n\\nFINAL RESULTS\\n\\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}\"\"\"\\\n",
+ " .format(np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100))\n",
+ " print(\"\\nAll splits Test Accuracies:\\n\", avg_test_acc)\n",
+ " print(\"\"\"\\n\\n\\nFINAL RESULTS\\n\\nTRAIN ACCURACY averaged: {:.4f} with s.d. {:.4f}\"\"\"\\\n",
+ " .format(np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100))\n",
+ " print(\"\\nAll splits Train Accuracies:\\n\", avg_train_acc)\n",
+ "\n",
+ " writer.close()\n",
+ "\n",
+ " \"\"\"\n",
+ " Write the results in out/results folder\n",
+ " \"\"\"\n",
+ " with open(write_file_name + '.txt', 'w') as f:\n",
+ " f.write(\"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n{}\\n\\nTotal Parameters: {}\\n\\n\n",
+ " FINAL RESULTS\\nTEST ACCURACY averaged: {:.3f}\\n with test acc s.d. {:.3f}\\nTRAIN ACCURACY averaged: {:.3f}\\n with train s.d. {:.3f}\\n\\n\n",
+ " Convergence Time (Epochs): {:.3f}\\nTotal Time Taken: {:.3f} hrs\\nAverage Time Per Epoch: {:.3f} s\\n\\n\\nAll Splits Test Accuracies: {}\\n\\nAll Splits Train Accuracies: {}\"\"\"\\\n",
+ " .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],\n",
+ " np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100,\n",
+ " np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100, np.mean(np.array(avg_epochs)),\n",
+ " (time.time()-t0)/3600, np.mean(per_epoch_time), avg_test_acc, avg_train_acc))\n",
+ " \n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Convert main_CSL_graph_classification.ipynb to main_CSL_graph_classification.py\n",
+ "Clean main_CSL_graph_classification.py\n",
+ "Done. \n",
+ "[I] Preparing Circular Skip Link Graphs v4 ...\n",
+ "[I] Finished preparation after 0.1407s\n",
+ "[!] Dataset: CSL\n",
+ "Time taken: 0.1521s\n",
+ "cuda not available\n",
+ "MODEL DETAILS:\n",
+ "\n",
+ "MODEL/Total parameters: 3WLGNN 102054\n",
+ "[I] Preparing Circular Skip Link Graphs v4 ...\n",
+ "[I] Finished preparation after 0.1089s\n",
+ "[!] Dataset: CSL\n",
+ "Time taken: 0.1152s\n",
+ "[!] Adding graph positional encoding.\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Epoch 0: 0%| | 0/1000 [00:00, ?it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "RUN NUMBER: 0\n",
+ "Training Graphs: 90\n",
+ "Validation Graphs: 30\n",
+ "Test Graphs: 30\n",
+ "Number of Classes: 10\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Epoch 0: 0%| | 0/1000 [00:02, ?it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "-----------------------------------------------------------------------------------------\n",
+ "Exiting from training early because of KeyboardInterrupt\n",
+ "TOTAL TIME TAKEN: 0.0011hrs\n",
+ "AVG TIME PER EPOCH: nans\n",
+ "\n",
+ "\n",
+ "\n",
+ "FINAL RESULTS\n",
+ "\n",
+ "TEST ACCURACY averaged: nan with s.d. nan\n",
+ "\n",
+ "All splits Test Accuracies:\n",
+ " []\n",
+ "\n",
+ "\n",
+ "\n",
+ "FINAL RESULTS\n",
+ "\n",
+ "TRAIN ACCURACY averaged: nan with s.d. nan\n",
+ "\n",
+ "All splits Train Accuracies:\n",
+ " []\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "def main(notebook_mode=False,config=None):\n",
+ " \n",
+ " \"\"\"\n",
+ " USER CONTROLS\n",
+ " \"\"\"\n",
+ " \n",
+ " # terminal mode\n",
+ " if notebook_mode==False:\n",
+ " \n",
+ " parser = argparse.ArgumentParser()\n",
+ " parser.add_argument('--config', help=\"Please give a config.json file with training/model/data/param details\")\n",
+ " parser.add_argument('--gpu_id', help=\"Please give a value for gpu id\")\n",
+ " parser.add_argument('--model', help=\"Please give a value for model name\")\n",
+ " parser.add_argument('--dataset', help=\"Please give a value for dataset name\")\n",
+ " parser.add_argument('--out_dir', help=\"Please give a value for out_dir\")\n",
+ " parser.add_argument('--seed', help=\"Please give a value for seed\")\n",
+ " parser.add_argument('--epochs', help=\"Please give a value for epochs\")\n",
+ " parser.add_argument('--batch_size', help=\"Please give a value for batch_size\")\n",
+ " parser.add_argument('--init_lr', help=\"Please give a value for init_lr\")\n",
+ " parser.add_argument('--lr_reduce_factor', help=\"Please give a value for lr_reduce_factor\")\n",
+ " parser.add_argument('--lr_schedule_patience', help=\"Please give a value for lr_schedule_patience\")\n",
+ " parser.add_argument('--min_lr', help=\"Please give a value for min_lr\")\n",
+ " parser.add_argument('--weight_decay', help=\"Please give a value for weight_decay\")\n",
+ " parser.add_argument('--print_epoch_interval', help=\"Please give a value for print_epoch_interval\") \n",
+ " parser.add_argument('--L', help=\"Please give a value for L\")\n",
+ " parser.add_argument('--hidden_dim', help=\"Please give a value for hidden_dim\")\n",
+ " parser.add_argument('--out_dim', help=\"Please give a value for out_dim\")\n",
+ " parser.add_argument('--residual', help=\"Please give a value for residual\")\n",
+ " parser.add_argument('--edge_feat', help=\"Please give a value for edge_feat\")\n",
+ " parser.add_argument('--readout', help=\"Please give a value for readout\")\n",
+ " parser.add_argument('--kernel', help=\"Please give a value for kernel\")\n",
+ " parser.add_argument('--n_heads', help=\"Please give a value for n_heads\")\n",
+ " parser.add_argument('--gated', help=\"Please give a value for gated\")\n",
+ " parser.add_argument('--in_feat_dropout', help=\"Please give a value for in_feat_dropout\")\n",
+ " parser.add_argument('--dropout', help=\"Please give a value for dropout\")\n",
+ " parser.add_argument('--graph_norm', help=\"Please give a value for graph_norm\")\n",
+ " parser.add_argument('--batch_norm', help=\"Please give a value for batch_norm\")\n",
+ " parser.add_argument('--sage_aggregator', help=\"Please give a value for sage_aggregator\")\n",
+ " parser.add_argument('--data_mode', help=\"Please give a value for data_mode\")\n",
+ " parser.add_argument('--num_pool', help=\"Please give a value for num_pool\")\n",
+ " parser.add_argument('--gnn_per_block', help=\"Please give a value for gnn_per_block\")\n",
+ " parser.add_argument('--embedding_dim', help=\"Please give a value for embedding_dim\")\n",
+ " parser.add_argument('--pool_ratio', help=\"Please give a value for pool_ratio\")\n",
+ " parser.add_argument('--linkpred', help=\"Please give a value for linkpred\")\n",
+ " parser.add_argument('--cat', help=\"Please give a value for cat\")\n",
+ " parser.add_argument('--self_loop', help=\"Please give a value for self_loop\")\n",
+ " parser.add_argument('--max_time', help=\"Please give a value for max_time\")\n",
+ " parser.add_argument('--pos_enc_dim', help=\"Please give a value for pos_enc_dim\")\n",
+ " args = parser.parse_args()\n",
+ " with open(args.config) as f:\n",
+ " config = json.load(f)\n",
+ " \n",
+ "\n",
+ " # device\n",
+ " if args.gpu_id is not None:\n",
+ " config['gpu']['id'] = int(args.gpu_id)\n",
+ " config['gpu']['use'] = True\n",
+ " device = gpu_setup(config['gpu']['use'], config['gpu']['id'])\n",
+ "\n",
+ " # model, dataset, out_dir\n",
+ " if args.model is not None:\n",
+ " MODEL_NAME = args.model\n",
+ " else:\n",
+ " MODEL_NAME = config['model']\n",
+ " if args.dataset is not None:\n",
+ " DATASET_NAME = args.dataset\n",
+ " else:\n",
+ " DATASET_NAME = config['dataset']\n",
+ " dataset = LoadData(DATASET_NAME)\n",
+ " if args.out_dir is not None:\n",
+ " out_dir = args.out_dir\n",
+ " else:\n",
+ " out_dir = config['out_dir']\n",
+ "\n",
+ " # parameters\n",
+ " params = config['params']\n",
+ " if args.seed is not None:\n",
+ " params['seed'] = int(args.seed)\n",
+ " if args.epochs is not None:\n",
+ " params['epochs'] = int(args.epochs)\n",
+ " if args.batch_size is not None:\n",
+ " params['batch_size'] = int(args.batch_size)\n",
+ " if args.init_lr is not None:\n",
+ " params['init_lr'] = float(args.init_lr)\n",
+ " if args.lr_reduce_factor is not None:\n",
+ " params['lr_reduce_factor'] = float(args.lr_reduce_factor)\n",
+ " if args.lr_schedule_patience is not None:\n",
+ " params['lr_schedule_patience'] = int(args.lr_schedule_patience)\n",
+ " if args.min_lr is not None:\n",
+ " params['min_lr'] = float(args.min_lr)\n",
+ " if args.weight_decay is not None:\n",
+ " params['weight_decay'] = float(args.weight_decay)\n",
+ " if args.print_epoch_interval is not None:\n",
+ " params['print_epoch_interval'] = int(args.print_epoch_interval)\n",
+ " if args.max_time is not None:\n",
+ " params['max_time'] = float(args.max_time)\n",
+ "\n",
+ " # network parameters\n",
+ " net_params = config['net_params']\n",
+ " net_params['device'] = device\n",
+ " net_params['gpu_id'] = config['gpu']['id']\n",
+ " net_params['batch_size'] = params['batch_size']\n",
+ " if args.L is not None:\n",
+ " net_params['L'] = int(args.L)\n",
+ " if args.hidden_dim is not None:\n",
+ " net_params['hidden_dim'] = int(args.hidden_dim)\n",
+ " if args.out_dim is not None:\n",
+ " net_params['out_dim'] = int(args.out_dim) \n",
+ " if args.residual is not None:\n",
+ " net_params['residual'] = True if args.residual=='True' else False\n",
+ " if args.edge_feat is not None:\n",
+ " net_params['edge_feat'] = True if args.edge_feat=='True' else False\n",
+ " if args.readout is not None:\n",
+ " net_params['readout'] = args.readout\n",
+ " if args.kernel is not None:\n",
+ " net_params['kernel'] = int(args.kernel)\n",
+ " if args.n_heads is not None:\n",
+ " net_params['n_heads'] = int(args.n_heads)\n",
+ " if args.gated is not None:\n",
+ " net_params['gated'] = True if args.gated=='True' else False\n",
+ " if args.in_feat_dropout is not None:\n",
+ " net_params['in_feat_dropout'] = float(args.in_feat_dropout)\n",
+ " if args.dropout is not None:\n",
+ " net_params['dropout'] = float(args.dropout)\n",
+ " if args.graph_norm is not None:\n",
+ " net_params['graph_norm'] = True if args.graph_norm=='True' else False\n",
+ " if args.batch_norm is not None:\n",
+ " net_params['batch_norm'] = True if args.batch_norm=='True' else False\n",
+ " if args.sage_aggregator is not None:\n",
+ " net_params['sage_aggregator'] = args.sage_aggregator\n",
+ " if args.data_mode is not None:\n",
+ " net_params['data_mode'] = args.data_mode\n",
+ " if args.num_pool is not None:\n",
+ " net_params['num_pool'] = int(args.num_pool)\n",
+ " if args.gnn_per_block is not None:\n",
+ " net_params['gnn_per_block'] = int(args.gnn_per_block)\n",
+ " if args.embedding_dim is not None:\n",
+ " net_params['embedding_dim'] = int(args.embedding_dim)\n",
+ " if args.pool_ratio is not None:\n",
+ " net_params['pool_ratio'] = float(args.pool_ratio)\n",
+ " if args.linkpred is not None:\n",
+ " net_params['linkpred'] = True if args.linkpred=='True' else False\n",
+ " if args.cat is not None:\n",
+ " net_params['cat'] = True if args.cat=='True' else False\n",
+ " if args.self_loop is not None:\n",
+ " net_params['self_loop'] = True if args.self_loop=='True' else False\n",
+ " if args.pos_enc_dim is not None:\n",
+ " net_params['pos_enc_dim'] = int(args.pos_enc_dim)\n",
+ "\n",
+ " \n",
+ " \n",
+ " # notebook mode\n",
+ " if notebook_mode:\n",
+ " \n",
+ " # parameters\n",
+ " params = config['params']\n",
+ " \n",
+ " # dataset\n",
+ " DATASET_NAME = config['dataset']\n",
+ " dataset = LoadData(DATASET_NAME)\n",
+ " \n",
+ " # device\n",
+ " device = gpu_setup(config['gpu']['use'], config['gpu']['id'])\n",
+ " out_dir = config['out_dir']\n",
+ " \n",
+ " # GNN model\n",
+ " MODEL_NAME = config['model']\n",
+ " \n",
+ " # network parameters\n",
+ " net_params = config['net_params']\n",
+ " net_params['device'] = device\n",
+ " net_params['gpu_id'] = config['gpu']['id']\n",
+ " net_params['batch_size'] = params['batch_size']\n",
+ " \n",
+ " \n",
+ " # CSL\n",
+ " net_params['num_node_type'] = dataset.all.num_node_type\n",
+ " net_params['num_edge_type'] = dataset.all.num_edge_type\n",
+ " num_classes = len(np.unique(dataset.all.graph_labels))\n",
+ " net_params['n_classes'] = num_classes\n",
+ " \n",
+ " # RingGNN\n",
+ " if MODEL_NAME == 'RingGNN':\n",
+ " num_nodes_train = [dataset.train[0][i][0].number_of_nodes() for i in range(len(dataset.train))]\n",
+ " num_nodes_test = [dataset.test[0][i][0].number_of_nodes() for i in range(len(dataset.test))]\n",
+ " num_nodes = num_nodes_train + num_nodes_test\n",
+ " net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))\n",
+ " \n",
+ " # RingGNN, 3WLGNN\n",
+ " if MODEL_NAME in ['RingGNN', '3WLGNN']:\n",
+ " if net_params['pos_enc']:\n",
+ " net_params['in_dim'] = net_params['pos_enc_dim']\n",
+ " else:\n",
+ " net_params['in_dim'] = 1\n",
+ " \n",
+ " root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
+ " root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
+ " write_file_name = out_dir + 'results/result_' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
+ " write_config_file = out_dir + 'configs/config_' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
+ " dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file\n",
+ "\n",
+ " if not os.path.exists(out_dir + 'results'):\n",
+ " os.makedirs(out_dir + 'results')\n",
+ " \n",
+ " if not os.path.exists(out_dir + 'configs'):\n",
+ " os.makedirs(out_dir + 'configs')\n",
+ "\n",
+ " net_params['total_param'] = view_model_param(MODEL_NAME, net_params)\n",
+ " train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs)\n",
+ "\n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ "if notebook_mode==True:\n",
+ " \n",
+ " config = {}\n",
+ " # gpu config\n",
+ " gpu = {}\n",
+ " gpu['use'] = use_gpu\n",
+ " gpu['id'] = gpu_id\n",
+ " config['gpu'] = gpu\n",
+ " # GNN model, dataset, out_dir\n",
+ " config['model'] = MODEL_NAME\n",
+ " config['dataset'] = DATASET_NAME\n",
+ " config['out_dir'] = out_dir\n",
+ " # parameters\n",
+ " params = {}\n",
+ " params['seed'] = seed\n",
+ " params['epochs'] = epochs\n",
+ " params['batch_size'] = batch_size\n",
+ " params['init_lr'] = init_lr\n",
+ " params['lr_reduce_factor'] = lr_reduce_factor \n",
+ " params['lr_schedule_patience'] = lr_schedule_patience\n",
+ " params['min_lr'] = min_lr\n",
+ " params['weight_decay'] = weight_decay\n",
+ " params['print_epoch_interval'] = 5\n",
+ " params['max_time'] = max_time\n",
+ " config['params'] = params\n",
+ " # network parameters\n",
+ " config['net_params'] = net_params\n",
+ " \n",
+ " # convert to .py format\n",
+ " from utils.cleaner_main import *\n",
+ " cleaner_main('main_CSL_graph_classification')\n",
+ " \n",
+ " main(True,config)\n",
+ " \n",
+ "else:\n",
+ " \n",
+ " main()\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/main_CSL_graph_classification.py b/main_CSL_graph_classification.py
new file mode 100644
index 000000000..599f86a8b
--- /dev/null
+++ b/main_CSL_graph_classification.py
@@ -0,0 +1,485 @@
+
+
+
+
+
+"""
+ IMPORTING LIBS
+"""
+import dgl
+
+import numpy as np
+import os
+import socket
+import time
+import random
+import glob
+import argparse, json
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import torch.optim as optim
+from torch.utils.data import DataLoader
+
+from tensorboardX import SummaryWriter
+from tqdm import tqdm
+
+class DotDict(dict):
+ def __init__(self, **kwds):
+ self.update(kwds)
+ self.__dict__ = self
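+        # keys become attributes: DotDict(lr=1e-3).lr == DotDict(lr=1e-3)['lr']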
+
+
+
+
+
+
+
+"""
+ IMPORTING CUSTOM MODULES/METHODS
+"""
+
+from nets.CSL_graph_classification.load_net import gnn_model # import GNNs
+from data.data import LoadData # import dataset
+
+
+
+
+"""
+ GPU Setup
+"""
+def gpu_setup(use_gpu, gpu_id):
+ os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
+ os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
+
+ if torch.cuda.is_available() and use_gpu:
+ print('cuda available with GPU:',torch.cuda.get_device_name(0))
+ device = torch.device("cuda")
+ else:
+ print('cuda not available')
+ device = torch.device("cpu")
+ return device
+
+
+
+
+
+
+
+
+
+
+"""
+ VIEWING MODEL CONFIG AND PARAMS
+"""
+def view_model_param(MODEL_NAME, net_params):
+ model = gnn_model(MODEL_NAME, net_params)
+ total_param = 0
+ print("MODEL DETAILS:\n")
+ #print(model)
+ for param in model.parameters():
+ # print(param.data.size())
+ total_param += np.prod(list(param.data.size()))
+ print('MODEL/Total parameters:', MODEL_NAME, total_param)
+ return total_param
+
+"""
+ TRAINING CODE
+"""
+
+def train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs):
+ avg_test_acc = []
+ avg_train_acc = []
+ avg_epochs = []
+
+ t0 = time.time()
+ per_epoch_time = []
+
+ dataset = LoadData(DATASET_NAME)
+
+ if MODEL_NAME in ['GCN', 'GAT']:
+ if net_params['self_loop']:
+ print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
+ dataset._add_self_loops()
+
+ if net_params['pos_enc']:
+ print("[!] Adding graph positional encoding.")
+ dataset._add_positional_encodings(net_params['pos_enc_dim'])
+
+ trainset, valset, testset = dataset.train, dataset.val, dataset.test
+
+ root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
+ device = net_params['device']
+
+ # Write the network and optimization hyper-parameters in folder config/
+ with open(write_config_file + '.txt', 'w') as f:
+ f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
+
+ # At any point you can hit Ctrl + C to break out of training early.
+ try:
+ for split_number in range(5):
+
+ t0_split = time.time()
+ log_dir = os.path.join(root_log_dir, "RUN_" + str(split_number))
+ writer = SummaryWriter(log_dir=log_dir)
+
+ # setting seeds
+ random.seed(params['seed'])
+ np.random.seed(params['seed'])
+ torch.manual_seed(params['seed'])
+ if device.type == 'cuda':
+ torch.cuda.manual_seed(params['seed'])
+
+ print("RUN NUMBER: ", split_number)
+ trainset, valset, testset = dataset.train[split_number], dataset.val[split_number], dataset.test[split_number]
+ print("Training Graphs: ", len(trainset))
+ print("Validation Graphs: ", len(valset))
+ print("Test Graphs: ", len(testset))
+ print("Number of Classes: ", net_params['n_classes'])
+
+ model = gnn_model(MODEL_NAME, net_params)
+ model = model.to(device)
+ optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
+ scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
+ factor=params['lr_reduce_factor'],
+ patience=params['lr_schedule_patience'],
+ verbose=True)
+
+ epoch_train_losses, epoch_val_losses = [], []
+ epoch_train_accs, epoch_val_accs = [], []
+
+ # batching exception for Diffpool
+ drop_last = True if MODEL_NAME == 'DiffPool' else False
+ # drop_last = False
+
+
+ if MODEL_NAME in ['RingGNN', '3WLGNN']:
+ # import train functions specific for WL-GNNs
+ from train.train_CSL_graph_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
+ from functools import partial # util function to pass pos_enc flag to collate function
+
+ train_loader = DataLoader(trainset, shuffle=True, collate_fn=partial(dataset.collate_dense_gnn, pos_enc=net_params['pos_enc']))
+ val_loader = DataLoader(valset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, pos_enc=net_params['pos_enc']))
+ test_loader = DataLoader(testset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, pos_enc=net_params['pos_enc']))
+
+ else:
+ # import train functions for all other GCNs
+ from train.train_CSL_graph_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
+
+ train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)
+ val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
+ test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
+
+
+ with tqdm(range(params['epochs'])) as t:
+ for epoch in t:
+
+ t.set_description('Epoch %d' % epoch)
+
+ start = time.time()
+
+ if MODEL_NAME in ['RingGNN', '3WLGNN']: # since different batch training function for dense GNNs
+ epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])
+ else: # for all other models common train function
+ epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
+
+ #epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
+ epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)
+
+ _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
+
+ epoch_train_losses.append(epoch_train_loss)
+ epoch_val_losses.append(epoch_val_loss)
+ epoch_train_accs.append(epoch_train_acc)
+ epoch_val_accs.append(epoch_val_acc)
+
+ writer.add_scalar('train/_loss', epoch_train_loss, epoch)
+ writer.add_scalar('val/_loss', epoch_val_loss, epoch)
+ writer.add_scalar('train/_acc', epoch_train_acc, epoch)
+ writer.add_scalar('val/_acc', epoch_val_acc, epoch)
+ writer.add_scalar('test/_acc', epoch_test_acc, epoch)
+ writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
+
+ epoch_train_acc = 100.* epoch_train_acc
+ epoch_test_acc = 100.* epoch_test_acc
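+                    # scaled to % for the tqdm display only; the TensorBoard logs above and the
+                    # epoch_*_accs lists keep the raw 0-1 scale (note val_acc is shown unscaled)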
+
+ t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
+ train_loss=epoch_train_loss, val_loss=epoch_val_loss,
+ train_acc=epoch_train_acc, val_acc=epoch_val_acc,
+ test_acc=epoch_test_acc)
+
+ per_epoch_time.append(time.time()-start)
+
+ # Saving checkpoint
+ ckpt_dir = os.path.join(root_ckpt_dir, "RUN_" + str(split_number))
+ if not os.path.exists(ckpt_dir):
+ os.makedirs(ckpt_dir)
+ torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
+
+ files = glob.glob(ckpt_dir + '/*.pkl')
+ for file in files:
+ epoch_nb = file.split('_')[-1]
+ epoch_nb = int(epoch_nb.split('.')[0])
+ if epoch_nb < epoch-1:
+ os.remove(file)
+
+ scheduler.step(epoch_val_loss)
+
+ if optimizer.param_groups[0]['lr'] < params['min_lr']:
+ print("\n!! LR EQUAL TO MIN LR SET.")
+ break
+
+ # Stop training after params['max_time'] hours
+                    if time.time()-t0_split > params['max_time']*3600/10: # per-split time budget of max_time/10 hours (inherited from the 10-run TU setup; CSL uses 5 splits)
+ print('-' * 89)
+ print("Max_time for one train-val-test split experiment elapsed {:.3f} hours, so stopping".format(params['max_time']/10))
+ break
+
+ _, test_acc = evaluate_network(model, device, test_loader, epoch)
+ _, train_acc = evaluate_network(model, device, train_loader, epoch)
+ avg_test_acc.append(test_acc)
+ avg_train_acc.append(train_acc)
+ avg_epochs.append(epoch)
+
+ print("Test Accuracy [LAST EPOCH]: {:.4f}".format(test_acc))
+ print("Train Accuracy [LAST EPOCH]: {:.4f}".format(train_acc))
+
+ except KeyboardInterrupt:
+ print('-' * 89)
+ print('Exiting from training early because of KeyboardInterrupt')
+
+
+ print("TOTAL TIME TAKEN: {:.4f}hrs".format((time.time()-t0)/3600))
+ print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
+
+    # Final test accuracy averaged over all cross-validation splits
+    print("""\n\n\nFINAL RESULTS\n\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}""".format(np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100))
+    print("\nAll splits Test Accuracies:\n", avg_test_acc)
+    print("""\nTRAIN ACCURACY averaged: {:.4f} with s.d. {:.4f}""".format(np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100))
+    print("\nAll splits Train Accuracies:\n", avg_train_acc)
+
+ writer.close()
+
+ """
+ Write the results in out/results folder
+ """
+ with open(write_file_name + '.txt', 'w') as f:
+ f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
+ FINAL RESULTS\nTEST ACCURACY averaged: {:.3f}\n with test acc s.d. {:.3f}\nTRAIN ACCURACY averaged: {:.3f}\n with train s.d. {:.3f}\n\n
+ Convergence Time (Epochs): {:.3f}\nTotal Time Taken: {:.3f} hrs\nAverage Time Per Epoch: {:.3f} s\n\n\nAll Splits Test Accuracies: {}\n\nAll Splits Train Accuracies: {}"""\
+ .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
+ np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100,
+ np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100, np.mean(np.array(avg_epochs)),
+ (time.time()-t0)/3600, np.mean(per_epoch_time), avg_test_acc, avg_train_acc))
+
+
+
+
+
+def main():
+ """
+ USER CONTROLS
+ """
+
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
+ parser.add_argument('--gpu_id', help="Please give a value for gpu id")
+ parser.add_argument('--model', help="Please give a value for model name")
+ parser.add_argument('--dataset', help="Please give a value for dataset name")
+ parser.add_argument('--out_dir', help="Please give a value for out_dir")
+ parser.add_argument('--seed', help="Please give a value for seed")
+ parser.add_argument('--epochs', help="Please give a value for epochs")
+ parser.add_argument('--batch_size', help="Please give a value for batch_size")
+ parser.add_argument('--init_lr', help="Please give a value for init_lr")
+ parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
+ parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
+ parser.add_argument('--min_lr', help="Please give a value for min_lr")
+ parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
+ parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
+ parser.add_argument('--L', help="Please give a value for L")
+ parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
+ parser.add_argument('--out_dim', help="Please give a value for out_dim")
+ parser.add_argument('--residual', help="Please give a value for residual")
+ parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
+ parser.add_argument('--readout', help="Please give a value for readout")
+ parser.add_argument('--kernel', help="Please give a value for kernel")
+ parser.add_argument('--n_heads', help="Please give a value for n_heads")
+ parser.add_argument('--gated', help="Please give a value for gated")
+ parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
+ parser.add_argument('--dropout', help="Please give a value for dropout")
+ parser.add_argument('--graph_norm', help="Please give a value for graph_norm")
+ parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
+ parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
+ parser.add_argument('--data_mode', help="Please give a value for data_mode")
+ parser.add_argument('--num_pool', help="Please give a value for num_pool")
+ parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
+ parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
+ parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
+ parser.add_argument('--linkpred', help="Please give a value for linkpred")
+ parser.add_argument('--cat', help="Please give a value for cat")
+ parser.add_argument('--self_loop', help="Please give a value for self_loop")
+ parser.add_argument('--max_time', help="Please give a value for max_time")
+ parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
+ args = parser.parse_args()
+ with open(args.config) as f:
+ config = json.load(f)
+
+ # device
+ if args.gpu_id is not None:
+ config['gpu']['id'] = int(args.gpu_id)
+ config['gpu']['use'] = True
+ device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
+ # model, dataset, out_dir
+ if args.model is not None:
+ MODEL_NAME = args.model
+ else:
+ MODEL_NAME = config['model']
+ if args.dataset is not None:
+ DATASET_NAME = args.dataset
+ else:
+ DATASET_NAME = config['dataset']
+ dataset = LoadData(DATASET_NAME)
+ if args.out_dir is not None:
+ out_dir = args.out_dir
+ else:
+ out_dir = config['out_dir']
+ # parameters
+ params = config['params']
+ if args.seed is not None:
+ params['seed'] = int(args.seed)
+ if args.epochs is not None:
+ params['epochs'] = int(args.epochs)
+ if args.batch_size is not None:
+ params['batch_size'] = int(args.batch_size)
+ if args.init_lr is not None:
+ params['init_lr'] = float(args.init_lr)
+ if args.lr_reduce_factor is not None:
+ params['lr_reduce_factor'] = float(args.lr_reduce_factor)
+ if args.lr_schedule_patience is not None:
+ params['lr_schedule_patience'] = int(args.lr_schedule_patience)
+ if args.min_lr is not None:
+ params['min_lr'] = float(args.min_lr)
+ if args.weight_decay is not None:
+ params['weight_decay'] = float(args.weight_decay)
+ if args.print_epoch_interval is not None:
+ params['print_epoch_interval'] = int(args.print_epoch_interval)
+ if args.max_time is not None:
+ params['max_time'] = float(args.max_time)
+ # network parameters
+ net_params = config['net_params']
+ net_params['device'] = device
+ net_params['gpu_id'] = config['gpu']['id']
+ net_params['batch_size'] = params['batch_size']
+ if args.L is not None:
+ net_params['L'] = int(args.L)
+ if args.hidden_dim is not None:
+ net_params['hidden_dim'] = int(args.hidden_dim)
+ if args.out_dim is not None:
+ net_params['out_dim'] = int(args.out_dim)
+    if args.residual is not None:
+        net_params['residual'] = (args.residual == 'True')
+    if args.edge_feat is not None:
+        net_params['edge_feat'] = (args.edge_feat == 'True')
+    if args.readout is not None:
+        net_params['readout'] = args.readout
+    if args.kernel is not None:
+        net_params['kernel'] = int(args.kernel)
+    if args.n_heads is not None:
+        net_params['n_heads'] = int(args.n_heads)
+    if args.gated is not None:
+        net_params['gated'] = (args.gated == 'True')
+    if args.in_feat_dropout is not None:
+        net_params['in_feat_dropout'] = float(args.in_feat_dropout)
+    if args.dropout is not None:
+        net_params['dropout'] = float(args.dropout)
+    if args.graph_norm is not None:
+        net_params['graph_norm'] = (args.graph_norm == 'True')
+    if args.batch_norm is not None:
+        net_params['batch_norm'] = (args.batch_norm == 'True')
+    if args.sage_aggregator is not None:
+        net_params['sage_aggregator'] = args.sage_aggregator
+    if args.data_mode is not None:
+        net_params['data_mode'] = args.data_mode
+    if args.num_pool is not None:
+        net_params['num_pool'] = int(args.num_pool)
+    if args.gnn_per_block is not None:
+        net_params['gnn_per_block'] = int(args.gnn_per_block)
+    if args.embedding_dim is not None:
+        net_params['embedding_dim'] = int(args.embedding_dim)
+    if args.pool_ratio is not None:
+        net_params['pool_ratio'] = float(args.pool_ratio)
+    if args.linkpred is not None:
+        net_params['linkpred'] = (args.linkpred == 'True')
+    if args.cat is not None:
+        net_params['cat'] = (args.cat == 'True')
+    if args.self_loop is not None:
+        net_params['self_loop'] = (args.self_loop == 'True')
+    if args.pos_enc_dim is not None:
+        net_params['pos_enc_dim'] = int(args.pos_enc_dim)
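The long `if args.x is not None` chain above could be folded into a small typed-override helper. A possible refactor, sketched here only as a suggestion (the helper name and the cast table are hypothetical, not part of this patch):

```python
def apply_overrides(target, args, casts):
    # copy each CLI flag that was actually given into `target`, applying its cast
    for name, cast in casts.items():
        value = getattr(args, name, None)
        if value is not None:
            target[name] = cast(value)

def str2bool(s):
    return s == 'True'   # argparse delivers strings, not booleans

apply_overrides(net_params, args, {
    'L': int, 'hidden_dim': int, 'out_dim': int, 'n_heads': int, 'pos_enc_dim': int,
    'in_feat_dropout': float, 'dropout': float, 'pool_ratio': float,
    'residual': str2bool, 'batch_norm': str2bool, 'self_loop': str2bool,
})
```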
+
+
+
+
+ # CSL
+ net_params['num_node_type'] = dataset.all.num_node_type
+ net_params['num_edge_type'] = dataset.all.num_edge_type
+ num_classes = len(np.unique(dataset.all.graph_labels))
+ net_params['n_classes'] = num_classes
+
+ # RingGNN
+ if MODEL_NAME == 'RingGNN':
+ num_nodes_train = [dataset.train[0][i][0].number_of_nodes() for i in range(len(dataset.train))]
+ num_nodes_test = [dataset.test[0][i][0].number_of_nodes() for i in range(len(dataset.test))]
+ num_nodes = num_nodes_train + num_nodes_test
+ net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
+
+ # RingGNN, 3WLGNN
+ if MODEL_NAME in ['RingGNN', '3WLGNN']:
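+        # dense WL-GNN input width: the positional-encoding dims if PEs are used, otherwise one scalar per node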
+ if net_params['pos_enc']:
+ net_params['in_dim'] = net_params['pos_enc_dim']
+ else:
+ net_params['in_dim'] = 1
+
+ root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
+ root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
+ write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
+ write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
+ dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
+
+ if not os.path.exists(out_dir + 'results'):
+ os.makedirs(out_dir + 'results')
+
+ if not os.path.exists(out_dir + 'configs'):
+ os.makedirs(out_dir + 'configs')
+
+ net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
+ train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/main_SBMs_node_classification.ipynb b/main_SBMs_node_classification.ipynb
index 90a85d759..c1114bc9d 100644
--- a/main_SBMs_node_classification.ipynb
+++ b/main_SBMs_node_classification.ipynb
@@ -19,6 +19,8 @@
"- MLP\n",
"- GIN\n",
"- MoNet\n",
+ "- RingGNN \n",
+ "- 3WLGNN \n",
"\n",
"### DATASET\n",
"- SBM_CLUSTER\n",
@@ -108,8 +110,7 @@
"\"\"\"\n",
"\n",
"from nets.SBMs_node_classification.load_net import gnn_model # import GNNs\n",
- "from data.data import LoadData # import dataset\n",
- "from train.train_SBMs_node_classification import train_epoch, evaluate_network # import train functions\n"
+ "from data.data import LoadData # import dataset\n"
]
},
{
@@ -148,10 +149,10 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "[I] Loading dataset SBM_CLUSTER...\n",
- "train, test, val sizes : 10000 1000 1000\n",
+ "[I] Loading dataset SBM_PATTERN...\n",
+ "train, test, val sizes : 10000 2000 2000\n",
"[I] Finished loading.\n",
- "[I] Data load time: 15.6901s\n"
+ "[I] Data load time: 18.0153s\n"
]
}
],
@@ -161,6 +162,8 @@
"# \"\"\"\n",
"if notebook_mode == True:\n",
" \n",
+ " #MODEL_NAME = '3WLGNN'\n",
+ " #MODEL_NAME = 'RingGNN'\n",
" MODEL_NAME = 'GatedGCN'\n",
" #MODEL_NAME = 'GCN'\n",
" #MODEL_NAME = 'GAT'\n",
@@ -170,8 +173,8 @@
" #MODEL_NAME = 'MoNet'\n",
" \n",
"\n",
- " DATASET_NAME = 'SBM_CLUSTER'\n",
- " #DATASET_NAME = 'SBM_PATTERN'\n",
+ " #DATASET_NAME = 'SBM_CLUSTER'\n",
+ " DATASET_NAME = 'SBM_PATTERN'\n",
"\n",
" out_dir = 'out/SBMs_node_classification/'\n",
" root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
@@ -187,8 +190,9 @@
"metadata": {},
"outputs": [],
"source": [
+ "#MODEL_NAME = 'RingGNN'\n",
"MODEL_NAME = 'GatedGCN'\n",
- "MODEL_NAME = 'GCN'\n",
+ "#MODEL_NAME = 'GCN'\n",
"#MODEL_NAME = 'GAT'\n",
"#MODEL_NAME = 'GraphSage'\n",
"#MODEL_NAME = 'MLP'\n",
@@ -219,7 +223,10 @@
" gated = False\n",
" self_loop = False\n",
" #self_loop = True\n",
- " max_time = 48\n",
+ " max_time = 12\n",
+ " pos_enc = True\n",
+ " #pos_enc = False\n",
+ " pos_enc_dim = 10\n",
" \n",
"\n",
" if MODEL_NAME == 'GatedGCN':\n",
@@ -260,6 +267,15 @@
" seed=41; epochs=1000; batch_size=50; init_lr=5e-4; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
" L=4; hidden_dim=90; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
" pseudo_dim_MoNet=2; kernel=3;\n",
+ " \n",
+ " if MODEL_NAME == 'RingGNN':\n",
+ " seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " #L=4; hidden_dim=145; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
+ " L=4; hidden_dim=25; out_dim=hidden_dim; dropout=0.0\n",
+ " \n",
+ " if MODEL_NAME == '3WLGNN':\n",
+ " seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " L=3; hidden_dim=82; out_dim=hidden_dim; dropout=0.0\n",
" \n",
"\n",
" \n",
@@ -273,7 +289,7 @@
" net_params['n_classes'] = num_classes\n",
" net_params['L'] = L # min L should be 2\n",
" net_params['readout'] = \"mean\"\n",
- " net_params['graph_norm'] = True\n",
+ " net_params['layer_norm'] = True\n",
" net_params['batch_norm'] = True\n",
" net_params['in_feat_dropout'] = 0.0\n",
" net_params['dropout'] = 0.0\n",
@@ -299,13 +315,18 @@
" net_params['pseudo_dim_MoNet'] = pseudo_dim_MoNet\n",
" net_params['kernel'] = kernel\n",
" \n",
- " # setting seeds\n",
- " random.seed(seed)\n",
- " np.random.seed(seed)\n",
- " torch.manual_seed(seed)\n",
- " if device == 'cuda':\n",
- " torch.cuda.manual_seed(seed)\n",
- " "
+ " # specific for RingGNN\n",
+ " net_params['radius'] = 2\n",
+ " num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]\n",
+ " net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))\n",
+ " \n",
+ " # specific for 3WLGNN\n",
+ " net_params['depth_of_mlp'] = 2\n",
+ " \n",
+ " # specific for pos_enc_dim\n",
+ " net_params['pos_enc'] = pos_enc\n",
+ " net_params['pos_enc_dim'] = pos_enc_dim\n",
+ " "
]
},
{
@@ -319,7 +340,7 @@
"text": [
"MODEL DETAILS:\n",
"\n",
- "MODEL/Total parameters: GCN 101655\n"
+ "MODEL/Total parameters: GatedGCN 104773\n"
]
}
],
@@ -352,7 +373,7 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
@@ -372,6 +393,12 @@
" print(\"[!] Adding graph self-loops for GCN/GAT models (central node trick).\")\n",
" dataset._add_self_loops()\n",
" \n",
+ " if MODEL_NAME in ['GatedGCN']:\n",
+ " if net_params['pos_enc']:\n",
+ " print(\"[!] Adding graph positional encoding.\")\n",
+ " dataset._add_positional_encodings(net_params['pos_enc_dim'])\n",
+ " print('Time PE:',time.time()-start0)\n",
+ " \n",
" trainset, valset, testset = dataset.train, dataset.val, dataset.test\n",
" \n",
" root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs\n",
@@ -389,7 +416,7 @@
" random.seed(params['seed'])\n",
" np.random.seed(params['seed'])\n",
" torch.manual_seed(params['seed'])\n",
- " if device == 'cuda':\n",
+ " if device.type == 'cuda':\n",
" torch.cuda.manual_seed(params['seed'])\n",
" \n",
" print(\"Training Graphs: \", len(trainset))\n",
@@ -409,9 +436,21 @@
" epoch_train_losses, epoch_val_losses = [], []\n",
" epoch_train_accs, epoch_val_accs = [], [] \n",
" \n",
- " train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)\n",
- " val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)\n",
- " test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)\n",
+ " if MODEL_NAME in ['RingGNN', '3WLGNN']:\n",
+ " # import train functions specific for WL-GNNs\n",
+ " from train.train_SBMs_node_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network\n",
+ " \n",
+ " train_loader = DataLoader(trainset, shuffle=True, collate_fn=dataset.collate_dense_gnn)\n",
+ " val_loader = DataLoader(valset, shuffle=False, collate_fn=dataset.collate_dense_gnn)\n",
+ " test_loader = DataLoader(testset, shuffle=False, collate_fn=dataset.collate_dense_gnn)\n",
+ " \n",
+ " else:\n",
+ " # import train functions for all other GCNs\n",
+ " from train.train_SBMs_node_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network # import train functions\n",
+ " \n",
+ " train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)\n",
+ " val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)\n",
+ " test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)\n",
" \n",
" # At any point you can hit Ctrl + C to break out of training early.\n",
" try:\n",
@@ -422,10 +461,14 @@
"\n",
" start = time.time()\n",
"\n",
- " epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)\n",
+ " if MODEL_NAME in ['RingGNN', '3WLGNN']: # since different batch training function for dense GNNs\n",
+ " epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])\n",
+ " else: # for all other models common train function\n",
+ " epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)\n",
+ " \n",
" epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)\n",
- " epoch_test_loss, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)\n",
- "\n",
+ " _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch) \n",
+ " \n",
" epoch_train_losses.append(epoch_train_loss)\n",
" epoch_val_losses.append(epoch_val_loss)\n",
" epoch_train_accs.append(epoch_train_acc)\n",
@@ -435,9 +478,9 @@
" writer.add_scalar('val/_loss', epoch_val_loss, epoch)\n",
" writer.add_scalar('train/_acc', epoch_train_acc, epoch)\n",
" writer.add_scalar('val/_acc', epoch_val_acc, epoch)\n",
+ " writer.add_scalar('test/_acc', epoch_test_acc, epoch)\n",
" writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)\n",
"\n",
- " _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch) \n",
" t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],\n",
" train_loss=epoch_train_loss, val_loss=epoch_val_loss,\n",
" train_acc=epoch_train_acc, val_acc=epoch_val_acc,\n",
@@ -479,6 +522,7 @@
" _, train_acc = evaluate_network(model, device, train_loader, epoch)\n",
" print(\"Test Accuracy: {:.4f}\".format(test_acc))\n",
" print(\"Train Accuracy: {:.4f}\".format(train_acc))\n",
+ " print(\"Convergence Time (Epochs): {:.4f}\".format(epoch))\n",
" print(\"TOTAL TIME TAKEN: {:.4f}s\".format(time.time()-start0))\n",
" print(\"AVG TIME PER EPOCH: {:.4f}s\".format(np.mean(per_epoch_time)))\n",
"\n",
@@ -490,29 +534,16 @@
" with open(write_file_name + '.txt', 'w') as f:\n",
" f.write(\"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n{}\\n\\nTotal Parameters: {}\\n\\n\n",
" FINAL RESULTS\\nTEST ACCURACY: {:.4f}\\nTRAIN ACCURACY: {:.4f}\\n\\n\n",
- " Total Time Taken: {:.4f} hrs\\nAverage Time Per Epoch: {:.4f} s\\n\\n\\n\"\"\"\\\n",
+ " Convergence Time (Epochs): {:.4f}\\nTotal Time Taken: {:.4f} hrs\\nAverage Time Per Epoch: {:.4f} s\\n\\n\\n\"\"\"\\\n",
" .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],\n",
- " test_acc, train_acc, (time.time()-start0)/3600, np.mean(per_epoch_time)))\n",
+ " test_acc, train_acc, epoch, (time.time()-start0)/3600, np.mean(per_epoch_time)))\n",
"\n",
- " \n",
- " # send results to gmail\n",
- " try:\n",
- " from gmail import send\n",
- " subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)\n",
- " body = \"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n{}\\n\\nTotal Parameters: {}\\n\\n\n",
- " FINAL RESULTS\\nTEST ACCURACY: {:.4f}\\nTRAIN ACCURACY: {:.4f}\\n\\n\n",
- " Total Time Taken: {:.4f} hrs\\nAverage Time Per Epoch: {:.4f} s\\n\\n\\n\"\"\"\\\n",
- " .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],\n",
- " test_acc, train_acc, (time.time()-start0)/3600, np.mean(per_epoch_time))\n",
- " send(subject, body)\n",
- " except:\n",
- " pass\n",
" "
]
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 10,
"metadata": {},
"outputs": [
{
@@ -522,46 +553,23 @@
"Convert main_SBMs_node_classification.ipynb to main_SBMs_node_classification.py\n",
"Clean main_SBMs_node_classification.py\n",
"Done. \n",
- "[I] Loading dataset SBM_CLUSTER...\n",
- "train, test, val sizes : 10000 1000 1000\n",
+ "[I] Loading dataset SBM_PATTERN...\n",
+ "train, test, val sizes : 10000 2000 2000\n",
"[I] Finished loading.\n",
- "[I] Data load time: 19.9212s\n",
- "cuda not available\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Epoch 0: 0%| | 0/1000 [00:00, ?it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
+ "[I] Data load time: 24.4756s\n",
+ "cuda not available\n",
"MODEL DETAILS:\n",
"\n",
- "MODEL/Total parameters: GCN 101655\n",
- "Training Graphs: 10000\n",
- "Validation Graphs: 1000\n",
- "Test Graphs: 1000\n",
- "Number of Classes: 6\n"
+ "MODEL/Total parameters: GatedGCN 104773\n",
+ "[!] Adding graph positional encoding.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "Epoch 0: 0%| | 0/1000 [00:19, ?it/s]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "-----------------------------------------------------------------------------------------\n",
- "Exiting from training early because of KeyboardInterrupt\n"
+ "/home/vijay/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/base.py:25: UserWarning: Currently adjacency_matrix() returns a matrix with destination as rows by default. In 0.5 the result will have source as rows (i.e. transpose=True)\n",
+ " warnings.warn(msg, warn_type)\n"
]
},
{
@@ -571,14 +579,14 @@
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
- "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 226\u001b[0m \u001b[0mcleaner_main\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'main_SBMs_node_classification'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 227\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 228\u001b[0;31m \u001b[0mmain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 229\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 230\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m\u001b[0m in \u001b[0;36mmain\u001b[0;34m(notebook_mode, config)\u001b[0m\n\u001b[1;32m 187\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 188\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'total_param'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mview_model_param\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mMODEL_NAME\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 189\u001b[0;31m \u001b[0mtrain_val_pipeline\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mMODEL_NAME\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdirs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 190\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 191\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m\u001b[0m in \u001b[0;36mtrain_val_pipeline\u001b[0;34m(MODEL_NAME, dataset, params, net_params, dirs)\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtest_acc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mevaluate_network\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtest_loader\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepoch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 121\u001b[0;31m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtrain_acc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mevaluate_network\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtrain_loader\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepoch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 122\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Test Accuracy: {:.4f}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_acc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 123\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Train Accuracy: {:.4f}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_acc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/train/train_SBMs_node_classification.py\u001b[0m in \u001b[0;36mevaluate_network\u001b[0;34m(model, device, data_loader, epoch)\u001b[0m\n\u001b[1;32m 50\u001b[0m \u001b[0mbatch_labels\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbatch_labels\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 51\u001b[0m \u001b[0mbatch_snorm_n\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbatch_snorm_n\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 52\u001b[0;31m \u001b[0mbatch_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_graphs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_x\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_e\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_snorm_n\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_snorm_e\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 53\u001b[0m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_scores\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_labels\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 54\u001b[0m \u001b[0mepoch_test_loss\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetach\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/nets/SBMs_node_classification/gcn_net.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, g, h, e, snorm_n, snorm_e)\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;31m# GCN\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 50\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mconv\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayers\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 51\u001b[0;31m \u001b[0mh\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msnorm_n\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 52\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 53\u001b[0m \u001b[0;31m# output\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 491\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 492\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 493\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 494\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 495\u001b[0m \u001b[0mhook_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/layers/gcn_layer.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, g, feature, snorm_n)\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0mh\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbatchnorm_h\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mh\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# batch normalization\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 63\u001b[0;31m \u001b[0mh\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mactivation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mh\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 64\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresidual\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/torch/nn/functional.py\u001b[0m in \u001b[0;36mrelu\u001b[0;34m(input, inplace)\u001b[0m\n\u001b[1;32m 941\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrelu_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 942\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 943\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 944\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 945\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 236\u001b[0m \u001b[0mcleaner_main\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'main_SBMs_node_classification'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 237\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 238\u001b[0;31m \u001b[0mmain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 239\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 240\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36mmain\u001b[0;34m(notebook_mode, config)\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 198\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'total_param'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mview_model_param\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mMODEL_NAME\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 199\u001b[0;31m \u001b[0mtrain_val_pipeline\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mMODEL_NAME\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdirs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 200\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36mtrain_val_pipeline\u001b[0;34m(MODEL_NAME, dataset, params, net_params, dirs)\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'pos_enc'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"[!] Adding graph positional encoding.\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 20\u001b[0;31m \u001b[0mdataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_add_positional_encodings\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnet_params\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'pos_enc_dim'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 21\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Time PE:'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mstart0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/graphdeeplearning/benchmarking-gnns-dev/data/SBMs.py\u001b[0m in \u001b[0;36m_add_positional_encodings\u001b[0;34m(self, pos_enc_dim)\u001b[0m\n\u001b[1;32m 242\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 243\u001b[0m \u001b[0;31m# Graph positional encoding v/ Laplacian eigenvectors\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 244\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_lists\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mpositional_encoding\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos_enc_dim\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mg\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_lists\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 245\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mval\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_lists\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mpositional_encoding\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos_enc_dim\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mg\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mval\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_lists\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 246\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_lists\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mpositional_encoding\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos_enc_dim\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mg\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_lists\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/graphdeeplearning/benchmarking-gnns-dev/data/SBMs.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 242\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 243\u001b[0m \u001b[0;31m# Graph positional encoding v/ Laplacian eigenvectors\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 244\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_lists\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mpositional_encoding\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos_enc_dim\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mg\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_lists\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 245\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mval\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_lists\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mpositional_encoding\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos_enc_dim\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mg\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mval\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_lists\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 246\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_lists\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mpositional_encoding\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos_enc_dim\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mg\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_lists\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/graphdeeplearning/benchmarking-gnns-dev/data/SBMs.py\u001b[0m in \u001b[0;36mpositional_encoding\u001b[0;34m(g, pos_enc_dim)\u001b[0m\n\u001b[1;32m 139\u001b[0m \u001b[0;31m# Eigenvectors with scipy\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[0;31m#EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 141\u001b[0;31m \u001b[0mEigVal\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mEigVec\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlinalg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0meigs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mL\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpos_enc_dim\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwhich\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'SR'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtol\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1e-2\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# for 40 PEs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 142\u001b[0m \u001b[0mEigVec\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mEigVec\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mEigVal\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margsort\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;31m# increasing order\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 143\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'pos_enc'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mabs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mEigVec\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mpos_enc_dim\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/scipy/sparse/linalg/eigen/arpack/arpack.py\u001b[0m in \u001b[0;36meigs\u001b[0;34m(A, k, M, sigma, which, v0, ncv, maxiter, tol, return_eigenvectors, Minv, OPinv, OPpart)\u001b[0m\n\u001b[1;32m 1345\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0m_ARPACK_LOCK\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1346\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconverged\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1347\u001b[0;31m \u001b[0mparams\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miterate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1348\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1349\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mextract\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mreturn_eigenvectors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/scipy/sparse/linalg/eigen/arpack/arpack.py\u001b[0m in \u001b[0;36miterate\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 725\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresid\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miparam\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 726\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mipntr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mworkd\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mworkl\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 727\u001b[0;31m self.info)\n\u001b[0m\u001b[1;32m 728\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 729\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mido\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtol\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresid\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miparam\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mipntr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfo\u001b[0m \u001b[0;34m=\u001b[0m\u001b[0;31m\\\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
@@ -619,7 +627,7 @@
" parser.add_argument('--gated', help=\"Please give a value for gated\")\n",
" parser.add_argument('--in_feat_dropout', help=\"Please give a value for in_feat_dropout\")\n",
" parser.add_argument('--dropout', help=\"Please give a value for dropout\")\n",
- " parser.add_argument('--graph_norm', help=\"Please give a value for graph_norm\")\n",
+ " parser.add_argument('--layer_norm', help=\"Please give a value for layer_norm\")\n",
" parser.add_argument('--batch_norm', help=\"Please give a value for batch_norm\")\n",
" parser.add_argument('--sage_aggregator', help=\"Please give a value for sage_aggregator\")\n",
" parser.add_argument('--data_mode', help=\"Please give a value for data_mode\")\n",
@@ -631,6 +639,8 @@
" parser.add_argument('--cat', help=\"Please give a value for cat\")\n",
" parser.add_argument('--self_loop', help=\"Please give a value for self_loop\")\n",
" parser.add_argument('--max_time', help=\"Please give a value for max_time\")\n",
+ " parser.add_argument('--pos_enc_dim', help=\"Please give a value for pos_enc_dim\")\n",
+ " parser.add_argument('--pos_enc', help=\"Please give a value for pos_enc\")\n",
" args = parser.parse_args()\n",
" with open(args.config) as f:\n",
" config = json.load(f)\n",
@@ -707,8 +717,8 @@
" net_params['in_feat_dropout'] = float(args.in_feat_dropout)\n",
" if args.dropout is not None:\n",
" net_params['dropout'] = float(args.dropout)\n",
- " if args.graph_norm is not None:\n",
- " net_params['graph_norm'] = True if args.graph_norm=='True' else False\n",
+ " if args.layer_norm is not None:\n",
+ " net_params['layer_norm'] = True if args.layer_norm=='True' else False\n",
" if args.batch_norm is not None:\n",
" net_params['batch_norm'] = True if args.batch_norm=='True' else False\n",
" if args.sage_aggregator is not None:\n",
@@ -729,6 +739,10 @@
" net_params['cat'] = True if args.cat=='True' else False\n",
" if args.self_loop is not None:\n",
" net_params['self_loop'] = True if args.self_loop=='True' else False\n",
+ " if args.pos_enc is not None:\n",
+ " net_params['pos_enc'] = True if args.pos_enc=='True' else False\n",
+ " if args.pos_enc_dim is not None:\n",
+ " net_params['pos_enc_dim'] = int(args.pos_enc_dim)\n",
"\n",
" \n",
" # notebook mode\n",
@@ -758,6 +772,10 @@
" # SBM\n",
" net_params['in_dim'] = torch.unique(dataset.train[0][0].ndata['feat'],dim=0).size(0) # node_dim (feat is an integer)\n",
" net_params['n_classes'] = torch.unique(dataset.train[0][1],dim=0).size(0)\n",
+ " \n",
+ " if MODEL_NAME == 'RingGNN':\n",
+ " num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]\n",
+ " net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))\n",
"\n",
" root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
" root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
@@ -819,6 +837,34 @@
" "
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
{
"cell_type": "code",
"execution_count": null,
diff --git a/main_SBMs_node_classification.py b/main_SBMs_node_classification.py
index 85b795100..68b225a05 100644
--- a/main_SBMs_node_classification.py
+++ b/main_SBMs_node_classification.py
@@ -43,7 +43,6 @@ def __init__(self, **kwds):
from nets.SBMs_node_classification.load_net import gnn_model # import GNNs
from data.data import LoadData # import dataset
-from train.train_SBMs_node_classification import train_epoch, evaluate_network # import train functions
@@ -103,6 +102,12 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
dataset._add_self_loops()
+ if MODEL_NAME in ['GatedGCN']:
+ if net_params['pos_enc']:
+ print("[!] Adding graph positional encoding.")
+ dataset._add_positional_encodings(net_params['pos_enc_dim'])
+ print('Time PE:',time.time()-start0)
+
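The positional encodings added here are the Laplacian-eigenvector features computed per graph in `data/SBMs.py` (the key lines are visible in the notebook traceback below). Roughly, the per-graph computation looks like the following sketch, which takes a SciPy sparse adjacency matrix; the Laplacian construction shown is an assumption, while the `eigs` call and the `abs(...)` slice mirror the traceback:

```python
import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import eigs
import torch

def laplacian_pos_enc(A, pos_enc_dim):
    # Laplacian positional encodings from a scipy sparse adjacency matrix A
    n = A.shape[0]
    deg = np.asarray(A.sum(axis=1)).flatten().clip(min=1)
    Dinv = sparse.diags(deg ** -0.5)
    L = sparse.eye(n) - Dinv @ A @ Dinv                 # symmetric normalized Laplacian (assumed)
    # k+1 smallest-real-part eigenpairs; the loose tol trades accuracy for speed
    EigVal, EigVec = eigs(L, k=pos_enc_dim + 1, which='SR', tol=1e-2)
    EigVec = EigVec[:, EigVal.argsort()]                # ascending eigenvalue order
    # drop the trivial constant eigenvector (column 0); keep magnitudes as features
    return torch.from_numpy(np.abs(EigVec[:, 1:pos_enc_dim + 1])).float()
```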
trainset, valset, testset = dataset.train, dataset.val, dataset.test
root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
@@ -119,7 +124,7 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
random.seed(params['seed'])
np.random.seed(params['seed'])
torch.manual_seed(params['seed'])
- if device == 'cuda':
+ if device.type == 'cuda':
torch.cuda.manual_seed(params['seed'])
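This small change fixes a real bug: `gpu_setup` returns a `torch.device`, and comparing it to the string `'cuda'` is always `False`, so CUDA was never being seeded; comparing `device.type` works for any CUDA device. A quick illustration:

```python
import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device == 'cuda')       # False even on a GPU: a torch.device never equals a str
print(device.type == 'cuda')  # True on any CUDA device, including 'cuda:0'
```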
print("Training Graphs: ", len(trainset))
@@ -139,9 +144,21 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
epoch_train_losses, epoch_val_losses = [], []
epoch_train_accs, epoch_val_accs = [], []
- train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
- val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
- test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
+ if MODEL_NAME in ['RingGNN', '3WLGNN']:
+ # import train functions specific for WL-GNNs
+ from train.train_SBMs_node_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
+
+ train_loader = DataLoader(trainset, shuffle=True, collate_fn=dataset.collate_dense_gnn)
+ val_loader = DataLoader(valset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
+ test_loader = DataLoader(testset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
+
+ else:
+            # import train functions for all other (message-passing) GNNs
+ from train.train_SBMs_node_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
+
+ train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
+ val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
+ test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
# At any point you can hit Ctrl + C to break out of training early.
try:
@@ -152,10 +169,14 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
start = time.time()
- epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
+            if MODEL_NAME in ['RingGNN', '3WLGNN']: # WL-GNNs use a dedicated train function with dense batching
+                epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])
+            else: # common train function for all other models
+                epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
+
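Since the WL-GNN loaders yield one graph per iteration, `params['batch_size']` is handed to the dense train function, which presumably uses it to accumulate gradients over that many single-graph steps before each optimizer update. A hedged sketch of that pattern (illustrative only; the real `train_epoch_dense` lives in `train/train_SBMs_node_classification.py` and also handles the SBM-specific inputs and accuracy):

```python
def train_epoch_dense_sketch(model, optimizer, device, data_loader, epoch, batch_size):
    model.train()
    optimizer.zero_grad()
    for i, (x, labels) in enumerate(data_loader):
        x, labels = x.to(device), labels.to(device)
        scores = model(x)
        loss = model.loss(scores, labels) / batch_size   # scale so summed grads match a mean
        loss.backward()
        if (i + 1) % batch_size == 0:                    # one optimizer step per `batch_size` graphs
            optimizer.step()
            optimizer.zero_grad()
```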
epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)
- epoch_test_loss, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
-
+ _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
+
epoch_train_losses.append(epoch_train_loss)
epoch_val_losses.append(epoch_val_loss)
epoch_train_accs.append(epoch_train_acc)
@@ -165,9 +186,9 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
writer.add_scalar('val/_loss', epoch_val_loss, epoch)
writer.add_scalar('train/_acc', epoch_train_acc, epoch)
writer.add_scalar('val/_acc', epoch_val_acc, epoch)
+ writer.add_scalar('test/_acc', epoch_test_acc, epoch)
writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
- _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
train_loss=epoch_train_loss, val_loss=epoch_val_loss,
train_acc=epoch_train_acc, val_acc=epoch_val_acc,
@@ -209,6 +230,7 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
_, train_acc = evaluate_network(model, device, train_loader, epoch)
print("Test Accuracy: {:.4f}".format(test_acc))
print("Train Accuracy: {:.4f}".format(train_acc))
+ print("Convergence Time (Epochs): {:.4f}".format(epoch))
print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-start0))
print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
@@ -220,24 +242,11 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
with open(write_file_name + '.txt', 'w') as f:
f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
- Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
+ Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
.format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
- test_acc, train_acc, (time.time()-start0)/3600, np.mean(per_epoch_time)))
+ test_acc, train_acc, epoch, (time.time()-start0)/3600, np.mean(per_epoch_time)))
- # send results to gmail
- try:
- from gmail import send
- subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)
- body = """Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
- FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
- Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
- .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
- test_acc, train_acc, (time.time()-start0)/3600, np.mean(per_epoch_time))
- send(subject, body)
- except:
- pass
-
@@ -274,7 +283,7 @@ def main():
parser.add_argument('--gated', help="Please give a value for gated")
parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
parser.add_argument('--dropout', help="Please give a value for dropout")
- parser.add_argument('--graph_norm', help="Please give a value for graph_norm")
+ parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
parser.add_argument('--data_mode', help="Please give a value for data_mode")
@@ -286,6 +295,8 @@ def main():
parser.add_argument('--cat', help="Please give a value for cat")
parser.add_argument('--self_loop', help="Please give a value for self_loop")
parser.add_argument('--max_time', help="Please give a value for max_time")
+ parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
+ parser.add_argument('--pos_enc', help="Please give a value for pos_enc")
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
@@ -358,8 +369,8 @@ def main():
net_params['in_feat_dropout'] = float(args.in_feat_dropout)
if args.dropout is not None:
net_params['dropout'] = float(args.dropout)
- if args.graph_norm is not None:
- net_params['graph_norm'] = True if args.graph_norm=='True' else False
+ if args.layer_norm is not None:
+ net_params['layer_norm'] = True if args.layer_norm=='True' else False
if args.batch_norm is not None:
net_params['batch_norm'] = True if args.batch_norm=='True' else False
if args.sage_aggregator is not None:
@@ -380,10 +391,18 @@ def main():
net_params['cat'] = True if args.cat=='True' else False
if args.self_loop is not None:
net_params['self_loop'] = True if args.self_loop=='True' else False
+ if args.pos_enc is not None:
+ net_params['pos_enc'] = True if args.pos_enc=='True' else False
+ if args.pos_enc_dim is not None:
+ net_params['pos_enc_dim'] = int(args.pos_enc_dim)
# SBM
net_params['in_dim'] = torch.unique(dataset.train[0][0].ndata['feat'],dim=0).size(0) # node_dim (feat is an integer)
net_params['n_classes'] = torch.unique(dataset.train[0][1],dim=0).size(0)
+
+ if MODEL_NAME == 'RingGNN':
+ num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
+ net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
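Note on the flag-parsing hunks above: `--layer_norm`, `--pos_enc`, and `--pos_enc_dim` follow this file's convention of passing every override as a string and comparing against the literal `'True'`. A minimal sketch of that pattern in isolation; the `str2bool` helper is illustrative, not part of the repo:

```python
import argparse

def str2bool(s):
    # Hypothetical helper; the scripts inline this as
    # `True if args.layer_norm == 'True' else False`.
    return s == 'True'

parser = argparse.ArgumentParser()
parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
parser.add_argument('--pos_enc', help="Please give a value for pos_enc")
parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
args = parser.parse_args(['--layer_norm', 'True', '--pos_enc', 'False'])

net_params = {}
if args.layer_norm is not None:
    net_params['layer_norm'] = str2bool(args.layer_norm)   # -> True
if args.pos_enc is not None:
    net_params['pos_enc'] = str2bool(args.pos_enc)         # -> False
if args.pos_enc_dim is not None:
    net_params['pos_enc_dim'] = int(args.pos_enc_dim)      # skipped: flag absent
```

Any string other than `'True'` (including `'true'` or `'1'`) is treated as False.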
diff --git a/main_TSP_edge_classification.ipynb b/main_TSP_edge_classification.ipynb
index ef5c49347..9af9d42f4 100644
--- a/main_TSP_edge_classification.ipynb
+++ b/main_TSP_edge_classification.ipynb
@@ -18,7 +18,9 @@
"- GraphSage \n",
"- GIN \n",
"- MoNet \n",
- "- MLP\n",
+ "- MLP \n",
+ "- RingGNN \n",
+ "- 3WLGNN \n",
"\n",
"### DATASET\n",
"- TSP\n",
@@ -108,8 +110,7 @@
" IMPORTING CUSTOM MODULES/METHODS\n",
"\"\"\"\n",
"from nets.TSP_edge_classification.load_net import gnn_model # import all GNNS\n",
- "from data.data import LoadData # import dataset\n",
- "from train.train_TSP_edge_classification import train_epoch, evaluate_network # import train functions\n"
+ "from data.data import LoadData # import dataset\n"
]
},
{
@@ -152,7 +153,7 @@
"[I] Loading dataset TSP...\n",
"train, test, val sizes : 10000 1000 1000\n",
"[I] Finished loading.\n",
- "[I] Data load time: 20.0447s\n",
+ "[I] Data load time: 18.5780s\n",
"[I] Finished loading.\n"
]
}
@@ -163,9 +164,12 @@
"# \"\"\"\n",
"if notebook_mode == True:\n",
" \n",
- " MODEL_NAME = 'MLP'\n",
- " # MODEL_NAME = 'GCN'\n",
- " MODEL_NAME = 'GatedGCN'\n",
+ " # MODEL_NAME = '3WLGNN'\n",
+ " # MODEL_NAME = 'RingGNN'\n",
+ " # MODEL_NAME = 'MLP'\n",
+ " MODEL_NAME = 'GAT'\n",
+ " # MODEL_NAME = 'GatedGCN'\n",
" # MODEL_NAME = 'GAT'\n",
" # MODEL_NAME = 'GraphSage'\n",
" # MODEL_NAME = 'DiffPool'\n",
@@ -173,7 +177,7 @@
"\n",
" DATASET_NAME = 'TSP'\n",
"\n",
- " out_dir = 'out/TSP_edge_classification/'\n",
+ " out_dir = 'out/TSP_edge_classification/debug/'\n",
" root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
" root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
"\n",
@@ -190,9 +194,9 @@
"metadata": {},
"outputs": [],
"source": [
- "MODEL_NAME = 'GatedGCN'\n",
- "MODEL_NAME = 'GCN'\n",
- "MODEL_NAME = 'GAT'\n",
+ "#MODEL_NAME = 'GatedGCN'\n",
+ "#MODEL_NAME = 'GCN'\n",
+ "#MODEL_NAME = 'GAT'\n",
"#MODEL_NAME = 'GraphSage'\n",
"#MODEL_NAME = 'MLP'\n",
"#MODEL_NAME = 'GIN'\n",
@@ -210,7 +214,7 @@
"# \"\"\"\n",
"if notebook_mode == True:\n",
" \n",
- " #MODEL_NAME = 'GCN'\n",
+ " MODEL_NAME = 'GatedGCN'\n",
" \n",
" n_heads = -1\n",
" edge_feat = False\n",
@@ -222,36 +226,47 @@
" n_mlp_GIN = -1\n",
" gated = False\n",
" self_loop = False\n",
- " max_time = 48\n",
+ " max_time = 12\n",
+ " layer_type = 'dgl'\n",
"\n",
" \n",
" if MODEL_NAME == 'MLP':\n",
" seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
- " L=3; hidden_dim=144; out_dim=hidden_dim; dropout=0.0; readout='mean'; gated = False # Change gated = True for Gated MLP model\n",
+ " L=4; hidden_dim=132; out_dim=hidden_dim; dropout=0.0; readout='mean'; gated = False # Change gated = True for Gated MLP model\n",
" \n",
" if MODEL_NAME == 'GCN':\n",
" seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
- " L=4; hidden_dim=128; out_dim=hidden_dim; dropout=0.0; readout='mean';\n",
+ " L=4; hidden_dim=120; out_dim=hidden_dim; dropout=0.0; readout='mean';\n",
" \n",
" if MODEL_NAME == 'GraphSage':\n",
" seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
- " L=4; hidden_dim=96; out_dim=hidden_dim; dropout=0.0; readout='mean';\n",
+ " L=4; hidden_dim=82; out_dim=hidden_dim; dropout=0.0; readout='mean';\n",
"\n",
" if MODEL_NAME == 'GAT':\n",
" seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
- " L=4; n_heads=8; hidden_dim=16; out_dim=128; dropout=0.0; readout='mean';\n",
+ " L=4; n_heads=8; hidden_dim=15; out_dim=n_heads*hidden_dim; dropout=0.0; readout='mean'; layer_type='isotropic'\n",
" \n",
" if MODEL_NAME == 'GIN':\n",
" seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
- " L=4; hidden_dim=112; out_dim=hidden_dim; dropout=0.0; readout='mean';\n",
+ " L=4; hidden_dim=73; out_dim=hidden_dim; dropout=0.0; readout='mean';\n",
" \n",
" if MODEL_NAME == 'MoNet':\n",
" seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
- " L=4; hidden_dim=80; out_dim=hidden_dim; dropout=0.0; readout='mean';\n",
+ " L=4; hidden_dim=82; out_dim=hidden_dim; dropout=0.0; readout='mean';\n",
" \n",
" if MODEL_NAME == 'GatedGCN':\n",
" seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0\n",
- " L=4; hidden_dim=64; out_dim=hidden_dim; dropout=0.0; readout='mean'; edge_feat = True\n",
+ " L=4; hidden_dim=96; out_dim=hidden_dim; dropout=0.0; readout='mean'; edge_feat = True; layer_type='isotropic'\n",
+ " \n",
+ " if MODEL_NAME == 'RingGNN':\n",
+ " seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " #L=4; hidden_dim=145; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
+ " L=4; hidden_dim=24; out_dim=hidden_dim; dropout=0.0;\n",
+ " \n",
+ " if MODEL_NAME == '3WLGNN':\n",
+ " seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " #L=4; hidden_dim=145; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
+ " L=3; hidden_dim=82; out_dim=hidden_dim; dropout=0.0; \n",
"\n",
" # generic new_params\n",
" net_params = {}\n",
@@ -266,12 +281,13 @@
" net_params['n_heads'] = n_heads\n",
" net_params['L'] = L # min L should be 2\n",
" net_params['readout'] = \"mean\"\n",
- " net_params['graph_norm'] = True\n",
+ " net_params['layer_norm'] = True\n",
" net_params['batch_norm'] = True\n",
" net_params['in_feat_dropout'] = 0.0\n",
" net_params['dropout'] = 0.0\n",
" net_params['edge_feat'] = edge_feat\n",
" net_params['self_loop'] = self_loop\n",
+ " net_params['layer_type'] = layer_type\n",
" \n",
" # for MLPNet \n",
" net_params['gated'] = gated\n",
@@ -286,15 +302,15 @@
" net_params['neighbor_aggr_GIN'] = 'sum'\n",
" \n",
" # specific for graphsage\n",
- " net_params['sage_aggregator'] = 'meanpool' \n",
- "\n",
- " # setting seeds\n",
- " random.seed(seed)\n",
- " np.random.seed(seed)\n",
- " torch.manual_seed(seed)\n",
- " if device == 'cuda':\n",
- " torch.cuda.manual_seed(seed)\n",
- " "
+ " net_params['sage_aggregator'] = 'maxpool' \n",
+ " \n",
+ " # specific for RingGNN\n",
+ " net_params['radius'] = 2\n",
+ " num_nodes = [trainset[i][0].number_of_nodes() for i in range(len(trainset))]\n",
+ " net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))\n",
+ " \n",
+ " # specific for 3WLGNN\n",
+ " net_params['depth_of_mlp'] = 2\n"
]
},
{
@@ -308,7 +324,24 @@
"text": [
"MODEL DETAILS:\n",
"\n",
- "MODEL/Total parameters: GAT 109250\n"
+ "GatedGCNNet(\n",
+ " (embedding_h): Linear(in_features=2, out_features=96, bias=True)\n",
+ " (embedding_e): Linear(in_features=1, out_features=96, bias=True)\n",
+ " (layers): ModuleList(\n",
+ " (0): GatedGCNLayerIsotropic(in_channels=96, out_channels=96)\n",
+ " (1): GatedGCNLayerIsotropic(in_channels=96, out_channels=96)\n",
+ " (2): GatedGCNLayerIsotropic(in_channels=96, out_channels=96)\n",
+ " (3): GatedGCNLayerIsotropic(in_channels=96, out_channels=96)\n",
+ " )\n",
+ " (MLP_layer): MLPReadout(\n",
+ " (FC_layers): ModuleList(\n",
+ " (0): Linear(in_features=192, out_features=96, bias=True)\n",
+ " (1): Linear(in_features=96, out_features=48, bias=True)\n",
+ " (2): Linear(in_features=48, out_features=2, bias=True)\n",
+ " )\n",
+ " )\n",
+ ")\n",
+ "MODEL/Total parameters: GatedGCN 99026\n"
]
}
],
@@ -320,7 +353,7 @@
" model = gnn_model(MODEL_NAME, net_params)\n",
" total_param = 0\n",
" print(\"MODEL DETAILS:\\n\")\n",
- " #print(model)\n",
+ " print(model)\n",
" for param in model.parameters():\n",
" # print(param.data.size())\n",
" total_param += np.prod(list(param.data.size()))\n",
@@ -335,14 +368,7 @@
},
{
"cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": 9,
+ "execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
@@ -375,7 +401,7 @@
" random.seed(params['seed'])\n",
" np.random.seed(params['seed'])\n",
" torch.manual_seed(params['seed'])\n",
- " if device == 'cuda':\n",
+ " if device.type == 'cuda':\n",
" torch.cuda.manual_seed(params['seed'])\n",
" \n",
" print(\"Training Graphs: \", len(trainset))\n",
@@ -395,10 +421,25 @@
" epoch_train_losses, epoch_val_losses = [], []\n",
" epoch_train_f1s, epoch_val_f1s = [], [] \n",
" \n",
- " train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)\n",
- " val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)\n",
- " test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)\n",
+ " \n",
+ " if MODEL_NAME in ['RingGNN', '3WLGNN']:\n",
+ " # import train functions specific for WL-GNNs\n",
+ " from train.train_TSP_edge_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network\n",
+ " from functools import partial # util function to pass edge_feat to collate function\n",
+ " \n",
+ " train_loader = DataLoader(trainset, shuffle=True, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))\n",
+ " val_loader = DataLoader(valset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))\n",
+ " test_loader = DataLoader(testset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))\n",
+ "\n",
+ " else:\n",
+ " # import train functions for all other GCNs\n",
+ " from train.train_TSP_edge_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network\n",
"\n",
+ " train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)\n",
+ " val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)\n",
+ " test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)\n",
+ " \n",
+ " \n",
" # At any point you can hit Ctrl + C to break out of training early.\n",
" try:\n",
" with tqdm(range(params['epochs'])) as t:\n",
@@ -407,10 +448,15 @@
" t.set_description('Epoch %d' % epoch) \n",
"\n",
" start = time.time()\n",
- "\n",
- " epoch_train_loss, epoch_train_f1, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)\n",
+ " \n",
+    "                    if MODEL_NAME in ['RingGNN', '3WLGNN']: # dense WL-GNNs use a different batch training function\n",
+ " epoch_train_loss, epoch_train_f1, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])\n",
+    "                    else: # common train function for all other models\n",
+ " epoch_train_loss, epoch_train_f1, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)\n",
+ " \n",
" epoch_val_loss, epoch_val_f1 = evaluate_network(model, device, val_loader, epoch)\n",
- "\n",
+ " _, epoch_test_f1 = evaluate_network(model, device, test_loader, epoch) \n",
+ " \n",
" epoch_train_losses.append(epoch_train_loss)\n",
" epoch_val_losses.append(epoch_val_loss)\n",
" epoch_train_f1s.append(epoch_train_f1)\n",
@@ -420,9 +466,10 @@
" writer.add_scalar('val/_loss', epoch_val_loss, epoch)\n",
" writer.add_scalar('train/_f1', epoch_train_f1, epoch)\n",
" writer.add_scalar('val/_f1', epoch_val_f1, epoch)\n",
+ " writer.add_scalar('test/_f1', epoch_test_f1, epoch)\n",
" writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch) \n",
"\n",
- " _, epoch_test_f1 = evaluate_network(model, device, test_loader, epoch) \n",
+ " \n",
" t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],\n",
" train_loss=epoch_train_loss, val_loss=epoch_val_loss,\n",
" train_f1=epoch_train_f1, val_f1=epoch_val_f1,\n",
@@ -463,6 +510,7 @@
" _, train_f1 = evaluate_network(model, device, train_loader, epoch)\n",
" print(\"Test F1: {:.4f}\".format(test_f1))\n",
" print(\"Train F1: {:.4f}\".format(train_f1))\n",
+ " print(\"Convergence Time (Epochs): {:.4f}\".format(epoch))\n",
" print(\"TOTAL TIME TAKEN: {:.4f}s\".format(time.time()-t0))\n",
" print(\"AVG TIME PER EPOCH: {:.4f}s\".format(np.mean(per_epoch_time)))\n",
"\n",
@@ -474,29 +522,15 @@
" with open(write_file_name + '.txt', 'w') as f:\n",
" f.write(\"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n{}\\n\\nTotal Parameters: {}\\n\\n\n",
" FINAL RESULTS\\nTEST F1: {:.4f}\\nTRAIN F1: {:.4f}\\n\\n\n",
- " Total Time Taken: {:.4f}hrs\\nAverage Time Per Epoch: {:.4f}s\\n\\n\\n\"\"\"\\\n",
+ " Convergence Time (Epochs): {:.4f}\\nTotal Time Taken: {:.4f}hrs\\nAverage Time Per Epoch: {:.4f}s\\n\\n\\n\"\"\"\\\n",
" .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],\n",
- " np.mean(np.array(test_f1)), np.mean(np.array(train_f1)), (time.time()-t0)/3600, np.mean(per_epoch_time)))\n",
- " \n",
- "\n",
- " # send results to gmail\n",
- " try:\n",
- " from gmail import send\n",
- " subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)\n",
- " body = \"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n{}\\n\\nTotal Parameters: {}\\n\\n\n",
- " FINAL RESULTS\\nTEST F1: {:.4f}\\nTRAIN F1: {:.4f}\\n\\n\n",
- " Total Time Taken: {:.4f}hrs\\nAverage Time Per Epoch: {:.4f}s\\n\\n\\n\"\"\"\\\n",
- " .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],\n",
- " np.mean(np.array(test_f1)), np.mean(np.array(train_f1)), (time.time()-t0)/3600, np.mean(per_epoch_time))\n",
- " send(subject, body)\n",
- " except:\n",
- " pass\n",
+ " np.mean(np.array(test_f1)), np.mean(np.array(train_f1)), epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))\n",
" "
]
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": 11,
"metadata": {},
"outputs": [
{
@@ -506,46 +540,7 @@
"Convert main_TSP_edge_classification.ipynb to main_TSP_edge_classification.py\n",
"Clean main_TSP_edge_classification.py\n",
"Done. \n",
- "[I] Loading dataset TSP...\n",
- "train, test, val sizes : 10000 1000 1000\n",
- "[I] Finished loading.\n",
- "[I] Data load time: 24.1761s\n",
- "cuda not available\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Epoch 0: 0%| | 0/500 [00:00, ?it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "MODEL DETAILS:\n",
- "\n",
- "MODEL/Total parameters: GAT 109250\n",
- "Training Graphs: 10000\n",
- "Validation Graphs: 1000\n",
- "Test Graphs: 1000\n",
- "Number of Classes: 2\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Epoch 0: 0%| | 0/500 [01:00, ?it/s]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "-----------------------------------------------------------------------------------------\n",
- "Exiting from training early because of KeyboardInterrupt\n"
+ "[I] Loading dataset TSP...\n"
]
},
{
@@ -555,22 +550,15 @@
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
- "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 229\u001b[0m \u001b[0mcleaner_main\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'main_TSP_edge_classification'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 230\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 231\u001b[0;31m \u001b[0mmain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 232\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 233\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m\u001b[0m in \u001b[0;36mmain\u001b[0;34m(notebook_mode, config)\u001b[0m\n\u001b[1;32m 189\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 190\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'total_param'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mview_model_param\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mMODEL_NAME\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 191\u001b[0;31m \u001b[0mtrain_val_pipeline\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mMODEL_NAME\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdirs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 192\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 193\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m\u001b[0m in \u001b[0;36mtrain_val_pipeline\u001b[0;34m(MODEL_NAME, dataset, params, net_params, dirs)\u001b[0m\n\u001b[1;32m 113\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 114\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtest_f1\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mevaluate_network\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtest_loader\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepoch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 115\u001b[0;31m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtrain_f1\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mevaluate_network\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtrain_loader\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepoch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 116\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Test F1: {:.4f}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_f1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 117\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Train F1: {:.4f}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_f1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/train/train_TSP_edge_classification.py\u001b[0m in \u001b[0;36mevaluate_network\u001b[0;34m(model, device, data_loader, epoch)\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0mbatch_snorm_n\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbatch_snorm_n\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 53\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 54\u001b[0;31m \u001b[0mbatch_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_graphs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_x\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_e\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_snorm_n\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_snorm_e\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 55\u001b[0m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_scores\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_labels\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0mepoch_test_loss\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetach\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/nets/TSP_edge_classification/gat_net.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, g, h, e, snorm_n, snorm_e)\u001b[0m\n\u001b[1;32m 45\u001b[0m \u001b[0mh\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0min_feat_dropout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mh\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 46\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mconv\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayers\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 47\u001b[0;31m \u001b[0mh\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msnorm_n\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 48\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'h'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 491\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 492\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 493\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 494\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 495\u001b[0m \u001b[0mhook_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/layers/gat_layer.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, g, h, snorm_n)\u001b[0m\n\u001b[1;32m 69\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msnorm_n\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 70\u001b[0m \u001b[0mh_in\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mh\u001b[0m \u001b[0;31m# for residual connection\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 71\u001b[0;31m \u001b[0mhead_outs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mattn_head\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msnorm_n\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mattn_head\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mheads\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 72\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 73\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmerge\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'cat'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/layers/gat_layer.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 69\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msnorm_n\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 70\u001b[0m \u001b[0mh_in\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mh\u001b[0m \u001b[0;31m# for residual connection\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 71\u001b[0;31m \u001b[0mhead_outs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mattn_head\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msnorm_n\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mattn_head\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mheads\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 72\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 73\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmerge\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'cat'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 491\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 492\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 493\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 494\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 495\u001b[0m \u001b[0mhook_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/layers/gat_layer.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, g, h, snorm_n)\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'z'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mz\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply_edges\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0medge_attention\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 40\u001b[0;31m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate_all\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmessage_func\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreduce_func\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 41\u001b[0m \u001b[0mh\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'h'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_norm\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/graph.py\u001b[0m in \u001b[0;36mupdate_all\u001b[0;34m(self, message_func, reduce_func, apply_node_func)\u001b[0m\n\u001b[1;32m 2745\u001b[0m \u001b[0mreduce_func\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mreduce_func\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2746\u001b[0m apply_func=apply_node_func)\n\u001b[0;32m-> 2747\u001b[0;31m \u001b[0mRuntime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mprog\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2748\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2749\u001b[0m def prop_nodes(self,\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/runtime/runtime.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(prog)\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mexe\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mprog\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecs\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;31m# prog.pprint_exe(exe)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0mexe\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/runtime/ir/executor.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 130\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 131\u001b[0m \u001b[0mmail_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfdmail\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 132\u001b[0;31m \u001b[0mudf_ret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfn_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_data\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmail_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 133\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mret\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mFrameRef\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mFrame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mudf_ret\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 134\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/runtime/degree_bucketing.py\u001b[0m in \u001b[0;36m_rfunc_wrapper\u001b[0;34m(node_data, mail_data)\u001b[0m\n\u001b[1;32m 151\u001b[0m \u001b[0mreshaped_mail_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mLazyDict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_reshaped_getter\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmail_data\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeys\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 152\u001b[0m \u001b[0mnbatch\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mNodeBatch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvbkt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnode_data\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreshaped_mail_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 153\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mreduce_udf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnbatch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 154\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_rfunc_wrapper\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/layers/gat_layer.py\u001b[0m in \u001b[0;36mreduce_func\u001b[0;34m(self, nodes)\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0malpha\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msoftmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnodes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmailbox\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'e'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0malpha\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdropout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0malpha\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdropout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtraining\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtraining\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 33\u001b[0;31m \u001b[0mh\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msum\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0malpha\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mnodes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmailbox\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'z'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 34\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m'h'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 35\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/udf.py\u001b[0m in \u001b[0;36mmailbox\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_data\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 121\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 122\u001b[0;31m \u001b[0;34m@\u001b[0m\u001b[0mproperty\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 123\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mmailbox\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 124\u001b[0m \"\"\"Return the received messages.\n",
+ "\u001b[0;32m~/graphdeeplearning/benchmarking-gnns-dev/data/TSP.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 124\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata_dir\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;34m'.pkl'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"rb\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 125\u001b[0;31m \u001b[0mf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpickle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 126\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/graph_index.py\u001b[0m in \u001b[0;36m__setstate__\u001b[0;34m(self, state)\u001b[0m\n\u001b[1;32m 68\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnum_nodes\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 69\u001b[0;31m readonly)\n\u001b[0m\u001b[1;32m 70\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m: ",
+ "\nDuring handling of the above exception, another exception occurred:\n",
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 236\u001b[0m \u001b[0mcleaner_main\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'main_TSP_edge_classification'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 237\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 238\u001b[0;31m \u001b[0mmain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 239\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 240\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36mmain\u001b[0;34m(notebook_mode, config)\u001b[0m\n\u001b[1;32m 157\u001b[0m \u001b[0;31m# dataset\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0mDATASET_NAME\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'dataset'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 159\u001b[0;31m \u001b[0mdataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mLoadData\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mDATASET_NAME\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 160\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 161\u001b[0m \u001b[0;31m# device\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/graphdeeplearning/benchmarking-gnns-dev/data/data.py\u001b[0m in \u001b[0;36mLoadData\u001b[0;34m(DATASET_NAME)\u001b[0m\n\u001b[1;32m 37\u001b[0m \u001b[0;31m# handling for TSP dataset\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mDATASET_NAME\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'TSP'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 39\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mTSPDataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mDATASET_NAME\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 40\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 41\u001b[0m \u001b[0;31m# handling for COLLAB dataset\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/graphdeeplearning/benchmarking-gnns-dev/data/TSP.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 123\u001b[0m \u001b[0mdata_dir\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'data/TSP/'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 124\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata_dir\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;34m'.pkl'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"rb\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 125\u001b[0;31m \u001b[0mf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpickle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 126\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 127\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtest\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
@@ -611,7 +599,7 @@
" parser.add_argument('--gated', help=\"Please give a value for gated\")\n",
" parser.add_argument('--in_feat_dropout', help=\"Please give a value for in_feat_dropout\")\n",
" parser.add_argument('--dropout', help=\"Please give a value for dropout\")\n",
- " parser.add_argument('--graph_norm', help=\"Please give a value for graph_norm\")\n",
+ " parser.add_argument('--layer_norm', help=\"Please give a value for layer_norm\")\n",
" parser.add_argument('--batch_norm', help=\"Please give a value for batch_norm\")\n",
" parser.add_argument('--sage_aggregator', help=\"Please give a value for sage_aggregator\")\n",
" parser.add_argument('--data_mode', help=\"Please give a value for data_mode\")\n",
@@ -623,6 +611,7 @@
" parser.add_argument('--cat', help=\"Please give a value for cat\")\n",
" parser.add_argument('--self_loop', help=\"Please give a value for self_loop\")\n",
" parser.add_argument('--max_time', help=\"Please give a value for max_time\")\n",
+ " parser.add_argument('--layer_type', help=\"Please give a value for layer_type (for GAT and GatedGCN only)\")\n",
" args = parser.parse_args()\n",
" with open(args.config) as f:\n",
" config = json.load(f)\n",
@@ -699,8 +688,8 @@
" net_params['in_feat_dropout'] = float(args.in_feat_dropout)\n",
" if args.dropout is not None:\n",
" net_params['dropout'] = float(args.dropout)\n",
- " if args.graph_norm is not None:\n",
- " net_params['graph_norm'] = True if args.graph_norm=='True' else False\n",
+ " if args.layer_norm is not None:\n",
+ " net_params['layer_norm'] = True if args.layer_norm=='True' else False\n",
" if args.batch_norm is not None:\n",
" net_params['batch_norm'] = True if args.batch_norm=='True' else False\n",
" if args.sage_aggregator is not None:\n",
@@ -721,6 +710,8 @@
" net_params['cat'] = True if args.cat=='True' else False\n",
" if args.self_loop is not None:\n",
" net_params['self_loop'] = True if args.self_loop=='True' else False\n",
+ " if args.layer_type is not None:\n",
+    "        net_params['layer_type'] = args.layer_type\n",
" \n",
"\n",
" # notebook mode\n",
@@ -753,6 +744,10 @@
" num_classes = len(np.unique(np.concatenate(dataset.train[:][1])))\n",
" net_params['n_classes'] = num_classes\n",
" \n",
+ " if MODEL_NAME == 'RingGNN':\n",
+ " num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]\n",
+ " net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))\n",
+ " \n",
" root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
" root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
" write_file_name = out_dir + 'results/result_' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
@@ -814,6 +809,34 @@
" "
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
{
"cell_type": "code",
"execution_count": null,
diff --git a/main_TSP_edge_classification.py b/main_TSP_edge_classification.py
index 36c883f6d..393ea8d58 100644
--- a/main_TSP_edge_classification.py
+++ b/main_TSP_edge_classification.py
@@ -43,7 +43,6 @@ def __init__(self, **kwds):
"""
from nets.TSP_edge_classification.load_net import gnn_model # import all GNNS
from data.data import LoadData # import dataset
-from train.train_TSP_edge_classification import train_epoch, evaluate_network # import train functions
@@ -68,10 +67,6 @@ def gpu_setup(use_gpu, gpu_id):
-
-
-
-
"""
VIEWING MODEL CONFIG AND PARAMS
"""
@@ -79,7 +74,7 @@ def view_model_param(MODEL_NAME, net_params):
model = gnn_model(MODEL_NAME, net_params)
total_param = 0
print("MODEL DETAILS:\n")
- #print(model)
+ print(model)
for param in model.parameters():
# print(param.data.size())
total_param += np.prod(list(param.data.size()))
@@ -115,7 +110,7 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
random.seed(params['seed'])
np.random.seed(params['seed'])
torch.manual_seed(params['seed'])
- if device == 'cuda':
+ if device.type == 'cuda':
torch.cuda.manual_seed(params['seed'])
print("Training Graphs: ", len(trainset))
@@ -135,10 +130,25 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
epoch_train_losses, epoch_val_losses = [], []
epoch_train_f1s, epoch_val_f1s = [], []
- train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
- val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
- test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
+
+ if MODEL_NAME in ['RingGNN', '3WLGNN']:
+ # import train functions specific for WL-GNNs
+ from train.train_TSP_edge_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
+ from functools import partial # util function to pass edge_feat to collate function
+
+ train_loader = DataLoader(trainset, shuffle=True, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))
+ val_loader = DataLoader(valset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))
+ test_loader = DataLoader(testset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))
+
+ else:
+ # import train functions for all other GCNs
+ from train.train_TSP_edge_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
+ train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
+ val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
+ test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
+
+
# At any point you can hit Ctrl + C to break out of training early.
try:
with tqdm(range(params['epochs'])) as t:
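Two things change in the WL-GNN loader setup above: `batch_size` is dropped (the `DataLoader` default of 1 applies, so each iteration yields one dense graph), and `functools.partial` binds `edge_feat` into the collate function, since `DataLoader` only ever passes the raw list of samples to `collate_fn`. A runnable sketch of the pattern with a stand-in collate; the real `collate_dense_gnn` lives in `data/TSP.py`:

```python
from functools import partial
from torch.utils.data import DataLoader

# Toy (sample, label) pairs standing in for (DGLGraph, edge_labels) items.
toy_data = [(i, i % 2) for i in range(4)]

def collate_dense_gnn(samples, edge_feat):
    # Stand-in: the real collate builds one dense node/adjacency tensor per
    # graph, adding edge features as extra channels when edge_feat is True.
    x, label = samples[0]  # batch_size defaults to 1, so one sample per call
    return x, label

loader = DataLoader(toy_data, shuffle=True,
                    collate_fn=partial(collate_dense_gnn, edge_feat=True))
for x, label in loader:
    pass  # each "batch" is a single dense graph
```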
@@ -147,10 +157,15 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
t.set_description('Epoch %d' % epoch)
start = time.time()
-
- epoch_train_loss, epoch_train_f1, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
+
+                if MODEL_NAME in ['RingGNN', '3WLGNN']: # dense WL-GNNs use a different batch training function
+ epoch_train_loss, epoch_train_f1, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])
+                else: # common train function for all other models
+ epoch_train_loss, epoch_train_f1, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
+
epoch_val_loss, epoch_val_f1 = evaluate_network(model, device, val_loader, epoch)
-
+ _, epoch_test_f1 = evaluate_network(model, device, test_loader, epoch)
+
epoch_train_losses.append(epoch_train_loss)
epoch_val_losses.append(epoch_val_loss)
epoch_train_f1s.append(epoch_train_f1)
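`train_epoch_dense` receives `params['batch_size']` even though its loader yields one graph at a time: dense WL-GNN tensors vary in size and cannot be stacked, so mini-batching has to be emulated by gradient accumulation. A sketch of that idea under assumed signatures (the call sites above fix the argument order; the real function also tracks F1, omitted here):

```python
import torch

def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
    """Sketch: step the optimizer every `batch_size` graphs instead of
    stacking variable-size dense tensors into a single batch."""
    model.train()
    epoch_loss = 0.0
    optimizer.zero_grad()
    for iter_idx, (x, labels) in enumerate(data_loader):
        x, labels = x.to(device), labels.to(device)
        scores = model(x)                  # forward pass on one dense graph
        loss = model.loss(scores, labels)  # assumes the repo's model.loss API
        loss.backward()                    # gradients accumulate across graphs
        if (iter_idx + 1) % batch_size == 0:
            optimizer.step()
            optimizer.zero_grad()
        epoch_loss += loss.detach().item()
    return epoch_loss / (iter_idx + 1), optimizer
```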
@@ -160,9 +175,10 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
writer.add_scalar('val/_loss', epoch_val_loss, epoch)
writer.add_scalar('train/_f1', epoch_train_f1, epoch)
writer.add_scalar('val/_f1', epoch_val_f1, epoch)
+ writer.add_scalar('test/_f1', epoch_test_f1, epoch)
writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
- _, epoch_test_f1 = evaluate_network(model, device, test_loader, epoch)
+
t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
train_loss=epoch_train_loss, val_loss=epoch_val_loss,
train_f1=epoch_train_f1, val_f1=epoch_val_f1,
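Moving the test evaluation ahead of the `writer` calls is what lets `test/_f1` be logged per epoch, so TensorBoard shows train/val/test curves side by side instead of the test score feeding only the progress bar. The logging pattern in isolation, assuming `torch.utils.tensorboard` (the scripts create their writer elsewhere):

```python
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir='out/tmp_logs')          # hypothetical log dir
fake_curve = [(0.90, 0.71, 0.69), (0.94, 0.73, 0.71)]   # (train, val, test)
for epoch, (tr, va, te) in enumerate(fake_curve):
    writer.add_scalar('train/_f1', tr, epoch)
    writer.add_scalar('val/_f1', va, epoch)
    writer.add_scalar('test/_f1', te, epoch)  # the scalar added by this change
writer.close()
```

The cost is one extra full pass over the test set per epoch.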
@@ -203,6 +219,7 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
_, train_f1 = evaluate_network(model, device, train_loader, epoch)
print("Test F1: {:.4f}".format(test_f1))
print("Train F1: {:.4f}".format(train_f1))
+ print("Convergence Time (Epochs): {:.4f}".format(epoch))
print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-t0))
print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
@@ -214,23 +231,9 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
with open(write_file_name + '.txt', 'w') as f:
f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
FINAL RESULTS\nTEST F1: {:.4f}\nTRAIN F1: {:.4f}\n\n
- Total Time Taken: {:.4f}hrs\nAverage Time Per Epoch: {:.4f}s\n\n\n"""\
- .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
- np.mean(np.array(test_f1)), np.mean(np.array(train_f1)), (time.time()-t0)/3600, np.mean(per_epoch_time)))
-
-
- # send results to gmail
- try:
- from gmail import send
- subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)
- body = """Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
- FINAL RESULTS\nTEST F1: {:.4f}\nTRAIN F1: {:.4f}\n\n
- Total Time Taken: {:.4f}hrs\nAverage Time Per Epoch: {:.4f}s\n\n\n"""\
+ Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f}hrs\nAverage Time Per Epoch: {:.4f}s\n\n\n"""\
.format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
- np.mean(np.array(test_f1)), np.mean(np.array(train_f1)), (time.time()-t0)/3600, np.mean(per_epoch_time))
- send(subject, body)
- except:
- pass
+ np.mean(np.array(test_f1)), np.mean(np.array(train_f1)), epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))
@@ -268,7 +271,7 @@ def main():
parser.add_argument('--gated', help="Please give a value for gated")
parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
parser.add_argument('--dropout', help="Please give a value for dropout")
- parser.add_argument('--graph_norm', help="Please give a value for graph_norm")
+ parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
parser.add_argument('--data_mode', help="Please give a value for data_mode")
@@ -280,6 +283,7 @@ def main():
parser.add_argument('--cat', help="Please give a value for cat")
parser.add_argument('--self_loop', help="Please give a value for self_loop")
parser.add_argument('--max_time', help="Please give a value for max_time")
+ parser.add_argument('--layer_type', help="Please give a value for layer_type (for GAT and GatedGCN only)")
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
@@ -352,8 +356,8 @@ def main():
net_params['in_feat_dropout'] = float(args.in_feat_dropout)
if args.dropout is not None:
net_params['dropout'] = float(args.dropout)
- if args.graph_norm is not None:
- net_params['graph_norm'] = True if args.graph_norm=='True' else False
+ if args.layer_norm is not None:
+ net_params['layer_norm'] = True if args.layer_norm=='True' else False
if args.batch_norm is not None:
net_params['batch_norm'] = True if args.batch_norm=='True' else False
if args.sage_aggregator is not None:
@@ -374,6 +378,8 @@ def main():
net_params['cat'] = True if args.cat=='True' else False
if args.self_loop is not None:
net_params['self_loop'] = True if args.self_loop=='True' else False
+ if args.layer_type is not None:
+        net_params['layer_type'] = args.layer_type
@@ -384,6 +390,10 @@ def main():
num_classes = len(np.unique(np.concatenate(dataset.train[:][1])))
net_params['n_classes'] = num_classes
+ if MODEL_NAME == 'RingGNN':
+ num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
+ net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
+
root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
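Only RingGNN needs `avg_node_num`; from the model code it appears to serve as a typical-graph-size constant inside the equivariant layers (an assumption here, not stated in this diff). The derivation itself is just the ceiling of the mean training-graph size:

```python
import numpy as np

# Stand-in sizes for [g.number_of_nodes() for g, _ in dataset.train].
num_nodes = [17, 23, 19]
avg_node_num = int(np.ceil(np.mean(num_nodes)))  # ceil(19.67) -> 20
```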
diff --git a/main_TUs_graph_classification.ipynb b/main_TUs_graph_classification.ipynb
index cc428c685..b3c6b683c 100644
--- a/main_TUs_graph_classification.ipynb
+++ b/main_TUs_graph_classification.ipynb
@@ -18,8 +18,9 @@
"- GraphSage \n",
"- GIN \n",
"- MoNet \n",
- "- DiffPool\n",
- "- MLP\n",
+ "- MLP \n",
+ "- RingGNN \n",
+ "- 3WLGNN \n",
"\n",
"### DATASET\n",
"- DD \n",
@@ -110,8 +111,7 @@
"\"\"\"\n",
"\n",
"from nets.TUs_graph_classification.load_net import gnn_model # import GNNs\n",
- "from data.data import LoadData # import dataset\n",
- "from train.train_TUs_graph_classification import train_epoch, evaluate_network # import train functions\n"
+ "from data.data import LoadData # import dataset\n"
]
},
{
@@ -151,9 +151,8 @@
"output_type": "stream",
"text": [
"[I] Loading data (notebook) ...\n",
- "No Node Attribute Data\n",
- "[!] Dataset: DD\n",
- "Time taken: 10.7704s\n",
+ "[!] Dataset: PROTEINS_full\n",
+ "Time taken: 2.6439s\n",
"[I] Finished loading.\n"
]
}
@@ -164,16 +163,18 @@
"# \"\"\"\n",
"if notebook_mode == True:\n",
" \n",
+ " #MODEL_NAME = '3WLGNN'\n",
+ " #MODEL_NAME = 'RingGNN'\n",
" MODEL_NAME = 'GatedGCN'\n",
" #MODEL_NAME = 'MoNet'\n",
" #MODEL_NAME = 'GCN'\n",
" #MODEL_NAME = 'GAT'\n",
" #MODEL_NAME = 'GraphSage'\n",
- " #MODEL_NAME = 'DiffPool'\n",
+ " #MODEL_NAME = 'GIN'\n",
" #MODEL_NAME = 'MLP'\n",
"\n",
" DATASET_NAME = 'PROTEINS_full'\n",
- " DATASET_NAME = 'DD'\n",
+ " #DATASET_NAME = 'DD'\n",
"\n",
" out_dir = 'out/TUs_graph_classification/'\n",
" root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
@@ -191,7 +192,8 @@
"metadata": {},
"outputs": [],
"source": [
- "MODEL_NAME = 'GatedGCN'\n",
+ "#MODEL_NAME = 'RingGNN'\n",
+ "#MODEL_NAME = 'GatedGCN'\n",
"#MODEL_NAME = 'GCN'\n",
"#MODEL_NAME = 'GAT'\n",
"#MODEL_NAME = 'GraphSage'\n",
@@ -222,7 +224,7 @@
" gated = False\n",
" self_loop = False\n",
" #self_loop = True\n",
- " max_time = 48\n",
+ " max_time = 12\n",
" \n",
"\n",
" if MODEL_NAME == 'GatedGCN':\n",
@@ -261,6 +263,15 @@
" L=4; hidden_dim=90; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
" pseudo_dim_MoNet=2; kernel=3;\n",
" \n",
+ " if MODEL_NAME == 'RingGNN':\n",
+ " seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " #L=4; hidden_dim=145; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
+ " L=4; hidden_dim=22; out_dim=hidden_dim; dropout=0.0;\n",
+ " \n",
+ " if MODEL_NAME == '3WLGNN':\n",
+ " seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " #L=4; hidden_dim=145; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
+ " L=3; hidden_dim=76; out_dim=hidden_dim; dropout=0.0;\n",
" \n",
"\n",
" # generic new_params\n",
@@ -276,7 +287,7 @@
" net_params['n_heads'] = n_heads\n",
" net_params['L'] = L # min L should be 2\n",
" net_params['readout'] = \"mean\"\n",
- " net_params['graph_norm'] = True\n",
+ " net_params['layer_norm'] = True\n",
" net_params['batch_norm'] = True\n",
" net_params['in_feat_dropout'] = 0.0\n",
" net_params['dropout'] = 0.0\n",
@@ -305,18 +316,18 @@
" net_params['cat'] = False\n",
" net_params['batch_size'] = batch_size \n",
" \n",
+ " # specific for RingGNN\n",
+ " net_params['radius'] = 2\n",
+ " num_nodes = [dataset.all[i][0].number_of_nodes() for i in range(len(dataset.all))]\n",
+ " net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))\n",
+ " \n",
+ " # specific for 3WLGNN\n",
+ " net_params['depth_of_mlp'] = 2\n",
+ " \n",
" # calculate assignment dimension: pool_ratio * largest graph's maximum\n",
" # number of nodes in the dataset\n",
- " num_nodes = [dataset.all[i][0].number_of_nodes() for i in range(len(dataset.all))]\n",
" max_num_node = max(num_nodes)\n",
- " net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']\n",
- "\n",
- " # setting seeds\n",
- " random.seed(seed)\n",
- " np.random.seed(seed)\n",
- " torch.manual_seed(seed)\n",
- " if device == 'cuda':\n",
- " torch.cuda.manual_seed(seed)\n"
+ " net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']\n"
]
},
{
@@ -330,7 +341,7 @@
"text": [
"MODEL DETAILS:\n",
"\n",
- "MODEL/Total parameters: GatedGCN 116253\n"
+ "MODEL/Total parameters: GatedGCN 107853\n"
]
}
],
@@ -363,7 +374,7 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
@@ -374,6 +385,7 @@
"def train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs):\n",
" avg_test_acc = []\n",
" avg_train_acc = []\n",
+ " avg_convergence_epochs = []\n",
"\n",
" t0 = time.time()\n",
" per_epoch_time = []\n",
@@ -406,7 +418,7 @@
" random.seed(params['seed'])\n",
" np.random.seed(params['seed'])\n",
" torch.manual_seed(params['seed'])\n",
- " if device == 'cuda':\n",
+ " if device.type == 'cuda':\n",
" torch.cuda.manual_seed(params['seed'])\n",
"\n",
" print(\"RUN NUMBER: \", split_number)\n",
@@ -430,19 +442,36 @@
" # batching exception for Diffpool\n",
" drop_last = True if MODEL_NAME == 'DiffPool' else False\n",
"\n",
- " train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)\n",
- " val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
- " test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
+ " if MODEL_NAME in ['RingGNN', '3WLGNN']:\n",
+ " # import train functions specific for WL-GNNs\n",
+ " from train.train_TUs_graph_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network\n",
"\n",
+ " train_loader = DataLoader(trainset, shuffle=True, collate_fn=dataset.collate_dense_gnn)\n",
+ " val_loader = DataLoader(valset, shuffle=False, collate_fn=dataset.collate_dense_gnn)\n",
+ " test_loader = DataLoader(testset, shuffle=False, collate_fn=dataset.collate_dense_gnn)\n",
"\n",
+ " else:\n",
+ " # import train functions for all other GCNs\n",
+ " from train.train_TUs_graph_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network\n",
+ "\n",
+ " train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)\n",
+ " val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
+ " test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
+ " \n",
" with tqdm(range(params['epochs'])) as t:\n",
" for epoch in t:\n",
"\n",
" t.set_description('Epoch %d' % epoch) \n",
"\n",
" start = time.time()\n",
- " epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)\n",
+ " \n",
+    "            if MODEL_NAME in ['RingGNN', '3WLGNN']: # dense WL-GNNs take an explicit batch_size for gradient accumulation\n",
+ " epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])\n",
+    "            else: # all other models share the common train function\n",
+ " epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)\n",
+ "\n",
" epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)\n",
+ " _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)\n",
"\n",
" epoch_train_losses.append(epoch_train_loss)\n",
" epoch_val_losses.append(epoch_val_loss)\n",
@@ -453,6 +482,7 @@
" writer.add_scalar('val/_loss', epoch_val_loss, epoch)\n",
" writer.add_scalar('train/_acc', epoch_train_acc, epoch)\n",
" writer.add_scalar('val/_acc', epoch_val_acc, epoch)\n",
+ " writer.add_scalar('test/_acc', epoch_test_acc, epoch)\n",
" writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)\n",
"\n",
" _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)\n",
@@ -492,9 +522,11 @@
" _, train_acc = evaluate_network(model, device, train_loader, epoch) \n",
" avg_test_acc.append(test_acc) \n",
" avg_train_acc.append(train_acc)\n",
+ " avg_convergence_epochs.append(epoch)\n",
"\n",
" print(\"Test Accuracy [LAST EPOCH]: {:.4f}\".format(test_acc))\n",
" print(\"Train Accuracy [LAST EPOCH]: {:.4f}\".format(train_acc))\n",
+ " print(\"Convergence Time (Epochs): {:.4f}\".format(epoch))\n",
" \n",
" except KeyboardInterrupt:\n",
" print('-' * 89)\n",
@@ -503,7 +535,7 @@
" \n",
" print(\"TOTAL TIME TAKEN: {:.4f}hrs\".format((time.time()-t0)/3600))\n",
" print(\"AVG TIME PER EPOCH: {:.4f}s\".format(np.mean(per_epoch_time)))\n",
- "\n",
+ " print(\"AVG CONVERGENCE Time (Epochs): {:.4f}\".format(np.mean(np.array(avg_convergence_epochs))))\n",
" # Final test accuracy value averaged over 10-fold\n",
" print(\"\"\"\\n\\n\\nFINAL RESULTS\\n\\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}\"\"\"\\\n",
" .format(np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100))\n",
@@ -520,33 +552,17 @@
" with open(write_file_name + '.txt', 'w') as f:\n",
" f.write(\"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n{}\\n\\nTotal Parameters: {}\\n\\n\n",
" FINAL RESULTS\\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}\\nTRAIN ACCURACY averaged: {:.4f} with s.d. {:.4f}\\n\\n\n",
- " Total Time Taken: {:.4f} hrs\\nAverage Time Per Epoch: {:.4f} s\\n\\n\\nAll Splits Test Accuracies: {}\"\"\"\\\n",
+ " Average Convergence Time (Epochs): {:.4f} with s.d. {:.4f}\\nTotal Time Taken: {:.4f} hrs\\nAverage Time Per Epoch: {:.4f} s\\n\\n\\nAll Splits Test Accuracies: {}\"\"\"\\\n",
" .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],\n",
" np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100,\n",
" np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100,\n",
- " (time.time()-t0)/3600, np.mean(per_epoch_time), avg_test_acc))\n",
- " \n",
- "\n",
- " # send results to gmail\n",
- " try:\n",
- " from gmail import send\n",
- " subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)\n",
- " body = \"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n{}\\n\\nTotal Parameters: {}\\n\\n\n",
- " FINAL RESULTS\\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}\\nTRAIN ACCURACY averaged: {:.4f} with s.d. {:.4f}\\n\\n\n",
- " Total Time Taken: {:.4f} hrs\\nAverage Time Per Epoch: {:.4f} s\\n\\n\\nAll Splits Test Accuracies: {}\"\"\"\\\n",
- " .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],\n",
- " np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100,\n",
- " np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100,\n",
- " (time.time()-t0)/3600, np.mean(per_epoch_time), avg_test_acc)\n",
- " send(subject, body)\n",
- " except:\n",
- " pass\n",
- " \n"
+ " np.mean(avg_convergence_epochs), np.std(avg_convergence_epochs),\n",
+ " (time.time()-t0)/3600, np.mean(per_epoch_time), avg_test_acc))"
]
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 10,
"metadata": {},
"outputs": [
{
@@ -556,30 +572,85 @@
"Convert main_TUs_graph_classification.ipynb to main_TUs_graph_classification.py\n",
"Clean main_TUs_graph_classification.py\n",
"Done. \n",
- "No Node Attribute Data\n",
- "[!] Dataset: DD\n",
- "Time taken: 12.4625s\n",
+ "[!] Dataset: PROTEINS_full\n",
+ "Time taken: 2.5648s\n",
"cuda not available\n",
"MODEL DETAILS:\n",
"\n",
- "MODEL/Total parameters: GatedGCN 116253\n"
+ "MODEL/Total parameters: GatedGCN 107853\n",
+ "[!] Dataset: PROTEINS_full\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Epoch 0: 0%| | 0/1000 [00:00, ?it/s]"
]
},
{
- "ename": "KeyboardInterrupt",
- "evalue": "",
- "output_type": "error",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
- "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 236\u001b[0m \u001b[0mcleaner_main\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'main_TUs_graph_classification'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 237\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 238\u001b[0;31m \u001b[0mmain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 239\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 240\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m\u001b[0m in \u001b[0;36mmain\u001b[0;34m(notebook_mode, config)\u001b[0m\n\u001b[1;32m 195\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 196\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'total_param'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mview_model_param\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mMODEL_NAME\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 197\u001b[0;31m \u001b[0mtrain_val_pipeline\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mMODEL_NAME\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mDATASET_NAME\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdirs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 198\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 199\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m\u001b[0m in \u001b[0;36mtrain_val_pipeline\u001b[0;34m(MODEL_NAME, DATASET_NAME, params, net_params, dirs)\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0mper_epoch_time\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 12\u001b[0;31m \u001b[0mdataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mLoadData\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mDATASET_NAME\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mMODEL_NAME\u001b[0m \u001b[0;32min\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m'GCN'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'GAT'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/data/data.py\u001b[0m in \u001b[0;36mLoadData\u001b[0;34m(DATASET_NAME)\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0mTU_DATASETS\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m'COLLAB'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'ENZYMES'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'DD'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'PROTEINS_full'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mDATASET_NAME\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mTU_DATASETS\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 27\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mTUsDataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mDATASET_NAME\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 28\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0;31m# handling for SBM datasets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/data/TUs.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 141\u001b[0m \u001b[0;31m#dataset = TUDataset(self.name, hidden_size=1)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 142\u001b[0;31m \u001b[0mdataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mLegacyTUDataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhidden_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# dgl 4.0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 143\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 144\u001b[0m \u001b[0;31m# frankenstein has labels 0 and 2; so correcting them as 0 and 1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/data/tu.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, name, use_pandas, hidden_size, max_allow_node)\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 41\u001b[0m DS_edge_list = self._idx_from_zero(\n\u001b[0;32m---> 42\u001b[0;31m np.genfromtxt(self._file_path(\"A\"), delimiter=\",\", dtype=int))\n\u001b[0m\u001b[1;32m 43\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 44\u001b[0m DS_indicator = self._idx_from_zero(\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/numpy/lib/npyio.py\u001b[0m in \u001b[0;36mgenfromtxt\u001b[0;34m(fname, dtype, comments, delimiter, skip_header, skip_footer, converters, missing_values, filling_values, usecols, names, excludelist, deletechars, replace_space, autostrip, case_sensitive, defaultfmt, unpack, usemask, loose, invalid_raise, max_rows, encoding)\u001b[0m\n\u001b[1;32m 2203\u001b[0m \u001b[0mdtype\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mttype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2204\u001b[0m \u001b[0;31m#\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2205\u001b[0;31m \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2206\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0musemask\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2207\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnames\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Time taken: 2.7937s\n",
+ "RUN NUMBER: 0\n",
+ "Training Graphs: 889\n",
+ "Validation Graphs: 112\n",
+ "Test Graphs: 112\n",
+ "Number of Classes: 2\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Epoch 0: 0%| | 0/1000 [00:11, ?it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "-----------------------------------------------------------------------------------------\n",
+ "Exiting from training early because of KeyboardInterrupt\n",
+ "TOTAL TIME TAKEN: 0.0038hrs\n",
+ "AVG TIME PER EPOCH: nans\n",
+ "AVG CONVERGENCE Time (Epochs): nan\n",
+ "\n",
+ "\n",
+ "\n",
+ "FINAL RESULTS\n",
+ "\n",
+ "TEST ACCURACY averaged: nan with s.d. nan\n",
+ "\n",
+ "All splits Test Accuracies:\n",
+ " []\n",
+ "\n",
+ "\n",
+ "\n",
+ "FINAL RESULTS\n",
+ "\n",
+ "TRAIN ACCURACY averaged: nan with s.d. nan\n",
+ "\n",
+ "All splits Train Accuracies:\n",
+ " []\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "/home/vijay/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/numpy/core/fromnumeric.py:3118: RuntimeWarning: Mean of empty slice.\n",
+ " out=out, **kwargs)\n",
+ "/home/vijay/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/numpy/core/_methods.py:85: RuntimeWarning: invalid value encountered in double_scalars\n",
+ " ret = ret.dtype.type(ret / rcount)\n",
+ "/home/vijay/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/numpy/core/_methods.py:140: RuntimeWarning: Degrees of freedom <= 0 for slice\n",
+ " keepdims=keepdims)\n",
+ "/home/vijay/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/numpy/core/_methods.py:110: RuntimeWarning: invalid value encountered in true_divide\n",
+ " arrmean, rcount, out=arrmean, casting='unsafe', subok=False)\n",
+ "/home/vijay/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/numpy/core/_methods.py:132: RuntimeWarning: invalid value encountered in double_scalars\n",
+ " ret = ret.dtype.type(ret / rcount)\n"
]
}
],
@@ -619,7 +690,7 @@
" parser.add_argument('--gated', help=\"Please give a value for gated\")\n",
" parser.add_argument('--in_feat_dropout', help=\"Please give a value for in_feat_dropout\")\n",
" parser.add_argument('--dropout', help=\"Please give a value for dropout\")\n",
- " parser.add_argument('--graph_norm', help=\"Please give a value for graph_norm\")\n",
+ " parser.add_argument('--layer_norm', help=\"Please give a value for layer_norm\")\n",
" parser.add_argument('--batch_norm', help=\"Please give a value for batch_norm\")\n",
" parser.add_argument('--sage_aggregator', help=\"Please give a value for sage_aggregator\")\n",
" parser.add_argument('--data_mode', help=\"Please give a value for data_mode\")\n",
@@ -707,8 +778,8 @@
" net_params['in_feat_dropout'] = float(args.in_feat_dropout)\n",
" if args.dropout is not None:\n",
" net_params['dropout'] = float(args.dropout)\n",
- " if args.graph_norm is not None:\n",
- " net_params['graph_norm'] = True if args.graph_norm=='True' else False\n",
+ " if args.layer_norm is not None:\n",
+ " net_params['layer_norm'] = True if args.layer_norm=='True' else False\n",
" if args.batch_norm is not None:\n",
" net_params['batch_norm'] = True if args.batch_norm=='True' else False\n",
" if args.sage_aggregator is not None:\n",
@@ -766,6 +837,10 @@
" num_nodes = [dataset.all[i][0].number_of_nodes() for i in range(len(dataset.all))]\n",
" max_num_node = max(num_nodes)\n",
" net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']\n",
+ " \n",
+ " if MODEL_NAME == 'RingGNN':\n",
+ " num_nodes = [dataset.all[i][0].number_of_nodes() for i in range(len(dataset.all))]\n",
+ " net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))\n",
" \n",
" root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
" root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
@@ -829,6 +904,20 @@
" "
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
{
"cell_type": "code",
"execution_count": null,
diff --git a/main_TUs_graph_classification.py b/main_TUs_graph_classification.py
index 4ca0ce021..e2a3ca3d3 100644
--- a/main_TUs_graph_classification.py
+++ b/main_TUs_graph_classification.py
@@ -43,7 +43,6 @@ def __init__(self, **kwds):
from nets.TUs_graph_classification.load_net import gnn_model # import GNNs
from data.data import LoadData # import dataset
-from train.train_TUs_graph_classification import train_epoch, evaluate_network # import train functions
@@ -68,10 +67,6 @@ def gpu_setup(use_gpu, gpu_id):
-
-
-
-
"""
VIEWING MODEL CONFIG AND PARAMS
"""
@@ -93,6 +88,7 @@ def view_model_param(MODEL_NAME, net_params):
def train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs):
avg_test_acc = []
avg_train_acc = []
+ avg_convergence_epochs = []
t0 = time.time()
per_epoch_time = []
@@ -124,7 +120,7 @@ def train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs):
random.seed(params['seed'])
np.random.seed(params['seed'])
torch.manual_seed(params['seed'])
- if device == 'cuda':
+ if device.type == 'cuda':
torch.cuda.manual_seed(params['seed'])
print("RUN NUMBER: ", split_number)
@@ -148,19 +144,36 @@ def train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs):
# batching exception for Diffpool
drop_last = True if MODEL_NAME == 'DiffPool' else False
- train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)
- val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
- test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
+ if MODEL_NAME in ['RingGNN', '3WLGNN']:
+ # import train functions specific for WL-GNNs
+ from train.train_TUs_graph_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
+ train_loader = DataLoader(trainset, shuffle=True, collate_fn=dataset.collate_dense_gnn)
+ val_loader = DataLoader(valset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
+ test_loader = DataLoader(testset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
+ else:
+ # import train functions for all other GCNs
+ from train.train_TUs_graph_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
+
+ train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)
+ val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
+ test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
+
with tqdm(range(params['epochs'])) as t:
for epoch in t:
t.set_description('Epoch %d' % epoch)
start = time.time()
- epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
+
+            if MODEL_NAME in ['RingGNN', '3WLGNN']: # dense WL-GNNs take an explicit batch_size for gradient accumulation
+ epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])
+            else: # all other models share the common train function
+ epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
+
epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)
+ _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
epoch_train_losses.append(epoch_train_loss)
epoch_val_losses.append(epoch_val_loss)
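Because the dense loaders yield one graph per step, `train_epoch_dense` takes the configured `batch_size` as an extra argument and emulates mini-batches by accumulating gradients. A condensed sketch of that pattern; the repository's function also tracks accuracy, so this is illustrative rather than a drop-in:

```
def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
    model.train()
    epoch_loss = 0.0
    optimizer.zero_grad()
    for it, (x, labels) in enumerate(data_loader):
        x, labels = x.to(device), labels.to(device)
        loss = model.loss(model(x), labels)
        loss.backward()                      # gradients accumulate across graphs
        if (it + 1) % batch_size == 0:       # step once per emulated mini-batch
            optimizer.step()
            optimizer.zero_grad()
        epoch_loss += loss.detach().item()
    return epoch_loss / (it + 1), optimizer
```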
@@ -171,6 +184,7 @@ def train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs):
writer.add_scalar('val/_loss', epoch_val_loss, epoch)
writer.add_scalar('train/_acc', epoch_train_acc, epoch)
writer.add_scalar('val/_acc', epoch_val_acc, epoch)
+ writer.add_scalar('test/_acc', epoch_test_acc, epoch)
writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
_, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
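The new `test/_acc` scalar above adds a third curve next to the existing train/val ones. For reference, a self-contained TensorBoard logging snippet with toy values and a hypothetical log directory (requires the `tensorboard` package):

```
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('out/demo_logs')
for epoch, (tr, va, te) in enumerate([(0.61, 0.58, 0.57), (0.70, 0.64, 0.62)]):
    writer.add_scalar('train/_acc', tr, epoch)
    writer.add_scalar('val/_acc', va, epoch)
    writer.add_scalar('test/_acc', te, epoch)  # curve introduced by this patch
writer.close()
```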
@@ -210,9 +224,11 @@ def train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs):
_, train_acc = evaluate_network(model, device, train_loader, epoch)
avg_test_acc.append(test_acc)
avg_train_acc.append(train_acc)
+ avg_convergence_epochs.append(epoch)
print("Test Accuracy [LAST EPOCH]: {:.4f}".format(test_acc))
print("Train Accuracy [LAST EPOCH]: {:.4f}".format(train_acc))
+ print("Convergence Time (Epochs): {:.4f}".format(epoch))
except KeyboardInterrupt:
print('-' * 89)
@@ -221,7 +237,7 @@ def train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs):
print("TOTAL TIME TAKEN: {:.4f}hrs".format((time.time()-t0)/3600))
print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
-
+ print("AVG CONVERGENCE Time (Epochs): {:.4f}".format(np.mean(np.array(avg_convergence_epochs))))
# Final test accuracy value averaged over 10-fold
print("""\n\n\nFINAL RESULTS\n\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}""" .format(np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100))
print("\nAll splits Test Accuracies:\n", avg_test_acc)
@@ -236,28 +252,12 @@ def train_val_pipeline(MODEL_NAME, DATASET_NAME, params, net_params, dirs):
with open(write_file_name + '.txt', 'w') as f:
f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
FINAL RESULTS\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}\nTRAIN ACCURACY averaged: {:.4f} with s.d. {:.4f}\n\n
- Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\nAll Splits Test Accuracies: {}"""\
+ Average Convergence Time (Epochs): {:.4f} with s.d. {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\nAll Splits Test Accuracies: {}"""\
.format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100,
np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100,
+ np.mean(avg_convergence_epochs), np.std(avg_convergence_epochs),
(time.time()-t0)/3600, np.mean(per_epoch_time), avg_test_acc))
-
-
- # send results to gmail
- try:
- from gmail import send
- subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)
- body = """Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
- FINAL RESULTS\nTEST ACCURACY averaged: {:.4f} with s.d. {:.4f}\nTRAIN ACCURACY averaged: {:.4f} with s.d. {:.4f}\n\n
- Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\nAll Splits Test Accuracies: {}"""\
- .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
- np.mean(np.array(avg_test_acc))*100, np.std(avg_test_acc)*100,
- np.mean(np.array(avg_train_acc))*100, np.std(avg_train_acc)*100,
- (time.time()-t0)/3600, np.mean(per_epoch_time), avg_test_acc)
- send(subject, body)
- except:
- pass
-
@@ -294,7 +294,7 @@ def main():
parser.add_argument('--gated', help="Please give a value for gated")
parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
parser.add_argument('--dropout', help="Please give a value for dropout")
- parser.add_argument('--graph_norm', help="Please give a value for graph_norm")
+ parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
parser.add_argument('--data_mode', help="Please give a value for data_mode")
@@ -378,8 +378,8 @@ def main():
net_params['in_feat_dropout'] = float(args.in_feat_dropout)
if args.dropout is not None:
net_params['dropout'] = float(args.dropout)
- if args.graph_norm is not None:
- net_params['graph_norm'] = True if args.graph_norm=='True' else False
+ if args.layer_norm is not None:
+ net_params['layer_norm'] = True if args.layer_norm=='True' else False
if args.batch_norm is not None:
net_params['batch_norm'] = True if args.batch_norm=='True' else False
if args.sage_aggregator is not None:
@@ -414,6 +414,10 @@ def main():
num_nodes = [dataset.all[i][0].number_of_nodes() for i in range(len(dataset.all))]
max_num_node = max(num_nodes)
net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']
+
+ if MODEL_NAME == 'RingGNN':
+ num_nodes = [dataset.all[i][0].number_of_nodes() for i in range(len(dataset.all))]
+ net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
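For context, the DiffPool `assign_dim` computed just above sizes the assignment matrix from the largest graph in the dataset. A worked example with hypothetical numbers:

```
pool_ratio = 0.15      # fraction of nodes kept per pooling step
batch_size = 20
max_num_node = 126     # largest graph in the dataset

assign_dim = int(max_num_node * pool_ratio) * batch_size
print(assign_dim)      # int(18.9) * 20 = 360
```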
diff --git a/main_molecules_graph_regression.ipynb b/main_molecules_graph_regression.ipynb
index ac8088ff0..42a7dd3c4 100644
--- a/main_molecules_graph_regression.ipynb
+++ b/main_molecules_graph_regression.ipynb
@@ -17,8 +17,10 @@
"- GAT \n",
"- GraphSage \n",
"- GIN \n",
- "- DiffPool\n",
- "- MLP\n",
+ "- MoNet \n",
+ "- MLP \n",
+ "- RingGNN \n",
+ "- 3WLGNN \n",
"\n",
"### DATASET\n",
"- ZINC Molecules \n",
@@ -106,8 +108,7 @@
" IMPORTING CUSTOM MODULES/METHODS\n",
"\"\"\"\n",
"from nets.molecules_graph_regression.load_net import gnn_model # import all GNNS\n",
- "from data.data import LoadData # import dataset\n",
- "from train.train_molecules_graph_regression import train_epoch, evaluate_network # import train functions"
+ "from data.data import LoadData # import dataset"
]
},
{
@@ -150,7 +151,7 @@
"[I] Loading dataset ZINC...\n",
"train, test, val sizes : 10000 1000 1000\n",
"[I] Finished loading.\n",
- "[I] Data load time: 6.4807s\n",
+ "[I] Data load time: 7.1463s\n",
"[I] Finished loading.\n"
]
}
@@ -161,9 +162,11 @@
"# \"\"\"\n",
"if notebook_mode == True:\n",
" \n",
+ " #MODEL_NAME = '3WLGNN'\n",
+ " #MODEL_NAME = 'RingGNN'\n",
" MODEL_NAME = 'GatedGCN'\n",
" #MODEL_NAME = 'MoNet'\n",
- " MODEL_NAME = 'GCN'\n",
+ " #MODEL_NAME = 'GCN'\n",
" # MODEL_NAME = 'GAT'\n",
" # MODEL_NAME = 'GraphSage'\n",
" # MODEL_NAME = 'DiffPool'\n",
@@ -188,6 +191,7 @@
"metadata": {},
"outputs": [],
"source": [
+ "#MODEL_NAME = 'RingGNN'\n",
"MODEL_NAME = 'GatedGCN'\n",
"#MODEL_NAME = 'GCN'\n",
"#MODEL_NAME = 'GAT'\n",
@@ -220,7 +224,10 @@
" gated = False\n",
" self_loop = False\n",
" #self_loop = True\n",
- " max_time = 48\n",
+ " max_time = 12\n",
+ " pos_enc = True\n",
+ " #pos_enc = False\n",
+ " pos_enc_dim = 8\n",
" \n",
"\n",
" if MODEL_NAME == 'GatedGCN':\n",
@@ -261,7 +268,16 @@
" seed=41; epochs=1000; batch_size=50; init_lr=5e-4; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
" L=4; hidden_dim=90; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
" pseudo_dim_MoNet=2; kernel=3;\n",
- " \n",
+ " \n",
+ " if MODEL_NAME == 'RingGNN':\n",
+ " seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " #L=4; hidden_dim=145; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
+ " L=4; hidden_dim=22; out_dim=hidden_dim; dropout=0.0; edge_feat=False\n",
+ " \n",
+ " if MODEL_NAME == '3WLGNN':\n",
+ " seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " #L=4; hidden_dim=145; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
+ " L=3; hidden_dim=79; out_dim=hidden_dim; dropout=0.0; edge_feat=False\n",
" \n",
" \n",
" # generic new_params\n",
@@ -275,7 +291,7 @@
" net_params['n_heads'] = n_heads\n",
" net_params['L'] = L # min L should be 2\n",
" net_params['readout'] = \"sum\"\n",
- " net_params['graph_norm'] = True\n",
+ " net_params['layer_norm'] = True\n",
" net_params['batch_norm'] = True\n",
" net_params['in_feat_dropout'] = 0.0\n",
" net_params['dropout'] = 0.0\n",
@@ -306,20 +322,24 @@
" net_params['num_pool'] = 1\n",
" net_params['cat'] = False\n",
" net_params['batch_size'] = batch_size \n",
+ " \n",
+ " # specific for RingGNN\n",
+ " net_params['radius'] = 2\n",
+ " num_nodes = [trainset[i][0].number_of_nodes() for i in range(len(trainset))]\n",
+ " net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))\n",
+ " \n",
+ " # specific for 3WLGNN\n",
+ " net_params['depth_of_mlp'] = 2\n",
"\n",
" # calculate assignment dimension: pool_ratio * largest graph's maximum\n",
" # number of nodes in the dataset\n",
- " \n",
- " num_nodes = [trainset[i][0].number_of_nodes() for i in range(len(trainset))]\n",
" max_num_node = max(num_nodes)\n",
" net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']\n",
" \n",
- " # setting seeds\n",
- " random.seed(seed)\n",
- " np.random.seed(seed)\n",
- " torch.manual_seed(seed)\n",
- " if device == 'cuda':\n",
- " torch.cuda.manual_seed(seed)"
+ " # specific for pos_enc_dim\n",
+ " net_params['pos_enc'] = pos_enc\n",
+ " net_params['pos_enc_dim'] = pos_enc_dim\n",
+ " "
]
},
{
@@ -333,7 +353,7 @@
"text": [
"MODEL DETAILS:\n",
"\n",
- "MODEL/Total parameters: GatedGCN 105735\n"
+ "MODEL/Total parameters: GatedGCN 106365\n"
]
}
],
@@ -366,7 +386,7 @@
},
{
"cell_type": "code",
- "execution_count": 19,
+ "execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
@@ -384,7 +404,13 @@
" if net_params['self_loop']:\n",
" print(\"[!] Adding graph self-loops for GCN/GAT models (central node trick).\")\n",
" dataset._add_self_loops()\n",
- " \n",
+ " \n",
+ " if MODEL_NAME in ['GatedGCN']:\n",
+ " if net_params['pos_enc']:\n",
+ " print(\"[!] Adding graph positional encoding.\")\n",
+ " dataset._add_positional_encodings(net_params['pos_enc_dim'])\n",
+ " print('Time PE:',time.time()-t0)\n",
+ " \n",
" trainset, valset, testset = dataset.train, dataset.val, dataset.test\n",
" \n",
" root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs\n",
@@ -402,7 +428,7 @@
" random.seed(params['seed'])\n",
" np.random.seed(params['seed'])\n",
" torch.manual_seed(params['seed'])\n",
- " if device == 'cuda':\n",
+ " if device.type == 'cuda':\n",
" torch.cuda.manual_seed(params['seed'])\n",
" \n",
" print(\"Training Graphs: \", len(trainset))\n",
@@ -424,10 +450,22 @@
" # batching exception for Diffpool\n",
" drop_last = True if MODEL_NAME == 'DiffPool' else False\n",
" \n",
- " train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)\n",
- " val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
- " test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
+ " if MODEL_NAME in ['RingGNN', '3WLGNN']:\n",
+    "        # import train functions specific for WL-GNNs\n",
+ " from train.train_molecules_graph_regression import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network\n",
+ " from functools import partial # util function to pass edge_feat to collate function\n",
+ "\n",
+ " train_loader = DataLoader(trainset, shuffle=True, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))\n",
+ " val_loader = DataLoader(valset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))\n",
+ " test_loader = DataLoader(testset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))\n",
" \n",
+ " else:\n",
+ " # import train functions for all other GNNs\n",
+ " from train.train_molecules_graph_regression import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network\n",
+ " \n",
+ " train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)\n",
+ " val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
+ " test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
" \n",
" # At any point you can hit Ctrl + C to break out of training early.\n",
" try:\n",
@@ -438,9 +476,14 @@
"\n",
" start = time.time()\n",
"\n",
- " epoch_train_loss, epoch_train_mae, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)\n",
+    "            if MODEL_NAME in ['RingGNN', '3WLGNN']: # dense WL-GNNs take an explicit batch_size for gradient accumulation\n",
+ " epoch_train_loss, epoch_train_mae, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])\n",
+    "            else: # all other models share the common train function\n",
+ " epoch_train_loss, epoch_train_mae, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)\n",
+ " \n",
" epoch_val_loss, epoch_val_mae = evaluate_network(model, device, val_loader, epoch)\n",
- "\n",
+ " _, epoch_test_mae = evaluate_network(model, device, test_loader, epoch)\n",
+ " \n",
" epoch_train_losses.append(epoch_train_loss)\n",
" epoch_val_losses.append(epoch_val_loss)\n",
" epoch_train_MAEs.append(epoch_train_mae)\n",
@@ -450,13 +493,14 @@
" writer.add_scalar('val/_loss', epoch_val_loss, epoch)\n",
" writer.add_scalar('train/_mae', epoch_train_mae, epoch)\n",
" writer.add_scalar('val/_mae', epoch_val_mae, epoch)\n",
+ " writer.add_scalar('test/_mae', epoch_test_mae, epoch)\n",
" writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)\n",
"\n",
- " _, epoch_test_mae = evaluate_network(model, device, test_loader, epoch) \n",
+ " \n",
" t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],\n",
" train_loss=epoch_train_loss, val_loss=epoch_val_loss,\n",
- " train_MAE=epoch_train_mae.item(), val_MAE=epoch_val_mae.item(),\n",
- " test_MAE=epoch_test_mae.item())\n",
+ " train_MAE=epoch_train_mae, val_MAE=epoch_val_mae,\n",
+ " test_MAE=epoch_test_mae)\n",
"\n",
"\n",
" per_epoch_time.append(time.time()-start)\n",
@@ -494,6 +538,7 @@
" _, train_mae = evaluate_network(model, device, train_loader, epoch)\n",
" print(\"Test MAE: {:.4f}\".format(test_mae))\n",
" print(\"Train MAE: {:.4f}\".format(train_mae))\n",
+ " print(\"Convergence Time (Epochs): {:.4f}\".format(epoch))\n",
" print(\"TOTAL TIME TAKEN: {:.4f}s\".format(time.time()-t0))\n",
" print(\"AVG TIME PER EPOCH: {:.4f}s\".format(np.mean(per_epoch_time)))\n",
"\n",
@@ -505,30 +550,15 @@
" with open(write_file_name + '.txt', 'w') as f:\n",
" f.write(\"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n{}\\n\\nTotal Parameters: {}\\n\\n\n",
" FINAL RESULTS\\nTEST MAE: {:.4f}\\nTRAIN MAE: {:.4f}\\n\\n\n",
- " Total Time Taken: {:.4f} hrs\\nAverage Time Per Epoch: {:.4f} s\\n\\n\\n\"\"\"\\\n",
- " .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],\n",
- " np.mean(np.array(test_mae.cpu())), np.array(train_mae.cpu()), (time.time()-t0)/3600, np.mean(per_epoch_time)))\n",
- " \n",
- "\n",
- " \n",
- " # send results to gmail\n",
- " try:\n",
- " from gmail import send\n",
- " subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)\n",
- " body = \"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n{}\\n\\nTotal Parameters: {}\\n\\n\n",
- " FINAL RESULTS\\nTEST MAE: {:.4f}\\nTRAIN MAE: {:.4f}\\n\\n\n",
- " Total Time Taken: {:.4f} hrs\\nAverage Time Per Epoch: {:.4f} s\\n\\n\\n\"\"\"\\\n",
+ " Convergence Time (Epochs): {:.4f}\\nTotal Time Taken: {:.4f} hrs\\nAverage Time Per Epoch: {:.4f} s\\n\\n\\n\"\"\"\\\n",
" .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],\n",
- " np.mean(np.array(test_mae.cpu())), np.array(train_mae.cpu()), (time.time()-t0)/3600, np.mean(per_epoch_time))\n",
- " send(subject, body)\n",
- " except:\n",
- " pass\n",
+ " test_mae, train_mae, epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))\n",
" "
]
},
{
"cell_type": "code",
- "execution_count": 20,
+ "execution_count": 10,
"metadata": {},
"outputs": [
{
@@ -538,49 +568,24 @@
"Convert main_molecules_graph_regression.ipynb to main_molecules_graph_regression.py\n",
"Clean main_molecules_graph_regression.py\n",
"Done. \n",
- "[I] Loading dataset ZINC...\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Epoch 0: 0%| | 0/1000 [00:00, ?it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
+ "[I] Loading dataset ZINC...\n",
"train, test, val sizes : 10000 1000 1000\n",
"[I] Finished loading.\n",
- "[I] Data load time: 8.0048s\n",
- "cuda not available\n",
- "MODEL DETAILS:\n",
- "\n",
- "MODEL/Total parameters: GatedGCN 105735\n",
- "Training Graphs: 10000\n",
- "Validation Graphs: 1000\n",
- "Test Graphs: 1000\n"
+ "[I] Data load time: 8.1105s\n"
]
},
{
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Epoch 0: 0%| | 0/1000 [00:00, ?it/s]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "-----------------------------------------------------------------------------------------\n",
- "Exiting from training early because of KeyboardInterrupt\n",
- "Test MAE: 3.4859\n",
- "Train MAE: 3.4787\n",
- "TOTAL TIME TAKEN: 45.3402s\n",
- "AVG TIME PER EPOCH: nans\n"
+ "ename": "KeyboardInterrupt",
+ "evalue": "",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 245\u001b[0m \u001b[0mcleaner_main\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'main_molecules_graph_regression'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 246\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 247\u001b[0;31m \u001b[0mmain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 248\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 249\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36mmain\u001b[0;34m(notebook_mode, config)\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 164\u001b[0m \u001b[0;31m# device\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 165\u001b[0;31m \u001b[0mdevice\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgpu_setup\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'gpu'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'use'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'gpu'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'id'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 166\u001b[0m \u001b[0mout_dir\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'out_dir'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 167\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36mgpu_setup\u001b[0;34m(use_gpu, gpu_id)\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0menviron\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"CUDA_VISIBLE_DEVICES\"\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgpu_id\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m \u001b[0;32mif\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_available\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0muse_gpu\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 9\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'cuda available with GPU:'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_device_name\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0mdevice\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"cuda\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/torch/cuda/__init__.py\u001b[0m in \u001b[0;36mis_available\u001b[0;34m()\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0;34mr\"\"\"Returns a bool indicating if CUDA is currently available.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 35\u001b[0m if (not hasattr(torch._C, '_cuda_isDriverSufficient') or\n\u001b[0;32m---> 36\u001b[0;31m not torch._C._cuda_isDriverSufficient()):\n\u001b[0m\u001b[1;32m 37\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_C\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_cuda_getDeviceCount\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
@@ -620,7 +625,7 @@
" parser.add_argument('--gated', help=\"Please give a value for gated\")\n",
" parser.add_argument('--in_feat_dropout', help=\"Please give a value for in_feat_dropout\")\n",
" parser.add_argument('--dropout', help=\"Please give a value for dropout\")\n",
- " parser.add_argument('--graph_norm', help=\"Please give a value for graph_norm\")\n",
+ " parser.add_argument('--layer_norm', help=\"Please give a value for layer_norm\")\n",
" parser.add_argument('--batch_norm', help=\"Please give a value for batch_norm\")\n",
" parser.add_argument('--sage_aggregator', help=\"Please give a value for sage_aggregator\")\n",
" parser.add_argument('--data_mode', help=\"Please give a value for data_mode\")\n",
@@ -632,6 +637,8 @@
" parser.add_argument('--cat', help=\"Please give a value for cat\")\n",
" parser.add_argument('--self_loop', help=\"Please give a value for self_loop\")\n",
" parser.add_argument('--max_time', help=\"Please give a value for max_time\")\n",
+ " parser.add_argument('--pos_enc_dim', help=\"Please give a value for pos_enc_dim\")\n",
+ " parser.add_argument('--pos_enc', help=\"Please give a value for pos_enc\")\n",
" args = parser.parse_args()\n",
" with open(args.config) as f:\n",
" config = json.load(f)\n",
@@ -708,8 +715,8 @@
" net_params['in_feat_dropout'] = float(args.in_feat_dropout)\n",
" if args.dropout is not None:\n",
" net_params['dropout'] = float(args.dropout)\n",
- " if args.graph_norm is not None:\n",
- " net_params['graph_norm'] = True if args.graph_norm=='True' else False\n",
+ " if args.layer_norm is not None:\n",
+ " net_params['layer_norm'] = True if args.layer_norm=='True' else False\n",
" if args.batch_norm is not None:\n",
" net_params['batch_norm'] = True if args.batch_norm=='True' else False\n",
" if args.sage_aggregator is not None:\n",
@@ -730,6 +737,10 @@
" net_params['cat'] = True if args.cat=='True' else False\n",
" if args.self_loop is not None:\n",
" net_params['self_loop'] = True if args.self_loop=='True' else False\n",
+ " if args.pos_enc is not None:\n",
+ " net_params['pos_enc'] = True if args.pos_enc=='True' else False\n",
+ " if args.pos_enc_dim is not None:\n",
+ " net_params['pos_enc_dim'] = int(args.pos_enc_dim)\n",
"\n",
" \n",
" # notebook mode\n",
@@ -767,6 +778,10 @@
" num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]\n",
" max_num_node = max(num_nodes)\n",
" net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']\n",
+ " \n",
+ " if MODEL_NAME == 'RingGNN':\n",
+ " num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]\n",
+ " net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))\n",
" \n",
" root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
" root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
@@ -836,6 +851,13 @@
"outputs": [],
"source": []
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
{
"cell_type": "code",
"execution_count": null,
diff --git a/main_molecules_graph_regression.py b/main_molecules_graph_regression.py
index 49d208c0b..a45db8522 100644
--- a/main_molecules_graph_regression.py
+++ b/main_molecules_graph_regression.py
@@ -42,7 +42,6 @@ def __init__(self, **kwds):
"""
from nets.molecules_graph_regression.load_net import gnn_model # import all GNNS
from data.data import LoadData # import dataset
-from train.train_molecules_graph_regression import train_epoch, evaluate_network # import train functions
@@ -100,7 +99,13 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
if net_params['self_loop']:
print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
dataset._add_self_loops()
-
+
+ if MODEL_NAME in ['GatedGCN']:
+ if net_params['pos_enc']:
+ print("[!] Adding graph positional encoding.")
+ dataset._add_positional_encodings(net_params['pos_enc_dim'])
+ print('Time PE:',time.time()-t0)
+
trainset, valset, testset = dataset.train, dataset.val, dataset.test
root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
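`_add_positional_encodings`, called in the hunk above, attaches Laplacian positional encodings to each graph: eigenvectors of the symmetric-normalised graph Laplacian for the `pos_enc_dim` smallest non-trivial eigenvalues. A condensed sketch on a NetworkX graph; the repository version operates on DGL graphs and stores the result in node data, so treat this as illustrative:

```
import numpy as np
import networkx as nx
import scipy.sparse as sp

def laplacian_pos_enc(g, pos_enc_dim):
    n = g.number_of_nodes()
    A = nx.adjacency_matrix(g).astype(float)
    deg = np.asarray(A.sum(axis=1)).flatten().clip(1)
    D_inv_sqrt = sp.diags(deg ** -0.5)
    L = sp.eye(n) - D_inv_sqrt @ A @ D_inv_sqrt     # I - D^-1/2 A D^-1/2
    eigval, eigvec = np.linalg.eigh(L.toarray())    # ascending eigenvalues
    return eigvec[:, 1:pos_enc_dim + 1]             # drop the trivial eigenvector

print(laplacian_pos_enc(nx.cycle_graph(10), 8).shape)  # (10, 8)
```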
@@ -117,7 +122,7 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
random.seed(params['seed'])
np.random.seed(params['seed'])
torch.manual_seed(params['seed'])
- if device == 'cuda':
+ if device.type == 'cuda':
torch.cuda.manual_seed(params['seed'])
print("Training Graphs: ", len(trainset))
@@ -139,10 +144,22 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
# batching exception for Diffpool
drop_last = True if MODEL_NAME == 'DiffPool' else False
- train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)
- val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
- test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
+ if MODEL_NAME in ['RingGNN', '3WLGNN']:
+        # import train functions specific for WL-GNNs
+ from train.train_molecules_graph_regression import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
+ from functools import partial # util function to pass edge_feat to collate function
+
+ train_loader = DataLoader(trainset, shuffle=True, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))
+ val_loader = DataLoader(valset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))
+ test_loader = DataLoader(testset, shuffle=False, collate_fn=partial(dataset.collate_dense_gnn, edge_feat=net_params['edge_feat']))
+ else:
+ # import train functions for all other GNNs
+ from train.train_molecules_graph_regression import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
+
+ train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)
+ val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
+ test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
# At any point you can hit Ctrl + C to break out of training early.
try:
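`functools.partial` appears above because `DataLoader` invokes `collate_fn` with a single argument (the sample list), leaving no direct way to pass the `edge_feat` switch through. A self-contained illustration of the trick, with a toy dataset and a stand-in collate function:

```
from functools import partial
from torch.utils.data import DataLoader

def collate(samples, edge_feat):
    # DataLoader only ever calls collate_fn(samples); partial freezes
    # the extra keyword ahead of time.
    return samples, edge_feat

loader = DataLoader(list(range(4)), batch_size=2,
                    collate_fn=partial(collate, edge_feat=False))
for batch, edge_feat in loader:
    print(batch, edge_feat)   # [0, 1] False then [2, 3] False
```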
@@ -153,9 +170,14 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
start = time.time()
- epoch_train_loss, epoch_train_mae, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
+            if MODEL_NAME in ['RingGNN', '3WLGNN']: # dense WL-GNNs take an explicit batch_size for gradient accumulation
+ epoch_train_loss, epoch_train_mae, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])
+            else: # all other models share the common train function
+ epoch_train_loss, epoch_train_mae, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
+
epoch_val_loss, epoch_val_mae = evaluate_network(model, device, val_loader, epoch)
-
+ _, epoch_test_mae = evaluate_network(model, device, test_loader, epoch)
+
epoch_train_losses.append(epoch_train_loss)
epoch_val_losses.append(epoch_val_loss)
epoch_train_MAEs.append(epoch_train_mae)
@@ -165,13 +187,14 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
writer.add_scalar('val/_loss', epoch_val_loss, epoch)
writer.add_scalar('train/_mae', epoch_train_mae, epoch)
writer.add_scalar('val/_mae', epoch_val_mae, epoch)
+ writer.add_scalar('test/_mae', epoch_test_mae, epoch)
writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
- _, epoch_test_mae = evaluate_network(model, device, test_loader, epoch)
+
t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
train_loss=epoch_train_loss, val_loss=epoch_val_loss,
- train_MAE=epoch_train_mae.item(), val_MAE=epoch_val_mae.item(),
- test_MAE=epoch_test_mae.item())
+ train_MAE=epoch_train_mae, val_MAE=epoch_val_mae,
+ test_MAE=epoch_test_mae)
per_epoch_time.append(time.time()-start)
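Note: the extra `batch_size` argument passed to the dense `train_epoch` hints at how mini-batching is emulated when the loader yields one graph at a time. The real function lives in `train/train_molecules_graph_regression.py`; the following is only a sketch of the gradient-accumulation pattern the signature implies:

```python
def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
    """Illustrative sketch: one graph per iteration, optimizer.step() every
    batch_size graphs, so gradients accumulate into a virtual mini-batch."""
    model.train()
    epoch_loss, epoch_mae, n = 0.0, 0.0, 0
    optimizer.zero_grad()
    for x_dense, targets in data_loader:
        scores = model(x_dense.to(device))
        loss = model.loss(scores, targets.to(device))
        loss.backward()
        n += 1
        if n % batch_size == 0:   # step once per virtual batch
            optimizer.step()
            optimizer.zero_grad()
        epoch_loss += loss.detach().item()
        epoch_mae += (scores - targets.to(device)).abs().mean().item()
    return epoch_loss / n, epoch_mae / n, optimizer
```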
@@ -209,6 +232,7 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
_, train_mae = evaluate_network(model, device, train_loader, epoch)
print("Test MAE: {:.4f}".format(test_mae))
print("Train MAE: {:.4f}".format(train_mae))
+ print("Convergence Time (Epochs): {:.4f}".format(epoch))
print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-t0))
print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
@@ -220,24 +244,9 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
with open(write_file_name + '.txt', 'w') as f:
f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
FINAL RESULTS\nTEST MAE: {:.4f}\nTRAIN MAE: {:.4f}\n\n
- Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
+ Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
.format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
- np.mean(np.array(test_mae.cpu())), np.array(train_mae.cpu()), (time.time()-t0)/3600, np.mean(per_epoch_time)))
-
-
-
- # send results to gmail
- try:
- from gmail import send
- subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)
- body = """Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
- FINAL RESULTS\nTEST MAE: {:.4f}\nTRAIN MAE: {:.4f}\n\n
- Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
- .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
- np.mean(np.array(test_mae.cpu())), np.array(train_mae.cpu()), (time.time()-t0)/3600, np.mean(per_epoch_time))
- send(subject, body)
- except:
- pass
+ test_mae, train_mae, epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))
@@ -275,7 +284,7 @@ def main():
parser.add_argument('--gated', help="Please give a value for gated")
parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
parser.add_argument('--dropout', help="Please give a value for dropout")
- parser.add_argument('--graph_norm', help="Please give a value for graph_norm")
+ parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
parser.add_argument('--data_mode', help="Please give a value for data_mode")
@@ -287,6 +296,8 @@ def main():
parser.add_argument('--cat', help="Please give a value for cat")
parser.add_argument('--self_loop', help="Please give a value for self_loop")
parser.add_argument('--max_time', help="Please give a value for max_time")
+ parser.add_argument('--pos_enc_dim', help="Please give a value for pos_enc_dim")
+ parser.add_argument('--pos_enc', help="Please give a value for pos_enc")
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
@@ -359,8 +370,8 @@ def main():
net_params['in_feat_dropout'] = float(args.in_feat_dropout)
if args.dropout is not None:
net_params['dropout'] = float(args.dropout)
- if args.graph_norm is not None:
- net_params['graph_norm'] = True if args.graph_norm=='True' else False
+ if args.layer_norm is not None:
+ net_params['layer_norm'] = True if args.layer_norm=='True' else False
if args.batch_norm is not None:
net_params['batch_norm'] = True if args.batch_norm=='True' else False
if args.sage_aggregator is not None:
@@ -381,6 +392,10 @@ def main():
net_params['cat'] = True if args.cat=='True' else False
if args.self_loop is not None:
net_params['self_loop'] = True if args.self_loop=='True' else False
+ if args.pos_enc is not None:
+ net_params['pos_enc'] = True if args.pos_enc=='True' else False
+ if args.pos_enc_dim is not None:
+ net_params['pos_enc_dim'] = int(args.pos_enc_dim)
# ZINC
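Note: the new `--pos_enc`/`--pos_enc_dim` flags (and the `Time PE` print at the top of this file) correspond to Laplacian positional encodings for ZINC. The encoder itself is not shown in this diff; below is a hedged sketch of how such an encoding is typically computed over a DGL graph (`adjacency_matrix_scipy` assumes the 0.4-era DGL API):

```python
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import eigs
import torch

def laplacian_positional_encoding(g, pos_enc_dim):
    # symmetrically normalized Laplacian L = I - D^-1/2 A D^-1/2
    A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
    deg = np.asarray(A.sum(axis=1)).clip(1).flatten()
    N = sp.diags(deg ** -0.5)
    L = sp.eye(g.number_of_nodes()) - N @ A @ N
    # smallest-real eigenvectors; drop the trivial constant one
    eig_val, eig_vec = eigs(L, k=pos_enc_dim + 1, which='SR', tol=1e-2)
    eig_vec = np.real(eig_vec[:, eig_val.argsort()])
    g.ndata['pos_enc'] = torch.from_numpy(eig_vec[:, 1:pos_enc_dim + 1]).float()
    return g
```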
@@ -394,6 +409,10 @@ def main():
num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
max_num_node = max(num_nodes)
net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']
+
+ if MODEL_NAME == 'RingGNN':
+ num_nodes = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
+ net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
diff --git a/main_superpixels_graph_classification.ipynb b/main_superpixels_graph_classification.ipynb
index a71f51254..b3c2c8ea7 100644
--- a/main_superpixels_graph_classification.ipynb
+++ b/main_superpixels_graph_classification.ipynb
@@ -18,8 +18,9 @@
"- GraphSage \n",
"- GIN \n",
"- MoNet \n",
- "- DiffPool\n",
- "- MLP\n",
+ "- MLP \n",
+ "- RingGNN \n",
+ "- 3WLGNN \n",
"\n",
"### DATASET\n",
"- MNIST \n",
@@ -108,8 +109,7 @@
" IMPORTING CUSTOM MODULES/METHODS\n",
"\"\"\"\n",
"from nets.superpixels_graph_classification.load_net import gnn_model # import all GNNS\n",
- "from data.data import LoadData # import dataset\n",
- "from train.train_superpixels_graph_classification import train_epoch, evaluate_network # import train functions\n"
+ "from data.data import LoadData # import dataset\n"
]
},
{
@@ -149,10 +149,10 @@
"output_type": "stream",
"text": [
"[I] Loading data (notebook) ...\n",
- "[I] Loading dataset MNIST...\n",
- "train, test, val sizes : 55000 10000 5000\n",
+ "[I] Loading dataset CIFAR10...\n",
+ "train, test, val sizes : 45000 10000 5000\n",
"[I] Finished loading.\n",
- "[I] Data load time: 35.4297s\n",
+ "[I] Data load time: 45.0019s\n",
"[I] Finished loading.\n"
]
}
@@ -163,6 +163,8 @@
"# \"\"\"\n",
"if notebook_mode == True:\n",
" \n",
+ " # MODEL_NAME = '3WLGNN'\n",
+ " # MODEL_NAME = 'RingGNN'\n",
" MODEL_NAME = 'GatedGCN'\n",
" # MODEL_NAME = 'MoNet'\n",
" # MODEL_NAME = 'GCN'\n",
@@ -172,8 +174,8 @@
" # MODEL_NAME = 'MLP'\n",
" # MODEL_NAME = 'GIN'\n",
"\n",
- " DATASET_NAME = 'MNIST'\n",
- " #DATASET_NAME = 'CIFAR10'\n",
+ " #DATASET_NAME = 'MNIST'\n",
+ " DATASET_NAME = 'CIFAR10'\n",
"\n",
" out_dir = 'out/superpixels_graph_classification/'\n",
" root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
@@ -191,9 +193,10 @@
"metadata": {},
"outputs": [],
"source": [
+ "#MODEL_NAME = 'RingGNN'\n",
"MODEL_NAME = 'GatedGCN'\n",
- "MODEL_NAME = 'GCN'\n",
- "MODEL_NAME = 'GAT'\n",
+ "#MODEL_NAME = 'GCN'\n",
+ "#MODEL_NAME = 'GAT'\n",
"#MODEL_NAME = 'GraphSage'\n",
"#MODEL_NAME = 'MLP'\n",
"#MODEL_NAME = 'DiffPool'\n",
@@ -205,15 +208,7 @@
"cell_type": "code",
"execution_count": 7,
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "True hidden dim: 152\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"# \"\"\"\n",
"# PARAMETERS\n",
@@ -231,7 +226,7 @@
" gated = False\n",
" self_loop = False\n",
" #self_loop = True\n",
- " max_time = 48\n",
+ " max_time = 12\n",
" \n",
" \n",
"\n",
@@ -275,6 +270,16 @@
" seed=41; epochs=1000; batch_size=50; init_lr=5e-4; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
" L=4; hidden_dim=90; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
" pseudo_dim_MoNet=2; kernel=3;\n",
+ " \n",
+ " if MODEL_NAME == 'RingGNN':\n",
+ " seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " #L=4; hidden_dim=145; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
+ " L=4; hidden_dim=25; out_dim=hidden_dim; dropout=0.0;\n",
+ " \n",
+ " if MODEL_NAME == '3WLGNN':\n",
+ " seed=41; epochs=1000; batch_size=1; init_lr=5e-5; lr_reduce_factor=0.5; lr_schedule_patience=25; min_lr = 1e-6; weight_decay=0\n",
+ " #L=4; hidden_dim=145; out_dim=hidden_dim; dropout=0.0; readout='mean'\n",
+ " L=3; hidden_dim=80; out_dim=hidden_dim; dropout=0.0;\n",
" \n",
" \n",
" \n",
@@ -292,7 +297,7 @@
" net_params['n_heads'] = n_heads\n",
" net_params['L'] = L # min L should be 2\n",
" net_params['readout'] = \"sum\"\n",
- " net_params['graph_norm'] = True\n",
+ " net_params['layer_norm'] = True\n",
" net_params['batch_norm'] = True\n",
" net_params['in_feat_dropout'] = 0.0\n",
" net_params['dropout'] = 0.0\n",
@@ -324,19 +329,22 @@
" net_params['cat'] = False\n",
" net_params['batch_size'] = batch_size \n",
"\n",
+ " # specific for RingGNN\n",
+ " net_params['radius'] = 2\n",
+ " num_nodes_train = [trainset[i][0].number_of_nodes() for i in range(len(trainset))]\n",
+ " num_nodes_test = [testset[i][0].number_of_nodes() for i in range(len(testset))]\n",
+ " num_nodes = num_nodes_train + num_nodes_test\n",
+ " net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))\n",
+ " \n",
+ " # specific for 3WLGNN\n",
+ " net_params['depth_of_mlp'] = 2\n",
+ " \n",
" # calculate assignment dimension: pool_ratio * largest graph's maximum\n",
" # number of nodes in the dataset\n",
- " max_num_nodes_train = max([trainset[i][0].number_of_nodes() for i in range(len(trainset))])\n",
- " max_num_nodes_test = max([testset[i][0].number_of_nodes() for i in range(len(testset))])\n",
+ " max_num_nodes_train = max(num_nodes_train)\n",
+ " max_num_nodes_test = max(num_nodes_test)\n",
" max_num_node = max(max_num_nodes_train, max_num_nodes_test)\n",
- " net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']\n",
- "\n",
- " # setting seeds\n",
- " random.seed(seed)\n",
- " np.random.seed(seed)\n",
- " torch.manual_seed(seed)\n",
- " if device == 'cuda':\n",
- " torch.cuda.manual_seed(seed)"
+ " net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']\n"
]
},
{
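Note: both dense models above are configured with `batch_size=1`. A quick illustration of why dense inputs force this (graphs of different sizes cannot be stacked into one batch tensor):

```python
import torch

a, b = torch.zeros(5, 5), torch.zeros(7, 7)
try:
    torch.stack([a, b])
except RuntimeError as err:
    print(err)  # stack expects each tensor to be equal size
```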
@@ -350,7 +358,7 @@
"text": [
"MODEL DETAILS:\n",
"\n",
- "MODEL/Total parameters: GAT 110400\n"
+ "MODEL/Total parameters: GatedGCN 104357\n"
]
}
],
@@ -419,7 +427,7 @@
" random.seed(params['seed'])\n",
" np.random.seed(params['seed'])\n",
" torch.manual_seed(params['seed'])\n",
- " if device == 'cuda':\n",
+ " if device.type == 'cuda':\n",
" torch.cuda.manual_seed(params['seed'])\n",
" \n",
" print(\"Training Graphs: \", len(trainset))\n",
@@ -442,9 +450,21 @@
" # batching exception for Diffpool\n",
" drop_last = True if MODEL_NAME == 'DiffPool' else False\n",
" \n",
- " train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)\n",
- " val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
- " test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
+ " if MODEL_NAME in ['RingGNN', '3WLGNN']:\n",
+ " # import train functions specific for WL-GNNs\n",
+ " from train.train_superpixels_graph_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network\n",
+ "\n",
+ " train_loader = DataLoader(trainset, shuffle=True, collate_fn=dataset.collate_dense_gnn)\n",
+ " val_loader = DataLoader(valset, shuffle=False, collate_fn=dataset.collate_dense_gnn)\n",
+ " test_loader = DataLoader(testset, shuffle=False, collate_fn=dataset.collate_dense_gnn)\n",
+ "\n",
+ " else:\n",
+ " # import train functions for all other GCNs\n",
+ " from train.train_superpixels_graph_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network\n",
+ "\n",
+ " train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)\n",
+ " val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
+ " test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)\n",
"\n",
" # At any point you can hit Ctrl + C to break out of training early.\n",
" try:\n",
@@ -455,9 +475,14 @@
"\n",
" start = time.time()\n",
"\n",
- " epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)\n",
- " epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)\n",
+ " if MODEL_NAME in ['RingGNN', '3WLGNN']: # since different batch training function for dense GNNs\n",
+ " epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])\n",
+ " else: # for all other models common train function\n",
+ " epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)\n",
"\n",
+ " epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)\n",
+ " _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch) \n",
+ " \n",
" epoch_train_losses.append(epoch_train_loss)\n",
" epoch_val_losses.append(epoch_val_loss)\n",
" epoch_train_accs.append(epoch_train_acc)\n",
@@ -467,9 +492,10 @@
" writer.add_scalar('val/_loss', epoch_val_loss, epoch)\n",
" writer.add_scalar('train/_acc', epoch_train_acc, epoch)\n",
" writer.add_scalar('val/_acc', epoch_val_acc, epoch)\n",
+ " writer.add_scalar('test/_acc', epoch_test_acc, epoch)\n",
" writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)\n",
"\n",
- " _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch) \n",
+ " \n",
" t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],\n",
" train_loss=epoch_train_loss, val_loss=epoch_val_loss,\n",
" train_acc=epoch_train_acc, val_acc=epoch_val_acc,\n",
@@ -510,6 +536,7 @@
" _, train_acc = evaluate_network(model, device, train_loader, epoch)\n",
" print(\"Test Accuracy: {:.4f}\".format(test_acc))\n",
" print(\"Train Accuracy: {:.4f}\".format(train_acc))\n",
+ " print(\"Convergence Time (Epochs): {:.4f}\".format(epoch))\n",
" print(\"TOTAL TIME TAKEN: {:.4f}s\".format(time.time()-t0))\n",
" print(\"AVG TIME PER EPOCH: {:.4f}s\".format(np.mean(per_epoch_time)))\n",
"\n",
@@ -521,24 +548,10 @@
" with open(write_file_name + '.txt', 'w') as f:\n",
" f.write(\"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n{}\\n\\nTotal Parameters: {}\\n\\n\n",
" FINAL RESULTS\\nTEST ACCURACY: {:.4f}\\nTRAIN ACCURACY: {:.4f}\\n\\n\n",
- " Total Time Taken: {:.4f} hrs\\nAverage Time Per Epoch: {:.4f} s\\n\\n\\n\"\"\"\\\n",
+ " Convergence Time (Epochs): {:.4f}\\nTotal Time Taken: {:.4f} hrs\\nAverage Time Per Epoch: {:.4f} s\\n\\n\\n\"\"\"\\\n",
" .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],\n",
- " np.mean(np.array(test_acc))*100, np.mean(np.array(train_acc))*100, (time.time()-t0)/3600, np.mean(per_epoch_time)))\n",
- " \n",
- " \n",
- " # send results to gmail\n",
- " try:\n",
- " from gmail import send\n",
- " subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)\n",
- " body = \"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n{}\\n\\nTotal Parameters: {}\\n\\n\n",
- " FINAL RESULTS\\nTEST ACCURACY: {:.4f}\\nTRAIN ACCURACY: {:.4f}\\n\\n\n",
- " Total Time Taken: {:.4f} hrs\\nAverage Time Per Epoch: {:.4f} s\\n\\n\\n\"\"\"\\\n",
- " .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],\n",
- " np.mean(np.array(test_acc))*100, np.mean(np.array(train_acc))*100, (time.time()-t0)/3600, np.mean(per_epoch_time))\n",
- " send(subject, body)\n",
- " except:\n",
- " pass\n",
- " "
+ " np.mean(np.array(test_acc))*100, np.mean(np.array(train_acc))*100, epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))\n",
+ " "
]
},
{
@@ -553,33 +566,7 @@
"Convert main_superpixels_graph_classification.ipynb to main_superpixels_graph_classification.py\n",
"Clean main_superpixels_graph_classification.py\n",
"Done. \n",
- "[I] Loading dataset MNIST...\n",
- "train, test, val sizes : 55000 10000 5000\n",
- "[I] Finished loading.\n",
- "[I] Data load time: 40.6646s\n",
- "cuda not available\n",
- "MODEL DETAILS:\n",
- "\n",
- "MODEL/Total parameters: GAT 110400\n",
- "Training Graphs: 55000\n",
- "Validation Graphs: 5000\n",
- "Test Graphs: 10000\n",
- "Number of Classes: 10\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Epoch 0: 0%| | 0/1000 [01:39, ?it/s]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "-----------------------------------------------------------------------------------------\n",
- "Exiting from training early because of KeyboardInterrupt\n"
+ "[I] Loading dataset CIFAR10...\n"
]
},
{
@@ -589,27 +576,15 @@
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
- "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 233\u001b[0m \u001b[0mcleaner_main\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'main_superpixels_graph_classification'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 234\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 235\u001b[0;31m \u001b[0mmain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 236\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 237\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m\u001b[0m in \u001b[0;36mmain\u001b[0;34m(notebook_mode, config)\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 198\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'total_param'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mview_model_param\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mMODEL_NAME\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 199\u001b[0;31m \u001b[0mtrain_val_pipeline\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mMODEL_NAME\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnet_params\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdirs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 200\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m\u001b[0m in \u001b[0;36mtrain_val_pipeline\u001b[0;34m(MODEL_NAME, dataset, params, net_params, dirs)\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtest_acc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mevaluate_network\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtest_loader\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepoch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 121\u001b[0;31m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtrain_acc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mevaluate_network\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtrain_loader\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepoch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 122\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Test Accuracy: {:.4f}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_acc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 123\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Train Accuracy: {:.4f}\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_acc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/train/train_superpixels_graph_classification.py\u001b[0m in \u001b[0;36mevaluate_network\u001b[0;34m(model, device, data_loader, epoch)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0mbatch_snorm_n\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbatch_snorm_n\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0mbatch_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_graphs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_x\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_e\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_snorm_n\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_snorm_e\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_scores\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_labels\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0mepoch_test_loss\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetach\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/nets/superpixels_graph_classification/gat_net.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, g, h, e, snorm_n, snorm_e)\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0mh\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0min_feat_dropout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mh\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 44\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mconv\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayers\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 45\u001b[0;31m \u001b[0mh\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msnorm_n\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 46\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'h'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 47\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 491\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 492\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 493\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 494\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 495\u001b[0m \u001b[0mhook_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/layers/gat_layer.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, g, h, snorm_n)\u001b[0m\n\u001b[1;32m 69\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msnorm_n\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 70\u001b[0m \u001b[0mh_in\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mh\u001b[0m \u001b[0;31m# for residual connection\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 71\u001b[0;31m \u001b[0mhead_outs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mattn_head\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msnorm_n\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mattn_head\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mheads\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 72\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 73\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmerge\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'cat'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/layers/gat_layer.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 69\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msnorm_n\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 70\u001b[0m \u001b[0mh_in\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mh\u001b[0m \u001b[0;31m# for residual connection\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 71\u001b[0;31m \u001b[0mhead_outs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mattn_head\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msnorm_n\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mattn_head\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mheads\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 72\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 73\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmerge\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'cat'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 491\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 492\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 493\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 494\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 495\u001b[0m \u001b[0mhook_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/layers/gat_layer.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, g, h, snorm_n)\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'z'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mz\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply_edges\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0medge_attention\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 40\u001b[0;31m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate_all\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmessage_func\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreduce_func\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 41\u001b[0m \u001b[0mh\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'h'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_norm\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/graph.py\u001b[0m in \u001b[0;36mupdate_all\u001b[0;34m(self, message_func, reduce_func, apply_node_func)\u001b[0m\n\u001b[1;32m 2745\u001b[0m \u001b[0mreduce_func\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mreduce_func\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2746\u001b[0m apply_func=apply_node_func)\n\u001b[0;32m-> 2747\u001b[0;31m \u001b[0mRuntime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mprog\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2748\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2749\u001b[0m def prop_nodes(self,\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/runtime/runtime.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(prog)\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mexe\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mprog\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecs\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;31m# prog.pprint_exe(exe)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0mexe\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/runtime/ir/executor.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 130\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 131\u001b[0m \u001b[0mmail_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfdmail\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 132\u001b[0;31m \u001b[0mudf_ret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfn_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_data\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmail_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 133\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mret\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mFrameRef\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mFrame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mudf_ret\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 134\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/runtime/degree_bucketing.py\u001b[0m in \u001b[0;36m_rfunc_wrapper\u001b[0;34m(node_data, mail_data)\u001b[0m\n\u001b[1;32m 151\u001b[0m \u001b[0mreshaped_mail_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mLazyDict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_reshaped_getter\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmail_data\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeys\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 152\u001b[0m \u001b[0mnbatch\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mNodeBatch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvbkt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnode_data\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreshaped_mail_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 153\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mreduce_udf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnbatch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 154\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_rfunc_wrapper\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/benchmarking-gnn/layers/gat_layer.py\u001b[0m in \u001b[0;36mreduce_func\u001b[0;34m(self, nodes)\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 30\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mreduce_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnodes\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 31\u001b[0;31m \u001b[0malpha\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msoftmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnodes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmailbox\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'e'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 32\u001b[0m \u001b[0malpha\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdropout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0malpha\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdropout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtraining\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtraining\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 33\u001b[0m \u001b[0mh\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msum\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0malpha\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mnodes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmailbox\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'z'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/utils.py\u001b[0m in \u001b[0;36m__getitem__\u001b[0;34m(self, key)\u001b[0m\n\u001b[1;32m 283\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mkey\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_keys\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 284\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mKeyError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 285\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 286\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 287\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__contains__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/runtime/degree_bucketing.py\u001b[0m in \u001b[0;36m_reshaped_getter\u001b[0;34m(key)\u001b[0m\n\u001b[1;32m 146\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_rfunc_wrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_data\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmail_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 147\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_reshaped_getter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 148\u001b[0;31m \u001b[0mmsg\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmail_data\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 149\u001b[0m \u001b[0mnew_shape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvbkt\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdeg\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmsg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 150\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmsg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnew_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/utils.py\u001b[0m in \u001b[0;36m__getitem__\u001b[0;34m(self, key)\u001b[0m\n\u001b[1;32m 283\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mkey\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_keys\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 284\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mKeyError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 285\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 286\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 287\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__contains__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/frame.py\u001b[0m in \u001b[0;36m\u001b[0;34m(key)\u001b[0m\n\u001b[1;32m 653\u001b[0m \"\"\"\n\u001b[1;32m 654\u001b[0m \u001b[0mrows\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_getrows\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mquery\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 655\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mLazyDict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mlambda\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_frame\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mrows\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkeys\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeys\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 656\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 657\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__setitem__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mval\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/frame.py\u001b[0m in \u001b[0;36m__getitem__\u001b[0;34m(self, idx)\u001b[0m\n\u001b[1;32m 95\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 96\u001b[0m \u001b[0muser_idx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0midx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtousertensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcontext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 97\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgather_row\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0muser_idx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 98\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 99\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__setitem__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0midx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeats\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/backend/pytorch/tensor.py\u001b[0m in \u001b[0;36mgather_row\u001b[0;34m(data, row_index)\u001b[0m\n\u001b[1;32m 150\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 151\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mgather_row\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrow_index\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 152\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mth\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mindex_select\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrow_index\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 153\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mslice_axis\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbegin\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mend\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/graphdeeplearning/benchmarking-gnns-dev/data/superpixels.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 269\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata_dir\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;34m'.pkl'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"rb\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 270\u001b[0;31m \u001b[0mf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpickle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 271\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/miniconda3/envs/benchmark_gnn/lib/python3.7/site-packages/dgl/graph_index.py\u001b[0m in \u001b[0;36m__setstate__\u001b[0;34m(self, state)\u001b[0m\n\u001b[1;32m 68\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnum_nodes\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 69\u001b[0;31m readonly)\n\u001b[0m\u001b[1;32m 70\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m: ",
+ "\nDuring handling of the above exception, another exception occurred:\n",
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 240\u001b[0m \u001b[0mcleaner_main\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'main_superpixels_graph_classification'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 241\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 242\u001b[0;31m \u001b[0mmain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 243\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 244\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m\u001b[0m in \u001b[0;36mmain\u001b[0;34m(notebook_mode, config)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0;31m# dataset\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0mDATASET_NAME\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'dataset'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0mdataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mLoadData\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mDATASET_NAME\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;31m# device\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/graphdeeplearning/benchmarking-gnns-dev/data/data.py\u001b[0m in \u001b[0;36mLoadData\u001b[0;34m(DATASET_NAME)\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0;31m# handling for MNIST or CIFAR Superpixels\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mDATASET_NAME\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'MNIST'\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mDATASET_NAME\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'CIFAR10'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 21\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mSuperPixDataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mDATASET_NAME\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 22\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0;31m# handling for (ZINC) molecule dataset\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m~/graphdeeplearning/benchmarking-gnns-dev/data/superpixels.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 268\u001b[0m \u001b[0mdata_dir\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'data/superpixels/'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 269\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata_dir\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;34m'.pkl'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"rb\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 270\u001b[0;31m \u001b[0mf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpickle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 271\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 272\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mval\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
@@ -650,7 +625,7 @@
" parser.add_argument('--gated', help=\"Please give a value for gated\")\n",
" parser.add_argument('--in_feat_dropout', help=\"Please give a value for in_feat_dropout\")\n",
" parser.add_argument('--dropout', help=\"Please give a value for dropout\")\n",
- " parser.add_argument('--graph_norm', help=\"Please give a value for graph_norm\")\n",
+ " parser.add_argument('--layer_norm', help=\"Please give a value for layer_norm\")\n",
" parser.add_argument('--batch_norm', help=\"Please give a value for batch_norm\")\n",
" parser.add_argument('--sage_aggregator', help=\"Please give a value for sage_aggregator\")\n",
" parser.add_argument('--data_mode', help=\"Please give a value for data_mode\")\n",
@@ -738,8 +713,8 @@
" net_params['in_feat_dropout'] = float(args.in_feat_dropout)\n",
" if args.dropout is not None:\n",
" net_params['dropout'] = float(args.dropout)\n",
- " if args.graph_norm is not None:\n",
- " net_params['graph_norm'] = True if args.graph_norm=='True' else False\n",
+ " if args.layer_norm is not None:\n",
+ " net_params['layer_norm'] = True if args.layer_norm=='True' else False\n",
" if args.batch_norm is not None:\n",
" net_params['batch_norm'] = True if args.batch_norm=='True' else False\n",
" if args.sage_aggregator is not None:\n",
@@ -799,6 +774,13 @@
" max_num_nodes_test = max([dataset.test[i][0].number_of_nodes() for i in range(len(dataset.test))])\n",
" max_num_node = max(max_num_nodes_train, max_num_nodes_test)\n",
" net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']\n",
+ " \n",
+ " if MODEL_NAME == 'RingGNN':\n",
+ " num_nodes_train = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]\n",
+ " num_nodes_test = [dataset.test[i][0].number_of_nodes() for i in range(len(dataset.test))]\n",
+ " num_nodes = num_nodes_train + num_nodes_test\n",
+ " net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))\n",
+ " \n",
" \n",
" root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
" root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n",
@@ -857,6 +839,20 @@
" "
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
{
"cell_type": "code",
"execution_count": null,
diff --git a/main_superpixels_graph_classification.py b/main_superpixels_graph_classification.py
index 517c652e6..f6a38bcd4 100644
--- a/main_superpixels_graph_classification.py
+++ b/main_superpixels_graph_classification.py
@@ -42,7 +42,6 @@ def __init__(self, **kwds):
"""
from nets.superpixels_graph_classification.load_net import gnn_model # import all GNNS
from data.data import LoadData # import dataset
-from train.train_superpixels_graph_classification import train_epoch, evaluate_network # import train functions
@@ -117,7 +116,7 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
random.seed(params['seed'])
np.random.seed(params['seed'])
torch.manual_seed(params['seed'])
- if device == 'cuda':
+ if device.type == 'cuda':
torch.cuda.manual_seed(params['seed'])
print("Training Graphs: ", len(trainset))
@@ -140,9 +139,21 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
# batching exception for Diffpool
drop_last = True if MODEL_NAME == 'DiffPool' else False
- train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)
- val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
- test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
+ if MODEL_NAME in ['RingGNN', '3WLGNN']:
+ # import train functions specific for WL-GNNs
+ from train.train_superpixels_graph_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
+
+ train_loader = DataLoader(trainset, shuffle=True, collate_fn=dataset.collate_dense_gnn)
+ val_loader = DataLoader(valset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
+ test_loader = DataLoader(testset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
+
+ else:
+ # import train functions for all other MP-GCNs
+ from train.train_superpixels_graph_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
+
+ train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)
+ val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
+ test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
# At any point you can hit Ctrl + C to break out of training early.
try:
@@ -153,9 +164,14 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
start = time.time()
- epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
- epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)
+ if MODEL_NAME in ['RingGNN', '3WLGNN']: # dense WL-GNNs use a separate batch-training function
+ epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])
+ else: # all other models share the common train function
+ epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
+ epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)
+ _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
+
epoch_train_losses.append(epoch_train_loss)
epoch_val_losses.append(epoch_val_loss)
epoch_train_accs.append(epoch_train_acc)
@@ -165,9 +181,10 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
writer.add_scalar('val/_loss', epoch_val_loss, epoch)
writer.add_scalar('train/_acc', epoch_train_acc, epoch)
writer.add_scalar('val/_acc', epoch_val_acc, epoch)
+ writer.add_scalar('test/_acc', epoch_test_acc, epoch)
writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
- _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
+
t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
train_loss=epoch_train_loss, val_loss=epoch_val_loss,
train_acc=epoch_train_acc, val_acc=epoch_val_acc,
@@ -208,6 +225,7 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
_, train_acc = evaluate_network(model, device, train_loader, epoch)
print("Test Accuracy: {:.4f}".format(test_acc))
print("Train Accuracy: {:.4f}".format(train_acc))
+ print("Convergence Time (Epochs): {:.4f}".format(epoch))
print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-t0))
print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
@@ -219,24 +237,10 @@ def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
with open(write_file_name + '.txt', 'w') as f:
f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
- Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
+ Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
.format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
- np.mean(np.array(test_acc))*100, np.mean(np.array(train_acc))*100, (time.time()-t0)/3600, np.mean(per_epoch_time)))
-
-
- # send results to gmail
- try:
- from gmail import send
- subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)
- body = """Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
- FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
- Total Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
- .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
- np.mean(np.array(test_acc))*100, np.mean(np.array(train_acc))*100, (time.time()-t0)/3600, np.mean(per_epoch_time))
- send(subject, body)
- except:
- pass
-
+ np.mean(np.array(test_acc))*100, np.mean(np.array(train_acc))*100, epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))
+
@@ -273,7 +277,7 @@ def main():
parser.add_argument('--gated', help="Please give a value for gated")
parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
parser.add_argument('--dropout', help="Please give a value for dropout")
- parser.add_argument('--graph_norm', help="Please give a value for graph_norm")
+ parser.add_argument('--layer_norm', help="Please give a value for layer_norm")
parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
parser.add_argument('--data_mode', help="Please give a value for data_mode")
@@ -357,8 +361,8 @@ def main():
net_params['in_feat_dropout'] = float(args.in_feat_dropout)
if args.dropout is not None:
net_params['dropout'] = float(args.dropout)
- if args.graph_norm is not None:
- net_params['graph_norm'] = True if args.graph_norm=='True' else False
+ if args.layer_norm is not None:
+ net_params['layer_norm'] = True if args.layer_norm=='True' else False
if args.batch_norm is not None:
net_params['batch_norm'] = True if args.batch_norm=='True' else False
if args.sage_aggregator is not None:
@@ -393,6 +397,13 @@ def main():
max_num_nodes_test = max([dataset.test[i][0].number_of_nodes() for i in range(len(dataset.test))])
max_num_node = max(max_num_nodes_train, max_num_nodes_test)
net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']
+
+ if MODEL_NAME == 'RingGNN':
+ num_nodes_train = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
+ num_nodes_test = [dataset.test[i][0].number_of_nodes() for i in range(len(dataset.test))]
+ num_nodes = num_nodes_train + num_nodes_test
+ net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
+
root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
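
For orientation, the loader and train-function split above follows one pattern across the benchmark: WL-GNNs consume dense tensors one graph at a time, while message-passing GCNs use DGL's sparse batched graphs. A minimal sketch of the dispatch, assuming the dataset class provides both collate functions as in the benchmark:

    # sketch of the loader dispatch added above; `dataset` is assumed to
    # expose collate() and collate_dense_gnn() as in the benchmark's classes
    from torch.utils.data import DataLoader

    def build_train_loader(MODEL_NAME, dataset, params):
        if MODEL_NAME in ['RingGNN', '3WLGNN']:
            # dense WL-GNNs: one graph per step; the effective batch size is
            # applied inside train_epoch_dense, which receives params['batch_size']
            return DataLoader(dataset.train, shuffle=True,
                              collate_fn=dataset.collate_dense_gnn)
        # message-passing GCNs: standard sparse batching of DGL graphs
        return DataLoader(dataset.train, batch_size=params['batch_size'],
                          shuffle=True, drop_last=(MODEL_NAME == 'DiffPool'),
                          collate_fn=dataset.collate)
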
diff --git a/nets/COLLAB_edge_classification/gat_net.py b/nets/COLLAB_edge_classification/gat_net.py
new file mode 100644
index 000000000..065acc5d9
--- /dev/null
+++ b/nets/COLLAB_edge_classification/gat_net.py
@@ -0,0 +1,82 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+
+"""
+ GAT: Graph Attention Network
+ Graph Attention Networks (Veličković et al., ICLR 2018)
+ https://arxiv.org/abs/1710.10903
+"""
+from layers.gat_layer import GATLayer, CustomGATLayer, CustomGATLayerEdgeReprFeat, CustomGATLayerIsotropic
+from layers.mlp_readout_layer import MLPReadout
+
+class GATNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ in_dim = net_params['in_dim']
+ in_dim_edge = net_params['in_dim_edge']
+ hidden_dim = net_params['hidden_dim']
+ out_dim = net_params['out_dim']
+ num_heads = net_params['n_heads']
+ in_feat_dropout = net_params['in_feat_dropout']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.readout = net_params['readout']
+ self.batch_norm = net_params['batch_norm']
+ self.residual = net_params['residual']
+ self.dropout = dropout
+ self.device = net_params['device']
+
+ self.layer_type = {
+ "dgl": GATLayer,
+ "edgereprfeat": CustomGATLayerEdgeReprFeat,
+ "edgefeat": CustomGATLayer,
+ "isotropic": CustomGATLayerIsotropic,
+ }.get(net_params['layer_type'], GATLayer)
+
+ self.embedding_h = nn.Linear(in_dim, hidden_dim * num_heads)
+
+ if self.layer_type != GATLayer:
+ self.edge_feat = net_params['edge_feat']
+ self.embedding_e = nn.Linear(in_dim_edge, hidden_dim * num_heads)
+
+ self.in_feat_dropout = nn.Dropout(in_feat_dropout)
+
+ self.layers = nn.ModuleList([self.layer_type(hidden_dim * num_heads, hidden_dim, num_heads,
+ dropout, self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ self.layers.append(self.layer_type(hidden_dim * num_heads, out_dim, 1, dropout, self.batch_norm, self.residual))
+ self.MLP_layer = MLPReadout(2*out_dim, 1)
+
+ def forward(self, g, h, e):
+ h = self.embedding_h(h.float())
+ h = self.in_feat_dropout(h)
+
+ if self.layer_type == GATLayer:
+ for conv in self.layers:
+ h = conv(g, h)
+ else:
+ if not self.edge_feat:
+ e = torch.ones_like(e).to(self.device)
+ e = self.embedding_e(e.float())
+
+ for conv in self.layers:
+ h, e = conv(g, h, e)
+
+ g.ndata['h'] = h
+
+ return h
+
+ def edge_predictor(self, h_i, h_j):
+ x = torch.cat([h_i, h_j], dim=1)
+ x = self.MLP_layer(x)
+
+ return torch.sigmoid(x)
+
+ def loss(self, pos_out, neg_out):
+ pos_loss = -torch.log(pos_out + 1e-15).mean() # positive samples
+ neg_loss = -torch.log(1 - neg_out + 1e-15).mean() # negative samples
+ loss = pos_loss + neg_loss
+
+ return loss
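
All of the new COLLAB nets share this head: forward() produces node embeddings, edge_predictor() scores a node pair with a sigmoid MLP, and loss() applies the standard negative-sampling log loss. A toy check of the arithmetic, with a dot-product stand-in for the MLP and illustrative shapes:

    import torch

    h = torch.randn(100, 76)                  # 100 nodes, out_dim-sized embeddings
    pos = torch.randint(0, 100, (2, 500))     # 500 observed edges
    neg = torch.randint(0, 100, (2, 500))     # 500 sampled non-edges

    def score(h_i, h_j):                      # stand-in for edge_predictor
        return torch.sigmoid((h_i * h_j).sum(dim=1, keepdim=True))

    pos_out = score(h[pos[0]], h[pos[1]])
    neg_out = score(h[neg[0]], h[neg[1]])
    loss = (-torch.log(pos_out + 1e-15).mean()
            - torch.log(1 - neg_out + 1e-15).mean())   # same form as loss() above
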
diff --git a/nets/COLLAB_edge_classification/gated_gcn_net.py b/nets/COLLAB_edge_classification/gated_gcn_net.py
new file mode 100644
index 000000000..67995ed1f
--- /dev/null
+++ b/nets/COLLAB_edge_classification/gated_gcn_net.py
@@ -0,0 +1,78 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+
+"""
+ ResGatedGCN: Residual Gated Graph ConvNets
+ An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
+ https://arxiv.org/pdf/1711.07553v2.pdf
+"""
+from layers.gated_gcn_layer import GatedGCNLayer, GatedGCNLayerEdgeFeatOnly, GatedGCNLayerIsotropic
+from layers.mlp_readout_layer import MLPReadout
+
+class GatedGCNNet(nn.Module):
+
+ def __init__(self, net_params):
+ super().__init__()
+ in_dim = net_params['in_dim']
+ in_dim_edge = net_params['in_dim_edge']
+ hidden_dim = net_params['hidden_dim']
+ out_dim = net_params['out_dim']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.readout = net_params['readout']
+ self.batch_norm = net_params['batch_norm']
+ self.residual = net_params['residual']
+ self.edge_feat = net_params['edge_feat']
+ self.device = net_params['device']
+ self.pos_enc = net_params['pos_enc']
+ if self.pos_enc:
+ pos_enc_dim = net_params['pos_enc_dim']
+ self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
+
+ self.layer_type = {
+ "edgereprfeat": GatedGCNLayer,
+ "edgefeat": GatedGCNLayerEdgeFeatOnly,
+ "isotropic": GatedGCNLayerIsotropic,
+ }.get(net_params['layer_type'], GatedGCNLayer)
+
+ self.embedding_h = nn.Linear(in_dim, hidden_dim)
+ self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
+ self.layers = nn.ModuleList([ self.layer_type(hidden_dim, hidden_dim, dropout,
+ self.batch_norm, self.residual) for _ in range(n_layers-1) ])
+ self.layers.append(self.layer_type(hidden_dim, out_dim, dropout, self.batch_norm, self.residual))
+
+ self.MLP_layer = MLPReadout(2*out_dim, 1)
+
+ def forward(self, g, h, e, h_pos_enc=None):
+
+ h = self.embedding_h(h.float())
+ if self.pos_enc:
+ h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
+ h = h + h_pos_enc
+ if not self.edge_feat:
+ e = torch.ones_like(e).to(self.device)
+ e = self.embedding_e(e.float())
+
+ # convnets
+ for conv in self.layers:
+ h, e = conv(g, h, e)
+ g.ndata['h'] = h
+
+ return h
+
+ def edge_predictor(self, h_i, h_j):
+ x = torch.cat([h_i, h_j], dim=1)
+ x = self.MLP_layer(x)
+
+ return torch.sigmoid(x)
+
+ def loss(self, pos_out, neg_out):
+ pos_loss = -torch.log(pos_out + 1e-15).mean() # positive samples
+ neg_loss = -torch.log(1 - neg_out + 1e-15).mean() # negative samples
+ loss = pos_loss + neg_loss
+
+ return loss
+
\ No newline at end of file
diff --git a/nets/COLLAB_edge_classification/gcn_net.py b/nets/COLLAB_edge_classification/gcn_net.py
new file mode 100644
index 000000000..956dc7540
--- /dev/null
+++ b/nets/COLLAB_edge_classification/gcn_net.py
@@ -0,0 +1,57 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+
+"""
+ GCN: Graph Convolutional Networks
+ Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
+ http://arxiv.org/abs/1609.02907
+"""
+from layers.gcn_layer import GCNLayer
+from layers.mlp_readout_layer import MLPReadout
+
+class GCNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ in_dim = net_params['in_dim']
+ hidden_dim = net_params['hidden_dim']
+ out_dim = net_params['out_dim']
+ in_feat_dropout = net_params['in_feat_dropout']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.readout = net_params['readout']
+ self.batch_norm = net_params['batch_norm']
+ self.residual = net_params['residual']
+ self.device = net_params['device']
+
+ self.embedding_h = nn.Linear(in_dim, hidden_dim)
+ self.in_feat_dropout = nn.Dropout(in_feat_dropout)
+
+ self.layers = nn.ModuleList([GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
+ self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.batch_norm, self.residual))
+ self.MLP_layer = MLPReadout(2*out_dim, 1)
+
+ def forward(self, g, h, e):
+ h = self.embedding_h(h.float())
+ h = self.in_feat_dropout(h)
+ for conv in self.layers:
+ h = conv(g, h)
+ g.ndata['h'] = h
+
+ return h
+
+ def edge_predictor(self, h_i, h_j):
+ x = torch.cat([h_i, h_j], dim=1)
+ x = self.MLP_layer(x)
+
+ return torch.sigmoid(x)
+
+ def loss(self, pos_out, neg_out):
+ pos_loss = -torch.log(pos_out + 1e-15).mean() # positive samples
+ neg_loss = -torch.log(1 - neg_out + 1e-15).mean() # negative samples
+ loss = pos_loss + neg_loss
+
+ return loss
diff --git a/nets/COLLAB_edge_classification/gin_net.py b/nets/COLLAB_edge_classification/gin_net.py
new file mode 100644
index 000000000..4586055c5
--- /dev/null
+++ b/nets/COLLAB_edge_classification/gin_net.py
@@ -0,0 +1,62 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+
+"""
+ GIN: Graph Isomorphism Networks
+ HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
+ https://arxiv.org/pdf/1810.00826.pdf
+"""
+
+from layers.gin_layer import GINLayer, ApplyNodeFunc, MLP
+from layers.mlp_readout_layer import MLPReadout
+
+class GINNet(nn.Module):
+
+ def __init__(self, net_params):
+ super().__init__()
+ in_dim = net_params['in_dim']
+ hidden_dim = net_params['hidden_dim']
+ dropout = net_params['dropout']
+ self.n_layers = net_params['L']
+ n_mlp_layers = net_params['n_mlp_GIN'] # GIN
+ learn_eps = net_params['learn_eps_GIN'] # GIN
+ neighbor_aggr_type = net_params['neighbor_aggr_GIN'] # GIN
+ batch_norm = net_params['batch_norm']
+ residual = net_params['residual']
+ self.device = net_params['device']
+
+ self.embedding_h = nn.Linear(in_dim, hidden_dim)
+
+ # List of MLPs
+ self.ginlayers = torch.nn.ModuleList()
+ for layer in range(self.n_layers):
+ mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
+ self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
+ dropout, batch_norm, residual, 0, learn_eps))
+
+ self.MLP_layer = MLPReadout(2*hidden_dim, 1)
+
+ def forward(self, g, h, e):
+ h = self.embedding_h(h.float())
+
+ for conv in self.ginlayers:
+ h = conv(g, h)
+ g.ndata['h'] = h
+
+ return h
+
+ def edge_predictor(self, h_i, h_j):
+ x = torch.cat([h_i, h_j], dim=1)
+ x = self.MLP_layer(x)
+
+ return torch.sigmoid(x)
+
+ def loss(self, pos_out, neg_out):
+ pos_loss = -torch.log(pos_out + 1e-15).mean() # positive samples
+ neg_loss = -torch.log(1 - neg_out + 1e-15).mean() # negative samples
+ loss = pos_loss + neg_loss
+
+ return loss
diff --git a/nets/COLLAB_edge_classification/graphsage_net.py b/nets/COLLAB_edge_classification/graphsage_net.py
new file mode 100644
index 000000000..c919b6e19
--- /dev/null
+++ b/nets/COLLAB_edge_classification/graphsage_net.py
@@ -0,0 +1,83 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+
+"""
+ GraphSAGE:
+ William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
+ https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
+"""
+
+from layers.graphsage_layer import GraphSageLayer, GraphSageLayerEdgeFeat, GraphSageLayerEdgeReprFeat
+from layers.mlp_readout_layer import MLPReadout
+
+class GraphSageNet(nn.Module):
+ """
+ GraphSage network with multiple GraphSageLayer layers
+ """
+ def __init__(self, net_params):
+ super().__init__()
+ in_dim = net_params['in_dim']
+ in_dim_edge = net_params['in_dim_edge']
+ hidden_dim = net_params['hidden_dim']
+ out_dim = net_params['out_dim']
+ in_feat_dropout = net_params['in_feat_dropout']
+ dropout = net_params['dropout']
+ aggregator_type = net_params['sage_aggregator']
+ n_layers = net_params['L']
+ batch_norm = net_params['batch_norm']
+ residual = net_params['residual']
+ self.readout = net_params['readout']
+ self.device = net_params['device']
+
+ self.embedding_h = nn.Linear(in_dim, hidden_dim)
+ self.in_feat_dropout = nn.Dropout(in_feat_dropout)
+
+ self.layer_type = {
+ "edgereprfeat": GraphSageLayerEdgeReprFeat,
+ "edgefeat": GraphSageLayerEdgeFeat,
+ "isotropic": GraphSageLayer,
+ }.get(net_params['layer_type'], GraphSageLayer)
+
+ if self.layer_type == GraphSageLayerEdgeReprFeat:
+ self.edge_feat = net_params['edge_feat']
+ self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
+
+ self.layers = nn.ModuleList([self.layer_type(hidden_dim, hidden_dim, F.relu,
+ dropout, aggregator_type, batch_norm, residual) for _ in range(n_layers-1)])
+ self.layers.append(self.layer_type(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
+ self.MLP_layer = MLPReadout(2*out_dim, 1)
+
+ def forward(self, g, h, e):
+ h = self.embedding_h(h.float())
+ h = self.in_feat_dropout(h)
+
+ if self.layer_type == GraphSageLayerEdgeReprFeat:
+ if not self.edge_feat:
+ e = torch.ones_like(e).to(self.device)
+ e = self.embedding_e(e.float())
+
+ for conv in self.layers:
+ h, e = conv(g, h, e)
+ else:
+ for conv in self.layers:
+ h = conv(g, h)
+
+ g.ndata['h'] = h
+
+ return h
+
+ def edge_predictor(self, h_i, h_j):
+ x = torch.cat([h_i, h_j], dim=1)
+ x = self.MLP_layer(x)
+
+ return torch.sigmoid(x)
+
+ def loss(self, pos_out, neg_out):
+ pos_loss = -torch.log(pos_out + 1e-15).mean() # positive samples
+ neg_loss = -torch.log(1 - neg_out + 1e-15).mean() # negative samples
+ loss = pos_loss + neg_loss
+
+ return loss
diff --git a/nets/COLLAB_edge_classification/load_net.py b/nets/COLLAB_edge_classification/load_net.py
new file mode 100644
index 000000000..802163be6
--- /dev/null
+++ b/nets/COLLAB_edge_classification/load_net.py
@@ -0,0 +1,52 @@
+"""
+ Utility file to select the GraphNN model
+ specified by the user
+"""
+
+from nets.COLLAB_edge_classification.gated_gcn_net import GatedGCNNet
+from nets.COLLAB_edge_classification.gcn_net import GCNNet
+from nets.COLLAB_edge_classification.gat_net import GATNet
+from nets.COLLAB_edge_classification.graphsage_net import GraphSageNet
+from nets.COLLAB_edge_classification.gin_net import GINNet
+from nets.COLLAB_edge_classification.mo_net import MoNet as MoNet_
+from nets.COLLAB_edge_classification.mlp_net import MLPNet
+from nets.COLLAB_edge_classification.matrix_factorization import MatrixFactorization
+
+
+def GatedGCN(net_params):
+ return GatedGCNNet(net_params)
+
+def GCN(net_params):
+ return GCNNet(net_params)
+
+def GAT(net_params):
+ return GATNet(net_params)
+
+def GraphSage(net_params):
+ return GraphSageNet(net_params)
+
+def GIN(net_params):
+ return GINNet(net_params)
+
+def MoNet(net_params):
+ return MoNet_(net_params)
+
+def MLP(net_params):
+ return MLPNet(net_params)
+
+def MF(net_params):
+ return MatrixFactorization(net_params)
+
+def gnn_model(MODEL_NAME, net_params):
+ models = {
+ 'GatedGCN': GatedGCN,
+ 'GCN': GCN,
+ 'GAT': GAT,
+ 'GraphSage': GraphSage,
+ 'GIN': GIN,
+ 'MoNet': MoNet,
+ 'MLP': MLP,
+ 'MF': MF,
+ }
+
+ return models[MODEL_NAME](net_params)
\ No newline at end of file
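
Usage follows the other tasks' load_net modules; the keys below are illustrative values matching what GCNNet reads:

    from nets.COLLAB_edge_classification.load_net import gnn_model

    net_params = {'in_dim': 128, 'hidden_dim': 74, 'out_dim': 74,
                  'in_feat_dropout': 0.0, 'dropout': 0.0, 'L': 3,
                  'readout': 'mean', 'batch_norm': True,
                  'residual': True, 'device': 'cpu'}   # illustrative values
    model = gnn_model('GCN', net_params)               # -> GCNNet instance
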
diff --git a/nets/COLLAB_edge_classification/matrix_factorization.py b/nets/COLLAB_edge_classification/matrix_factorization.py
new file mode 100644
index 000000000..48f4da638
--- /dev/null
+++ b/nets/COLLAB_edge_classification/matrix_factorization.py
@@ -0,0 +1,36 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+
+from layers.mlp_readout_layer import MLPReadout
+
+class MatrixFactorization(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ num_embs = net_params['num_embs']
+ hidden_dim = net_params['hidden_dim']
+ self.device = net_params['device']
+
+ # MF trains a hidden embedding per graph node
+ self.emb = torch.nn.Embedding(num_embs, hidden_dim)
+
+ self.readout_mlp = MLPReadout(2*hidden_dim, 1)
+
+ def forward(self, g, h, e):
+ # Return the entire node embedding matrix
+ return self.emb.weight
+
+ def edge_predictor(self, h_i, h_j):
+ x = torch.cat([h_i, h_j], dim=1)
+ x = self.readout_mlp(x)
+
+ return torch.sigmoid(x)
+
+ def loss(self, pos_out, neg_out):
+ pos_loss = -torch.log(pos_out + 1e-15).mean() # positive samples
+ neg_loss = -torch.log(1 - neg_out + 1e-15).mean() # negative samples
+ loss = pos_loss + neg_loss
+
+ return loss
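
Unlike the GNNs, MatrixFactorization ignores the graph entirely: forward() returns the whole embedding table and the caller indexes it by node id. A sketch with hypothetical sizes:

    import torch
    from nets.COLLAB_edge_classification.matrix_factorization import MatrixFactorization

    mf = MatrixFactorization({'num_embs': 1000, 'hidden_dim': 256,
                              'device': 'cpu'})   # hypothetical sizes
    h = mf(None, None, None)                      # (1000, 256) embedding table
    h_i, h_j = h[torch.tensor([0, 1])], h[torch.tensor([2, 3])]
    scores = mf.edge_predictor(h_i, h_j)          # probabilities for (0,2), (1,3)
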
diff --git a/nets/CitationGraphs_node_classification/mlp_net.py b/nets/COLLAB_edge_classification/mlp_net.py
similarity index 65%
rename from nets/CitationGraphs_node_classification/mlp_net.py
rename to nets/COLLAB_edge_classification/mlp_net.py
index b310a3381..72c0afca8 100644
--- a/nets/CitationGraphs_node_classification/mlp_net.py
+++ b/nets/COLLAB_edge_classification/mlp_net.py
@@ -11,11 +11,11 @@ def __init__(self, net_params):
super().__init__()
in_dim = net_params['in_dim']
hidden_dim = net_params['hidden_dim']
- n_classes = net_params['n_classes']
in_feat_dropout = net_params['in_feat_dropout']
dropout = net_params['dropout']
n_layers = net_params['L']
self.gated = net_params['gated']
+ self.device = net_params['device']
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
@@ -30,27 +30,24 @@ def __init__(self, net_params):
feat_mlp_modules.append(nn.Dropout(dropout))
self.feat_mlp = nn.Sequential(*feat_mlp_modules)
- if self.gated:
- self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
-
- self.readout_mlp = MLPReadout(hidden_dim, n_classes)
+ self.readout_mlp = MLPReadout(2*hidden_dim, 1)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.in_feat_dropout(h)
h = self.feat_mlp(h)
- if self.gated:
- h = torch.sigmoid(self.gates(h)) * h
- g.ndata['h'] = h
+ g.ndata['h'] = h
- else:
- g.ndata['h'] = h
+ return h
- h_out = self.readout_mlp(h)
- return h_out
-
+ def edge_predictor(self, h_i, h_j):
+ x = torch.cat([h_i, h_j], dim=1)
+ x = self.readout_mlp(x)
+
+ return torch.sigmoid(x)
+
+ def loss(self, pos_out, neg_out):
+ pos_loss = -torch.log(pos_out + 1e-15).mean() # positive samples
+ neg_loss = -torch.log(1 - neg_out + 1e-15).mean() # negative samples
+ loss = pos_loss + neg_loss
- def loss(self, pred, label):
- criterion = nn.CrossEntropyLoss()
- loss = criterion(pred, label)
return loss
-
diff --git a/nets/COLLAB_edge_classification/mo_net.py b/nets/COLLAB_edge_classification/mo_net.py
new file mode 100644
index 000000000..5d36bff6e
--- /dev/null
+++ b/nets/COLLAB_edge_classification/mo_net.py
@@ -0,0 +1,74 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+
+import numpy as np
+
+"""
+ GMM: Gaussian Mixture Model Convolution layer
+ Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
+ https://arxiv.org/pdf/1611.08402.pdf
+"""
+
+from layers.gmm_layer import GMMLayer
+from layers.mlp_readout_layer import MLPReadout
+
+class MoNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ self.name = 'MoNet'
+ in_dim = net_params['in_dim']
+ hidden_dim = net_params['hidden_dim']
+ out_dim = net_params['out_dim']
+ kernel = net_params['kernel'] # for MoNet
+ dim = net_params['pseudo_dim_MoNet'] # for MoNet
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.readout = net_params['readout']
+ batch_norm = net_params['batch_norm']
+ residual = net_params['residual']
+ self.device = net_params['device']
+
+ aggr_type = "sum" # default for MoNet
+
+ self.embedding_h = nn.Linear(in_dim, hidden_dim)
+
+ self.layers = nn.ModuleList()
+ self.pseudo_proj = nn.ModuleList()
+
+ # Hidden layer
+ for _ in range(n_layers-1):
+ self.layers.append(GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
+ dropout, batch_norm, residual))
+ self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
+
+ # Output layer
+ self.layers.append(GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type,
+ dropout, batch_norm, residual))
+ self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
+
+ self.MLP_layer = MLPReadout(2*out_dim, 1)
+
+ def forward(self, g, h, pseudo):
+ h = self.embedding_h(h.float())
+
+ for i in range(len(self.layers)):
+ h = self.layers[i](g, h, self.pseudo_proj[i](pseudo))
+ g.ndata['h'] = h
+
+ return h
+
+ def edge_predictor(self, h_i, h_j):
+ x = torch.cat([h_i, h_j], dim=1)
+ x = self.MLP_layer(x)
+
+ return torch.sigmoid(x)
+
+ def loss(self, pos_out, neg_out):
+ pos_loss = -torch.log(pos_out + 1e-15).mean() # positive samples
+ neg_loss = -torch.log(1 - neg_out + 1e-15).mean() # negative samples
+ loss = pos_loss + neg_loss
+
+ return loss
diff --git a/nets/CSL_graph_classification/gat_net.py b/nets/CSL_graph_classification/gat_net.py
new file mode 100644
index 000000000..c6f983d05
--- /dev/null
+++ b/nets/CSL_graph_classification/gat_net.py
@@ -0,0 +1,76 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+
+"""
+ GAT: Graph Attention Network
+ Graph Attention Networks (Veličković et al., ICLR 2018)
+ https://arxiv.org/abs/1710.10903
+"""
+from layers.gat_layer import GATLayer
+from layers.mlp_readout_layer import MLPReadout
+
+class GATNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ num_node_type = net_params['num_node_type']
+ num_edge_type = net_params['num_edge_type']
+ hidden_dim = net_params['hidden_dim']
+ num_heads = net_params['n_heads']
+ out_dim = net_params['out_dim']
+ n_classes = net_params['n_classes']
+ in_feat_dropout = net_params['in_feat_dropout']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.readout = net_params['readout']
+ self.batch_norm = net_params['batch_norm']
+ self.residual = net_params['residual']
+ self.dropout = dropout
+
+ self.pos_enc = net_params['pos_enc']
+ if self.pos_enc:
+ pos_enc_dim = net_params['pos_enc_dim']
+ self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim*num_heads)
+ else:
+ in_dim = 1
+ self.embedding_h = nn.Embedding(in_dim, hidden_dim*num_heads)
+
+ self.in_feat_dropout = nn.Dropout(in_feat_dropout)
+
+ self.layers = nn.ModuleList([GATLayer(hidden_dim * num_heads, hidden_dim, num_heads,
+ dropout, self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ self.layers.append(GATLayer(hidden_dim * num_heads, out_dim, 1,
+ dropout, self.batch_norm, self.residual))
+ self.MLP_layer = MLPReadout(out_dim, n_classes)
+
+
+ def forward(self, g, h, e, pos_enc=None):
+ # input embedding
+
+ if self.pos_enc:
+ h = self.embedding_pos_enc(pos_enc)
+ else:
+ h = self.embedding_h(h)
+ h = self.in_feat_dropout(h)
+ for conv in self.layers:
+ h = conv(g, h)
+ g.ndata['h'] = h
+
+ if self.readout == "sum":
+ hg = dgl.sum_nodes(g, 'h')
+ elif self.readout == "max":
+ hg = dgl.max_nodes(g, 'h')
+ elif self.readout == "mean":
+ hg = dgl.mean_nodes(g, 'h')
+ else:
+ hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
+
+ return self.MLP_layer(hg)
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss()
+ loss = criterion(pred, label)
+ return loss
+
\ No newline at end of file
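
All CSL nets follow the same input convention: CSL graphs are featureless, so nodes either share one learned embedding (nn.Embedding with vocabulary size 1) or, when pos_enc is set, receive a linear projection of a precomputed positional encoding. One common choice for that encoding is Laplacian eigenvectors; the sketch below is an assumption about how such a tensor could be built, the dataset class supplies the actual pos_enc:

    import numpy as np
    import scipy.sparse as sp

    def laplacian_pos_enc(A, k):
        # first k non-trivial eigenvectors of the symmetric normalized
        # Laplacian, used as k-dimensional node positional encodings
        deg = np.asarray(A.sum(axis=1)).clip(1).flatten()
        D_inv_sqrt = sp.diags(deg ** -0.5)
        L = sp.eye(A.shape[0]) - D_inv_sqrt @ A @ D_inv_sqrt
        _, vecs = np.linalg.eigh(L.toarray())
        return vecs[:, 1:k + 1]   # skip the constant eigenvector
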
diff --git a/nets/CSL_graph_classification/gated_gcn_net.py b/nets/CSL_graph_classification/gated_gcn_net.py
new file mode 100644
index 000000000..cfa68a879
--- /dev/null
+++ b/nets/CSL_graph_classification/gated_gcn_net.py
@@ -0,0 +1,83 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+
+"""
+ ResGatedGCN: Residual Gated Graph ConvNets
+ An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
+ https://arxiv.org/pdf/1711.07553v2.pdf
+"""
+from layers.gated_gcn_layer import GatedGCNLayer
+from layers.mlp_readout_layer import MLPReadout
+
+class GatedGCNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ num_node_type = net_params['num_node_type']
+ num_edge_type = net_params['num_edge_type']
+ hidden_dim = net_params['hidden_dim']
+ out_dim = net_params['out_dim']
+ n_classes = net_params['n_classes']
+ in_feat_dropout = net_params['in_feat_dropout']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.readout = net_params['readout']
+ self.batch_norm = net_params['batch_norm']
+ self.residual = net_params['residual']
+ self.edge_feat = net_params['edge_feat']
+ self.device = net_params['device']
+ self.pos_enc = net_params['pos_enc']
+ if self.pos_enc:
+ pos_enc_dim = net_params['pos_enc_dim']
+ self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
+ else:
+ in_dim = 1
+ self.embedding_h = nn.Embedding(in_dim, hidden_dim)
+
+ if self.edge_feat:
+ self.embedding_e = nn.Embedding(num_edge_type, hidden_dim)
+ else:
+ self.embedding_e = nn.Linear(1, hidden_dim)
+
+ self.in_feat_dropout = nn.Dropout(in_feat_dropout)
+
+ self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
+ self.batch_norm, self.residual) for _ in range(n_layers-1) ])
+ self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm, self.residual))
+ self.MLP_layer = MLPReadout(out_dim, n_classes)
+
+
+ def forward(self, g, h, e, pos_enc=None):
+
+ # input embedding
+ if self.pos_enc:
+ h = self.embedding_pos_enc(pos_enc)
+ else:
+ h = self.embedding_h(h)
+ h = self.in_feat_dropout(h)
+ if not self.edge_feat: # edge feature set to 1
+ e = torch.ones(e.size(0),1).to(self.device)
+ e = self.embedding_e(e)
+
+ # convnets
+ for conv in self.layers:
+ h, e = conv(g, h, e)
+ g.ndata['h'] = h
+
+ if self.readout == "sum":
+ hg = dgl.sum_nodes(g, 'h')
+ elif self.readout == "max":
+ hg = dgl.max_nodes(g, 'h')
+ elif self.readout == "mean":
+ hg = dgl.mean_nodes(g, 'h')
+ else:
+ hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
+
+ return self.MLP_layer(hg)
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss()
+ loss = criterion(pred, label)
+ return loss
diff --git a/nets/CSL_graph_classification/gcn_net.py b/nets/CSL_graph_classification/gcn_net.py
new file mode 100644
index 000000000..dd76f13c1
--- /dev/null
+++ b/nets/CSL_graph_classification/gcn_net.py
@@ -0,0 +1,76 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+
+"""
+ GCN: Graph Convolutional Networks
+ Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
+ http://arxiv.org/abs/1609.02907
+"""
+from layers.gcn_layer import GCNLayer
+from layers.mlp_readout_layer import MLPReadout
+
+class GCNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ num_node_type = net_params['num_node_type']
+ num_edge_type = net_params['num_edge_type']
+ hidden_dim = net_params['hidden_dim']
+ out_dim = net_params['out_dim']
+ n_classes = net_params['n_classes']
+ in_feat_dropout = net_params['in_feat_dropout']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.readout = net_params['readout']
+ self.batch_norm = net_params['batch_norm']
+ self.residual = net_params['residual']
+ self.pos_enc = net_params['pos_enc']
+ if self.pos_enc:
+ pos_enc_dim = net_params['pos_enc_dim']
+ self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
+ else:
+ in_dim = 1
+ self.embedding_h = nn.Embedding(in_dim, hidden_dim)
+
+
+ self.in_feat_dropout = nn.Dropout(in_feat_dropout)
+
+ #self.embedding_h = nn.Embedding(num_node_type, hidden_dim)
+
+ self.layers = nn.ModuleList([GCNLayer(hidden_dim, hidden_dim, F.relu,
+ dropout, self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu,
+ dropout, self.batch_norm, self.residual))
+ self.MLP_layer = MLPReadout(out_dim, n_classes)
+
+
+ def forward(self, g, h, e, pos_enc=None):
+ # input embedding
+
+ if self.pos_enc:
+ h = self.embedding_pos_enc(pos_enc)
+ else:
+ h = self.embedding_h(h)
+ h = self.in_feat_dropout(h)
+
+ for conv in self.layers:
+ h = conv(g, h)
+ g.ndata['h'] = h
+
+ if self.readout == "sum":
+ hg = dgl.sum_nodes(g, 'h')
+ elif self.readout == "max":
+ hg = dgl.max_nodes(g, 'h')
+ elif self.readout == "mean":
+ hg = dgl.mean_nodes(g, 'h')
+ else:
+ hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
+
+ return self.MLP_layer(hg)
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss()
+ loss = criterion(pred, label)
+ return loss
\ No newline at end of file
diff --git a/nets/CSL_graph_classification/gin_net.py b/nets/CSL_graph_classification/gin_net.py
new file mode 100644
index 000000000..4fbabb537
--- /dev/null
+++ b/nets/CSL_graph_classification/gin_net.py
@@ -0,0 +1,91 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
+
+"""
+ GIN: Graph Isomorphism Networks
+ HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
+ https://arxiv.org/pdf/1810.00826.pdf
+"""
+
+from layers.gin_layer import GINLayer, ApplyNodeFunc, MLP
+
+class GINNet(nn.Module):
+
+ def __init__(self, net_params):
+ super().__init__()
+ num_node_type = net_params['num_node_type']
+ hidden_dim = net_params['hidden_dim']
+ n_classes = net_params['n_classes']
+ dropout = net_params['dropout']
+ self.n_layers = net_params['L']
+ n_mlp_layers = net_params['n_mlp_GIN'] # GIN
+ learn_eps = net_params['learn_eps_GIN'] # GIN
+ neighbor_aggr_type = net_params['neighbor_aggr_GIN'] # GIN
+ readout = net_params['readout'] # this is graph_pooling_type
+ batch_norm = net_params['batch_norm']
+ residual = net_params['residual']
+ self.pos_enc = net_params['pos_enc']
+ if self.pos_enc:
+ pos_enc_dim = net_params['pos_enc_dim']
+ self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
+ else:
+ in_dim = 1
+ self.embedding_h = nn.Embedding(in_dim, hidden_dim)
+
+ # List of MLPs
+ self.ginlayers = torch.nn.ModuleList()
+
+ for layer in range(self.n_layers):
+ mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
+
+ self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
+ dropout, batch_norm, residual, 0, learn_eps))
+
+ # Linear function for graph poolings (readout) of output of each layer
+ # which maps the output of different layers into a prediction score
+ self.linears_prediction = torch.nn.ModuleList()
+
+ for layer in range(self.n_layers+1):
+ self.linears_prediction.append(nn.Linear(hidden_dim, n_classes))
+
+ if readout == 'sum':
+ self.pool = SumPooling()
+ elif readout == 'mean':
+ self.pool = AvgPooling()
+ elif readout == 'max':
+ self.pool = MaxPooling()
+ else:
+ raise NotImplementedError
+
+
+ def forward(self, g, h, e, pos_enc=None):
+
+ if self.pos_enc:
+ h = self.embedding_pos_enc(pos_enc)
+ else:
+ h = self.embedding_h(h)
+
+ # list of hidden representation at each layer (including input)
+ hidden_rep = [h]
+
+ for i in range(self.n_layers):
+ h = self.ginlayers[i](g, h)
+ hidden_rep.append(h)
+
+ score_over_layer = 0
+
+ # perform pooling over all nodes in each graph in every layer
+ for i, h in enumerate(hidden_rep):
+ pooled_h = self.pool(g, h)
+ score_over_layer += self.linears_prediction[i](pooled_h)
+
+ return score_over_layer
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss()
+ loss = criterion(pred, label)
+ return loss
\ No newline at end of file
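
Note that GINNet departs from the single-readout pattern of the other CSL nets: every layer's hidden representation, including the input, is pooled and passed through its own linear classifier, and the scores are summed, as in the original GIN paper. A shape-level sketch with pooling already applied:

    import torch
    import torch.nn as nn

    hidden_rep = [torch.randn(8, 32) for _ in range(5)]          # input + 4 layers, 8 graphs
    linears = nn.ModuleList(nn.Linear(32, 10) for _ in hidden_rep)
    score = sum(lin(h) for lin, h in zip(linears, hidden_rep))   # (8, n_classes)
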
diff --git a/nets/CSL_graph_classification/graphsage_net.py b/nets/CSL_graph_classification/graphsage_net.py
new file mode 100644
index 000000000..37c0cf9dd
--- /dev/null
+++ b/nets/CSL_graph_classification/graphsage_net.py
@@ -0,0 +1,77 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+
+"""
+ GraphSAGE:
+ William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
+ https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
+"""
+
+from layers.graphsage_layer import GraphSageLayer
+from layers.mlp_readout_layer import MLPReadout
+
+class GraphSageNet(nn.Module):
+ """
+ GraphSage network with multiple GraphSageLayer layers
+ """
+ def __init__(self, net_params):
+ super().__init__()
+ num_node_type = net_params['num_node_type']
+ num_edge_type = net_params['num_edge_type']
+ hidden_dim = net_params['hidden_dim']
+ out_dim = net_params['out_dim']
+ n_classes = net_params['n_classes']
+ in_feat_dropout = net_params['in_feat_dropout']
+ dropout = net_params['dropout']
+ aggregator_type = net_params['sage_aggregator']
+ n_layers = net_params['L']
+ batch_norm = net_params['batch_norm']
+ residual = net_params['residual']
+ self.readout = net_params['readout']
+ self.pos_enc = net_params['pos_enc']
+ if self.pos_enc:
+ pos_enc_dim = net_params['pos_enc_dim']
+ self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
+ else:
+ in_dim = 1
+ self.embedding_h = nn.Embedding(in_dim, hidden_dim)
+
+ self.in_feat_dropout = nn.Dropout(in_feat_dropout)
+
+ self.layers = nn.ModuleList([GraphSageLayer(hidden_dim, hidden_dim, F.relu,
+ dropout, aggregator_type, batch_norm, residual) for _ in range(n_layers-1)])
+ self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
+ self.MLP_layer = MLPReadout(out_dim, n_classes)
+
+
+ def forward(self, g, h, e, pos_enc=None):
+
+ # input embedding
+ if self.pos_enc:
+ h = self.embedding_pos_enc(pos_enc)
+ else:
+ h = self.embedding_h(h)
+ h = self.in_feat_dropout(h)
+ for conv in self.layers:
+ h = conv(g, h)
+ g.ndata['h'] = h
+
+ if self.readout == "sum":
+ hg = dgl.sum_nodes(g, 'h')
+ elif self.readout == "max":
+ hg = dgl.max_nodes(g, 'h')
+ elif self.readout == "mean":
+ hg = dgl.mean_nodes(g, 'h')
+ else:
+ hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
+
+ return self.MLP_layer(hg)
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss()
+ loss = criterion(pred, label)
+ return loss
+
\ No newline at end of file
diff --git a/nets/CSL_graph_classification/load_net.py b/nets/CSL_graph_classification/load_net.py
new file mode 100644
index 000000000..ec70544e1
--- /dev/null
+++ b/nets/CSL_graph_classification/load_net.py
@@ -0,0 +1,57 @@
+"""
+ Utility file to select the GraphNN model
+ specified by the user
+"""
+
+from nets.CSL_graph_classification.gated_gcn_net import GatedGCNNet
+from nets.CSL_graph_classification.gcn_net import GCNNet
+from nets.CSL_graph_classification.gat_net import GATNet
+from nets.CSL_graph_classification.graphsage_net import GraphSageNet
+from nets.CSL_graph_classification.gin_net import GINNet
+from nets.CSL_graph_classification.mo_net import MoNet as MoNet_
+from nets.CSL_graph_classification.mlp_net import MLPNet
+from nets.CSL_graph_classification.ring_gnn_net import RingGNNNet
+from nets.CSL_graph_classification.three_wl_gnn_net import ThreeWLGNNNet
+
+
+def GatedGCN(net_params):
+ return GatedGCNNet(net_params)
+
+def GCN(net_params):
+ return GCNNet(net_params)
+
+def GAT(net_params):
+ return GATNet(net_params)
+
+def GraphSage(net_params):
+ return GraphSageNet(net_params)
+
+def GIN(net_params):
+ return GINNet(net_params)
+
+def MoNet(net_params):
+ return MoNet_(net_params)
+
+def MLP(net_params):
+ return MLPNet(net_params)
+
+def RingGNN(net_params):
+ return RingGNNNet(net_params)
+
+def ThreeWLGNN(net_params):
+ return ThreeWLGNNNet(net_params)
+
+def gnn_model(MODEL_NAME, net_params):
+ models = {
+ 'GatedGCN': GatedGCN,
+ 'GCN': GCN,
+ 'GAT': GAT,
+ 'GraphSage': GraphSage,
+ 'GIN': GIN,
+ 'MoNet': MoNet,
+ 'MLP': MLP,
+ 'RingGNN': RingGNN,
+ '3WLGNN': ThreeWLGNN
+ }
+
+ return models[MODEL_NAME](net_params)
\ No newline at end of file
diff --git a/nets/CSL_graph_classification/mlp_net.py b/nets/CSL_graph_classification/mlp_net.py
new file mode 100644
index 000000000..6a7ae4098
--- /dev/null
+++ b/nets/CSL_graph_classification/mlp_net.py
@@ -0,0 +1,81 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+
+from layers.mlp_readout_layer import MLPReadout
+
+class MLPNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ num_node_type = net_params['num_node_type']
+ num_edge_type = net_params['num_edge_type']
+ hidden_dim = net_params['hidden_dim']
+ out_dim = net_params['out_dim']
+ n_classes = net_params['n_classes']
+ in_feat_dropout = net_params['in_feat_dropout']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.gated = net_params['gated']
+ self.readout = net_params['readout']
+ self.pos_enc = net_params['pos_enc']
+ if self.pos_enc:
+ pos_enc_dim = net_params['pos_enc_dim']
+ self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
+ else:
+ in_dim = 1
+ self.embedding_h = nn.Embedding(in_dim, hidden_dim)
+
+ self.in_feat_dropout = nn.Dropout(in_feat_dropout)
+
+ feat_mlp_modules = [
+ nn.Linear(hidden_dim, hidden_dim, bias=True),
+ nn.ReLU(),
+ nn.Dropout(dropout),
+ ]
+ for _ in range(n_layers-1):
+ feat_mlp_modules.append(nn.Linear(hidden_dim, hidden_dim, bias=True))
+ feat_mlp_modules.append(nn.ReLU())
+ feat_mlp_modules.append(nn.Dropout(dropout))
+ self.feat_mlp = nn.Sequential(*feat_mlp_modules)
+
+ if self.gated:
+ self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)
+
+ self.readout_mlp = MLPReadout(out_dim, n_classes)
+
+
+ def forward(self, g, h, e, pos_enc=None):
+
+ # input embedding
+ if self.pos_enc:
+ h = self.embedding_pos_enc(pos_enc)
+ else:
+ h = self.embedding_h(h)
+ h = self.in_feat_dropout(h)
+ h = self.feat_mlp(h)
+ if self.gated:
+ h = torch.sigmoid(self.gates(h)) * h
+ g.ndata['h'] = h
+ hg = dgl.sum_nodes(g, 'h')
+ # hg = torch.cat(
+ # (
+ # dgl.sum_nodes(g, 'h'),
+ # dgl.max_nodes(g, 'h')
+ # ),
+ # dim=1
+ # )
+
+ else:
+ g.ndata['h'] = h
+ hg = dgl.mean_nodes(g, 'h')
+
+ return self.readout_mlp(hg)
+
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss()
+ loss = criterion(pred, label)
+ return loss
+
\ No newline at end of file
diff --git a/nets/CSL_graph_classification/mo_net.py b/nets/CSL_graph_classification/mo_net.py
new file mode 100644
index 000000000..2a144ccb6
--- /dev/null
+++ b/nets/CSL_graph_classification/mo_net.py
@@ -0,0 +1,100 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+
+import numpy as np
+
+"""
+ GMM: Gaussian Mixture Model Convolution layer
+ Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs (Federico Monti et al., CVPR 2017)
+ https://arxiv.org/pdf/1611.08402.pdf
+"""
+
+from layers.gmm_layer import GMMLayer
+from layers.mlp_readout_layer import MLPReadout
+
+class MoNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ num_node_type = net_params['num_node_type']
+ hidden_dim = net_params['hidden_dim']
+ out_dim = net_params['out_dim']
+ n_classes = net_params['n_classes']
+ kernel = net_params['kernel'] # for MoNet
+ dim = net_params['pseudo_dim_MoNet'] # for MoNet
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.readout = net_params['readout']
+ batch_norm = net_params['batch_norm']
+ residual = net_params['residual']
+ self.device = net_params['device']
+ self.pos_enc = net_params['pos_enc']
+ if self.pos_enc:
+ pos_enc_dim = net_params['pos_enc_dim']
+ self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
+ else:
+ in_dim = 1
+ self.embedding_h = nn.Embedding(in_dim, hidden_dim)
+
+ aggr_type = "sum" # default for MoNet
+
+
+ self.layers = nn.ModuleList()
+ self.pseudo_proj = nn.ModuleList()
+
+ # Hidden layer
+ for _ in range(n_layers-1):
+ self.layers.append(GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
+ dropout, batch_norm, residual))
+ self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
+
+ # Output layer
+ self.layers.append(GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type,
+ dropout, batch_norm, residual))
+ self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
+
+ self.MLP_layer = MLPReadout(out_dim, n_classes)
+
+
+ def forward(self, g, h, e, pos_enc=None):
+
+ # input embedding
+ if self.pos_enc:
+ h = self.embedding_pos_enc(pos_enc)
+ else:
+ h = self.embedding_h(h)
+
+ # compute the 'pseudo' edge-coordinate tensor, which depends on node degrees
+ g.ndata['deg'] = g.in_degrees()
+ g.apply_edges(self.compute_pseudo)
+ pseudo = g.edata['pseudo'].to(self.device).float()
+
+ for i in range(len(self.layers)):
+ h = self.layers[i](g, h, self.pseudo_proj[i](pseudo))
+ g.ndata['h'] = h
+
+ if self.readout == "sum":
+ hg = dgl.sum_nodes(g, 'h')
+ elif self.readout == "max":
+ hg = dgl.max_nodes(g, 'h')
+ elif self.readout == "mean":
+ hg = dgl.mean_nodes(g, 'h')
+ else:
+ hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
+
+ return self.MLP_layer(hg)
+
+ def compute_pseudo(self, edges):
+ # compute pseudo edge features for MoNet
+ # add a constant 1 to every node degree (a virtual self-loop) to avoid division by zero when in_degree is 0
+ srcs = 1/np.sqrt(edges.src['deg']+1)
+ dsts = 1/np.sqrt(edges.dst['deg']+1)
+ pseudo = torch.cat((srcs.unsqueeze(-1), dsts.unsqueeze(-1)), dim=1)
+ return {'pseudo': pseudo}
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss()
+ loss = criterion(pred, label)
+ return loss
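
A worked instance of compute_pseudo: for an edge u -> v with in-degrees 3 and 0, the +1 guard gives finite coordinates even for the isolated target node:

    import torch

    deg_u, deg_v = torch.tensor(3.0), torch.tensor(0.0)
    pseudo = torch.stack([1 / torch.sqrt(deg_u + 1),
                          1 / torch.sqrt(deg_v + 1)])
    # tensor([0.5000, 1.0000]); each layer then maps it through Linear(2, dim) + Tanh
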
diff --git a/nets/CSL_graph_classification/ring_gnn_net.py b/nets/CSL_graph_classification/ring_gnn_net.py
new file mode 100644
index 000000000..c350450a7
--- /dev/null
+++ b/nets/CSL_graph_classification/ring_gnn_net.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+import time
+
+"""
+ Ring-GNN
+ On the equivalence between graph isomorphism testing and function approximation with GNNs (Chen et al., 2019)
+ https://arxiv.org/pdf/1905.12560v1.pdf
+"""
+from layers.ring_gnn_equiv_layer import RingGNNEquivLayer
+from layers.mlp_readout_layer import MLPReadout
+
+class RingGNNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ self.in_dim_node = net_params['in_dim']
+ avg_node_num = net_params['avg_node_num']
+ radius = net_params['radius']
+ hidden_dim = net_params['hidden_dim']
+ n_classes = net_params['n_classes']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.layer_norm = net_params['layer_norm']
+ self.residual = net_params['residual']
+ self.device = net_params['device']
+
+ self.depth = [torch.LongTensor([1+self.in_dim_node])] + [torch.LongTensor([hidden_dim])] * n_layers
+
+ self.equi_modulelist = nn.ModuleList([RingGNNEquivLayer(self.device, m, n,
+ layer_norm=self.layer_norm,
+ residual=self.residual,
+ dropout=dropout,
+ radius=radius,
+ k2_init=0.5/avg_node_num) for m, n in zip(self.depth[:-1], self.depth[1:])])
+
+ self.prediction = MLPReadout(torch.sum(torch.stack(self.depth)).item(), n_classes)
+
+ def forward(self, x):
+ """
+ CODE ADAPTED FROM https://github.com/leichen2018/Ring-GNN/
+ """
+
+ # this x is the tensor with all info available => adj, node feat
+
+ x_list = [x]
+ for layer in self.equi_modulelist:
+ x = layer(x)
+ x_list.append(x)
+
+ # readout
+ x_list = [torch.sum(torch.sum(x, dim=3), dim=2) for x in x_list]
+ x_list = torch.cat(x_list, dim=1)
+
+ x_out = self.prediction(x_list)
+
+ return x_out
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss()
+ loss = criterion(pred, label)
+ return loss
+
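
RingGNN's forward takes a single dense tensor rather than a DGL graph. The layout assumed by self.depth[0] = 1 + in_dim_node is: channel 0 carries the adjacency matrix, and each node-feature dimension sits on the diagonal of its own channel. A sketch of building that input; the dataset's collate_dense_gnn is assumed to produce this layout:

    import torch

    def dense_input(adj, feat):
        # adj: (N, N) adjacency, feat: (N, d) node features
        N, d = feat.shape
        x = torch.zeros(1, 1 + d, N, N)   # batch of one graph
        x[0, 0] = adj                     # channel 0: adjacency
        idx = torch.arange(N)
        x[0, 1:, idx, idx] = feat.t()     # features on the diagonals
        return x
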
diff --git a/nets/CSL_graph_classification/three_wl_gnn_net.py b/nets/CSL_graph_classification/three_wl_gnn_net.py
new file mode 100644
index 000000000..d523e7e9c
--- /dev/null
+++ b/nets/CSL_graph_classification/three_wl_gnn_net.py
@@ -0,0 +1,82 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+import time
+
+"""
+ 3WLGNN / ThreeWLGNN
+ Provably Powerful Graph Networks (Maron et al., 2019)
+ https://papers.nips.cc/paper/8488-provably-powerful-graph-networks.pdf
+
+ CODE adapted from https://github.com/hadarser/ProvablyPowerfulGraphNetworks_torch/
+"""
+
+from layers.three_wl_gnn_layers import RegularBlock, MlpBlock, SkipConnection, FullyConnected, diag_offdiag_maxpool
+from layers.mlp_readout_layer import MLPReadout
+
+class ThreeWLGNNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ self.in_dim_node = net_params['in_dim']
+ depth_of_mlp = net_params['depth_of_mlp']
+ hidden_dim = net_params['hidden_dim']
+ n_classes = net_params['n_classes']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.layer_norm = net_params['layer_norm']
+ self.residual = net_params['residual']
+ self.device = net_params['device']
+ self.diag_pool_readout = True # if True, uses the new_suffix readout from original code
+
+ block_features = [hidden_dim] * n_layers # L here is the block number
+
+ original_features_num = self.in_dim_node + 1 # Number of features of the input
+
+ # sequential mlp blocks
+ last_layer_features = original_features_num
+ self.reg_blocks = nn.ModuleList()
+ for layer, next_layer_features in enumerate(block_features):
+ mlp_block = RegularBlock(depth_of_mlp, last_layer_features, next_layer_features, self.residual)
+ self.reg_blocks.append(mlp_block)
+ last_layer_features = next_layer_features
+
+ if self.diag_pool_readout:
+ self.fc_layers = nn.ModuleList()
+ for output_features in block_features:
+ # each block's output is pooled (yielding 2*output_features) and passed through a fully connected layer
+ fc = FullyConnected(2*output_features, n_classes, activation_fn=None)
+ self.fc_layers.append(fc)
+ else:
+ self.mlp_prediction = MLPReadout(sum(block_features)+original_features_num, n_classes)
+
+ def forward(self, x):
+ if self.diag_pool_readout:
+ scores = torch.tensor(0, device=self.device, dtype=x.dtype)
+ else:
+ x_list = [x]
+
+ for i, block in enumerate(self.reg_blocks):
+
+ x = block(x)
+ if self.diag_pool_readout:
+ scores = self.fc_layers[i](diag_offdiag_maxpool(x)) + scores
+ else:
+ x_list.append(x)
+
+ if self.diag_pool_readout:
+ return scores
+ else:
+ # readout like RingGNN
+ x_list = [torch.sum(torch.sum(x, dim=3), dim=2) for x in x_list]
+ x_list = torch.cat(x_list, dim=1)
+
+ x_out = self.mlp_prediction(x_list)
+ return x_out
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss()
+ loss = criterion(pred, label)
+ return loss
+
\ No newline at end of file
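
The diag_offdiag_maxpool readout used above doubles the channel count by max-pooling node (diagonal) and edge (off-diagonal) entries separately; a sketch of the assumed behaviour:

    import torch

    def diag_offdiag_maxpool_sketch(x):
        # x: (B, C, N, N); returns (B, 2C) as expected by the FC layers above
        diag = x.diagonal(dim1=-2, dim2=-1)            # (B, C, N) node entries
        max_diag = diag.max(dim=-1).values             # (B, C)
        off = x.clone()
        idx = torch.arange(x.size(-1))
        off[..., idx, idx] = float('-inf')             # mask the diagonal
        max_off = off.flatten(-2).max(dim=-1).values   # (B, C)
        return torch.cat([max_diag, max_off], dim=1)
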
diff --git a/nets/CitationGraphs_node_classification/gat_net.py b/nets/CitationGraphs_node_classification/gat_net.py
deleted file mode 100644
index 9acedbad4..000000000
--- a/nets/CitationGraphs_node_classification/gat_net.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-import dgl
-from dgl.nn.pytorch import GATConv
-
-"""
- GAT: Graph Attention Network
- Graph Attention Networks (Veličković et al., ICLR 2018)
- https://arxiv.org/abs/1710.10903
-"""
-from layers.gat_layer import GATLayer
-
-class GATNet(nn.Module):
-
- def __init__(self, net_params):
- super().__init__()
-
- in_dim = net_params['in_dim']
- hidden_dim = net_params['hidden_dim']
- out_dim = net_params['out_dim']
- n_classes = net_params['n_classes']
- num_heads = net_params['n_heads']
- in_feat_dropout = net_params['in_feat_dropout']
- dropout = net_params['dropout']
- n_layers = net_params['L']
-
- self.graph_norm = net_params['graph_norm']
- self.batch_norm = net_params['batch_norm']
- self.residual = net_params['residual']
- self.dropout = dropout
- self.n_classes = n_classes
- self.device = net_params['device']
- self.dgl_builtin = net_params['builtin']
-
- feat_drop = dropout
- attn_drop = dropout
- negative_slope = 0.2
- residual = False
- self.layers = nn.ModuleList()
- self.activation = F.elu
- # input projection (no residual)
- self.layers.append(GATLayer(
- in_dim, hidden_dim, num_heads,
- dropout, self.graph_norm, self.batch_norm, self.residual,
- activation=self.activation, dgl_builtin=self.dgl_builtin))
- # hidden layers
- for l in range(1, n_layers):
- # due to multi-head, the in_dim = hidden_dim * num_heads
- self.layers.append(GATLayer(
- hidden_dim * num_heads, hidden_dim, num_heads,
- dropout, self.graph_norm, self.batch_norm, self.residual,
- activation=self.activation, dgl_builtin=self.dgl_builtin))
- # output projection
- self.layers.append(GATLayer(
- hidden_dim * num_heads, n_classes, 1,
- dropout, self.graph_norm, self.batch_norm, self.residual,
- activation=None, dgl_builtin=self.dgl_builtin))
-
- def forward(self, g, h, e, snorm_n, snorm_e):
-
- for conv in self.layers[:-1]:
- h = conv(g, h, snorm_n)
-
- h = self.layers[-1](g, h, snorm_n)
-
- return h
-
-
- def loss(self, pred, label):
- # Cross-entropy
- criterion = nn.CrossEntropyLoss()
- loss = criterion(pred, label)
-
- return loss
diff --git a/nets/CitationGraphs_node_classification/gated_gcn_net.py b/nets/CitationGraphs_node_classification/gated_gcn_net.py
deleted file mode 100644
index d6f2a7129..000000000
--- a/nets/CitationGraphs_node_classification/gated_gcn_net.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-import dgl
-import numpy as np
-
-"""
- ResGatedGCN: Residual Gated Graph ConvNets
- An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
- https://arxiv.org/pdf/1711.07553v2.pdf
-"""
-from layers.gated_gcn_layer import GatedGCNLayer
-from layers.mlp_readout_layer import MLPReadout
-
-class GatedGCNNet(nn.Module):
-
- def __init__(self, net_params):
- super().__init__()
-
- in_dim = net_params['in_dim']
- hidden_dim = net_params['hidden_dim']
- n_classes = net_params['n_classes']
- dropout = net_params['dropout']
- n_layers = net_params['L']
- self.graph_norm = net_params['graph_norm']
- self.batch_norm = net_params['batch_norm']
- self.residual = net_params['residual']
- self.n_classes = n_classes
- self.device = net_params['device']
-
- self.embedding_h = nn.Linear(in_dim, hidden_dim)
- self.embedding_e = nn.Linear(in_dim, hidden_dim)
- self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
- self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers) ])
- self.MLP_layer = MLPReadout(hidden_dim, n_classes, L=0)
-
-
- def forward(self, g, h, e, snorm_n, snorm_e):
-
- # input embedding
- h = self.embedding_h(h)
- e = self.embedding_e(e)
-
- # res gated convnets
- for conv in self.layers:
- h, e = conv(g, h, e, snorm_n.unsqueeze(-1), snorm_e.unsqueeze(-1))
-
- # output
- h_out = self.MLP_layer(h)
-
- return h_out
-
-
- def loss(self, pred, label):
-
- criterion = nn.CrossEntropyLoss()
- loss = criterion(pred, label)
-
- return loss
-
diff --git a/nets/CitationGraphs_node_classification/gcn_net.py b/nets/CitationGraphs_node_classification/gcn_net.py
deleted file mode 100644
index 022398ce4..000000000
--- a/nets/CitationGraphs_node_classification/gcn_net.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-import dgl
-import numpy as np
-
-"""
- GCN: Graph Convolutional Networks
- Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
- http://arxiv.org/abs/1609.02907
-"""
-from layers.gcn_layer import GCNLayer
-from dgl.nn.pytorch import GraphConv
-
-class GCNNet(nn.Module):
-
- def __init__(self, net_params):
- super().__init__()
-
- in_dim = net_params['in_dim']
- hidden_dim = net_params['hidden_dim']
- out_dim = net_params['out_dim']
- n_classes = net_params['n_classes']
- in_feat_dropout = net_params['in_feat_dropout']
- dropout = net_params['dropout']
- n_layers = net_params['L']
- self.graph_norm = net_params['graph_norm']
- self.batch_norm = net_params['batch_norm']
- self.residual = net_params['residual']
- self.n_classes = n_classes
- self.device = net_params['device']
- self.dgl_builtin = net_params['builtin']
-
- self.layers = nn.ModuleList()
- # input
- self.layers.append(GCNLayer(in_dim, hidden_dim, F.relu, dropout,
- self.graph_norm, self.batch_norm, self.residual,
- dgl_builtin=self.dgl_builtin))
-
- # hidden
- self.layers.extend(nn.ModuleList([GCNLayer(hidden_dim, hidden_dim,
- F.relu, dropout, self.graph_norm, self.batch_norm, self.residual,
- dgl_builtin=self.dgl_builtin)
- for _ in range(n_layers-1)]))
-
- # output
- self.layers.append(GCNLayer(hidden_dim, n_classes, None, 0,
- self.graph_norm, self.batch_norm, self.residual,
- dgl_builtin=self.dgl_builtin))
-
- self.dropout = nn.Dropout(p=dropout)
-
- def forward(self, g, h, e, snorm_n, snorm_e):
-
- # GCN
- for i, conv in enumerate(self.layers):
- h = conv(g, h, snorm_n)
- return h
-
-
- def loss(self, pred, label):
-
- criterion = nn.CrossEntropyLoss()
- loss = criterion(pred, label)
-
- return loss
-
-
-
-
-
-
-
-
-
-
-
diff --git a/nets/CitationGraphs_node_classification/gin_net.py b/nets/CitationGraphs_node_classification/gin_net.py
deleted file mode 100644
index 1bddf3e1c..000000000
--- a/nets/CitationGraphs_node_classification/gin_net.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-import dgl
-from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
-
-"""
- GIN: Graph Isomorphism Networks
- HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
- https://arxiv.org/pdf/1810.00826.pdf
-"""
-
-from layers.gin_layer import GINLayer, ApplyNodeFunc, MLP
-
-class GINNet(nn.Module):
-
- def __init__(self, net_params):
- super().__init__()
- in_dim = net_params['in_dim']
- hidden_dim = net_params['hidden_dim']
- n_classes = net_params['n_classes']
- dropout = net_params['dropout']
- self.n_layers = net_params['L']
- n_mlp_layers = net_params['n_mlp_GIN'] # GIN
- learn_eps = net_params['learn_eps_GIN'] # GIN
- neighbor_aggr_type = net_params['neighbor_aggr_GIN'] # GIN
- graph_norm = net_params['graph_norm']
- batch_norm = net_params['batch_norm']
- residual = net_params['residual']
-
- # List of MLPs
- self.ginlayers = torch.nn.ModuleList()
-
- self.embedding_h = nn.Linear(in_dim, hidden_dim)
-
- # Input layer
- mlp = MLP(1, in_dim, hidden_dim, hidden_dim)
- self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
- dropout, graph_norm, batch_norm,
- residual, 0, learn_eps,
- activation=F.relu))
-
- # Hidden layers
- for layer in range(self.n_layers-1):
- mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
-
- self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp),
- neighbor_aggr_type, dropout, graph_norm, batch_norm, residual,
- 0, learn_eps, activation=F.relu))
-
- # Output layer
- mlp = MLP(1, hidden_dim, n_classes, n_classes)
- self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
- dropout, graph_norm, batch_norm,
- residual, 0, learn_eps))
-
- def forward(self, g, h, e, snorm_n, snorm_e):
-
- for i in range(self.n_layers):
- h = self.ginlayers[i](g, h, snorm_n)
-
-
- return h
-
- def loss(self, pred, label):
- criterion = nn.CrossEntropyLoss()
- loss = criterion(pred, label)
- return loss
diff --git a/nets/CitationGraphs_node_classification/graphsage_net.py b/nets/CitationGraphs_node_classification/graphsage_net.py
deleted file mode 100644
index 330a69089..000000000
--- a/nets/CitationGraphs_node_classification/graphsage_net.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-import dgl
-
-"""
- GraphSAGE:
- William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)
- https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
-"""
-
-from layers.graphsage_layer import GraphSageLayer
-from layers.mlp_readout_layer import MLPReadout
-from dgl.nn.pytorch.conv import SAGEConv
-
-class GraphSageNet(nn.Module):
- """
- GraphSage network with multiple GraphSageLayer layers
- """
- def __init__(self, net_params):
- super().__init__()
- in_dim = net_params['in_dim']
- hidden_dim = net_params['hidden_dim']
- out_dim = net_params['out_dim']
- n_classes = net_params['n_classes']
- in_feat_dropout = net_params['in_feat_dropout']
- dropout = net_params['dropout']
- aggregator_type = net_params['sage_aggregator']
- n_layers = net_params['L']
- self.residual = net_params['residual']
- dgl_builtin = net_params['builtin']
- bnorm = net_params['batch_norm']
-
- self.layers = nn.ModuleList()
- # Input
- self.layers.append(GraphSageLayer(in_dim, hidden_dim, F.relu,
- dropout, aggregator_type, self.residual,
- batch_norm=bnorm, dgl_builtin=dgl_builtin))
- # Hidden layers
- self.layers.extend(nn.ModuleList([GraphSageLayer(hidden_dim,
- hidden_dim, F.relu, dropout, aggregator_type, self.residual,
- batch_norm=bnorm, dgl_builtin=dgl_builtin) for _ in range(n_layers-1)]))
- # Output layer
- self.layers.append(GraphSageLayer(hidden_dim, n_classes, None,
- dropout, aggregator_type, self.residual, batch_norm=bnorm,
- dgl_builtin=dgl_builtin))
-
- def forward(self, g, h, e, snorm_n, snorm_e):
- for conv in self.layers:
- h = conv(g, h)
- return h
-
-
- def loss(self, pred, label):
- criterion = nn.CrossEntropyLoss()
- loss = criterion(pred, label)
- return loss
-
diff --git a/nets/CitationGraphs_node_classification/load_net.py b/nets/CitationGraphs_node_classification/load_net.py
deleted file mode 100644
index a8a632c63..000000000
--- a/nets/CitationGraphs_node_classification/load_net.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
- Utility file to select GraphNN model as
- selected by the user
-"""
-
-from nets.CitationGraphs_node_classification.gcn_net import GCNNet
-from nets.CitationGraphs_node_classification.gat_net import GATNet
-from nets.CitationGraphs_node_classification.graphsage_net import GraphSageNet
-from nets.CitationGraphs_node_classification.mlp_net import MLPNet
-
-
-
-def GCN(net_params):
- return GCNNet(net_params)
-
-def GAT(net_params):
- return GATNet(net_params)
-
-def GraphSage(net_params):
- return GraphSageNet(net_params)
-
-def MLP(net_params):
- return MLPNet(net_params)
-
-def gnn_model(MODEL_NAME, net_params):
- models = {
- 'GCN': GCN,
- 'GAT': GAT,
- 'GraphSage': GraphSage,
- 'MLP': MLP,
- }
-
- return models[MODEL_NAME](net_params)
diff --git a/nets/SBMs_node_classification/gat_net.py b/nets/SBMs_node_classification/gat_net.py
index e87789727..2d4bed943 100644
--- a/nets/SBMs_node_classification/gat_net.py
+++ b/nets/SBMs_node_classification/gat_net.py
@@ -27,7 +27,6 @@ def __init__(self, net_params):
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
self.dropout = dropout
@@ -39,12 +38,12 @@ def __init__(self, net_params):
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GATLayer(hidden_dim * num_heads, hidden_dim, num_heads,
- dropout, self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers-1)])
- self.layers.append(GATLayer(hidden_dim * num_heads, out_dim, 1, dropout, self.graph_norm, self.batch_norm, self.residual))
+ dropout, self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ self.layers.append(GATLayer(hidden_dim * num_heads, out_dim, 1, dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
# input embedding
h = self.embedding_h(h)
@@ -52,7 +51,7 @@ def forward(self, g, h, e, snorm_n, snorm_e):
# GAT
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
# output
h_out = self.MLP_layer(h)
diff --git a/nets/SBMs_node_classification/gated_gcn_net.py b/nets/SBMs_node_classification/gated_gcn_net.py
index ea4982ea6..76b23a89d 100644
--- a/nets/SBMs_node_classification/gated_gcn_net.py
+++ b/nets/SBMs_node_classification/gated_gcn_net.py
@@ -25,28 +25,34 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
self.n_classes = n_classes
self.device = net_params['device']
+ self.pos_enc = net_params['pos_enc']
+ if self.pos_enc:
+ pos_enc_dim = net_params['pos_enc_dim']
+ self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
self.embedding_e = nn.Linear(in_dim_edge, hidden_dim) # edge feat is a float
self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
- self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers) ])
+ self.batch_norm, self.residual) for _ in range(n_layers) ])
self.MLP_layer = MLPReadout(hidden_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e, h_pos_enc=None):
# input embedding
h = self.embedding_h(h)
+ if self.pos_enc:
+ h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
+ h = h + h_pos_enc
e = self.embedding_e(e)
# res gated convnets
for conv in self.layers:
- h, e = conv(g, h, e, snorm_n, snorm_e)
+ h, e = conv(g, h, e)
# output
h_out = self.MLP_layer(h)
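
Editor's note: the SBMs `gated_gcn_net.py` now takes an optional `h_pos_enc` input that is embedded and added to the node embedding. Below is a minimal sketch of how such an encoding could be precomputed, assuming Laplacian-eigenvector positional encodings (one common choice); the helper is illustrative only and not part of this diff.

```
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh

def laplacian_pos_enc(adj: sp.csr_matrix, pos_enc_dim: int) -> np.ndarray:
    # symmetric normalized Laplacian L = I - D^{-1/2} A D^{-1/2}
    deg = np.asarray(adj.sum(axis=1)).flatten().clip(min=1).astype(float)
    d_inv_sqrt = sp.diags(deg ** -0.5)
    lap = sp.eye(adj.shape[0]) - d_inv_sqrt @ adj @ d_inv_sqrt
    # eigenvectors of the smallest eigenvalues serve as node coordinates
    _, eigvecs = eigsh(lap, k=pos_enc_dim + 1, which='SM')
    return eigvecs[:, 1:]  # drop the trivial constant eigenvector

# usage sketch: model(g, h, e, h_pos_enc=torch.from_numpy(pe).float())
```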
diff --git a/nets/SBMs_node_classification/gcn_net.py b/nets/SBMs_node_classification/gcn_net.py
index 42cd604ae..22eac8b65 100644
--- a/nets/SBMs_node_classification/gcn_net.py
+++ b/nets/SBMs_node_classification/gcn_net.py
@@ -26,7 +26,6 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
self.n_classes = n_classes
@@ -35,12 +34,12 @@ def __init__(self, net_params):
self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
- self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers-1)])
- self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.graph_norm, self.batch_norm, self.residual))
+ self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
# input embedding
h = self.embedding_h(h)
@@ -48,7 +47,7 @@ def forward(self, g, h, e, snorm_n, snorm_e):
# GCN
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
# output
h_out = self.MLP_layer(h)
diff --git a/nets/SBMs_node_classification/gin_net.py b/nets/SBMs_node_classification/gin_net.py
index 537672344..4f45db560 100644
--- a/nets/SBMs_node_classification/gin_net.py
+++ b/nets/SBMs_node_classification/gin_net.py
@@ -25,8 +25,7 @@ def __init__(self, net_params):
n_mlp_layers = net_params['n_mlp_GIN'] # GIN
learn_eps = net_params['learn_eps_GIN'] # GIN
neighbor_aggr_type = net_params['neighbor_aggr_GIN'] # GIN
- readout = net_params['readout'] # this is graph_pooling_type
- graph_norm = net_params['graph_norm']
+ readout = net_params['readout'] # this is graph_pooling_type
batch_norm = net_params['batch_norm']
residual = net_params['residual']
self.n_classes = n_classes
@@ -41,7 +40,7 @@ def __init__(self, net_params):
mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
- dropout, graph_norm, batch_norm, residual, 0, learn_eps))
+ dropout, batch_norm, residual, 0, learn_eps))
# Linear function for output of each layer
# which maps the output of different layers into a prediction score
@@ -51,7 +50,7 @@ def __init__(self, net_params):
self.linears_prediction.append(nn.Linear(hidden_dim, n_classes))
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
@@ -59,7 +58,7 @@ def forward(self, g, h, e, snorm_n, snorm_e):
hidden_rep = [h]
for i in range(self.n_layers):
- h = self.ginlayers[i](g, h, snorm_n)
+ h = self.ginlayers[i](g, h)
hidden_rep.append(h)
score_over_layer = 0
diff --git a/nets/SBMs_node_classification/graphsage_net.py b/nets/SBMs_node_classification/graphsage_net.py
index eda37a756..f3f100798 100644
--- a/nets/SBMs_node_classification/graphsage_net.py
+++ b/nets/SBMs_node_classification/graphsage_net.py
@@ -28,8 +28,7 @@ def __init__(self, net_params):
in_feat_dropout = net_params['in_feat_dropout']
dropout = net_params['dropout']
aggregator_type = net_params['sage_aggregator']
- n_layers = net_params['L']
- graph_norm = net_params['graph_norm']
+ n_layers = net_params['L']
batch_norm = net_params['batch_norm']
residual = net_params['residual']
self.readout = net_params['readout']
@@ -40,12 +39,12 @@ def __init__(self, net_params):
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GraphSageLayer(hidden_dim, hidden_dim, F.relu,
- dropout, aggregator_type, graph_norm, batch_norm, residual) for _ in range(n_layers-1)])
- self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, graph_norm, batch_norm, residual))
+ dropout, aggregator_type, batch_norm, residual) for _ in range(n_layers-1)])
+ self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
# input embedding
h = self.embedding_h(h)
@@ -53,7 +52,7 @@ def forward(self, g, h, e, snorm_n, snorm_e):
# graphsage
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
# output
h_out = self.MLP_layer(h)
diff --git a/nets/SBMs_node_classification/load_net.py b/nets/SBMs_node_classification/load_net.py
index 37acf7a52..9307e942b 100644
--- a/nets/SBMs_node_classification/load_net.py
+++ b/nets/SBMs_node_classification/load_net.py
@@ -10,6 +10,8 @@
from nets.SBMs_node_classification.mlp_net import MLPNet
from nets.SBMs_node_classification.gin_net import GINNet
from nets.SBMs_node_classification.mo_net import MoNet as MoNet_
+from nets.SBMs_node_classification.ring_gnn_net import RingGNNNet
+from nets.SBMs_node_classification.three_wl_gnn_net import ThreeWLGNNNet
def GatedGCN(net_params):
@@ -33,6 +35,13 @@ def GIN(net_params):
def MoNet(net_params):
return MoNet_(net_params)
+def RingGNN(net_params):
+ return RingGNNNet(net_params)
+
+def ThreeWLGNN(net_params):
+ return ThreeWLGNNNet(net_params)
+
+
def gnn_model(MODEL_NAME, net_params):
models = {
'GatedGCN': GatedGCN,
@@ -41,7 +50,9 @@ def gnn_model(MODEL_NAME, net_params):
'GraphSage': GraphSage,
'MLP': MLP,
'GIN': GIN,
- 'MoNet': MoNet
+ 'MoNet': MoNet,
+ 'RingGNN': RingGNN,
+ '3WLGNN': ThreeWLGNN
}
return models[MODEL_NAME](net_params)
\ No newline at end of file
diff --git a/nets/SBMs_node_classification/mlp_net.py b/nets/SBMs_node_classification/mlp_net.py
index e29f732c3..aa0c32f20 100644
--- a/nets/SBMs_node_classification/mlp_net.py
+++ b/nets/SBMs_node_classification/mlp_net.py
@@ -42,7 +42,7 @@ def __init__(self, net_params):
self.readout_mlp = MLPReadout(hidden_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
# input embedding
h = self.embedding_h(h)
diff --git a/nets/SBMs_node_classification/mo_net.py b/nets/SBMs_node_classification/mo_net.py
index 28f9a53cb..0c2b6bb54 100644
--- a/nets/SBMs_node_classification/mo_net.py
+++ b/nets/SBMs_node_classification/mo_net.py
@@ -18,7 +18,7 @@
class MoNet(nn.Module):
def __init__(self, net_params):
super().__init__()
-
+ self.name = 'MoNet'
in_dim = net_params['in_dim']
hidden_dim = net_params['hidden_dim']
out_dim = net_params['out_dim']
@@ -27,8 +27,7 @@ def __init__(self, net_params):
n_classes = net_params['n_classes']
dropout = net_params['dropout']
n_layers = net_params['L']
- self.readout = net_params['readout']
- graph_norm = net_params['graph_norm']
+ self.readout = net_params['readout']
batch_norm = net_params['batch_norm']
residual = net_params['residual']
self.device = net_params['device']
@@ -44,29 +43,36 @@ def __init__(self, net_params):
# Hidden layer
for _ in range(n_layers-1):
self.layers.append(GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
- dropout, graph_norm, batch_norm, residual))
+ dropout, batch_norm, residual))
self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
# Output layer
self.layers.append(GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type,
- dropout, graph_norm, batch_norm, residual))
+ dropout, batch_norm, residual))
self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
# computing the 'pseudo' named tensor which depends on node degrees
- us, vs = g.edges()
- # to avoid zero division in case in_degree is 0, we add constant '1' in all node degrees denoting self-loop
- pseudo = [ [1/np.sqrt(g.in_degree(us[i])+1), 1/np.sqrt(g.in_degree(vs[i])+1)] for i in range(g.number_of_edges()) ]
- pseudo = torch.Tensor(pseudo).to(self.device)
+ g.ndata['deg'] = g.in_degrees()
+ g.apply_edges(self.compute_pseudo)
+ pseudo = g.edata['pseudo'].to(self.device).float()
for i in range(len(self.layers)):
- h = self.layers[i](g, h, self.pseudo_proj[i](pseudo), snorm_n)
+ h = self.layers[i](g, h, self.pseudo_proj[i](pseudo))
return self.MLP_layer(h)
+
+ def compute_pseudo(self, edges):
+ # compute pseudo edge features for MoNet
+ # to avoid division by zero when in_degree is 0, we add a constant 1 to every node degree (i.e., a self-loop)
+ srcs = 1/np.sqrt(edges.src['deg']+1)
+ dsts = 1/np.sqrt(edges.dst['deg']+1)
+ pseudo = torch.cat((srcs.unsqueeze(-1), dsts.unsqueeze(-1)), dim=1)
+ return {'pseudo': pseudo}
def loss(self, pred, label):
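
Editor's note: the MoNet refactor replaces the Python loop over `g.edges()` with DGL's edge-wise UDF `apply_edges`, so the degree arithmetic runs as one batched tensor operation instead of one Python iteration per edge. A self-contained toy illustration (the 3-node cycle is an assumed example; assumes a DGL version providing `dgl.graph`):

```
import dgl
import torch

g = dgl.graph(([0, 1, 2], [1, 2, 0]))  # toy 3-cycle, purely illustrative
g.ndata['deg'] = g.in_degrees()

def compute_pseudo(edges):
    # pseudo coordinate of edge (u, v): (1/sqrt(deg(u)+1), 1/sqrt(deg(v)+1))
    srcs = 1 / torch.sqrt(edges.src['deg'].float() + 1)
    dsts = 1 / torch.sqrt(edges.dst['deg'].float() + 1)
    return {'pseudo': torch.stack((srcs, dsts), dim=1)}

g.apply_edges(compute_pseudo)
print(g.edata['pseudo'].shape)  # torch.Size([3, 2])
```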
diff --git a/nets/SBMs_node_classification/ring_gnn_net.py b/nets/SBMs_node_classification/ring_gnn_net.py
new file mode 100644
index 000000000..816bbe1c4
--- /dev/null
+++ b/nets/SBMs_node_classification/ring_gnn_net.py
@@ -0,0 +1,81 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+import time
+
+"""
+ Ring-GNN
+ On the equivalence between graph isomorphism testing and function approximation with GNNs (Chen et al, 2019)
+ https://arxiv.org/pdf/1905.12560v1.pdf
+"""
+from layers.ring_gnn_equiv_layer import RingGNNEquivLayer
+from layers.mlp_readout_layer import MLPReadout
+
+class RingGNNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ self.num_node_type = net_params['in_dim'] # 'num_node_type' is 'nodeclasses' as in RingGNN original repo
+ # node_classes = net_params['node_classes']
+ avg_node_num = net_params['avg_node_num']
+ radius = net_params['radius']
+ hidden_dim = net_params['hidden_dim']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.n_classes = net_params['n_classes']
+ self.layer_norm = net_params['layer_norm']
+ self.residual = net_params['residual']
+ self.device = net_params['device']
+
+ self.depth = [torch.LongTensor([1+self.num_node_type])] + [torch.LongTensor([hidden_dim])] * n_layers
+
+ self.equi_modulelist = nn.ModuleList([RingGNNEquivLayer(self.device, m, n,
+ layer_norm=self.layer_norm,
+ residual=self.residual,
+ dropout=dropout,
+ radius=radius,
+ k2_init=0.5/avg_node_num) for m, n in zip(self.depth[:-1], self.depth[1:])])
+
+ self.prediction = MLPReadout(torch.sum(torch.stack(self.depth)).item(), self.n_classes)
+
+ def forward(self, x_with_node_feat):
+ """
+ CODE ADAPTED FROM https://github.com/leichen2018/Ring-GNN/
+ : preparing input to the model in form new_adj
+ """
+ x = x_with_node_feat
+ # this x is the tensor with all info available => adj, node feat
+
+ x_list = [x]
+ for layer in self.equi_modulelist:
+ x = layer(x)
+ x_list.append(x)
+
+ # readout
+ x_list = [torch.sum(x, dim=2) for x in x_list]
+ x_list = torch.cat(x_list, dim=1)
+
+ # reshaping in form of [n x d_out]
+ x_out = x_list.squeeze().permute(1,0)
+
+ x_out = self.prediction(x_out)
+
+ return x_out
+
+ def loss(self, pred, label):
+
+ # calculating label weights for weighted loss computation
+ V = label.size(0)
+ label_count = torch.bincount(label)
+ label_count = label_count[label_count.nonzero()].squeeze()
+ cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
+ cluster_sizes[torch.unique(label)] = label_count
+ weight = (V - cluster_sizes).float() / V
+ weight *= (cluster_sizes>0).float()
+
+ # weighted cross-entropy for unbalanced classes
+ criterion = nn.CrossEntropyLoss(weight=weight)
+ loss = criterion(pred, label)
+
+ return loss
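
Editor's note: both new SBMs WL-GNN nets share the benchmark's weighted cross-entropy for unbalanced clusters: a class c present in the batch with n_c of the V nodes gets weight (V - n_c) / V, and absent classes get weight 0. A worked toy example of the weight computation in `loss` above:

```
import torch
import torch.nn as nn

label = torch.tensor([0, 0, 0, 1])  # toy batch, V = 4, class 2 absent
n_classes = 3
V = label.size(0)
counts = torch.bincount(label)                        # tensor([3, 1])
sizes = torch.zeros(n_classes, dtype=torch.long)
sizes[torch.unique(label)] = counts[counts.nonzero()].squeeze()
weight = (V - sizes).float() / V                      # tensor([0.2500, 0.7500, 1.0000])
weight *= (sizes > 0).float()                         # tensor([0.2500, 0.7500, 0.0000])
criterion = nn.CrossEntropyLoss(weight=weight)
```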
diff --git a/nets/SBMs_node_classification/three_wl_gnn_net.py b/nets/SBMs_node_classification/three_wl_gnn_net.py
new file mode 100644
index 000000000..f959d58f3
--- /dev/null
+++ b/nets/SBMs_node_classification/three_wl_gnn_net.py
@@ -0,0 +1,106 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+import time
+
+"""
+ 3WLGNN / ThreeWLGNN
+ Provably Powerful Graph Networks (Maron et al., 2019)
+ https://papers.nips.cc/paper/8488-provably-powerful-graph-networks.pdf
+
+ CODE adapted from https://github.com/hadarser/ProvablyPowerfulGraphNetworks_torch/
+"""
+
+from layers.three_wl_gnn_layers import RegularBlock, MlpBlock, SkipConnection, FullyConnected, diag_offdiag_maxpool
+from layers.mlp_readout_layer import MLPReadout
+
+class ThreeWLGNNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+
+ self.num_node_type = net_params['in_dim']
+ depth_of_mlp = net_params['depth_of_mlp']
+ hidden_dim = net_params['hidden_dim']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.n_classes = net_params['n_classes']
+ self.layer_norm = net_params['layer_norm']
+ self.residual = net_params['residual']
+ self.device = net_params['device']
+ self.gin_like_readout = True # if True, uses a GIN-like readout, but without diag pooling, since this is a node-level task
+
+ block_features = [hidden_dim] * n_layers # L here is the block number
+
+ original_features_num = self.num_node_type + 1 # Number of features of the input
+
+ # sequential mlp blocks
+ last_layer_features = original_features_num
+ self.reg_blocks = nn.ModuleList()
+ for layer, next_layer_features in enumerate(block_features):
+ mlp_block = RegularBlock(depth_of_mlp, last_layer_features, next_layer_features, self.residual)
+ self.reg_blocks.append(mlp_block)
+ last_layer_features = next_layer_features
+
+
+ if self.gin_like_readout:
+ self.fc_layers = nn.ModuleList()
+ for output_features in block_features:
+ # each block's output is summed over nodes, then passed through a fully connected layer
+ fc = FullyConnected(output_features, self.n_classes, activation_fn=None)
+ self.fc_layers.append(fc)
+ else:
+ self.mlp_prediction = MLPReadout(sum(block_features)+original_features_num, self.n_classes)
+
+
+ def forward(self, x_with_node_feat):
+ x = x_with_node_feat
+ # this x is the tensor with all info available => adj, node feat
+
+ if self.gin_like_readout:
+ scores = torch.tensor(0, device=self.device, dtype=x.dtype)
+ else:
+ x_list = [x]
+
+ for i, block in enumerate(self.reg_blocks):
+
+ x = block(x)
+ if self.gin_like_readout:
+ x_out = torch.sum(x, dim=2) # from [1 x d_out x n x n] to [1 x d_out x n]
+ x_out = x_out.squeeze().permute(1,0) # reshaping in form of [n x d_out]
+ scores = self.fc_layers[i](x_out) + scores
+ else:
+ x_list.append(x)
+
+ if self.gin_like_readout:
+ return scores
+ else:
+ # readout
+ x_list = [torch.sum(x, dim=2) for x in x_list]
+ x_list = torch.cat(x_list, dim=1)
+
+ # reshaping in form of [n x d_out]
+ x_out = x_list.squeeze().permute(1,0)
+
+ x_out = self.mlp_prediction(x_out)
+
+ return x_out
+
+
+ def loss(self, pred, label):
+
+ # calculating label weights for weighted loss computation
+ V = label.size(0)
+ label_count = torch.bincount(label)
+ label_count = label_count[label_count.nonzero()].squeeze()
+ cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
+ cluster_sizes[torch.unique(label)] = label_count
+ weight = (V - cluster_sizes).float() / V
+ weight *= (cluster_sizes>0).float()
+
+ # weighted cross-entropy for unbalanced classes
+ criterion = nn.CrossEntropyLoss(weight=weight)
+ loss = criterion(pred, label)
+
+ return loss
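
Editor's note: `forward` here consumes a single dense tensor bundling the adjacency matrix and node features. Below is a sketch of one plausible construction; the diagonal one-hot layout is an assumption about the benchmark's dense collate step, which is not shown in this diff.

```
import torch

def to_dense_input(adj, node_type, num_node_type):
    # adj: [n, n] float adjacency; node_type: [n] long node labels
    n = adj.size(0)
    x = torch.zeros(1 + num_node_type, n, n)
    x[0] = adj                                   # channel 0: adjacency
    idx = torch.arange(n)
    x[1 + node_type, idx, idx] = 1.0             # one-hot node type on the diagonal
    return x.unsqueeze(0)                        # [1, 1 + num_node_type, n, n]
```

This layout matches `original_features_num = self.num_node_type + 1` above.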
diff --git a/nets/TSP_edge_classification/gat_net.py b/nets/TSP_edge_classification/gat_net.py
index 0477c2996..6676b9ffc 100644
--- a/nets/TSP_edge_classification/gat_net.py
+++ b/nets/TSP_edge_classification/gat_net.py
@@ -9,13 +9,14 @@
Graph Attention Networks (Veličković et al., ICLR 2018)
https://arxiv.org/abs/1710.10903
"""
-from layers.gat_layer import GATLayer
+from layers.gat_layer import GATLayer, CustomGATLayer, CustomGATLayerEdgeReprFeat, CustomGATLayerIsotropic
from layers.mlp_readout_layer import MLPReadout
class GATNet(nn.Module):
def __init__(self, net_params):
super().__init__()
in_dim = net_params['in_dim']
+ in_dim_edge = net_params['in_dim_edge']
hidden_dim = net_params['hidden_dim']
out_dim = net_params['out_dim']
n_classes = net_params['n_classes']
@@ -24,27 +25,47 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
self.dropout = dropout
self.n_classes = n_classes
self.device = net_params['device']
+ self.layer_type = {
+ "dgl": GATLayer,
+ "edgereprfeat": CustomGATLayerEdgeReprFeat,
+ "edgefeat": CustomGATLayer,
+ "isotropic": CustomGATLayerIsotropic,
+ }.get(net_params['layer_type'], GATLayer)
+
self.embedding_h = nn.Linear(in_dim, hidden_dim * num_heads)
+ if self.layer_type != GATLayer:
+ self.edge_feat = net_params['edge_feat']
+ self.embedding_e = nn.Linear(in_dim_edge, hidden_dim * num_heads)
+
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
- self.layers = nn.ModuleList([GATLayer(hidden_dim * num_heads, hidden_dim, num_heads,
- dropout, self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers-1)])
- self.layers.append(GATLayer(hidden_dim * num_heads, out_dim, 1, dropout, self.graph_norm, self.batch_norm, self.residual))
+ self.layers = nn.ModuleList([self.layer_type(hidden_dim * num_heads, hidden_dim, num_heads,
+ dropout, self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ self.layers.append(self.layer_type(hidden_dim * num_heads, out_dim, 1, dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(2*out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h.float())
h = self.in_feat_dropout(h)
- for conv in self.layers:
- h = conv(g, h, snorm_n)
+
+ if self.layer_type == GATLayer:
+ for conv in self.layers:
+ h = conv(g, h)
+ else:
+ if not self.edge_feat:
+ e = torch.ones_like(e).to(self.device)
+ e = self.embedding_e(e.float())
+
+ for conv in self.layers:
+ h, e = conv(g, h, e)
+
g.ndata['h'] = h
def _edge_feat(edges):
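
Editor's note: the new `layer_type` switch picks a GAT variant from the config, with unknown values falling back to the stock DGL `GATLayer` via `dict.get`. A short sketch of the resolution (the layer names are those imported in the diff above):

```
from layers.gat_layer import (GATLayer, CustomGATLayer,
                              CustomGATLayerEdgeReprFeat, CustomGATLayerIsotropic)

layer_cls = {
    "dgl": GATLayer,
    "edgereprfeat": CustomGATLayerEdgeReprFeat,
    "edgefeat": CustomGATLayer,
    "isotropic": CustomGATLayerIsotropic,
}.get("edgereprfeat", GATLayer)  # the key comes from net_params['layer_type']
```

Note that with a custom layer and `edge_feat` set to false (as in the `*_edgefeat.json` configs), the forward pass still embeds edge features, but from a constant `torch.ones_like(e)` placeholder.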
diff --git a/nets/TSP_edge_classification/gated_gcn_net.py b/nets/TSP_edge_classification/gated_gcn_net.py
index 46e0094ee..4e5920e17 100644
--- a/nets/TSP_edge_classification/gated_gcn_net.py
+++ b/nets/TSP_edge_classification/gated_gcn_net.py
@@ -9,7 +9,7 @@
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
-from layers.gated_gcn_layer import GatedGCNLayer
+from layers.gated_gcn_layer import GatedGCNLayer, GatedGCNLayerEdgeFeatOnly, GatedGCNLayerIsotropic
from layers.mlp_readout_layer import MLPReadout
class GatedGCNNet(nn.Module):
@@ -24,21 +24,26 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
self.edge_feat = net_params['edge_feat']
self.n_classes = n_classes
self.device = net_params['device']
+ self.layer_type = {
+ "edgereprfeat": GatedGCNLayer,
+ "edgefeat": GatedGCNLayerEdgeFeatOnly,
+ "isotropic": GatedGCNLayerIsotropic,
+ }.get(net_params['layer_type'], GatedGCNLayer)
+
self.embedding_h = nn.Linear(in_dim, hidden_dim)
self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
- self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
- self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers-1) ])
- self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.graph_norm, self.batch_norm, self.residual))
+ self.layers = nn.ModuleList([ self.layer_type(hidden_dim, hidden_dim, dropout,
+ self.batch_norm, self.residual) for _ in range(n_layers-1) ])
+ self.layers.append(self.layer_type(hidden_dim, out_dim, dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(2*out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h.float())
if not self.edge_feat:
@@ -47,7 +52,7 @@ def forward(self, g, h, e, snorm_n, snorm_e):
# convnets
for conv in self.layers:
- h, e = conv(g, h, e, snorm_n, snorm_e)
+ h, e = conv(g, h, e)
g.ndata['h'] = h
def _edge_feat(edges):
diff --git a/nets/TSP_edge_classification/gcn_net.py b/nets/TSP_edge_classification/gcn_net.py
index 36ef587e4..f042d2020 100644
--- a/nets/TSP_edge_classification/gcn_net.py
+++ b/nets/TSP_edge_classification/gcn_net.py
@@ -23,7 +23,6 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
self.n_classes = n_classes
@@ -33,15 +32,15 @@ def __init__(self, net_params):
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
- self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers-1)])
- self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.graph_norm, self.batch_norm, self.residual))
+ self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(2*out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h.float())
h = self.in_feat_dropout(h)
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
g.ndata['h'] = h
def _edge_feat(edges):
diff --git a/nets/TSP_edge_classification/gin_net.py b/nets/TSP_edge_classification/gin_net.py
index 507e004f6..a3eccaadb 100644
--- a/nets/TSP_edge_classification/gin_net.py
+++ b/nets/TSP_edge_classification/gin_net.py
@@ -25,8 +25,7 @@ def __init__(self, net_params):
n_mlp_layers = net_params['n_mlp_GIN'] # GIN
learn_eps = net_params['learn_eps_GIN'] # GIN
neighbor_aggr_type = net_params['neighbor_aggr_GIN'] # GIN
- readout = net_params['readout'] # this is graph_pooling_type
- graph_norm = net_params['graph_norm']
+ readout = net_params['readout'] # this is graph_pooling_type
batch_norm = net_params['batch_norm']
residual = net_params['residual']
self.n_classes = n_classes
@@ -41,7 +40,7 @@ def __init__(self, net_params):
mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
- dropout, graph_norm, batch_norm, residual, 0, learn_eps))
+ dropout, batch_norm, residual, 0, learn_eps))
# Non-linear function for output of each layer
# which maps the output of different layers into a prediction score
@@ -56,7 +55,7 @@ def __init__(self, net_params):
)
)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
def _edge_feat(edges):
e = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
@@ -70,7 +69,7 @@ def _edge_feat(edges):
hidden_rep = [g.edata['e']]
for i in range(self.n_layers):
- h = self.ginlayers[i](g, h, snorm_n)
+ h = self.ginlayers[i](g, h)
g.ndata['h'] = h
g.apply_edges(_edge_feat)
hidden_rep.append(g.edata['e'])
diff --git a/nets/TSP_edge_classification/graphsage_net.py b/nets/TSP_edge_classification/graphsage_net.py
index fdea4f55e..162fcf12c 100644
--- a/nets/TSP_edge_classification/graphsage_net.py
+++ b/nets/TSP_edge_classification/graphsage_net.py
@@ -26,8 +26,7 @@ def __init__(self, net_params):
in_feat_dropout = net_params['in_feat_dropout']
dropout = net_params['dropout']
aggregator_type = net_params['sage_aggregator']
- n_layers = net_params['L']
- graph_norm = net_params['graph_norm']
+ n_layers = net_params['L']
batch_norm = net_params['batch_norm']
residual = net_params['residual']
self.readout = net_params['readout']
@@ -38,15 +37,15 @@ def __init__(self, net_params):
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GraphSageLayer(hidden_dim, hidden_dim, F.relu,
- dropout, aggregator_type, graph_norm, batch_norm, residual) for _ in range(n_layers-1)])
- self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, graph_norm, batch_norm, residual))
+ dropout, aggregator_type, batch_norm, residual) for _ in range(n_layers-1)])
+ self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
self.MLP_layer = MLPReadout(2*out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h.float())
h = self.in_feat_dropout(h)
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
g.ndata['h'] = h
def _edge_feat(edges):
diff --git a/nets/TSP_edge_classification/load_net.py b/nets/TSP_edge_classification/load_net.py
index b0afcbce7..d208aa4a8 100644
--- a/nets/TSP_edge_classification/load_net.py
+++ b/nets/TSP_edge_classification/load_net.py
@@ -10,6 +10,8 @@
from nets.TSP_edge_classification.gin_net import GINNet
from nets.TSP_edge_classification.mo_net import MoNet as MoNet_
from nets.TSP_edge_classification.mlp_net import MLPNet
+from nets.TSP_edge_classification.ring_gnn_net import RingGNNNet
+from nets.TSP_edge_classification.three_wl_gnn_net import ThreeWLGNNNet
def GatedGCN(net_params):
@@ -33,6 +35,12 @@ def MoNet(net_params):
def MLP(net_params):
return MLPNet(net_params)
+def RingGNN(net_params):
+ return RingGNNNet(net_params)
+
+def ThreeWLGNN(net_params):
+ return ThreeWLGNNNet(net_params)
+
def gnn_model(MODEL_NAME, net_params):
models = {
'GatedGCN': GatedGCN,
@@ -41,7 +49,9 @@ def gnn_model(MODEL_NAME, net_params):
'GraphSage': GraphSage,
'GIN': GIN,
'MoNet': MoNet,
- 'MLP': MLP
+ 'MLP': MLP,
+ 'RingGNN': RingGNN,
+ '3WLGNN': ThreeWLGNN
}
return models[MODEL_NAME](net_params)
\ No newline at end of file
diff --git a/nets/TSP_edge_classification/mlp_net.py b/nets/TSP_edge_classification/mlp_net.py
index 96cef23ee..ab989ae39 100644
--- a/nets/TSP_edge_classification/mlp_net.py
+++ b/nets/TSP_edge_classification/mlp_net.py
@@ -37,7 +37,7 @@ def __init__(self, net_params):
self.readout_mlp = MLPReadout(2*hidden_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.in_feat_dropout(h)
h = self.feat_mlp(h)
if self.gated:
diff --git a/nets/TSP_edge_classification/mo_net.py b/nets/TSP_edge_classification/mo_net.py
index 1a7b39efe..132219b7a 100644
--- a/nets/TSP_edge_classification/mo_net.py
+++ b/nets/TSP_edge_classification/mo_net.py
@@ -18,7 +18,7 @@
class MoNet(nn.Module):
def __init__(self, net_params):
super().__init__()
-
+ self.name = 'MoNet'
in_dim = net_params['in_dim']
hidden_dim = net_params['hidden_dim']
out_dim = net_params['out_dim']
@@ -27,8 +27,7 @@ def __init__(self, net_params):
n_classes = net_params['n_classes']
dropout = net_params['dropout']
n_layers = net_params['L']
- self.readout = net_params['readout']
- graph_norm = net_params['graph_norm']
+ self.readout = net_params['readout']
batch_norm = net_params['batch_norm']
residual = net_params['residual']
self.device = net_params['device']
@@ -44,27 +43,26 @@ def __init__(self, net_params):
# Hidden layer
for _ in range(n_layers-1):
self.layers.append(GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
- dropout, graph_norm, batch_norm, residual))
+ dropout, batch_norm, residual))
self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
# Output layer
self.layers.append(GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type,
- dropout, graph_norm, batch_norm, residual))
+ dropout, batch_norm, residual))
self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
self.MLP_layer = MLPReadout(2*out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h.float())
# computing the 'pseudo' named tensor which depends on node degrees
- us, vs = g.edges()
- # to avoid zero division in case in_degree is 0, we add constant '1' in all node degrees denoting self-loop
- pseudo = [ [1/np.sqrt(g.in_degree(us[i])+1), 1/np.sqrt(g.in_degree(vs[i])+1)] for i in range(g.number_of_edges()) ]
- pseudo = torch.Tensor(pseudo).to(self.device)
+ g.ndata['deg'] = g.in_degrees()
+ g.apply_edges(self.compute_pseudo)
+ pseudo = g.edata['pseudo'].to(self.device).float()
for i in range(len(self.layers)):
- h = self.layers[i](g, h, self.pseudo_proj[i](pseudo), snorm_n)
+ h = self.layers[i](g, h, self.pseudo_proj[i](pseudo))
g.ndata['h'] = h
def _edge_feat(edges):
@@ -75,6 +73,14 @@ def _edge_feat(edges):
return g.edata['e']
+ def compute_pseudo(self, edges):
+ # compute pseudo edge features for MoNet
+ # to avoid division by zero when in_degree is 0, we add a constant 1 to every node degree (i.e., a self-loop)
+ srcs = 1/np.sqrt(edges.src['deg']+1)
+ dsts = 1/np.sqrt(edges.dst['deg']+1)
+ pseudo = torch.cat((srcs.unsqueeze(-1), dsts.unsqueeze(-1)), dim=1)
+ return {'pseudo': pseudo}
+
def loss(self, pred, label):
criterion = nn.CrossEntropyLoss(weight=None)
loss = criterion(pred, label)
diff --git a/nets/TSP_edge_classification/ring_gnn_net.py b/nets/TSP_edge_classification/ring_gnn_net.py
new file mode 100644
index 000000000..a038e91af
--- /dev/null
+++ b/nets/TSP_edge_classification/ring_gnn_net.py
@@ -0,0 +1,86 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+import time
+
+"""
+ Ring-GNN
+ On the equivalence between graph isomorphism testing and function approximation with GNNs (Chen et al, 2019)
+ https://arxiv.org/pdf/1905.12560v1.pdf
+"""
+from layers.ring_gnn_equiv_layer import RingGNNEquivLayer
+from layers.mlp_readout_layer import MLPReadout
+
+class RingGNNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ self.in_dim_node = net_params['in_dim']
+ self.in_dim_edge = net_params['in_dim_edge']
+ avg_node_num = net_params['avg_node_num']
+ radius = net_params['radius']
+ hidden_dim = net_params['hidden_dim']
+ n_classes = net_params['n_classes']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.layer_norm = net_params['layer_norm']
+ self.residual = net_params['residual']
+ self.edge_feat = net_params['edge_feat']
+ self.device = net_params['device']
+
+ if self.edge_feat:
+ self.depth = [torch.LongTensor([1+self.in_dim_node+self.in_dim_edge])] + [torch.LongTensor([hidden_dim])] * n_layers
+ else:
+ self.depth = [torch.LongTensor([1+self.in_dim_node])] + [torch.LongTensor([hidden_dim])] * n_layers
+
+ self.equi_modulelist = nn.ModuleList([RingGNNEquivLayer(self.device, m, n,
+ layer_norm=self.layer_norm,
+ residual=self.residual,
+ dropout=dropout,
+ radius=radius,
+ k2_init=0.5/avg_node_num) for m, n in zip(self.depth[:-1], self.depth[1:])])
+
+ self.prediction = MLPReadout(torch.sum(torch.stack(self.depth)).item()*2, n_classes)
+
+ def forward(self, x_no_edge_feat, x_with_edge_feat, edge_list):
+ """
+ CODE ADAPTED FROM https://github.com/leichen2018/Ring-GNN/
+ """
+
+ x = x_no_edge_feat
+
+ if self.edge_feat:
+ x = x_with_edge_feat
+
+ # this x is the tensor with all info available => adj, node feat, and edge feat (if edge_feat flag True)
+
+ x_list = [x]
+ for layer in self.equi_modulelist:
+ x = layer(x)
+ x_list.append(x)
+
+ x_list = [torch.sum(x, dim=2) for x in x_list]
+ x_list = torch.cat(x_list, dim=1)
+
+ # node_feats will be of size (num nodes, features)
+ node_feats = x_list.squeeze(0).permute(1,0)
+
+ # edge sources and destinations which are node indexes
+ srcs, dsts = edge_list
+
+
+ # To make a prediction for each edge e_{ij}, we first concatenate
+ # node features h_i and h_j from the final GNN layer.
+ # The concatenated features are then passed to an MLP for prediction.
+ edge_outs = [torch.cat([node_feats[srcs[idx].item()], node_feats[dsts[idx].item()]]) for idx in range(len(srcs))]
+ edge_outs = self.prediction(torch.stack(edge_outs))
+
+ return edge_outs
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss(weight=None)
+ loss = criterion(pred, label)
+
+ return loss
+
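
Editor's note: the edge prediction above gathers node-feature pairs with a per-edge Python list comprehension. An equivalent vectorized gather (an editor's sketch, not part of this diff) would index the feature matrix with the whole source/destination index tensors at once:

```
import torch

def edge_logits(node_feats, edge_list, prediction):
    # node_feats: [n, d]; edge_list: (srcs, dsts) LongTensors of node indices
    srcs, dsts = edge_list
    pair_feats = torch.cat([node_feats[srcs], node_feats[dsts]], dim=1)  # [E, 2d]
    return prediction(pair_feats)
```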
diff --git a/nets/TSP_edge_classification/three_wl_gnn_net.py b/nets/TSP_edge_classification/three_wl_gnn_net.py
new file mode 100644
index 000000000..03af2ab95
--- /dev/null
+++ b/nets/TSP_edge_classification/three_wl_gnn_net.py
@@ -0,0 +1,121 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+import time
+
+"""
+ 3WLGNN / ThreeWLGNN
+ Provably Powerful Graph Networks (Maron et al., 2019)
+ https://papers.nips.cc/paper/8488-provably-powerful-graph-networks.pdf
+
+ CODE adapted from https://github.com/hadarser/ProvablyPowerfulGraphNetworks_torch/
+"""
+
+from layers.three_wl_gnn_layers import RegularBlock, MlpBlock, SkipConnection, FullyConnected, diag_offdiag_maxpool
+from layers.mlp_readout_layer import MLPReadout
+
+class ThreeWLGNNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ self.in_dim_node = net_params['in_dim']
+ self.in_dim_edge = net_params['in_dim_edge']
+ depth_of_mlp = net_params['depth_of_mlp']
+ hidden_dim = net_params['hidden_dim']
+ n_classes = net_params['n_classes']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.layer_norm = net_params['layer_norm']
+ self.residual = net_params['residual']
+ self.edge_feat = net_params['edge_feat']
+ self.device = net_params['device']
+ self.gin_like_readout = False # if True, uses a GIN-like readout, but without diag pooling
+
+ block_features = [hidden_dim] * n_layers # L here is the block number
+
+ if not self.edge_feat:
+ original_features_num = self.in_dim_node + 1 # Number of features of the input
+ else:
+ original_features_num = self.in_dim_node + self.in_dim_edge + 1 # Number of features of the input
+
+ # sequential mlp blocks
+ last_layer_features = original_features_num
+ self.reg_blocks = nn.ModuleList()
+ for layer, next_layer_features in enumerate(block_features):
+ mlp_block = RegularBlock(depth_of_mlp, last_layer_features, next_layer_features, self.residual)
+ self.reg_blocks.append(mlp_block)
+ last_layer_features = next_layer_features
+
+
+ if self.gin_like_readout:
+ self.fc_layers = nn.ModuleList()
+ for output_features in block_features:
+ # each edge concatenates two pooled node embeddings (thus 2*output_features) before the fully connected layer
+ fc = FullyConnected(2*output_features, n_classes, activation_fn=None)
+ self.fc_layers.append(fc)
+ else:
+ self.mlp_prediction = MLPReadout(2*(sum(block_features)+original_features_num), n_classes)
+
+
+ def forward(self, x_no_edge_feat, x_with_edge_feat, edge_list):
+
+ x = x_no_edge_feat
+
+ if self.edge_feat:
+ x = x_with_edge_feat
+
+ # this x is the tensor with all info available => adj, node feat, and edge feat (if edge_feat flag True)
+
+ if self.gin_like_readout:
+ scores = torch.tensor(0, device=self.device, dtype=x.dtype)
+ else:
+ x_list = [x]
+
+ for i, block in enumerate(self.reg_blocks):
+
+ x = block(x)
+ if self.gin_like_readout:
+ x_out = torch.sum(x, dim=2) # from [1 x d_out x n x n] to [1 x d_out x n]
+ node_feats = x_out.squeeze().permute(1,0) # reshaping in form of [n x d_out]
+
+ # edge sources and destinations which are node indexes
+ srcs, dsts = edge_list
+
+ # To make a prediction for each edge e_{ij}, we first concatenate
+ # node features h_i and h_j from the final GNN layer.
+ # The concatenated features are then passed to an MLP for prediction.
+ edge_outs = [torch.cat([node_feats[srcs[idx].item()], node_feats[dsts[idx].item()]]) for idx in range(len(srcs))]
+
+ scores = self.fc_layers[i](torch.stack(edge_outs)) + scores
+ else:
+ x_list.append(x)
+
+ if self.gin_like_readout:
+ return scores
+ else:
+ # readout
+ x_list = [torch.sum(x, dim=2) for x in x_list]
+ x_list = torch.cat(x_list, dim=1)
+
+
+ # node_feats will be of size (num nodes, features)
+ node_feats = x_list.squeeze(0).permute(1,0)
+
+ # edge sources and destinations which are node indexes
+ srcs, dsts = edge_list
+
+ # To make a prediction for each edge e_{ij}, we first concatenate
+ # node features h_i and h_j from the final GNN layer.
+ # The concatenated features are then passed to an MLP for prediction.
+ edge_outs = [torch.cat([node_feats[srcs[idx].item()], node_feats[dsts[idx].item()]]) for idx in range(len(srcs))]
+ edge_outs = self.mlp_prediction(torch.stack(edge_outs))
+
+ return edge_outs
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss(weight=None)
+ loss = criterion(pred, label)
+
+ return loss
+
\ No newline at end of file
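
Editor's note: a minimal instantiation sketch for the new net; every key below is read in `__init__` above, and the values are placeholders rather than tuned settings.

```
from nets.TSP_edge_classification.three_wl_gnn_net import ThreeWLGNNNet

net_params = {
    'in_dim': 2, 'in_dim_edge': 1, 'depth_of_mlp': 3, 'hidden_dim': 80,
    'n_classes': 2, 'dropout': 0.0, 'L': 3, 'layer_norm': True,
    'residual': True, 'edge_feat': False, 'device': 'cpu',
}
model = ThreeWLGNNNet(net_params)
```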
diff --git a/nets/TUs_graph_classification/diffpool_net.py b/nets/TUs_graph_classification/diffpool_net.py
deleted file mode 100644
index 7507bb47e..000000000
--- a/nets/TUs_graph_classification/diffpool_net.py
+++ /dev/null
@@ -1,252 +0,0 @@
-import torch
-import torch.nn as nn
-from torch.nn import init
-import torch.nn.functional as F
-
-import time
-import numpy as np
-from scipy.linalg import block_diag
-
-import dgl
-
-"""
-
-
- DIFFPOOL:
- Z. Ying, J. You, C. Morris, X. Ren, W. Hamilton, and J. Leskovec,
- Hierarchical graph representation learning with differentiable pooling (NeurIPS 2018)
- https://arxiv.org/pdf/1806.08804.pdf
-
- ! code started from dgl diffpool examples dir
-"""
-
-from layers.graphsage_layer import GraphSageLayer # this is GraphSageLayer, DiffPoolBatchedGraphLayer
-from layers.diffpool_layer import DiffPoolLayer # this is GraphSageLayer, DiffPoolBatchedGraphLayer
-# from .graphsage_net import GraphSageNet # this is GraphSage
-# replace BatchedDiffPool with DenseDiffPool and BatchedGraphSAGE with DenseGraphSage
-from layers.tensorized.dense_graphsage_layer import DenseGraphSage
-from layers.tensorized.dense_diffpool_layer import DenseDiffPool
-
-
-class DiffPoolNet(nn.Module):
- """
- DiffPool Fuse with GNN layers and pooling layers in sequence
- """
-
- def __init__(self, net_params):
-
- super().__init__()
- input_dim = net_params['in_dim']
- hidden_dim = net_params['hidden_dim']
- embedding_dim = net_params['embedding_dim']
- label_dim = net_params['n_classes']
- activation = F.relu
- n_layers = net_params['L'] # this is the gnn_per_block param
- dropout = net_params['dropout']
- self.graph_norm = net_params['graph_norm']
- self.batch_norm = net_params['batch_norm']
- self.residual = net_params['residual']
- aggregator_type = net_params['sage_aggregator']
- pool_ratio = net_params['pool_ratio']
-
- self.device = net_params['device']
- self.link_pred = net_params['linkpred']
- self.concat = net_params['cat']
- self.n_pooling = net_params['num_pool']
- self.batch_size = net_params['batch_size']
- self.link_pred_loss = []
- self.entropy_loss = []
-
- self.embedding_h = nn.Linear(input_dim, hidden_dim)
-
- # list of GNN modules before the first diffpool operation
- self.gc_before_pool = nn.ModuleList()
-
- self.assign_dim = net_params['assign_dim']
- # self.bn = True
- self.num_aggs = 1
-
- # constructing layers
- # layers before diffpool
- assert n_layers >= 3, "n_layers too few"
- self.gc_before_pool.append(GraphSageLayer(hidden_dim, hidden_dim, activation,
- dropout, aggregator_type, self.graph_norm, self.batch_norm, self.residual))
-
- for _ in range(n_layers - 2):
- self.gc_before_pool.append(GraphSageLayer(hidden_dim, hidden_dim, activation,
- dropout, aggregator_type, self.graph_norm, self.batch_norm, self.residual))
-
- self.gc_before_pool.append(GraphSageLayer(hidden_dim, embedding_dim, None, dropout, aggregator_type, self.graph_norm, self.batch_norm, self.residual))
-
-
- assign_dims = []
- assign_dims.append(self.assign_dim)
- if self.concat:
- # diffpool layer receives pool_embedding_dim node feature tensor
- # and returns pool_embedding_dim node embedding
- pool_embedding_dim = hidden_dim * (n_layers - 1) + embedding_dim
- else:
-
- pool_embedding_dim = embedding_dim
-
- self.first_diffpool_layer = DiffPoolLayer(pool_embedding_dim, self.assign_dim, hidden_dim,
- activation, dropout, aggregator_type, self.graph_norm, self.batch_norm, self.link_pred)
- gc_after_per_pool = nn.ModuleList()
-
- # list of list of GNN modules, each list after one diffpool operation
- self.gc_after_pool = nn.ModuleList()
-
- for _ in range(n_layers - 1):
- gc_after_per_pool.append(DenseGraphSage(hidden_dim, hidden_dim, self.residual))
- gc_after_per_pool.append(DenseGraphSage(hidden_dim, embedding_dim, self.residual))
- self.gc_after_pool.append(gc_after_per_pool)
-
- self.assign_dim = int(self.assign_dim * pool_ratio)
-
- self.diffpool_layers = nn.ModuleList()
- # each pooling module
- for _ in range(self.n_pooling - 1):
- self.diffpool_layers.append(DenseDiffPool(pool_embedding_dim, self.assign_dim, hidden_dim, self.link_pred))
-
- gc_after_per_pool = nn.ModuleList()
-
- for _ in range(n_layers - 1):
- gc_after_per_pool.append(DenseGraphSage(hidden_dim, hidden_dim, self.residual))
- gc_after_per_pool.append(DenseGraphSage(hidden_dim, embedding_dim, self.residual))
- self.gc_after_pool.append(gc_after_per_pool)
-
- assign_dims.append(self.assign_dim)
- self.assign_dim = int(self.assign_dim * pool_ratio)
-
- # predicting layer
- if self.concat:
- self.pred_input_dim = pool_embedding_dim * \
- self.num_aggs * (n_pooling + 1)
- else:
- self.pred_input_dim = embedding_dim * self.num_aggs
- self.pred_layer = nn.Linear(self.pred_input_dim, label_dim)
-
- # weight initialization
- for m in self.modules():
- if isinstance(m, nn.Linear):
- m.weight.data = init.xavier_uniform_(m.weight.data,
- gain=nn.init.calculate_gain('relu'))
- if m.bias is not None:
- m.bias.data = init.constant_(m.bias.data, 0.0)
-
- def gcn_forward(self, g, h, snorm_n, gc_layers, cat=False):
- """
- Return gc_layer embedding cat.
- """
- block_readout = []
- for gc_layer in gc_layers[:-1]:
- h = gc_layer(g, h, snorm_n)
- block_readout.append(h)
- h = gc_layers[-1](g, h, snorm_n)
- block_readout.append(h)
- if cat:
- block = torch.cat(block_readout, dim=1) # N x F, F = F1 + F2 + ...
- else:
- block = h
- return block
-
- def gcn_forward_tensorized(self, h, adj, gc_layers, cat=False):
- block_readout = []
- for gc_layer in gc_layers:
- h = gc_layer(h, adj)
- block_readout.append(h)
- if cat:
- block = torch.cat(block_readout, dim=2) # N x F, F = F1 + F2 + ...
- else:
- block = h
- return block
-
- def forward(self, g, h, e, snorm_n, snorm_e):
- self.link_pred_loss = []
- self.entropy_loss = []
-
- # node feature for assignment matrix computation is the same as the
- # original node feature
- h = self.embedding_h(h)
- h_a = h
-
- out_all = []
-
- # we use GCN blocks to get an embedding first
- g_embedding = self.gcn_forward(g, h, snorm_n, self.gc_before_pool, self.concat)
-
- g.ndata['h'] = g_embedding
-
- readout = dgl.sum_nodes(g, 'h')
- out_all.append(readout)
- if self.num_aggs == 2:
- readout = dgl.max_nodes(g, 'h')
- out_all.append(readout)
-
- adj, h = self.first_diffpool_layer(g, g_embedding, snorm_n)
- node_per_pool_graph = int(adj.size()[0] / self.batch_size)
-
- h, adj = self.batch2tensor(adj, h, node_per_pool_graph)
- h = self.gcn_forward_tensorized(h, adj, self.gc_after_pool[0], self.concat)
-
- readout = torch.sum(h, dim=1)
- out_all.append(readout)
- if self.num_aggs == 2:
- readout, _ = torch.max(h, dim=1)
- out_all.append(readout)
-
- for i, diffpool_layer in enumerate(self.diffpool_layers):
- h, adj = diffpool_layer(h, adj)
- h = self.gcn_forward_tensorized(h, adj, self.gc_after_pool[i + 1], self.concat)
-
- readout = torch.sum(h, dim=1)
- out_all.append(readout)
-
- if self.num_aggs == 2:
- readout, _ = torch.max(h, dim=1)
- out_all.append(readout)
-
- if self.concat or self.num_aggs > 1:
- final_readout = torch.cat(out_all, dim=1)
- else:
- final_readout = readout
- ypred = self.pred_layer(final_readout)
- return ypred
-
- def batch2tensor(self, batch_adj, batch_feat, node_per_pool_graph):
- """
- transform a batched graph to batched adjacency tensor and node feature tensor
- """
- batch_size = int(batch_adj.size()[0] / node_per_pool_graph)
- adj_list = []
- feat_list = []
-
- for i in range(batch_size):
- start = i * node_per_pool_graph
- end = (i + 1) * node_per_pool_graph
-
- # 1/sqrt(V) normalization
- snorm_n = torch.FloatTensor(node_per_pool_graph, 1).fill_(1./float(node_per_pool_graph)).sqrt().to(self.device)
-
- adj_list.append(batch_adj[start:end, start:end])
- feat_list.append((batch_feat[start:end, :])*snorm_n)
- adj_list = list(map(lambda x: torch.unsqueeze(x, 0), adj_list))
- feat_list = list(map(lambda x: torch.unsqueeze(x, 0), feat_list))
- adj = torch.cat(adj_list, dim=0)
- feat = torch.cat(feat_list, dim=0)
-
- return feat, adj
-
- def loss(self, pred, label):
- '''
- loss function
- '''
- #softmax + CE
- criterion = nn.CrossEntropyLoss()
- loss = criterion(pred, label)
- for diffpool_layer in self.diffpool_layers:
- for key, value in diffpool_layer.loss_log.items():
- loss += value
- return loss
-
-
\ No newline at end of file
diff --git a/nets/TUs_graph_classification/gat_net.py b/nets/TUs_graph_classification/gat_net.py
index d374fb6ff..57ac4fdf0 100644
--- a/nets/TUs_graph_classification/gat_net.py
+++ b/nets/TUs_graph_classification/gat_net.py
@@ -24,7 +24,6 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
@@ -34,15 +33,15 @@ def __init__(self, net_params):
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GATLayer(hidden_dim * num_heads, hidden_dim, num_heads,
- dropout, self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers-1)])
- self.layers.append(GATLayer(hidden_dim * num_heads, out_dim, 1, dropout, self.graph_norm, self.batch_norm, self.residual))
+ dropout, self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ self.layers.append(GATLayer(hidden_dim * num_heads, out_dim, 1, dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
g.ndata['h'] = h
if self.readout == "sum":
diff --git a/nets/TUs_graph_classification/gated_gcn_net.py b/nets/TUs_graph_classification/gated_gcn_net.py
index e4c55b41a..44013eb75 100644
--- a/nets/TUs_graph_classification/gated_gcn_net.py
+++ b/nets/TUs_graph_classification/gated_gcn_net.py
@@ -23,24 +23,23 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
self.embedding_h = nn.Linear(in_dim, hidden_dim)
self.embedding_e = nn.Linear(in_dim, hidden_dim)
- self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout, self.graph_norm,
+ self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
self.batch_norm, self.residual) for _ in range(n_layers-1) ])
- self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.graph_norm, self.batch_norm, self.residual))
+ self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
e = self.embedding_e(e)
# convnets
for conv in self.layers:
- h, e = conv(g, h, e, snorm_n, snorm_e)
+ h, e = conv(g, h, e)
g.ndata['h'] = h
if self.readout == "sum":
diff --git a/nets/TUs_graph_classification/gcn_net.py b/nets/TUs_graph_classification/gcn_net.py
index 27f8b65ba..bf088665a 100644
--- a/nets/TUs_graph_classification/gcn_net.py
+++ b/nets/TUs_graph_classification/gcn_net.py
@@ -23,7 +23,6 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
@@ -31,15 +30,15 @@ def __init__(self, net_params):
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
- self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers-1)])
- self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.graph_norm, self.batch_norm, self.residual))
+ self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
g.ndata['h'] = h
if self.readout == "sum":
diff --git a/nets/TUs_graph_classification/gin_net.py b/nets/TUs_graph_classification/gin_net.py
index d7c3eb522..f6bd0bd60 100644
--- a/nets/TUs_graph_classification/gin_net.py
+++ b/nets/TUs_graph_classification/gin_net.py
@@ -25,8 +25,7 @@ def __init__(self, net_params):
n_mlp_layers = net_params['n_mlp_GIN'] # GIN
learn_eps = net_params['learn_eps_GIN'] # GIN
neighbor_aggr_type = net_params['neighbor_aggr_GIN'] # GIN
- readout = net_params['readout'] # this is graph_pooling_type
- graph_norm = net_params['graph_norm']
+ readout = net_params['readout'] # this is graph_pooling_type
batch_norm = net_params['batch_norm']
residual = net_params['residual']
@@ -39,7 +38,7 @@ def __init__(self, net_params):
mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
- dropout, graph_norm, batch_norm, residual, 0, learn_eps))
+ dropout, batch_norm, residual, 0, learn_eps))
# Linear function for graph poolings (readout) of output of each layer
# which maps the output of different layers into a prediction score
@@ -57,7 +56,7 @@ def __init__(self, net_params):
else:
raise NotImplementedError
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
@@ -65,7 +64,7 @@ def forward(self, g, h, e, snorm_n, snorm_e):
hidden_rep = [h]
for i in range(self.n_layers):
- h = self.ginlayers[i](g, h, snorm_n)
+ h = self.ginlayers[i](g, h)
hidden_rep.append(h)
score_over_layer = 0
diff --git a/nets/TUs_graph_classification/graphsage_net.py b/nets/TUs_graph_classification/graphsage_net.py
index c3ab56c13..0cc75027c 100644
--- a/nets/TUs_graph_classification/graphsage_net.py
+++ b/nets/TUs_graph_classification/graphsage_net.py
@@ -26,8 +26,7 @@ def __init__(self, net_params):
in_feat_dropout = net_params['in_feat_dropout']
dropout = net_params['dropout']
aggregator_type = net_params['sage_aggregator']
- n_layers = net_params['L']
- graph_norm = net_params['graph_norm']
+ n_layers = net_params['L']
batch_norm = net_params['batch_norm']
residual = net_params['residual']
self.readout = net_params['readout']
@@ -36,15 +35,15 @@ def __init__(self, net_params):
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GraphSageLayer(hidden_dim, hidden_dim, F.relu,
- dropout, aggregator_type, graph_norm, batch_norm, residual) for _ in range(n_layers-1)])
- self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, graph_norm, batch_norm, residual))
+ dropout, aggregator_type, batch_norm, residual) for _ in range(n_layers-1)])
+ self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
g.ndata['h'] = h
if self.readout == "sum":
diff --git a/nets/TUs_graph_classification/load_net.py b/nets/TUs_graph_classification/load_net.py
index 9a2863337..dc979e7dc 100644
--- a/nets/TUs_graph_classification/load_net.py
+++ b/nets/TUs_graph_classification/load_net.py
@@ -9,9 +9,9 @@
from nets.TUs_graph_classification.graphsage_net import GraphSageNet
from nets.TUs_graph_classification.gin_net import GINNet
from nets.TUs_graph_classification.mo_net import MoNet as MoNet_
-from nets.TUs_graph_classification.diffpool_net import DiffPoolNet
from nets.TUs_graph_classification.mlp_net import MLPNet
-
+from nets.TUs_graph_classification.ring_gnn_net import RingGNNNet
+from nets.TUs_graph_classification.three_wl_gnn_net import ThreeWLGNNNet
def GatedGCN(net_params):
return GatedGCNNet(net_params)
@@ -31,12 +31,16 @@ def GIN(net_params):
def MoNet(net_params):
return MoNet_(net_params)
-def DiffPool(net_params):
- return DiffPoolNet(net_params)
-
def MLP(net_params):
return MLPNet(net_params)
+def RingGNN(net_params):
+ return RingGNNNet(net_params)
+
+def ThreeWLGNN(net_params):
+ return ThreeWLGNNNet(net_params)
+
+
def gnn_model(MODEL_NAME, net_params):
models = {
'GatedGCN': GatedGCN,
@@ -45,8 +49,9 @@ def gnn_model(MODEL_NAME, net_params):
'GraphSage': GraphSage,
'GIN': GIN,
'MoNet': MoNet_,
- 'DiffPool': DiffPool,
- 'MLP': MLP
+ 'MLP': MLP,
+ 'RingGNN': RingGNN,
+ '3WLGNN': ThreeWLGNN
}
return models[MODEL_NAME](net_params)
\ No newline at end of file
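The registry in `load_net.py` maps the model names used in the JSON configs to constructor functions, and `gnn_model` simply indexes it. A self-contained sketch of the same dispatch pattern (the stub constructors are placeholders, not the repo's real network classes):

```python
# Sketch of the load_net.py dispatch pattern with hypothetical stubs.
def RingGNN(net_params):
    return ('RingGNNNet', net_params)

def ThreeWLGNN(net_params):
    return ('ThreeWLGNNNet', net_params)

def gnn_model(MODEL_NAME, net_params):
    models = {'RingGNN': RingGNN, '3WLGNN': ThreeWLGNN}
    return models[MODEL_NAME](net_params)

print(gnn_model('3WLGNN', {'hidden_dim': 80, 'L': 3}))
```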
diff --git a/nets/TUs_graph_classification/mlp_net.py b/nets/TUs_graph_classification/mlp_net.py
index 81247ab15..028a61c7c 100644
--- a/nets/TUs_graph_classification/mlp_net.py
+++ b/nets/TUs_graph_classification/mlp_net.py
@@ -35,7 +35,7 @@ def __init__(self, net_params):
self.readout_mlp = MLPReadout(hidden_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.in_feat_dropout(h)
h = self.feat_mlp(h)
if self.gated:
diff --git a/nets/TUs_graph_classification/mo_net.py b/nets/TUs_graph_classification/mo_net.py
index 631c12c0f..6c0fb97e6 100644
--- a/nets/TUs_graph_classification/mo_net.py
+++ b/nets/TUs_graph_classification/mo_net.py
@@ -18,7 +18,7 @@
class MoNet(nn.Module):
def __init__(self, net_params):
super().__init__()
-
+ self.name = 'MoNet'
in_dim = net_params['in_dim']
hidden_dim = net_params['hidden_dim']
out_dim = net_params['out_dim']
@@ -27,8 +27,7 @@ def __init__(self, net_params):
n_classes = net_params['n_classes']
dropout = net_params['dropout']
n_layers = net_params['L']
- self.readout = net_params['readout']
- graph_norm = net_params['graph_norm']
+ self.readout = net_params['readout']
batch_norm = net_params['batch_norm']
residual = net_params['residual']
self.device = net_params['device']
@@ -43,27 +42,26 @@ def __init__(self, net_params):
# Hidden layer
for _ in range(n_layers-1):
self.layers.append(GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
- dropout, graph_norm, batch_norm, residual))
+ dropout, batch_norm, residual))
self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
# Output layer
self.layers.append(GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type,
- dropout, graph_norm, batch_norm, residual))
+ dropout, batch_norm, residual))
self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
# computing the 'pseudo' named tensor which depends on node degrees
- us, vs = g.edges()
- # to avoid zero division in case in_degree is 0, we add constant '1' in all node degrees denoting self-loop
- pseudo = [ [1/np.sqrt(g.in_degree(us[i])+1), 1/np.sqrt(g.in_degree(vs[i])+1)] for i in range(g.number_of_edges()) ]
- pseudo = torch.Tensor(pseudo).to(self.device)
+ g.ndata['deg'] = g.in_degrees()
+ g.apply_edges(self.compute_pseudo)
+ pseudo = g.edata['pseudo'].to(self.device).float()
for i in range(len(self.layers)):
- h = self.layers[i](g, h, self.pseudo_proj[i](pseudo), snorm_n)
+ h = self.layers[i](g, h, self.pseudo_proj[i](pseudo))
g.ndata['h'] = h
if self.readout == "sum":
@@ -76,6 +74,14 @@ def forward(self, g, h, e, snorm_n, snorm_e):
hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
return self.MLP_layer(hg)
+
+ def compute_pseudo(self, edges):
+ # compute pseudo edge features for MoNet
+ # to avoid division by zero when in_degree is 0, we add a constant '1' to every node degree, denoting a self-loop
+ srcs = 1/np.sqrt(edges.src['deg']+1)
+ dsts = 1/np.sqrt(edges.dst['deg']+1)
+ pseudo = torch.cat((srcs.unsqueeze(-1), dsts.unsqueeze(-1)), dim=1)
+ return {'pseudo': pseudo}
def loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
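The new `compute_pseudo` replaces the old per-edge Python loop with a single vectorized `apply_edges` call; the pseudo coordinate of edge (u, v) is [1/sqrt(deg(u)+1), 1/sqrt(deg(v)+1)]. A runnable sketch of the same computation, assuming a recent DGL (the 3-cycle toy graph is an arbitrary example):

```python
# Vectorized pseudo-coordinate computation, mirroring the diff above.
import dgl
import torch

def compute_pseudo(edges):
    # [1/sqrt(deg(u)+1), 1/sqrt(deg(v)+1)]; the +1 is the self-loop guard
    # against division by zero for isolated nodes.
    srcs = 1 / torch.sqrt(edges.src['deg'].float() + 1)
    dsts = 1 / torch.sqrt(edges.dst['deg'].float() + 1)
    return {'pseudo': torch.stack((srcs, dsts), dim=1)}

g = dgl.graph(([0, 1, 2], [1, 2, 0]))
g.ndata['deg'] = g.in_degrees()
g.apply_edges(compute_pseudo)
print(g.edata['pseudo'])      # one 2-d pseudo coordinate per edge
```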
diff --git a/nets/TUs_graph_classification/ring_gnn_net.py b/nets/TUs_graph_classification/ring_gnn_net.py
new file mode 100644
index 000000000..c350450a7
--- /dev/null
+++ b/nets/TUs_graph_classification/ring_gnn_net.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+import time
+
+"""
+ Ring-GNN
+ On the equivalence between graph isomorphism testing and function approximation with GNNs (Chen et al., 2019)
+ https://arxiv.org/pdf/1905.12560v1.pdf
+"""
+from layers.ring_gnn_equiv_layer import RingGNNEquivLayer
+from layers.mlp_readout_layer import MLPReadout
+
+class RingGNNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ self.in_dim_node = net_params['in_dim']
+ avg_node_num = net_params['avg_node_num']
+ radius = net_params['radius']
+ hidden_dim = net_params['hidden_dim']
+ n_classes = net_params['n_classes']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.layer_norm = net_params['layer_norm']
+ self.residual = net_params['residual']
+ self.device = net_params['device']
+
+ self.depth = [torch.LongTensor([1+self.in_dim_node])] + [torch.LongTensor([hidden_dim])] * n_layers
+
+ self.equi_modulelist = nn.ModuleList([RingGNNEquivLayer(self.device, m, n,
+ layer_norm=self.layer_norm,
+ residual=self.residual,
+ dropout=dropout,
+ radius=radius,
+ k2_init=0.5/avg_node_num) for m, n in zip(self.depth[:-1], self.depth[1:])])
+
+ self.prediction = MLPReadout(torch.sum(torch.stack(self.depth)).item(), n_classes)
+
+ def forward(self, x):
+ """
+ CODE ADAPTED FROM https://github.com/leichen2018/Ring-GNN/
+ """
+
+ # x is the dense input tensor carrying all available info: adjacency and node features
+
+ x_list = [x]
+ for layer in self.equi_modulelist:
+ x = layer(x)
+ x_list.append(x)
+
+ # readout
+ x_list = [torch.sum(torch.sum(x, dim=3), dim=2) for x in x_list]
+ x_list = torch.cat(x_list, dim=1)
+
+ x_out = self.prediction(x_list)
+
+ return x_out
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss()
+ loss = criterion(pred, label)
+ return loss
+
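The dense input `x` for Ring-GNN is prepared by the dataset loader; conceptually it stacks the adjacency with node features placed on channel diagonals, giving shape (1, 1+in_dim, N, N). A hedged sketch of that layout, assuming one-hot node labels:

```python
# Sketch of the Ring-GNN dense input layout: channel 0 holds the adjacency,
# node features sit on the diagonals of the remaining channels.
import torch

N, num_feats = 4, 3
adj = torch.randint(0, 2, (N, N)).float()
labels = torch.randint(0, num_feats, (N,))
node_feat = torch.nn.functional.one_hot(labels, num_feats).float()  # (N, num_feats)

x = torch.zeros(1, 1 + num_feats, N, N)
x[0, 0] = adj                               # channel 0: adjacency
x[0, 1:] = torch.diag_embed(node_feat.t())  # feature c of node i at x[0, 1+c, i, i]
print(x.shape)                              # torch.Size([1, 4, 4, 4])
```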
diff --git a/nets/TUs_graph_classification/three_wl_gnn_net.py b/nets/TUs_graph_classification/three_wl_gnn_net.py
new file mode 100644
index 000000000..238c16a11
--- /dev/null
+++ b/nets/TUs_graph_classification/three_wl_gnn_net.py
@@ -0,0 +1,84 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+import time
+
+"""
+ 3WLGNN / ThreeWLGNN
+ Provably Powerful Graph Networks (Maron et al., 2019)
+ https://papers.nips.cc/paper/8488-provably-powerful-graph-networks.pdf
+
+ CODE adapted from https://github.com/hadarser/ProvablyPowerfulGraphNetworks_torch/
+"""
+
+from layers.three_wl_gnn_layers import RegularBlock, MlpBlock, SkipConnection, FullyConnected, diag_offdiag_maxpool
+from layers.mlp_readout_layer import MLPReadout
+
+class ThreeWLGNNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+
+ self.in_dim_node = net_params['in_dim']
+ depth_of_mlp = net_params['depth_of_mlp']
+ hidden_dim = net_params['hidden_dim']
+ n_classes = net_params['n_classes']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.layer_norm = net_params['layer_norm']
+ self.residual = net_params['residual']
+ self.device = net_params['device']
+ self.diag_pool_readout = True # if True, uses the new_suffix readout from the original code
+
+ block_features = [hidden_dim] * n_layers # L here is the block number
+
+ original_features_num = self.in_dim_node + 1 # Number of features of the input
+
+ # sequential mlp blocks
+ last_layer_features = original_features_num
+ self.reg_blocks = nn.ModuleList()
+ for layer, next_layer_features in enumerate(block_features):
+ mlp_block = RegularBlock(depth_of_mlp, last_layer_features, next_layer_features, self.residual)
+ self.reg_blocks.append(mlp_block)
+ last_layer_features = next_layer_features
+
+ if self.diag_pool_readout:
+ self.fc_layers = nn.ModuleList()
+ for output_features in block_features:
+ # each block's output is pooled (hence 2*output_features) and passed through a fully connected layer
+ fc = FullyConnected(2*output_features, n_classes, activation_fn=None)
+ self.fc_layers.append(fc)
+ else:
+ self.mlp_prediction = MLPReadout(sum(block_features)+original_features_num, n_classes)
+
+ def forward(self, x):
+ if self.diag_pool_readout:
+ scores = torch.tensor(0, device=self.device, dtype=x.dtype)
+ else:
+ x_list = [x]
+
+ for i, block in enumerate(self.reg_blocks):
+
+ x = block(x)
+ if self.diag_pool_readout:
+ scores = self.fc_layers[i](diag_offdiag_maxpool(x)) + scores
+ else:
+ x_list.append(x)
+
+ if self.diag_pool_readout:
+ return scores
+ else:
+ # readout like RingGNN
+ x_list = [torch.sum(torch.sum(x, dim=3), dim=2) for x in x_list]
+ x_list = torch.cat(x_list, dim=1)
+
+ x_out = self.mlp_prediction(x_list)
+ return x_out
+
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss()
+ loss = criterion(pred, label)
+ return loss
+
\ No newline at end of file
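With `diag_pool_readout` enabled, each block's output is max-pooled over diagonal and off-diagonal entries (yielding 2*D features) and the resulting per-block scores are summed. A rough re-implementation of that pooling idea, not the repo's exact `diag_offdiag_maxpool` (the off-diagonal max is approximated by the max over all entries):

```python
# Approximate sketch of the diag/off-diag max-pool readout.
import torch

def diag_offdiag_maxpool_sketch(x):
    # x: (batch, D, N, N) -> (batch, 2*D)
    diag = torch.diagonal(x, dim1=2, dim2=3)       # (batch, D, N)
    max_diag = diag.max(dim=2).values              # (batch, D)
    max_all = x.flatten(2).max(dim=2).values       # (batch, D), stand-in for off-diag max
    return torch.cat([max_diag, max_all], dim=1)   # (batch, 2*D)

x = torch.randn(2, 5, 7, 7)
print(diag_offdiag_maxpool_sketch(x).shape)        # torch.Size([2, 10])
```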
diff --git a/nets/molecules_graph_regression/diffpool_net.py b/nets/molecules_graph_regression/diffpool_net.py
deleted file mode 100644
index 363b1b74e..000000000
--- a/nets/molecules_graph_regression/diffpool_net.py
+++ /dev/null
@@ -1,246 +0,0 @@
-import torch
-import torch.nn as nn
-from torch.nn import init
-import torch.nn.functional as F
-
-import time
-import numpy as np
-from scipy.linalg import block_diag
-
-import dgl
-
-"""
-
-
- DIFFPOOL:
- Z. Ying, J. You, C. Morris, X. Ren, W. Hamilton, and J. Leskovec,
- Hierarchical graph representation learning with differentiable pooling (NeurIPS 2018)
- https://arxiv.org/pdf/1806.08804.pdf
-
- ! code started from dgl diffpool examples dir
-"""
-
-from layers.graphsage_layer import GraphSageLayer # this is GraphSageLayer
-from layers.diffpool_layer import DiffPoolLayer # this is DiffPoolBatchedGraphLayer
-# from .graphsage_net import GraphSageNet # this is GraphSage
-# replace BatchedDiffPool with DenseDiffPool and BatchedGraphSAGE with DenseGraphSage
-from layers.tensorized.dense_graphsage_layer import DenseGraphSage
-from layers.tensorized.dense_diffpool_layer import DenseDiffPool
-
-class DiffPoolNet(nn.Module):
- """
- DiffPool Fuse with GNN layers and pooling layers in sequence
- """
-
- def __init__(self, net_params):
-
- super().__init__()
- num_atom_type = net_params['num_atom_type']
- num_bond_type = net_params['num_bond_type']
- hidden_dim = net_params['hidden_dim']
- embedding_dim = net_params['embedding_dim']
- activation = F.relu
- n_layers = net_params['L'] # this is the same 'gnn_per_block' param
- dropout = net_params['dropout']
- self.graph_norm = net_params['graph_norm']
- self.batch_norm = net_params['batch_norm']
- aggregator_type = net_params['sage_aggregator']
- pool_ratio = net_params['pool_ratio']
-
- self.device = net_params['device']
- self.link_pred = net_params['linkpred']
- self.concat = net_params['cat']
- self.residual = net_params['residual']
- self.n_pooling = net_params['num_pool']
- self.batch_size = net_params['batch_size']
- self.link_pred_loss = []
- self.entropy_loss = []
-
- self.embedding_h = nn.Embedding(num_atom_type, hidden_dim)
-
- # list of GNN modules before the first diffpool operation
- self.gc_before_pool = nn.ModuleList()
-
- self.assign_dim = net_params['assign_dim']
- # self.bn = True
- self.num_aggs = 1
-
- # constructing layers
- # layers before diffpool
- assert n_layers >= 3, "n_layers too few"
- self.gc_before_pool.append(GraphSageLayer(hidden_dim, hidden_dim, activation, dropout, aggregator_type, self.graph_norm, self.batch_norm, self.residual))
-
- for _ in range(n_layers - 2):
- self.gc_before_pool.append(GraphSageLayer(hidden_dim, hidden_dim, activation, dropout, aggregator_type, self.graph_norm, self.batch_norm, self.residual))
-
- self.gc_before_pool.append(GraphSageLayer(hidden_dim, embedding_dim, None, dropout, aggregator_type, self.graph_norm, self.batch_norm, self.residual))
-
-
- assign_dims = []
- assign_dims.append(self.assign_dim)
- if self.concat:
- # diffpool layer receive pool_emedding_dim node feature tensor
- # and return pool_embedding_dim node embedding
- pool_embedding_dim = hidden_dim * (n_layers - 1) + embedding_dim
- else:
-
- pool_embedding_dim = embedding_dim
-
- self.first_diffpool_layer = DiffPoolLayer(pool_embedding_dim, self.assign_dim, hidden_dim,
- activation, dropout, aggregator_type, self.graph_norm, self.batch_norm, self.link_pred)
- gc_after_per_pool = nn.ModuleList()
-
- # list of list of GNN modules, each list after one diffpool operation
- self.gc_after_pool = nn.ModuleList()
-
- for _ in range(n_layers - 1):
- gc_after_per_pool.append(DenseGraphSage(hidden_dim, hidden_dim, self.residual))
- gc_after_per_pool.append(DenseGraphSage(hidden_dim, embedding_dim, self.residual))
- self.gc_after_pool.append(gc_after_per_pool)
-
- self.assign_dim = int(self.assign_dim * pool_ratio)
-
- self.diffpool_layers = nn.ModuleList()
- # each pooling module
- for _ in range(self.n_pooling - 1):
- self.diffpool_layers.append(DenseDiffPool(pool_embedding_dim, self.assign_dim, hidden_dim, self.link_pred))
-
- gc_after_per_pool = nn.ModuleList()
-
- for _ in range(n_layers - 1):
- gc_after_per_pool.append(DenseGraphSage(hidden_dim, hidden_dim, self.residual))
- gc_after_per_pool.append(DenseGraphSage(hidden_dim, embedding_dim, self.residual))
- self.gc_after_pool.append(gc_after_per_pool)
-
- assign_dims.append(self.assign_dim)
- self.assign_dim = int(self.assign_dim * pool_ratio)
-
- # predicting layer
- if self.concat:
- self.pred_input_dim = pool_embedding_dim * \
- self.num_aggs * (n_pooling + 1)
- else:
- self.pred_input_dim = embedding_dim * self.num_aggs
- self.pred_layer = nn.Linear(self.pred_input_dim, 1) # 1 out dim since regression problem
-
- # weight initialization
- for m in self.modules():
- if isinstance(m, nn.Linear):
- m.weight.data = init.xavier_uniform_(m.weight.data,
- gain=nn.init.calculate_gain('relu'))
- if m.bias is not None:
- m.bias.data = init.constant_(m.bias.data, 0.0)
-
- def gcn_forward(self, g, h, snorm_n, gc_layers, cat=False):
- """
- Return gc_layer embedding cat.
- """
- block_readout = []
- for gc_layer in gc_layers[:-1]:
- h = gc_layer(g, h, snorm_n)
- block_readout.append(h)
- h = gc_layers[-1](g, h, snorm_n)
- block_readout.append(h)
- if cat:
- block = torch.cat(block_readout, dim=1) # N x F, F = F1 + F2 + ...
- else:
- block = h
- return block
-
- def gcn_forward_tensorized(self, h, adj, gc_layers, cat=False):
- block_readout = []
- for gc_layer in gc_layers:
- h = gc_layer(h, adj)
- block_readout.append(h)
- if cat:
- block = torch.cat(block_readout, dim=2) # N x F, F = F1 + F2 + ...
- else:
- block = h
- return block
-
- def forward(self, g, h, e, snorm_n, snorm_e):
- self.link_pred_loss = []
- self.entropy_loss = []
-
- # node feature for assignment matrix computation is the same as the
- # original node feature
- h = self.embedding_h(h)
- h_a = h
-
- out_all = []
-
- # we use GCN blocks to get an embedding first
- g_embedding = self.gcn_forward(g, h, snorm_n, self.gc_before_pool, self.concat)
-
- g.ndata['h'] = g_embedding
-
- readout = dgl.sum_nodes(g, 'h')
- out_all.append(readout)
- if self.num_aggs == 2:
- readout = dgl.max_nodes(g, 'h')
- out_all.append(readout)
-
- adj, h = self.first_diffpool_layer(g, g_embedding, snorm_n)
- node_per_pool_graph = int(adj.size()[0] / self.batch_size)
-
- h, adj = self.batch2tensor(adj, h, node_per_pool_graph)
- h = self.gcn_forward_tensorized(h, adj, self.gc_after_pool[0], self.concat)
-
- readout = torch.sum(h, dim=1)
- out_all.append(readout)
- if self.num_aggs == 2:
- readout, _ = torch.max(h, dim=1)
- out_all.append(readout)
-
- for i, diffpool_layer in enumerate(self.diffpool_layers):
- h, adj = diffpool_layer(h, adj)
- h = self.gcn_forward_tensorized(h, adj, self.gc_after_pool[i + 1], self.concat)
-
- readout = torch.sum(h, dim=1)
- out_all.append(readout)
-
- if self.num_aggs == 2:
- readout, _ = torch.max(h, dim=1)
- out_all.append(readout)
-
- if self.concat or self.num_aggs > 1:
- final_readout = torch.cat(out_all, dim=1)
- else:
- final_readout = readout
- ypred = self.pred_layer(final_readout)
- return ypred
-
- def batch2tensor(self, batch_adj, batch_feat, node_per_pool_graph):
- """
- transform a batched graph to batched adjacency tensor and node feature tensor
- """
- batch_size = int(batch_adj.size()[0] / node_per_pool_graph)
- adj_list = []
- feat_list = []
-
- for i in range(batch_size):
- start = i * node_per_pool_graph
- end = (i + 1) * node_per_pool_graph
-
- # 1/sqrt(V) normalization
- snorm_n = torch.FloatTensor(node_per_pool_graph, 1).fill_(1./float(node_per_pool_graph)).sqrt().to(self.device)
-
- adj_list.append(batch_adj[start:end, start:end])
- feat_list.append((batch_feat[start:end, :])*snorm_n)
- adj_list = list(map(lambda x: torch.unsqueeze(x, 0), adj_list))
- feat_list = list(map(lambda x: torch.unsqueeze(x, 0), feat_list))
- adj = torch.cat(adj_list, dim=0)
- feat = torch.cat(feat_list, dim=0)
-
- return feat, adj
-
- def loss(self, pred, label):
- '''
- loss function
- '''
- criterion = nn.L1Loss()
- loss = criterion(pred, label)
- for diffpool_layer in self.diffpool_layers:
- for key, value in diffpool_layer.loss_log.items():
- loss += value
- return loss
diff --git a/nets/molecules_graph_regression/gat_net.py b/nets/molecules_graph_regression/gat_net.py
index 1407dc61e..8cd54c371 100644
--- a/nets/molecules_graph_regression/gat_net.py
+++ b/nets/molecules_graph_regression/gat_net.py
@@ -24,7 +24,6 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
@@ -34,16 +33,16 @@ def __init__(self, net_params):
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GATLayer(hidden_dim * num_heads, hidden_dim, num_heads,
- dropout, self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ dropout, self.batch_norm, self.residual) for _ in range(n_layers-1)])
self.layers.append(GATLayer(hidden_dim * num_heads, out_dim, 1,
- dropout, self.graph_norm, self.batch_norm, self.residual))
+ dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(out_dim, 1) # 1 out dim since regression problem
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
g.ndata['h'] = h
if self.readout == "sum":
diff --git a/nets/molecules_graph_regression/gated_gcn_net.py b/nets/molecules_graph_regression/gated_gcn_net.py
index 67e0c73e9..5f511b987 100644
--- a/nets/molecules_graph_regression/gated_gcn_net.py
+++ b/nets/molecules_graph_regression/gated_gcn_net.py
@@ -23,11 +23,14 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
self.edge_feat = net_params['edge_feat']
self.device = net_params['device']
+ self.pos_enc = net_params['pos_enc']
+ if self.pos_enc:
+ pos_enc_dim = net_params['pos_enc_dim']
+ self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
self.embedding_h = nn.Embedding(num_atom_type, hidden_dim)
@@ -39,22 +42,25 @@ def __init__(self, net_params):
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
- self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers-1) ])
- self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.graph_norm, self.batch_norm, self.residual))
+ self.batch_norm, self.residual) for _ in range(n_layers-1) ])
+ self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(out_dim, 1) # 1 out dim since regression problem
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e, h_pos_enc=None):
# input embedding
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
+ if self.pos_enc:
+ h_pos_enc = self.embedding_pos_enc(h_pos_enc.float())
+ h = h + h_pos_enc
if not self.edge_feat: # edge feature set to 1
e = torch.ones(e.size(0),1).to(self.device)
e = self.embedding_e(e)
# convnets
for conv in self.layers:
- h, e = conv(g, h, e, snorm_n, snorm_e)
+ h, e = conv(g, h, e)
g.ndata['h'] = h
if self.readout == "sum":
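The new `pos_enc` branch adds a learned projection of precomputed node positional encodings (e.g. Laplacian eigenvectors, an assumption here) to the input node embeddings. A minimal sketch of the injection:

```python
# Positional-encoding injection mirroring the diff above; pos_enc_dim and
# the source of h_pos_enc are assumptions for illustration.
import torch
import torch.nn as nn

hidden_dim, pos_enc_dim, num_nodes = 64, 8, 10
embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)

h = torch.randn(num_nodes, hidden_dim)           # node embeddings
h_pos_enc = torch.randn(num_nodes, pos_enc_dim)  # precomputed positional encodings
h = h + embedding_pos_enc(h_pos_enc.float())     # inject positional information
print(h.shape)                                   # torch.Size([10, 64])
```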
diff --git a/nets/molecules_graph_regression/gcn_net.py b/nets/molecules_graph_regression/gcn_net.py
index 1d11c85a8..fcadc4c89 100644
--- a/nets/molecules_graph_regression/gcn_net.py
+++ b/nets/molecules_graph_regression/gcn_net.py
@@ -23,7 +23,6 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
@@ -32,17 +31,17 @@ def __init__(self, net_params):
self.embedding_h = nn.Embedding(num_atom_type, hidden_dim)
self.layers = nn.ModuleList([GCNLayer(hidden_dim, hidden_dim, F.relu,
- dropout, self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ dropout, self.batch_norm, self.residual) for _ in range(n_layers-1)])
self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu,
- dropout, self.graph_norm, self.batch_norm, self.residual))
+ dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(out_dim, 1) # 1 out dim since regression problem
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
g.ndata['h'] = h
if self.readout == "sum":
diff --git a/nets/molecules_graph_regression/gin_net.py b/nets/molecules_graph_regression/gin_net.py
index 39464d9fb..df51ff1ff 100644
--- a/nets/molecules_graph_regression/gin_net.py
+++ b/nets/molecules_graph_regression/gin_net.py
@@ -24,8 +24,7 @@ def __init__(self, net_params):
n_mlp_layers = net_params['n_mlp_GIN'] # GIN
learn_eps = net_params['learn_eps_GIN'] # GIN
neighbor_aggr_type = net_params['neighbor_aggr_GIN'] # GIN
- readout = net_params['readout'] # this is graph_pooling_type
- graph_norm = net_params['graph_norm']
+ readout = net_params['readout'] # this is graph_pooling_type
batch_norm = net_params['batch_norm']
residual = net_params['residual']
@@ -38,7 +37,7 @@ def __init__(self, net_params):
mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
- dropout, graph_norm, batch_norm, residual, 0, learn_eps))
+ dropout, batch_norm, residual, 0, learn_eps))
# Linear function for graph poolings (readout) of output of each layer
# which maps the output of different layers into a prediction score
@@ -56,7 +55,7 @@ def __init__(self, net_params):
else:
raise NotImplementedError
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
@@ -64,7 +63,7 @@ def forward(self, g, h, e, snorm_n, snorm_e):
hidden_rep = [h]
for i in range(self.n_layers):
- h = self.ginlayers[i](g, h, snorm_n)
+ h = self.ginlayers[i](g, h)
hidden_rep.append(h)
score_over_layer = 0
diff --git a/nets/molecules_graph_regression/graphsage_net.py b/nets/molecules_graph_regression/graphsage_net.py
index 7575a5609..d8857d65c 100644
--- a/nets/molecules_graph_regression/graphsage_net.py
+++ b/nets/molecules_graph_regression/graphsage_net.py
@@ -27,7 +27,6 @@ def __init__(self, net_params):
dropout = net_params['dropout']
aggregator_type = net_params['sage_aggregator']
n_layers = net_params['L']
- graph_norm = net_params['graph_norm']
batch_norm = net_params['batch_norm']
residual = net_params['residual']
self.readout = net_params['readout']
@@ -37,15 +36,15 @@ def __init__(self, net_params):
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GraphSageLayer(hidden_dim, hidden_dim, F.relu,
- dropout, aggregator_type, graph_norm, batch_norm, residual) for _ in range(n_layers-1)])
- self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, graph_norm, batch_norm, residual))
+ dropout, aggregator_type, batch_norm, residual) for _ in range(n_layers-1)])
+ self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
self.MLP_layer = MLPReadout(out_dim, 1) # 1 out dim since regression problem
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
g.ndata['h'] = h
if self.readout == "sum":
diff --git a/nets/molecules_graph_regression/load_net.py b/nets/molecules_graph_regression/load_net.py
index 511b0e641..120608b1e 100644
--- a/nets/molecules_graph_regression/load_net.py
+++ b/nets/molecules_graph_regression/load_net.py
@@ -9,9 +9,9 @@
from nets.molecules_graph_regression.graphsage_net import GraphSageNet
from nets.molecules_graph_regression.gin_net import GINNet
from nets.molecules_graph_regression.mo_net import MoNet as MoNet_
-from nets.molecules_graph_regression.diffpool_net import DiffPoolNet
from nets.molecules_graph_regression.mlp_net import MLPNet
-
+from nets.molecules_graph_regression.ring_gnn_net import RingGNNNet
+from nets.molecules_graph_regression.three_wl_gnn_net import ThreeWLGNNNet
def GatedGCN(net_params):
return GatedGCNNet(net_params)
@@ -31,12 +31,15 @@ def GIN(net_params):
def MoNet(net_params):
return MoNet_(net_params)
-def DiffPool(net_params):
- return DiffPoolNet(net_params)
-
def MLP(net_params):
return MLPNet(net_params)
+def RingGNN(net_params):
+ return RingGNNNet(net_params)
+
+def ThreeWLGNN(net_params):
+ return ThreeWLGNNNet(net_params)
+
def gnn_model(MODEL_NAME, net_params):
models = {
'GatedGCN': GatedGCN,
@@ -45,8 +48,9 @@ def gnn_model(MODEL_NAME, net_params):
'GraphSage': GraphSage,
'GIN': GIN,
'MoNet': MoNet,
- 'DiffPool': DiffPool,
- 'MLP': MLP
+ 'MLP': MLP,
+ 'RingGNN': RingGNN,
+ '3WLGNN': ThreeWLGNN
}
return models[MODEL_NAME](net_params)
\ No newline at end of file
diff --git a/nets/molecules_graph_regression/mlp_net.py b/nets/molecules_graph_regression/mlp_net.py
index a472363be..085f3a6d5 100644
--- a/nets/molecules_graph_regression/mlp_net.py
+++ b/nets/molecules_graph_regression/mlp_net.py
@@ -39,7 +39,7 @@ def __init__(self, net_params):
self.readout_mlp = MLPReadout(out_dim, 1) # 1 out dim since regression problem
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
h = self.feat_mlp(h)
diff --git a/nets/molecules_graph_regression/mo_net.py b/nets/molecules_graph_regression/mo_net.py
index 620787f0f..8716ea984 100644
--- a/nets/molecules_graph_regression/mo_net.py
+++ b/nets/molecules_graph_regression/mo_net.py
@@ -18,6 +18,7 @@
class MoNet(nn.Module):
def __init__(self, net_params):
super().__init__()
+ self.name = 'MoNet'
num_atom_type = net_params['num_atom_type']
hidden_dim = net_params['hidden_dim']
out_dim = net_params['out_dim']
@@ -25,8 +26,7 @@ def __init__(self, net_params):
dim = net_params['pseudo_dim_MoNet'] # for MoNet
dropout = net_params['dropout']
n_layers = net_params['L']
- self.readout = net_params['readout']
- graph_norm = net_params['graph_norm']
+ self.readout = net_params['readout']
batch_norm = net_params['batch_norm']
residual = net_params['residual']
self.device = net_params['device']
@@ -41,27 +41,26 @@ def __init__(self, net_params):
# Hidden layer
for _ in range(n_layers-1):
self.layers.append(GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
- dropout, graph_norm, batch_norm, residual))
+ dropout, batch_norm, residual))
self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
# Output layer
self.layers.append(GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type,
- dropout, graph_norm, batch_norm, residual))
+ dropout, batch_norm, residual))
self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
self.MLP_layer = MLPReadout(out_dim, 1) # out dim 1 since regression
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
# computing the 'pseudo' named tensor which depends on node degrees
- us, vs = g.edges()
- # to avoid zero division in case in_degree is 0, we add constant '1' in all node degrees denoting self-loop
- pseudo = [ [1/np.sqrt(g.in_degree(us[i])+1), 1/np.sqrt(g.in_degree(vs[i])+1)] for i in range(g.number_of_edges()) ]
- pseudo = torch.Tensor(pseudo).to(self.device)
+ g.ndata['deg'] = g.in_degrees()
+ g.apply_edges(self.compute_pseudo)
+ pseudo = g.edata['pseudo'].to(self.device).float()
for i in range(len(self.layers)):
- h = self.layers[i](g, h, self.pseudo_proj[i](pseudo), snorm_n)
+ h = self.layers[i](g, h, self.pseudo_proj[i](pseudo))
g.ndata['h'] = h
if self.readout == "sum":
@@ -74,6 +73,14 @@ def forward(self, g, h, e, snorm_n, snorm_e):
hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes
return self.MLP_layer(hg)
+
+ def compute_pseudo(self, edges):
+ # compute pseudo edge features for MoNet
+ # to avoid division by zero when in_degree is 0, we add a constant '1' to every node degree, denoting a self-loop
+ srcs = 1/np.sqrt(edges.src['deg']+1)
+ dsts = 1/np.sqrt(edges.dst['deg']+1)
+ pseudo = torch.cat((srcs.unsqueeze(-1), dsts.unsqueeze(-1)), dim=1)
+ return {'pseudo': pseudo}
def loss(self, scores, targets):
# loss = nn.MSELoss()(scores,targets)
diff --git a/nets/molecules_graph_regression/ring_gnn_net.py b/nets/molecules_graph_regression/ring_gnn_net.py
new file mode 100644
index 000000000..23f9052c2
--- /dev/null
+++ b/nets/molecules_graph_regression/ring_gnn_net.py
@@ -0,0 +1,306 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+import time
+
+"""
+ Ring-GNN
+ On the equivalence between graph isomorphism testing and function approximation with GNNs (Chen et al., 2019)
+ https://arxiv.org/pdf/1905.12560v1.pdf
+"""
+from layers.ring_gnn_equiv_layer import RingGNNEquivLayer
+from layers.mlp_readout_layer import MLPReadout
+
+class RingGNNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ self.num_atom_type = net_params['num_atom_type'] # 'num_atom_type' is 'nodeclasses' as in the original Ring-GNN repo
+ self.num_bond_type = net_params['num_bond_type']
+ avg_node_num = net_params['avg_node_num']
+ radius = net_params['radius']
+ hidden_dim = net_params['hidden_dim']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.layer_norm = net_params['layer_norm']
+ self.residual = net_params['residual']
+ self.edge_feat = net_params['edge_feat']
+ self.device = net_params['device']
+
+ if self.edge_feat:
+ self.depth = [torch.LongTensor([1+self.num_atom_type+self.num_bond_type])] + [torch.LongTensor([hidden_dim])] * n_layers
+ else:
+ self.depth = [torch.LongTensor([1+self.num_atom_type])] + [torch.LongTensor([hidden_dim])] * n_layers
+
+ self.equi_modulelist = nn.ModuleList([RingGNNEquivLayer(self.device, m, n,
+ layer_norm=self.layer_norm,
+ residual=self.residual,
+ dropout=dropout,
+ radius=radius,
+ k2_init=0.5/avg_node_num) for m, n in zip(self.depth[:-1], self.depth[1:])])
+
+ self.prediction = MLPReadout(torch.sum(torch.stack(self.depth)).item(), 1) # 1 out dim since regression problem
+
+ def forward(self, x_no_edge_feat, x_with_edge_feat):
+ """
+ CODE ADAPTED FROM https://github.com/leichen2018/Ring-GNN/
+ """
+
+ x = x_no_edge_feat
+
+ if self.edge_feat:
+ x = x_with_edge_feat
+
+ # x is the dense input tensor carrying all available info: adjacency, node features, and edge features (if the edge_feat flag is true)
+
+ x_list = [x]
+ for layer in self.equi_modulelist:
+ x = layer(x)
+ x_list.append(x)
+
+ # readout
+ x_list = [torch.sum(torch.sum(x, dim=3), dim=2) for x in x_list]
+ x_list = torch.cat(x_list, dim=1)
+
+ x_out = self.prediction(x_list)
+
+ return x_out
+
+ def loss(self, scores, targets):
+ # loss = nn.MSELoss()(scores,targets)
+ loss = nn.L1Loss()(scores, targets)
+ return loss
+
+
+
+"""
+OLD CODE BELOW (Thu 14 May 2020), kept for ROLLBACK, just in case.
+Using the following code with a depth of only 29->64->64 achieved 0.44 test MAE on ZINC.
+
+"""
+
+
+##############################################################################################################
+# import torch
+# import torch.nn as nn
+# import torch.nn.functional as F
+
+# import dgl
+# import time
+
+# """
+# Ring-GNN
+# On the equivalence between graph isomorphism testing and function approximation with GNNs (Chen et al, 2019)
+# https://arxiv.org/pdf/1905.12560v1.pdf
+# """
+# from layers.gated_gcn_layer import GatedGCNLayer
+# from layers.mlp_readout_layer import MLPReadout
+
+# class RingGNNNet(nn.Module):
+# def __init__(self, net_params):
+# super().__init__()
+# self.num_atom_type = net_params['num_atom_type'] # 'num_atom_type' is 'nodeclasses' as in RingGNN original repo
+# self.num_bond_type = net_params['num_bond_type']
+# # node_classes = net_params['node_classes']
+# avg_node_num = net_params['avg_node_num'] #10
+# radius = net_params['radius'] #4
+# hidden_dim = net_params['hidden_dim']
+# out_dim = net_params['out_dim']
+# in_feat_dropout = net_params['in_feat_dropout']
+# dropout = net_params['dropout']
+# n_layers = net_params['L']
+# self.readout = net_params['readout']
+# self.graph_norm = net_params['graph_norm']
+# self.batch_norm = net_params['batch_norm']
+# self.residual = net_params['residual']
+# self.edge_feat = net_params['edge_feat']
+# self.device = net_params['device']
+
+# self.depth = [torch.LongTensor([self.num_atom_type+1]), torch.LongTensor([22]), torch.LongTensor([22]), torch.LongTensor([22]), torch.LongTensor([22])]
+# #print(self.depth)
+
+# # for m, n in zip(self.depth[:-1], self.depth[1:]):
+# # print(m,n)
+
+# self.equi_modulelist = nn.ModuleList([equi_2_to_2(self.device, m, n, radius = radius,
+# k2_init = 0.5/avg_node_num) for m, n in zip(self.depth[:-1], self.depth[1:])])
+# #print(self.equi_modulelist)
+# self.prediction = MLPReadout(torch.sum(torch.stack(self.depth)).item(), 1) # 1 out dim since regression problem
+
+# def forward(self, g, h, e, snorm_n, snorm_e):
+# """
+# CODE ADAPTED FROM https://github.com/leichen2018/Ring-GNN/
+# : preparing input to the model in form new_adj
+# : new_adj is of shape [num_atom_type x num_nodes_in_g x num_nodes_in_g]
+# """
+# adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
+# nlabel_dict = {}
+# for i in range(self.num_atom_type): nlabel_dict[i] = i
+# new_adj = torch.stack([adj for j in range(self.num_atom_type + 1)])
+
+# for node, node_label in enumerate(g.ndata['feat']):
+# new_adj[nlabel_dict[node_label.item()]+1][node][node] = 1
+# """"""
+
+# h = new_adj.unsqueeze(0).to(self.device)
+
+# h_list = [h]
+# for layer in self.equi_modulelist:
+# h = F.relu(layer(h))
+# h_list.append(h)
+
+# h_list = [torch.sum(torch.sum(h, dim=3), dim=2) for h in h_list]
+# h_list = torch.cat(h_list, dim=1)
+
+# h_out = self.prediction(h_list)
+
+# return h_out
+
+# def _sym_normalize_adj(self, adj):
+# deg = torch.sum(adj, dim = 0)#.squeeze()
+# deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
+# deg_inv = torch.diag(deg_inv)
+# return torch.mm(deg_inv, torch.mm(adj, deg_inv))
+
+# def loss(self, scores, targets):
+# # loss = nn.MSELoss()(scores,targets)
+# loss = nn.L1Loss()(scores, targets)
+# return loss
+
+# class equi_2_to_2(nn.Module):
+# def __init__(self, device, input_depth, output_depth, normalization='inf', normalization_val=1.0, radius=2, k2_init = 0.1):
+# super(equi_2_to_2, self).__init__()
+# self.device = device
+# basis_dimension = 15
+# self.radius = radius
+# coeffs_values = lambda i, j, k: torch.randn([i, j, k]) * torch.sqrt(2. / (i + j).float())
+# self.diag_bias_list = nn.ParameterList([])
+
+# for i in range(radius):
+# for j in range(i+1):
+# self.diag_bias_list.append(nn.Parameter(torch.zeros(1, output_depth, 1, 1)))
+
+# self.all_bias = nn.Parameter(torch.zeros(1, output_depth, 1, 1))
+# self.coeffs_list = nn.ParameterList([])
+
+# for i in range(radius):
+# for j in range(i+1):
+# self.coeffs_list.append(nn.Parameter(coeffs_values(input_depth, output_depth, basis_dimension)))
+
+# self.switch = nn.ParameterList([nn.Parameter(torch.FloatTensor([1])), nn.Parameter(torch.FloatTensor([k2_init]))])
+# self.output_depth = output_depth
+
+# self.normalization = normalization
+# self.normalization_val = normalization_val
+
+
+# def forward(self, inputs):
+# m = inputs.size()[3]
+
+# ops_out = ops_2_to_2(inputs, m, normalization=self.normalization)
+# ops_out = torch.stack(ops_out, dim = 2)
+
+
+# output_list = []
+
+# for i in range(self.radius):
+# for j in range(i+1):
+# output_i = torch.einsum('dsb,ndbij->nsij', self.coeffs_list[i*(i+1)//2 + j], ops_out)
+
+# mat_diag_bias = torch.eye(inputs.size()[3]).unsqueeze(0).unsqueeze(0).to(self.device) * self.diag_bias_list[i*(i+1)//2 + j]
+# # mat_diag_bias = torch.eye(inputs.size()[3]).to('cuda:0').unsqueeze(0).unsqueeze(0) * self.diag_bias_list[i*(i+1)//2 + j]
+# if j == 0:
+# output = output_i + mat_diag_bias
+# else:
+# output = torch.einsum('abcd,abde->abce', output_i, output)
+# output_list.append(output)
+
+# output = 0
+# for i in range(self.radius):
+# output += output_list[i] * self.switch[i]
+
+# output = output + self.all_bias
+# return output
+
+
+# def ops_2_to_2(inputs, dim, normalization='inf', normalization_val=1.0): # N x D x m x m
+# # input: N x D x m x m
+# diag_part = torch.diagonal(inputs, dim1 = 2, dim2 = 3) # N x D x m
+# sum_diag_part = torch.sum(diag_part, dim=2, keepdim = True) # N x D x 1
+# sum_of_rows = torch.sum(inputs, dim=3) # N x D x m
+# sum_of_cols = torch.sum(inputs, dim=2) # N x D x m
+# sum_all = torch.sum(sum_of_rows, dim=2) # N x D
+
+# # op1 - (1234) - extract diag
+# op1 = torch.diag_embed(diag_part) # N x D x m x m
+
+# # op2 - (1234) + (12)(34) - place sum of diag on diag
+# op2 = torch.diag_embed(sum_diag_part.repeat(1, 1, dim))
+
+# # op3 - (1234) + (123)(4) - place sum of row i on diag ii
+# op3 = torch.diag_embed(sum_of_rows)
+
+# # op4 - (1234) + (124)(3) - place sum of col i on diag ii
+# op4 = torch.diag_embed(sum_of_cols)
+
+# # op5 - (1234) + (124)(3) + (123)(4) + (12)(34) + (12)(3)(4) - place sum of all entries on diag
+# op5 = torch.diag_embed(sum_all.unsqueeze(2).repeat(1, 1, dim))
+
+# # op6 - (14)(23) + (13)(24) + (24)(1)(3) + (124)(3) + (1234) - place sum of col i on row i
+# op6 = sum_of_cols.unsqueeze(3).repeat(1, 1, 1, dim)
+
+# # op7 - (14)(23) + (23)(1)(4) + (234)(1) + (123)(4) + (1234) - place sum of row i on row i
+# op7 = sum_of_rows.unsqueeze(3).repeat(1, 1, 1, dim)
+
+# # op8 - (14)(2)(3) + (134)(2) + (14)(23) + (124)(3) + (1234) - place sum of col i on col i
+# op8 = sum_of_cols.unsqueeze(2).repeat(1, 1, dim, 1)
+
+# # op9 - (13)(24) + (13)(2)(4) + (134)(2) + (123)(4) + (1234) - place sum of row i on col i
+# op9 = sum_of_rows.unsqueeze(2).repeat(1, 1, dim, 1)
+
+# # op10 - (1234) + (14)(23) - identity
+# op10 = inputs
+
+# # op11 - (1234) + (13)(24) - transpose
+# op11 = torch.transpose(inputs, -2, -1)
+
+# # op12 - (1234) + (234)(1) - place ii element in row i
+# op12 = diag_part.unsqueeze(3).repeat(1, 1, 1, dim)
+
+# # op13 - (1234) + (134)(2) - place ii element in col i
+# op13 = diag_part.unsqueeze(2).repeat(1, 1, dim, 1)
+
+# # op14 - (34)(1)(2) + (234)(1) + (134)(2) + (1234) + (12)(34) - place sum of diag in all entries
+# op14 = sum_diag_part.unsqueeze(3).repeat(1, 1, dim, dim)
+
+# # op15 - sum of all ops - place sum of all entries in all entries
+# op15 = sum_all.unsqueeze(2).unsqueeze(3).repeat(1, 1, dim, dim)
+
+# #A_2 = torch.einsum('abcd,abde->abce', inputs, inputs)
+# #A_4 = torch.einsum('abcd,abde->abce', A_2, A_2)
+# #op16 = torch.where(A_4>1, torch.ones(A_4.size()), A_4)
+
+# if normalization is not None:
+# float_dim = float(dim)
+# if normalization is 'inf':
+# op2 = torch.div(op2, float_dim)
+# op3 = torch.div(op3, float_dim)
+# op4 = torch.div(op4, float_dim)
+# op5 = torch.div(op5, float_dim**2)
+# op6 = torch.div(op6, float_dim)
+# op7 = torch.div(op7, float_dim)
+# op8 = torch.div(op8, float_dim)
+# op9 = torch.div(op9, float_dim)
+# op14 = torch.div(op14, float_dim)
+# op15 = torch.div(op15, float_dim**2)
+
+# #return [op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15, op16]
+# '''
+# l = [op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15]
+# for i, ls in enumerate(l):
+# print(i+1)
+# print(torch.sum(ls))
+# print("$%^&*(*&^%$#$%^&*(*&^%$%^&*(*&^%$%^&*(")
+# '''
+# return [op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15]
\ No newline at end of file
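Both WL-style nets share the Ring-GNN readout: every layer's (1, D_l, N, N) tensor is summed over the two node dimensions and the per-layer summaries are concatenated before the prediction MLP, which is why `MLPReadout` takes `sum(depth)` input features. A small sketch of that aggregation:

```python
# Ring-GNN-style readout: per-layer sum over both node dims, then concat.
import torch

x_list = [torch.randn(1, d, 5, 5) for d in (4, 8, 8)]  # per-layer outputs
pooled = [x.sum(dim=3).sum(dim=2) for x in x_list]     # each (1, D_l)
readout = torch.cat(pooled, dim=1)                     # (1, 4 + 8 + 8)
print(readout.shape)                                   # torch.Size([1, 20])
```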
diff --git a/nets/molecules_graph_regression/three_wl_gnn_net.py b/nets/molecules_graph_regression/three_wl_gnn_net.py
new file mode 100644
index 000000000..1ab939c2c
--- /dev/null
+++ b/nets/molecules_graph_regression/three_wl_gnn_net.py
@@ -0,0 +1,95 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+import time
+
+"""
+ 3WLGNN / ThreeWLGNN
+ Provably Powerful Graph Networks (Maron et al., 2019)
+ https://papers.nips.cc/paper/8488-provably-powerful-graph-networks.pdf
+
+ CODE adapted from https://github.com/hadarser/ProvablyPowerfulGraphNetworks_torch/
+"""
+
+from layers.three_wl_gnn_layers import RegularBlock, MlpBlock, SkipConnection, FullyConnected, diag_offdiag_maxpool
+from layers.mlp_readout_layer import MLPReadout
+
+class ThreeWLGNNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ self.num_atom_type = net_params['num_atom_type'] # 'num_atom_type' is 'nodeclasses' as in the original Ring-GNN repo
+ self.num_bond_type = net_params['num_bond_type']
+ depth_of_mlp = net_params['depth_of_mlp']
+ hidden_dim = net_params['hidden_dim']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.layer_norm = net_params['layer_norm']
+ self.residual = net_params['residual']
+ self.edge_feat = net_params['edge_feat']
+ self.device = net_params['device']
+ self.diag_pool_readout = True # if True, uses the new_suffix readout from the original code
+ num_classes = 1 # since regression problem
+
+ block_features = [hidden_dim] * n_layers # L here is the block number
+
+ if not self.edge_feat:
+ original_features_num = self.num_atom_type + 1 # Number of features of the input
+ else:
+ original_features_num = self.num_atom_type + self.num_bond_type + 1 # Number of features of the input
+
+ # sequential mlp blocks
+ last_layer_features = original_features_num
+ self.reg_blocks = nn.ModuleList()
+ for layer, next_layer_features in enumerate(block_features):
+ mlp_block = RegularBlock(depth_of_mlp, last_layer_features, next_layer_features, self.residual)
+ self.reg_blocks.append(mlp_block)
+ last_layer_features = next_layer_features
+
+
+ if self.diag_pool_readout:
+ self.fc_layers = nn.ModuleList()
+ for output_features in block_features:
+ # each block's output is pooled (hence 2*output_features) and passed through a fully connected layer
+ fc = FullyConnected(2*output_features, num_classes, activation_fn=None)
+ self.fc_layers.append(fc)
+ else:
+ self.mlp_prediction = MLPReadout(sum(block_features)+original_features_num, num_classes)
+
+ def forward(self, x_no_edge_feat, x_with_edge_feat):
+ x = x_no_edge_feat
+
+ if self.edge_feat:
+ x = x_with_edge_feat
+
+ if self.diag_pool_readout:
+ scores = torch.tensor(0, device=self.device, dtype=x.dtype)
+ else:
+ x_list = [x]
+
+ for i, block in enumerate(self.reg_blocks):
+
+ x = block(x)
+ if self.diag_pool_readout:
+ scores = self.fc_layers[i](diag_offdiag_maxpool(x)) + scores
+ else:
+ x_list.append(x)
+
+ if self.diag_pool_readout:
+ return scores
+ else:
+ # readout like RingGNN
+ x_list = [torch.sum(torch.sum(x, dim=3), dim=2) for x in x_list]
+ x_list = torch.cat(x_list, dim=1)
+
+ x_out = self.mlp_prediction(x_list)
+ return x_out
+
+
+
+ def loss(self, scores, targets):
+ # loss = nn.MSELoss()(scores,targets)
+ loss = nn.L1Loss()(scores, targets)
+ return loss
+
\ No newline at end of file
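These dense WL-GNNs take two candidate input tensors and select one via their `edge_feat` flag, so a single training loop can serve both variants. A toy illustration of the dispatch (class name hypothetical):

```python
# Dual-input forward dispatch, as used by the dense WL-GNN nets above.
import torch

class DualInputNet(torch.nn.Module):
    def __init__(self, edge_feat):
        super().__init__()
        self.edge_feat = edge_feat

    def forward(self, x_no_edge_feat, x_with_edge_feat):
        x = x_with_edge_feat if self.edge_feat else x_no_edge_feat
        return x.sum()

net = DualInputNet(edge_feat=False)
print(net(torch.ones(1, 4, 3, 3), torch.ones(1, 7, 3, 3)))  # tensor(36.)
```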
diff --git a/nets/superpixels_graph_classification/diffpool_net.py b/nets/superpixels_graph_classification/diffpool_net.py
deleted file mode 100644
index ced1b0494..000000000
--- a/nets/superpixels_graph_classification/diffpool_net.py
+++ /dev/null
@@ -1,249 +0,0 @@
-import torch
-import torch.nn as nn
-from torch.nn import init
-import torch.nn.functional as F
-
-import time
-import numpy as np
-from scipy.linalg import block_diag
-
-import dgl
-
-"""
-
-
- DIFFPOOL:
- Z. Ying, J. You, C. Morris, X. Ren, W. Hamilton, and J. Leskovec,
- Hierarchical graph representation learning with differentiable pooling (NeurIPS 2018)
- https://arxiv.org/pdf/1806.08804.pdf
-
- ! code started from dgl diffpool examples dir
-"""
-
-from layers.graphsage_layer import GraphSageLayer # this is GraphSageLayer
-from layers.diffpool_layer import DiffPoolLayer # this is DiffPoolBatchedGraphLayer
-# from .graphsage_net import GraphSageNet # this is GraphSage
-# replace BatchedDiffPool with DenseDiffPool and BatchedGraphSAGE with DenseGraphSage
-from layers.tensorized.dense_graphsage_layer import DenseGraphSage
-from layers.tensorized.dense_diffpool_layer import DenseDiffPool
-
-class DiffPoolNet(nn.Module):
- """
- DiffPool Fuse with GNN layers and pooling layers in sequence
- """
-
- def __init__(self, net_params):
-
- super().__init__()
- input_dim = net_params['in_dim']
- hidden_dim = net_params['hidden_dim']
- embedding_dim = net_params['embedding_dim']
- label_dim = net_params['n_classes']
- activation = F.relu
- n_layers = net_params['L'] # this is the gnn_per_block param
- dropout = net_params['dropout']
- self.graph_norm = net_params['graph_norm']
- self.batch_norm = net_params['batch_norm']
- self.residual = net_params['residual']
- aggregator_type = net_params['sage_aggregator']
- pool_ratio = net_params['pool_ratio']
-
- self.device = net_params['device']
- self.link_pred = net_params['linkpred']
- self.concat = net_params['cat']
- self.n_pooling = net_params['num_pool']
- self.batch_size = net_params['batch_size']
- self.link_pred_loss = []
- self.entropy_loss = []
-
- self.embedding_h = nn.Linear(input_dim, hidden_dim)
-
- # list of GNN modules before the first diffpool operation
- self.gc_before_pool = nn.ModuleList()
-
- self.assign_dim = net_params['assign_dim']
- # self.bn = True
- self.num_aggs = 1
-
- # constructing layers
- # layers before diffpool
- assert n_layers >= 3, "n_layers too few"
- self.gc_before_pool.append(GraphSageLayer(hidden_dim, hidden_dim, activation,
- dropout, aggregator_type, self.graph_norm, self.batch_norm, self.residual))
-
- for _ in range(n_layers - 2):
- self.gc_before_pool.append(GraphSageLayer(hidden_dim, hidden_dim, activation,
- dropout, aggregator_type, self.graph_norm, self.batch_norm, self.residual))
-
- self.gc_before_pool.append(GraphSageLayer(hidden_dim, embedding_dim, None, dropout, aggregator_type, self.graph_norm, self.batch_norm, self.residual))
-
-
- assign_dims = []
- assign_dims.append(self.assign_dim)
- if self.concat:
- # diffpool layer receive pool_emedding_dim node feature tensor
- # and return pool_embedding_dim node embedding
- pool_embedding_dim = hidden_dim * (n_layers - 1) + embedding_dim
- else:
-
- pool_embedding_dim = embedding_dim
-
- self.first_diffpool_layer = DiffPoolLayer(pool_embedding_dim, self.assign_dim, hidden_dim,
- activation, dropout, aggregator_type, self.graph_norm, self.batch_norm, self.link_pred)
- gc_after_per_pool = nn.ModuleList()
-
- # list of list of GNN modules, each list after one diffpool operation
- self.gc_after_pool = nn.ModuleList()
-
- for _ in range(n_layers - 1):
- gc_after_per_pool.append(DenseGraphSage(hidden_dim, hidden_dim, self.residual))
- gc_after_per_pool.append(DenseGraphSage(hidden_dim, embedding_dim, self.residual))
- self.gc_after_pool.append(gc_after_per_pool)
-
- self.assign_dim = int(self.assign_dim * pool_ratio)
-
- self.diffpool_layers = nn.ModuleList()
- # each pooling module
- for _ in range(self.n_pooling - 1):
- self.diffpool_layers.append(DenseDiffPool(pool_embedding_dim, self.assign_dim, hidden_dim, self.link_pred))
-
- gc_after_per_pool = nn.ModuleList()
-
- for _ in range(n_layers - 1):
- gc_after_per_pool.append(DenseGraphSage(hidden_dim, hidden_dim, self.residual))
- gc_after_per_pool.append(DenseGraphSage(hidden_dim, embedding_dim, self.residual))
- self.gc_after_pool.append(gc_after_per_pool)
-
- assign_dims.append(self.assign_dim)
- self.assign_dim = int(self.assign_dim * pool_ratio)
-
- # predicting layer
- if self.concat:
- self.pred_input_dim = pool_embedding_dim * \
- self.num_aggs * (n_pooling + 1)
- else:
- self.pred_input_dim = embedding_dim * self.num_aggs
- self.pred_layer = nn.Linear(self.pred_input_dim, label_dim)
-
- # weight initialization
- for m in self.modules():
- if isinstance(m, nn.Linear):
- m.weight.data = init.xavier_uniform_(m.weight.data,
- gain=nn.init.calculate_gain('relu'))
- if m.bias is not None:
- m.bias.data = init.constant_(m.bias.data, 0.0)
-
- def gcn_forward(self, g, h, snorm_n, gc_layers, cat=False):
- """
- Return gc_layer embedding cat.
- """
- block_readout = []
- for gc_layer in gc_layers[:-1]:
- h = gc_layer(g, h, snorm_n)
- block_readout.append(h)
- h = gc_layers[-1](g, h, snorm_n)
- block_readout.append(h)
- if cat:
- block = torch.cat(block_readout, dim=1) # N x F, F = F1 + F2 + ...
- else:
- block = h
- return block
-
- def gcn_forward_tensorized(self, h, adj, gc_layers, cat=False):
- block_readout = []
- for gc_layer in gc_layers:
- h = gc_layer(h, adj)
- block_readout.append(h)
- if cat:
- block = torch.cat(block_readout, dim=2) # N x F, F = F1 + F2 + ...
- else:
- block = h
- return block
-
- def forward(self, g, h, e, snorm_n, snorm_e):
- self.link_pred_loss = []
- self.entropy_loss = []
-
- # node feature for assignment matrix computation is the same as the
- # original node feature
- h = self.embedding_h(h)
- h_a = h
-
- out_all = []
-
- # we use GCN blocks to get an embedding first
- g_embedding = self.gcn_forward(g, h, snorm_n, self.gc_before_pool, self.concat)
-
- g.ndata['h'] = g_embedding
-
- readout = dgl.sum_nodes(g, 'h')
- out_all.append(readout)
- if self.num_aggs == 2:
- readout = dgl.max_nodes(g, 'h')
- out_all.append(readout)
-
- adj, h = self.first_diffpool_layer(g, g_embedding, snorm_n)
- node_per_pool_graph = int(adj.size()[0] / self.batch_size)
-
- h, adj = self.batch2tensor(adj, h, node_per_pool_graph)
- h = self.gcn_forward_tensorized(h, adj, self.gc_after_pool[0], self.concat)
-
- readout = torch.sum(h, dim=1)
- out_all.append(readout)
- if self.num_aggs == 2:
- readout, _ = torch.max(h, dim=1)
- out_all.append(readout)
-
- for i, diffpool_layer in enumerate(self.diffpool_layers):
- h, adj = diffpool_layer(h, adj)
- h = self.gcn_forward_tensorized(h, adj, self.gc_after_pool[i + 1], self.concat)
-
- readout = torch.sum(h, dim=1)
- out_all.append(readout)
-
- if self.num_aggs == 2:
- readout, _ = torch.max(h, dim=1)
- out_all.append(readout)
-
- if self.concat or self.num_aggs > 1:
- final_readout = torch.cat(out_all, dim=1)
- else:
- final_readout = readout
- ypred = self.pred_layer(final_readout)
- return ypred
-
- def batch2tensor(self, batch_adj, batch_feat, node_per_pool_graph):
- """
- transform a batched graph to batched adjacency tensor and node feature tensor
- """
- batch_size = int(batch_adj.size()[0] / node_per_pool_graph)
- adj_list = []
- feat_list = []
-
- for i in range(batch_size):
- start = i * node_per_pool_graph
- end = (i + 1) * node_per_pool_graph
-
- # 1/sqrt(V) normalization
- snorm_n = torch.FloatTensor(node_per_pool_graph, 1).fill_(1./float(node_per_pool_graph)).sqrt().to(self.device)
-
- adj_list.append(batch_adj[start:end, start:end])
- feat_list.append((batch_feat[start:end, :])*snorm_n)
- adj_list = list(map(lambda x: torch.unsqueeze(x, 0), adj_list))
- feat_list = list(map(lambda x: torch.unsqueeze(x, 0), feat_list))
- adj = torch.cat(adj_list, dim=0)
- feat = torch.cat(feat_list, dim=0)
-
- return feat, adj
-
- def loss(self, pred, label):
- '''
- loss function
- '''
- #softmax + CE
- criterion = nn.CrossEntropyLoss()
- loss = criterion(pred, label)
- for diffpool_layer in self.diffpool_layers:
- for key, value in diffpool_layer.loss_log.items():
- loss += value
- return loss
diff --git a/nets/superpixels_graph_classification/gat_net.py b/nets/superpixels_graph_classification/gat_net.py
index 93a2ccdbc..da46e27d0 100644
--- a/nets/superpixels_graph_classification/gat_net.py
+++ b/nets/superpixels_graph_classification/gat_net.py
@@ -24,7 +24,6 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
self.dropout = dropout
@@ -34,15 +33,15 @@ def __init__(self, net_params):
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GATLayer(hidden_dim * num_heads, hidden_dim, num_heads,
- dropout, self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers-1)])
- self.layers.append(GATLayer(hidden_dim * num_heads, out_dim, 1, dropout, self.graph_norm, self.batch_norm, self.residual))
+ dropout, self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ self.layers.append(GATLayer(hidden_dim * num_heads, out_dim, 1, dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
g.ndata['h'] = h
if self.readout == "sum":
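The same signature change (dropping snorm_n/snorm_e together with graph_norm) is applied to every superpixel net in the hunks below, so driver code must now call the models with three arguments. A minimal sketch of the updated calling convention (hypothetical driver-side helper, not taken from this diff; assumes a batched DGLGraph carrying 'feat' on nodes and edges):

def run_batch(model, batch_graphs):
    # batch_graphs: a batched DGLGraph with node and edge features under 'feat'
    batch_x = batch_graphs.ndata['feat']
    batch_e = batch_graphs.edata['feat']
    return model(batch_graphs, batch_x, batch_e)  # was model(g, h, e, snorm_n, snorm_e)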
diff --git a/nets/superpixels_graph_classification/gated_gcn_net.py b/nets/superpixels_graph_classification/gated_gcn_net.py
index 846051ad3..9d98a2266 100644
--- a/nets/superpixels_graph_classification/gated_gcn_net.py
+++ b/nets/superpixels_graph_classification/gated_gcn_net.py
@@ -24,7 +24,6 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
self.edge_feat = net_params['edge_feat']
@@ -33,11 +32,11 @@ def __init__(self, net_params):
self.embedding_h = nn.Linear(in_dim, hidden_dim)
self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
- self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers-1) ])
- self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.graph_norm, self.batch_norm, self.residual))
+ self.batch_norm, self.residual) for _ in range(n_layers-1) ])
+ self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
# input embedding
h = self.embedding_h(h)
@@ -47,7 +46,7 @@ def forward(self, g, h, e, snorm_n, snorm_e):
# convnets
for conv in self.layers:
- h, e = conv(g, h, e, snorm_n, snorm_e)
+ h, e = conv(g, h, e)
g.ndata['h'] = h
if self.readout == "sum":
diff --git a/nets/superpixels_graph_classification/gcn_net.py b/nets/superpixels_graph_classification/gcn_net.py
index 5013e79ca..2f7af1f1a 100644
--- a/nets/superpixels_graph_classification/gcn_net.py
+++ b/nets/superpixels_graph_classification/gcn_net.py
@@ -23,7 +23,6 @@ def __init__(self, net_params):
dropout = net_params['dropout']
n_layers = net_params['L']
self.readout = net_params['readout']
- self.graph_norm = net_params['graph_norm']
self.batch_norm = net_params['batch_norm']
self.residual = net_params['residual']
@@ -31,15 +30,15 @@ def __init__(self, net_params):
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
- self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers-1)])
- self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.graph_norm, self.batch_norm, self.residual))
+ self.batch_norm, self.residual) for _ in range(n_layers-1)])
+ self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.batch_norm, self.residual))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
g.ndata['h'] = h
if self.readout == "sum":
diff --git a/nets/superpixels_graph_classification/gin_net.py b/nets/superpixels_graph_classification/gin_net.py
index d7c3eb522..d65242407 100644
--- a/nets/superpixels_graph_classification/gin_net.py
+++ b/nets/superpixels_graph_classification/gin_net.py
@@ -25,8 +25,7 @@ def __init__(self, net_params):
n_mlp_layers = net_params['n_mlp_GIN'] # GIN
learn_eps = net_params['learn_eps_GIN'] # GIN
neighbor_aggr_type = net_params['neighbor_aggr_GIN'] # GIN
- readout = net_params['readout'] # this is graph_pooling_type
- graph_norm = net_params['graph_norm']
+ readout = net_params['readout'] # this is graph_pooling_type
batch_norm = net_params['batch_norm']
residual = net_params['residual']
@@ -39,7 +38,7 @@ def __init__(self, net_params):
mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
self.ginlayers.append(GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type,
- dropout, graph_norm, batch_norm, residual, 0, learn_eps))
+ dropout, batch_norm, residual, 0, learn_eps))
# Linear function for graph poolings (readout) of output of each layer
# which maps the output of different layers into a prediction score
@@ -57,7 +56,7 @@ def __init__(self, net_params):
else:
raise NotImplementedError
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
@@ -65,7 +64,7 @@ def forward(self, g, h, e, snorm_n, snorm_e):
hidden_rep = [h]
for i in range(self.n_layers):
- h = self.ginlayers[i](g, h, snorm_n)
+ h = self.ginlayers[i](g, h)
hidden_rep.append(h)
score_over_layer = 0
diff --git a/nets/superpixels_graph_classification/graphsage_net.py b/nets/superpixels_graph_classification/graphsage_net.py
index a799abb63..20acebcd1 100644
--- a/nets/superpixels_graph_classification/graphsage_net.py
+++ b/nets/superpixels_graph_classification/graphsage_net.py
@@ -26,8 +26,7 @@ def __init__(self, net_params):
in_feat_dropout = net_params['in_feat_dropout']
dropout = net_params['dropout']
aggregator_type = net_params['sage_aggregator']
- n_layers = net_params['L']
- graph_norm = net_params['graph_norm']
+ n_layers = net_params['L']
batch_norm = net_params['batch_norm']
residual = net_params['residual']
self.readout = net_params['readout']
@@ -36,15 +35,15 @@ def __init__(self, net_params):
self.in_feat_dropout = nn.Dropout(in_feat_dropout)
self.layers = nn.ModuleList([GraphSageLayer(hidden_dim, hidden_dim, F.relu,
- dropout, aggregator_type, graph_norm, batch_norm, residual) for _ in range(n_layers-1)])
- self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, graph_norm, batch_norm, residual))
+ dropout, aggregator_type, batch_norm, residual) for _ in range(n_layers-1)])
+ self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
h = self.in_feat_dropout(h)
for conv in self.layers:
- h = conv(g, h, snorm_n)
+ h = conv(g, h)
g.ndata['h'] = h
if self.readout == "sum":
diff --git a/nets/superpixels_graph_classification/load_net.py b/nets/superpixels_graph_classification/load_net.py
index b2769f66f..c1040316f 100644
--- a/nets/superpixels_graph_classification/load_net.py
+++ b/nets/superpixels_graph_classification/load_net.py
@@ -9,9 +9,9 @@
from nets.superpixels_graph_classification.graphsage_net import GraphSageNet
from nets.superpixels_graph_classification.gin_net import GINNet
from nets.superpixels_graph_classification.mo_net import MoNet as MoNet_
-from nets.superpixels_graph_classification.diffpool_net import DiffPoolNet
from nets.superpixels_graph_classification.mlp_net import MLPNet
-
+from nets.superpixels_graph_classification.ring_gnn_net import RingGNNNet
+from nets.superpixels_graph_classification.three_wl_gnn_net import ThreeWLGNNNet
def GatedGCN(net_params):
return GatedGCNNet(net_params)
@@ -31,12 +31,16 @@ def GIN(net_params):
def MoNet(net_params):
return MoNet_(net_params)
-def DiffPool(net_params):
- return DiffPoolNet(net_params)
-
def MLP(net_params):
return MLPNet(net_params)
+def RingGNN(net_params):
+ return RingGNNNet(net_params)
+
+def ThreeWLGNN(net_params):
+ return ThreeWLGNNNet(net_params)
+
+
def gnn_model(MODEL_NAME, net_params):
models = {
'GatedGCN': GatedGCN,
@@ -45,8 +49,9 @@ def gnn_model(MODEL_NAME, net_params):
'GraphSage': GraphSage,
'GIN': GIN,
'MoNet': MoNet,
- 'DiffPool': DiffPool,
- 'MLP': MLP
+ 'MLP': MLP,
+ 'RingGNN': RingGNN,
+ '3WLGNN': ThreeWLGNN
}
return models[MODEL_NAME](net_params)
\ No newline at end of file
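A quick usage sketch of the updated registry (runs only inside this repository; the net_params values are illustrative, with keys mirroring ThreeWLGNNNet below — 'RingGNN' would additionally need 'avg_node_num' and 'radius'):

from nets.superpixels_graph_classification.load_net import gnn_model

net_params = {                     # illustrative values only
    'in_dim': 3, 'depth_of_mlp': 2, 'hidden_dim': 16, 'n_classes': 10,
    'dropout': 0.0, 'L': 3, 'layer_norm': True, 'residual': False,
    'device': 'cpu',
}
model = gnn_model('3WLGNN', net_params)   # or 'RingGNN', 'GatedGCN', ...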
diff --git a/nets/superpixels_graph_classification/mlp_net.py b/nets/superpixels_graph_classification/mlp_net.py
index 5ad7ebfdc..8bda7132e 100644
--- a/nets/superpixels_graph_classification/mlp_net.py
+++ b/nets/superpixels_graph_classification/mlp_net.py
@@ -35,7 +35,7 @@ def __init__(self, net_params):
self.readout_mlp = MLPReadout(hidden_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.in_feat_dropout(h)
h = self.feat_mlp(h)
if self.gated:
diff --git a/nets/superpixels_graph_classification/mo_net.py b/nets/superpixels_graph_classification/mo_net.py
index 631c12c0f..ab055a427 100644
--- a/nets/superpixels_graph_classification/mo_net.py
+++ b/nets/superpixels_graph_classification/mo_net.py
@@ -18,7 +18,7 @@
class MoNet(nn.Module):
def __init__(self, net_params):
super().__init__()
-
+ self.name = 'MoNet'
in_dim = net_params['in_dim']
hidden_dim = net_params['hidden_dim']
out_dim = net_params['out_dim']
@@ -27,8 +27,7 @@ def __init__(self, net_params):
n_classes = net_params['n_classes']
dropout = net_params['dropout']
n_layers = net_params['L']
- self.readout = net_params['readout']
- graph_norm = net_params['graph_norm']
+ self.readout = net_params['readout']
batch_norm = net_params['batch_norm']
residual = net_params['residual']
self.device = net_params['device']
@@ -43,27 +42,26 @@ def __init__(self, net_params):
# Hidden layer
for _ in range(n_layers-1):
self.layers.append(GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
- dropout, graph_norm, batch_norm, residual))
+ dropout, batch_norm, residual))
self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
# Output layer
self.layers.append(GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type,
- dropout, graph_norm, batch_norm, residual))
+ dropout, batch_norm, residual))
self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
self.MLP_layer = MLPReadout(out_dim, n_classes)
- def forward(self, g, h, e, snorm_n, snorm_e):
+ def forward(self, g, h, e):
h = self.embedding_h(h)
# computing the 'pseudo' named tensor which depends on node degrees
- us, vs = g.edges()
- # to avoid zero division in case in_degree is 0, we add constant '1' in all node degrees denoting self-loop
- pseudo = [ [1/np.sqrt(g.in_degree(us[i])+1), 1/np.sqrt(g.in_degree(vs[i])+1)] for i in range(g.number_of_edges()) ]
- pseudo = torch.Tensor(pseudo).to(self.device)
+ g.ndata['deg'] = g.in_degrees()
+ g.apply_edges(self.compute_pseudo)
+ pseudo = g.edata['pseudo'].to(self.device).float()
for i in range(len(self.layers)):
- h = self.layers[i](g, h, self.pseudo_proj[i](pseudo), snorm_n)
+ h = self.layers[i](g, h, self.pseudo_proj[i](pseudo))
g.ndata['h'] = h
if self.readout == "sum":
@@ -77,6 +75,14 @@ def forward(self, g, h, e, snorm_n, snorm_e):
return self.MLP_layer(hg)
+ def compute_pseudo(self, edges):
+ # compute pseudo edge features for MoNet
+ # add a constant 1 (a self-loop) to every node degree to avoid division by zero when in_degree is 0
+ srcs = 1/np.sqrt(edges.src['deg']+1)
+ dsts = 1/np.sqrt(edges.dst['deg']+1)
+ pseudo = torch.cat((srcs.unsqueeze(-1), dsts.unsqueeze(-1)), dim=1)
+ return {'pseudo': pseudo}
+
def loss(self, pred, label):
criterion = nn.CrossEntropyLoss()
loss = criterion(pred, label)
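The new compute_pseudo above replaces the per-edge Python loop with a single vectorized apply_edges call. A toy-graph sketch of the same pattern (standalone, assuming a recent DGL and PyTorch):

import dgl
import torch

g = dgl.graph(([0, 1, 2], [1, 2, 0]))          # 3-node directed cycle
g.ndata['deg'] = g.in_degrees()

def compute_pseudo(edges):
    # 1/sqrt(deg+1) for both endpoints; the +1 guards against deg == 0
    srcs = 1 / torch.sqrt(edges.src['deg'].float() + 1)
    dsts = 1 / torch.sqrt(edges.dst['deg'].float() + 1)
    return {'pseudo': torch.stack((srcs, dsts), dim=1)}

g.apply_edges(compute_pseudo)
print(g.edata['pseudo'].shape)                 # torch.Size([3, 2])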
diff --git a/nets/superpixels_graph_classification/ring_gnn_net.py b/nets/superpixels_graph_classification/ring_gnn_net.py
new file mode 100644
index 000000000..c350450a7
--- /dev/null
+++ b/nets/superpixels_graph_classification/ring_gnn_net.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+import time
+
+"""
+ Ring-GNN
+ On the equivalence between graph isomorphism testing and function approximation with GNNs (Chen et al, 2019)
+ https://arxiv.org/pdf/1905.12560v1.pdf
+"""
+from layers.ring_gnn_equiv_layer import RingGNNEquivLayer
+from layers.mlp_readout_layer import MLPReadout
+
+class RingGNNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ self.in_dim_node = net_params['in_dim']
+ avg_node_num = net_params['avg_node_num']
+ radius = net_params['radius']
+ hidden_dim = net_params['hidden_dim']
+ n_classes = net_params['n_classes']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.layer_norm = net_params['layer_norm']
+ self.residual = net_params['residual']
+ self.device = net_params['device']
+
+ self.depth = [torch.LongTensor([1+self.in_dim_node])] + [torch.LongTensor([hidden_dim])] * n_layers
+
+ self.equi_modulelist = nn.ModuleList([RingGNNEquivLayer(self.device, m, n,
+ layer_norm=self.layer_norm,
+ residual=self.residual,
+ dropout=dropout,
+ radius=radius,
+ k2_init=0.5/avg_node_num) for m, n in zip(self.depth[:-1], self.depth[1:])])
+
+ self.prediction = MLPReadout(torch.sum(torch.stack(self.depth)).item(), n_classes)
+
+ def forward(self, x):
+ """
+ CODE ADAPTED FROM https://github.com/leichen2018/Ring-GNN/
+ """
+
+ # x is a dense tensor packing all the available information: adjacency and node features
+
+ x_list = [x]
+ for layer in self.equi_modulelist:
+ x = layer(x)
+ x_list.append(x)
+
+ # readout
+ x_list = [torch.sum(torch.sum(x, dim=3), dim=2) for x in x_list]
+ x_list = torch.cat(x_list, dim=1)
+
+ x_out = self.prediction(x_list)
+
+ return x_out
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss()
+ loss = criterion(pred, label)
+ return loss
+
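RingGNNNet.forward consumes a dense tensor rather than a DGL graph; self.depth[0] = 1 + in_dim_node suggests one adjacency channel plus one channel per node-feature dimension. A plausible construction of that input for a single graph (an assumption about the driver code, not taken from this diff):

import torch

def build_ring_gnn_input(adj, node_feat):
    # adj: (N, N) dense adjacency; node_feat: (N, F) node features
    # returns x: (1, 1 + F, N, N) with node features on the diagonal
    N, F = node_feat.shape
    x = torch.zeros(1, 1 + F, N, N)
    x[0, 0] = adj                       # channel 0: adjacency
    idx = torch.arange(N)
    x[0, 1:, idx, idx] = node_feat.t()  # remaining channels: diagonal features
    return x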
diff --git a/nets/superpixels_graph_classification/three_wl_gnn_net.py b/nets/superpixels_graph_classification/three_wl_gnn_net.py
new file mode 100644
index 000000000..d523e7e9c
--- /dev/null
+++ b/nets/superpixels_graph_classification/three_wl_gnn_net.py
@@ -0,0 +1,82 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+import dgl
+import time
+
+"""
+ 3WLGNN / ThreeWLGNN
+ Provably Powerful Graph Networks (Maron et al., 2019)
+ https://papers.nips.cc/paper/8488-provably-powerful-graph-networks.pdf
+
+ CODE adapted from https://github.com/hadarser/ProvablyPowerfulGraphNetworks_torch/
+"""
+
+from layers.three_wl_gnn_layers import RegularBlock, MlpBlock, SkipConnection, FullyConnected, diag_offdiag_maxpool
+from layers.mlp_readout_layer import MLPReadout
+
+class ThreeWLGNNNet(nn.Module):
+ def __init__(self, net_params):
+ super().__init__()
+ self.in_dim_node = net_params['in_dim']
+ depth_of_mlp = net_params['depth_of_mlp']
+ hidden_dim = net_params['hidden_dim']
+ n_classes = net_params['n_classes']
+ dropout = net_params['dropout']
+ n_layers = net_params['L']
+ self.layer_norm = net_params['layer_norm']
+ self.residual = net_params['residual']
+ self.device = net_params['device']
+ self.diag_pool_readout = True # if True, uses the new_suffix readout from original code
+
+ block_features = [hidden_dim] * n_layers # 'L' in the config is the number of regular blocks
+
+ original_features_num = self.in_dim_node + 1 # Number of features of the input
+
+ # sequential mlp blocks
+ last_layer_features = original_features_num
+ self.reg_blocks = nn.ModuleList()
+ for layer, next_layer_features in enumerate(block_features):
+ mlp_block = RegularBlock(depth_of_mlp, last_layer_features, next_layer_features, self.residual)
+ self.reg_blocks.append(mlp_block)
+ last_layer_features = next_layer_features
+
+ if self.diag_pool_readout:
+ self.fc_layers = nn.ModuleList()
+ for output_features in block_features:
+ # each block's output will be pooled (thus have 2*output_features), and pass through a fully connected
+ fc = FullyConnected(2*output_features, n_classes, activation_fn=None)
+ self.fc_layers.append(fc)
+ else:
+ self.mlp_prediction = MLPReadout(sum(block_features)+original_features_num, n_classes)
+
+ def forward(self, x):
+ if self.diag_pool_readout:
+ scores = torch.tensor(0, device=self.device, dtype=x.dtype)
+ else:
+ x_list = [x]
+
+ for i, block in enumerate(self.reg_blocks):
+
+ x = block(x)
+ if self.diag_pool_readout:
+ scores = self.fc_layers[i](diag_offdiag_maxpool(x)) + scores
+ else:
+ x_list.append(x)
+
+ if self.diag_pool_readout:
+ return scores
+ else:
+ # readout like RingGNN
+ x_list = [torch.sum(torch.sum(x, dim=3), dim=2) for x in x_list]
+ x_list = torch.cat(x_list, dim=1)
+
+ x_out = self.mlp_prediction(x_list)
+ return x_out
+
+ def loss(self, pred, label):
+ criterion = nn.CrossEntropyLoss()
+ loss = criterion(pred, label)
+ return loss
+
\ No newline at end of file
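When diag_pool_readout is enabled, each block's output passes through diag_offdiag_maxpool before its per-block FullyConnected layer, which is why those layers take 2*output_features inputs. The real pooling lives in layers/three_wl_gnn_layers.py; below is only a rough standalone approximation of what such a pooling computes per channel:

import torch

def diag_offdiag_maxpool_sketch(x):
    # x: (B, C, N, N) -> (B, 2C): channel-wise max over diagonal entries,
    # concatenated with the max over off-diagonal entries
    N = x.size(-1)
    max_diag = torch.diagonal(x, dim1=2, dim2=3).max(dim=2).values
    eye = torch.eye(N, dtype=torch.bool, device=x.device)
    max_off = x.masked_fill(eye, float('-inf')).amax(dim=(2, 3))
    return torch.cat([max_diag, max_off], dim=1)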
diff --git a/script_main_CitationGraphs_node_classification.sh b/script_main_CitationGraphs_node_classification.sh
deleted file mode 100644
index e0c49bea4..000000000
--- a/script_main_CitationGraphs_node_classification.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-# check :
-# bash script.sh
-# tmux attach -t script_cit_graph
-# tmux detach
-# pkill python
-
-# bash script_main_CitationGraphs_node_classification.sh
-
-
-############
-# GNNs
-############
-
-#GCN
-#GraphSage
-#MLP
-#MLP_GATED
-#GAT
-
-code=main_CitationGraphs_node_classification.py
-tmux new -s benchmark_CitationGraphs_node_classification -d
-tmux send-keys "conda activate benchmark_gnn" C-m
-
-datasets=(CORA CITESEER PUBMED)
-nets=(GCN GraphSage GAT MLP MLP_GATED)
-for dataset in ${datasets[@]}; do
- for net in ${nets[@]}; do
- tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --config 'configs/CitationGraphs_node_classification_$net.json' &
-wait" C-m
- done
-done
-tmux send-keys "tmux kill-session -t benchmark_CitationGraphs_node_classification" C-m
diff --git a/script_main_TSP_edge_classification.sh b/script_main_TSP_edge_classification.sh
deleted file mode 100644
index 86f4a71cb..000000000
--- a/script_main_TSP_edge_classification.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-
-# check :
-# bash script.sh
-# tmux attach -t script_tsp
-# tmux detach
-# pkill python
-
-# bash script_main_TSP_edge_classification.sh
-
-
-
-
-############
-# GNNs
-############
-
-#GatedGCN
-#GCN
-#GraphSage
-#MLP
-#GIN
-#MoNet
-#GAT
-
-
-
-
-
-############
-# TSP - 2 RUNS
-############
-
-seed0=41
-seed1=42
-code=main_TSP_edge_classification.py
-tmux new -s benchmark_TSP_edge_classification -d
-tmux send-keys "source activate benchmark_gnn" C-m
-dataset=TSP
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_MLP.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_MLP.json' &
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_MLP_GATED.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_MLP_GATED.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GIN.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GIN.json' &
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GCN.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GCN.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GraphSage.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GraphSage.json' &
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GatedGCN.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GatedGCN.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GAT.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GAT.json' &
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_MoNet.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_MoNet.json' &
-wait" C-m
-tmux send-keys "tmux kill-session -t benchmark_TSP_edge_classification" C-m
diff --git a/script_main_TUs_graph_classification.sh b/script_main_TUs_graph_classification.sh
deleted file mode 100644
index 30670d6eb..000000000
--- a/script_main_TUs_graph_classification.sh
+++ /dev/null
@@ -1,172 +0,0 @@
-#!/bin/bash
-
-# check :
-# bash script.sh
-# tmux attach -t script_mol_opt
-# tmux detach
-# pkill python
-
-# bash script_main_TUs_graph_classification.sh
-
-
-############
-# GNNs
-############
-
-#GatedGCN
-#GCN
-#GraphSage
-#MLP
-#GIN
-#MoNet
-#GAT
-#DiffPool
-
-
-
-
-
-############
-# ENZYMES & DD & PROTEINS_full
-############
-
-code=main_TUs_graph_classification.py
-tmux new -s benchmark_TUs_graph_classification -d
-tmux send-keys "source activate benchmark_gnn" C-m
-dataset=ENZYMES
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --config 'configs/TUs_graph_classification_GatedGCN_ENZYMES.json' &
-python $code --dataset $dataset --gpu_id 1 --config 'configs/TUs_graph_classification_GCN_ENZYMES.json' &
-python $code --dataset $dataset --gpu_id 2 --config 'configs/TUs_graph_classification_GraphSage_ENZYMES.json' &
-python $code --dataset $dataset --gpu_id 3 --config 'configs/TUs_graph_classification_MLP_ENZYMES.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --config 'configs/TUs_graph_classification_GIN_ENZYMES.json' &
-python $code --dataset $dataset --gpu_id 1 --config 'configs/TUs_graph_classification_MoNet_ENZYMES.json' &
-python $code --dataset $dataset --gpu_id 2 --config 'configs/TUs_graph_classification_GAT_ENZYMES.json' &
-python $code --dataset $dataset --gpu_id 3 --config 'configs/TUs_graph_classification_DiffPool_ENZYMES.json' &
-wait" C-m
-dataset=DD
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --config 'configs/TUs_graph_classification_GatedGCN_DD.json' &
-python $code --dataset $dataset --gpu_id 1 --config 'configs/TUs_graph_classification_GCN_DD.json' &
-python $code --dataset $dataset --gpu_id 2 --config 'configs/TUs_graph_classification_GraphSage_DD.json' &
-python $code --dataset $dataset --gpu_id 3 --config 'configs/TUs_graph_classification_MLP_DD.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --config 'configs/TUs_graph_classification_GIN_DD.json' &
-python $code --dataset $dataset --gpu_id 1 --config 'configs/TUs_graph_classification_MoNet_DD.json' &
-python $code --dataset $dataset --gpu_id 2 --config 'configs/TUs_graph_classification_GAT_DD.json' &
-python $code --dataset $dataset --gpu_id 3 --config 'configs/TUs_graph_classification_DiffPool_DD.json' &
-wait" C-m
-dataset=PROTEINS_full
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --config 'configs/TUs_graph_classification_GatedGCN_PROTEINS_full.json' &
-python $code --dataset $dataset --gpu_id 1 --config 'configs/TUs_graph_classification_GCN_PROTEINS_full.json' &
-python $code --dataset $dataset --gpu_id 2 --config 'configs/TUs_graph_classification_GraphSage_PROTEINS_full.json' &
-python $code --dataset $dataset --gpu_id 3 --config 'configs/TUs_graph_classification_MLP_PROTEINS_full.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --config 'configs/TUs_graph_classification_GIN_PROTEINS_full.json' &
-python $code --dataset $dataset --gpu_id 1 --config 'configs/TUs_graph_classification_MoNet_PROTEINS_full.json' &
-python $code --dataset $dataset --gpu_id 2 --config 'configs/TUs_graph_classification_GAT_PROTEINS_full.json' &
-python $code --dataset $dataset --gpu_id 3 --config 'configs/TUs_graph_classification_DiffPool_PROTEINS_full.json' &
-wait" C-m
-tmux send-keys "tmux kill-session -t benchmark_TUs_graph_classification" C-m
-
-
-
-
-
-
-# ############
-# # ENZYMES
-# ############
-
-# code=main_TUs_graph_classification.py
-# tmux new -s benchmark_TUs_graph_classification -d
-# tmux send-keys "source activate benchmark_gnn" C-m
-# dataset=ENZYMES
-# tmux send-keys "
-# python $code --dataset $dataset --gpu_id 0 --config 'configs/TUs_graph_classification_GatedGCN_ENZYMES.json' &
-# python $code --dataset $dataset --gpu_id 1 --config 'configs/TUs_graph_classification_GCN_ENZYMES.json' &
-# python $code --dataset $dataset --gpu_id 2 --config 'configs/TUs_graph_classification_GraphSage_ENZYMES.json' &
-# python $code --dataset $dataset --gpu_id 3 --config 'configs/TUs_graph_classification_MLP_ENZYMES.json' &
-# wait" C-m
-# tmux send-keys "
-# python $code --dataset $dataset --gpu_id 0 --config 'configs/TUs_graph_classification_GIN_ENZYMES.json' &
-# python $code --dataset $dataset --gpu_id 1 --config 'configs/TUs_graph_classification_MoNet_ENZYMES.json' &
-# python $code --dataset $dataset --gpu_id 2 --config 'configs/TUs_graph_classification_GAT_ENZYMES.json' &
-# python $code --dataset $dataset --gpu_id 3 --config 'configs/TUs_graph_classification_DiffPool_ENZYMES.json' &
-# wait" C-m
-# tmux send-keys "tmux kill-session -t benchmark_TUs_graph_classification" C-m
-
-
-
-
-
-
-# ############
-# # DD
-# ############
-
-# code=main_TUs_graph_classification.py
-# tmux new -s benchmark_TUs_graph_classification -d
-# tmux send-keys "source activate benchmark_gnn" C-m
-# dataset=DD
-# tmux send-keys "
-# python $code --dataset $dataset --gpu_id 0 --config 'configs/TUs_graph_classification_GatedGCN_DD.json' &
-# python $code --dataset $dataset --gpu_id 1 --config 'configs/TUs_graph_classification_GCN_DD.json' &
-# python $code --dataset $dataset --gpu_id 2 --config 'configs/TUs_graph_classification_GraphSage_DD.json' &
-# python $code --dataset $dataset --gpu_id 3 --config 'configs/TUs_graph_classification_MLP_DD.json' &
-# wait" C-m
-# tmux send-keys "
-# python $code --dataset $dataset --gpu_id 0 --config 'configs/TUs_graph_classification_GIN_DD.json' &
-# python $code --dataset $dataset --gpu_id 1 --config 'configs/TUs_graph_classification_MoNet_DD.json' &
-# python $code --dataset $dataset --gpu_id 2 --config 'configs/TUs_graph_classification_GAT_DD.json' &
-# python $code --dataset $dataset --gpu_id 3 --config 'configs/TUs_graph_classification_DiffPool_DD.json' &
-# wait" C-m
-# tmux send-keys "tmux kill-session -t benchmark_TUs_graph_classification" C-m
-
-
-
-
-
-
-
-# ############
-# # PROTEINS_full
-# ############
-
-# code=main_TUs_graph_classification.py
-# tmux new -s benchmark_TUs_graph_classification -d
-# tmux send-keys "source activate benchmark_gnn" C-m
-# dataset=PROTEINS_full
-# tmux send-keys "
-# python $code --dataset $dataset --gpu_id 0 --config 'configs/TUs_graph_classification_GatedGCN_PROTEINS_full.json' &
-# python $code --dataset $dataset --gpu_id 1 --config 'configs/TUs_graph_classification_GCN_PROTEINS_full.json' &
-# python $code --dataset $dataset --gpu_id 2 --config 'configs/TUs_graph_classification_GraphSage_PROTEINS_full.json' &
-# python $code --dataset $dataset --gpu_id 3 --config 'configs/TUs_graph_classification_MLP_PROTEINS_full.json' &
-# wait" C-m
-# tmux send-keys "
-# python $code --dataset $dataset --gpu_id 0 --config 'configs/TUs_graph_classification_GIN_PROTEINS_full.json' &
-# python $code --dataset $dataset --gpu_id 1 --config 'configs/TUs_graph_classification_MoNet_PROTEINS_full.json' &
-# python $code --dataset $dataset --gpu_id 2 --config 'configs/TUs_graph_classification_GAT_PROTEINS_full.json' &
-# python $code --dataset $dataset --gpu_id 3 --config 'configs/TUs_graph_classification_DiffPool_PROTEINS_full.json' &
-# wait" C-m
-# tmux send-keys "tmux kill-session -t benchmark_TUs_graph_classification" C-m
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/script_one_code_to_rull_them_all.sh b/script_one_code_to_rull_them_all.sh
deleted file mode 100644
index 6d83924ab..000000000
--- a/script_one_code_to_rull_them_all.sh
+++ /dev/null
@@ -1,192 +0,0 @@
-#!/bin/bash
-
-# bash script_one_code_to_rull_them_all.sh
-
-
-
-
-tmux new -s benchmark_script -d
-tmux send-keys "source activate benchmark_gnn" C-m
-seed0=41
-
-
-############
-# TU
-############
-
-code=main_TUs_graph_classification.py
-dataset=ENZYMES
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TUs_graph_classification_MLP_ENZYMES.json' &
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TUs_graph_classification_MLP_GATED_ENZYMES.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/TUs_graph_classification_GIN_ENZYMES.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/TUs_graph_classification_GCN_ENZYMES.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/TUs_graph_classification_GraphSage_ENZYMES.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TUs_graph_classification_GatedGCN_ENZYMES.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/TUs_graph_classification_GAT_ENZYMES.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/TUs_graph_classification_DiffPool_ENZYMES.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/TUs_graph_classification_MoNet_ENZYMES.json' &
-wait" C-m
-
-dataset=DD
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TUs_graph_classification_MLP_DD.json' &
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TUs_graph_classification_MLP_GATED_DD.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/TUs_graph_classification_GIN_DD.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/TUs_graph_classification_GCN_DD.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/TUs_graph_classification_GraphSage_DD.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TUs_graph_classification_GatedGCN_DD.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/TUs_graph_classification_GAT_DD.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/TUs_graph_classification_DiffPool_DD.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/TUs_graph_classification_MoNet_DD.json' &
-wait" C-m
-
-dataset=PROTEINS_full
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TUs_graph_classification_MLP_PROTEINS_full.json' &
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TUs_graph_classification_MLP_GATED_PROTEINS_full.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/TUs_graph_classification_GIN_PROTEINS_full.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/TUs_graph_classification_GCN_PROTEINS_full.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/TUs_graph_classification_GraphSage_PROTEINS_full.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TUs_graph_classification_GatedGCN_PROTEINS_full.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/TUs_graph_classification_GAT_PROTEINS_full.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/TUs_graph_classification_DiffPool_PROTEINS_full.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/TUs_graph_classification_MoNet_PROTEINS_full.json' &
-wait" C-m
-
-
-############
-# ZINC
-############
-
-code=main_molecules_graph_regression.py
-dataset=ZINC
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_MLP_ZINC.json' &
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_MLP_GATED_ZINC.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/molecules_graph_regression_GIN_ZINC.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/molecules_graph_regression_GCN_ZINC.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/molecules_graph_regression_GraphSage_ZINC.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GatedGCN_ZINC.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/molecules_graph_regression_GAT_ZINC.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/molecules_graph_regression_DiffPool_ZINC.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/molecules_graph_regression_MoNet_ZINC.json' &
-wait" C-m
-
-
-############
-# MNIST and CIFAR10
-############
-
-code=main_superpixels_graph_classification.py
-dataset=MNIST
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MLP_MNIST.json' &
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MLP_GATED_MNIST.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/superpixels_graph_classification_GIN_MNIST.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/superpixels_graph_classification_GCN_MNIST.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/superpixels_graph_classification_GraphSage_MNIST.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GatedGCN_MNIST.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/superpixels_graph_classification_GAT_MNIST.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/superpixels_graph_classification_DiffPool_MNIST.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/superpixels_graph_classification_MoNet_MNIST.json' &
-wait" C-m
-
-dataset=CIFAR10
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MLP_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MLP_GATED_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/superpixels_graph_classification_GIN_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/superpixels_graph_classification_GCN_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/superpixels_graph_classification_GraphSage_CIFAR10.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GatedGCN_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/superpixels_graph_classification_GAT_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/superpixels_graph_classification_DiffPool_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/superpixels_graph_classification_MoNet_CIFAR10.json' &
-wait" C-m
-
-
-############
-# PATTERN and CLUSTER
-############
-
-code=main_SBMs_node_classification.py
-dataset=SBM_PATTERN
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MLP_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/SBMs_node_clustering_MLP_GATED_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/SBMs_node_clustering_GIN_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/SBMs_node_clustering_GCN_PATTERN.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GraphSage_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/SBMs_node_clustering_GAT_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/SBMs_node_clustering_MoNet_PATTERN.json' &
-wait" C-m
-
-dataset=SBM_CLUSTER
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MLP_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/SBMs_node_clustering_MLP_GATED_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/SBMs_node_clustering_GIN_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/SBMs_node_clustering_GCN_CLUSTER.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GraphSage_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/SBMs_node_clustering_GAT_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/SBMs_node_clustering_MoNet_CLUSTER.json' &
-wait" C-m
-
-
-############
-# TSP
-############
-
-code=main_TSP_edge_classification.py
-dataset=TSP
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_MLP.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/TSP_edge_classification_MLP_GATED.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/TSP_edge_classification_GIN.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/TSP_edge_classification_GCN.json' &
-wait" C-m
-tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GraphSage.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed0 --config 'configs/TSP_edge_classification_GatedGCN.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed0 --config 'configs/TSP_edge_classification_GAT.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed0 --config 'configs/TSP_edge_classification_MoNet.json' &
-wait" C-m
-
-
-tmux send-keys "tmux kill-session -t benchmark_script" C-m
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/script_main_SBMs_node_classification_PATTERN.sh b/scripts/COLLAB/script_main_COLLAB_edge_classification_40k.sh
similarity index 53%
rename from script_main_SBMs_node_classification_PATTERN.sh
rename to scripts/COLLAB/script_main_COLLAB_edge_classification_40k.sh
index 5faefa4fb..372ebff94 100644
--- a/script_main_SBMs_node_classification_PATTERN.sh
+++ b/scripts/COLLAB/script_main_COLLAB_edge_classification_40k.sh
@@ -2,11 +2,12 @@
# check :
# bash script.sh
-# tmux attach -t script_mol_opt
+# tmux attach -t benchmark_COLLAB_edge_classification
# tmux detach
# pkill python
-# bash script_main_SBMs_node_classification_PATTERN.sh
+# bash script_main_COLLAB_edge_classification_40k.sh
+
@@ -21,79 +22,69 @@
#GIN
#MoNet
#GAT
-#DiffPool
############
-# SBM_PATTERN - 4 RUNS
+# OGBL-COLLAB - 4 RUNS
############
seed0=41
-seed1=95
-seed2=12
-seed3=35
-code=main_SBMs_node_classification.py
-tmux new -s benchmark_SBMs_node_classification -d
+seed1=42
+seed2=9
+seed3=23
+code=main_COLLAB_edge_classification.py
+tmux new -s benchmark_COLLAB_edge_classification -d
tmux send-keys "source activate benchmark_gnn" C-m
-dataset=SBM_PATTERN
+dataset=OGBL-COLLAB
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MLP_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_MLP_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_MLP_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_MLP_PATTERN.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_MLP_40k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_MLP_40k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_MLP_40k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_MLP_40k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MLP_GATED_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_MLP_GATED_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_MLP_GATED_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_MLP_GATED_PATTERN.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GCN_40k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GCN_40k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GCN_40k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GCN_40k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GIN_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GIN_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GIN_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GIN_PATTERN.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GraphSage_40k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GraphSage_40k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GraphSage_40k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GraphSage_40k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GCN_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GCN_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GCN_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GCN_PATTERN.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GIN_40k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GIN_40k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GIN_40k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GIN_40k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GraphSage_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GraphSage_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GraphSage_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GraphSage_PATTERN.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GAT_40k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GAT_40k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GAT_40k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GAT_40k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_MoNet_40k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_MoNet_40k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_MoNet_40k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_MoNet_40k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GAT_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GAT_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GAT_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GAT_PATTERN.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GatedGCN_40k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GatedGCN_40k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GatedGCN_40k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GatedGCN_40k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MoNet_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_MoNet_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_MoNet_PATTERN.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_MoNet_PATTERN.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GatedGCN_40k.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GatedGCN_40k.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GatedGCN_40k.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GatedGCN_40k.json' --edge_feat True &
wait" C-m
-tmux send-keys "tmux kill-session -t benchmark_SBMs_node_classification" C-m
-
-
-
-
-
-
-
-
-
+tmux send-keys "tmux kill-session -t benchmark_COLLAB_edge_classification" C-m
\ No newline at end of file
diff --git a/scripts/COLLAB/script_main_COLLAB_edge_classification_PE_GatedGCN_40k.sh b/scripts/COLLAB/script_main_COLLAB_edge_classification_PE_GatedGCN_40k.sh
new file mode 100644
index 000000000..433159bd0
--- /dev/null
+++ b/scripts/COLLAB/script_main_COLLAB_edge_classification_PE_GatedGCN_40k.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+# check :
+# bash script.sh
+# tmux attach -t benchmark_COLLAB_edge_classification
+# tmux detach
+# pkill python
+
+# bash script_main_COLLAB_edge_classification_PE_GatedGCN_40k.sh
+
+
+
+
+############
+# GNNs
+############
+
+#GatedGCN
+#GCN
+#GraphSage
+#MLP
+#GIN
+#MoNet
+#GAT
+
+
+
+
+
+############
+# COLLAB - 4 RUNS
+############
+
+seed0=41
+seed1=42
+seed2=9
+seed3=23
+code=main_COLLAB_edge_classification.py
+dataset=OGBL-COLLAB
+tmux new -s benchmark_COLLAB_edge_classification -d
+tmux send-keys "source activate benchmark_gnn" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GatedGCN_PE_40k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GatedGCN_PE_40k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GatedGCN_PE_40k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GatedGCN_PE_40k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark_COLLAB_edge_classification" C-m
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/scripts/COLLAB/script_main_COLLAB_edge_classification_edge_feature_analysis.sh b/scripts/COLLAB/script_main_COLLAB_edge_classification_edge_feature_analysis.sh
new file mode 100644
index 000000000..021b1b88d
--- /dev/null
+++ b/scripts/COLLAB/script_main_COLLAB_edge_classification_edge_feature_analysis.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+# check :
+# bash script.sh
+# tmux attach -t benchmark_COLLAB_edge_classification
+# tmux detach
+# pkill python
+
+# bash script_main_COLLAB_edge_classification_edge_feature_analysis.sh
+
+
+
+
+############
+# GNNs
+############
+
+#GatedGCN
+#GAT
+
+
+
+
+
+############
+# COLLAB - 4 RUNS
+############
+
+seed0=411
+seed1=421
+seed2=91
+seed3=231
+code=main_COLLAB_edge_classification.py
+tmux new -s benchmark_COLLAB_edge_classification -d
+tmux send-keys "source activate benchmark_gnn" C-m
+dataset=OGBL-COLLAB
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GAT_isotropic.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GAT_isotropic.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GAT_isotropic.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GAT_isotropic.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GAT_edgefeat.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GAT_edgefeat.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GAT_edgefeat.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GAT_edgefeat.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GAT_edgereprfeat.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GAT_edgereprfeat.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GAT_edgereprfeat.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GAT_edgereprfeat.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GAT_edgereprfeat.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GAT_edgereprfeat.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GAT_edgereprfeat.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GAT_edgereprfeat.json' --edge_feat True &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GatedGCN_isotropic.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GatedGCN_isotropic.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GatedGCN_isotropic.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GatedGCN_isotropic.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GatedGCN_edgefeat.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GatedGCN_edgefeat.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GatedGCN_edgefeat.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GatedGCN_edgefeat.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GatedGCN_edgereprfeat.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GatedGCN_edgereprfeat.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GatedGCN_edgereprfeat.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GatedGCN_edgereprfeat.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/COLLAB_edge_classification_GatedGCN_edgereprfeat.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/COLLAB_edge_classification_GatedGCN_edgereprfeat.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/COLLAB_edge_classification_GatedGCN_edgereprfeat.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/COLLAB_edge_classification_GatedGCN_edgereprfeat.json' --edge_feat True &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark_COLLAB_edge_classification" C-m
diff --git a/scripts/CSL/script_main_CSL_graph_classification_20_seeds.sh b/scripts/CSL/script_main_CSL_graph_classification_20_seeds.sh
new file mode 100644
index 000000000..15162b678
--- /dev/null
+++ b/scripts/CSL/script_main_CSL_graph_classification_20_seeds.sh
@@ -0,0 +1,161 @@
+#!/bin/bash
+
+
+############
+# Usage
+############
+
+# bash script_main_CSL_graph_classification_20_seeds.sh
+
+
+
+############
+# GNNs
+############
+
+# 20 seeds for
+
+#MLP
+#GCN
+#GraphSage
+#GatedGCN
+#GAT
+#MoNet
+#GIN
+#3WLGNN
+#RingGNN
+
+# with Positional Encoding (P.E.)
+
+
+############
+# CSL
+############
+
+code=main_CSL_graph_classification.py
+dataset=CSL
+
+dir_MLP='out/CSL/MLP/'
+dir_GCN='out/CSL/GCN/'
+dir_GraphSage='out/CSL/GraphSage/'
+dir_GatedGCN='out/CSL/GatedGCN/'
+dir_GAT='out/CSL/GAT/'
+dir_MoNet='out/CSL/MoNet/'
+dir_GIN='out/CSL/GIN/'
+
+dir_RingGNN_small='out/CSL/RingGNN_small/'
+dir_3WLGNN_small='out/CSL/3WLGNN_small/'
+dir_RingGNN_large='out/CSL/RingGNN_large/'
+dir_3WLGNN_large='out/CSL/3WLGNN_large/'
+tmux new -s benchmark_CSL_20_seeds -d
+tmux send-keys "source activate benchmark_gnn" C-m
+all_seeds=(12 32 52 82 92)
+# the values above are starting seeds; each one yields four derived seeds
+# (seed+2, seed+3, seed+6, seed+7) in the loops below, i.e. 5 x 4 = 20 seeds
+
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_MLP --config 'configs/CSL_graph_classification_MLP_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_MLP --config 'configs/CSL_graph_classification_MLP_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_MLP --config 'configs/CSL_graph_classification_MLP_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_MLP --config 'configs/CSL_graph_classification_MLP_CSL_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_GCN --config 'configs/CSL_graph_classification_GCN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_GCN --config 'configs/CSL_graph_classification_GCN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_GCN --config 'configs/CSL_graph_classification_GCN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_GCN --config 'configs/CSL_graph_classification_GCN_CSL_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_GraphSage --config 'configs/CSL_graph_classification_GraphSage_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_GraphSage --config 'configs/CSL_graph_classification_GraphSage_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_GraphSage --config 'configs/CSL_graph_classification_GraphSage_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_GraphSage --config 'configs/CSL_graph_classification_GraphSage_CSL_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_GatedGCN --config 'configs/CSL_graph_classification_GatedGCN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_GatedGCN --config 'configs/CSL_graph_classification_GatedGCN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_GatedGCN --config 'configs/CSL_graph_classification_GatedGCN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_GatedGCN --config 'configs/CSL_graph_classification_GatedGCN_CSL_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_GAT --config 'configs/CSL_graph_classification_GAT_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_GAT --config 'configs/CSL_graph_classification_GAT_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_GAT --config 'configs/CSL_graph_classification_GAT_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_GAT --config 'configs/CSL_graph_classification_GAT_CSL_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_MoNet --config 'configs/CSL_graph_classification_MoNet_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_MoNet --config 'configs/CSL_graph_classification_MoNet_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_MoNet --config 'configs/CSL_graph_classification_MoNet_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_MoNet --config 'configs/CSL_graph_classification_MoNet_CSL_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_GIN --config 'configs/CSL_graph_classification_GIN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_GIN --config 'configs/CSL_graph_classification_GIN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_GIN --config 'configs/CSL_graph_classification_GIN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_GIN --config 'configs/CSL_graph_classification_GIN_CSL_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_3WLGNN_small --config 'configs/CSL_graph_classification_3WLGNN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_3WLGNN_small --config 'configs/CSL_graph_classification_3WLGNN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_3WLGNN_small --config 'configs/CSL_graph_classification_3WLGNN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_3WLGNN_small --config 'configs/CSL_graph_classification_3WLGNN_CSL_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_RingGNN_small --config 'configs/CSL_graph_classification_RingGNN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_RingGNN_small --config 'configs/CSL_graph_classification_RingGNN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_RingGNN_small --config 'configs/CSL_graph_classification_RingGNN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_RingGNN_small --config 'configs/CSL_graph_classification_RingGNN_CSL_100k.json' &
+wait" C-m
+done
+
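+# The "_large" runs below reuse the 100k configs but override hidden_dim on the
+# command line (181 for 3WLGNN, 102 for RingGNN) to scale the models up to a
+# larger parameter budget.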
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_3WLGNN_large --hidden_dim 181 --config 'configs/CSL_graph_classification_3WLGNN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_3WLGNN_large --hidden_dim 181 --config 'configs/CSL_graph_classification_3WLGNN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_3WLGNN_large --hidden_dim 181 --config 'configs/CSL_graph_classification_3WLGNN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_3WLGNN_large --hidden_dim 181 --config 'configs/CSL_graph_classification_3WLGNN_CSL_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_RingGNN_large --hidden_dim 102 --config 'configs/CSL_graph_classification_RingGNN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_RingGNN_large --hidden_dim 102 --config 'configs/CSL_graph_classification_RingGNN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_RingGNN_large --hidden_dim 102 --config 'configs/CSL_graph_classification_RingGNN_CSL_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_RingGNN_large --hidden_dim 102 --config 'configs/CSL_graph_classification_RingGNN_CSL_100k.json' &
+wait" C-m
+done
+
+tmux send-keys "tmux kill-session -t benchmark_CSL_20_seeds" C-m
+
+
+
+
+
diff --git a/scripts/CSL/script_main_CSL_graph_classification_PE_20_seeds.sh b/scripts/CSL/script_main_CSL_graph_classification_PE_20_seeds.sh
new file mode 100644
index 000000000..cd133a9db
--- /dev/null
+++ b/scripts/CSL/script_main_CSL_graph_classification_PE_20_seeds.sh
@@ -0,0 +1,161 @@
+#!/bin/bash
+
+
+############
+# Usage
+############
+
+# bash script_main_CSL_graph_classification_PE_20_seeds.sh
+
+
+
+############
+# GNNs
+############
+
+#20 seeds for
+
+#MLP
+#GCN
+#GraphSage
+#GatedGCN
+#GAT
+#MoNet
+#GIN
+#3WLGNN
+#RingGNN
+
+# with Positional Encoding (P.E.)
+
+
+############
+# CSL
+############
+
+code=main_CSL_graph_classification.py
+dataset=CSL
+
+dir_MLP='out/CSL/MLP/'
+dir_GCN='out/CSL/GCN/'
+dir_GraphSage='out/CSL/GraphSage/'
+dir_GatedGCN='out/CSL/GatedGCN/'
+dir_GAT='out/CSL/GAT/'
+dir_MoNet='out/CSL/MoNet/'
+dir_GIN='out/CSL/GIN/'
+
+dir_RingGNN_small='out/CSL/RingGNN_small/'
+dir_3WLGNN_small='out/CSL/3WLGNN_small/'
+dir_RingGNN_large='out/CSL/RingGNN_large/'
+dir_3WLGNN_large='out/CSL/3WLGNN_large/'
+tmux new -s benchmark_CSL_20_seeds -d
+tmux send-keys "source activate benchmark_gnn" C-m
+all_seeds=(12 32 52 82 92)
+# base seeds; each is expanded into 4 seeds below (one run per GPU),
+# giving 5 x 4 = 20 seeds in total
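+# This script mirrors script_main_CSL_graph_classification_20_seeds.sh, but each
+# run uses the *_PE_100k.json configs (positional encodings enabled); the "_large"
+# runs at the end likewise override hidden_dim (179 for 3WLGNN, 93 for RingGNN).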
+
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_MLP --config 'configs/CSL_graph_classification_MLP_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_MLP --config 'configs/CSL_graph_classification_MLP_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_MLP --config 'configs/CSL_graph_classification_MLP_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_MLP --config 'configs/CSL_graph_classification_MLP_CSL_PE_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_GCN --config 'configs/CSL_graph_classification_GCN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_GCN --config 'configs/CSL_graph_classification_GCN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_GCN --config 'configs/CSL_graph_classification_GCN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_GCN --config 'configs/CSL_graph_classification_GCN_CSL_PE_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_GraphSage --config 'configs/CSL_graph_classification_GraphSage_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_GraphSage --config 'configs/CSL_graph_classification_GraphSage_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_GraphSage --config 'configs/CSL_graph_classification_GraphSage_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_GraphSage --config 'configs/CSL_graph_classification_GraphSage_CSL_PE_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_GatedGCN --config 'configs/CSL_graph_classification_GatedGCN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_GatedGCN --config 'configs/CSL_graph_classification_GatedGCN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_GatedGCN --config 'configs/CSL_graph_classification_GatedGCN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_GatedGCN --config 'configs/CSL_graph_classification_GatedGCN_CSL_PE_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_GAT --config 'configs/CSL_graph_classification_GAT_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_GAT --config 'configs/CSL_graph_classification_GAT_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_GAT --config 'configs/CSL_graph_classification_GAT_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_GAT --config 'configs/CSL_graph_classification_GAT_CSL_PE_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_MoNet --config 'configs/CSL_graph_classification_MoNet_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_MoNet --config 'configs/CSL_graph_classification_MoNet_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_MoNet --config 'configs/CSL_graph_classification_MoNet_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_MoNet --config 'configs/CSL_graph_classification_MoNet_CSL_PE_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_GIN --config 'configs/CSL_graph_classification_GIN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_GIN --config 'configs/CSL_graph_classification_GIN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_GIN --config 'configs/CSL_graph_classification_GIN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_GIN --config 'configs/CSL_graph_classification_GIN_CSL_PE_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_3WLGNN_small --config 'configs/CSL_graph_classification_3WLGNN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_3WLGNN_small --config 'configs/CSL_graph_classification_3WLGNN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_3WLGNN_small --config 'configs/CSL_graph_classification_3WLGNN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_3WLGNN_small --config 'configs/CSL_graph_classification_3WLGNN_CSL_PE_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_RingGNN_small --config 'configs/CSL_graph_classification_RingGNN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_RingGNN_small --config 'configs/CSL_graph_classification_RingGNN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_RingGNN_small --config 'configs/CSL_graph_classification_RingGNN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_RingGNN_small --config 'configs/CSL_graph_classification_RingGNN_CSL_PE_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_3WLGNN_large --hidden_dim 179 --config 'configs/CSL_graph_classification_3WLGNN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_3WLGNN_large --hidden_dim 179 --config 'configs/CSL_graph_classification_3WLGNN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_3WLGNN_large --hidden_dim 179 --config 'configs/CSL_graph_classification_3WLGNN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_3WLGNN_large --hidden_dim 179 --config 'configs/CSL_graph_classification_3WLGNN_CSL_PE_100k.json' &
+wait" C-m
+done
+
+for seed in ${all_seeds[@]}; do
+ tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $((seed+2)) --out_dir $dir_RingGNN_large --hidden_dim 93 --config 'configs/CSL_graph_classification_RingGNN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $((seed+3)) --out_dir $dir_RingGNN_large --hidden_dim 93 --config 'configs/CSL_graph_classification_RingGNN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $((seed+6)) --out_dir $dir_RingGNN_large --hidden_dim 93 --config 'configs/CSL_graph_classification_RingGNN_CSL_PE_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $((seed+7)) --out_dir $dir_RingGNN_large --hidden_dim 93 --config 'configs/CSL_graph_classification_RingGNN_CSL_PE_100k.json' &
+wait" C-m
+done
+
+tmux send-keys "tmux kill-session -t benchmark_CSL_20_seeds" C-m
+
+
+
+
+
diff --git a/script_main_molecules_graph_regression_ZINC.sh b/scripts/SBMs/script_main_SBMs_node_classification_CLUSTER_100k.sh
similarity index 53%
rename from script_main_molecules_graph_regression_ZINC.sh
rename to scripts/SBMs/script_main_SBMs_node_classification_CLUSTER_100k.sh
index d0d367477..ef4c41c6b 100644
--- a/script_main_molecules_graph_regression_ZINC.sh
+++ b/scripts/SBMs/script_main_SBMs_node_classification_CLUSTER_100k.sh
@@ -1,100 +1,97 @@
#!/bin/bash
-# check :
-# bash script.sh
-# tmux attach -t script_mol_opt
-# tmux detach
-# pkill python
-# bash script_main_molecules_graph_regression_ZINC.sh
+############
+# Usage
+############
+
+# bash script_main_SBMs_node_classification_CLUSTER_100k.sh
+
############
# GNNs
############
-#GatedGCN
+#MLP
#GCN
#GraphSage
-#MLP
-#GIN
-#MoNet
+#GatedGCN
#GAT
-#DiffPool
-
-
+#MoNet
+#GIN
+#3WLGNN
+#RingGNN
############
-# ZINC - 4 RUNS
+# SBM_CLUSTER - 4 RUNS
############
seed0=41
seed1=95
seed2=12
seed3=35
-code=main_molecules_graph_regression.py
-dataset=ZINC
-tmux new -s benchmark_molecules_graph_regression -d
+code=main_SBMs_node_classification.py
+tmux new -s benchmark -d
tmux send-keys "source activate benchmark_gnn" C-m
+dataset=SBM_CLUSTER
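+# Each block below launches one config on GPUs 0-3 with the four seeds in
+# parallel; the trailing `wait` holds the session until all four runs finish
+# before the next model starts.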
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_MLP_ZINC.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_MLP_ZINC.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_MLP_ZINC.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_MLP_ZINC.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MLP_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_MLP_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_MLP_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_MLP_CLUSTER_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_MLP_GATED_ZINC.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_MLP_GATED_ZINC.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_MLP_GATED_ZINC.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_MLP_GATED_ZINC.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GCN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GCN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GCN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GCN_CLUSTER_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GIN_ZINC.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GIN_ZINC.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GIN_ZINC.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GIN_ZINC.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GraphSage_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GraphSage_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GraphSage_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GraphSage_CLUSTER_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GCN_ZINC.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GCN_ZINC.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GCN_ZINC.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GCN_ZINC.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GraphSage_ZINC.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GraphSage_ZINC.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GraphSage_ZINC.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GraphSage_ZINC.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GAT_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GAT_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GAT_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GAT_CLUSTER_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GatedGCN_ZINC.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GatedGCN_ZINC.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GatedGCN_ZINC.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GatedGCN_ZINC.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MoNet_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_MoNet_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_MoNet_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_MoNet_CLUSTER_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GAT_ZINC.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GAT_ZINC.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GAT_ZINC.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GAT_ZINC.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GIN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GIN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GIN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GIN_CLUSTER_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_DiffPool_ZINC.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_DiffPool_ZINC.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_DiffPool_ZINC.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_DiffPool_ZINC.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_3WLGNN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_3WLGNN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_3WLGNN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_3WLGNN_CLUSTER_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_MoNet_ZINC.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_MoNet_ZINC.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_MoNet_ZINC.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_MoNet_ZINC.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_RingGNN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_RingGNN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_RingGNN_CLUSTER_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_RingGNN_CLUSTER_100k.json' &
wait" C-m
-tmux send-keys "tmux kill-session -t benchmark_molecules_graph_regression" C-m
-
-
+tmux send-keys "tmux kill-session -t benchmark" C-m
diff --git a/scripts/SBMs/script_main_SBMs_node_classification_CLUSTER_500k.sh b/scripts/SBMs/script_main_SBMs_node_classification_CLUSTER_500k.sh
new file mode 100644
index 000000000..7e26525b6
--- /dev/null
+++ b/scripts/SBMs/script_main_SBMs_node_classification_CLUSTER_500k.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+
+############
+# Usage
+############
+
+# bash script_main_SBMs_node_classification_CLUSTER_500k.sh
+
+
+
+############
+# GNNs
+############
+
+#MLP
+#GCN
+#GraphSage
+#GatedGCN
+#GAT
+#MoNet
+#GIN
+#3WLGNN
+#RingGNN
+
+
+
+############
+# SBM_CLUSTER - 4 RUNS
+############
+
+seed0=41
+seed1=95
+seed2=12
+seed3=35
+code=main_SBMs_node_classification.py
+dataset=SBM_CLUSTER
+tmux new -s benchmark -d
+tmux send-keys "source activate benchmark_gnn" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GCN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GCN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GCN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GCN_CLUSTER_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GraphSage_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GraphSage_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GraphSage_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GraphSage_CLUSTER_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GAT_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GAT_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GAT_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GAT_CLUSTER_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MoNet_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_MoNet_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_MoNet_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_MoNet_CLUSTER_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GIN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GIN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GIN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GIN_CLUSTER_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_3WLGNN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_3WLGNN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_3WLGNN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_3WLGNN_CLUSTER_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_RingGNN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_RingGNN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_RingGNN_CLUSTER_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_RingGNN_CLUSTER_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_3WLGNN_CLUSTER_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_3WLGNN_CLUSTER_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_3WLGNN_CLUSTER_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_3WLGNN_CLUSTER_L8_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_RingGNN_CLUSTER_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_RingGNN_CLUSTER_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_RingGNN_CLUSTER_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_RingGNN_CLUSTER_L8_500k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark" C-m
+
+
+
+
+
+
+
+
+
diff --git a/scripts/SBMs/script_main_SBMs_node_classification_CLUSTER_PE_GatedGCN_500k.sh b/scripts/SBMs/script_main_SBMs_node_classification_CLUSTER_PE_GatedGCN_500k.sh
new file mode 100644
index 000000000..1393dc1dd
--- /dev/null
+++ b/scripts/SBMs/script_main_SBMs_node_classification_CLUSTER_PE_GatedGCN_500k.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+
+############
+# Usage
+############
+
+# bash script_main_SBMs_node_classification_CLUSTER_PE_GatedGCN_500k.sh
+
+
+
+############
+# GNNs
+############
+
+#MLP
+#GCN
+#GraphSage
+#GatedGCN
+#GAT
+#MoNet
+#GIN
+#3WLGNN
+#RingGNN
+
+
+
+############
+# SBM_CLUSTER - 4 RUNS
+############
+
+seed0=41
+seed1=95
+seed2=12
+seed3=35
+code=main_SBMs_node_classification.py
+dataset=SBM_CLUSTER
+tmux new -s benchmark -d
+tmux send-keys "source activate benchmark_gnn" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER_PE_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER_PE_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER_PE_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER_PE_500k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark" C-m
+
+
+
+
+
+
+
+
+
diff --git a/scripts/SBMs/script_main_SBMs_node_classification_PATTERN_100k.sh b/scripts/SBMs/script_main_SBMs_node_classification_PATTERN_100k.sh
new file mode 100644
index 000000000..632863ec0
--- /dev/null
+++ b/scripts/SBMs/script_main_SBMs_node_classification_PATTERN_100k.sh
@@ -0,0 +1,103 @@
+#!/bin/bash
+
+
+############
+# Usage
+############
+
+# bash script_main_SBMs_node_classification_PATTERN_100k.sh
+
+
+
+############
+# GNNs
+############
+
+#MLP
+#GCN
+#GraphSage
+#GatedGCN
+#GAT
+#MoNet
+#GIN
+#3WLGNN
+#RingGNN
+
+
+
+############
+# SBM_PATTERN - 4 RUNS
+############
+
+seed0=41
+seed1=95
+seed2=12
+seed3=35
+code=main_SBMs_node_classification.py
+tmux new -s benchmark -d
+tmux send-keys "source activate benchmark_gnn" C-m
+dataset=SBM_PATTERN
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MLP_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_MLP_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_MLP_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_MLP_PATTERN_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GCN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GCN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GCN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GCN_PATTERN_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GraphSage_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GraphSage_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GraphSage_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GraphSage_PATTERN_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GAT_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GAT_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GAT_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GAT_PATTERN_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MoNet_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_MoNet_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_MoNet_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_MoNet_PATTERN_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GIN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GIN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GIN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GIN_PATTERN_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_3WLGNN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_3WLGNN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_3WLGNN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_3WLGNN_PATTERN_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_RingGNN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_RingGNN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_RingGNN_PATTERN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_RingGNN_PATTERN_100k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark" C-m
+
+
+
+
+
+
+
+
+
diff --git a/scripts/SBMs/script_main_SBMs_node_classification_PATTERN_500k.sh b/scripts/SBMs/script_main_SBMs_node_classification_PATTERN_500k.sh
new file mode 100644
index 000000000..a43612be8
--- /dev/null
+++ b/scripts/SBMs/script_main_SBMs_node_classification_PATTERN_500k.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+
+############
+# Usage
+############
+
+# bash script_main_SBMs_node_classification_PATTERN_500k.sh
+
+
+
+############
+# GNNs
+############
+
+#MLP
+#GCN
+#GraphSage
+#GatedGCN
+#GAT
+#MoNet
+#GIN
+#3WLGNN
+#RingGNN
+
+
+
+############
+# SBM_PATTERN - 4 RUNS
+############
+
+seed0=41
+seed1=95
+seed2=12
+seed3=35
+code=main_SBMs_node_classification.py
+tmux new -s benchmark -d
+tmux send-keys "source activate benchmark_gnn" C-m
+dataset=SBM_PATTERN
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GCN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GCN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GCN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GCN_PATTERN_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GraphSage_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GraphSage_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GraphSage_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GraphSage_PATTERN_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GAT_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GAT_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GAT_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GAT_PATTERN_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MoNet_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_MoNet_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_MoNet_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_MoNet_PATTERN_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GIN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GIN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GIN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GIN_PATTERN_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_3WLGNN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_3WLGNN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_3WLGNN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_3WLGNN_PATTERN_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_RingGNN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_RingGNN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_RingGNN_PATTERN_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_RingGNN_PATTERN_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_3WLGNN_PATTERN_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_3WLGNN_PATTERN_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_3WLGNN_PATTERN_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_3WLGNN_PATTERN_L8_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_RingGNN_PATTERN_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_RingGNN_PATTERN_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_RingGNN_PATTERN_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --init_lr 1e-5 --config 'configs/SBMs_node_clustering_RingGNN_PATTERN_L8_500k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark" C-m
+
+
+
+
+
+
+
+
+
diff --git a/scripts/SBMs/script_main_SBMs_node_classification_PATTERN_PE_GatedGCN_500k.sh b/scripts/SBMs/script_main_SBMs_node_classification_PATTERN_PE_GatedGCN_500k.sh
new file mode 100644
index 000000000..2983d2c1c
--- /dev/null
+++ b/scripts/SBMs/script_main_SBMs_node_classification_PATTERN_PE_GatedGCN_500k.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+
+############
+# Usage
+############
+
+# bash script_main_SBMs_node_classification_PATTERN_PE_GatedGCN_500k.sh
+
+
+
+############
+# GNNs
+############
+
+#MLP
+#GCN
+#GraphSage
+#GatedGCN
+#GAT
+#MoNet
+#GIN
+#3WLGNN
+#RingGNN
+
+
+
+############
+# SBM_PATTERN - 4 RUNS
+############
+
+seed0=41
+seed1=95
+seed2=12
+seed3=35
+code=main_SBMs_node_classification.py
+dataset=SBM_PATTERN
+tmux new -s benchmark -d
+tmux send-keys "source activate benchmark_gnn" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN_PE_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN_PE_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN_PE_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GatedGCN_PATTERN_PE_500k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark" C-m
+
+
+
+
+
+
+
+
+
diff --git a/scripts/StatisticalResults/generate_statistics_molecules_graph_regression_ZINC_100k.ipynb b/scripts/StatisticalResults/generate_statistics_molecules_graph_regression_ZINC_100k.ipynb
new file mode 100644
index 000000000..ac087518d
--- /dev/null
+++ b/scripts/StatisticalResults/generate_statistics_molecules_graph_regression_ZINC_100k.ipynb
@@ -0,0 +1,207 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import numpy as np"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "f_dir = \"./small\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "Dataset & #Seeds & Model & L & Param & Acc_test & Acc_train & Speed & Epoch/Time\n",
+ "ZINC & 4 & MLP & 4 & 108975 & 0.706$\\pm$0.006 & 0.644$\\pm$0.005 & 116.75 & 1.01s/0.03hr\n",
+ "\n",
+ "Dataset & #Seeds & Model & L & Param & Acc_test & Acc_train & Speed & Epoch/Time\n",
+ "ZINC & 4 & GCN & 4 & 103077 & 0.459$\\pm$0.006 & 0.343$\\pm$0.011 & 196.25 & 2.89s/0.16hr\n",
+ "\n",
+ "Dataset & #Seeds & Model & L & Param & Acc_test & Acc_train & Speed & Epoch/Time\n",
+ "ZINC & 4 & GraphSage & 4 & 94977 & 0.468$\\pm$0.003 & 0.251$\\pm$0.004 & 147.25 & 3.74s/0.15hr\n",
+ "\n",
+ "Dataset & #Seeds & Model & L & Param & Acc_test & Acc_train & Speed & Epoch/Time\n",
+ "ZINC & 4 & GatedGCN & 4 & 105735 & 0.435$\\pm$0.011 & 0.287$\\pm$0.014 & 173.50 & 5.76s/0.28hr\n",
+ "\n",
+ "Dataset & #Seeds & Model & L & Param & Acc_test & Acc_train & Speed & Epoch/Time\n",
+ "ZINC & 4 & GAT & 4 & 102385 & 0.475$\\pm$0.007 & 0.317$\\pm$0.006 & 137.50 & 2.93s/0.11hr\n",
+ "\n",
+ "Dataset & #Seeds & Model & L & Param & Acc_test & Acc_train & Speed & Epoch/Time\n",
+ "ZINC & 4 & MoNet & 4 & 106002 & 0.388$\\pm$0.008 & 0.295$\\pm$0.019 & 154.75 & 12.33s/0.54hr\n",
+ "\n",
+ "Dataset & #Seeds & Model & L & Param & Acc_test & Acc_train & Speed & Epoch/Time\n",
+ "ZINC & 4 & GIN & 4 & 103079 & 0.387$\\pm$0.015 & 0.319$\\pm$0.015 & 153.25 & 2.29s/0.10hr\n",
+ "\n",
+ "Dataset & #Seeds & Model & L & Param & Acc_test & Acc_train & Speed & Epoch/Time\n",
+ "ZINC & 4 & 3WLGNN & 3 & 102150 & 0.407$\\pm$0.028 & 0.272$\\pm$0.037 & 111.25 & 286.23s/8.88hr\n",
+ "\n",
+ "Dataset & #Seeds & Model & L & Param & Acc_test & Acc_train & Speed & Epoch/Time\n",
+ "ZINC & 4 & RingGNN & 2 & 97978 & 0.512$\\pm$0.023 & 0.383$\\pm$0.020 & 90.25 & 327.65s/8.32hr\n",
+ "\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "file = []\n",
+ "dataset = []\n",
+ "model = []\n",
+ "layer = []\n",
+ "params = []\n",
+ "acc_test = []\n",
+ "acc_train = []\n",
+ "convergence = []\n",
+ "total_time = []\n",
+ "epoch_time = []\n",
+ "\n",
+ "for filename in os.listdir(f_dir):\n",
+ "\n",
+ " if filename[-4:] == \".txt\":\n",
+ " file.append( filename )\n",
+ " \n",
+ " with open(os.path.join(f_dir, filename), \"r\") as f:\n",
+ " lines = f.readlines()\n",
+ "\n",
+ " for line in lines:\n",
+ " #print('h1c',line)\n",
+ "\n",
+ " if line[:9] == \"Dataset: \":\n",
+ " dataset.append( line[9:-2] )\n",
+ "\n",
+ " if line[:7] == \"Model: \":\n",
+ " model.append( line[7:-1] )\n",
+ "\n",
+ " if line[:17] == \"net_params={'L': \":\n",
+ " layer.append( line[17:18] )\n",
+ " \n",
+ " if line[:18] == \"Total Parameters: \":\n",
+ " params.append( line[18:-1] )\n",
+ "\n",
+ " if line[:10] == \"TEST MAE: \":\n",
+ " acc_test.append( float(line[10:-1]) )\n",
+ " \n",
+ " if line[:11] == \"TRAIN MAE: \":\n",
+ " acc_train.append( float(line[11:-1]) )\n",
+ " \n",
+ " if line[4:31] == \"Convergence Time (Epochs): \":\n",
+ " convergence.append( float(line[31:-1]) )\n",
+ "\n",
+ " if line[:18] == \"Total Time Taken: \":\n",
+ " total_time.append( float(line[18:-4]) )\n",
+ "\n",
+ " if line[:24] == 'Average Time Per Epoch: ':\n",
+ " epoch_time.append( float(line[24:-2]) )\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ "# print('file',file)\n",
+ "# print('dataset',dataset)\n",
+ "# print('model',model)\n",
+ "# print('layer',layer)\n",
+ "# print('params',params)\n",
+ "# print('acc_test',acc_test)\n",
+ "# print('acc_train',acc_train)\n",
+ "# print('convergence',convergence)\n",
+ "# print('total_time',total_time)\n",
+ "# print('epoch_time',epoch_time)\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "list_datasets = ['ZINC','MNIST','CIFAR10','SBM_PATTERN','SBM_CLUSTER','TSP','COLLAB','CSL']\n",
+ "#print('list_datasets',list_datasets)\n",
+ "\n",
+ "list_gnns = ['MLP','GCN','GraphSage','GatedGCN','GAT','MoNet','GIN','3WLGNN','RingGNN']\n",
+ "#print('list_gnns',list_gnns)\n",
+ "\n",
+ "\n",
+ "#list_datasets = ['SBM_CLUSTER']\n",
+ "for data in list_datasets:\n",
+ " #print(data)\n",
+ " \n",
+ " for gnn in list_gnns:\n",
+ " #print('gnn:',gnn)\n",
+ " \n",
+ " acc_test_one_gnn = []\n",
+ " acc_train_one_gnn = []\n",
+ " convergence_one_gnn = []\n",
+ " total_time_one_gnn = []\n",
+ " epoch_time_one_gnn = []\n",
+ " nb_seeds = 0\n",
+ " \n",
+ " for i in range(len(file)):\n",
+ " \n",
+ " if data==dataset[i] and gnn==model[i]:\n",
+ " params_one_gnn = params[i]\n",
+ " acc_test_one_gnn.append(acc_test[i])\n",
+ " acc_train_one_gnn.append(acc_train[i])\n",
+ " convergence_one_gnn.append(convergence[i])\n",
+ " total_time_one_gnn.append(total_time[i])\n",
+ " epoch_time_one_gnn.append(epoch_time[i])\n",
+ " L = layer[i]\n",
+ " nb_seeds = nb_seeds + 1\n",
+ " \n",
+ " \n",
+ " \n",
+ " if len(acc_test_one_gnn)>0:\n",
+ " latex_str = f\"{data} & {nb_seeds} & {gnn} & {L} & {params_one_gnn} & {np.mean(acc_test_one_gnn):.3f}$\\pm${np.std(acc_test_one_gnn):.3f} & {np.mean(acc_train_one_gnn):.3f}$\\pm${np.std(acc_train_one_gnn):.3f} & {np.mean(convergence_one_gnn):.2f} & {np.mean(epoch_time_one_gnn):.2f}s/{np.mean(total_time_one_gnn):.2f}hr\"\n",
+ " print(\"\\nDataset & #Seeds & Model & L & Param & Acc_test & Acc_train & Speed & Epoch/Time\\n{}\".format(latex_str,nb_seeds))\n",
+ "\n",
+ " \n",
+ "\n",
+ "print(\"\\n\")\n",
+ "\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/script_main_superpixels_graph_classification_CIFAR10.sh b/scripts/SuperPixels/script_main_superpixels_graph_classification_CIFAR10_100k.sh
similarity index 51%
rename from script_main_superpixels_graph_classification_CIFAR10.sh
rename to scripts/SuperPixels/script_main_superpixels_graph_classification_CIFAR10_100k.sh
index 138c76e4c..554c89ca3 100644
--- a/script_main_superpixels_graph_classification_CIFAR10.sh
+++ b/scripts/SuperPixels/script_main_superpixels_graph_classification_CIFAR10_100k.sh
@@ -1,34 +1,32 @@
#!/bin/bash
-# check :
-# bash script.sh
-# tmux attach -t script_mol_opt
-# tmux detach
-# pkill python
-# bash script_main_superpixels_graph_classification_CIFAR10.sh
+############
+# Usage
+############
+
+# bash script_main_superpixels_graph_classification_CIFAR10_100k.sh
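+# (The _100k suffix on the configs denotes the ~100k-parameter model budget;
+# to monitor the detached run: tmux attach -t benchmark_CIFAR10.)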
+
############
# GNNs
############
-#GatedGCN
+#MLP
#GCN
#GraphSage
-#MLP
-#GIN
-#MoNet
+#GatedGCN
#GAT
-#DiffPool
-
-
-
+#MoNet
+#GIN
+#3WLGNN
+#RingGNN
############
-# CIFAR - 4 RUNS
+# CIFAR10 - 4 RUNS
############
seed0=41
@@ -36,66 +34,64 @@ seed1=95
seed2=12
seed3=35
code=main_superpixels_graph_classification.py
-tmux new -s benchmark_superpixels_graph_classification -d
+tmux new -s benchmark_CIFAR10 -d
tmux send-keys "source activate benchmark_gnn" C-m
dataset=CIFAR10
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MLP_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_MLP_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_MLP_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_MLP_CIFAR10.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MLP_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_MLP_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_MLP_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_MLP_CIFAR10_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MLP_GATED_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_MLP_GATED_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_MLP_GATED_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_MLP_GATED_CIFAR10.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GCN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GCN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GCN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GCN_CIFAR10_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GIN_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GIN_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GIN_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GIN_CIFAR10.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GraphSage_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GraphSage_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GraphSage_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GraphSage_CIFAR10_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GCN_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GCN_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GCN_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GCN_CIFAR10.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GatedGCN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GatedGCN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GatedGCN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GatedGCN_CIFAR10_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GraphSage_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GraphSage_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GraphSage_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GraphSage_CIFAR10.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GAT_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GAT_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GAT_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GAT_CIFAR10_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GatedGCN_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GatedGCN_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GatedGCN_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GatedGCN_CIFAR10.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MoNet_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_MoNet_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_MoNet_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_MoNet_CIFAR10_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GAT_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GAT_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GAT_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GAT_CIFAR10.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GIN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GIN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GIN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GIN_CIFAR10_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_DiffPool_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_DiffPool_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_DiffPool_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_DiffPool_CIFAR10.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_3WLGNN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_3WLGNN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_3WLGNN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_3WLGNN_CIFAR10_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MoNet_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_MoNet_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_MoNet_CIFAR10.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_MoNet_CIFAR10.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_RingGNN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_RingGNN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_RingGNN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_RingGNN_CIFAR10_100k.json' &
wait" C-m
-tmux send-keys "tmux kill-session -t benchmark_superpixels_graph_classification" C-m
-
-
+tmux send-keys "tmux kill-session -t benchmark_CIFAR10" C-m
diff --git a/scripts/SuperPixels/script_main_superpixels_graph_classification_CIFAR10_500k.sh b/scripts/SuperPixels/script_main_superpixels_graph_classification_CIFAR10_500k.sh
new file mode 100644
index 000000000..9f1c4b26d
--- /dev/null
+++ b/scripts/SuperPixels/script_main_superpixels_graph_classification_CIFAR10_500k.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+
+############
+# Usage
+############
+
+# bash script_main_superpixels_graph_classification_CIFAR10_500k.sh
+
+
+
+############
+# GNNs
+############
+
+
+#3WLGNN
+#RingGNN
+
+
+
+############
+# CIFAR10 - 4 RUNS
+############
+
+seed0=41
+seed1=95
+seed2=12
+seed3=35
+code=main_superpixels_graph_classification.py
+tmux new -s benchmark_CIFAR10 -d
+tmux send-keys "source activate benchmark_gnn" C-m
+dataset=CIFAR10
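+# 500k budgets: either override hidden_dim of the 100k configs from the CLI, or use the dedicated L8_500k configs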
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --hidden_dim 180 --config 'configs/superpixels_graph_classification_3WLGNN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --hidden_dim 180 --config 'configs/superpixels_graph_classification_3WLGNN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --hidden_dim 180 --config 'configs/superpixels_graph_classification_3WLGNN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --hidden_dim 180 --config 'configs/superpixels_graph_classification_3WLGNN_CIFAR10_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_3WLGNN_CIFAR10_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_3WLGNN_CIFAR10_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_3WLGNN_CIFAR10_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_3WLGNN_CIFAR10_L8_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --hidden_dim 100 --config 'configs/superpixels_graph_classification_RingGNN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --hidden_dim 100 --config 'configs/superpixels_graph_classification_RingGNN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --hidden_dim 100 --config 'configs/superpixels_graph_classification_RingGNN_CIFAR10_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --hidden_dim 100 --config 'configs/superpixels_graph_classification_RingGNN_CIFAR10_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_RingGNN_CIFAR10_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_RingGNN_CIFAR10_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_RingGNN_CIFAR10_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_RingGNN_CIFAR10_L8_500k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark_CIFAR10" C-m
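
The 500k scripts reuse the 100k JSON configs and scale 3WLGNN/RingGNN up purely through command-line overrides (`--hidden_dim 180` and `--hidden_dim 100` above). A sketch of the argparse-over-JSON layering this relies on; the `net_params`/`hidden_dim` keys mirror the configs, but the exact override code in the main scripts may differ:

```
import argparse
import json

# CLI flags, when supplied, take precedence over the JSON config values.
parser = argparse.ArgumentParser()
parser.add_argument("--config", required=True, help="path to a *_100k.json config")
parser.add_argument("--hidden_dim", type=int, default=None)
args = parser.parse_args()

with open(args.config) as f:
    config = json.load(f)

net_params = config["net_params"]
if args.hidden_dim is not None:
    # e.g. --hidden_dim 180 grows the 100k 3WLGNN to roughly 500k parameters
    net_params["hidden_dim"] = args.hidden_dim
```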
diff --git a/script_main_superpixels_graph_classification_MNIST.sh b/scripts/SuperPixels/script_main_superpixels_graph_classification_MNIST_100k.sh
similarity index 51%
rename from script_main_superpixels_graph_classification_MNIST.sh
rename to scripts/SuperPixels/script_main_superpixels_graph_classification_MNIST_100k.sh
index 0b1d66efd..54a505dba 100644
--- a/script_main_superpixels_graph_classification_MNIST.sh
+++ b/scripts/SuperPixels/script_main_superpixels_graph_classification_MNIST_100k.sh
@@ -1,32 +1,32 @@
#!/bin/bash
-# check :
-# bash script.sh
-# tmux attach -t script_mol_opt
-# tmux detach
-# pkill python
-# bash script_main_superpixels_graph_classification_MNIST.sh
+############
+# Usage
+############
+
+# bash script_main_superpixels_graph_classification_MNIST_100k.sh
+
############
# GNNs
############
-#GatedGCN
+#MLP
#GCN
#GraphSage
-#MLP
-#GIN
-#MoNet
+#GatedGCN
#GAT
-#DiffPool
-
+#MoNet
+#GIN
+#3WLGNN
+#RingGNN
############
-# MNIST - 4 RUNS
+# MNIST - 4 RUNS
############
seed0=41
@@ -34,66 +34,64 @@ seed1=95
seed2=12
seed3=35
code=main_superpixels_graph_classification.py
-tmux new -s benchmark_superpixels_graph_classification -d
+tmux new -s benchmark -d
tmux send-keys "source activate benchmark_gnn" C-m
dataset=MNIST
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MLP_MNIST.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_MLP_MNIST.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_MLP_MNIST.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_MLP_MNIST.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MLP_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_MLP_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_MLP_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_MLP_MNIST_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MLP_GATED_MNIST.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_MLP_GATED_MNIST.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_MLP_GATED_MNIST.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_MLP_GATED_MNIST.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GCN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GCN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GCN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GCN_MNIST_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GIN_MNIST.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GIN_MNIST.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GIN_MNIST.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GIN_MNIST.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GraphSage_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GraphSage_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GraphSage_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GraphSage_MNIST_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GCN_MNIST.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GCN_MNIST.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GCN_MNIST.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GCN_MNIST.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GatedGCN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GatedGCN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GatedGCN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GatedGCN_MNIST_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GraphSage_MNIST.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GraphSage_MNIST.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GraphSage_MNIST.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GraphSage_MNIST.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GAT_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GAT_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GAT_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GAT_MNIST_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GatedGCN_MNIST.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GatedGCN_MNIST.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GatedGCN_MNIST.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GatedGCN_MNIST.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MoNet_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_MoNet_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_MoNet_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_MoNet_MNIST_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GAT_MNIST.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GAT_MNIST.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GAT_MNIST.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GAT_MNIST.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_GIN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_GIN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_GIN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_GIN_MNIST_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_DiffPool_MNIST.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_DiffPool_MNIST.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_DiffPool_MNIST.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_DiffPool_MNIST.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_3WLGNN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_3WLGNN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_3WLGNN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_3WLGNN_MNIST_100k.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_MoNet_MNIST.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_MoNet_MNIST.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_MoNet_MNIST.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_MoNet_MNIST.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_RingGNN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_RingGNN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_RingGNN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_RingGNN_MNIST_100k.json' &
wait" C-m
-tmux send-keys "tmux kill-session -t benchmark_superpixels_graph_classification" C-m
-
-
+tmux send-keys "tmux kill-session -t benchmark" C-m
diff --git a/scripts/SuperPixels/script_main_superpixels_graph_classification_MNIST_500k.sh b/scripts/SuperPixels/script_main_superpixels_graph_classification_MNIST_500k.sh
new file mode 100644
index 000000000..ac94eb149
--- /dev/null
+++ b/scripts/SuperPixels/script_main_superpixels_graph_classification_MNIST_500k.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+
+############
+# Usage
+############
+
+# bash script_main_superpixels_graph_classification_MNIST_500k.sh
+
+
+
+############
+# GNNs
+############
+
+#3WLGNN
+#RingGNN
+
+
+
+############
+# MNIST - 4 RUNS
+############
+
+seed0=41
+seed1=95
+seed2=12
+seed3=35
+code=main_superpixels_graph_classification.py
+tmux new -s benchmark -d
+tmux send-keys "source activate benchmark_gnn" C-m
+dataset=MNIST
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --hidden_dim 180 --config 'configs/superpixels_graph_classification_3WLGNN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --hidden_dim 180 --config 'configs/superpixels_graph_classification_3WLGNN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --hidden_dim 180 --config 'configs/superpixels_graph_classification_3WLGNN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --hidden_dim 180 --config 'configs/superpixels_graph_classification_3WLGNN_MNIST_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_3WLGNN_MNIST_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_3WLGNN_MNIST_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_3WLGNN_MNIST_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_3WLGNN_MNIST_L8_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --hidden_dim 101 --config 'configs/superpixels_graph_classification_RingGNN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --hidden_dim 101 --config 'configs/superpixels_graph_classification_RingGNN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --hidden_dim 101 --config 'configs/superpixels_graph_classification_RingGNN_MNIST_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --hidden_dim 101 --config 'configs/superpixels_graph_classification_RingGNN_MNIST_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/superpixels_graph_classification_RingGNN_MNIST_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/superpixels_graph_classification_RingGNN_MNIST_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/superpixels_graph_classification_RingGNN_MNIST_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/superpixels_graph_classification_RingGNN_MNIST_L8_500k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark" C-m
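
Every block in these scripts follows the same idiom: one process per GPU, one seed per process, then a shell-level `wait`. For completeness, a hypothetical Python equivalent of a single block (same flags as the script above), should the runs need to be driven programmatically:

```
import subprocess

SEEDS = [41, 95, 12, 35]   # the four seeds used throughout these scripts
CODE = "main_superpixels_graph_classification.py"
CONFIG = "configs/superpixels_graph_classification_3WLGNN_MNIST_L8_500k.json"

# Launch one run per GPU in parallel, mirroring the shell's `... & ... & wait`.
procs = [
    subprocess.Popen(["python", CODE,
                      "--dataset", "MNIST",
                      "--gpu_id", str(gpu),
                      "--seed", str(seed),
                      "--config", CONFIG])
    for gpu, seed in enumerate(SEEDS)
]
for p in procs:
    p.wait()
```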
diff --git a/scripts/TSP/script_main_TSP_edge_classification_100k.sh b/scripts/TSP/script_main_TSP_edge_classification_100k.sh
new file mode 100644
index 000000000..3569f4e0f
--- /dev/null
+++ b/scripts/TSP/script_main_TSP_edge_classification_100k.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+############
+# Usage
+############
+
+# bash script_main_TSP_edge_classification_100k.sh
+
+
+
+
+############
+# GNNs
+############
+
+#GatedGCN
+#GCN
+#GraphSage
+#MLP
+#GIN
+#MoNet
+#GAT
+#3WLGNN
+#RingGNN
+
+
+
+
+
+############
+# TSP - 4 RUNS
+############
+
+seed0=41
+seed1=42
+seed2=9
+seed3=23
+code=main_TSP_edge_classification.py
+tmux new -s benchmark_TSP_edge_classification -d
+tmux send-keys "source activate benchmark_gnn" C-m
+dataset=TSP
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_MLP_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_MLP_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_MLP_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_MLP_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GCN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GCN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GCN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GCN_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GIN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GIN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GIN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GIN_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GraphSage_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GraphSage_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GraphSage_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GraphSage_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GatedGCN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GatedGCN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GatedGCN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GatedGCN_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GatedGCN_100k.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GatedGCN_100k.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GatedGCN_100k.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GatedGCN_100k.json' --edge_feat True &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GAT_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GAT_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GAT_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GAT_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_MoNet_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_MoNet_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_MoNet_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_MoNet_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_3WLGNN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_3WLGNN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_3WLGNN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_3WLGNN_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_RingGNN_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_RingGNN_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_RingGNN_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_RingGNN_100k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark_TSP_edge_classification" C-m
diff --git a/script_main_SBMs_node_classification_CLUSTER.sh b/scripts/TSP/script_main_TSP_edge_classification_edge_feature_analysis.sh
similarity index 50%
rename from script_main_SBMs_node_classification_CLUSTER.sh
rename to scripts/TSP/script_main_TSP_edge_classification_edge_feature_analysis.sh
index 8b7d2a6ce..44a574da8 100644
--- a/script_main_SBMs_node_classification_CLUSTER.sh
+++ b/scripts/TSP/script_main_TSP_edge_classification_edge_feature_analysis.sh
@@ -2,11 +2,11 @@
# check :
# bash script.sh
-# tmux attach -t script_mol_opt
+# tmux attach -t script_tsp
# tmux detach
# pkill python
-# bash script_main_SBMs_node_classification_CLUSTER.sh
+# bash script_main_TSP_edge_classification_edge_feature_analysis.sh
@@ -16,85 +16,70 @@
############
#GatedGCN
-#GCN
-#GraphSage
-#MLP
-#GIN
-#MoNet
#GAT
-#DiffPool
############
-# SBM_CLUSTER - 4 RUNS
+# TSP - 4 RUNS
############
seed0=41
-seed1=95
-seed2=12
-seed3=35
-code=main_SBMs_node_classification.py
-tmux new -s benchmark_SBMs_node_classification -d
+seed1=42
+seed2=9
+seed3=23
+code=main_TSP_edge_classification.py
+tmux new -s benchmark_TSP_edge_classification -d
tmux send-keys "source activate benchmark_gnn" C-m
-dataset=SBM_CLUSTER
+dataset=TSP
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MLP_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_MLP_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_MLP_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_MLP_CLUSTER.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GatedGCN_isotropic.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GatedGCN_isotropic.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GatedGCN_isotropic.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GatedGCN_isotropic.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MLP_GATED_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_MLP_GATED_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_MLP_GATED_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_MLP_GATED_CLUSTER.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GatedGCN_edgefeat.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GatedGCN_edgefeat.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GatedGCN_edgefeat.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GatedGCN_edgefeat.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GIN_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GIN_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GIN_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GIN_CLUSTER.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GatedGCN_edgereprfeat.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GatedGCN_edgereprfeat.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GatedGCN_edgereprfeat.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GatedGCN_edgereprfeat.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GCN_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GCN_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GCN_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GCN_CLUSTER.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GatedGCN_edgereprfeat.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GatedGCN_edgereprfeat.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GatedGCN_edgereprfeat.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GatedGCN_edgereprfeat.json' --edge_feat True &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GraphSage_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GraphSage_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GraphSage_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GraphSage_CLUSTER.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GAT_isotropic.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GAT_isotropic.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GAT_isotropic.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GAT_isotropic.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GatedGCN_CLUSTER.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GAT_edgefeat.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GAT_edgefeat.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GAT_edgefeat.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GAT_edgefeat.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_GAT_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_GAT_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_GAT_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_GAT_CLUSTER.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GAT_edgereprfeat.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GAT_edgereprfeat.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GAT_edgereprfeat.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GAT_edgereprfeat.json' &
wait" C-m
tmux send-keys "
-python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/SBMs_node_clustering_MoNet_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/SBMs_node_clustering_MoNet_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/SBMs_node_clustering_MoNet_CLUSTER.json' &
-python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/SBMs_node_clustering_MoNet_CLUSTER.json' &
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/TSP_edge_classification_GAT_edgereprfeat.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/TSP_edge_classification_GAT_edgereprfeat.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/TSP_edge_classification_GAT_edgereprfeat.json' --edge_feat True &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/TSP_edge_classification_GAT_edgereprfeat.json' --edge_feat True &
wait" C-m
-tmux send-keys "tmux kill-session -t benchmark_SBMs_node_classification" C-m
-
-
-
-
-
-
-
-
-
+tmux send-keys "tmux kill-session -t benchmark_TSP_edge_classification" C-m
diff --git a/scripts/TU/script_main_TUs_graph_classification_100k_seed1.sh b/scripts/TU/script_main_TUs_graph_classification_100k_seed1.sh
new file mode 100644
index 000000000..d6ff92621
--- /dev/null
+++ b/scripts/TU/script_main_TUs_graph_classification_100k_seed1.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+############
+# Usage
+############
+
+# bash script_main_TUs_graph_classification_100k_seed1.sh
+
+
+############
+# GNNs
+############
+
+#GatedGCN
+#GCN
+#GraphSage
+#MLP
+#GIN
+#MoNet
+#GAT
+#3WLGNN
+#RingGNN
+
+
+
+
+
+############
+# ENZYMES & DD & PROTEINS_full
+############
+seed=41
+code=main_TUs_graph_classification.py
+tmux new -s benchmark_TUs_graph_classification -d
+tmux send-keys "source activate benchmark_gnn" C-m
+dataset=ENZYMES
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_GatedGCN_ENZYMES_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed --config 'configs/TUs_graph_classification_GCN_ENZYMES_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed --config 'configs/TUs_graph_classification_GraphSage_ENZYMES_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed --config 'configs/TUs_graph_classification_MLP_ENZYMES_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_GIN_ENZYMES_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed --config 'configs/TUs_graph_classification_MoNet_ENZYMES_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed --config 'configs/TUs_graph_classification_GAT_ENZYMES_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed --config 'configs/TUs_graph_classification_3WLGNN_ENZYMES_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_RingGNN_ENZYMES_100k.json' &
+wait" C-m
+dataset=DD
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_GatedGCN_DD_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed --config 'configs/TUs_graph_classification_GCN_DD_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed --config 'configs/TUs_graph_classification_GraphSage_DD_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed --config 'configs/TUs_graph_classification_MLP_DD_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_GIN_DD_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed --config 'configs/TUs_graph_classification_MoNet_DD_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed --config 'configs/TUs_graph_classification_GAT_DD_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed --config 'configs/TUs_graph_classification_3WLGNN_DD_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_RingGNN_DD_100k.json' &
+wait" C-m
+dataset=PROTEINS_full
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_GatedGCN_PROTEINS_full_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed --config 'configs/TUs_graph_classification_GCN_PROTEINS_full_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed --config 'configs/TUs_graph_classification_GraphSage_PROTEINS_full_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed --config 'configs/TUs_graph_classification_MLP_PROTEINS_full_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_GIN_PROTEINS_full_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed --config 'configs/TUs_graph_classification_MoNet_PROTEINS_full_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed --config 'configs/TUs_graph_classification_GAT_PROTEINS_full_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed --config 'configs/TUs_graph_classification_3WLGNN_PROTEINS_full_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_RingGNN_PROTEINS_full_100k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark_TUs_graph_classification" C-m
+
+
diff --git a/scripts/TU/script_main_TUs_graph_classification_100k_seed2.sh b/scripts/TU/script_main_TUs_graph_classification_100k_seed2.sh
new file mode 100644
index 000000000..132dc65c6
--- /dev/null
+++ b/scripts/TU/script_main_TUs_graph_classification_100k_seed2.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+############
+# Usage
+############
+
+# bash script_main_TUs_graph_classification_100k_seed2.sh
+
+
+############
+# GNNs
+############
+
+#GatedGCN
+#GCN
+#GraphSage
+#MLP
+#GIN
+#MoNet
+#GAT
+#3WLGNN
+#RingGNN
+
+
+
+
+
+############
+# ENZYMES & DD & PROTEINS_full
+############
+seed=95
+code=main_TUs_graph_classification.py
+tmux new -s benchmark_TUs_graph_classification -d
+tmux send-keys "source activate benchmark_gnn" C-m
+dataset=ENZYMES
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_GatedGCN_ENZYMES_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed --config 'configs/TUs_graph_classification_GCN_ENZYMES_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed --config 'configs/TUs_graph_classification_GraphSage_ENZYMES_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed --config 'configs/TUs_graph_classification_MLP_ENZYMES_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_GIN_ENZYMES_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed --config 'configs/TUs_graph_classification_MoNet_ENZYMES_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed --config 'configs/TUs_graph_classification_GAT_ENZYMES_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed --config 'configs/TUs_graph_classification_3WLGNN_ENZYMES_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_RingGNN_ENZYMES_100k.json' &
+wait" C-m
+dataset=DD
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_GatedGCN_DD_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed --config 'configs/TUs_graph_classification_GCN_DD_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed --config 'configs/TUs_graph_classification_GraphSage_DD_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed --config 'configs/TUs_graph_classification_MLP_DD_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_GIN_DD_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed --config 'configs/TUs_graph_classification_MoNet_DD_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed --config 'configs/TUs_graph_classification_GAT_DD_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed --config 'configs/TUs_graph_classification_3WLGNN_DD_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_RingGNN_DD_100k.json' &
+wait" C-m
+dataset=PROTEINS_full
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_GatedGCN_PROTEINS_full_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed --config 'configs/TUs_graph_classification_GCN_PROTEINS_full_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed --config 'configs/TUs_graph_classification_GraphSage_PROTEINS_full_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed --config 'configs/TUs_graph_classification_MLP_PROTEINS_full_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_GIN_PROTEINS_full_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed --config 'configs/TUs_graph_classification_MoNet_PROTEINS_full_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed --config 'configs/TUs_graph_classification_GAT_PROTEINS_full_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed --config 'configs/TUs_graph_classification_3WLGNN_PROTEINS_full_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed --config 'configs/TUs_graph_classification_RingGNN_PROTEINS_full_100k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark_TUs_graph_classification" C-m
+
+
+
diff --git a/scripts/TensorBoard/script_tensorboard.sh b/scripts/TensorBoard/script_tensorboard.sh
new file mode 100644
index 000000000..ad22d87cc
--- /dev/null
+++ b/scripts/TensorBoard/script_tensorboard.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+############
+# Usage
+############
+
+# bash script_tensorboard.sh
+
+
+
+
+
+tmux new -s tensorboard -d
+tmux send-keys "source activate benchmark_gnn" C-m
+tmux send-keys "tensorboard --logdir out/ --port 6006" C-m
diff --git a/scripts/ZINC/script_main_molecules_graph_regression_ZINC_100k.sh b/scripts/ZINC/script_main_molecules_graph_regression_ZINC_100k.sh
new file mode 100644
index 000000000..f65dedd2e
--- /dev/null
+++ b/scripts/ZINC/script_main_molecules_graph_regression_ZINC_100k.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+
+
+############
+# Usage
+############
+
+# bash script_main_molecules_graph_regression_ZINC_100k.sh
+
+
+
+############
+# GNNs
+############
+
+#MLP
+#GCN
+#GraphSage
+#GatedGCN
+#GAT
+#MoNet
+#GIN
+#3WLGNN
+#RingGNN
+
+
+
+############
+# ZINC - 4 RUNS
+############
+
+seed0=41
+seed1=95
+seed2=12
+seed3=35
+code=main_molecules_graph_regression.py
+dataset=ZINC
+tmux new -s benchmark -d
+tmux send-keys "source activate benchmark_gnn" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_MLP_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_MLP_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_MLP_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_MLP_ZINC_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GCN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GCN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GCN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GCN_ZINC_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GraphSage_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GraphSage_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GraphSage_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GraphSage_ZINC_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GatedGCN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GatedGCN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GatedGCN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GatedGCN_ZINC_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GAT_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GAT_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GAT_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GAT_ZINC_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_MoNet_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_MoNet_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_MoNet_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_MoNet_ZINC_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GIN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GIN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GIN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GIN_ZINC_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_3WLGNN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_3WLGNN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_3WLGNN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_3WLGNN_ZINC_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_RingGNN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_RingGNN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_RingGNN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_RingGNN_ZINC_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --edge_feat True --config 'configs/molecules_graph_regression_GatedGCN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --edge_feat True --config 'configs/molecules_graph_regression_GatedGCN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --edge_feat True --config 'configs/molecules_graph_regression_GatedGCN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --edge_feat True --config 'configs/molecules_graph_regression_GatedGCN_ZINC_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --edge_feat True --config 'configs/molecules_graph_regression_3WLGNN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --edge_feat True --config 'configs/molecules_graph_regression_3WLGNN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --edge_feat True --config 'configs/molecules_graph_regression_3WLGNN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --edge_feat True --config 'configs/molecules_graph_regression_3WLGNN_ZINC_100k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --edge_feat True --config 'configs/molecules_graph_regression_RingGNN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --edge_feat True --config 'configs/molecules_graph_regression_RingGNN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --edge_feat True --config 'configs/molecules_graph_regression_RingGNN_ZINC_100k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --edge_feat True --config 'configs/molecules_graph_regression_RingGNN_ZINC_100k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark" C-m
+
+
+
+
+
+
+
+
+
+
+
diff --git a/scripts/ZINC/script_main_molecules_graph_regression_ZINC_500k.sh b/scripts/ZINC/script_main_molecules_graph_regression_ZINC_500k.sh
new file mode 100644
index 000000000..7e6f2fbed
--- /dev/null
+++ b/scripts/ZINC/script_main_molecules_graph_regression_ZINC_500k.sh
@@ -0,0 +1,129 @@
+#!/bin/bash
+
+
+############
+# Usage
+############
+
+# bash script_main_molecules_graph_regression_ZINC_500k.sh
+
+
+
+############
+# GNNs
+############
+
+#MLP
+#GCN
+#GraphSage
+#GatedGCN
+#GAT
+#MoNet
+#GIN
+#3WLGNN
+#RingGNN
+
+
+
+############
+# ZINC - 4 RUNS
+############
+
+seed0=41
+seed1=95
+seed2=12
+seed3=35
+code=main_molecules_graph_regression.py
+dataset=ZINC
+tmux new -s benchmark -d
+tmux send-keys "source activate benchmark_gnn" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GCN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GCN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GCN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GCN_ZINC_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GraphSage_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GraphSage_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GraphSage_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GraphSage_ZINC_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GatedGCN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GatedGCN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GatedGCN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GatedGCN_ZINC_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GAT_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GAT_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GAT_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GAT_ZINC_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_MoNet_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_MoNet_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_MoNet_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_MoNet_ZINC_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_GIN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_GIN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_GIN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_GIN_ZINC_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_3WLGNN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_3WLGNN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_3WLGNN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_3WLGNN_ZINC_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --config 'configs/molecules_graph_regression_RingGNN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --config 'configs/molecules_graph_regression_RingGNN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --config 'configs/molecules_graph_regression_RingGNN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --config 'configs/molecules_graph_regression_RingGNN_ZINC_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --edge_feat True --config 'configs/molecules_graph_regression_GatedGCN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --edge_feat True --config 'configs/molecules_graph_regression_GatedGCN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --edge_feat True --config 'configs/molecules_graph_regression_GatedGCN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --edge_feat True --config 'configs/molecules_graph_regression_GatedGCN_ZINC_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --edge_feat True --config 'configs/molecules_graph_regression_3WLGNN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --edge_feat True --config 'configs/molecules_graph_regression_3WLGNN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --edge_feat True --config 'configs/molecules_graph_regression_3WLGNN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --edge_feat True --config 'configs/molecules_graph_regression_3WLGNN_ZINC_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --edge_feat True --config 'configs/molecules_graph_regression_RingGNN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --edge_feat True --config 'configs/molecules_graph_regression_RingGNN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --edge_feat True --config 'configs/molecules_graph_regression_RingGNN_ZINC_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --edge_feat True --config 'configs/molecules_graph_regression_RingGNN_ZINC_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --edge_feat True --init_lr 1e-4 --config 'configs/molecules_graph_regression_3WLGNN_ZINC_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --edge_feat True --init_lr 1e-4 --config 'configs/molecules_graph_regression_3WLGNN_ZINC_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --edge_feat True --init_lr 1e-4 --config 'configs/molecules_graph_regression_3WLGNN_ZINC_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --edge_feat True --init_lr 1e-4 --config 'configs/molecules_graph_regression_3WLGNN_ZINC_L8_500k.json' &
+wait" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --edge_feat True --init_lr 1e-5 --config 'configs/molecules_graph_regression_RingGNN_ZINC_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --edge_feat True --init_lr 1e-5 --config 'configs/molecules_graph_regression_RingGNN_ZINC_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --edge_feat True --init_lr 1e-5 --config 'configs/molecules_graph_regression_RingGNN_ZINC_L8_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --edge_feat True --init_lr 1e-5 --config 'configs/molecules_graph_regression_RingGNN_ZINC_L8_500k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark" C-m
+
+
+
+
+
+
+
+
+
+
+
diff --git a/scripts/ZINC/script_main_molecules_graph_regression_ZINC_PE_GatedGCN_500k.sh b/scripts/ZINC/script_main_molecules_graph_regression_ZINC_PE_GatedGCN_500k.sh
new file mode 100644
index 000000000..8ef125ba5
--- /dev/null
+++ b/scripts/ZINC/script_main_molecules_graph_regression_ZINC_PE_GatedGCN_500k.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+
+############
+# Usage
+############
+
+# bash script_main_molecules_graph_regression_ZINC_PE_GatedGCN_500k.sh
+
+
+
+############
+# GNNs
+############
+
+#MLP
+#GCN
+#GraphSage
+#GatedGCN
+#GAT
+#MoNet
+#GIN
+#3WLGNN
+#RingGNN
+
+
+
+############
+# ZINC - 4 RUNS
+############
+
+seed0=41
+seed1=95
+seed2=12
+seed3=35
+code=main_molecules_graph_regression.py
+dataset=ZINC
+tmux new -s benchmark -d
+tmux send-keys "source activate benchmark_gnn" C-m
+tmux send-keys "
+python $code --dataset $dataset --gpu_id 0 --seed $seed0 --edge_feat True --config 'configs/molecules_graph_regression_GatedGCN_ZINC_PE_500k.json' &
+python $code --dataset $dataset --gpu_id 1 --seed $seed1 --edge_feat True --config 'configs/molecules_graph_regression_GatedGCN_ZINC_PE_500k.json' &
+python $code --dataset $dataset --gpu_id 2 --seed $seed2 --edge_feat True --config 'configs/molecules_graph_regression_GatedGCN_ZINC_PE_500k.json' &
+python $code --dataset $dataset --gpu_id 3 --seed $seed3 --edge_feat True --config 'configs/molecules_graph_regression_GatedGCN_ZINC_PE_500k.json' &
+wait" C-m
+tmux send-keys "tmux kill-session -t benchmark" C-m
+
+
+
+
+
+
+
+
+
+
+
diff --git a/train/metrics.py b/train/metrics.py
index 8f4a3ef92..b584da038 100644
--- a/train/metrics.py
+++ b/train/metrics.py
@@ -9,6 +9,7 @@
 def MAE(scores, targets):
     MAE = F.l1_loss(scores, targets)
+    MAE = MAE.detach().item()
     return MAE
@@ -46,7 +47,7 @@ def accuracy_SBM(scores, targets):
             nb_non_empty_classes += 1
         else:
             pr_classes[r] = 0.0
-    acc = 100.* np.sum(pr_classes)/ float(nb_non_empty_classes)
+    acc = 100.* np.sum(pr_classes)/ float(nb_classes)  # average over all classes, including empty ones
     return acc
diff --git a/train/train_COLLAB_edge_classification.py b/train/train_COLLAB_edge_classification.py
new file mode 100644
index 000000000..fcfb18b24
--- /dev/null
+++ b/train/train_COLLAB_edge_classification.py
@@ -0,0 +1,152 @@
+"""
+ Utility functions for training one epoch
+ and evaluating one epoch
+"""
+import torch
+import torch.nn as nn
+from torch.utils.data import DataLoader
+import dgl
+import numpy as np
+
+from tqdm import tqdm
+
+"""
+ For GCNs
+"""
+def train_epoch_sparse(model, optimizer, device, graph, train_edges, batch_size, epoch, monet_pseudo=None):
+
+ model.train()
+
+ train_edges = train_edges.to(device)
+
+ total_loss = total_examples = 0
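+    # mini-batching is over edges only: node embeddings are recomputed on the
+    # full graph in every batch, and only the edge-level loss is batched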
+ for perm in tqdm(DataLoader(range(train_edges.size(0)), batch_size, shuffle=True)):
+
+ optimizer.zero_grad()
+
+ graph = graph.to(device)
+ x = graph.ndata['feat'].to(device)
+ e = graph.edata['feat'].to(device).float()
+
+ if monet_pseudo is not None:
+            # Assign e as pre-computed pseudo-edges for MoNet
+ e = monet_pseudo.to(device)
+
+ # Compute node embeddings
+ try:
+ x_pos_enc = graph.ndata['pos_enc'].to(device)
+ h = model(graph, x, e, x_pos_enc)
+        except:  # no 'pos_enc' in ndata: model runs without positional encodings
+ h = model(graph, x, e)
+
+ # Positive samples
+ edge = train_edges[perm].t()
+ pos_out = model.edge_predictor( h[edge[0]], h[edge[1]] )
+
+ # Just do some trivial random sampling
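+        # (uniform over all nodes, so a sampled pair may occasionally be a
+        # real edge; tolerated here for speed)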
+ edge = torch.randint(0, x.size(0), edge.size(), dtype=torch.long, device=x.device)
+ neg_out = model.edge_predictor( h[edge[0]], h[edge[1]] )
+
+ loss = model.loss(pos_out, neg_out)
+
+ loss.backward()
+ optimizer.step()
+
+ num_examples = pos_out.size(0)
+ total_loss += loss.detach().item() * num_examples
+ total_examples += num_examples
+
+ return total_loss/total_examples, optimizer
+
+
+def evaluate_network_sparse(model, device, graph, pos_train_edges,
+ pos_valid_edges, neg_valid_edges,
+ pos_test_edges, neg_test_edges,
+ evaluator, batch_size, epoch, monet_pseudo=None):
+
+ model.eval()
+ with torch.no_grad():
+
+ graph = graph.to(device)
+ x = graph.ndata['feat'].to(device)
+ e = graph.edata['feat'].to(device).float()
+
+ if monet_pseudo is not None:
+            # Assign e as pre-computed pseudo-edges for MoNet
+ e = monet_pseudo.to(device)
+
+ # Compute node embeddings
+ try:
+ x_pos_enc = graph.ndata['pos_enc'].to(device)
+ h = model(graph, x, e, x_pos_enc)
+        except:  # no 'pos_enc' in ndata: model runs without positional encodings
+ h = model(graph, x, e)
+
+ pos_train_edges = pos_train_edges.to(device)
+ pos_valid_edges = pos_valid_edges.to(device)
+ neg_valid_edges = neg_valid_edges.to(device)
+ pos_test_edges = pos_test_edges.to(device)
+ neg_test_edges = neg_test_edges.to(device)
+
+ pos_train_preds = []
+ for perm in DataLoader(range(pos_train_edges.size(0)), batch_size):
+ edge = pos_train_edges[perm].t()
+ pos_train_preds += [model.edge_predictor(h[edge[0]], h[edge[1]]).squeeze().cpu()]
+ pos_train_pred = torch.cat(pos_train_preds, dim=0)
+
+ pos_valid_preds = []
+ for perm in DataLoader(range(pos_valid_edges.size(0)), batch_size):
+ edge = pos_valid_edges[perm].t()
+ pos_valid_preds += [model.edge_predictor(h[edge[0]], h[edge[1]]).squeeze().cpu()]
+ pos_valid_pred = torch.cat(pos_valid_preds, dim=0)
+
+ neg_valid_preds = []
+        for perm in DataLoader(range(neg_valid_edges.size(0)), batch_size):
+ edge = neg_valid_edges[perm].t()
+ neg_valid_preds += [model.edge_predictor(h[edge[0]], h[edge[1]]).squeeze().cpu()]
+ neg_valid_pred = torch.cat(neg_valid_preds, dim=0)
+
+ pos_test_preds = []
+ for perm in DataLoader(range(pos_test_edges.size(0)), batch_size):
+ edge = pos_test_edges[perm].t()
+ pos_test_preds += [model.edge_predictor(h[edge[0]], h[edge[1]]).squeeze().cpu()]
+ pos_test_pred = torch.cat(pos_test_preds, dim=0)
+
+ neg_test_preds = []
+        for perm in DataLoader(range(neg_test_edges.size(0)), batch_size):
+ edge = neg_test_edges[perm].t()
+ neg_test_preds += [model.edge_predictor(h[edge[0]], h[edge[1]]).squeeze().cpu()]
+ neg_test_pred = torch.cat(neg_test_preds, dim=0)
+
+
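+        # OGB Hits@K: the fraction of positive edges scored higher than the
+        # K-th highest-scoring negative edge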
+ train_hits = []
+ for K in [10, 50, 100]:
+ evaluator.K = K
+ train_hits.append(
+ evaluator.eval({
+ 'y_pred_pos': pos_train_pred,
+                'y_pred_neg': neg_valid_pred, # no dedicated train negatives, so reuse the validation negatives
+ })[f'hits@{K}']
+ )
+
+ valid_hits = []
+ for K in [10, 50, 100]:
+ evaluator.K = K
+ valid_hits.append(
+ evaluator.eval({
+ 'y_pred_pos': pos_valid_pred,
+ 'y_pred_neg': neg_valid_pred,
+ })[f'hits@{K}']
+ )
+
+ test_hits = []
+ for K in [10, 50, 100]:
+ evaluator.K = K
+ test_hits.append(
+ evaluator.eval({
+ 'y_pred_pos': pos_test_pred,
+ 'y_pred_neg': neg_test_pred,
+ })[f'hits@{K}']
+ )
+
+ return train_hits, valid_hits, test_hits
diff --git a/train/train_CSL_graph_classification.py b/train/train_CSL_graph_classification.py
new file mode 100644
index 000000000..76351cccc
--- /dev/null
+++ b/train/train_CSL_graph_classification.py
@@ -0,0 +1,126 @@
+"""
+ Utility functions for training one epoch
+ and evaluating one epoch
+"""
+import torch
+import torch.nn as nn
+import math
+
+from train.metrics import accuracy_TU as accuracy
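+# accuracy_TU returns a raw count of correct predictions; it is normalized by
+# nb_data at the end of each epoch below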
+
+"""
+ For GCNs
+"""
+def train_epoch_sparse(model, optimizer, device, data_loader, epoch):
+ model.train()
+ epoch_loss = 0
+ epoch_train_acc = 0
+ nb_data = 0
+ gpu_mem = 0
+ for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
+ batch_x = batch_graphs.ndata['feat'].to(device) # num x feat
+ batch_e = batch_graphs.edata['feat'].to(device)
+ batch_labels = batch_labels.to(device)
+ optimizer.zero_grad()
+ try:
+ batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
+ batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
+        except:  # no 'pos_enc' in ndata: model runs without positional encodings
+ batch_scores = model.forward(batch_graphs, batch_x, batch_e)
+ loss = model.loss(batch_scores, batch_labels)
+ loss.backward()
+ optimizer.step()
+ epoch_loss += loss.detach().item()
+ epoch_train_acc += accuracy(batch_scores, batch_labels)
+ nb_data += batch_labels.size(0)
+ epoch_loss /= (iter + 1)
+ epoch_train_acc /= nb_data
+
+ return epoch_loss, epoch_train_acc, optimizer
+
+def evaluate_network_sparse(model, device, data_loader, epoch):
+ model.eval()
+ epoch_test_loss = 0
+ epoch_test_acc = 0
+ nb_data = 0
+ with torch.no_grad():
+ for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
+ batch_x = batch_graphs.ndata['feat'].to(device)
+ batch_e = batch_graphs.edata['feat'].to(device)
+ batch_labels = batch_labels.to(device)
+ try:
+ batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
+ batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
+            except:  # no 'pos_enc' in ndata: model runs without positional encodings
+ batch_scores = model.forward(batch_graphs, batch_x, batch_e)
+ loss = model.loss(batch_scores, batch_labels)
+ epoch_test_loss += loss.detach().item()
+ epoch_test_acc += accuracy(batch_scores, batch_labels)
+ nb_data += batch_labels.size(0)
+ epoch_test_loss /= (iter + 1)
+ epoch_test_acc /= nb_data
+
+ return epoch_test_loss, epoch_test_acc
+
+
+
+
+"""
+ For WL-GNNs
+"""
+def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
+ model.train()
+ epoch_loss = 0
+ epoch_train_acc = 0
+ nb_data = 0
+ gpu_mem = 0
+ optimizer.zero_grad()
+ for iter, (x_with_node_feat, labels) in enumerate(data_loader):
+ x_with_node_feat = x_with_node_feat.to(device)
+ labels = labels.to(device)
+
+ scores = model.forward(x_with_node_feat)
+ loss = model.loss(scores, labels)
+ loss.backward()
+
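+        # dense WL-GNN inputs arrive one graph at a time, so gradients are
+        # accumulated and the optimizer steps only every batch_size iterations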
+ if not (iter%batch_size):
+ optimizer.step()
+ optimizer.zero_grad()
+
+ epoch_loss += loss.detach().item()
+ epoch_train_acc += accuracy(scores, labels)
+ nb_data += labels.size(0)
+ epoch_loss /= (iter + 1)
+ epoch_train_acc /= nb_data
+
+ return epoch_loss, epoch_train_acc, optimizer
+
+def evaluate_network_dense(model, device, data_loader, epoch):
+ model.eval()
+ epoch_test_loss = 0
+ epoch_test_acc = 0
+ nb_data = 0
+ with torch.no_grad():
+ for iter, (x_with_node_feat, labels) in enumerate(data_loader):
+ x_with_node_feat = x_with_node_feat.to(device)
+ labels = labels.to(device)
+
+ scores = model.forward(x_with_node_feat)
+ loss = model.loss(scores, labels)
+ epoch_test_loss += loss.detach().item()
+ epoch_test_acc += accuracy(scores, labels)
+ nb_data += labels.size(0)
+ epoch_test_loss /= (iter + 1)
+ epoch_test_acc /= nb_data
+
+ return epoch_test_loss, epoch_test_acc
+
+
+def check_patience(all_losses, best_loss, best_epoch, curr_loss, curr_epoch, counter):
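+    # early-stopping helper: reset the patience counter whenever the loss
+    # improves, otherwise increment it so the caller can stop training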
+ if curr_loss < best_loss:
+ counter = 0
+ best_loss = curr_loss
+ best_epoch = curr_epoch
+ else:
+ counter += 1
+ return best_loss, best_epoch, counter
\ No newline at end of file
diff --git a/train/train_CitationGraphs_node_classification.py b/train/train_CitationGraphs_node_classification.py
deleted file mode 100644
index a8b17a371..000000000
--- a/train/train_CitationGraphs_node_classification.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""
- Utility functions for training one epoch
- and evaluating one epoch
-"""
-import torch
-import torch.nn as nn
-import math
-import dgl
-
-from train.metrics import accuracy_CITATION_GRAPH as accuracy
-
-
-def train_epoch(model, optimizer, device, graph, nfeat, efeat, norm_n, norm_e, train_mask, labels, epoch):
-
- model.train()
- epoch_loss = 0
- epoch_train_acc = 0
- nb_data = 0
- gpu_mem = 0
-
- #logits = model.forward(graph, nfeat, efeat, norm_n, norm_e)
- logits = model(graph, nfeat, efeat, norm_n, norm_e)
- loss = model.loss(logits[train_mask], labels[train_mask])
- optimizer.zero_grad()
- loss.backward()
- optimizer.step()
- epoch_loss = loss.detach().item()
- epoch_train_acc = accuracy(logits[train_mask], labels[train_mask])
- return epoch_loss, epoch_train_acc, optimizer
-
-
-def evaluate_network(model, optimizer, device, graph, nfeat, efeat, norm_n, norm_e, mask, labels, epoch):
-
- model.eval()
- epoch_test_loss = 0
- epoch_test_acc = 0
- nb_data = 0
- with torch.no_grad():
- logits = model.forward(graph, nfeat, efeat, norm_n, norm_e)
- loss = model.loss(logits[mask], labels[mask])
- epoch_test_loss = loss.detach().item()
- epoch_test_acc = accuracy(logits[mask], labels[mask])
-
- return epoch_test_loss, epoch_test_acc
diff --git a/train/train_SBMs_node_classification.py b/train/train_SBMs_node_classification.py
index 56d698abe..21fde8158 100644
--- a/train/train_SBMs_node_classification.py
+++ b/train/train_SBMs_node_classification.py
@@ -9,22 +9,26 @@
 from train.metrics import accuracy_SBM as accuracy
-
-def train_epoch(model, optimizer, device, data_loader, epoch):
+"""
+    For GCNs
+"""
+def train_epoch_sparse(model, optimizer, device, data_loader, epoch):
     model.train()
     epoch_loss = 0
     epoch_train_acc = 0
     nb_data = 0
     gpu_mem = 0
-    for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
+    for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
         batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
         batch_e = batch_graphs.edata['feat'].to(device)
-        batch_snorm_e = batch_snorm_e.to(device)
         batch_labels = batch_labels.to(device)
-        batch_snorm_n = batch_snorm_n.to(device)  # num x 1
         optimizer.zero_grad()
-        batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
+        try:
+            batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
+            batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
+        except:  # no 'pos_enc' in ndata: model runs without positional encodings
+            batch_scores = model.forward(batch_graphs, batch_x, batch_e)
         loss = model.loss(batch_scores, batch_labels)
         loss.backward()
         optimizer.step()
@@ -36,20 +40,22 @@ def train_epoch(model, optimizer, device, data_loader, epoch):
     return epoch_loss, epoch_train_acc, optimizer
-def evaluate_network(model, device, data_loader, epoch):
+def evaluate_network_sparse(model, device, data_loader, epoch):
     model.eval()
     epoch_test_loss = 0
     epoch_test_acc = 0
     nb_data = 0
     with torch.no_grad():
-        for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
+        for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
             batch_x = batch_graphs.ndata['feat'].to(device)
             batch_e = batch_graphs.edata['feat'].to(device)
-            batch_snorm_e = batch_snorm_e.to(device)
             batch_labels = batch_labels.to(device)
-            batch_snorm_n = batch_snorm_n.to(device)
-            batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
+            try:
+                batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
+                batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
+            except:  # no 'pos_enc' in ndata: model runs without positional encodings
+                batch_scores = model.forward(batch_graphs, batch_x, batch_e)
             loss = model.loss(batch_scores, batch_labels)
             epoch_test_loss += loss.detach().item()
             epoch_test_acc += accuracy(batch_scores, batch_labels)
@@ -59,3 +65,57 @@ def evaluate_network(model, device, data_loader, epoch):
     return epoch_test_loss, epoch_test_acc
+
+
+
+"""
+    For WL-GNNs
+"""
+def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
+
+    model.train()
+    epoch_loss = 0
+    epoch_train_acc = 0
+    nb_data = 0
+    gpu_mem = 0
+    optimizer.zero_grad()
+    for iter, (x_with_node_feat, labels) in enumerate(data_loader):
+        x_with_node_feat = x_with_node_feat.to(device)
+        labels = labels.to(device)
+
+        scores = model.forward(x_with_node_feat)
+        loss = model.loss(scores, labels)
+        loss.backward()
+
+        if not (iter%batch_size):
+            optimizer.step()
+            optimizer.zero_grad()
+
+        epoch_loss += loss.detach().item()
+        epoch_train_acc += accuracy(scores, labels)
+    epoch_loss /= (iter + 1)
+    epoch_train_acc /= (iter + 1)
+
+    return epoch_loss, epoch_train_acc, optimizer
+
+
+
+def evaluate_network_dense(model, device, data_loader, epoch):
+
+    model.eval()
+    epoch_test_loss = 0
+    epoch_test_acc = 0
+    nb_data = 0
+    with torch.no_grad():
+        for iter, (x_with_node_feat, labels) in enumerate(data_loader):
+            x_with_node_feat = x_with_node_feat.to(device)
+            labels = labels.to(device)
+
+            scores = model.forward(x_with_node_feat)
+            loss = model.loss(scores, labels)
+            epoch_test_loss += loss.detach().item()
+            epoch_test_acc += accuracy(scores, labels)
+        epoch_test_loss /= (iter + 1)
+        epoch_test_acc /= (iter + 1)
+
+    return epoch_test_loss, epoch_test_acc
diff --git a/train/train_TSP_edge_classification.py b/train/train_TSP_edge_classification.py
index ebac360bb..78fdcd01e 100644
--- a/train/train_TSP_edge_classification.py
+++ b/train/train_TSP_edge_classification.py
@@ -9,23 +9,23 @@
 from train.metrics import binary_f1_score
-
-def train_epoch(model, optimizer, device, data_loader, epoch):
+"""
+    For GCNs
+"""
+def train_epoch_sparse(model, optimizer, device, data_loader, epoch):
     model.train()
     epoch_loss = 0
     epoch_train_f1 = 0
     nb_data = 0
     gpu_mem = 0
-    for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
+    for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
         batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
         batch_e = batch_graphs.edata['feat'].to(device)
         batch_labels = batch_labels.to(device)
-        batch_snorm_e = batch_snorm_e.to(device)
-        batch_snorm_n = batch_snorm_n.to(device)  # num x 1
         optimizer.zero_grad()
-        batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
+        batch_scores = model.forward(batch_graphs, batch_x, batch_e)
         loss = model.loss(batch_scores, batch_labels)
         loss.backward()
         optimizer.step()
@@ -37,21 +37,19 @@ def train_epoch(model, optimizer, device, data_loader, epoch):
     return epoch_loss, epoch_train_f1, optimizer
-def evaluate_network(model, device, data_loader, epoch):
+def evaluate_network_sparse(model, device, data_loader, epoch):
     model.eval()
     epoch_test_loss = 0
     epoch_test_f1 = 0
     nb_data = 0
     with torch.no_grad():
-        for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
+        for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
             batch_x = batch_graphs.ndata['feat'].to(device)
             batch_e = batch_graphs.edata['feat'].to(device)
             batch_labels = batch_labels.to(device)
-            batch_snorm_e = batch_snorm_e.to(device)
-            batch_snorm_n = batch_snorm_n.to(device)
-            batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
+            batch_scores = model.forward(batch_graphs, batch_x, batch_e)
            loss = model.loss(batch_scores, batch_labels)
            epoch_test_loss += loss.detach().item()
            epoch_test_f1 += binary_f1_score(batch_scores, batch_labels)
@@ -61,3 +59,64 @@ def evaluate_network(model, device, data_loader, epoch):
     return epoch_test_loss, epoch_test_f1
+
+
+
+"""
+    For WL-GNNs
+"""
+def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
+
+    model.train()
+    epoch_loss = 0
+    epoch_train_f1 = 0
+    nb_data = 0
+    gpu_mem = 0
+    optimizer.zero_grad()
+    for iter, (x_no_edge_feat, x_with_edge_feat, labels, edge_list) in enumerate(data_loader):
+        # exactly one of the two dense inputs is non-None, depending on
+        # whether edge features are used
+        if x_no_edge_feat is not None:
+            x_no_edge_feat = x_no_edge_feat.to(device)
+        if x_with_edge_feat is not None:
+            x_with_edge_feat = x_with_edge_feat.to(device)
+        labels = labels.to(device)
+        # (src, dst) index tensors used to read per-edge scores off the dense output
+        edge_list = edge_list[0].to(device), edge_list[1].to(device)
+
+        scores = model.forward(x_no_edge_feat, x_with_edge_feat, edge_list)
+        loss = model.loss(scores, labels)
+        loss.backward()
+
+        if not (iter%batch_size):
+            optimizer.step()
+            optimizer.zero_grad()
+
+        epoch_loss += loss.detach().item()
+        epoch_train_f1 += binary_f1_score(scores, labels)
+    epoch_loss /= (iter + 1)
+    epoch_train_f1 /= (iter + 1)
+
+    return epoch_loss, epoch_train_f1, optimizer
+
+
+def evaluate_network_dense(model, device, data_loader, epoch):
+
+    model.eval()
+    epoch_test_loss = 0
+    epoch_test_f1 = 0
+    nb_data = 0
+    with torch.no_grad():
+        for iter, (x_no_edge_feat, x_with_edge_feat, labels, edge_list) in enumerate(data_loader):
+            if x_no_edge_feat is not None:
+                x_no_edge_feat = x_no_edge_feat.to(device)
+            if x_with_edge_feat is not None:
+                x_with_edge_feat = x_with_edge_feat.to(device)
+            labels = labels.to(device)
+            edge_list = edge_list[0].to(device), edge_list[1].to(device)
+
+            scores = model.forward(x_no_edge_feat, x_with_edge_feat, edge_list)
+            loss = model.loss(scores, labels)
+            epoch_test_loss += loss.detach().item()
+            epoch_test_f1 += binary_f1_score(scores, labels)
+        epoch_test_loss /= (iter + 1)
+        epoch_test_f1 /= (iter + 1)
+
+    return epoch_test_loss, epoch_test_f1
\ No newline at end of file
diff --git a/train/train_TUs_graph_classification.py b/train/train_TUs_graph_classification.py
index d54f66dbe..ede3f43cb 100644
--- a/train/train_TUs_graph_classification.py
+++ b/train/train_TUs_graph_classification.py
@@ -8,21 +8,22 @@
 from train.metrics import accuracy_TU as accuracy
-def train_epoch(model, optimizer, device, data_loader, epoch):
+"""
+    For GCNs
+"""
+def train_epoch_sparse(model, optimizer, device, data_loader, epoch):
     model.train()
     epoch_loss = 0
     epoch_train_acc = 0
     nb_data = 0
     gpu_mem = 0
-    for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
+    for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
         batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
         batch_e = batch_graphs.edata['feat'].to(device)
-        batch_snorm_e = batch_snorm_e.to(device)
         batch_labels = batch_labels.to(device)
-        batch_snorm_n = batch_snorm_n.to(device)  # num x 1
         optimizer.zero_grad()
-        batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
+        batch_scores = model.forward(batch_graphs, batch_x, batch_e)
         loss = model.loss(batch_scores, batch_labels)
         loss.backward()
         optimizer.step()
@@ -34,20 +35,18 @@ def train_epoch(model, optimizer, device, data_loader, epoch):
     return epoch_loss, epoch_train_acc, optimizer
-def evaluate_network(model, device, data_loader, epoch):
+def evaluate_network_sparse(model, device, data_loader, epoch):
     model.eval()
     epoch_test_loss = 0
     epoch_test_acc = 0
     nb_data = 0
     with torch.no_grad():
-        for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
+        for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
             batch_x = batch_graphs.ndata['feat'].to(device)
             batch_e = batch_graphs.edata['feat'].to(device)
-            batch_snorm_e = batch_snorm_e.to(device)
            batch_labels = batch_labels.to(device)
-            batch_snorm_n = batch_snorm_n.to(device)
-            batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
+            batch_scores = model.forward(batch_graphs, batch_x, batch_e)
            loss = model.loss(batch_scores, batch_labels)
            epoch_test_loss += loss.detach().item()
            epoch_test_acc += accuracy(batch_scores, batch_labels)
@@ -57,6 +56,62 @@ def evaluate_network(model, device, data_loader, epoch):
     return epoch_test_loss, epoch_test_acc
+
+
+
+"""
+    For WL-GNNs
+"""
+def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
+    model.train()
+    epoch_loss = 0
+    epoch_train_acc = 0
+    nb_data = 0
+    gpu_mem = 0
+    optimizer.zero_grad()
+    for iter, (x_with_node_feat, labels) in enumerate(data_loader):
+        x_with_node_feat = x_with_node_feat.to(device)
+        labels = labels.to(device)
+
+        scores = model.forward(x_with_node_feat)
+        loss = model.loss(scores, labels)
+        loss.backward()
+
+        if not (iter%batch_size):
+            optimizer.step()
+            optimizer.zero_grad()
+
+        epoch_loss += loss.detach().item()
+        epoch_train_acc += accuracy(scores, labels)
+        nb_data += labels.size(0)
+    epoch_loss /= (iter + 1)
+    epoch_train_acc /= nb_data
+
+    return epoch_loss, epoch_train_acc, optimizer
+
+def evaluate_network_dense(model, device, data_loader, epoch):
+    model.eval()
+    epoch_test_loss = 0
+    epoch_test_acc = 0
+    nb_data = 0
+    with torch.no_grad():
+        for iter, (x_with_node_feat, labels) in enumerate(data_loader):
+            x_with_node_feat = x_with_node_feat.to(device)
+            labels = labels.to(device)
+
+            scores = model.forward(x_with_node_feat)
+            loss = model.loss(scores, labels)
+            epoch_test_loss += loss.detach().item()
+            epoch_test_acc += accuracy(scores, labels)
+            nb_data += labels.size(0)
+        epoch_test_loss /= (iter + 1)
+        epoch_test_acc /= nb_data
+
+    return epoch_test_loss, epoch_test_acc
+
+
+
+
 def check_patience(all_losses, best_loss, best_epoch, curr_loss, curr_epoch, counter):
     if curr_loss < best_loss:
         counter = 0
diff --git a/train/train_molecules_graph_regression.py b/train/train_molecules_graph_regression.py
index 8be7d34bf..a60c32e76 100644
--- a/train/train_molecules_graph_regression.py
+++ b/train/train_molecules_graph_regression.py
@@ -8,21 +8,25 @@
 from train.metrics import MAE
-def train_epoch(model, optimizer, device, data_loader, epoch):
+"""
+    For GCNs
+"""
+def train_epoch_sparse(model, optimizer, device, data_loader, epoch):
     model.train()
     epoch_loss = 0
     epoch_train_mae = 0
     nb_data = 0
     gpu_mem = 0
-    for iter, (batch_graphs, batch_targets, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
+    for iter, (batch_graphs, batch_targets) in enumerate(data_loader):
         batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
         batch_e = batch_graphs.edata['feat'].to(device)
-        batch_snorm_e = batch_snorm_e.to(device)
         batch_targets = batch_targets.to(device)
-        batch_snorm_n = batch_snorm_n.to(device)  # num x 1
         optimizer.zero_grad()
-
-        batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
+        try:
+            batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
+            batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
+        except:  # no 'pos_enc' in ndata: model runs without positional encodings
+            batch_scores = model.forward(batch_graphs, batch_x, batch_e)
         loss = model.loss(batch_scores, batch_targets)
         loss.backward()
         optimizer.step()
@@ -34,20 +38,21 @@ def train_epoch(model, optimizer, device, data_loader, epoch):
     return epoch_loss, epoch_train_mae, optimizer
-def evaluate_network(model, device, data_loader, epoch):
+def evaluate_network_sparse(model, device, data_loader, epoch):
     model.eval()
     epoch_test_loss = 0
     epoch_test_mae = 0
     nb_data = 0
     with torch.no_grad():
-        for iter, (batch_graphs, batch_targets, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
+        for iter, (batch_graphs, batch_targets) in enumerate(data_loader):
             batch_x = batch_graphs.ndata['feat'].to(device)
             batch_e = batch_graphs.edata['feat'].to(device)
-            batch_snorm_e = batch_snorm_e.to(device)
             batch_targets = batch_targets.to(device)
-            batch_snorm_n = batch_snorm_n.to(device)
-
-            batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
+            try:
+                batch_pos_enc = batch_graphs.ndata['pos_enc'].to(device)
+                batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_pos_enc)
+            except:  # no 'pos_enc' in ndata: model runs without positional encodings
+                batch_scores = model.forward(batch_graphs, batch_x, batch_e)
             loss = model.loss(batch_scores, batch_targets)
             epoch_test_loss += loss.detach().item()
             epoch_test_mae += MAE(batch_scores, batch_targets)
@@ -55,4 +60,63 @@ def evaluate_network(model, device, data_loader, epoch):
         epoch_test_loss /= (iter + 1)
         epoch_test_mae /= (iter + 1)
+    return epoch_test_loss, epoch_test_mae
+
+
+
+
+"""
+    For WL-GNNs
+"""
+def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
+    model.train()
+    epoch_loss = 0
+    epoch_train_mae = 0
+    nb_data = 0
+    gpu_mem = 0
+    optimizer.zero_grad()
+    for iter, (x_no_edge_feat, x_with_edge_feat, targets) in enumerate(data_loader):
+        if x_no_edge_feat is not None:
+            x_no_edge_feat = x_no_edge_feat.to(device)
+        if x_with_edge_feat is not None:
+            x_with_edge_feat = x_with_edge_feat.to(device)
+        targets = targets.to(device)
+
+        scores = model.forward(x_no_edge_feat, x_with_edge_feat)
+        loss = model.loss(scores, targets)
+        loss.backward()
+
+        if not (iter%batch_size):
+            optimizer.step()
+            optimizer.zero_grad()
+
+        epoch_loss += loss.detach().item()
+        epoch_train_mae += MAE(scores, targets)
+        nb_data += targets.size(0)
+    epoch_loss /= (iter + 1)
+    epoch_train_mae /= (iter + 1)
+
+    return epoch_loss, epoch_train_mae, optimizer
+
+def evaluate_network_dense(model, device, data_loader, epoch):
+    model.eval()
+    epoch_test_loss = 0
+    epoch_test_mae = 0
+    nb_data = 0
+    with torch.no_grad():
+        for iter, (x_no_edge_feat, x_with_edge_feat, targets) in enumerate(data_loader):
+            if x_no_edge_feat is not None:
+                x_no_edge_feat = x_no_edge_feat.to(device)
+            if x_with_edge_feat is not None:
+                x_with_edge_feat = x_with_edge_feat.to(device)
+            targets = targets.to(device)
+
+            scores = model.forward(x_no_edge_feat, x_with_edge_feat)
+            loss = model.loss(scores, targets)
+            epoch_test_loss += loss.detach().item()
+            epoch_test_mae += MAE(scores, targets)
+            nb_data += targets.size(0)
+        epoch_test_loss /= (iter + 1)
+        epoch_test_mae /= (iter + 1)
+
+    return epoch_test_loss, epoch_test_mae
\ No newline at end of file
diff --git a/train/train_superpixels_graph_classification.py b/train/train_superpixels_graph_classification.py
index e3a9f1eb4..5bc2824aa 100644
--- a/train/train_superpixels_graph_classification.py
+++ b/train/train_superpixels_graph_classification.py
@@ -8,21 +8,22 @@
 from train.metrics import accuracy_MNIST_CIFAR as accuracy
-def train_epoch(model, optimizer, device, data_loader, epoch):
+"""
+    For GCNs
+"""
+def train_epoch_sparse(model, optimizer, device, data_loader, epoch):
     model.train()
     epoch_loss = 0
     epoch_train_acc = 0
     nb_data = 0
     gpu_mem = 0
-    for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
+    for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
         batch_x = batch_graphs.ndata['feat'].to(device)  # num x feat
         batch_e = batch_graphs.edata['feat'].to(device)
-        batch_snorm_e = batch_snorm_e.to(device)
         batch_labels = batch_labels.to(device)
-        batch_snorm_n = batch_snorm_n.to(device)  # num x 1
         optimizer.zero_grad()
-        batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
+        batch_scores = model.forward(batch_graphs, batch_x, batch_e)
         loss = model.loss(batch_scores, batch_labels)
         loss.backward()
         optimizer.step()
@@ -34,20 +35,18 @@ def train_epoch(model, optimizer, device, data_loader, epoch):
     return epoch_loss, epoch_train_acc, optimizer
-def evaluate_network(model, device, data_loader, epoch):
+def evaluate_network_sparse(model, device, data_loader, epoch):
     model.eval()
     epoch_test_loss = 0
     epoch_test_acc = 0
     nb_data = 0
     with torch.no_grad():
-        for iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
+        for iter, (batch_graphs, batch_labels) in enumerate(data_loader):
             batch_x = batch_graphs.ndata['feat'].to(device)
             batch_e = batch_graphs.edata['feat'].to(device)
-            batch_snorm_e = batch_snorm_e.to(device)
             batch_labels = batch_labels.to(device)
-            batch_snorm_n = batch_snorm_n.to(device)
-            batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
+            batch_scores = model.forward(batch_graphs, batch_x, batch_e)
             loss = model.loss(batch_scores, batch_labels)
             epoch_test_loss += loss.detach().item()
             epoch_test_acc += accuracy(batch_scores, batch_labels)
@@ -55,4 +54,58 @@ def evaluate_network(model, device, data_loader, epoch):
         epoch_test_loss /= (iter + 1)
         epoch_test_acc /= nb_data
+    return epoch_test_loss, epoch_test_acc
+
+
+
+
+
+"""
+    For WL-GNNs
+"""
+def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
+    model.train()
+    epoch_loss = 0
+    epoch_train_acc = 0
+    nb_data = 0
+    gpu_mem = 0
+    optimizer.zero_grad()
+    for iter, (x_with_node_feat, labels) in enumerate(data_loader):
+        x_with_node_feat = x_with_node_feat.to(device)
+        labels = labels.to(device)
+
+        scores = model.forward(x_with_node_feat)
+        loss = model.loss(scores, labels)
+        loss.backward()
+
+        if not (iter%batch_size):
+            optimizer.step()
+            optimizer.zero_grad()
+
+        epoch_loss += loss.detach().item()
+        epoch_train_acc += accuracy(scores, labels)
+        nb_data += labels.size(0)
+    epoch_loss /= (iter + 1)
+    epoch_train_acc /= nb_data
+
+    return epoch_loss, epoch_train_acc, optimizer
+
+def evaluate_network_dense(model, device, data_loader, epoch):
+    model.eval()
+    epoch_test_loss = 0
+    epoch_test_acc = 0
+    nb_data = 0
+    with torch.no_grad():
+        for iter, (x_with_node_feat, labels) in enumerate(data_loader):
+            x_with_node_feat = x_with_node_feat.to(device)
+            labels = labels.to(device)
+
+            scores = model.forward(x_with_node_feat)
+            loss = model.loss(scores, labels)
+            epoch_test_loss += loss.detach().item()
+            epoch_test_acc += accuracy(scores, labels)
+            nb_data += labels.size(0)
+        epoch_test_loss /= (iter + 1)
+        epoch_test_acc /= nb_data
+
     return epoch_test_loss, epoch_test_acc
\ No newline at end of file