@@ -140,10 +140,10 @@ def forward(self, h: torch.Tensor, adj_mat: torch.Tensor):
g_repeat_interleave = g.repeat_interleave(n_nodes, dim=0)
# Now we concatenate to get
# $$\{\overrightarrow{g_1} \Vert \overrightarrow{g_1},
- # \overrightarrow{g_1}, \Vert \overrightarrow{g_2},
+ # \overrightarrow{g_1} \Vert \overrightarrow{g_2},
# \dots, \overrightarrow{g_1} \Vert \overrightarrow{g_N},
# \overrightarrow{g_2} \Vert \overrightarrow{g_1},
- # \overrightarrow{g_2}, \Vert \overrightarrow{g_2},
+ # \overrightarrow{g_2} \Vert \overrightarrow{g_2},
# \dots, \overrightarrow{g_2} \Vert \overrightarrow{g_N}, ...\}$$
g_concat = torch.cat([g_repeat_interleave, g_repeat], dim=-1)
# Reshape so that `g_concat[i, j]` is $\overrightarrow{g_i} \Vert \overrightarrow{g_j}$
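To make the ordering above concrete, here is a minimal sketch of the all-pairs concatenation trick on a toy input. The small `n_nodes` and embedding size are chosen purely for illustration; the variable names mirror the snippet above but are not the file's actual values.

import torch

# Toy setup: 3 nodes with 2-dimensional embeddings (illustrative values only)
n_nodes, d = 3, 2
g = torch.arange(n_nodes * d, dtype=torch.float32).view(n_nodes, d)

# `repeat_interleave` repeats each row n_nodes times in place:
# g_1, g_1, g_1, g_2, g_2, g_2, g_3, g_3, g_3
g_repeat_interleave = g.repeat_interleave(n_nodes, dim=0)
# `repeat` tiles the whole tensor:
# g_1, g_2, g_3, g_1, g_2, g_3, g_1, g_2, g_3
g_repeat = g.repeat(n_nodes, 1)
# Concatenating the two pairs every node with every node,
# and the reshape indexes those pairs by (i, j)
g_concat = torch.cat([g_repeat_interleave, g_repeat], dim=-1)
g_concat = g_concat.view(n_nodes, n_nodes, 2 * d)

# g_concat[i, j] is exactly g_i || g_j
assert torch.equal(g_concat[1, 2], torch.cat([g[1], g[2]]))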
@@ -170,7 +170,7 @@ def forward(self, h: torch.Tensor, adj_mat: torch.Tensor):
# We then normalize attention scores (or coefficients)
# $$\alpha_{ij} = \text{softmax}_j(e_{ij}) =
- # \frac{\exp(e_{ij})}{\sum_{j \in \mathcal{N}_i} \exp(e_{ij})}$$
+ # \frac{\exp(e_{ij})}{\sum_{k \in \mathcal{N}_i} \exp(e_{ik})}$$
#
# where $\mathcal{N}_i$ is the set of nodes connected to $i$.
#
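The corrected sum index matters because the softmax for node $i$ runs over its neighbors $\mathcal{N}_i$, so the dummy index in the denominator cannot be the same $j$ that is fixed in the numerator. A minimal sketch of that masked normalization, assuming toy scores `e` and a boolean adjacency matrix `adj_mat` (both invented here for illustration; this is not the file's exact masking code):

import torch

n_nodes = 3
e = torch.randn(n_nodes, n_nodes)  # toy attention scores e_ij
adj_mat = torch.tensor([[1, 1, 0],
                        [1, 1, 1],
                        [0, 1, 1]], dtype=torch.bool)  # toy adjacency

# Set scores of non-neighbors to -inf so exp(.) gives them zero weight
e = e.masked_fill(~adj_mat, float('-inf'))
# Softmax over the last dimension normalizes over each node's neighbors
alpha = torch.softmax(e, dim=-1)

# Each row of alpha sums to 1 over k in N_i
assert torch.allclose(alpha.sum(dim=-1), torch.ones(n_nodes))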