
Commit 08b914d

try not pooling all the sos tokens and instead using the last one
1 parent: f1d392d

File tree

2 files changed: +4 −4 lines changed


meshgpt_pytorch/meshgpt_pytorch.py

Lines changed: 3 additions & 3 deletions

@@ -1475,10 +1475,10 @@ def forward_on_codes(
 
         # if calling without kv cache, pool the sos tokens, if greater than 1 sos token
 
-        if not exists(cache) and self.num_sos_tokens > 1:
+        if not exists(cache):
             sos_tokens, attended_face_codes = unpack(attended_face_codes, packed_sos_shape, 'b * d')
-            pooled_sos_token = reduce(sos_tokens, 'b n d -> b 1 d', 'mean')
-            attended_face_codes = torch.cat((pooled_sos_token, attended_face_codes), dim = 1)
+            last_sos_token = sos_tokens[:, -1:]
+            attended_face_codes = torch.cat((last_sos_token, attended_face_codes), dim = 1)
 
 
         # maybe project from coarse to fine dimension for hierarchical transformers
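For context, a minimal runnable sketch contrasting the two strategies this commit swaps. The shapes and standalone variable names here are illustrative assumptions, not pulled from the library; only the reduce pattern and the [:, -1:] slice come from the diff itself.

# Sketch of the change; batch/num_sos/dim values are made up for illustration.
import torch
from einops import reduce

batch, num_sos, dim = 2, 4, 8
sos_tokens = torch.randn(batch, num_sos, dim)

# before this commit: mean-pool all sos tokens into one summary token
pooled_sos_token = reduce(sos_tokens, 'b n d -> b 1 d', 'mean')

# after this commit: keep only the last sos token; the -1: slice preserves
# the singleton sequence dimension, so downstream torch.cat calls are unchanged
last_sos_token = sos_tokens[:, -1:]

assert pooled_sos_token.shape == last_sos_token.shape == (batch, 1, dim)

Note that the num_sos_tokens > 1 guard is also dropped: taking the last token is a no-op when there is only a single sos token, so the branch can now run whenever no kv cache is passed.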

meshgpt_pytorch/version.py

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-__version__ = '1.2.5'
+__version__ = '1.2.6'
