mirror of https://github.com/labmlai/annotated_deep_learning_paper_implementations.git (synced 2025-08-17 11:20:32 +08:00)
fix: fix cls_token bug in vit.
@@ -191,11 +191,11 @@ class VisionTransformer(Module):
         """
         # Get patch embeddings. This gives a tensor of shape `[patches, batch_size, d_model]`
         x = self.patch_emb(x)
-        # Add positional embeddings
-        x = self.pos_emb(x)
         # Concatenate the `[CLS]` token embeddings before feeding the transformer
         cls_token_emb = self.cls_token_emb.expand(-1, x.shape[1], -1)
         x = torch.cat([cls_token_emb, x])
+        # Add positional embeddings
+        x = self.pos_emb(x)
 
         # Pass through transformer layers with no attention masking
         for layer in self.transformer_layers:
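The bug is visible in the reordering above: positional embeddings were previously added before the `[CLS]` token was concatenated, so the `[CLS]` token never received a positional embedding. Below is a minimal sketch of the corrected ordering, with plain tensors standing in for the module's `patch_emb`, `cls_token_emb`, and `pos_emb`; the shapes (`patches = 4`, `batch_size = 2`, `d_model = 8`) are made up for illustration.

import torch

# Illustrative shapes (made up for this sketch): 4 patches, batch of 2, d_model = 8.
patches, batch_size, d_model = 4, 2, 8

# Stand-ins for the patch embeddings, the learned [CLS] embedding,
# and the learned positional embeddings.
x = torch.randn(patches, batch_size, d_model)
cls_token_emb = torch.nn.Parameter(torch.randn(1, 1, d_model))
pos_emb = torch.nn.Parameter(torch.randn(patches + 1, 1, d_model))

# Fixed order: prepend [CLS] first, then add positional embeddings,
# so position 0 belongs to the [CLS] token and patch i sits at position i + 1.
cls = cls_token_emb.expand(-1, batch_size, -1)  # [1, batch_size, d_model]
x = torch.cat([cls, x])                         # [patches + 1, batch_size, d_model]
x = x + pos_emb                                 # broadcasts over the batch dimension

print(x.shape)  # torch.Size([5, 2, 8])

With the old order, the positional embeddings were added while `x` still had only `patches` rows, leaving the subsequently prepended `[CLS]` row without any positional information.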