I created embeddings for my patches and then fed them to a vanilla vision transformer for binary classification.
Here’s the forward method:
def forward(self, x):
    #x = self.to_patch_embedding(img)
    b, n, _ = x.shape
    cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
    x = torch.cat((cls_tokens, x), dim=1)
    #x += self.pos_embedding[:, :(n + 1)]
    x = self.dropout(x)
    x = self.transformer(x)
    x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]
    x = self.to_latent(x)
    x = self.mlp_head(x)
    print('x is: ', x)
    return x
However, the model returns raw logits rather than labels. How should I convert this output to binary (0/1) labels?
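My current guess is to take the argmax over the two logits of each row, roughly like the sketch below (the out tensor here is just two rows copied from the output shown further down, to illustrate the shapes), but I'm not sure whether that is the right approach:

import torch

# two rows of the model output, shape (batch, 2)
out = torch.tensor([[-0.8774, -0.1138],
                    [-0.4879,  0.0006]])

# index of the larger logit = predicted class (0 or 1)
pred_labels = out.argmax(dim=1)
print(pred_labels)   # tensor([1, 1])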
When I then compute the loss, I get this error:
x is: tensor([[-8.7743e-01, -1.1380e-01],
[-4.8789e-01, 5.5360e-04],
[-7.1857e-01, 3.6758e-01],
[-5.9797e-01, 2.3756e-01],
[-6.1892e-01, -1.2594e-02],
[-4.2626e-01, 1.5825e-01],
[-8.1902e-01, -1.5155e-01],
[-5.5616e-01, 1.7184e-02]], device='cuda:0', grad_fn=<AddmmBackward0>)
out shape is: torch.Size([8, 2])
out is: tensor([[-8.7743e-01, -1.1380e-01],
[-4.8789e-01, 5.5360e-04],
[-7.1857e-01, 3.6758e-01],
[-5.9797e-01, 2.3756e-01],
[-6.1892e-01, -1.2594e-02],
[-4.2626e-01, 1.5825e-01],
[-8.1902e-01, -1.5155e-01],
[-5.5616e-01, 1.7184e-02]], device='cuda:0', grad_fn=<AddmmBackward0>)
labels are: tensor([1, 1, 1, 1, 1, 1, 1, 1], dtype=torch.int32)
Traceback (most recent call last):
File "main_classifier.py", line 250, in <module>
pred,label,loss = trainer.train(sample_batched, model)
pred, labels, loss = model.forward(feats, labels)
File "/home/jalal/research/venv/dpcc/lib/python3.8/site-packages/torch/nn/parallel/data_parallel.py", line 166, in forward
return self.module(*inputs[0], **kwargs[0])
File "/home/jalal/research/venv/dpcc/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
loss = self.criterion(out.cuda(), labels.cuda())
File "/home/jalal/research/venv/dpcc/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/home/jalal/research/venv/dpcc/lib/python3.8/site-packages/torch/nn/modules/loss.py", line 1150, in forward
return F.cross_entropy(input, target, weight=self.weight,
File "/home/jalal/research/venv/dpcc/lib/python3.8/site-packages/torch/nn/functional.py", line 2846, in cross_entropy
return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
RuntimeError: "nll_loss_forward_reduce_cuda_kernel_2d_index" not implemented for 'Int'
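From the last line of the traceback, my understanding is that cross_entropy only accepts int64 (torch.long) targets, so I suspect the torch.IntTensor labels are the problem. Below is a minimal standalone sketch of what I think the fix would look like (random logits, only the shapes and dtypes match my setup; the real criterion is the nn.CrossEntropyLoss used in my trainer):

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()

logits = torch.randn(8, 2)                    # model output: (batch, num_classes)
labels = torch.ones(8, dtype=torch.int32)     # int32 targets are what trigger the error
labels = labels.long()                        # cast to int64, which cross_entropy expects

loss = criterion(logits, labels)
print(loss)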
This is how I call the transformer and compute the loss:
labels = torch.IntTensor(labels)
print('labels :', labels)
stacked_X = torch.stack(X)
out = self.transformer(stacked_X)
print('out shape is: ', out.shape)
print('out is: ', out)
# loss
print('labels are: ', labels)
print(type(labels))
loss = self.criterion(out.cuda(), labels.cuda())
Here’s the full code of the vanilla vision transformer:
import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def pair(t):
    return t if isinstance(t, tuple) else (t, t)
# classes
class PreNorm(nn.Module):
    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
    def __init__(self, dim, hidden_dim, dropout = 0.):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        return self.net(x)
class Attention(nn.Module):
    def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
        super().__init__()
        inner_dim = dim_head * heads
        project_out = not (heads == 1 and dim_head == dim)

        self.heads = heads
        self.scale = dim_head ** -0.5

        self.attend = nn.Softmax(dim = -1)
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        ) if project_out else nn.Identity()

    def forward(self, x):
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)

        dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
        attn = self.attend(dots)

        out = torch.matmul(attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        return self.to_out(out)
class Transformer(nn.Module):
    def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
        super().__init__()
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
                PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
            ]))

    def forward(self, x):
        for attn, ff in self.layers:
            x = attn(x) + x
            x = ff(x) + x
        return x
class VisionTransformer(nn.Module):
    def __init__(self, *, image_size=256, patch_size=16, dim=512, depth=4, heads=12, mlp_dim=256, dropout=0.25, pool = 'cls', channels = 3, dim_head=64, emb_dropout = 0., num_classes=2):
        super().__init__()
        image_height, image_width = pair(image_size)
        patch_height, patch_width = pair(patch_size)

        assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'

        num_patches = (image_height // patch_height) * (image_width // patch_width)
        patch_dim = channels * patch_height * patch_width
        assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'

        self.to_patch_embedding = nn.Sequential(
            Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width),
            nn.Linear(patch_dim, dim),
        )

        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
        self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
        self.dropout = nn.Dropout(emb_dropout)

        self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)

        self.pool = pool
        self.to_latent = nn.Identity()

        self.mlp_head = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, num_classes)
        )

    def forward(self, x):
        #x = self.to_patch_embedding(img)
        b, n, _ = x.shape

        cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
        x = torch.cat((cls_tokens, x), dim=1)
        #x += self.pos_embedding[:, :(n + 1)]
        x = self.dropout(x)

        x = self.transformer(x)

        x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]

        x = self.to_latent(x)
        x = self.mlp_head(x)
        print('x is: ', x)
        return x
My batch size here is 8, and the labels are 0 and 1.
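In case it helps, this is roughly how I drive the model (a simplified sketch: the real patch embeddings come from my own pipeline, so the random tensor below only matches the shapes I use, i.e. batch 8 and embedding dimension 512; the patch count of 256 is just the model's default):

import torch

model = VisionTransformer()        # defaults: dim=512, num_classes=2

# to_patch_embedding is commented out, so the model expects
# already-embedded patches of shape (batch, num_patches, dim)
x = torch.randn(8, 256, 512)       # batch of 8, 256 patches, dim 512
out = model(x)                     # logits, shape (8, 2)

pred = out.argmax(dim=1)           # predicted 0/1 labels, shape (8,)
print(out.shape, pred)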