Hello,
I’m trying to get this model to run and I keep getting the following error:
Expected tensor for argument #1 'indices' to have scalar type Long; but got CPUFloatTensor instead (while checking arguments for embedding)
BTW, I checked out this, but it didn’t really help with my error… or maybe I just didn’t understand it. Here is the full traceback:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-778-34aa93336f9e> in <module>
94
95 # Forward pass
---> 96 outputs = tab_model(numerical_data, categorical_data, train_tensor)
97 loss = criterion(outputs, label)
98 print(f"tab epoch: {epoch}, tab loss: {loss}")
~\AppData\Local\Continuum\anaconda3\envs\torch_env\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
<ipython-input-775-fc3cf1d18657> in forward(self, x_categorical, x_numerical, predictions)
35 embeddings = []
36 for i, e in enumerate(self.all_embeddings):
---> 37 embeddings.append(e(x_categorical[:,i]).type(torch.LongTensor))
38
39 cat_embedd = torch.cat(embeddings, 1)
~\AppData\Local\Continuum\anaconda3\envs\torch_env\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
~\AppData\Local\Continuum\anaconda3\envs\torch_env\lib\site-packages\torch\nn\modules\sparse.py in forward(self, input)
112 return F.embedding(
113 input, self.weight, self.padding_idx, self.max_norm,
--> 114 self.norm_type, self.scale_grad_by_freq, self.sparse)
115
116 def extra_repr(self):
~\AppData\Local\Continuum\anaconda3\envs\torch_env\lib\site-packages\torch\nn\functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
1482 # remove once script supports set_grad_enabled
1483 _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 1484 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
1485
1486
RuntimeError: Expected tensor for argument #1 'indices' to have scalar type Long; but got torch.cuda.FloatTensor instead (while checking arguments for embedding)
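As far as I understand, nn.Embedding only accepts Long (int64) indices, and I can reproduce the same error in isolation with a minimal sketch (the tensors here are made up, not my real data):

import torch
import torch.nn as nn

emb = nn.Embedding(10, 4)             # 10 categories, 4-dim vectors
bad_idx = torch.tensor([[1.0, 2.0]])  # FloatTensor indices
# emb(bad_idx)                        # raises: Expected ... scalar type Long ...
good_idx = bad_idx.long()             # casting the *input* indices (not the output) avoids it
out = emb(good_idx)                   # works, shape (1, 2, 4)

So it seems the indices reaching the embedding layers in my model are still floats somewhere.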
Here is the model:
import torch
import torch.nn as nn
import torch.nn.functional as F

class Data_Only_Model(nn.Module):
    def __init__(self, embedding_size):
        super().__init__()
        # one nn.Embedding per categorical column, sized by (cardinality, dim)
        self.all_embeddings = nn.ModuleList([nn.Embedding(ni, nf) for ni, nf in embedding_size])
        self.embedding_dropout = nn.Dropout(p=0.04)
        self.fc1 = nn.Linear(78, 1000)
        self.fc2 = nn.BatchNorm1d(1000)
        self.fc3 = nn.Dropout(p=0.04)
        self.fc4 = nn.Linear(1000, 256)
        self.fc5 = nn.BatchNorm1d(256)
        self.fc6 = nn.Dropout(p=0.04)
        self.fc7 = nn.Linear(256, 128)
        self.fc8 = nn.BatchNorm1d(128)
        self.fc9 = nn.Dropout(p=0.04)
        self.fc10 = nn.Linear(128, 32)
        self.fc11 = nn.BatchNorm1d(32)
        self.fc12 = nn.Dropout(p=0.04)
        self.fc13 = nn.Linear(32, 2)

    # define the forward method
    def forward(self, x_categorical, x_numerical, predictions):
        # look up each categorical column in its embedding table
        embeddings = []
        for i, e in enumerate(self.all_embeddings):
            embeddings.append(e(x_categorical[:, i]))
        cat_embedd = torch.cat(embeddings, 1)
        print("cat", cat_embedd.size())
        numerical = x_numerical
        print("numerical", numerical.size())
        # concatenate the embedding outputs with the numerical features
        x = torch.cat((cat_embedd, x_numerical), dim=1)
        print('1 concat', x.size())
        # append the predictions tensor as extra input features
        x4 = torch.cat((x, predictions), dim=1)
        print('X4', x4.size())
        x4 = F.relu(self.fc1(x4))
        x4 = self.fc2(x4)
        x4 = self.fc3(x4)
        x4 = self.fc4(x4)
        x4 = self.fc5(x4)
        x4 = self.fc6(x4)
        x4 = F.relu(self.fc7(x4))
        x4 = self.fc8(x4)
        x4 = self.fc9(x4)
        x4 = F.relu(self.fc10(x4))
        x4 = self.fc11(x4)
        x4 = self.fc12(x4)
        x4 = self.fc13(x4)
        x4 = F.log_softmax(x4, dim=1)
        return x4
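For reference, the embeddings list I pass in as embedding_size is a list of (number of categories, embedding dim) tuples built from the categorical columns, roughly like this (df and cat_cols are placeholders for my actual dataframe and column names):

import pandas as pd

# hypothetical sketch: df is my training dataframe, cat_cols my categorical column names
cat_sizes = [len(df[col].astype('category').cat.categories) for col in cat_cols]
# rule of thumb: dim = min(50, (cardinality + 1) // 2)
embeddings = [(size, min(50, (size + 1) // 2)) for size in cat_sizes]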
tab_model = Data_Only_Model(embedding_size=embeddings)
tab_model.to(device)
Data_Only_Model(
(all_embeddings): ModuleList(
(0): Embedding(3, 2)
(1): Embedding(20, 10)
(2): Embedding(3007, 50)
(3): Embedding(48, 24)
(4): Embedding(4, 2)
(5): Embedding(6, 3)
(6): Embedding(6, 3)
(7): Embedding(15, 8)
(8): Embedding(3, 2)
(9): Embedding(10, 5)
(10): Embedding(13, 7)
)
(embedding_dropout): Dropout(p=0.04, inplace=False)
(fc1): Linear(in_features=78, out_features=1000, bias=True)
(fc2): BatchNorm1d(1000, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(fc3): Dropout(p=0.04, inplace=False)
(fc4): Linear(in_features=1000, out_features=256, bias=True)
(fc5): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(fc6): Dropout(p=0.04, inplace=False)
(fc7): Linear(in_features=256, out_features=128, bias=True)
(fc8): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(fc9): Dropout(p=0.04, inplace=False)
(fc10): Linear(in_features=128, out_features=32, bias=True)
(fc11): BatchNorm1d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(fc12): Dropout(p=0.04, inplace=False)
(fc13): Linear(in_features=32, out_features=2, bias=True)
)
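And this is the relevant part of the training loop (simplified from the cell in the traceback; criterion, label, and the three input tensors are defined earlier):

for epoch in range(epochs):
    # Forward pass (this is the line that raises the error)
    outputs = tab_model(numerical_data, categorical_data, train_tensor)
    loss = criterion(outputs, label)
    print(f"tab epoch: {epoch}, tab loss: {loss}")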
I’m not sure where I’ve gone wrong.