Hello,
I’m getting a weird error I haven’t seen mentioned on the forum yet. I’m passing a tensor of one-hot encoded categorical data and a tensor of numerical data through the model. This is the error I’m getting:
RuntimeError                              Traceback (most recent call last)
<ipython-input> in <module>
      9             break
     10
---> 11         y_pred = d_o_model(cat, num)
     12         single_loss = criterion(y_pred, label)
     13         aggregated_losses.append(single_loss)

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    539             result = self._slow_forward(*input, **kwargs)
    540         else:
--> 541             result = self.forward(*input, **kwargs)
    542         for hook in self._forward_hooks.values():
    543             hook_result = hook(self, input, result)

<ipython-input> in forward(self, x_categorical, x_numerical)
     53
     54         #normalizing numerical columns
---> 55         x_numerical = self.batch_norm_num(x_numerical)
     56
     57         #concatenating numerical and categorical columns

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    539             result = self._slow_forward(*input, **kwargs)
    540         else:
--> 541             result = self.forward(*input, **kwargs)
    542         for hook in self._forward_hooks.values():
    543             hook_result = hook(self, input, result)

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\batchnorm.py in forward(self, input)
     79             input, self.running_mean, self.running_var, self.weight, self.bias,
     80             self.training or not self.track_running_stats,
---> 81             exponential_average_factor, self.eps)
     82
     83     def extra_repr(self):

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
   1668     return torch.batch_norm(
   1669         input, weight, bias, running_mean, running_var,
-> 1670         training, momentum, eps, torch.backends.cudnn.enabled
   1671     )
   1672

RuntimeError: "batch_norm" not implemented for 'Long'
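From the traceback it looks like it’s the BatchNorm1d layer on the numerical tensor that fails. A toy example along these lines raises the same error, since tensors built from Python ints default to Long (the shapes here are made up, not my real data):

import torch
import torch.nn as nn

bn = nn.BatchNorm1d(3)
x = torch.tensor([[1, 2, 3], [4, 5, 6]])  # integer data -> dtype torch.int64 ('Long')
bn(x)  # RuntimeError: "batch_norm" not implemented for 'Long'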
Does anyone know what this means? The model ran fine before, but I’m now using a new custom Dataset class, shown below:
class image_Data_Dataset(Dataset):
    '''
    Image class dataset.
    '''
    def __init__(self, data, cont, cat, transform=None):
        '''
        Args:
        ------------------------------------------------------------
        data = dataframe
        image = column in dataframe with absolute path to the image
        cont = list of continuous data columns
        cat = list of categorical data columns
        policy = ID variable
        '''
        #data frame
        self.image_frame = data
        #transform
        self.transform = transform
        #categorical data
        df_cat = np.stack(pd.get_dummies(self.image_frame, columns=cat, drop_first=True).iloc[:, 15:].values)
        self.categorical = df_cat
        #numerical data
        features = self.image_frame[cont]
        df_numerical = features.values
        df_numerical_s = np.stack(df_numerical)
        self.numerical = df_numerical_s

    def __len__(self):
        return len(self.image_frame)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        label = self.image_frame.iloc[idx, 15]
        label = torch.tensor(label)
        pic = Path(self.image_frame.iloc[idx, 18])
        img = Image.open(pic)
        policy = self.image_frame.iloc[idx, 0]
        categorical_data = self.categorical[idx]
        categorical_data = torch.tensor(categorical_data)
        numerical_data = self.numerical[idx]
        numerical_data = torch.tensor(numerical_data)
        sample = {'image': img, 'policy': policy, 'label': label,
                  'categorical_data': categorical_data, 'numerical_data': numerical_data}
        if self.transform:
            img = self.transform(img)
        #return image, label, policy
        return label, categorical_data, numerical_data
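To sanity-check what the Dataset actually hands out, this is the kind of dtype check I mean (a sketch; df, cont_cols and cat_cols stand in for my real dataframe and column lists):

dataset = image_Data_Dataset(df, cont_cols, cat_cols)
label, categorical_data, numerical_data = dataset[0]
print(label.dtype, categorical_data.dtype, numerical_data.dtype)
# if the dataframe's numeric columns hold ints, numerical_data comes out as torch.int64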
Here is the training loop:
for i in range(epochs):
    for b, (label, categorical_data, numerical_data) in enumerate(train_loader):
        label = label.cuda()
        categorical_data = categorical_data.cuda()
        numerical_data = numerical_data.cuda()
        #print(label, categorical_data, numerical_data)
        #count batches
        b += 1
        #throttle the batches
        if b == max_trn_batch:
            break
        y_pred = d_o_model(categorical_data, numerical_data)
        single_loss = criterion(y_pred, label)
        aggregated_losses.append(single_loss)
        # statistics
        running_loss += single_loss.item() * label.size(0)
        running_corrects += torch.sum(y_pred.argmax(dim=1) == label.data)
        print(f'train-epoch: {i}, train-batch: {b}')
        optimizer.zero_grad()
        single_loss.backward()
        optimizer.step()
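Would the fix just be to cast the numerical tensor to float before it reaches the batch norm? Something like either of these (sketch):

# in the training loop:
numerical_data = numerical_data.float().cuda()

# or inside __getitem__:
numerical_data = torch.tensor(self.numerical[idx], dtype=torch.float)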
Here is the model class:
class Data_Only_Model(nn.Module):
    def __init__(self, embedding_size, num_numerical_cols, output_size, layers, p=0.4):
        '''
        Args
        ---------------------------
        embedding_size: Contains the embedding size for the categorical columns
        num_numerical_cols: Stores the total number of numerical columns
        output_size: The size of the output layer or the number of possible outputs.
        layers: List which contains number of neurons for all the layers.
        p: Dropout with the default value of 0.4
        '''
        super().__init__()
        #ModuleList of embedding objects for all categorical columns
        self.all_embeddings = nn.ModuleList([nn.Embedding(ni, nf) for ni, nf in embedding_size])
        #dropout value for all layers
        self.embedding_dropout = nn.Dropout(p)
        #1-dimensional batch normalization over all numerical columns
        self.batch_norm_num = nn.BatchNorm1d(num_numerical_cols)
        #the number of categorical and numerical columns are added together and stored in input_size
        all_layers = []
        num_categorical_cols = sum((nf for ni, nf in embedding_size))
        input_size = num_categorical_cols + num_numerical_cols
        #loop iterates to add corresponding layers to the all_layers list above
        for i in layers:
            all_layers.append(nn.Linear(input_size, i))
            all_layers.append(nn.ReLU(inplace=True))
            all_layers.append(nn.BatchNorm1d(i))
            all_layers.append(nn.Dropout(p))
            input_size = i
        #append output layer to list of layers
        all_layers.append(nn.Linear(layers[-1], output_size))
        #pass all layers to the Sequential class
        self.layers = nn.Sequential(*all_layers)

    #define the forward method
    def forward(self, x_categorical, x_numerical):
        #this starts the embedding of categorical columns
        embeddings = []
        for i, e in enumerate(self.all_embeddings):
            embeddings.append(e(x_categorical[:, i]))
        x = torch.cat(embeddings, 1)
        x = self.embedding_dropout(x)
        #normalizing numerical columns
        x_numerical = self.batch_norm_num(x_numerical)
        #concatenating numerical and categorical columns
        x = torch.cat([x, x_numerical], 1)
        x = self.layers(x)
        x = F.log_softmax(x, dim=1)
        return x
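For completeness, this is roughly how the model gets instantiated (the sizes below are placeholders, not my real columns):

embedding_size = [(4, 2), (3, 2)]  # hypothetical (num_categories, embedding_dim) per categorical column
d_o_model = Data_Only_Model(embedding_size, num_numerical_cols=5,
                            output_size=2, layers=[200, 100], p=0.4).cuda()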
Is there something my loader is missing?