How should I reshape my data?

I am using ResNet for 1D data and it's showing this error:
RuntimeError: size mismatch, m1: [16 x 51200], m2: [2048 x 2] at C:\w\1\s\tmp_conda_3.7_080832\conda\conda-bld\pytorch_1580544815892\work\aten\src\TH/generic/THTensorMath.cpp:41
Here is my forward method:

def forward(self, x):
    x = F.relu(self.bn1(self.conv1(x.float())))

    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)

    x = F.avg_pool1d(x, 4)

    x = x.view(x.size(0), -1)  # flatten to [batch_size, features]
    x = self.fc(x)

    return x

The view operation itself is correct.
How did you define the in_features of self.fc? They seem to cause the mismatch.

So, what should I do to fix it?
This is my ResNet block:

class ResNet(torch.nn.Module):
    def __init__(self, block, num_layers, classes=2):
        super(ResNet, self).__init__()
        self.input_planes = 64
        self.conv1 = torch.nn.Conv1d(1, 64, kernel_size=3, stride=1, padding=1)
        self.bn1 = torch.nn.BatchNorm1d(64)
        self.layer1 = self._layer(block, 64, num_layers[0], stride=1)
        self.layer2 = self._layer(block, 128, num_layers[1], stride=1)
        self.layer3 = self._layer(block, 256, num_layers[2], stride=1)
        self.layer4 = self._layer(block, 512, num_layers[3], stride=1)
        self.averagePool = torch.nn.AvgPool1d(kernel_size=4, stride=1)
        self.fc = torch.nn.Linear(512 * block.expansion, classes)

    def _layer(self, block, planes, num_layers, stride=1):
        dim_change = None
        if stride != 1 or planes != self.input_planes * block.expansion:
            dim_change = torch.nn.Sequential(
                torch.nn.Conv1d(self.input_planes, planes * block.expansion, kernel_size=1, stride=stride),
                torch.nn.BatchNorm1d(planes * block.expansion))
        netLayers = []
        netLayers.append(block(self.input_planes, planes, stride=stride, dim_change=dim_change))
        self.input_planes = planes * block.expansion
        for i in range(1, num_layers):
            netLayers.append(block(self.input_planes, planes))
            self.input_planes = planes * block.expansion

        return torch.nn.Sequential(*netLayers)

512*block.expansion doesn't match the incoming number of features, so you would either have to decrease the spatial size of the activation through conv/pool layers or set in_features=51200 for self.fc.
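As a minimal sketch of the arithmetic (the [16, 2048, 25] activation shape is inferred from the error message: 2048 = 512 * expansion matches m2, and 2048 * 25 = 51200 matches m1):

```python
import torch

# m1: [16 x 51200] -> each of the 16 samples flattens to 51200 features,
# but self.fc = Linear(512 * block.expansion, 2) only expects 2048 of them
activations = torch.randn(16, 2048, 25)           # inferred shape after layer4 + pooling
flat = activations.view(activations.size(0), -1)  # -> [16, 51200]

fc = torch.nn.Linear(51200, 2)                    # option 1: match the flattened size
print(fc(flat).shape)                             # torch.Size([16, 2])

pool = torch.nn.AdaptiveAvgPool1d(1)              # option 2: shrink the temporal dim to 1
pooled = pool(activations).view(16, -1)           # -> [16, 2048]
fc2 = torch.nn.Linear(2048, 2)                    # now 512 * block.expansion fits
print(fc2(pooled).shape)                          # torch.Size([16, 2])
```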

Could you please explain what the problem with my network was and how it was fixed?

It has been running for three hours now and has not shown a single output. What's the problem?

The problem is that the passed activation map contains more features than you've specified in the linear layer that should accept this input.
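One quick way to confirm the actual feature count is to print the flattened shape just before the linear layer, e.g. in the posted forward (only the print statement is new):

```python
def forward(self, x):
    x = F.relu(self.bn1(self.conv1(x.float())))
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)
    x = F.avg_pool1d(x, 4)
    x = x.view(x.size(0), -1)
    print(x.shape)  # the second value is the in_features self.fc must use
    x = self.fc(x)
    return x
```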

When should what kind of output be printed?
If you are expecting an output in each iteration, your code might hang.
In this case, could you set num_workers=0 in your DataLoader and rerun the script?
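i.e. something like this (a sketch using the DataLoader from the snippet below):

```python
train_data = torch.utils.data.DataLoader(
    train_iterator,
    batch_size=16,
    shuffle=True,
    num_workers=0,  # load batches in the main process to rule out worker deadlocks
)
```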

It is still not showing accuracy and loss:

train_iterator = torch.utils.data.TensorDataset(x_train, y_train.reshape(-1).long())
train_data = torch.utils.data.DataLoader(train_iterator, batch_size=16, shuffle=True)

for x, y in train_data:
    break
print(x.shape, y.shape)  # inspect the shape of a single batch

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.0001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)

epochs = 3
batches = len(train_data)  # number of batches in the dataset (each batch contains 16 examples)
losses = []
accuracy = []

for epoch in range(epochs):
    epoch_loss = 0.0
    epoch_accuracy = 0.0
    for features, labels in train_data:
        outputs = net(features)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()  # accumulate the loss of each training batch
        epoch_accuracy += (outputs.argmax(1) == labels).float().mean().item()

    losses.append(epoch_loss / batches)
    accuracy.append(epoch_accuracy / batches)
    print(f'Epoch: {epoch} -> Loss: {(epoch_loss/batches):.8f}, Accuracy: {(epoch_accuracy/batches):.8f}')
    # dividing epoch_loss by the number of batches gives the mean epoch loss,
    # printed rounded to 8 decimals; same for epoch_accuracy

Could you add a print statement in each iteration?
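For instance (a sketch reusing the names from the training loop above; the batch index i is only added for readability):

```python
for i, (features, labels) in enumerate(train_data):
    outputs = net(features)
    loss = criterion(outputs, labels)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print(f'epoch {epoch}, batch {i}/{batches}, loss: {loss.item():.6f}')  # progress every iteration
```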

PS: You can post code snippets by wrapping them into three backticks ``` :wink:
This would make it easier to debug the code, as I'm currently unsure about the indentation of the code.