Hi,
GPU memory consumption keeps increasing during training… I thought `reshape` was the cause of this. How can I fix it?
Here is my code:
class Net(nn.Module):
    """Seven conv stages with a residual connection, then a linear
    projection and block compressed sensing.

    NOTE (memory): steadily growing GPU memory during training is almost
    never caused by ``reshape`` — it returns a view and allocates no new
    storage. The usual cause is keeping tensors that still carry autograd
    history across iterations (e.g. accumulating ``loss`` instead of
    ``loss.item()``, or storing outputs without ``.detach()``).

    Args (stored by the original code but usage not visible here):
        SR: sampling ratio — TODO confirm semantics against caller.
        block_size: CS block size — TODO confirm.
        phi: measurement matrix — TODO confirm.
    """

    def __init__(self, SR, block_size, phi):
        # BUG FIX: the original called super(MHCSResNet, self).__init__(),
        # which raises NameError — this class is named Net.
        super().__init__()
        # Layer hyperparameters were elided ("..") in the post; kept as
        # ``...`` placeholders, which is at least valid Python syntax.
        self.conv1 = nn.Sequential(
            nn.Conv2d(...),
            nn.BatchNorm2d(...),
            nn.ReLU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(...),
            nn.BatchNorm2d(...),
            nn.ReLU(),
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(...),
            nn.BatchNorm2d(...),
            nn.ReLU(),
        )
        self.conv4 = nn.Sequential(
            nn.Conv2d(...),
            nn.BatchNorm2d(...),
            nn.ReLU(),
        )
        self.conv5 = nn.Sequential(
            nn.Conv2d(...),
            nn.BatchNorm2d(...),
            nn.ReLU(),
        )
        self.conv6 = nn.Sequential(
            nn.Conv2d(...),
            nn.BatchNorm2d(...),
            nn.ReLU(),
        )
        self.conv7 = nn.Sequential(
            nn.Conv2d(...),
        )
        self.fc = nn.Linear(6400, 64)

    def forward(self, kr, y, phi):
        """Run the conv stack with a residual add, project each channel to
        an 8x8 block, and apply block compressed sensing.

        Returns:
            (fxr, out_reshape, outfc) — same triple as the original code.
        """
        out_conv1 = self.conv1(kr)
        out_conv2 = self.conv2(out_conv1)
        out_conv3 = self.conv3(out_conv2)
        # BUG FIX: the original line had an unbalanced extra ')' here.
        out_conv4 = self.conv4(out_conv3)
        out_conv5 = self.conv5(out_conv4)
        out_conv6 = self.conv6(out_conv5)
        out_conv7 = self.conv7(out_conv6)
        # Residual connection: requires conv7's output to match kr's shape.
        out_feedback = kr + out_conv7
        # Flatten spatial dims per channel, project 6400 -> 64, then view
        # each 64-vector as an 8x8 block. reshape is a view — no new memory.
        out_linear = self.fc(out_feedback.flatten(2))
        out_reshape = out_linear.reshape(
            out_feedback.shape[0], out_feedback.shape[1], 8, 8
        )
        # BUG FIX: torch.zeros(...) always allocates on the CPU with the
        # default dtype; new_zeros allocates on the same device/dtype as
        # out_feedback, so this works on GPU without an implicit transfer.
        outfc = out_feedback.new_zeros(
            out_feedback.shape[0], out_feedback.shape[1], 8, 8
        )
        # NOTE(review): outfc is all zeros here, so Block_Compressed_Sensing
        # operates on a zero tensor — presumably out_reshape was intended as
        # its input; confirm the intent before training.
        fxr = Block_Compressed_Sensing(outfc, phi)
        return fxr, out_reshape, outfc
What should I do?