Hello, I'm trying to train my network, but I'm getting this error: “one of the variables needed for gradient computation has been modified by an inplace operation”.

Here is the code:
```python
class PolyNet(nn.Module):  # nn.Module is the parent class
    def __init__(self, rank=4):
        super(PolyNet, self).__init__()  # calls __init__ of the parent class
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 1, kernel_size=7, stride=1, padding=2),
            nn.BatchNorm2d(1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.fc = nn.Linear(63631, rank)

    def forward(self, x):
        """
        Feed forward through the network.
        Args:
            x - input to the network
        Returns:
            "out", which is the network's output
        """
        out = self.layer1(x)
        out = out.reshape(out.size(0), -1)
        out = self.fc(out)
        return out


def func(x, a, b, c, d):
    return (x**3)*a + (x**2)*b + c*x + d


def my_loss(outputs, labels):
    """
    Args:
        outputs - output of the network ([batch size, rank+1])
        labels  - desired coefficients ([batch size, rank+1])
    """
    loss = torch.zeros(1, dtype=torch.float, requires_grad=True)
    loss = loss.to(device)
    arr2 = torch.zeros(4, dtype=torch.float, requires_grad=True)
    arr = torch.zeros(10, dtype=torch.float, requires_grad=True)
    for i in range(0, 4):
        x = 0
        for j in range(0, 10):
            x = x + 0.1*j
            y1 = func(x, outputs[i, 0], outputs[i, 1], outputs[i, 2], outputs[i, 3])
            y2 = func(x, labels[i, 0], labels[i, 1], labels[i, 2], labels[i, 3])
            n = torch.abs(y1 - y2)
            arr[j] = n
        a = torch.max(arr)
        arr2[i] = a
    loss = (torch.sum(arr2)) / 4
    print(loss)
    # Observe: if you need to iterate and add values to the loss defined above,
    # you cannot write loss += ..., because this raises the error
    # "Leaf variable was used in an inplace operation".
    # Instead, to avoid this error, write loss = loss + ...
    return loss


model = PolyNet().to(device)
print("Number of model trainable parameters:", get_train_params_num(model))

# ----------------------------------------------
# Choose your optimizer:
learning_rate = 0.001
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# ----------------------------------------------

def train_model(model, optimizer, train_loader, validation_loader,
                train_losses, validation_losses, epochs=2):
    """
    Trains a neural network.
    Args:
        model - model to be trained
        optimizer - optimizer used for training
        train_loader - loader from which the training data comes
        validation_loader - loader from which the validation data comes
                            (maybe at the end you use test_loader)
        train_losses - train loss values are appended to this list for future analysis
        validation_losses - validation loss values are appended to this list for future analysis
        epochs - number of runs over the entire data set
    """
    for epoch in range(epochs):
        for i, data in enumerate(train_loader, 0):
            inputs = (data['image']).to(device)
            labels = (data['labels']).to(device)

            # Forward pass
            outputs = model(inputs.float())
            loss = my_loss(outputs, labels)

            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    return


if 'train_losses' not in vars():
    train_losses = []
if 'validation_losses' not in vars():
    validation_losses = []

train_model(model, optimizer, train_loader, validation_loader,
            train_losses, validation_losses, epochs=2)
```
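I suspect the in-place writes into the preallocated tensors (`arr[j] = n` and `arr2[i] = a`) might be what autograd is complaining about, so I also sketched a version of `my_loss` that collects the values in Python lists and stacks them instead of assigning by index (it should compute the same thing, but I haven't verified this fixes anything). Is this the right direction, or is the problem somewhere else, e.g. the `ReLU(inplace=True)`?

```python
# Attempted rewrite (my own untested sketch): build Python lists and stack them,
# so no tensor is modified in place after it enters the autograd graph.
def my_loss_v2(outputs, labels):
    per_sample = []
    for i in range(4):
        x = 0
        diffs = []
        for j in range(10):
            x = x + 0.1 * j
            y1 = func(x, outputs[i, 0], outputs[i, 1], outputs[i, 2], outputs[i, 3])
            y2 = func(x, labels[i, 0], labels[i, 1], labels[i, 2], labels[i, 3])
            diffs.append(torch.abs(y1 - y2))
        # max |y1 - y2| over the x grid for sample i
        per_sample.append(torch.max(torch.stack(diffs)))
    return torch.sum(torch.stack(per_sample)) / 4
```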
And here is the error traceback:
```
RuntimeError                              Traceback (most recent call last)
<ipython-input> in <module>()
     17                 train_losses,
     18                 validation_losses,
---> 19                 epochs=2)

<ipython-input> in train_model(model, optimizer, train_loader, validation_loader, train_losses, validation_losses, epochs)
     30             # Backward and optimize
     31             optimizer.zero_grad()
---> 32             loss.backward()
     33             optimizer.step()
     34

~\Miniconda3\lib\site-packages\torch\tensor.py in backward(self, gradient, retain_graph, create_graph)
     91                 products. Defaults to ``False``.
     92         """
---> 93         torch.autograd.backward(self, gradient, retain_graph, create_graph)
     94
     95     def register_hook(self, hook):

~\Miniconda3\lib\site-packages\torch\autograd\__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
     87     Variable._execution_engine.run_backward(
     88         tensors, grad_tensors, retain_graph, create_graph,
---> 89         allow_unreachable=True)  # allow_unreachable flag
     90
     91

RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation
```
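For what it's worth, here is a tiny standalone snippet (my own attempt to isolate the pattern, not taken from the training code above) that raises what looks like the same error, by modifying a tensor in place after it has already been saved for the backward pass:

```python
import torch

a = torch.ones(3, requires_grad=True)
b = a * 2            # non-leaf tensor, part of the graph
c = (b ** 2).sum()   # pow saves b for its backward pass
b[0] = 5.0           # in-place write bumps b's version counter
c.backward()         # RuntimeError: one of the variables needed for gradient
                     # computation has been modified by an inplace operation
```

Is my loss function doing essentially the same thing?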