Consider these two nets:

```
class Net(nn.Module):
    """Toy two-headed CNN: a shared conv1 stem feeding two parallel heads.

    ``input2 = input`` in ``forward`` only binds a second *name* to the same
    tensor object — no data is copied and no new autograd node is created.
    Both heads therefore read the identical conv1 activation.
    """

    def __init__(self):
        super(Net, self).__init__()
        # 1x1 convolutions without bias, so each output pixel is a pure
        # linear mix of the input channels at the same location.
        self.conv1 = nn.Conv2d(3, 6, kernel_size=1, stride=1, bias=False)
        self.conv2 = nn.Conv2d(6, 6, kernel_size=1, stride=1, bias=False)
        self.conv3 = nn.Conv2d(6, 6, kernel_size=1, stride=1, bias=False)

    def forward(self, input):
        # Shared stem.
        input = F.relu(self.conv1(input))
        input2 = input  # plain aliasing — same tensor, same autograd history
        # Two independent heads on top of the shared feature map.
        output = F.relu(self.conv2(input))
        output2 = F.relu(self.conv3(input2))
        return output, output2
```

```
class Net2(nn.Module):
    """Variant of the two-headed CNN whose second head reads a clone.

    ``input.clone()`` copies the tensor's data into new storage, but the
    clone remains part of the autograd graph (clone is differentiable), so
    gradients from the second head still flow back into conv1.
    """

    def __init__(self):
        super(Net2, self).__init__()
        # 1x1 convolutions without bias, so each output pixel is a pure
        # linear mix of the input channels at the same location.
        self.conv1 = nn.Conv2d(3, 6, kernel_size=1, stride=1, bias=False)
        self.conv2 = nn.Conv2d(6, 6, kernel_size=1, stride=1, bias=False)
        self.conv3 = nn.Conv2d(6, 6, kernel_size=1, stride=1, bias=False)

    def forward(self, input):
        # Shared stem.
        input = F.relu(self.conv1(input))
        # Deep copy of the activation; still connected to conv1 in autograd.
        input2 = input.clone()
        # Two independent heads — numerically identical to Net's forward.
        output = F.relu(self.conv2(input))
        output2 = F.relu(self.conv3(input2))
        return output, output2
```

What is the difference between Net and Net2? Is the Net model wrong?

When I use clone() in Net2 and do back-propagation (via backward() on the loss between output2 and the target),

will the weights in conv1 get updated as well, or will only the conv3 weights be updated?