tl;dr: I am very new to PyTorch and this is really important for me. I know this is a very long question, but I am only including the whole code to give context; the problem itself is probably straightforward to understand and solve. I already tried another forum and read some references but couldn't fix it. I am more used to TensorFlow, but I am learning PyTorch and I need this code to run, and I have tried a lot of things. Anyone who works with torch can probably solve this in a few minutes. Any help will be highly appreciated. Thanks in advance.
The whole thing:
When I run the following code:
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
from tqdm import tqdm

# DensityMatrix, Expectation and convert_to_complex_ops are custom
# layers/helpers defined elsewhere in my code.

class Generator(nn.Module):
    def __init__(self, hilbert_size, num_points, noise=None):
        super(Generator, self).__init__()
        self.initializer = nn.init.normal_

        ops = nn.Parameter(torch.empty(1, hilbert_size, hilbert_size, num_points * 2))
        inputs = torch.zeros((1, num_points), requires_grad=True)
        inputs = torch.nn.init.uniform_(inputs, a=0.0, b=1.0)

        # Dense layer mapping the measurement inputs to a 2x16x16 feature map
        layer = nn.Linear(num_points, 16 * 16 * 2, bias=False)
        init.normal_(layer.weight, mean=0.0, std=0.02)
        self.x = nn.Sequential(
            layer,
            nn.LeakyReLU(),
            nn.Unflatten(1, (2, 16, 16)),
        )
        self.conv_transpose_1 = nn.Sequential(
            nn.ConvTranspose2d(2, 64, kernel_size=4, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(64),
            nn.LeakyReLU(),
        )
        self.conv_transpose_2 = nn.Sequential(
            nn.ConvTranspose2d(64, 64, kernel_size=4, stride=1, padding=2, bias=False),
            nn.InstanceNorm2d(64),
            nn.LeakyReLU(),
        )
        self.conv_transpose_3 = nn.Sequential(
            nn.ConvTranspose2d(64, 32, kernel_size=4, stride=1, padding=1, bias=False),
        )
        self.conv_transpose_4 = nn.Sequential(
            nn.ConvTranspose2d(32, 2, kernel_size=4, stride=1, padding=2, bias=False),
        )
        self.density_matrix = DensityMatrix()
        self.expectation = Expectation()

    def forward(self, ops, inputs):
        x = self.x(inputs)
        x = self.conv_transpose_1(x)
        x = self.conv_transpose_2(x)
        x = self.conv_transpose_3(x)
        x = self.conv_transpose_4(x)
        x = self.density_matrix(x)
        complex_ops = convert_to_complex_ops(ops)
        prefactor = 1.0
        x = self.expectation(complex_ops, x, prefactor)
        return x
class Discriminator(nn.Module):
    def __init__(self, hilbert_size, num_points):
        super(Discriminator, self).__init__()
        initializer = nn.init.normal_
        self.inp = nn.Identity(num_points)
        self.tar = nn.Identity(num_points)
        self.ops = nn.Identity(hilbert_size, hilbert_size, num_points * 2)
        self.fc1 = nn.Linear(num_points * 2, 128)
        self.lrelu1 = nn.LeakyReLU()
        self.fc2 = nn.Linear(128, 128)
        self.lrelu2 = nn.LeakyReLU()
        self.fc3 = nn.Linear(128, 64)
        self.relu3 = nn.ReLU()
        self.fc4 = nn.Linear(64, 1)
        initializer(self.fc1.weight, mean=0.0, std=0.002)
        initializer(self.fc2.weight, mean=0.0, std=0.002)
        initializer(self.fc3.weight, mean=0.0, std=0.002)
        initializer(self.fc4.weight, mean=0.0, std=0.002)

    def forward(self, ops, inp, tar):
        x = torch.cat([inp, tar], dim=1)
        x = self.fc1(x)
        x = self.lrelu1(x)
        x = self.fc2(x)
        x = self.lrelu2(x)
        x = self.fc3(x)
        x = self.relu3(x)
        x = self.fc4(x)
        return x
def train_step(A, x):
    gen_output = generator([A, x])
    disc_real_output = discriminator([A, x, x])
    disc_generated_output = discriminator([A, x, gen_output])
    gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(
        disc_generated_output, gen_output, x, lam=lam
    )
    disc_loss = discriminator_loss(disc_real_output, disc_generated_output)
    generator.zero_grad()
    discriminator.zero_grad()
    gen_total_loss.backward(retain_graph=True)
    disc_loss.backward()
    generator_optimizer.step()
    discriminator_optimizer.step()
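(As an aside, in the PyTorch GAN examples I have seen, the two updates are usually done as two separate backward passes, with the generator output detached for the discriminator step, rather than one pass with retain_graph. A rough sketch of that pattern, assuming my generator_loss and discriminator_loss behave as above and using positional calls instead of lists; train_step_alt is just a name I made up:

def train_step_alt(A, x):
    gen_output = generator(A, x)

    # Discriminator step: detach gen_output so the generator graph
    # is not traversed (or needed again) by this backward pass
    disc_real = discriminator(A, x, x)
    disc_fake = discriminator(A, x, gen_output.detach())
    disc_loss = discriminator_loss(disc_real, disc_fake)
    discriminator_optimizer.zero_grad()
    disc_loss.backward()
    discriminator_optimizer.step()

    # Generator step: score the non-detached fake output
    disc_fake = discriminator(A, x, gen_output)
    gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(
        disc_fake, gen_output, x, lam=lam
    )
    generator_optimizer.zero_grad()
    gen_total_loss.backward()
    generator_optimizer.step()

I am not sure whether my retain_graph version accumulates the right gradients, but that is secondary to the errors below.)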
And:
generator = Generator(hilbert_size, num_measurements, noise=0.)
discriminator = Discriminator(hilbert_size, num_measurements)

density_layer_idx = None
for i, (name, layer) in enumerate(generator.named_modules()):
    if "density_matrix" in name:
        density_layer_idx = i
        break
model_dm = nn.Sequential(*list(generator.children())[:density_layer_idx + 1])

initial_learning_rate = 0.0002
decay_steps = 10000
decay_rate = 0.96
lam = 100.0
generator_optimizer = optim.Adam(generator.parameters(), lr=initial_learning_rate, betas=(0.5, 0.5))
discriminator_optimizer = optim.Adam(discriminator.parameters(), lr=initial_learning_rate, betas=(0.5, 0.5))
lr_scheduler_G = optim.lr_scheduler.StepLR(generator_optimizer, step_size=decay_steps, gamma=decay_rate)
lr_scheduler_D = optim.lr_scheduler.StepLR(discriminator_optimizer, step_size=decay_steps, gamma=decay_rate)

max_iterations = 1000
pbar = tqdm(range(max_iterations))
for i in pbar:
    train_step(A, x)
    density_matrix = model_dm([A, x])
    f = tf_fidelity(density_matrix, rho_tf)[-1]
    fidelities.append(f)
    pbar.set_description("Fidelity {} | Gen loss {} | L1 loss {} | Disc loss {}".format(f, loss.generator[-1], loss.l1[-1], loss.discriminator[-1]))
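One thing I am already unsure about is how I built model_dm: I take an index from named_modules() but then slice children(), and as far as I understand these enumerate different things, so the index may not line up. A small sketch of what I mean, using the generator defined above:

# named_modules() walks the whole module tree recursively, starting with
# the root Generator itself (index 0, empty name), while children() yields
# only the immediate submodules, so indices from one do not match the other:
for i, (name, m) in enumerate(generator.named_modules()):
    print(i, name, type(m).__name__)   # includes nested layers
for i, m in enumerate(generator.children()):
    print(i, type(m).__name__)         # only direct children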
I am including all of this code only to give context. The two problematic calls are:

- train_step(A, x)
- model_dm([A, x])
The first raises the following error:
In [81]: train_step(A,x)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[81], line 1
----> 1 train_step(A,x)
Cell In[10], line 313, in train_step(A, x)
266 def train_step(A, x):
267 """Takes one step of training for the full A matrix representing the
268 measurement operators and data x.
269
(...)
311 >> density_matrix = model_dm([A, x])
312 """
--> 313 gen_output = generator([A, x])
315 disc_real_output = discriminator([A, x, x])
316 disc_generated_output = discriminator([A, x, gen_output])
File ~/.virtualenvs/cgan/lib/python3.10/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
TypeError: Generator.forward() missing 1 required positional argument: 'inputs'
I really do not understand why: isn't inputs defined inside the Generator class as an argument to the forward function?
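To check my understanding of how arguments reach forward, I made this minimal sketch (a toy module, nothing from my real code):

import torch
import torch.nn as nn

class TwoInputs(nn.Module):
    def forward(self, ops, inputs):
        return ops.sum() + inputs.sum()

m = TwoInputs()
a, b = torch.randn(3), torch.randn(3)

m(a, b)    # fine: ops=a, inputs=b
m([a, b])  # TypeError: the whole list is bound to `ops`,
           # so `inputs` is missing, just like my error above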
The second raises the following error:
In [82]: model_dm([A, x])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[82], line 1
----> 1 model_dm([A, x])
File ~/.virtualenvs/cgan/lib/python3.10/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File ~/.virtualenvs/cgan/lib/python3.10/site-packages/torch/nn/modules/container.py:204, in Sequential.forward(self, input)
202 def forward(self, input):
203 for module in self:
--> 204 input = module(input)
205 return input
File ~/.virtualenvs/cgan/lib/python3.10/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File ~/.virtualenvs/cgan/lib/python3.10/site-packages/torch/nn/modules/container.py:204, in Sequential.forward(self, input)
202 def forward(self, input):
203 for module in self:
--> 204 input = module(input)
205 return input
File ~/.virtualenvs/cgan/lib/python3.10/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File ~/.virtualenvs/cgan/lib/python3.10/site-packages/torch/nn/modules/linear.py:114, in Linear.forward(self, input)
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
TypeError: linear(): argument 'input' (position 1) must be Tensor, not list
This one I also can't understand. What I want is to access the output of the specific layer density_matrix.
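For what it is worth, I guess nn.Sequential passes a single input through each module in turn, which would explain why my list reaches nn.Linear unchanged. The kind of access I am after could perhaps also be done with a forward hook instead of rebuilding a Sequential; a sketch of that idea (captured and save_output are names I made up, and I have not verified this works with my DensityMatrix layer):

# Capture the density_matrix output during a normal forward pass
captured = {}

def save_output(module, args, output):
    captured["density_matrix"] = output

hook = generator.density_matrix.register_forward_hook(save_output)
gen_output = generator(A, x)                # ordinary call
density_matrix = captured["density_matrix"]
hook.remove()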
If I replace gen_output = generator([A, x]) in the train_step function with gen_output = generator(A, x), then a different error is raised:
In [90]: gen_output = generator(A, x)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Cell In[90], line 1
----> 1 gen_output = generator(A, x)
File ~/.virtualenvs/cgan/lib/python3.10/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
Cell In[73], line 88, in Generator.forward(self, ops, inputs)
87 def forward(self, ops, inputs):
---> 88 x = self.x(inputs)
89 x = self.conv_transpose_1(x)
90 x = self.conv_transpose_2(x)
File ~/.virtualenvs/cgan/lib/python3.10/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File ~/.virtualenvs/cgan/lib/python3.10/site-packages/torch/nn/modules/container.py:204, in Sequential.forward(self, input)
202 def forward(self, input):
203 for module in self:
--> 204 input = module(input)
205 return input
File ~/.virtualenvs/cgan/lib/python3.10/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File ~/.virtualenvs/cgan/lib/python3.10/site-packages/torch/nn/modules/linear.py:114, in Linear.forward(self, input)
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
RuntimeError: expected scalar type Double but found Float
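My guess is that this last error is a dtype mismatch: x is probably float64 (e.g. built from a NumPy array) while the Linear weights are float32. A minimal sketch that reproduces the same message, with a hypothetical float64 input x64:

import numpy as np
import torch
import torch.nn as nn

layer = nn.Linear(4, 2)                       # weight dtype is float32
x64 = torch.from_numpy(np.random.rand(1, 4))  # float64 ("Double") tensor

# layer(x64)  # RuntimeError: expected scalar type Double but found Float
out = layer(x64.float())                      # casting the input to float32 works
# alternatively, layer.double() casts the weights to float64 instead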