Hi, I was excited to see PyTorch announcing support for complex numbers, including the statement that
PyTorch supports autograd for complex tensors. The autograd APIs can be used for both holomorphic and non-holomorphic functions.
But, then to test this out I tried something which should be super simple:
import torch
class ModuleWithComplexLayer(torch.nn.Module):
def __init__(self):
super(ModuleWithComplexLayer, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=3, out_channels=5,
kernel_size=(4, 4), stride=(2, 2))
self.dense_real = torch.nn.Linear(5*9*9, 5*9*9)
self.dense_imag = torch.nn.Linear(5*9*9, 5*9*9)
self.deconv1 = torch.nn.ConvTranspose2d(in_channels=5, out_channels=3,
kernel_size=(4, 4), stride=(2, 2))
def forward(self, x):
x = self.conv1(x)
x = torch.abs(self.dense_real(x.view((-1, 5*9*9))) +
1j*self.dense_imag(x.view((-1, 5*9*9)))) # <-- doesn't work
# x = self.dense_real(x.view((-1, 5*9*9))) # <-- works
x = x.view((-1, 5, 9, 9))
return self.deconv1(x)
torch_device = torch.device('cuda:0')
dummy_data = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(
torch.randn([10, 3, 20, 20]).to(torch_device)), batch_size=5)
#Instantiate the model
model = ModuleWithComplexLayer().to(torch_device)
#Loss function
loss_object = torch.nn.MSELoss()
#Optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
EPOCHS = 5
for epoch in range(EPOCHS):
for images in dummy_data:
optimizer.zero_grad()
outputs = model(images[0])
loss = loss_object(outputs, images[0])
loss.backward()
optimizer.step()
print('Epoch {}, Loss: {}'.format(epoch+1, loss.item()))
But this throws the following autograd error:
Traceback (most recent call last):
File "complex_number_error_example.py", line 43, in <module>
loss.backward()
File "/home/me/miniconda3/envs/PyTorch1.6+SciComputing/lib/python3.6/site-packages/torch/tensor.py", line 185, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/home/me/miniconda3/envs/PyTorch1.6+SciComputing/lib/python3.6/site-packages/torch/autograd/__init__.py", line 127, in backward
allow_unreachable=True) # allow_unreachable flag
RuntimeError: "sign_cuda" not implemented for 'ComplexFloat'
Exception raised from operator() at /opt/conda/conda-bld/pytorch_1595629427286/work/aten/src/ATen/native/cuda/UnarySignKernels.cu:44 (most recent call first):
... and then some more scary-looking errors form .so files...
What am I missing here? It would appear that autograd does not support complex numbers.