I have the following code.
import torch
from torch.autograd import Variable
import torch.nn as nn
def conv3d(in_channels, out_channels, kernel_size = 4, stride = 2, padding = 1):
return nn.Conv3d(in_channels, out_channels, kernel_size = kernel_size, stride = stride, padding = padding, bias = True)
class G_encode(nn.Module):
def __init__(self):
super(G_encode, self).__init__()
self.model = nn.Sequential(
conv3d(3,128),
)
def forward(self,x):
print('G_encode Input =', x.size())
out = self.model(x)
print('G_encode Output =', out.size())
return out
x = Variable(torch.rand([1,3,1,64,64])).cuda()
model = G_encode().cuda()
out = model(x)
~
This code seems to be working fine when I remove .cuda(). However, it is giving RuntimeError: CUDNN_STATUS_BAD_PARAM with .cuda(). My cuda version is 8.0.61, and the nvidia driver version is 384.111.