import torch
import torch.nn as nn

class DenseLayer(nn.Module):
    def __init__(self, in_channels, growth_rate):
        super(DenseLayer, self).__init__()
        # BN -> ReLU -> Conv -> Dropout, producing growth_rate new feature maps
        self.norm = nn.BatchNorm2d(in_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(in_channels, growth_rate, kernel_size=3, stride=1, padding=1, bias=True)
        self.drop = nn.Dropout2d(p=0.2)

    def forward(self, x):
        x = self.norm(x)
        x = self.relu(x)
        x = self.conv(x)
        x = self.drop(x)
        return x
class DenseBlock(nn.Module):
    def __init__(self, in_channels, growth_rate, n_layers, upsample=False):
        super(DenseBlock, self).__init__()
        self.upsample = upsample
        # Each layer sees the block input plus all feature maps produced so far
        self.layers = nn.ModuleList([DenseLayer(in_channels + i * growth_rate, growth_rate)
                                     for i in range(n_layers)])

    def forward(self, x):
        if self.upsample:
            # Return only the newly generated features (n_layers * growth_rate channels)
            new_features = []
            for layer in self.layers:
                out = layer(x)
                x = torch.cat([x, out], 1)
                new_features.append(out)
            return torch.cat(new_features, 1)
        else:
            # Return the input concatenated with all new features
            for layer in self.layers:
                out = layer(x)
                x = torch.cat([x, out], 1)  # 1 = channel axis
            return x
class Bottleneck(nn.Module):
    def __init__(self, in_channels, growth_rate, n_layers):
        super(Bottleneck, self).__init__()
        self.bottleneck = DenseBlock(in_channels, growth_rate, n_layers, upsample=True)

    def forward(self, x):
        return self.bottleneck(x)
Above is the code I am currently using. When I call the Bottleneck module, I get this error:

RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu! (when checking arugment for argument weight in method wrapper_cudnn_batch_norm)
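From the error text, the BatchNorm weight (a module parameter) is on one device while the input tensor is on the other, i.e. the module was never moved to the GPU (or the input was left on the CPU). Below is a minimal sketch of a call site that keeps both on the same device; the channel counts, tensor shape, and device selection are illustrative assumptions, not taken from the original post:

# Hypothetical call site; sizes are arbitrary and chosen only for illustration.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# .to(device) moves all parameters and buffers (including BatchNorm weights).
bottleneck = Bottleneck(in_channels=64, growth_rate=16, n_layers=4).to(device)

# Create the input directly on the same device as the module.
x = torch.randn(2, 64, 32, 32, device=device)

out = bottleneck(x)
print(out.shape)  # torch.Size([2, 64, 32, 32]): n_layers * growth_rate = 64 channels

Note that with upsample=True the block returns only the concatenated new features, so the output has n_layers * growth_rate channels regardless of in_channels, and the spatial size is preserved by the 3x3 stride-1, padding-1 convolutions.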