ModuleAttributeError: 'Softmax' object has no attribute 'log_softmax'

Hi,
I am trying to train my network, but I get this error:
ModuleAttributeError: 'Softmax' object has no attribute 'log_softmax'

Does anyone know anything about this?

I suspect log_softmax is being called on an nn.Softmax() module rather than on a tensor.
Posting some code will help to point out the issue.
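For context, this error typically appears (depending on the PyTorch version) when an nn.Softmax module object, instead of a tensor, ends up being passed to something that calls log_softmax internally, such as nn.CrossEntropyLoss. A minimal sketch (hypothetical, not your code) that reproduces it:

import torch
import torch.nn as nn

logits = torch.randn(4, 2)
target = torch.randint(0, 2, (4,))
criterion = nn.CrossEntropyLoss()

bad_output = nn.Softmax(dim=1)            # a module, not a tensor
# criterion(bad_output, target)           # raises: 'Softmax' object has no attribute 'log_softmax'

good_output = nn.Softmax(dim=1)(logits)   # calling the module on a tensor returns a tensor
loss = criterion(logits, target)          # with CrossEntropyLoss you would usually pass raw logits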

Hi, thanks!
This is part of my code:
import torch
import torch.nn as nn
import torch.nn.functional as F

class attention_block(nn.Module):
    def __init__(self):
        super(attention_block, self).__init__()

        # conv_1_1 ... conv_5_3
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)

        # Global features and global score
        self.Global_1 = nn.Conv2d(512, 128, kernel_size=3, padding=1)
        self.Global_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.Global_features = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.Global_Score = nn.Conv2d(128, 2, kernel_size=1, padding=1)

        self.X1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.X2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.X3 = nn.Conv2d(256, 128, kernel_size=3, padding=1)
        self.X4 = nn.Conv2d(512, 128, kernel_size=3, padding=1)
        self.X5 = nn.Conv2d(512, 128, kernel_size=3, padding=1)

        # Deconvolution
        self.deconv5 = nn.ConvTranspose2d(128, 128, 5, 2)
        self.deconv4 = nn.ConvTranspose2d(128, 256, 5, 2)
        self.deconv3 = nn.ConvTranspose2d(256, 384, 5, 2)
        self.deconv2 = nn.ConvTranspose2d(384, 512, 5, 2)

        self.local_output = nn.Conv2d(512, 640, kernel_size=1, padding=1)
        self.local_score = nn.Conv2d(640, 2, kernel_size=1, padding=1)

        # Max pooling (kernel_size, stride)
        self.pool = nn.MaxPool2d(2, 1)
        self.averagepool = nn.AvgPool2d(128, 128, 3)

    def forward(self, x):
        out = F.relu(self.conv1_1(x))
        out = F.relu(self.conv1_2(out))
        out1 = self.pool(out)
        out = F.relu(self.conv2_1(out1))
        out = F.relu(self.conv2_2(out))
        out2 = self.pool(out)
        out = F.relu(self.conv3_1(out2))
        out = F.relu(self.conv3_2(out))
        out = F.relu(self.conv3_3(out))
        out3 = self.pool(out)
        out = F.relu(self.conv4_1(out3))
        out = F.relu(self.conv4_2(out))
        out = F.relu(self.conv4_3(out))
        out4 = self.pool(out)
        out = F.relu(self.conv5_1(out4))
        out = F.relu(self.conv5_2(out))
        out = F.relu(self.conv5_3(out))
        out5 = self.pool(out)

        out = F.relu(self.Global_1(out5))
        out = F.relu(self.Global_2(out))
        out = self.Global_features(out)
        Global_Score = self.Global_Score(out)

        # Local features and contrast layers
        X1 = F.relu(self.X1(out1))
        c1 = torch.subtract(X1, self.averagepool(X1))
        X2 = F.relu(self.X2(out2))
        c2 = torch.subtract(X2, self.averagepool(X2))
        X3 = F.relu(self.X3(out3))
        c3 = torch.subtract(X3, self.averagepool(X3))
        X4 = F.relu(self.X4(out4))
        c4 = torch.subtract(X4, self.averagepool(X4))
        X5 = F.relu(self.X5(out5))
        c5 = torch.subtract(X5, self.averagepool(X5))

        U5 = F.relu(self.deconv5(torch.cat((X5, c5), dim=3)))
        U4 = F.relu(self.deconv4(torch.cat((X4, c4, U5), dim=3)))
        U3 = F.relu(self.deconv3(torch.cat((X3, c3, U4), dim=3)))
        U2 = F.relu(self.deconv2(torch.cat((X2, c2, U3), dim=3)))
        local_output = self.local_output(torch.cat((X1, c1, U2), dim=3))

        Local_score = self.local_score(local_output)

        Score = Local_score + Global_Score
        Score = torch.reshape(Score, [-1, 2])
        Attention_map = torch.nn.Softmax(Score)
        return Attention_map

The last line seems incorrect: there you are constructing an nn.Softmax module, not applying softmax to Score, so the model returns a module object instead of a tensor.
You can either call Score.softmax(dim=...) or use F.softmax from torch.nn.functional.
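For example, the end of forward could look like this (a sketch; choose dim according to which axis holds the two classes, here the last one after the reshape):

Score = Local_score + Global_Score
Score = torch.reshape(Score, [-1, 2])
# Apply softmax over the class dimension instead of constructing an nn.Softmax module
Attention_map = F.softmax(Score, dim=1)   # or: Score.softmax(dim=1)
return Attention_map

Also note that if you train with nn.CrossEntropyLoss, you would return the raw Score instead, since that loss applies log_softmax internally, which is also the most likely place your original error was raised.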