Local variable 'temp_for_transpose' referenced before assignment

Hello below is part of my code

class AttnDecoderRNN(nn.Module):
    def __init__(self,hidden_size,output_size,dropout_p=0.1,n_layers=1,max_length=MAX_LENGTH):
        super(AttnDecoderRNN,self).__init__()
        self.hidden_size=hidden_size
        self.output_size=output_size
        self.n_layers=n_layers
        self.dropout_p=dropout_p
        self.max_length=MAX_LENGTH
        self.embedding=nn.Embedding(self.output_size,self.hidden_size)
        self.attn=nn.Linear(self.hidden_size*(2*n_layers+1),self.hidden_size)
        self.attn_combine=nn.Linear(self.hidden_size*3,self.hidden_size)
        self.dropout=nn.Dropout(self.dropout_p)
        self.gru=nn.GRU(self.hidden_size,self.hidden_size,bidirectional=True,num_layers=self.n_layers,batch_first=True)
        self.out=nn.Linear(self.hidden_size*2,self.output_size)
    def forward(self,input,hidden,encoder_outputs):
        embedded=self.embedding(input)
        embedded=embedded[:,0,:]
        embedded=self.dropout(embedded)
       ====== temp_for_transpose=temp_for_transpose(hidden,0,1).contiguous()
       ====== temp_for_transpose=temp_for_transpose.view(temp_for_transpose.size()[0],-1)
       ======= hidden_attn=temp_for_transpose
        input_to_attention=torch.cat((embedded,hidden_attn),1)
        attn_weights=F.softmax(self.attn(input_to_attention))
        attn_weights=attn_weights[:,:encoder_outputs.size()[1]]
        attn_weights=attn_weights.unsqueeze(1)
        attn_applied=torch.bmm(attn_weights,encoder_outputs)
        output=torch.cat((embedded,attn_applied[:,0,:],1))
        output=self.attn_combine(output).unsqueeze(1)
        output=F.relu(output)
        output=self.dropout(output)
        output,hidden=self.gru(output,hidden)
        output=self.out(output[:,-1,:])
        output=F.log_softmax(output,dim=1)
        return output,hidden,attn_weights
    def initHidden(self,batch_size):
        result=Variable(torch.zeros(self.n_layers*2,batch_size,self.hidden_size)).cuda()
        return result
=======
num_epoch=100
plot_losses=[]
for epoch in range(num_epoch):
    #turn on dropout
    decoder.train()
    print_loss_total=0
    for data in train_loader:
        input_variable=Variable(data[0]).cuda()#input
        target_variable=Variable(data[1]).cuda()#target
        encoder.zero_grad()
        decoder.zero_grad()
        encoder_hidden=encoder.initHidden(data[0].size()[0])
        loss=0
        encoder_outputs,encoder_hidden=encoder(input_variable,encoder_hidden)
        decoder_input=Variable(torch.LongTensor([[SOS_token]]*target_variable.size()[0]))#two dimentsion
        decoder_input=decoder_input.cuda()
        decoder_hidden=encoder_hidden
        use_teacher_forcing=True if random.random()<teacher_forcing_ratio else False
        if use_teacher_forcing:
            for di in range(MAX_LENGTH):
                decoder_output,decoder_hidden,decoder_attention=decoder(decoder_input,decoder_hidden,encoder_outputs)
                loss+=criterion(decoder_output,target_variable[:di])
                decoder_input=target_variable[:di].unsqueeze(1)
        else:
            for di in range(MAX_LENGTH):
                decoder_output,decoder_hidden,decoder_attention=decoder(decoder_input,decoder_hidden,encoder_outputs)
                topv,topi=decoder_output.data.topk(1,dim=1)
                ni=topi[:,0]
                decoder_input=Variable(ni.unsqueeze(1))
                decoder_input=decoder_input.cuda()
                loss+=criterion(decoder_output,target_variable[:,di])
        loss.backward()
        loss=loss.data.cpu().numpy()
        encoder_optimizer.step()
        decoder_optimizer.step()
        print_loss_total+=loss.data.numpy()[0]
    loss_avg=print_loss_total/len(train_loader)
    valid_loss=0
    rights=[]
    decoder.eval()
    for data in valid_loader:
        input_variable=Variable(data[0]).cuda()
        target_variable=Variable(data[1]).cuda()
        encoder_hidden=encoder.initHidden(data[0].size()[0])
        loss=0
        encoder_outputs,encoder_hidden=encoder(input_variable,encoder_hidden)
        decoder_input=Variable(torch.LongTensor([[SOS_token]]*target_variable.size()[0]))
        decoder_input=decoder_input.cuda()
        decoder_hidden=encoder_hidden
        for di in range(MAX_LENGTH):
            decoder_output,decoder_hidden,decoder_attention=decoder(decoder_input,decoder,hidden,encoder_outputs)
            topv,topi=decoder_output.data.topk(1,dim1)
            ni=topi[:,0]
            decoder_input=Variable(ni.unsqueeze(1))
            decoder_input=decoder_input.cuda()
            right=rightness(decoder_output,target_variable[:,di])
            rights.append(right)
            loss+=criterion(decoder_output,target_variable[:di])
        loss=loss.data.cpu().numpy()[0]
        right_ratio=1.0*np.sum([i[0] for i in rights])/np.sum([i[1] for i in rights])
        print("{}epoch,train_loss{} ,valid_loss{}".format(loss_avg,valid_loss/len(valid_loader),100.0*right_ratio))
        plot_losses.append([loss_avg,valid_loss/len(valid_loader,right_ratio)])

UnboundLocalError                         Traceback (most recent call last)
<ipython-input-35-9fc3d09c8d81> in <module>
     24         else:
     25             for di in range(MAX_LENGTH):
---> 26                 decoder_output,decoder_hidden,decoder_attention=decoder(decoder_input,decoder_hidden,encoder_outputs)
     27                 topv,topi=decoder_output.data.topk(1,dim=1)
     28                 ni=topi[:,0]

c:\python36\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
    545             result = self._slow_forward(*input, **kwargs)
    546         else:
--> 547             result = self.forward(*input, **kwargs)
    548         for hook in self._forward_hooks.values():
    549             hook_result = hook(self, input, result)

<ipython-input-33-5f5aac3c1ec1> in forward(self, input, hidden, encoder_outputs)
     17         embedded=embedded[:,0,:]
     18         embedded=self.dropout(embedded)
---> 19         temp_for_transpose=temp_for_transpose(hidden,0,1).contiguous()
     20         temp_for_transpose=temp_for_transpose.view(temp_for_transpose.size()[0],-1)
     21         hidden_attn=temp_for_transpose

error:local variable ‘temp_for_transpose’ referenced before assignment
how can i do?

As the error message explains, you cannot use a variable before it was created.
Usually you would initialize it with a start values and reassign the desired value afterwards.

That being said, it also seems as if you would like to use temp_for_tranpose as a method, since you are calling it:

temp_for_transpose(hidden,0,1).contiguous()

which will raise another error, since you cannot “call” a tensor.

Could you explain, what this line of code should do? :slight_smile:

I see the explain of contiguous() method some of methods are depend on continuous Tensor Elements,view method is one of them ,so in this case,I use contiguous to make sure that some of them would not cause error.
Does it have other way to change?

The contiguous() call on a tensor should be fine.
I was talking about “calling” the tensor as a method: temp_for_transpose(hidden,0,1), which would raise an error. If you want to index it, you should use temp_for_transpose[hidden,0,1].

Okay,I will try it thank you for your suggestion