Given transposed=1, weight of size 128 1 4 4, expected input[64, 64, 13, 13] to have 128 channels, but got 64 channels instead

Hello, below is part of my code:

 image_size=28
input_dim=100
num_channels=1
num_features=64
batch_size=64

class ModelG(nn.Module):
    """DCGAN-style generator: maps a (N, input_dim, 1, 1) input tensor to a
    (N, num_channels, 28, 28) image with values in [0, 1] (Sigmoid output).

    Spatial sizes with these kernels/strides (no padding): 1 -> 5 -> 13 -> 28.

    Args:
        input_dim: channel count of the latent input (default 100).
        num_features: base channel width of the deconv stack (default 64).
        num_channels: channel count of the generated image (default 1).
    """
    def __init__(self, input_dim=100, num_features=64, num_channels=1):
        super(ModelG, self).__init__()
        self.model = nn.Sequential()
        # 1x1 -> 5x5
        self.model.add_module("deconv1", nn.ConvTranspose2d(input_dim, num_features*2, 5, 2, 0, bias=False))
        self.model.add_module("batch_norm1", nn.BatchNorm2d(num_features*2))
        self.model.add_module("relu1", nn.ReLU())
        # 5x5 -> 13x13  (kernel, stride, padding)
        self.model.add_module("deconv2", nn.ConvTranspose2d(num_features*2, num_features, 5, 2, 0, bias=False))
        self.model.add_module("batch_norm2", nn.BatchNorm2d(num_features))
        self.model.add_module("relu2", nn.ReLU())
        # 13x13 -> 28x28.
        # BUG FIX: deconv2 outputs num_features channels, so deconv3 must take
        # num_features (not 2*num_features) as in_channels.  The old mismatch is
        # exactly the reported error: "weight of size 128 1 4 4, expected input
        # [64, 64, 13, 13] to have 128 channels, but got 64 channels instead".
        self.model.add_module("deconv3", nn.ConvTranspose2d(num_features, num_channels, 4, 2, 0, bias=False))
        self.model.add_module("sigmoid", nn.Sigmoid())

    def forward(self, input):
        # nn.Sequential already applies its children in registration order,
        # so there is no need to iterate named_children() manually.
        return self.model(input)
def weight_init(m):
    """DCGAN-style weight initialization, intended for net.apply(weight_init).

    BUG FIX: PyTorch class names are CamelCase ("Conv2d", "ConvTranspose2d",
    "BatchNorm2d"), so the original lowercase substring tests find("conv") /
    find("norm") never matched anything and the whole function was a silent
    no-op.  Lower-case the class name first so the substring tests work.
    """
    class_name = m.__class__.__name__.lower()
    if class_name.find("conv") != -1:
        m.weight.data.normal_(0, 0.02)   # mean 0, std 0.02
    if class_name.find("norm") != -1:
        m.weight.data.normal_(0.5, 0.2)  # mean, std

=====
# Deconvolution (generator) training loop.
# Relies on names defined elsewhere in the script: train_loader,
# validation_loader, net, criterion, optimizer, dtype, itype, samples,
# transform, vutil, np, Variable, input_dim, batch_size.
step = 0
num_epochs = 10
record = []  # one [mean train loss, mean val loss] pair per evaluation point
for epoch in range(num_epochs):
    train_loss = []
    for batch_idx, (data, target) in enumerate(train_loader):
        # NOTE(review): the loader yields (image, label); the swap below is
        # deliberate — the image becomes the regression target and the label
        # is expanded into the generator's (N, input_dim, 1, 1) input.
        target, data = Variable(data).cuda(), Variable(target).cuda()
        data = data.type(dtype)
        # BUG FIX: removed the stray "----->" highlight markers (syntax
        # errors) and replaced the deprecated Tensor.resize() with view().
        data = data.view(data.size(0), 1, 1, 1)
        data = data.expand(data.size(0), input_dim, 1, 1)
        net.train()
        output = net(data)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        step += 1
        train_loss.append(loss.data.cpu().numpy())
        if step % 100 == 0:
            net.eval()
            val_loss = []
            for data, target in validation_loader:
                target, data = Variable(data).cuda(), Variable(target).cuda()
                data = data.type(itype)
                data = data.view(data.size(0), 1, 1, 1)
                data = data.expand(data.size(0), input_dim, 1, 1)
                output = net(data)
                # BUG FIX: the original never evaluated a validation loss —
                # it reused the stale training loss (by then already a numpy
                # array, so .data.cpu() would have crashed anyway).
                loss = criterion(output, target)
                val_loss.append(loss.data.cpu().numpy())
            # BUG FIX: the old message said "train_accuracy" but printed the
            # epoch progress percentage; label it honestly and report losses.
            print("epoch {}: progress {:.1f}%, train_loss {:.4f}, val_loss {:.4f}".format(
                epoch, 100. * batch_idx / len(train_loader),
                float(np.mean(train_loss)), float(np.mean(val_loss))))
            record.append([np.mean(train_loss), np.mean(val_loss)])
        # BUG FIX: the old samples.data.resize(...) discarded its return value
        # (a no-op); reshape and expand in one expression instead.
        # NOTE(review): `samples` is presumably a fixed noise/label tensor
        # defined elsewhere — confirm it holds batch_size elements.
        samples = Variable(samples.data.view(batch_size, 1, 1, 1).expand(batch_size, input_dim, 1, 1))
        samples = samples.cuda()
        fake_u = net(samples).cuda()
        img = transform(fake_u)
        # BUG FIX: "fake%.png" is an invalid format spec (ValueError at
        # runtime); use %d for the epoch number.  Note this still overwrites
        # the same file on every batch of the epoch.
        vutil.save_image(img, "temp1/fake%d.png" % epoch)

error raise:Given transposed=1, weight of size 128 1 4 4, expected input[64, 64, 13, 13] to have 128 channels, but got 64 channels instead
How can I modify this?
I pass input_dim as the first argument in nn.ConvTranspose2d(input_dim, num_features*2, 5, 2, 0, bias=False),
and I expect the output's channel dimension to be given by the second argument — is that not correct?

It seems deconv3 is declared with in_channels=2*num_features, while deconv2 outputs only num_features channels; that mismatch is exactly what raises this error.