Use CUDA to train autoencoder

Hi guys,
I'm trying to use CUDA to train my autoencoder, but I get the error below. My code follows. Could someone help me figure out the problem?
AttributeError: 'numpy.ndarray' object has no attribute 'dim'

import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as data_utils

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

### Change dataset to tensor
train_target = torch.tensor(y_train.values.astype(np.float32)).to(device) 
train = torch.tensor(X_train.values.astype(np.float32)).to(device)
train_tensor = data_utils.TensorDataset(train, train_target)

train_loader = data_utils.DataLoader(dataset = train_tensor, batch_size = BATCH_SIZE, shuffle = True)

class AutoEncoder(nn.Module):
    def __init__(self):
        super(AutoEncoder, self).__init__()

        self.encoder = nn.Sequential(
            nn.Linear(1*input_layer, 256),
            nn.ReLU(True),
            nn.Linear(256, 64),
            nn.ReLU(True),
            nn.Linear(64, 16),
            nn.ReLU(True),
            nn.Linear(16, 2),   
        )
        self.decoder = nn.Sequential(
            nn.Linear(2, 16),
            nn.ReLU(True),
            nn.Linear(16, 64),
            nn.ReLU(True),
            nn.Linear(64, 256),
            nn.ReLU(True),
            nn.Linear(256, 1*input_layer),
            nn.Sigmoid(),       # compress to a range (0, 1)
        )

    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return encoded, decoded

autoencoder = AutoEncoder().to(device) 
params = list(autoencoder.parameters()) 
optimizer = torch.optim.SGD(params, lr=LR)
loss_func = nn.MSELoss()

for epoch in range(EPOCH):
    for step, (x, b_label) in enumerate(train_loader):
        batch_x = x.view(-1, 1*input_layer).cpu().numpy()   # converts the batch to numpy arrays;
        batch_y = x.view(-1, 1*input_layer).cpu().numpy()   # passing these to the model raises the AttributeError

        encoded, decoded = autoencoder(batch_x)

        loss = loss_func(decoded, batch_y)      
        optimizer.zero_grad()               
        loss.backward()                     
        optimizer.step()                    

        if step % 100 == 0:
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy())

You shouldn't convert the tensors to numpy like this:
batch_x = x.view(-1, 1*input_layer).cpu().numpy()
batch_y = x.view(-1, 1*input_layer).cpu().numpy()
nn.Linear expects a torch.Tensor, and the layer ends up calling .dim() on its input, which numpy arrays don't have; that is where the AttributeError comes from. Instead, write it as follows:
batch_x = x.view(-1, 1*input_layer).to(device)
batch_y = x.view(-1, 1*input_layer).to(device)
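
For completeness, here is a minimal sketch of the corrected training loop with everything kept on device. It reuses the autoencoder, train_loader, optimizer, loss_func, input_layer, and EPOCH names from your post, and prints the loss with loss.item(), since calling .numpy() on a CUDA tensor would raise a similar error:

for epoch in range(EPOCH):
    for step, (x, b_label) in enumerate(train_loader):
        # keep the batch as a torch.Tensor on the GPU (no .numpy())
        batch_x = x.view(-1, 1*input_layer).to(device)
        batch_y = x.view(-1, 1*input_layer).to(device)   # reconstruction target is the input itself

        encoded, decoded = autoencoder(batch_x)

        loss = loss_func(decoded, batch_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 100 == 0:
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.item())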

Thanks a lot!!! I just solved the problem.