Dimension error: What dimensions does your input have to be?

I am trying to implement a variational autoencoder, but I seem to have trouble understanding the input dimensions of the batches.
I have an array of 750 x 800 entries that goes row-wise (arrays of 800 values) into the autoencoder.
The output is 750 x 1200, so each 800-value input row has to be mapped to a 1200-value output row.
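To make the shapes concrete, here is roughly what I mean, with random placeholder arrays standing in for the actual files:

import numpy as np
inputs = np.random.rand(750, 800)    # placeholder for the real input array
outputs = np.random.rand(750, 1200)  # placeholder for the real output array
# row i of inputs (800 values) should map to row i of outputs (1200 values)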

If anyone has resources on bridging data preparation and the input to a neural net, I would be happy to read them!

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader, TensorDataset, IterableDataset
import os
from torch import optim
from model import Network

path = os.getcwd()

inputs = np.load(path + '/data/in1a.npy')
outputs = np.load(path + '/data/out1a.npy')

batch_size = 400

inputs_0 = torch.Tensor(inputs[0,:])
inputs_1 = torch.Tensor(inputs[1,:])

outputs_0 = torch.Tensor(outputs[0,:])
outputs_1 = torch.Tensor(outputs[1,:])
outputs_2 = torch.Tensor(outputs[2,:])


inputs_tensor = torch.Tensor(inputs)

dataset_in = TensorDataset(inputs_0, inputs_1)
dataset_out = TensorDataset(outputs_0, outputs_1, outputs_2)

dataloader_in = DataLoader(dataset_in, batch_size=batch_size, shuffle=False)
dataloader_out = DataLoader(dataset_out, batch_size=batch_size, shuffle=False)

number_batches_in = int(len(dataset_in)/batch_size)
number_batches_out = int(len(dataset_out)/batch_size)

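# stitch the pieces from each DataLoader batch back together into one full input row per index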
x = torch.empty(size=(number_batches_in, 800))
for index, (x1, x2) in enumerate(dataloader_in):
    batch = torch.cat((x1, x2), 0)
    x[index] = batch
#print(x[1])

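# same maneuver for the outputs: concatenate the three pieces into one full target row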
y = torch.empty(size=(number_batches_out,1200))
for index, (y1, y2, y3) in enumerate(dataloader_out):
    batch = torch.cat((y1, y2, y3), 0)
    y[index] = batch


model = Network(800, 1200, 3, 200)
SAVE_PATH = "trained/model.dat"
epochs = 5
learning_rate = 0.001
optimizer = optim.Adam(model.parameters(), lr=learning_rate, eps=1e-08)
hist_error = []
hist_loss = []
beta = 0.5

for epoch in range(epochs):
    epoch_error = []
    epoch_loss = []
    for i in range(len(x)):
        optimizer.zero_grad()
        pred = model.forward(i)
        loss = torch.mean(torch.sum((pred - y[i]) ** 2))
        loss.backward()
        optimizer.step()
        error = torch.mean(torch.sqrt((pred - y[i]) ** 2)).detach().numpy()
        epoch_error.append(error)
        epoch_loss.append(loss.data.detach().numpy())
    hist_error.append(np.mean(epoch_error))
    hist_loss.append(np.mean(epoch_loss))
    print("Epoch %d -- loss %f, RMS error %f " % (epoch+1, hist_loss[-1], hist_error[-1]))
torch.save(model.state_dict(), SAVE_PATH)
print("Model saved to %s" % SAVE_PATH)

and the model:

import torch
import torch.nn as nn
import torch.nn.functional as F


class Network(nn.Module):
    def __init__(self, input_dim, output_dim, latent_dim, layer_dim):
        """
        Parameters:
        input_dim (int): number of inputs
        output_dim (int): number of outputs
        latent_dim (int): number of latent neurons
        layer_dim (int): number of neurons in the hidden layers
        """
        super(Network, self).__init__()
        self.latent_dim = latent_dim

        self.enc1 = nn.Linear(input_dim, layer_dim)
        self.enc2 = nn.Linear(layer_dim, layer_dim)

        self.latent = nn.Linear(layer_dim, latent_dim*2)

        self.dec1 = nn.Linear(latent_dim, layer_dim)
        self.dec2 = nn.Linear(layer_dim, layer_dim)

        self.out = nn.Linear(layer_dim, output_dim)

    def encoder(self, x):
        z = F.elu(self.enc1(x))
        z = F.elu(self.enc2(x))
        z = self.latent(z)
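        # the first half of the latent projection is the mean, the second half is log(sigma)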
        self.mu = z[0:self.latent_dim]
        self.log_sigma = z[self.latent_dim:]
        self.sigma = torch.exp(self.log_sigma)

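        # reparameterization trick: sample z = mu + sigma * eps with eps drawn from N(0, 1)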
        eps = torch.randn(x.size(0), self.latent_dim)
        z_sample = self.mu + self.sigma*eps

        self.kl_loss = kl_divergence(self.mu, self.log_sigma, dim=self.latent_dim)

        return z_sample

    def decoder(self, z):
        x = F.elu(self.dec1(z))
        x = F.elu(self.dec2(x))
        return self.out(x)

    def forward(self, batch):
        self.latent_rep = self.encoder(batch)
        dec_input = self.latent_rep
        return self.decoder(dec_input)


def kl_divergence(means, log_sigma, dim, target_sigma=0.1):
    """
    Computes the Kullback–Leibler divergence between Gaussians with the given means and log(sigma) and a zero-mean prior with standard deviation target_sigma
    """
    target_sigma = torch.Tensor([target_sigma])
    out = 1 / 2. * torch.mean(torch.mean(1 / target_sigma**2 * means**2 + torch.exp(2 * log_sigma) / target_sigma**2 - 2 * log_sigma + 2 * torch.log(target_sigma), dim=1) - dim)
    return out
This is the traceback I get:

Traceback (most recent call last):
  File "/home/samim/miniconda3/envs/deep/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 3343, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "<ipython-input-2-54864ad18480>", line 1, in <module>
    runfile('/home/samim/Documents/train.py', wdir='/home/samim/Documents/')
  File "/home/samim/.local/share/JetBrains/PyCharm2020.3/python/helpers/pydev/_pydev_bundle/pydev_umd.py", line 197, in runfile
    pydev_imports.execfile(filename, global_vars, local_vars)  # execute the script
  File "/home/samim/.local/share/JetBrains/PyCharm2020.3/python/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
  File "/home/samim/Documents/train.py", line 73, in <module>
    pred = model.forward(i)
  File "/home/samim/Documents/model.py", line 50, in forward
    self.latent_rep = self.encoder(batch)
  File "/home/samim/Documents/model.py", line 29, in encoder
    z = F.elu(self.enc1(x))
  File "/home/samim/miniconda3/envs/deep/lib/python3.6/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/samim/miniconda3/envs/deep/lib/python3.6/site-packages/torch/nn/modules/linear.py", line 93, in forward
    return F.linear(input, self.weight, self.bias)
  File "/home/samim/miniconda3/envs/deep/lib/python3.6/site-packages/torch/nn/functional.py", line 1688, in linear
    if input.dim() == 2 and bias is not None:
AttributeError: 'int' object has no attribute 'dim'

Your code passes an integer to the model, not a Tensor, and the training loop does not actually use the DataLoader or Dataset you built. I think this is what causes the error.
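As a minimal sketch of that one change (assuming the rest of your script stays as posted, so x[i] is an 800-value row and y[i] a 1200-value row):

for i in range(len(x)):
    optimizer.zero_grad()
    pred = model(x[i])  # pass the Tensor row x[i], not the plain integer i
    loss = torch.mean(torch.sum((pred - y[i]) ** 2))
    loss.backward()
    optimizer.step()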

For clarification:
I used the DataLoader to bring the data into the shape I needed.
Before, I had a 2 x 400 numpy array for the input and a 3 x 400 array for the output. I flattened those into tensors of size 800 and 1200 before assembling them into tensors of 750 x 800 and 750 x 1200, respectively.

The input to the training loop is then each row of size 800. Because I couldn't get the DataLoader to handle two rows for the input and three rows for the output at the same time, I did this little maneuver.
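What I was hoping the DataLoader could do in one go is something like this (a sketch, assuming inputs and outputs are already the 750 x 800 and 750 x 1200 arrays described at the top):

inputs_tensor = torch.Tensor(inputs)    # shape (750, 800)
outputs_tensor = torch.Tensor(outputs)  # shape (750, 1200)
dataset = TensorDataset(inputs_tensor, outputs_tensor)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
for x_batch, y_batch in loader:
    print(x_batch.shape, y_batch.shape)  # (batch_size, 800) and (batch_size, 1200)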

Edit:
Yes, thank you! I had changed the training loop to iterate over range(len(x)); it worked once I went back to passing the tensor itself to the model.