TypeError: Train() takes 1 positional argument but 2 were given when trying to call model.eval() after loading model

After having trained and saved my model, I load it by calling
PATH = xyz
model_load = torch.load(PATH)
model_load.eval()

The following is how my class and functions are defined:

class DA_rnn(nn.Module):
** “”“da_rnn.”""**

** def init(self, X, y, T,**
** encoder_num_hidden,**
** decoder_num_hidden,**
** batch_size,**
** learning_rate,**
** epochs,**
** parallel=False):**
** “”“da_rnn initialization.”""**
** super(DA_rnn, self).init()**
** self.encoder_num_hidden = encoder_num_hidden**
** self.decoder_num_hidden = decoder_num_hidden**
** self.learning_rate = learning_rate**
** self.batch_size = batch_size**
** self.parallel = parallel**
** self.shuffle = False**
** self.epochs = epochs**
** self.T = T**
** self.X = X**
** self.y = y**

** self.device = torch.device(‘cuda:0’ if torch.cuda.is_available() else ‘cpu’)**
** print("==> Use accelerator: ", self.device)**

** self.Encoder = Encoder(input_size=X.shape[1],**
** encoder_num_hidden=encoder_num_hidden,**
** T=T).to(self.device)**
** self.Decoder = Decoder(encoder_num_hidden=encoder_num_hidden,**
** decoder_num_hidden=decoder_num_hidden,**
** T=T).to(self.device)**

** # Loss function**
** self.criterion = nn.MSELoss()**

** if self.parallel:**
** self.encoder = nn.DataParallel(self.encoder)**
** self.decoder = nn.DataParallel(self.decoder)**

** self.encoder_optimizer = optim.Adam(params=filter(lambda p: p.requires_grad,**
** self.Encoder.parameters()),**
** lr=self.learning_rate)**
** self.decoder_optimizer = optim.Adam(params=filter(lambda p: p.requires_grad,**
** self.Decoder.parameters()),**
** lr=self.learning_rate)**

** # Training set**
** self.train_timesteps = int(self.X.shape[0] * 0.7)**
** self.y = self.y - np.mean(self.y[:self.train_timesteps]) # Normalize**
** self.input_size = self.X.shape[1]**

**def train(self):**
    **"""training process."""**

** iter_per_epoch = int(np.ceil(self.train_timesteps * 1. / self.batch_size))**
** self.iter_losses = np.zeros(self.epochs * iter_per_epoch)**
** self.epoch_losses = np.zeros(self.epochs)**

** n_iter = 0**

** for epoch in range(self.epochs):**
** if self.shuffle:**
** ref_idx = np.random.permutation(self.train_timesteps - self.T)**
** else:**
** ref_idx = np.array(range(self.train_timesteps - self.T))**

** idx = 0**

** while (idx < self.train_timesteps):**
** # get the indices of X_train**
** indices = ref_idx[idx:(idx + self.batch_size)]**
** # x = np.zeros((self.T - 1, len(indices), self.input_size))**
** x = np.zeros((len(indices), self.T - 1, self.input_size))**
** y_prev = np.zeros((len(indices), self.T - 1))**
** y_gt = self.y[indices + self.T]**

** # format x into 3D tensor**
** for bs in range(len(indices)):**
** x[bs, :, :] = self.X[indices[bs]:(indices[bs] + self.T - 1), :]**
** y_prev[bs, :] = self.y[indices[bs]: (indices[bs] + self.T - 1)]**

** loss = self.train_forward(x, y_prev, y_gt)**
** self.iter_losses[int(epoch * iter_per_epoch + idx / self.batch_size)] = loss**

** idx += self.batch_size**
** n_iter += 1**

** if n_iter % 10000 == 0 and n_iter != 0:**
** for param_group in self.encoder_optimizer.param_groups:**
** param_group[‘lr’] = param_group[‘lr’] * 0.9**
** for param_group in self.decoder_optimizer.param_groups:**
** param_group[‘lr’] = param_group[‘lr’] * 0.9**

** self.epoch_losses[epoch] = np.mean(self.iter_losses[range(**
** epoch * iter_per_epoch, (epoch + 1) * iter_per_epoch)])**

** if epoch % 10 == 0:**
** print("Epochs: ", epoch, " Iterations: ", n_iter,**
** " Loss: ", self.epoch_losses[epoch])**

** if epoch % 10 == 0:**
** y_train_pred = self.test(on_train=True)**
** y_test_pred = self.test(on_train=False)**
** y_pred = np.concatenate((y_train_pred, y_test_pred))**
** plt.ioff()**
** plt.figure()**
** plt.plot(range(1, 1 + len(self.y)), self.y, label=“True”)**
** plt.plot(range(self.T, len(y_train_pred) + self.T),**
** y_train_pred, label=‘Predicted - Train’)**
** plt.plot(range(self.T + len(y_train_pred), len(self.y) + 1),**
** y_test_pred, label=‘Predicted - Test’)**
** plt.legend(loc=‘upper left’)**
** plt.show()**

** # # Save files in last iterations**
** # if epoch == self.epochs - 1:**
** # np.savetxt(’…/loss.txt’, np.array(self.epoch_losses), delimiter=’,’)**
** # np.savetxt(’…/y_pred.txt’,**
** # np.array(self.y_pred), delimiter=’,’)**
** # np.savetxt(’…/y_true.txt’,**
** # np.array(self.y_true), delimiter=’,’)**

and then it throws the following error:
“TypeError: train() takes 1 positional argument but 2 were given”

Could you repost the formatted code by wrapping it into three backticks ``` or by clicking on the “Preformatted text” button in the text window?
The current code is quite hard to read and also doesn’t seem to show the actual model.train() call.

1 Like