Autoencoder accuracy calculator positional argument error

I have this autoencoder:

import torch
import torch.nn as nn

class AE(nn.Module):
    def __init__(self, **kwargs):
        super(AE, self).__init__()
        # Encoder: input -> 128 -> 128-dimensional code
        self.encoder_hidden_layer = nn.Linear(
            in_features=kwargs["input_shape"], out_features=128
        )
        self.encoder_output_layer = nn.Linear(
            in_features=128, out_features=128
        )
        # Decoder: code -> 128 -> original input dimensionality
        self.decoder_hidden_layer = nn.Linear(
            in_features=128, out_features=128
        )
        self.decoder_output_layer = nn.Linear(
            in_features=128, out_features=kwargs["input_shape"]
        )

    def forward(self, features):
        activation = self.encoder_hidden_layer(features)
        activation = torch.relu(activation)
        code = self.encoder_output_layer(activation)
        code = torch.sigmoid(code)
        activation = self.decoder_hidden_layer(code)
        activation = torch.relu(activation)
        activation = self.decoder_output_layer(activation)
        reconstructed = torch.sigmoid(activation)
        return reconstructed

and I am trying to build an accuracy calculator:

with torch.no_grad():
    for batch_features in test_loader:
        batch_features = batch_features[0]
        test_examples = batch_features.view(-1, 784)
        reconstruction = model(test_examples)
        
        images, labels = data[0].to(device), data[1].to(device)
        print(labels)
        
        outputs_auto = AE(images.to(device))
        _, predicted_auto = torch.max(outputs_auto.data, 1)
        total += labels.size(0)
        correct += (predicted_auto == labels).sum().item()
        break
    print('Accuracy of the network on the 10000 test images: %d %%' % (
        100 * correct / total))

but it shows this error:

TypeError                                 Traceback (most recent call last)
<ipython-input-60-47c95a24e155> in <module>
     19         print(labels)
     20 
---> 21         outputs_auto = AE(images.to(device))
     22         _, predicted_auto = torch.max(outputs_auto.data, 1)
     23         total += labels.size(0)

TypeError: __init__() takes 1 positional argument but 2 were given

Here, you are calling the AE class itself with the input, so images.to(device) is passed as a positional argument to __init__, which accepts only keyword arguments (**kwargs); that is why you get the TypeError.
Instead, you should instantiate the AE class to create a model object, and then call that object; calling an nn.Module instance dispatches to its forward method.

AE_model = AE(input_shape=784)
AE_model.to(device)
...

with torch.no_grad():
    for batch_features in test_loader:
        ...
        outputs_auto = AE_model(images.to(device))
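For completeness, here is a minimal sketch of the corrected evaluation loop. It keeps your accuracy logic unchanged and assumes what your snippet implies: a test_loader that yields (images, labels) batches of MNIST digits, and correct/total counters, which your code never initializes.

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

AE_model = AE(input_shape=784)  # instantiate once, outside the loop
AE_model.to(device)
AE_model.eval()                 # switch to evaluation mode

correct = 0
total = 0

with torch.no_grad():
    for images, labels in test_loader:
        images = images.view(-1, 784).to(device)  # flatten 28x28 images
        labels = labels.to(device)

        # Call the instance, not the class: this runs AE.forward
        outputs_auto = AE_model(images)
        _, predicted_auto = torch.max(outputs_auto, 1)
        total += labels.size(0)
        correct += (predicted_auto == labels).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))

Two caveats: a freshly constructed AE_model has untrained weights, so if model in your question is the trained autoencoder, call that object instead. Also, an autoencoder outputs a 784-dimensional reconstruction rather than class scores, so comparing the torch.max(outputs_auto, 1) indices against labels will not give a meaningful classification accuracy; reconstruction error (e.g., MSE between input and output) is the usual test metric for an autoencoder.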