import torch
import torch.nn as nn
# Show the installed PyTorch version, then pick the GPU if one is present,
# otherwise fall back to the CPU.
print(torch.__version__)
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print(device)
class Net(nn.Module):
    """Minimal module wrapping a single LSTMCell (10 input -> 20 hidden features)."""

    def __init__(self):
        super(Net, self).__init__()
        self.rnn = nn.LSTMCell(10, 20)

    @torch.cuda.amp.autocast()
    def forward(self, input):
        """Run one LSTMCell step on `input` of shape (batch, 10).

        Returns the hidden state of shape (batch, 20).
        """
        # LSTMCell returns a (hidden_state, cell_state) pair; when no previous
        # state is passed, both default to zeros. Only the hidden state is used.
        # NOTE(review): some older PyTorch releases raised a dtype mismatch when
        # LSTMCell ran under autocast — confirm behavior on the target version.
        hx, cx = self.rnn(input)
        return hx
# Instantiate the LSTMCell model and run one mixed-precision forward pass.
model = Net().to(device)
input = torch.randn(3, 10).to(device)
# GradScaler is only needed for the backward/optimizer step; it is created but
# unused in this forward-only snippet.
scaler = torch.cuda.amp.GradScaler()
with torch.cuda.amp.autocast():
    output = model(input)
print(output.size())
The first snippet above fails with a runtime error when executed under autocast.
import torch
import torch.nn as nn
# Print the torch version and resolve the target device (GPU when available).
print(torch.__version__)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
print(device)
class Net(nn.Module):
    """Minimal module wrapping a single Linear layer (10 -> 20 features)."""

    def __init__(self):
        super(Net, self).__init__()
        # Kept the attribute name `rnn` for parity with the LSTMCell variant,
        # even though this layer is a plain Linear projection.
        self.rnn = nn.Linear(10, 20)

    @torch.cuda.amp.autocast()
    def forward(self, input):
        """Project `input` of shape (batch, 10) to shape (batch, 20)."""
        hx = self.rnn(input)
        return hx
# Build the Linear-based model and run one forward pass under autocast.
model = Net().to(device)
input = torch.randn(3, 10).to(device)
# scaler = torch.cuda.amp.GradScaler()  # not needed: this snippet never backprops
with torch.cuda.amp.autocast():
    output = model(input)
print(output.size())
How can the first piece of code (the LSTMCell variant) be fixed so that it runs correctly under `torch.cuda.amp.autocast`, given that the Linear variant works?