I build the dataset with the function below; the rest of the code is uploaded exactly as it is.
# Instantiate the recurrent classifier: 1 input feature per timestep, hidden
# size 5, a single recurrent layer, and 4 output classes (consistent with the
# CrossEntropyLoss used in fit()).
# NOTE(review): GRU_Model and `device` are defined elsewhere in the project —
# confirm `device` is assigned before this module-level code runs.
model = GRU_Model(num_inputs=1, num_hidden=5, num_layers=1, num_outputs=4)
model.to(device)
def data_pep(df, feat_col_idx, target_col_idx, seq_len=100, stride=None):
    """Slice a 2-D array into fixed-length sequences with last-step targets.

    Walks over ``df`` in windows of ``seq_len`` rows, collecting the feature
    column(s) for the whole window and the target value at the window's last
    row (many-to-one sequence setup).

    Args:
        df: 2-D array-like (e.g. ``np.ndarray``) indexable as ``df[:, col]``.
        feat_col_idx: column index (or indices) of the input feature(s).
        target_col_idx: column index of the target.
        seq_len: number of timesteps per sequence window.
        stride: step between consecutive window starts. Defaults to
            ``seq_len`` (non-overlapping windows — the original behavior);
            pass a smaller value for overlapping windows.

    Returns:
        Tuple ``(X_out, y_out)`` of ``np.ndarray``s shaped
        ``(n_windows, seq_len, n_feats, 1)`` and ``(n_windows, 1, 1)``.
    """
    if stride is None:
        stride = seq_len  # preserve the original non-overlapping behavior
    X = df[:, feat_col_idx]
    y = df[:, target_col_idx]
    X_out = []
    y_out = []
    start_idx = 0
    end_idx = seq_len
    length = len(df)
    while end_idx <= length:
        # Feature window plus the target observed at the final timestep.
        X_out.append(X[start_idx:end_idx].reshape(seq_len, -1, 1))
        y_out.append(y[end_idx - 1].reshape(-1, 1))
        start_idx += stride
        end_idx += stride
    return np.array(X_out), np.array(y_out)
from torch.utils.data import DataLoader, Dataset
class CustomDataset(Dataset):
    """Minimal map-style dataset pairing feature windows with their targets."""

    def __init__(self, X, y):
        self.X = X  # sequence inputs
        self.y = y  # matching targets

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]
def train_one_epoch(model, train_loader, optimizer, device, criterion):
    """Train `model` for one epoch and return the mean per-sample train loss.

    Assumes `criterion` uses reduction='sum' (as set up in fit()), so dividing
    the accumulated loss by the dataset size gives a per-sample average.
    """
    model.train()
    running_loss_train = 0.0
    for inputs, labels in train_loader:
        # Cast, then move to whatever device the model lives on. The original
        # used torch.cuda.FloatTensor, which crashes on CPU-only machines and
        # silently ignored the `device` argument.
        inputs = inputs.float().to(device)
        labels = labels.long().to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss_train += loss.item()
    return running_loss_train / len(train_loader.dataset)
def validate(model, valid_loader, device, criterion):
    """Evaluate `model` and return (accuracy, mean per-sample loss).

    Accuracy counts argmax-over-classes hits; the loss average assumes the
    criterion uses reduction='sum' (consistent with train_one_epoch).
    """
    model.eval()
    correct = 0
    running_loss_val = 0.0
    with torch.no_grad():
        for inputs, labels in valid_loader:
            # Cast then move to `device` — the original hard-coded
            # torch.cuda.FloatTensor, which fails on CPU-only machines.
            inputs = inputs.float().to(device)
            labels = labels.long().to(device)
            outputs = model(inputs)
            running_loss_val += criterion(outputs, labels).item()
            pred = outputs.argmax(dim=1)
            correct += pred.eq(labels).sum().item()
    n = len(valid_loader.dataset)
    return correct / n, running_loss_val / n
def fit(model, train_loader, valid_loader, learning_rate, num_epochs):
    """Run the full training loop: Adam + exponential LR decay + early stop.

    Trains for up to `num_epochs`, printing per-epoch metrics, and stops
    early when the EarlyStopping callback reports no val-loss improvement.
    """
    criterion = nn.CrossEntropyLoss(reduction='sum')
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    es = EarlyStopping(mode='min', path='./x.pth', patience=10)
    model = model.to(device)
    scheduler = ExponentialLR(optimizer, gamma=0.1)

    for epoch in range(1, num_epochs + 1):
        train_loss = train_one_epoch(model, train_loader, optimizer, device, criterion)
        val_acc, val_loss = validate(model, valid_loader, device, criterion)
        scheduler.step()

        # Same output as print(..., sep=' | ') in the original.
        report = ' | '.join([
            f'Epoch {epoch:2}/{num_epochs}',
            f'train loss: {train_loss:.4f}',
            f'val loss: {val_loss:.4f}',
            f'val acc: {val_acc:.2%}',
        ])
        print(report)

        if es(val_loss, model):
            break