Hello, I recently started learning PyTorch and have been playing around with regression.
For that I built a simple neural network, which I am training on a dataset of roughly 780,000 samples with 34 features each.
Since that is a lot of data, I wanted to use my GPU (an NVIDIA GTX 1050 Ti) instead of the CPU for training.
However, training still seems quite slow, and when I checked GPU usage in the Windows Task Manager it was basically stuck at 5%.
I would like to know whether it is possible to increase my GPU usage so that training runs faster, and if so, how.
Thank you in advance. Here is my code in case anything needs to be checked:
import torch
from torch import nn, optim
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self, input_size):
        super().__init__()
        # Fully connected layers narrowing from the input size down to a single output
        self.input = nn.Linear(input_size, 256)
        self.l1 = nn.Linear(256, 128)
        self.l2 = nn.Linear(128, 64)
        self.l3 = nn.Linear(64, 32)
        self.l4 = nn.Linear(32, 16)
        self.l5 = nn.Linear(16, 8)
        self.l6 = nn.Linear(8, 4)
        self.l7 = nn.Linear(4, 2)
        self.output = nn.Linear(2, 1)
        self.dropout = nn.Dropout(p=0.2)

    def forward(self, x):
        # ReLU + dropout after every hidden layer, linear output for regression
        x = self.dropout(F.relu(self.input(x)))
        x = self.dropout(F.relu(self.l1(x)))
        x = self.dropout(F.relu(self.l2(x)))
        x = self.dropout(F.relu(self.l3(x)))
        x = self.dropout(F.relu(self.l4(x)))
        x = self.dropout(F.relu(self.l5(x)))
        x = self.dropout(F.relu(self.l6(x)))
        x = self.dropout(F.relu(self.l7(x)))
        return self.output(x)
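# Quick sanity check (illustrative only, not part of the original script): with the
# 34 features mentioned above, the model maps a (batch, 34) tensor to a (batch, 1)
# prediction, so the targets passed to MSELoss should also be shaped (batch, 1).
_sanity = Model(34)(torch.randn(8, 34))
print(_sanity.shape)  # torch.Size([8, 1])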
input_dim = train.shape[1]
model = Model(input_dim)
optimizer = optim.SGD(model.parameters(), lr=0.003)
criterion = nn.MSELoss()

# Use the GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

epochs = 1
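# train_loader isn't defined above, so for completeness this is roughly how it is
# built. This is an illustrative sketch, not my exact script: `train_targets` and the
# batch size are placeholders, and `train` is assumed to be a NumPy array (or tensor).
from torch.utils.data import DataLoader, TensorDataset

train_dataset = TensorDataset(
    torch.as_tensor(train, dtype=torch.float32),                          # 34 features per sample
    torch.as_tensor(train_targets, dtype=torch.float32).reshape(-1, 1),   # regression target as (N, 1)
)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)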
for e in range(epochs):
    steps = 0
    for inputs, values in train_loader:
        steps += 1
        # Move the batch to the same device as the model
        inputs, values = inputs.to(device), values.to(device)

        optimizer.zero_grad()
        output = model(inputs)  # call the module directly instead of model.forward()
        loss = criterion(output, values)
        l = loss.item()
        loss.backward()
        optimizer.step()

        if steps % 100000 == 0:
            print(steps)

    print("Epoch ", e, "MSE: ", l)