Linear regression and multi-variable linear regression

Hi there,
I'm trying to do linear regression with multiple variables.

import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from sklearn.preprocessing import PolynomialFeatures

train_data = np.array([
    [40, 6, 4],
    [44, 10, 4],
    [46, 12, 5],
    [48, 14, 7],
    [52, 16, 9],
    [58, 18, 12],
    [60, 22, 14],
    [68, 24, 20],
    [74, 26, 21],
    [80, 32, 24]])

test_data = np.array([
    [6, 4],
    [10, 5],
    [4, 8]])

x_train = train_data[:,1:3]
y_train = train_data[:,0]

POLY_DEGREE = 3
input_size = 2
output_size = 1

poly = PolynomialFeatures(input_size * POLY_DEGREE, include_bias=False)

x_train_poly = poly.fit_transform(x_train)

class LinearRegression(nn.Module):
    def __init__(self, input_size, output_size):
        super(LinearRegression, self).__init__()
        self.linear = nn.Linear(input_size, output_size)

    def forward(self, x):
        out = self.linear(x)
        return out

model = LinearRegression(input_size, output_size)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
losses = []

for epoch in range(50):
    inputs = Variable(torch.from_numpy(x_train))
    targets = Variable(torch.from_numpy(y_train))

    # Forward + backward + optimize
    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, targets)
    losses.append(loss.data[0])
    loss.backward()
    optimizer.step()

==============================================
The error is the following:

TypeError: addmm_ received an invalid combination of arguments - got (int, int, torch.LongTensor, torch.FloatTensor), but expected one of:

  • (torch.LongTensor mat1, torch.LongTensor mat2)
  • (torch.SparseLongTensor mat1, torch.LongTensor mat2)
  • (int beta, torch.LongTensor mat1, torch.LongTensor mat2)
  • (int alpha, torch.LongTensor mat1, torch.LongTensor mat2)
  • (int beta, torch.SparseLongTensor mat1, torch.LongTensor mat2)
  • (int alpha, torch.SparseLongTensor mat1, torch.LongTensor mat2)
  • (int beta, int alpha, torch.LongTensor mat1, torch.LongTensor mat2)
  • (int beta, int alpha, torch.SparseLongTensor mat1, torch.LongTensor mat2)

As the error says, you are not converting your inputs from a LongTensor to a FloatTensor: torch.from_numpy on an int64 NumPy array (np.int64) gives you a LongTensor, while the model's weights are FloatTensors.
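
For example, casting right after torch.from_numpy (a minimal sketch applied to the training loop above) should clear the error:

inputs = Variable(torch.from_numpy(x_train).float())
targets = Variable(torch.from_numpy(y_train).float())

Equivalently, you can convert the NumPy arrays first with x_train.astype(np.float32) before calling torch.from_numpy.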

I would love to ask a question on top of this!
Question: I am trying to train a simple linear regression on the popular Boston housing dataset as a way to get to know PyTorch.
This is my code:

from sklearn.datasets import load_boston
import torch
import pandas as pd
import matplotlib.pyplot as plt
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
import numpy as np


boston = load_boston()

data = pd.DataFrame(boston['data'], columns=boston['feature_names'])

target = pd.Series(boston['target'])

data.shape, target.shape

dataA = Variable(torch.from_numpy(data.values).float())
y = Variable(torch.from_numpy(target.values).float())

linear = nn.Linear(data.shape[1], 1)

criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)
loss2 = []


for i in range(5):
    optimizer.zero_grad()
    outputs = linear(dataA)
    
    loss = criterion(outputs, y)
    loss2.append(loss.data[0])
    loss.backward()        

    optimizer.step()
    
plt.plot(range(5), loss2)
plt.show()

print(loss2)

The issue is that the loss keeps going up. Can anyone please look through this and help me figure out what I should do differently?
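
One likely culprit, assuming the code above is exactly what you ran: the Boston features are on very different scales, so plain SGD with lr=0.01 easily diverges, and outputs has shape (N, 1) while y has shape (N,), so the loss is not comparing element to element the way you intend. A minimal sketch of a fix, standardizing the inputs and reshaping the target:

from sklearn.preprocessing import StandardScaler

# Standardize features so SGD with lr=0.01 no longer diverges
scaler = StandardScaler()
X = Variable(torch.from_numpy(scaler.fit_transform(data.values)).float())
# Reshape the target to (N, 1) so it matches the model output
y = Variable(torch.from_numpy(target.values).float().unsqueeze(1))

linear = nn.Linear(data.shape[1], 1)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)
loss2 = []

for i in range(100):
    optimizer.zero_grad()
    outputs = linear(X)
    loss = criterion(outputs, y)
    loss2.append(loss.data[0])
    loss.backward()
    optimizer.step()

With standardized inputs the loss should decrease monotonically; if it still rises, try a smaller learning rate.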
