from __future__ import division
import argparse
import torch
from torch.utils import model_zoo
from torch.autograd import Variable
from torch.autograd import Variable
from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout
from torch.optim import Adam, SGD
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import models
import utils
import os
import pickle
import pandas as pd
# from Lenet import *
# from Utils import *
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
from data_loader import get_train_test_loader, get_office31_dataloader
from sklearn.utils import resample
import warnings
warnings.filterwarnings("ignore")
import logging
handler=logging.basicConfig(level=logging.INFO)
lgr = logging.getLogger(__name__)
from sklearn.metrics import roc_auc_score, log_loss, roc_auc_score, roc_curve, auc,accuracy_score
from utils import accuracy, Tracker
from torchmetrics.classification import BinaryAccuracy
########################################################################
# --- Data loading & preparation ---------------------------------------------
# Source domain: iOS feature vectors; target domain: Android feature vectors.
# NOTE(review): relative paths — assumes both CSVs sit in the working directory.
fnameand='vectors_Qv_vlen1_updated_location_variance_android.csv'
fnameios='vectors_Qv_vlen1_updated_location_variance_ios.csv'
dfand = pd.read_csv(fnameand, sep=',')
dfios = pd.read_csv(fnameios, sep=',')
# Upsample the Android frame with replacement so both domains have the same
# number of rows; random_state fixed for reproducibility.
dfandupsample = resample(dfand,replace=True,n_samples=len(dfios),random_state=42)
# Eight mobility-feature columns used as the model input for both domains.
Xs=dfios[["location_variance0","time_spent_moving0","total_distance0","AMS0","unique_locations0","entropy0","normalized_entropy0","time_home0"]]
ys = dfios[['finallabel']]
# changing labels to 1 or 0
# Binary encoding: "improved" -> 0, "nonImproved" -> 1.
# NOTE(review): .loc assignment on these frames relies on the double-bracket
# selection producing a copy; warnings are globally suppressed above.
ys.loc[ys["finallabel"] == "improved", "finallabel"] = 0
ys.loc[ys["finallabel"] == "nonImproved", "finallabel"] = 1
ys=np.array(ys).astype("float32")
# Same feature selection and label encoding for the upsampled Android data.
Xt=dfandupsample[["location_variance0","time_spent_moving0","total_distance0","AMS0","unique_locations0","entropy0","normalized_entropy0","time_home0"]]
yt = dfandupsample[['finallabel']]
yt.loc[yt["finallabel"] == "improved", "finallabel"] = 0
yt.loc[yt["finallabel"] == "nonImproved", "finallabel"] = 1
yt=np.array(yt).astype("float32")
# Source = iOS (train), target = Android (adaptation target).
trainX, trainY = Xs, ys
targetX,targetY=Xt,yt
print (trainX.shape,trainY.shape,targetX.shape,targetY.shape)
########################################################################################
def XnumpyToTensor(x_data_np):
    """Convert a pandas DataFrame of features into a CPU float32 tensor.

    Parameters
    ----------
    x_data_np : pandas.DataFrame
        Feature matrix; ``.values`` is taken, so any 2-D frame works.

    Returns
    -------
    torch.Tensor
        Float32 tensor of shape ``(n_rows, n_cols)`` on the CPU.
    """
    x_data_np = np.array(x_data_np.values, dtype=np.float32)
    print(x_data_np.shape)
    print(type(x_data_np))
    # Same logger the module binds as `lgr` (logging.getLogger(__name__)).
    logging.getLogger(__name__).info("Using the CPU")
    # torch.autograd.Variable is deprecated since PyTorch 0.4 — a plain
    # tensor carries autograd state itself, so wrap-free conversion suffices.
    X_tensor = torch.from_numpy(x_data_np)
    print(type(X_tensor.data))  # torch.FloatTensor on this CPU-only path
    print(X_tensor.data.shape)  # e.g. torch.Size([n_rows, 8])
    return X_tensor
def YnumpyToTensor(y_data_np):
    """Convert a 1-D/2-D label array into a float32 column tensor.

    Parameters
    ----------
    y_data_np : numpy.ndarray
        Label array with ``shape[0]`` samples.

    Returns
    -------
    torch.Tensor
        Float tensor of shape ``(n_samples, 1)`` — BCE-style losses
        require float targets with an explicit column dimension.
    """
    y_data_np = y_data_np.reshape((y_data_np.shape[0], 1))  # Must be reshaped for PyTorch!
    print(y_data_np.shape)
    print(type(y_data_np))
    # Same logger the module binds as `lgr` (logging.getLogger(__name__)).
    logging.getLogger(__name__).info("Using the CPU")
    # Deprecated Variable wrapper removed; cast to float for BCE losses.
    Y_tensor = torch.from_numpy(y_data_np).type(torch.FloatTensor)
    print(type(Y_tensor.data))  # torch.FloatTensor on this CPU-only path
    print(Y_tensor.data.shape)
    return Y_tensor
#######################################################################################
use_cuda=False  # flag is set but never consulted in the visible code
X_tensor_train= XnumpyToTensor(trainX) # default order is NBC for a 3d tensor, but we have a 2d tensor
X_shape=X_tensor_train.data.size()  # 2-D: (n_samples, n_features) — no channel axis
# Global switch for the debug printer below.
DEBUG_ON=False
def debug(x):
    """Print x's size when DEBUG_ON is set; otherwise a no-op."""
    if not DEBUG_ON:
        return
    print ('(x.size():' + str (x.size()))
##########################################################################################
class Net22(nn.Module):
    """AlexNet-style 1-D convolutional classifier.

    Expects input of shape ``(batch, 2, length)`` — two input channels —
    with ``length`` large enough (roughly >= 64) to survive the strided
    convolutions and pools below.

    NOTE(review): the script's feature matrices are 2-D ``(n_samples, 8)``,
    which is exactly why the reported RuntimeError occurs: Conv1d(2, ...)
    needs a 3-D ``(batch, 2, length)`` input. The data must be reshaped /
    stacked into that layout (and 8 features per row is far shorter than
    this architecture assumes) — confirm the intended channel layout.
    """

    def __init__(self, num_classes: int = 2, dropout: float = 0.5):
        super(Net22, self).__init__()
        self.features = nn.Sequential(
            nn.Conv1d(2, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=3, stride=2),
            nn.Conv1d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=3, stride=2),
            nn.Conv1d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv1d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv1d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            # BUG FIX: was nn.MaxPool2d — 2-D pooling cannot follow Conv1d
            # feature maps; the forward pass could never run.
            nn.MaxPool1d(kernel_size=3, stride=2),
        )
        # BUG FIX: was nn.AdaptiveAvgPool2d((6, 6)); 1-D features need 1-D
        # pooling, yielding a fixed 6-sample output per channel.
        self.avgpool = nn.AdaptiveAvgPool1d(6)
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout),
            # 256 channels * 6 pooled positions (256*6*6 was the 2-D sizing).
            nn.Linear(256 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=dropout),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            # Honor the num_classes argument (was hard-coded to 2; the
            # default keeps existing callers unchanged).
            nn.Linear(4096, num_classes),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """features -> adaptive pool -> flatten -> classifier logits."""
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x
# #--------------------------------------------------------------------------------------
# # Dimensions
net = Net22()  # module-level instance; train() below reads this global
print("model description---")
print(net)
##################################################################################
import time  # NOTE(review): mid-file import kept in place to avoid reordering
start_time = time.time()  # wall-clock start (elapsed time never printed here)
epochs=20  # number of iterations of the __main__ loop below
all_losses = []  # never appended to in the visible code
# Build CPU tensors for both domains (source = iOS, target = Android).
X_tensor_train= XnumpyToTensor(trainX)
Y_tensor_train= YnumpyToTensor(trainY)
X_tensor_target= XnumpyToTensor(targetX)
Y_tensor_target= YnumpyToTensor(targetY)
#################################################################
def train(model, epoch, param):
    """One training step (visibly incomplete).

    NOTE(review): `model` and `epoch` are unused — the forward pass below
    reads the module-level `net` and the global input tensors, and nothing
    is returned (callers receive None). The three loss weights are unpacked
    but never applied; no loss or backward pass exists yet. Confirm the
    intended training logic before relying on this function.
    """
    discriminative_loss_param=param[0]  # weight for discriminative loss (unused below)
    domain_loss_param=param[1]          # weight for domain-alignment loss (unused below)
    adver_loss_param=param[2]           # weight for adversarial loss (unused below)
    result = []  # never filled or returned
    # Forward both full domain tensors through the global network.
    source_out,target_out = net(X_tensor_train),net(X_tensor_target)
#----------------------------------------------------------------
if __name__=='__main__':
    # Loss-weight hyperparameters passed through to train() (unused there yet).
    discriminative_loss_param = 0.01 ##0.03 for InstanceBased method, 0.01 for CenterBased method
    domain_loss_param = 8
    adver_loss_param=0
    param=[discriminative_loss_param, domain_loss_param,adver_loss_param]
    # Bookkeeping containers — none are populated in the visible code.
    training_statistic = []
    testing_s_statistic = []
    testing_t_statistic = []
    final_res=[]
    tracker = Tracker()
    tuf=[]
    accuracies_source = []
    accuracies_target= []
    for e in range(0,epochs):
        print("epoch===",e)
        # Per-epoch counters, reset each iteration; never updated below.
        total=0
        running_accuracy = 0.0
        correct=0
        # train() currently returns None (see its review note).
        res = train(net, e, param=param)
Thanks for your reply. Here is my code with backticks.
This time I am not reshaping the inputs, but I am still getting the same error:
“RuntimeError: Given groups=1, weight of size [64, 2, 11], expected input[1, 209, 8] to have 2 channels, but got 209 channels instead”.
I don’t know how I should reshape my inputs to get this model to work.