My complete code is below; I am following code from a GitHub repository to accomplish my task.
import sys
import time
import h5py
import numpy as np
import pandas as pd
from scipy import stats
import torch
import torch.nn as nn

def windowz(data, size):
    # Yield half-overlapping (start, end) index pairs over the data.
    start = 0
    while start < len(data):
        yield start, start + size
        start += (size // 2)
def segment_pa2(x_train, y_train, window_size):
    # Slice the signal into half-overlapping windows; each window's label
    # is the mode of the per-sample labels inside it.
    segments = np.zeros(((len(x_train) // (window_size // 2)) - 1, window_size, 52))
    labels = np.zeros(((len(y_train) // (window_size // 2)) - 1))
    i_segment = 0
    i_label = 0
    for (start, end) in windowz(x_train, window_size):
        if len(x_train[start:end]) == window_size:  # drop the trailing partial window
            m = stats.mode(y_train[start:end])
            segments[i_segment] = x_train[start:end]
            labels[i_label] = m[0]  # in SciPy >= 1.11 use m.mode instead
            i_label += 1
            i_segment += 1
    return segments, labels
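
# Hedged example (not in the original): windowz over a hypothetical
# 10-sample series with a window of 4 yields half-overlapping index
# pairs; the final pair overruns the series, which is exactly what
# segment_pa2's length check filters out.
print(list(windowz(range(10), 4)))  # [(0, 4), (2, 6), (4, 8), (6, 10), (8, 12)]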
print('starting...')
start_time = time.time()
dataset = sys.argv[1]  # dataset name from the command line (unused below; the path is hard-coded)
path = '/Users/tehreem/Desktop/PAMAP2/PAMAP2_Dataset/pamap2.h5'
f = h5py.File(path, 'r')
print(f)
x_train = f.get('train').get('inputs')[()]
y_train = f.get('train').get('targets')[()]
x_test = f.get('test').get('inputs')[()]
y_test = f.get('test').get('targets')[()]
print ("x_train shape =",x_train.shape)
print ("y_train shape =",y_train.shape)
print ("x_test shape =" ,x_test.shape)
print ("y_test shape =",y_test.shape)
# Downsample by keeping every third sample.
x_train = x_train[::3, :]
y_train = y_train[::3]
x_test = x_test[::3, :]
y_test = y_test[::3]
print ("x_train shape(downsampled) = ", x_train.shape)
print ("y_train shape(downsampled) =",y_train.shape)
print ("x_test shape(downsampled) =" ,x_test.shape)
print ("y_test shape(downsampled) =",y_test.shape)
print (np.unique(y_train))
print (np.unique(y_test))
unq = np.unique(y_test)  # distinct activity labels (kept for reference)
input_width = 52  # sliding-window length in samples

print("segmenting signal...")
train_x, train_y = segment_pa2(x_train, y_train, input_width)
test_x, test_y = segment_pa2(x_test, y_test, input_width)
print("signal segmented.")
# One-hot encode the window labels; align(join='inner') keeps only the
# label columns that occur in both splits.
train = pd.get_dummies(train_y)
test = pd.get_dummies(test_y)
train, test = train.align(test, join='inner', axis=1)
train_y = np.asarray(train)
test_y = np.asarray(test)
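
# Hedged toy illustration (hypothetical labels, not from the dataset) of
# what align(join='inner', axis=1) does: only columns present in both
# frames survive.
#   a = pd.get_dummies(pd.Series([0, 1, 2]))   # columns 0, 1, 2
#   b = pd.get_dummies(pd.Series([1, 2, 3]))   # columns 1, 2, 3
#   a, b = a.align(b, join='inner', axis=1)    # both keep columns 1, 2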
# Hyperparameters
input_height = 1
num_labels = 11
num_channels = 52
batch_size = 64
stride_size = 2
kernel_size_1 = 7
kernel_size_2 = 3
kernel_size_3 = 1
depth_1 = 128
depth_2 = 128
depth_3 = 128
num_hidden = 512
dropout_1 = 0.1
dropout_2 = 0.25
dropout_3 = 0.5
learning_rate = 0.0005
training_epochs = 50
total_batches = train_x.shape[0] // batch_size
# Reshape to 4-D for shape reporting, then flatten back to the 3-D layout
# (batch, channels, length) that nn.Conv1d expects. Here the window length
# and the channel count are both 52, so the plain reshape happens to work;
# if they differed, an explicit transpose would be needed to put channels
# on axis 1.
train_x = train_x.reshape(len(train_x), 1, input_width, num_channels)
test_x = test_x.reshape(len(test_x), 1, input_width, num_channels)
print("test_x_reshaped =", test_x.shape)
print("train_x shape =", train_x.shape)
print("train_y shape =", train_y.shape)
print("test_x shape =", test_x.shape)
print("test_y shape =", test_y.shape)
train_x = train_x.reshape(-1, input_width, num_channels)
test_x = test_x.reshape(-1, input_width, num_channels)
def init_weights(m):
    # Orthogonal init for LSTM/conv/linear weights, zeros for biases.
    if type(m) == nn.LSTM:
        for name, param in m.named_parameters():
            if 'weight_ih' in name:
                torch.nn.init.orthogonal_(param.data)
            elif 'weight_hh' in name:
                torch.nn.init.orthogonal_(param.data)
            elif 'bias' in name:
                param.data.fill_(0)
    elif type(m) == nn.Conv1d or type(m) == nn.Linear:
        torch.nn.init.orthogonal_(m.weight)
        m.bias.data.fill_(0)
# Note: model.apply(init_weights) must run after the model exists; it is
# called below, right after model = CharCNN().
class CharCNN(nn.Module):
    def __init__(self):
        super(CharCNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv1d(num_channels, depth_1, kernel_size=kernel_size_1, stride=stride_size),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=kernel_size_1, stride=stride_size),
            nn.Dropout(dropout_1),
        )
        self.conv2 = nn.Sequential(
            nn.Conv1d(depth_1, depth_2, kernel_size=kernel_size_2, stride=stride_size),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=kernel_size_2, stride=stride_size),
            nn.Dropout(dropout_2),
        )
        # For input_width = 52 the conv/pool stack reduces the time axis
        # to length 1 (see the shape trace below), so the flattened
        # feature size is depth_2 * 1 = 128, not 128*64.
        self.fc1 = nn.Sequential(
            nn.Linear(depth_2, num_hidden),
            nn.ReLU(),
            nn.Dropout(dropout_3),
        )
        # The final layer emits raw logits: nn.CrossEntropyLoss applies
        # log-softmax internally, so no ReLU/softmax belongs here.
        self.fc2 = nn.Linear(num_hidden, num_labels)

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        # collapse to (batch, features), keeping the batch dimension intact
        out = out.view(out.size(0), -1)
        out = self.fc1(out)
        out = self.fc2(out)
        return out
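
# Hedged sanity check (not in the original): trace the time-axis length
# through the conv/pool stack for input_width = 52. Each layer maps
# length L to (L - kernel) // stride + 1.
def out_len(L, kernel, stride):
    return (L - kernel) // stride + 1

L = out_len(52, kernel_size_1, stride_size)  # conv1    -> 23
L = out_len(L, kernel_size_1, stride_size)   # maxpool1 -> 9
L = out_len(L, kernel_size_2, stride_size)   # conv2    -> 4
L = out_len(L, kernel_size_2, stride_size)   # maxpool2 -> 1
print("flattened features =", L * depth_2)   # 128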
model = CharCNN()
model.apply(init_weights)  # moved here: the model must exist before apply()
print(model)
def iterate_minibatches(inputs, targets, batch_size, shuffle=True):
    # Yield (inputs, targets) minibatches; any final partial batch is dropped.
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]
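
# Hedged check (not in the original): because the generator drops any
# final partial batch, one pass yields exactly len(train_x) // batch_size
# batches.
#   n = sum(1 for _ in iterate_minibatches(train_x, train_y, batch_size))
#   assert n == len(train_x) // batch_size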
criterion = nn.CrossEntropyLoss()
opt = torch.optim.Adam(model.parameters(), lr=learning_rate)

train_on_gpu = torch.cuda.is_available()  # was undefined in the original
if train_on_gpu:
    model.cuda()  # move once, before training, instead of every epoch

for e in range(training_epochs):
    train_losses = []
    for batch in iterate_minibatches(train_x, train_y, batch_size):
        x, y = batch
        # Cast inputs to float32 to match the model's parameters, and
        # turn one-hot target rows back into the integer class indices
        # that CrossEntropyLoss expects.
        inputs = torch.from_numpy(x).float()
        targets = torch.from_numpy(y.argmax(axis=1))
        if train_on_gpu:
            inputs, targets = inputs.cuda(), targets.cuda()
        opt.zero_grad()
        output = model(inputs)
        loss = criterion(output, targets)
        train_losses.append(loss.item())
        loss.backward()
        opt.step()
    print("Epoch: {}/{}...".format(e + 1, training_epochs),
          "Train Loss: {:.4f}...".format(np.mean(train_losses)))