def runSim(pa, pb, ca_type, cb_type, g_ca_pa, g_ca_pb, g_cb_pa, g_cb_pb):
    """Train the module-level ``net`` on the supplied tensors; return its test error.

    Builds a ``TensorDataset`` from the eight input tensors, trains ``net``
    with SGD for a fixed number of epochs using ``deepci_loss`` over four
    prediction combinations, and returns ``test_error(net)``.

    Args:
        pa, pb: input tensors fed through ``net``.
        ca_type, cb_type: per-sample multipliers applied to ``net``'s output.
        g_ca_pa, g_ca_pb, g_cb_pa, g_cb_pb: additive offset tensors, one per
            (c, p) combination.
        NOTE(review): all eight are assumed to be torch tensors with equal
        first (sample) dimension, as required by TensorDataset — confirm
        against callers.

    Returns:
        The value of ``test_error(net)`` after training.
    """
    # NOTE(review): the original wrapped every tensor in torch.autograd.Variable;
    # Variable has been a no-op (it returns a plain Tensor) since PyTorch 0.4,
    # so the wrapping is dropped here without behavior change.
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.01)
    BATCH_SIZE = 64
    EPOCH = 10

    torch_dataset = Data.TensorDataset(
        pa, pb, ca_type, cb_type, g_ca_pa, g_ca_pb, g_cb_pa, g_cb_pb
    )
    loader = Data.DataLoader(
        dataset=torch_dataset,
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=2,
    )

    iteration = 0
    # start training
    for epoch in range(EPOCH):
        print("EPOCH:", epoch)
        for step, (
            batch_pa, batch_pb, batch_ca_type, batch_cb_type,
            batch_g_ca_pa, batch_g_ca_pb, batch_g_cb_pa, batch_g_cb_pb,
        ) in enumerate(loader):  # for each training step
            # Move the mini-batch to the training device.
            b_pa = batch_pa.to(device)
            b_pb = batch_pb.to(device)
            b_ca_type = batch_ca_type.to(device)
            b_cb_type = batch_cb_type.to(device)
            b_g_ca_pa = batch_g_ca_pa.to(device)
            b_g_ca_pb = batch_g_ca_pb.to(device)
            b_g_cb_pa = batch_g_cb_pa.to(device)
            b_g_cb_pb = batch_g_cb_pb.to(device)

            # One prediction per (c, p) combination: offset + type * net(p).
            # The operands are already on `device`, so the original trailing
            # .to(device) on each expression was redundant and is removed.
            prediction_1 = b_g_ca_pa + b_ca_type * net(b_pa)
            prediction_2 = b_g_ca_pb + b_ca_type * net(b_pb)
            prediction_3 = b_g_cb_pa + b_cb_type * net(b_pa)
            prediction_4 = b_g_cb_pb + b_cb_type * net(b_pb)

            loss = deepci_loss(prediction_1, prediction_2, prediction_3, prediction_4)
            if iteration % 30 == 0:
                print(loss)
            iteration += 1

            optimizer.zero_grad()  # clear gradients for next train
            loss.backward()        # backpropagation, compute gradients
            optimizer.step()       # apply gradients
    return test_error(net)
I cannot tell which line of your code raises the error, but it occurs whenever you use a tensor holding more than one value directly in an `if` condition, for example:
x = torch.tensor([True, False])
if x:
print("true")
# RuntimeError: Boolean value of Tensor with more than one value is ambiguous
Use `.all()` (or, depending on the intended logic, `.any()`) to reduce the tensor to a single boolean instead:
if x.any():
print("any")
# any
I found the error — thanks!