I want to threshold a tensor used in a self-defined loss function into binary values. Previously, I used torch.round(prob) to do it; since my prob tensor values range in [0, 1], this is equivalent to thresholding the tensor prob with a threshold value of 0.5.
For example,
prob = [0.1, 0.3, 0.7, 0.9], torch.round(prob) = [0, 0, 1, 1]
Now I would like to use a changeable threshold value. How can I do that?
It seems that only the latest PyTorch 0.4 has torch.where.
Something else you can do is something like this:
prob = ... # Tensor([0.1, 0.3, 0.7, 0.9])
t = Variable(torch.Tensor([0.5])) # threshold
out = (prob > t).float() * 1
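For reference, a minimal sketch of the torch.where version mentioned above (assuming PyTorch 0.4+ and the same prob and threshold):
prob = torch.tensor([0.1, 0.3, 0.7, 0.9])
t = 0.5  # threshold
# torch.where picks 1. where the condition holds and 0. elsewhere
out = torch.where(prob > t, torch.ones_like(prob), torch.zeros_like(prob))
# out: tensor([0., 0., 1., 1.])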
Hi Richard, thanks for your kind help. I tried it and got this error:
RuntimeError: gt() received an invalid combination of arguments - got (numpy.float32), but expected one of:
 * (float other)
   didn't match because some of the arguments have invalid types: (numpy.float32)
 * (Variable other)
   didn't match because some of the arguments have invalid types: (numpy.float32)
jpeg729, February 9, 2018:
Both of these work on PyTorch 0.3:
t = 0.5
t = torch.Tensor([0.5])
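A quick sanity check, assuming the prob values from the original question:
prob = torch.Tensor([0.1, 0.3, 0.7, 0.9])
out1 = (prob > 0.5).float()                  # threshold as a Python float
out2 = (prob > torch.Tensor([0.5])).float()  # threshold as a 1-element tensor
# both give tensor([0., 0., 1., 1.])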
richard, February 9, 2018:
You have to make sure that in (prob > t), both operands are tensors.
jpeg729, February 9, 2018:
It also works if both prob and t are Variables.
Deeply, May 20, 2019:
I am curious: what role does the multiplication by 1 (* 1) play in out = (prob > t).float() * 1?
himat (Hima), July 10, 2019:
I would also like to know, @richard. The * 1 is definitely not necessary.
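A quick check (not from the thread, just illustrating the point) shows that .float() already yields 0./1. values, so the * 1 is a no-op:
prob = torch.tensor([0.1, 0.3, 0.7, 0.9])
t = torch.tensor([0.5])
a = (prob > t).float()      # tensor([0., 0., 1., 1.])
b = (prob > t).float() * 1  # same values; the * 1 changes nothing
print(torch.equal(a, b))    # True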
hs271095, April 6, 2020:
Hey, this gives me bool values, not 0/1.
bsridatta (Sri Datta Budaraju), September 20, 2020:
.float() should convert booleans to 0., 1.
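For example (a minimal sketch; on recent PyTorch versions the comparison returns a bool tensor):
mask = torch.tensor([0.1, 0.3, 0.7, 0.9]) > 0.5
print(mask)          # tensor([False, False,  True,  True])
print(mask.float())  # tensor([0., 0., 1., 1.])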
Aamir_M1 (Aamir_M), October 14, 2021:
import torch
from torch import nn
from torch.nn import functional as F
import math
import numpy as np


class SparseActivations(nn.Module):
    """ Custom Linear layer but mimics a standard linear layer """
    def __init__(self, size_in, size_out, threshold):
        super().__init__()
        self.size_in, self.size_out = size_in, size_out
        self.threshold = threshold
        weights = torch.Tensor(size_out, size_in)
        self.weights = nn.Parameter(weights)  # nn.Parameter is a Tensor that's a module parameter.
        bias = torch.Tensor(size_out)
        self.bias = nn.Parameter(bias)
        self.sigmoid = nn.Sigmoid()
        # initialize weights and biases
        self.init_weights()

    def init_weights(self):
        stdv = 1. / math.sqrt(self.weights.size(1))
        self.weights.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, x):
        w_times_x = torch.mm(x, self.weights.t())
        outs = self.sigmoid(torch.add(w_times_x, self.bias))  # w times x + b
        for i in range(0, self.size_out):
            if outs[:, i] > self.threshold:
                outs[:, i] = 1
            else:
                outs[:, i] = 0
        return outs


class ForwardNetwork(nn.Module):
    # Using custom layer
    def __init__(self):
        super().__init__()
        self.l1 = nn.Linear(100, 10000)
        self.custom1 = SparseActivations(10000, 10000, threshold=0.56)
        self.l2 = nn.Linear(10000, 100)
        self.l3 = nn.Linear(100, 10)

    def forward(self, x):
        x = F.relu(self.l1(x))
        x = self.custom1(x)
        return x


def main():
    fr = ForwardNetwork()
    n = torch.tensor(np.random.uniform(0, 1, 100).reshape(1, 100).astype(np.float32))
    results = fr.forward(n).detach().numpy().flatten().astype(np.uint8)
    counter = 0
    on_bit_list = []
    for idx, r in enumerate(results):
        if r == 1:
            counter += 1
            on_bit_list.append(idx)
        else:
            pass
    print(counter)
    print(on_bit_list)


if __name__ == '__main__':
    main()
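A side note that is not part of the original post: the column-by-column loop in forward only works for a batch size of 1 (the if needs a single-element tensor), and it can be replaced by the elementwise comparison discussed earlier in the thread. A minimal sketch of that forward, keeping the rest of the module unchanged:
def forward(self, x):
    w_times_x = torch.mm(x, self.weights.t())
    outs = self.sigmoid(torch.add(w_times_x, self.bias))  # w times x + b
    # elementwise threshold, equivalent to the loop but works for any batch size
    return (outs > self.threshold).float()
As with the loop, the hard threshold is not differentiable, so no gradient flows through this step during training.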