ah okay, sorry for my misunderstanding, here’s my code
dataset = ProcessDataset(train_x, train_y)
negs = torch.zeros((1, 256, 256)) #make sure the size here matches the dims and sizes for 1 image
posv = torch.zeros((1, 256, 256))
for i in range(len(train_y)):
image, mask, negatives, positives = dataset[i]
negs += negatives.sum(dim=1)
posv += positives.sum(dim=1)
pos_weight = negs/posv
print(pos_weight)
and the result is
tensor([[[ inf, inf, inf, ..., 158.5177, 164.4146, 170.7215],
[ inf, inf, inf, ..., 158.5177, 164.4146, 170.7215],
[ inf, inf, inf, ..., 158.5177, 164.4146, 170.7215],
...,
[ inf, inf, inf, ..., 158.5177, 164.4146, 170.7215],
[ inf, inf, inf, ..., 158.5177, 164.4146, 170.7215],
[ inf, inf, inf, ..., 158.5177, 164.4146, 170.7215]]])