The function I am calling for each fold is:
def train_one_fold(i_fold, model, criterion, optimizer, dataloader_train, dataloader_valid):
    train_fold_results = []
    for epoch in range(N_EPOCHS):
        # Train
        model.train()
        tr_loss = 0
        for i, data in enumerate(dataloader_train, 0):
            # get the inputs (Variable is deprecated since PyTorch 0.4, so plain tensors are used)
            inputs, labels = data
            if use_gpu:
                inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
            optimizer.zero_grad()  # clear gradients from the previous step
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            tr_loss += loss.item()
            optimizer.step()

        # Validate
        model.eval()
        val_loss = 0
        val_preds = None
        val_labels = None
        for i, data in enumerate(dataloader_valid, 0):
            images, labels = data
            if use_gpu:
                # async=True was renamed to non_blocking=True (async is a reserved keyword in Python 3.7+)
                images, labels = images.cuda(), labels.cuda(non_blocking=True)
            with torch.no_grad():
                outputs = model(images)
                loss = criterion(outputs, labels)
                val_loss += loss.item()
                preds = torch.softmax(outputs, dim=1).cpu()
                # accumulate per-batch predictions for the whole validation set
                if val_preds is None:
                    val_preds = preds
                else:
                    val_preds = torch.cat((val_preds, preds), dim=0)

    # return both values so the call site can unpack them
    return val_preds, train_fold_results
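
For reference, the call site (reconstructed from the trace below) unpacks two values, which is why the function ends with return val_preds, train_fold_results:

    # `plist`, `i_fold`, `trainloader`, `testloader`, `oof_preds`, and `valid_idx`
    # come from the surrounding notebook cell, as shown in the trace below
    optimizer = optim.Adam(plist, lr=5e-5)
    val_preds, train_fold_results = train_one_fold(i_fold, model, criterion, optimizer, trainloader, testloader)
    oof_preds[valid_idx, :] = val_preds.numpy()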
Here is the error trace:
AssertionError                            Traceback (most recent call last)
<ipython-input> in <module>()
     24 optimizer = optim.Adam(plist, lr=5e-5)
     25 
---> 26 val_preds, train_fold_results = train_one_fold(i_fold, model, criterion, optimizer, trainloader, testloader)
     27 oof_preds[valid_idx, :] = val_preds.numpy()
     28 

-------------------------------- 6 frames --------------------------------

<ipython-input> in train_one_fold(i_fold, model, criterion, optimizer, dataloader_train, dataloader_valid)
     12     tr_loss = 0
     13 
---> 14     for i, data in enumerate(dataloader_train, 0):
     15         # get the inputs
     16 

/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in __next__(self)
    343 
    344     def __next__(self):
--> 345         data = self._next_data()
    346         self._num_yielded += 1
    347         if self._dataset_kind == _DatasetKind.Iterable and \

/usr/local/lib/python3.6/dist-packages/torch/utils/data/dataloader.py in _next_data(self)
    383     def _next_data(self):
    384         index = self._next_index()  # may raise StopIteration
--> 385         data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
    386         if self._pin_memory:
    387             data = _utils.pin_memory.pin_memory(data)

/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/fetch.py in fetch(self, possibly_batched_index)
     42     def fetch(self, possibly_batched_index):
     43         if self.auto_collation:
---> 44             data = [self.dataset[idx] for idx in possibly_batched_index]
     45         else:
     46             data = self.dataset[possibly_batched_index]

/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/fetch.py in <listcomp>(.0)
     42     def fetch(self, possibly_batched_index):
     43         if self.auto_collation:
---> 44             data = [self.dataset[idx] for idx in possibly_batched_index]
     45         else:
     46             data = self.dataset[possibly_batched_index]

<ipython-input> in __getitem__(self, idx)
     14     def __getitem__(self, idx):
     15         im, labels = self.dataset[self.indices[idx]]
---> 16         return self.transform(im), labels
     17 
     18     def __len__(self):

/usr/local/lib/python3.6/dist-packages/albumentations/core/composition.py in __call__(self, force_apply, **data)
    162 
    163     def __call__(self, force_apply=False, **data):
--> 164         assert isinstance(force_apply, (bool, int)), "force_apply must have bool or int type"
    165         need_to_run = force_apply or random.random() < self.p
    166         for p in self.processors.values():

AssertionError: force_apply must have bool or int type
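
The assertion itself points at the cause: albumentations' Compose is defined as __call__(self, force_apply=False, **data), so it only accepts the image as a keyword argument. The positional call self.transform(im) in __getitem__ (frame at line 16 above) binds the image to force_apply, which trips the assert. A minimal sketch of a corrected __getitem__, assuming the wrapped dataset yields PIL images (the np.array conversion and the "image" key follow the standard albumentations convention and are not code from the post):

    import numpy as np

    def __getitem__(self, idx):
        im, labels = self.dataset[self.indices[idx]]
        # albumentations takes named targets and returns a dict, e.g. {"image": ...};
        # convert the PIL image to an ndarray first, since albumentations works on numpy arrays
        augmented = self.transform(image=np.array(im))
        return augmented["image"], labels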