How to solve this error

TODO: Display an image along with the top 5 classes

#using the restored one
file_path = 'flowers/test/11/image_03098.jpg'  #an example from the test set

img = process_image(file_path)
imshow(img)
plt.show()
probs, classes = predict(file_path, model, 5)

#print (probs)
#print (classes)

#preparing class_names using mapping with cat_to_name

class_names = [cat_to_name[item] for item in classes]

#fig, (ax2) = plt.subplots(figsize=(6,9), ncols=2)
plt.figure(figsize=(6, 10))
plt.subplot(2, 1, 2)
#ax2.barh(class_names, probs)
#ax2.set_aspect(0.1)
#ax2.set_yticks(classes)
#ax2.set_title('Flower Class Probability')
#ax2.set_xlim(0, 1.1)

sns.barplot(x=probs, y=class_names, color='red');

#width = 1/5
#plt.subplot(2,1,2)
#plt.bar (classes, probs, width, color = 'blue')
plt.show()

The error:


RuntimeError                              Traceback (most recent call last)
in ()
      7 imshow (img)
      8 plt.show()
----> 9 probs, classes = predict (file_path, model, 5)
     10
     11 #print (probs)

in predict(image_path, model, topk)
     18
     19     with torch.no_grad ():
---> 20         output = model.forward (img)
     21     output_prob = torch.exp (output) #converting into a probability
     22

/opt/conda/lib/python3.6/site-packages/torchvision-0.2.1-py3.6.egg/torchvision/models/densenet.py in forward(self, x)
    218
    219     def forward(self, x):
--> 220         features = self.features(x)
    221         out = F.relu(features, inplace=True)
    222         out = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1)

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    489             result = self._slow_forward(*input, **kwargs)
    490         else:
--> 491             result = self.forward(*input, **kwargs)
    492         for hook in self._forward_hooks.values():
    493             hook_result = hook(self, input, result)

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/container.py in forward(self, input)
     89     def forward(self, input):
     90         for module in self._modules.values():
---> 91             input = module(input)
     92         return input
     93

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    489             result = self._slow_forward(*input, **kwargs)
    490         else:
--> 491             result = self.forward(*input, **kwargs)
    492         for hook in self._forward_hooks.values():
    493             hook_result = hook(self, input, result)

/opt/conda/lib/python3.6/site-packages/torch/nn/modules/conv.py in forward(self, input)
    299     def forward(self, input):
    300         return F.conv2d(input, self.weight, self.bias, self.stride,
--> 301                         self.padding, self.dilation, self.groups)
    302
    303

RuntimeError: Expected object of type torch.FloatTensor but found type torch.cuda.FloatTensor for argument #2 'weight'

The two objects are on different devices, i.e. one is on the CPU and the other is on CUDA.
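A minimal sketch of one way to fix it (assuming the model's weights are on the GPU): inside predict, move the input tensor to the same device as the model before calling forward, then bring the output back for the numpy conversion.

# hypothetical fix inside predict(); img is the FloatTensor built from the numpy image
device = next(model.parameters()).device  # device the model's weights live on
img = img.to(device)                      # move the input to the same device as the weights

with torch.no_grad():
    output = model.forward(img)
output = output.cpu()                     # back to CPU so .numpy() works later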

Thank you. I used the model loading from the last code cell and that problem is solved, but another problem appeared.

TODO: Display an image along with the top 5 classes

#using the restored one

model = load_model('checkpoint.pth')

file_path = 'flowers/test/11/image_03098.jpg'

img = process_image(file_path)
imshow(img)
plt.show()
probs, classes = predict(file_path, model, 5)

#print (probs)
#print (classes)

#preparing class_names using mapping with cat_to_name

class_names = [cat_to_name[item] for item in classes]

#fig, (ax2) = plt.subplots(figsize=(6,9), ncols=2)
plt.figure(figsize=(6, 10))
plt.subplot(2, 1, 2)

sns.barplot(x=probs, y=class_names, color='red');

plt.show()

The error:


AttributeError                            Traceback (most recent call last)
in ()
     10 imshow (img)
     11 plt.show()
---> 12 probs, classes = predict (file_path, model, 5)
     13
     14 #print (probs)

in predict(image_path, model, topk)
     18
     19     with torch.no_grad ():
---> 20         output = model.forward (img)
     21     output_prob = torch.exp (output) #converting into a probability
     22

AttributeError: 'NoneType' object has no attribute 'forward'

The model you are trying to use is None.
You might want to check your load_model function. Or can you provide the code you are using to fetch the model?
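A quick sanity check you could run (this snippet is only an illustration; it assumes the loader is the load_model used in your cell above):

# hypothetical check: make sure the loader really returns a model object
model = load_model('checkpoint.pth')
print(type(model))  # should be a torchvision model class, not <class 'NoneType'>
assert model is not None, "load_model() returned None - check that it ends with `return model`"

The most common cause is a load function that builds the model but never returns it.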

Imports here

%matplotlib inline
%config InlineBackend.figure_format = 'retina'

import matplotlib.pyplot as plt
import torch
import numpy as np
from torch import nn
from torch import optim
from torchvision import datasets, models, transforms
import torch.nn.functional as F
import torch.utils.data
import pandas as pd
#import helper
from collections import OrderedDict
from PIL import Image
import seaborn as sns

TODO: Define your transforms for the training, validation, and testing sets

train_data_transforms = transforms.Compose([transforms.RandomRotation(30),
                                            transforms.RandomResizedCrop(224),
                                            transforms.RandomHorizontalFlip(),
                                            transforms.ToTensor(),
                                            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                                            ])

valid_data_transforms = transforms.Compose([transforms.Resize(255),
                                            transforms.CenterCrop(224),
                                            transforms.ToTensor(),
                                            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                                            ])

test_data_transforms = transforms.Compose([transforms.Resize(255),
                                           transforms.CenterCrop(224),
                                           transforms.ToTensor(),
                                           transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                                           ])

TODO: Load the datasets with ImageFolder

train_image_datasets = datasets.ImageFolder (train_dir, transform = train_data_transforms)
valid_image_datasets = datasets.ImageFolder (valid_dir, transform = valid_data_transforms)
test_image_datasets = datasets.ImageFolder (test_dir, transform = test_data_transforms)

TODO: Using the image datasets and the transforms, define the dataloaders

train_loader = torch.utils.data.DataLoader(train_image_datasets, batch_size = 64, shuffle = True)
valid_loader = torch.utils.data.DataLoader(valid_image_datasets, batch_size = 64, shuffle = True)
test_loader = torch.utils.data.DataLoader(test_image_datasets, batch_size = 64, shuffle = True)


#Label mapping
#This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers

import json

with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)
cat_to_name

Updating the classifier in the network

for param in model.parameters():
    param.requires_grad = False

classifier = nn.Sequential(OrderedDict([
    ('fc1', nn.Linear(9216, 4096)),
    ('relu1', nn.ReLU()),
    ('dropout1', nn.Dropout(p=0.4)),
    ('fc2', nn.Linear(4096, 2048)),
    ('relu2', nn.ReLU()),
    ('dropout2', nn.Dropout(p=0.4)),
    ('fc3', nn.Linear(2048, 102)),
    ('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
model

#initializing criterion and optimizer
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)

Defining validation for the model

def validation(model, valid_loader, criterion):
    model.to('cuda')

    valid_loss = 0
    accuracy = 0
    for inputs, labels in valid_loader:

        inputs, labels = inputs.to('cuda'), labels.to('cuda')
        output = model.forward(inputs)
        valid_loss += criterion(output, labels).item()

        ps = torch.exp(output)
        equality = (labels.data == ps.max(dim=1)[1])
        accuracy += equality.type(torch.FloatTensor).mean()

    return valid_loss, accuracy

#training a model

#change to cuda if enabled
model.to('cuda')
epochs = 12
print_every = 40
steps = 0

for e in range(epochs):
    running_loss = 0
    for ii, (inputs, labels) in enumerate(train_loader):
        steps += 1

        inputs, labels = inputs.to('cuda'), labels.to('cuda')

        optimizer.zero_grad()  # the optimizer is working on the classifier parameters only

        # Forward and backward passes
        outputs = model.forward(inputs)  # calculating output
        loss = criterion(outputs, labels)  # calculating loss
        loss.backward()
        optimizer.step()  # performs a single optimization step

        running_loss += loss.item()  # loss.item() returns the scalar value of the loss function

        if steps % print_every == 0:
            model.eval()  # switching to evaluation mode so that dropout is turned off

            # Turn off gradients for validation, saves memory and computations
            with torch.no_grad():
                valid_loss, accuracy = validation(model, valid_loader, criterion)

            print("Epoch: {}/{}.. ".format(e+1, epochs),
                  "Training Loss: {:.3f}.. ".format(running_loss/print_every),
                  "Valid Loss: {:.3f}.. ".format(valid_loss/len(valid_loader)),
                  "Valid Accuracy: {:.3f}%".format(accuracy/len(valid_loader)*100))

            running_loss = 0

            # Make sure training is back on
            model.train()
        

def check_accuracy_on_test(test_loader):
    right = 0
    total = 0

    # change to cuda if enabled
    model.to('cuda:0')

    # switching to evaluation mode so that dropout is turned off
    model.eval()

    # Turn off gradients for validation, saves memory and computations
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to('cuda'), labels.to('cuda')
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            right += (predicted == labels).sum().item()

    print('Accuracy of the network on the test images: %d %%' % (100 * right / total))

check_accuracy_on_test(test_loader)

model.to('cpu')  #no need to use cuda for saving/loading the model

TODO: Save the checkpoint

model.class_to_idx = train_image_datasets.class_to_idx  # saving the mapping between predicted class and class name,
# where the second variable is the class name in numeric form

#creating dictionary
checkpoint = {'classifier': model.classifier,
              'state_dict': model.state_dict(),
              'mapping': model.class_to_idx
              }

torch.save(checkpoint, 'project_checkpoint.pth')
#you should also store other hyper-parameters like the number of epochs, the learning_rate and the arch param
#along with the checkpoint. These parameters are required in case you need to continue training your model
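A sketch of what such a fuller checkpoint could look like; the extra keys ('arch', 'epochs', 'learning_rate', 'optimizer_state') are suggestions, not part of the original code:

# hypothetical richer checkpoint - the extra keys are assumptions about what you may want to restore
checkpoint = {'arch': 'alexnet',
              'classifier': model.classifier,
              'state_dict': model.state_dict(),
              'mapping': model.class_to_idx,
              'epochs': epochs,
              'learning_rate': 0.001,
              'optimizer_state': optimizer.state_dict()}
torch.save(checkpoint, 'project_checkpoint.pth')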

TODO: Write a function that loads a checkpoint and rebuilds the model

def loading_model(file_path):
    checkpoint = torch.load(file_path)  # loading checkpoint from a file
    model = models.alexnet(pretrained=True)  # function works solely for AlexNet
    # you can use the arch from the checkpoint and choose the model architecture in a more generic way:
    # model = getattr(models, checkpoint['arch'])

    model.classifier = checkpoint['classifier']
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['mapping']

    for param in model.parameters():
        param.requires_grad = False  # turning off tuning of the model

    return model

print(model)

def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a Numpy array
    '''
    #size = 256, 256
    img = Image.open(image)  # loading image
    Image_width, Image_height = img.size  # original size
    #proportion = Image_width / float(Image_height)  # to keep aspect ratio

    if Image_width > Image_height:
        Image_height = 256
        img.thumbnail((50000, Image_height), Image.ANTIALIAS)
    else:
        Image_width = 256
        img.thumbnail((Image_width, 50000), Image.ANTIALIAS)

    Image_width, Image_height = img.size
    # new size of img
    # crop 224x224 in the center
    img_reduce = 224
    img_left = (Image_width - img_reduce) / 2
    img_top = (Image_height - img_reduce) / 2
    img_right = img_left + 224
    img_bottom = img_top + 224
    img = img.crop((img_left, img_top, img_right, img_bottom))

    # preparing numpy array
    np_image = np.array(img) / 255
    np_image -= np.array([0.485, 0.456, 0.406])
    np_image /= np.array([0.229, 0.224, 0.225])

    np_image = np_image.transpose((2, 0, 1))

    return np_image

img_1 = (data_dir  +'/test' + '/1/' + 'image_06743.jpg')
img_1 = process_image(img_1)
print(img_1.shape)
(np.array([0.229, 0.224, 0.225])).shape
img_1 = img_1.transpose ((1,2,0))
img_1

def imshow(image, ax=None, title=None):
    """Imshow for Tensor."""
    if ax is None:
        fig, ax = plt.subplots()

    # PyTorch tensors assume the color channel is the first dimension
    # but matplotlib assumes it is the third dimension
    image = np.array(image)
    image = image.transpose((1, 2, 0))

    # Undo preprocessing
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = std * image + mean

    # Image needs to be clipped between 0 and 1 or it looks like noise when displayed
    image = np.clip(image, 0, 1)

    ax.imshow(image)

    return ax

image_path = 'flowers/train/13/image_05744.jpg'
img = process_image(image_path)
#img.shape
imshow(img)

#mapping = train_image_datasets.class_to_idx

#indeces = np.array ([1, 10, 100, 101, 102])
#classes = pd.DataFrame ([mapping [item] for item in indeces]) #replacing indeces with classes
#classes = np.array (classes) #converting to Numpy array
def predict(image_path, model, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    '''

    print(type(model))
    # TODO: Implement the code to predict the class from an image file
    image = process_image(image_path)  # loading image and processing it using the function defined above

    # we cannot pass the image to model.forward 'as is' because it expects a tensor, not a numpy array
    # converting to tensor
    img = torch.from_numpy(image).type(torch.FloatTensor)

    img = img.unsqueeze(dim=0)  # make the tensor the expected size; the forward method works with batches,
    # so doing this gives us a batch of size 1

    with torch.no_grad():
        output = model.forward(img)
    output_prob = torch.exp(output)  # converting into a probability

    probs, indeces = output_prob.topk(topk=5)
    probs = probs.numpy()  # converting both to numpy arrays
    indeces = indeces.numpy()

    probs = probs.tolist()[0]  # converting both to lists
    indeces = indeces.tolist()[0]

    mapping = {val: key for key, val in
               model.class_to_idx.items()}

    classes = [mapping[item] for item in indeces]
    #classes = pd.DataFrame ([mapping [item] for item in indeces]) #replacing indeces with classes
    classes = np.array(classes)  # converting to a Numpy array

    return probs, classes

TODO: Display an image along with the top 5 classes

#using the restored one

model = loading_model('project_checkpoint.pth')

file_path = 'flowers/test/11/image_03130.jpg'

img = process_image(file_path)
imshow(img)
plt.show()
probs, classes = predict(file_path, model, 5)
#probs = probs.numpy ()
#indeces = indeces.numpy ()

#print (probs)
#print (classes)

<class 'torchvision.models.alexnet.AlexNet'>

TypeError                                 Traceback (most recent call last)
in ()
     10 imshow (img)
     11 plt.show()
---> 12 probs, classes = predict(file_path, model, 5)
     13 #probs = probs.numpy ()
     14 #indeces = indeces.numpy ()

in predict(image_path, model, topk)
     24     output_prob = torch.exp (output) #converting into a probability
     25
---> 26     probs, indeces = output_prob.topk (topk=5)
     27     probs = probs.numpy () #converting both to numpy array
     28     indeces = indeces.numpy ()

TypeError: topk() missing 1 required positional arguments: "k"

#preparing class_names using mapping with cat_to_name

class_names = [cat_to_name[item] for item in classes]

#fig, (ax2) = plt.subplots(figsize=(6,9), ncols=2)
plt.figure(figsize=(6, 10))
plt.subplot(2, 1, 2)

sns.barplot(x=probs, y=class_names, color='red');

plt.show()

Change output_prob.topk(topk=5) to output_prob.topk(k=5), since k is the argument name.
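For reference, a minimal sketch of how Tensor.topk is called:

import torch

t = torch.tensor([0.1, 0.5, 0.2, 0.9, 0.3])
values, indices = t.topk(k=3)  # t.topk(3) also works; k is the number of top elements to return
print(values)   # tensor([0.9000, 0.5000, 0.3000])
print(indices)  # tensor([3, 1, 4])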

Many thanks, this solved the issue, but I have another error, as follows.

TODO: Display an image along with the top 5 classes

#using the restored one

model = loading_model('project_checkpoint.pth')

file_path = 'flowers/test/11/image_03130.jpg'

img = process_image(file_path)
imshow(img)

plt.show()
probs, classes = predict(file_path, model, 5)
#probs = probs.numpy ()
#indeces = indeces.numpy ()

#print (probs)
#print (classes)
#preparing class_names using mapping with cat_to_name

class_names = [cat_to_name[item] for item in classes]

#fig, (ax2) = plt.subplots(figsize=(6,9), ncols=2)
plt.figure(figsize=(6, 10))
plt.subplot(2, 1, 2)

sns.barplot(x=probs, y=class_names, color='red');

plt.show()


The error:


TypeError                                 Traceback (most recent call last)
in ()
     20 #preparing class_names using mapping with cat_to_name
     21
---> 22 class_names = [cat_to_name [item] for item in classes]
     23
     24 #fig, (ax2) = plt.subplots(figsize=(6,9), ncols=2)

in <listcomp>(.0)
     20 #preparing class_names using mapping with cat_to_name
     21
---> 22 class_names = [cat_to_name [item] for item in classes]
     23
     24 #fig, (ax2) = plt.subplots(figsize=(6,9), ncols=2)

TypeError: unhashable type: 'numpy.ndarray'

Basically it is saying that you are trying to do an illegal operation on that datatype, i.e. numpy.ndarray. Can you provide the output of classes and cat_to_name?

TODO: Display an image along with the top 5 classes

#using the restored one

model = loading_model('project_checkpoint.pth')

file_path = 'flowers/test/11/image_03130.jpg'

img = process_image(file_path)
imshow(img)

plt.show()
probs, classes = predict(file_path, model, 5)

print(classes)
print(cat_to_name)

#probs = probs.numpy ()
#indeces = indeces.numpy ()

#print (probs)
#print (classes)
#preparing class_names using mapping with cat_to_name

class_names = [cat_to_name[item] for item in classes]

The output:

[['72']
 ['18']
 ['94']
 ['83']
 ['96']]
{'21': 'fire lily', '3': 'canterbury bells', '45': 'bolero deep blue', '1': 'pink primrose', '34': 'mexican aster', '27': 'prince of wales feathers', '7': 'moon orchid', '16': 'globe-flower', '25': 'grape hyacinth', '26': 'corn poppy', '79': 'toad lily', '39': 'siam tulip', '24': 'red ginger', '67': 'spring crocus', '35': 'alpine sea holly', '32': 'garden phlox', '10': 'globe thistle', '6': 'tiger lily', '93': 'ball moss', '33': 'love in the mist', '9': 'monkshood', '102': 'blackberry lily', '14': 'spear thistle', '19': 'balloon flower', '100': 'blanket flower', '13': 'king protea', '49': 'oxeye daisy', '15': 'yellow iris', '61': 'cautleya spicata', '31': 'carnation', '64': 'silverbush', '68': 'bearded iris', '63': 'black-eyed susan', '69': 'windflower', '62': 'japanese anemone', '20': 'giant white arum lily', '38': 'great masterwort', '4': 'sweet pea', '86': 'tree mallow', '101': 'trumpet creeper', '42': 'daffodil', '22': 'pincushion flower', '2': 'hard-leaved pocket orchid', '54': 'sunflower', '66': 'osteospermum', '70': 'tree poppy', '85': 'desert-rose', '99': 'bromelia', '87': 'magnolia', '5': 'english marigold', '92': 'bee balm', '28': 'stemless gentian', '97': 'mallow', '57': 'gaura', '40': 'lenten rose', '47': 'marigold', '59': 'orange dahlia', '48': 'buttercup', '55': 'pelargonium', '36': 'ruby-lipped cattleya', '91': 'hippeastrum', '29': 'artichoke', '71': 'gazania', '90': 'canna lily', '18': 'peruvian lily', '98': 'mexican petunia', '8': 'bird of paradise', '30': 'sweet william', '17': 'purple coneflower', '52': 'wild pansy', '84': 'columbine', '12': "colt's foot", '11': 'snapdragon', '96': 'camellia', '23': 'fritillary', '50': 'common dandelion', '44': 'poinsettia', '53': 'primula', '72': 'azalea', '65': 'californian poppy', '80': 'anthurium', '76': 'morning glory', '37': 'cape flower', '56': 'bishop of llandaff', '60': 'pink-yellow dahlia', '82': 'clematis', '58': 'geranium', '75': 'thorn apple', '41': 'barbeton daisy', '95': 'bougainvillea', '43': 'sword lily', '83': 'hibiscus', '78': 'lotus lotus', '88': 'cyclamen', '94': 'foxglove', '81': 'frangipani', '74': 'rose', '89': 'watercress', '73': 'water lily', '46': 'wallflower', '77': 'passion flower', '51': 'petunia'}

TypeError                                 Traceback (most recent call last)
in ()
     23 #preparing class_names using mapping with cat_to_name
     24
---> 25 class_names = [cat_to_name [item] for item in classes]
     26

in <listcomp>(.0)
     23 #preparing class_names using mapping with cat_to_name
     24
---> 25 class_names = [cat_to_name [item] for item in classes]
     26

TypeError: unhashable type: 'numpy.ndarray'

As you can see there, classes has two dimensions. So to make it work you will have to use:
class_names = [cat_to_name[item[0]] for item in classes]. This will get you the class_names. Can you tell me, though, whether there is any reason you have all those ints as str?
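As an alternative sketch, you could also flatten classes to one dimension right after calling predict, so the original comprehension works unchanged:

# flatten the 2-D array of class labels to 1-D
classes = np.array(classes).flatten()  # [['72'], ['18'], ...] -> ['72', '18', ...]
class_names = [cat_to_name[item] for item in classes]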

Thank you very much. Maybe all that code made me lose focus.
Thank you.

For the train.py file I have an error, could you help please?

def load_model(arch, hidden_layers):
    if arch == 'alexnet':  # checkpoint saving will be defined
        model = models.alexnet(pretrained=True)
        for param in model.parameters():
            param.requires_grad = False
        if hidden_layers:  # in case hidden_layers were given
            classifier = nn.Sequential(OrderedDict([
                ('fc1', nn.Linear(9216, 4096)),
                ('relu1', nn.ReLU()),
                ('dropout1', nn.Dropout(p=0.4)),
                ('fc2', nn.Linear(4096, hidden_layers)),
                ('relu2', nn.ReLU()),
                ('dropout2', nn.Dropout(p=0.4)),
                ('fc3', nn.Linear(hidden_layers, 102)),
                ('output', nn.LogSoftmax(dim=1))
            ]))
        else:  # if hidden_layers not given
            classifier = nn.Sequential(OrderedDict([
                ('fc1', nn.Linear(9216, 4096)),
                ('relu1', nn.ReLU()),
                ('dropout1', nn.Dropout(p=0.4)),
                ('fc2', nn.Linear(4096, 2048)),
                ('relu2', nn.ReLU()),
                ('dropout2', nn.Dropout(p=0.4)),
                ('fc3', nn.Linear(2048, 102)),
                ('output', nn.LogSoftmax(dim=1))
            ]))
    model.classifier = classifier
    return model, arch


Traceback (most recent call last):
  File "train.py", line 143, in <module>
    model, arch = load_model(args.arch, args.hidden_layers)
  File "train.py", line 121, in load_model
    model.classifier = classifier
UnboundLocalError: local variable 'classifier' referenced before assignment
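This happens when args.arch is anything other than 'alexnet': the whole if arch == 'alexnet' block is skipped, so classifier (and model) are never assigned, and the later line model.classifier = classifier then fails. A minimal sketch of one way to guard against it (an assumption on my part: only AlexNet is supported for now, and the two classifier branches are merged with a default hidden size; imports are the same as in the notebook above):

def load_model(arch, hidden_layers):
    # fail fast with a clear message instead of an UnboundLocalError further down
    if arch != 'alexnet':
        raise ValueError("Unsupported arch: {!r}; only 'alexnet' is handled here".format(arch))

    model = models.alexnet(pretrained=True)
    for param in model.parameters():
        param.requires_grad = False

    hidden = hidden_layers if hidden_layers else 2048  # fall back to 2048 when no hidden size is given
    classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(9216, 4096)),
        ('relu1', nn.ReLU()),
        ('dropout1', nn.Dropout(p=0.4)),
        ('fc2', nn.Linear(4096, hidden)),
        ('relu2', nn.ReLU()),
        ('dropout2', nn.Dropout(p=0.4)),
        ('fc3', nn.Linear(hidden, 102)),
        ('output', nn.LogSoftmax(dim=1))
    ]))
    model.classifier = classifier
    return model, arch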