AttributeError: 'numpy.ndarray' object has no attribute 'numpy'

@ptrblck, Hi!

I’m trying to visualize the adversarial images generated by this script:
https://pytorch.org/tutorials/beginner/fgsm_tutorial.html

The tutorial is written for the MNIST data. Now I want to use it on other data, with a model trained using the Inception v1 (GoogLeNet) architecture; below is the gist of that script:

from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, models, transforms
import numpy as np
import matplotlib.pyplot as plt
import os



epsilons = [0, .05, .1, .15, .2, .25, .3]
pretrained_model = "googlenet/googlenet_aux03_ep30.pth"
use_cuda=True

Net = models.googlenet()

# Even if aux_logits is set to False, the state_dict throws unexpected aux arguments.
num_ftrs = Net.aux2.fc2.in_features
Net.aux2.fc2 = nn.Linear(num_ftrs, 5)

num_ftrs = Net.fc.in_features
Net.fc = nn.Linear(num_ftrs, 5)
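# Possible workaround (untested assumption): if load_state_dict later complains about
# unexpected "aux" keys, loading with
#   model.load_state_dict(torch.load(pretrained_model, map_location='cpu'), strict=False)
# would skip them; alternatively keep aux_logits=True so the checkpoint keys match the model.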


test_loader = torch.utils.data.DataLoader( 
            datasets.ImageFolder(os.path.join('data/val'), transform=transforms.Compose([
                 transforms.Resize(224),
                 transforms.RandomCrop(224),
                 transforms.ToTensor(),
                 transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) 
            ])),
               batch_size=1, shuffle=True)


print("CUDA Available: ",torch.cuda.is_available())
device = torch.device("cuda:1" if (use_cuda and torch.cuda.is_available()) else "cpu")

model = Net.to(device)

model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))

model.eval()

def fgsm_attack(image, epsilon, data_grad):
    # Collect the element-wise sign of the data gradient
    sign_data_grad = data_grad.sign()
    # Create the perturbed image by adjusting each pixel of the input image
    perturbed_image = image + epsilon*sign_data_grad
    # Adding clipping to maintain [0,1] range
    perturbed_image = torch.clamp(perturbed_image, 0, 1)
    # Return the perturbed image
    return perturbed_image




def test( model, device, test_loader, epsilon ):

    # Accuracy counter
    correct = 0
    adv_examples = []

    # Loop over all examples in test set
    for data, target in test_loader:

        # Send the data and label to the device
        data, target = data.to(device), target.to(device)

        # Set requires_grad attribute of tensor. Important for Attack
        data.requires_grad = True

        # Forward pass the data through the model
        output = model(data)
        init_pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability

        # If the initial prediction is wrong, don't bother attacking, just move on
        if init_pred.item() != target.item():
            continue

        # Calculate the loss
        loss = F.nll_loss(output, target)

        # Zero all existing gradients
        model.zero_grad()

        # Calculate gradients of model in backward pass
        loss.backward()

        # Collect datagrad
        data_grad = data.grad.data

        # Call FGSM Attack
        perturbed_data = fgsm_attack(data, epsilon, data_grad)

        # Re-classify the perturbed image
        output = model(perturbed_data)

        # Check for success
        final_pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
        if final_pred.item() == target.item():
            correct += 1
            # Special case for saving 0 epsilon examples
            if (epsilon == 0) and (len(adv_examples) < 5):
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )
        else:
            # Save some adv examples for visualization later
            if len(adv_examples) < 5:
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )

    # Calculate final accuracy for this epsilon
    final_acc = correct/float(len(test_loader))
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))

    # Return the accuracy and an adversarial example
    return final_acc, adv_examples




accuracies = []
examples = []

# Run test for each epsilon
for eps in epsilons:
    acc, ex = test(model, device, test_loader, eps)
    accuracies.append(acc)
    examples.append(ex)



plt.figure(figsize=(5,5))
plt.plot(epsilons, accuracies, "*-")
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, .35, step=0.05))
plt.title("Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()


cnt = 0
plt.figure(figsize=(8,10)) 
for i in range(len(epsilons)):
    for j in range(len(examples[i])):
        cnt += 1
        plt.subplot(len(epsilons),len(examples[0]),cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j == 0:
            plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
        orig,adv,ex = examples[i][j]
        ex = ex.numpy().transpose((1, 2, 0))
        mean = np.array([0.485, 0.456, 0.406])
        std = np.array([0.229, 0.224, 0.225]) 
        ex = std * ex + mean
        ex = np.clip(ex, 0, 1)
        #ex = ex.permute(1, 2, 0)
        plt.title("{} -> {}".format(orig, adv))
        plt.imshow(ex)
plt.tight_layout()
plt.show()

Up to the accuracy graph the code ran well, but when visualizing the images it throws this error:

Traceback (most recent call last):
  File "fgsm.py", line 156, in <module>
    ex = ex.numpy().transpose((1, 2, 0))
AttributeError: 'numpy.ndarray' object has no attribute 'numpy'

Any thoughts?

adv_ex is already a numpy array, so you can’t call .numpy() on it again (it’s a tensor method).
Store adv_ex as a tensor or avoid calling .numpy() on it:

adv_ex = perturbed_data.squeeze().detach().cpu()
adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )

Thanks! My bad, I hadn’t noticed that it was already called.

cnt = 0
plt.figure(figsize=(8,10)) 
for i in range(len(epsilons)):
    for j in range(len(examples[i])):
        cnt += 1
        plt.subplot(len(epsilons),len(examples[0]),cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j == 0:
            plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
        orig,adv,ex = examples[i][j]
        ex = ex.transpose((1, 2, 0))
        plt.title("{} -> {}".format(orig, adv))
        plt.imshow(ex)
plt.tight_layout()
plt.show()

Now I’m looking to print the class names; at the moment it only prints the index of each category.

plt.subplot(len(epsilons),len(examples[0]),cnt)
....
plt.title("{} -> {}".format(orig, adv))

@ptrblck: This gives the prediction as the corresponding label index, but I want to print the prediction as the corresponding class name. How can I do that?

Example plot:

I want to plot the predictions as bus -> car.

You could create a dict containing an index to class mapping. Then just use this dict to get the corresponding names:

mapping = {
    0: 'bus',
    1: 'car',
    ...
}

plt.title("{} -> {}".format(mapping[orig], mapping[adv]))
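Alternatively, you could build this mapping from the ImageFolder dataset itself instead of hard-coding it, since it stores the folder-name-to-index mapping in class_to_idx (a small sketch, assuming the same 'data/val' folder as in your script):

val_dataset = datasets.ImageFolder('data/val')
# invert class_to_idx ({'bus': 0, ...}) into an index -> name dict
idx_to_class = {idx: name for name, idx in val_dataset.class_to_idx.items()}

plt.title("{} -> {}".format(idx_to_class[orig], idx_to_class[adv]))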

Thanks!! worked like a charm! :smiley:


There is another problem, with the Epsilon vs. Accuracy graph. For eps=0 the graph should start at the actual validation accuracy. I have 15K samples with 5 categories and took 20% for validation. When I run the attack with eps=0 it prints this, which is quite weird:

Epsilon: 0	Test Accuracy = 596 / 3564 = 0.16722783389450055

It should be the 88% that was reported when training the network.
Could you share some thoughts?

This time I changed the loss criterion:

loss = F.nll_loss(output, target)

to

loss = criterion(output, target)
...
...
criterion = nn.CrossEntropyLoss()
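(My understanding is that torchvision's googlenet returns raw logits, while F.nll_loss expects log-probabilities, so nn.CrossEntropyLoss, which applies log_softmax internally, seemed like the safer choice. A rough sketch of the equivalence on raw logits:)

loss = criterion(output, target)                          # criterion = nn.CrossEntropyLoss()
loss = F.nll_loss(F.log_softmax(output, dim=1), target)   # same result for raw logits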

As far as I understand the issue, you are seeing a discrepancy between the validation and test accuracy?
If so, could you post the validation code so that we can look for differences?


script:

from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, models, transforms
import numpy as np
import matplotlib.pyplot as plt
import os



epsilons = [0, .05, .1, .15, .2, .25, .3]
pretrained_model = "googlenet/googlenet_aux03_ep30.pth"
use_cuda=True

Net = models.googlenet()

# Even if aux_logits is set to False, the state_dict throws unexpected aux arguments.
num_ftrs = Net.aux2.fc2.in_features
Net.aux2.fc2 = nn.Linear(num_ftrs, 5)

num_ftrs = Net.fc.in_features
Net.fc = nn.Linear(num_ftrs, 5)


test_loader = torch.utils.data.DataLoader( 
            datasets.ImageFolder(os.path.join('data/val'), transform=transforms.Compose([
                 transforms.Resize(224),
                 transforms.RandomCrop(224),
                 transforms.ToTensor(),
                 transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) 
            ])),
               batch_size=1, shuffle=True)


print("CUDA Available: ",torch.cuda.is_available())
device = torch.device("cuda:1" if (use_cuda and torch.cuda.is_available()) else "cpu")

model = Net.to(device)

model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))

model.eval()

def fgsm_attack(image, epsilon, data_grad):
    # Collect the element-wise sign of the data gradient
    sign_data_grad = data_grad.sign()
    # Create the perturbed image by adjusting each pixel of the input image
    perturbed_image = image + epsilon*sign_data_grad
    # Adding clipping to maintain [0,1] range
    perturbed_image = torch.clamp(perturbed_image, 0, 1)
    # Return the perturbed image
    return perturbed_image




def test( model, device, test_loader, epsilon ):

    # Accuracy counter
    correct = 0
    adv_examples = []

    # Loop over all examples in test set
    for data, target in test_loader:

        # Send the data and label to the device
        data, target = data.to(device), target.to(device)

        # Set requires_grad attribute of tensor. Important for Attack
        data.requires_grad = True

        # Forward pass the data through the model
        output = model(data)
        init_pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability

        # If the initial prediction is wrong, don't bother attacking, just move on
        if init_pred.item() != target.item():
            continue

        # Calculate the loss
        loss = F.nll_loss(output, target)

        # Zero all existing gradients
        model.zero_grad()

        # Calculate gradients of model in backward pass
        loss.backward()

        # Collect datagrad
        data_grad = data.grad.data

        # Call FGSM Attack
        perturbed_data = fgsm_attack(data, epsilon, data_grad)

        # Re-classify the perturbed image
        output = model(perturbed_data)

        # Check for success
        final_pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
        if final_pred.item() == target.item():
            correct += 1
            # Special case for saving 0 epsilon examples
            if (epsilon == 0) and (len(adv_examples) < 5):
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )
        else:
            # Save some adv examples for visualization later
            if len(adv_examples) < 5:
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex) )

    # Calculate final accuracy for this epsilon
    final_acc = correct/float(len(test_loader))
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))

    # Return the accuracy and an adversarial example
    return final_acc, adv_examples




accuracies = []
examples = []

# Run test for each epsilon
for eps in epsilons:
    acc, ex = test(model, device, test_loader, eps)
    accuracies.append(acc)
    examples.append(ex)



plt.figure(figsize=(5,5))
plt.plot(epsilons, accuracies, "*-")
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, .35, step=0.05))
plt.title("Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()


cnt = 0
plt.figure(figsize=(8,10)) 
for i in range(len(epsilons)):
    for j in range(len(examples[i])):
        cnt += 1
        plt.subplot(len(epsilons),len(examples[0]),cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j == 0:
            plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
        orig,adv,ex = examples[i][j]
        ex = ex.numpy().transpose((1, 2, 0))
        mean = np.array([0.485, 0.456, 0.406])
        std = np.array([0.229, 0.224, 0.225]) 
        ex = std * ex + mean
        ex = np.clip(ex, 0, 1)
        #ex = ex.permute(1, 2, 0)
        plt.title("{} -> {}".format(orig, adv))
        plt.imshow(ex)
plt.tight_layout()
plt.show()

This is the same code I’m following, and when I change the batch_size above 1, the code throws this error:

Traceback (most recent call last):
  In def test(model, device, test_loader, epsilon):
  Line: if init_pred.item() != target.item():
            continue

ValueError: only one element tensors can be converted to Python scalars

For the test accuracy, even if batch_size=1 it should still go through all ~3K samples, right?

Or am I incorrect?

Could you check the shape of init_pred and target?
Based on your code, it should be a single element tensor, but apparently that’s not the case.

Yes, your code currently only works with batch_size=1, since you are using .item() ops and calculating the dataset length based on the length of your DataLoader, which will be wrong for any other batch size.
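If you do want to run with a larger batch size, the bookkeeping could look roughly like this (an untested sketch that ignores the "skip initially misclassified samples" logic, which would need a boolean mask in batch mode):

correct = 0
for data, target in test_loader:
    data, target = data.to(device), target.to(device)
    data.requires_grad = True

    output = model(data)
    loss = F.cross_entropy(output, target)
    model.zero_grad()
    loss.backward()

    perturbed_data = fgsm_attack(data, epsilon, data.grad.data)
    final_pred = model(perturbed_data).argmax(dim=1)

    # compare whole tensors instead of calling .item() on single elements
    correct += (final_pred == target).sum().item()

# divide by the number of samples, not the number of batches
final_acc = correct / float(len(test_loader.dataset))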


There was a power failure on my server; I will check once it’s up. But the problem is why it’s giving such a low accuracy with epsilon=0. When I comment out transforms.Normalize(), it rises to about 40%.


Is there any problem with the NxCxHxW shape, since the original script is for the MNIST data and model, which are based on 1-channel images?

target shape: torch.Size([1])
data shape: torch.Size([1, 3, 224, 224])
init_pred shape: torch.Size([1, 1])