AML21: 06 Adversarial Examples for CIFAR-10¶

Data and Libraries¶

In [1]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms, utils
import matplotlib.pyplot as plt
import numpy

# this 'device' will be used for training our model
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
cuda:0

Load the CIFAR10 dataset¶

Observe that we set shuffle=True for the train loader, so the training data is served in a random order each epoch. Note also that the test loader uses batch_size=1, which the FGSM attack further below relies on.

In [2]:
input_size  = 32*32*3   # images are 32x32 pixels with 3 channels
output_size = 10      # there are 10 classes

train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                   ])),
    batch_size=64, shuffle=True)

test_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10('../data', train=False, transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                   ])),
    batch_size=1, shuffle=True)

classNames = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
Files already downloaded and verified
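The Normalize transform above maps each channel from [0, 1] to [-1, 1] via (x - 0.5) / 0.5; the display helpers below undo this with img / 2 + 0.5. A minimal sketch verifying both directions (the tensor x is a random stand-in for an image):

import torch
from torchvision import transforms

normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
x = torch.rand(3, 32, 32)                # fake image with values in [0, 1]
y = normalize(x)                         # normalized to [-1, 1]
assert torch.allclose(y, x * 2 - 1)      # (x - 0.5) / 0.5 == 2x - 1
assert torch.allclose(y / 2 + 0.5, x)    # the unnormalize step used for plotting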
In [3]:
# show some training images
def imshow(img, plot):
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()   # convert from tensor
    plot.imshow(numpy.transpose(npimg, (1, 2, 0))) 
    

plt.figure(figsize=(16,4))

# fetch a batch of train images; RANDOM
image_batch, label_batch = next(iter(train_loader))
for i in range(20):
    image = image_batch[i]
    label = classNames[label_batch[i].item()]
    plt.subplot(2, 10, i + 1)
    imshow(image, plt)
    plt.axis('off')
    plt.title(label)
plt.show()

CNN for CIFAR-10¶

Helper functions for training and testing¶

In [4]:
# count the number of parameters in a model
def get_n_params(model):
    return sum(p.nelement() for p in model.parameters())

accuracy_list = []
# we pass a model object to this trainer, and it trains this model for one epoch
def train(epoch, model):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # send to device
        data, target = data.to(device), target.to(device)
        
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            
def test(model):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data, target in test_loader:
            # send to device
            data, target = data.to(device), target.to(device)

            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
            pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()

    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    accuracy_list.append(accuracy)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        accuracy))
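Both helpers use F.nll_loss, which pairs with the LogSoftmax layer at the end of the network defined below: together they compute exactly the cross-entropy of the raw logits. A quick sanity check of that identity, using fabricated logits and labels:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)              # fake batch of raw class scores
targets = torch.tensor([1, 0, 3, 7])     # fake labels
loss_a = F.nll_loss(F.log_softmax(logits, dim=1), targets)
loss_b = F.cross_entropy(logits, targets)
assert torch.allclose(loss_a, loss_b)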

Defining the Convolutional Neural Network¶

In [5]:
# FROM: https://github.com/boazbk/mltheoryseminar/blob/main/code/hw0/simple_train.ipynb
## 5-Layer CNN for CIFAR
## This is the Myrtle5 network by David Page (https://myrtle.ai/learn/how-to-train-your-resnet-4-architecture/)

class Flatten(nn.Module):
    # collapse (N, C, 1, 1) activations to (N, C); assumes the spatial dims are already 1x1
    def forward(self, x): return x.view(x.size(0), x.size(1))

class PrintShape(nn.Module):
    def forward(self, x): 
        print(x.shape)
        return x

def make_myrtle5(c=64, num_classes=10):
    ''' Returns a 5-layer CNN with width parameter c. '''
    return nn.Sequential(
        # Layer 0
        nn.Conv2d(3, c, kernel_size=3, stride=1,
                  padding=1, bias=True),
        nn.BatchNorm2d(c),
        nn.ReLU(),

        # Layer 1
        nn.Conv2d(c, c*2, kernel_size=3,
                  stride=1, padding=1, bias=True),
        nn.BatchNorm2d(c*2),
        nn.ReLU(),
        nn.MaxPool2d(2),

        # Layer 2
        nn.Conv2d(c*2, c*4, kernel_size=3,
                  stride=1, padding=1, bias=True),
        nn.BatchNorm2d(c*4),
        nn.ReLU(),
        nn.MaxPool2d(2),

        # Layer 3
        nn.Conv2d(c*4, c*8, kernel_size=3,
                  stride=1, padding=1, bias=True),
        nn.BatchNorm2d(c*8),
        nn.ReLU(),
        nn.MaxPool2d(2),

        # Layer 4
        nn.MaxPool2d(4),
        Flatten(),
        nn.Linear(c*8, num_classes, bias=True),
        #PrintShape(),
        nn.LogSoftmax(dim=1)
    )
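Each MaxPool2d shrinks the spatial resolution (32 → 16 → 8 → 4, then the final MaxPool2d(4) gives 1x1), which is why Flatten can reduce the activations to shape (batch, c*8). A quick sanity check of the shapes and the parameter count, assuming the definitions above:

model = make_myrtle5()
out = model(torch.randn(2, 3, 32, 32))   # fake CIFAR-sized batch
print(out.shape)                         # torch.Size([2, 10])
print(get_n_params(model))               # 1558026, matching the training cell below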

Train¶

In [6]:
print("Training on ", device)
model_cnn = make_myrtle5()
model_cnn.to(device)
optimizer = optim.SGD(model_cnn.parameters(), lr=0.01, momentum=0.5)
print('Number of parameters: {}'.format(get_n_params(model_cnn)))

for epoch in range(0, 20):
    train(epoch, model_cnn)
    test(model_cnn)
Training on  cuda:0
Number of parameters: 1558026
Train Epoch: 0 [0/50000 (0%)]	Loss: 3.106524
Train Epoch: 0 [6400/50000 (13%)]	Loss: 1.625264
Train Epoch: 0 [12800/50000 (26%)]	Loss: 1.381416
Train Epoch: 0 [19200/50000 (38%)]	Loss: 1.344878
Train Epoch: 0 [25600/50000 (51%)]	Loss: 1.379884
Train Epoch: 0 [32000/50000 (64%)]	Loss: 1.179641
Train Epoch: 0 [38400/50000 (77%)]	Loss: 1.059914
Train Epoch: 0 [44800/50000 (90%)]	Loss: 1.072445

Test set: Average loss: 1.5596, Accuracy: 4696/10000 (47%)

Train Epoch: 1 [0/50000 (0%)]	Loss: 1.358958
Train Epoch: 1 [6400/50000 (13%)]	Loss: 0.811542
Train Epoch: 1 [12800/50000 (26%)]	Loss: 0.946492
Train Epoch: 1 [19200/50000 (38%)]	Loss: 0.962316
Train Epoch: 1 [25600/50000 (51%)]	Loss: 0.659359
Train Epoch: 1 [32000/50000 (64%)]	Loss: 0.761658
Train Epoch: 1 [38400/50000 (77%)]	Loss: 0.924984
Train Epoch: 1 [44800/50000 (90%)]	Loss: 0.730772

Test set: Average loss: 0.8927, Accuracy: 6875/10000 (69%)

Train Epoch: 2 [0/50000 (0%)]	Loss: 0.660586
Train Epoch: 2 [6400/50000 (13%)]	Loss: 0.597657
Train Epoch: 2 [12800/50000 (26%)]	Loss: 0.552809
Train Epoch: 2 [19200/50000 (38%)]	Loss: 0.722587
Train Epoch: 2 [25600/50000 (51%)]	Loss: 0.696710
Train Epoch: 2 [32000/50000 (64%)]	Loss: 0.564168
Train Epoch: 2 [38400/50000 (77%)]	Loss: 0.329048
Train Epoch: 2 [44800/50000 (90%)]	Loss: 0.619982

Test set: Average loss: 0.9030, Accuracy: 6926/10000 (69%)

Train Epoch: 3 [0/50000 (0%)]	Loss: 0.711989
Train Epoch: 3 [6400/50000 (13%)]	Loss: 0.611674
Train Epoch: 3 [12800/50000 (26%)]	Loss: 0.378978
Train Epoch: 3 [19200/50000 (38%)]	Loss: 0.543387
Train Epoch: 3 [25600/50000 (51%)]	Loss: 0.372165
Train Epoch: 3 [32000/50000 (64%)]	Loss: 0.493843
Train Epoch: 3 [38400/50000 (77%)]	Loss: 0.598401
Train Epoch: 3 [44800/50000 (90%)]	Loss: 0.619193

Test set: Average loss: 0.8889, Accuracy: 6970/10000 (70%)

Train Epoch: 4 [0/50000 (0%)]	Loss: 0.388548
Train Epoch: 4 [6400/50000 (13%)]	Loss: 0.353815
Train Epoch: 4 [12800/50000 (26%)]	Loss: 0.505328
Train Epoch: 4 [19200/50000 (38%)]	Loss: 0.559288
Train Epoch: 4 [25600/50000 (51%)]	Loss: 0.532493
Train Epoch: 4 [32000/50000 (64%)]	Loss: 0.440807
Train Epoch: 4 [38400/50000 (77%)]	Loss: 0.291763
Train Epoch: 4 [44800/50000 (90%)]	Loss: 0.421708

Test set: Average loss: 0.6595, Accuracy: 7806/10000 (78%)

Train Epoch: 5 [0/50000 (0%)]	Loss: 0.416892
Train Epoch: 5 [6400/50000 (13%)]	Loss: 0.336936
Train Epoch: 5 [12800/50000 (26%)]	Loss: 0.284662
Train Epoch: 5 [19200/50000 (38%)]	Loss: 0.330351
Train Epoch: 5 [25600/50000 (51%)]	Loss: 0.281405
Train Epoch: 5 [32000/50000 (64%)]	Loss: 0.369309
Train Epoch: 5 [38400/50000 (77%)]	Loss: 0.426486
Train Epoch: 5 [44800/50000 (90%)]	Loss: 0.534155

Test set: Average loss: 1.1481, Accuracy: 6656/10000 (67%)

Train Epoch: 6 [0/50000 (0%)]	Loss: 0.257439
Train Epoch: 6 [6400/50000 (13%)]	Loss: 0.177836
Train Epoch: 6 [12800/50000 (26%)]	Loss: 0.194339
Train Epoch: 6 [19200/50000 (38%)]	Loss: 0.214020
Train Epoch: 6 [25600/50000 (51%)]	Loss: 0.229724
Train Epoch: 6 [32000/50000 (64%)]	Loss: 0.086430
Train Epoch: 6 [38400/50000 (77%)]	Loss: 0.193378
Train Epoch: 6 [44800/50000 (90%)]	Loss: 0.196569

Test set: Average loss: 0.7545, Accuracy: 7597/10000 (76%)

Train Epoch: 7 [0/50000 (0%)]	Loss: 0.248811
Train Epoch: 7 [6400/50000 (13%)]	Loss: 0.105808
Train Epoch: 7 [12800/50000 (26%)]	Loss: 0.087254
Train Epoch: 7 [19200/50000 (38%)]	Loss: 0.088972
Train Epoch: 7 [25600/50000 (51%)]	Loss: 0.078368
Train Epoch: 7 [32000/50000 (64%)]	Loss: 0.095803
Train Epoch: 7 [38400/50000 (77%)]	Loss: 0.089239
Train Epoch: 7 [44800/50000 (90%)]	Loss: 0.047729

Test set: Average loss: 0.7794, Accuracy: 7620/10000 (76%)

Train Epoch: 8 [0/50000 (0%)]	Loss: 0.228080
Train Epoch: 8 [6400/50000 (13%)]	Loss: 0.042177
Train Epoch: 8 [12800/50000 (26%)]	Loss: 0.048291
Train Epoch: 8 [19200/50000 (38%)]	Loss: 0.049205
Train Epoch: 8 [25600/50000 (51%)]	Loss: 0.040406
Train Epoch: 8 [32000/50000 (64%)]	Loss: 0.081263
Train Epoch: 8 [38400/50000 (77%)]	Loss: 0.109781
Train Epoch: 8 [44800/50000 (90%)]	Loss: 0.066910

Test set: Average loss: 0.5836, Accuracy: 8174/10000 (82%)

Train Epoch: 9 [0/50000 (0%)]	Loss: 0.024628
Train Epoch: 9 [6400/50000 (13%)]	Loss: 0.030493
Train Epoch: 9 [12800/50000 (26%)]	Loss: 0.027586
Train Epoch: 9 [19200/50000 (38%)]	Loss: 0.024024
Train Epoch: 9 [25600/50000 (51%)]	Loss: 0.025618
Train Epoch: 9 [32000/50000 (64%)]	Loss: 0.047199
Train Epoch: 9 [38400/50000 (77%)]	Loss: 0.024290
Train Epoch: 9 [44800/50000 (90%)]	Loss: 0.024210

Test set: Average loss: 0.5391, Accuracy: 8344/10000 (83%)

Train Epoch: 10 [0/50000 (0%)]	Loss: 0.016167
Train Epoch: 10 [6400/50000 (13%)]	Loss: 0.016917
Train Epoch: 10 [12800/50000 (26%)]	Loss: 0.012637
Train Epoch: 10 [19200/50000 (38%)]	Loss: 0.012082
Train Epoch: 10 [25600/50000 (51%)]	Loss: 0.010251
Train Epoch: 10 [32000/50000 (64%)]	Loss: 0.007521
Train Epoch: 10 [38400/50000 (77%)]	Loss: 0.013054
Train Epoch: 10 [44800/50000 (90%)]	Loss: 0.017279

Test set: Average loss: 0.5222, Accuracy: 8381/10000 (84%)

Train Epoch: 11 [0/50000 (0%)]	Loss: 0.006792
Train Epoch: 11 [6400/50000 (13%)]	Loss: 0.008937
Train Epoch: 11 [12800/50000 (26%)]	Loss: 0.005801
Train Epoch: 11 [19200/50000 (38%)]	Loss: 0.008794
Train Epoch: 11 [25600/50000 (51%)]	Loss: 0.009368
Train Epoch: 11 [32000/50000 (64%)]	Loss: 0.008313
Train Epoch: 11 [38400/50000 (77%)]	Loss: 0.006226
Train Epoch: 11 [44800/50000 (90%)]	Loss: 0.004737

Test set: Average loss: 0.5144, Accuracy: 8429/10000 (84%)

Train Epoch: 12 [0/50000 (0%)]	Loss: 0.006254
Train Epoch: 12 [6400/50000 (13%)]	Loss: 0.005907
Train Epoch: 12 [12800/50000 (26%)]	Loss: 0.006535
Train Epoch: 12 [19200/50000 (38%)]	Loss: 0.011760
Train Epoch: 12 [25600/50000 (51%)]	Loss: 0.010084
Train Epoch: 12 [32000/50000 (64%)]	Loss: 0.011553
Train Epoch: 12 [38400/50000 (77%)]	Loss: 0.005591
Train Epoch: 12 [44800/50000 (90%)]	Loss: 0.005311

Test set: Average loss: 0.5172, Accuracy: 8420/10000 (84%)

Train Epoch: 13 [0/50000 (0%)]	Loss: 0.003888
Train Epoch: 13 [6400/50000 (13%)]	Loss: 0.006023
Train Epoch: 13 [12800/50000 (26%)]	Loss: 0.006655
Train Epoch: 13 [19200/50000 (38%)]	Loss: 0.005831
Train Epoch: 13 [25600/50000 (51%)]	Loss: 0.005143
Train Epoch: 13 [32000/50000 (64%)]	Loss: 0.007148
Train Epoch: 13 [38400/50000 (77%)]	Loss: 0.007719
Train Epoch: 13 [44800/50000 (90%)]	Loss: 0.012502

Test set: Average loss: 0.6239, Accuracy: 8229/10000 (82%)

Train Epoch: 14 [0/50000 (0%)]	Loss: 0.022868
Train Epoch: 14 [6400/50000 (13%)]	Loss: 0.004719
Train Epoch: 14 [12800/50000 (26%)]	Loss: 0.006129
Train Epoch: 14 [19200/50000 (38%)]	Loss: 0.007271
Train Epoch: 14 [25600/50000 (51%)]	Loss: 0.005520
Train Epoch: 14 [32000/50000 (64%)]	Loss: 0.004840
Train Epoch: 14 [38400/50000 (77%)]	Loss: 0.013462
Train Epoch: 14 [44800/50000 (90%)]	Loss: 0.004616

Test set: Average loss: 0.5223, Accuracy: 8428/10000 (84%)

Train Epoch: 15 [0/50000 (0%)]	Loss: 0.003577
Train Epoch: 15 [6400/50000 (13%)]	Loss: 0.005274
Train Epoch: 15 [12800/50000 (26%)]	Loss: 0.003778
Train Epoch: 15 [19200/50000 (38%)]	Loss: 0.004391
Train Epoch: 15 [25600/50000 (51%)]	Loss: 0.003953
Train Epoch: 15 [32000/50000 (64%)]	Loss: 0.005661
Train Epoch: 15 [38400/50000 (77%)]	Loss: 0.004470
Train Epoch: 15 [44800/50000 (90%)]	Loss: 0.006085

Test set: Average loss: 0.5277, Accuracy: 8432/10000 (84%)

Train Epoch: 16 [0/50000 (0%)]	Loss: 0.004538
Train Epoch: 16 [6400/50000 (13%)]	Loss: 0.003264
Train Epoch: 16 [12800/50000 (26%)]	Loss: 0.004078
Train Epoch: 16 [19200/50000 (38%)]	Loss: 0.005221
Train Epoch: 16 [25600/50000 (51%)]	Loss: 0.007165
Train Epoch: 16 [32000/50000 (64%)]	Loss: 0.002838
Train Epoch: 16 [38400/50000 (77%)]	Loss: 0.005930
Train Epoch: 16 [44800/50000 (90%)]	Loss: 0.002435

Test set: Average loss: 0.5394, Accuracy: 8424/10000 (84%)

Train Epoch: 17 [0/50000 (0%)]	Loss: 0.003780
Train Epoch: 17 [6400/50000 (13%)]	Loss: 0.004128
Train Epoch: 17 [12800/50000 (26%)]	Loss: 0.003847
Train Epoch: 17 [19200/50000 (38%)]	Loss: 0.004604
Train Epoch: 17 [25600/50000 (51%)]	Loss: 0.003644
Train Epoch: 17 [32000/50000 (64%)]	Loss: 0.002973
Train Epoch: 17 [38400/50000 (77%)]	Loss: 0.004471
Train Epoch: 17 [44800/50000 (90%)]	Loss: 0.003981

Test set: Average loss: 0.5396, Accuracy: 8405/10000 (84%)

Train Epoch: 18 [0/50000 (0%)]	Loss: 0.003025
Train Epoch: 18 [6400/50000 (13%)]	Loss: 0.003844
Train Epoch: 18 [12800/50000 (26%)]	Loss: 0.003228
Train Epoch: 18 [19200/50000 (38%)]	Loss: 0.002966
Train Epoch: 18 [25600/50000 (51%)]	Loss: 0.003388
Train Epoch: 18 [32000/50000 (64%)]	Loss: 0.003130
Train Epoch: 18 [38400/50000 (77%)]	Loss: 0.003530
Train Epoch: 18 [44800/50000 (90%)]	Loss: 0.003037

Test set: Average loss: 0.5370, Accuracy: 8438/10000 (84%)

Train Epoch: 19 [0/50000 (0%)]	Loss: 0.002627
Train Epoch: 19 [6400/50000 (13%)]	Loss: 0.004111
Train Epoch: 19 [12800/50000 (26%)]	Loss: 0.003743
Train Epoch: 19 [19200/50000 (38%)]	Loss: 0.003888
Train Epoch: 19 [25600/50000 (51%)]	Loss: 0.002524
Train Epoch: 19 [32000/50000 (64%)]	Loss: 0.004148
Train Epoch: 19 [38400/50000 (77%)]	Loss: 0.003245
Train Epoch: 19 [44800/50000 (90%)]	Loss: 0.002573

Test set: Average loss: 0.5504, Accuracy: 8408/10000 (84%)

Show some predictions of the trained network on the test set¶

In [7]:
def visualize_pred(img, pred_prob, real_label):
    ''' Function for viewing an image and its predicted class probabilities. '''
    fig, (ax1, ax2) = plt.subplots(figsize=(6,9), ncols=2)
    imshow(img, ax1)
    ax1.axis('off')
    pred_label = numpy.argmax(pred_prob)
    ax1.set_title([classNames[real_label], classNames[pred_label]])
    
    ax2.barh(numpy.arange(10), pred_prob)
    ax2.set_aspect(0.1)
    ax2.set_yticks(numpy.arange(10))
    ax2.set_yticklabels(classNames)
    ax2.set_title('Prediction Probability')
    ax2.set_xlim(0, 1.1)
    plt.tight_layout()
In [14]:
model_cnn.to('cpu')  # move the model back to the CPU for this demo

# make a batch of predictions
for i in range(10):
    # fetch a batch of test images and predict; note batch contains only 1 image
    image_batch, label_batch = next(iter(test_loader))
    with torch.no_grad():
        log_pred_prob_batch = model_cnn(image_batch)
    img = image_batch[0]
    real_label = label_batch[0].item()
    log_pred_prob = log_pred_prob_batch[0]
    # Output of the network are log-probabilities, need to take exponential for probabilities
    pred_prob = torch.exp(log_pred_prob).data.numpy().squeeze()
    visualize_pred(img, pred_prob, real_label)
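Because the network ends in LogSoftmax, exponentiating its output really does give a probability distribution over the 10 classes. A quick check, reusing the last image_batch from the loop above:

with torch.no_grad():
    p = torch.exp(model_cnn(image_batch))
print(p.sum(dim=1))                      # each row sums to (approximately) 1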

Is the CNN really seeing like we do?¶

Adversarial examples using the Fast Gradient Sign Method (FGSM), which perturbs the input by x_adv = x + epsilon * sign(grad_x J(theta, x, y)); see: https://pytorch.org/tutorials/beginner/fgsm_tutorial.html#fast-gradient-sign-attack

In [9]:
epsilons = [0.01, 0.02, 0.04, 0.08, 0.16, 0.32]
# FGSM attack code
def fgsm_attack(image, epsilon, data_grad):
    # Collect the element-wise sign of the data gradient
    sign_data_grad = data_grad.sign()
    # Create the perturbed image by adjusting each pixel of the input image using the gradients
    perturbed_image = image + epsilon*sign_data_grad
    # Clip to stay within the normalized [-1, 1] range of the input images
    perturbed_image = torch.clamp(perturbed_image, -1, 1)
    # Return the perturbed image
    return perturbed_image
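A toy check of the update rule, under the definitions above: since the perturbation is epsilon times the element-wise sign of the gradient, its L-infinity norm is exactly epsilon (before clamping). The gradient g here is a random stand-in:

x = torch.zeros(1, 3, 32, 32)            # stand-in for a normalized input image
g = torch.randn_like(x)                  # stand-in for the data gradient
x_adv = fgsm_attack(x, 0.1, g)
print((x_adv - x).abs().max())           # tensor(0.1000): every pixel moves by exactly eps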
In [10]:
def fgsm_test( model, device, test_loader, epsilon ):
    # Accuracy counter
    correct = 0
    adv_examples = []
    
    progress_count = 0
    # Loop over all examples in the test set, one by one (test_loader has batch_size=1)
    for data, target in test_loader:
        progress_count += 1
        
        # Send the data and label to the device
        data, target = data.to(device), target.to(device)

        # Set requires_grad attribute of tensor. Important for Attack
        data.requires_grad = True

        # Forward pass the data through the model
        output = model(data)
        init_pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability

        # If the model is wrong, then this can't be an adversarial example, move on to the next example
        if init_pred.item() != target.item():
            continue

        # Calculate the loss
        loss = F.nll_loss(output, target)

        # Zero all existing gradients
        model.zero_grad()

        # Calculate gradients of model in backward pass
        loss.backward()

        # Collect gradients of the data
        data_grad = data.grad.data

        # Call FGSM Attack
        perturbed_data = fgsm_attack(data, epsilon, data_grad)

        # Apply the model to the perturbed image
        output = model(perturbed_data)
        final_pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
        
        # check if the perturbation forces a misclassification
        if final_pred.item() != target.item():
            # Save some adv examples for visualization later
            if len(adv_examples) < 10:
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                real_im = data.squeeze().detach().cpu().numpy()
                adv_examples.append( (init_pred.item(), final_pred.item(), adv_ex, real_im) )
        else:
            correct += 1
        
        if progress_count % 1000 == 0:
            print('FGSM Attack Iteration: {}'.format(progress_count))

    # Calculate final accuracy for this epsilon
    final_acc = correct/float(len(test_loader))
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))

    # Return the accuracy and an adversarial example
    return final_acc, adv_examples
In [11]:
accuracies = []
adv_examples = []

# move the model to the GPU (if available)
model_cnn.to(device)

# Run test for each epsilon
for eps in epsilons:
    acc, ex = fgsm_test(model_cnn, device, test_loader, eps)
    accuracies.append(acc)
    adv_examples.append(ex)
FGSM Attack Iteration: 1000
FGSM Attack Iteration: 2000
FGSM Attack Iteration: 3000
FGSM Attack Iteration: 4000
FGSM Attack Iteration: 5000
FGSM Attack Iteration: 6000
FGSM Attack Iteration: 7000
FGSM Attack Iteration: 8000
FGSM Attack Iteration: 9000
FGSM Attack Iteration: 10000
Epsilon: 0.01	Test Accuracy = 3308 / 10000 = 0.3308
FGSM Attack Iteration: 1000
FGSM Attack Iteration: 2000
FGSM Attack Iteration: 3000
FGSM Attack Iteration: 4000
FGSM Attack Iteration: 5000
FGSM Attack Iteration: 6000
FGSM Attack Iteration: 7000
FGSM Attack Iteration: 8000
FGSM Attack Iteration: 9000
FGSM Attack Iteration: 10000
Epsilon: 0.02	Test Accuracy = 1073 / 10000 = 0.1073
FGSM Attack Iteration: 1000
FGSM Attack Iteration: 2000
FGSM Attack Iteration: 3000
FGSM Attack Iteration: 4000
FGSM Attack Iteration: 5000
FGSM Attack Iteration: 6000
FGSM Attack Iteration: 7000
FGSM Attack Iteration: 8000
FGSM Attack Iteration: 9000
FGSM Attack Iteration: 10000
Epsilon: 0.04	Test Accuracy = 314 / 10000 = 0.0314
FGSM Attack Iteration: 1000
FGSM Attack Iteration: 2000
FGSM Attack Iteration: 3000
FGSM Attack Iteration: 4000
FGSM Attack Iteration: 5000
FGSM Attack Iteration: 6000
FGSM Attack Iteration: 7000
FGSM Attack Iteration: 8000
FGSM Attack Iteration: 9000
FGSM Attack Iteration: 10000
Epsilon: 0.08	Test Accuracy = 138 / 10000 = 0.0138
FGSM Attack Iteration: 1000
FGSM Attack Iteration: 2000
FGSM Attack Iteration: 3000
FGSM Attack Iteration: 4000
FGSM Attack Iteration: 5000
FGSM Attack Iteration: 6000
FGSM Attack Iteration: 7000
FGSM Attack Iteration: 8000
FGSM Attack Iteration: 9000
FGSM Attack Iteration: 10000
Epsilon: 0.16	Test Accuracy = 259 / 10000 = 0.0259
FGSM Attack Iteration: 1000
FGSM Attack Iteration: 2000
FGSM Attack Iteration: 3000
FGSM Attack Iteration: 4000
FGSM Attack Iteration: 5000
FGSM Attack Iteration: 6000
FGSM Attack Iteration: 7000
FGSM Attack Iteration: 8000
FGSM Attack Iteration: 9000
FGSM Attack Iteration: 10000
Epsilon: 0.32	Test Accuracy = 756 / 10000 = 0.0756
In [12]:
plt.figure(figsize=(5,5))
plt.plot(epsilons, accuracies, "*-")
plt.yticks(numpy.arange(0, 1.1, step=0.1))
plt.xticks(numpy.arange(0, .35, step=0.05))
plt.title("Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()
In [27]:
# Plot several examples of adversarial samples at each epsilon
def imshow2(img, plot):
    img = img / 2 + 0.5  # unnormalize (inputs here are numpy arrays, not tensors)
    plot.imshow(numpy.transpose(img, (1, 2, 0)))

for i in range(len(epsilons)):
    eps = epsilons[i]
    for j in range(len(adv_examples[i])):      
        orig,adv,ex,img = adv_examples[i][j]
        noise = ex - img
        
        fig, (ax1, ax2, ax3, ax4) = plt.subplots(figsize=(6,9), ncols=4)
        
        imshow2(img, ax1)
        ax1.axis('off')
        ax1.set_title(classNames[orig])

        imshow2(ex, ax2)
        ax2.axis('off')
        ax2.set_title(classNames[adv])
        
        imshow2(noise+1, ax3)
        ax3.axis('off')
        ax3.set_title("noise (eps = {})".format(eps))

        imshow2(noise/eps, ax4)
        ax4.axis('off')
        ax4.set_title("sign pattern (noise / eps)")
        
        plt.tight_layout()