AML: 03 Deep Neural Network for CIFAR-10¶

Based on https://github.com/Atcold/pytorch-Deep-Learning

Data and Libraries¶

In [1]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms, utils
import matplotlib.pyplot as plt
import numpy
import random

# set the pseudo-random generator seeds for better reproducibility
# see here for more: https://pytorch.org/docs/stable/notes/randomness.html
torch.manual_seed(99)
random.seed(99)
numpy.random.seed(99)

# this 'device' will be used for training our model
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
cuda:0

Load the CIFAR10 dataset¶

Observe that we set shuffle=True for the training loader, so the data is presented in a new random order every epoch.

In [2]:
input_size  = 32*32*3   # images are 32x32 pixels with 3 channels
output_size = 10      # there are 10 classes

train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                   ])),
    batch_size=64, shuffle=True)

test_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10('../data', train=False, transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                   ])),
    batch_size=1000, shuffle=True)

classNames = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ../data/cifar-10-python.tar.gz
Extracting ../data/cifar-10-python.tar.gz to ../data
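
The Normalize transform with per-channel mean 0.5 and std 0.5 maps the [0, 1] pixel range produced by ToTensor to [-1, 1] via x -> (x - 0.5) / 0.5. A minimal sanity check of this (a sketch, reusing the train_loader defined above):

# sanity check (sketch): normalized pixels should span roughly [-1, 1]
images, _ = next(iter(train_loader))
print(images.shape)                              # torch.Size([64, 3, 32, 32])
print(images.min().item(), images.max().item())  # approximately -1.0 and 1.0
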
In [3]:
# show some training images
def imshow(img, plot):
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()   # convert from tensor
    plot.imshow(numpy.transpose(npimg, (1, 2, 0))) 
    

plt.figure(figsize=(8,3), dpi=200)

# fetch a batch of train images; RANDOM
image_batch, label_batch = next(iter(train_loader))
#imshow(torchvision.utils.make_grid(image_batch))
for i in range(20):
    image = image_batch[i]
    label = classNames[label_batch[i].item()]
    plt.subplot(2, 10, i + 1)
    #image, label = train_loader.dataset.__getitem__(i)
    #plt.imshow(image.squeeze().numpy())
    imshow(image, plt)
    plt.axis('off')
    plt.title(label)
plt.show()

A Fully Connected Neural Network with Three Hidden Layers¶

Helper functions for training and testing¶

In [4]:
# function to count the number of parameters of a model
def get_n_params(model):
    return sum(p.nelement() for p in model.parameters())

accuracy_list = []
# we pass a model object to this trainer, and it trains this model for one
# epoch, using the global optimizer and train_loader
def train(epoch, model):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # send to device
        data, target = data.to(device), target.to(device)
        
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            
def test(model):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in test_loader:
            # send to device
            data, target = data.to(device), target.to(device)

            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).cpu().sum().item()

    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    accuracy_list.append(accuracy)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        accuracy))
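
F.nll_loss expects log-probabilities, which is why the networks below end in nn.LogSoftmax; the pair is equivalent to applying F.cross_entropy to raw logits. A minimal sketch of the equivalence, with made-up logits and labels:

# sketch: nll_loss on log-softmax outputs == cross_entropy on raw logits
logits = torch.randn(4, 10)            # made-up scores: 4 samples, 10 classes
targets = torch.tensor([3, 1, 0, 7])   # made-up class labels
loss_a = F.nll_loss(F.log_softmax(logits, dim=1), targets)
loss_b = F.cross_entropy(logits, targets)
print(torch.allclose(loss_a, loss_b))  # True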

Defining the Fully Connected Network¶

In [15]:
class FC2Layer(nn.Module):
    # despite the name, this network has three hidden layers (200, 100, 60 units)
    def __init__(self, input_size, output_size):
        super(FC2Layer, self).__init__()
        self.input_size = input_size
        self.network = nn.Sequential(
            nn.Linear(input_size, 200), 
            nn.ReLU(), 
            nn.Linear(200,100),
            nn.ReLU(),
            nn.Linear(100,60), 
            nn.ReLU(), 
            nn.Linear(60, output_size), 
            nn.LogSoftmax(dim=1)
        )

    def forward(self, x):
        x = x.view(-1, self.input_size)
        return self.network(x)
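
We can verify get_n_params by hand: each nn.Linear(m, n) contributes m*n weights plus n biases. A quick sketch for the architecture above:

# parameter count by hand for FC2Layer
layers = [(3072, 200), (200, 100), (100, 60), (60, 10)]
print(sum(m * n + n for m, n in layers))  # 641370, matching get_n_params below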

Train the Network¶

In [16]:
print("Training on ", device)
model_fnn = FC2Layer(input_size, output_size)
model_fnn.to(device)
optimizer = optim.SGD(model_fnn.parameters(), lr=0.1)
print('Number of parameters: {}'.format(get_n_params(model_fnn)))

for epoch in range(0, 10):
    train(epoch, model_fnn)
    test(model_fnn)
Training on  cuda:0
Number of parameters: 641370
Train Epoch: 0 [0/50000 (0%)]	Loss: 2.304162
Train Epoch: 0 [6400/50000 (13%)]	Loss: 1.995115
Train Epoch: 0 [12800/50000 (26%)]	Loss: 1.795919
Train Epoch: 0 [19200/50000 (38%)]	Loss: 1.738522
Train Epoch: 0 [25600/50000 (51%)]	Loss: 1.689042
Train Epoch: 0 [32000/50000 (64%)]	Loss: 1.591286
Train Epoch: 0 [38400/50000 (77%)]	Loss: 1.627036
Train Epoch: 0 [44800/50000 (90%)]	Loss: 1.694444

Test set: Average loss: 1.6544, Accuracy: 4071/10000 (41%)

Train Epoch: 1 [0/50000 (0%)]	Loss: 1.653215
Train Epoch: 1 [6400/50000 (13%)]	Loss: 1.547389
Train Epoch: 1 [12800/50000 (26%)]	Loss: 1.377378
Train Epoch: 1 [19200/50000 (38%)]	Loss: 1.658055
Train Epoch: 1 [25600/50000 (51%)]	Loss: 1.501340
Train Epoch: 1 [32000/50000 (64%)]	Loss: 1.585052
Train Epoch: 1 [38400/50000 (77%)]	Loss: 1.281278
Train Epoch: 1 [44800/50000 (90%)]	Loss: 1.367464

Test set: Average loss: 1.5052, Accuracy: 4725/10000 (47%)

Train Epoch: 2 [0/50000 (0%)]	Loss: 1.479052
Train Epoch: 2 [6400/50000 (13%)]	Loss: 1.311661
Train Epoch: 2 [12800/50000 (26%)]	Loss: 1.571302
Train Epoch: 2 [19200/50000 (38%)]	Loss: 1.413308
Train Epoch: 2 [25600/50000 (51%)]	Loss: 1.378344
Train Epoch: 2 [32000/50000 (64%)]	Loss: 1.438764
Train Epoch: 2 [38400/50000 (77%)]	Loss: 1.305411
Train Epoch: 2 [44800/50000 (90%)]	Loss: 1.670251

Test set: Average loss: 1.4518, Accuracy: 4749/10000 (47%)

Train Epoch: 3 [0/50000 (0%)]	Loss: 1.242830
Train Epoch: 3 [6400/50000 (13%)]	Loss: 1.175503
Train Epoch: 3 [12800/50000 (26%)]	Loss: 1.039811
Train Epoch: 3 [19200/50000 (38%)]	Loss: 1.013818
Train Epoch: 3 [25600/50000 (51%)]	Loss: 1.482157
Train Epoch: 3 [32000/50000 (64%)]	Loss: 1.043769
Train Epoch: 3 [38400/50000 (77%)]	Loss: 1.156700
Train Epoch: 3 [44800/50000 (90%)]	Loss: 1.411407

Test set: Average loss: 1.4242, Accuracy: 5020/10000 (50%)

Train Epoch: 4 [0/50000 (0%)]	Loss: 1.445109
Train Epoch: 4 [6400/50000 (13%)]	Loss: 1.229388
Train Epoch: 4 [12800/50000 (26%)]	Loss: 1.415585
Train Epoch: 4 [19200/50000 (38%)]	Loss: 1.240678
Train Epoch: 4 [25600/50000 (51%)]	Loss: 1.481849
Train Epoch: 4 [32000/50000 (64%)]	Loss: 1.447140
Train Epoch: 4 [38400/50000 (77%)]	Loss: 1.498198
Train Epoch: 4 [44800/50000 (90%)]	Loss: 1.141731

Test set: Average loss: 1.4544, Accuracy: 5014/10000 (50%)

Train Epoch: 5 [0/50000 (0%)]	Loss: 1.140694
Train Epoch: 5 [6400/50000 (13%)]	Loss: 1.225999
Train Epoch: 5 [12800/50000 (26%)]	Loss: 1.107019
Train Epoch: 5 [19200/50000 (38%)]	Loss: 1.088318
Train Epoch: 5 [25600/50000 (51%)]	Loss: 1.116142
Train Epoch: 5 [32000/50000 (64%)]	Loss: 1.258776
Train Epoch: 5 [38400/50000 (77%)]	Loss: 1.187100
Train Epoch: 5 [44800/50000 (90%)]	Loss: 0.931305

Test set: Average loss: 1.3825, Accuracy: 5107/10000 (51%)

Train Epoch: 6 [0/50000 (0%)]	Loss: 1.202020
Train Epoch: 6 [6400/50000 (13%)]	Loss: 1.319472
Train Epoch: 6 [12800/50000 (26%)]	Loss: 1.020665
Train Epoch: 6 [19200/50000 (38%)]	Loss: 1.242994
Train Epoch: 6 [25600/50000 (51%)]	Loss: 0.889903
Train Epoch: 6 [32000/50000 (64%)]	Loss: 1.173812
Train Epoch: 6 [38400/50000 (77%)]	Loss: 0.916369
Train Epoch: 6 [44800/50000 (90%)]	Loss: 1.247842

Test set: Average loss: 1.4739, Accuracy: 4950/10000 (50%)

Train Epoch: 7 [0/50000 (0%)]	Loss: 1.301803
Train Epoch: 7 [6400/50000 (13%)]	Loss: 1.010605
Train Epoch: 7 [12800/50000 (26%)]	Loss: 1.077342
Train Epoch: 7 [19200/50000 (38%)]	Loss: 0.956575
Train Epoch: 7 [25600/50000 (51%)]	Loss: 0.912950
Train Epoch: 7 [32000/50000 (64%)]	Loss: 1.442288
Train Epoch: 7 [38400/50000 (77%)]	Loss: 1.067153
Train Epoch: 7 [44800/50000 (90%)]	Loss: 1.060209

Test set: Average loss: 1.4573, Accuracy: 5031/10000 (50%)

Train Epoch: 8 [0/50000 (0%)]	Loss: 0.983081
Train Epoch: 8 [6400/50000 (13%)]	Loss: 1.037802
Train Epoch: 8 [12800/50000 (26%)]	Loss: 1.042636
Train Epoch: 8 [19200/50000 (38%)]	Loss: 0.926930
Train Epoch: 8 [25600/50000 (51%)]	Loss: 0.999057
Train Epoch: 8 [32000/50000 (64%)]	Loss: 0.937924
Train Epoch: 8 [38400/50000 (77%)]	Loss: 0.936452
Train Epoch: 8 [44800/50000 (90%)]	Loss: 0.840367

Test set: Average loss: 1.4752, Accuracy: 5045/10000 (50%)

Train Epoch: 9 [0/50000 (0%)]	Loss: 1.012519
Train Epoch: 9 [6400/50000 (13%)]	Loss: 0.730817
Train Epoch: 9 [12800/50000 (26%)]	Loss: 0.935427
Train Epoch: 9 [19200/50000 (38%)]	Loss: 1.156811
Train Epoch: 9 [25600/50000 (51%)]	Loss: 0.845543
Train Epoch: 9 [32000/50000 (64%)]	Loss: 1.072321
Train Epoch: 9 [38400/50000 (77%)]	Loss: 1.193695
Train Epoch: 9 [44800/50000 (90%)]	Loss: 0.924021

Test set: Average loss: 1.6443, Accuracy: 4809/10000 (48%)

Show some predictions of the trained network on test images¶

In [5]:
def visualize_pred(img, pred_prob, real_label):
    ''' Function for viewing an image and its predicted class probabilities.
    '''
    #pred_prob = pred_prob.data.numpy().squeeze()

    fig, (ax1, ax2) = plt.subplots(figsize=(6,9), ncols=2)
    #ax1.imshow(img.numpy().squeeze())
    imshow(img, ax1)
    ax1.axis('off')
    pred_label = numpy.argmax(pred_prob)
    ax1.set_title([classNames[real_label], classNames[pred_label]])
    
    ax2.barh(numpy.arange(10), pred_prob)
    ax2.set_aspect(0.1)
    ax2.set_yticks(numpy.arange(10))
    ax2.set_yticklabels(classNames)
    ax2.set_title('Prediction Probability')
    ax2.set_xlim(0, 1.1)
    plt.tight_layout()
In [18]:
model_fnn.to('cpu') 

# fetch a batch of test images
image_batch, label_batch = next(iter(test_loader))

# Turn off gradients to speed up this part
with torch.no_grad():
    log_pred_prob_batch = model_fnn(image_batch)
for i in range(10):
    img = image_batch[i]
    real_label = label_batch[i].item()
    log_pred_prob = log_pred_prob_batch[i]
    # the network outputs log-probabilities; take the exponential to get probabilities
    pred_prob = torch.exp(log_pred_prob).data.numpy().squeeze()
    visualize_pred(img, pred_prob, real_label)

FCN for CIFAR-10 with Dropout¶

In [6]:
class FC2LayerDropout(nn.Module):
    def __init__(self, input_size, output_size):
        super(FC2LayerDropout, self).__init__()
        self.input_size = input_size
        self.network = nn.Sequential(
            nn.Linear(input_size, 200),
            nn.Dropout(0.25),
            nn.ReLU(), 
            nn.Linear(200, 100),
            nn.Dropout(0.25),
            nn.ReLU(), 
            nn.Linear(100,60),
            nn.Dropout(0.25),
            nn.ReLU(),
            nn.Linear(60, output_size), 
            nn.LogSoftmax(dim=1)
        )

    def forward(self, x):
        x = x.view(-1, self.input_size)
        return self.network(x)
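
nn.Dropout behaves differently in the two modes: in train() mode it zeroes each activation with probability p and rescales the survivors by 1/(1-p); in eval() mode it is the identity. A minimal sketch:

# sketch of dropout's mode-dependent behaviour
drop = nn.Dropout(0.25)
x = torch.ones(8)
drop.train()
print(drop(x))  # some entries zeroed, survivors scaled by 1 / (1 - 0.25)
drop.eval()
print(drop(x))  # identity: all ones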
In [22]:
print("With Dropout Training on ", device)
model = FC2LayerDropout(input_size, output_size)
model.to(device)
optimizer = optim.SGD(model.parameters(), lr=0.1)
print('Number of parameters: {}'.format(get_n_params(model)))

for epoch in range(0, 10):
    # train() and test() already switch modes internally; the explicit calls
    # here are redundant, but emphasize that dropout is active only in training
    model.train() # training mode: dropout (and batch-norm, if any) is on
    train(epoch, model)
    model.eval()  # evaluation mode: dropout is turned off
    test(model)
With Dropout Training on  cuda:0
Number of parameters: 641370
Train Epoch: 0 [0/50000 (0%)]	Loss: 2.326961
Train Epoch: 0 [6400/50000 (13%)]	Loss: 2.193686
Train Epoch: 0 [12800/50000 (26%)]	Loss: 2.060671
Train Epoch: 0 [19200/50000 (38%)]	Loss: 1.708519
Train Epoch: 0 [25600/50000 (51%)]	Loss: 1.863665
Train Epoch: 0 [32000/50000 (64%)]	Loss: 1.619628
Train Epoch: 0 [38400/50000 (77%)]	Loss: 1.721974
Train Epoch: 0 [44800/50000 (90%)]	Loss: 1.783918

Test set: Average loss: 1.6828, Accuracy: 3981/10000 (40%)

Train Epoch: 1 [0/50000 (0%)]	Loss: 1.809963
Train Epoch: 1 [6400/50000 (13%)]	Loss: 1.687869
Train Epoch: 1 [12800/50000 (26%)]	Loss: 1.879291
Train Epoch: 1 [19200/50000 (38%)]	Loss: 1.684056
Train Epoch: 1 [25600/50000 (51%)]	Loss: 1.609680
Train Epoch: 1 [32000/50000 (64%)]	Loss: 1.784850
Train Epoch: 1 [38400/50000 (77%)]	Loss: 1.631134
Train Epoch: 1 [44800/50000 (90%)]	Loss: 1.689877

Test set: Average loss: 1.6224, Accuracy: 4223/10000 (42%)

Train Epoch: 2 [0/50000 (0%)]	Loss: 1.743572
Train Epoch: 2 [6400/50000 (13%)]	Loss: 1.571040
Train Epoch: 2 [12800/50000 (26%)]	Loss: 1.605759
Train Epoch: 2 [19200/50000 (38%)]	Loss: 1.410265
Train Epoch: 2 [25600/50000 (51%)]	Loss: 1.720144
Train Epoch: 2 [32000/50000 (64%)]	Loss: 1.475236
Train Epoch: 2 [38400/50000 (77%)]	Loss: 1.726437
Train Epoch: 2 [44800/50000 (90%)]	Loss: 1.706026

Test set: Average loss: 1.4774, Accuracy: 4680/10000 (47%)

Train Epoch: 3 [0/50000 (0%)]	Loss: 1.387784
Train Epoch: 3 [6400/50000 (13%)]	Loss: 1.339004
Train Epoch: 3 [12800/50000 (26%)]	Loss: 1.563184
Train Epoch: 3 [19200/50000 (38%)]	Loss: 1.359827
Train Epoch: 3 [25600/50000 (51%)]	Loss: 1.688420
Train Epoch: 3 [32000/50000 (64%)]	Loss: 1.458492
Train Epoch: 3 [38400/50000 (77%)]	Loss: 1.703303
Train Epoch: 3 [44800/50000 (90%)]	Loss: 1.403051

Test set: Average loss: 1.4374, Accuracy: 4914/10000 (49%)

Train Epoch: 4 [0/50000 (0%)]	Loss: 1.483899
Train Epoch: 4 [6400/50000 (13%)]	Loss: 1.663017
Train Epoch: 4 [12800/50000 (26%)]	Loss: 1.711492
Train Epoch: 4 [19200/50000 (38%)]	Loss: 1.462738
Train Epoch: 4 [25600/50000 (51%)]	Loss: 1.497886
Train Epoch: 4 [32000/50000 (64%)]	Loss: 1.795812
Train Epoch: 4 [38400/50000 (77%)]	Loss: 1.554744
Train Epoch: 4 [44800/50000 (90%)]	Loss: 1.486042

Test set: Average loss: 1.4285, Accuracy: 4942/10000 (49%)

Train Epoch: 5 [0/50000 (0%)]	Loss: 1.380921
Train Epoch: 5 [6400/50000 (13%)]	Loss: 1.426521
Train Epoch: 5 [12800/50000 (26%)]	Loss: 1.585329
Train Epoch: 5 [19200/50000 (38%)]	Loss: 1.478709
Train Epoch: 5 [25600/50000 (51%)]	Loss: 1.311152
Train Epoch: 5 [32000/50000 (64%)]	Loss: 1.378225
Train Epoch: 5 [38400/50000 (77%)]	Loss: 1.303110
Train Epoch: 5 [44800/50000 (90%)]	Loss: 1.551261

Test set: Average loss: 1.4207, Accuracy: 4930/10000 (49%)

Train Epoch: 6 [0/50000 (0%)]	Loss: 1.543542
Train Epoch: 6 [6400/50000 (13%)]	Loss: 1.289722
Train Epoch: 6 [12800/50000 (26%)]	Loss: 1.363730
Train Epoch: 6 [19200/50000 (38%)]	Loss: 1.589529
Train Epoch: 6 [25600/50000 (51%)]	Loss: 1.520684
Train Epoch: 6 [32000/50000 (64%)]	Loss: 1.621660
Train Epoch: 6 [38400/50000 (77%)]	Loss: 1.471547
Train Epoch: 6 [44800/50000 (90%)]	Loss: 1.436551

Test set: Average loss: 1.3892, Accuracy: 5041/10000 (50%)

Train Epoch: 7 [0/50000 (0%)]	Loss: 1.127630
Train Epoch: 7 [6400/50000 (13%)]	Loss: 1.713830
Train Epoch: 7 [12800/50000 (26%)]	Loss: 1.427624
Train Epoch: 7 [19200/50000 (38%)]	Loss: 1.495048
Train Epoch: 7 [25600/50000 (51%)]	Loss: 1.213217
Train Epoch: 7 [32000/50000 (64%)]	Loss: 1.374471
Train Epoch: 7 [38400/50000 (77%)]	Loss: 1.415822
Train Epoch: 7 [44800/50000 (90%)]	Loss: 1.601325

Test set: Average loss: 1.3574, Accuracy: 5178/10000 (52%)

Train Epoch: 8 [0/50000 (0%)]	Loss: 1.683112
Train Epoch: 8 [6400/50000 (13%)]	Loss: 1.582352
Train Epoch: 8 [12800/50000 (26%)]	Loss: 1.421546
Train Epoch: 8 [19200/50000 (38%)]	Loss: 1.518243
Train Epoch: 8 [25600/50000 (51%)]	Loss: 1.601671
Train Epoch: 8 [32000/50000 (64%)]	Loss: 1.373556
Train Epoch: 8 [38400/50000 (77%)]	Loss: 1.678169
Train Epoch: 8 [44800/50000 (90%)]	Loss: 1.408652

Test set: Average loss: 1.3423, Accuracy: 5260/10000 (53%)

Train Epoch: 9 [0/50000 (0%)]	Loss: 1.298152
Train Epoch: 9 [6400/50000 (13%)]	Loss: 1.589452
Train Epoch: 9 [12800/50000 (26%)]	Loss: 1.268388
Train Epoch: 9 [19200/50000 (38%)]	Loss: 1.207027
Train Epoch: 9 [25600/50000 (51%)]	Loss: 1.561248
Train Epoch: 9 [32000/50000 (64%)]	Loss: 1.288476
Train Epoch: 9 [38400/50000 (77%)]	Loss: 1.324280
Train Epoch: 9 [44800/50000 (90%)]	Loss: 1.316210

Test set: Average loss: 1.3518, Accuracy: 5216/10000 (52%)

In [23]:
model.to('cpu') 

# fetch a batch of test images
image_batch, label_batch = next(iter(test_loader))

# Turn off gradients to speed up this part
with torch.no_grad():
    log_pred_prob_batch = model(image_batch)
for i in range(10):
    img = image_batch[i]
    real_label = label_batch[i].item()
    log_pred_prob = log_pred_prob_batch[i]
    # the network outputs log-probabilities; take the exponential to get probabilities
    pred_prob = torch.exp(log_pred_prob).data.numpy().squeeze()
    visualize_pred(img, pred_prob, real_label)

Does the Fully Connected Network use "Visual Information"?¶

In [24]:
fixed_perm = torch.randperm(3072) # fix a permutation of the image pixels; the same permutation is applied to every image

# show some training images
plt.figure(figsize=(8, 8))

# fetch a batch of train images; RANDOM
image_batch, label_batch = next(iter(train_loader))

for i in range(6):
    image = image_batch[i]
    image_perm = image.view(-1, 32*32*3).clone()
    image_perm = image_perm[:, fixed_perm]
    image_perm = image_perm.view(3, 32, 32)
    
    label = label_batch[i].item()
    plt.subplot(3, 4, 2*i + 1)
    #image, label = train_loader.dataset.__getitem__(i)
    #plt.imshow(image.squeeze().numpy())
    imshow(image, plt)
    plt.axis('off')
    plt.title(classNames[label])
    plt.subplot(3, 4, 2*i+2)
    #plt.imshow(image_perm.squeeze().numpy())
    imshow(image_perm, plt)
    plt.axis('off')
    plt.title(classNames[label])
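
A fully connected network treats its input as a flat vector, so a fixed pixel permutation can be absorbed into the first layer's weight columns; we should therefore expect roughly the same accuracy on scrambled images. A minimal sketch of why (a hypothetical small layer, not part of the models above):

# permuting the input columns and the weight columns identically
# leaves the output of a Linear layer unchanged
layer = nn.Linear(3072, 200)
layer_p = nn.Linear(3072, 200)
x = torch.randn(4, 3072)
with torch.no_grad():
    layer_p.weight.copy_(layer.weight[:, fixed_perm])  # permute weight columns
    layer_p.bias.copy_(layer.bias)
print(torch.allclose(layer(x), layer_p(x[:, fixed_perm]), atol=1e-5))  # True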
In [25]:
accuracy_list = []

def scramble_train(epoch, model, perm=torch.arange(0, 3072).long()):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # send to device
        data, target = data.to(device), target.to(device)
        
        # permute pixels
        data = data.view(-1, 32*32*3)
        data = data[:, perm]
        data = data.view(-1, 3, 32, 32)

        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            
def scramble_test(model, perm=torch.arange(0, 3072).long()):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in test_loader:
            # send to device
            data, target = data.to(device), target.to(device)

            # permute pixels
            data = data.view(-1, 32*32*3)
            data = data[:, perm]
            data = data.view(-1, 3, 32, 32)

            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).cpu().sum().item()

    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    accuracy_list.append(accuracy)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        accuracy))
In [26]:
print("Training on ", device)
model_fnn_2 = FC2Layer(input_size, output_size)
model_fnn_2.to(device)
optimizer = optim.SGD(model_fnn_2.parameters(), lr=0.1)
print('Number of parameters: {}'.format(get_n_params(model_fnn_2)))

for epoch in range(0, 10):
    scramble_train(epoch, model_fnn_2, fixed_perm)
    scramble_test(model_fnn_2, fixed_perm)
Training on  cuda:0
Number of parameters: 641370
Train Epoch: 0 [0/50000 (0%)]	Loss: 2.314555
Train Epoch: 0 [6400/50000 (13%)]	Loss: 1.945306
Train Epoch: 0 [12800/50000 (26%)]	Loss: 2.047602
Train Epoch: 0 [19200/50000 (38%)]	Loss: 1.693697
Train Epoch: 0 [25600/50000 (51%)]	Loss: 1.610765
Train Epoch: 0 [32000/50000 (64%)]	Loss: 1.851736
Train Epoch: 0 [38400/50000 (77%)]	Loss: 1.579800
Train Epoch: 0 [44800/50000 (90%)]	Loss: 1.540179

Test set: Average loss: 1.7372, Accuracy: 3810/10000 (38%)

Train Epoch: 1 [0/50000 (0%)]	Loss: 1.707948
Train Epoch: 1 [6400/50000 (13%)]	Loss: 1.578136
Train Epoch: 1 [12800/50000 (26%)]	Loss: 1.466384
Train Epoch: 1 [19200/50000 (38%)]	Loss: 1.666063
Train Epoch: 1 [25600/50000 (51%)]	Loss: 1.532931
Train Epoch: 1 [32000/50000 (64%)]	Loss: 1.475235
Train Epoch: 1 [38400/50000 (77%)]	Loss: 1.601979
Train Epoch: 1 [44800/50000 (90%)]	Loss: 1.489185

Test set: Average loss: 1.6283, Accuracy: 4320/10000 (43%)

Train Epoch: 2 [0/50000 (0%)]	Loss: 1.438966
Train Epoch: 2 [6400/50000 (13%)]	Loss: 1.291575
Train Epoch: 2 [12800/50000 (26%)]	Loss: 1.522585
Train Epoch: 2 [19200/50000 (38%)]	Loss: 1.403267
Train Epoch: 2 [25600/50000 (51%)]	Loss: 1.448815
Train Epoch: 2 [32000/50000 (64%)]	Loss: 1.367090
Train Epoch: 2 [38400/50000 (77%)]	Loss: 1.258148
Train Epoch: 2 [44800/50000 (90%)]	Loss: 1.220952

Test set: Average loss: 1.5266, Accuracy: 4658/10000 (47%)

Train Epoch: 3 [0/50000 (0%)]	Loss: 1.262444
Train Epoch: 3 [6400/50000 (13%)]	Loss: 1.163555
Train Epoch: 3 [12800/50000 (26%)]	Loss: 1.127904
Train Epoch: 3 [19200/50000 (38%)]	Loss: 1.377617
Train Epoch: 3 [25600/50000 (51%)]	Loss: 1.362298
Train Epoch: 3 [32000/50000 (64%)]	Loss: 1.180639
Train Epoch: 3 [38400/50000 (77%)]	Loss: 1.566242
Train Epoch: 3 [44800/50000 (90%)]	Loss: 1.279482

Test set: Average loss: 1.6021, Accuracy: 4609/10000 (46%)

Train Epoch: 4 [0/50000 (0%)]	Loss: 1.352084
Train Epoch: 4 [6400/50000 (13%)]	Loss: 1.075933
Train Epoch: 4 [12800/50000 (26%)]	Loss: 1.212736
Train Epoch: 4 [19200/50000 (38%)]	Loss: 1.110207
Train Epoch: 4 [25600/50000 (51%)]	Loss: 1.182105
Train Epoch: 4 [32000/50000 (64%)]	Loss: 1.412850
Train Epoch: 4 [38400/50000 (77%)]	Loss: 1.125314
Train Epoch: 4 [44800/50000 (90%)]	Loss: 1.292240

Test set: Average loss: 1.4353, Accuracy: 5018/10000 (50%)

Train Epoch: 5 [0/50000 (0%)]	Loss: 1.280638
Train Epoch: 5 [6400/50000 (13%)]	Loss: 1.000484
Train Epoch: 5 [12800/50000 (26%)]	Loss: 1.210972
Train Epoch: 5 [19200/50000 (38%)]	Loss: 1.079221
Train Epoch: 5 [25600/50000 (51%)]	Loss: 1.079155
Train Epoch: 5 [32000/50000 (64%)]	Loss: 1.186914
Train Epoch: 5 [38400/50000 (77%)]	Loss: 1.243894
Train Epoch: 5 [44800/50000 (90%)]	Loss: 1.227057

Test set: Average loss: 1.3979, Accuracy: 5186/10000 (52%)

Train Epoch: 6 [0/50000 (0%)]	Loss: 1.065750
Train Epoch: 6 [6400/50000 (13%)]	Loss: 1.150188
Train Epoch: 6 [12800/50000 (26%)]	Loss: 0.994556
Train Epoch: 6 [19200/50000 (38%)]	Loss: 1.257917
Train Epoch: 6 [25600/50000 (51%)]	Loss: 1.055667
Train Epoch: 6 [32000/50000 (64%)]	Loss: 0.918310
Train Epoch: 6 [38400/50000 (77%)]	Loss: 1.042536
Train Epoch: 6 [44800/50000 (90%)]	Loss: 1.299432

Test set: Average loss: 1.4244, Accuracy: 5119/10000 (51%)

Train Epoch: 7 [0/50000 (0%)]	Loss: 1.297706
Train Epoch: 7 [6400/50000 (13%)]	Loss: 0.922033
Train Epoch: 7 [12800/50000 (26%)]	Loss: 1.239818
Train Epoch: 7 [19200/50000 (38%)]	Loss: 1.402998
Train Epoch: 7 [25600/50000 (51%)]	Loss: 1.290644
Train Epoch: 7 [32000/50000 (64%)]	Loss: 1.247686
Train Epoch: 7 [38400/50000 (77%)]	Loss: 1.289694
Train Epoch: 7 [44800/50000 (90%)]	Loss: 1.165282

Test set: Average loss: 1.4324, Accuracy: 5094/10000 (51%)

Train Epoch: 8 [0/50000 (0%)]	Loss: 0.929140
Train Epoch: 8 [6400/50000 (13%)]	Loss: 0.906331
Train Epoch: 8 [12800/50000 (26%)]	Loss: 1.174455
Train Epoch: 8 [19200/50000 (38%)]	Loss: 1.072977
Train Epoch: 8 [25600/50000 (51%)]	Loss: 1.177196
Train Epoch: 8 [32000/50000 (64%)]	Loss: 1.004120
Train Epoch: 8 [38400/50000 (77%)]	Loss: 1.103997
Train Epoch: 8 [44800/50000 (90%)]	Loss: 1.088350

Test set: Average loss: 1.5456, Accuracy: 4928/10000 (49%)

Train Epoch: 9 [0/50000 (0%)]	Loss: 1.140876
Train Epoch: 9 [6400/50000 (13%)]	Loss: 0.988899
Train Epoch: 9 [12800/50000 (26%)]	Loss: 0.660672
Train Epoch: 9 [19200/50000 (38%)]	Loss: 1.041876
Train Epoch: 9 [25600/50000 (51%)]	Loss: 0.917581
Train Epoch: 9 [32000/50000 (64%)]	Loss: 0.934038
Train Epoch: 9 [38400/50000 (77%)]	Loss: 1.084958
Train Epoch: 9 [44800/50000 (90%)]	Loss: 1.147675

Test set: Average loss: 1.5613, Accuracy: 4900/10000 (49%)

In [27]:
model_fnn_2.to('cpu') 

# fetch a batch of test images
image_batch, label_batch = next(iter(test_loader))
image_batch_scramble = image_batch.view(-1, 32*32*3)
image_batch_scramble = image_batch_scramble[:, fixed_perm]
image_batch_scramble = image_batch_scramble.view(-1, 3, 32, 32)
# Turn off gradients to speed up this part
with torch.no_grad():
    log_pred_prob_batch = model_fnn_2(image_batch_scramble)
for i in range(10):
    img = image_batch[i]
    img_perm = image_batch_scramble[i]
    real_label = label_batch[i].item()
    log_pred_prob = log_pred_prob_batch[i]
    # the network outputs log-probabilities; take the exponential to get probabilities
    pred_prob = torch.exp(log_pred_prob).data.numpy().squeeze()
    visualize_pred(img_perm, pred_prob, real_label)

Better optimizer parameters¶

A smaller learning rate with momentum would already improve on plain SGD with lr=0.1; here we go further and use Adam, which adapts the step size per parameter (the SGD alternative is left commented out below).
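
As a reminder of what momentum does, PyTorch's SGD keeps a velocity buffer v <- mu * v + g and updates p <- p - lr * v, so with a constant gradient the effective step grows toward lr * g / (1 - mu). A toy sketch with made-up numbers:

# toy sketch of SGD-with-momentum updates: v <- mu*v + g, p <- p - lr*v
lr, mu = 0.01, 0.5
p, v = 1.0, 0.0            # one made-up parameter and its velocity
for g in [0.4, 0.4, 0.4]:  # a constant gradient
    v = mu * v + g
    p = p - lr * v
    print(f"v={v:.3f}  p={p:.4f}")
# v approaches g / (1 - mu) = 0.8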

In [31]:
print("Training on ", device)
model_fnn = FC2Layer(input_size, output_size)
model_fnn.to(device)
#optimizer = optim.SGD(model_fnn.parameters(), lr=0.01, momentum=0.5)
optimizer = optim.Adam(model_fnn.parameters())
print('Number of parameters: {}'.format(get_n_params(model_fnn)))

for epoch in range(0, 10):
    train(epoch, model_fnn)
    test(model_fnn)
Training on  cuda:0
Number of parameters: 641370
Train Epoch: 0 [0/50000 (0%)]	Loss: 2.297055
Train Epoch: 0 [6400/50000 (13%)]	Loss: 1.914771
Train Epoch: 0 [12800/50000 (26%)]	Loss: 1.759817
Train Epoch: 0 [19200/50000 (38%)]	Loss: 1.644658
Train Epoch: 0 [25600/50000 (51%)]	Loss: 1.997101
Train Epoch: 0 [32000/50000 (64%)]	Loss: 1.513815
Train Epoch: 0 [38400/50000 (77%)]	Loss: 1.563055
Train Epoch: 0 [44800/50000 (90%)]	Loss: 1.666883

Test set: Average loss: 1.5225, Accuracy: 4605/10000 (46%)

Train Epoch: 1 [0/50000 (0%)]	Loss: 1.628603
Train Epoch: 1 [6400/50000 (13%)]	Loss: 1.456675
Train Epoch: 1 [12800/50000 (26%)]	Loss: 1.448235
Train Epoch: 1 [19200/50000 (38%)]	Loss: 1.396404
Train Epoch: 1 [25600/50000 (51%)]	Loss: 1.395977
Train Epoch: 1 [32000/50000 (64%)]	Loss: 1.408025
Train Epoch: 1 [38400/50000 (77%)]	Loss: 1.119812
Train Epoch: 1 [44800/50000 (90%)]	Loss: 1.216307

Test set: Average loss: 1.4368, Accuracy: 4905/10000 (49%)

Train Epoch: 2 [0/50000 (0%)]	Loss: 1.158974
Train Epoch: 2 [6400/50000 (13%)]	Loss: 1.352131
Train Epoch: 2 [12800/50000 (26%)]	Loss: 1.191554
Train Epoch: 2 [19200/50000 (38%)]	Loss: 1.487108
Train Epoch: 2 [25600/50000 (51%)]	Loss: 1.291840
Train Epoch: 2 [32000/50000 (64%)]	Loss: 1.549883
Train Epoch: 2 [38400/50000 (77%)]	Loss: 1.374493
Train Epoch: 2 [44800/50000 (90%)]	Loss: 1.336537

Test set: Average loss: 1.3790, Accuracy: 5128/10000 (51%)

Train Epoch: 3 [0/50000 (0%)]	Loss: 1.131928
Train Epoch: 3 [6400/50000 (13%)]	Loss: 1.261525
Train Epoch: 3 [12800/50000 (26%)]	Loss: 1.268056
Train Epoch: 3 [19200/50000 (38%)]	Loss: 1.465625
Train Epoch: 3 [25600/50000 (51%)]	Loss: 1.203398
Train Epoch: 3 [32000/50000 (64%)]	Loss: 1.271948
Train Epoch: 3 [38400/50000 (77%)]	Loss: 1.556391
Train Epoch: 3 [44800/50000 (90%)]	Loss: 1.283208

Test set: Average loss: 1.3653, Accuracy: 5230/10000 (52%)

Train Epoch: 4 [0/50000 (0%)]	Loss: 1.321396
Train Epoch: 4 [6400/50000 (13%)]	Loss: 1.110044
Train Epoch: 4 [12800/50000 (26%)]	Loss: 1.127870
Train Epoch: 4 [19200/50000 (38%)]	Loss: 1.297065
Train Epoch: 4 [25600/50000 (51%)]	Loss: 1.361948
Train Epoch: 4 [32000/50000 (64%)]	Loss: 1.299795
Train Epoch: 4 [38400/50000 (77%)]	Loss: 0.971293
Train Epoch: 4 [44800/50000 (90%)]	Loss: 1.255963

Test set: Average loss: 1.3366, Accuracy: 5293/10000 (53%)

Train Epoch: 5 [0/50000 (0%)]	Loss: 1.301015
Train Epoch: 5 [6400/50000 (13%)]	Loss: 1.110148
Train Epoch: 5 [12800/50000 (26%)]	Loss: 0.885359
Train Epoch: 5 [19200/50000 (38%)]	Loss: 1.066070
Train Epoch: 5 [25600/50000 (51%)]	Loss: 0.906468
Train Epoch: 5 [32000/50000 (64%)]	Loss: 1.096440
Train Epoch: 5 [38400/50000 (77%)]	Loss: 1.218597
Train Epoch: 5 [44800/50000 (90%)]	Loss: 1.114765

Test set: Average loss: 1.3598, Accuracy: 5323/10000 (53%)

Train Epoch: 6 [0/50000 (0%)]	Loss: 0.978328
Train Epoch: 6 [6400/50000 (13%)]	Loss: 0.977796
Train Epoch: 6 [12800/50000 (26%)]	Loss: 0.947598
Train Epoch: 6 [19200/50000 (38%)]	Loss: 1.059045
Train Epoch: 6 [25600/50000 (51%)]	Loss: 1.486445
Train Epoch: 6 [32000/50000 (64%)]	Loss: 1.158538
Train Epoch: 6 [38400/50000 (77%)]	Loss: 0.956776
Train Epoch: 6 [44800/50000 (90%)]	Loss: 1.072191

Test set: Average loss: 1.3791, Accuracy: 5342/10000 (53%)

Train Epoch: 7 [0/50000 (0%)]	Loss: 0.778208
Train Epoch: 7 [6400/50000 (13%)]	Loss: 0.958302
Train Epoch: 7 [12800/50000 (26%)]	Loss: 0.988872
Train Epoch: 7 [19200/50000 (38%)]	Loss: 1.207706
Train Epoch: 7 [25600/50000 (51%)]	Loss: 0.999954
Train Epoch: 7 [32000/50000 (64%)]	Loss: 0.973935
Train Epoch: 7 [38400/50000 (77%)]	Loss: 0.833382
Train Epoch: 7 [44800/50000 (90%)]	Loss: 0.844945

Test set: Average loss: 1.3899, Accuracy: 5322/10000 (53%)

Train Epoch: 8 [0/50000 (0%)]	Loss: 0.901314
Train Epoch: 8 [6400/50000 (13%)]	Loss: 0.935898
Train Epoch: 8 [12800/50000 (26%)]	Loss: 0.677269
Train Epoch: 8 [19200/50000 (38%)]	Loss: 0.731430
Train Epoch: 8 [25600/50000 (51%)]	Loss: 0.783139
Train Epoch: 8 [32000/50000 (64%)]	Loss: 0.883091
Train Epoch: 8 [38400/50000 (77%)]	Loss: 0.999105
Train Epoch: 8 [44800/50000 (90%)]	Loss: 0.824875

Test set: Average loss: 1.4484, Accuracy: 5215/10000 (52%)

Train Epoch: 9 [0/50000 (0%)]	Loss: 0.978075
Train Epoch: 9 [6400/50000 (13%)]	Loss: 0.973602
Train Epoch: 9 [12800/50000 (26%)]	Loss: 0.942389
Train Epoch: 9 [19200/50000 (38%)]	Loss: 0.552757
Train Epoch: 9 [25600/50000 (51%)]	Loss: 0.976692
Train Epoch: 9 [32000/50000 (64%)]	Loss: 0.682977
Train Epoch: 9 [38400/50000 (77%)]	Loss: 0.772186
Train Epoch: 9 [44800/50000 (90%)]	Loss: 1.014110

Test set: Average loss: 1.4275, Accuracy: 5334/10000 (53%)

In [33]:
model_fnn.to('cpu') 

# fetch a batch of test images
image_batch, label_batch = next(iter(test_loader))

# Turn off gradients to speed up this part
with torch.no_grad():
    log_pred_prob_batch = model_fnn(image_batch)
for i in range(10):
    img = image_batch[i]
    real_label = label_batch[i].item()
    log_pred_prob = log_pred_prob_batch[i]
    # the network outputs log-probabilities; take the exponential to get probabilities
    pred_prob = torch.exp(log_pred_prob).data.numpy().squeeze()
    visualize_pred(img, pred_prob, real_label)

Better Optimizer + Dropout¶

In [8]:
print("Training on ", device)
model_fnn = FC2LayerDropout(input_size, output_size)
model_fnn.to(device)
#optimizer = optim.SGD(model_fnn.parameters(), lr=0.01, momentum=0.5)
optimizer = optim.Adam(model_fnn.parameters())
print('Number of parameters: {}'.format(get_n_params(model_fnn)))

for epoch in range(0, 10):
    train(epoch, model_fnn)
    test(model_fnn)
Training on  cuda:0
Number of parameters: 641370
Train Epoch: 0 [0/50000 (0%)]	Loss: 2.322593
Train Epoch: 0 [6400/50000 (13%)]	Loss: 1.953479
Train Epoch: 0 [12800/50000 (26%)]	Loss: 1.685546
Train Epoch: 0 [19200/50000 (38%)]	Loss: 1.842913
Train Epoch: 0 [25600/50000 (51%)]	Loss: 1.787046
Train Epoch: 0 [32000/50000 (64%)]	Loss: 1.826012
Train Epoch: 0 [38400/50000 (77%)]	Loss: 1.801230
Train Epoch: 0 [44800/50000 (90%)]	Loss: 1.787566

Test set: Average loss: 1.6015, Accuracy: 4286/10000 (43%)

Train Epoch: 1 [0/50000 (0%)]	Loss: 1.645393
Train Epoch: 1 [6400/50000 (13%)]	Loss: 1.581569
Train Epoch: 1 [12800/50000 (26%)]	Loss: 1.705407
Train Epoch: 1 [19200/50000 (38%)]	Loss: 1.686496
Train Epoch: 1 [25600/50000 (51%)]	Loss: 1.850170
Train Epoch: 1 [32000/50000 (64%)]	Loss: 1.783980
Train Epoch: 1 [38400/50000 (77%)]	Loss: 1.481783
Train Epoch: 1 [44800/50000 (90%)]	Loss: 1.474117

Test set: Average loss: 1.5150, Accuracy: 4656/10000 (47%)

Train Epoch: 2 [0/50000 (0%)]	Loss: 1.617220
Train Epoch: 2 [6400/50000 (13%)]	Loss: 1.655384
Train Epoch: 2 [12800/50000 (26%)]	Loss: 1.581456
Train Epoch: 2 [19200/50000 (38%)]	Loss: 1.714770
Train Epoch: 2 [25600/50000 (51%)]	Loss: 1.549179
Train Epoch: 2 [32000/50000 (64%)]	Loss: 1.471822
Train Epoch: 2 [38400/50000 (77%)]	Loss: 1.632568
Train Epoch: 2 [44800/50000 (90%)]	Loss: 1.357233

Test set: Average loss: 1.4630, Accuracy: 4846/10000 (48%)

Train Epoch: 3 [0/50000 (0%)]	Loss: 1.503623
Train Epoch: 3 [6400/50000 (13%)]	Loss: 1.719511
Train Epoch: 3 [12800/50000 (26%)]	Loss: 1.563157
Train Epoch: 3 [19200/50000 (38%)]	Loss: 1.537603
Train Epoch: 3 [25600/50000 (51%)]	Loss: 1.402210
Train Epoch: 3 [32000/50000 (64%)]	Loss: 1.345465
Train Epoch: 3 [38400/50000 (77%)]	Loss: 1.374540
Train Epoch: 3 [44800/50000 (90%)]	Loss: 1.406796

Test set: Average loss: 1.4420, Accuracy: 4832/10000 (48%)

Train Epoch: 4 [0/50000 (0%)]	Loss: 1.551064
Train Epoch: 4 [6400/50000 (13%)]	Loss: 1.313918
Train Epoch: 4 [12800/50000 (26%)]	Loss: 1.688068
Train Epoch: 4 [19200/50000 (38%)]	Loss: 1.641083
Train Epoch: 4 [25600/50000 (51%)]	Loss: 1.353122
Train Epoch: 4 [32000/50000 (64%)]	Loss: 1.421218
Train Epoch: 4 [38400/50000 (77%)]	Loss: 1.457240
Train Epoch: 4 [44800/50000 (90%)]	Loss: 1.391036

Test set: Average loss: 1.4218, Accuracy: 4992/10000 (50%)

Train Epoch: 5 [0/50000 (0%)]	Loss: 1.327910
Train Epoch: 5 [6400/50000 (13%)]	Loss: 1.410913
Train Epoch: 5 [12800/50000 (26%)]	Loss: 1.353290
Train Epoch: 5 [19200/50000 (38%)]	Loss: 1.117393
Train Epoch: 5 [25600/50000 (51%)]	Loss: 1.468343
Train Epoch: 5 [32000/50000 (64%)]	Loss: 1.612051
Train Epoch: 5 [38400/50000 (77%)]	Loss: 1.955342
Train Epoch: 5 [44800/50000 (90%)]	Loss: 1.209989

Test set: Average loss: 1.4002, Accuracy: 5031/10000 (50%)

Train Epoch: 6 [0/50000 (0%)]	Loss: 1.551533
Train Epoch: 6 [6400/50000 (13%)]	Loss: 1.571274
Train Epoch: 6 [12800/50000 (26%)]	Loss: 1.476239
Train Epoch: 6 [19200/50000 (38%)]	Loss: 1.363612
Train Epoch: 6 [25600/50000 (51%)]	Loss: 1.146764
Train Epoch: 6 [32000/50000 (64%)]	Loss: 1.582720
Train Epoch: 6 [38400/50000 (77%)]	Loss: 1.523766
Train Epoch: 6 [44800/50000 (90%)]	Loss: 1.427450

Test set: Average loss: 1.3758, Accuracy: 5186/10000 (52%)

Train Epoch: 7 [0/50000 (0%)]	Loss: 1.608967
Train Epoch: 7 [6400/50000 (13%)]	Loss: 1.584855
Train Epoch: 7 [12800/50000 (26%)]	Loss: 1.414193
Train Epoch: 7 [19200/50000 (38%)]	Loss: 1.370704
Train Epoch: 7 [25600/50000 (51%)]	Loss: 1.577043
Train Epoch: 7 [32000/50000 (64%)]	Loss: 1.508765
Train Epoch: 7 [38400/50000 (77%)]	Loss: 1.438964
Train Epoch: 7 [44800/50000 (90%)]	Loss: 1.215909

Test set: Average loss: 1.3854, Accuracy: 5089/10000 (51%)

Train Epoch: 8 [0/50000 (0%)]	Loss: 1.437622
Train Epoch: 8 [6400/50000 (13%)]	Loss: 1.327296
Train Epoch: 8 [12800/50000 (26%)]	Loss: 1.353759
Train Epoch: 8 [19200/50000 (38%)]	Loss: 1.405474
Train Epoch: 8 [25600/50000 (51%)]	Loss: 1.325254
Train Epoch: 8 [32000/50000 (64%)]	Loss: 1.421556
Train Epoch: 8 [38400/50000 (77%)]	Loss: 1.292889
Train Epoch: 8 [44800/50000 (90%)]	Loss: 1.533075

Test set: Average loss: 1.3650, Accuracy: 5175/10000 (52%)

Train Epoch: 9 [0/50000 (0%)]	Loss: 1.473073
Train Epoch: 9 [6400/50000 (13%)]	Loss: 1.365849
Train Epoch: 9 [12800/50000 (26%)]	Loss: 1.547923
Train Epoch: 9 [19200/50000 (38%)]	Loss: 1.145274
Train Epoch: 9 [25600/50000 (51%)]	Loss: 1.571810
Train Epoch: 9 [32000/50000 (64%)]	Loss: 1.391635
Train Epoch: 9 [38400/50000 (77%)]	Loss: 1.470437
Train Epoch: 9 [44800/50000 (90%)]	Loss: 1.307042

Test set: Average loss: 1.3553, Accuracy: 5214/10000 (52%)

In [9]:
model_fnn.to('cpu') 

# fetch a batch of test images
image_batch, label_batch = next(iter(test_loader))

# Turn off gradients to speed up this part
with torch.no_grad():
    log_pred_prob_batch = model_fnn(image_batch)
for i in range(10):
    img = image_batch[i]
    real_label = label_batch[i].item()
    log_pred_prob = log_pred_prob_batch[i]
    # the network outputs log-probabilities; take the exponential to get probabilities
    pred_prob = torch.exp(log_pred_prob).data.numpy().squeeze()
    visualize_pred(img, pred_prob, real_label)
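
Since test() appends each epoch's accuracy to accuracy_list, the learning curve can be plotted directly; a minimal sketch, assuming accuracy_list still holds the most recent run:

# plot the per-epoch test accuracy collected by test()
plt.figure(figsize=(5, 3))
plt.plot(accuracy_list, marker='o')
plt.xlabel('epoch')
plt.ylabel('test accuracy (%)')
plt.show()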