import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms, utils
import matplotlib.pyplot as plt
import numpy
# this 'device' will be used for training our model
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
Observe that we set shuffle=True for the training loader below, which means the data is presented in a randomized order each epoch.
input_size = 32*32*3 # images are 32x32 pixels with 3 channels
output_size = 10 # there are 10 classes
train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10('../data', train=True, download=True,
                     transform=transforms.Compose([
                         transforms.ToTensor(),
                         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                     ])),
    batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10('../data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])),
    batch_size=1000, shuffle=True)
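As a quick sanity check (an illustrative sketch, not part of the original notebook), we can pull one batch and confirm the tensor shapes and value range: Normalize with mean 0.5 and std 0.5 per channel maps ToTensor's [0, 1] range to [-1, 1].
images, labels = next(iter(train_loader))
print(images.shape)   # torch.Size([64, 3, 32, 32])
print(labels.shape)   # torch.Size([64])
print(images.min().item(), images.max().item())  # approximately -1.0 and 1.0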
classNames = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# helper to display a normalized image tensor
def imshow(img, plot):
    img = img / 2 + 0.5  # unnormalize: invert Normalize((0.5, ...), (0.5, ...))
    npimg = img.numpy()  # convert from tensor to numpy array
    plot.imshow(numpy.transpose(npimg, (1, 2, 0)))  # CHW -> HWC for matplotlib
# show some training images
plt.figure(figsize=(16, 4))
# fetch a (random) batch of training images
image_batch, label_batch = next(iter(train_loader))
#imshow(torchvision.utils.make_grid(image_batch))
for i in range(20):
    image = image_batch[i]
    label = classNames[label_batch[i].item()]
    plt.subplot(2, 10, i + 1)
    #image, label = train_loader.dataset.__getitem__(i)
    #plt.imshow(image.squeeze().numpy())
    imshow(image, plt)
    plt.axis('off')
    plt.title(label)
plt.show()
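The commented-out make_grid call above is a more compact way to draw the whole batch; a sketch of how it could be used with the same imshow helper (illustrative):
plt.figure(figsize=(16, 4))
imshow(torchvision.utils.make_grid(image_batch[:20], nrow=10), plt)
plt.axis('off')
plt.show()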
# function to count the number of parameters in a model
def get_n_params(model):
    n = 0
    for p in list(model.parameters()):
        n += p.nelement()
    return n
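The same count can also be written as a one-line generator expression (an equivalent sketch, not from the original):
def get_n_params_oneliner(model):
    # hypothetical alternative with the same result as get_n_params
    return sum(p.nelement() for p in model.parameters())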
accuracy_list = []
# we pass a model object to this trainer, and it trains the model for one epoch;
# note that it uses the global optimizer created alongside the model below
def train(epoch, model):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # send to device
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test(model):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in test_loader:
            # send to device
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).cpu().sum().item()
    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    accuracy_list.append(accuracy)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        accuracy))
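Because the model below ends in nn.LogSoftmax, F.nll_loss on its output is exactly cross-entropy on the raw scores; a minimal sketch of the equivalence (with made-up tensors):
logits = torch.randn(4, 10)           # fake batch of raw class scores
targets = torch.tensor([1, 0, 3, 9])  # fake labels
a = F.nll_loss(F.log_softmax(logits, dim=1), targets)
b = F.cross_entropy(logits, targets)
print(torch.allclose(a, b))  # True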
# FROM: https://github.com/boazbk/mltheoryseminar/blob/main/code/hw0/simple_train.ipynb
## 5-Layer CNN for CIFAR
## This is the Myrtle5 network by David Page (https://myrtle.ai/learn/how-to-train-your-resnet-4-architecture/)
class Flatten(nn.Module):
    # assumes the spatial dimensions are already 1x1, so (N, C, 1, 1) -> (N, C)
    def forward(self, x): return x.view(x.size(0), x.size(1))
class PrintShape(nn.Module):
    def forward(self, x):
        print(x.shape)
        return x
def make_myrtle5(c=64, num_classes=10):
    ''' Returns a 5-layer CNN with width parameter c. '''
    return nn.Sequential(
        # Layer 0
        nn.Conv2d(3, c, kernel_size=3, stride=1,
                  padding=1, bias=True),
        nn.BatchNorm2d(c),
        nn.ReLU(),
        # Layer 1
        nn.Conv2d(c, c*2, kernel_size=3,
                  stride=1, padding=1, bias=True),
        nn.BatchNorm2d(c*2),
        nn.ReLU(),
        nn.MaxPool2d(2),
        # Layer 2
        nn.Conv2d(c*2, c*4, kernel_size=3,
                  stride=1, padding=1, bias=True),
        nn.BatchNorm2d(c*4),
        nn.ReLU(),
        nn.MaxPool2d(2),
        # Layer 3
        nn.Conv2d(c*4, c*8, kernel_size=3,
                  stride=1, padding=1, bias=True),
        nn.BatchNorm2d(c*8),
        nn.ReLU(),
        nn.MaxPool2d(2),
        # Layer 4
        nn.MaxPool2d(4),
        Flatten(),
        nn.Linear(c*8, num_classes, bias=True),
        #PrintShape(),
        nn.LogSoftmax(dim=1)
    )
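To see why Flatten can assume a 1x1 spatial size, it helps to trace shapes through the network with a dummy batch (an illustrative check, not part of the original):
m = make_myrtle5()
x = torch.randn(2, 3, 32, 32)  # dummy batch of two CIFAR-sized images
# spatial size: 32 -> 32 -> 16 -> 8 -> 4, then MaxPool2d(4) -> 1x1 with c*8 channels
print(m(x).shape)  # torch.Size([2, 10])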
print("Training on ", device)
model_cnn = make_myrtle5()
model_cnn.to(device)
optimizer = optim.SGD(model_cnn.parameters(), lr=0.01, momentum=0.5)
print('Number of parameters: {}'.format(get_n_params(model_cnn)))
for epoch in range(0, 20):
    train(epoch, model_cnn)
    test(model_cnn)
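Since test() appends the accuracy after every epoch, the learning curve can be plotted afterwards (a small sketch, not in the original notebook):
plt.plot(accuracy_list)
plt.xlabel('epoch')
plt.ylabel('test accuracy (%)')
plt.show()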
def visualize_pred(img, pred_prob, real_label):
    ''' Function for viewing an image and its predicted class probabilities. '''
    #pred_prob = pred_prob.data.numpy().squeeze()
    fig, (ax1, ax2) = plt.subplots(figsize=(6, 9), ncols=2)
    #ax1.imshow(img.numpy().squeeze())
    imshow(img, ax1)
    ax1.axis('off')
    pred_label = numpy.argmax(pred_prob)
    ax1.set_title([classNames[real_label], classNames[pred_label]])
    ax2.barh(numpy.arange(10), pred_prob)
    ax2.set_aspect(0.1)
    ax2.set_yticks(numpy.arange(10))
    ax2.set_yticklabels(classNames)
    ax2.set_title('Prediction Probability')
    ax2.set_xlim(0, 1.1)
    plt.tight_layout()
model_cnn.to('cpu')
# fetch a batch of test images
image_batch, label_batch = next(iter(test_loader))
# Turn off gradients to speed up this part
with torch.no_grad():
    log_pred_prob_batch = model_cnn(image_batch)
for i in range(10):
    img = image_batch[i]
    real_label = label_batch[i].item()
    log_pred_prob = log_pred_prob_batch[i]
    # the network outputs log-probabilities; exponentiate to get probabilities
    pred_prob = torch.exp(log_pred_prob).numpy().squeeze()
    visualize_pred(img, pred_prob, real_label)
# Fix one permutation of the 32*32*3 = 3072 pixel values;
# the same permutation will be applied to every image.
fixed_perm = torch.randperm(3072)
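A fixed permutation is a bijection, so it can be undone with torch.argsort; a quick illustrative sketch of scrambling and unscrambling one flat tensor:
inverse_perm = torch.argsort(fixed_perm)  # indices that invert fixed_perm
x = torch.arange(3072.)
scrambled = x[fixed_perm]
print(torch.equal(scrambled[inverse_perm], x))  # True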
# show some training images next to their pixel-scrambled versions
plt.figure(figsize=(8, 8))
# fetch a (random) batch of training images
image_batch, label_batch = next(iter(train_loader))
for i in range(6):
    image = image_batch[i]
    image_perm = image.view(-1, 32*32*3).clone()
    image_perm = image_perm[:, fixed_perm]
    image_perm = image_perm.view(3, 32, 32)
    label = label_batch[i].item()
    plt.subplot(3, 4, 2*i + 1)
    #image, label = train_loader.dataset.__getitem__(i)
    #plt.imshow(image.squeeze().numpy())
    imshow(image, plt)
    plt.axis('off')
    plt.title(classNames[label])
    plt.subplot(3, 4, 2*i + 2)
    #plt.imshow(image_perm.squeeze().numpy())
    imshow(image_perm, plt)
    plt.axis('off')
    plt.title(classNames[label])
accuracy_list = []  # reset for the scrambled-pixel experiment
def scramble_train(epoch, model, perm=torch.arange(0, 3072).long()):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # send to device
        data, target = data.to(device), target.to(device)
        # permute pixels
        data = data.view(-1, 32*32*3)
        data = data[:, perm]
        data = data.view(-1, 3, 32, 32)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def scramble_test(model, perm=torch.arange(0, 3072).long()):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in test_loader:
            # send to device
            data, target = data.to(device), target.to(device)
            # permute pixels
            data = data.view(-1, 32*32*3)
            data = data[:, perm]
            data = data.view(-1, 3, 32, 32)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).cpu().sum().item()
    test_loss /= len(test_loader.dataset)
    accuracy = 100. * correct / len(test_loader.dataset)
    accuracy_list.append(accuracy)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        accuracy))
print("Training on ", device)
model_3 = make_myrtle5()
model_3.to(device)
optimizer = optim.SGD(model_3.parameters(), lr=0.01, momentum=0.5)
print('Number of parameters: {}'.format(get_n_params(model_3)))
for epoch in range(0, 20):
    scramble_train(epoch, model_3, fixed_perm)
    scramble_test(model_3, fixed_perm)
model_3.to('cpu')
# fetch a batch of test images
image_batch, label_batch = next(iter(test_loader))
image_batch_scramble = image_batch.view(-1, 32*32*3)
image_batch_scramble = image_batch_scramble[:, fixed_perm]
image_batch_scramble = image_batch_scramble.view(-1, 3, 32, 32)
# Turn off gradients to speed up this part
with torch.no_grad():
    log_pred_prob_batch = model_3(image_batch_scramble)
for i in range(10):
    img = image_batch[i]
    img_perm = image_batch_scramble[i]
    real_label = label_batch[i].item()
    log_pred_prob = log_pred_prob_batch[i]
    # the network outputs log-probabilities; exponentiate to get probabilities
    pred_prob = torch.exp(log_pred_prob).numpy().squeeze()
    visualize_pred(img_perm, pred_prob, real_label)