# cifar10_pytorch — CIFAR-10 image classification with a LeNet-style CNN in PyTorch.
# (A garbled single-line copy of the imports appeared here in the original
# export; the real, readable imports follow below.)
import torch
import torchvision
import torch.nn as nn
from torch.utils.data.sampler import SubsetRandomSampler
import torch.optim as optim
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
# --- Data-loading hyperparameters ---
valid_size = 0.2     # fraction of the training set held out for validation
num_workers = 0      # DataLoader worker processes (0 = load in the main process)
batch_size = 32
# Only convert PIL images to tensors; no normalization is applied.
transform = transforms.ToTensor()
# NOTE(review): hard-coded local path, and download=False — the dataset must
# already exist at this location or loading will fail.
train_data = torchvision.datasets.CIFAR10(root='C:/Users/Administrator/.keras/datasets', train=True,
download=False, transform=transform)
test_data = torchvision.datasets.CIFAR10(root='C:/Users/Administrator/.keras/datasets', train=False,
download=False, transform=transform)
## Re-split the original training set into training and validation subsets
num_train = len(train_data)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size*num_train))
# first `split` shuffled indices -> validation, the rest -> training
train_idx, valid_idx = indices[split:], indices[:split]
train_sample = SubsetRandomSampler(train_idx)
valid_sample = SubsetRandomSampler(valid_idx)
# Both loaders wrap the same dataset but sample disjoint index subsets.
train_loader = torch.utils.data.DataLoader(train_data,batch_size=batch_size,
sampler=train_sample,num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(train_data,batch_size=batch_size,
sampler=valid_sample,num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data,batch_size=batch_size,num_workers=num_workers)
# --- Visualization ---
# Grab one batch from the training loader for visualization.
# BUG FIX: `trainloader` was undefined (the loader is named `train_loader`),
# and `Iterator.next()` does not exist in Python 3 — use the next() builtin.
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# --- Visualization (grid of labeled samples) ---
# Plot the first 20 images of the batch with their integer class labels.
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    # BUG FIX: 20/2 is a float in Python 3; subplot grid sizes must be ints.
    ax = fig.add_subplot(2, 20 // 2, idx + 1, xticks=[], yticks=[])
    # BUG FIX: images[idx] is (C, H, W); imshow needs (H, W, C), so transpose
    # (np.squeeze alone cannot fix a 3-channel image). Also replaced the
    # typographic quotes around ‘rainbow’, which were a syntax error.
    ax.imshow(np.transpose(images[idx], (1, 2, 0)), cmap='rainbow')
    ax.set_title(labels[idx].item())
# Re-fetch a batch for the image-grid view.
# BUG FIX: `.next()` was removed; use the next() builtin.
dataiter = iter(train_loader)
images, labels = next(dataiter)
def imshow(img):
    """Display a (C, H, W) image tensor with matplotlib.

    Un-normalizes assuming mean 0.5 / std 0.5 per channel, then reorders
    the axes from channel-first to channel-last before plotting.
    """
    unnormalized = img / 2 + 0.5
    arr = unnormalized.numpy()
    # (C, H, W) -> (H, W, C): matplotlib expects the channel axis last.
    plt.imshow(arr.transpose(1, 2, 0))
    plt.show()
# Show the whole batch as one tiled image.
imshow(torchvision.utils.make_grid(images))
# A bare `images.shape` expression is a no-op outside a REPL (notebook
# residue); print it instead. Expected: torch.Size([32, 3, 32, 32]).
print(images.shape)
import torch.nn as nn
import torch.nn.functional as F
class LeNet(nn.Module):
    """LeNet-5-style CNN for 3x32x32 CIFAR-10 images (10 output classes)."""

    def __init__(self):
        super(LeNet, self).__init__()
        # Conv2d args: in_channels, out_channels, kernel_size
        self.conv1 = nn.Conv2d(3, 16, 5)
        self.pool1 = nn.MaxPool2d(2, 2)  # kernel size, stride
        self.conv2 = nn.Conv2d(16, 32, 5)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(32 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw class logits of shape (batch, 10)."""
        # (3, 32, 32) -> conv -> (16, 28, 28) -> pool -> (16, 14, 14)
        out = self.pool1(F.relu(self.conv1(x)))
        # -> conv -> (32, 10, 10) -> pool -> (32, 5, 5)
        out = self.pool2(F.relu(self.conv2(out)))
        # Flatten everything except the batch dimension: -> (batch, 800)
        out = out.view(-1, 32 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        # No softmax here: CrossEntropyLoss expects raw logits.
        return self.fc3(out)
# Instantiate the network and the training machinery.
model = LeNet()
print(model)
# Printed architecture, for reference (the pasted repr lines that followed
# here were raw output and would not parse as Python):
#   conv1: Conv2d(3, 16, 5)   pool1: MaxPool2d(2, 2)
#   conv2: Conv2d(16, 32, 5)  pool2: MaxPool2d(2, 2)
#   fc1: 800 -> 120, fc2: 120 -> 84, fc3: 84 -> 10
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Full dataset size (the training sampler only draws 40000 of these).
print(len(train_loader.dataset))  # 50000
n_epochs = 5
# BUG FIX: np.Inf was removed in NumPy 2.0 — the canonical spelling is np.inf.
valid_loss_min = np.inf
train_loss_list = []
val_loss_list = []
acces = []       # per-epoch training accuracy
eval_acces = []  # per-epoch validation accuracy
# Train for n_epochs, validating after each epoch and checkpointing on
# improvement of the validation loss.
for epoch in range(n_epochs):
    train_loss = 0.0
    valid_loss = 0.0
    train_acc = 0
    eval_acc = 0

    # ---- training pass ----
    model.train()
    for data, target in train_loader:
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item() * data.size(0)
        # batch accuracy
        _, pred = output.max(1)
        num_correct = (pred == target).sum().item()
        train_acc += num_correct / data.shape[0]

    # ---- validation pass ----
    # BUG FIX: the original called optimizer.zero_grad(), loss.backward() and
    # optimizer.step() here, i.e. it *trained on the validation split*.
    # Validation must only evaluate; no_grad also saves memory/time.
    model.eval()
    with torch.no_grad():
        for data, target in valid_loader:
            output = model(data)
            loss = criterion(output, target)
            valid_loss += loss.item() * data.size(0)
            _, pred = output.max(1)
            num_correct = (pred == target).sum().item()
            eval_acc += num_correct / data.shape[0]

    # BUG FIX: both loaders wrap the same 50000-image dataset but sample
    # disjoint subsets (40000 / 10000). Normalize by the sampler size, not
    # len(dataset) — the old code made the validation loss look ~5x too small.
    train_loss = train_loss / len(train_loader.sampler)
    valid_loss = valid_loss / len(valid_loader.sampler)
    train_loss_list.append(train_loss)
    val_loss_list.append(valid_loss)
    acces.append(train_acc / len(train_loader))
    # BUG FIX: eval_acc was accumulated over valid_loader batches, so average
    # over len(valid_loader), not len(test_loader).
    eval_acces.append(eval_acc / len(valid_loader))
    print('第{}轮 \t训练损失 {:.6f} \t验证损失:{:.6f} \t 训练得分:{:.6f} \t 测试得分:{:.6f}'.format(
        epoch + 1, train_loss, valid_loss, train_acc / len(train_loader), eval_acc / len(valid_loader)))
    if valid_loss <= valid_loss_min:
        print('验证损失比之前降低了: ({:.6f}-->{:.6f}).保存模型'.format(
            valid_loss_min,
            valid_loss))
        torch.save(model.state_dict(), 'model.pt')
        valid_loss_min = valid_loss
# Sample log from the original (pre-fix) run, kept for reference:
#   第1轮 训练损失 0.749417 验证损失:0.184242 训练得分:0.668575 测试得分:0.671725
#   ...
#   第5轮 训练损失 0.607878 验证损失:0.151540 训练得分:0.731100 测试得分:0.735523
# Accuracy curves.
# BUG FIX: this figure plots accuracies but was titled "train_valid_loss";
# title it correctly.
plt.plot(acces, label='train_accuracy')
plt.plot(eval_acces, label='valid_accuracy')
plt.legend(loc='best')
plt.title("train_valid_accuracy")
plt.show()
# Loss curves.
plt.plot(train_loss_list, label='train_loss')
plt.plot(val_loss_list, label='valid_loss')
plt.legend(loc='best')
plt.title("train_valid_loss")
plt.show()
# (figure: training/validation curves — the original post's image link was
# broken by the export and is omitted here)
# Restore the best checkpoint saved during training.
model.load_state_dict(torch.load('model.pt'))
# Expected result: <All keys matched successfully>
# (that line was pasted output in the original and would not parse)
# Prepare test-set accumulators and inspect one batch.
test_loss = 0.0
# Per-class tallies for CIFAR-10's 10 classes.
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
# The original file had bare REPL expressions and pasted outputs here;
# print the values explicitly so this runs as a script.
print(class_correct)  # [0.0, 0.0, ..., 0.0]
print(class_total)    # [0.0, 0.0, ..., 0.0]
# BUG FIX: `.next()` was removed; use the next() builtin.
dataiter = iter(train_loader)
data, target = next(dataiter)
print(data.shape)    # torch.Size([32, 3, 32, 32])
print(target.shape)  # torch.Size([32])
print(target)        # tensor of 32 class indices in [0, 9]
# Walk through the evaluation metrics on the single batch fetched above.
# (The pasted tensor/repr outputs interleaved in the original were not valid
# Python; they are summarized in the comments below.)
model.eval()
output = model(data)
print(output.shape)  # torch.Size([32, 10])
print(output[0])     # raw logits for the first image
loss = criterion(output, target)
print(loss)          # scalar cross-entropy, e.g. tensor(0.7815, ...)
test_loss += loss.item() * data.size(0)
print(test_loss)
# Predicted class = argmax over the 10 logits per row.
_, pred = torch.max(output, 1)
print(pred)
print(target)
# Element-wise correctness mask for the batch (True where pred == target).
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
print(correct)
# Evaluate on the held-out test set.
# BUG FIX: reset test_loss — the walkthrough above had already accumulated
# one (training!) batch into it, inflating the reported test loss.
test_loss = 0.0
model.eval()
with torch.no_grad():  # evaluation only: no gradients needed
    for data, target in test_loader:
        output = model(data)
        loss = criterion(output, target)
        test_loss += loss.item() * data.size(0)
        _, pred = torch.max(output, 1)
        correct = np.squeeze(pred.eq(target.data.view_as(pred)))
        # Per-class tallies. Iterate over the *actual* batch size: the last
        # batch has 10000 % 32 = 16 images, so the original commented-out
        # `range(batch_size)` loop would have raised IndexError.
        for i in range(data.size(0)):
            label = target.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1
test_loss = test_loss / len(test_loader.dataset)
print('测试集上的误差是: {:.6f}'.format(test_loss))
# The original (buggy) run printed: 测试集上的误差是: 3.208603
# Show 32 test images titled "prediction (ground truth)", colored green when
# correct and red when wrong.
dataiter = iter(test_loader)
images, labels = next(dataiter)  # BUG FIX: .next() was removed; use next()
output = model(images)
_, preds = torch.max(output, 1)
images = images.numpy()
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(32):
    # BUG FIX: 32/2 is a float in Python 3; subplot grid sizes must be ints.
    ax = fig.add_subplot(2, 32 // 2, idx + 1, xticks=[], yticks=[])
    ax.imshow(np.transpose(images[idx], (1, 2, 0)), cmap='rainbow')
    # BUG FIX: the color check used stale `pred` (computed earlier on a
    # *training* batch) instead of this cell's `preds`.
    ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
                 color=("green" if preds[idx] == labels[idx] else "red"))
plt.show()
# (blog page footer — "更多推荐" / "所有评论(0)" — not part of the code)