MNIST Handwritten Digit Recognition with PyTorch
This is about as introductory as it gets: a plain fully connected neural network that classifies handwritten digits, with no convolution involved, reaching over 98% accuracy.
import torch
import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms
# Fetch the datasets with torchvision.datasets; DataLoader will batch them later
train_data = torchvision.datasets.MNIST(
    'dataset/mnist-pytorch', train=True,
    transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]),
    download=True
)
test_data = torchvision.datasets.MNIST(
    'dataset/mnist-pytorch', train=False,
    transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
)
print("train_data:", train_data.train_data.size())
print("train_labels:", train_data.train_labels.size())
print("test_data:", test_data.test_data.size())
# Batch size
batch_size = 500
# Number of training epochs
epochs = 40
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Earlier version, kept for reference:
        # self.dense = torch.nn.Sequential(
        #     torch.nn.Linear(784, 512),
        #     torch.nn.BatchNorm1d(512),
        #     torch.nn.ReLU(),
        #     torch.nn.Linear(512, 10),
        #     torch.nn.ReLU()
        # )
        self.dense = torch.nn.Sequential(  # improved version; gains roughly 0.2% accuracy
            torch.nn.Linear(784, 512),
            torch.nn.BatchNorm1d(512),
            torch.nn.ReLU(),
            torch.nn.Linear(512, 256),
            torch.nn.ReLU(),
            torch.nn.Linear(256, 10)
            # no activation on the output layer: a ReLU here would clamp
            # negative logits to zero before log_softmax
        )

    def forward(self, x):
        x = x.view(-1, 784)  # flatten each 28*28 image into a 784-dim vector
        x = self.dense(x)
        return torch.nn.functional.log_softmax(x, dim=1)  # log-probabilities per class
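# Quick shape check (added for illustration, not in the original post): push a dummy
# batch of two fake images through the network and confirm the output is (2, 10).
_demo = Net()
_demo.eval()  # eval mode so BatchNorm1d uses its (freshly initialized) running stats
with torch.no_grad():
    print("demo output shape:", _demo(torch.randn(2, 1, 28, 28)).shape)  # torch.Size([2, 10])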
def main():
    # Use the GPU when available, otherwise fall back to the CPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Net().to(device)
    print(model)
    optimizer = torch.optim.Adam(model.parameters())
    # The model already applies log_softmax, so NLLLoss is the matching criterion;
    # CrossEntropyLoss would apply log_softmax a second time
    loss_func = torch.nn.NLLLoss()
    train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(dataset=test_data, batch_size=batch_size)
    for epoch in range(epochs):
        model.train()  # BatchNorm must use batch statistics during training
        sum_loss = 0.0
        for i, data in enumerate(train_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            # Zero the gradients
            optimizer.zero_grad()
            # forward + backward
            outputs = model(inputs)
            loss = loss_func(outputs, labels)
            loss.backward()
            optimizer.step()
            # Print the average loss every 100 batches
            sum_loss += loss.item()
            if (i + 1) % 100 == 0:
                print('epoch=%d, batch=%d loss: %.04f'
                      % (epoch + 1, i + 1, sum_loss / 100))
                sum_loss = 0.0
        # Evaluate accuracy after each epoch: switch to eval mode so BatchNorm uses
        # its running statistics, and disable gradient tracking with torch.no_grad()
        model.eval()
        with torch.no_grad():
            correct = 0
            total = 0
            for data in test_loader:
                images, labels = data
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)
                # Take the class with the highest score
                _, predicted = torch.max(outputs, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        print('epoch=%d accuracy=%.02f%%' % (epoch + 1, 100 * correct / total))
if __name__ == '__main__':
    main()
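Once training is done, the same network can classify a single image. Below is a minimal inference sketch (not part of the original post): it assumes a trained Net on the CPU, e.g. restored from a checkpoint, and reuses the test_data defined above. The checkpoint path is hypothetical, since the script above never saves one.

# Minimal single-image inference sketch (illustrative; assumes a trained model)
model = Net()
# model.load_state_dict(torch.load('mnist_fc.pt'))  # hypothetical checkpoint path
model.eval()
img, true_label = test_data[0]                # one normalized 1x28x28 tensor
with torch.no_grad():
    log_probs = model(img.unsqueeze(0))       # add a batch dimension -> shape (1, 10)
print('predicted: %d, actual: %d' % (log_probs.argmax(dim=1).item(), true_label))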