Experiment 4: Convolutional Neural Networks [Machine Learning]


  • Preface
  • Code (my own)
  • Results
  • Closing

Preface

For learning and exchange purposes only.
Please read the article statement; by continuing, you are assumed to agree to it.

Code (my own)

import torch
import torchvision
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim

num_epochs = 6
batch_size = 100
learning_rate = 0.1
device = torch.device("cpu")  # target device for the model and tensors (CPU here)
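# A common variant (just a sketch, not used in this lab): pick the GPU
# automatically when one is available, otherwise fall back to the CPU.
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")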

train_dataset = torchvision.datasets.MNIST('./data/', train=True, download=True,
                                           transform=torchvision.transforms.Compose([
                                               torchvision.transforms.ToTensor(),
                                               torchvision.transforms.Normalize(
                                                   (0.1307,), (0.3081,))
                                           ]))
test_dataset = torchvision.datasets.MNIST('./data/', train=False, download=True,
                                          transform=torchvision.transforms.Compose([
                                              torchvision.transforms.ToTensor(),
                                              torchvision.transforms.Normalize(
                                                  (0.1307,), (0.3081,))
                                          ]))
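
# Where (0.1307,) and (0.3081,) come from: they are the commonly quoted mean and
# standard deviation of the MNIST training pixels. An optional sketch to
# recompute them from the raw (untransformed) training images:
raw_pixels = train_dataset.data.float() / 255.0            # uint8 images scaled to [0, 1]
print(raw_pixels.mean().item(), raw_pixels.std().item())   # roughly 0.1307 and 0.3081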

train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)


class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.layer1 = nn.Sequential(             # input shape (1, 28, 28)
            nn.Conv2d(in_channels=1,              # input channels (grayscale)
                      out_channels=16,            # number of filters
                      kernel_size=3,              # filter size
                      stride=1,                   # filter step
                      padding=1                   # padding=1 keeps the 28x28 spatial size
                      ),                          # output shape (16, 28, 28)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)           # 2x2 pooling, output shape (16, 14, 14)
        )
        self.layer2 = nn.Sequential(nn.Conv2d(16, 32, 3, 1, 1),
                                    nn.ReLU(),
                                    nn.MaxPool2d(2))   # output shape (32, 7, 7)
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = x.view(x.size(0), -1)                 # flatten to (batch_size, 32*7*7)
        output = self.out(x)
        return output


model = CNN().to(device)  # instantiate the network and move it to the target device
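
# Quick shape check (a sketch, not part of the lab itself): pushing a dummy
# batch through the layers confirms the shapes noted in the comments above.
with torch.no_grad():
    dummy = torch.zeros(1, 1, 28, 28, device=device)
    print(model.layer1(dummy).shape)                   # torch.Size([1, 16, 14, 14])
    print(model.layer2(model.layer1(dummy)).shape)     # torch.Size([1, 32, 7, 7])
    print(model(dummy).shape)                          # torch.Size([1, 10])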

# Loss function
criterion = nn.CrossEntropyLoss()
# Optimizer
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backpropagation and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # With batch_size = 100 there are 600 batches per epoch, so this
        # prints once per epoch (on the first batch).
        if i % 600 == 0:
            print("Epoch :{} \t Loss:{:.6f}".format(epoch, loss.item()))

def acc(labels, outputs):
    # Returns (batch size, number of correct predictions) for one batch.
    _, predicted = torch.max(outputs.data, 1)
    num = len(labels)
    right = (predicted == labels).sum().item()
    return num, right
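
# Tiny usage sketch for acc(): with labels [0, 1, 2] and an identity matrix as
# the logits, every row's argmax matches its label, so it returns (3, 3).
print(acc(torch.tensor([0, 1, 2]), torch.eye(3)))      # -> (3, 3)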


# Evaluate on the test set (the network has no dropout or batch-norm layers,
# so calling model.eval() first is not strictly required here).
with torch.no_grad():
    correct, total = 0, 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        num, right = acc(labels, outputs)
        correct = correct + right
        total = total + num
    print('Accuracy of the network on the 10000 test images:{}%'.format(100 * correct / total))

torch.save(model, 'model_total.ckpt')
# torch.save(net.state_dict(),'model_para.ckpt')

# -------------------------------------------------------------------
print("-------------------------导出数据------------------------------------------")
model = torch.load('model_total.ckpt')
'''
net=NeuralNet(input_size,[500,100],num_classes)
net.load_state_dict(torch.load('model_para.ckpt'))
'''
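# The commented-out lines above use the NeuralNet class from an earlier
# experiment. A rough equivalent for this script's CNN (just a sketch,
# assuming the parameters were saved with torch.save(model.state_dict(), ...)):
# model = CNN().to(device)
# model.load_state_dict(torch.load('model_para.ckpt'))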

with torch.no_grad():
    correct, total = 0, 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        num, right = acc(labels, outputs)
        correct = correct + right
        total = total + num
    print('Accuracy of the network on the 10000 test images:{}%'.format(100 * correct / total))

Results

[Result screenshot: training loss output, not reproduced here]

Closing

Please read the article statement; by continuing, you are assumed to agree to it.
