Building an AlexNet Model with PyTorch (CIFAR-10 Dataset)

Sky飞羽 · 2022-02-16


Skip the articles online whose training accuracy sits at 10-40%; they are stripped-down versions pieced together from one another. The full model below reaches about 72.6% accuracy on the CIFAR-10 test set after 10 epochs.

# import packages
import torch
import torchvision
# Device configuration.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
num_epochs = 10
num_classes = 10
batch_size = 100
learning_rate = 0.0001
# Transform configuration.
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor(), torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Load the CIFAR-10 dataset (set download=True on the first run if the files are not yet under data/CIFAR/).
train_dataset = torchvision.datasets.CIFAR10('data/CIFAR/', train=True, download=False, transform=transform)
test_dataset = torchvision.datasets.CIFAR10('data/CIFAR/', train=False, download=False, transform=transform)
# Data Loader.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
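# Optional sanity check (a minimal sketch, assuming the loaders above; the variable names
# here are illustrative only): pull one batch and confirm the shapes and value range.
# Normalize with mean=std=0.5 per channel maps pixels from [0, 1] to roughly [-1, 1].
sample_images, sample_labels = next(iter(train_loader))
print(sample_images.shape)    # expected: torch.Size([100, 3, 32, 32])
print(sample_labels.shape)    # expected: torch.Size([100])
print(sample_images.min().item(), sample_images.max().item())  # close to -1.0 and 1.0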
# AlexNet
class AlexNet(torch.nn.Module):
    def __init__(self, num_classes, init_weights=False):
        super(AlexNet, self).__init__()
        self.layer1 = torch.nn.Sequential(torch.nn.Conv2d(3, 96, kernel_size=5, stride=2, padding=2),
                                          # The original AlexNet uses kernel_size=11, stride=4, padding=2 for 224 x 224 inputs;
                                          # the kernel size and stride are reduced here because CIFAR-10 images are only 32 x 32.
                                          torch.nn.BatchNorm2d(96),
                                          torch.nn.ReLU(inplace=True),
                                          torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
        self.layer2 = torch.nn.Sequential(torch.nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),
                                          torch.nn.BatchNorm2d(256),
                                          torch.nn.ReLU(inplace=True),
                                          torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
        self.layer3 = torch.nn.Sequential(torch.nn.Conv2d(256, 384, kernel_size=3, padding=1),
                                          torch.nn.BatchNorm2d(384),
                                          torch.nn.ReLU(inplace=True))
        self.layer4 = torch.nn.Sequential(torch.nn.Conv2d(384, 384, kernel_size=3, padding=1),
                                          torch.nn.BatchNorm2d(384),
                                          torch.nn.ReLU(inplace=True))
        self.layer5 = torch.nn.Sequential(torch.nn.Conv2d(384, 384, kernel_size=3, padding=1),
                                          torch.nn.BatchNorm2d(384),
                                          torch.nn.ReLU(inplace=True),
                                          torch.nn.MaxPool2d(kernel_size=2, stride=2))
        self.fc1 = torch.nn.Sequential(torch.nn.Dropout(p=0.5),
                                       torch.nn.Linear(384 * 2 * 2, 4096),
                                       torch.nn.ReLU(inplace=True))
        self.fc2 = torch.nn.Sequential(torch.nn.Dropout(p=0.5),
                                       torch.nn.Linear(4096, 4096),
                                       torch.nn.ReLU(inplace=True))
        self.fc3 = torch.nn.Sequential(torch.nn.Dropout(p=0.5),
                                       torch.nn.Linear(4096, num_classes))
        if init_weights:
            self._initialize_weights()
    
    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)
        out = out.reshape(out.size(0), -1)
        out = self.fc1(out)
        out = self.fc2(out)
        out = self.fc3(out)
        return out
    
    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.Linear):
                torch.nn.init.normal_(m.weight, 0, 0.01)
                torch.nn.init.constant_(m.bias, 0)
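# Optional shape check (a minimal sketch, assuming the class above; _probe and _x are
# illustrative names): with the reduced first convolution, the spatial size goes
# 32 -> 16 -> 8 (layer1) -> 8 -> 4 (layer2) -> 4 -> 4 -> 2 (layers 3-5),
# so fc1 expects 384 * 2 * 2 flattened features.
_probe = AlexNet(num_classes).eval()
with torch.no_grad():
    _x = torch.zeros(1, 3, 32, 32)
    for _layer in (_probe.layer1, _probe.layer2, _probe.layer3, _probe.layer4, _probe.layer5):
        _x = _layer(_x)
print(_x.shape)  # expected: torch.Size([1, 384, 2, 2])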
# Instantiate the model and move it to the device
model = AlexNet(num_classes, init_weights=True).to(device)
# Loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
model.train()  # put Dropout/BatchNorm layers into training mode
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        
        # Backward pass and optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        if (i+1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss {:.4f}'.format(epoch+1, num_epochs, i+1, total_step, loss.item())) 
Epoch [1/10], Step [100/500], Loss 1.6698
Epoch [1/10], Step [200/500], Loss 1.4329
Epoch [1/10], Step [300/500], Loss 1.5223
Epoch [1/10], Step [400/500], Loss 1.2276
Epoch [1/10], Step [500/500], Loss 1.0247
Epoch [2/10], Step [100/500], Loss 1.2769
Epoch [2/10], Step [200/500], Loss 1.0408
Epoch [2/10], Step [300/500], Loss 0.9882
Epoch [2/10], Step [400/500], Loss 0.7543
Epoch [2/10], Step [500/500], Loss 1.1513
Epoch [3/10], Step [100/500], Loss 0.7595
Epoch [3/10], Step [200/500], Loss 0.9058
Epoch [3/10], Step [300/500], Loss 0.9082
Epoch [3/10], Step [400/500], Loss 0.9025
Epoch [3/10], Step [500/500], Loss 0.7106
Epoch [4/10], Step [100/500], Loss 0.6317
Epoch [4/10], Step [200/500], Loss 0.9360
Epoch [4/10], Step [300/500], Loss 0.6402
Epoch [4/10], Step [400/500], Loss 0.6619
Epoch [4/10], Step [500/500], Loss 0.6308
Epoch [5/10], Step [100/500], Loss 0.4849
Epoch [5/10], Step [200/500], Loss 0.6839
Epoch [5/10], Step [300/500], Loss 0.5940
Epoch [5/10], Step [400/500], Loss 0.4146
Epoch [5/10], Step [500/500], Loss 0.6590
Epoch [6/10], Step [100/500], Loss 0.4791
Epoch [6/10], Step [200/500], Loss 0.5881
Epoch [6/10], Step [300/500], Loss 0.3901
Epoch [6/10], Step [400/500], Loss 0.5108
Epoch [6/10], Step [500/500], Loss 0.7277
Epoch [7/10], Step [100/500], Loss 0.5787
Epoch [7/10], Step [200/500], Loss 0.5279
Epoch [7/10], Step [300/500], Loss 0.2926
Epoch [7/10], Step [400/500], Loss 0.4181
Epoch [7/10], Step [500/500], Loss 0.4489
Epoch [8/10], Step [100/500], Loss 0.3174
Epoch [8/10], Step [200/500], Loss 0.5291
Epoch [8/10], Step [300/500], Loss 0.3161
Epoch [8/10], Step [400/500], Loss 0.4721
Epoch [8/10], Step [500/500], Loss 0.4909
Epoch [9/10], Step [100/500], Loss 0.4290
Epoch [9/10], Step [200/500], Loss 0.2808
Epoch [9/10], Step [300/500], Loss 0.4152
Epoch [9/10], Step [400/500], Loss 0.3311
Epoch [9/10], Step [500/500], Loss 0.3916
Epoch [10/10], Step [100/500], Loss 0.0874
Epoch [10/10], Step [200/500], Loss 0.1275
Epoch [10/10], Step [300/500], Loss 0.3132
Epoch [10/10], Step [400/500], Loss 0.2241
Epoch [10/10], Step [500/500], Loss 0.3666
# Test the model.
model.eval()
with torch.no_grad():
    total = 0
    correct = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print ('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
Test Accuracy of the model on the 10000 test images: 72.61 %
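For a finer-grained view than the overall number, a per-class accuracy breakdown is a small extension of the test loop above. A minimal sketch, assuming the model, test_loader, and device defined earlier; the class names come from test_dataset.classes:

# Per-class accuracy on the test set (a hedged sketch, not a verified result).
class_correct = [0] * num_classes
class_total = [0] * num_classes
model.eval()
with torch.no_grad():
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        _, predicted = torch.max(model(images), 1)
        for label, pred in zip(labels, predicted):
            class_total[label.item()] += 1
            class_correct[label.item()] += int(label.item() == pred.item())
for name, c, t in zip(test_dataset.classes, class_correct, class_total):
    print('{}: {:.2f} %'.format(name, 100 * c / t))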