Deep learning: implementing convolutional neural networks with TensorFlow 2.0 / Keras (cifar10: ResNet-34, ResNet-18)

数数扁桃 2022-02-26

1. ResNet-34 convolutional neural network (cifar10)

[Figure: a residual building block, with the shortcut connection drawn as an arc skipping two convolutional layers]
As networks get deeper, training accuracy starts to fall and the error rate rises, the so-called "degradation" problem. In principle, a deeper model should not produce a higher error rate than its shallower counterpart, and this is not caused by overfitting: as the model grows more complex, SGD optimization becomes harder, so the model fails to reach a good solution. ResNet was created precisely to address this problem.
ResNet, the deep residual network, is built on the idea of "shortcut connections" that skip one or more layers, drawn as the curved arc in the figure above. ResNet distinguishes two mappings: the identity mapping and the residual mapping. The final output is y = F(x) + x, where the identity mapping is the input x itself and the residual mapping is the residual y - x = F(x). This simple addition adds no extra parameters or computation to the network, yet it speeds up training considerably and improves results, and as the network grows deeper this simple structure handles the degradation problem well.
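
Before the full implementation, here is a minimal sketch of a single shortcut connection in the Keras functional API (a toy block for illustration, not the exact code below):

import tensorflow as tf
from tensorflow.keras import layers

inputs = tf.keras.Input(shape=(32, 32, 64))          # feature map entering the block
f = layers.Conv2D(64, (3, 3), padding='same', activation='relu')(inputs)
f = layers.Conv2D(64, (3, 3), padding='same')(f)     # residual branch F(x)
outputs = layers.ReLU()(layers.add([f, inputs]))     # y = relu(F(x) + x)
block = tf.keras.Model(inputs, outputs)

Because F(x) and x have the same shape here, the addition costs nothing extra; when a block downsamples, the implementation below first projects x with a 1x1 convolution so the shapes match.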

from tensorflow.keras import models,layers,losses,optimizers
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.layers import Conv2D,MaxPooling2D,Dense,BatchNormalization,Activation,GlobalAveragePooling2D
from tensorflow.keras.models import Sequential
import tensorflow as tf
import matplotlib.pyplot as plt
class cellblock(models.Model):
    # Basic ResNet building block: two 3x3 convolutions plus a shortcut
    def __init__(self,filter_num,strides=1):
        super(cellblock, self).__init__()
        self.conv1=Conv2D(filter_num,(3,3),strides=strides,padding='same')
        self.bn1=BatchNormalization()
        self.relu=Activation('relu')

        self.conv2 = Conv2D(filter_num,(3,3), strides=1,padding='same')
        self.bn2 = BatchNormalization()

        # When the block downsamples (strides != 1), project the input with a
        # 1x1 convolution so the shortcut matches the conv branch's shape;
        # otherwise the shortcut is the identity
        if strides!=1:
            self.residual=Conv2D(filter_num,(1,1), strides=strides)
        else:
            self.residual=lambda x:x


    def call(self, inputs, training=None, mask=None):
        x=self.conv1(inputs)
        x=self.bn1(x)
        x=self.relu(x)

        x=self.conv2(x)
        x=self.bn2(x)

        r=self.residual(inputs)
        x=layers.add([x,r])   # y = F(x) + x
        output=tf.nn.relu(x)
        return output

class ResNet(models.Model):
    def __init__(self,layers_dims,n_classes=10):
        super(ResNet, self).__init__()
        # Stem: 7x7 convolution followed by max pooling, as in the original ResNet
        self.model=Sequential([
            Conv2D(64,(7,7),strides=(2,2),padding='same'),
            BatchNormalization(),
            Activation('relu'),
            MaxPooling2D((3,3),strides=(2,2),padding='same')
        ])

        self.layer1=self.build_cellblock(64,layers_dims[0])
        self.layer2=self.build_cellblock(128,layers_dims[1],strides=2)
        self.layer3=self.build_cellblock(256,layers_dims[2],strides=2)
        self.layer4=self.build_cellblock(512,layers_dims[3],strides=2)

        self.avgpool=GlobalAveragePooling2D()
        self.fc=Dense(n_classes,activation='softmax')


    def build_cellblock(self,filter_num,blocks,strides=1):
        # The first block of a stage may downsample (strides=2);
        # the remaining blocks keep stride 1
        res_cellblock=Sequential()
        res_cellblock.add(cellblock(filter_num,strides=strides))
        for _ in range(1,blocks):
            res_cellblock.add(cellblock(filter_num,strides=1))
        return res_cellblock


    def call(self, inputs, training=None, mask=None):
        x=self.model(inputs)
        x=self.layer1(x)
        x=self.layer2(x)
        x=self.layer3(x)
        x=self.layer4(x)

        x=self.avgpool(x)
        x=self.fc(x)
        return x

def build_resnet(resname,n_classes):
    # Number of cellblocks in each of the four stages
    res_config={'ResNet34':[3,4,6,3],
                'ResNet18':[2,2,2,2]}
    return ResNet(res_config[resname],n_classes)


if __name__ == '__main__':

    (train_x,train_y),(test_x,test_y)=cifar10.load_data()
    print(train_x.shape,train_y.shape,test_x.shape,test_y.shape)
    # Scale pixel values to [0, 1]
    train_x=train_x.reshape(-1,32,32,3)/255
    test_x=test_x.reshape(-1,32,32,3)/255

    model=build_resnet('ResNet34',10)
    model.build(input_shape=(None,32,32,3))
    model.summary()
    model.compile(optimizer=optimizers.Adam(learning_rate=0.01),loss=losses.sparse_categorical_crossentropy,metrics=['accuracy'])
    history=model.fit(train_x,train_y,validation_data=(test_x,test_y),epochs=5,batch_size=64)
    # evaluate loss and accuracy on the test set
    score=model.evaluate(test_x,test_y)
    print('loss',score[0])
    print('accuracy',score[1])

    train_loss = history.history['loss']
    test_loss = history.history['val_loss']

    train_acc = history.history['accuracy']
    test_acc = history.history['val_accuracy']

    # plot the loss and accuracy curves
    plt.figure(figsize=(10, 5))
    plt.subplot(121)
    plt.title('Train_loss And Test_loss')
    plt.plot(train_loss, label='train_loss')
    plt.plot(test_loss, label='test_loss')
    plt.legend()

    plt.subplot(122)
    plt.title('Train_acc And Test_acc')
    plt.plot(train_acc, label='train_acc')
    plt.plot(test_acc, label='test_acc')
    plt.legend()
    plt.show()
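
To train the ResNet-18 variant mentioned in the title instead of ResNet-34, only the config key passed to build_resnet changes:

    model = build_resnet('ResNet18', 10)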

2. ResNet-34 convolutional neural network (cifar2)

'''
1. Load the cifar2 dataset and preprocess the data
2. Read the cifar2 images into a tf.data pipeline
3. Train a ResNet model on the data
4. Tune the model with custom hyperparameters
5. Plot accuracy and loss curves for the training and test sets
'''
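
# Note on data layout (an assumption based on the glob patterns and the regex
# labelling rule in load_image below; the non-automobile class name, e.g.
# airplane, is hypothetical):
#   ../data/cifar2/
#       train/
#           airplane/*.jpg      -> label 0 (any path not matching '.*automobile.*')
#           automobile/*.jpg    -> label 1
#       test/
#           airplane/*.jpg
#           automobile/*.jpg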

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(40)

from tensorflow.keras import losses,models,layers,Sequential,optimizers
from tensorflow.keras.layers import Dense,Activation,BatchNormalization,GlobalAveragePooling2D,MaxPooling2D,Conv2D
import matplotlib.pyplot as plt

class CellBlock(layers.Layer):
    def __init__(self,filter_num,strides = 1):
        super(CellBlock, self).__init__()

        self.conv1 = Conv2D(filter_num,(3,3),strides=strides,padding='same')
        self.bn1 = BatchNormalization()
        self.relu = Activation('relu')

        self.conv2 = Conv2D(filter_num,(3,3),strides=1,padding='same')
        self.bn2 = BatchNormalization()

        if strides!=1:
            self.residual = Conv2D(filter_num,(1,1),strides=strides)
        else:
            self.residual = lambda x:x

    def call(self, inputs, **kwargs):

        x = self.conv1(inputs)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)

        r = self.residual(inputs)

        x = layers.add([x,r])
        x = tf.nn.relu(x)

        return x

class ResNet(models.Model):
    def __init__(self,layers_dims=[3,4,6,3],nb_classes=10):
        super(ResNet, self).__init__()

        self.stem = Sequential([
            Conv2D(64,(7,7),strides=(2,2),padding='same'),
            BatchNormalization(),
            Activation('relu'),
            MaxPooling2D((3,3),strides=(2,2),padding='same')
        ])

        self.layer1 = self.build_cellblock(64,layers_dims[0])               # stage 1
        self.layer2 = self.build_cellblock(128,layers_dims[1],strides=2)    # stage 2, downsamples
        self.layer3 = self.build_cellblock(256,layers_dims[2],strides=2)    # stage 3, downsamples
        self.layer4 = self.build_cellblock(512,layers_dims[3],strides=2)    # stage 4, downsamples

        self.avgpool = GlobalAveragePooling2D()
        # Sigmoid output, since this version is used for binary (cifar2) classification
        self.fc = Dense(nb_classes,activation='sigmoid')

    def call(self, inputs, training=None, mask=None):

        x = self.stem(inputs)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = self.fc(x)

        return x

    def build_cellblock(self,filter_num,blocks,strides=1):
        res_cellblock= Sequential()
        res_cellblock.add(CellBlock(filter_num,strides=strides))

        for _ in range(1,blocks):
            res_cellblock.add(CellBlock(filter_num,strides=1))

        return res_cellblock

def build_resnet(resname,nb_classes):
    res_config = {'resnet18':[2,2,2,2],
                  'resnet34':[3,4,6,3]}
    return ResNet(res_config[resname],nb_classes)


def load_image(path):
    # Label images by directory name: 'automobile' -> 1, anything else -> 0
    label = tf.constant(1,tf.int32) if tf.strings.regex_full_match(path,'.*automobile.*') \
            else tf.constant(0,tf.int32)

    img = tf.io.read_file(path)
    img = tf.image.decode_jpeg(img)
    img = tf.image.resize(img,size=(32,32))/255.0

    return img,label

if __name__ == '__main__':

    train = tf.data.Dataset.list_files('../data/cifar2/train/*/*.jpg')\
            .map(load_image,tf.data.experimental.AUTOTUNE).shuffle(buffer_size=1000).batch(100)\
            .prefetch(tf.data.experimental.AUTOTUNE)

    test = tf.data.Dataset.list_files('../data/cifar2/test/*/*.jpg')\
            .map(load_image,tf.data.experimental.AUTOTUNE).batch(100)\
            .prefetch(tf.data.experimental.AUTOTUNE)

    # Default layers_dims=[3,4,6,3] gives ResNet-34; one sigmoid unit for the binary task
    model = ResNet(nb_classes=1)

    model.build(input_shape=(None,32,32,3))

    model.summary()

    model.compile(loss = losses.binary_crossentropy,
                  optimizer=optimizers.Adam(),
                  metrics=['accuracy'])

    history = model.fit(train,epochs=6,validation_data=test)

    train_loss = history.history['loss']
    test_loss = history.history['val_loss']

    train_acc = history.history['accuracy']
    test_acc = history.history['val_accuracy']

    plt.figure(figsize=(10,5))

    plt.subplot(121)
    plt.title('Train_loss And Test_loss')
    plt.plot(train_loss,label='train_loss')
    plt.plot(test_loss,label='test_loss')
    plt.legend()

    plt.subplot(122)
    plt.title('Train_acc And Test_acc')
    plt.plot(train_acc,label='train_acc')
    plt.plot(test_acc,label='test_acc')
    plt.legend()
    plt.show()

Summary

LeNet was the first convolutional neural network successfully applied to handwritten digit recognition
AlexNet demonstrated the power of convolutional neural networks and set off an unprecedented wave of CNN research
ZFNet used visualization to show what each layer of a convolutional network learns and contributes
VGG replaces large convolution kernels with stacks of small ones; stacked small-kernel conv layers cover the same receptive field as a single large-kernel layer, make the decision function more discriminative, and reduce the parameter count
GoogLeNet widened the network, convolving with kernels of several sizes in parallel before aggregating the results, and used 1x1 convolutions for dimensionality reduction to cut the parameter count
ResNet solved the degradation problem, allowing neural networks to go deeper
