Andrew Ng DeepLearning Part 4, Week 2 Assignment: Building a CNN & ResNet50 with Keras

Today I finally got to feel how nice a good computer is. Other people's blog posts said this assignment is computationally heavy, so I abandoned the idea of running it on my own junky machine and used a lab computer instead.

First, build a CNN with Keras to get familiar with the library. It feels like a more polished layer on top of TensorFlow; the specific function parameters are all documented here: FAQ 常见问题解答 - Keras 中文文档
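For reference, the functional-API pattern used throughout this post looks like the following minimal sketch (the layer sizes here are arbitrary): every layer is an object that is called on a tensor and returns a new tensor, and ks.Model then ties an input tensor to an output tensor.

import keras as ks

inp = ks.layers.Input((64, 64, 3))                 # placeholder; batch dim is implicit
h = ks.layers.Conv2D(8, (3, 3))(inp)               # a layer object applied to a tensor
h = ks.layers.Flatten()(h)
out = ks.layers.Dense(1, activation="sigmoid")(h)
toy = ks.Model(inputs=inp, outputs=out)            # wire input to output
toy.summary()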

Building the CNN:

First, import the packages and load the data:

import os
import time
import keras as ks
import kt_utils
# import matplotlib.pyplot as plt  # only needed for the preview below

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow log noise
train_x, train_y, test_x, test_y, classes = kt_utils.load_dataset()
train_x = train_x/255  # scale pixels to [0, 1]
test_x = test_x/255
train_y = train_y.T  # load_dataset returns labels as (1, m); Keras expects (m, 1)
test_y = test_y.T
# print(train_x.shape, train_y.shape, test_x.shape, test_y.shape, classes.shape)
# plt.title(str(train_y[2]))
# plt.imshow(train_x[2])
# plt.show()

Build the network; it's fairly simple:

def ks_model(x):
    x_input = ks.layers.Input(x.shape[1:])  # input shape, without the batch dimension
    # first conv block
    z1 = ks.layers.Conv2D(6, (5, 5), name="conv1")(x_input)
    z1_bn = ks.layers.BatchNormalization(name="bn1")(z1)  # BN layer
    a1 = ks.layers.Activation("relu")(z1_bn)
    a1_max = ks.layers.MaxPool2D((2, 2), strides=2, name='mp1')(a1)
    # second conv block
    z2 = ks.layers.Conv2D(16, (5, 5), name="conv2")(a1_max)
    z2_bn = ks.layers.BatchNormalization(name="bn2")(z2)  # BN layer
    a2 = ks.layers.Activation("relu")(z2_bn)
    a2_max = ks.layers.MaxPool2D((2, 2), strides=2, name='mp2')(a2)
    a2_fc = ks.layers.Flatten()(a2_max)
    # third and fourth layers: fully connected
    a3 = ks.layers.Dense(120, activation="relu", name="FC3")(a2_fc)
    a4 = ks.layers.Dense(84, activation="relu", name="FC4")(a3)
    # fifth layer: output
    a5 = ks.layers.Dense(1, activation="sigmoid", name="output")(a4)
    # build the model
    model = ks.Model(inputs=x_input, outputs=a5, name="ks_model")
    return model

Train and evaluate the model:

train_model = ks_model(train_x)
train_model.compile(ks.optimizers.Adam(), loss="binary_crossentropy", metrics=["accuracy"])
time_start = time.time()
train_model.fit(train_x, train_y, batch_size=32, epochs=50)
# binary_crossentropy pairs with sigmoid; categorical_crossentropy pairs with softmax
_, acc = train_model.evaluate(test_x, test_y)
time_end = time.time()
train_model.summary()
print("Elapsed time:", time_end-time_start)
print("Test accuracy:", acc)

Training output:

50 epochs took only 7 seconds!!!
Next, build a ResNet50 model with Keras.

For the theory, the post below explains it in fair detail, though it contains a few small mistakes:

【中文】【吴恩达课后编程作业】Course 4 - 卷积神经网络 - 第二周作业_何宽的博客-CSDN博客_吴恩达卷积神经网络课后作业

import os
import keras as ks
import resnets_utils
import tensorflow as tf
import matplotlib.pyplot as plt
import time
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow log noise
np.random.seed(1)
train_x, train_y, test_x, test_y, classes = resnets_utils.load_dataset()
train_x = train_x/255  # scale pixels to [0, 1]
test_x = test_x/255


def onehot(y, c):
    # convert integer labels y into one-hot vectors with c classes
    run = tf.one_hot(y, c)
    with tf.compat.v1.Session() as sess:  # graph-mode session; the with-block closes it
        result = sess.run(run)
    return np.squeeze(result)  # drop the leading singleton dim: (1, m, c) -> (m, c)
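A session-free alternative that avoids TensorFlow entirely, for integer labels of shape (1, m) as returned by load_dataset (a small sketch with equivalent output):

def onehot_np(y, c):
    # index an identity matrix by the flattened labels: (1, m) -> (m, c)
    return np.eye(c)[y.reshape(-1)]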


train_y = onehot(train_y, classes.shape[0])
test_y = onehot(test_y, classes.shape[0])
print(train_x.shape, train_y.shape, test_x.shape, test_y.shape, classes.shape)
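The labels are one-hot encoded because this is now a multi-class problem (the SIGNS dataset has 6 classes), paired with softmax and categorical cross-entropy instead of the sigmoid/binary setup above. Next come the two kinds of residual blocks. same_block is the identity block: its shortcut passes the input straight through, so it can only be used where input and output dimensions already match: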
def same_block(a, f1, f2, f3):
    # identity block: 1x1 -> 3x3 -> 1x1 main path; the shortcut adds the input
    # unchanged, so f3 must equal the channel count of a
    b_z0 = ks.layers.Conv2D(f1, kernel_size=(1, 1), strides=(1, 1), padding="valid")(a)
    b_z0_n = ks.layers.BatchNormalization()(b_z0)
    b_a0 = ks.layers.Activation("relu")(b_z0_n)

    b_z1 = ks.layers.Conv2D(f2, kernel_size=(3, 3), strides=(1, 1), padding="same")(b_a0)
    b_z1_n = ks.layers.BatchNormalization()(b_z1)
    b_a1 = ks.layers.Activation("relu")(b_z1_n)

    b_z2 = ks.layers.Conv2D(f3, kernel_size=(1, 1), strides=(1, 1), padding="valid")(b_a1)
    b_z2_n = ks.layers.BatchNormalization()(b_z2)

    # add the shortcut, then apply the final ReLU
    b_z2_add = ks.layers.Add()([b_z2_n, a])
    return ks.layers.Activation("relu")(b_z2_add)
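Note the constraint the Add() imposes: the main-path output and the input a must have identical shapes, so f3 has to equal the input channel count and the spatial size is untouched. A quick throwaway check, using the just-defined same_block (the dummy shape is arbitrary):

dummy = ks.layers.Input((8, 8, 256))    # channels must equal f3 below
out = same_block(dummy, 64, 64, 256)    # f3 = 256 matches the input channels
print(ks.backend.int_shape(out))        # (None, 8, 8, 256): dimensions preserved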


def different_block(a, f1, f2, f3, s):
    # convolutional block: like the identity block, but the 3x3 conv uses stride s,
    # and the shortcut gets its own 1x1 conv so its dimensions match the main path
    b_z0 = ks.layers.Conv2D(f1, kernel_size=(1, 1), strides=(1, 1), padding="valid")(a)
    b_z0_n = ks.layers.BatchNormalization()(b_z0)
    b_a0 = ks.layers.Activation("relu")(b_z0_n)

    b_z1 = ks.layers.Conv2D(f2, kernel_size=(3, 3), strides=(s, s), padding="same")(b_a0)
    b_z1_n = ks.layers.BatchNormalization()(b_z1)
    b_a1 = ks.layers.Activation("relu")(b_z1_n)

    b_z2 = ks.layers.Conv2D(f3, kernel_size=(1, 1), strides=(1, 1), padding="valid")(b_a1)
    b_z2_n = ks.layers.BatchNormalization()(b_z2)

    # project the shortcut to the main path's spatial size and channel count
    b_a = ks.layers.Conv2D(f3, kernel_size=(1, 1), strides=(s, s), padding="valid")(a)
    b_a_n = ks.layers.BatchNormalization()(b_a)

    b_z2_add = ks.layers.Add()([b_z2_n, b_a_n])
    return ks.layers.Activation("relu")(b_z2_add)
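different_block is the convolutional block: the only changes from same_block are the stride-s 3x3 conv in the main path and the extra 1x1 conv (plus BN) on the shortcut. Once the main path alters the spatial size or channel count, the raw input can no longer be added directly, so the shortcut has to be projected to matching dimensions first. With both blocks ready, assemble the full network: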
def resnet50(x, c):
    x_input = ks.layers.Input(x.shape[1:], name="a0")

    # conv1: 7x7 stride-2 conv, BN, ReLU
    a0 = ks.layers.ZeroPadding2D((3, 3))(x_input)
    z1 = ks.layers.Conv2D(64, (7, 7), strides=(2, 2))(a0)
    z1_bn = ks.layers.BatchNormalization(name="z1_bn")(z1)
    a1 = ks.layers.Activation(activation="relu")(z1_bn)

    # conv2: max pool, then 1 convolutional block + 2 identity blocks
    a1_max = ks.layers.MaxPool2D((3, 3), strides=(2, 2))(a1)
    a2 = different_block(a1_max, 64, 64, 256, 1)
    a3 = same_block(a2, 64, 64, 256)
    a4 = same_block(a3, 64, 64, 256)

    # conv3: 1 convolutional block + 3 identity blocks
    a5 = different_block(a4, 128, 128, 512, 2)
    a6 = same_block(a5, 128, 128, 512)
    a7 = same_block(a6, 128, 128, 512)
    a8 = same_block(a7, 128, 128, 512)

    # conv4: 1 convolutional block + 5 identity blocks
    a9 = different_block(a8, 256, 256, 1024, 2)
    a10 = same_block(a9, 256, 256, 1024)
    a11 = same_block(a10, 256, 256, 1024)
    a12 = same_block(a11, 256, 256, 1024)
    a13 = same_block(a12, 256, 256, 1024)
    a14 = same_block(a13, 256, 256, 1024)

    # conv5: 1 convolutional block + 2 identity blocks, then average pooling
    a15 = different_block(a14, 512, 512, 2048, 2)
    a16 = same_block(a15, 512, 512, 2048)
    a17 = same_block(a16, 512, 512, 2048)
    a18 = ks.layers.AveragePooling2D(pool_size=(2, 2), padding="same")(a17)
    # FC: flatten and classify with softmax
    a19 = ks.layers.Flatten()(a18)
    a20 = ks.layers.Dense(c, activation="softmax")(a19)

    model = ks.Model(x_input, a20, name="resnet50")

    return model
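As a sanity check on the name: stage 1 has 1 conv, stages 2-5 have 3 + 4 + 6 + 3 = 16 blocks of 3 convs each, and the final dense layer adds 1 more, giving 1 + 48 + 1 = 50 weight layers (the shortcut convs are traditionally not counted).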
train_model = resnet50(train_x, classes.shape[0])
train_model.compile(optimizer=ks.optimizers.Adam(), loss="categorical_crossentropy", metrics=["accuracy"])
time_start = time.time()  # was missing in the original; needed for the print below
train_model.fit(train_x, train_y, batch_size=128, epochs=80)
_, acc = train_model.evaluate(test_x, test_y)
train_model.save("my_model.h5")  # save the trained model
time_end = time.time()
train_model.summary()
print("Elapsed time:", time_end-time_start)
print("Test accuracy =", acc)

Loss vs. epoch (plot not shown):

80 epochs took 86 s; test-set accuracy was 0.9666.
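Since the model was saved to my_model.h5, it can be reloaded later without retraining, e.g. (a minimal sketch):

restored = ks.models.load_model("my_model.h5")  # rebuilds architecture + weights
_, acc = restored.evaluate(test_x, test_y)      # should reproduce the accuracy above
print("restored-model accuracy:", acc)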

 
