您好,登錄后才能下訂單哦!
不懂keras實(shí)現(xiàn)多種分類網(wǎng)絡(luò)的方法?其實(shí)想解決這個(gè)問題也不難,下面讓小編帶著大家一起學(xué)習(xí)怎么去解決,希望大家閱讀完這篇文章后大有收獲。
Keras應(yīng)該是最簡單的一種深度學(xué)習(xí)框架了,入門非常的簡單.
簡單記錄一下keras實(shí)現(xiàn)多種分類網(wǎng)絡(luò):如AlexNet、Vgg、ResNet
采用kaggle貓狗大戰(zhàn)的數(shù)據(jù)作為數(shù)據(jù)集.
由于AlexNet采用的是LRN標(biāo)準(zhǔn)化,Keras沒有內(nèi)置函數(shù)實(shí)現(xiàn),這里用batchNormalization代替
首先建立一個(gè)model.py的文件,里面存放著alexnet,vgg兩種模型,直接導(dǎo)入就可以了
#coding=utf-8
"""Keras implementations of several classification networks.

Provides AlexNet (Sequential and functional-API variants), VGG13 and
VGG16, for the Kaggle cats-vs-dogs dataset (2 classes by default).

AlexNet originally uses Local Response Normalisation (LRN); Keras has
no built-in LRN layer, so BatchNormalization is used as a substitute.
"""
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, BatchNormalization
from keras.layers import *  # also brings in MaxPool2D used by AlexNet2
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.models import Model


def keras_batchnormalization_relu(layer):
    """Apply BatchNormalization followed by a PReLU activation to ``layer``."""
    BN = BatchNormalization()(layer)
    ac = PReLU()(BN)
    return ac


def AlexNet(resize=227, classes=2):
    """Build AlexNet with the Sequential API.

    :param resize: input image height/width; input shape is (resize, resize, 3)
    :param classes: number of output classes
    :return: an uncompiled Sequential model
    """
    model = Sequential()
    # Stage 1: 11x11/4 conv -> BN (LRN substitute) -> 3x3/2 max-pool
    model.add(Conv2D(filters=96, kernel_size=(11, 11),
                     strides=(4, 4), padding='valid',
                     input_shape=(resize, resize, 3),
                     activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
    # Stage 2: 5x5 conv -> BN -> max-pool
    model.add(Conv2D(filters=256, kernel_size=(5, 5),
                     strides=(1, 1), padding='same',
                     activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
    # Stage 3: three consecutive 3x3 convs, then a final max-pool
    model.add(Conv2D(filters=384, kernel_size=(3, 3),
                     strides=(1, 1), padding='same',
                     activation='relu'))
    model.add(Conv2D(filters=384, kernel_size=(3, 3),
                     strides=(1, 1), padding='same',
                     activation='relu'))
    model.add(Conv2D(filters=256, kernel_size=(3, 3),
                     strides=(1, 1), padding='same',
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
    # Stage 4: fully-connected classifier head
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='relu'))
    model.add(Dropout(0.5))
    # Output layer
    model.add(Dense(classes, activation='softmax'))
    return model


def AlexNet2(inputs, classes=2, prob=0.5):
    """Build AlexNet with the functional API.

    :param inputs: an Input tensor, e.g. Input(shape=(227, 227, 3))
    :param classes: number of output classes
    :param prob: dropout probability for the dense layers
    :return: an uncompiled Model
    """
    print("input shape:", inputs.shape)
    conv1 = Conv2D(filters=96, kernel_size=(11, 11), strides=(4, 4),
                   padding='valid')(inputs)
    conv1 = keras_batchnormalization_relu(conv1)
    print("conv1 shape:", conv1.shape)
    pool1 = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(conv1)
    print("pool1 shape:", pool1.shape)
    conv2 = Conv2D(filters=256, kernel_size=(5, 5), padding='same')(pool1)
    conv2 = keras_batchnormalization_relu(conv2)
    print("conv2 shape:", conv2.shape)
    pool2 = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(conv2)
    print("pool2 shape:", pool2.shape)
    conv3 = Conv2D(filters=384, kernel_size=(3, 3), padding='same')(pool2)
    conv3 = PReLU()(conv3)
    print("conv3 shape:", conv3.shape)
    conv4 = Conv2D(filters=384, kernel_size=(3, 3), padding='same')(conv3)
    conv4 = PReLU()(conv4)
    print("conv4 shape:", conv4)
    conv5 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(conv4)
    conv5 = PReLU()(conv5)
    print("conv5 shape:", conv5)
    pool3 = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(conv5)
    print("pool3 shape:", pool3.shape)
    # Classifier head: flatten -> 4096 -> 4096 -> softmax
    dense1 = Flatten()(pool3)
    dense1 = Dense(4096, activation='relu')(dense1)
    print("dense1 shape:", dense1)
    dense1 = Dropout(prob)(dense1)
    dense2 = Dense(4096, activation='relu')(dense1)
    print("dense2 shape:", dense2)
    dense2 = Dropout(prob)(dense2)
    predict = Dense(classes, activation='softmax')(dense2)
    model = Model(inputs=inputs, outputs=predict)
    return model


def vgg13(resize=224, classes=2, prob=0.5):
    """Build VGG13 (2-2-2-2-2 conv layout) with the Sequential API.

    :param resize: input image height/width
    :param classes: number of output classes
    :param prob: dropout probability for the dense layers
    :return: an uncompiled Sequential model
    """
    model = Sequential()
    # Block 1: two 64-filter 3x3 convs
    model.add(Conv2D(64, (3, 3), strides=(1, 1),
                     input_shape=(resize, resize, 3), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 2: two 128-filter 3x3 convs
    # BUG FIX: kernel_size was (3, 2); VGG uses 3x3 kernels throughout.
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 3: two 256-filter convs
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 4: two 512-filter convs
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 5: two 512-filter convs
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Classifier head
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(prob))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(prob))
    model.add(Dense(classes, activation='softmax'))
    return model


def vgg16(resize=224, classes=2, prob=0.5):
    """Build VGG16 (2-2-3-3-3 conv layout) with the Sequential API.

    :param resize: input image height/width
    :param classes: number of output classes
    :param prob: dropout probability for the dense layers
    :return: an uncompiled Sequential model
    """
    model = Sequential()
    # Block 1: two 64-filter 3x3 convs
    model.add(Conv2D(64, (3, 3), strides=(1, 1),
                     input_shape=(resize, resize, 3), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 2: two 128-filter 3x3 convs
    # BUG FIX: kernel_size was (3, 2); VGG uses 3x3 kernels throughout.
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 3: three 256-filter convs
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 4: three 512-filter convs
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Block 5: three 512-filter convs
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same',
                     activation='relu', kernel_initializer='uniform'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Classifier head
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(prob))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(prob))
    model.add(Dense(classes, activation='softmax'))
    return model
然后建立一個(gè)train.py文件,用于讀取數(shù)據(jù)和訓(xùn)練模型.
#coding=utf-8
"""Load the Kaggle cats-vs-dogs data and train one of the models.

main()  -- all images in a single directory named dog.N.jpg / cat.N.jpg
main2() -- images already split into one sub-directory per class
"""
import keras
import cv2
import os
import numpy as np
import model
import modelResNet
import tensorflow as tf
from keras.layers import Input, Dense
from keras.preprocessing.image import ImageDataGenerator

resize = 224
batch_size = 128
path = "/home/hjxu/PycharmProjects/01_cats_vs_dogs/data"
trainDirectory = '/home/hjxu/PycharmProjects/01_cats_vs_dogs/data/train/'


def _load_range(lo, hi):
    """Read images lo..hi-1 from ``path``/train/ into int32 arrays.

    Odd indices are read as dog.<i>.jpg (label 1), even indices as
    cat.<i>.jpg (label 0); every image is resized to (resize, resize).
    Returns (data, label) with data shaped (hi-lo, resize, resize, 3).
    """
    n = hi - lo
    data = np.empty((n, resize, resize, 3), dtype="int32")
    label = np.empty((n,), dtype="int32")
    for i in range(lo, hi):
        species, label[i - lo] = ('dog.', 1) if i % 2 else ('cat.', 0)
        img = cv2.imread(path + '/train/' + species + str(i) + '.jpg')
        data[i - lo] = cv2.resize(img, (resize, resize))
    return data, label


def load_data():
    """Return (train_data, train_label, test_data, test_label).

    Images 0-4999 form the training split, 5000-9999 the test split.
    """
    train_data, train_label = _load_range(0, 5000)
    test_data, test_label = _load_range(5000, 10000)
    return train_data, train_label, test_data, test_label


def main():
    """Train AlexNet2 on arrays loaded entirely into memory."""
    train_data, train_label, test_data, test_label = load_data()
    train_data, test_data = train_data.astype('float32'), test_data.astype('float32')
    train_data, test_data = train_data/255, test_data/255
    # One-hot encode labels -- required by categorical_crossentropy.
    train_label = keras.utils.to_categorical(train_label, 2)
    test_label = keras.utils.to_categorical(test_label, 2)
    inputs = Input(shape=(224, 224, 3))
    modelAlex = model.AlexNet2(inputs, classes=2)
    modelAlex.compile(loss='categorical_crossentropy',
                      optimizer='sgd',
                      metrics=['accuracy'])
    modelAlex.summary()  # print a layer-by-layer model description
    # validation_split=0.2 holds out the last 20% of the arrays for
    # validation; those samples do not participate in training.
    modelAlex.fit(train_data, train_label,
                  batch_size=batch_size,
                  epochs=50,
                  validation_split=0.2,
                  shuffle=True)
    scores = modelAlex.evaluate(train_data, train_label, verbose=1)
    print(scores)
    scores = modelAlex.evaluate(test_data, test_label, verbose=1)
    print(scores)
    # NOTE(review): '.h6' looks like a typo for '.h5'; kept so any
    # existing scripts that load this exact filename keep working.
    modelAlex.save('my_model_weights2.h6')


def main2():
    """Train from directories with ImageDataGenerator (one folder per class)."""
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(trainDirectory,
                                                        target_size=(224, 224),
                                                        batch_size=32,
                                                        class_mode='binary')
    validation_generator = test_datagen.flow_from_directory(trainDirectory,
                                                            target_size=(224, 224),
                                                            batch_size=32,
                                                            class_mode='binary')
    inputs = Input(shape=(224, 224, 3))  # used by the AlexNet2 variant below
    # modelAlex = model.AlexNet2(inputs, classes=2)
    modelAlex = model.vgg13(resize=224, classes=2, prob=0.5)
    # modelAlex = modelResNet.ResNet50(shape=224, classes=2)
    # sparse_categorical_crossentropy accepts the integer labels that
    # class_mode='binary' yields (no one-hot encoding needed).
    modelAlex.compile(loss='sparse_categorical_crossentropy',
                      optimizer='sgd',
                      metrics=['accuracy'])
    modelAlex.summary()
    modelAlex.fit_generator(train_generator,
                            steps_per_epoch=1000,
                            epochs=60,
                            validation_data=validation_generator,
                            validation_steps=200)
    modelAlex.save('model32.hdf5')


if __name__ == "__main__":
    # Use main()  when all images sit in one folder (dog.N.jpg / cat.N.jpg).
    # Use main2() when cats and dogs are pre-sorted into two class folders.
    main2()
得到模型后該怎么測試一張圖像呢?
建立一個(gè)testOneImg.py腳本,代碼如下
#coding=utf-8
"""Load trained AlexNet weights and classify a single image."""
import time

import numpy as np
import cv2
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions
import model
from keras.models import Sequential

pats = '/home/hjxu/tf_study/catVsDogsWithKeras/my_model_weights.h6'

# Rebuild the architecture, then load the trained weights into it.
modelAlex = model.AlexNet(resize=224, classes=2)
modelAlex.load_weights(pats)

# BUG FIX: the imread line was commented out, leaving `img` undefined
# when cv2.resize ran.
img = cv2.imread('/home/hjxu/tf_study/catVsDogsWithKeras/111.jpg')
img = cv2.resize(img, (224, 224))
# BUG FIX: `img/255` on an integer image truncates every pixel to 0
# under Python 2 division; convert to a float array first, then scale.
x = img_to_array(img) / 255.0      # (224, 224, 3)
x = np.expand_dims(x, axis=0)      # (1, 224, 224, 3) -- Keras expects a batch axis
# x = preprocess_input(x)          # optional VGG-style preprocessing
print(x.shape)

# BUG FIX: `time` was used without being imported.
t0 = time.time()
y_pred = modelAlex.predict(x)      # class probabilities, shape (1, 2)
print("prediction took %.3fs" % (time.time() - t0))
# BUG FIX: decode_predictions() only understands 1000-class ImageNet
# outputs and raises on this 2-class model; report the result directly.
print("probabilities:", y_pred)
print("predicted class:", int(np.argmax(y_pred, axis=1)[0]))
不得不說,Keras真心簡單方便。
補(bǔ)充知識(shí):keras中的函數(shù)式API——?dú)埐钸B接+權(quán)重共享的理解
1、殘差連接
# coding: utf-8
"""Residual connections (functional API).

A residual connection feeds the output of an earlier layer in as an
input to a later layer, creating a shortcut through a sequential
network.  It is a common graph-like topology that addresses two
problems shared by all large-scale deep models: vanishing gradients
and representational bottlenecks (adding residual connections to any
network deeper than ~10 layers is likely to help).
"""
from keras import layers

# --- identity shortcut: feature maps keep the same shape ------------------
x = ...
out = layers.Conv2D(128, 3, activation='relu', padding='same')(x)
out = layers.Conv2D(128, 3, activation='relu', padding='same')(out)
out = layers.Conv2D(128, 3, activation='relu', padding='same')(out)
out = layers.add([out, x])  # add the original x back onto the output features

# --- linear shortcut: shapes differ, so project x first -------------------
x = ...
out = layers.Conv2D(128, 3, activation='relu', padding='same')(x)
out = layers.Conv2D(128, 3, activation='relu', padding='same')(out)
out = layers.MaxPooling2D(2, strides=2)(out)
# A 1x1 conv linearly downsamples the original x to the same shape as `out`.
residual = layers.Conv2D(128, 1, strides=2, padding='same')(x)
out = layers.add([out, residual])  # add the projected x onto the output
2、權(quán)重共享
即多次調(diào)用同一個(gè)實(shí)例
# coding: utf-8
"""Functional API: weight sharing.

Calling the same layer *instance* several times reuses that instance's
weights -- the layer is defined once and shared between branches, with
no need to write it twice.
"""
from keras import layers
from keras import Input
from keras.models import Model

shared_lstm = layers.LSTM(32)  # one LSTM instance, invoked on both branches

# Left branch.
left_input = Input(shape=(None, 128))
left_output = shared_lstm(left_input)   # first call to the shared instance

# Right branch.
right_input = Input(shape=(None, 128))
right_output = shared_lstm(right_input)  # second call -- same weights

# Join the two branch outputs.
merged = layers.concatenate([left_output, right_output], axis=-1)

# Classifier on top of the merged representation.
predictions = layers.Dense(1, activation='sigmoid')(merged)

# Build the two-input model and fit it.
model = Model([left_input, right_input], predictions)
model.fit([left_data, right_data], targets)
感謝你能夠認(rèn)真閱讀完這篇文章,希望小編分享keras實(shí)現(xiàn)多種分類網(wǎng)絡(luò)的方法內(nèi)容對大家有幫助,同時(shí)也希望大家多多支持億速云,關(guān)注億速云行業(yè)資訊頻道,遇到問題就找億速云,詳細(xì)的解決方法等著你來學(xué)習(xí)!
免責(zé)聲明:本站發(fā)布的內(nèi)容(圖片、視頻和文字)以原創(chuàng)、轉(zhuǎn)載和分享為主,文章觀點(diǎn)不代表本網(wǎng)站立場,如果涉及侵權(quán)請聯(lián)系站長郵箱:is@yisu.com進(jìn)行舉報(bào),并提供相關(guān)證據(jù),一經(jīng)查實(shí),將立刻刪除涉嫌侵權(quán)內(nèi)容。