Implementing VGG16 on the CIFAR10 dataset with Keras
I'll skip the chit-chat; let's go straight to the code!
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.optimizers import SGD
from keras import regularizers
# load CIFAR10 and one-hot encode the labels
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255.0  # scale pixels to [0, 1]
x_test = x_test.astype('float32') / 255.0
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
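For reference, the loaded arrays have these shapes (standard facts of the CIFAR10 dataset):
print(x_train.shape)  # (50000, 32, 32, 3) -- 50,000 training images, 32x32 RGB
print(x_test.shape)   # (10000, 32, 32, 3) -- 10,000 test images
print(y_train.shape)  # (50000, 10) after one-hot encoding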
weight_decay = 0.0005
nb_epoch=100
batch_size=32
#layer1 32*32*3
model = Sequential()
model.add(Conv2D(64, (3, 3), padding='same', input_shape=(32, 32, 3),
                 kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
#layer2 32*32*64
model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#layer3 16*16*64
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer4 16*16*128
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#layer5 8*8*128
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer6 8*8*256
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer7 8*8*256
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#layer8 4*4*256
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer9 4*4*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer10 4*4*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
#layer11 2*2*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer12 2*2*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
#layer13 2*2*512
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
#layer14 1*1*512
model.add(Flatten())
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
#layer15 512
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
#layer16 512
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
# output: 10-way softmax over the CIFAR10 classes
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])
model.fit(x_train, y_train, epochs=nb_epoch, batch_size=batch_size,
          validation_split=0.1, verbose=1)
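Note that the ImageDataGenerator imported at the top is never used in the snippet above. As a hedged sketch of how on-the-fly data augmentation could be wired in instead of the plain model.fit call (the augmentation parameters here are illustrative assumptions, not values from the original post):
# optional: train with on-the-fly augmentation
datagen = ImageDataGenerator(
    rotation_range=15,       # random rotations up to 15 degrees
    width_shift_range=0.1,   # random horizontal shifts
    height_shift_range=0.1,  # random vertical shifts
    horizontal_flip=True)    # random left-right flips
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                    steps_per_epoch=len(x_train) // batch_size,
                    epochs=nb_epoch,
                    validation_data=(x_test, y_test))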
Supplementary note: training VGG16 on your own dataset, step by step, in PyTorch
Preparing and loading the dataset with ImageFolder
In many machine learning and deep learning tasks we have to supply our own images. That is, the dataset is not preprocessed for us the way MNIST or CIFAR10 are; more often we start from raw pictures. Take cat-vs-dog classification as an example: under a data directory there are two folders, train and val. Inside train there are cat and dog folders holding the image files, and val is organized the same way as train. With that, the dataset is ready.
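Concretely, the layout just described looks like this (folder names follow the cat/dog example; the file names are placeholders):
data/
    train/
        cat/    cat001.jpg, cat002.jpg, ...
        dog/    dog001.jpg, dog002.jpg, ...
    val/
        cat/    ...
        dog/    ...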

ImageFolder can partition a dataset using directory names as the class labels. The code below defines the transforms and loads the training and validation sets with ImageFolder and DataLoader:

# transforms for the training set
train_transforms = transforms.Compose([
    transforms.RandomResizedCrop(224),                 # random resized crop to 224x224
    transforms.RandomHorizontalFlip(),                 # random horizontal flip
    transforms.ToTensor(),                             # convert to a tensor
    transforms.Normalize((.5, .5, .5), (.5, .5, .5))   # normalize
])
# transforms for the validation set
val_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.RandomResizedCrop(224),
    transforms.ToTensor(),
    transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])
train_dir = "G:/data/train"  # training set path
# define the dataset
train_datasets = datasets.ImageFolder(train_dir, transform=train_transforms)
# load the dataset
train_dataloader = torch.utils.data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
val_dir = "G:/data/val"
val_datasets = datasets.ImageFolder(val_dir, transform=val_transforms)
val_dataloader = torch.utils.data.DataLoader(val_datasets, batch_size=batch_size, shuffle=True)
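To confirm which label each folder was assigned, ImageFolder exposes a class_to_idx mapping; a quick check (the printed dict assumes the cat/dog layout above):
print(train_datasets.class_to_idx)  # e.g. {'cat': 0, 'dog': 1} -- classes are indexed alphabetically
print(len(train_datasets))          # number of training images found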
Transfer learning, with VGG16 as the example
Here is the implementation of the transfer-learning code:
class VGGNet(nn.Module):
    def __init__(self, num_classes=2):  # num_classes is 2 here for binary classification
        super(VGGNet, self).__init__()
        net = models.vgg16(pretrained=True)  # load the pretrained VGG16 weights
        net.classifier = nn.Sequential()     # empty out the classifier; we replace it below
        self.features = net                  # keep VGG16's feature layers
        self.classifier = nn.Sequential(     # define our own classifier
            nn.Linear(512 * 7 * 7, 512),  # 512 * 7 * 7 is fixed by the VGG16 feature maps; the second argument (number of units) can be tuned
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 128),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(128, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
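The 512 * 7 * 7 input size follows from VGG16's five 2x2 max-pools shrinking a 224x224 input to 7x7 with 512 channels. A quick sanity check, as a sketch (the dummy tensor is just for illustration):
model = VGGNet(num_classes=2)
dummy = torch.randn(1, 3, 224, 224)  # one fake 224x224 RGB image
print(model.features(dummy).shape)   # torch.Size([1, 25088]) -- 512 * 7 * 7 flattened features
print(model(dummy).shape)            # torch.Size([1, 2]) -- one logit per class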
The complete code is as follows:
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms, models
import matplotlib.pyplot as plt  # needed for the plots at the end

batch_size = 16
learning_rate = 0.0002
num_epochs = 100  # the training loop below runs for 100 epochs
train_transforms = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])
val_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.RandomResizedCrop(224),
    transforms.ToTensor(),
    transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])
train_dir = './VGGDataSet/train'
train_datasets = datasets.ImageFolder(train_dir, transform=train_transforms)
train_dataloader = torch.utils.data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
val_dir = './VGGDataSet/val'
val_datasets = datasets.ImageFolder(val_dir, transform=val_transforms)
val_dataloader = torch.utils.data.DataLoader(val_datasets, batch_size=batch_size, shuffle=True)
class VGGNet(nn.Module):
    def __init__(self, num_classes=3):
        super(VGGNet, self).__init__()
        net = models.vgg16(pretrained=True)
        net.classifier = nn.Sequential()
        self.features = net
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 512),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 128),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(128, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
#-------------------- training ---------------------------------
model = VGGNet()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
loss_func = nn.CrossEntropyLoss()
Loss_list = []
Accuracy_list = []
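If you only want to fine-tune the new classifier while keeping the pretrained VGG16 features fixed, a minimal sketch of that variant (an option, not what the training run below does):
# optional: freeze the pretrained feature extractor and train only the new classifier
for param in model.features.parameters():
    param.requires_grad = False
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)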
for epoch in range(num_epochs):
    print('epoch {}'.format(epoch + 1))
    # training -----------------------------
    model.train()
    train_loss = 0.
    train_acc = 0.
    for batch_x, batch_y in train_dataloader:
        batch_x, batch_y = batch_x.to(device), batch_y.to(device)
        out = model(batch_x)
        loss = loss_func(out, batch_y)
        train_loss += loss.item()
        pred = torch.max(out, 1)[1]
        train_acc += (pred == batch_y).sum().item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('Train Loss: {:.6f}, Acc: {:.6f}'.format(
        train_loss / len(train_datasets), train_acc / len(train_datasets)))
    # evaluation --------------------------------
    model.eval()
    eval_loss = 0.
    eval_acc = 0.
    with torch.no_grad():  # no gradients needed during evaluation
        for batch_x, batch_y in val_dataloader:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)
            out = model(batch_x)
            loss = loss_func(out, batch_y)
            eval_loss += loss.item()
            pred = torch.max(out, 1)[1]
            eval_acc += (pred == batch_y).sum().item()
    print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
        eval_loss / len(val_datasets), eval_acc / len(val_datasets)))
    Loss_list.append(eval_loss / len(val_datasets))
    Accuracy_list.append(100 * eval_acc / len(val_datasets))
# plot the validation accuracy and loss curves
x1 = range(num_epochs)
x2 = range(num_epochs)
y1 = Accuracy_list
y2 = Loss_list
plt.subplot(2, 1, 1)
plt.plot(x1, y1, 'o-')
plt.title('Test accuracy vs. epochs')
plt.ylabel('Test accuracy')
plt.subplot(2, 1, 2)
plt.plot(x2, y2, '.-')
plt.xlabel('Epochs')
plt.ylabel('Test loss')
plt.show()
# plt.savefig("accuracy_loss.jpg")
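The walkthrough stops at plotting; as a hedged addition, the fine-tuned weights can be kept for later inference with torch.save (the file name vgg16_transfer.pth is an arbitrary choice):
# save only the learned parameters, not the whole module
torch.save(model.state_dict(), 'vgg16_transfer.pth')
# reload them later for inference
model = VGGNet()
model.load_state_dict(torch.load('vgg16_transfer.pth'))
model.eval()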
That's everything in this walkthrough of implementing VGG16 on CIFAR10 with Keras. I hope it serves as a useful reference, and thanks for supporting 腳本之家.