Usage notes on Keras ImageDataGenerator and flow()
For the ImageDataGenerator parameters, look them up in the documentation yourself; this note focuses on how flow() behaves.
from keras.preprocessing import image
import numpy as np

X_train = np.ones((3, 123, 123, 1))
Y_train = np.array([[1], [2], [2]])

generator = image.ImageDataGenerator(featurewise_center=False,
                                     samplewise_center=False,
                                     featurewise_std_normalization=False,
                                     samplewise_std_normalization=False,
                                     zca_whitening=False,
                                     zca_epsilon=1e-6,
                                     rotation_range=180,
                                     width_shift_range=0.2,
                                     height_shift_range=0.2,
                                     shear_range=0,
                                     zoom_range=0.001,
                                     channel_shift_range=0,
                                     fill_mode='nearest',
                                     cval=0.,
                                     horizontal_flip=True,
                                     vertical_flip=True,
                                     rescale=None,
                                     preprocessing_function=None,
                                     data_format='channels_last')

a = generator.flow(X_train, Y_train, batch_size=20)  # flow() returns an iterator that can be used directly in a for loop
'''
If batch_size is smaller than the first dimension m of X, each next() call yields
an array whose first dimension is batch_size, drawn at random from the input.
If batch_size is larger than m, each next() call yields all m samples, just in a
random order. The X and Y in every batch remain paired one-to-one.
If the batches are fed into a tf.placeholder(), the generated array shape must
match that placeholder.
'''
X, Y = next(a)
print(Y)
X, Y = next(a)
print(Y)
X, Y = next(a)
print(Y)
X, Y = next(a)
Output
[[2] [1] [2]]
[[2] [2] [1]]
[[2] [2] [1]]
[[2] [2] [1]]
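As a quick way to see this batching and pairing behavior yourself, here is a minimal sketch that iterates over a few batches and prints their shapes and labels; the smaller batch_size and the variable names (flow_iter, X_batch, Y_batch) are my own additions for illustration:

from keras.preprocessing import image
import numpy as np

X_train = np.ones((3, 123, 123, 1))
Y_train = np.array([[1], [2], [2]])

generator = image.ImageDataGenerator(horizontal_flip=True)
flow_iter = generator.flow(X_train, Y_train, batch_size=2)

# print the shape of each augmented batch; flow() keeps X and Y paired
for i, (X_batch, Y_batch) in enumerate(flow_iter):
    print(i, X_batch.shape, Y_batch.shape, Y_batch.ravel())
    if i >= 3:   # flow() cycles forever, so break out explicitly
        break

The same iterator can also be passed straight to model.fit_generator(), which is what the training script below does with flow_from_directory().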
Additional note: a pitfall of mixing TensorFlow and Keras
When mixing TensorFlow and Keras, model.save works fine, but load_model throws an error, so I am noting it here.
The error is: TypeError: tuple indices must be integers, not list
Searching Baidu turned up nothing. On Google I found a similar question, but it was written in a language I could not read (it turned out to be Russian). After running it through Google Translate I found the solution, so I am posting the original question here as a heads-up.
Original training code
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
# Directory with training data
train_dir = 'train'
# Directory with validation data
val_dir = 'val'
# Directory with test data
test_dir = 'val'
# Image dimensions
img_width, img_height = 800, 800
# Shape of the image tensor fed into the network
# (TensorFlow backend, channels_last)
input_shape = (img_width, img_height, 3)
# Number of epochs
epochs = 1
# Mini-batch size
batch_size = 4
# Number of training images
nb_train_samples = 300
# Number of validation images
nb_validation_samples = 25
# Number of test images
nb_test_samples = 25
model = Sequential()
model.add(Conv2D(32, (7, 7), padding="same", input_shape=input_shape))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(10, 10)))
model.add(Conv2D(64, (5, 5), padding="same"))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(10, 10)))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer="Nadam",
              metrics=['accuracy'])
print(model.summary())
datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = datagen.flow_from_directory(
    train_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
val_generator = datagen.flow_from_directory(
    val_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
test_generator = datagen.flow_from_directory(
    test_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=val_generator,
    validation_steps=nb_validation_samples // batch_size)
print('Сохраняем сеть')
model.save("grib.h5")
print("Сохранение завершено!")
Model loading code
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
from keras.models import load_model
print("Загрузка сети")
model = load_model("grib.h5")
print("Загрузка завершена!")
The error
/usr/bin/python3.5 /home/disk2/py/neroset/do.py
/home/mama/.local/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
Загрузка сети
Traceback (most recent call last):
File "/home/disk2/py/neroset/do.py", line 13, in <module>
model = load_model("grib.h5")
File "/usr/local/lib/python3.5/dist-packages/keras/models.py", line 243, in load_model
model = model_from_config(model_config, custom_objects=custom_objects)
File "/usr/local/lib/python3.5/dist-packages/keras/models.py", line 317, in model_from_config
return layer_module.deserialize(config, custom_objects=custom_objects)
File "/usr/local/lib/python3.5/dist-packages/keras/layers/__init__.py", line 55, in deserialize
printable_module_name='layer')
File "/usr/local/lib/python3.5/dist-packages/keras/utils/generic_utils.py", line 144, in deserialize_keras_object
list(custom_objects.items())))
File "/usr/local/lib/python3.5/dist-packages/keras/models.py", line 1350, in from_config
model.add(layer)
File "/usr/local/lib/python3.5/dist-packages/keras/models.py", line 492, in add
output_tensor = layer(self.outputs[0])
File "/usr/local/lib/python3.5/dist-packages/keras/engine/topology.py", line 590, in __call__
self.build(input_shapes[0])
File "/usr/local/lib/python3.5/dist-packages/keras/layers/normalization.py", line 92, in build
dim = input_shape[self.axis]
TypeError: tuple indices must be integers or slices, not list
Process finished with exit code 1
Explanation from the Russian poster
(Translated from the Russian answer:) "If I remove BatchNormalization, everything works fine. Can anyone tell me where the mistake is? I found out that Keras model saving and TensorFlow's BatchNormalization do not work together; you simply need to change the import lines." The traceback above hints at the reason: the tensorflow.python.keras BatchNormalization layer stores its axis in the saved config as a list, while the standalone keras layer indexes input_shape[self.axis] expecting an integer, hence the TypeError.
That is, change the imports so that everything comes from the standalone keras package:
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.layers import Activation, Dropout, Flatten, Dense
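For completeness, here is a minimal sketch of a loading script whose imports match; it assumes grib.h5 has been re-saved after switching the training script to the keras imports above:

from keras.models import load_model

# Saving and loading now go through the same Keras package,
# so the BatchNormalization config deserializes without the axis mismatch.
model = load_model("grib.h5")
print(model.summary())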
This fixed the problem completely.
Link to the original question:
https://qa-help.ru/questions/keras-batchnormalization
That is all for these usage notes on Keras ImageDataGenerator and flow(). I hope it gives you a useful reference, and thank you for supporting 腳本之家.