سلام، میشه توضیحی در مورد fit_generator در کراس بدید؟ و اینکه وقتی از این استفاده می‌کنیم نیازی به حلقه for برای لود دیتاها داریم؟ یا طبق دایرکتوری‌ای که بهش می‌دیم خودش دیتاها رو لود و ترین می‌کند؟ می‌خوام ببینم روش کارم درسته: من دایرکتوری رو مشخص کردم و با fit_generator دیتاها لود و ترین می‌شوند؛ نیازی به حلقه برای خوندن دیتاست نیست؟
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras import optimizers
from RoiPooling import RoiPooling
import numpy as np
from SpatialPyramidPooling import SpatialPyramidPooling
from keras.callbacks import EarlyStopping
# ---------------------------------------------------------------------------
# Configuration: image geometry, dataset locations and training hyperparams.
# ---------------------------------------------------------------------------
img_width, img_height = 224, 224          # all images are resized to 224x224

train_data_dir = 'MyData/textDis_training'
validation_data_dir = 'MyData/textDis_val'
nb_train_samples = 11302                  # training images on disk
nb_validation_samples = 1000              # validation images on disk
epochs = 500
batch_size = 15

# ROI / SPP settings.  The corresponding layers are currently commented out
# in the model below; the constants are kept for when they are re-enabled.
# (The original file assigned pooling_regions/num_rois/num_channels twice;
# only the last — effective — values are kept here.)
pooling_regions = [1, 2, 4]
num_rois = 2
num_channels = 3
img_size = 224
num_classes = 2

# Build the input shape according to the backend's data-format convention.
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
in_img = input_shape

# Shape of the ROI input: `num_rois` boxes of 4 coordinates each.
# NOTE(review): the original line was `in_roi = Input_shape=(num_rois, 4)` —
# a chained assignment that only creates a tuple.  Presumably a Keras
# `Input(shape=(num_rois, 4))` tensor was intended (but `Input` is not
# imported); kept as a plain tuple to preserve behaviour.
in_roi = (num_rois, 4)
# VGG16-style convolutional backbone, followed by three transposed
# convolutions (feature-map upsampling) and a small dense classification
# head.  Commented-out experimental layers (RoiPooling / SPP variants) from
# the original were removed; the live architecture is unchanged.
model = Sequential([
    # Conv block 1: 64 filters.
    Conv2D(64, (3, 3), input_shape=input_shape, padding='same',
           activation='relu'),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),

    # Conv block 2: 128 filters.
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),

    # Conv block 3: 256 filters.
    Conv2D(256, (3, 3), activation='relu', padding='same'),
    Conv2D(256, (3, 3), activation='relu', padding='same'),
    Conv2D(256, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),

    # Conv block 4: 512 filters.
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),

    # Conv block 5: 512 filters.
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    Conv2D(512, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),

    # Upsampling path: 1x1 projection, then 2x and 4x transposed convs.
    Conv2DTranspose(128, (1, 1), activation='relu', padding='same'),
    Conv2DTranspose(256, (4, 4), activation='relu', padding='same',
                    strides=(2, 2)),
    Conv2DTranspose(256, (8, 8), activation='relu', padding='same',
                    strides=(4, 4)),

    # NOTE(review): this Dense is applied to a 4-D feature map (before
    # Flatten), so it acts per spatial position along the channel axis —
    # possibly unintended, but kept to preserve the original behaviour.
    Dense(num_classes),

    # Classification head.
    Flatten(),
    Dense(256, activation='relu'),
    Dense(256, activation='relu'),
    Dense(2, activation='softmax'),
])
# Optimizer: RMSprop with a low learning rate.
# BUG FIX: the original code constructed an RMSprop optimizer but then
# passed the string 'sgd' to `compile`, so the configured optimizer (and
# its lr=1e-4) was silently ignored.  Pass the instance so the configured
# hyperparameters actually take effect.
rmsprop = optimizers.RMSprop(lr=0.0001, rho=0.9, epsilon=None, decay=0.0)

model.compile(loss='categorical_crossentropy',
              optimizer=rmsprop,
              metrics=['accuracy'])

# Print the layer-by-layer architecture and parameter counts.
model.summary()
# On-the-fly augmentation for training: rescale pixel values to [0, 1] and
# apply random shears, zooms and horizontal flips per batch.
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

# Validation images are only rescaled — no augmentation.
test_datagen = ImageDataGenerator(rescale=1. / 255)
# flow_from_directory scans the given directory, infers class labels from
# its sub-directory names, and yields (images, one-hot labels) batches
# indefinitely — no manual loading loop over the dataset is needed.
train_generator = train_datagen.flow_from_directory(
    directory=train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical',
)

validation_generator = test_datagen.flow_from_directory(
    directory=validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical',
)
# fit_generator drives the entire training loop itself: every step it pulls
# the next batch from `train_generator` (which loads and augments images on
# demand), so no explicit `for` loop over the dataset is required.
# `steps_per_epoch` tells it how many batches make up one epoch, since a
# directory generator yields batches forever.
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
# Persist only the learned weights (not the architecture) to an HDF5 file.
model.save_weights('flowers.h5')