import os
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt

classes_num = 6  # number of image classes (one sub-folder per class)
# Let the GPU allocate memory on demand instead of grabbing it all up front
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# Limit console output: hide INFO-level TensorFlow log messages
# (ideally set before importing tensorflow)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# GPU ids visible to this process
os.environ['CUDA_VISIBLE_DEVICES'] = '0'


train_dir = './train'          # training images, one sub-folder per class
validation_dir = './test'      # validation images, same folder layout

# Training-set augmentation: random rotations, shifts, shears, zooms and flips
train_datagen = ImageDataGenerator(
        rescale=1./255,
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')

# Validation images are only rescaled, never augmented
test_datagen = ImageDataGenerator(rescale=1./255)


train_generator = train_datagen.flow_from_directory(
        train_dir,
        # All images are resized to 64x64
        target_size=(64, 64),
        batch_size=64,
        # categorical class_mode yields one-hot labels for categorical_crossentropy
        class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(64, 64),
        batch_size=64,
        class_mode='categorical')
# Model definition: a VGG-style CNN built from scratch
model = tf.keras.Sequential()

# Convolutional block 1
model.add(tf.keras.layers.Conv2D(64, (3, 3), input_shape=(64, 64, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu'))
# Pooling layer 1
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(tf.keras.layers.BatchNormalization())
#model.add(tf.keras.layers.Dropout(0.25))


# Convolutional block 2
model.add(tf.keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
# Pooling layer 2
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(tf.keras.layers.BatchNormalization())
#model.add(tf.keras.layers.Dropout(0.25))



# Convolutional block 3
model.add(tf.keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
# Pooling layer 3
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(tf.keras.layers.BatchNormalization())
#model.add(tf.keras.layers.Dropout(0.25))


# Convolutional block 4
model.add(tf.keras.layers.Conv2D(512, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.Conv2D(512, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.Conv2D(512, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.Conv2D(512, (3, 3), padding='same', activation='relu'))
# Pooling layer 4
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(tf.keras.layers.BatchNormalization())
#model.add(tf.keras.layers.Dropout(0.25))

# Convolutional block 5
model.add(tf.keras.layers.Conv2D(1024, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.Conv2D(1024, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.Conv2D(1024, (3, 3), padding='same', activation='relu'))
model.add(tf.keras.layers.Conv2D(1024, (3, 3), padding='same', activation='relu'))
# Pooling layer 5
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(tf.keras.layers.BatchNormalization())
#model.add(tf.keras.layers.Dropout(0.25))




# Flatten layer
model.add(tf.keras.layers.Flatten())
# Fully connected layer 1 (the Dense layer already applies ReLU, so no extra Activation is needed)
model.add(tf.keras.layers.Dense(1024, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
# Fully connected layer 2: softmax over the classes
model.add(tf.keras.layers.Dense(classes_num, activation='softmax'))

model.summary()

# One-hot labels from class_mode='categorical' pair with the categorical_crossentropy loss
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(0.0001),
              metrics=['accuracy'])

# fit() accepts generators directly; fit_generator() is deprecated in TF 2.x
history = model.fit(
        train_generator,
        steps_per_epoch=100,
        epochs=1000,
        validation_data=validation_generator,
        validation_steps=50,
        verbose=1)

# Save the trained model in HDF5 format
model.save('my_model.h5')
accuracy = history.history['accuracy']
val_accuracy = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(accuracy))

plt.plot(epochs, accuracy, 'bo', label='Training acc')
plt.plot(epochs, val_accuracy, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()

With this kind of data, training usually reaches a recognition accuracy above 90% fairly quickly.
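As a quick sanity check of that recognition rate, the saved model can be reloaded and run on a single image. This is a minimal sketch, assuming the my_model.h5 file produced above and a hypothetical image path 'sample.jpg'; it reuses train_generator.class_indices from the script to map the predicted index back to a class folder name.

import numpy as np

# Reload the model saved above and classify one image
model = tf.keras.models.load_model('my_model.h5')

# 'sample.jpg' is a placeholder path, not part of the original script
img = tf.keras.preprocessing.image.load_img('sample.jpg', target_size=(64, 64))
x = tf.keras.preprocessing.image.img_to_array(img) / 255.0   # same 1/255 rescaling as training
x = np.expand_dims(x, axis=0)                                # add a batch dimension

probs = model.predict(x)[0]
# class_indices maps sub-folder names to label indices; invert it to decode the prediction
index_to_class = {v: k for k, v in train_generator.class_indices.items()}
print(index_to_class[int(np.argmax(probs))], float(np.max(probs)))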
