# train_model_VGG_full.py
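"""Fine-tune a full VGG16 network for binary image classification.

The convolutional base of VGG16 (ImageNet weights) is combined with a
small fully connected classifier whose weights were pre-trained on
bottleneck features, then the whole network is trained on augmented
128x128 images. The output model path is read from sys.argv[1].
"""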
import sys

from keras import applications, optimizers
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Dropout, Flatten
from keras.models import Model, Sequential
from keras.preprocessing.image import ImageDataGenerator
# Path to the pre-trained top-model weights.
#weights_path = '../keras/examples/vgg16_weights.h5'
top_model_weights_path = 'models/bottleneck_fc_model.h5'

# Dimensions of our images.
img_width, img_height = 128, 128

train_data_dir = 'data_scaled/'
validation_data_dir = 'data_scaled_validation/'
nb_train_samples = 1763
nb_validation_samples = 194
epochs = 100
batch_size = 16
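# With batch_size = 16 this works out to roughly 110 training steps and
# 12 validation steps per epoch (sample count // batch size).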
# Build the VGG16 convolutional base with ImageNet weights and no top classifier.
model = applications.VGG16(weights='imagenet', include_top=False,
                           input_shape=(img_width, img_height, 3))
print('Model loaded.')
# Build a small fully connected classifier to put on top of the
# convolutional base.
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))

# Fine-tuning only works if it starts from a fully trained classifier,
# so load the weights learned on the bottleneck features.
top_model.load_weights(top_model_weights_path)

# Stack the classifier on top of the convolutional base.
model = Model(inputs=model.input, outputs=top_model(model.output))
# Optionally freeze the first 25 layers (everything up to the last conv
# block) so their weights are not updated during fine-tuning:
# for layer in model.layers[:25]:
#     layer.trainable = False

# Compile with SGD, momentum and a very low learning rate so that the
# pre-trained weights are only adjusted gently.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])
# Data augmentation: horizontal/vertical flips only; rescaling, shearing
# and zooming are left disabled.
train_datagen = ImageDataGenerator(
    # rescale=1. / 255,
    # shear_range=0.2,
    # zoom_range=0.2,
    horizontal_flip=True,
    vertical_flip=True)

# No augmentation for validation data.
test_datagen = ImageDataGenerator()
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')
# Fine-tune the model, checkpointing the best weights both by validation
# accuracy and by validation loss. The output filename comes from argv[1].
# (On Keras >= 2.3 the accuracy metric is named 'val_accuracy' instead.)
best_model_VA = ModelCheckpoint('BM_VA_' + sys.argv[1], monitor='val_acc',
                                mode='max', verbose=1, save_best_only=True)
best_model_VL = ModelCheckpoint('BM_VL_' + sys.argv[1], monitor='val_loss',
                                mode='min', verbose=1, save_best_only=True)

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    callbacks=[best_model_VA, best_model_VL])
print('saving model...', sys.argv[1])
model.save(sys.argv[1])
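# Usage sketch (an assumption based on how argv[1] is used above, not a
# documented interface):
#
#   python train_model_VGG_full.py vgg16_finetuned.h5
#
# This writes BM_VA_vgg16_finetuned.h5 (best validation accuracy),
# BM_VL_vgg16_finetuned.h5 (best validation loss) and, at the end,
# vgg16_finetuned.h5 with the final weights.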