import argparse
import os
import numpy as np

parser = argparse.ArgumentParser(description='Run CNN training on patches with a few different hyperparameter sets.')
parser.add_argument('-c', '--config', help="JSON with script configuration", default='config.json')
parser.add_argument('-o', '--output', help="Output model file name", default='model')
parser.add_argument('-g', '--gpu', help="Which GPU index", default='0')
args = parser.parse_args()
# Select the backend and the visible GPU before TensorFlow/Keras are imported.
os.environ['KERAS_BACKEND'] = "tensorflow"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

import tensorflow as tf
import keras
if keras.__version__[0] != '2':
    print 'Please use the newest Keras 2.x.x API with the Tensorflow backend'

keras.backend.set_image_data_format('channels_last')
keras.backend.set_image_dim_ordering('tf')
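# Both calls request (rows, cols, channels) tensors, matching the
# (img_rows, img_cols, 1) patches used below; set_image_dim_ordering('tf') is
# the older Keras alias for the same channels-last setting.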
from keras.models import Model
from keras.layers import Input
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.utils import np_utils
from os.path import exists, isfile, join

from utils import read_config, get_patch_size, count_events
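# The utils helpers imported above are not shown in this listing. From how they
# are used below, read_config is assumed to load the JSON file passed via
# --config, get_patch_size to return the two patch dimensions
# (PATCH_SIZE_W, PATCH_SIZE_D) of the prepared data, and count_events to count
# the patches stored under the matching 'training'/'testing' subdirectories.
# A minimal sketch of the assumed read_config, for reference only:
#
#   def read_config(path):
#       import json
#       with open(path, 'r') as f:
#           return json.load(f)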
def save_model(model, name):
    # Serialize the architecture as JSON and the weights as HDF5; return a
    # success flag so the caller can report a failed save.
    try:
        with open(name + '_architecture.json', 'w') as f:
            f.write(model.to_json())
        model.save_weights(name + '_weights.h5', overwrite=True)
        return True
    except Exception:
        return False
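# A model saved by save_model can be restored with the standard Keras 2 calls
# (sketch, assuming the same file-name convention):
#
#   from keras.models import model_from_json
#   with open(name + '_architecture.json') as f:
#       restored = model_from_json(f.read())
#   restored.load_weights(name + '_weights.h5')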
print 'Reading configuration...'
config = read_config(args.config)

CNN_INPUT_DIR = config['training_on_patches']['input_dir']

# input image dimensions: the patch size stored with the prepared data
PATCH_SIZE_W, PATCH_SIZE_D = get_patch_size(CNN_INPUT_DIR)
img_rows, img_cols = PATCH_SIZE_W, PATCH_SIZE_D
batch_size = config['training_on_patches']['batch_size']
nb_classes = config['training_on_patches']['nb_classes']
nb_epoch = config['training_on_patches']['nb_epoch']
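# config.json is assumed to provide at least the keys read above, e.g.:
#
#   {"training_on_patches": {"input_dir": "/path/to/patches",
#                            "batch_size": 256, "nb_classes": 4, "nb_epoch": 25}}
#
# The hyperparameters referenced in the model definition below (filter counts,
# kernel sizes, activations, dropout rates, dense-layer sizes) are set earlier
# in the full script; the values here are illustrative placeholders only.
nb_filters1, nb_conv1, convactfn1 = 48, 5, 'relu'    # 1st convolutional layer
nb_filters2, nb_conv2, convactfn2 = 32, 7, 'relu'    # 2nd convolutional layer
nb_pool = 2                                          # max-pooling window
drop1, drop2 = 0.2, 0.2                              # dropout rates
densesize1, actfn1 = 128, 'relu'                     # 1st dense layer
densesize2, actfn2 = 32, 'relu'                      # 2nd dense layer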
cfg_name = 'sgd_lorate'

print 'Compiling CNN model...'
with tf.device('/gpu:' + args.gpu):
    main_input = Input(shape=(img_rows, img_cols, 1), name='main_input')
    if convactfn1 == 'leaky':
        x = Conv2D(nb_filters1, (nb_conv1, nb_conv1),
                   padding='valid', data_format='channels_last',
                   activation=LeakyReLU())(main_input)
    else:
        x = Conv2D(nb_filters1, (nb_conv1, nb_conv1),
                   padding='valid', data_format='channels_last',
                   activation=convactfn1)(main_input)
    x = MaxPooling2D(pool_size=(nb_pool, nb_pool))(x)
    x = Conv2D(nb_filters2, (nb_conv2, nb_conv2))(x)
    if convactfn2 == 'leaky':
        x = Conv2D(nb_filters2, (nb_conv2, nb_conv2), activation=LeakyReLU())(x)
    else:
        x = Conv2D(nb_filters2, (nb_conv2, nb_conv2), activation=convactfn2)(x)

    x = Dropout(drop1)(x)
    x = Flatten()(x)
    # fully-connected layers
    x = Dense(densesize1, activation=actfn1)(x)
    x = Dropout(drop2)(x)
    x = Dense(densesize2, activation=actfn2)(x)
    x = Dropout(drop2)(x)

    # two output heads: EM / track / none classification and the Michel tag
    em_trk_none = Dense(3, activation='softmax', name='em_trk_none_netout')(x)
    michel = Dense(1, activation='sigmoid', name='michel_netout')(x)
    sgd = SGD(lr=0.01, decay=1e-5, momentum=0.9, nesterov=True)
    model = Model(inputs=[main_input], outputs=[em_trk_none, michel])
    model.compile(optimizer=sgd,
                  loss={'em_trk_none_netout': 'categorical_crossentropy',
                        'michel_netout': 'mean_squared_error'},
                  loss_weights={'em_trk_none_netout': 0.1, 'michel_netout': 1.})
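    # Keras matches the loss and loss_weights entries to the output layers by
    # name, so the optimized objective is
    # 0.1 * categorical_crossentropy(em_trk_none) + 1.0 * mse(michel).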
# Pre-allocate the training arrays; n_training is assumed to be obtained via
# count_events(CNN_INPUT_DIR, 'training').
n_training = count_events(CNN_INPUT_DIR, 'training')
X_train = np.zeros((n_training, PATCH_SIZE_W, PATCH_SIZE_D, 1), dtype=np.float32)
EmTrkNone_train = np.zeros((n_training, 3), dtype=np.int32)
Michel_train = np.zeros((n_training, 1), dtype=np.int32)
print 'Training data size:', n_training, 'events; patch size:', PATCH_SIZE_W, 'x', PATCH_SIZE_D
ntot = 0
subdirs = [f for f in os.listdir(CNN_INPUT_DIR) if 'training' in f]
for dirname in subdirs:
    print 'Reading data in', dirname
    filesX = [f for f in os.listdir(CNN_INPUT_DIR + '/' + dirname) if '_x.npy' in f]
    for fnameX in filesX:
        print '...training data', fnameX
        fnameY = fnameX.replace('_x.npy', '_y.npy')
        dataX = np.load(CNN_INPUT_DIR + '/' + dirname + '/' + fnameX)
        if dataX.dtype != np.dtype('float32'):
            dataX = dataX.astype("float32")
        dataY = np.load(CNN_INPUT_DIR + '/' + dirname + '/' + fnameY)
        n = dataY.shape[0]  # number of patches in this file pair
        X_train[ntot:ntot+n] = dataX.reshape(n, img_rows, img_cols, 1)
        EmTrkNone_train[ntot:ntot+n] = dataY[:, [0, 1, 3]]
        Michel_train[ntot:ntot+n] = dataY[:, [2]]
        ntot += n
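# Column layout of the *_y.npy label arrays, as implied by the indexing above:
# columns 0, 1 and 3 form the one-hot EM / track / none target, column 2 is
# the Michel tag.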
print ntot, 'events ready'

# Pre-allocate the testing arrays; n_testing is assumed to be obtained via
# count_events(CNN_INPUT_DIR, 'testing').
n_testing = count_events(CNN_INPUT_DIR, 'testing')
X_test = np.zeros((n_testing, PATCH_SIZE_W, PATCH_SIZE_D, 1), dtype=np.float32)
EmTrkNone_test = np.zeros((n_testing, 3), dtype=np.int32)
Michel_test = np.zeros((n_testing, 1), dtype=np.int32)
print 'Testing data size:', n_testing, 'events'

ntot = 0
subdirs = [f for f in os.listdir(CNN_INPUT_DIR) if 'testing' in f]
for dirname in subdirs:
    print 'Reading data in', dirname
    filesX = [f for f in os.listdir(CNN_INPUT_DIR + '/' + dirname) if '_x.npy' in f]
    for fnameX in filesX:
        print '...testing data', fnameX
        fnameY = fnameX.replace('_x.npy', '_y.npy')
        dataX = np.load(CNN_INPUT_DIR + '/' + dirname + '/' + fnameX)
        if dataX.dtype != np.dtype('float32'):
            dataX = dataX.astype("float32")
        dataY = np.load(CNN_INPUT_DIR + '/' + dirname + '/' + fnameY)
        n = dataY.shape[0]
        X_test[ntot:ntot+n] = dataX.reshape(n, img_rows, img_cols, 1)
        EmTrkNone_test[ntot:ntot+n] = dataY[:, [0, 1, 3]]
        Michel_test[ntot:ntot+n] = dataY[:, [2]]
        ntot += n
print ntot, 'events ready'

print 'Training', X_train.shape, 'testing', X_test.shape
# Data augmentation: no normalization or shifts, only random horizontal flips.
datagen = ImageDataGenerator(
    featurewise_center=False, samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    rotation_range=0, width_shift_range=0, height_shift_range=0,
    horizontal_flip=True)
def generate_data_generator(generator, X, Y1, Y2, b):
    # Run two augmentation streams with the same seed so that both label sets
    # stay aligned with the augmented patches.
    genY1 = generator.flow(X, Y1, batch_size=b, seed=7)
    genY2 = generator.flow(X, Y2, batch_size=b, seed=7)
    while True:
        g1 = genY1.next()
        g2 = genY2.next()
        yield {'main_input': g1[0]}, {'em_trk_none_netout': g1[1], 'michel_netout': g2[1]}
print 'Fit config:', cfg_name
h = model.fit_generator(
    # training stream: assumed to pair augmented patches with both label sets
    generate_data_generator(datagen, X_train, EmTrkNone_train, Michel_train, batch_size),
    validation_data=(
        {'main_input': X_test},
        {'em_trk_none_netout': EmTrkNone_test, 'michel_netout': Michel_test}),
    steps_per_epoch=X_train.shape[0] / batch_size, epochs=nb_epoch)
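# steps_per_epoch counts generator batches, so X_train.shape[0] / batch_size
# (integer division under Python 2) drops any partial final batch.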
EmTrkNone_train = None   # release training labels that are no longer needed

score = model.evaluate({'main_input': X_test},
                       {'em_trk_none_netout': EmTrkNone_test, 'michel_netout': Michel_test})
print('Test score:', score)
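# With two outputs and no extra metrics, score is expected to be a list:
# [total weighted loss, em_trk_none_netout loss, michel_netout loss]
# (see model.metrics_names for the exact ordering).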
EmTrkNone_test = None

# training and validation loss per epoch, as recorded by fit_generator
print h.history['loss']
print h.history['val_loss']
# Persist the trained network; the output name is assumed to combine the
# --output prefix with the hyperparameter-set name.
if not save_model(model, args.output + cfg_name):
    print('Error: model not saved.')
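# Typical invocation (script name and paths are illustrative):
#   python train_cnn.py --config config.json --output model --gpu 0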