# my_callbacks.py — custom Keras callbacks: multi-GPU-aware checkpointing,
# learning-rate schedules, and per-iteration metric logging.
import math
import os
import warnings

import numpy as np

import keras
from keras import backend as K

class MultiGPUCheckpointCallback(keras.callbacks.Callback):
    """Checkpoint callback that saves an explicitly supplied base model.

    When training a multi-GPU wrapper model, saving ``self.model`` would
    serialize the replicated wrapper; this callback saves the wrapped
    single-device ``base_model`` instead.

    # Arguments
        filepath: string, path to save the model file. May contain named
            formatting options filled with `epoch` and keys in `logs`
            (e.g. `weights.{epoch:02d}-{val_loss:.2f}.hdf5`).
        base_model: the underlying (non-parallel) Keras model to save.
        monitor: quantity to monitor.
        verbose: verbosity mode, 0 or 1.
        save_best_only: if True, only save when the monitored quantity
            improves.
        save_weights_only: if True, only the weights are saved
            (`save_weights`), otherwise the full model (`save`).
        mode: one of {'auto', 'min', 'max'}; in 'auto' the direction is
            inferred from the monitor name.
        period: interval (number of epochs) between checkpoints.
    """

    def __init__(self, filepath, base_model, monitor='val_loss', verbose=0,
                 save_best_only=False, save_weights_only=False,
                 mode='auto', period=1):
        super(MultiGPUCheckpointCallback, self).__init__()
        self.base_model = base_model
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        self.save_best_only = save_best_only
        self.save_weights_only = save_weights_only
        self.period = period
        # Bug fix: this counter is read in on_epoch_end but was never
        # initialized, raising AttributeError on the first epoch.
        self.epochs_since_last_save = 0

        if mode not in ['auto', 'min', 'max']:
            warnings.warn('ModelCheckpoint mode %s is unknown, '
                          'fallback to auto mode.' % (mode),
                          RuntimeWarning)
            mode = 'auto'

        if mode == 'min':
            self.monitor_op = np.less
            self.best = np.Inf
        elif mode == 'max':
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            # 'auto': accuracy-like metrics should increase, everything
            # else (losses) should decrease.
            if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
                self.monitor_op = np.greater
                self.best = -np.Inf
            else:
                self.monitor_op = np.less
                self.best = np.Inf

    def on_epoch_end(self, epoch, logs=None):
        """Save `base_model` every `period` epochs (best-only if requested)."""
        logs = logs or {}
        self.epochs_since_last_save += 1
        if self.epochs_since_last_save >= self.period:
            self.epochs_since_last_save = 0
            filepath = self.filepath.format(epoch=epoch + 1, **logs)
            if self.save_best_only:
                current = logs.get(self.monitor)
                if current is None:
                    warnings.warn('Can save best model only with %s available, '
                                  'skipping.' % (self.monitor), RuntimeWarning)
                else:
                    if self.monitor_op(current, self.best):
                        if self.verbose > 0:
                            print('Epoch %05d: %s improved from %0.5f to %0.5f,'
                                  ' saving model to %s'
                                  % (epoch + 1, self.monitor, self.best,
                                     current, filepath))
                        self.best = current
                        if self.save_weights_only:
                            self.base_model.save_weights(filepath, overwrite=True)
                        else:
                            self.base_model.save(filepath, overwrite=True)
                    else:
                        if self.verbose > 0:
                            print('Epoch %05d: %s did not improve' %
                                  (epoch + 1, self.monitor))
            else:
                if self.verbose > 0:
                    print('Epoch %05d: saving model to %s' % (epoch + 1, filepath))
                if self.save_weights_only:
                    self.base_model.save_weights(filepath, overwrite=True)
                else:
                    self.base_model.save(filepath, overwrite=True)
77 
def detachmodel(m, name='resnext'):
    """Detach a model trained on GPUs from its multi-GPU encapsulation.

    Scans the wrapper's layers for the inner sub-model identified by
    *name* and returns it; if no such layer exists, *m* itself is
    returned (i.e. the model was not wrapped).

    # Arguments
        :param m: obj, Keras model (possibly a multi-GPU wrapper)
        :param name: str, layer name of the inner model
            (default 'resnext' for backward compatibility)
    # Returns
        :return: obj, the detached inner model, or *m* unchanged
    """
    for layer in m.layers:
        if layer.name == name:
            return layer
    return m
91 
class ModelCheckpointDetached(keras.callbacks.Callback):
    """Save the model detached from its multi-GPU encapsulation.

    (Very small) modification of
    https://github.com/fchollet/keras/blob/master/keras/callbacks.py#L331
    that passes `self.model` through `detachmodel()` before saving.

    `filepath` can contain named formatting options,
    which will be filled the value of `epoch` and
    keys in `logs` (passed in `on_epoch_end`).

    For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
    then the model checkpoints will be saved with the epoch number and
    the validation loss in the filename.

    # Arguments
        filepath: string, path to save the model file.
        monitor: quantity to monitor.
        verbose: verbosity mode, 0 or 1.
        save_best_only: if `save_best_only=True`,
            the latest best model according to
            the quantity monitored will not be overwritten.
        mode: one of {auto, min, max}.
            If `save_best_only=True`, the decision
            to overwrite the current save file is made
            based on either the maximization or the
            minimization of the monitored quantity. For `val_acc`,
            this should be `max`, for `val_loss` this should
            be `min`, etc. In `auto` mode, the direction is
            automatically inferred from the name of the monitored quantity.
        save_weights_only: if True, then only the model's weights will be
            saved (`model.save_weights(filepath)`), else the full model
            is saved (`model.save(filepath)`).
        period: Interval (number of epochs) between checkpoints.
    """

    def __init__(self, filepath, monitor='val_loss', verbose=0,
                 save_best_only=False, save_weights_only=False,
                 mode='auto', period=1):
        super(ModelCheckpointDetached, self).__init__()
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        self.save_best_only = save_best_only
        self.save_weights_only = save_weights_only
        self.period = period
        # Bug fix: this counter is read in on_epoch_end but was never
        # initialized, raising AttributeError on the first epoch.
        self.epochs_since_last_save = 0

        if mode not in ['auto', 'min', 'max']:
            warnings.warn('ModelCheckpoint mode %s is unknown, '
                          'fallback to auto mode.' % mode, RuntimeWarning)
            mode = 'auto'

        if mode == 'min':
            self.monitor_op = np.less
            self.best = np.Inf
        elif mode == 'max':
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            # 'auto': accuracy-like metrics should increase, losses decrease.
            if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
                self.monitor_op = np.greater
                self.best = -np.Inf
            else:
                self.monitor_op = np.less
                self.best = np.Inf

    def on_epoch_end(self, epoch, logs=None):
        """Save the detached model every `period` epochs (best-only if requested)."""
        logs = logs or {}
        self.epochs_since_last_save += 1
        if self.epochs_since_last_save >= self.period:
            self.epochs_since_last_save = 0
            filepath = self.filepath.format(epoch=epoch, **logs)
            if self.save_best_only:
                current = logs.get(self.monitor)
                if current is None:
                    warnings.warn('Can save best model only with %s available, '
                                  'skipping.' % self.monitor, RuntimeWarning)
                else:
                    if self.monitor_op(current, self.best):
                        if self.verbose > 0:
                            print('Epoch %05d: %s improved from %0.5f to %0.5f,'
                                  ' saving model to %s'
                                  % (epoch, self.monitor, self.best,
                                     current, filepath))
                        self.best = current
                        if self.save_weights_only:
                            detachmodel(self.model).save_weights(filepath, overwrite=True)
                        else:
                            detachmodel(self.model).save(filepath, overwrite=True)
                    else:
                        if self.verbose > 0:
                            print('Epoch %05d: %s did not improve' %
                                  (epoch, self.monitor))
            else:
                if self.verbose > 0:
                    print('Epoch %05d: saving model to %s' % (epoch, filepath))
                if self.save_weights_only:
                    detachmodel(self.model).save_weights(filepath, overwrite=True)
                else:
                    detachmodel(self.model).save(filepath, overwrite=True)
191 
class MyCallback(keras.callbacks.Callback):
    """Multiply the optimizer's learning rate by 0.95 after every epoch."""

    def on_epoch_end(self, epoch, logs=None):
        # Bug fix: the original used Python 2 print statements, which are
        # syntax errors under Python 3 (the rest of this file uses print()).
        current_lr = K.get_value(self.model.optimizer.lr)
        print('Learning rate:', current_lr)
        K.set_value(self.model.optimizer.lr, current_lr * 0.95)
        print('New learning rate:', K.get_value(self.model.optimizer.lr))
201 
class InceptionV4Callback(keras.callbacks.Callback):
    """Multiply the optimizer's learning rate by 0.94 once, at train start."""

    def on_train_begin(self, logs=None):
        # Bug fix: the original used Python 2 print statements, which are
        # syntax errors under Python 3 (the rest of this file uses print()).
        current_lr = K.get_value(self.model.optimizer.lr)
        print('Learning rate:', current_lr)
        K.set_value(self.model.optimizer.lr, current_lr * 0.94)
        print('New learning rate:', K.get_value(self.model.optimizer.lr))
211 
class InceptionV3Callback(keras.callbacks.Callback):
    """Multiply the optimizer's learning rate by 0.94 every second epoch."""

    def on_epoch_end(self, epoch, logs=None):
        # Decay only on odd epoch indices, i.e. every two epochs.
        if epoch % 2 == 1:
            # Bug fix: the original used Python 2 print statements, which
            # are syntax errors under Python 3.
            current_lr = K.get_value(self.model.optimizer.lr)
            print('Learning rate:', current_lr)
            K.set_value(self.model.optimizer.lr, current_lr * 0.94)
            print('New learning rate:', K.get_value(self.model.optimizer.lr))
222 
class IterationsCallback(keras.callbacks.Callback):
    """Log train/validation metrics to a file every 1000 iterations.

    Accumulates `[iter, acc, loss, val_acc, val_loss]` rows in memory
    during an epoch and appends them to `self.fil` at epoch end. If the
    log file already contains rows, the iteration counter resumes from
    the last logged value.

    # Arguments
        validation_generator: generator yielding validation batches for
            `evaluate_generator`.
        validation_steps: number of validation batches to evaluate.
    """

    def __init__(self, validation_generator, validation_steps):
        # Bug fix: the base Callback initializer was never called.
        super(IterationsCallback, self).__init__()
        self.validation_generator = validation_generator
        self.validation_steps = validation_steps
        self.fil = '/scratch/cvn/branch/log/resnet34'

    def on_train_begin(self, logs=None):
        self.losses = []
        self.iteration = 0

        # Bug fix: the original mode string 'ar+' is invalid and raises
        # ValueError; 'a+' creates the file if missing and allows reading.
        with open(self.fil, 'a+') as fil:
            if os.stat(self.fil).st_size == 0:
                # Fresh log: emit a header row on the first flush.
                self.losses.append(['iter', 'acc', 'loss', 'val_acc', 'val_loss'])
            else:
                # 'a+' positions the file pointer at EOF; rewind before
                # reading, then resume after the last logged iteration
                # (the [-2] index skips the trailing newline).
                fil.seek(0)
                self.iteration = int(fil.read().split('\n')[-2].split(' ')[0]) + 1

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        if self.iteration % 1000 == 0:
            # NOTE(review): assumes evaluate_generator returns
            # [loss, accuracy] in that order — confirm against the
            # model's compiled metrics.
            val_loss, val_acc = self.model.evaluate_generator(
                self.validation_generator, steps=self.validation_steps)
            self.losses.append([self.iteration, logs.get('acc'),
                                logs.get('loss'), val_acc, val_loss])
        self.iteration += 1

    def on_epoch_end(self, epoch, logs=None):
        # Flush the buffered rows, space-separated, one per line.
        with open(self.fil, 'a') as fil:
            for iteration, acc, loss, val_acc, val_loss in self.losses:
                fil.write(str(iteration) + ' ' + str(acc) + ' ' + str(loss) +
                          ' ' + str(val_acc) + ' ' + str(val_loss) + '\n')
        self.losses = []
def on_epoch_end(self, epoch, logs=None)
int open(const char *, int)
Opens a file descriptor.
def on_batch_end(self, batch, logs={})
def on_epoch_end(self, epoch, logs=None)
Definition: my_callbacks.py:42
def __init__(self, filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
def on_epoch_end(self, epoch, logs={})
def save(obj, fname)
Definition: root.py:19
def on_train_begin(self, logs={})
def on_epoch_end(self, epoch, logs={})
def detachmodel(m)
Definition: my_callbacks.py:78
void split(std::string const &s, char c, OutIter dest)
Definition: split.h:35
def on_epoch_end(self, epoch, logs={})
def __init__(self, filepath, base_model, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
Definition: my_callbacks.py:11
static QCString str
def __init__(self, validation_generator, validation_steps)
def on_train_begin(self, logs={})