test.py
"""
This is the test module.
"""

__version__ = '1.0'
__author__ = 'Saul Alonso-Monsalve'
__email__ = "saul.alonso.monsalve@cern.ch"

import numpy as np
import pickle
import configparser
import ast
import re
import logging
import sys
import os
import time

sys.path.append(os.path.join(sys.path[0], 'modules'))

from random import shuffle
from keras import optimizers
from sklearn.metrics import classification_report, confusion_matrix
from os import listdir
from os.path import isfile, join
from keras.models import load_model
from data_generator import DataGenerator
from collections import Counter
import my_losses

# manually specify the GPUs to use
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

'''
****************************************
************** PARAMETERS **************
****************************************
'''

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

config = configparser.ConfigParser()
config.read('config/config.ini')

# random

SEED = int(config['random']['seed'])

if SEED == -1:
    SEED = int(time.time())

np.random.seed(SEED)
SHUFFLE = ast.literal_eval(config['random']['shuffle'])
SHUFFLE = False
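# NOTE: the configured shuffle flag is overridden to False for testing,
# presumably so that the generator yields samples in a fixed order and the
# predictions can be matched back to the entries collected in test_values.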

# images

IMAGES_PATH = config['images']['path']
VIEWS = int(config['images']['views'])
PLANES = int(config['images']['planes'])
CELLS = int(config['images']['cells'])
STANDARDIZE = ast.literal_eval(config['images']['standardize'])

# dataset

DATASET_PATH = config['dataset']['path']
PARTITION_PREFIX = config['dataset']['partition_prefix']
LABELS_PREFIX = config['dataset']['labels_prefix']

# model

CHECKPOINT_PATH = config['model']['checkpoint_path']
CHECKPOINT_PREFIX = config['model']['checkpoint_prefix']
CHECKPOINT_SAVE_MANY = ast.literal_eval(config['model']['checkpoint_save_many'])
CHECKPOINT_SAVE_BEST_ONLY = ast.literal_eval(config['model']['checkpoint_save_best_only'])
PRINT_SUMMARY = ast.literal_eval(config['model']['print_summary'])
BRANCHES = ast.literal_eval(config['model']['branches'])
PARALLELIZE = ast.literal_eval(config['model']['parallelize'])
OUTPUTS = int(config['model']['outputs'])

# test

OUTPUT_PATH = config['test']['output_path']
OUTPUT_PREFIX = config['test']['output_prefix']
CUT_NUE = float(config['test']['cut_nue'])
CUT_NUMU = float(config['test']['cut_numu'])
CUT_NUTAU = float(config['test']['cut_nutau'])
CUT_NC = float(config['test']['cut_nc'])
TEST_BATCH_SIZE = int(config['test']['batch_size'])

# test params

test_values = []

TEST_PARAMS = {'planes': PLANES,
               'cells': CELLS,
               'views': VIEWS,
               'batch_size': TEST_BATCH_SIZE,
               'branches': BRANCHES,
               'outputs': OUTPUTS,
               'images_path': IMAGES_PATH,
               'standardize': STANDARDIZE,
               'shuffle': SHUFFLE,
               'test_values': test_values}
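
# test_values is handed to the DataGenerator through TEST_PARAMS; as the test
# samples are generated it is filled (in order) with their true labels,
# energies and event weights, which are used below to build the reports and
# the weighted confusion matrices.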


'''
****************************************
*************** DATASETS ***************
****************************************
'''

# Load datasets

logging.info('Reading datasets from serialized files...')

with open(DATASET_PATH + PARTITION_PREFIX + '.p', 'rb') as partition_file:
    partition = pickle.load(partition_file)

with open(DATASET_PATH + LABELS_PREFIX + '.p', 'rb') as labels_file:
    labels = pickle.load(labels_file)
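
# partition is expected to be a dict with 'train', 'validation' and 'test'
# lists of sample IDs, and labels a dict mapping each ID to its target
# value(s); both are assumed to have been pickled by the dataset-preparation
# step.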

# Print some dataset statistics

logging.info('Number of training examples: %d', len(partition['train']))
logging.info('Number of validation examples: %d', len(partition['validation']))
logging.info('Number of test examples: %d', len(partition['test']))

'''
****************************************
************** GENERATORS **************
****************************************
'''

prediction_generator = DataGenerator(**TEST_PARAMS).generate(labels, partition['test'], False)
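
# Keras' predict_generator expects this generator to yield batches of network
# inputs only (no targets); the meaning of the trailing False flag is defined
# in data_generator.py.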


'''
****************************************
************** LOAD MODEL **************
****************************************
'''

# Load model

logging.info('Loading model from disk...')

if CHECKPOINT_SAVE_MANY:
    # Load the last generated model
    files = [f for f in os.listdir(CHECKPOINT_PATH) if os.path.isfile(os.path.join(CHECKPOINT_PATH, f))]
    files.sort(reverse=True)

    r = re.compile(CHECKPOINT_PREFIX[1:] + r'-.*-.*\.h5')
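
    # Assumption: checkpoints are saved as <prefix>-<epoch>-<metric>.h5, so the
    # reverse lexicographic sort above puts the most recent file first; the [1:]
    # slice drops the leading path separator from CHECKPOINT_PREFIX.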

    for fil in files:
        if r.match(fil) is not None:
            model = load_model(CHECKPOINT_PATH + '/' + fil,
                               custom_objects={'masked_loss': my_losses.masked_loss,
                                               'multitask_loss': my_losses.multitask_loss,
                                               'masked_loss_binary': my_losses.masked_loss_binary,
                                               'masked_loss_categorical': my_losses.masked_loss_categorical})
            logging.info('Loaded model: %s', CHECKPOINT_PATH + '/' + fil)
            break
else:
    # Load the model
    model = load_model(CHECKPOINT_PATH + CHECKPOINT_PREFIX + '.h5',
                       custom_objects={'masked_loss': my_losses.masked_loss,
                                       'multitask_loss': my_losses.multitask_loss,
                                       'masked_loss_binary': my_losses.masked_loss_binary,
                                       'masked_loss_categorical': my_losses.masked_loss_categorical})

    logging.info('Loaded model: %s', CHECKPOINT_PATH + CHECKPOINT_PREFIX + '.h5')
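
# The custom loss functions from my_losses must be passed to load_model() via
# custom_objects, otherwise Keras cannot deserialize a model compiled with them.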

# Print model summary

if PRINT_SUMMARY:
    model.summary()

'''
****************************************
***************** TEST *****************
****************************************
'''

logging.info('PERFORMING TEST...\n')

is_antineutrino_target_names = ['neutrino', 'antineutrino', 'NULL']
flavour_target_names = ['CC Numu', 'CC Nue', 'CC Nutau', 'NC']
interaction_target_names = ['CC QE', 'CC Res', 'CC DIS', 'CC Other', 'NULL']
categories_target_names = ['category 0', 'category 1', 'category 2', 'category 3', 'category 4', 'category 5', 'category 6',
                           'category 7', 'category 8', 'category 9', 'category 10', 'category 11', 'category 13']
protons_target_names = ['0', '1', '2', '>2']
pions_target_names = ['0', '1', '2', '>2']
pizeros_target_names = ['0', '1', '2', '>2']
neutrons_target_names = ['0', '1', '2', '>2']

# Predict results

Y_pred = model.predict_generator(generator=prediction_generator,
                                 steps=len(partition['test'])//TEST_BATCH_SIZE,
                                 verbose=1)
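
# For a single-output network Y_pred is one 2-D array of per-category
# probabilities; for the multi-output network it is a list of seven arrays
# (is_antineutrino, flavour, interaction, protons, pions, pizeros, neutrons),
# which is what the two branches below assume.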

#np.set_printoptions(formatter={'float': lambda x: "{0:0.2f}".format(x)}, threshold=np.nan)

if OUTPUTS == 1:
    # Single-output network
    test_values = np.array(test_values[0:Y_pred.shape[0]])  # array with y true value, energies and weights

    y_pred_categories = np.argmax(Y_pred, axis=1).reshape((Y_pred.shape[0], 1))  # 1-DIM array of predicted values (categories)
    Y_pred_flavour = np.zeros((Y_pred.shape[0], 4))  # 2-DIM array of predicted values (flavour)

    y_test_categories = np.array([12 if aux['y_value'] == 13 else aux['y_value'] for aux in test_values]).reshape(y_pred_categories.shape)  # 1-DIM array of test values (categories)
    y_test_flavour = np.zeros(y_test_categories.shape, dtype=int)  # 1-DIM array of test values (flavour)

    # manually set y_pred_flavour and y_test_flavour

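    # Category-to-flavour mapping used below (a category encodes
    # flavour*4 + interaction):
    #   categories 0-3   -> CC Numu (QE, Res, DIS, Other)
    #   categories 4-7   -> CC Nue
    #   categories 8-11  -> CC Nutau
    #   category   12    -> NC (labelled 13 in the input, remapped above)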
    for i in range(Y_pred_flavour.shape[0]):
        y_test_flavour[i] = y_test_categories[i]//4  # from category (0-12) to flavour (0-3)

        # Add the interaction types for each neutrino type
        p = Y_pred[i]
        Y_pred_flavour[i][0] = p[0] + p[1] + p[2] + p[3]    # NUMU (0,1,2,3)
        Y_pred_flavour[i][1] = p[4] + p[5] + p[6] + p[7]    # NUE (4,5,6,7)
        Y_pred_flavour[i][2] = p[8] + p[9] + p[10] + p[11]  # NUTAU (8,9,10,11)
        Y_pred_flavour[i][3] = p[12]                        # NC (12, original label 13)

    y_pred_flavour = np.argmax(Y_pred_flavour, axis=1)  # 1-DIM array of predicted values (flavour)

    # flavour

    logging.info('flavour report:\n')
    print(classification_report(y_test_flavour, y_pred_flavour, target_names=flavour_target_names))
    logging.info('flavour confusion matrix (rows = predicted classes, cols = actual classes):\n')
    flavour_conf_matrix = confusion_matrix(y_pred_flavour, y_test_flavour)
    print(flavour_conf_matrix, '\n')

    # categories

    logging.info('categories report:\n')
    print(classification_report(y_test_categories, y_pred_categories, target_names=categories_target_names))
    logging.info('categories confusion matrix (rows = predicted classes, cols = actual classes):\n')
    categories_conf_matrix = confusion_matrix(y_pred_categories, y_test_categories)
    print(categories_conf_matrix, '\n')

    # Apply cuts

    logging.info('Applying a numu cut of %.2f, a nue cut of %.2f, a nutau cut of %.2f, and a NC cut of %.2f...\n' % (CUT_NUMU, CUT_NUE, CUT_NUTAU, CUT_NC))

    weighted_conf_matrix = np.zeros((4, 4), dtype='float32')
    cut_weighted_conf_matrix = np.zeros((4, 4), dtype='float32')

    for sample in range(len(Y_pred_flavour)):
        pred_flavour = int(y_pred_flavour[sample])    # get predicted class of sample
        test_flavour = int(y_test_flavour[sample])    # get actual class of sample
        weight = test_values[sample]['fEventWeight']  # event weight
        if Y_pred_flavour[sample][0] >= CUT_NUMU:
            cut_weighted_conf_matrix[0][test_flavour] += weight
        if Y_pred_flavour[sample][1] >= CUT_NUE:
            cut_weighted_conf_matrix[1][test_flavour] += weight
        if Y_pred_flavour[sample][2] >= CUT_NUTAU:
            cut_weighted_conf_matrix[2][test_flavour] += weight
        if Y_pred_flavour[sample][3] >= CUT_NC:
            cut_weighted_conf_matrix[3][test_flavour] += weight
        weighted_conf_matrix[pred_flavour][test_flavour] += weight
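
    # Each event contributes its fEventWeight: rows of cut_weighted_conf_matrix
    # correspond to the cut-based selections (numu, nue, nutau, NC), rows of
    # weighted_conf_matrix to the argmax prediction, and columns to the true
    # flavour.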

    # Confusion matrix - neutrino flavour (weighted)

    logging.info('Neutrino flavour weighted confusion matrix (rows = predicted classes, cols = actual classes):\n')
    print(weighted_conf_matrix.astype(int), '\n')
    logging.info('Neutrino flavour weighted confusion matrix (rows = predicted classes, cols = actual classes) after applying the nue and numu cuts:\n')
    print(cut_weighted_conf_matrix.astype(int), '\n')
    float_formatter = lambda x: "%.4f" % x
    np.set_printoptions(formatter={'float_kind': float_formatter})

    # Purity confusion matrix
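    # (row-normalised cut matrix: the fraction of each selected sample that
    # truly belongs to each flavour)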

    purity_conf_matrix = np.copy(cut_weighted_conf_matrix)
    for i in range(cut_weighted_conf_matrix.shape[0]):
        row_sum = np.sum(purity_conf_matrix[i])
        if row_sum > 0:
            for j in range(cut_weighted_conf_matrix.shape[1]):
                purity_conf_matrix[i][j] /= row_sum
    logging.info('Purity confusion matrix (rows = predicted classes, cols = actual classes):\n')
    print(purity_conf_matrix, '\n')

    # Efficiency confusion matrix
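    # (cut-selected weighted events divided by the total weighted events of
    # each true flavour, i.e. the column sums of the uncut weighted matrix)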

    logging.info('Efficiency confusion matrix (rows = predicted classes, cols = actual classes):\n')
    efficiency_conf_matrix = cut_weighted_conf_matrix.astype('float32') / np.add.reduce(weighted_conf_matrix)
    print(efficiency_conf_matrix, '\n')
else:
    # Multi-output network
    test_values = np.array(test_values[0:Y_pred[0].shape[0]])  # array with y true values, energies and weights

    y_pred_is_antineutrino = np.around(Y_pred[0]).reshape((Y_pred[0].shape[0], 1)).astype(int)  # 1-DIM array of predicted values (is_antineutrino)
    y_pred_flavour = np.argmax(Y_pred[1], axis=1).reshape((Y_pred[1].shape[0], 1))  # 1-DIM array of predicted values (flavour)
    y_pred_interaction = np.argmax(Y_pred[2], axis=1).reshape((Y_pred[2].shape[0], 1))  # 1-DIM array of predicted values (interaction)
    y_pred_categories = np.zeros(y_pred_flavour.shape, dtype=int)  # 1-DIM array of predicted values (categories)
    y_pred_protons = np.argmax(Y_pred[3], axis=1).reshape((Y_pred[3].shape[0], 1))  # 1-DIM array of predicted values (protons)
    y_pred_pions = np.argmax(Y_pred[4], axis=1).reshape((Y_pred[4].shape[0], 1))  # 1-DIM array of predicted values (pions)
    y_pred_pizeros = np.argmax(Y_pred[5], axis=1).reshape((Y_pred[5].shape[0], 1))  # 1-DIM array of predicted values (pizeros)
    y_pred_neutrons = np.argmax(Y_pred[6], axis=1).reshape((Y_pred[6].shape[0], 1))  # 1-DIM array of predicted values (neutrons)

    y_test_is_antineutrino = np.array([aux['y_value'][0] for aux in test_values]).reshape(y_pred_is_antineutrino.shape)
    y_test_flavour = np.array([aux['y_value'][1] for aux in test_values]).reshape(y_pred_flavour.shape)
    y_test_interaction = np.array([aux['y_value'][2] for aux in test_values]).reshape(y_pred_interaction.shape)
    y_test_categories = np.zeros(y_test_flavour.shape, dtype=int)
    y_test_protons = np.array([aux['y_value'][3] for aux in test_values]).reshape(y_pred_protons.shape)
    y_test_pions = np.array([aux['y_value'][4] for aux in test_values]).reshape(y_pred_pions.shape)
    y_test_pizeros = np.array([aux['y_value'][5] for aux in test_values]).reshape(y_pred_pizeros.shape)
    y_test_neutrons = np.array([aux['y_value'][6] for aux in test_values]).reshape(y_pred_neutrons.shape)
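
    # Each y_value is a 7-element vector:
    # [is_antineutrino, flavour, interaction, protons, pions, pizeros, neutrons]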

    # manually set y_pred_categories and y_test_categories
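    # A category is encoded as flavour*4 + interaction; NC events (flavour 3)
    # are collapsed to category 12, with is_antineutrino and interaction set to
    # their NULL classes (2 and 4).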

    for i in range(y_pred_categories.shape[0]):
        # inter
        y_pred_categories[i] = y_pred_interaction[i]
        y_test_categories[i] = y_test_interaction[i]

        # flavour
        y_pred_categories[i] += (y_pred_flavour[i]*4)
        y_test_categories[i] += (y_test_flavour[i]*4)

        if y_pred_flavour[i] == 3:
            y_pred_is_antineutrino[i] = 2
            y_pred_interaction[i] = 4
            y_pred_categories[i] = 12

        if y_test_flavour[i] == 3:
            y_test_is_antineutrino[i] = 2
            y_test_interaction[i] = 4
            y_test_categories[i] = 12

    #np.set_printoptions(formatter={'float': lambda x: "{0:0.2f}".format(x)}, threshold=np.nan)

    # is_antineutrino

    logging.info('is_antineutrino report:\n')
    print(classification_report(y_test_is_antineutrino, y_pred_is_antineutrino, target_names=is_antineutrino_target_names))
    logging.info('is_antineutrino confusion matrix (rows = predicted classes, cols = actual classes):\n')
    is_antineutrino_conf_matrix = confusion_matrix(y_pred_is_antineutrino, y_test_is_antineutrino)
    print(is_antineutrino_conf_matrix, '\n')

    # flavour

    logging.info('flavour report:\n')
    print(classification_report(y_test_flavour, y_pred_flavour, target_names=flavour_target_names))
    logging.info('flavour confusion matrix (rows = predicted classes, cols = actual classes):\n')
    flavour_conf_matrix = confusion_matrix(y_pred_flavour, y_test_flavour)
    print(flavour_conf_matrix, '\n')

    # interaction

    logging.info('interaction report:\n')
    print(classification_report(y_test_interaction, y_pred_interaction, target_names=interaction_target_names))
    logging.info('interaction confusion matrix (rows = predicted classes, cols = actual classes):\n')
    interaction_conf_matrix = confusion_matrix(y_pred_interaction, y_test_interaction)
    print(interaction_conf_matrix, '\n')

    # categories

    logging.info('categories report:\n')
    print(classification_report(y_test_categories, y_pred_categories, target_names=categories_target_names))
    logging.info('categories confusion matrix (rows = predicted classes, cols = actual classes):\n')
    categories_conf_matrix = confusion_matrix(y_pred_categories, y_test_categories)
    print(categories_conf_matrix, '\n')

    # protons

    logging.info('protons report:\n')
    print(classification_report(y_test_protons, y_pred_protons, target_names=protons_target_names))
    logging.info('protons confusion matrix (rows = predicted classes, cols = actual classes):\n')
    protons_conf_matrix = confusion_matrix(y_pred_protons, y_test_protons)
    print(protons_conf_matrix, '\n')

    # pions

    logging.info('pions report:\n')
    print(classification_report(y_test_pions, y_pred_pions, target_names=pions_target_names))
    logging.info('pions confusion matrix (rows = predicted classes, cols = actual classes):\n')
    pions_conf_matrix = confusion_matrix(y_pred_pions, y_test_pions)
    print(pions_conf_matrix, '\n')

    # pizeros

    logging.info('pizeros report:\n')
    print(classification_report(y_test_pizeros, y_pred_pizeros, target_names=pizeros_target_names))
    logging.info('pizeros confusion matrix (rows = predicted classes, cols = actual classes):\n')
    pizeros_conf_matrix = confusion_matrix(y_pred_pizeros, y_test_pizeros)
    print(pizeros_conf_matrix, '\n')

    # neutrons

    logging.info('neutrons report:\n')
    print(classification_report(y_test_neutrons, y_pred_neutrons, target_names=neutrons_target_names))
    logging.info('neutrons confusion matrix (rows = predicted classes, cols = actual classes):\n')
    neutrons_conf_matrix = confusion_matrix(y_pred_neutrons, y_test_neutrons)
    print(neutrons_conf_matrix, '\n')

    # Apply cuts

    logging.info('Applying a numu cut of %.2f, a nue cut of %.2f, a nutau cut of %.2f, and a NC cut of %.2f...\n' % (CUT_NUMU, CUT_NUE, CUT_NUTAU, CUT_NC))

    weighted_conf_matrix = np.zeros((4, 4), dtype='float32')
    cut_weighted_conf_matrix = np.zeros((4, 4), dtype='float32')

    for sample in range(len(Y_pred[1])):
        pred_flavour = int(y_pred_flavour[sample])    # get predicted class of sample
        test_flavour = int(y_test_flavour[sample])    # get actual class of sample
        weight = test_values[sample]['fEventWeight']  # event weight
        if Y_pred[1][sample][0] >= CUT_NUMU:
            cut_weighted_conf_matrix[0][test_flavour] += weight
        if Y_pred[1][sample][1] >= CUT_NUE:
            cut_weighted_conf_matrix[1][test_flavour] += weight
        if Y_pred[1][sample][2] >= CUT_NUTAU:
            cut_weighted_conf_matrix[2][test_flavour] += weight
        if Y_pred[1][sample][3] >= CUT_NC:
            cut_weighted_conf_matrix[3][test_flavour] += weight
        weighted_conf_matrix[pred_flavour][test_flavour] += weight

    # Confusion matrix - neutrino flavour (weighted)

    logging.info('Neutrino flavour weighted confusion matrix (rows = predicted classes, cols = actual classes):\n')
    print(weighted_conf_matrix.astype(int), '\n')
    logging.info('Neutrino flavour weighted confusion matrix (rows = predicted classes, cols = actual classes) after applying the nue and numu cuts:\n')
    print(cut_weighted_conf_matrix.astype(int), '\n')
    float_formatter = lambda x: "%.4f" % x
    np.set_printoptions(formatter={'float_kind': float_formatter})

    # Purity confusion matrix

    purity_conf_matrix = np.copy(cut_weighted_conf_matrix)
    for i in range(cut_weighted_conf_matrix.shape[0]):
        row_sum = np.sum(purity_conf_matrix[i])
        if row_sum > 0:
            for j in range(cut_weighted_conf_matrix.shape[1]):
                purity_conf_matrix[i][j] /= row_sum
    logging.info('Purity confusion matrix (rows = predicted classes, cols = actual classes):\n')
    print(purity_conf_matrix, '\n')

    # Efficiency confusion matrix

    logging.info('Efficiency confusion matrix (rows = predicted classes, cols = actual classes):\n')
    efficiency_conf_matrix = cut_weighted_conf_matrix.astype('float32') / np.add.reduce(weighted_conf_matrix)
    print(efficiency_conf_matrix, '\n')

    # Dump test information

    logging.info('Dumping test information to \'%s\'...\n' % (OUTPUT_PATH + OUTPUT_PREFIX + '.np'))

    test_info = {'test_values': test_values,  # Energy and weight values
                 'Y_pred': Y_pred,  # original probability predictions (one array per output)
                 'y_pred_is_antineutrino': y_pred_is_antineutrino,  # 1-DIM array of is_antineutrino predicted values
                 'y_test_is_antineutrino': y_test_is_antineutrino,  # 1-DIM array of is_antineutrino test values
                 'y_pred_flavour': y_pred_flavour,  # 1-DIM array of flavour predicted values
                 'y_test_flavour': y_test_flavour,  # 1-DIM array of flavour test values
                 'y_pred_interaction': y_pred_interaction,  # 1-DIM array of interaction predicted values
                 'y_test_interaction': y_test_interaction,  # 1-DIM array of interaction test values
                 'y_pred_categories': y_pred_categories,  # 1-DIM array of categories predicted values
                 'y_test_categories': y_test_categories,  # 1-DIM array of categories test values
                 'y_pred_protons': y_pred_protons,  # 1-DIM array of protons predicted values
                 'y_test_protons': y_test_protons,  # 1-DIM array of protons test values
                 'y_pred_pions': y_pred_pions,  # 1-DIM array of pions predicted values
                 'y_test_pions': y_test_pions,  # 1-DIM array of pions test values
                 'y_pred_pizeros': y_pred_pizeros,  # 1-DIM array of pizeros predicted values
                 'y_test_pizeros': y_test_pizeros,  # 1-DIM array of pizeros test values
                 'y_pred_neutrons': y_pred_neutrons,  # 1-DIM array of neutrons predicted values
                 'y_test_neutrons': y_test_neutrons,  # 1-DIM array of neutrons test values
                 'is_antineutrino_cm': is_antineutrino_conf_matrix,  # is_antineutrino confusion matrix
                 'flavour_cm': flavour_conf_matrix,  # flavour confusion matrix
                 'interaction_cm': interaction_conf_matrix,  # interaction confusion matrix
                 'categories_cm': categories_conf_matrix,  # categories confusion matrix
                 'protons_cm': protons_conf_matrix,  # protons confusion matrix
                 'pions_cm': pions_conf_matrix,  # pions confusion matrix
                 'pizeros_cm': pizeros_conf_matrix,  # pizeros confusion matrix
                 'neutrons_cm': neutrons_conf_matrix,  # neutrons confusion matrix
                 'cut_weighted_cm': cut_weighted_conf_matrix,  # Weighted neutrino types confusion matrix (after the cut)
                 'purity_cm': purity_conf_matrix,  # Purity confusion matrix
                 'efficiency_cm': efficiency_conf_matrix  # Efficiency confusion matrix
                 }

    test_info = np.array(test_info)

    with open(OUTPUT_PATH + OUTPUT_PREFIX + '.np', 'wb') as test_info_file:
        test_info.dump(test_info_file)
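
    # The dump is a pickled 0-d object array; it can be read back later with
    # something like (sketch):
    #     with open(OUTPUT_PATH + OUTPUT_PREFIX + '.np', 'rb') as f:
    #         test_info = pickle.load(f).item()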