|
| training.stream |
|
| training.stdout |
|
| training.level |
|
| training.config = configparser.ConfigParser() |
|
| training.SHUFFLE = ast.literal_eval(config['random']['shuffle']) |
|
| training.IMAGES_PATH = config['images']['path'] |
|
| training.VIEWS = int(config['images']['views']) |
|
| training.PLANES = int(config['images']['planes']) |
|
| training.CELLS = int(config['images']['cells']) |
|
| training.STANDARDIZE = ast.literal_eval(config['images']['standardize']) |
|
| training.INTERACTION_LABELS = ast.literal_eval(config['images']['interaction_labels']) |
|
| training.FILTERED = ast.literal_eval(config['images']['filtered']) |
|
| training.INTERACTION_TYPES = ast.literal_eval(config['dataset']['interaction_types']) |
|
list | training.NEUTRINO_LABELS = [] |
|
| training.N_LABELS = len(Counter(INTERACTION_LABELS.values())) |
|
| training.DATASET_PATH = config['dataset']['path'] |
|
| training.PARTITION_PREFIX = config['dataset']['partition_prefix'] |
|
| training.LABELS_PREFIX = config['dataset']['labels_prefix'] |
|
| training.LOG_PATH = config['log']['path'] |
|
| training.LOG_PREFIX = config['log']['prefix'] |
|
| training.CHECKPOINT_PATH = config['model']['checkpoint_path'] |
|
| training.CHECKPOINT_PREFIX = config['model']['checkpoint_prefix'] |
|
| training.CHECKPOINT_SAVE_MANY = ast.literal_eval(config['model']['checkpoint_save_many']) |
|
| training.CHECKPOINT_SAVE_BEST_ONLY = ast.literal_eval(config['model']['checkpoint_save_best_only']) |
|
| training.CHECKPOINT_PERIOD = int(config['model']['checkpoint_period']) |
|
| training.PRINT_SUMMARY = ast.literal_eval(config['model']['print_summary']) |
|
| training.RESUME = ast.literal_eval(config['train']['resume']) |
|
| training.LEARNING_RATE = float(config['train']['lr']) |
|
| training.MOMENTUM = float(config['train']['momentum']) |
|
| training.DECAY = float(config['train']['decay']) |
|
| training.TRAIN_BATCH_SIZE = int(config['train']['batch_size']) |
|
| training.EPOCHS = int(config['train']['epochs']) |
|
| training.EARLY_STOPPING_PATIENCE = int(config['train']['early_stopping_patience']) |
|
| training.WEIGHTED_LOSS_FUNCTION = ast.literal_eval(config['train']['weighted_loss_function']) |
|
| training.CLASS_WEIGHTS_PREFIX = config['train']['class_weights_prefix'] |
|
| training.VALIDATION_FRACTION = float(config['validation']['fraction']) |
|
| training.VALIDATION_BATCH_SIZE = int(config['validation']['batch_size']) |
|
dictionary | training.TRAIN_PARAMS |
|
dictionary | training.VALIDATION_PARAMS |
|
dictionary | training.partition = {'train' : [], 'validation' : [], 'test' : []} |
|
dictionary | training.labels = {} |
|
| training.partition_file = open(DATASET_PATH + PARTITION_PREFIX + '.p', 'r') |
|
| training.labels_file = open(DATASET_PATH + LABELS_PREFIX + '.p', 'r') |
|
| training.class_weights_file = open(DATASET_PATH + CLASS_WEIGHTS_PREFIX + '.p', 'r') |
|
| training.class_weights = pickle.load(class_weights_file) |
|
| training.training_generator = DataGenerator(**TRAIN_PARAMS).generate(labels, partition['train'], True) |
|
| training.validation_generator = DataGenerator(**VALIDATION_PARAMS).generate(labels, partition['validation'], True) |
|
list | training.files = [f for f in os.listdir(CHECKPOINT_PATH) if os.path.isfile(os.path.join(CHECKPOINT_PATH, f))] |
|
| training.reverse |
|
| training.r = re.compile(CHECKPOINT_PREFIX[1:] + '-.*-.*.h5') |
|
| training.model = load_model(CHECKPOINT_PATH + '/' + fil) |
|
list | training.input_shape = [PLANES, CELLS, VIEWS] |
|
| training.opt = optimizers.SGD(lr=LEARNING_RATE, momentum=MOMENTUM, decay=DECAY, nesterov=True) |
|
| training.loss |
|
| training.optimizer |
|
| training.metrics |
|
string | training.filepath = CHECKPOINT_PATH+CHECKPOINT_PREFIX+'.h5' |
|
string | training.monitor_acc = 'val_acc' |
|
string | training.monitor_loss = 'val_loss' |
|
| training.checkpoint = ModelCheckpoint(filepath, monitor=monitor_acc, verbose=1, save_best_only=CHECKPOINT_SAVE_BEST_ONLY, mode='max', period=CHECKPOINT_PERIOD) |
|
| training.lr_reducer = ReduceLROnPlateau(monitor=monitor_loss, factor=0.1, cooldown=0, patience=3, min_lr=0.5e-6, verbose=1) |
|
| training.early_stopping = EarlyStopping(monitor=monitor_acc, patience=EARLY_STOPPING_PATIENCE, mode='auto') |
|
| training.csv_logger = CSVLogger(LOG_PATH + LOG_PREFIX + '.log', append=RESUME) |
|
| training.my_callback = my_callbacks.MyCallback() |
|
list | training.callbacks_list = [lr_reducer, checkpoint, early_stopping, csv_logger] |
|
integer | training.initial_epoch = int(re.search(r'\d+', logfile.read().split('\n')[-2]).group())+1 |
|
| training.generator |
|
| training.steps_per_epoch |
|
| training.validation_data |
|
| training.validation_steps |
|
| training.epochs |
|
| training.class_weight |
|
| training.callbacks |
|
| training.verbose |
|