se_densenet Namespace Reference

Functions

def preprocess_input (x, data_format=None)
 
def SEDenseNet (input_shape=None, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=-1, nb_layers_per_block=-1, bottleneck=False, reduction=0.0, dropout_rate=0.0, weight_decay=1e-4, subsample_initial_block=False, include_top=True, weights=None, input_tensor=None, classes=10, activation='softmax')
 
def SEDenseNetImageNet121 (input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')
 
def SEDenseNetImageNet169 (input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')
 
def SEDenseNetImageNet201 (input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')
 
def SEDenseNetImageNet264 (input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')
 
def SEDenseNetImageNet161 (input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')
 
def __conv_block (ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4)
 
def __dense_block (x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True, return_concat_list=False)
 
def __transition_block (ip, nb_filter, compression=1.0, weight_decay=1e-4)
 
def __create_dense_net (nb_classes, img_input, include_top, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=-1, nb_layers_per_block=-1, bottleneck=False, reduction=0.0, dropout_rate=None, weight_decay=1e-4, subsample_initial_block=False, activation='softmax')
 

Detailed Description

DenseNet models for Keras.
# Reference
- [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993.pdf)
- [The One Hundred Layers Tiramisu: Fully Convolutional DenseNets for Semantic Segmentation](https://arxiv.org/pdf/1611.09326.pdf)
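
A minimal end-to-end sketch, assuming the module is importable as `se_densenet` (alongside the companion `se` module that supplies `squeeze_excite_block`) and a Keras backend is configured:

import numpy as np
from se_densenet import SEDenseNet, preprocess_input

# Default SE-DenseNet for CIFAR-sized inputs: depth 40, 3 dense blocks,
# growth rate 12, 10-way softmax classifier.
model = SEDenseNet(input_shape=(32, 32, 3), classes=10)

# Preprocess a dummy float batch (RGB->BGR, mean subtraction, 0.017 scaling).
batch = np.random.uniform(0, 255, size=(4, 32, 32, 3)).astype('float32')
preds = model.predict(preprocess_input(batch))  # shape: (4, 10)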

Function Documentation

def se_densenet.__conv_block (   ip,
  nb_filter,
  bottleneck = False,
  dropout_rate = None,
  weight_decay = 1e-4 
)
private
Apply BatchNorm, Relu, 3x3 Conv2D, optional bottleneck block and dropout
Args:
    ip: Input keras tensor
    nb_filter: number of filters
    bottleneck: add bottleneck block
    dropout_rate: dropout rate
    weight_decay: weight decay factor
Returns: keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)

Definition at line 260 of file se_densenet.py.

260 def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4):
261     ''' Apply BatchNorm, Relu, 3x3 Conv2D, optional bottleneck block and dropout
262     Args:
263         ip: Input keras tensor
264         nb_filter: number of filters
265         bottleneck: add bottleneck block
266         dropout_rate: dropout rate
267         weight_decay: weight decay factor
268     Returns: keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)
269     '''
270     concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
271 
272     x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
273     x = Activation('relu')(x)
274 
275     if bottleneck:
276         inter_channel = nb_filter * 4  # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua
277 
278         x = Conv2D(inter_channel, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
279                    kernel_regularizer=l2(weight_decay))(x)
280         x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
281         x = Activation('relu')(x)
282 
283     x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal', padding='same', use_bias=False)(x)
284     if dropout_rate:
285         x = Dropout(dropout_rate)(x)
286 
287     return x
288 
289 
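Each call emits exactly `nb_filter` new feature maps regardless of how many channels come in; the bottleneck path only widens the intermediate representation to `4 * nb_filter` channels before the 3x3 convolution. A minimal shape check, assuming a `channels_last` data format and execution inside the module (the function is private):

from keras import backend as K
from keras.layers import Input

ip = Input(shape=(32, 32, 64))                         # 64 incoming channels
out = __conv_block(ip, nb_filter=12, bottleneck=True)  # bottleneck widens to 48, then 3x3 conv down to 12
print(K.int_shape(out))                                # (None, 32, 32, 12)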
def se_densenet.__create_dense_net (   nb_classes,
  img_input,
  include_top,
  depth = 40,
  nb_dense_block = 3,
  growth_rate = 12,
  nb_filter = -1,
  nb_layers_per_block = -1,
  bottleneck = False,
  reduction = 0.0,
  dropout_rate = None,
  weight_decay = 1e-4,
  subsample_initial_block = False,
  activation = 'softmax' 
)
private
Build the DenseNet model
Args:
    nb_classes: number of classes
    img_input: input Keras tensor (e.g. the output of `Input(shape=...)`)
    include_top: flag to include the final Dense layer
    depth: number of layers
    nb_dense_block: number of dense blocks to add to end (generally = 3)
    growth_rate: number of filters to add per dense block
    nb_filter: initial number of filters. Default -1 indicates initial number of filters is 2 * growth_rate
    nb_layers_per_block: number of layers in each dense block.
            Can be -1, a positive integer or a list.
            If -1, calculates nb_layer_per_block from the depth of the network.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be nb_dense_block
    bottleneck: add bottleneck blocks
    reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
    dropout_rate: dropout rate
    weight_decay: weight decay rate
    subsample_initial_block: Set to True to subsample the initial convolution and
            add a MaxPool2D before the dense blocks are added.
    activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
            Note that if sigmoid is used, classes must be 1.
Returns: keras tensor of the network output (the classifier output if include_top is True)

Definition at line 354 of file se_densenet.py.

352 def __create_dense_net(nb_classes, img_input, include_top, depth=40, nb_dense_block=3, growth_rate=12,
353                        nb_filter=-1, nb_layers_per_block=-1, bottleneck=False, reduction=0.0, dropout_rate=None, weight_decay=1e-4,
354                        subsample_initial_block=False, activation='softmax'):
355     ''' Build the DenseNet model
356     Args:
357         nb_classes: number of classes
358         img_input: input Keras tensor (e.g. the output of `Input(shape=...)`)
359         include_top: flag to include the final Dense layer
360         depth: number of layers
361         nb_dense_block: number of dense blocks to add to end (generally = 3)
362         growth_rate: number of filters to add per dense block
363         nb_filter: initial number of filters. Default -1 indicates initial number of filters is 2 * growth_rate
364         nb_layers_per_block: number of layers in each dense block.
365                 Can be -1, a positive integer or a list.
366                 If -1, calculates nb_layer_per_block from the depth of the network.
367                 If positive integer, a set number of layers per dense block.
368                 If list, nb_layer is used as provided. Note that list size must
369                 be nb_dense_block
370         bottleneck: add bottleneck blocks
371         reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
372         dropout_rate: dropout rate
373         weight_decay: weight decay rate
374         subsample_initial_block: Set to True to subsample the initial convolution and
375                 add a MaxPool2D before the dense blocks are added.
376 
377         activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
378                 Note that if sigmoid is used, classes must be 1.
379     Returns: keras tensor of the network output (the classifier output if include_top is True)
380     '''
381 
382     concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
383 
384     if reduction != 0.0:
385         assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'
386 
387     # layers in each dense block
388     if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
389         nb_layers = list(nb_layers_per_block)  # Convert tuple to list
390 
391         assert len(nb_layers) == (nb_dense_block), 'If list, nb_layer is used as provided. ' \
392                                                    'Note that list size must be (nb_dense_block)'
393         final_nb_layer = nb_layers[-1]
394         nb_layers = nb_layers[:-1]
395     else:
396         if nb_layers_per_block == -1:
397             assert (depth - 4) % 3 == 0, 'Depth must be 3 N + 4 if nb_layers_per_block == -1'
398             count = int((depth - 4) / 3)
399             nb_layers = [count for _ in range(nb_dense_block)]
400             final_nb_layer = count
401         else:
402             final_nb_layer = nb_layers_per_block
403             nb_layers = [nb_layers_per_block] * nb_dense_block
404 
405     # compute initial nb_filter if -1, else accept users initial nb_filter
406     if nb_filter <= 0:
407         nb_filter = 2 * growth_rate
408 
409     # compute compression factor
410     compression = 1.0 - reduction
411 
412     # Initial convolution
413     if subsample_initial_block:
414         initial_kernel = (7, 7)
415         initial_strides = (2, 2)
416     else:
417         initial_kernel = (3, 3)
418         initial_strides = (1, 1)
419 
420     x = Conv2D(nb_filter, initial_kernel, kernel_initializer='he_normal', padding='same',
421                strides=initial_strides, use_bias=False, kernel_regularizer=l2(weight_decay))(img_input)
422 
423     if subsample_initial_block:
424         x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
425         x = Activation('relu')(x)
426         x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
427 
428     # Add dense blocks
429     for block_idx in range(nb_dense_block - 1):
430         x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter, growth_rate, bottleneck=bottleneck,
431                                      dropout_rate=dropout_rate, weight_decay=weight_decay)
432         # add transition_block
433         x = __transition_block(x, nb_filter, compression=compression, weight_decay=weight_decay)
434         nb_filter = int(nb_filter * compression)
435 
436     # The last dense_block does not have a transition_block
437     x, nb_filter = __dense_block(x, final_nb_layer, nb_filter, growth_rate, bottleneck=bottleneck,
438                                  dropout_rate=dropout_rate, weight_decay=weight_decay)
439 
440     x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
441     x = Activation('relu')(x)
442     x = GlobalAveragePooling2D()(x)
443 
444     if include_top:
445         x = Dense(nb_classes, activation=activation)(x)
446 
447     return x
448 
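When `nb_layers_per_block == -1`, the per-block layer count follows from the constraint `depth = 3N + 4`: the extra 4 accounts for the initial convolution, the two transition layers and the classifier. The same arithmetic as a standalone check, using the CIFAR defaults:

depth, nb_dense_block = 40, 3
assert (depth - 4) % 3 == 0, 'Depth must be 3 N + 4'
count = (depth - 4) // 3               # 12 conv_blocks per dense block
nb_layers = [count] * nb_dense_block   # [12, 12, 12]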
def se_densenet.__dense_block (   x,
  nb_layers,
  nb_filter,
  growth_rate,
  bottleneck = False,
  dropout_rate = None,
  weight_decay = 1e-4,
  grow_nb_filters = True,
  return_concat_list = False 
)
private
Build a dense_block where the output of each conv_block is fed to subsequent ones
Args:
    x: keras tensor
    nb_layers: the number of layers of conv_block to append to the model.
    nb_filter: number of filters
    growth_rate: growth rate
    bottleneck: bottleneck block
    dropout_rate: dropout rate
    weight_decay: weight decay factor
    grow_nb_filters: flag to decide to allow number of filters to grow
    return_concat_list: return the list of feature maps along with the actual output
Returns: keras tensor with nb_layers of conv_block appended

Definition at line 291 of file se_densenet.py.

290 def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4,
291                   grow_nb_filters=True, return_concat_list=False):
292     ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
293     Args:
294         x: keras tensor
295         nb_layers: the number of layers of conv_block to append to the model.
296         nb_filter: number of filters
297         growth_rate: growth rate
298         bottleneck: bottleneck block
299         dropout_rate: dropout rate
300         weight_decay: weight decay factor
301         grow_nb_filters: flag to decide to allow number of filters to grow
302         return_concat_list: return the list of feature maps along with the actual output
303     Returns: keras tensor with nb_layers of conv_block appended
304     '''
305     concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
306 
307     x_list = [x]
308 
309     for i in range(nb_layers):
310         cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
311         x_list.append(cb)
312 
313         x = concatenate([x, cb], axis=concat_axis)
314 
315         if grow_nb_filters:
316             nb_filter += growth_rate
317 
318     # squeeze and excite block
319     x = squeeze_excite_block(x)
320 
321     if return_concat_list:
322         return x, nb_filter, x_list
323     else:
324         return x, nb_filter
325 
326 
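Since every iteration concatenates another `growth_rate` feature maps onto the running tensor, a dense block entered with `nb_filter` channels exits with `nb_filter + nb_layers * growth_rate` of them (when `grow_nb_filters` is True). With the CIFAR defaults, where the initial `nb_filter` is `2 * growth_rate = 24`:

nb_filter, growth_rate, nb_layers = 24, 12, 12
out_channels = nb_filter + nb_layers * growth_rate
print(out_channels)  # 168 channels leave the first dense block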
def se_densenet.__transition_block (   ip,
  nb_filter,
  compression = 1.0,
  weight_decay = 1e-4 
)
private
Apply BatchNorm, ReLU, 1x1 Conv2D (with optional compression) and AveragePooling2D
Args:
    ip: keras tensor
    nb_filter: number of filters
    compression: calculated as 1 - reduction. Reduces the number of feature maps
                in the transition block.
    weight_decay: weight decay factor
Returns: keras tensor, after applying batch_norm, relu, 1x1 conv and average pooling

Definition at line 327 of file se_densenet.py.

327 def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
328     ''' Apply BatchNorm, ReLU, 1x1 Conv2D (with optional compression) and AveragePooling2D
329     Args:
330         ip: keras tensor
331         nb_filter: number of filters
332         compression: calculated as 1 - reduction. Reduces the number of feature maps
333                     in the transition block.
334 
335         weight_decay: weight decay factor
336     Returns: keras tensor, after applying batch_norm, relu, 1x1 conv and average pooling
337     '''
338     concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
339 
340     x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
341     x = Activation('relu')(x)
342     x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
343                kernel_regularizer=l2(weight_decay))(x)
344     x = AveragePooling2D((2, 2), strides=(2, 2))(x)
345 
346     # squeeze and excite block
347     x = squeeze_excite_block(x)
348 
349     return x
350 
351 
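The transition both compresses channels and halves the spatial grid: the 1x1 convolution keeps `int(nb_filter * compression)` feature maps, and the stride-2 average pooling halves height and width. Continuing the 168-channel example above with `reduction=0.5`:

nb_filter, reduction = 168, 0.5
compression = 1.0 - reduction
print(int(nb_filter * compression))  # 84 feature maps after the transition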
def se_densenet.preprocess_input (   x,
  data_format = None 
)
Preprocesses a tensor encoding a batch of images.

# Arguments
    x: input Numpy tensor, 4D.
    data_format: data format of the image tensor.

# Returns
    Preprocessed tensor.

Definition at line 31 of file se_densenet.py.

31 def preprocess_input(x, data_format=None):
32     """Preprocesses a tensor encoding a batch of images.
33 
34     # Arguments
35         x: input Numpy tensor, 4D.
36         data_format: data format of the image tensor.
37 
38     # Returns
39         Preprocessed tensor.
40     """
41     if data_format is None:
42         data_format = K.image_data_format()
43     assert data_format in {'channels_last', 'channels_first'}
44 
45     if data_format == 'channels_first':
46         if x.ndim == 3:
47             # 'RGB'->'BGR'
48             x = x[::-1, ...]
49             # Zero-center by mean pixel
50             x[0, :, :] -= 103.939
51             x[1, :, :] -= 116.779
52             x[2, :, :] -= 123.68
53         else:
54             x = x[:, ::-1, ...]
55             x[:, 0, :, :] -= 103.939
56             x[:, 1, :, :] -= 116.779
57             x[:, 2, :, :] -= 123.68
58     else:
59         # 'RGB'->'BGR'
60         x = x[..., ::-1]
61         # Zero-center by mean pixel
62         x[..., 0] -= 103.939
63         x[..., 1] -= 116.779
64         x[..., 2] -= 123.68
65 
66     x *= 0.017  # scale values
67 
68     return x
69 
70 
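Because the mean subtraction operates in place (`x[..., 0] -= 103.939` and so on), pass a float NumPy array; an integer batch would fail or silently truncate. A minimal sketch with a random `channels_last` batch:

import numpy as np

x = np.random.uniform(0, 255, size=(2, 224, 224, 3)).astype('float32')
x = preprocess_input(x)  # RGB->BGR, mean-centred, scaled by 0.017
print(x.shape)           # (2, 224, 224, 3)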
def se_densenet.SEDenseNet (   input_shape = None,
  depth = 40,
  nb_dense_block = 3,
  growth_rate = 12,
  nb_filter = -1,
  nb_layers_per_block = -1,
  bottleneck = False,
  reduction = 0.0,
  dropout_rate = 0.0,
  weight_decay = 1e-4,
  subsample_initial_block = False,
  include_top = True,
  weights = None,
  input_tensor = None,
  classes = 10,
  activation = 'softmax' 
)
Instantiate the SE DenseNet architecture
    # Arguments
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(32, 32, 3)` (with `channels_last` dim ordering)
            or `(3, 32, 32)` (with `channels_first` dim ordering)).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 8.
            E.g. `(200, 200, 3)` would be one valid value.
        depth: number of layers in the DenseNet
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters. -1 indicates initial
            number of filters is 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
            Can be -1, a positive integer or a list.
            If -1, calculates nb_layer_per_block from the network depth.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be nb_dense_block
        bottleneck: flag to add bottleneck blocks in between dense blocks
        reduction: reduction factor of transition blocks.
            Note : reduction value is inverted to compute compression.
        dropout_rate: dropout rate
        weight_decay: weight decay rate
        subsample_initial_block: Set to True to subsample the initial convolution and
            add a MaxPool2D before the dense blocks are added.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization) or
            'imagenet' (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
        activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
            Note that if sigmoid is used, classes must be 1.
    # Returns
        A Keras model instance.

Definition at line 86 of file se_densenet.py.

83 def SEDenseNet(input_shape=None, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=-1,
84                nb_layers_per_block=-1, bottleneck=False, reduction=0.0, dropout_rate=0.0, weight_decay=1e-4,
85                subsample_initial_block=False, include_top=True, weights=None, input_tensor=None, classes=10,
86                activation='softmax'):
87     '''Instantiate the SE DenseNet architecture
88     # Arguments
89         input_shape: optional shape tuple, only to be specified
90             if `include_top` is False (otherwise the input shape
91             has to be `(32, 32, 3)` (with `channels_last` dim ordering)
92             or `(3, 32, 32)` (with `channels_first` dim ordering)).
93             It should have exactly 3 input channels,
94             and width and height should be no smaller than 8.
95             E.g. `(200, 200, 3)` would be one valid value.
96         depth: number of layers in the DenseNet
97         nb_dense_block: number of dense blocks to add to end (generally = 3)
98         growth_rate: number of filters to add per dense block
99         nb_filter: initial number of filters. -1 indicates initial
100             number of filters is 2 * growth_rate
101         nb_layers_per_block: number of layers in each dense block.
102             Can be -1, a positive integer or a list.
103             If -1, calculates nb_layer_per_block from the network depth.
104             If positive integer, a set number of layers per dense block.
105             If list, nb_layer is used as provided. Note that list size must
106             be nb_dense_block
107         bottleneck: flag to add bottleneck blocks in between dense blocks
108         reduction: reduction factor of transition blocks.
109             Note : reduction value is inverted to compute compression.
110         dropout_rate: dropout rate
111         weight_decay: weight decay rate
112         subsample_initial_block: Set to True to subsample the initial convolution and
113             add a MaxPool2D before the dense blocks are added.
114         include_top: whether to include the fully-connected
115             layer at the top of the network.
116         weights: one of `None` (random initialization) or
117             'imagenet' (pre-training on ImageNet).
118         input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
119             to use as image input for the model.
120         classes: optional number of classes to classify images
121             into, only to be specified if `include_top` is True, and
122             if no `weights` argument is specified.
123         activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
124             Note that if sigmoid is used, classes must be 1.
125     # Returns
126         A Keras model instance.
127     '''
128 
129     if weights not in {'imagenet', None}:
130         raise ValueError('The `weights` argument should be either '
131                          '`None` (random initialization) or `imagenet` '
132                          '(pre-training on ImageNet).')
133 
134     if weights == 'imagenet' and include_top and classes != 1000:
135         raise ValueError('If using `weights` as ImageNet with `include_top`'
136                          ' as true, `classes` should be 1000')
137 
138     if activation not in ['softmax', 'sigmoid']:
139         raise ValueError('activation must be one of "softmax" or "sigmoid"')
140 
141     if activation == 'sigmoid' and classes != 1:
142         raise ValueError('sigmoid activation can only be used when classes = 1')
143 
144     # Determine proper input shape
145     input_shape = _obtain_input_shape(input_shape,
146                                       default_size=32,
147                                       min_size=8,
148                                       data_format=K.image_data_format(),
149                                       require_flatten=include_top)
150 
151     if input_tensor is None:
152         img_input = Input(shape=input_shape)
153     else:
154         if not K.is_keras_tensor(input_tensor):
155             img_input = Input(tensor=input_tensor, shape=input_shape)
156         else:
157             img_input = input_tensor
158 
159     x = __create_dense_net(classes, img_input, include_top, depth, nb_dense_block,
160                            growth_rate, nb_filter, nb_layers_per_block, bottleneck, reduction,
161                            dropout_rate, weight_decay, subsample_initial_block, activation)
162 
163     # Ensure that the model takes into account
164     # any potential predecessors of `input_tensor`.
165     if input_tensor is not None:
166         inputs = get_source_inputs(input_tensor)
167     else:
168         inputs = img_input
169     # Create model.
170     model = Model(inputs, x, name='se-densenet')
171 
172     return model
173 
174 
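A hedged construction sketch: the first call uses the CIFAR-10 defaults documented above, the second a deeper variant with bottleneck blocks and 0.5 compression (depth=100 satisfies the `3N + 4` constraint applied when no explicit layer list is given):

# Default SE-DenseNet-40 with growth rate 12, 10-way softmax.
model = SEDenseNet(input_shape=(32, 32, 3))

# Deeper variant: bottleneck conv_blocks plus compressed transitions.
model_bc = SEDenseNet(input_shape=(32, 32, 3), depth=100, growth_rate=12,
                      bottleneck=True, reduction=0.5)
model_bc.summary()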
def se_densenet.SEDenseNetImageNet121 (   input_shape = None,
  bottleneck = True,
  reduction = 0.5,
  dropout_rate = 0.0,
  weight_decay = 1e-4,
  include_top = True,
  weights = None,
  input_tensor = None,
  classes = 1000,
  activation = 'softmax' 
)

Definition at line 184 of file se_densenet.py.

182 def SEDenseNetImageNet121(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0,
183                           weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000,
184                           activation='softmax'):
185     return SEDenseNet(input_shape, depth=121, nb_dense_block=4, growth_rate=32, nb_filter=64,
186                       nb_layers_per_block=[6, 12, 24, 16], bottleneck=bottleneck, reduction=reduction,
187                       dropout_rate=dropout_rate, weight_decay=weight_decay, subsample_initial_block=True,
188                       include_top=include_top, weights=weights, input_tensor=input_tensor,
189                       classes=classes, activation=activation)
190 
191 
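The ImageNet presets differ only in their fixed `nb_layers_per_block` lists (and, for the 161 variant, a larger growth rate and initial filter count); all of them enable the strided 7x7 stem via `subsample_initial_block=True`. A minimal sketch, assuming the customary 224x224 RGB input:

model = SEDenseNetImageNet121(input_shape=(224, 224, 3), weights=None)
model.summary()  # 4 dense blocks of [6, 12, 24, 16] layers, growth_rate=32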
def se_densenet.SEDenseNetImageNet161 (   input_shape = None,
  bottleneck = True,
  reduction = 0.5,
  dropout_rate = 0.0,
  weight_decay = 1e-4,
  include_top = True,
  weights = None,
  input_tensor = None,
  classes = 1000,
  activation = 'softmax' 
)

Definition at line 252 of file se_densenet.py.

250 def SEDenseNetImageNet161(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0,
251                           weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000,
252                           activation='softmax'):
253     return SEDenseNet(input_shape, depth=161, nb_dense_block=4, growth_rate=48, nb_filter=96,
254                       nb_layers_per_block=[6, 12, 36, 24], bottleneck=bottleneck, reduction=reduction,
255                       dropout_rate=dropout_rate, weight_decay=weight_decay, subsample_initial_block=True,
256                       include_top=include_top, weights=weights, input_tensor=input_tensor,
257                       classes=classes, activation=activation)
258 
259 
def se_densenet.SEDenseNetImageNet169 (   input_shape = None,
  bottleneck = True,
  reduction = 0.5,
  dropout_rate = 0.0,
  weight_decay = 1e-4,
  include_top = True,
  weights = None,
  input_tensor = None,
  classes = 1000,
  activation = 'softmax' 
)

Definition at line 201 of file se_densenet.py.

199 def SEDenseNetImageNet169(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0,
200                           weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000,
201                           activation='softmax'):
202     return SEDenseNet(input_shape, depth=169, nb_dense_block=4, growth_rate=32, nb_filter=64,
203                       nb_layers_per_block=[6, 12, 32, 32], bottleneck=bottleneck, reduction=reduction,
204                       dropout_rate=dropout_rate, weight_decay=weight_decay, subsample_initial_block=True,
205                       include_top=include_top, weights=weights, input_tensor=input_tensor,
206                       classes=classes, activation=activation)
207 
208 
def se_densenet.SEDenseNetImageNet201 (   input_shape = None,
  bottleneck = True,
  reduction = 0.5,
  dropout_rate = 0.0,
  weight_decay = 1e-4,
  include_top = True,
  weights = None,
  input_tensor = None,
  classes = 1000,
  activation = 'softmax' 
)

Definition at line 218 of file se_densenet.py.

216 def SEDenseNetImageNet201(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0,
217                           weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000,
218                           activation='softmax'):
219     return SEDenseNet(input_shape, depth=201, nb_dense_block=4, growth_rate=32, nb_filter=64,
220                       nb_layers_per_block=[6, 12, 48, 32], bottleneck=bottleneck, reduction=reduction,
221                       dropout_rate=dropout_rate, weight_decay=weight_decay, subsample_initial_block=True,
222                       include_top=include_top, weights=weights, input_tensor=input_tensor,
223                       classes=classes, activation=activation)
224 
225 
def se_densenet.SEDenseNetImageNet264 (   input_shape = None,
  bottleneck = True,
  reduction = 0.5,
  dropout_rate = 0.0,
  weight_decay = 1e-4,
  include_top = True,
  weights = None,
  input_tensor = None,
  classes = 1000,
  activation = 'softmax' 
)

Definition at line 235 of file se_densenet.py.

233 def SEDenseNetImageNet264(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0,
234                           weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000,
235                           activation='softmax'):
236     # depth is ignored when nb_layers_per_block is an explicit list; the
237     # [6, 12, 64, 48] layer counts define the DenseNet-264 topology.
238     return SEDenseNet(input_shape, depth=264, nb_dense_block=4, growth_rate=32, nb_filter=64,
239                       nb_layers_per_block=[6, 12, 64, 48], bottleneck=bottleneck, reduction=reduction,
240                       dropout_rate=dropout_rate, weight_decay=weight_decay, subsample_initial_block=True,
241                       include_top=include_top, weights=weights, input_tensor=input_tensor,
242                       classes=classes, activation=activation)
243 
244 
242 