def preprocess_input(x, data_format=None)

def SEDenseNet(input_shape=None, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=-1, nb_layers_per_block=-1, bottleneck=False, reduction=0.0, dropout_rate=0.0, weight_decay=1e-4, subsample_initial_block=False, include_top=True, weights=None, input_tensor=None, classes=10, activation='softmax')

def SEDenseNetImageNet121(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')

def SEDenseNetImageNet169(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')

def SEDenseNetImageNet201(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')

def SEDenseNetImageNet264(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')

def SEDenseNetImageNet161(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')

def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4)

def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True, return_concat_list=False)

def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4)

def __create_dense_net(nb_classes, img_input, include_top, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=-1, nb_layers_per_block=-1, bottleneck=False, reduction=0.0, dropout_rate=None, weight_decay=1e-4, subsample_initial_block=False, activation='softmax')
|
DenseNet models for Keras.
# Reference
- [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993.pdf)
- [The One Hundred Layers Tiramisu: Fully Convolutional DenseNets for Semantic Segmentation](https://arxiv.org/pdf/1611.09326.pdf)
def se_densenet.__create_dense_net(nb_classes, img_input, include_top, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=-1, nb_layers_per_block=-1, bottleneck=False, reduction=0.0, dropout_rate=None, weight_decay=1e-4, subsample_initial_block=False, activation='softmax')

private
Build the DenseNet model
Args:
nb_classes: number of classes
img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
include_top: flag to include the final Dense layer
depth: number of layers
nb_dense_block: number of dense blocks to add to end (generally = 3)
growth_rate: number of filters to add per dense block
nb_filter: initial number of filters. Default -1 indicates initial number of filters is 2 * growth_rate
nb_layers_per_block: number of layers in each dense block.
    Can be -1, a positive integer or a list.
    If -1, calculates nb_layers_per_block from the depth of the network.
    If positive integer, a set number of layers per dense block.
    If list, nb_layers is used as provided. Note that list size must
    be (nb_dense_block)
bottleneck: add bottleneck blocks
reduction: reduction factor of transition blocks. Note: reduction value is inverted to compute compression
dropout_rate: dropout rate
weight_decay: weight decay rate
subsample_initial_block: set to True to subsample the initial convolution and
    add a MaxPooling2D before the dense blocks are added
activation: type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
    Note that if sigmoid is used, classes must be 1.
Returns: keras tensor with the output of the network
Definition at line 354 of file se_densenet.py.
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'

# layers in each dense block
if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
    nb_layers = list(nb_layers_per_block)  # convert tuple to list

    assert len(nb_layers) == (nb_dense_block), 'If list, nb_layer is used as provided. ' \
                                               'Note that list size must be (nb_dense_block)'
    final_nb_layer = nb_layers[-1]
    nb_layers = nb_layers[:-1]
else:
    if nb_layers_per_block == -1:
        assert (depth - 4) % 3 == 0, 'Depth must be 3 N + 4 if nb_layers_per_block == -1'
        count = int((depth - 4) / 3)
        nb_layers = [count for _ in range(nb_dense_block)]
        final_nb_layer = count
    else:
        final_nb_layer = nb_layers_per_block
        nb_layers = [nb_layers_per_block] * nb_dense_block

# compute initial nb_filter if -1, else accept the user-supplied value
if nb_filter <= 0:
    nb_filter = 2 * growth_rate

# compute compression factor
compression = 1.0 - reduction

# initial convolution
if subsample_initial_block:
    initial_kernel = (7, 7)
    initial_strides = (2, 2)
else:
    initial_kernel = (3, 3)
    initial_strides = (1, 1)

x = Conv2D(nb_filter, initial_kernel, kernel_initializer='he_normal', padding='same',
           strides=initial_strides, use_bias=False, kernel_regularizer=l2(weight_decay))(img_input)

if subsample_initial_block:
    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

# add dense blocks, each followed by a transition block
for block_idx in range(nb_dense_block - 1):
    x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter, growth_rate, bottleneck=bottleneck,
                                 dropout_rate=dropout_rate, weight_decay=weight_decay)
    x = __transition_block(x, nb_filter, compression=compression, weight_decay=weight_decay)
    nb_filter = int(nb_filter * compression)

# the last dense block does not have a transition block
x, nb_filter = __dense_block(x, final_nb_layer, nb_filter, growth_rate, bottleneck=bottleneck,
                             dropout_rate=dropout_rate, weight_decay=weight_decay)

x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)

if include_top:
    x = Dense(nb_classes, activation=activation)(x)

return x
def se_densenet.__dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True, return_concat_list=False)

private
Build a dense_block where the output of each conv_block is fed to subsequent ones
Args:
x: keras tensor
nb_layers: the number of layers of conv_block to append to the model.
nb_filter: number of filters
growth_rate: growth rate
bottleneck: bottleneck block
dropout_rate: dropout rate
weight_decay: weight decay factor
grow_nb_filters: flag deciding whether the number of filters is allowed to grow
return_concat_list: return the list of feature maps along with the actual output
Returns: keras tensor with nb_layers of conv_block appended
Definition at line 291 of file se_densenet.py.
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

x_list = [x]

for i in range(nb_layers):
    cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
    x_list.append(cb)

    x = concatenate([x, cb], axis=concat_axis)

    if grow_nb_filters:
        nb_filter += growth_rate

    # squeeze and excite block
    x = squeeze_excite_block(x)

if return_concat_list:
    return x, nb_filter, x_list
else:
    return x, nb_filter
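Because every iteration concatenates one conv_block of growth_rate feature maps, a dense block grows the channel count linearly. A plain-Python sketch of that arithmetic (illustrative values, not from the source):

    nb_filter, growth_rate, nb_layers = 24, 12, 12
    for _ in range(nb_layers):
        nb_filter += growth_rate  # one concatenated conv_block per iteration
    print(nb_filter)  # 168 feature maps leave the dense block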
def se_densenet.__transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4)

private
Apply BatchNorm, ReLU, 1x1 Conv2D with optional compression, and AveragePooling2D
Args:
ip: keras tensor
nb_filter: number of filters
compression: calculated as 1 - reduction. Reduces the number of feature maps
    in the transition block.
weight_decay: weight decay factor
Returns: keras tensor, after applying batch_norm, relu-conv, average pooling
Definition at line 327 of file se_densenet.py.
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
x = Activation('relu')(x)
x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
           kernel_regularizer=l2(weight_decay))(x)
x = AveragePooling2D((2, 2), strides=(2, 2))(x)

return x
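With the default reduction=0.5 used by the ImageNet constructors, compression is 0.5, so each transition block halves the channel count before downsampling. A one-line sketch (illustrative values):

    nb_filter, reduction = 168, 0.5
    compression = 1.0 - reduction
    print(int(nb_filter * compression))  # 84 filters in the 1x1 Conv2D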
def se_densenet.preprocess_input(x, data_format=None)
Preprocesses a tensor encoding a batch of images.
# Arguments
x: input Numpy tensor, 4D.
data_format: data format of the image tensor.
# Returns
Preprocessed tensor.
Definition at line 31 of file se_densenet.py.
32 """Preprocesses a tensor encoding a batch of images. 35 x: input Numpy tensor, 4D. 36 data_format: data format of the image tensor. 41 if data_format
is None:
42 data_format = K.image_data_format()
43 assert data_format
in {
'channels_last',
'channels_first'}
45 if data_format ==
'channels_first':
55 x[:, 0, :, :] -= 103.939
56 x[:, 1, :, :] -= 116.779
57 x[:, 2, :, :] -= 123.68
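A minimal usage sketch, assuming the module is importable as se_densenet and the input is a float32 batch in channels_last layout:

    import numpy as np
    from se_densenet import preprocess_input

    batch = np.random.uniform(0, 255, size=(2, 224, 224, 3)).astype('float32')
    batch = preprocess_input(batch)  # RGB->BGR flip plus mean-pixel centering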
def se_densenet.SEDenseNet(input_shape=None, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=-1, nb_layers_per_block=-1, bottleneck=False, reduction=0.0, dropout_rate=0.0, weight_decay=1e-4, subsample_initial_block=False, include_top=True, weights=None, input_tensor=None, classes=10, activation='softmax')
Instantiate the SE DenseNet architecture
# Arguments
input_shape: optional shape tuple, only to be specified
    if `include_top` is False (otherwise the input shape
    has to be `(32, 32, 3)` (with `channels_last` dim ordering)
    or `(3, 32, 32)` (with `channels_first` dim ordering)).
    It should have exactly 3 input channels,
    and width and height should be no smaller than 8.
    E.g. `(200, 200, 3)` would be one valid value.
depth: number of layers in the DenseNet
nb_dense_block: number of dense blocks to add to end (generally = 3)
growth_rate: number of filters to add per dense block
nb_filter: initial number of filters. -1 indicates initial
    number of filters is 2 * growth_rate
nb_layers_per_block: number of layers in each dense block.
    Can be -1, a positive integer or a list.
    If -1, calculates nb_layers_per_block from the network depth.
    If positive integer, a set number of layers per dense block.
    If list, nb_layers is used as provided. Note that list size must
    be (nb_dense_block)
bottleneck: flag to add bottleneck blocks in between dense blocks
reduction: reduction factor of transition blocks.
    Note: reduction value is inverted to compute compression.
dropout_rate: dropout rate
weight_decay: weight decay rate
subsample_initial_block: set to True to subsample the initial convolution and
    add a MaxPooling2D before the dense blocks are added.
include_top: whether to include the fully-connected
    layer at the top of the network.
weights: one of `None` (random initialization) or
    'imagenet' (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
    to use as image input for the model.
classes: optional number of classes to classify images
    into, only to be specified if `include_top` is True, and
    if no `weights` argument is specified.
activation: type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
    Note that if sigmoid is used, classes must be 1.
# Returns
A Keras model instance.
Definition at line 86 of file se_densenet.py.
if weights not in {'imagenet', None}:
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization) or `imagenet` '
                     '(pre-training on ImageNet).')

if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError('If using `weights` as ImageNet with `include_top`'
                     ' as true, `classes` should be 1000')

if activation not in ['softmax', 'sigmoid']:
    raise ValueError('activation must be one of "softmax" or "sigmoid"')

if activation == 'sigmoid' and classes != 1:
    raise ValueError('sigmoid activation can only be used when classes = 1')

# determine proper input shape
input_shape = _obtain_input_shape(input_shape,
                                  default_size=32,
                                  min_size=8,
                                  data_format=K.image_data_format(),
                                  require_flatten=include_top)

if input_tensor is None:
    img_input = Input(shape=input_shape)
else:
    if not K.is_keras_tensor(input_tensor):
        img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor

x = __create_dense_net(classes, img_input, include_top, depth, nb_dense_block,
                       growth_rate, nb_filter, nb_layers_per_block, bottleneck, reduction,
                       dropout_rate, weight_decay, subsample_initial_block, activation)

# ensure that the model takes into account any potential predecessors of `input_tensor`
if input_tensor is not None:
    inputs = get_source_inputs(input_tensor)
else:
    inputs = img_input

# create model
model = Model(inputs, x, name='se-densenet')

return model
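A minimal usage sketch for the CIFAR-style defaults, assuming the module is importable as se_densenet:

    from se_densenet import SEDenseNet

    # 40-layer SE-DenseNet for 32x32 RGB inputs and 10 classes (the defaults)
    model = SEDenseNet(input_shape=(32, 32, 3), depth=40, growth_rate=12, classes=10)
    model.summary()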
def se_densenet.SEDenseNetImageNet121(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')
Definition at line 184 of file se_densenet.py.
return SEDenseNet(input_shape, depth=121, nb_dense_block=4, growth_rate=32, nb_filter=64,
                  nb_layers_per_block=[6, 12, 24, 16], bottleneck=bottleneck, reduction=reduction,
                  dropout_rate=dropout_rate, weight_decay=weight_decay, subsample_initial_block=True,
                  include_top=include_top, weights=weights, input_tensor=input_tensor,
                  classes=classes, activation=activation)
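A usage sketch for the ImageNet-sized variants, assuming the module is importable as se_densenet and a 224x224 RGB input (the other SEDenseNetImageNet* constructors are called the same way):

    from se_densenet import SEDenseNetImageNet121

    # weights=None by default, i.e. random initialization
    model = SEDenseNetImageNet121(input_shape=(224, 224, 3), classes=1000)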
def se_densenet.SEDenseNetImageNet161(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')
Definition at line 252 of file se_densenet.py.
return SEDenseNet(input_shape, depth=161, nb_dense_block=4, growth_rate=48, nb_filter=96,
                  nb_layers_per_block=[6, 12, 36, 24], bottleneck=bottleneck, reduction=reduction,
                  dropout_rate=dropout_rate, weight_decay=weight_decay, subsample_initial_block=True,
                  include_top=include_top, weights=weights, input_tensor=input_tensor,
                  classes=classes, activation=activation)
def se_densenet.SEDenseNetImageNet169(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')
Definition at line 201 of file se_densenet.py.
return SEDenseNet(input_shape, depth=169, nb_dense_block=4, growth_rate=32, nb_filter=64,
                  nb_layers_per_block=[6, 12, 32, 32], bottleneck=bottleneck, reduction=reduction,
                  dropout_rate=dropout_rate, weight_decay=weight_decay, subsample_initial_block=True,
                  include_top=include_top, weights=weights, input_tensor=input_tensor,
                  classes=classes, activation=activation)
def se_densenet.SEDenseNetImageNet201(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')
Definition at line 218 of file se_densenet.py.
return SEDenseNet(input_shape, depth=201, nb_dense_block=4, growth_rate=32, nb_filter=64,
                  nb_layers_per_block=[6, 12, 48, 32], bottleneck=bottleneck, reduction=reduction,
                  dropout_rate=dropout_rate, weight_decay=weight_decay, subsample_initial_block=True,
                  include_top=include_top, weights=weights, input_tensor=input_tensor,
                  classes=classes, activation=activation)
def se_densenet.SEDenseNetImageNet264(input_shape=None, bottleneck=True, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, include_top=True, weights=None, input_tensor=None, classes=1000, activation='softmax')
Definition at line 235 of file se_densenet.py.
# depth is ignored here because nb_layers_per_block is given as a list
return SEDenseNet(input_shape, depth=201, nb_dense_block=4, growth_rate=32, nb_filter=64,
                  nb_layers_per_block=[6, 12, 64, 48], bottleneck=bottleneck, reduction=reduction,
                  dropout_rate=dropout_rate, weight_decay=weight_decay, subsample_initial_block=True,
                  include_top=include_top, weights=weights, input_tensor=input_tensor,
                  classes=classes, activation=activation)