# Assumes a BGR image from which the mean pixel value has been subtracted.
# We used B=104, G=117, R=123 in our experiments.
layer {
  name: "input"
  type: "Input"
  top: "data"
  input_param {
    shape { dim: 10 dim: 3 dim: 227 dim: 227 }
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  # Weights: normal learning rate; biases: doubled rate, no weight decay.
  param { lr_mult: 1.0 decay_mult: 1.0 }
  param { lr_mult: 2.0 decay_mult: 0 }
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "bn1"
  # BatchNorm statistics are frozen (lr_mult: 0) and the stored running
  # mean/variance are used (use_global_stats: true).
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "bn1"
  top: "bn1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "bn1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "pool1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "norm1"
  top: "conv2"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  param { lr_mult: 2.0 decay_mult: 0 }
  convolution_param {
    num_output: 256
    kernel_size: 5
    group: 2
    pad: 2
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 1.0 }
  }
}
layer {
  name: "bn2"
  type: "BatchNorm"
  bottom: "conv2"
  top: "bn2"
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "bn2"
  top: "bn2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "bn2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "pool2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "norm2"
  top: "conv3"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  param { lr_mult: 2.0 decay_mult: 0 }
  convolution_param {
    num_output: 384
    kernel_size: 3
    pad: 1
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "bn3"
  type: "BatchNorm"
  bottom: "conv3"
  top: "bn3"
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "bn3"
  top: "bn3"
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "bn3"
  top: "conv4"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  param { lr_mult: 2.0 decay_mult: 0 }
  convolution_param {
    num_output: 384
    kernel_size: 3
    group: 2
    pad: 1
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 1.0 }
  }
}
layer {
  name: "bn4"
  type: "BatchNorm"
  bottom: "conv4"
  top: "bn4"
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "bn4"
  top: "bn4"
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "bn4"
  top: "conv5"
  param { lr_mult: 1.0 decay_mult: 1.0 }
  param { lr_mult: 2.0 decay_mult: 0 }
  convolution_param {
    num_output: 256
    kernel_size: 3
    group: 2
    pad: 1
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 1.0 }
  }
}
layer {
  name: "bn5"
  type: "BatchNorm"
  bottom: "conv5"
  top: "bn5"
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  batch_norm_param { use_global_stats: true }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "bn5"
  top: "bn5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "bn5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}