name: "CaffeNet" input: "data" input_dim: 256 input_dim: 3 input_dim: 227 input_dim: 227 layer { name: "conv1" type: "Convolution" bottom: "data" top: "conv1" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 96 kernel_size: 11 stride: 4 bias_term: false weight_filler { type: "gaussian" std: 0.01 } } } layer { name: "bn1" type: "BatchNorm" bottom: "conv1" top: "conv1" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "scale1" type: "Scale" bottom: "conv1" top: "scale1" scale_param { bias_term: true } } layer { name: "relu1" type: "ReLU" bottom: "scale1" top: "scale1" } layer { name: "pool1" type: "Pooling" bottom: "scale1" top: "pool1" pooling_param { pool: MAX kernel_size: 3 stride: 2 } } layer { name: "conv2" type: "Convolution" bottom: "pool1" top: "conv2" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 256 pad: 2 kernel_size: 5 group: 2 bias_term: false weight_filler { type: "gaussian" std: 0.01 } } } layer { name: "bn2" type: "BatchNorm" bottom: "conv2" top: "conv2" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "scale2" type: "Scale" bottom: "conv2" top: "scale2" scale_param { bias_term: true } } layer { name: "relu2" type: "ReLU" bottom: "scale2" top: "scale2" } layer { name: "pool2" type: "Pooling" bottom: "scale2" top: "pool2" pooling_param { pool: MAX kernel_size: 3 stride: 2 } } #layer { # name: "norm2" # type: "BatchNorm" # bottom: "pool2" # top: "norm2" # param { # lr_mult: 0 # } # param { # lr_mult: 0 # } # param { # lr_mult: 0 # } #} #layer { # name: "norm2" # type: "LRN" # bottom: "pool2" # top: "norm2" # lrn_param { # local_size: 5 # alpha: 0.0001 # beta: 0.75 # } #} layer { name: "conv3" type: "Convolution" bottom: "pool2" top: "conv3" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 384 pad: 1 kernel_size: 3 bias_term: false weight_filler { type: "gaussian" std: 0.01 } } } layer { name: "bn3" type: "BatchNorm" bottom: "conv3" top: "conv3" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "scale3" type: "Scale" bottom: "conv3" top: "scale3" scale_param { bias_term: true } } layer { name: "relu3" type: "ReLU" bottom: "scale3" top: "scale3" } layer { name: "conv4" type: "Convolution" bottom: "scale3" top: "conv4" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 384 pad: 1 kernel_size: 3 group: 2 bias_term: false weight_filler { type: "gaussian" std: 0.01 } } } layer { name: "bn4" type: "BatchNorm" bottom: "conv4" top: "conv4" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "scale4" type: "Scale" bottom: "conv4" top: "scale4" scale_param { bias_term: true } } layer { name: "relu4" type: "ReLU" bottom: "scale4" top: "scale4" } layer { name: "conv5" type: "Convolution" bottom: "scale4" top: "conv5" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 256 pad: 1 kernel_size: 3 group: 2 bias_term: false weight_filler { type: "gaussian" std: 0.01 } } } layer { name: "bn5" type: "BatchNorm" bottom: "conv5" top: "conv5" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "scale5" type: "Scale" bottom: "conv5" top: "scale5" scale_param { bias_term: true } } layer { name: "relu5" type: "ReLU" bottom: "scale5" top: "scale5" } layer { name: 
"pool5" type: "Pooling" bottom: "scale5" top: "pool5" pooling_param { pool: MAX kernel_size: 3 stride: 2 } } layer { name: "fc6" type: "InnerProduct" bottom: "pool5" top: "fc6" param { lr_mult: 1 decay_mult: 1 } inner_product_param { num_output: 4096 bias_term: false weight_filler { type: "gaussian" std: 0.005 } } } layer { name: "bn6" type: "BatchNorm" bottom: "fc6" top: "fc6" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "scale6" type: "Scale" bottom: "fc6" top: "scale6" scale_param { bias_term: true } } layer { name: "relu6" type: "ReLU" bottom: "scale6" top: "scale6" } layer { name: "drop6" type: "Dropout" bottom: "scale6" top: "scale6" dropout_param { dropout_ratio: 0.5 } } layer { name: "fc7" type: "InnerProduct" bottom: "scale6" top: "fc7" param { lr_mult: 1 decay_mult: 1 } inner_product_param { num_output: 4096 bias_term: false weight_filler { type: "gaussian" std: 0.005 } } } layer { name: "bn7" type: "BatchNorm" bottom: "fc7" top: "fc7" param { lr_mult: 0 } param { lr_mult: 0 } param { lr_mult: 0 } batch_norm_param { use_global_stats: true } } layer { name: "scale7" type: "Scale" bottom: "fc7" top: "scale7" scale_param { bias_term: true } } layer { name: "relu7" type: "ReLU" bottom: "scale7" top: "scale7" } layer { name: "drop7" type: "Dropout" bottom: "scale7" top: "scale7" dropout_param { dropout_ratio: 0.5 } } layer { name: "fc8" type: "InnerProduct" bottom: "scale7" top: "fc8" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } inner_product_param { num_output: 205 weight_filler { type: "gaussian" std: 0.01 } bias_filler { type: "constant" value: 0 } } } layer { name: "prob" type: "Softmax" bottom: "fc8" top: "prob" }