name: "EquivNet"

# Input: one 3-channel 227x227 image (AlexNet-style crop).
# FIX: the original used four bare `input_dim` fields, the deprecated V1
# input specification; `input_shape` is the modern equivalent and carries
# exactly the same N,C,H,W values.
input: "data"
input_shape {
  dim: 1
  dim: 3
  dim: 227
  dim: 227
}

# --- Stage 1: conv1 -> relu -> max-pool -> batch-norm -> LRN -------------
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "cls_conv1"
  # Named params ("conv1_w"/"conv1_b") enable weight sharing with any
  # sibling net that declares the same param names.
  # NOTE(review): decay_mult is 0.0 on the conv *weights* throughout this
  # net (usually 1.0); fc6 below uses decay_mult 1 — confirm intentional.
  param { name: "conv1_w" lr_mult: 1.0 decay_mult: 0.0 }
  param { name: "conv1_b" lr_mult: 2.0 decay_mult: 0.0 }
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "cls_conv1"
  top: "cls_conv1"  # in-place
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "cls_conv1"
  top: "cls_pool1"
  pooling_param { pool: MAX kernel_size: 3 stride: 2 }
}
layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "cls_pool1"
  top: "cls_pool1"  # in-place
  # The three BatchNorm blobs (mean, variance, moving-average factor) are
  # accumulated statistics, not learned weights, so lr_mult must be 0.
  param { name: "bn1_w1" lr_mult: 0 }
  param { name: "bn1_w2" lr_mult: 0 }
  param { name: "bn1_w3" lr_mult: 0 }
  # use_global_stats: true => normalize with the stored running statistics
  # (inference mode) rather than per-minibatch statistics.
  batch_norm_param { use_global_stats: true }
  # NOTE(review): no paired Scale layer after any BatchNorm in this net —
  # confirm the missing learned gamma/beta is intentional.
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "cls_pool1"
  top: "cls_norm1"
  lrn_param { local_size: 5 alpha: 0.0001 beta: 0.75 }
}

# --- Stage 2: conv2 -> relu -> max-pool -> LRN -> batch-norm -------------
# NOTE(review): stage 1 applies BN before LRN, stage 2 applies LRN before
# BN — confirm the ordering difference is intentional.
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "cls_norm1"
  top: "cls_conv2"
  param { name: "conv2_w" lr_mult: 1.0 decay_mult: 0.0 }
  param { name: "conv2_b" lr_mult: 2.0 decay_mult: 0.0 }
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2  # split into 2 groups (AlexNet-style grouped convolution)
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "cls_conv2"
  top: "cls_conv2"  # in-place
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "cls_conv2"
  top: "cls_pool2"
  pooling_param { pool: MAX kernel_size: 3 stride: 2 }
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "cls_pool2"
  top: "cls_norm2"
  lrn_param { local_size: 5 alpha: 0.0001 beta: 0.75 }
}
layer {
  name: "bn2"
  type: "BatchNorm"
  bottom: "cls_norm2"
  top: "cls_norm2"  # in-place
  param { name: "bn2_w1" lr_mult: 0 }
  param { name: "bn2_w2" lr_mult: 0 }
  param { name: "bn2_w3" lr_mult: 0 }
  batch_norm_param { use_global_stats: true }
}

# --- Stage 3: conv3 -> relu -> batch-norm (no pooling) -------------------
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "cls_norm2"
  top: "cls_conv3"
  param { name: "conv3_w" lr_mult: 1.0 decay_mult: 0.0 }
  param { name: "conv3_b" lr_mult: 2.0 decay_mult: 0.0 }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "cls_conv3"
  top: "cls_conv3"  # in-place
}
layer {
  name: "bn3"
  type: "BatchNorm"
  bottom: "cls_conv3"
  top: "cls_conv3"  # in-place
  param { name: "bn3_w1" lr_mult: 0 }
  param { name: "bn3_w2" lr_mult: 0 }
  param { name: "bn3_w3" lr_mult: 0 }
  batch_norm_param { use_global_stats: true }
}

# --- Stage 4: conv4 -> relu -> batch-norm --------------------------------
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "cls_conv3"
  top: "cls_conv4"
  param { name: "conv4_w" lr_mult: 1.0 decay_mult: 0.0 }
  param { name: "conv4_b" lr_mult: 2.0 decay_mult: 0.0 }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "cls_conv4"
  top: "cls_conv4"  # in-place
}
layer {
  name: "bn4"
  type: "BatchNorm"
  bottom: "cls_conv4"
  top: "cls_conv4"  # in-place
  param { name: "bn4_w1" lr_mult: 0 }
  param { name: "bn4_w2" lr_mult: 0 }
  param { name: "bn4_w3" lr_mult: 0 }
  batch_norm_param { use_global_stats: true }
}

# --- Stage 5: conv5 -> relu -> max-pool -> batch-norm -> 0.5 scale -------
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "cls_conv4"
  top: "cls_conv5"
  param { name: "conv5_w" lr_mult: 1.0 decay_mult: 0.0 }
  param { name: "conv5_b" lr_mult: 2.0 decay_mult: 0.0 }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler { type: "gaussian" std: 0.01 }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "cls_conv5"
  top: "cls_conv5"  # in-place
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "cls_conv5"
  top: "cls_pool5"
  pooling_param { pool: MAX kernel_size: 3 stride: 2 }
}
layer {
  name: "bn5"
  type: "BatchNorm"
  bottom: "cls_pool5"
  top: "cls_pool5"  # in-place
  param { name: "bn5_w1" lr_mult: 0 }
  param { name: "bn5_w2" lr_mult: 0 }
  param { name: "bn5_w3" lr_mult: 0 }
  batch_norm_param { use_global_stats: true }
}
layer {
  # Multiplies activations by 0.5 in place. The name suggests this stands
  # in for a dropout layer (scaling by the keep probability at inference)
  # — presumably deploy-time compensation; confirm against the train net.
  name: "drop-pool5"
  type: "Power"
  bottom: "cls_pool5"
  top: "cls_pool5"  # in-place
  power_param { scale: 0.5 }
}
# --- Stage 6: fully connected fc6 -> relu -> batch-norm ------------------
layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "cls_pool5"
  top: "cls_fc6"
  # Named params allow weight sharing with a sibling net; unlike the conv
  # layers above, the fc6 weights do get weight decay (decay_mult: 1).
  param { name: "fc6_w" lr_mult: 1 decay_mult: 1 }
  param { name: "fc6_b" lr_mult: 2 decay_mult: 0 }
  inner_product_param {
    num_output: 4096
    weight_filler { type: "gaussian" std: 0.005 }
    bias_filler { type: "constant" value: 0 }
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "cls_fc6"
  top: "cls_fc6"  # in-place
}
layer {
  name: "bn6"
  type: "BatchNorm"
  bottom: "cls_fc6"
  top: "cls_fc6"  # in-place
  # BatchNorm blobs (mean, variance, moving-average factor) are computed
  # statistics, so their learning rates are pinned to 0.
  param { name: "bn6_w1" lr_mult: 0 }
  param { name: "bn6_w2" lr_mult: 0 }
  param { name: "bn6_w3" lr_mult: 0 }
  # Inference mode: normalize with stored running statistics.
  batch_norm_param { use_global_stats: true }
}