Intel® Distribution of OpenVINO™ Toolkit
Community assistance about the Intel® Distribution of OpenVINO™ toolkit, OpenCV, and all aspects of computer vision on Intel® platforms.

Why is "Layer ___ forced to im2col_v2, because its output is used in concat" compile mesage given?

idata
Employee

What does this message mean? This portion of the model doesn't even have a Concat layer. Should I be concerned? The prototxt below is a modified version of the beginning layers of a tiny_yolo model; the compiler message refers to the final layer, "conv15/1x1".
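For what it's worth, here is how I double-checked that there is no Concat layer anywhere in the file (a quick Python sketch, assuming pycaffe's protobuf bindings are importable; junk.prototxt is the file I compile below):

from google.protobuf import text_format
from caffe.proto import caffe_pb2

# Parse the prototxt and list the distinct layer types it contains.
net = caffe_pb2.NetParameter()
with open('junk.prototxt') as f:
    text_format.Merge(f.read(), net)
print(sorted({layer.type for layer in net.layer}))
# Expected: ['BatchNorm', 'Convolution', 'Pooling', 'ReLU', 'Scale'], so no Concat.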

 

name: "tiny_ncsdk_dts" input: "data" input_shape { dim:1 dim:1 dim:224 dim:224 } layer { name: "conv1" type: "Convolution" bottom: "data" top: "conv1" convolution_param { num_output: 32 kernel_size: 3 pad: 1 stride: 2 bias_term: false weight_filler { type: "xavier" } } } layer { name: "bn1" type: "BatchNorm" bottom: "conv1" top: "bn1" } layer { name: "relu1" type: "ReLU" bottom: "bn1" top: "relu1" relu_param{ negative_slope: 0.1 } } layer{ name: "conv2" type: "Convolution" bottom: "relu1" top: "conv2" convolution_param { num_output: 32 kernel_size: 3 pad: 1 stride: 2 bias_term: false weight_filler { type: "xavier" } } } layer { name: "bn2" type: "BatchNorm" bottom: "conv2" top: "bn2" } layer { name: "relu2" type: "ReLU" bottom: "bn2" top: "relu2" relu_param{ negative_slope: 0.1 } } layer{ name: "conv3" type: "Convolution" bottom: "relu2" top: "conv3" convolution_param { num_output: 64 kernel_size: 3 pad: 1 stride: 1 bias_term: false weight_filler { type: "xavier" } } } layer { name: "bn3" type: "BatchNorm" bottom: "conv3" top: "bn3" } layer { name: "relu3" type: "ReLU" bottom: "bn3" top: "relu3" relu_param{ negative_slope: 0.1 } } layer{ name: "conv5" type: "Convolution" bottom: "relu3" top: "conv5" convolution_param { num_output: 64 kernel_size: 3 pad: 1 stride: 2 bias_term: false weight_filler { type: "xavier" } } } layer { name: "bn5" type: "BatchNorm" bottom: "conv5" top: "bn5" } layer { name: "relu5" type: "ReLU" bottom: "bn5" top: "relu5" relu_param{ negative_slope: 0.1 } } layer{ name: "conv6" type: "Convolution" bottom: "relu5" top: "conv6" convolution_param { num_output: 128 kernel_size: 3 pad: 1 stride: 1 bias_term: false weight_filler { type: "xavier" } } } layer { name: "bn6" type: "BatchNorm" bottom: "conv6" top: "bn6" } layer { name: "relu6" type: "ReLU" bottom: "bn6" top: "relu6" relu_param{ negative_slope: 0.1 } } layer{ name: "conv8" type: "Convolution" bottom: "bn6" top: "conv8" convolution_param { num_output: 128 kernel_size: 3 pad: 1 stride: 1 bias_term: false weight_filler { type: "xavier" } } } layer { name: "bn8" type: "BatchNorm" bottom: "conv8" top: "bn8" } layer { name: "relu8" type: "ReLU" bottom: "bn8" top: "relu8" relu_param{ negative_slope: 0.1 } } layer { name: "pool8" type: "Pooling" bottom: "relu8" top: "pool8" pooling_param { pool: MAX kernel_size: 2 stride: 2 } } layer{ name: "conv9" type: "Convolution" bottom: "pool8" top: "conv9" convolution_param { num_output: 256 kernel_size: 3 pad: 1 stride: 1 bias_term: false weight_filler { type: "xavier" } } } layer { name: "bn9" type: "BatchNorm" bottom: "conv9" top: "bn9" } layer { name: "relu9" type: "ReLU" bottom: "bn9" top: "relu9" relu_param{ negative_slope: 0.1 } } layer{ name: "conv11" type: "Convolution" bottom: "relu9" top: "conv11" convolution_param { num_output: 256 kernel_size: 3 pad: 1 stride: 1 bias_term: false weight_filler { type: "xavier" } } param { name: "conv11_w" } } layer { name: "relu11" type: "ReLU" bottom: "conv11" top: "relu11" relu_param{ negative_slope: 0.1 } } layer{ name: "conv11_2" type: "Convolution" bottom: "relu11" top: "conv11_2" convolution_param { num_output: 256 kernel_size: 3 pad: 1 stride: 1 bias_term: false weight_filler { type: "xavier" } } param { name: "conv11_w" } } layer { name: "bn11_2" type: "BatchNorm" bottom: "conv11_2" top: "bn11_2" } layer { name: "relu11_2" type: "ReLU" bottom: "bn11_2" top: "relu11_2" relu_param{ negative_slope: 0.1 } } ##### layer { name: "conv14/3x3" type: "Convolution" bottom: "relu11_2" top: "conv14/3x3" param { lr_mult: 1 decay_mult: 1 } 
convolution_param { num_output: 512 pad: 1 kernel_size: 3 stride: 1 group: 256 weight_filler { type: "msra" } bias_term: false engine: CAFFE } } layer { name: "conv14/3x3/bn" type: "BatchNorm" bottom: "conv14/3x3" top: "conv14/3x3/bn" } layer { name: "conv14/3x3/scale" type: "Scale" bottom: "conv14/3x3/bn" top: "conv14/3x3/scale" scale_param { bias_term: true } } layer { name: "conv14/relu1" type: "ReLU" bottom: "conv14/3x3/scale" top: "conv14/relu1" relu_param{ negative_slope: 0.1 } } layer { name: "conv14/1x1" type: "Convolution" bottom: "conv14/relu1" top: "conv14/1x1" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 512 pad: 0 kernel_size: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } } layer { name: "conv14/1x1/bn" type: "BatchNorm" bottom: "conv14/1x1" top: "conv14/1x1/bn" } layer { name: "conv14/1x1/scale" type: "Scale" bottom: "conv14/1x1/bn" top: "conv14/1x1/scale" scale_param { bias_term: true } } layer { name: "conv14/relu2" type: "ReLU" bottom: "conv14/1x1/scale" top: "conv14/1x1/relu" relu_param{ negative_slope: 0.1 } } ##### # Had typo below, in which layer name had cap X: "conv15/3X3" layer { name: "conv15/3x3" type: "Convolution" bottom: "conv14/1x1/relu" top: "conv15/3x3" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 512 pad: 1 kernel_size: 3 stride: 1 group: 512 #dilation: 2 weight_filler { type: "msra" } bias_term: false engine: CAFFE } } layer { name: "conv15/3x3/bn" type: "BatchNorm" bottom: "conv15/3x3" top: "conv15/3x3/bn" } layer { name: "conv15/3x3/scale" type: "Scale" bottom: "conv15/3x3/bn" top: "conv15/3x3/scale" scale_param { bias_term: true } } layer { name: "conv15/relu1" type: "ReLU" bottom: "conv15/3x3/scale" top: "conv15/relu1" relu_param{ negative_slope: 0.1 } } layer { name: "conv15/1x1" type: "Convolution" bottom: "conv15/relu1" top: "conv15/1x1" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 512 pad: 0 kernel_size: 1 stride: 1 weight_filler { type: "msra" } bias_term: false } }

 

root@synapse:~/Code/DeepTextSpotter/models# mvNCCompile -s 12 -o temp.graph junk.prototxt
mvNCCompile v02.00, Copyright @ Intel Corporation 2017
****** WARNING: using empty weights ******
Layer conv15/1x1 forced to im2col_v2, because its output is used in concat
/usr/local/bin/ncsdk/Controllers/FileIO.py:65: UserWarning: You are using a large type. Consider reducing your data sizes for best performance
Blob generated
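For reference, my understanding of the im2col part of the message: im2col ("image to column") unrolls each kernel-sized patch of the input into a column of a matrix, so the whole convolution reduces to a single matrix multiply. The numpy sketch below is only my own illustration of the generic technique, not NCSDK's actual im2col_v2 implementation, and it still doesn't explain where the "concat" comes from.

import numpy as np

def im2col(x, k, stride=1):
    # Unroll every k-by-k patch of the 2-D array x into one column.
    h, w = x.shape
    out_h = (h - k) // stride + 1
    out_w = (w - k) // stride + 1
    cols = np.empty((k * k, out_h * out_w))
    for i in range(out_h):
        for j in range(out_w):
            patch = x[i * stride:i * stride + k, j * stride:j * stride + k]
            cols[:, i * out_w + j] = patch.ravel()
    return cols

# A "valid" 3x3 convolution over a 4x4 input becomes one matrix multiply:
x = np.arange(16, dtype=float).reshape(4, 4)
kernel = np.random.randn(3, 3)
y = (kernel.ravel() @ im2col(x, 3)).reshape(2, 2)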