Intel® Distribution of OpenVINO™ Toolkit
Community assistance about the Intel® Distribution of OpenVINO™ toolkit, OpenCV, and all aspects of computer vision-related topics on Intel® platforms.
6404 Discussions

[Error 25] Myriad Error: "Cannot Allocate space for convolution.".

idata
Employee
1,097 Views

Hi, has anyone encountered the following error: [Error 25] Myriad Error: "Cannot Allocate space for convolution.".

 

When I execute: python3 ./mvNCCheck.pyc ~/Documents/dex/age.prototxt -w ~/Documents/dex/dex_imdb.wiki.caffemodel

 

This error is followed after a warning:

 

[libprotobuf WARNING google/protobuf/io/coded_stream.cc:537] Reading dangerously large protocol message. If the message turns out to be larger than 2147483647 bytes, parsing will be halted for security reasons. To increase the limit (or to disable these warnings), see CodedInputStream::SetTotalBytesLimit() in google/protobuf/io/coded_stream.h.

 

[libprotobuf WARNING google/protobuf/io/coded_stream.cc:78] The total number of bytes read was 538700275

 

USB: Transferring Data…

 

I'm using a pre-trained CNN.

 

this is the prototxt structure

 

name: "VGG_ILSVRC_16_layers"

 

input: "data"

 

input_shape {

 

dim: 1

 

dim: 3

 

dim: 224

 

dim: 224

 

}

 

layer {

 

bottom: "data"

 

top: "conv1_1"

 

name: "conv1_1"

 

type: "Convolution"

 

convolution_param {

 

num_output: 64

 

pad: 1

 

kernel_size: 3

 

}

 

}

 

layer {

 

bottom: "conv1_1"

 

top: "conv1_1"

 

name: "relu1_1"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "conv1_1"

 

top: "conv1_2"

 

name: "conv1_2"

 

type: "Convolution"

 

convolution_param {

 

num_output: 64

 

pad: 1

 

kernel_size: 3

 

}

 

}

 

layer {

 

bottom: "conv1_2"

 

top: "conv1_2"

 

name: "relu1_2"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "conv1_2"

 

top: "pool1"

 

name: "pool1"

 

type: "Pooling"

 

pooling_param {

 

pool: MAX

 

kernel_size: 2

 

stride: 2

 

}

 

}

 

layer {

 

bottom: "pool1"

 

top: "conv2_1"

 

name: "conv2_1"

 

type: "Convolution"

 

convolution_param {

 

num_output: 128

 

pad: 1

 

kernel_size: 3

 

}

 

}

 

layer {

 

bottom: "conv2_1"

 

top: "conv2_1"

 

name: "relu2_1"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "conv2_1"

 

top: "conv2_2"

 

name: "conv2_2"

 

type: "Convolution"

 

convolution_param {

 

num_output: 128

 

pad: 1

 

kernel_size: 3

 

}

 

}

 

layer {

 

bottom: "conv2_2"

 

top: "conv2_2"

 

name: "relu2_2"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "conv2_2"

 

top: "pool2"

 

name: "pool2"

 

type: "Pooling"

 

pooling_param {

 

pool: MAX

 

kernel_size: 2

 

stride: 2

 

}

 

}

 

layer {

 

bottom: "pool2"

 

top: "conv3_1"

 

name: "conv3_1"

 

type: "Convolution"

 

convolution_param {

 

num_output: 256

 

pad: 1

 

kernel_size: 3

 

}

 

}

 

layer {

 

bottom: "conv3_1"

 

top: "conv3_1"

 

name: "relu3_1"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "conv3_1"

 

top: "conv3_2"

 

name: "conv3_2"

 

type: "Convolution"

 

convolution_param {

 

num_output: 256

 

pad: 1

 

kernel_size: 3

 

}

 

}

 

layer {

 

bottom: "conv3_2"

 

top: "conv3_2"

 

name: "relu3_2"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "conv3_2"

 

top: "conv3_3"

 

name: "conv3_3"

 

type: "Convolution"

 

convolution_param {

 

num_output: 256

 

pad: 1

 

kernel_size: 3

 

}

 

}

 

layer {

 

bottom: "conv3_3"

 

top: "conv3_3"

 

name: "relu3_3"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "conv3_3"

 

top: "pool3"

 

name: "pool3"

 

type: "Pooling"

 

pooling_param {

 

pool: MAX

 

kernel_size: 2

 

stride: 2

 

}

 

}

 

layer {

 

bottom: "pool3"

 

top: "conv4_1"

 

name: "conv4_1"

 

type: "Convolution"

 

convolution_param {

 

num_output: 512

 

pad: 1

 

kernel_size: 3

 

}

 

}

 

layer {

 

bottom: "conv4_1"

 

top: "conv4_1"

 

name: "relu4_1"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "conv4_1"

 

top: "conv4_2"

 

name: "conv4_2"

 

type: "Convolution"

 

convolution_param {

 

num_output: 512

 

pad: 1

 

kernel_size: 3

 

}

 

}

 

layer {

 

bottom: "conv4_2"

 

top: "conv4_2"

 

name: "relu4_2"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "conv4_2"

 

top: "conv4_3"

 

name: "conv4_3"

 

type: "Convolution"

 

convolution_param {

 

num_output: 512

 

pad: 1

 

kernel_size: 3

 

}

 

}

 

layer {

 

bottom: "conv4_3"

 

top: "conv4_3"

 

name: "relu4_3"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "conv4_3"

 

top: "pool4"

 

name: "pool4"

 

type: "Pooling"

 

pooling_param {

 

pool: MAX

 

kernel_size: 2

 

stride: 2

 

}

 

}

 

layer {

 

bottom: "pool4"

 

top: "conv5_1"

 

name: "conv5_1"

 

type: "Convolution"

 

convolution_param {

 

num_output: 512

 

pad: 1

 

kernel_size: 3

 

}

 

}

 

layer {

 

bottom: "conv5_1"

 

top: "conv5_1"

 

name: "relu5_1"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "conv5_1"

 

top: "conv5_2"

 

name: "conv5_2"

 

type: "Convolution"

 

convolution_param {

 

num_output: 512

 

pad: 1

 

kernel_size: 3

 

}

 

}

 

layer {

 

bottom: "conv5_2"

 

top: "conv5_2"

 

name: "relu5_2"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "conv5_2"

 

top: "conv5_3"

 

name: "conv5_3"

 

type: "Convolution"

 

convolution_param {

 

num_output: 512

 

pad: 1

 

kernel_size: 3

 

}

 

}

 

layer {

 

bottom: "conv5_3"

 

top: "conv5_3"

 

name: "relu5_3"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "conv5_3"

 

top: "pool5"

 

name: "pool5"

 

type: "Pooling"

 

pooling_param {

 

pool: MAX

 

kernel_size: 2

 

stride: 2

 

}

 

}

 

layer {

 

bottom: "pool5"

 

top: "fc6"

 

name: "fc6"

 

type: "InnerProduct"

 

inner_product_param {

 

num_output: 4096

 

}

 

}

 

layer {

 

bottom: "fc6"

 

top: "fc6"

 

name: "relu6"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "fc6"

 

top: "fc6"

 

name: "drop6"

 

type: "Dropout"

 

dropout_param {

 

dropout_ratio: 0.5

 

}

 

}

 

layer {

 

bottom: "fc6"

 

top: "fc7"

 

name: "fc7"

 

type: "InnerProduct"

 

inner_product_param {

 

num_output: 4096

 

}

 

}

 

layer {

 

bottom: "fc7"

 

top: "fc7"

 

name: "relu7"

 

type: "ReLU"

 

}

 

layer {

 

bottom: "fc7"

 

top: "fc7"

 

name: "drop7"

 

type: "Dropout"

 

dropout_param {

 

dropout_ratio: 0.5

 

}

 

}

 

layer {

 

bottom: "fc7"

 

top: "fc8-101"

 

name: "fc8-101"

 

type: "InnerProduct"

 

inner_product_param {

 

num_output: 101

 

}

 

}

 

layer {

 

bottom: "fc8-101"

 

top: "prob"

 

name: "prob"

 

type: "Softmax"

 

}

 

Thanks a million!

0 Kudos
8 Replies
idata
Employee
768 Views

@4nd4 Thank you for bringing this to our attention. This is a known bug. We are looking into it and will notify you when there is a fix.

0 Kudos
idata
Employee
768 Views

are there any updates for the bug fix? @Tome_at_Intel

0 Kudos
idata
Employee
768 Views

Are there any bug fixes? I also have this problem.

0 Kudos
idata
Employee
768 Views

@Tome_at_Intel Is there any update on this? What kind of structure leads to this error?

0 Kudos
idata
Employee
768 Views

@gauravmoon @yairh @csarron @4nd4 Thanks for the responses. Currently, we are testing a release candidate and while we can't give a ETA for the fix, I will be updating this post ASAP when the fix is released. Thank you for your patience!

0 Kudos
idata
Employee
768 Views

@4nd4 Please try the new NCSDK version 1.10.01 with support for VGG16 and let us know your results

0 Kudos
idata
Employee
768 Views

Hi @Tome_at_Intel I downloaded the extended examples from https://github.com/movidius/ncappzoo which do not include VGG16.

 

The version on GitHub https://github.com/movidius/ncsdk/tree/master/examples also doesn't include VGG16. Where can I get a VGG16 example from?

 

Thanks

0 Kudos
idata
Employee
768 Views

@preetibindu Unfortunately there isn't a VGG sample code available at the moment.

0 Kudos
Reply