Intel® Distribution of OpenVINO™ Toolkit
Community assistance about the Intel® Distribution of OpenVINO™ toolkit, OpenCV, and all aspects of computer vision on Intel® platforms.

Training a network for semantic segmentation

idata
Employee

Hi,

 

Is the Movidius stick suitable for training a semantic segmentation network such as U-Net, for example?

 

jeanpat

idata
Employee

@jeanpat The Movidius NCS is meant for low-power inference at the edge, not training. You can train your networks on the platform of your choice and then run the model on the NCS; just make sure your CNN is trained with the Caffe framework and sticks to the layers mentioned in our release notes, e.g. https://ncsforum.movidius.com/discussion/118/movidius-nc-sdk-1-07-07-with-raspberry-pi-support >> NC SDK 1.07.07 Release Notes.
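For reference, a minimal sketch of that deploy flow with the NCSDK 1.x Python API, assuming the trained Caffe model has already been compiled to a graph file with mvNCCompile (the file name, input size, and zero image below are placeholders, not taken from this thread):

# Minimal NCSDK 1.x inference sketch; 'graph' is the file produced by
# mvNCCompile, and the zero image is just a stand-in for a real input.
import numpy as np
from mvnc import mvncapi as mvnc

devices = mvnc.EnumerateDevices()           # attached NCS devices
device = mvnc.Device(devices[0])
device.OpenDevice()

with open('graph', 'rb') as f:              # compiled graph blob
    graph = device.AllocateGraph(f.read())

img = np.zeros((281, 500, 3), np.float16)   # placeholder HxWxC input
graph.LoadTensor(img, 'user object')        # queue the inference
output, _ = graph.GetResult()               # blocking read of the result

graph.DeallocateGraph()
device.CloseDevice()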

idata
Employee

Hi,

 

I am trying to use a Caffe-trained fully convolutional network (FCN) for segmentation and get some errors when using mvNCCompile.pyc. I understand that the Movidius is designed for CNNs, but an FCN is more or less the same, and all the layers the network uses are supposed to be supported according to the release notes.

 

After training (I use DIGITS 5), I downloaded the model; DIGITS saves a .caffemodel and a deploy.prototxt, which I passed to mvNCCompile.pyc.
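Roughly, the compile step looks like the following (the weights filename is just a placeholder, not the actual DIGITS snapshot name):

# Rough sketch of the compile step; "model.caffemodel" stands in for the
# snapshot that DIGITS actually produced.
import subprocess

subprocess.run(
    ["python3", "mvNCCompile.pyc", "deploy.prototxt",
     "-w", "model.caffemodel",   # trained weights downloaded from DIGITS
     "-s", "12",                 # SHAVE cores to use
     "-o", "graph"],             # output graph file for the NCS
    check=True,
)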

 

The error I get: [Error 17] Toolkit Error: Internal Error: Could not build graph. Missing link: data_preprocessed.
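In case it helps with debugging, here is a quick way to dump every layer's bottom/top blob names from the deploy.prototxt (assuming pycaffe is installed) and check how data_preprocessed is linked:

# List each layer's bottom/top blobs to see where "data_preprocessed"
# is produced and consumed. Requires pycaffe for caffe.proto.
from google.protobuf import text_format
from caffe.proto import caffe_pb2

net = caffe_pb2.NetParameter()
with open('deploy.prototxt') as f:
    text_format.Merge(f.read(), net)

for layer in net.layer:
    print(layer.name, 'bottom:', list(layer.bottom), 'top:', list(layer.top))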

 

Is there something wrong with my file or am I doing something wrong?

 

Thank you

 

The prototxt looks like this:

 

name: "TEST"

 

input: "data"

 

input_shape {

 

dim: 1

 

dim: 3

 

dim: 281

 

dim: 500

 

}

 

layer {

 

name: "shift"

 

type: "Power"

 

bottom: "data"

 

top: "data_preprocessed"

 

power_param {

 

shift: -116.0

 

}

 

}

 

layer {

 

name: "conv1"

 

type: "Convolution"

 

bottom: "data_preprocessed"

 

top: "conv1"

 

convolution_param {

 

num_output: 96

 

pad: 100

 

kernel_size: 11

 

group: 1

 

stride: 4

 

}

 

}

 

layer {

 

name: "relu1"

 

type: "ReLU"

 

bottom: "conv1"

 

top: "conv1"

 

}

 

layer {

 

name: "pool1"

 

type: "Pooling"

 

bottom: "conv1"

 

top: "pool1"

 

pooling_param {

 

pool: MAX

 

kernel_size: 3

 

stride: 2

 

}

 

}

 

layer {

 

name: "norm1"

 

type: "LRN"

 

bottom: "pool1"

 

top: "norm1"

 

lrn_param {

 

local_size: 5

 

alpha: 0.0001

 

beta: 0.75

 

}

 

}

 

layer {

 

name: "conv2"

 

type: "Convolution"

 

bottom: "norm1"

 

top: "conv2"

 

convolution_param {

 

num_output: 256

 

pad: 2

 

kernel_size: 5

 

group: 2

 

stride: 1

 

}

 

}

 

layer {

 

name: "relu2"

 

type: "ReLU"

 

bottom: "conv2"

 

top: "conv2"

 

}

 

layer {

 

name: "pool2"

 

type: "Pooling"

 

bottom: "conv2"

 

top: "pool2"

 

pooling_param {

 

pool: MAX

 

kernel_size: 3

 

stride: 2

 

}

 

}

 

layer {

 

name: "norm2"

 

type: "LRN"

 

bottom: "pool2"

 

top: "norm2"

 

lrn_param {

 

local_size: 5

 

alpha: 0.0001

 

beta: 0.75

 

}

 

}

 

layer {

 

name: "conv3"

 

type: "Convolution"

 

bottom: "norm2"

 

top: "conv3"

 

convolution_param {

 

num_output: 384

 

pad: 1

 

kernel_size: 3

 

group: 1

 

stride: 1

 

}

 

}

 

layer {

 

name: "relu3"

 

type: "ReLU"

 

bottom: "conv3"

 

top: "conv3"

 

}

 

layer {

 

name: "conv4"

 

type: "Convolution"

 

bottom: "conv3"

 

top: "conv4"

 

convolution_param {

 

num_output: 384

 

pad: 1

 

kernel_size: 3

 

group: 2

 

stride: 1

 

}

 

}

 

layer {

 

name: "relu4"

 

type: "ReLU"

 

bottom: "conv4"

 

top: "conv4"

 

}

 

layer {

 

name: "conv5"

 

type: "Convolution"

 

bottom: "conv4"

 

top: "conv5"

 

convolution_param {

 

num_output: 256

 

pad: 1

 

kernel_size: 3

 

group: 2

 

stride: 1

 

}

 

}

 

layer {

 

name: "relu5"

 

type: "ReLU"

 

bottom: "conv5"

 

top: "conv5"

 

}

 

layer {

 

name: "pool5"

 

type: "Pooling"

 

bottom: "conv5"

 

top: "pool5"

 

pooling_param {

 

pool: MAX

 

kernel_size: 3

 

stride: 2

 

}

 

}

 

layer {

 

name: "fc6"

 

type: "Convolution"

 

bottom: "pool5"

 

top: "fc6"

 

convolution_param {

 

num_output: 4096

 

pad: 0

 

kernel_size: 6

 

group: 1

 

stride: 1

 

}

 

}

 

layer {

 

name: "relu6"

 

type: "ReLU"

 

bottom: "fc6"

 

top: "fc6"

 

}

 

layer {

 

name: "drop6"

 

type: "Dropout"

 

bottom: "fc6"

 

top: "fc6"

 

dropout_param {

 

dropout_ratio: 0.5

 

}

 

}

 

layer {

 

name: "fc7"

 

type: "Convolution"

 

bottom: "fc6"

 

top: "fc7"

 

convolution_param {

 

num_output: 4096

 

pad: 0

 

kernel_size: 1

 

group: 1

 

stride: 1

 

}

 

}

 

layer {

 

name: "relu7"

 

type: "ReLU"

 

bottom: "fc7"

 

top: "fc7"

 

}

 

layer {

 

name: "drop7"

 

type: "Dropout"

 

bottom: "fc7"

 

top: "fc7"

 

dropout_param {

 

dropout_ratio: 0.5

 

}

 

}

 

layer {

 

name: "score_fr"

 

type: "Convolution"

 

bottom: "fc7"

 

top: "score_fr"

 

param {

 

lr_mult: 1.0

 

decay_mult: 1.0

 

}

 

param {

 

lr_mult: 2.0

 

decay_mult: 0.0

 

}

 

convolution_param {

 

num_output: 21

 

pad: 0

 

kernel_size: 1

 

}

 

}

 

layer {

 

name: "upscore"

 

type: "Deconvolution"

 

bottom: "score_fr"

 

top: "upscore"

 

param {

 

lr_mult: 0.0

 

}

 

convolution_param {

 

num_output: 21

 

bias_term: false

 

kernel_size: 63

 

group: 21

 

stride: 32

 

weight_filler {

 

type: "bilinear"

 

}

 

}

 

}

 

layer {

 

name: "score"

 

type: "Crop"

 

bottom: "upscore"

 

bottom: "data"

 

top: "score"

 

crop_param {

 

axis: 2

 

offset: 18

 

}

 

}
idata
Employee

@klm try to use this prototxt:

 

name: "TEST" input: "data" input_shape { dim: 1 dim: 3 dim: 281 dim: 500 } layer { name: "shift" type: "Power" bottom: "data" top: "shift" power_param { shift: -116.0 } } layer { name: "conv1" type: "Convolution" bottom: "shift" top: "conv1" convolution_param { num_output: 96 pad: 100 kernel_size: 11 group: 1 stride: 4 } } layer { name: "relu1" type: "ReLU" bottom: "conv1" top: "conv1" } layer { name: "pool1" type: "Pooling" bottom: "conv1" top: "pool1" pooling_param { pool: MAX kernel_size: 3 stride: 2 } } layer { name: "norm1" type: "LRN" bottom: "pool1" top: "norm1" lrn_param { local_size: 5 alpha: 0.0001 beta: 0.75 } } layer { name: "conv2" type: "Convolution" bottom: "norm1" top: "conv2" convolution_param { num_output: 256 pad: 2 kernel_size: 5 group: 2 stride: 1 } } layer { name: "relu2" type: "ReLU" bottom: "conv2" top: "conv2" } layer { name: "pool2" type: "Pooling" bottom: "conv2" top: "pool2" pooling_param { pool: MAX kernel_size: 3 stride: 2 } } layer { name: "norm2" type: "LRN" bottom: "pool2" top: "norm2" lrn_param { local_size: 5 alpha: 0.0001 beta: 0.75 } } layer { name: "conv3" type: "Convolution" bottom: "norm2" top: "conv3" convolution_param { num_output: 384 pad: 1 kernel_size: 3 group: 1 stride: 1 } } layer { name: "relu3" type: "ReLU" bottom: "conv3" top: "conv3" } layer { name: "conv4" type: "Convolution" bottom: "conv3" top: "conv4" convolution_param { num_output: 384 pad: 1 kernel_size: 3 group: 2 stride: 1 } } layer { name: "relu4" type: "ReLU" bottom: "conv4" top: "conv4" } layer { name: "conv5" type: "Convolution" bottom: "conv4" top: "conv5" convolution_param { num_output: 256 pad: 1 kernel_size: 3 group: 2 stride: 1 } } layer { name: "relu5" type: "ReLU" bottom: "conv5" top: "conv5" } layer { name: "pool5" type: "Pooling" bottom: "conv5" top: "pool5" pooling_param { pool: MAX kernel_size: 3 stride: 2 } } layer { name: "fc6" type: "Convolution" bottom: "pool5" top: "fc6" convolution_param { num_output: 4096 pad: 0 kernel_size: 6 group: 1 stride: 1 } } layer { name: "relu6" type: "ReLU" bottom: "fc6" top: "fc6" } layer { name: "drop6" type: "Dropout" bottom: "fc6" top: "fc6" dropout_param { dropout_ratio: 0.5 } } layer { name: "fc7" type: "Convolution" bottom: "fc6" top: "fc7" convolution_param { num_output: 4096 pad: 0 kernel_size: 1 group: 1 stride: 1 } } layer { name: "relu7" type: "ReLU" bottom: "fc7" top: "fc7" } layer { name: "drop7" type: "Dropout" bottom: "fc7" top: "fc7" dropout_param { dropout_ratio: 0.5 } } layer { name: "score_fr" type: "Convolution" bottom: "fc7" top: "score_fr" param { lr_mult: 1.0 decay_mult: 1.0 } param { lr_mult: 2.0 decay_mult: 0.0 } convolution_param { num_output: 21 pad: 0 kernel_size: 1 } } layer { name: "upscore" type: "Deconvolution" bottom: "score_fr" top: "upscore" param { lr_mult: 0.0 } convolution_param { num_output: 21 bias_term: false kernel_size: 63 group: 21 stride: 32 weight_filler { type: "bilinear" } } } layer { name: "score" type: "Crop" bottom: "upscore" bottom: "data" top: "score" crop_param { axis: 2 offset: 18 }
