Intel® Distribution of OpenVINO™ Toolkit
Community assistance for the Intel® Distribution of OpenVINO™ toolkit, OpenCV, and all aspects of computer vision on Intel® platforms.

Error compiling network

idata
Employee

Hi,

 

I'm trying to convert a Caffe network that contains an AbsVal layer, but I'm getting an error.

 

The network's git repository: https://github.com/guozhongluo/head-pose-estimation-and-face-landmark
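
For reference, the compile step I'm running looks roughly like this (a sketch only; assuming the NCSDK's mvNCCompile tool, with placeholder file names):

mvNCCompile deploy.prototxt -w weights.caffemodel -s 12 -o graph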

 

Can someone please advise?

 

Thanks,

 

Tsahi.
idata
Employee

@tsahim I can't see an AbsVal layer in this project, nor the weights (.caffemodel file). Please explain how you are converting this network for the NCS so we can triage the problem.

idata
Employee

Sorry, wrong link.

This is the correct repository: https://github.com/jiangwqcooler/face-landmark

And this is the original prototxt (before the input-shape modification):

 

name: "landmark"
input: "data"
input_dim: 1
input_dim: 1
input_dim: 60
input_dim: 60
# the actual net
# layer 1
layer {
  name: "Conv1"
  type: "Convolution"
  bottom: "data"
  top: "Conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 20
    pad: 2
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "xavier"
      std: 0.1
    }
    bias_filler {
      type: "constant"
      value: 0.2
    }
  }
}
layer {
  name: "ActivationTangH1"
  bottom: "Conv1"
  top: "ActivationTangH1"
  type: "TanH"
}
layer {
  name: "ActivationAbs1"
  bottom: "ActivationTangH1"
  top: "Abs1"
  type: "AbsVal"
}
layer {
  name: "Pool1"
  type: "Pooling"
  bottom: "Abs1"
  top: "Pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "Conv2"
  type: "Convolution"
  bottom: "Pool1"
  top: "Conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 48
    pad: 2
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "xavier"
      std: 0.1
    }
    bias_filler {
      type: "constant"
      value: 0.2
    }
  }
}
layer {
  name: "ActivationTangH2"
  bottom: "Conv2"
  top: "ActivationTangH2"
  type: "TanH"
}
layer {
  name: "ActivationAbs2"
  bottom: "ActivationTangH2"
  top: "Abs2"
  type: "AbsVal"
}
layer {
  name: "Pool2"
  type: "Pooling"
  bottom: "Abs2"
  top: "Pool2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
# layer 3
layer {
  name: "Conv3"
  type: "Convolution"
  bottom: "Pool2"
  top: "Conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 0
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
      std: 0.1
    }
    bias_filler {
      type: "constant"
      value: 0.2
    }
  }
}
layer {
  name: "ActivationTangH3"
  bottom: "Conv3"
  top: "ActivationTangH3"
  type: "TanH"
}
layer {
  name: "ActivationAbs3"
  bottom: "ActivationTangH3"
  top: "Abs3"
  type: "AbsVal"
}
layer {
  name: "Pool3"
  type: "Pooling"
  bottom: "Abs3"
  top: "Pool3"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
# layer 4
layer {
  name: "Conv4"
  type: "Convolution"
  bottom: "Pool3"
  top: "Conv4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 80
    pad: 0
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "xavier"
      std: 0.1
    }
    bias_filler {
      type: "constant"
      value: 0.2
    }
  }
}
layer {
  name: "ActivationTangH4"
  bottom: "Conv4"
  top: "ActivationTangH4"
  type: "TanH"
}
layer {
  name: "ActivationAbs4"
  bottom: "ActivationTangH4"
  top: "Abs4"
  type: "AbsVal"
}
#
layer {
  name: "Dense1"
  type: "InnerProduct"
  bottom: "Abs4"
  top: "Dense1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 512
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "ActivationTangH5"
  bottom: "Dense1"
  top: "ActivationTangH5"
  type: "TanH"
}
layer {
  name: "ActivationAbs5"
  bottom: "ActivationTangH5"
  top: "Abs5"
  type: "AbsVal"
}
layer {
  name: "Dense3"
  type: "InnerProduct"
  bottom: "Abs5"
  top: "Dense3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 136
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
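
(Note the repeating pattern above: every TanH is immediately followed by an AbsVal, so the effective activation is |tanh(x)|. The five AbsVal layers, Abs1 through Abs5, are presumably what the compiler rejects.)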
idata
Employee

@tsahim I believe that we may not yet support the Absolute Value layer. Can you post the error that you are receiving?
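
If AbsVal is indeed unsupported, one possible interim workaround (a sketch only, untested on the NCS, and not an official recommendation; the helper layer names below are hypothetical) is to rewrite each AbsVal using the identity abs(x) = ReLU(x) + ReLU(-x), built from layers that are more commonly supported:

# Hypothetical drop-in replacement for the first AbsVal layer
# (bottom: "ActivationTangH1", top: "Abs1"); abs(x) = ReLU(x) + ReLU(-x)
layer {
  name: "Abs1_pos"
  type: "ReLU"
  bottom: "ActivationTangH1"
  top: "Abs1_pos"
}
layer {
  name: "Abs1_negx"    # Power computes (shift + scale*x)^power = -x
  type: "Power"
  bottom: "ActivationTangH1"
  top: "Abs1_negx"
  power_param { power: 1 scale: -1 shift: 0 }
}
layer {
  name: "Abs1_neg"
  type: "ReLU"
  bottom: "Abs1_negx"
  top: "Abs1_neg"
}
layer {
  name: "Abs1"         # keeps the original top name, so Pool1 is untouched
  type: "Eltwise"
  bottom: "Abs1_pos"
  bottom: "Abs1_neg"
  top: "Abs1"
  eltwise_param { operation: SUM }
}

The same substitution would be repeated for Abs2 through Abs5. That said, please do post the exact error message so we can confirm which layer is failing.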
