Intel® Distribution of OpenVINO™ Toolkit
Community assistance about the Intel® Distribution of OpenVINO™ toolkit, OpenCV, and all aspects of computer vision on Intel® platforms.

Simple Custom TF Network

idata
Employee

Hi,

I created a simple TF network to compare inference results between TensorFlow and the NCS. It compiles, but the results from the two differ completely. Please see my code below. Note that I only initialize the weights and do not train, because I just want to analyze the computation. I have also attached the image on which I run the inference pass.

It would be helpful if someone could check my code. Thanks a lot!


Training Version


import numpy as np
import tensorflow as tf
import argparse
import sys
import tempfile
import os
import cv2


def gqnn(x):
    # Reshape to a 4-D tensor
    with tf.name_scope('reshape'):
        x_image = tf.reshape(x, [-1, 32, 32, 1])

    # Convolutional layer - 8 feature maps
    with tf.name_scope('conv1'):
        W_conv1 = weight_variable([5, 5, 1, 8])
        b_conv1 = bias_variable([8])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

    # Pooling layer
    with tf.name_scope('pool1'):
        h_pool1 = max_pool_2x2(h_conv1)

    # Flatten
    h_pool1_flat = tf.reshape(h_pool1, [-1, 16 * 16 * 8])

    # Dense
    with tf.name_scope('dense'):
        fc = tf.layers.dense(inputs=h_pool1_flat, units=2, activation=None)
        fc = tf.identity(fc, name='output')  # name the output node
    return fc


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def main(_):
    x = tf.placeholder(tf.float32, [32, 32], name="input")

    # Build the graph
    fc = gqnn(x)

    saver = tf.train.Saver(save_relative_paths=True)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        variables_names = [v.name for v in tf.trainable_variables()]

        train_writer = tf.summary.FileWriter(FLAGS.model_dir, sess.graph)
        save_path = saver.save(sess, FLAGS.model_dir + "/" + FLAGS.model_name + ".ckpt")
        tf.train.write_graph(sess.graph_def, FLAGS.model_dir, FLAGS.model_name + ".pb", as_text=False)

        # Run inference on the graph with randomly initialized weights on test.png
        test_img = cv2.imread('test.png').astype(np.float32)
        test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
        fc, tvars_vals = sess.run([fc, variables_names], feed_dict={x: test_img})
        print(fc)


if __name__ == '__main__':
    # current file is executed under a shell
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='data',
                        help='Directory for storing input data')
    parser.add_argument('--model_dir', type=str, default='tf_model',
                        help='Directory where the model files will be created')
    parser.add_argument('--model_name', type=str, default='model',
                        help='Name of the model that will be created')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)


Inference Version according to Instructions


import numpy as np
import tensorflow as tf
import argparse
import sys
import tempfile
import os


def gqnn(x):
    # Reshape to a 4-D tensor
    with tf.name_scope('reshape'):
        x_image = tf.reshape(x, [-1, 32, 32, 1])

    # Convolutional layer - 8 feature maps
    with tf.name_scope('conv1'):
        W_conv1 = weight_variable([5, 5, 1, 8])
        b_conv1 = bias_variable([8])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

    # Pooling layer
    with tf.name_scope('pool1'):
        h_pool1 = max_pool_2x2(h_conv1)

    # Flatten
    h_pool1_flat = tf.reshape(h_pool1, [-1, 16 * 16 * 8])

    # Dense
    with tf.name_scope('dense'):
        fc = tf.layers.dense(inputs=h_pool1_flat, units=2, activation=None)
        fc = tf.identity(fc, name='output')  # name the output node
    return fc


def conv2d(x, W):
    """conv2d returns a 2d convolution layer with full stride."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    """max_pool_2x2 downsamples a feature map by 2X."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def main(_):
    # Import data
    x = tf.placeholder(tf.float32, [32, 32], name="input")

    # Build the graph for the deep net
    fc = gqnn(x)

    saver = tf.train.Saver(tf.global_variables())

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        # Read the previously saved network
        saver.restore(sess, "./tf_model/model.ckpt")
        # Save a version of the network that can be compiled for the SDK
        saver.save(sess, "./tf_model/model_inference")


if __name__ == '__main__':
    # current file is executed under a shell
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='data',
                        help='Directory for storing input data')
    parser.add_argument('--model_dir', type=str, default='tf_model',
                        help='Directory where the model files will be created')
    parser.add_argument('--model_name', type=str, default='model',
                        help='Name of the model that will be created')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)


Afterwards I run mvNCCompile model_inference.meta -s 12 -in=input -on=output -is 32 32 -o graph to compile the model. The compiled graph file is loaded onto the NCS through the Python NCSDK API, and inference is again performed on test.png, which is attached here.
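
For reference, the on-device pass is along the following lines. This is only a minimal sketch against the NCSDK 1.x mvnc Python API (NCSDK v2 uses a different Device/Graph/Fifo API); the graph filename, the preprocessing mirroring the TF script, and the half-precision cast before LoadTensor are assumptions rather than code from the original post.

import cv2
import numpy as np
from mvnc import mvncapi as mvnc

# Open the first attached NCS device.
devices = mvnc.EnumerateDevices()
if not devices:
    raise RuntimeError('No NCS device found')
device = mvnc.Device(devices[0])
device.OpenDevice()

# Load the graph file produced by mvNCCompile (-o graph).
with open('graph', mode='rb') as f:
    graph_blob = f.read()
graph = device.AllocateGraph(graph_blob)

# Same preprocessing as the TF script: read test.png and convert it to a
# single-channel 32x32 float image.
img = cv2.imread('test.png').astype(np.float32)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# NCSDK 1.x expects half-precision input tensors (assumption).
graph.LoadTensor(img.astype(np.float16), 'test.png')
output, userobj = graph.GetResult()
print(output)

graph.DeallocateGraph()
device.CloseDevice()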

1 Reply
idata
Employee

@mumes Thanks for reporting this. Can you provide a log of the NCS results and TF results you are seeing?


Using NCSDK 2.04.00.06, I see the following results when running mvNCCheck on the model.ckpt.meta generated by your scripts:

USB: Myriad Execution Finished
USB: Myriad Connection Closing.
USB: Myriad Connection Closed.
Result: (1, 1, 2)
1) 0 0.635
2) 1 -0.6475
Expected: (1, 1, 2)
1) 0 0.63927805
2) 1 -0.6549165
------------------------------------------------------------
Obtained values
------------------------------------------------------------
Obtained Min Pixel Accuracy: 1.138402707874775% (max allowed=2%), Pass
Obtained Average Pixel Accuracy: 0.9137054905295372% (max allowed=1%), Pass
Obtained Percentage of wrong values: 0.0% (max allowed=0%), Pass
Obtained Pixel-wise L2 error: 0.9409285567235474% (max allowed=1%), Pass
Obtained Global Sum Difference: 0.011968016624450684
------------------------------------------------------------
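
For context, the "Result" (NCS) and "Expected" (TensorFlow) vectors above can be compared directly. The snippet below is a rough sanity check of the reported metrics; normalizing by the largest reference magnitude is an assumption rather than the documented mvNCCheck formula, and the log prints rounded outputs, so the numbers only roughly agree.

import numpy as np

# Output vectors copied from the mvNCCheck log above.
ncs = np.array([0.635, -0.6475], dtype=np.float32)          # "Result" (NCS)
ref = np.array([0.63927805, -0.6549165], dtype=np.float32)  # "Expected" (TF)

# Per-element error as a percentage of the largest reference magnitude.
rel_err = np.abs(ncs - ref) / np.max(np.abs(ref)) * 100.0
print("max error (%):", rel_err.max())    # roughly the "Min Pixel Accuracy" line
print("mean error (%):", rel_err.mean())  # roughly the "Average Pixel Accuracy" line
print("sum of abs differences:", np.abs(ncs - ref).sum())  # roughly the "Global Sum Difference"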