I'm trying to convert a quite simple TF model:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

model = keras.Sequential([
    layers.Conv2D(1, (3, 3), input_shape=(160, 160, 3)),
    layers.Dense(10),
])
model.save('test_model', save_format='tf')
Then I run the Model Optimizer:

/opt/intel/openvino/deployment_tools/model_optimizer/mo_tf.py --saved_model_dir test_model --log_level DEBUG
which ends in:
[ 2020-03-05 12:19:30,515 ] [ DEBUG ] [ register_custom_ops:104 ] Added a new entry Upsample to extractors with custom op class <class 'extensions.ops.upsample.UpsampleOp'>.
[ 2020-03-05 12:19:30,516 ] [ DEBUG ] [ const:33 ] Constant extractor for node gives shape = [3 3 3 1] and value.shape = (3, 3, 3, 1)
[ WARNING ] Broadcast of scalar to shape: [1]
[ 2020-03-05 12:19:30,519 ] [ DEBUG ] [ const:33 ] Constant extractor for node gives shape = [1] and value.shape = (1,)
[ 2020-03-05 12:19:30,520 ] [ DEBUG ] [ const:33 ] Constant extractor for node gives shape = [ 1 10] and value.shape = (1, 10)
[ 2020-03-05 12:19:30,520 ] [ DEBUG ] [ const:33 ] Constant extractor for node gives shape = [10] and value.shape = (10,)
[ WARNING ] Node attributes: {'pb': name: "Const"
op: "Const"
device: "/device:CPU:0"
attr {
key: "_output_shapes"
value {
list {
shape {
}
}
}
}
attr {
key: "dtype"
value {
type: DT_STRING
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {
}
string_val: "\n\275\001\n\013\010\001\022\007layer-0\n\030\010\002\022\024layer_with_weights-0\n\013\010\002\022\007layer-1\n\030\010\003\022\024layer_with_weights-1\n\013\010\003\022\007layer-2\n\027\010\004\022\023trainable_variables\n\r\010\005\022\tvariables\n\031\010\006\022\025regularization_losses\n\r\010\007\022\tkeras_api\n\016\010\010\022\nsignatures\nR\n\027\010\t\022\023trainable_variables\n\r\010\n\022\tvariables\n\031\010\013\022\025regularization_losses\n\r\010\014\022\tkeras_api\n~\n\n\010\r\022\006kernel\n\010\010\016\022\004bias\n\024\010\017\022\020_callable_losses\n\027\010\020\022\023trainable_variables\n\r\010\021\022\tvariables\n\031\010\022\022\025regularization_losses\n\r\010\023\022\tkeras_api\n~\n\n\010\024\022\006kernel\n\010\010\025\022\004bias\n\024\010\026\022\020_callable_losses\n\027\010\027\022\023trainable_variables\n\r\010\030\022\tvariables\n\031\010\031\022\025regularization_losses\n\r\010\032\022\tkeras_api\n\034\n\005\010\r\022\0010\n\005\010\016\022\0011\n\005\010\024\022\0012\n\005\010\025\022\0013\n\034\n\005\010\r\022\0010\n\005\010\016\022\0011\n\005\010\024\022\0012\n\005\010\025\022\0013\n\000\n\232\001\n\037\010\033\022\033layer_regularization_losses\n\027\010\004\022\023trainable_variables\n\n\010\034\022\006layers\n\033\010\035\022\027non_trainable_variables\n\r\010\005\022\tvariables\n\031\010\006\022\025regularization_losses\n\013\010\036\022\007metrics\n\000\n\000\n\000\n\000\n\232\001\n\037\010\037\022\033layer_regularization_losses\n\027\010\t\022\023trainable_variables\n\n\010 \022\006layers\n\033\010!\022\027non_trainable_variables\n\r\010\n\022\tvariables\n\031\010\013\022\025regularization_losses\n\013\010\"\022\007metrics\nY\022W\n\016VARIABLE_VALUE\022\rconv2d/kernel\0326layer_with_weights-0/kernel/.ATTRIBUTES/VARIABLE_VALUE\nU\022S\n\016VARIABLE_VALUE\022\013conv2d/bias\0324layer_with_weights-0/bias/.ATTRIBUTES/VARIABLE_VALUE\n\000\n\016\n\005\010\r\022\0010\n\005\010\016\022\0011\n\016\n\005\010\r\022\0010\n\005\010\016\022\0011\n\000\n\232\001\n\037\010#\022\033layer_regularization_losses\n\027\010\020\022\023trainable_variables\n\n\010$\022\006layers\n\033\010%\022\027non_trainable_variables\n\r\010\021\022\tvariables\n\031\010\022\022\025regularization_losses\n\013\010&\022\007metrics\nX\022V\n\016VARIABLE_VALUE\022\014dense/kernel\0326layer_with_weights-1/kernel/.ATTRIBUTES/VARIABLE_VALUE\nT\022R\n\016VARIABLE_VALUE\022\ndense/bias\0324layer_with_weights-1/bias/.ATTRIBUTES/VARIABLE_VALUE\n\000\n\016\n\005\010\024\022\0010\n\005\010\025\022\0011\n\016\n\005\010\024\022\0010\n\005\010\025\022\0011\n\000\n\232\001\n\037\010\'\022\033layer_regularization_losses\n\027\010\027\022\023trainable_variables\n\n\010(\022\006layers\n\033\010)\022\027non_trainable_variables\n\r\010\030\022\tvariables\n\031\010\031\022\025regularization_losses\n\013\010*\022\007metrics\n\000\n\016\n\005\010\002\022\0010\n\005\010\003\022\0011\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000"
}
}
}
, '_in_ports': {}, '_out_ports': {}, 'kind': 'op', 'name': 'Const', 'op': 'Const', 'precision': 'FP32'}
[ ERROR ] 'ascii' codec can't decode byte 0xbd in position 1: ordinal not in range(128)
Unexpected exception happened during extracting attributes for node dense/bias/Read/ReadVariableOp.
Original exception message: 'ascii' codec can't decode byte 0xbd in position 1: ordinal not in range(128)
[ 2020-03-05 12:19:30,522 ] [ DEBUG ] [ main:304 ] Traceback (most recent call last):
File "/opt/intel/openvino_2019.3.376/deployment_tools/model_optimizer/mo/front/extractor.py", line 608, in extract_node_attrs
supported, new_attrs = extractor(Node(graph, node))
File "/opt/intel/openvino_2019.3.376/deployment_tools/model_optimizer/mo/pipeline/tf.py", line 132, in <lambda>
extract_node_attrs(graph, lambda node: tf_op_extractor(node, check_for_duplicates(tf_op_extractors)))
File "/opt/intel/openvino_2019.3.376/deployment_tools/model_optimizer/mo/front/tf/extractor.py", line 109, in tf_op_extractor
attrs = tf_op_extractors[op](node)
File "/opt/intel/openvino_2019.3.376/deployment_tools/model_optimizer/mo/front/tf/extractor.py", line 65, in <lambda>
return lambda node: pb_extractor(node.pb)
File "/opt/intel/openvino_2019.3.376/deployment_tools/model_optimizer/mo/front/tf/extractors/const.py", line 31, in tf_const_ext
result['value'] = tf_tensor_content(pb_tensor.dtype, result['shape'], pb_tensor)
File "/opt/intel/openvino_2019.3.376/deployment_tools/model_optimizer/mo/front/tf/extractors/utils.py", line 77, in tf_tensor_content
dtype=type_helper[0]),
UnicodeDecodeError: 'ascii' codec can't decode byte 0xbd in position 1: ordinal not in range(128)

The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/intel/openvino_2019.3.376/deployment_tools/model_optimizer/mo/main.py", line 298, in main
return driver(argv)
File "/opt/intel/openvino_2019.3.376/deployment_tools/model_optimizer/mo/main.py", line 247, in driver
is_binary=not argv.input_model_is_text)
File "/opt/intel/openvino_2019.3.376/deployment_tools/model_optimizer/mo/pipeline/tf.py", line 132, in tf2nx
extract_node_attrs(graph, lambda node: tf_op_extractor(node, check_for_duplicates(tf_op_extractors)))
File "/opt/intel/openvino_2019.3.376/deployment_tools/model_optimizer/mo/front/extractor.py", line 616, in extract_node_attrs
) from e
mo.utils.error.Error: 'ascii' codec can't decode byte 0xbd in position 1: ordinal not in range(128)
Unexpected exception happened during extracting attributes for node dense/bias/Read/ReadVariableOp.
Original exception message: 'ascii' codec can't decode byte 0xbd in position 1: ordinal not in range(128)
TensorFlow version: 1.15.2
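
For reference, one alternative export path would be to freeze the Keras graph into a single .pb file and point mo_tf.py at it with --input_model instead of --saved_model_dir. A minimal sketch, untested against this particular Model Optimizer build (the file name test_model_frozen.pb is just an example):

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

model = keras.Sequential([
    layers.Conv2D(1, (3, 3), input_shape=(160, 160, 3)),
    layers.Dense(10),
])

# TF 1.x graph mode: grab the Keras session and fold the variables into
# constants so the whole graph can be written out as a single .pb file.
sess = tf.compat.v1.keras.backend.get_session()
frozen_graph = tf.compat.v1.graph_util.convert_variables_to_constants(
    sess, sess.graph.as_graph_def(), [out.op.name for out in model.outputs])
tf.io.write_graph(frozen_graph, '.', 'test_model_frozen.pb', as_text=False)

The frozen graph could then be given to the Model Optimizer with an explicit input shape, e.g. mo_tf.py --input_model test_model_frozen.pb --input_shape [1,160,160,3].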
Hi R,
Thanks for reaching out.
To generate the IR with the Model Optimizer tool, please use a trained model.
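
For illustration only (a minimal sketch using dummy data, not verified against your setup), a trained model here could be as simple as compiling and fitting before saving:

import numpy as np

model.compile(optimizer='adam', loss='mse')
# Dummy data purely to produce trained weights; shapes follow the model:
# Conv2D(1, 3x3, valid padding) on 160x160x3 gives 158x158x1, Dense(10) gives 158x158x10.
x = np.random.rand(2, 160, 160, 3).astype('float32')
y = np.random.rand(2, 158, 158, 10).astype('float32')
model.fit(x, y, epochs=1)
model.save('test_model', save_format='tf')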
Best Regards,
Surya