<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Re:Inference speed with INT8 model on a bit old CPU in Intel® Distribution of OpenVINO™ Toolkit</title>
    <link>https://community.intel.com/t5/Intel-Distribution-of-OpenVINO/Inference-speed-with-INT8-model-on-a-bit-old-CPU/m-p/1407277#M28106</link>
    <description>&lt;P&gt;Hi Timosy,&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;/P&gt;&lt;P&gt;This thread will no longer be monitored since we have provided the information.&amp;nbsp;If you need any additional information from Intel, please submit a new question.&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;/P&gt;&lt;P&gt;Regards,&lt;/P&gt;&lt;P&gt;Aznie&lt;/P&gt;&lt;BR /&gt;</description>
    <pubDate>Wed, 10 Aug 2022 05:16:16 GMT</pubDate>
    <dc:creator>IntelSupport</dc:creator>
    <dc:date>2022-08-10T05:16:16Z</dc:date>
    <item>
      <title>Inference speed with INT8 model on a bit old CPU</title>
      <link>https://community.intel.com/t5/Intel-Distribution-of-OpenVINO/Inference-speed-with-INT8-model-on-a-bit-old-CPU/m-p/1402417#M27939</link>
      <description>&lt;P&gt;I prepared an INT8 model and tested it on a new CPU. There the inference speed gets faster, which is fine. But when I ran inference with the same model on a somewhat older CPU, it did not get faster. How should I check whether a CPU supports an INT8 model or not?&lt;/P&gt;</description>
      <pubDate>Fri, 22 Jul 2022 04:45:57 GMT</pubDate>
      <guid>https://community.intel.com/t5/Intel-Distribution-of-OpenVINO/Inference-speed-with-INT8-model-on-a-bit-old-CPU/m-p/1402417#M27939</guid>
      <dc:creator>timosy</dc:creator>
      <dc:date>2022-07-22T04:45:57Z</dc:date>
    </item>
    <item>
      <title>Re:Inference speed with INT8 model on a bit old CPU</title>
      <link>https://community.intel.com/t5/Intel-Distribution-of-OpenVINO/Inference-speed-with-INT8-model-on-a-bit-old-CPU/m-p/1403037#M27965</link>
      <description>&lt;P&gt;Hi Timosy,&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Thanks for reaching out.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Slower inference on an older CPU is expected, depending on the hardware configuration and the layers used by the model. Refer to the &lt;A href="https://docs.openvino.ai/latest/openvino_docs_performance_benchmarks_openvino.html#doxid-openvino-docs-performance-benchmarks-openvino" rel="noopener noreferrer" target="_blank"&gt;Intel® Distribution of OpenVINO™ toolkit Benchmark Results&lt;/A&gt; for inference performance on specific hardware configurations.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Apart from that, INT8 is supported by the CPU plugin. Check out the &lt;A href="https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html#supported-devices" rel="noopener noreferrer" target="_blank"&gt;Supported Devices&lt;/A&gt;, &lt;A href="https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html#supported-model-formats" rel="noopener noreferrer" target="_blank"&gt;Supported Model Formats&lt;/A&gt;, and &lt;A href="https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html#supported-layers" rel="noopener noreferrer" target="_blank"&gt;Supported Layers&lt;/A&gt; documentation for more details.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Regards,&lt;/P&gt;&lt;P&gt;Aznie&lt;/P&gt;&lt;BR /&gt;</description>
      <pubDate>Mon, 25 Jul 2022 09:37:26 GMT</pubDate>
      <guid>https://community.intel.com/t5/Intel-Distribution-of-OpenVINO/Inference-speed-with-INT8-model-on-a-bit-old-CPU/m-p/1403037#M27965</guid>
      <dc:creator>IntelSupport</dc:creator>
      <dc:date>2022-07-25T09:37:26Z</dc:date>
    </item>
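    <!--
      A minimal sketch of how the INT8 support mentioned in the reply above could be
      checked from a script, assuming the OpenVINO 2022.1+ Python API
      (openvino.runtime.Core). FULL_DEVICE_NAME and OPTIMIZATION_CAPABILITIES are
      standard device properties of the CPU plugin, but the exact strings returned
      depend on the hardware, so treat this as an illustration rather than the
      procedure recommended in the thread.

      from openvino.runtime import Core

      core = Core()

      # Which CPU the plugin sees, and which precisions it reports for it.
      print(core.get_property("CPU", "FULL_DEVICE_NAME"))
      caps = core.get_property("CPU", "OPTIMIZATION_CAPABILITIES")
      print(caps)

      # Note: an older CPU may still list INT8 here yet execute it without the
      # instruction-level acceleration of newer CPUs, so a quantized model runs
      # but does not get faster, as described in this thread.
      if "INT8" in caps:
          print("CPU plugin reports INT8 support on this machine")
      else:
          print("CPU plugin does not report INT8 support here")
    -->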
    <item>
      <title>Re:Inference speed with INT8 model on a bit old CPU</title>
      <link>https://community.intel.com/t5/Intel-Distribution-of-OpenVINO/Inference-speed-with-INT8-model-on-a-bit-old-CPU/m-p/1407277#M28106</link>
      <description>&lt;P&gt;Hi Timosy,&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;/P&gt;&lt;P&gt;This thread will no longer be monitored since we have provided the information.&amp;nbsp;If you need any additional information from Intel, please submit a new question.&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;/P&gt;&lt;P&gt;&lt;BR /&gt;&lt;/P&gt;&lt;P&gt;Regards,&lt;/P&gt;&lt;P&gt;Aznie&lt;/P&gt;&lt;BR /&gt;</description>
      <pubDate>Wed, 10 Aug 2022 05:16:16 GMT</pubDate>
      <guid>https://community.intel.com/t5/Intel-Distribution-of-OpenVINO/Inference-speed-with-INT8-model-on-a-bit-old-CPU/m-p/1407277#M28106</guid>
      <dc:creator>IntelSupport</dc:creator>
      <dc:date>2022-08-10T05:16:16Z</dc:date>
    </item>
  </channel>
</rss>

