
MKL-DNN primitive examples

radjaradja
Beginner

Hello,

I'm trying to write my own DNN primitive (relu or maxpool) and I need to compare its performance with the corresponding MKL-DNN primitives. The problem is that the examples provided on GitHub only show a full net, so I tried to separate out a single primitive and ended up with the code below. I'd like to know whether my code is correct and how I could test it (I couldn't find a way to print the outputs from memory directly).

Thank you

#include <cmath>
#include <functional>
#include <numeric>
#include <vector>

#include "mkldnn.hpp"

using namespace mkldnn;

void relu()
{
    auto cpu_engine = engine(engine::cpu, 0);

    const int batch = 2;

    std::vector<float> net_src(batch * 3 * 227 * 227);

    /* initializing non-zero values for src */
    for (size_t i = 0; i < net_src.size(); ++i) net_src[i] = sinf((float)i);

    /* AlexNet: conv
     * {batch, 3, 227, 227} (x) {96, 3, 11, 11} -> {batch, 96, 55, 55}
     * strides: {4, 4}
     */
    memory::dims conv_src_tz = { batch, 3, 227, 227 };
    memory::dims conv_weights_tz = { 96, 3, 11, 11 };
    memory::dims conv_bias_tz = { 96 };
    memory::dims conv_dst_tz = { batch, 96, 55, 55 };
    memory::dims conv_strides = { 4, 4 };
    memory::dims conv_padding = { 0, 0 };

    std::vector<float> conv_weights(
            std::accumulate(conv_weights_tz.begin(), conv_weights_tz.end(), 1,
                            std::multiplies<uint32_t>()));
    std::vector<float> conv_bias(std::accumulate(conv_bias_tz.begin(),
                                                 conv_bias_tz.end(), 1,
                                                 std::multiplies<uint32_t>()));

    /* create memory for user data */
    auto conv_user_src_memory = memory(
            { { { conv_src_tz }, memory::data_type::f32, memory::format::nchw },
              cpu_engine },
            net_src.data());


    /* create memory descriptors for convolution data w/ no specified
     * format (`any`)
     * format `any` lets a primitive (convolution in this case)
     * choose the memory format preferred for best performance */
    auto conv_src_md = memory::desc({ conv_src_tz }, memory::data_type::f32,
                                    memory::format::any);
    auto conv_bias_md = memory::desc({ conv_bias_tz }, memory::data_type::f32,
                                     memory::format::any);
    auto conv_weights_md = memory::desc(
            { conv_weights_tz }, memory::data_type::f32, memory::format::any);
    auto conv_dst_md = memory::desc({ conv_dst_tz }, memory::data_type::f32,
                                    memory::format::any);

    /* create a convolution primitive descriptor */
    auto conv_desc = convolution_forward::desc(
            prop_kind::forward, convolution_direct, conv_src_md,
            conv_weights_md, conv_bias_md, conv_dst_md, conv_strides,
            conv_padding, conv_padding, padding_kind::zero);
    auto conv_pd = convolution_forward::primitive_desc(conv_desc, cpu_engine);

    /* create memory primitive for conv dst; note that the convolution
     * primitive itself is never created or submitted in this function
     * (that would be convolution_forward(conv_pd, ...) pushed into the
     * net ahead of relu), so the buffer is filled by hand here to give
     * relu a defined, non-zero input */
    auto conv_dst_memory = memory(conv_pd.dst_primitive_desc());
    float *relu_src_data
            = static_cast<float *>(conv_dst_memory.get_data_handle());
    size_t relu_src_size
            = conv_pd.dst_primitive_desc().get_size() / sizeof(float);
    for (size_t i = 0; i < relu_src_size; ++i)
        relu_src_data[i] = sinf((float)i);

    /* AlexNet: relu
     * {batch, 96, 55, 55} -> {batch, 96, 55, 55}
     */
    /* negative_slope = 0.0f gives the standard relu; a non-zero slope
     * would give a leaky relu instead */
    const float negative_slope = 0.0f;

    /* create relu primitive desc;
     * keep the memory format of the source the same as the format of the
     * convolution output in order to avoid a reorder */
    auto relu_desc = eltwise_forward::desc(prop_kind::forward,
            algorithm::eltwise_relu, conv_pd.dst_primitive_desc().desc(),
            negative_slope);
    auto relu_pd = eltwise_forward::primitive_desc(relu_desc, cpu_engine);

    /* create relu dst memory primitive */
    auto relu_dst_memory = memory(relu_pd.dst_primitive_desc());

    /* finally create a relu primitive */
    auto relu = eltwise_forward(relu_pd, conv_dst_memory, relu_dst_memory);


    /* build forward net */
    std::vector<primitive> net_fwd;
    net_fwd.push_back(relu);

    int n_iter = 1; /* number of forward iterations; increase for timing */
    /* execute */
    while (n_iter) {
        /* forward */
        stream(stream::kind::eager).submit(net_fwd).wait();

        --n_iter;
    }

}
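For reference, the closest I've come to printing the outputs is reading the raw buffer behind a memory primitive, as in the sketch below (dump_memory is my own helper, not part of the library). I'm not sure how meaningful the printed values are, since with format::any the primitive may have picked a blocked layout such as nChw8c; I suppose one would first have to reorder relu_dst_memory into a user memory created with memory::format::nchw to see the values in a plain layout.

#include <algorithm>
#include <cstdio>

#include "mkldnn.hpp"

/* hypothetical helper (mine, not part of the library): print the first
 * few floats behind a memory primitive; the values appear in whatever
 * layout the primitive chose, so this is only a rough sanity check.
 * memory is a reference-counted handle, so passing it by value is cheap */
static void dump_memory(mkldnn::memory mem)
{
    const float *data = static_cast<const float *>(mem.get_data_handle());
    size_t n = mem.get_primitive_desc().get_size() / sizeof(float);
    for (size_t i = 0; i < std::min<size_t>(n, 10); ++i)
        std::printf("data[%zu] = %g\n", i, data[i]);
}

For the performance comparison itself, my plan was simply to raise n_iter, wrap the submit(...).wait() loop in std::chrono::steady_clock calls, and time my own kernel the same way on the same buffers.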

Gennady_F_Intel
Moderator

It would be better to address these questions to https://github.com/intel/mkl-dnn/issues, where the mkl-dnn team may be able to help.
