diff --git a/CMakeLists.txt b/CMakeLists.txt index edf8233f46f683..902d32898b12dd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -78,8 +78,7 @@ function(build_ngraph) if (NOT ANDROID) ngraph_set(NGRAPH_UNIT_TEST_ENABLE TRUE) ngraph_set(NGRAPH_UNIT_TEST_OPENVINO_ENABLE TRUE) - # ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE TRUE) - set(NGRAPH_ONNX_IMPORT_ENABLE TRUE CACHE BOOL "" FORCE) + ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE TRUE) else() ngraph_set(NGRAPH_UNIT_TEST_ENABLE FALSE) ngraph_set(NGRAPH_TEST_UTIL_ENABLE FALSE) @@ -118,4 +117,49 @@ build_ngraph() add_subdirectory(inference-engine) +add_subdirectory(docs) + +# cpack + +# install setupvars + +ie_cpack_add_component(setupvars REQUIRED) + +if(UNIX) + install(PROGRAMS scripts/setupvars/setupvars.sh + DESTINATION bin + COMPONENT setupvars) +elseif(WIN32) + install(PROGRAMS scripts/setupvars/setupvars.bat + DESTINATION bin + COMPONENT setupvars) +endif() + +# install install_dependencies + +if(UNIX) + ie_cpack_add_component(install_dependencies REQUIRED) + install(DIRECTORY scripts/install_dependencies/ + DESTINATION install_dependencies + COMPONENT install_dependencies) +endif() + +# install files for demo + +ie_cpack_add_component(demo_scripts REQUIRED DEPENDS core) + +if(UNIX) + install(DIRECTORY scripts/demo/ + DESTINATION deployment_tools/demo + COMPONENT demo_scripts + USE_SOURCE_PERMISSIONS + PATTERN *.bat EXCLUDE) +elseif(WIN32) + install(DIRECTORY scripts/demo/ + DESTINATION deployment_tools/demo + COMPONENT demo_scripts + USE_SOURCE_PERMISSIONS + PATTERN *.sh EXCLUDE) +endif() + ie_cpack(${IE_CPACK_COMPONENTS_ALL}) diff --git a/cmake/developer_package.cmake b/cmake/developer_package.cmake index bed7350318f9e1..23cd9b2d7f2483 100644 --- a/cmake/developer_package.cmake +++ b/cmake/developer_package.cmake @@ -36,9 +36,13 @@ function(ie_cpack_set_library_dir) endif() if(WIN32) - set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${CMAKE_BUILD_TYPE}/${ARCH} PARENT_SCOPE) + set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE) else() set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE) + set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE) + set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE) endif() endfunction() @@ -59,8 +63,10 @@ macro(ie_cpack) set(CPACK_GENERATOR "TGZ") if(WIN32) set(CPACK_PACKAGE_NAME inference-engine_${CMAKE_BUILD_TYPE}) + string(REPLACE "\\" "_" CPACK_PACKAGE_VERSION "${CI_BUILD_NUMBER}") else() set(CPACK_PACKAGE_NAME inference-engine) + string(REPLACE "/" "_" CPACK_PACKAGE_VERSION "${CI_BUILD_NUMBER}") endif() set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY OFF) set(CPACK_ARCHIVE_COMPONENT_INSTALL ON) @@ -194,7 +200,7 @@ else() endif() if(APPLE) - set(CMAKE_MACOSX_RPATH 1) + set(CMAKE_MACOSX_RPATH ON) endif(APPLE) # Use solution folders diff --git a/cmake/download/dependency_solver.cmake b/cmake/download/dependency_solver.cmake index f275f68599aecf..437b2e2653e4f3 100644 --- a/cmake/download/dependency_solver.cmake +++ b/cmake/download/dependency_solver.cmake @@ -138,6 +138,14 @@ function (RESOLVE_DEPENDENCY NAME_OF_CMAKE_VAR) endfunction(RESOLVE_DEPENDENCY) +function (resolve_model_dependency network archive network_model_path) + RESOLVE_DEPENDENCY(${network_model_path} + ARCHIVE "models_archives/${archive}" + TARGET_PATH "${MODELS_PATH}/${network}") + 
string (REPLACE ${MODELS_PATH} "" relative_path ${${network_model_path}}) + set(${network_model_path} ".${relative_path}" PARENT_SCOPE) +endfunction() + function(reset_deps_cache) # # Reset the dependencies cache if it was set by dependency solver diff --git a/cmake/download/download_and_extract.cmake b/cmake/download/download_and_extract.cmake index f8f79df818be73..9ff9c0ea936083 100644 --- a/cmake/download/download_and_extract.cmake +++ b/cmake/download/download_and_extract.cmake @@ -154,7 +154,7 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked if(DEFINED ENV{IE_PATH_TO_DEPS}) set(URL "$ENV{IE_PATH_TO_DEPS}/${RELATIVE_URL}") else() - set(URL "https://download.01.org/opencv/2020/openvinotoolkit/2020.2/inference_engine/${RELATIVE_URL}") + set(URL "https://download.01.org/opencv/2020/openvinotoolkit/2020.3/inference_engine/${RELATIVE_URL}") endif() #no message on recursive calls diff --git a/cmake/os_flags.cmake b/cmake/os_flags.cmake index 6bcd3324c4e327..a91c4641ee9d5e 100644 --- a/cmake/os_flags.cmake +++ b/cmake/os_flags.cmake @@ -223,12 +223,13 @@ if(WIN32) # 161 unrecognized pragma # 177 variable was declared but never referenced # 556 not matched type of assigned function pointer + # 1744: field of class type without a DLL interface used in a class with a DLL interface # 2586 decorated name length exceeded, name was truncated # 2651: attribute does not apply to any entity # 3180 unrecognized OpenMP pragma # 11075: To get full report use -Qopt-report:4 -Qopt-report-phase ipo # 15335 was not vectorized: vectorization possible but seems inefficient. Use vector always directive or /Qvec-threshold0 to override - ie_add_compiler_flags(/Qdiag-disable:161,177,556,2586,2651,3180,11075,15335) + ie_add_compiler_flags(/Qdiag-disable:161,177,556,1744,2586,2651,3180,11075,15335) endif() # Debug information flags diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt new file mode 100644 index 00000000000000..f9948bdc8f3ca0 --- /dev/null +++ b/docs/CMakeLists.txt @@ -0,0 +1,47 @@ +# Copyright (C) 2018-2020 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +if(NOT ENABLE_DOCKER) + add_subdirectory(examples) + # Detect nGraph + find_package(ngraph QUIET) + if(NOT ngraph_FOUND) + set(ngraph_DIR ${CMAKE_BINARY_DIR}/ngraph) + endif() + # Detect InferenceEngine + find_package(InferenceEngine QUIET) + if(NOT InferenceEngine_FOUND) + set(InferenceEngine_DIR ${CMAKE_BINARY_DIR}) + endif() + add_subdirectory(template_extension) +endif() + +# OpenVINO docs + +set(OPENVINO_DOCS_PATH "" CACHE PATH "Path to openvino-documentation local repository") +set(args "") + +if(OPENVINO_DOCS_PATH) + set(args "${args} ovinodoc_path:${OPENVINO_DOCS_PATH}") +endif() + +file(GLOB_RECURSE docs_files "${OpenVINO_MAIN_SOURCE_DIR}/docs") +file(GLOB_RECURSE include_files "${OpenVINO_MAIN_SOURCE_DIR}/inference-engine/include") +file(GLOB_RECURSE ovino_files "${OPENVINO_DOCS_PATH}") + +add_custom_target(ie_docs + COMMAND ./build_docs.sh ${args} + WORKING_DIRECTORY "${OpenVINO_MAIN_SOURCE_DIR}/docs/build_documentation" + COMMENT "Generating OpenVINO documentation" + SOURCES ${docs_files} ${include_files} ${ovino_files} + VERBATIM) + +find_program(browser NAMES xdg-open) +if(browser) + add_custom_target(ie_docs_open + COMMAND ${browser} "${OpenVINO_MAIN_SOURCE_DIR}/doc/html/index.html" + DEPENDS ie_docs + COMMENT "Open OpenVINO documentation" + VERBATIM) +endif() diff --git a/docs/examples/CMakeLists.txt b/docs/examples/CMakeLists.txt new file mode 100644 index 
00000000000000..d8519aed461303 --- /dev/null +++ b/docs/examples/CMakeLists.txt @@ -0,0 +1,14 @@ +# Copyright (C) 2018-2020 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +set(TARGET_NAME ie_docs_examples) + +file(GLOB SOURCES *.cpp) + +add_library(ie_docs_examples STATIC ${SOURCES}) + +target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api) + +#add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}) +#add_clang_format_target(clang_format_${TARGET_NAME} FOR_TARGETS ${TARGET_NAME}) diff --git a/docs/examples/example_async_infer_request.cpp b/docs/examples/example_async_infer_request.cpp new file mode 100644 index 00000000000000..b62ba6db0bb6a9 --- /dev/null +++ b/docs/examples/example_async_infer_request.cpp @@ -0,0 +1,68 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +using namespace InferenceEngine; + +class AcceleratorSyncRequest : public InferRequestInternal { +public: + using Ptr = std::shared_ptr<AcceleratorSyncRequest>; + + void Preprocess(); + void WriteToDevice(); + void RunOnDevice(); + void ReadFromDevice(); + void PostProcess(); +}; + +// ! [async_infer_request:define_pipeline] +// Inherits from AsyncInferRequestThreadSafeDefault +class AcceleratorAsyncInferRequest : public AsyncInferRequestThreadSafeDefault { + // Store the pointer to the synchronous request and five executors + AcceleratorAsyncInferRequest(const AcceleratorSyncRequest::Ptr& syncRequest, + const ITaskExecutor::Ptr& preprocessExecutor, + const ITaskExecutor::Ptr& writeToDeviceExecutor, + const ITaskExecutor::Ptr& runOnDeviceExecutor, + const ITaskExecutor::Ptr& readFromDeviceExecutor, + const ITaskExecutor::Ptr& postProcessExecutor) : + AsyncInferRequestThreadSafeDefault(syncRequest, nullptr, nullptr), + _accSyncRequest{syncRequest}, + _preprocessExecutor{preprocessExecutor}, + _writeToDeviceExecutor{writeToDeviceExecutor}, + _runOnDeviceExecutor{runOnDeviceExecutor}, + _readFromDeviceExecutor{readFromDeviceExecutor}, + _postProcessExecutor{postProcessExecutor} + { + // The five pipeline stages of the synchronous infer request are run by different executors + _pipeline = { + { _preprocessExecutor , [this] { + _accSyncRequest->Preprocess(); + }}, + { _writeToDeviceExecutor , [this] { + _accSyncRequest->WriteToDevice(); + }}, + { _runOnDeviceExecutor , [this] { + _accSyncRequest->RunOnDevice(); + }}, + { _readFromDeviceExecutor , [this] { + _accSyncRequest->ReadFromDevice(); + }}, + { _postProcessExecutor , [this] { + _accSyncRequest->PostProcess(); + }}, + }; + } + + // As all stages use the _accSyncRequest member, we should wait for all stage tasks to complete before the destructor destroys this member. + ~AcceleratorAsyncInferRequest() { + StopAndWait(); + } + + AcceleratorSyncRequest::Ptr _accSyncRequest; + ITaskExecutor::Ptr _preprocessExecutor, _writeToDeviceExecutor, _runOnDeviceExecutor, _readFromDeviceExecutor, _postProcessExecutor; +}; +// ! [async_infer_request:define_pipeline] diff --git a/docs/examples/example_itask_executor.cpp b/docs/examples/example_itask_executor.cpp new file mode 100644 index 00000000000000..a08c4c7575cf12 --- /dev/null +++ b/docs/examples/example_itask_executor.cpp @@ -0,0 +1,53 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include + +void example1() { +// ! 
[itask_executor:define_pipeline] + // std::promise is a move-only object, so to satisfy the copyable-callable constraint we wrap it in std::shared_ptr + auto promise = std::make_shared<std::promise<void>>(); + // Once the promise is created, we can get a std::future to wait for the result + auto future = promise->get_future(); + // A rather simple task + InferenceEngine::Task task = [] {std::cout << "Some Output" << std::endl; }; + // Create an executor + InferenceEngine::ITaskExecutor::Ptr taskExecutor = std::make_shared(); + if (taskExecutor == nullptr) { + // ProcessError(e); + return; + } + // We capture the task and the promise. When the task is executed in the task executor context, + // we manually call the std::promise::set_value() method + taskExecutor->run([task, promise] { + std::exception_ptr currentException; + try { + task(); + } catch(...) { + // If there is an exception, store a pointer to the current exception + currentException = std::current_exception(); + } + + if (nullptr == currentException) { + promise->set_value(); // <-- If there are no problems, just call std::promise::set_value() + } else { + promise->set_exception(currentException); // <-- If there is an exception, forward it to the std::future object + } + }); + // To wait for task completion, we call the std::future::wait method + future.wait(); // The current thread will block here until std::promise::set_value() + // or std::promise::set_exception() is called. + + // If the future stores an exception, it will be rethrown by the std::future::get method + try { + future.get(); + } catch(std::exception& /*e*/) { + // ProcessError(e); + } +// ! [itask_executor:define_pipeline] +} diff --git a/docs/template_extension/CMakeLists.txt b/docs/template_extension/CMakeLists.txt new file mode 100644 index 00000000000000..957f9b800746bd --- /dev/null +++ b/docs/template_extension/CMakeLists.txt @@ -0,0 +1,18 @@ +# Copyright (C) 2020 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +# [cmake:extension] +set(TARGET_NAME "template_extension") + +find_package(ngraph REQUIRED) +find_package(InferenceEngine REQUIRED) + +file(GLOB_RECURSE SRC *.cpp) + +add_library(${TARGET_NAME} SHARED ${SRC}) + +target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_EXTENSION_API) + +target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES} + ${NGRAPH_LIBRARIES}) +# [cmake:extension] diff --git a/docs/template_extension/cpu_kernel.cpp b/docs/template_extension/cpu_kernel.cpp new file mode 100644 index 00000000000000..7bc45fc273f69f --- /dev/null +++ b/docs/template_extension/cpu_kernel.cpp @@ -0,0 +1,124 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "cpu_kernel.hpp" +#include "op.hpp" +#include
+#include + +using namespace TemplateExtension; + +//! [cpu_implementation:ctor] +OpImplementation::OpImplementation(const std::shared_ptr &node) { + try { + auto castedNode = std::dynamic_pointer_cast(node); + if (!castedNode) + THROW_IE_EXCEPTION << "Cannot create implementation for unknown operation!"; + if (castedNode->inputs().size() != 1 || castedNode->outputs().size() != 1) + THROW_IE_EXCEPTION << "Cannot create implementation for operation with incorrect number of inputs or outputs!"; + if (castedNode->get_input_partial_shape(0).is_dynamic() || castedNode->get_output_partial_shape(0).is_dynamic()) + THROW_IE_EXCEPTION << "Cannot create implementation for op with dynamic shapes!"; + if (castedNode->get_input_shape(0).size() != 4 || castedNode->get_output_shape(0).size() != 4) + THROW_IE_EXCEPTION << "Operation supports only 4d tensors for input and output."; + if (castedNode->get_input_element_type(0) != ngraph::element::f32 || castedNode->get_output_element_type(0) != ngraph::element::f32) + THROW_IE_EXCEPTION << "Operation supports only FP32 tensors."; + add = castedNode->getAddAttr(); + } catch (InferenceEngine::details::InferenceEngineException& ex) { + error = ex.what(); + } +} +//! [cpu_implementation:ctor] + +//! [cpu_implementation:getSupportedConfigurations] +InferenceEngine::StatusCode OpImplementation::getSupportedConfigurations(std::vector &conf, + InferenceEngine::ResponseDesc *resp) noexcept { + auto createConfig = [](const InferenceEngine::SizeVector inShape, const InferenceEngine::SizeVector& outShape, bool planar) { + InferenceEngine::LayerConfig config; + config.dynBatchSupport = false; + InferenceEngine::DataConfig inData; + InferenceEngine::DataConfig outData; + InferenceEngine::SizeVector order = {0, 1, 2, 3}; + // Allow any offset before data + size_t offset((std::numeric_limits::max)()); + if (planar) { + inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inShape, order, offset}); + config.inConfs.push_back(inData); + outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outShape, order, offset}); + config.outConfs.push_back(outData); + } else { + // Add blocked (nChw8c) format + auto div_up = [](const int a, const int b) -> int { + if (!b) + return 0; + return (a + b - 1) / b; + }; + + order.push_back(1); + InferenceEngine::SizeVector inBlkDims = inShape; + inBlkDims[1] = div_up(inBlkDims[1], 8); + inBlkDims.push_back(8); + InferenceEngine::SizeVector outBlkDims = outShape; + outBlkDims[1] = div_up(outBlkDims[1], 8); + outBlkDims.push_back(8); + inData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, inShape, {inBlkDims, order, offset}); + config.inConfs.push_back(inData); + outData.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, outShape, {outBlkDims, order, offset}); + config.outConfs.push_back(outData); + } + return config; + }; + if (!error.empty()) { + if (resp) { + strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1); + resp->msg[sizeof(resp->msg)-1] = 0; + } + return InferenceEngine::GENERAL_ERROR; + } + // Add planar format + conf.emplace_back(createConfig(inShape, outShape, true)); + // Add blocked format nChw8c + conf.emplace_back(createConfig(inShape, outShape, false)); + return InferenceEngine::OK; +} +//! [cpu_implementation:getSupportedConfigurations] + +//! 
[cpu_implementation:init] +InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig &config, InferenceEngine::ResponseDesc *resp) noexcept { + try { + if (config.inConfs.size() != 1 || config.outConfs.size() != 1) { + THROW_IE_EXCEPTION << "Operation cannot be initialized with incorrect number of inputs/outputs!"; + } + + if (config.inConfs[0].desc.getDims().size() != 4 || config.outConfs[0].desc.getDims().size() != 4) { + THROW_IE_EXCEPTION << "Operation can be initialized only with 4d input/output tensors!"; + } + + if (config.outConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32 || + config.inConfs[0].desc.getPrecision() != InferenceEngine::Precision::FP32) { + THROW_IE_EXCEPTION << "Operation supports only FP32 precisions!"; + } + } catch (InferenceEngine::details::InferenceEngineException& ex) { + if (resp) { + strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1); + resp->msg[sizeof(resp->msg)-1] = 0; + } + return InferenceEngine::GENERAL_ERROR; + } + + return InferenceEngine::OK; +} +//! [cpu_implementation:init] + +//! [cpu_implementation:execute] +InferenceEngine::StatusCode OpImplementation::execute(std::vector &inputs, + std::vector &outputs, + InferenceEngine::ResponseDesc *resp) noexcept { + const float* src_data = inputs[0]->cbuffer().as() + inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); + float *dst_data = outputs[0]->buffer().as() + outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding(); + + for (size_t i = 0; i < inputs[0]->size(); i++) { + dst_data[i] = src_data[i] + add; + } + return InferenceEngine::OK; +} +//! [cpu_implementation:execute] diff --git a/docs/template_extension/cpu_kernel.hpp b/docs/template_extension/cpu_kernel.hpp new file mode 100644 index 00000000000000..84e92398b750e6 --- /dev/null +++ b/docs/template_extension/cpu_kernel.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +namespace TemplateExtension { + +//! [cpu_implementation:header] +class OpImplementation : public InferenceEngine::ILayerExecImpl { +public: + explicit OpImplementation(const std::shared_ptr& node); + InferenceEngine::StatusCode getSupportedConfigurations(std::vector &conf, + InferenceEngine::ResponseDesc *resp) noexcept override; + InferenceEngine::StatusCode init(InferenceEngine::LayerConfig &config, + InferenceEngine::ResponseDesc *resp) noexcept override; + InferenceEngine::StatusCode execute(std::vector &inputs, + std::vector &outputs, + InferenceEngine::ResponseDesc *resp) noexcept override; +private: + int64_t add; + ngraph::Shape inShape; + ngraph::Shape outShape; + std::string error; +}; +//! [cpu_implementation:header] + +} // namespace TemplateExtension diff --git a/docs/template_extension/extension.cpp b/docs/template_extension/extension.cpp new file mode 100644 index 00000000000000..d3ef04de8ec803 --- /dev/null +++ b/docs/template_extension/extension.cpp @@ -0,0 +1,73 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "extension.hpp" +#include "cpu_kernel.hpp" +#include "op.hpp" +#include +#include + +#include +#include +#include +#include +#include + +using namespace TemplateExtension; + +//! 
[extension:GetVersion] +void Extension::GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept { + static InferenceEngine::Version ExtensionDescription = { + {1, 0}, // extension API version + "1.0", + "template_ext" // extension description message + }; + + versionInfo = &ExtensionDescription; +} +//! [extension:GetVersion] + +//! [extension:getOpSets] +std::map<std::string, ngraph::OpSet> Extension::getOpSets() { + std::map<std::string, ngraph::OpSet> opsets; + ngraph::OpSet opset; + opset.insert<Operation>(); + opsets["custom_opset"] = opset; + return opsets; +} +//! [extension:getOpSets] + +//! [extension:getImplTypes] +std::vector<std::string> Extension::getImplTypes(const std::shared_ptr<ngraph::Node> &node) { + if (std::dynamic_pointer_cast<Operation>(node)) { + return {"CPU"}; + } + return {}; +} +//! [extension:getImplTypes] + +//! [extension:getImplementation] +InferenceEngine::ILayerImpl::Ptr Extension::getImplementation(const std::shared_ptr<ngraph::Node> &node, const std::string &implType) { + if (std::dynamic_pointer_cast<Operation>(node) && implType == "CPU") { + return std::make_shared<OpImplementation>(node); + } + return nullptr; +} +//! [extension:getImplementation] + +//! [extension:CreateExtension] +// Exported function +INFERENCE_EXTENSION_API(InferenceEngine::StatusCode) InferenceEngine::CreateExtension(InferenceEngine::IExtension *&ext, + InferenceEngine::ResponseDesc *resp) noexcept { + try { + ext = new Extension(); + return OK; + } catch (std::exception &ex) { + if (resp) { + std::string err = ((std::string) "Couldn't create extension: ") + ex.what(); + err.copy(resp->msg, 255); + } + return InferenceEngine::GENERAL_ERROR; + } +} +//! [extension:CreateExtension] diff --git a/docs/template_extension/extension.hpp b/docs/template_extension/extension.hpp new file mode 100644 index 00000000000000..e74c6c4354d65c --- /dev/null +++ b/docs/template_extension/extension.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +//! [extension:header] +namespace TemplateExtension { + +class Extension : public InferenceEngine::IExtension { +public: + Extension() = default; + void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override; + void Unload() noexcept override {} + void Release() noexcept override { delete this; } + + std::map<std::string, ngraph::OpSet> getOpSets() override; + std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override; + InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override; +}; + +} // namespace TemplateExtension +//! [extension:header] diff --git a/docs/template_extension/op.cpp b/docs/template_extension/op.cpp new file mode 100644 index 00000000000000..2939138ebd575a --- /dev/null +++ b/docs/template_extension/op.cpp @@ -0,0 +1,38 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "op.hpp" + +using namespace TemplateExtension; + +constexpr ngraph::NodeTypeInfo Operation::type_info; + +//! [op:ctor] +Operation::Operation(const ngraph::Output<ngraph::Node> &arg, int64_t add) : Op({arg}), add(add) { + constructor_validate_and_infer_types(); +} +//! [op:ctor] + +//! [op:validate] +void Operation::validate_and_infer_types() { + // Operation doesn't change shapes and element type + set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); +} +//! [op:validate] + +//! 
[op:copy] +std::shared_ptr Operation::copy_with_new_args(const ngraph::NodeVector &new_args) const { + if (new_args.size() != 1) { + throw ngraph::ngraph_error("Incorrect number of new arguments"); + } + + return std::make_shared(new_args.at(0), add); +} +//! [op:copy] + +//! [op:visit_attributes] +bool Operation::visit_attributes(ngraph::AttributeVisitor &visitor) { + visitor.on_attribute("add", add); + return true; +} +//! [op:visit_attributes] diff --git a/docs/template_extension/op.hpp b/docs/template_extension/op.hpp new file mode 100644 index 00000000000000..f56b36e4db6a05 --- /dev/null +++ b/docs/template_extension/op.hpp @@ -0,0 +1,29 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +//! [op:header] +namespace TemplateExtension { + +class Operation : public ngraph::op::Op { +public: + static constexpr ngraph::NodeTypeInfo type_info{"Template", 0}; + const ngraph::NodeTypeInfo& get_type_info() const override { return type_info; } + + Operation() = default; + Operation(const ngraph::Output& arg, int64_t add); + void validate_and_infer_types() override; + std::shared_ptr copy_with_new_args(const ngraph::NodeVector& new_args) const override; + bool visit_attributes(ngraph::AttributeVisitor& visitor) override; + int64_t getAddAttr() { return add; } + +private: + int64_t add; +}; +//! [op:header] + +} // namespace TemplateExtension diff --git a/docs/template_plugin/CMakeLists.txt b/docs/template_plugin/CMakeLists.txt new file mode 100644 index 00000000000000..b8b69c16b9f8ef --- /dev/null +++ b/docs/template_plugin/CMakeLists.txt @@ -0,0 +1,31 @@ +# Copyright (C) 2018 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +# [cmake:main] +if (APPLE) + # due to https://cmake.org/cmake/help/v3.12/policy/CMP0068.html + cmake_minimum_required(VERSION 3.9 FATAL_ERROR) +else() + cmake_minimum_required(VERSION 3.7.2 FATAL_ERROR) +endif() + +project(InferenceEngineTemplatePlugin) + +set(IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR ${InferenceEngineTemplatePlugin_SOURCE_DIR}) + +find_package(InferenceEngineDeveloperPackage REQUIRED) + +add_subdirectory(src) + +if(ENABLE_TESTS) + include(CTest) + enable_testing() + +endif() +# [cmake:main] + +# install + +# ATTENTION: uncomment to install component +# ie_cpack(template) diff --git a/docs/template_plugin/README.md b/docs/template_plugin/README.md new file mode 100644 index 00000000000000..49d8cf0ec2ed30 --- /dev/null +++ b/docs/template_plugin/README.md @@ -0,0 +1,18 @@ +# template-plugin + +Template Plugin for Inference Engine which demonstrates basics of how Inference Engine plugin can be built and implemented on top of Inference Engine Developer Package and Plugin API. + +## How to build + +```bash +$ cd $DLDT_HOME +$ mkdir $DLDT_HOME/build +$ cd $DLDT_HOME/build +$ cmake -DENABLE_TESTS=ON -DENABLE_BEH_TESTS=ON -DENABLE_FUNCTIONAL_TESTS=ON .. +$ make -j8 +$ cd $TEMPLATE_PLUGIN_HOME +$ mkdir $TEMPLATE_PLUGIN_HOME/build +$ cd $TEMPLATE_PLUGIN_HOME/build +$ cmake -DInferenceEngineDeveloperPackage_DIR=$DLDT_HOME/build .. 
+$ make -j8 +``` diff --git a/docs/template_plugin/include/template/template_config.hpp b/docs/template_plugin/include/template/template_config.hpp new file mode 100644 index 00000000000000..78760214c7f188 --- /dev/null +++ b/docs/template_plugin/include/template/template_config.hpp @@ -0,0 +1,59 @@ +// Copyright (C) 2018-2019 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +/** + * @brief A header that defines advanced configuration properties for the Template plugin. + * These properties should be used in the SetConfig() and LoadNetwork() methods of plugins + * + * @file template_config.hpp + */ + +#pragma once + +#include +#include "ie_plugin_config.hpp" + +namespace InferenceEngine { + +namespace TemplateMetrics { + +/** + * @def TEMPLATE_METRIC_VALUE(name) + * @brief Shortcut for defining Template metric values + */ +#define TEMPLATE_METRIC_VALUE(name) InferenceEngine::TemplateMetrics::name +#define DECLARE_TEMPLATE_METRIC_VALUE(name) static constexpr auto name = #name + +// ! [public_header:metrics] +/** + * @brief Defines whether the current Template device instance supports hardware blocks for fast convolution computations. + */ +DECLARE_TEMPLATE_METRIC_VALUE(HARDWARE_CONVOLUTION); +// ! [public_header:metrics] + +} // namespace TemplateMetrics + +namespace TemplateConfigParams { + +/** + * @def TEMPLATE_CONFIG_KEY(name) + * @brief Shortcut for defining Template device configuration keys + */ +#define TEMPLATE_CONFIG_KEY(name) InferenceEngine::TemplateConfigParams::_CONFIG_KEY(TEMPLATE_##name) + +#define DECLARE_TEMPLATE_CONFIG_KEY(name) DECLARE_CONFIG_KEY(TEMPLATE_##name) +#define DECLARE_TEMPLATE_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(TEMPLATE_##name) + +/** + * @brief The key to define the type of transformations for TEMPLATE inputs and outputs. + * TEMPLATE uses a custom data layout for input and output blobs. The IE TEMPLATE plugin provides a custom + * optimized version of the transformation functions that does not use OpenMP and is much faster + * than the native TEMPLATE functions. Values: "NO" - optimized plugin transformations + are used, "YES" - native TEMPLATE transformations are used. 
+ */ +DECLARE_TEMPLATE_CONFIG_KEY(ANY_CONFIG_KEY); + + +} // namespace TemplateConfigParams +} // namespace InferenceEngine diff --git a/docs/template_plugin/src/CMakeLists.txt b/docs/template_plugin/src/CMakeLists.txt new file mode 100644 index 00000000000000..2b78488285947c --- /dev/null +++ b/docs/template_plugin/src/CMakeLists.txt @@ -0,0 +1,43 @@ +# Copyright (C) 2020 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +# [cmake:plugin] +set(TARGET_NAME "templatePlugin") + +if(ENABLE_LTO) + ie_enable_lto() +endif() + +file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) +file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp) + +# adds a shared library with plugin +ie_add_plugin(NAME ${TARGET_NAME} + DEVICE_NAME "TEMPLATE" + SOURCES ${SOURCES} ${HEADERS} + SKIP_INSTALL # ATTENTION: uncomment to install component + VERSION_DEFINES_FOR template_plugin.cpp) + +target_include_directories(${TARGET_NAME} PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}" + "${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include") + +target_link_libraries(${TARGET_NAME} PRIVATE IE::inference_engine IE::inference_engine_transformations ${NGRAPH_LIBRARIES} ${INTEL_ITT_LIBS}) + +# ATTENTION: uncomment to register a plugin in the plugins.xml file +# ie_register_plugins(MAIN_TARGET ${TARGET_NAME} + # POSSIBLE_PLUGINS ${TARGET_NAME}) +# [cmake:plugin] + +# ATTENTION: uncomment to install component +# install + +# set(component_name template) +# ie_cpack_add_component(${component_name} REQUIRED) + +# install(TARGETS ${TARGET_NAME} +# RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} +# ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} +# LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} +# COMPONENT ${component_name}) diff --git a/docs/template_plugin/src/template_async_infer_request.cpp b/docs/template_plugin/src/template_async_infer_request.cpp new file mode 100644 index 00000000000000..0b9516bb461d97 --- /dev/null +++ b/docs/template_plugin/src/template_async_infer_request.cpp @@ -0,0 +1,44 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include + +#include "template_async_infer_request.hpp" +#include "template_executable_network.hpp" + +using namespace TemplatePlugin; + +// ! [async_infer_request:ctor] +TemplateAsyncInferRequest::TemplateAsyncInferRequest( + const TemplateInferRequest::Ptr& inferRequest, + const InferenceEngine::ITaskExecutor::Ptr& cpuTaskExecutor, + const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, + const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) : + AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor), + _inferRequest(inferRequest), _waitExecutor(waitExecutor) { + _pipeline = { + {cpuTaskExecutor, [this] { + IE_PROFILING_AUTO_SCOPE(PreprocessingAndStartPipeline) + _inferRequest->inferPreprocess(); + _inferRequest->startPipeline(); + }}, + {_waitExecutor, [this] { + IE_PROFILING_AUTO_SCOPE(WaitPipeline) + _inferRequest->waitPipeline(); + }}, + {cpuTaskExecutor, [this] { + IE_PROFILING_AUTO_SCOPE(Postprocessing) + _inferRequest->inferPostprocess(); + }} + }; +} +// ! [async_infer_request:ctor] + +// ! [async_infer_request:dtor] +TemplateAsyncInferRequest::~TemplateAsyncInferRequest() { + InferenceEngine::AsyncInferRequestThreadSafeDefault::StopAndWait(); +} +// ! 
[async_infer_request:dtor] diff --git a/docs/template_plugin/src/template_async_infer_request.hpp b/docs/template_plugin/src/template_async_infer_request.hpp new file mode 100644 index 00000000000000..8e9dd807212d8f --- /dev/null +++ b/docs/template_plugin/src/template_async_infer_request.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + + +#pragma once + +#include + +#include "template_infer_request.hpp" + +namespace TemplatePlugin { + +// ! [async_infer_request:header] +class TemplateAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault { +public: + TemplateAsyncInferRequest(const TemplateInferRequest::Ptr& inferRequest, + const InferenceEngine::ITaskExecutor::Ptr& taskExecutor, + const InferenceEngine::ITaskExecutor::Ptr& waitExecutor, + const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor); + + ~TemplateAsyncInferRequest() override; + +private: + TemplateInferRequest::Ptr _inferRequest; + InferenceEngine::ITaskExecutor::Ptr _waitExecutor; +}; +// ! [async_infer_request:header] + +} // namespace TemplatePlugin diff --git a/docs/template_plugin/src/template_config.cpp b/docs/template_plugin/src/template_config.cpp new file mode 100644 index 00000000000000..e60d0ac46fcdd4 --- /dev/null +++ b/docs/template_plugin/src/template_config.cpp @@ -0,0 +1,45 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + + +#include +#include +#include + +#include +#include +#include +#include + +#include "template_config.hpp" + +using namespace TemplatePlugin; + +Configuration::Configuration() { } + +Configuration::Configuration(const ConfigMap& config, const Configuration & defaultCfg, bool throwOnUnsupported) { + *this = defaultCfg; + for (auto&& c : config) { + const auto& key = c.first; + const auto& value = c.second; + + if (CONFIG_KEY(DEVICE_ID) == key) { + deviceId = std::stoi(value); + } else if (CONFIG_KEY(PERF_COUNT) == key) { + perfCount = (CONFIG_VALUE(YES) == value); + } else if (throwOnUnsupported) { + THROW_IE_EXCEPTION << NOT_FOUND_str << ": " << key; + } + } +} + +InferenceEngine::Parameter Configuration::Get(const std::string& name) const { + if (name == CONFIG_KEY(DEVICE_ID)) { + return {std::to_string(deviceId)}; + } else if (name == CONFIG_KEY(PERF_COUNT)) { + return {perfCount}; + } else { + THROW_IE_EXCEPTION << NOT_FOUND_str << ": " << name; + } +} diff --git a/docs/template_plugin/src/template_config.hpp b/docs/template_plugin/src/template_config.hpp new file mode 100644 index 00000000000000..6f6940aea7cc53 --- /dev/null +++ b/docs/template_plugin/src/template_config.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include + +namespace TemplatePlugin { + +template +using IOMap = std::unordered_map; + +// ! [configuration:header] +using ConfigMap = std::map; + +struct Configuration { + Configuration(); + Configuration(const Configuration&) = default; + Configuration(Configuration&&) = default; + Configuration& operator=(const Configuration&) = default; + Configuration& operator=(Configuration&&) = default; + + explicit Configuration(const ConfigMap& config, const Configuration & defaultCfg = {}, const bool throwOnUnsupported = true); + + InferenceEngine::Parameter Get(const std::string& name) const; + + // Plugin configuration parameters + + int deviceId = 0; + bool perfCount = true; +}; +// ! 
[configuration:header] + +} // namespace TemplatePlugin diff --git a/docs/template_plugin/src/template_executable_network.cpp b/docs/template_plugin/src/template_executable_network.cpp new file mode 100644 index 00000000000000..8efc04f56bfbb2 --- /dev/null +++ b/docs/template_plugin/src/template_executable_network.cpp @@ -0,0 +1,167 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include
+ +#include +#include +#include + +#include + +#include "template_plugin.hpp" +#include "template_executable_network.hpp" + +using namespace TemplatePlugin; + +// ! [executable_network:ctor_cnnnetwork] +TemplatePlugin::ExecutableNetwork::ExecutableNetwork(InferenceEngine::ICNNNetwork& network, + const Configuration& cfg): + _name(network.getName()), + _cfg(cfg), + _waitExecutor(InferenceEngine::ExecutorManager::getInstance()->getExecutor("Template")) { + // TODO: if your plugin supports device ID (more than one instance of the device can be on the host machine) + // you should select the proper device based on KEY_DEVICE_ID or automatic behavior + // In this case, _waitExecutor should also be created per device. + + try { + if (std::shared_ptr ngraphFunction = network.getFunction()) { + CompileGraph(ngraphFunction); + } else { + THROW_IE_EXCEPTION << "TEMPLATE plugin can compile only IR v10 networks"; + } + } + catch (const InferenceEngineException & e) { + throw e; + } + catch (const std::exception & e) { + THROW_IE_EXCEPTION << "Standard exception from compilation library: " << e.what(); + } + catch (...) { + THROW_IE_EXCEPTION << "Generic exception is thrown"; + } +} +// ! [executable_network:ctor_cnnnetwork] + +// ! [executable_network:ctor_import_stream] +TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream & model, + const Configuration& cfg) : + _cfg(cfg) { + // TODO: since Import network is not a mandatory functionality, this ctor can just be removed +} +// ! [executable_network:ctor_import_stream] + +// ! [executable_network:compile_graph] +void TemplatePlugin::ExecutableNetwork::CompileGraph(const std::shared_ptr & ngraphFunction) { + // TODO: perform actual graph compilation taking `_cfg` into account + + // 1. Copy ngraph::Function first to apply some transformations later in + // ExecutableNetwork::CompileGraph, which modify the original ngraph::Function + const bool shareConsts = false, constFolding = false; + std::vector<::ngraph::element::Type> new_types; + std::vector<::ngraph::PartialShape> new_shapes; + + for (const auto &parameter : ngraphFunction->get_parameters()) { + new_shapes.emplace_back(parameter->get_partial_shape()); + new_types.emplace_back(parameter->get_element_type()); + } + + auto copyFunction = ngraph::specialize_function(std::const_pointer_cast(ngraphFunction), + new_types, new_shapes, std::vector(new_types.size(), nullptr), constFolding, shareConsts); + + // 2. Perform common and device-specific transformations + ngraph::pass::Manager passManager; + // Example: register standard ngraph transformation from ngraph::ngraph + passManager.register_pass(); + // Example: register inference engine optimization transformation for IE::inference_engine_transformations + passManager.register_pass(); + // Register any other transformations + // .. + + // After `run_passes`, we have the transformed function, where operations match device operations, + // and we can create the device hardware-dependent graph + passManager.run_passes(copyFunction); + + // 3. Iterate over operations and create hardware-specific ngraph + for (const auto& op : copyFunction->get_ordered_ops()) { + // TODO: map ngraph `op` to device operation + } + + // 4. Perform any other steps like allocation and filling device buffers, and so on +} +// ! [executable_network:compile_graph] + +// ! 
[executable_network:create_infer_request_impl] +InferenceEngine::InferRequestInternal::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs, + InferenceEngine::OutputsDataMap networkOutputs) { + return std::make_shared(networkInputs, networkOutputs, std::static_pointer_cast(shared_from_this())); +} +// ! [executable_network:create_infer_request_impl] + +// ! [executable_network:create_infer_request] +void TemplatePlugin::ExecutableNetwork::CreateInferRequest(IInferRequest::Ptr& asyncRequest) { + auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs); + auto asyncThreadSafeImpl = std::make_shared(std::static_pointer_cast(internalRequest), + _taskExecutor, _waitExecutor, _callbackExecutor); + asyncRequest.reset(new InferenceEngine::InferRequestBase(asyncThreadSafeImpl), + [](InferenceEngine::IInferRequest *p) { p->Release(); }); + asyncThreadSafeImpl->SetPointerToPublicInterface(asyncRequest); +} +// ! [executable_network:create_infer_request] + +// ! [executable_network:get_config] +void TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name, Parameter &result, ResponseDesc *resp) const { + // TODO: return more supported values for config keys + if (name == CONFIG_KEY(DEVICE_ID) || + name == CONFIG_KEY(PERF_COUNT)) { + result = _cfg.Get(name); + } else { + THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork config key: " << name; + } +} +// ! [executable_network:get_config] + +// ! [executable_network:get_metric] +void TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *) const { + // TODO: return more supported values for metrics + if (METRIC_KEY(SUPPORTED_METRICS) == name) { + result = IE_SET_METRIC(SUPPORTED_METRICS, std::vector{ + METRIC_KEY(NETWORK_NAME), + METRIC_KEY(SUPPORTED_METRICS), + METRIC_KEY(SUPPORTED_CONFIG_KEYS), + METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)}); + } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) { + result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, std::vector{ + CONFIG_KEY(DEVICE_ID), + CONFIG_KEY(PERF_COUNT)}); + } else if (METRIC_KEY(NETWORK_NAME) == name) { + result = IE_SET_METRIC(NETWORK_NAME, _name); + } else if (METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS) == name) { + // TODO: fill with actual number + unsigned int value = 1; + result = IE_SET_METRIC(OPTIMAL_NUMBER_OF_INFER_REQUESTS, value); + } else { + THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork metric: " << name; + } +} +// ! [executable_network:get_metric] + +// ! [executable_network:export_impl] +void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& dlaModel) { + // TODO: Code which exports graph from std::ostream +} +// ! 
[executable_network:export_impl] diff --git a/docs/template_plugin/src/template_executable_network.hpp b/docs/template_plugin/src/template_executable_network.hpp new file mode 100644 index 00000000000000..58f15cd494593b --- /dev/null +++ b/docs/template_plugin/src/template_executable_network.hpp @@ -0,0 +1,68 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "template_config.hpp" +#include "template_infer_request.hpp" +#include "template_async_infer_request.hpp" + +namespace TemplatePlugin { + +class Engine; + +/** + * @class ExecutableNetwork + * @brief Interface of executable network + */ +// ! [executable_network:header] +class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault { +public: + ExecutableNetwork(InferenceEngine::ICNNNetwork& network, + const Configuration& cfg); + + ExecutableNetwork(std::istream & model, + const Configuration& cfg); + + ~ExecutableNetwork() override = default; + + // Methods from a base class ExecutableNetworkThreadSafeDefault + + void ExportImpl(std::ostream& model) override; + InferenceEngine::InferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs, + InferenceEngine::OutputsDataMap networkOutputs) override; + void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override; + void GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override; + void GetConfig(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override; + + std::atomic _requestId = {0}; + std::string _name; + Configuration _cfg; + +private: + void CompileGraph(const std::shared_ptr & ngraphFunction); + + std::shared_ptr _plugin; + InferenceEngine::ITaskExecutor::Ptr _waitExecutor; +}; +// ! [executable_network:header] + +} // namespace TemplatePlugin diff --git a/docs/template_plugin/src/template_infer_request.cpp b/docs/template_plugin/src/template_infer_request.cpp new file mode 100644 index 00000000000000..e6d0f8eda6eafe --- /dev/null +++ b/docs/template_plugin/src/template_infer_request.cpp @@ -0,0 +1,224 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include