Unverified Commit ab46cb1d authored by Ewa Tusień's avatar Ewa Tusień Committed by GitHub

Removed onnxifi folder. (#4241)

Co-authored-by: 's avatarMichał Karzyński <postrational@users.noreply.github.com>
parent 9d06ee03
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# Build the ONNXIFI bridge as a shared library exposing the ONNXIFI C ABI.
add_library(onnxifi-ngraph SHARED
    onnxifi.cpp          # extern "C" entry points implementing the ONNXIFI ABI
    backend.hpp
    backend_manager.hpp
    backend_manager.cpp
    exceptions.hpp
    span.hpp
    tensor.hpp
    tensor.cpp)

# PRIVATE: consumers talk to this library only through the C ABI, so the
# nGraph link dependency need not propagate.
target_link_libraries(onnxifi-ngraph PRIVATE ngraph)

# Ensure the ONNX headers exist before this target compiles.
add_dependencies(onnxifi-ngraph onnx::libonnx)
# SYSTEM suppresses warnings originating from the third-party ONNX headers.
target_include_directories(onnxifi-ngraph SYSTEM PRIVATE ${ONNX_INCLUDE_DIR} ${ONNX_IMPORT_INCLUDE_DIR})

# Library version tracks nGraph; the ABI (SOVERSION) is versioned separately.
set(ONNXIFI_VERSION ${NGRAPH_VERSION})
set(ONNXIFI_ABI_VERSION 1)
if(NGRAPH_LIB_VERSIONING_ENABLE)
    set_target_properties(onnxifi-ngraph PROPERTIES
        VERSION ${ONNXIFI_VERSION}
        SOVERSION ${ONNXIFI_ABI_VERSION})
endif()

# On Windows the ONNXIFI entry points must be explicitly exported from the DLL.
if(WIN32)
    if(MSVC)
        target_compile_definitions(onnxifi-ngraph PRIVATE "ONNXIFI_PUBLIC=__declspec(dllexport)")
    else()
        target_compile_definitions(onnxifi-ngraph PRIVATE "ONNXIFI_PUBLIC=__attribute__((__dllexport__))")
    endif()
endif()

install(TARGETS onnxifi-ngraph DESTINATION ${NGRAPH_INSTALL_LIB})
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory> // std::shared_ptr
#include <string> // std::string
#include <utility> // std::move
#include <vector> // std::vector
#include "ngraph/function.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/tensor.hpp"
namespace ngraph
{
namespace onnxifi
{
/// \brief ONNXIFI extensions to nGraph backend
/// \brief Thin ONNXIFI-facing wrapper around an nGraph runtime backend.
///
/// Only the backend type string is stored at construction time; the
/// underlying runtime::Backend is created lazily on first use.
class Backend
{
public:
    // Not copyable (would duplicate the lazily-created runtime backend);
    // moving transfers ownership of it.
    Backend() = delete;
    Backend(const Backend&) = delete;
    Backend& operator=(const Backend&) = delete;
    Backend(Backend&&) = default;
    Backend& operator=(Backend&&) = default;

    /// \param type nGraph backend type string passed to runtime::Backend::create.
    explicit Backend(const std::string& type)
        : m_type{type}
    {
    }

    /// \return The backend type string this wrapper was constructed with.
    const std::string& get_type() const { return m_type; }

    /// Compile an nGraph function on the wrapped runtime backend.
    std::shared_ptr<runtime::Executable>
        compile(const std::shared_ptr<Function>& function) const
    {
        return get().compile(function);
    }

private:
    std::string m_type{};
    // Created on demand by get(); mutable so the lazy creation can happen
    // inside const member functions.  NOTE(review): the lazy initialization
    // is unsynchronized — confirm single-threaded use or add a lock.
    mutable std::shared_ptr<runtime::Backend> m_backend{nullptr};

    // Return the underlying runtime backend, creating it on first call.
    runtime::Backend& get() const
    {
        if (!m_backend)
        {
            m_backend = runtime::Backend::create(m_type);
        }
        return *m_backend;
    }
};
} // namespace onnxifi
} // namespace ngraph
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm> // std::transform
#include <cstdlib>   // std::size_t, std::uintptr_t
#include <iterator>  // std::begin, std::end
#include <stdexcept> // std::invalid_argument, std::out_of_range

#include <onnx/onnxifi.h>

#include "backend.hpp"
#include "backend_manager.hpp"
#include "exceptions.hpp"
#include "ngraph/runtime/backend_manager.hpp"
namespace ngraph
{
namespace onnxifi
{
// Discover every registered nGraph backend and register an ONNXIFI handle
// for each one.
BackendManager::BackendManager()
{
    // Create an ONNXIFI backend entry for each registered nGraph backend.
    // The handle (map key) is the address of the backend's type string inside
    // the function-local `registered_backends` vector, reinterpreted as an
    // integer.  The addresses are unique for the lifetime of the loop, which
    // is all that is needed for opaque IDs — but they dangle once this
    // constructor returns, so they must NEVER be dereferenced; they are used
    // purely as map keys.  NOTE(review): confirm no caller treats these IDs
    // as pointers.
    //
    // Per the ONNXIFI spec, backends are hot-pluggable, i.e. two calls to
    // onnxGetBackendIDs() may observe different backend sets.  Re-discovery
    // is not implemented here; the set is fixed at singleton construction.
    auto registered_backends = runtime::BackendManager::get_registered_backends();
    for (const auto& type : registered_backends)
    {
        m_registered_backends.emplace(reinterpret_cast<std::uintptr_t>(&type),
                                      Backend{type});
    }
}
// Copy the registered ONNXIFI backend IDs into `backend_ids`.
//
// On entry *count holds the capacity of the `backend_ids` array; on exit it
// holds the number of registered backends (set even on the fallback path, so
// the caller knows how much space to retry with).
//
// \throws status::null_pointer if `count` is null.
// \throws status::fallback     if `backend_ids` is null or too small.
void BackendManager::get_registered_ids(::onnxBackendID* backend_ids,
                                        std::size_t* count) const
{
    if (count == nullptr)
    {
        throw status::null_pointer{};
    }
    const std::size_t requested{*count};
    // Hold the lock for both the size query and the copy: the original code
    // read m_registered_backends.size() before locking, so the reported
    // count could go stale if another thread unregistered a backend between
    // the size read and the transform.
    std::lock_guard<decltype(m_mutex)> lock{m_mutex};
    *count = m_registered_backends.size();
    if ((requested < *count) || (backend_ids == nullptr))
    {
        throw status::fallback{};
    }
    std::transform(std::begin(m_registered_backends),
                   std::end(m_registered_backends),
                   backend_ids,
                   [](const std::map<std::uintptr_t, Backend>::value_type& pair)
                       -> ::onnxBackendID {
                       // The key already is the opaque handle value.
                       return reinterpret_cast<::onnxBackendID>(pair.first);
                   });
}
} // namespace onnxifi
} // namespace ngraph
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cstddef> // std::size_t, std::uintptr_t
#include <map> // std::map
#include <mutex> // std::mutex
#include <onnx/onnxifi.h>
#include "backend.hpp"
#include "ngraph/runtime/backend.hpp"
namespace ngraph
{
namespace onnxifi
{
/// \brief ONNXIFI backend manager
/// \brief ONNXIFI backend manager (process-wide singleton).
///
/// Owns the map from opaque ONNXIFI backend IDs to Backend wrappers and
/// serializes access to it with an internal mutex.  All public access goes
/// through the static forwarding functions.
class BackendManager
{
public:
    // Singleton: neither copyable nor movable.
    BackendManager(const BackendManager&) = delete;
    BackendManager& operator=(const BackendManager&) = delete;
    BackendManager(BackendManager&&) = delete;
    BackendManager& operator=(BackendManager&&) = delete;

    /// Copy registered backend IDs into `backend_ids`; see
    /// get_registered_ids() for the exact contract and exceptions.
    static void get_backend_ids(::onnxBackendID* backend_ids, std::size_t* count)
    {
        instance().get_registered_ids(backend_ids, count);
    }

    /// Remove the backend registered under `backend_id` (no-op if unknown).
    static void unregister(::onnxBackendID backend_id)
    {
        instance().unregister_backend(backend_id);
    }

    /// Look up the Backend for `backend_id`.
    /// \throws std::out_of_range if the ID is not registered (map::at).
    static const Backend& get(::onnxBackendID backend_id)
    {
        return instance().get_backend(backend_id);
    }

private:
    // mutable so the const lookup functions can still lock.
    mutable std::mutex m_mutex{};
    std::map<std::uintptr_t, Backend> m_registered_backends{};

    // Private: instances exist only via instance().
    BackendManager();

    // Meyers singleton; initialization is thread-safe since C++11.
    static BackendManager& instance()
    {
        static BackendManager backend_manager;
        return backend_manager;
    }

    void unregister_backend(std::uintptr_t id)
    {
        std::lock_guard<decltype(m_mutex)> lock{m_mutex};
        m_registered_backends.erase(id);
    }
    // ::onnxBackendID is an opaque pointer type; forward to the integer
    // overload (distinct parameter types, so the overloads do not collide).
    void unregister_backend(::onnxBackendID id)
    {
        return unregister_backend(reinterpret_cast<std::uintptr_t>(id));
    }

    void get_registered_ids(::onnxBackendID* backend_ids, std::size_t* count) const;

    // NOTE(review): returns a reference into the map after the lock is
    // released; the reference can dangle if another thread unregisters this
    // backend concurrently — confirm callers' lifetime assumptions.
    const Backend& get_backend(std::uintptr_t id) const
    {
        std::lock_guard<decltype(m_mutex)> lock{m_mutex};
        return m_registered_backends.at(id);
    }
    const Backend& get_backend(::onnxBackendID id) const
    {
        return get_backend(reinterpret_cast<std::uintptr_t>(id));
    }
};
} // namespace onnxifi
} // namespace ngraph
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <onnx/onnxifi.h>
namespace ngraph
{
namespace onnxifi
{
namespace status
{
struct runtime
{
explicit constexpr runtime(::onnxStatus status)
: m_status{status}
{
}
constexpr ::onnxStatus get_status() const { return m_status; }
private:
::onnxStatus m_status;
};
struct internal : runtime
{
constexpr internal()
: runtime{ONNXIFI_STATUS_INTERNAL_ERROR}
{
}
};
struct fallback : runtime
{
constexpr fallback()
: runtime{ONNXIFI_STATUS_FALLBACK}
{
}
};
struct invalid_id : runtime
{
constexpr invalid_id()
: runtime{ONNXIFI_STATUS_INVALID_ID}
{
}
};
struct invalid_size : runtime
{
constexpr invalid_size()
: runtime{ONNXIFI_STATUS_INVALID_SIZE}
{
}
};
struct null_pointer : runtime
{
constexpr null_pointer()
: runtime{ONNXIFI_STATUS_INVALID_POINTER}
{
}
};
struct invalid_protobuf : runtime
{
constexpr invalid_protobuf()
: runtime{ONNXIFI_STATUS_INVALID_PROTOBUF}
{
}
};
struct invalid_model : runtime
{
constexpr invalid_model()
: runtime{ONNXIFI_STATUS_INVALID_MODEL}
{
}
};
struct invalid_backend : runtime
{
constexpr invalid_backend()
: runtime{ONNXIFI_STATUS_INVALID_BACKEND}
{
}
};
struct invalid_graph : runtime
{
constexpr invalid_graph()
: runtime{ONNXIFI_STATUS_INVALID_GRAPH}
{
}
};
struct invalid_event : runtime
{
constexpr invalid_event()
: runtime{ONNXIFI_STATUS_INVALID_EVENT}
{
}
};
struct invalid_state : runtime
{
constexpr invalid_state()
: runtime{ONNXIFI_STATUS_INVALID_STATE}
{
}
};
struct invalid_name : runtime
{
constexpr invalid_name()
: runtime{ONNXIFI_STATUS_INVALID_NAME}
{
}
};
struct invalid_shape : runtime
{
constexpr invalid_shape()
: runtime{ONNXIFI_STATUS_INVALID_SHAPE}
{
}
};
struct invalid_datatype : runtime
{
constexpr invalid_datatype()
: runtime{ONNXIFI_STATUS_INVALID_DATATYPE}
{
}
};
struct invalid_memory_type : runtime
{
constexpr invalid_memory_type()
: runtime{ONNXIFI_STATUS_INVALID_MEMORY_TYPE}
{
}
};
struct invalid_memory_location : runtime
{
constexpr invalid_memory_location()
: runtime{ONNXIFI_STATUS_INVALID_MEMORY_LOCATION}
{
}
};
struct invalid_fence_type : runtime
{
constexpr invalid_fence_type()
: runtime{ONNXIFI_STATUS_INVALID_FENCE_TYPE}
{
}
};
struct invalid_property : runtime
{
constexpr invalid_property()
: runtime{ONNXIFI_STATUS_INVALID_PROPERTY}
{
}
};
struct unsupported_tag : runtime
{
constexpr unsupported_tag()
: runtime{ONNXIFI_STATUS_UNSUPPORTED_TAG}
{
}
};
struct unsupported_version : runtime
{
constexpr unsupported_version()
: runtime{ONNXIFI_STATUS_UNSUPPORTED_VERSION}
{
}
};
struct unsupported_operator : runtime
{
constexpr unsupported_operator()
: runtime{ONNXIFI_STATUS_UNSUPPORTED_OPERATOR}
{
}
};
struct unsupported_attribute : runtime
{
constexpr unsupported_attribute()
: runtime{ONNXIFI_STATUS_UNSUPPORTED_ATTRIBUTE}
{
}
};
struct unsupported_shape : runtime
{
constexpr unsupported_shape()
: runtime{ONNXIFI_STATUS_UNSUPPORTED_SHAPE}
{
}
};
struct unsupported_datatype : runtime
{
constexpr unsupported_datatype()
: runtime{ONNXIFI_STATUS_UNSUPPORTED_DATATYPE}
{
}
};
struct unsupported_memory_type : runtime
{
constexpr unsupported_memory_type()
: runtime{ONNXIFI_STATUS_UNSUPPORTED_MEMORY_TYPE}
{
}
};
struct unsupported_fence_type : runtime
{
constexpr unsupported_fence_type()
: runtime{ONNXIFI_STATUS_UNSUPPORTED_FENCE_TYPE}
{
}
};
struct unsupported_property : runtime
{
constexpr unsupported_property()
: runtime{ONNXIFI_STATUS_UNSUPPORTED_PROPERTY}
{
}
};
struct unidentified_name : runtime
{
constexpr unidentified_name()
: runtime{ONNXIFI_STATUS_UNIDENTIFIED_NAME}
{
}
};
struct mismatching_shape : runtime
{
constexpr mismatching_shape()
: runtime{ONNXIFI_STATUS_MISMATCHING_SHAPE}
{
}
};
struct mismatching_datatype : runtime
{
constexpr mismatching_datatype()
: runtime{ONNXIFI_STATUS_MISMATCHING_DATATYPE}
{
}
};
struct no_system_memory : runtime
{
constexpr no_system_memory()
: runtime{ONNXIFI_STATUS_NO_SYSTEM_MEMORY}
{
}
};
struct no_device_memory : runtime
{
constexpr no_device_memory()
: runtime{ONNXIFI_STATUS_NO_DEVICE_MEMORY}
{
}
};
struct no_system_resources : runtime
{
constexpr no_system_resources()
: runtime{ONNXIFI_STATUS_NO_SYSTEM_RESOURCES}
{
}
};
struct no_device_resources : runtime
{
constexpr no_device_resources()
: runtime{ONNXIFI_STATUS_NO_DEVICE_RESOURCES}
{
}
};
struct backend_unavailable : runtime
{
constexpr backend_unavailable()
: runtime{ONNXIFI_STATUS_BACKEND_UNAVAILABLE}
{
}
};
} // namespace status
} // namespace onnxifi
} // namespace ngraph
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory> // std::shared_ptr
#include <string> // std::string
#include <utility> // std::move
#include <vector> // std::vector
#include "ngraph/function.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/executable.hpp"
#include "ngraph/runtime/tensor.hpp"
namespace ngraph
{
namespace onnxifi
{
/// \brief ONNXIFI extensions to nGraph Executable
class Executable
{
public:
Executable(const Executable&) = delete;
Executable& operator=(const Executable&) = delete;
Executable(Executable&&) = default;
Executable& operator=(Executable&&) = default;
explicit Executable(const std::shared_ptr<runtime::Executable>& executable)
: m_executable{executable}
{
}
bool call(const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& inputs) const
{
return m_executable->call(outputs, inputs);
}
bool call_with_validate(
const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
const std::vector<std::shared_ptr<runtime::Tensor>>& inputs) const
{
return m_executable->call_with_validate(outputs, inputs);
}
private:
mutable std::shared_ptr<runtime::Executable> m_executable{nullptr};
};
} // namespace onnxifi
} // namespace ngraph
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <cstddef>
#include <cstdint>
#include <onnx/onnxifi.h>
#include <stdexcept>
#include "backend_manager.hpp"
#include "exceptions.hpp"
using namespace ngraph::onnxifi;
extern "C" {
// onnxGetBackendIDs — the one fully implemented entry point.  Delegates to
// BackendManager and maps C++ exceptions onto ONNXIFI status codes: C++
// exceptions must never propagate across the C ABI boundary.
ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
onnxGetBackendIDs(onnxBackendID* backendIDs, std::size_t* numBackends)
{
    try
    {
        BackendManager::get_backend_ids(backendIDs, numBackends);
        return ONNXIFI_STATUS_SUCCESS;
    }
    catch (const status::runtime& e)
    {
        // Validation failures carry their exact ONNXIFI status code.
        return e.get_status();
    }
    catch (const std::bad_alloc&)
    {
        return ONNXIFI_STATUS_NO_SYSTEM_MEMORY;
    }
    catch (...)
    {
        // Catch-all: an unknown exception becomes an internal error.
        return ONNXIFI_STATUS_INTERNAL_ERROR;
    }
}
// ----------------------------------------------------------------------------
// The remaining ONNXIFI entry points are unimplemented stubs.  Query and
// creation functions report ONNXIFI_STATUS_BACKEND_UNAVAILABLE; the release
// functions report ONNXIFI_STATUS_INTERNAL_ERROR, since no valid handle can
// ever have been handed out for them to release.
// ----------------------------------------------------------------------------
ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxReleaseBackendID(onnxBackendID /* backendID */)
{
    return ONNXIFI_STATUS_INTERNAL_ERROR;
}

ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
onnxGetBackendInfo(onnxBackendID /* backendID */,
                   onnxBackendInfo /* infoType */,
                   void* /* infoValue */,
                   std::size_t* /* infoValueSize */)
{
    return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
}

ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxGetBackendCompatibility(
    onnxBackendID /* backendID */, std::size_t /* onnxModelSize */, const void* /* onnxModel */)
{
    return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
}

ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
onnxInitBackend(onnxBackendID /* backendID */,
                const uint64_t* /* auxPropertiesList */,
                onnxBackend* /* backend */)
{
    return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
}

ONNXIFI_PUBLIC
ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxReleaseBackend(onnxBackend /* backend */)
{
    return ONNXIFI_STATUS_INTERNAL_ERROR;
}

ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxInitEvent(onnxBackend /* backend */,
                                                                         onnxEvent* /* event */)
{
    return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
}

ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxSignalEvent(onnxEvent /* event */)
{
    return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
}

ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxWaitEvent(onnxEvent /* event */)
{
    return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
}

ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxReleaseEvent(onnxEvent /* event */)
{
    return ONNXIFI_STATUS_INTERNAL_ERROR;
}

ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
onnxInitGraph(onnxBackend /* backend */,
              const uint64_t* /* auxPropertiesList */,
              std::size_t /* onnxModelSize */,
              const void* /* onnxModel */,
              uint32_t /* weightsCount */,
              const onnxTensorDescriptorV1* /* weightDescriptors */,
              onnxGraph* /* graph */)
{
    return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
}

ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
onnxSetGraphIO(onnxGraph /* graph */,
               std::uint32_t /* inputsCount */,
               const onnxTensorDescriptorV1* /* inputDescriptors */,
               std::uint32_t /* outputsCount */,
               const onnxTensorDescriptorV1* /* outputDescriptors */)
{
    return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
}

ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI
onnxRunGraph(onnxGraph /* graph */,
             const onnxMemoryFenceV1* /* inputFence */,
             onnxMemoryFenceV1* /* outputFence */)
{
    return ONNXIFI_STATUS_BACKEND_UNAVAILABLE;
}

ONNXIFI_PUBLIC ONNXIFI_CHECK_RESULT onnxStatus ONNXIFI_ABI onnxReleaseGraph(onnxGraph /* graph */)
{
    return ONNXIFI_STATUS_INTERNAL_ERROR;
}
} // extern "C"
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cstddef>
#include <iterator>
#include <stdexcept>
namespace ngraph
{
namespace onnxifi
{
/// The class template describes an object that can refer to a contiguous
/// sequence of objects, with the first element of the sequence at position zero.
/// This is an implementation of the dynamic-extent case only.
/// Refer to https://en.cppreference.com/w/cpp/container/span for a complete
/// description of the class template.
/// \tparam T element type; must be a complete type that is not an abstract class type.
/// Non-owning view over a contiguous sequence of T (dynamic extent only).
/// \tparam T element type; must be a complete, non-abstract type.
template <typename T>
class Span
{
public:
    using size_type = std::size_t;
    using difference_type = std::ptrdiff_t;
    using value_type = T;
    using reference = value_type&;
    using pointer = value_type*;
    using const_reference = const value_type&;
    using const_pointer = const value_type*;
    using iterator = pointer;
    using const_iterator = const_pointer;
    using reverse_iterator = std::reverse_iterator<iterator>;
    // Fixed: was `const reverse_iterator`, i.e. a const-qualified *mutable*
    // reverse iterator — the value returned by crbegin()/crend() could not
    // be incremented.  A constant reverse iterator adapts const_iterator.
    using const_reverse_iterator = std::reverse_iterator<const_iterator>;

    Span() = default;
    Span(const Span&) = default;
    Span& operator=(const Span&) = default;
    Span(Span&&) noexcept = default;
    Span& operator=(Span&&) noexcept = default;

    /// View `count` elements starting at `buffer`.
    /// NOTE(review): the pointer is reinterpret_cast from K* to T*; the
    /// caller must guarantee the buffer really holds T objects.
    template <typename K>
    Span(const K* buffer, size_type count)
        : m_begin{reinterpret_cast<pointer>(const_cast<K*>(buffer))}
        , m_end{reinterpret_cast<pointer>(const_cast<K*>(buffer)) + count}
        , m_count{count}
    {
    }

    iterator begin() { return m_begin; }
    iterator end() { return m_end; }
    const_iterator begin() const { return m_begin; }
    const_iterator end() const { return m_end; }
    const_iterator cbegin() const { return m_begin; }
    const_iterator cend() const { return m_end; }
    reverse_iterator rbegin() { return reverse_iterator{m_end}; }
    reverse_iterator rend() { return reverse_iterator{m_begin}; }
    const_reverse_iterator crbegin() const { return const_reverse_iterator{m_end}; }
    const_reverse_iterator crend() const { return const_reverse_iterator{m_begin}; }

    /// Bounds-checked element access.
    /// \throws std::out_of_range if `index >= size()`.
    const_reference at(std::size_t index) const
    {
        // Check the index before touching the pointer: the previous code
        // formed the (possibly far out-of-range) pointer with std::next
        // first, which is undefined behaviour for large indices.
        if (index >= m_count)
        {
            throw std::out_of_range{"span"};
        }
        return m_begin[index];
    }
    reference at(std::size_t index)
    {
        if (index >= m_count)
        {
            throw std::out_of_range{"span"};
        }
        return m_begin[index];
    }

    // front()/back() require a non-empty span (no check, matching original).
    reference front() { return *m_begin; }
    const_reference front() const { return *m_begin; }
    reference back() { return *std::prev(m_end); }
    const_reference back() const { return *std::prev(m_end); }
    const_pointer data() const { return m_begin; }

    // operator[] is bounds-checked via at(), matching original behaviour.
    reference operator[](std::size_t index) { return at(index); }
    const_reference operator[](std::size_t index) const { return at(index); }

    size_type size() const { return m_count; }
    bool is_valid() const { return (m_begin != nullptr) && (m_count > 0); }
    bool empty() const { return (m_count == 0); }

private:
    iterator m_begin{nullptr}, m_end{nullptr};
    size_type m_count{0};
};
}
}
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "tensor.hpp"
#include "exceptions.hpp"
#include "span.hpp"
namespace ngraph
{
namespace onnxifi
{
// Validate an ONNXIFI tensor descriptor and cache its shape and element
// count.  Throws a status::* exception (translated to an ONNXIFI status code
// by the extern "C" entry points) for any malformed or unsupported field.
// The descriptor is held by pointer, so it must outlive this Tensor.
Tensor::Tensor(const ::onnxTensorDescriptorV1& tensor)
    : m_tensor{&tensor}
{
    if (tensor.tag != ONNXIFI_TAG_TENSOR_DESCRIPTOR_V1)
    {
        throw status::unsupported_tag{};
    }
    if (tensor.name == nullptr)
    {
        throw status::invalid_name{};
    }
    // Accept only plain scalar types.  Complex types are valid ONNXIFI but
    // deliberately rejected as invalid here; anything else is unknown.
    switch (tensor.dataType)
    {
    case ONNXIFI_DATATYPE_FLOAT16:
    case ONNXIFI_DATATYPE_FLOAT32:
    case ONNXIFI_DATATYPE_FLOAT64:
    case ONNXIFI_DATATYPE_INT8:
    case ONNXIFI_DATATYPE_INT16:
    case ONNXIFI_DATATYPE_INT32:
    case ONNXIFI_DATATYPE_INT64:
    case ONNXIFI_DATATYPE_UINT8:
    case ONNXIFI_DATATYPE_UINT16:
    case ONNXIFI_DATATYPE_UINT32:
    case ONNXIFI_DATATYPE_UINT64: break;
    case ONNXIFI_DATATYPE_COMPLEX64:
    case ONNXIFI_DATATYPE_COMPLEX128: throw status::invalid_datatype{};
    default: throw status::unsupported_datatype{};
    }
    // Only plain CPU memory is supported; other known ONNXIFI memory types
    // are rejected as invalid, unknown values as unsupported.
    switch (tensor.memoryType)
    {
    case ONNXIFI_MEMORY_TYPE_CPU: break;
    case ONNXIFI_MEMORY_TYPE_CUDA_BUFFER:
    case ONNXIFI_MEMORY_TYPE_OPENCL_BUFFER:
    case ONNXIFI_MEMORY_TYPE_OPENGLES_TEXTURE_2D:
    case ONNXIFI_MEMORY_TYPE_D3D_RESOURCE: throw status::invalid_memory_type{};
    default: throw status::unsupported_memory_type{};
    }
    // `dimensions` and `shape` must agree: both present or both absent.
    if ((tensor.dimensions != 0) && (tensor.shape == nullptr))
    {
        throw status::null_pointer{};
    }
    if ((tensor.shape != nullptr) && (tensor.dimensions == 0))
    {
        throw status::invalid_size{};
    }
    if (tensor.shape == nullptr)
    {
        // No shape given: treat as a scalar, represented as shape {1}.
        m_shape = {1};
    }
    else
    {
        // Copy the dimensions, rejecting zero extents, and accumulate the
        // total element count into m_size (initialized to 1 in the header).
        Span<uint64_t> shape{tensor.shape, tensor.dimensions};
        for (const auto& value : shape)
        {
            if (value == 0)
            {
                throw status::invalid_shape{};
            }
            m_shape.push_back(value);
            m_size *= value;
        }
    }
    // The buffer field is an integer-encoded address; zero means no buffer.
    if (tensor.buffer == 0)
    {
        throw status::invalid_memory_location{};
    }
}
// Create an nGraph tensor on `backend` with this tensor's shape and copy the
// descriptor's buffer contents into it.
//
// NOTE(review): ONNXIFI_DATATYPE_FLOAT16 falls through to the FLOAT32 branch,
// creating an f32 tensor and copying sizeof(float) * size() bytes from the
// caller's buffer — but a float16 buffer holds only 2 bytes per element, so
// this both over-reads the source and skips any f16->f32 conversion.
// Confirm whether FLOAT16 should instead be rejected in the constructor.
std::shared_ptr<runtime::Tensor> Tensor::to_ng(runtime::Backend& backend) const
{
    std::shared_ptr<runtime::Tensor> tensor;
    // Each branch: allocate a tensor of the matching nGraph element type,
    // then bulk-copy element-count * element-size bytes from the descriptor.
    switch (m_tensor->dataType)
    {
    case ONNXIFI_DATATYPE_FLOAT16:
    case ONNXIFI_DATATYPE_FLOAT32:
        tensor = backend.create_tensor(element::f32, m_shape);
        tensor->write(data(), sizeof(float) * size());
        break;
    case ONNXIFI_DATATYPE_FLOAT64:
        tensor = backend.create_tensor(element::f64, m_shape);
        tensor->write(data(), sizeof(double) * size());
        break;
    case ONNXIFI_DATATYPE_INT8:
        tensor = backend.create_tensor(element::i8, m_shape);
        tensor->write(data(), sizeof(int8_t) * size());
        break;
    case ONNXIFI_DATATYPE_INT16:
        tensor = backend.create_tensor(element::i16, m_shape);
        tensor->write(data(), sizeof(int16_t) * size());
        break;
    case ONNXIFI_DATATYPE_INT32:
        tensor = backend.create_tensor(element::i32, m_shape);
        tensor->write(data(), sizeof(int32_t) * size());
        break;
    case ONNXIFI_DATATYPE_INT64:
        tensor = backend.create_tensor(element::i64, m_shape);
        tensor->write(data(), sizeof(int64_t) * size());
        break;
    case ONNXIFI_DATATYPE_UINT8:
        tensor = backend.create_tensor(element::u8, m_shape);
        tensor->write(data(), sizeof(uint8_t) * size());
        break;
    case ONNXIFI_DATATYPE_UINT16:
        tensor = backend.create_tensor(element::u16, m_shape);
        tensor->write(data(), sizeof(uint16_t) * size());
        break;
    case ONNXIFI_DATATYPE_UINT32:
        tensor = backend.create_tensor(element::u32, m_shape);
        tensor->write(data(), sizeof(uint32_t) * size());
        break;
    case ONNXIFI_DATATYPE_UINT64:
        tensor = backend.create_tensor(element::u64, m_shape);
        tensor->write(data(), sizeof(uint64_t) * size());
        break;
    // The constructor already filtered data types, so this is unreachable
    // unless the descriptor was mutated after construction.
    default: throw status::unsupported_datatype{};
    }
    return tensor;
}
// Copy the contents of an nGraph tensor back into this descriptor's buffer.
// The number of bytes read is element count times the per-element size of
// the descriptor's declared data type.
//
// NOTE(review): FLOAT16 again uses sizeof(float) (4 bytes/element) — see
// to_ng().  Also, an unrecognized dataType silently leaves readSize as the
// bare element count instead of throwing; confirm whether the silent
// `default: break` is intended (the constructor normally prevents it).
void Tensor::from_ng(const runtime::Tensor& tensor)
{
    std::size_t readSize{tensor.get_element_count()};
    switch (m_tensor->dataType)
    {
    case ONNXIFI_DATATYPE_FLOAT16:
    case ONNXIFI_DATATYPE_FLOAT32: readSize *= sizeof(float); break;
    case ONNXIFI_DATATYPE_FLOAT64: readSize *= sizeof(double); break;
    case ONNXIFI_DATATYPE_INT8: readSize *= sizeof(int8_t); break;
    case ONNXIFI_DATATYPE_INT16: readSize *= sizeof(int16_t); break;
    case ONNXIFI_DATATYPE_INT32: readSize *= sizeof(int32_t); break;
    case ONNXIFI_DATATYPE_INT64: readSize *= sizeof(int64_t); break;
    case ONNXIFI_DATATYPE_UINT8: readSize *= sizeof(uint8_t); break;
    case ONNXIFI_DATATYPE_UINT16: readSize *= sizeof(uint16_t); break;
    case ONNXIFI_DATATYPE_UINT32: readSize *= sizeof(uint32_t); break;
    case ONNXIFI_DATATYPE_UINT64: readSize *= sizeof(uint64_t); break;
    default: break;
    }
    // The descriptor's buffer field is an integer-encoded pointer.
    tensor.read(reinterpret_cast<void*>(m_tensor->buffer), readSize);
}
} // namespace onnxifi
} // namespace ngraph
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include <onnx/onnxifi.h>
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/tensor.hpp"
namespace ngraph
{
namespace onnxifi
{
/// \brief Wrapper for onnxTensorDescriptorV1 class
/// \brief Wrapper for onnxTensorDescriptorV1 class
///
/// Validates a caller-owned ONNXIFI tensor descriptor on construction and
/// provides conversion to/from nGraph runtime tensors.  Holds the descriptor
/// by pointer: the descriptor must outlive this object.
class Tensor
{
public:
    Tensor(const Tensor&) = default;
    Tensor& operator=(const Tensor&) = default;
    Tensor(Tensor&&) = default;
    Tensor& operator=(Tensor&&) = default;
    Tensor() = delete;
    virtual ~Tensor() = default;

    /// Validate `tensor` and cache its shape/element count.
    /// Throws status::* exceptions on malformed descriptors (see tensor.cpp).
    explicit Tensor(const ::onnxTensorDescriptorV1& tensor);

    /// \brief Convert to ngraph::runtime::Tensor
    /// This function method converts ONNXIFI tensor to nGraph tensor.
    /// \param backend the backend to use for nGraph tensor creation.
    /// \returns Shared pointer to nGraph tensor.
    std::shared_ptr<runtime::Tensor> to_ng(runtime::Backend& backend) const;

    /// \brief Copies data from ngraph::runtime::Tensor
    /// This function method writes the content of nGraph tensor.
    /// \param tensor nGraph tensor to copy from.
    void from_ng(const runtime::Tensor& tensor);

    /// Raw data pointer decoded from the descriptor's integer buffer field.
    const void* data() const { return reinterpret_cast<const void*>(m_tensor->buffer); }
    /// Total element count (product of dimensions; 1 for a scalar).
    std::size_t size() const { return m_size; }
    const Shape& get_shape() const { return m_shape; }
    const char* get_name() const { return m_tensor->name; }

protected:
    // Non-owning; the descriptor belongs to the ONNXIFI caller.
    const ::onnxTensorDescriptorV1* m_tensor;
    Shape m_shape;
    // Element count accumulated in the constructor; starts at 1 (scalar).
    std::size_t m_size{1};
};
} // namespace onnxifi
} // namespace ngraph
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment