Commit 0d26f380 authored by Robert Kimball

add range checking for tensor read/write

parent b0de2d3e
...@@ -12,6 +12,8 @@ ...@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
#include <iostream>
#include <clang/CodeGen/ObjectFilePCHContainerOperations.h> #include <clang/CodeGen/ObjectFilePCHContainerOperations.h>
#include <clang/Driver/DriverDiagnostic.h> #include <clang/Driver/DriverDiagnostic.h>
#include <clang/Driver/Options.h> #include <clang/Driver/Options.h>
...@@ -191,7 +193,10 @@ std::unique_ptr<llvm::Module> execution_state::compile(const string& source, con ...@@ -191,7 +193,10 @@ std::unique_ptr<llvm::Module> execution_state::compile(const string& source, con
// Create and execute action // Create and execute action
CodeGenAction* compilerAction = new EmitCodeGenOnlyAction(); CodeGenAction* compilerAction = new EmitCodeGenOnlyAction();
Clang->ExecuteAction(*compilerAction); if (Clang->ExecuteAction(*compilerAction) == false)
{
throw runtime_error("codegen compile failed");
}
buffer.release(); buffer.release();
......
...@@ -101,7 +101,6 @@ bool pass::Liveness::run_on_call_graph(list<shared_ptr<Node>>& ops) ...@@ -101,7 +101,6 @@ bool pass::Liveness::run_on_call_graph(list<shared_ptr<Node>>& ops)
} }
for (Tensor* tensor : outputs) for (Tensor* tensor : outputs)
{ {
NGRAPH_INFO << "found output";
node->liveness_live_list.insert(tensor); node->liveness_live_list.insert(tensor);
node->liveness_free_list.erase(tensor); node->liveness_free_list.erase(tensor);
......
...@@ -24,7 +24,6 @@ using namespace ngraph::runtime; ...@@ -24,7 +24,6 @@ using namespace ngraph::runtime;
std::shared_ptr<TensorView> std::shared_ptr<TensorView>
Backend::make_primary_tensor_view(const ngraph::element::Type& element_type, const Shape& shape) Backend::make_primary_tensor_view(const ngraph::element::Type& element_type, const Shape& shape)
{ {
NGRAPH_INFO;
return element_type.make_primary_tensor_view(shape); return element_type.make_primary_tensor_view(shape);
} }
......
...@@ -60,7 +60,6 @@ namespace ngraph ...@@ -60,7 +60,6 @@ namespace ngraph
std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>> std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>
make_parameterized_tensor_view(const Shape& shape) make_parameterized_tensor_view(const Shape& shape)
{ {
NGRAPH_INFO;
return std::dynamic_pointer_cast<ngraph::runtime::ParameterizedTensorView<ET>>( return std::dynamic_pointer_cast<ngraph::runtime::ParameterizedTensorView<ET>>(
make_primary_tensor_view(ET::element_type(), shape)); make_primary_tensor_view(ET::element_type(), shape));
} }
...@@ -69,7 +68,6 @@ namespace ngraph ...@@ -69,7 +68,6 @@ namespace ngraph
std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>> std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>
make_parameterized_tensor_view(const NDArrayBase<typename ET::type>& ndarray) make_parameterized_tensor_view(const NDArrayBase<typename ET::type>& ndarray)
{ {
NGRAPH_INFO;
auto result = auto result =
std::dynamic_pointer_cast<ngraph::runtime::ParameterizedTensorView<ET>>( std::dynamic_pointer_cast<ngraph::runtime::ParameterizedTensorView<ET>>(
make_primary_tensor_view(ET::element_type(), ndarray.get_shape())); make_primary_tensor_view(ET::element_type(), ndarray.get_shape()));
......
This source diff could not be displayed because it is too large. You can view the blob instead.
...@@ -324,8 +324,6 @@ extern "C" void free_aligned_buffer(void* allocated); ...@@ -324,8 +324,6 @@ extern "C" void free_aligned_buffer(void* allocated);
TU << "// Define tensor views\n"; TU << "// Define tensor views\n";
TU << "\n"; TU << "\n";
TU.indent--;
for (shared_ptr<Node> node : m_function->get_ordered_ops()) for (shared_ptr<Node> node : m_function->get_ordered_ops())
{ {
auto& n = *node; // Work around a compiler warning (*node inside typeid may have effects auto& n = *node; // Work around a compiler warning (*node inside typeid may have effects
...@@ -351,6 +349,8 @@ extern "C" void free_aligned_buffer(void* allocated); ...@@ -351,6 +349,8 @@ extern "C" void free_aligned_buffer(void* allocated);
handler->second(&emitter, node.get(), this, function_map, in, out); handler->second(&emitter, node.get(), this, function_map, in, out);
} }
TU.indent--;
// End TU // End TU
TU += "}\n"; TU += "}\n";
......
...@@ -33,15 +33,12 @@ runtime::cpu::CPUTensorView::CPUTensorView(const ngraph::element::Type& element_ ...@@ -33,15 +33,12 @@ runtime::cpu::CPUTensorView::CPUTensorView(const ngraph::element::Type& element_
std::make_shared<ngraph::descriptor::layout::DenseTensorViewLayout>(*m_descriptor)); std::make_shared<ngraph::descriptor::layout::DenseTensorViewLayout>(*m_descriptor));
m_buffer_size = m_descriptor->get_tensor_view_layout()->get_size() * element_type.size(); m_buffer_size = m_descriptor->get_tensor_view_layout()->get_size() * element_type.size();
NGRAPH_INFO << m_buffer_size;
allocate_aligned_buffer(m_buffer_size, runtime::cpu::alignment, &m_allocated, &m_buffer); allocate_aligned_buffer(m_buffer_size, runtime::cpu::alignment, &m_allocated, &m_buffer);
} }
runtime::cpu::CPUTensorView::~CPUTensorView() runtime::cpu::CPUTensorView::~CPUTensorView()
{ {
NGRAPH_INFO;
free_aligned_buffer(m_allocated); free_aligned_buffer(m_allocated);
NGRAPH_INFO;
} }
char* runtime::cpu::CPUTensorView::get_data_ptr() char* runtime::cpu::CPUTensorView::get_data_ptr()
...@@ -58,9 +55,6 @@ void runtime::cpu::CPUTensorView::write(const void* source, size_t tensor_offset ...@@ -58,9 +55,6 @@ void runtime::cpu::CPUTensorView::write(const void* source, size_t tensor_offset
{ {
if (tensor_offset + n > m_buffer_size) if (tensor_offset + n > m_buffer_size)
{ {
NGRAPH_INFO << m_buffer_size;
NGRAPH_INFO << n;
NGRAPH_INFO << tensor_offset;
throw out_of_range("write access past end of tensor"); throw out_of_range("write access past end of tensor");
} }
char* target = get_data_ptr(); char* target = get_data_ptr();
...@@ -71,9 +65,6 @@ void runtime::cpu::CPUTensorView::read(void* target, size_t tensor_offset, size_ ...@@ -71,9 +65,6 @@ void runtime::cpu::CPUTensorView::read(void* target, size_t tensor_offset, size_
{ {
if (tensor_offset + n > m_buffer_size) if (tensor_offset + n > m_buffer_size)
{ {
NGRAPH_INFO << m_buffer_size;
NGRAPH_INFO << n;
NGRAPH_INFO << tensor_offset;
throw out_of_range("read access past end of tensor"); throw out_of_range("read access past end of tensor");
} }
const char* source = get_data_ptr(); const char* source = get_data_ptr();
......
...@@ -85,7 +85,6 @@ TEST(${BACKEND_NAME}, abc_int64) ...@@ -85,7 +85,6 @@ TEST(${BACKEND_NAME}, abc_int64)
auto cf = backend->make_call_frame(external); auto cf = backend->make_call_frame(external);
// Create some tensors for input/output // Create some tensors for input/output
NGRAPH_INFO;
auto a = backend->make_primary_tensor_view(element::Int64::element_type(), shape); auto a = backend->make_primary_tensor_view(element::Int64::element_type(), shape);
copy_data(a, vector<element::Int64::type>{1, 2, 3, 4}); copy_data(a, vector<element::Int64::type>{1, 2, 3, 4});
auto b = backend->make_primary_tensor_view(element::Int64::element_type(), shape); auto b = backend->make_primary_tensor_view(element::Int64::element_type(), shape);
...@@ -94,15 +93,14 @@ TEST(${BACKEND_NAME}, abc_int64) ...@@ -94,15 +93,14 @@ TEST(${BACKEND_NAME}, abc_int64)
copy_data(c, vector<element::Int64::type>{9, 10, 11, 12}); copy_data(c, vector<element::Int64::type>{9, 10, 11, 12});
auto result = backend->make_primary_tensor_view(element::Int64::element_type(), shape); auto result = backend->make_primary_tensor_view(element::Int64::element_type(), shape);
// (*cf)({a, b, c}, {result}); (*cf)({a, b, c}, {result});
// EXPECT_EQ((vector<element::Int64::type>{54, 80, 110, 144}), result->get_vector<int64_t>()); EXPECT_EQ((vector<element::Int64::type>{54, 80, 110, 144}), result->get_vector<int64_t>());
// (*cf)({b, a, c}, {result}); (*cf)({b, a, c}, {result});
// EXPECT_EQ((vector<element::Int64::type>{54, 80, 110, 144}), result->get_vector<int64_t>()); EXPECT_EQ((vector<element::Int64::type>{54, 80, 110, 144}), result->get_vector<int64_t>());
// (*cf)({a, c, b}, {result}); (*cf)({a, c, b}, {result});
// EXPECT_EQ((vector<element::Int64::type>{50, 72, 98, 128}), result->get_vector<int64_t>()); EXPECT_EQ((vector<element::Int64::type>{50, 72, 98, 128}), result->get_vector<int64_t>());
NGRAPH_INFO;
} }
// Same as abc, but using tuples for input and output // Same as abc, but using tuples for input and output
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment