Commit 0d26f380 authored by Robert Kimball's avatar Robert Kimball

add range checking for tensor read/write

parent b0de2d3e
......@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <iostream>
#include <clang/CodeGen/ObjectFilePCHContainerOperations.h>
#include <clang/Driver/DriverDiagnostic.h>
#include <clang/Driver/Options.h>
......@@ -191,7 +193,10 @@ std::unique_ptr<llvm::Module> execution_state::compile(const string& source, con
// Create and execute action
CodeGenAction* compilerAction = new EmitCodeGenOnlyAction();
Clang->ExecuteAction(*compilerAction);
if (Clang->ExecuteAction(*compilerAction) == false)
{
throw runtime_error("codegen compile failed");
}
buffer.release();
......
......@@ -101,7 +101,6 @@ bool pass::Liveness::run_on_call_graph(list<shared_ptr<Node>>& ops)
}
for (Tensor* tensor : outputs)
{
NGRAPH_INFO << "found output";
node->liveness_live_list.insert(tensor);
node->liveness_free_list.erase(tensor);
......
......@@ -24,7 +24,6 @@ using namespace ngraph::runtime;
// Creates a primary (backing) tensor view for the given element type and shape.
// Thin forwarding wrapper: construction is delegated to the element_type object.
std::shared_ptr<TensorView>
Backend::make_primary_tensor_view(const ngraph::element::Type& element_type, const Shape& shape)
{
// NOTE(review): NGRAPH_INFO is used as a logging stream elsewhere in this
// change (e.g. `NGRAPH_INFO << m_buffer_size`); a bare invocation presumably
// emits only the call-site location — confirm. This line is a deletion in
// the rendered diff (debug-trace cleanup).
NGRAPH_INFO;
// Delegate view construction to the element type descriptor.
return element_type.make_primary_tensor_view(shape);
}
......
......@@ -60,7 +60,6 @@ namespace ngraph
// Creates a primary tensor view of element type ET with the given shape and
// downcasts it to the ET-parameterized view type.
// NOTE(review): ET is a template parameter declared outside this excerpt —
// presumably an ngraph element-type tag (it provides ET::element_type());
// confirm against the full header.
std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>
make_parameterized_tensor_view(const Shape& shape)
{
// This line is a deletion in the rendered diff (debug-trace cleanup).
NGRAPH_INFO;
// dynamic_pointer_cast yields nullptr if the runtime type does not match the
// requested ParameterizedTensorView<ET> — NOTE(review): no null check here;
// verify callers tolerate or preclude a mismatch.
return std::dynamic_pointer_cast<ngraph::runtime::ParameterizedTensorView<ET>>(
make_primary_tensor_view(ET::element_type(), shape));
}
......@@ -69,7 +68,6 @@ namespace ngraph
std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>
make_parameterized_tensor_view(const NDArrayBase<typename ET::type>& ndarray)
{
NGRAPH_INFO;
auto result =
std::dynamic_pointer_cast<ngraph::runtime::ParameterizedTensorView<ET>>(
make_primary_tensor_view(ET::element_type(), ndarray.get_shape()));
......
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -324,8 +324,6 @@ extern "C" void free_aligned_buffer(void* allocated);
TU << "// Define tensor views\n";
TU << "\n";
TU.indent--;
for (shared_ptr<Node> node : m_function->get_ordered_ops())
{
auto& n = *node; // Work around a compiler warning (*node inside typeid may have effects
......@@ -351,6 +349,8 @@ extern "C" void free_aligned_buffer(void* allocated);
handler->second(&emitter, node.get(), this, function_map, in, out);
}
TU.indent--;
// End TU
TU += "}\n";
......
......@@ -33,15 +33,12 @@ runtime::cpu::CPUTensorView::CPUTensorView(const ngraph::element::Type& element_
std::make_shared<ngraph::descriptor::layout::DenseTensorViewLayout>(*m_descriptor));
m_buffer_size = m_descriptor->get_tensor_view_layout()->get_size() * element_type.size();
NGRAPH_INFO << m_buffer_size;
allocate_aligned_buffer(m_buffer_size, runtime::cpu::alignment, &m_allocated, &m_buffer);
}
// Destructor: releases the aligned buffer acquired in the constructor
// (allocate_aligned_buffer stored the raw allocation pointer in m_allocated).
runtime::cpu::CPUTensorView::~CPUTensorView()
{
// Trace markers bracketing the release; both NGRAPH_INFO lines are deletions
// in the rendered diff (debug cleanup). NOTE(review): a bare NGRAPH_INFO
// presumably logs only the call-site location — confirm.
NGRAPH_INFO;
free_aligned_buffer(m_allocated);
NGRAPH_INFO;
}
char* runtime::cpu::CPUTensorView::get_data_ptr()
......@@ -58,9 +55,6 @@ void runtime::cpu::CPUTensorView::write(const void* source, size_t tensor_offset
{
if (tensor_offset + n > m_buffer_size)
{
NGRAPH_INFO << m_buffer_size;
NGRAPH_INFO << n;
NGRAPH_INFO << tensor_offset;
throw out_of_range("write access past end of tensor");
}
char* target = get_data_ptr();
......@@ -71,9 +65,6 @@ void runtime::cpu::CPUTensorView::read(void* target, size_t tensor_offset, size_
{
if (tensor_offset + n > m_buffer_size)
{
NGRAPH_INFO << m_buffer_size;
NGRAPH_INFO << n;
NGRAPH_INFO << tensor_offset;
throw out_of_range("read access past end of tensor");
}
const char* source = get_data_ptr();
......
......@@ -85,7 +85,6 @@ TEST(${BACKEND_NAME}, abc_int64)
auto cf = backend->make_call_frame(external);
// Create some tensors for input/output
NGRAPH_INFO;
auto a = backend->make_primary_tensor_view(element::Int64::element_type(), shape);
copy_data(a, vector<element::Int64::type>{1, 2, 3, 4});
auto b = backend->make_primary_tensor_view(element::Int64::element_type(), shape);
......@@ -94,15 +93,14 @@ TEST(${BACKEND_NAME}, abc_int64)
copy_data(c, vector<element::Int64::type>{9, 10, 11, 12});
auto result = backend->make_primary_tensor_view(element::Int64::element_type(), shape);
// (*cf)({a, b, c}, {result});
// EXPECT_EQ((vector<element::Int64::type>{54, 80, 110, 144}), result->get_vector<int64_t>());
(*cf)({a, b, c}, {result});
EXPECT_EQ((vector<element::Int64::type>{54, 80, 110, 144}), result->get_vector<int64_t>());
// (*cf)({b, a, c}, {result});
// EXPECT_EQ((vector<element::Int64::type>{54, 80, 110, 144}), result->get_vector<int64_t>());
(*cf)({b, a, c}, {result});
EXPECT_EQ((vector<element::Int64::type>{54, 80, 110, 144}), result->get_vector<int64_t>());
// (*cf)({a, c, b}, {result});
// EXPECT_EQ((vector<element::Int64::type>{50, 72, 98, 128}), result->get_vector<int64_t>());
NGRAPH_INFO;
(*cf)({a, c, b}, {result});
EXPECT_EQ((vector<element::Int64::type>{50, 72, 98, 128}), result->get_vector<int64_t>());
}
// Same as abc, but using tuples for input and output
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment