Unverified Commit b6197967 authored by Matthew Brookhart, committed by GitHub

Merge branch 'master' into ayzhuang/in-place-concat

parents ba848e20 5a32dfe4
......@@ -128,6 +128,7 @@ def Notify() {
String notifyPeople = "$ghprbPullAuthorEmail, $ghprbActualCommitAuthorEmail"
Closure notifyMethod = { configMap ->
if(currentBuild.result == "FAILURE") {
blue_ocean = "https://crackerjack.intel.com/blue/organizations/jenkins/onnx%2Fngraph_onnx_integration_ci/detail/ngraph_onnx_integration_ci/${BUILD_NUMBER}/pipeline"
emailext (
subject: "NGraph-Onnx CI: NGraph PR $ghprbPullId $currentBuild.result!",
body: """
......@@ -135,11 +136,9 @@ def Notify() {
<tr><td>Status:</td> <td>${currentBuild.result}</td></tr>
<tr><td>Repository</td> <td>$ghprbGhRepository</td></tr>
<tr><td>Branch:</td> <td>$ghprbSourceBranch</td></tr>
<tr><td>Jenkins Job No:</td> <td>$BUILD_NUMBER</td></tr>
<tr><td>Jenkins Job Link:</td> <td>$BUILD_URL</td></tr>
<tr><td>Pull Request:</td> <td>$ghprbPullId</td></tr>
<tr><td>Jenkins Build:</td> <td> <a href=$blue_ocean> ${BUILD_NUMBER} </a> </td></tr>
<tr><td>Pull Request:</td> <td><a href=$ghprbPullLink>$ghprbPullId</a> </td></tr>
<tr><td>Commit SHA:</td> <td>$ghprbActualCommit</td></tr>
<tr><td>Link:</td> <td>$ghprbPullLink</td></tr>
<tr><td>nGraph-ONNX Branch:</td> <td>${ONNX_BRANCH}</td></tr>
</table>
""",
......
......@@ -65,19 +65,19 @@ function build_ngraph() {
mkdir -p ./build
cd ./build
cmake ../ -DNGRAPH_TOOLS_ENABLE=FALSE -DNGRAPH_UNIT_TEST_ENABLE=FALSE -DNGRAPH_USE_PREBUILT_LLVM=TRUE -DNGRAPH_ONNX_IMPORT_ENABLE=TRUE -DCMAKE_INSTALL_PREFIX="${ngraph_directory}/ngraph_dist" || return 1
rm -f "${ngraph_directory}"/ngraph/python/dist/ngraph*.whl
make -j $(lscpu --parse=CORE | grep -v '#' | sort | uniq | wc -l) || return 1
make install || return 1
cd "${ngraph_directory}/ngraph/python"
if [ ! -d ./pybind11 ]; then
git clone --recursive https://github.com/pybind/pybind11.git
fi
rm -f "${ngraph_directory}"/ngraph/python/dist/ngraph*.whl
rm -rf "${ngraph_directory}/ngraph/python/*.so ${ngraph_directory}/ngraph/python/build"
export PYBIND_HEADERS_PATH="${ngraph_directory}/ngraph/python/pybind11"
export NGRAPH_CPP_BUILD_PATH="${ngraph_directory}/ngraph_dist"
export NGRAPH_ONNX_IMPORT_ENABLE="TRUE"
python3 setup.py bdist_wheel
# Clean build artifacts
rm -rf "${ngraph_directory}/ngraph/python/_pyngraph.cpython* ${ngraph_directory}/ngraph/python/build"
rm -rf "${ngraph_directory}/ngraph_dist"
return 0
}
......
.. ops/index.rst
Core Ops
========
About Core Ops
==============
An ``Op``'s primary role is to function as a node in a directed acyclic
dependency computation graph.
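For illustration, a minimal sketch of ops acting as graph nodes, assuming the nGraph C++
API of this release (``op::Parameter``, ``op::Add``, ``Function``, and the
``op::ParameterVector`` alias are taken as given here):

.. code-block:: cpp

   #include "ngraph/ngraph.hpp"

   using namespace ngraph;

   // Two Parameter ops are the leaf nodes (graph inputs).
   auto a = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
   auto b = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});

   // The Add op is a node whose incoming edges are the two Parameters.
   auto sum = std::make_shared<op::Add>(a, b);

   // A Function wraps the result node together with the ordered parameter list.
   auto f = std::make_shared<Function>(sum, op::ParameterVector{a, b});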
......@@ -40,7 +40,7 @@ that must be performed are:
Alphabetical list of Core ``ops``
----------------------------------
=================================
Not currently a comprehensive list.
......@@ -163,5 +163,3 @@ Not currently a comprehensive list.
sqrt.rst
tan.rst
tanh.rst
......@@ -155,6 +155,7 @@ set (SRC
strides.cpp
type/element_type.cpp
util.cpp
validation_util.cpp
graph_util.cpp
placement.cpp
cpio.cpp
......
......@@ -59,6 +59,10 @@ add_library(onnx_import STATIC
op/floor.hpp
op/gemm.cpp
op/gemm.hpp
op/global_average_pool.cpp
op/global_average_pool.hpp
op/global_max_pool.cpp
op/global_max_pool.hpp
op/greater.hpp
op/hard_sigmoid.cpp
op/hard_sigmoid.hpp
......
......@@ -180,7 +180,7 @@ namespace ngraph
}
template <>
inline const std::string& get_value(const onnx::AttributeProto& attribute)
inline std::string get_value(const onnx::AttributeProto& attribute)
{
if (unlikely(attribute.type() != onnx::AttributeProto_AttributeType_STRING))
{
......
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "utils/convpool.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
NodeVector global_average_pool(const Node& node)
{
return convpool::make_ng_pool<ngraph::op::AvgPool>(node);
}
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
/// \brief Convert ONNX GlobalAveragePool operation to an nGraph node.
///
/// \param node The ONNX node object representing this operation.
///
/// \return The vector containing nGraph nodes producing the output of the ONNX
/// GlobalAveragePool operation.
NodeVector global_average_pool(const Node& node);
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/max_pool.hpp"
#include "utils/convpool.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
NodeVector global_max_pool(const Node& node)
{
return convpool::make_ng_pool<ngraph::op::MaxPool>(node);
}
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
/// \brief Convert ONNX GlobalMaxPool operation to an nGraph node.
///
/// \param node The ONNX node object representing this operation.
///
/// \return The vector containing nGraph nodes producing the output of the ONNX
/// GlobalMaxPool operation.
NodeVector global_max_pool(const Node& node);
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph
......@@ -37,6 +37,8 @@
#include "op/flatten.hpp"
#include "op/floor.hpp"
#include "op/gemm.hpp"
#include "op/global_average_pool.hpp"
#include "op/global_max_pool.hpp"
#include "op/greater.hpp"
#include "op/hard_sigmoid.hpp"
#include "op/identity.hpp"
......@@ -148,6 +150,8 @@ namespace ngraph
REGISTER_OPERATOR("Flatten", 1, flatten);
REGISTER_OPERATOR("Floor", 1, floor);
REGISTER_OPERATOR("Gemm", 1, gemm);
REGISTER_OPERATOR("GlobalAveragePool", 1, global_average_pool);
REGISTER_OPERATOR("GlobalMaxPool", 1, global_max_pool);
REGISTER_OPERATOR("Greater", 1, greater);
REGISTER_OPERATOR("HardSigmoid", 1, hard_sigmoid);
REGISTER_OPERATOR("Identity", 1, identity);
......
......@@ -31,7 +31,9 @@ namespace ngraph
{
Shape get_kernel_shape(const Node& node)
{
return node.get_attribute_value<std::vector<std::size_t>>("kernel_shape", {1, 1});
std::size_t input_spatial_dims = node.get_ng_inputs()[0]->get_shape().size() - 2;
return node.get_attribute_value<std::vector<std::size_t>>(
"kernel_shape", std::vector<std::size_t>(input_spatial_dims, 1UL));
}
namespace detail
......@@ -121,7 +123,7 @@ namespace ngraph
pads = CoordinateDiff(static_cast<std::ptrdiff_t>(kernel_shape.size()), 0UL);
}
if (pads.size() <= 3)
if (pads.size() != kernel_shape.size() * 2)
{
// Paddings specified in (H, W, C) format.
return {pads, pads};
......
......@@ -16,7 +16,10 @@
#pragma once
#include <string>
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/shape.hpp"
#include "core/attribute.hpp"
......@@ -84,13 +87,11 @@ namespace ngraph
return get_pads(node, get_kernel_shape(node));
}
/**
* @brief Create an nGraph pooling operation based on an ONNX pooling op.
*
* @tparam T Class of an nGraph pooling operation (e.g. AveragePool, MaxPool)
* @param node incoming ONNX opearation
* @return nGraph node equivalent of the ONNX operation
*/
/// \brief Create an nGraph pooling operation based on an ONNX pooling op.
///
/// \tparam T Class of an nGraph pooling operation (e.g. AveragePool, MaxPool)
/// \param node incoming ONNX operation
/// \return nGraph node equivalent of the ONNX operation
template <class T>
inline NodeVector make_ng_pool(const Node& node)
{
......@@ -98,19 +99,44 @@ namespace ngraph
auto data = node.get_ng_inputs().at(0);
// Parse ONNX op attributes
Shape kernel_shape = convpool::get_kernel_shape(node);
Shape kernel_shape;
if (node.op_type().find("Global") != std::string::npos)
{
kernel_shape = node.get_ng_inputs()[0]->get_shape();
// Remove N and C dimensions and leave only spatial dims.
kernel_shape.erase(std::begin(kernel_shape),
std::next(std::begin(kernel_shape), 2));
}
else
{
kernel_shape = convpool::get_kernel_shape(node);
}
auto strides = convpool::get_strides(node);
auto dilations = convpool::get_dilations(node);
auto paddings = convpool::get_pads(node);
bool count_include_pad = node.get_attribute_value<int64_t>("count_include_pad", 0);
// Convert padding from CoordinateDiff to Shape objects
const CoordinateDiff& padding_above{paddings.first};
const CoordinateDiff& padding_below{paddings.second};
Shape padding_below_shape{std::begin(padding_below), std::end(padding_below)};
Shape padding_above_shape{std::begin(padding_above), std::end(padding_above)};
return {std::make_shared<T>(
data, kernel_shape, strides, padding_below_shape, padding_above_shape)};
if (count_include_pad)
{
return {std::make_shared<ngraph::op::AvgPool>(data,
kernel_shape,
strides,
padding_below_shape,
padding_above_shape,
count_include_pad)};
}
else
{
return {std::make_shared<T>(
data, kernel_shape, strides, padding_below_shape, padding_above_shape)};
}
}
} // namespace convpool
......
......@@ -17,6 +17,7 @@
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/assertion.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
......@@ -41,6 +42,10 @@ void op::AvgPool::validate_and_infer_types()
{
auto& arg_shape = get_input_shape(0);
NODE_VALIDATION_ASSERT(this, arg_shape.size() >= 3)
<< "Data input shape does not have rank of at least 3 (data input shape: " << arg_shape
<< ").";
if (0 == m_window_movement_strides.size() && arg_shape.size() > 2)
{
m_window_movement_strides = Strides(arg_shape.size() - 2, 1);
......@@ -56,145 +61,20 @@ void op::AvgPool::validate_and_infer_types()
m_padding_above = Shape(arg_shape.size() - 2, 0);
}
//
// Make sure batch size and channel count are not zero, and that we have at least one spatial
// dimension (in other words, that arg has shape NCDi for some Di of rank>0, N != 0, C != 0).
//
NODE_VALIDATION_ASSERT(this, arg_shape.size() >= 3)
<< "Data input shape does not have rank of at least 3 (data input shape: " << arg_shape
<< ").";
size_t batch_size = arg_shape[0];
NODE_VALIDATION_ASSERT(this, batch_size != 0)
<< "Data batch size is zero (data input shape: " << arg_shape << ").";
size_t channel_count = arg_shape[1];
NODE_VALIDATION_ASSERT(this, channel_count != 0)
<< "Channel count is zero (data input shape: " << arg_shape << ").";
size_t spatial_dimension_count = arg_shape.size() - 2;
//
// Make sure window shape, window movement strides, and padding have same rank as Di.
//
NODE_VALIDATION_ASSERT(this, m_window_shape.size() == spatial_dimension_count)
<< "Window shape rank does not match number of spatial dimensions (window shape: "
<< m_window_shape << ", data input shape: " << arg_shape << ").";
NODE_VALIDATION_ASSERT(this, m_window_movement_strides.size() == spatial_dimension_count)
<< "Window movement stride rank does not match number of spatial dimensions (window "
"movement strides: "
<< m_window_movement_strides << ", data input shape: " << arg_shape << ").";
NODE_VALIDATION_ASSERT(this, m_padding_below.size() == spatial_dimension_count)
<< "Below-padding rank does not match number of spatial dimensions (padding below: "
<< m_padding_below << ", data input shape: " << arg_shape << ").";
NODE_VALIDATION_ASSERT(this, m_padding_above.size() == spatial_dimension_count)
<< "Above-padding rank does not match number of spatial dimensions (padding above: "
<< m_padding_above << ", data input shape: " << arg_shape << ").";
//
// Extract input item shape Di and make sure all dimensions are larger than 0.
//
Shape input_item_virtual_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
size_t dim_size = arg_shape[1 + 1 + i];
size_t virtual_dim_size = m_padding_below[i] + dim_size + m_padding_above[i];
input_item_virtual_shape.push_back(virtual_dim_size);
}
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, input_item_virtual_shape[i] != 0)
<< "Data input spatial dimension " << i
<< " has zero length even after padding (virtual shape of input item: "
<< input_item_virtual_shape << ").";
}
//
// Make sure window shape dimensions are all larger than 0.
//
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, m_window_shape[i] != 0)
<< "Window shape dimension " << i
<< " has zero length (window shape: " << m_window_shape << ").";
}
//
// Make sure the pooling window fits within the spatial dimensions.
//
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, m_window_shape[i] <= input_item_virtual_shape[i])
<< "Window shape after padding is larger than the spatial dimensions (window shape: "
<< m_window_shape << ", virtual shape of input item: " << input_item_virtual_shape
<< ").";
}
//
// Compute output item shape Do, checking at the same time that all window movement strides are larger than 0.
//
Shape output_item_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, m_window_movement_strides[i] != 0)
<< "Window movement strides dimension " << i
<< " has zero length (window movement strides: " << m_window_movement_strides << ").";
output_item_shape.push_back(ceil_div(input_item_virtual_shape[i] - m_window_shape[i] + 1,
m_window_movement_strides[i]));
}
//
// Make sure we're not going to have to compute average over an empty set of tensor elements.
// That will happen if the sliding window ever resides entirely over the padding area AND
// we're planning to disregard padding when computing the window's average.
//
if (!m_include_padding_in_avg_computation)
{
for (size_t i = 0; i < spatial_dimension_count; i++)
{
const size_t dim_virtual_size = input_item_virtual_shape[i];
const size_t dim_window_size = m_window_shape[i];
const size_t dim_stride = m_window_movement_strides[i];
const size_t dim_padding_below = m_padding_below[i];
const size_t dim_padding_above = m_padding_above[i];
// Checking the lower edge of each dimension is easy, because there's no mystery
// regarding the window's lower-edge placement...
NODE_VALIDATION_ASSERT(this,
dim_padding_below == 0 || dim_window_size > dim_padding_below)
<< "Window will sometimes reside entirely within the below-padding region, but"
<< " include_padding_in_avg_computation was not set (padding below: "
<< m_padding_below << ", window shape: " << m_window_shape << ").";
// Now check the upper-bound...
{
const size_t dim_num_strides = (dim_virtual_size - dim_window_size) / dim_stride;
const size_t dim_window_max_lower_offset = dim_num_strides * dim_stride;
const size_t dim_padding_above_start_offset = dim_virtual_size - dim_padding_above;
NODE_VALIDATION_ASSERT(this,
dim_padding_above == 0 ||
dim_window_max_lower_offset <
dim_padding_above_start_offset)
<< "Window will sometimes reside entirely within the above-padding region, but"
<< " include_padding_in_avg_computation was not set (padding above: "
<< m_padding_above << ", window shape: " << m_window_shape << ").";
}
}
}
//
// Construct result shape: NCDo.
//
Shape result_shape(1 + 1 + spatial_dimension_count);
result_shape[0] = batch_size;
result_shape[1] = channel_count;
copy(output_item_shape.begin(), output_item_shape.end(), result_shape.begin() + 2);
set_output_type(0, get_input_element_type(0), result_shape);
// infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
// now still take Shape (no negative padding).
CoordinateDiff padding_below(m_padding_below.begin(), m_padding_below.end());
CoordinateDiff padding_above(m_padding_above.begin(), m_padding_above.end());
set_output_type(0,
get_input_element_type(0),
infer_batched_pooling_forward(this,
arg_shape,
padding_below,
padding_above,
m_window_shape,
m_window_movement_strides,
m_include_padding_in_avg_computation));
}
op::AvgPool::AvgPool(const shared_ptr<Node>& arg,
......@@ -240,154 +120,25 @@ op::AvgPoolBackprop::AvgPoolBackprop(const Shape& forward_arg_shape,
void op::AvgPoolBackprop::validate_and_infer_types()
{
// --
// TODO: de-duplicate this code from AvgPool::AvgPool.
// --
auto& delta_shape = get_input_shape(0);
//
// Make sure batch size and channel count are not zero, and that we have at least one spatial
// dimension (in other words, that arg has shape NCDi for some Di of rank>0, N != 0, C != 0).
//
NODE_VALIDATION_ASSERT(this, m_forward_arg_shape.size() >= 3)
<< "Forward input shape does not have rank of at least 3 (forward input shape: "
<< m_forward_arg_shape << ").";
size_t batch_size = m_forward_arg_shape[0];
NODE_VALIDATION_ASSERT(this, batch_size != 0)
<< "Data batch size is zero (forward input shape: " << m_forward_arg_shape << ").";
size_t channel_count = m_forward_arg_shape[1];
NODE_VALIDATION_ASSERT(this, channel_count != 0)
<< "Channel count is zero (forward input shape: " << m_forward_arg_shape << ").";
size_t spatial_dimension_count = m_forward_arg_shape.size() - 2;
//
// Make sure window shape, window movement strides, and padding have same rank as Di.
//
NODE_VALIDATION_ASSERT(this, m_window_shape.size() == spatial_dimension_count)
<< "Window shape rank does not match number of spatial dimensions (window shape: "
<< m_window_shape << ", forward input shape: " << m_forward_arg_shape << ").";
NODE_VALIDATION_ASSERT(this, m_window_movement_strides.size() == spatial_dimension_count)
<< "Window movement stride rank does not match number of spatial dimensions (window "
"movement strides: "
<< m_window_movement_strides << ", forward input shape: " << m_forward_arg_shape << ").";
NODE_VALIDATION_ASSERT(this, m_padding_below.size() == spatial_dimension_count)
<< "Below-padding rank does not match number of spatial dimensions (padding below: "
<< m_padding_below << ", forward input shape: " << m_forward_arg_shape << ").";
NODE_VALIDATION_ASSERT(this, m_padding_above.size() == spatial_dimension_count)
<< "Above-padding rank does not match number of spatial dimensions (padding above: "
<< m_padding_above << ", forward input shape: " << m_forward_arg_shape << ").";
//
// Extract input item shape Di and make sure all dimensions are larger than 0.
//
Shape input_item_virtual_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
size_t dim_size = m_forward_arg_shape[1 + 1 + i];
size_t virtual_dim_size = m_padding_below[i] + dim_size + m_padding_above[i];
input_item_virtual_shape.push_back(virtual_dim_size);
}
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, input_item_virtual_shape[i] != 0)
<< "Forward input spatial dimension " << i
<< " has zero length even after padding (virtual shape of input item: "
<< input_item_virtual_shape << ").";
}
//
// Make sure window shape dimensions are all larger than 0.
//
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, m_window_shape[i] != 0)
<< "Window shape dimension " << i
<< " has zero length (window shape: " << m_window_shape << ").";
}
//
// Make sure the pooling window fits within the spatial dimensions.
//
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, m_window_shape[i] <= input_item_virtual_shape[i])
<< "Window shape after padding is larger than the spatial dimensions (window shape: "
<< m_window_shape << ", virtual shape of input item: " << input_item_virtual_shape
<< ").";
}
//
// Compute output item shape Do, checking at the same time that all window movement strides are larger than 0.
//
Shape output_item_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, m_window_movement_strides[i] != 0)
<< "Window movement strides dimension " << i
<< " has zero length (window movement strides: " << m_window_movement_strides << ").";
output_item_shape.push_back(ceil_div(input_item_virtual_shape[i] - m_window_shape[i] + 1,
m_window_movement_strides[i]));
}
//
// Make sure we're not going to have to compute average over an empty set of tensor elements.
// That will happen if the sliding window ever resides entirely over the padding area AND
// we're planning to disregard padding when computing the window's average.
//
if (!m_include_padding_in_avg_computation)
{
for (size_t i = 0; i < spatial_dimension_count; i++)
{
const size_t dim_virtual_size = input_item_virtual_shape[i];
const size_t dim_window_size = m_window_shape[i];
const size_t dim_stride = m_window_movement_strides[i];
const size_t dim_padding_below = m_padding_below[i];
const size_t dim_padding_above = m_padding_above[i];
// Checking the lower edge of each dimension is easy, because there's no mystery
// regarding the window's lower-edge placement...
NODE_VALIDATION_ASSERT(this,
dim_padding_below == 0 || dim_window_size > dim_padding_below)
<< "Window will sometimes reside entirely within the below-padding region, but"
<< " include_padding_in_avg_computation was not set (padding below: "
<< m_padding_below << ", window shape: " << m_window_shape << ").";
// Now check the upper-bound...
{
const size_t dim_num_strides = (dim_virtual_size - dim_window_size) / dim_stride;
const size_t dim_window_max_lower_offset = dim_num_strides * dim_stride;
const size_t dim_padding_above_start_offset = dim_virtual_size - dim_padding_above;
NODE_VALIDATION_ASSERT(this,
dim_padding_above == 0 ||
dim_window_max_lower_offset <
dim_padding_above_start_offset)
<< "Window will sometimes reside entirely within the above-padding region, but"
<< " include_padding_in_avg_computation was not set (padding above: "
<< m_padding_above << ", window shape: " << m_window_shape << ").";
}
}
}
// infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
// now still take Shape (no negative padding).
CoordinateDiff padding_below(m_padding_below.begin(), m_padding_below.end());
CoordinateDiff padding_above(m_padding_above.begin(), m_padding_above.end());
//
// Construct result shape: NCDo.
//
Shape forward_result_shape(1 + 1 + spatial_dimension_count);
forward_result_shape[0] = batch_size;
forward_result_shape[1] = channel_count;
copy(output_item_shape.begin(), output_item_shape.end(), forward_result_shape.begin() + 2);
Shape forward_result_shape =
infer_batched_pooling_forward(this,
m_forward_arg_shape,
padding_below,
padding_above,
m_window_shape,
m_window_movement_strides,
m_include_padding_in_avg_computation);
NODE_VALIDATION_ASSERT(this, forward_result_shape == delta_shape)
<< "Inferred forward output shape does not match delta shape (inferred forward output "
"shape: "
<< forward_result_shape << ", delta shape: " << delta_shape << ").";
<< "shape: " << forward_result_shape << ", delta shape: " << delta_shape << ").";
set_output_type(0, get_input_element_type(0), m_forward_arg_shape);
}
......
......@@ -22,209 +22,11 @@
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/reverse.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
Shape op::util::infer_convolution_output_shape(const Node* node,
const Shape& data_batch_shape,
const Shape& filters_shape,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
size_t batch_axis_data,
size_t input_channel_axis_data,
size_t input_channel_axis_filters,
size_t output_channel_axis_filters,
size_t batch_axis_result,
size_t output_channel_axis_result)
{
NODE_VALIDATION_ASSERT(node, batch_axis_data <= 1) << "(This is an internal nGraph error)";
NODE_VALIDATION_ASSERT(node, input_channel_axis_data <= 1)
<< "(This is an internal nGraph error)";
NODE_VALIDATION_ASSERT(node, input_channel_axis_filters <= 1)
<< "(This is an internal nGraph error)";
NODE_VALIDATION_ASSERT(node, output_channel_axis_filters <= 1)
<< "(This is an internal nGraph error)";
NODE_VALIDATION_ASSERT(node, batch_axis_result <= 1) << "(This is an internal nGraph error)";
NODE_VALIDATION_ASSERT(node, output_channel_axis_result <= 1)
<< "(This is an internal nGraph error)";
//
// Make sure data_batch: NCiDi for some Di of rank>0, N != 0, Ci != 0.
//
NODE_VALIDATION_ASSERT(node, data_batch_shape.size() >= 3)
<< "Data batch input must have rank of at least 3 (one batch axis, "
<< "one input-channel axis, and at least one spatial dimension) "
<< "(data batch shape: " << data_batch_shape << ").";
size_t batch_size = data_batch_shape[batch_axis_data];
NODE_VALIDATION_ASSERT(node, batch_size != 0)
<< "Data batch size is zero (data batch shape: " << data_batch_shape << ", "
<< "batch axis is axis " << batch_axis_data << ").";
size_t input_channel_count = data_batch_shape[input_channel_axis_data];
NODE_VALIDATION_ASSERT(node, input_channel_count != 0)
<< "Input channel count is zero (data batch shape: " << data_batch_shape << ", "
<< "channel axis is axis " << input_channel_axis_data << ").";
size_t spatial_dimension_count = data_batch_shape.size() - 2;
//
// Make sure filters: CoCiWv for some Co>0, rank of W = rank of Di.
//
NODE_VALIDATION_ASSERT(node, filters_shape.size() == 2 + spatial_dimension_count)
<< "Filter input must have rank equal to the data batch (one axis for output "
<< "channels, one axis for input channels, and the same number of spatial "
<< "dimensions as the data batch (filter input shape: " << filters_shape << ", "
<< "data batch shape: " << data_batch_shape << ").";
size_t output_channel_count = filters_shape[output_channel_axis_filters];
NODE_VALIDATION_ASSERT(node, output_channel_count != 0)
<< "Output channel count for filters is zero (filters shape: " << filters_shape << ", "
<< "output channels on axis " << output_channel_axis_filters << ").";
NODE_VALIDATION_ASSERT(node, filters_shape[input_channel_axis_filters] == input_channel_count)
<< "Input channel count for filters (" << filters_shape[input_channel_axis_filters] << ") "
<< "does not match the number of channels in the data batch (" << input_channel_count
<< ") "
<< "(filter input shape: " << filters_shape << ", filter input channels on axis "
<< input_channel_axis_filters << "; data batch shape: " << data_batch_shape
<< ", data batch channels on axis " << batch_axis_data << ").";
//
// Make sure window movement strides, window dilation strides, and data dilation strides
// have same rank as Di.
//
NODE_VALIDATION_ASSERT(node, window_movement_strides.size() == spatial_dimension_count)
<< "Rank of window movement strides does not match the number of spatial dimensions ("
<< spatial_dimension_count
<< ") in the data batch (window movement strides: " << window_movement_strides
<< ", data batch shape: " << data_batch_shape << ").";
NODE_VALIDATION_ASSERT(node, window_dilation_strides.size() == spatial_dimension_count)
<< "Rank of window dilation strides does not match the number of spatial dimensions ("
<< spatial_dimension_count
<< ") in the data batch (window dilation strides: " << window_dilation_strides
<< ", data batch shape: " << data_batch_shape << ").";
NODE_VALIDATION_ASSERT(node, data_dilation_strides.size() == spatial_dimension_count)
<< "Rank of data dilation strides does not match the number of spatial dimensions ("
<< spatial_dimension_count
<< ") in the data batch (data dilation strides: " << data_dilation_strides
<< ", data batch shape: " << data_batch_shape << ").";
//
// Make sure padding-below and padding-above shapes have same rank as Di.
//
NODE_VALIDATION_ASSERT(node, padding_below.size() == spatial_dimension_count)
<< "Rank of the padding below does not match the number of spatial dimensions ("
<< spatial_dimension_count << ") in the data batch (padding below: " << padding_below
<< ", data batch shape: " << data_batch_shape << ").";
NODE_VALIDATION_ASSERT(node, padding_above.size() == spatial_dimension_count)
<< "Rank of the padding above does not match the number of spatial dimensions ("
<< spatial_dimension_count << ") in the data batch (padding above: " << padding_above
<< ", data batch shape: " << data_batch_shape << ").";
//
// Extract input item shape Di and make sure all dimensions are larger than 0 after padding and dilation.
//
std::vector<ptrdiff_t> input_item_virtual_shape_signed;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(node, data_dilation_strides[i] != 0)
<< "Data dilation stride at spatial dimension " << i << " is zero "
<< "(data dilation strides: " << data_dilation_strides << ").";
size_t dim_size = data_batch_shape[1 + 1 + i];
size_t dilated_dim_size = (dim_size - 1) * data_dilation_strides[i] + 1;
ptrdiff_t padded_dilated_dim_size = padding_below[i] + dilated_dim_size + padding_above[i];
input_item_virtual_shape_signed.push_back(padded_dilated_dim_size);
}
Shape input_item_virtual_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(node, input_item_virtual_shape_signed[i] > 0)
<< "Input dimension after padding and dilation is non-positive "
<< "at spatial axis " << i
<< " (post-padding/dilation input item shape: " << input_item_virtual_shape
<< ", data batch shape: " << data_batch_shape
<< ", data dilation strides: " << data_dilation_strides
<< ", padding below: " << padding_below << ", padding above: " << padding_above << ").";
input_item_virtual_shape.push_back(size_t(input_item_virtual_shape_signed[i]));
}
//
// Extract the physical shape Wp of the convolution window, *not* including dilation, from the filter dimensions.
// At the same time, make sure window shape dimensions are all larger than 0.
//
Shape window_physical_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
window_physical_shape.push_back(filters_shape[1 + 1 + i]);
NODE_VALIDATION_ASSERT(node, window_physical_shape[i] != 0)
<< "Filters shape at spatial dimension " << i << " is zero "
<< "(filters shape: " << filters_shape << ").";
}
//
// Compute virtual shape Wp of the convolution window, *including* dilation. At the same time, make sure all
// window dilation strides are larger than 0, and that the dilated filter fits within the spatial dimensions.
//
Shape window_virtual_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(node, window_dilation_strides[i] != 0)
<< "Window dilation stride at spatial dimension " << i << " is zero "
<< "(window dilation strides: " << window_dilation_strides << ").";
window_virtual_shape.push_back((window_physical_shape[i] - 1) * window_dilation_strides[i] +
1);
NODE_VALIDATION_ASSERT(node, window_virtual_shape[i] <= input_item_virtual_shape[i])
<< "Post-dilation window shape is smaller than the post-padding/dilation "
<< "input item shape at spatial dimension " << i << " (post-padding/dilation "
<< "input item shape: " << input_item_virtual_shape
<< ", data batch shape: " << data_batch_shape
<< ", data dilation strides: " << data_dilation_strides
<< ", padding below: " << padding_below << ", padding above: " << padding_above
<< ", post-dilation window shape: " << window_virtual_shape
<< ", filters shape: " << filters_shape
<< ", window dilation strides: " << window_dilation_strides;
}
//
// Construct result shape: NCoDo or CoNDo (depending on *_axis_result), checking at the same
// time that all window movement strides are larger than 0.
//
Shape result_shape(spatial_dimension_count + 2);
result_shape[batch_axis_result] = batch_size;
result_shape[output_channel_axis_result] = output_channel_count;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(node, window_movement_strides[i] != 0)
<< "Window movement stride at spatial dimension " << i << " is zero "
<< "(window movement strides: " << window_movement_strides << ").";
result_shape[i + 2] = ceil_div(input_item_virtual_shape[i] - window_virtual_shape[i] + 1,
window_movement_strides[i]);
}
return result_shape;
}
op::Convolution::Convolution(const shared_ptr<Node>& data_batch,
const shared_ptr<Node>& filters,
const Strides& window_movement_strides,
......@@ -249,14 +51,21 @@ void op::Convolution::validate_and_infer_types()
auto& filters_shape = get_input_shape(1);
auto& filters_et = get_input_element_type(1);
NODE_VALIDATION_ASSERT(this, data_batch_shape.size() >= 3)
<< "Data batch must have rank of at least 3 (one batch axis, "
<< "one input-channel axis, and at least one spatial dimension) "
<< "(data batch shape: " << data_batch_shape << ").";
if (m_data_dilation_strides.size() == 0)
{
m_data_dilation_strides = default_strides(this, data_batch_shape);
}
if (m_window_movement_strides.size() == 0)
{
m_window_movement_strides = default_strides(this, data_batch_shape);
}
if (m_window_dilation_strides.size() == 0)
{
m_window_dilation_strides = default_strides(this, data_batch_shape);
......@@ -272,39 +81,26 @@ void op::Convolution::validate_and_infer_types()
m_padding_above = default_padding(this, data_batch_shape);
}
//
// Make sure data batch and filter element types match.
//
NODE_VALIDATION_ASSERT(this, data_batch_et == filters_et)
<< "Element types for data batch and filters do not match (data batch element type: "
<< data_batch_et << ", filters element type: " << filters_et << ").";
set_output_type(0,
data_batch_et,
util::infer_convolution_output_shape(this,
data_batch_shape,
filters_shape,
m_window_movement_strides,
m_window_dilation_strides,
m_padding_below,
m_padding_above,
m_data_dilation_strides,
0,
1,
1,
0,
0,
1));
element::Type result_et;
Shape result_shape;
std::tie(result_et, result_shape) = infer_convolution_forward(this,
data_batch_et,
filters_et,
data_batch_shape,
m_data_dilation_strides,
m_padding_below,
m_padding_above,
filters_shape,
m_window_movement_strides,
m_window_dilation_strides);
set_output_type(0, result_et, result_shape);
}
Strides op::Convolution::default_strides(const Node* node, const Shape& data_batch_shape)
{
// For consistency we should throw the same error message here that we throw in the constructor.
NODE_VALIDATION_ASSERT(node, data_batch_shape.size() >= 3)
<< "Data batch input must have rank of at least 3 (one batch axis, "
<< "one input-channel axis, and at least one spatial dimension) "
<< "(data batch shape: " << data_batch_shape << ").";
NGRAPH_ASSERT(data_batch_shape.size() >= 2);
return Strides(data_batch_shape.size() - 2, 1);
}
......@@ -326,12 +122,7 @@ op::Convolution::Convolution(const shared_ptr<Node>& data_batch,
CoordinateDiff op::Convolution::default_padding(const Node* node, const Shape& data_batch_shape)
{
// For consistency we should throw the same error message here that we throw in the constructor.
NODE_VALIDATION_ASSERT(node, data_batch_shape.size() >= 3)
<< "Data batch input must have rank of at least 3 (one batch axis, "
<< "one input-channel axis, and at least one spatial dimension) "
<< "(data batch shape: " << data_batch_shape << ").";
NGRAPH_ASSERT(data_batch_shape.size() >= 2);
return CoordinateDiff(data_batch_shape.size() - 2, 0);
}
......@@ -429,65 +220,88 @@ op::ConvolutionBackpropData::ConvolutionBackpropData(const Shape& data_batch_sha
void op::ConvolutionBackpropData::validate_and_infer_types()
{
// Backprop to data is itself convolution, with inputs/outputs/attributes transmogrified as
// follows.
//
// Forward Backward
// "N" axis for data batch 0 0
// "C" axis for data batch 1 1
// "Co" axis for filters 0 0
// "Ci" axis for filters 1 1
// "N" axis for output 0 0
// "C" axis for output 1 1
// Data batch x delta
// Data batch shape S_x S_o
// Filters f reverse(f) [on spatial axes]
// Filters shape S_f S_f
// Window movement strides q_x p_x
// Window dilation strides p_f p_f
// Padding below a_x (S_f - 1)p_f - a_x
// Padding above b_x (S_f - 1)p_f + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q_x) - b_x
// Data dilation strides p_x q_x
// Output shape S_o S_x
//
// To _validate_, we simply need to check/infer the output shape of the forward convolution,
// then check to make sure that the incoming delta has the same shape as the forward output.
//
// We will also compute and store the various parameters in the "backward" column above, since
// some backends need them. (TODO(amprocte): Is it just because of the way the reference works
// that this stuff is needed? If so, we can probably get rid of it and have conv_backprop
// reference kernels that do the calculations of the backward parameters internally, or supply
// utility functions to do it.)
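// A worked instance of the table above, with illustrative numbers only (one spatial axis,
// S_x = 5, S_f = 3, p_f = 1, q_x = 2, p_x = 1, a_x = b_x = 0):
//
//   Forward output length:   S_o = floor((0 + 4*1 + 1 + 0 - 3) / 2) + 1 = 2
//   Backward padding below:  (S_f - 1)*p_f - a_x = 2
//   Backward padding above:  (S_f - 1)*p_f + ((a_x + (S_x - 1)*p_x + b_x - (S_f - 1)*p_f) % q_x) - b_x
//                            = 2 + (2 % 2) - 0 = 2
//
// Sanity check: dilating delta (length 2) by q_x = 2 gives 3, padding by 2 + 2 gives 7, and a
// window of length 3 at stride p_x = 1 yields 7 - 3 + 1 = 5 = S_x, as required.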
auto& filters_shape = get_input_shape(0);
auto& filters_et = get_input_element_type(0);
auto& output_delta_shape = get_input_shape(1);
auto& output_delta_et = get_input_element_type(1);
auto& delta_shape = get_input_shape(1);
auto& delta_et = get_input_element_type(1);
element::Type forward_result_et;
Shape forward_result_shape;
std::tie(forward_result_et, forward_result_shape) =
infer_convolution_forward(this,
delta_et,
filters_et,
m_data_batch_shape,
m_data_dilation_strides_forward,
m_padding_below_forward,
m_padding_above_forward,
filters_shape,
m_window_movement_strides_forward,
m_window_dilation_strides_forward);
NODE_VALIDATION_ASSERT(this, forward_result_shape == delta_shape)
<< "Inferred forward output shape (" << forward_result_shape << ") does not match shape of "
<< "delta (" << delta_shape << ").";
set_output_type(0, delta_et, m_data_batch_shape);
//
// Make sure filter and output delta element types match.
// Compute parameters needed for backprop-as-convolution.
//
NODE_VALIDATION_ASSERT(this, output_delta_et == filters_et)
<< "Element types for filters and output delta do not match (filters element type: "
<< filters_et << ", output delta element type: " << output_delta_et << ").";
// Forward Backward
// Window movement strides q p_x
// Window dilation strides p_f p_f
// Padding below a_x (S_F - 1)p_f - a_x
// Padding above b_x (S_f - 1)p_f + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q) - b_x
// Data dilation strides p_x q
for (size_t i = 0; i < m_data_batch_shape.size() - 2; i++)
size_t spatial_dim_count = delta_shape.size() - 2;
m_window_movement_strides_backward = m_data_dilation_strides_forward;
m_window_dilation_strides_backward = m_window_dilation_strides_forward;
m_data_dilation_strides_backward = m_window_movement_strides_forward;
m_padding_below_backward.resize(spatial_dim_count);
m_padding_above_backward.resize(spatial_dim_count);
for (size_t i = 0; i < spatial_dim_count; i++)
{
m_window_movement_strides_backward.push_back(m_data_dilation_strides_forward[i]);
m_window_dilation_strides_backward.push_back(m_window_dilation_strides_forward[i]);
m_padding_below_backward.push_back((filters_shape[i + 2] - 1) *
m_window_dilation_strides_forward[i] -
m_padding_below_forward[i]);
m_padding_above_backward.push_back(
m_padding_below_backward[i] =
(filters_shape[i + 2] - 1) * m_window_dilation_strides_forward[i] -
m_padding_below_forward[i];
m_padding_above_backward[i] =
(filters_shape[i + 2] - 1) * m_window_dilation_strides_forward[i] +
((m_padding_below_forward[i] +
(m_data_batch_shape[i + 2] - 1) * m_data_dilation_strides_forward[i] +
m_padding_above_forward[i] -
(filters_shape[i + 2] - 1) * m_window_dilation_strides_forward[i]) %
m_window_movement_strides_forward[i]) -
m_padding_above_forward[i]);
m_data_dilation_strides_backward.push_back(m_window_movement_strides_forward[i]);
m_padding_above_forward[i];
}
Shape inferred_convolution_output_shape =
util::infer_convolution_output_shape(this,
output_delta_shape,
filters_shape,
m_window_movement_strides_backward,
m_window_dilation_strides_backward,
m_padding_below_backward,
m_padding_above_backward,
m_data_dilation_strides_backward,
0,
1,
0,
1,
0,
1);
NODE_VALIDATION_ASSERT(this, inferred_convolution_output_shape == m_data_batch_shape)
<< "Specified data batch shape does not match the inferred data batch shape "
<< "(specified shape: " << m_data_batch_shape
<< ", inferred data batch shape: " << inferred_convolution_output_shape;
set_output_type(0, filters_et, inferred_convolution_output_shape);
}
void op::ConvolutionBackpropData::generate_adjoints(autodiff::Adjoints& adjoints,
......@@ -596,62 +410,84 @@ op::ConvolutionBackpropFilters::ConvolutionBackpropFilters(
void op::ConvolutionBackpropFilters::validate_and_infer_types()
{
// Backprop to filters is itself convolution, with inputs/outputs/attributes transmogrified as
// follows.
//
// Forward Backward
// "N" axis for data batch 0 1
// "C" axis for data batch 1 0
// "Co" axis for filters 0 0
// "Ci" axis for filters 1 1
// "N" axis for output 0 1
// "C" axis for output 1 0
// Data batch x x
// Data batch shape S_x S_x
// Filters f delta
// Filters shape S_f S_f
// Window movement strides q_x p_f
// Window dilation strides p_f q_x
// Padding below a_x a_x
// Padding above b_x b_x - (a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q_x
// Data dilation strides p_x p_x
// Output shape S_o S_f
//
// To _validate_, we simply need to check/infer the output shape of the forward convolution,
// then check to make sure that the incoming delta has the same shape as the forward output.
//
// We will also compute and store the various parameters in the "backward" column above, since
// some backends need them. (TODO(amprocte): Is it just because of the way the reference works
// that this stuff is needed? If so, we can probably get rid of it and have conv_backprop
// reference kernels that do the calculations of the backward parameters internally, or supply
// utility functions to do it.)
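// A worked instance of the table above, reusing the illustrative numbers S_x = 5, S_f = 3,
// p_f = 1, q_x = 2, p_x = 1, a_x = b_x = 0 (so the forward output length is S_o = 2):
//
//   Backward window movement stride:  p_f = 1
//   Backward window dilation stride:  q_x = 2
//   Backward padding below:           a_x = 0
//   Backward padding above:           b_x - (a_x + (S_x - 1)*p_x + b_x - (S_f - 1)*p_f) % q_x
//                                     = 0 - (2 % 2) = 0
//
// Sanity check: convolving x (length 5, unpadded) with delta (length 2) dilated by q_x = 2
// (effective window 3) at stride p_f = 1 yields 5 - 3 + 1 = 3 = S_f, as required.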
auto& data_batch_shape = get_input_shape(0);
auto& data_batch_et = get_input_element_type(0);
auto& output_delta_shape = get_input_shape(1);
auto& output_delta_et = get_input_element_type(1);
auto& delta_shape = get_input_shape(1);
auto& delta_et = get_input_element_type(1);
element::Type forward_result_et;
Shape forward_result_shape;
std::tie(forward_result_et, forward_result_shape) =
infer_convolution_forward(this,
data_batch_et,
delta_et,
data_batch_shape,
m_data_dilation_strides_forward,
m_padding_below_forward,
m_padding_above_forward,
m_filters_shape,
m_window_movement_strides_forward,
m_window_dilation_strides_forward);
NODE_VALIDATION_ASSERT(this, forward_result_shape == delta_shape)
<< "Inferred forward output shape (" << forward_result_shape << ") does not match shape of "
<< "delta (" << delta_shape << ").";
set_output_type(0, delta_et, m_filters_shape);
//
// Make sure data batch and output delta element types match.
// Compute parameters needed for backprop-as-convolution.
//
NODE_VALIDATION_ASSERT(this, output_delta_et == data_batch_et)
<< "Element types for data batch and output delta do not match (data batch element type: "
<< data_batch_et << ", output delta element type: " << output_delta_et << ").";
// Forward Backward
// Window movement strides q p_f
// Window dilation strides p_f q
// Padding below a_x a_x
// Padding above b_x b_x - (a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q
// Data dilation strides p_x p_x
for (size_t i = 0; i < m_filters_shape.size() - 2; i++)
size_t spatial_dim_count = delta_shape.size() - 2;
m_window_movement_strides_backward = m_window_dilation_strides_forward;
m_window_dilation_strides_backward = m_window_movement_strides_forward;
m_padding_below_backward = m_padding_below_forward;
m_data_dilation_strides_backward = m_data_dilation_strides_forward;
m_padding_above_backward.resize(spatial_dim_count);
for (size_t i = 0; i < spatial_dim_count; i++)
{
m_window_movement_strides_backward.push_back(m_window_dilation_strides_forward[i]);
m_window_dilation_strides_backward.push_back(m_window_movement_strides_forward[i]);
m_padding_below_backward.push_back(m_padding_below_forward[i]);
m_padding_above_backward.push_back(
m_padding_above_backward[i] =
m_padding_above_forward[i] -
(m_padding_below_forward[i] +
(data_batch_shape[i + 2] - 1) * m_data_dilation_strides_forward[i] +
m_padding_above_forward[i] -
(m_filters_shape[i + 2] - 1) * m_window_dilation_strides_forward[i]) %
m_window_movement_strides_forward[i]);
m_data_dilation_strides_backward.push_back(m_data_dilation_strides_forward[i]);
m_window_movement_strides_forward[i];
}
Shape inferred_convolution_output_shape =
util::infer_convolution_output_shape(this,
data_batch_shape,
output_delta_shape,
m_window_movement_strides_backward,
m_window_dilation_strides_backward,
m_padding_below_backward,
m_padding_above_backward,
m_data_dilation_strides_backward,
1,
0,
0,
1,
1,
0);
NODE_VALIDATION_ASSERT(this, inferred_convolution_output_shape == m_filters_shape)
<< "Specified filters shape does not match the inferred filters shape "
<< "(specified shape: " << m_filters_shape
<< ", inferred filters shape: " << inferred_convolution_output_shape;
set_output_type(0, data_batch_et, inferred_convolution_output_shape);
}
shared_ptr<Node>
......@@ -667,3 +503,207 @@ shared_ptr<Node>
m_padding_above_forward,
m_data_dilation_strides_forward);
}
//
// This is a legacy function, retained because the CPU backend uses it for now.
// TODO(amprocte): Update CPU backend to use the new stuff in validation_util.hpp, and remove this
// function.
//
Shape op::util::infer_convolution_output_shape(const Node* node,
const Shape& data_batch_shape,
const Shape& filters_shape,
const Strides& window_movement_strides,
const Strides& window_dilation_strides,
const CoordinateDiff& padding_below,
const CoordinateDiff& padding_above,
const Strides& data_dilation_strides,
size_t batch_axis_data,
size_t input_channel_axis_data,
size_t input_channel_axis_filters,
size_t output_channel_axis_filters,
size_t batch_axis_result,
size_t output_channel_axis_result)
{
NODE_VALIDATION_ASSERT(node, batch_axis_data <= 1) << "(This is an internal nGraph error)";
NODE_VALIDATION_ASSERT(node, input_channel_axis_data <= 1)
<< "(This is an internal nGraph error)";
NODE_VALIDATION_ASSERT(node, input_channel_axis_filters <= 1)
<< "(This is an internal nGraph error)";
NODE_VALIDATION_ASSERT(node, output_channel_axis_filters <= 1)
<< "(This is an internal nGraph error)";
NODE_VALIDATION_ASSERT(node, batch_axis_result <= 1) << "(This is an internal nGraph error)";
NODE_VALIDATION_ASSERT(node, output_channel_axis_result <= 1)
<< "(This is an internal nGraph error)";
//
// Make sure data_batch: NCiDi for some Di of rank>0, N != 0, Ci != 0.
//
NODE_VALIDATION_ASSERT(node, data_batch_shape.size() >= 3)
<< "Data batch input must have rank of at least 3 (one batch axis, "
<< "one input-channel axis, and at least one spatial dimension) "
<< "(data batch shape: " << data_batch_shape << ").";
size_t batch_size = data_batch_shape[batch_axis_data];
NODE_VALIDATION_ASSERT(node, batch_size != 0)
<< "Data batch size is zero (data batch shape: " << data_batch_shape << ", "
<< "batch axis is axis " << batch_axis_data << ").";
size_t input_channel_count = data_batch_shape[input_channel_axis_data];
NODE_VALIDATION_ASSERT(node, input_channel_count != 0)
<< "Input channel count is zero (data batch shape: " << data_batch_shape << ", "
<< "channel axis is axis " << input_channel_axis_data << ").";
size_t spatial_dimension_count = data_batch_shape.size() - 2;
//
// Make sure filters: CoCiWv for some Co>0, rank of W = rank of Di.
//
NODE_VALIDATION_ASSERT(node, filters_shape.size() == 2 + spatial_dimension_count)
<< "Filter input must have rank equal to the data batch (one axis for output "
<< "channels, one axis for input channels, and the same number of spatial "
<< "dimensions as the data batch (filter input shape: " << filters_shape << ", "
<< "data batch shape: " << data_batch_shape << ").";
size_t output_channel_count = filters_shape[output_channel_axis_filters];
NODE_VALIDATION_ASSERT(node, output_channel_count != 0)
<< "Output channel count for filters is zero (filters shape: " << filters_shape << ", "
<< "output channels on axis " << output_channel_axis_filters << ").";
NODE_VALIDATION_ASSERT(node, filters_shape[input_channel_axis_filters] == input_channel_count)
<< "Input channel count for filters (" << filters_shape[input_channel_axis_filters] << ") "
<< "does not match the number of channels in the data batch (" << input_channel_count
<< ") "
<< "(filter input shape: " << filters_shape << ", filter input channels on axis "
<< input_channel_axis_filters << "; data batch shape: " << data_batch_shape
<< ", data batch channels on axis " << batch_axis_data << ").";
//
// Make sure window movement strides, window dilation strides, and data dilation strides
// have same rank as Di.
//
NODE_VALIDATION_ASSERT(node, window_movement_strides.size() == spatial_dimension_count)
<< "Rank of window movement strides does not match the number of spatial dimensions ("
<< spatial_dimension_count
<< ") in the data batch (window movement strides: " << window_movement_strides
<< ", data batch shape: " << data_batch_shape << ").";
NODE_VALIDATION_ASSERT(node, window_dilation_strides.size() == spatial_dimension_count)
<< "Rank of window dilation strides does not match the number of spatial dimensions ("
<< spatial_dimension_count
<< ") in the data batch (window dilation strides: " << window_dilation_strides
<< ", data batch shape: " << data_batch_shape << ").";
NODE_VALIDATION_ASSERT(node, data_dilation_strides.size() == spatial_dimension_count)
<< "Rank of data dilation strides does not match the number of spatial dimensions ("
<< spatial_dimension_count
<< ") in the data batch (data dilation strides: " << data_dilation_strides
<< ", data batch shape: " << data_batch_shape << ").";
//
// Make sure padding-below and padding-above shapes have same rank as Di.
//
NODE_VALIDATION_ASSERT(node, padding_below.size() == spatial_dimension_count)
<< "Rank of the padding below does not match the number of spatial dimensions ("
<< spatial_dimension_count << ") in the data batch (padding below: " << padding_below
<< ", data batch shape: " << data_batch_shape << ").";
NODE_VALIDATION_ASSERT(node, padding_above.size() == spatial_dimension_count)
<< "Rank of the padding above does not match the number of spatial dimensions ("
<< spatial_dimension_count << ") in the data batch (padding above: " << padding_above
<< ", data batch shape: " << data_batch_shape << ").";
//
// Extract input item shape Di and make sure all dimensions are larger than 0 after padding and dilation.
//
std::vector<ptrdiff_t> input_item_virtual_shape_signed;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(node, data_dilation_strides[i] != 0)
<< "Data dilation stride at spatial dimension " << i << " is zero "
<< "(data dilation strides: " << data_dilation_strides << ").";
size_t dim_size = data_batch_shape[1 + 1 + i];
size_t dilated_dim_size = (dim_size - 1) * data_dilation_strides[i] + 1;
ptrdiff_t padded_dilated_dim_size = padding_below[i] + dilated_dim_size + padding_above[i];
input_item_virtual_shape_signed.push_back(padded_dilated_dim_size);
}
Shape input_item_virtual_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(node, input_item_virtual_shape_signed[i] > 0)
<< "Input dimension after padding and dilation is non-positive "
<< "at spatial axis " << i
<< " (post-padding/dilation input item shape: " << input_item_virtual_shape
<< ", data batch shape: " << data_batch_shape
<< ", data dilation strides: " << data_dilation_strides
<< ", padding below: " << padding_below << ", padding above: " << padding_above << ").";
input_item_virtual_shape.push_back(size_t(input_item_virtual_shape_signed[i]));
}
//
// Extract the physical shape Wp of the convolution window, *not* including dilation, from the filter dimensions.
// At the same time, make sure window shape dimensions are all larger than 0.
//
Shape window_physical_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
window_physical_shape.push_back(filters_shape[1 + 1 + i]);
NODE_VALIDATION_ASSERT(node, window_physical_shape[i] != 0)
<< "Filters shape at spatial dimension " << i << " is zero "
<< "(filters shape: " << filters_shape << ").";
}
//
// Compute virtual shape Wp of the convolution window, *including* dilation. At the same time, make sure all
// window dilation strides are larger than 0, and that the dilated filter fits within the spatial dimensions.
//
Shape window_virtual_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(node, window_dilation_strides[i] != 0)
<< "Window dilation stride at spatial dimension " << i << " is zero "
<< "(window dilation strides: " << window_dilation_strides << ").";
window_virtual_shape.push_back((window_physical_shape[i] - 1) * window_dilation_strides[i] +
1);
NODE_VALIDATION_ASSERT(node, window_virtual_shape[i] <= input_item_virtual_shape[i])
<< "Post-dilation window shape is smaller than the post-padding/dilation "
<< "input item shape at spatial dimension " << i << " (post-padding/dilation "
<< "input item shape: " << input_item_virtual_shape
<< ", data batch shape: " << data_batch_shape
<< ", data dilation strides: " << data_dilation_strides
<< ", padding below: " << padding_below << ", padding above: " << padding_above
<< ", post-dilation window shape: " << window_virtual_shape
<< ", filters shape: " << filters_shape
<< ", window dilation strides: " << window_dilation_strides;
}
//
// Construct result shape: NCoDo or CoNDo (depending on *_axis_result), checking at the same
// time that all window movement strides are larger than 0.
//
Shape result_shape(spatial_dimension_count + 2);
result_shape[batch_axis_result] = batch_size;
result_shape[output_channel_axis_result] = output_channel_count;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(node, window_movement_strides[i] != 0)
<< "Window movement stride at spatial dimension " << i << " is zero "
<< "(window movement strides: " << window_movement_strides << ").";
result_shape[i + 2] = ceil_div(input_item_virtual_shape[i] - window_virtual_shape[i] + 1,
window_movement_strides[i]);
}
return result_shape;
}
......@@ -356,6 +356,9 @@ namespace ngraph
namespace util
{
// This is a legacy function, retained because the CPU backend uses it for now.
// TODO: Update CPU backend to use the new stuff in validation_util.hpp, and remove
// this function.
Shape infer_convolution_output_shape(const Node* node,
const Shape& data_batch_shape,
const Shape& filters_shape,
......
......@@ -21,6 +21,7 @@
#include "ngraph/op/greater.hpp"
#include "ngraph/op/select_and_scatter.hpp"
#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"
using namespace std;
using namespace ngraph;
......@@ -39,14 +40,14 @@ op::MaxPool::MaxPool(const shared_ptr<Node>& arg,
constructor_validate_and_infer_types();
}
// TODO(amprocte): This code is now *exactly* the same as AvgPool::validate_and_infer_types(),
// except that for AvgPool we also have an optional check that the pooling window is never
// entirely in the padding. Should unify in a utility function, but not sure where it belongs
// at this juncture.
void op::MaxPool::validate_and_infer_types()
{
auto& arg_shape = get_input_shape(0);
NODE_VALIDATION_ASSERT(this, arg_shape.size() >= 3)
<< "Data input shape does not have rank of at least 3 (data input shape: " << arg_shape
<< ").";
if (0 == m_window_movement_strides.size() && arg_shape.size() > 2)
{
m_window_movement_strides = Strides(arg_shape.size() - 2, 1);
......@@ -62,105 +63,20 @@ void op::MaxPool::validate_and_infer_types()
m_padding_above = Shape(arg_shape.size() - 2, 0);
}
//
// Make sure batch size and channel count are not zero, and that we have at least one spatial
// dimension (in other words, that arg has shape NCDi for some Di of rank>0, N != 0, C != 0).
//
NODE_VALIDATION_ASSERT(this, arg_shape.size() >= 3)
<< "Data input shape does not have rank of at least 3 (data input shape: " << arg_shape
<< ").";
size_t batch_size = arg_shape[0];
NODE_VALIDATION_ASSERT(this, batch_size != 0)
<< "Data batch size is zero (data input shape: " << arg_shape << ").";
size_t channel_count = arg_shape[1];
NODE_VALIDATION_ASSERT(this, channel_count != 0)
<< "Channel count is zero (data input shape: " << arg_shape << ").";
size_t spatial_dimension_count = arg_shape.size() - 2;
//
// Make sure window shape, window movement strides, and padding have same rank as Di.
//
NODE_VALIDATION_ASSERT(this, m_window_shape.size() == spatial_dimension_count)
<< "Window shape rank does not match number of spatial dimensions (window shape: "
<< m_window_shape << ", data input shape: " << arg_shape << ").";
NODE_VALIDATION_ASSERT(this, m_window_movement_strides.size() == spatial_dimension_count)
<< "Window movement stride rank does not match number of spatial dimensions (window "
"movement strides: "
<< m_window_movement_strides << ", data input shape: " << arg_shape << ").";
NODE_VALIDATION_ASSERT(this, m_padding_below.size() == spatial_dimension_count)
<< "Below-padding rank does not match number of spatial dimensions (padding below: "
<< m_padding_below << ", data input shape: " << arg_shape << ").";
NODE_VALIDATION_ASSERT(this, m_padding_above.size() == spatial_dimension_count)
<< "Above-padding rank does not match number of spatial dimensions (padding above: "
<< m_padding_above << ", data input shape: " << arg_shape << ").";
//
// Extract input item shape Di and make sure all dimensions are larger than 0.
//
Shape input_item_virtual_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
size_t dim_size = arg_shape[1 + 1 + i];
size_t virtual_dim_size = m_padding_below[i] + dim_size + m_padding_above[i];
input_item_virtual_shape.push_back(virtual_dim_size);
}
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, input_item_virtual_shape[i] != 0)
<< "Data input spatial dimension " << i
<< " has zero length even after padding (virtual shape of input item: "
<< input_item_virtual_shape << ").";
}
//
// Make sure window shape dimensions are all larger than 0.
//
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, m_window_shape[i] != 0)
<< "Window shape dimension " << i
<< " has zero length (window shape: " << m_window_shape << ").";
}
//
// Make sure the pooling window fits within the spatial dimensions.
//
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, m_window_shape[i] <= input_item_virtual_shape[i])
<< "Window shape after padding is larger than the spatial dimensions (window shape: "
<< m_window_shape << ", virtual shape of input item: " << input_item_virtual_shape
<< ").";
}
//
// Compute output item shape Do, checking at the same time that all window movement strides are larger than 0.
//
Shape output_item_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, m_window_movement_strides[i] != 0)
<< "Window movement strides dimension " << i
<< " has zero length (window movement strides: " << m_window_movement_strides << ").";
output_item_shape.push_back(ceil_div(input_item_virtual_shape[i] - m_window_shape[i] + 1,
m_window_movement_strides[i]));
}
//
// Construct result shape: NCDo.
//
Shape result_shape(1 + 1 + spatial_dimension_count);
result_shape[0] = batch_size;
result_shape[1] = channel_count;
copy(output_item_shape.begin(), output_item_shape.end(), result_shape.begin() + 2);
set_output_type(0, get_input_element_type(0), result_shape);
// infer_batched_pooling_forward wants CoordinateDiffs for these, while the pooling ops for
// now still take Shape (no negative padding).
CoordinateDiff padding_below(m_padding_below.begin(), m_padding_below.end());
CoordinateDiff padding_above(m_padding_above.begin(), m_padding_above.end());
set_output_type(0,
get_input_element_type(0),
infer_batched_pooling_forward(this,
arg_shape,
padding_below,
padding_above,
m_window_shape,
m_window_movement_strides,
true));
}
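// Shape-inference example (illustrative): a MaxPool over an input of shape {64, 3, 7, 7}
// with window {2, 2}, strides {1, 1}, and zero padding yields ceil_div(7 - 2 + 1, 1) = 6 per
// spatial axis, i.e. an output of shape {64, 3, 6, 6}.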
op::MaxPool::MaxPool(const shared_ptr<Node>& arg,
......@@ -204,121 +120,33 @@ op::MaxPoolBackprop::MaxPoolBackprop(const shared_ptr<Node>& arg_forward,
void op::MaxPoolBackprop::validate_and_infer_types()
{
NODE_VALIDATION_ASSERT(this, get_input_element_type(0) == get_input_element_type(1))
<< "Data input and delta element types do not match (data input element type: "
<< get_input_element_type(0) << ", delta element type: " << get_input_element_type(1)
<< ").";
//
// TODO(amprocte): de-duplicate almost all the rest of this code from
// MaxPool::validate_and_infer_types().
//
auto& arg_shape = get_input_shape(0);
//
// Make sure batch size and channel count are not zero, and that we have at least one spatial
// dimension (in other words, that arg has shape NCDi for some Di of rank>0, N != 0, C != 0).
//
NODE_VALIDATION_ASSERT(this, arg_shape.size() >= 3)
<< "Data input shape does not have rank of at least 3 (data input shape: " << arg_shape
<< ").";
size_t batch_size = arg_shape[0];
NODE_VALIDATION_ASSERT(this, batch_size != 0)
<< "Data batch size is zero (data input shape: " << arg_shape << ").";
size_t channel_count = arg_shape[1];
NODE_VALIDATION_ASSERT(this, channel_count != 0)
<< "Channel count is zero (data input shape: " << arg_shape << ").";
size_t spatial_dimension_count = arg_shape.size() - 2;
//
// Make sure window shape, window movement strides, and padding have same rank as Di.
//
NODE_VALIDATION_ASSERT(this, m_window_shape.size() == spatial_dimension_count)
<< "Window shape rank does not match number of spatial dimensions (window shape: "
<< m_window_shape << ", data input shape: " << arg_shape << ").";
NODE_VALIDATION_ASSERT(this, m_window_movement_strides.size() == spatial_dimension_count)
<< "Window movement stride rank does not match number of spatial dimensions (window "
"movement strides: "
<< m_window_movement_strides << ", data input shape: " << arg_shape << ").";
NODE_VALIDATION_ASSERT(this, m_padding_below.size() == spatial_dimension_count)
<< "Below-padding rank does not match number of spatial dimensions (padding below: "
<< m_padding_below << ", data input shape: " << arg_shape << ").";
NODE_VALIDATION_ASSERT(this, m_padding_above.size() == spatial_dimension_count)
<< "Above-padding rank does not match number of spatial dimensions (padding above: "
<< m_padding_above << ", data input shape: " << arg_shape << ").";
//
// Extract input item shape Di and make sure all dimensions are larger than 0.
//
Shape input_item_virtual_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
size_t dim_size = arg_shape[1 + 1 + i];
size_t virtual_dim_size = m_padding_below[i] + dim_size + m_padding_above[i];
input_item_virtual_shape.push_back(virtual_dim_size);
}
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, input_item_virtual_shape[i] != 0)
<< "Data input spatial dimension " << i
<< " has zero length even after padding (virtual shape of input item: "
<< input_item_virtual_shape << ").";
}
//
// Make sure window shape dimensions are all larger than 0.
//
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, m_window_shape[i] != 0)
<< "Window shape dimension " << i
<< " has zero length (window shape: " << m_window_shape << ").";
}
//
// Make sure the pooling window fits within the spatial dimensions.
//
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, m_window_shape[i] <= input_item_virtual_shape[i])
<< "Window shape after padding is larger than the spatial dimensions (window shape: "
<< m_window_shape << ", virtual shape of input item: " << input_item_virtual_shape
<< ").";
}
//
// Compute output item shape Do, checking at the same time that all window movement strides are larger than 0.
//
Shape output_item_shape;
for (size_t i = 0; i < spatial_dimension_count; i++)
{
NODE_VALIDATION_ASSERT(this, m_window_movement_strides[i] != 0)
<< "Window movement strides dimension " << i
<< " has zero length (window movement strides: " << m_window_movement_strides << ").";
output_item_shape.push_back(ceil_div(input_item_virtual_shape[i] - m_window_shape[i] + 1,
m_window_movement_strides[i]));
}
//
// Construct result shape: NCDo.
//
Shape result_shape(1 + 1 + spatial_dimension_count);
result_shape[0] = batch_size;
result_shape[1] = channel_count;
copy(output_item_shape.begin(), output_item_shape.end(), result_shape.begin() + 2);
NODE_VALIDATION_ASSERT(this, get_input_shape(1) == result_shape)
<< "Forward result shape and delta shape do not match (forward result shape: "
<< result_shape << ", delta shape: " << get_input_shape(1) << ").";
set_output_type(0, get_input_element_type(0), arg_shape);
auto forward_arg_et = get_input_element_type(0);
auto& forward_arg_shape = get_input_shape(0);
auto delta_et = get_input_element_type(1);
auto& delta_shape = get_input_shape(1);
NODE_VALIDATION_ASSERT(this, forward_arg_et == delta_et)
<< "Element types for forward argument (" << forward_arg_et << ") and delta (" << delta_et
<< ") do not match.";
// infer_batched_pooling_forward wants CoordinateDiffs for these, while the pooling ops for
// now still take Shape (no negative padding).
CoordinateDiff padding_below(m_padding_below.begin(), m_padding_below.end());
CoordinateDiff padding_above(m_padding_above.begin(), m_padding_above.end());
Shape forward_result_shape = infer_batched_pooling_forward(this,
forward_arg_shape,
padding_below,
padding_above,
m_window_shape,
m_window_movement_strides,
true);
NODE_VALIDATION_ASSERT(this, forward_result_shape == delta_shape)
<< "Inferred forward output shape does not match delta shape (inferred forward output "
<< "shape: " << forward_result_shape << ", delta shape: " << delta_shape << ").";
set_output_type(0, get_input_element_type(0), forward_arg_shape);
}
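// Example (illustrative): for a forward argument of shape {64, 3, 7, 7} with window {2, 2}
// and strides {1, 1}, the inferred forward output shape is {64, 3, 6, 6}; the delta input
// must have exactly that shape, and the backprop output takes the forward argument's shape,
// {64, 3, 7, 7}.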
shared_ptr<op::MaxPool> op::MaxPoolBackprop::get_forward_op() const
......
......@@ -54,7 +54,7 @@ public:
/// \brief Create a tensor specific to this backend
/// \param element_type The type of the tensor element
/// \param shape The shape of the tensor
/// \returns shared_ptr to a new backend specific tensor
/// \returns shared_ptr to a new backend-specific tensor
virtual std::shared_ptr<ngraph::runtime::Tensor>
create_tensor(const ngraph::element::Type& element_type, const Shape& shape) = 0;
......@@ -64,7 +64,7 @@ public:
/// \param memory_pointer A pointer to a buffer used for this tensor. The size of the buffer
/// must be sufficient to contain the tensor. The lifetime of the buffer is the
/// responsibility of the caller.
/// \returns shared_ptr to a new backend specific tensor
/// \returns shared_ptr to a new backend-specific tensor
virtual std::shared_ptr<ngraph::runtime::Tensor> create_tensor(
const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer) = 0;
......@@ -107,8 +107,8 @@ public:
/// \param func The function to execute
virtual void remove_compiled_function(std::shared_ptr<Function> func);
/// \brief Enable the collection of per op performance information on a specified Function.
/// Data is collection via the `get_performance_data` method.
/// \brief Enable the collection of per-op performance information on a specified Function.
/// Data collection is via the `get_performance_data` method.
/// \param func The function to collect performance data on.
/// \param enable Set to true to enable or false to disable data collection
virtual void enable_performance_data(std::shared_ptr<Function> func, bool enable) {}
......
......@@ -26,7 +26,7 @@ constexpr const uint32_t initial_buffer_size = 10 * 1024 * 1024;
runtime::gpu::GPUMemoryManager::GPUMemoryManager(GPUPrimitiveEmitter* emitter)
: m_buffer_offset(0)
, m_buffered_mem(initial_buffer_size)
, m_buffered_mem(initial_buffer_size, 0)
, m_workspace_manager(new pass::MemoryManager(runtime::gpu::GPUMemoryManager::alignment))
, m_argspace_mem(1, {nullptr, 0})
, m_workspace_mem(1, {nullptr, 0})
......@@ -80,6 +80,8 @@ void runtime::gpu::GPUMemoryManager::allocate()
m_argspace_mem.back().ptr, m_buffered_mem.data(), m_buffer_offset);
// add an empty node to the end of the list and zero offset
m_argspace_mem.push_back({nullptr, 0});
m_buffered_mem.clear();
m_buffered_mem.resize(initial_buffer_size, 0);
m_buffer_offset = 0;
}
......@@ -97,7 +99,9 @@ void runtime::gpu::GPUMemoryManager::allocate()
size_t runtime::gpu::GPUMemoryManager::queue_for_transfer(const void* data, size_t size)
{
// if the current allocation will overflow the host buffer
size_t new_size = m_buffer_offset + size;
size_t aligned_size =
ngraph::pass::MemoryManager::align(size, runtime::gpu::GPUMemoryManager::alignment);
size_t new_size = m_buffer_offset + aligned_size;
size_t buffer_size = m_buffered_mem.size();
bool need_resize = false;
while (buffer_size < new_size)
......@@ -109,12 +113,12 @@ size_t runtime::gpu::GPUMemoryManager::queue_for_transfer(const void* data, size
if (need_resize)
{
m_buffered_mem.resize(buffer_size);
m_buffered_mem.resize(buffer_size, 0);
}
size_t offset = m_buffer_offset;
std::memcpy(m_buffered_mem.data() + offset, data, size);
m_buffer_offset += size;
m_buffer_offset += aligned_size;
return offset;
}
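// Worked example (illustrative): with alignment = 8 and a request of size = 4, aligned_size
// becomes 8; the 4 data bytes are copied, the offset still advances by a full 8-byte slot,
// and the trailing 4 bytes remain zero because m_buffered_mem is zero-initialized.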
......@@ -133,7 +137,6 @@ runtime::gpu::GPUAllocator::GPUAllocator(const GPUAllocator& g)
size_t runtime::gpu::GPUAllocator::reserve_argspace(const void* data, size_t size)
{
// add parameter data to the host buffer that will be transferred to the device
size = ngraph::pass::MemoryManager::align(size, runtime::gpu::GPUMemoryManager::alignment);
size_t offset = m_manager->queue_for_transfer(data, size);
auto local = std::prev(m_manager->m_argspace_mem.end());
// return a lambda that will yield the gpu memory address. this
......
......@@ -1565,14 +1565,13 @@ void runtime::intelgpu::do_arg_max_min_operation(cldnn::topology& topology,
{
gws = generate_loops(writer, output_shape, true);
writer << get_opencl_type_name(output_type) << " " << var_name << " = " << infinity
<< ";\n";
writer << "uint index = -1;\n";
writer << get_opencl_type_name(input_type) << " " << var_name << " = " << infinity << ";\n";
writer << get_opencl_type_name(output_type) << " index = 0;\n";
writer << "for (uint i = 0; i < " << input_shape.at(reduction_axis) << "; ++i)\n";
writer.block_begin();
{
writer << "if(i == 0 || input0" << dims_buffer << operation_sign << var_name << ")\n";
writer << "if (input0" << dims_buffer << operation_sign << var_name << ")\n";
writer.block_begin();
{
writer << var_name << " = input0" << dims_buffer << ";\n";
......
......@@ -7,6 +7,7 @@ backwards_dot_scalar_tensor
backwards_dot_tensor3_tensor3
backwards_dot_tensor_scalar
backwards_dot_tensor_vector
backwards_exp
backwards_maxpool_n2_c1_hw5_3x3_str2_max
backwards_maxpool_n4_c1_hw4_2x2_max
backwards_replace_slice
......@@ -20,6 +21,9 @@ dequantize
dequantize_axes
dequantize_int8
divide_by_zero_int32
dot_3d_multi_axis
dot_4d_5d_multi_axis
dot_4d_5d_multi_axis_more
function_call
max_pool_3d
numeric_double_inf
......
......@@ -78,12 +78,12 @@ namespace ngraph
/// \param layout Layout to set
void set_tensor_layout(const std::shared_ptr<descriptor::layout::TensorLayout>& layout);
/// \brief Get the stale value of the tensor. A tensor is stale if it's data is
/// \brief Get the stale value of the tensor. A tensor is stale if its data is
/// changed.
/// \return true if there is new data in this tensor
bool get_stale() const;
/// \brief Set the stale value of the tensor. A tensor is stale if it's data is
/// \brief Set the stale value of the tensor. A tensor is stale if its data is
/// changed.
void set_stale(bool val);
......
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/validation_util.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
//
// Infers the output shape of a windowed reduction operation, where the data may be dilated and/or
// padded, and the reduction window may be strided and/or dilated.
//
Shape ngraph::infer_windowed_reduction_output_shape(const Node* node,
const Shape& data_shape,
const Strides& data_dilation,
const CoordinateDiff& data_padding_below,
const CoordinateDiff& data_padding_above,
const Shape& window_shape,
const Strides& window_strides,
const Strides& window_dilation,
bool is_window_all_in_padding_allowed)
{
NODE_VALIDATION_ASSERT(node, data_shape.size() == data_dilation.size())
<< "Data shape (" << data_shape << ") does not have same rank as "
<< "the data dilation (" << data_dilation << ").";
NODE_VALIDATION_ASSERT(node, data_shape.size() == data_padding_below.size())
<< "Data shape (" << data_shape << ") does not have same rank as "
<< "the data padding below (" << data_padding_below << ").";
NODE_VALIDATION_ASSERT(node, data_shape.size() == data_padding_above.size())
<< "Data shape (" << data_shape << ") does not have same rank as "
<< "the data padding above (" << data_padding_above << ").";
NODE_VALIDATION_ASSERT(node, data_shape.size() == window_shape.size())
<< "Data shape (" << data_shape << ") does not have same rank as "
<< "the window shape (" << window_shape << ").";
NODE_VALIDATION_ASSERT(node, data_shape.size() == window_strides.size())
<< "Data shape (" << data_shape << ") does not have same rank as "
<< "the window strides (" << window_strides << ").";
NODE_VALIDATION_ASSERT(node, data_shape.size() == window_dilation.size())
<< "Data shape (" << data_shape << ") does not have same rank as "
<< "the window dilation (" << window_dilation << ").";
Shape output_shape(data_shape.size());
for (size_t i = 0; i < data_shape.size(); i++)
{
NODE_VALIDATION_ASSERT(node, data_dilation[i] > 0)
<< "Data dilation (" << data_dilation << ") has zero dimension at axis " << i << ".";
NODE_VALIDATION_ASSERT(node, window_strides[i] > 0)
<< "Window strides (" << window_strides << ") has zero dimension at axis " << i << ".";
NODE_VALIDATION_ASSERT(node, window_dilation[i] > 0)
<< "Window dilation (" << window_dilation << ") has zero dimension at axis " << i
<< ".";
ptrdiff_t data_padded_dilated_dim =
(ptrdiff_t(data_dilation[i]) * (ptrdiff_t(data_shape[i]) - 1)) + 1 +
data_padding_below[i] + data_padding_above[i];
ptrdiff_t window_dilated_dim =
ptrdiff_t(window_dilation[i]) * (ptrdiff_t(window_shape[i]) - 1) + 1;
NODE_VALIDATION_ASSERT(node, data_padded_dilated_dim > 0)
<< "Data shape after padding and dilation has dimension less than 1 (dim: "
<< data_padded_dilated_dim << ") at axis " << i << ".";
NODE_VALIDATION_ASSERT(node, window_dilated_dim > 0)
<< "Window after dilation has dimension less than 1 (dim: " << window_dilated_dim
<< ") at axis " << i << ".";
NODE_VALIDATION_ASSERT(node, window_dilated_dim <= data_padded_dilated_dim)
<< "Window after dilation has dimension (dim: " << window_dilated_dim
<< ") larger than the data shape after padding (dim: " << data_padded_dilated_dim
<< ") at axis " << i << ".";
NODE_VALIDATION_ASSERT(node,
is_window_all_in_padding_allowed ||
(window_dilated_dim >= data_padding_below[i] &&
window_dilated_dim >= data_padding_above[i]))
<< "Window after dilation is sometimes entirely in the padding area for axis " << i
<< "(dilated window dimension: " << window_dilated_dim
<< ", padding below dimension: " << data_padding_below[i]
<< ", padding above dimension: " << data_padding_above[i] << ") and this is not "
<< "allowed.";
size_t output_dim = ceil_div(
size_t(data_padded_dilated_dim) - size_t(window_dilated_dim) + 1, window_strides[i]);
output_shape[i] = output_dim;
}
return output_shape;
}
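// Worked example (illustrative): for a single axis with data_shape = 10, data_dilation = 1,
// padding_below = 1, padding_above = 1, window_shape = 3, window_dilation = 1, and
// window_strides = 2:
//   data_padded_dilated_dim = 1 * (10 - 1) + 1 + 1 + 1 = 12
//   window_dilated_dim      = 1 * (3 - 1) + 1          = 3
//   output_dim              = ceil_div(12 - 3 + 1, 2)  = 5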
//
// Infers the output batch shape and element type for convolution fprop.
//
std::tuple<element::Type, Shape>
ngraph::infer_convolution_forward(const Node* node,
element::Type et_batch,
element::Type et_filters,
const Shape& data_batch_shape,
const Strides& data_dilation,
const CoordinateDiff& data_padding_below,
const CoordinateDiff& data_padding_above,
const Shape& filters_shape,
const Strides& filter_strides,
const Strides& filter_dilation)
{
NODE_VALIDATION_ASSERT(node, et_batch == et_filters)
<< "Element types for data batch and filters do not match (data batch element type: "
<< et_batch << ", filters element type: " << et_filters << ").";
NODE_VALIDATION_ASSERT(node, data_batch_shape.size() >= 3)
<< "Data batch must have rank of at least 3 (one batch axis, "
<< "one input-channel axis, and at least one spatial dimension) "
<< "(data batch shape: " << data_batch_shape << ").";
NODE_VALIDATION_ASSERT(node, filters_shape.size() >= 3)
<< "Filters must have rank of at least 3 (one output-channel axis, "
<< "one input-channel axis, and at least one spatial dimension) "
<< "(filters shape: " << filters_shape << ").";
size_t batch_size = data_batch_shape[0];
size_t data_channel_count = data_batch_shape[1];
Shape data_spatial_shape(data_batch_shape.begin() + 2, data_batch_shape.end());
size_t filter_output_channel_count = filters_shape[0];
size_t filter_input_channel_count = filters_shape[1];
Shape filter_spatial_shape(filters_shape.begin() + 2, filters_shape.end());
NODE_VALIDATION_ASSERT(node, batch_size > 0) << "Batch size is zero.";
NODE_VALIDATION_ASSERT(node, data_channel_count > 0) << "Data batch channel count is zero.";
NODE_VALIDATION_ASSERT(node, data_channel_count == filter_input_channel_count)
<< "Data batch channel count (" << data_channel_count << ") does not match filter input "
<< "channel count (" << filter_input_channel_count << ").";
NODE_VALIDATION_ASSERT(node, filter_output_channel_count > 0)
<< "Filter output channel count is zero.";
Shape data_output_shape = infer_windowed_reduction_output_shape(node,
data_spatial_shape,
data_dilation,
data_padding_below,
data_padding_above,
filter_spatial_shape,
filter_strides,
filter_dilation,
true);
Shape batch_output_shape(data_batch_shape.size());
batch_output_shape[0] = batch_size;
batch_output_shape[1] = filter_output_channel_count;
std::copy(data_output_shape.begin(), data_output_shape.end(), batch_output_shape.begin() + 2);
return std::make_tuple(et_batch, batch_output_shape);
}
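// Example (illustrative): a data batch of shape {64, 3, 224, 224} convolved with filters of
// shape {16, 3, 7, 7}, filter strides {2, 2}, unit dilations, and zero padding gives
// ceil_div(224 - 7 + 1, 2) = 109 per spatial axis, i.e. an output shape of {64, 16, 109, 109}.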
//
// Infers the output batch shape and element type for batched pooling fprop.
//
Shape ngraph::infer_batched_pooling_forward(const Node* node,
const Shape& data_batch_shape,
const CoordinateDiff& data_padding_below,
const CoordinateDiff& data_padding_above,
const Shape& window_shape,
const Strides& window_strides,
bool is_window_all_in_padding_allowed)
{
NODE_VALIDATION_ASSERT(node, data_batch_shape.size() >= 3)
<< "Data batch must have rank of at least 3 (one batch axis, "
<< "one input-channel axis, and at least one spatial dimension) "
<< "(data batch shape: " << data_batch_shape << ").";
size_t spatial_dimension_count = data_batch_shape.size() - 2;
NODE_VALIDATION_ASSERT(node, data_padding_below.size() == spatial_dimension_count)
<< "Data padding below (" << data_padding_below << ") does not have required rank ("
<< spatial_dimension_count << ").";
NODE_VALIDATION_ASSERT(node, data_padding_above.size() == spatial_dimension_count)
<< "Data padding above (" << data_padding_above << ") does not have required rank ("
<< spatial_dimension_count << ").";
NODE_VALIDATION_ASSERT(node, window_shape.size() == spatial_dimension_count)
<< "Window shape (" << window_shape << ") does not have required rank ("
<< spatial_dimension_count << ").";
NODE_VALIDATION_ASSERT(node, window_strides.size() == spatial_dimension_count)
<< "Window shape (" << window_strides << ") does not have required rank ("
<< spatial_dimension_count << ").";
size_t batch_size = data_batch_shape[0];
size_t channel_count = data_batch_shape[1];
Shape data_spatial_shape(data_batch_shape.begin() + 2, data_batch_shape.end());
NODE_VALIDATION_ASSERT(node, batch_size > 0) << "Batch size is zero.";
NODE_VALIDATION_ASSERT(node, channel_count > 0) << "Channel count is zero.";
// For pooling ops we don't need dilation, so we fill in the identity value (all 1).
Strides data_dilation(spatial_dimension_count, 1);
Strides window_dilation(spatial_dimension_count, 1);
Shape data_output_shape =
infer_windowed_reduction_output_shape(node,
data_spatial_shape,
data_dilation,
data_padding_below,
data_padding_above,
window_shape,
window_strides,
window_dilation,
is_window_all_in_padding_allowed);
Shape batch_output_shape(data_batch_shape.size());
batch_output_shape[0] = batch_size;
batch_output_shape[1] = channel_count;
std::copy(data_output_shape.begin(), data_output_shape.end(), batch_output_shape.begin() + 2);
return batch_output_shape;
}
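// Example (illustrative): pooling a data batch of shape {1, 2, 15, 15} with window {3, 3},
// strides {2, 2}, and zero padding gives ceil_div(15 - 3 + 1, 2) = 7 per spatial axis, so the
// inferred output shape is {1, 2, 7, 7}.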
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <tuple>
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/op.hpp"
namespace ngraph
{
Shape infer_windowed_reduction_output_shape(const Node* node,
const Shape& data_shape,
const Strides& data_dilation,
const CoordinateDiff& data_padding_below,
const CoordinateDiff& data_padding_above,
const Shape& window_shape,
const Strides& window_strides,
const Strides& window_dilation,
bool is_window_all_in_padding_allowed);
std::tuple<element::Type, Shape>
infer_convolution_forward(const Node* node,
element::Type et_batch,
element::Type et_filters,
const Shape& data_batch_shape,
const Strides& data_dilation,
const CoordinateDiff& data_padding_below,
const CoordinateDiff& data_padding_above,
const Shape& filters_shape,
const Strides& filter_strides,
const Strides& filter_dilation);
Shape infer_batched_pooling_forward(const Node* node,
const Shape& data_batch_shape,
const CoordinateDiff& data_padding_below,
const CoordinateDiff& data_padding_above,
const Shape& window_shape,
const Strides& window_strides,
bool is_window_all_in_padding_allowed);
}
......@@ -83,6 +83,30 @@ TEST(gpu_test, memory_manager_extract_arguments)
EXPECT_EQ(host, fp32_args);
}
// This test was added to catch a potential bug in the allocator: previously the allocator
// copied extra data. For example, with an alignment of 8 bytes and a 4-byte reservation,
// the allocator copied 8 bytes from input_args, which led to two potential bugs:
// 1. Copying extra data instead of zero-initializing the alignment padding.
// 2. Out-of-bounds access of input_args, which leads to undefined behavior.
TEST(gpu_test, memory_manager_argspace_alignment)
{
size_t alignment = 8;
std::vector<char> input_args = {0, 1, 2, 3, 4, 5, 6, 7};
std::vector<char> ref_args = {0, 1, 2, 3, 0, 0, 0, 0};
std::vector<char> result_args(alignment, 0);
size_t idx;
runtime::gpu::GPUPrimitiveEmitter emitter;
{
auto allocator = emitter.get_memory_allocator();
idx = allocator.reserve_argspace(input_args.data(), 4 * sizeof(char));
}
emitter.allocate_primitive_memory();
runtime::gpu::memory_primitive& mem_primitive = emitter.get_memory_primitives()[idx];
runtime::gpu::cuda_memcpyDtH(result_args.data(), mem_primitive(), alignment * sizeof(char));
EXPECT_EQ(result_args, ref_args);
}
TEST(gpu_test, memory_manager_argspace_size)
{
std::vector<float> fp32_args = {2112.0f, 2112.0f};
......
......@@ -2915,7 +2915,9 @@ TEST(type_prop, conv_invalid_0d_input)
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data batch input must have rank of at least 3"));
std::string("Data batch must have rank of at least 3 "
"(one batch axis, one input-channel axis, "
"and at least one spatial dimension)"));
}
catch (...)
{
......@@ -2938,7 +2940,9 @@ TEST(type_prop, conv_invalid_1d_input)
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data batch input must have rank of at least 3"));
std::string("Data batch must have rank of at least 3 "
"(one batch axis, one input-channel axis, "
"and at least one spatial dimension)"));
}
catch (...)
{
......@@ -2961,7 +2965,9 @@ TEST(type_prop, conv_invalid_2d_input)
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data batch input must have rank of at least 3"));
std::string("Data batch must have rank of at least 3 "
"(one batch axis, one input-channel axis, "
"and at least one spatial dimension)"));
}
catch (...)
{
......@@ -2983,7 +2989,7 @@ TEST(type_prop, conv_invalid_0_batch_size)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Data batch size is zero"));
EXPECT_HAS_SUBSTRING(error.what(), std::string("Batch size is zero"));
}
catch (...)
{
......@@ -3005,7 +3011,7 @@ TEST(type_prop, conv_invalid_0_input_channels)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Input channel count is zero"));
EXPECT_HAS_SUBSTRING(error.what(), std::string("Data batch channel count is zero"));
}
catch (...)
{
......@@ -3028,7 +3034,8 @@ TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_many)
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Filter input must have rank equal to the data batch"));
std::string("Data shape (Shape{10, 10}) does not have same rank as "
"the window shape (Shape{3, 3, 3})"));
}
catch (...)
{
......@@ -3051,7 +3058,8 @@ TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_few)
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Filter input must have rank equal to the data batch"));
std::string("Data shape (Shape{10, 10}) does not have "
"same rank as the window shape (Shape{3})"));
}
catch (...)
{
......@@ -3073,7 +3081,7 @@ TEST(type_prop, conv_invalid_0_output_channels)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Output channel count for filters is zero"));
EXPECT_HAS_SUBSTRING(error.what(), std::string("Filter output channel count is zero"));
}
catch (...)
{
......@@ -3095,9 +3103,10 @@ TEST(type_prop, conv_invalid_input_channel_mismatch)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Input channel count for filters (3) does not match the "
"number of channels in the data batch (2)"));
EXPECT_HAS_SUBSTRING(
error.what(),
std::string(
"Data batch channel count (2) does not match filter input channel count (3)"));
}
catch (...)
{
......@@ -3119,10 +3128,9 @@ TEST(type_prop, conv_invalid_movement_stride_rank)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string(
"Rank of window movement strides does not match the number of spatial dimensions"));
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data shape (Shape{10, 10}) does not have same rank as "
"the window strides (Strides{2, 3, 8})"));
}
catch (...)
{
......@@ -3144,10 +3152,9 @@ TEST(type_prop, conv_invalid_window_dilation_stride_rank)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string(
"Rank of window dilation strides does not match the number of spatial dimensions"));
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data shape (Shape{10, 10}) does not have same rank as "
"the window dilation (Strides{2, 3, 8})"));
}
catch (...)
{
......@@ -3175,10 +3182,9 @@ TEST(type_prop, conv_invalid_data_dilation_stride_rank)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string(
"Rank of data dilation strides does not match the number of spatial dimensions"));
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data shape (Shape{10, 10}) does not have same rank as "
"the data dilation (Strides{2, 3, 8})"));
}
catch (...)
{
......@@ -3205,10 +3211,9 @@ TEST(type_prop, conv_invalid_padding_below_rank)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string(
"Rank of the padding below does not match the number of spatial dimensions"));
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data shape (Shape{10, 10}) does not have same rank as "
"the data padding below (CoordinateDiff{0, 0, 0})"));
}
catch (...)
{
......@@ -3235,10 +3240,9 @@ TEST(type_prop, conv_invalid_padding_above_rank)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string(
"Rank of the padding above does not match the number of spatial dimensions"));
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data shape (Shape{10, 10}) does not have same rank as "
"the data padding above (CoordinateDiff{0, 0, 0})"));
}
catch (...)
{
......@@ -3255,8 +3259,8 @@ TEST(type_prop, conv_invalid_input_spatial_size_negative_after_padding)
{
auto conv = make_shared<op::Convolution>(param0,
param1,
Strides{0, 0},
Strides{0, 0},
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{-4, 0},
CoordinateDiff{-7, 0});
......@@ -3265,9 +3269,9 @@ TEST(type_prop, conv_invalid_input_spatial_size_negative_after_padding)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Input dimension after padding and dilation is non-positive"));
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data shape after padding and dilation has dimension less "
"than 1 (dim: -1) at axis 0"));
}
catch (...)
{
......@@ -3284,8 +3288,8 @@ TEST(type_prop, conv_invalid_input_spatial_size_zero_after_padding)
{
auto conv = make_shared<op::Convolution>(param0,
param1,
Strides{0, 0},
Strides{0, 0},
Strides{1, 1},
Strides{1, 1},
CoordinateDiff{-4, 0},
CoordinateDiff{-6, 0});
......@@ -3294,9 +3298,9 @@ TEST(type_prop, conv_invalid_input_spatial_size_zero_after_padding)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Input dimension after padding and dilation is non-positive"));
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data shape after padding and dilation has dimension less "
"than 1 (dim: 0) at axis 0"));
}
catch (...)
{
......@@ -3318,9 +3322,9 @@ TEST(type_prop, conv_invalid_input_spatial_size_0)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Input dimension after padding and dilation is non-positive"));
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data shape after padding and dilation has "
"dimension less than 1 (dim: 0) at axis 0"));
}
catch (...)
{
......@@ -3342,8 +3346,9 @@ TEST(type_prop, conv_invalid_window_size_0)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Filters shape at spatial dimension 1 is zero"));
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Window after dilation has dimension less than 1 (dim: 0) at axis 1"));
}
catch (...)
{
......@@ -3365,8 +3370,9 @@ TEST(type_prop, conv_invalid_window_dilation_stride_0)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Window dilation stride at spatial dimension 1 is zero"));
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Window dilation (Strides{2, 0}) has zero dimension at axis 1"));
}
catch (...)
{
......@@ -3394,8 +3400,9 @@ TEST(type_prop, conv_invalid_data_dilation_stride_0)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data dilation stride at spatial dimension 1 is zero"));
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Data dilation (Strides{2, 0}) has zero dimension at axis 1"));
}
catch (...)
{
......@@ -3418,8 +3425,8 @@ TEST(type_prop, conv_invalid_dilated_window_too_large)
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Post-dilation window shape is smaller than the "
"post-padding/dilation input item shape"));
std::string("Window after dilation has dimension (dim: 9) larger than "
"the data shape after padding (dim: 8) at axis 0"));
}
catch (...)
{
......@@ -3441,8 +3448,9 @@ TEST(type_prop, conv_invalid_movement_stride_0)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Window movement stride at spatial dimension 0 is zero"));
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Window strides (Strides{0, 1}) has zero dimension at axis 0"));
}
catch (...)
{
......@@ -3636,7 +3644,7 @@ TEST(type_prop, max_pool_invalid_0_batch_size)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Data batch size is zero"));
EXPECT_HAS_SUBSTRING(error.what(), std::string("Batch size is zero"));
}
catch (...)
{
......@@ -3682,7 +3690,7 @@ TEST(type_prop, max_pool_invalid_wrong_number_of_window_dimensions_too_many)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Window shape rank does not match number of spatial dimensions"));
std::string("Window shape (Shape{3, 3, 3}) does not have required rank (2)"));
}
catch (...)
{
......@@ -3705,8 +3713,7 @@ TEST(type_prop, max_pool_invalid_wrong_number_of_window_dimensions_too_few)
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Window shape rank does not match number of spatial dimensions"));
error.what(), std::string("Window shape (Shape{3}) does not have required rank (2)"));
}
catch (...)
{
......@@ -3731,7 +3738,7 @@ TEST(type_prop, max_pool_invalid_movement_stride_rank)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Window movement stride rank does not match number of spatial dimensions"));
std::string("Window shape (Strides{2, 3, 8}) does not have required rank (2)"));
}
catch (...)
{
......@@ -3753,9 +3760,9 @@ TEST(type_prop, max_pool_invalid_input_data_size_0)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Data input spatial dimension 0 has zero length even after padding"));
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Data shape after padding and dilation has "
"dimension less than 1 (dim: 0) at axis 0"));
}
catch (...)
{
......@@ -3777,7 +3784,9 @@ TEST(type_prop, max_pool_invalid_window_size_0)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(), std::string("Window shape dimension 1 has zero length"));
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Window after dilation has dimension less than 1 (dim: 0) at axis 1"));
}
catch (...)
{
......@@ -3799,9 +3808,9 @@ TEST(type_prop, max_pool_invalid_dilated_too_large)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Window shape after padding is larger than the spatial dimensions"));
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Window after dilation has dimension (dim: 9) larger than "
"the data shape after padding (dim: 8) at axis 0"));
}
catch (...)
{
......@@ -3824,8 +3833,9 @@ TEST(type_prop, max_pool_invalid_movement_stride_0)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
std::string("Window movement strides dimension 0 has zero length"));
EXPECT_HAS_SUBSTRING(
error.what(),
std::string("Window strides (Strides{0, 1}) has zero dimension at axis 0"));
}
catch (...)
{
......@@ -5927,7 +5937,7 @@ TEST(type_prop, avg_pool_invalid_0_batch_size)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Data batch size is zero");
EXPECT_HAS_SUBSTRING(error.what(), "Batch size is zero");
}
catch (...)
{
......@@ -5972,7 +5982,7 @@ TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_many)
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
"Window shape rank does not match number of spatial dimensions");
"Window shape (Shape{3, 3, 3}) does not have required rank (2)");
}
catch (...)
{
......@@ -5995,7 +6005,7 @@ TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_few)
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
"Window shape rank does not match number of spatial dimensions");
"Window shape (Shape{3}) does not have required rank (2)");
}
catch (...)
{
......@@ -6018,9 +6028,8 @@ TEST(type_prop, avg_pool_invalid_movement_stride_rank)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(
error.what(),
"Window movement stride rank does not match number of spatial dimensions");
EXPECT_HAS_SUBSTRING(error.what(),
"Window shape (Strides{2, 3, 8}) does not have required rank (2)");
}
catch (...)
{
......@@ -6046,8 +6055,9 @@ TEST(type_prop, avg_pool_invalid_padding_below_rank)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
"Below-padding rank does not match number of spatial dimensions");
EXPECT_HAS_SUBSTRING(
error.what(),
"Data padding below (CoordinateDiff{1, 2, 3}) does not have required rank (2)");
}
catch (...)
{
......@@ -6073,8 +6083,9 @@ TEST(type_prop, avg_pool_invalid_padding_above_rank)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
"Above-padding rank does not match number of spatial dimensions");
EXPECT_HAS_SUBSTRING(
error.what(),
"Data padding above (CoordinateDiff{1, 2, 3}) does not have required rank (2");
}
catch (...)
{
......@@ -6096,8 +6107,9 @@ TEST(type_prop, avg_pool_invalid_input_item_size_0)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
"Data input spatial dimension 0 has zero length even after padding");
EXPECT_HAS_SUBSTRING(
error.what(),
"Data shape after padding and dilation has dimension less than 1 (dim: 0) at axis 0");
}
catch (...)
{
......@@ -6119,7 +6131,8 @@ TEST(type_prop, avg_pool_invalid_window_size_0)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Window shape dimension 1 has zero length");
EXPECT_HAS_SUBSTRING(error.what(),
"Window after dilation has dimension less than 1 (dim: 0) at axis 1");
}
catch (...)
{
......@@ -6142,7 +6155,8 @@ TEST(type_prop, avg_pool_invalid_dilated_too_large)
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(),
"Window shape after padding is larger than the spatial dimensions");
"Window after dilation has dimension (dim: 9) larger than the data "
"shape after padding (dim: 8) at axis 0");
}
catch (...)
{
......@@ -6150,6 +6164,20 @@ TEST(type_prop, avg_pool_invalid_dilated_too_large)
}
}
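// A window larger than the unpadded input is still valid if it fits once padding is included:
// the 8x8 input plus one row/column of above-padding gives a 9x9 padded shape, so the 9x9
// window fits exactly once and the output spatial shape is 1x1.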
TEST(type_prop, avg_pool_larger_than_pre_padding_but_fits_in_post_padding)
{
auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 8, 8});
Shape window_shape{9, 9};
Strides window_strides{1, 1};
Shape padding_below{0, 0};
Shape padding_above{1, 1};
auto avg_pool =
make_shared<op::AvgPool>(param, window_shape, window_strides, padding_below, padding_above);
ASSERT_EQ(avg_pool->get_output_element_type(0), element::f32);
ASSERT_EQ(avg_pool->get_output_shape(0), (Shape{6, 2, 1, 1}));
}
TEST(type_prop, avg_pool_invalid_movement_stride_0)
{
// Deduce type
......@@ -6165,7 +6193,8 @@ TEST(type_prop, avg_pool_invalid_movement_stride_0)
}
catch (const NodeValidationError& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "Window movement strides dimension 0 has zero length");
EXPECT_HAS_SUBSTRING(error.what(),
"Window strides (Strides{0, 1}) has zero dimension at axis 0");
}
catch (...)
{
......