Commit 79f27b2e authored by Jayaram Bobba's avatar Jayaram Bobba Committed by Scott Cyphers

Support for complex ops used in object detection models (#2841)

* - Added support for complex ops used in object detection models
  - PriorBox, PriorBoxClustered, Proposal, RegionYolo, ReorgYolo

* Added unit test file

* Fix documentation errors
parent cd76c79f
@@ -163,6 +163,16 @@ set (SRC
op/experimental/quantized_dot_bias.hpp
op/experimental/transpose.cpp
op/experimental/transpose.hpp
op/experimental/layers/prior_box.cpp
op/experimental/layers/prior_box.hpp
op/experimental/layers/prior_box_clustered.cpp
op/experimental/layers/prior_box_clustered.hpp
op/experimental/layers/proposal.hpp
op/experimental/layers/proposal.cpp
op/experimental/layers/region_yolo.hpp
op/experimental/layers/region_yolo.cpp
op/experimental/layers/reorg_yolo.hpp
op/experimental/layers/reorg_yolo.cpp
op/floor.cpp
op/floor.hpp
op/gather.cpp
...
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "prior_box.hpp"
#include "ngraph/op/constant.hpp"
using namespace std;
using namespace ngraph;
op::PriorBox::PriorBox(const shared_ptr<Node>& layer_shape,
const shared_ptr<Node>& image_shape,
const std::vector<float>& min_sizes,
const std::vector<float>& max_sizes,
const std::vector<float>& aspect_ratios,
const bool clip,
const bool flip,
const float step,
const float offset,
const std::vector<float>& variances,
const bool scale_all)
: Op("PriorBox", check_single_output_args({layer_shape, image_shape}))
, m_min_sizes(min_sizes)
, m_max_sizes(max_sizes)
, m_aspect_ratios(aspect_ratios)
, m_clip(clip)
, m_flip(flip)
, m_step(step)
, m_offset(offset)
, m_variances(variances)
, m_scale_all(scale_all)
{
constructor_validate_and_infer_types();
}
void op::PriorBox::validate_and_infer_types()
{
// shape node should have integer data type. For now we only allow i64
auto layer_shape_et = get_input_element_type(0);
NODE_VALIDATION_CHECK(this,
layer_shape_et.compatible(element::Type_t::i64),
"layer shape input must have element type i64, but has ",
layer_shape_et);
auto image_shape_et = get_input_element_type(1);
NODE_VALIDATION_CHECK(this,
image_shape_et.compatible(element::Type_t::i64),
"image shape input must have element type i64, but has ",
image_shape_et);
auto layer_shape_rank = get_input_partial_shape(0).rank();
auto image_shape_rank = get_input_partial_shape(1).rank();
NODE_VALIDATION_CHECK(this,
layer_shape_rank.compatible(image_shape_rank),
"layer shape input rank ",
layer_shape_rank,
" must match image shape input rank ",
image_shape_rank);
set_input_is_relevant_to_shape(0);
if (auto const_shape = dynamic_pointer_cast<op::Constant>(get_argument(0)))
{
NODE_VALIDATION_CHECK(this,
shape_size(const_shape->get_shape()) == 2,
"Layer shape must have rank 2",
const_shape->get_shape());
auto layer_shape = static_cast<const int64_t*>(const_shape->get_data_ptr());
size_t num_priors = 0;
// {Prior boxes, Variance-adjusted prior boxes}
if (m_scale_all)
{
num_priors = ((m_flip ? 2 : 1) * m_aspect_ratios.size() + 1) * m_min_sizes.size() +
m_max_sizes.size();
}
else
{
num_priors = (m_flip ? 2 : 1) * m_aspect_ratios.size() + m_min_sizes.size() - 1;
}
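// e.g. aspect_ratios={1,2,0.5} with flip=false and min_sizes={2,3} gives
// num_priors = 1 * 3 + 2 - 1 = 4; the output's second dimension is
// 4 box coordinates * layer width * layer height * num_priors.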
set_output_type(
0, element::f32, Shape{2, 4 * layer_shape[0] * layer_shape[1] * num_priors});
}
else
{
set_output_type(0, element::f32, PartialShape::dynamic());
}
}
shared_ptr<Node> op::PriorBox::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<PriorBox>(new_args.at(0),
new_args.at(1),
m_min_sizes,
m_max_sizes,
m_aspect_ratios,
m_clip,
m_flip,
m_step,
m_offset,
m_variances,
m_scale_all);
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Layer which generates prior boxes of specified sizes
/// normalized to input image size
class PriorBox : public Op
{
public:
/// \brief Constructs a PriorBox operation
///
/// \param layer_shape Shape of layer for which prior boxes are computed
/// \param image_shape Shape of image to which prior boxes are scaled
/// \param min_sizes Desired minimum sizes of prior boxes
/// \param max_sizes Desired maximum sizes of prior boxes
/// \param aspect_ratios Aspect ratios of prior boxes
/// \param clip Clip output to [0,1]
/// \param flip Flip aspect ratios
/// \param step Distance between prior box centers
/// \param offset Box offset relative to top center of image
/// \param variances Values to adjust prior boxes with
/// \param scale_all Scale all sizes
PriorBox(const std::shared_ptr<Node>& layer_shape,
const std::shared_ptr<Node>& image_shape,
const std::vector<float>& min_sizes,
const std::vector<float>& max_sizes,
const std::vector<float>& aspect_ratios,
const bool clip,
const bool flip,
const float step,
const float offset,
const std::vector<float>& variances,
const bool scale_all);
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
private:
std::vector<float> m_min_sizes;
std::vector<float> m_max_sizes;
std::vector<float> m_aspect_ratios;
bool m_clip;
bool m_flip;
float m_step;
float m_offset;
std::vector<float> m_variances;
bool m_scale_all;
};
}
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "prior_box_clustered.hpp"
#include "ngraph/op/constant.hpp"
using namespace std;
using namespace ngraph;
op::PriorBoxClustered::PriorBoxClustered(const shared_ptr<Node>& layer_shape,
const shared_ptr<Node>& image_shape,
const size_t num_priors,
const std::vector<float>& widths,
const std::vector<float>& heights,
const bool clip,
const float step_widths,
const float step_heights,
const float offset,
const std::vector<float>& variances)
: Op("PriorBoxClustered", check_single_output_args({layer_shape, image_shape}))
, m_num_priors(num_priors)
, m_widths(widths)
, m_heights(heights)
, m_clip(clip)
, m_step_widths(step_widths)
, m_step_heights(step_heights)
, m_offset(offset)
, m_variances(variances)
{
constructor_validate_and_infer_types();
}
void op::PriorBoxClustered::validate_and_infer_types()
{
// shape node should have integer data type. For now we only allow i64
auto layer_shape_et = get_input_element_type(0);
NODE_VALIDATION_CHECK(this,
layer_shape_et.compatible(element::Type_t::i64),
"layer shape input must have element type i64, but has ",
layer_shape_et);
auto image_shape_et = get_input_element_type(1);
NODE_VALIDATION_CHECK(this,
image_shape_et.compatible(element::Type_t::i64),
"image shape input must have element type i64, but has ",
image_shape_et);
auto layer_shape_rank = get_input_partial_shape(0).rank();
auto image_shape_rank = get_input_partial_shape(1).rank();
NODE_VALIDATION_CHECK(this,
layer_shape_rank.compatible(image_shape_rank),
"layer shape input rank ",
layer_shape_rank,
" must match image shape input rank ",
image_shape_rank);
NODE_VALIDATION_CHECK(this,
m_widths.size() == m_num_priors,
"Num_priors ",
m_num_priors,
" doesn't match size of widths vector ",
m_widths.size());
NODE_VALIDATION_CHECK(this,
m_heights.size() == m_num_priors,
"Num_priors ",
m_num_priors,
" doesn't match size of heights vector ",
m_heights.size());
set_input_is_relevant_to_shape(0);
if (auto const_shape = dynamic_pointer_cast<op::Constant>(get_argument(0)))
{
NODE_VALIDATION_CHECK(this,
shape_size(const_shape->get_shape()) == 2,
"Layer shape must have rank 2",
const_shape->get_shape());
auto layer_shape = static_cast<const int64_t*>(const_shape->get_data_ptr());
// {Prior boxes, variances-adjusted prior boxes}
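// Second dimension is 4 coordinates * layer width * layer height * num_priors,
// e.g. a 19x19 layer with num_priors = 3 gives 4 * 19 * 19 * 3 = 4332.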
set_output_type(
0, element::f32, Shape{2, 4 * layer_shape[0] * layer_shape[1] * m_num_priors});
}
else
{
set_output_type(0, element::f32, PartialShape::dynamic());
}
}
shared_ptr<Node> op::PriorBoxClustered::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<PriorBoxClustered>(new_args.at(0),
new_args.at(1),
m_num_priors,
m_widths,
m_heights,
m_clip,
m_step_widths,
m_step_heights,
m_offset,
m_variances);
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Layer which generates prior boxes of specified widths and heights,
/// normalized to the input image size
class PriorBoxClustered : public Op
{
public:
/// \brief Constructs a PriorBoxClustered operation
///
/// \param layer_shape Shape of layer for which prior boxes are computed
/// \param image_shape Shape of image to which prior boxes are scaled
/// \param num_priors Number of prior boxes
/// \param widths Desired widths of prior boxes
/// \param heights Desired heights of prior boxes
/// \param clip Clip output to [0,1]
/// \param step_widths Distance between prior box centers along width
/// \param step_heights Distance between prior box centers along height
/// \param offset Box offset relative to top center of image
/// \param variances Values to adjust prior boxes with
PriorBoxClustered(const std::shared_ptr<Node>& layer_shape,
const std::shared_ptr<Node>& image_shape,
const size_t num_priors,
const std::vector<float>& widths,
const std::vector<float>& heights,
const bool clip,
const float step_widths,
const float step_heights,
const float offset,
const std::vector<float>& variances);
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
private:
size_t m_num_priors;
std::vector<float> m_widths;
std::vector<float> m_heights;
bool m_clip;
float m_step_widths;
float m_step_heights;
float m_offset;
std::vector<float> m_variances;
};
}
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "proposal.hpp"
#include "ngraph/op/constant.hpp"
using namespace std;
using namespace ngraph;
op::Proposal::Proposal(const std::shared_ptr<Node>& class_probs,
const std::shared_ptr<Node>& class_logits,
const std::shared_ptr<Node>& image_shape,
const size_t base_size,
const size_t pre_nms_topn,
const size_t post_nms_topn,
const float nms_threshold,
const size_t feature_stride,
const size_t min_size,
const std::vector<float>& anchor_ratios,
const std::vector<float>& anchor_scales,
const bool clip_before_nms,
const bool clip_after_nms,
const bool normalize,
const float box_size_scale,
const float box_coord_scale,
const std::string& algo)
: Op("Proposal", check_single_output_args({class_probs, class_logits, image_shape}))
, m_base_size(base_size)
, m_pre_nms_topn(pre_nms_topn)
, m_post_nms_topn(post_nms_topn)
, m_nms_threshold(nms_threshold)
, m_feature_stride(feature_stride)
, m_min_size(min_size)
, m_anchor_ratios(anchor_ratios)
, m_anchor_scales(anchor_scales)
, m_clip_before_nms(clip_before_nms)
, m_clip_after_nms(clip_after_nms)
, m_normalize(normalize)
, m_box_size_scale(box_size_scale)
, m_box_coord_scale(box_coord_scale)
, m_algo(algo)
{
constructor_validate_and_infer_types();
}
void op::Proposal::validate_and_infer_types()
{
// shape node should have integer data type. For now we only allow i64
auto image_shape_et = get_input_element_type(2);
NODE_VALIDATION_CHECK(this,
image_shape_et.compatible(element::Type_t::i64),
"image shape input must have element type i64, but has ",
image_shape_et);
set_input_is_relevant_to_shape(2);
if (auto const_shape = dynamic_pointer_cast<op::Constant>(get_argument(2)))
{
NODE_VALIDATION_CHECK(this,
shape_size(const_shape->get_shape()) == 2,
"Layer shape must have rank 2",
const_shape->get_shape());
auto image_shape = static_cast<const int64_t*>(const_shape->get_data_ptr());
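// image_shape[0] * post_nms_topn proposals, each a row of 5 values
// (commonly an image index followed by 4 box coordinates).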
set_output_type(0, element::f32, Shape{image_shape[0] * m_post_nms_topn, 5});
}
else
{
set_output_type(0, element::f32, PartialShape::dynamic());
}
}
shared_ptr<Node> op::Proposal::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<Proposal>(new_args.at(0),
new_args.at(1),
new_args.at(2),
m_base_size,
m_pre_nms_topn,
m_post_nms_topn,
m_nms_threshold,
m_feature_stride,
m_min_size,
m_anchor_ratios,
m_anchor_scales,
m_clip_before_nms,
m_clip_after_nms,
m_normalize,
m_box_size_scale,
m_box_coord_scale,
m_algo);
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/op.hpp"
namespace ngraph
{
namespace op
{
class Proposal : public Op
{
public:
/// \brief Constructs a Proposal operation
///
/// \param class_probs Class probability scores
/// \param class_logits Class prediction logits
/// \param image_shape Shape of image
/// \param base_size Base anchor size
/// \param pre_nms_topn Number of boxes before nms
/// \param post_nms_topn Number of boxes after nms
/// \param nms_threshold Threshold for nms
/// \param feature_stride Feature stride
/// \param min_size Minimum box size
/// \param anchor_ratios Ratios for anchor generation
/// \param anchor_scales Scales for anchor generation
/// \param clip_before_nms Clip before NMS
/// \param clip_after_nms Clip after NMS
/// \param normalize Normalize boxes to [0,1]
/// \param box_size_scale Scale factor for scaling box size logits
/// \param box_coord_scale Scale factor for scaling box coordinate logits
/// \param algo Calculation algorithm to use
Proposal(const std::shared_ptr<Node>& class_probs,
const std::shared_ptr<Node>& class_logits,
const std::shared_ptr<Node>& image_shape,
const size_t base_size,
const size_t pre_nms_topn,
const size_t post_nms_topn,
const float nms_threshold,
const size_t feature_stride,
const size_t min_size,
const std::vector<float>& anchor_ratios,
const std::vector<float>& anchor_scales,
const bool clip_before_nms,
const bool clip_after_nms,
const bool normalize,
const float box_size_scale,
const float box_coord_scale,
const std::string& algo);
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
private:
size_t m_base_size;
size_t m_pre_nms_topn;
size_t m_post_nms_topn;
float m_nms_threshold;
size_t m_feature_stride;
size_t m_min_size;
std::vector<float> m_anchor_ratios;
std::vector<float> m_anchor_scales;
bool m_clip_before_nms;
bool m_clip_after_nms;
bool m_normalize;
float m_box_size_scale;
float m_box_coord_scale;
std::string m_algo;
};
}
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "region_yolo.hpp"
using namespace std;
using namespace ngraph;
op::RegionYolo::RegionYolo(const shared_ptr<Node>& input,
const size_t num_coords,
const size_t num_classes,
const size_t num_regions,
const bool do_softmax,
const vector<int64_t>& mask,
const int axis,
const int end_axis)
: Op("RegionYolo", check_single_output_args({input}))
, m_num_coords(num_coords)
, m_num_classes(num_classes)
, m_num_regions(num_regions)
, m_do_softmax(do_softmax)
, m_mask(mask)
, m_axis(axis)
, m_end_axis(end_axis)
{
constructor_validate_and_infer_types();
}
void op::RegionYolo::validate_and_infer_types()
{
auto input_et = get_input_element_type(0);
if (get_input_partial_shape(0).is_static())
{
Shape input_shape = get_input_partial_shape(0).to_shape();
Shape output_shape;
int end_axis = m_end_axis;
if (m_end_axis < 0)
{
end_axis += input_shape.size();
}
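// With do_softmax, dimensions [m_axis, end_axis] are flattened into one,
// e.g. a {1, 125, 13, 13} input with axis=0 and end_axis=1 yields {125, 13, 13}.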
if (m_do_softmax)
{
size_t flat_dim = 1;
for (size_t i = 0; i < m_axis; i++)
{
output_shape.push_back(input_shape[i]);
}
for (size_t i = m_axis; i < end_axis + 1; i++)
{
flat_dim *= input_shape[i];
}
output_shape.push_back(flat_dim);
for (size_t i = end_axis + 1; i < input_shape.size(); i++)
{
output_shape.push_back(input_shape[i]);
}
}
else
{
output_shape = {input_shape[0],
(m_num_classes + m_num_coords + 1) * m_mask.size(),
input_shape[2],
input_shape[3]};
}
set_output_type(0, input_et, output_shape);
}
else
{
set_output_type(0, input_et, PartialShape::dynamic());
}
}
shared_ptr<Node> op::RegionYolo::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<RegionYolo>(new_args.at(0),
m_num_coords,
m_num_classes,
m_num_regions,
m_do_softmax,
m_mask,
m_axis,
m_end_axis);
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/op.hpp"
namespace ngraph
{
namespace op
{
class RegionYolo : public Op
{
public:
/// \brief Constructs a RegionYolo operation
///
/// \param input Input
/// \param num_coords Number of coordinates for each region
/// \param num_classes Number of classes for each region
/// \param num_regions Number of regions
/// \param do_softmax Compute softmax
/// \param mask Mask
/// \param axis Axis to begin softmax on
/// \param end_axis Axis to end softmax on
RegionYolo(const std::shared_ptr<Node>& input,
const size_t num_coords,
const size_t num_classes,
const size_t num_regions,
const bool do_softmax,
const std::vector<int64_t>& mask,
const int axis,
const int end_axis);
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
private:
size_t m_num_coords;
size_t m_num_classes;
size_t m_num_regions;
bool m_do_softmax;
std::vector<int64_t> m_mask;
int m_axis;
int m_end_axis;
};
}
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "reorg_yolo.hpp"
#include "ngraph/op/constant.hpp"
using namespace std;
using namespace ngraph;
op::ReorgYolo::ReorgYolo(const shared_ptr<Node>& input, const Strides& stride)
: Op("ReorgYolo", check_single_output_args({input}))
, m_stride(stride)
{
constructor_validate_and_infer_types();
}
void op::ReorgYolo::validate_and_infer_types()
{
auto input_et = get_input_element_type(0);
if (get_input_partial_shape(0).is_static())
{
auto input_shape = get_input_partial_shape(0).to_shape();
Shape output_shape{input_shape[0], input_shape[1]};
for (size_t i = 2; i < input_shape.size(); i++)
{
output_shape.push_back(input_shape[i] / m_stride[0]);
output_shape[1] *= m_stride[0];
}
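// Each spatial dimension is divided by the stride and the channel count is
// multiplied by the stride once per spatial dimension,
// e.g. {2, 24, 34, 62} with stride 2 becomes {2, 96, 17, 31}.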
set_output_type(0, input_et, output_shape);
}
else
{
set_output_type(0, input_et, PartialShape::dynamic());
}
}
shared_ptr<Node> op::ReorgYolo::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<ReorgYolo>(new_args.at(0), m_stride);
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/op.hpp"
namespace ngraph
{
namespace op
{
class ReorgYolo : public Op
{
public:
/// \brief Constructs a ReorgYolo operation
///
/// \param input Input
/// \param stride Stride to reorganize input by
ReorgYolo(const std::shared_ptr<Node>& input, const Strides& stride);
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
private:
Strides m_stride;
};
}
}
@@ -65,6 +65,7 @@ set(SRC
specialize_shapes.cpp
tensor.cpp
type_prop.cpp
type_prop_layers.cpp
util.cpp
zero_dim_tensor_elimination.cpp
)
...
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/experimental/layers/prior_box.hpp"
#include "ngraph/op/experimental/layers/prior_box_clustered.hpp"
#include "ngraph/op/experimental/layers/proposal.hpp"
#include "ngraph/op/experimental/layers/region_yolo.hpp"
#include "ngraph/op/experimental/layers/reorg_yolo.hpp"
#include <memory>
using namespace std;
using namespace ngraph;
TEST(type_prop_layers, prior_box1)
{
auto layer_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {32, 32});
auto image_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {300, 300});
auto pb = make_shared<op::PriorBox>(layer_shape,
image_shape,
std::vector<float>{2.0f, 3.0f},
std::vector<float>{},
std::vector<float>{1.0f, 2.0f, 0.5f},
false,
false,
1.0f,
0.5f,
std::vector<float>{1.0f, 0.0f, 0.0f, 2.0f},
false);
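// num_priors = 1 * 3 aspect ratios + 2 min sizes - 1 = 4; 4 * 32 * 32 * 4 = 16384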
ASSERT_EQ(pb->get_shape(), (Shape{2, 16384}));
}
TEST(type_prop_layers, prior_box2)
{
auto layer_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {32, 32});
auto image_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {300, 300});
auto pb = make_shared<op::PriorBox>(layer_shape,
image_shape,
std::vector<float>{2.0f, 3.0f},
std::vector<float>{},
std::vector<float>{1.0f, 2.0f, 0.5f},
false,
true,
1.0f,
0.5f,
std::vector<float>{1.0f, 0.0f, 0.0f, 2.0f},
false);
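// flip doubles the aspect ratios: num_priors = 2 * 3 + 2 - 1 = 7; 4 * 32 * 32 * 7 = 28672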
ASSERT_EQ(pb->get_shape(), (Shape{2, 28672}));
}
TEST(type_prop_layers, prior_box3)
{
auto layer_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {1, 1});
auto image_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {300, 300});
auto pb = make_shared<op::PriorBox>(layer_shape,
image_shape,
std::vector<float>{256.0f},
std::vector<float>{315.0f},
std::vector<float>{2.0f},
false,
true,
1.0f,
0.5f,
std::vector<float>{1.0f, 0.0f, 0.0f, 2.0f},
true);
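// scale_all: num_priors = (2 * 1 + 1) * 1 + 1 = 4; 4 * 1 * 1 * 4 = 16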
ASSERT_EQ(pb->get_shape(), (Shape{2, 16}));
}
TEST(type_prop_layers, prior_box_clustered)
{
auto layer_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {19, 19});
auto image_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {300, 300});
auto pbc = make_shared<op::PriorBoxClustered>(layer_shape,
image_shape,
3,
std::vector<float>{4.0f, 2.0f, 3.2f},
std::vector<float>{1.0f, 2.0f, 1.1f},
false,
1.0f,
2.0f,
0.0f,
std::vector<float>{1.0f, 0.0f, 0.0f, 2.0f});
// Output shape - 4 * 19 * 19 * 3 (num_priors)
ASSERT_EQ(pbc->get_shape(), (Shape{2, 4332}));
}
TEST(type_prop_layers, proposal)
{
auto class_probs = make_shared<op::Parameter>(element::f32, Shape{1, 12, 34, 62});
auto class_logits = make_shared<op::Parameter>(element::f32, Shape{1, 24, 34, 62});
auto image_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {1, 6});
auto op = make_shared<op::Proposal>(class_probs,
class_logits,
image_shape,
1,
20,
200,
0.0f,
1,
1,
std::vector<float>{},
std::vector<float>{},
false,
false,
false,
0.1f,
0.1f,
std::string{""});
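// image_shape[0] (1) * post_nms_topn (200) proposals with 5 values each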
ASSERT_EQ(op->get_shape(), (Shape{200, 5}));
}
TEST(type_prop_layers, region_yolo1)
{
auto inputs = make_shared<op::Parameter>(element::f32, Shape{1, 125, 13, 13});
auto op = make_shared<op::RegionYolo>(inputs, 0, 0, 0, true, std::vector<int64_t>{}, 0, 1);
ASSERT_EQ(op->get_shape(), (Shape{1 * 125, 13, 13}));
}
TEST(type_prop_layers, region_yolo2)
{
auto inputs = make_shared<op::Parameter>(element::f32, Shape{1, 125, 13, 13});
auto op = make_shared<op::RegionYolo>(inputs, 0, 0, 0, true, std::vector<int64_t>{}, 0, 2);
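// end_axis 2 flattens the three leading dimensions: 1 * 125 * 13 = 1625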
ASSERT_EQ(op->get_shape(), (Shape{1 * 125 * 13, 13}));
}
TEST(type_prop_layers, region_yolo3)
{
auto inputs = make_shared<op::Parameter>(element::f32, Shape{1, 125, 13, 13});
auto op =
make_shared<op::RegionYolo>(inputs, 4, 80, 1, false, std::vector<int64_t>{6, 7, 8}, 0, -1);
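// do_softmax=false: channels = (num_classes + num_coords + 1) * mask.size() = 85 * 3 = 255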
ASSERT_EQ(op->get_shape(), (Shape{1, (80 + 4 + 1) * 3, 13, 13}));
}
TEST(type_prop_layers, reorg_yolo)
{
auto inputs = make_shared<op::Parameter>(element::f32, Shape{2, 24, 34, 62});
auto op = make_shared<op::ReorgYolo>(inputs, Strides{2});
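// stride 2: channels 24 * 2 * 2 = 96; spatial dims 34 / 2 = 17 and 62 / 2 = 31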
ASSERT_EQ(op->get_shape(), (Shape{2, 96, 17, 31}));
}