Commit 3654fb10 authored by Alexander Alekhin

Merge pull request #11567 from alalek:code_quality

parents 5a706eee 471c1732
@@ -142,7 +142,7 @@ public:
         PyGILState_Release(gstate);
         if (!res)
             CV_Error(Error::StsNotImplemented, "Failed to call \"getMemoryShapes\" method");
-        pyopencv_to_generic_vec(res, outputs, ArgInfo("", 0));
+        CV_Assert(pyopencv_to_generic_vec(res, outputs, ArgInfo("", 0)));
         return false;
     }
@@ -163,7 +163,7 @@ public:
             CV_Error(Error::StsNotImplemented, "Failed to call \"forward\" method");
         std::vector<Mat> pyOutputs;
-        pyopencv_to(res, pyOutputs, ArgInfo("", 0));
+        CV_Assert(pyopencv_to(res, pyOutputs, ArgInfo("", 0)));
         CV_Assert(pyOutputs.size() == outputs.size());
         for (size_t i = 0; i < outputs.size(); ++i)
...
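Review note: both hunks above apply the same fix. `pyopencv_to` and `pyopencv_to_generic_vec` report conversion failure through their `bool` return value, and the old code dropped it, so a malformed Python result flowed on as an empty or partial vector. Wrapping the call in `CV_Assert` (which, unlike plain `assert`, stays active in release builds) turns a silent failure into a diagnosable one. A minimal sketch of the pattern, with a hypothetical `convert()` standing in for the real converters:

```cpp
#include <cassert>
#include <vector>

// Hypothetical stand-in for pyopencv_to*: failure is reported via the
// return value, not an exception, so the caller must check it.
static bool convert(int token, std::vector<int>& out)
{
    if (token < 0)
        return false;      // conversion failed, out left untouched
    out.assign(3, token);
    return true;
}

int main()
{
    std::vector<int> shapes;
    bool ok = convert(7, shapes);      // the call still does its work...
    assert(ok && "check the result");  // ...but the result is now verified
    return shapes.size() == 3 ? 0 : 1;
}
```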
@@ -1530,10 +1530,12 @@ struct Net::Impl
                         LayerData *eltwiseData = nextData;
                         // go down from the second input and find the first non-skipped layer.
                         LayerData *downLayerData = &layers[eltwiseData->inputBlobsId[1].lid];
+                        CV_Assert(downLayerData);
                         while (downLayerData->skip)
                         {
                             downLayerData = &layers[downLayerData->inputBlobsId[0].lid];
                         }
+                        CV_Assert(downLayerData);
                         // second input layer is current layer.
                         if ( ld.id == downLayerData->id )
@@ -1548,9 +1550,7 @@ struct Net::Impl
                             downLayerData = &layers[downLayerData->inputBlobsId[0].lid];
                         }
-                        Ptr<ConvolutionLayer> convLayer;
-                        if( downLayerData )
-                            convLayer = downLayerData->layerInstance.dynamicCast<ConvolutionLayer>();
+                        Ptr<ConvolutionLayer> convLayer = downLayerData->layerInstance.dynamicCast<ConvolutionLayer>();
                         // first input layer is convolution layer
                         if( !convLayer.empty() && eltwiseData->consumers.size() == 1 )
...
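Review note: `downLayerData` is the address of a map element, so the old `if( downLayerData )` guard could never be false; the new `CV_Assert`s document that invariant (and placate static analyzers) while the dead branch is dropped. The one-liner relies on `cv::Ptr::dynamicCast` returning an empty `Ptr` on a failed cast, which the existing `convLayer.empty()` check already handles. A small sketch of that contract:

```cpp
#include <opencv2/core.hpp>
#include <iostream>

struct Base    { virtual ~Base() {} };
struct Derived : Base {};
struct Other   : Base {};

int main()
{
    cv::Ptr<Base> p = cv::makePtr<Derived>();
    cv::Ptr<Derived> d = p.dynamicCast<Derived>();  // type matches: non-empty
    cv::Ptr<Other>   o = p.dynamicCast<Other>();    // mismatch: empty Ptr, no throw
    std::cout << d.empty() << " " << o.empty() << std::endl;  // prints "0 1"
    return 0;
}
```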
@@ -119,9 +119,10 @@ public:
         if (blobs.size() > 3)
         {
             CV_Assert(blobs.size() == 6);
+            const int N = Wh.cols;
             for (int i = 3; i < 6; ++i)
             {
-                CV_Assert(blobs[i].rows == Wh.cols && blobs[i].cols == Wh.cols);
+                CV_Assert(blobs[i].rows == N && blobs[i].cols == N);
                 CV_Assert(blobs[i].type() == bias.type());
             }
         }
...
@@ -504,7 +504,7 @@ static bool ocl4dnnFastBufferGEMM(const CBLAS_TRANSPOSE TransA,
     oclk_gemm_float.set(arg_idx++, (float)alpha);
     oclk_gemm_float.set(arg_idx++, (float)beta);
-    bool ret;
+    bool ret = true;
     if (TransB == CblasNoTrans || TransA != CblasNoTrans) {
         int stride = 256;
         for (int start_index = 0; start_index < K; start_index += stride) {
...
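Review note: `ret` was previously assigned only inside the kernel-dispatch branches, so a path that skipped them returned an indeterminate value, which is undefined behavior. Initializing it to `true` gives every path a defined result. A minimal sketch of the hazard, with a hypothetical `launch()` standing in for the GEMM dispatch:

```cpp
#include <iostream>

static bool launch(bool do_work)
{
    bool ret = true;    // was: bool ret;  -- indeterminate until assigned
    if (do_work)
        ret = false;    // only some branches assigned it in the old code
    return ret;         // now well-defined on every path
}

int main()
{
    std::cout << launch(false) << std::endl;  // prints "1" instead of garbage
    return 0;
}
```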
@@ -40,17 +40,18 @@ TEST(Padding_Halide, Accuracy)
 {
     static const int kNumRuns = 10;
     std::vector<int> paddings(8);
+    cv::RNG& rng = cv::theRNG();
     for (int t = 0; t < kNumRuns; ++t)
     {
         for (int i = 0; i < paddings.size(); ++i)
-            paddings[i] = rand() % 5;
+            paddings[i] = rng(5);

         LayerParams lp;
         lp.set("paddings", DictValue::arrayInt<int*>(&paddings[0], paddings.size()));
         lp.type = "Padding";
         lp.name = "testLayer";

-        Mat input({1 + rand() % 10, 1 + rand() % 10, 1 + rand() % 10, 1 + rand() % 10}, CV_32F);
+        Mat input({1 + rng(10), 1 + rng(10), 1 + rng(10), 1 + rng(10)}, CV_32F);
         test(lp, input);
     }
 }
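Review note: switching from `rand()` to `cv::theRNG()` makes the test reproducible, since the framework seeds the per-thread OpenCV generator while `rand()` depends on hidden, platform-specific libc state. `rng(n)` draws a uniform integer in `[0, n)`. A sketch of the behavior the test now relies on:

```cpp
#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::theRNG().state = 12345;     // fixed seed -> the run is reproducible
    cv::RNG& rng = cv::theRNG();
    for (int i = 0; i < 4; ++i)
        std::cout << rng(5) << " "; // uniform integers in [0, 5)
    std::cout << std::endl;
    return 0;
}
```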
@@ -633,7 +634,7 @@ TEST_P(Eltwise, Accuracy)
     eltwiseParam.set("operation", op);
     if (op == "sum" && weighted)
     {
-        RNG rng = cv::theRNG();
+        RNG& rng = cv::theRNG();
         std::vector<float> coeff(1 + numConv);
         for (int i = 0; i < coeff.size(); ++i)
         {
...
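Review note: `cv::RNG` is a small value type, so `RNG rng = cv::theRNG();` took a snapshot of the global generator; draws from the copy never advanced `cv::theRNG()`, partially defeating seed-based reproducibility. Binding by reference keeps a single stream of randomness. A minimal demonstration:

```cpp
#include <opencv2/core.hpp>
#include <cstdint>
#include <iostream>

int main()
{
    cv::theRNG().state = 42;
    uint64_t before = cv::theRNG().state;

    cv::RNG copy = cv::theRNG();   // snapshot: an independent generator
    (void)copy.next();             // advances only the copy...
    std::cout << (cv::theRNG().state == before) << std::endl;  // 1: global untouched

    cv::RNG& ref = cv::theRNG();   // alias of the global generator
    (void)ref.next();              // ...while this advances the shared state
    std::cout << (cv::theRNG().state == before) << std::endl;  // 0
    return 0;
}
```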
@@ -376,7 +376,8 @@ TEST(Test_TensorFlow, memory_read)
 class ResizeBilinearLayer CV_FINAL : public Layer
 {
 public:
-    ResizeBilinearLayer(const LayerParams &params) : Layer(params)
+    ResizeBilinearLayer(const LayerParams &params) : Layer(params),
+        outWidth(0), outHeight(0), factorWidth(1), factorHeight(1)
     {
         CV_Assert(!params.get<bool>("align_corners", false));
         CV_Assert(!blobs.empty());
...
@@ -285,7 +285,8 @@ struct CvtHelper
 template< typename VScn, typename VDcn, typename VDepth, SizePolicy sizePolicy = NONE >
 struct OclHelper
 {
-    OclHelper( InputArray _src, OutputArray _dst, int dcn)
+    OclHelper( InputArray _src, OutputArray _dst, int dcn) :
+        nArgs(0)
     {
         src = _src.getUMat();
         Size sz = src.size(), dstSz;
...
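Review note: the two constructor hunks above are the same class of fix. `ResizeBilinearLayer` and `OclHelper` left scalar members (`outWidth`, `outHeight`, `factorWidth`, `factorHeight`, `nArgs`) uninitialized until later setup code ran, so early-exit or error paths could read indeterminate values, and compilers rightly warned about it; moving the defaults into the member-initializer list gives each object a defined state from the moment of construction.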
@@ -357,20 +357,22 @@ void CV_RotatedRectangleIntersectionTest::test13()
 void CV_RotatedRectangleIntersectionTest::test14()
 {
     const int kNumTests = 100;
-    const int kWidth = 5;
-    const int kHeight = 5;
+    const float kWidth = 5;
+    const float kHeight = 5;
     RotatedRect rects[2];
     std::vector<Point2f> inter;
+    cv::RNG& rng = cv::theRNG();
     for (int i = 0; i < kNumTests; ++i)
     {
         for (int j = 0; j < 2; ++j)
         {
-            rects[j].center = Point2f((float)(rand() % kWidth), (float)(rand() % kHeight));
-            rects[j].size = Size2f(rand() % kWidth + 1.0f, rand() % kHeight + 1.0f);
-            rects[j].angle = (float)(rand() % 360);
+            rects[j].center = Point2f(rng.uniform(0.0f, kWidth), rng.uniform(0.0f, kHeight));
+            rects[j].size = Size2f(rng.uniform(1.0f, kWidth), rng.uniform(1.0f, kHeight));
+            rects[j].angle = rng.uniform(0.0f, 360.0f);
         }
-        rotatedRectangleIntersection(rects[0], rects[1], inter);
-        ASSERT_TRUE(inter.size() < 4 || isContourConvex(inter));
+        int res = rotatedRectangleIntersection(rects[0], rects[1], inter);
+        EXPECT_TRUE(res == INTERSECT_NONE || res == INTERSECT_PARTIAL || res == INTERSECT_FULL) << res;
+        ASSERT_TRUE(inter.size() < 4 || isContourConvex(inter)) << inter;
     }
 }
...
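Review note: beyond replacing `rand()`, this hunk widens the assertions. `rng.uniform(a, b)` returns a float in `[a, b)`, so drawing sizes from `[1, kWidth)` avoids degenerate rectangles without the integer truncation of `rand() % kWidth + 1.0f`, which is why `kWidth`/`kHeight` become `float`. Capturing the return code and checking it with `EXPECT_TRUE` (rather than `ASSERT_TRUE`) lets the loop continue after a bad status, so one run reports every failing iteration, and the `<< res` / `<< inter` streams attach the offending values to the failure message.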