Commit 863d6ad3 authored by Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

parents fa50918e 6ae9809b
@@ -57,4 +57,4 @@ In order to keep a clean overview containing all contributed modules the followi
 1. Update the README.md file under the modules folder. Here you add your module with a single line description.
-2. Add a README.md inside your own module folder. This README explains which functionality (seperate functions) is available, links to the corresponding samples and explains in somewhat more detail what the module is expected to do. If any extra requirements are needed to build the module without problems, add them here also.
+2. Add a README.md inside your own module folder. This README explains which functionality (separate functions) is available, links to the corresponding samples and explains in somewhat more detail what the module is expected to do. If any extra requirements are needed to build the module without problems, add them here also.
@@ -79,7 +79,7 @@ $ ./example_cnn_3dobj_classify -mean_file=../data/images_mean/triplet_mean.binar
 ```
 ===========================================================
 ##Demo3: Model performance test
-####This demo will run a performance test of a trained CNN model on several images. If the the model fails on telling different samples from seperate classes apart, or is confused on samples with similar pose but from different classes, this will give some information for model analysis.
+####This demo will run a performance test of a trained CNN model on several images. If the model fails at telling different samples from separate classes apart, or is confused by samples with similar pose but from different classes, this will give some information for model analysis.
 ```
 $ ./example_cnn_3dobj_model_analysis
 ```
@@ -73,7 +73,7 @@ the use of this software, even if advised of the possibility of such damage.
 As CNN-based learning algorithms show better performance on classification problems,
 rich labeled data can be more useful in the training stage. 3D object classification and pose estimation
-is a jointed mission aimming at seperate different posed apart in the descriptor form.
+is a joint task aiming at telling differently posed objects apart in the descriptor form.
 In the training stage, we prepare 2D training images generated from our module with their
 class label and pose label. We fully exploit the information that lies in their labels
@@ -18,8 +18,8 @@ namespace ovis {
 enum SceneSettings
 {
-    /// the window will use a seperate scene. The scene will be shared otherwise.
-    SCENE_SEPERATE = 1,
+    /// the window will use a separate scene. The scene will be shared otherwise.
+    SCENE_SEPARATE = 1,
     /// allow the user to control the camera.
     SCENE_INTERACTIVE = 2,
     /// draw coordinate system crosses for debugging
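For orientation, a minimal Python sketch (not part of the commit) of the sharing semantics this enum documents, assuming an OpenCV build with the ovis module: without SCENE_SEPARATE a new window renders the already existing scene, so entities created through one window appear in the other. Titles, size, and the mesh name are placeholders, and the mesh must live in a registered resource location.

```
import cv2 as cv

imsize = (800, 600)
# First window: as the implementation hunks below show, SCENE_SEPARATE is
# forced when no scene manager exists yet.
main = cv.ovis.createWindow("main", imsize, cv.ovis.SCENE_INTERACTIVE)
# No SCENE_SEPARATE flag: this window shares main's scene.
mirror = cv.ovis.createWindow("mirror", imsize, 0)
# Created via one window, visible in both, since the scene is shared.
main.createEntity("sinbad", "Sinbad.mesh")
```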
@@ -43,7 +43,7 @@ int main()
     owin->playEntityAnimation("sinbad", "IdleTop");
     // interaction scene
-    Ptr<ovis::WindowScene> iwin = ovis::createWindow(String("AR"), imsize, ovis::SCENE_SEPERATE | ovis::SCENE_INTERACTIVE);
+    Ptr<ovis::WindowScene> iwin = ovis::createWindow(String("AR"), imsize, ovis::SCENE_SEPARATE | ovis::SCENE_INTERACTIVE);
     iwin->createEntity("sinbad", "Sinbad.mesh", Vec3i(0, -5, 0), Vec3f(CV_PI, 0.0, 0.0));
     iwin->createLightEntity("sun", Vec3i(0, 0, -100));
     iwin->setCameraIntrinsics(K, imsize);
@@ -23,7 +23,7 @@ owin.playEntityAnimation("sinbad", "IdleBase")
 owin.playEntityAnimation("sinbad", "IdleTop")
 # interaction scene
-iwin = cv.ovis.createWindow("AR", imsize, cv.ovis.SCENE_SEPERATE | cv.ovis.SCENE_INTERACTIVE)
+iwin = cv.ovis.createWindow("AR", imsize, cv.ovis.SCENE_SEPARATE | cv.ovis.SCENE_INTERACTIVE)
 iwin.createEntity("sinbad", "Sinbad.mesh", tvec=(0, -5, 0), rot=(np.pi, 0, 0))
 iwin.createLightEntity("sun", (0, 0, -100))
 iwin.setCameraIntrinsics(K, imsize)
@@ -302,10 +302,10 @@ public:
     {
         if (!app->sceneMgr)
         {
-            flags |= SCENE_SEPERATE;
+            flags |= SCENE_SEPARATE;
         }
 
-        if (flags & SCENE_SEPERATE)
+        if (flags & SCENE_SEPARATE)
         {
             sceneMgr = root->createSceneManager("DefaultSceneManager", title);
             RTShader::ShaderGenerator& shadergen = RTShader::ShaderGenerator::getSingleton();
@@ -362,7 +362,7 @@ public:
     ~WindowSceneImpl()
     {
-        if (flags & SCENE_SEPERATE)
+        if (flags & SCENE_SEPARATE)
         {
             TextureManager& texMgr = TextureManager::getSingleton();
@@ -378,7 +378,7 @@ public:
             }
         }
 
-        if (_app->sceneMgr == sceneMgr && (flags & SCENE_SEPERATE))
+        if (_app->sceneMgr == sceneMgr && (flags & SCENE_SEPARATE))
         {
             // this is the root window owning the context
             CV_Assert(_app->numWindows() == 1 && "the first OVIS window must be deleted last");
@@ -157,7 +157,7 @@ int main( int argc, char** argv )
     int ndet = int(saliencyMap.size());
     std::cout << "Objectness done " << ndet << std::endl;
     // The results are sorted by objectness. We only use the first maxd boxes here.
-    int maxd = 7, step = 255 / maxd, jitter=9; // jitter to seperate single rects
+    int maxd = 7, step = 255 / maxd, jitter=9; // jitter to separate single rects
     Mat draw = image.clone();
     for (int i = 0; i < std::min(maxd, ndet); i++) {
         Vec4i bb = saliencyMap[i];
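As an aside, here is one way the "jitter" idea in that comment can be realized; this is an illustrative Python sketch under stated assumptions, not the sample's actual drawing code. Each top-ranked box gets a small random pixel offset plus a rank-dependent color so nearly coincident proposals stay distinguishable. `image` and `boxes` are placeholder inputs.

```
import numpy as np
import cv2 as cv

image = np.zeros((480, 640, 3), np.uint8)   # placeholder canvas
boxes = [(100, 100, 200, 150)] * 3          # three coincident proposals
maxd, jitter = 7, 9
step = 255 // maxd
rng = np.random.default_rng(0)
draw = image.copy()
for i, (x, y, w, h) in enumerate(boxes[:maxd]):
    # small random offset so identical rectangles do not overdraw each other
    dx, dy = (int(v) for v in rng.integers(-jitter, jitter + 1, size=2))
    color = (0, step * i, 255 - step * i)   # color shifts with objectness rank
    cv.rectangle(draw, (x + dx, y + dy), (x + w + dx, y + h + dy), color, 2)
```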
@@ -27,10 +27,17 @@ if __name__ == '__main__':
     edge_boxes = cv.ximgproc.createEdgeBoxes()
     edge_boxes.setMaxBoxes(30)
-    boxes = edge_boxes.getBoundingBoxes(edges, orimap)
-    for b in boxes:
-        x, y, w, h = b
-        cv.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 1, cv.LINE_AA)
+    boxes, scores = edge_boxes.getBoundingBoxes(edges, orimap)
+    if len(boxes) > 0:
+        boxes_scores = zip(boxes, scores)
+        for b_s in boxes_scores:
+            box = b_s[0]
+            x, y, w, h = box
+            cv.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 1, cv.LINE_AA)
+            score = b_s[1][0]
+            cv.putText(im, "{:.2f}".format(score), (x, y), cv.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 255), 1, cv.LINE_AA)
+            print("Box at (x,y)=({:d},{:d}); score={:f}".format(x, y, score))
     cv.imshow("edges", edges)
     cv.imshow("edgeboxes", im)
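For readers trying the new binding outside the sample, a self-contained sketch might look like the following. The image path is a placeholder; model.yml.gz is the structured edge detection model shipped in opencv_extra, and the preprocessing (RGB, float in [0, 1]) matches what the detector expects.

```
import numpy as np
import cv2 as cv

im = cv.imread("example.jpg")   # placeholder input image
sed = cv.ximgproc.createStructuredEdgeDetection("model.yml.gz")
rgb = cv.cvtColor(im, cv.COLOR_BGR2RGB).astype(np.float32) / 255.0
edges = sed.detectEdges(rgb)
orimap = sed.computeOrientation(edges)
eb = cv.ximgproc.createEdgeBoxes()
eb.setMaxBoxes(5)
# The updated binding returns proposals and their objectness scores together.
boxes, scores = eb.getBoundingBoxes(edges, orimap)
for (x, y, w, h), s in zip(boxes, scores):
    print("box ({}, {}, {}, {}) score {:.3f}".format(x, y, w, h, float(s)))
```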
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include "test_precomp.hpp"

namespace opencv_test { namespace {

TEST(ximpgroc_Edgeboxes, DISABLED_regression)
{
    // Testing the Edgeboxes implementation by asking for one proposal
    // on a simple test image from the PASCAL VOC 2012 dataset.
    std::vector<Rect> boxes;
    std::vector<float> scores;
    float expectedScore = 0.48742563f;
    Rect expectedProposal(158, 69, 125, 154);

    // Using the sample model file, compute the orientation map for use with edge detection.
    cv::String testImagePath = cvtest::TS::ptr()->get_data_path() + "cv/ximgproc/" + "pascal_voc_bird.png";
    Mat testImg = imread(testImagePath);
    ASSERT_FALSE(testImg.empty()) << "Could not load input image " << testImagePath;
    cvtColor(testImg, testImg, COLOR_BGR2RGB);
    testImg.convertTo(testImg, CV_32F, 1.0 / 255.0f);

    // Use the model for structured edge detection that is already provided in opencv_extra.
    cv::String model_path = cvtest::TS::ptr()->get_data_path() + "cv/ximgproc/" + "model.yml.gz";
    Ptr<StructuredEdgeDetection> sed = createStructuredEdgeDetection(model_path);
    Mat edgeImage, edgeOrientations;
    sed->detectEdges(testImg, edgeImage);
    sed->computeOrientation(edgeImage, edgeOrientations);

    // Obtain one proposal and its score from Edgeboxes.
    Ptr<EdgeBoxes> edgeboxes = createEdgeBoxes();
    edgeboxes->setMaxBoxes(1);
    edgeboxes->getBoundingBoxes(edgeImage, edgeOrientations, boxes, scores);

    // We asked for one proposal and thus one score; we had better get exactly one of each back.
    ASSERT_TRUE(boxes.size() == 1);
    ASSERT_TRUE(scores.size() == 1);

    // Check the proposal and its score.
    EXPECT_NEAR(scores[0], expectedScore, 1e-8);
    EXPECT_EQ(expectedProposal.x, boxes[0].x);
    EXPECT_EQ(expectedProposal.y, boxes[0].y);
    EXPECT_EQ(expectedProposal.height, boxes[0].height);
    EXPECT_EQ(expectedProposal.width, boxes[0].width);
}

}} // namespace
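Since the test is checked in as DISABLED_regression, GoogleTest skips it by default. Assuming the standard OpenCV test binary name for the module and OPENCV_TEST_DATA_PATH pointing at the opencv_extra test data, it could presumably be run explicitly with:

```
$ ./opencv_test_ximgproc --gtest_also_run_disabled_tests --gtest_filter=ximpgroc_Edgeboxes.DISABLED_regression
```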