Commit 197fba68 authored by Wangyida's avatar Wangyida

Modify the tests of the cnn_3dobj module

parent cabd5d40
This diff is collapsed.
......@@ -4,3 +4,10 @@
booktitle = {BMVC British Machine Vision Conference 2008},
year = {2008}
}
@inproceedings{wohlhart15,
author = {Paul Wohlhart and Vincent Lepetit},
title = {Learning Descriptors for Object Recognition and 3D Pose Estimation},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
year = {2015}
}
#ifndef __OPENCV_CNN_3DOBJ_CONFIG_HPP__
#define __OPENCV_CNN_3DOBJ_CONFIG_HPP__
// HAVE CAFFE
#define HAVE_CAFFE
#endif
......@@ -11,3 +11,7 @@ target_link_libraries(sphereview_test ${OpenCV_LIBS})
set(SOURCES_classifier classifyIMG_demo.cpp)
add_executable(classify_test ${SOURCES_classifier})
target_link_libraries(classify_test ${OpenCV_LIBS})
set(SOURCES_modelanalysis model_analysis_demo.cpp)
add_executable(model_test ${SOURCES_modelanalysis})
target_link_libraries(model_test ${OpenCV_LIBS})
......@@ -74,10 +74,10 @@ int main(int argc, char** argv)
{
const String keys = "{help | | this demo will convert a set of images in a particular path into leveldb database for feature extraction using Caffe. If there little variance in data such as human faces, you can add a mean_file, otherwise it is not so useful}"
"{src_dir | ../data/images_all/ | Source direction of the images ready for being used for extract feature as gallery.}"
"{caffemodel | ../data/3d_triplet_iter_20000.caffemodel | caffe model for feature exrtaction.}"
"{network_forIMG | ../data/3d_triplet_testIMG.prototxt | Network definition file used for extracting feature from a single image and making a classification}"
"{caffemodel | ../../testdata/cv/3d_triplet_iter_30000.caffemodel | caffe model for feature exrtaction.}"
"{network_forIMG | ../../testdata/cv/3d_triplet_testIMG.prototxt | Network definition file used for extracting feature from a single image and making a classification}"
"{mean_file | no | The mean file generated by Caffe from all gallery images, this could be used for mean value substraction from all images. If you want to use the mean file, you can set this as ../data/images_mean/triplet_mean.binaryproto.}"
"{target_img | ../data/images_all/3_13.png | Path of image waiting to be classified.}"
"{target_img | ../data/images_all/1_8.png | Path of image waiting to be classified.}"
"{feature_blob | feat | Name of layer which will represent as the feature, in this network, ip1 or feat is well.}"
"{num_candidate | 15 | Number of candidates in gallery as the prediction result.}"
"{device | CPU | device}"
......@@ -99,21 +99,22 @@ int main(int argc, char** argv)
string device = parser.get<string>("device");
int dev_id = parser.get<int>("dev_id");
cv::cnn_3dobj::descriptorExtractor descriptor(device, dev_id);
std::vector<string> device_info = descriptor.getter();
std::cout << "Using" << device_info[0] << std::endl;
cv::cnn_3dobj::descriptorExtractor descriptor(device);
std::cout << "Using" << descriptor.getDeviceType() << std::endl;
if (strcmp(mean_file.c_str(), "no") == 0)
descriptor.loadNet(network_forIMG, caffemodel);
else
descriptor.loadNet(network_forIMG, caffemodel, mean_file);
std::vector<string> name_gallery;
listDir(src_dir.c_str(), name_gallery, false);
for (unsigned int i = 0; i < name_gallery.size(); i++) {
for (unsigned int i = 0; i < name_gallery.size(); i++)
{
name_gallery[i] = src_dir + name_gallery[i];
}
std::vector<cv::Mat> img_gallery;
cv::Mat feature_reference;
for (unsigned int i = 0; i < name_gallery.size(); i++) {
for (unsigned int i = 0; i < name_gallery.size(); i++)
{
img_gallery.push_back(cv::imread(name_gallery[i], -1));
}
descriptor.extract(img_gallery, feature_reference, feature_blob);
......@@ -122,7 +123,7 @@ int main(int argc, char** argv)
cv::Mat img = cv::imread(target_img, -1);
// CHECK(!img.empty()) << "Unable to decode image " << target_img;
std::cout << std::endl << "---------- Featrue of gallery images ----------" << std::endl;
std::cout << std::endl << "---------- Features of gallery images ----------" << std::endl;
std::vector<std::pair<string, float> > prediction;
for (unsigned int i = 0; i < feature_reference.rows; i++)
std::cout << feature_reference.row(i) << endl;
......@@ -131,10 +132,11 @@ int main(int argc, char** argv)
cv::BFMatcher matcher(NORM_L2);
std::vector<std::vector<cv::DMatch> > matches;
matcher.knnMatch(feature_test, feature_reference, matches, num_candidate);
std::cout << std::endl << "---------- Featrue of target image: " << target_img << "----------" << endl << feature_test << std::endl;
std::cout << std::endl << "---------- Features of target image: " << target_img << "----------" << endl << feature_test << std::endl;
// Print the top N prediction.
std::cout << std::endl << "---------- Prediction result(Distance - File Name in Gallery) ----------" << std::endl;
for (size_t i = 0; i < matches[0].size(); ++i) {
for (size_t i = 0; i < matches[0].size(); ++i)
{
std::cout << i << " - " << std::fixed << std::setprecision(2) << name_gallery[matches[0][i].trainIdx] << " - \"" << matches[0][i].distance << "\"" << std::endl;
}
return 0;
......
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2009, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#define HAVE_CAFFE
#include <iostream>
#include "opencv2/imgproc.hpp"
#include "opencv2/cnn_3dobj.hpp"
using namespace cv;
using namespace cv::cnn_3dobj;
/**
 * Demo: analyse whether a trained triplet CNN model can both set object
 * classes apart and discriminate object pose within a class.
 *
 * A feature is extracted from a reference image and from three probe images:
 *  - ref_img1: closest pose, same class as the reference image
 *  - ref_img2: a less similar pose, same class
 *  - ref_img3: the same pose as the reference, but a different class
 * The model passes the pose test when the reference feature is closer (L2)
 * to ref_img1 than to the other two, and the class test when it is closer
 * to ref_img2 than to ref_img3.
 *
 * @return 0 on success, 1 when an input image cannot be read or a feature
 *         cannot be extracted (previously the demo kept running on empty
 *         Mats, which would crash inside Caffe).
 */
int main(int argc, char** argv)
{
    const String keys = "{help | | this demo will have an analysis on the trained model, it will print information about whether the model is suit for set different classes apart and also discriminant on object pose at the same time.}"
        "{caffemodel | ../../testdata/cv/3d_triplet_iter_30000.caffemodel | caffe model for feature exrtaction.}"
        "{network_forIMG | ../../testdata/cv/3d_triplet_testIMG.prototxt | Network definition file used for extracting feature from a single image and making a classification}"
        "{mean_file | no | The mean file generated by Caffe from all gallery images, this could be used for mean value substraction from all images. If you want to use the mean file, you can set this as ../data/images_mean/triplet_mean.binaryproto.}"
        "{target_img | ../data/images_all/1_8.png | Path of image in reference.}"
        "{ref_img1 | ../data/images_all/1_23.png | Path of closest image.}"
        "{ref_img2 | ../data/images_all/1_14.png | Path of less closer image in the same class with reference image.}"
        "{ref_img3 | ../data/images_all/3_8.png | Path of image with the same pose in another class.}"
        "{feature_blob | feat | Name of layer which will represent as the feature, in this network, ip1 or feat is well.}"
        "{device | CPU | device}"
        "{dev_id | 0 | dev_id}";
    cv::CommandLineParser parser(argc, argv, keys);
    parser.about("Demo for object data classification and pose estimation");
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }
    string caffemodel = parser.get<string>("caffemodel");
    string network_forIMG = parser.get<string>("network_forIMG");
    string mean_file = parser.get<string>("mean_file");
    string target_img = parser.get<string>("target_img");
    string ref_img1 = parser.get<string>("ref_img1");
    string ref_img2 = parser.get<string>("ref_img2");
    string ref_img3 = parser.get<string>("ref_img3");
    string feature_blob = parser.get<string>("feature_blob");
    string device = parser.get<string>("device");
    int dev_id = parser.get<int>("dev_id");
    // The three probe images, ordered: same-class close pose, same-class
    // far pose, other-class same pose. The pass/fail logic below relies on
    // this order.
    std::vector<string> ref_img;
    ref_img.push_back(ref_img1);
    ref_img.push_back(ref_img2);
    ref_img.push_back(ref_img3);
    cv::cnn_3dobj::descriptorExtractor descriptor(device, dev_id);
    // "no" is the sentinel default meaning: do not use a mean file.
    if (mean_file == "no")
        descriptor.loadNet(network_forIMG, caffemodel);
    else
        descriptor.loadNet(network_forIMG, caffemodel, mean_file);
    cv::Mat img_base = cv::imread(target_img, -1);
    if (img_base.empty())
    {
        // Bail out instead of feeding an empty Mat to the network.
        printf("could not read reference image %s, make sure the path of images is set properly.\n", target_img.c_str());
        return 1;
    }
    std::vector<cv::Mat> img;
    for (unsigned int i = 0; i < ref_img.size(); i++)
    {
        img.push_back(cv::imread(ref_img[i], -1));
        if (img[i].empty())
        {
            printf("could not read reference image %s, make sure the path of images is set properly.\n", ref_img[i].c_str());
            return 1;
        }
    }
    cv::Mat feature_test;
    descriptor.extract(img_base, feature_test, feature_blob);
    if (feature_test.empty())
    {
        printf("could not extract feature from test image which is read into cv::Mat.\n");
        return 1;
    }
    cv::Mat feature_reference;
    descriptor.extract(img, feature_reference, feature_blob);
    if (feature_reference.empty())
    {
        printf("could not extract feature from reference images which is already stored in vector<cv::Mat>.\n");
        return 1;
    }
    // L2 distance between the reference feature and each probe feature.
    std::vector<float> matches;
    for (int i = 0; i < feature_reference.rows; i++)
    {
        cv::Mat distance = feature_test - feature_reference.row(i);
        // cv::norm returns double; cast explicitly to avoid implicit narrowing.
        matches.push_back(static_cast<float>(cv::norm(distance)));
    }
    // Pose test: the closest-pose same-class probe must be nearest overall.
    bool pose_pass = false;
    // Class test: the same-class probe must beat the other-class probe.
    bool class_pass = false;
    if (matches[0] < matches[1] && matches[0] < matches[2])
        pose_pass = true;
    if (matches[1] < matches[2])
        class_pass = true;
    if (!pose_pass)
    {
        printf("\n =========== Model %s ========== \nIs not trained properly that the similar pose could not be tell from a cluster of features.\n", caffemodel.c_str());
    }
    else if (!class_pass)
    {
        printf("\n =========== Model %s ========== \nIs not trained properly that feature from the same class is not discriminant from the one of another class with similar pose.\n", caffemodel.c_str());
    }
    else
    {
        printf("\n =========== Model %s ========== \nSuits for setting different classes apart and also discriminant on object pose at the same time.\n", caffemodel.c_str());
    }
    return 0;
}
......@@ -42,9 +42,9 @@ using namespace std;
using namespace cv::cnn_3dobj;
int main(int argc, char *argv[])
{
const String keys = "{help | | demo :$ ./sphereview_test -ite_depth=2 -plymodel=../3Dmodel/ape.ply -imagedir=../data/images_ape/ -labeldir=../data/label_ape.txt -num_class=4 -label_class=0, then press 'q' to run the demo for images generation when you see the gray background and a coordinate.}"
const String keys = "{help | | demo :$ ./sphereview_test -ite_depth=2 -plymodel=../data/3Dmodel/ape.ply -imagedir=../data/images_all/ -labeldir=../data/label_all.txt -num_class=4 -label_class=0, then press 'q' to run the demo for images generation when you see the gray background and a coordinate.}"
"{ite_depth | 2 | Iteration of sphere generation.}"
"{plymodel | ../3Dmodel/ape.ply | path of the '.ply' file for image rendering. }"
"{plymodel | ../data/3Dmodel/ape.ply | path of the '.ply' file for image rendering. }"
"{imagedir | ../data/images_all/ | path of the generated images for one particular .ply model. }"
"{labeldir | ../data/label_all.txt | path of the generated images for one particular .ply model. }"
"{num_class | 4 | total number of classes of models}"
......@@ -84,8 +84,8 @@ int main(int argc, char *argv[])
Point3d cam_focal_point = ViewSphere.getCenter(objmesh.cloud);
float radius = ViewSphere.getRadius(objmesh.cloud, cam_focal_point);
Point3d cam_y_dir(0.0f,0.0f,1.0f);
const char* headerPath = "./header_for_";
const char* binaryPath = "./binary_";
const char* headerPath = "../data/header_for_";
const char* binaryPath = "../data/binary_";
ViewSphere.createHeader((int)campos.size(), 64, 64, headerPath);
for(int pose = 0; pose < (int)campos.size(); pose++){
char* temp = new char;
......
......@@ -8,19 +8,20 @@ namespace cnn_3dobj
{
descriptorExtractor::descriptorExtractor(const string& device_type, int device_id)
{
net_ready = 0;
if (strcmp(device_type.c_str(), "CPU") == 0 || strcmp(device_type.c_str(), "GPU") == 0)
{
if (strcmp(device_type.c_str(), "CPU") == 0)
{
caffe::Caffe::set_mode(caffe::Caffe::CPU);
device_info.push_back("CPU");
deviceType = "CPU";
std::cout << "Using CPU" << std::endl;
}
else
{
caffe::Caffe::set_mode(caffe::Caffe::GPU);
caffe::Caffe::SetDevice(device_id);
device_info.push_back("GPU");
deviceType = "GPU";
std::cout << "Using GPU" << std::endl;
std::cout << "Using Device_id=" << device_id << std::endl;
}
......@@ -33,44 +34,59 @@ namespace cnn_3dobj
}
};
std::vector<string> descriptorExtractor::getDevice()
string descriptorExtractor::getDeviceType()
{
string device_info_out;
device_info_out = deviceType;
return device_info_out;
};
int descriptorExtractor::getDeviceId()
{
std::vector<string> device_info_out;
device_info_out = device_info;
int device_info_out;
device_info_out = deviceId;
return device_info_out;
};
void descriptorExtractor::setDevice(const string& device_type, const string& device_id)
void descriptorExtractor::setDeviceType(const string& device_type)
{
if (strcmp(device_type.c_str(), "CPU") == 0 || strcmp(device_type.c_str(), "GPU") == 0)
{
if (strcmp(device_type.c_str(), "CPU") == 0)
{
caffe::Caffe::set_mode(caffe::Caffe::CPU);
device_info.push_back("CPU");
deviceType = "CPU";
std::cout << "Using CPU" << std::endl;
}
else
{
int dev_id = atoi(device_id.c_str());
caffe::Caffe::set_mode(caffe::Caffe::GPU);
caffe::Caffe::SetDevice(dev_id);
device_info.push_back("GPU");
deviceType = "GPU";
std::cout << "Using GPU" << std::endl;
std::cout << "Using Device_id=" << dev_id << std::endl;
}
net_set = true;
}
else
{
std::cout << "Error: Device name must be 'GPU' together with an device number or 'CPU'." << std::endl;
net_set = false;
std::cout << "Error: Device name must be 'GPU' or 'CPU'." << std::endl;
}
};
void descriptorExtractor::loadNet(const string& model_file, const string& trained_file, string mean_file)
void descriptorExtractor::setDeviceId(const int& device_id)
{
if (strcmp(deviceType.c_str(), "GPU") == 0)
{
caffe::Caffe::SetDevice(device_id);
deviceId = device_id;
std::cout << "Using GPU with Device ID = " << device_id << std::endl;
}
else
{
std::cout << "Error: Device ID only need to be set when GPU is used." << std::endl;
}
};
void descriptorExtractor::loadNet(const string& model_file, const string& trained_file, const string& mean_file)
{
net_ready = 0;
if (net_set)
{
/* Load the network. */
......@@ -98,7 +114,7 @@ namespace cnn_3dobj
}
else
{
std::cout << "Error: Device must be set in advance using SetNet function" << std::endl;
std::cout << "Error: Net is not set properly in advance using construtor." << std::endl;
}
};
......@@ -181,14 +197,14 @@ namespace cnn_3dobj
}
}
else
std::cout << "Network must be set properly using SetNet and LoadNet in advance.";
std::cout << "Device must be set properly using constructor and the net must be set in advance using loadNet.";
};
/* Wrap the input layer of the network in separate cv::Mat objects
* (one per channel). This way we save one memcpy operation and we
* don't need to rely on cudaMemcpy2D. The last preprocessing
* operation will write the separate channels directly to the input
* layer. */
* (one per channel). This way we save one memcpy operation and we
* don't need to rely on cudaMemcpy2D. The last preprocessing
* operation will write the separate channels directly to the input
* layer. */
void descriptorExtractor::wrapInput(std::vector<cv::Mat>* input_channels)
{
Blob<float>* input_layer = convnet->input_blobs()[0];
......@@ -233,12 +249,12 @@ namespace cnn_3dobj
else
sample_normalized = sample_float;
/* This operation will write the separate BGR planes directly to the
* input layer of the network because it is wrapped by the cv::Mat
* objects in input_channels. */
* input layer of the network because it is wrapped by the cv::Mat
* objects in input_channels. */
cv::split(sample_normalized, *input_channels);
if (reinterpret_cast<float*>(input_channels->at(0).data)
!= convnet->input_blobs()[0]->cpu_data())
std::cout << "Input channels are not wrapping the input layer of the network." << std::endl;
};
}
}
} /* namespace cnn_3dobj */
} /* namespace cv */
......@@ -8,12 +8,8 @@ namespace cnn_3dobj
{
icoSphere::icoSphere(float radius_in, int depth_in)
{
X = 0.5f;
Z = 0.5f;
X *= (int)radius_in;
Z *= (int)radius_in;
diff = 0.00000005964;
float vdata[12][3] = { { -X, 0.0f, Z }, { X, 0.0f, Z },
{ -X, 0.0f, -Z }, { X, 0.0f, -Z }, { 0.0f, Z, X }, { 0.0f, Z, -X },
{ 0.0f, -Z, X }, { 0.0f, -Z, -X }, { Z, X, 0.0f }, { -Z, X, 0.0f },
......@@ -23,6 +19,9 @@ namespace cnn_3dobj
{ 5, 2, 3 }, { 2, 7, 3 }, { 7, 10, 3 }, { 7, 6, 10 }, { 7, 11, 6 },
{ 11, 0, 6 }, { 0, 1, 6 }, { 6, 1, 10 }, { 9, 0, 11 },
{ 9, 11, 2 }, { 9, 2, 5 }, { 7, 2, 11 } };
diff = 0.00000001;
X *= (int)radius_in;
Z *= (int)radius_in;
// Iterate over points
for (int i = 0; i < 20; ++i)
......@@ -31,20 +30,24 @@ namespace cnn_3dobj
vdata[tindices[i][2]], depth_in);
}
CameraPos_temp.push_back(CameraPos[0]);
for (int j = 1; j<int(CameraPos.size()); j++)
for (unsigned int j = 1; j < CameraPos.size(); ++j)
{
for (int k = 0; k<j; k++)
for (unsigned int k = 0; k < j; ++k)
{
if (CameraPos.at(k).x-CameraPos.at(j).x < diff && CameraPos.at(k).y-CameraPos.at(j).y < diff && CameraPos.at(k).z-CameraPos.at(j).z < diff)
float dist_x, dist_y, dist_z;
dist_x = (CameraPos.at(k).x-CameraPos.at(j).x) * (CameraPos.at(k).x-CameraPos.at(j).x);
dist_y = (CameraPos.at(k).y-CameraPos.at(j).y) * (CameraPos.at(k).y-CameraPos.at(j).y);
dist_z = (CameraPos.at(k).z-CameraPos.at(j).z) * (CameraPos.at(k).z-CameraPos.at(j).z);
if (dist_x < diff && dist_y < diff && dist_z < diff)
break;
if(k == j-1)
else if (k == j-1)
CameraPos_temp.push_back(CameraPos[j]);
}
}
CameraPos = CameraPos_temp;
cout << "View points in total: " << CameraPos.size() << endl;
cout << "The coordinate of view point: " << endl;
for(int i=0; i < (int)CameraPos.size(); i++)
for(unsigned int i = 0; i < CameraPos.size(); i++)
{
cout << CameraPos.at(i).x <<' '<< CameraPos.at(i).y << ' ' << CameraPos.at(i).z << endl;
}
......@@ -69,8 +72,6 @@ namespace cnn_3dobj
std::vector<float>* temp = new std::vector<float>;
for (int k = 0; k < 3; ++k)
{
vertexList.push_back(v[k]);
vertexNormalsList.push_back(v[k]);
temp->push_back(v[k]);
}
temp_Campos.x = temp->at(0);temp_Campos.y = temp->at(1);temp_Campos.z = temp->at(2);
......@@ -261,4 +262,5 @@ namespace cnn_3dobj
img_file.close();
lab_file.close();
};
}}
} /* namespace cnn_3dobj */
} /* namespace cv */
/*
* Created on: Aug 14, 2015
* Author: yidawang
* Author: Yida Wang
*/
#include "test_precomp.hpp"
......@@ -26,33 +26,38 @@ CV_CNN_Feature_Test::CV_CNN_Feature_Test()
*/
void CV_CNN_Feature_Test::run(int)
{
string caffemodel = ts->get_data_path() + "cnn_3dobj/samples/data/3d_triplet_iter_20000.caffemodel";
string network_forIMG = ts->get_data_path() + "cnn_3dobj/samples/data/3d_triplet_testIMG.prototxt";
string mean_file = "no";
string target_img = ts->get_data_path() + "cnn_3dobj/samples/data/images_all/2_24.png";
string caffemodel = std::string(ts->get_data_path()) + "3d_triplet_iter_30000.caffemodel";
string network_forIMG = cvtest::TS::ptr()->get_data_path() + "3d_triplet_testIMG.prototxt";
string mean_file = "no";
std::vector<string> ref_img;
string target_img = std::string(ts->get_data_path()) + "1_8.png";
string feature_blob = "feat";
string device = "CPU";
int dev_id = 0;
cv::Mat img_base = cv::imread(target_img, -1);
if (img_base.empty())
{
ts->printf(cvtest::TS::LOG, "could not read reference image %s\n", target_img.c_str(), "make sure the path of images are set properly.");
ts->set_failed_test_info(cvtest::TS::FAIL_MISSING_TEST_DATA);
return;
}
cv::cnn_3dobj::descriptorExtractor descriptor(device, dev_id);
if (strcmp(mean_file.c_str(), "no") == 0)
descriptor.loadNet(network_forIMG, caffemodel);
else
descriptor.loadNet(network_forIMG, caffemodel, mean_file);
cv::Mat img = cv::imread(target_img, -1);
if (img.empty()) {
ts->printf(cvtest::TS::LOG, "could not read image %s\n", target_img.c_str());
ts->set_failed_test_info(cvtest::TS::FAIL_MISSING_TEST_DATA);
return;
}
cv::Mat feature_test;
descriptor.extract(img, feature_test, feature_blob);
if (feature_test.empty()) {
ts->printf(cvtest::TS::LOG, "could not extract feature from image %s\n", target_img.c_str());
descriptor.extract(img_base, feature_test, feature_blob);
Mat feature_reference = (Mat_<float>(1,16) << -134.03548, -203.48265, -105.96752, 55.343075, -211.36378, 487.85968, -182.15063, 62.229042, 297.19876, 206.07578, 291.74951, -19.906454, -464.09152, 135.79895, 420.43616, 2.2887282);
printf("Reference feature is computed by Caffe extract_features tool by \n To generate values for different images, use extract_features \n with the resetted image list in prototxt.");
float dist = norm(feature_test - feature_reference);
if (dist > 5) {
ts->printf(cvtest::TS::LOG, "Extracted featrue is not the same from the one extracted from Caffe.");
ts->set_failed_test_info(cvtest::TS::FAIL_MISSING_TEST_DATA);
return;
}
}
TEST(VIDEO_BGSUBGMG, accuracy) { CV_CNN_Feature_Test test; test.safe_run(); }
TEST(CNN_FEATURE, accuracy) { CV_CNN_Feature_Test test; test.safe_run(); }
......@@ -81,6 +81,6 @@ layer {
bottom: "ip1"
top: "feat"
inner_product_param {
num_output: 4
num_output: 16
}
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment