Commit 9852e809 authored by Wangyida's avatar Wangyida

add libs hint on OS X in for cmake

parent 97d49a88
An overview of the contrib modules and a small explanation
----------------------------------------------------------
This list gives an overview of all modules available inside the contrib repository.
These are also the correct names for disabling the building of a specific module by adding
```
$ cmake -D OPENCV_EXTRA_MODULES_PATH=<opencv_contrib>/modules -D BUILD_opencv_reponame=OFF <opencv_source_directory>
```
1. **opencv_adas**: Advanced Driver Assistance Systems module with Forward Collision Warning.
2. **opencv_bgsegm**: Improved Adaptive Background Mixture Model for Real-time Tracking / Visual Tracking of Human Visitors under Variable-Lighting Conditions.
3. **opencv_bioinspired**: Biologically inspired vision models and derived tools.
4. **opencv_ccalib**: Custom Calibration Pattern for 3D reconstruction.
5. **opencv_cvv**: GUI for Interactive Visual Debugging of Computer Vision Programs.
6. **opencv_datasets**: Interface for accessing existing computer vision databases.
7. **opencv_datasettools**: Tools for working with different datasets.
8. **opencv_face**: Recently added face recognition software which is not yet stabilized.
9. **opencv_latentsvm**: Implementation of the LatentSVM detector algorithm.
10. **opencv_line_descriptor**: Binary descriptors for lines extracted from an image.
11. **opencv_matlab**: OpenCV Matlab Code Generator.
12. **opencv_optflow**: Optical Flow Algorithms for tracking points.
13. **opencv_reg**: Image Registration module.
14. **opencv_rgbd**: RGB-Depth Processing module.
15. **opencv_saliency**: Saliency API, understanding where humans focus given a scene.
16. **opencv_surface_matching**: Surface Matching Algorithm Through 3D Features.
17. **opencv_text**: Scene Text Detection and Recognition in Natural Scene Images.
18. **opencv_tracking**: Long-term optical tracking API.
19. **opencv_xfeatures2d**: Extra 2D Features Framework containing experimental and non-free 2D feature algorithms.
20. **opencv_ximgproc**: Extended Image Processing: Structured Forests / Domain Transform Filter / Guided Filter / Adaptive Manifold Filter / Joint Bilateral Filter / Superpixels.
21. **opencv_xobjdetect**: Integral Channel Features Detector Framework.
22. **opencv_xphoto**: Additional photo processing algorithms: Color balance / Denoising / Inpainting.
23. **opencv_stereo**: Stereo Correspondence done with different descriptors: Census / CS-Census / MCT / BRIEF / MV / RT.
......@@ -8,6 +8,22 @@ else()
message(STATUS "Caffe: NO")
endif()
# Detect the Protobuf library; the CNN triplet-training code links against it.
# Protobuf_FOUND / Protobuf_LIBS are set by the project's custom FindProtobuf module.
find_package(Protobuf)
if(Protobuf_FOUND)
message(STATUS "Protobuf: YES")
# Record availability so dependent sources can be compiled conditionally.
set(HAVE_PROTOBUF 1)
else()
message(STATUS "Protobuf: NO")
endif()
# Detect the Google glog library; Caffe requires it at link time.
# Glog_FOUND / Glog_LIBS are set by the project's custom FindGlog module.
find_package(Glog)
if(Glog_FOUND)
message(STATUS "Glog: YES")
# Record availability so dependent sources can be compiled conditionally.
set(HAVE_GLOG 1)
else()
message(STATUS "Glog: NO")
endif()
if(HAVE_CAFFE)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cnn_3dobj_config.hpp.in
${CMAKE_CURRENT_SOURCE_DIR}/include/opencv2/cnn_3dobj_config.hpp @ONLY)
......@@ -21,6 +37,6 @@ endif()
ocv_define_module(cnn_3dobj opencv_core opencv_imgproc opencv_viz opencv_highgui OPTIONAL WRAP python)
# Link Caffe plus its transitive dependencies (glog, protobuf) only when Caffe
# was detected. NOTE: the previous `if(${Caffe_FOUND})` dereferenced the
# variable, which expands to the malformed `if()` when Caffe_FOUND is unset;
# testing the variable name directly is the safe form. The stale duplicate
# target_link_libraries line (without the glog/protobuf libs) is removed.
if(Caffe_FOUND)
  target_link_libraries(opencv_cnn_3dobj ${Caffe_LIBS} ${Glog_LIBS} ${Protobuf_LIBS})
endif()
endif()
File mode changed from 100644 to 100755
# Glog package for CNN Triplet training
#
# Locates the Google glog library and sets:
#   Glog_LIBS  - full path to the glog library
#   Glog_FOUND - 1 when the library was found
#
# HINTS cover the common non-default prefixes on OS X: /usr/local/lib
# (Homebrew on Intel Macs and manual installs) and /opt/homebrew/lib
# (Homebrew on Apple Silicon). System default paths are still searched.
unset(Glog_FOUND)
find_library(Glog_LIBS NAMES glog
  HINTS
  /usr/local/lib
  /opt/homebrew/lib)
if(Glog_LIBS)
  set(Glog_FOUND 1)
endif()
# Protobuf package for CNN Triplet training
#
# Locates the Protocol Buffers runtime library and sets:
#   Protobuf_LIBS  - full path to the protobuf library
#   Protobuf_FOUND - 1 when the library was found
#
# HINTS cover the common non-default prefixes on OS X: /usr/local/lib
# (Homebrew on Intel Macs and manual installs) and /opt/homebrew/lib
# (Homebrew on Apple Silicon). System default paths are still searched.
unset(Protobuf_FOUND)
find_library(Protobuf_LIBS NAMES protobuf
  HINTS
  /usr/local/lib
  /opt/homebrew/lib)
if(Protobuf_LIBS)
  set(Protobuf_FOUND 1)
endif()
......@@ -30,7 +30,7 @@ $ sudo make install
$ cd <opencv_source_directory>
$ mkdir build
$ cd build
$ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D WITH_TBB=ON -D BUILD_NEW_PYTHON_SUPPORT=OFF -D WITH_V4L=ON -D WITH_QT=ON -D WITH_OPENGL=ON -D WITH_VTK=ON -D INSTALL_TESTS=ON -D OPENCV_EXTRA_MODULES_PATH=<opencv_contrib>/modules ..
$ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D WITH_TBB=ON -D BUILD_NEW_PYTHON_SUPPORT=OFF -D WITH_V4L=ON -D WITH_QT=OFF -D WITH_OPENGL=ON -D WITH_VTK=ON -D INSTALL_TESTS=ON -D OPENCV_EXTRA_MODULES_PATH=<opencv_contrib>/modules ..
$ make -j4
$ sudo make install
```
......@@ -56,22 +56,24 @@ $ make
##Demo1: training data generation
####Image generation from different poses. By default there are 4 models used; there will be 276 images in all, of which each class contains 69 images. If you want to use additional .ply models, it is necessary to change the class number parameter to the new class number and also give it a new class label. If you want to train the network and extract features from RGB images, set the parameter rgb_use to 1.
```
$ ./sphereview_test -plymodel=../data/3Dmodel/ape.ply -label_class=0
$ ./sphereview_test -plymodel=../data/3Dmodel/ape.ply -label_class=0 -cam_head_x=0 -cam_head_y=0 -cam_head_z=1
```
####press 'Q' to start 2D image generation
```
$ ./sphereview_test -plymodel=../data/3Dmodel/ant.ply -label_class=1
$ ./sphereview_test -plymodel=../data/3Dmodel/ant.ply -label_class=1 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
```
####press 'Q' to start
```
$ ./sphereview_test -plymodel=../data/3Dmodel/cow.ply -label_class=2
$ ./sphereview_test -plymodel=../data/3Dmodel/cow.ply -label_class=2 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
```
####press 'Q' to start
```
$ ./sphereview_test -plymodel=../data/3Dmodel/plane.ply -label_class=3
$ ./sphereview_test -plymodel=../data/3Dmodel/plane.ply -label_class=3 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
```
```
$ ./sphereview_test -plymodel=../data/3Dmodel/bunny.ply -label_class=4 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
```
```
$ ./sphereview_test -plymodel=../data/3Dmodel/horse.ply -label_class=5 -cam_head_x=0 -cam_head_y=0 -cam_head_z=-1
```
####press 'Q' to start
####When all images are created in the images_all folder as a collection of training images for network training and as a gallery of reference images for the classification part, then proceed on.
####After this demo, the binary files of images and labels will be stored as 'binary_image' and 'binary_label' in the current path; you should copy them into the leveldb folder in Caffe triplet training, for example: copy these 2 files into <caffe_source_directory>/data/linemod and rename them as 'binary_image_train', 'binary_image_test' and 'binary_label_train', 'binary_label_test'. Here I use the same data for training and testing; you can use different data for training and testing to measure performance in the Caffe training process. It's important to observe the loss on the testing data to check whether the training data is suitable for your aim. The loss should keep decreasing and end up much smaller than the initial loss.
####You could start triplet training using Caffe like this:
......
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
......@@ -55,10 +55,10 @@ the use of this software, even if advised of the possibility of such damage.
#include <set>
#include <string.h>
#include <stdlib.h>
#include <tr1/memory>
#include <dirent.h>
#define CPU_ONLY
#include <opencv2/cnn_3dobj_config.hpp>
#ifdef HAVE_CAFFE
#include <caffe/blob.hpp>
#include <caffe/common.hpp>
......
File mode changed from 100644 to 100755
......@@ -15,3 +15,7 @@ target_link_libraries(classify_test ${OpenCV_LIBS})
# Build the model-analysis demo executable.
set(SOURCES_modelanalysis demo_model_analysis.cpp)
add_executable(model_test ${SOURCES_modelanalysis})
target_link_libraries(model_test ${OpenCV_LIBS})
# Build the video demo executable.
set(SOURCES_video demo_video.cpp)
add_executable(video_test ${SOURCES_video})
target_link_libraries(video_test ${OpenCV_LIBS})
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
This diff is collapsed.
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
# Remove any previously generated training data so this run starts clean.
rm -rf ../data/binary_image
rm -rf ../data/binary_label
rm -rf ../data/header_for_image
rm -rf ../data/header_for_label
# Render 2D training views for each 3D model. -label_class assigns the class
# id; -cam_head_x/y/z set the camera's up direction for that model (the ape
# and horse models are oriented differently from the others).
./sphereview_test -plymodel=../data/3Dmodel/ape.ply -label_class=0 -cam_head_x=0 -cam_head_y=0 -cam_head_z=1
./sphereview_test -plymodel=../data/3Dmodel/ant.ply -label_class=1 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
./sphereview_test -plymodel=../data/3Dmodel/cow.ply -label_class=2 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
./sphereview_test -plymodel=../data/3Dmodel/plane.ply -label_class=3 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
./sphereview_test -plymodel=../data/3Dmodel/bunny.ply -label_class=4 -cam_head_x=0 -cam_head_y=-1 -cam_head_z=0
./sphereview_test -plymodel=../data/3Dmodel/horse.ply -label_class=5 -cam_head_x=0 -cam_head_y=0 -cam_head_z=-1
\ No newline at end of file
......@@ -84,16 +84,15 @@ void listDir(const char *path, std::vector<string>& files, bool r)
int main(int argc, char** argv)
{
const String keys = "{help | | This sample will extract featrues from reference images and target image for classification. You can add a mean_file if there little variance in data such as human faces, otherwise it is not so useful}"
"{src_dir | ../data/images_all/ | Source direction of the images ready for being used for extract feature as gallery.}"
"{caffemodel | ../../testdata/cv/3d_triplet_iter_30000.caffemodel | caffe model for feature exrtaction.}"
"{network_forIMG | ../../testdata/cv/3d_triplet_testIMG.prototxt | Network definition file used for extracting feature from a single image and making a classification}"
"{mean_file | no | The mean file generated by Caffe from all gallery images, this could be used for mean value substraction from all images. If you want to use the mean file, you can set this as ../data/images_mean/triplet_mean.binaryproto.}"
"{target_img | ../data/images_all/1_8.png | Path of image waiting to be classified.}"
"{feature_blob | feat | Name of layer which will represent as the feature, in this network, ip1 or feat is well.}"
"{num_candidate | 15 | Number of candidates in gallery as the prediction result.}"
"{device | CPU | Device type: CPU or GPU}"
"{dev_id | 0 | Device id}";
"{src_dir | ../data/images_all/ | Source direction of the images ready for being used for extract feature as gallery.}"
"{caffemodel | ../../testdata/cv/3d_triplet_iter_30000.caffemodel | caffe model for feature exrtaction.}"
"{network_forIMG | ../../testdata/cv/3d_triplet_testIMG.prototxt | Network definition file used for extracting feature from a single image and making a classification}"
"{mean_file | no | The mean file generated by Caffe from all gallery images, this could be used for mean value substraction from all images. If you want to use the mean file, you can set this as ../data/images_mean/triplet_mean.binaryproto.}"
"{target_img | ../data/images_all/4_78.png | Path of image waiting to be classified.}"
"{feature_blob | feat | Name of layer which will represent as the feature, in this network, ip1 or feat is well.}"
"{num_candidate | 15 | Number of candidates in gallery as the prediction result.}"
"{device | CPU | Device type: CPU or GPU}"
"{dev_id | 0 | Device id}";
/* get parameters from comand line */
cv::CommandLineParser parser(argc, argv, keys);
parser.about("Feature extraction and classification");
......@@ -111,18 +110,15 @@ int main(int argc, char** argv)
int num_candidate = parser.get<int>("num_candidate");
string device = parser.get<string>("device");
int dev_id = parser.get<int>("dev_id");
/* Initialize a net work with Device */
cv::cnn_3dobj::descriptorExtractor descriptor(device);
std::cout << "Using" << descriptor.getDeviceType() << std::endl;
/* Load net with the caffe trained net work parameter and structure */
if (strcmp(mean_file.c_str(), "no") == 0)
descriptor.loadNet(network_forIMG, caffemodel);
else
descriptor.loadNet(network_forIMG, caffemodel, mean_file);
std::vector<string> name_gallery;
/* List the file names under a given path */
listDir(src_dir.c_str(), name_gallery, false);
for (unsigned int i = 0; i < name_gallery.size(); i++)
......@@ -135,16 +131,12 @@ int main(int argc, char** argv)
{
img_gallery.push_back(cv::imread(name_gallery[i], -1));
}
/* Extract feature from a set of images */
descriptor.extract(img_gallery, feature_reference, feature_blob);
std::cout << std::endl << "---------- Prediction for " << target_img << " ----------" << std::endl;
cv::Mat img = cv::imread(target_img, -1);
std::cout << std::endl << "---------- Features of gallery images ----------" << std::endl;
std::vector<std::pair<string, float> > prediction;
/* Print features of the reference images. */
for (unsigned int i = 0; i < feature_reference.rows; i++)
std::cout << feature_reference.row(i) << endl;
......@@ -155,10 +147,8 @@ int main(int argc, char** argv)
std::vector<std::vector<cv::DMatch> > matches;
/* Have a KNN match on the target and reference images. */
matcher.knnMatch(feature_test, feature_reference, matches, num_candidate);
/* Print feature of the target image waiting to be classified. */
std::cout << std::endl << "---------- Features of target image: " << target_img << "----------" << endl << feature_test << std::endl;
/* Print the top N prediction. */
std::cout << std::endl << "---------- Prediction result(Distance - File Name in Gallery) ----------" << std::endl;
for (size_t i = 0; i < matches[0].size(); ++i)
......@@ -166,4 +156,4 @@ int main(int argc, char** argv)
std::cout << i << " - " << std::fixed << std::setprecision(2) << name_gallery[matches[0][i].trainIdx] << " - \"" << matches[0][i].distance << "\"" << std::endl;
}
return 0;
}
}
\ No newline at end of file
......@@ -50,10 +50,10 @@ int main(int argc, char** argv)
"{caffemodel | ../../testdata/cv/3d_triplet_iter_30000.caffemodel | caffe model for feature exrtaction.}"
"{network_forIMG | ../../testdata/cv/3d_triplet_testIMG.prototxt | Network definition file used for extracting feature from a single image and making a classification}"
"{mean_file | no | The mean file generated by Caffe from all gallery images, this could be used for mean value substraction from all images. If you want to use the mean file, you can set this as ../data/images_mean/triplet_mean.binaryproto.}"
"{target_img | ../data/images_all/1_8.png | Path of image in reference.}"
"{ref_img1 | ../data/images_all/1_23.png | Path of closest image.}"
"{ref_img2 | ../data/images_all/1_14.png | Path of less closer image in the same class with reference image.}"
"{ref_img3 | ../data/images_all/3_8.png | Path of image with the same pose in another class.}"
"{target_img | ../data/images_all/4_78.png | Path of image in reference.}"
"{ref_img1 | ../data/images_all/4_79.png | Path of closest image.}"
"{ref_img2 | ../data/images_all/4_87.png | Path of less closer image in the same class with reference image.}"
"{ref_img3 | ../data/images_all/3_78.png | Path of image with the same pose in another class.}"
"{feature_blob | feat | Name of layer which will represent as the feature, in this network, ip1 or feat is well.}"
"{device | CPU | device}"
"{dev_id | 0 | dev_id}";
......
......@@ -47,12 +47,15 @@ using namespace std;
using namespace cv::cnn_3dobj;
int main(int argc, char *argv[])
{
const String keys = "{help | | demo :$ ./sphereview_test -ite_depth=2 -plymodel=../data/3Dmodel/ape.ply -imagedir=../data/images_all/ -labeldir=../data/label_all.txt -num_class=4 -label_class=0, then press 'q' to run the demo for images generation when you see the gray background and a coordinate.}"
"{ite_depth | 2 | Iteration of sphere generation.}"
const String keys = "{help | | demo :$ ./sphereview_test -ite_depth=2 -plymodel=../data/3Dmodel/ape.ply -imagedir=../data/images_all/ -labeldir=../data/label_all.txt -num_class=6 -label_class=0, then press 'q' to run the demo for images generation when you see the gray background and a coordinate.}"
"{ite_depth | 3 | Iteration of sphere generation.}"
"{plymodel | ../data/3Dmodel/ape.ply | Path of the '.ply' file for image rendering. }"
"{imagedir | ../data/images_all/ | Path of the generated images for one particular .ply model. }"
"{labeldir | ../data/label_all.txt | Path of the generated images for one particular .ply model. }"
"{num_class | 4 | Total number of classes of models}"
"{cam_head_x | 0 | Head of the camera. }"
"{cam_head_y | -1 | Head of the camera. }"
"{cam_head_z | 0 | Head of the camera. }"
"{num_class | 6 | Total number of classes of models}"
"{label_class | 0 | Class label of current .ply model}"
"{rgb_use | 0 | Use RGB image or grayscale}";
/* Get parameters from comand line. */
......@@ -69,19 +72,20 @@ int main(int argc, char *argv[])
string labeldir = parser.get<string>("labeldir");
int num_class = parser.get<int>("num_class");
int label_class = parser.get<int>("label_class");
float cam_head_x = parser.get<float>("cam_head_x");
float cam_head_y = parser.get<float>("cam_head_y");
float cam_head_z = parser.get<float>("cam_head_z");
int rgb_use = parser.get<int>("rgb_use");
cv::cnn_3dobj::icoSphere ViewSphere(10,ite_depth);
std::vector<cv::Point3d> campos = ViewSphere.CameraPos;
std::fstream imglabel;
char* p=(char*)labeldir.data();
imglabel.open(p, fstream::app|fstream::out);
bool camera_pov = (true);
bool camera_pov = true;
/* Create a window using viz. */
viz::Viz3d myWindow("Coordinate Frame");
/* Set window size as 64*64, we use this scale as default. */
myWindow.setWindowSize(Size(64,64));
/* Add coordinate axes. */
myWindow.showWidget("Coordinate Widget", viz::WCoordinateSystem());
/* Set background color. */
myWindow.setBackgroundColor(viz::Color::gray());
myWindow.spin();
......@@ -90,7 +94,12 @@ int main(int argc, char *argv[])
/* Get the center of the generated mesh widget, cause some .ply files. */
Point3d cam_focal_point = ViewSphere.getCenter(objmesh.cloud);
float radius = ViewSphere.getRadius(objmesh.cloud, cam_focal_point);
Point3d cam_y_dir(0.0f,0.0f,1.0f);
objmesh.cloud = objmesh.cloud/radius*100;
cam_focal_point = cam_focal_point/radius*100;
Point3d cam_y_dir;
cam_y_dir.x = cam_head_x;
cam_y_dir.y = cam_head_y;
cam_y_dir.z = cam_head_z;
const char* headerPath = "../data/header_for_";
const char* binaryPath = "../data/binary_";
ViewSphere.createHeader((int)campos.size(), 64, 64, headerPath);
......@@ -106,7 +115,7 @@ int main(int argc, char *argv[])
imglabel << filename << ' ' << (int)(campos.at(pose).x*100) << ' ' << (int)(campos.at(pose).y*100) << ' ' << (int)(campos.at(pose).z*100) << endl;
filename = imagedir + filename;
/* Get the pose of the camera using makeCameraPoses. */
Affine3f cam_pose = viz::makeCameraPose(campos.at(pose)*radius+cam_focal_point, cam_focal_point, cam_y_dir*radius+cam_focal_point);
Affine3f cam_pose = viz::makeCameraPose(campos.at(pose)*380+cam_focal_point, cam_focal_point, cam_y_dir*380+cam_focal_point);
/* Get the transformation matrix from camera coordinate system to global. */
Affine3f transform = viz::makeTransformToGlobal(Vec3f(1.0f,0.0f,0.0f), Vec3f(0.0f,1.0f,0.0f), Vec3f(0.0f,0.0f,1.0f), campos.at(pose));
viz::WMesh mesh_widget(objmesh);
......
This diff is collapsed.
File mode changed from 100644 to 100755
......@@ -146,7 +146,6 @@ namespace cnn_3dobj
radiusCam = Radius;
}
}
radiusCam *= 4;
return radiusCam;
};
......
......@@ -42,7 +42,6 @@ the use of this software, even if advised of the possibility of such damage.
#ifndef __OPENCV_CNN_3DOBJ_PRECOMP_HPP__
#define __OPENCV_CNN_3DOBJ_PRECOMP_HPP__
#include <opencv2/cnn_3dobj_config.hpp>
#include <opencv2/cnn_3dobj.hpp>
#endif
#include "../include/cnn_3dobj.hpp"
using namespace cv;
using namespace std;
namespace cv{ namespace cnn_3dobj{
/* Build an icosphere: start from an icosahedron scaled to 'radius_in' and
 * recursively subdivide each face 'depth_in' times. Every generated vertex
 * is pushed into CameraPos as a candidate camera position.
 * BUGFIX: the subdivision loop read tindices[i][1..3]; valid column indices
 * are 0..2, so [3] overran each row (and the whole array on the last face).
 * Also removed two heap-allocated vectors that were never used or freed. */
IcoSphere::IcoSphere(float radius_in, int depth_in)
{
    X = 0.525731112119133606f;
    Z = 0.850650808352039932f;
    int radius = radius_in;
    int depth = depth_in;
    X *= radius;
    Z *= radius;
    /* The 12 vertices of an icosahedron with the requested radius. */
    float vdata[12][3] = { { -X, 0.0f, Z }, { X, 0.0f, Z },
        { -X, 0.0f, -Z }, { X, 0.0f, -Z }, { 0.0f, Z, X }, { 0.0f, Z, -X },
        { 0.0f, -Z, X }, { 0.0f, -Z, -X }, { Z, X, 0.0f }, { -Z, X, 0.0f },
        { Z, -X, 0.0f }, { -Z, -X, 0.0f } };
    /* The 20 triangular faces, as index triples into vdata. */
    int tindices[20][3] = { { 0, 4, 1 }, { 0, 9, 4 }, { 9, 5, 4 },
        { 4, 5, 8 }, { 4, 8, 1 }, { 8, 10, 1 }, { 8, 3, 10 }, { 5, 3, 8 },
        { 5, 2, 3 }, { 2, 7, 3 }, { 7, 10, 3 }, { 7, 6, 10 }, { 7, 11, 6 },
        { 11, 0, 6 }, { 0, 1, 6 }, { 6, 1, 10 }, { 9, 0, 11 },
        { 9, 11, 2 }, { 9, 2, 5 }, { 7, 2, 11 } };
    /* Recursively subdivide every face; subdivide() records the vertices. */
    for (int i = 0; i < 20; ++i) {
        subdivide(vdata[tindices[i][0]], vdata[tindices[i][1]],
                  vdata[tindices[i][2]], depth);
    }
    cout << "View points in total: " << CameraPos->size() << endl;
    cout << "The coordinate of view point: " << endl;
    for (size_t i = 0; i < CameraPos->size(); i++)
    {
        cout << CameraPos->at(i).x << endl;
    }
}
/* Rescale the 3-vector v in place so its Euclidean length equals the
 * static sphere radius, i.e. project the point back onto the sphere. */
void IcoSphere::norm(float v[])
{
    // Accumulate the squared length component by component.
    float len = v[0] * v[0];
    len += v[1] * v[1];
    len += v[2] * v[2];
    len = sqrt(len);
    // Dividing each component by len/radius scales the vector to 'radius'.
    const float shrink = (float)len / (float)IcoSphere::radius;
    for (int k = 0; k < 3; ++k)
        v[k] /= shrink;
}
/* Record one generated vertex: append its components to the vertex and
 * vertex-normal lists and store the point as a camera position.
 * BUGFIX: the previous code copied v into a heap-allocated std::vector that
 * was never freed, leaking on every call; v is read directly instead. */
void IcoSphere::add(float v[])
{
    Point3f temp_Campos;
    for (int k = 0; k < 3; ++k) {
        vertexList->push_back(v[k]);
        vertexNormalsList->push_back(v[k]);
    }
    temp_Campos.x = v[0];
    temp_Campos.y = v[1];
    temp_Campos.z = v[2];
    CameraPos->push_back(temp_Campos);
}
/* Recursively subdivide triangle (v1, v2, v3) 'depth' times. At depth 0 the
 * three corners are emitted via add(); otherwise each edge midpoint is
 * computed, re-projected onto the sphere with norm(), and the four resulting
 * sub-triangles are subdivided one level shallower.
 * BUGFIX: midpoints are now stack arrays; the previous `new float[3]`
 * allocations were never deleted and leaked on every recursion step. */
void IcoSphere::subdivide(float v1[], float v2[], float v3[], int depth)
{
    if (depth == 0) {
        add(v1);
        add(v2);
        add(v3);
        return;
    }
    float v12[3], v23[3], v31[3];
    for (int i = 0; i < 3; ++i) {
        v12[i] = (v1[i] + v2[i]) / 2;
        v23[i] = (v2[i] + v3[i]) / 2;
        v31[i] = (v3[i] + v1[i]) / 2;
    }
    norm(v12);
    norm(v23);
    norm(v31);
    subdivide(v1, v12, v31, depth - 1);
    subdivide(v2, v23, v12, depth - 1);
    subdivide(v3, v31, v23, depth - 1);
    subdivide(v12, v23, v31, depth - 1);
}
}}
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
......@@ -81,6 +81,6 @@ layer {
bottom: "ip1"
top: "feat"
inner_product_param {
num_output: 16
num_output: 3
}
}
../../testdata/cv/3d_triplet_iter_1.caffemodel
../../testdata/cv/3d_triplet_iter_2.caffemodel
../../testdata/cv/3d_triplet_iter_3.caffemodel
../../testdata/cv/3d_triplet_iter_4.caffemodel
../../testdata/cv/3d_triplet_iter_5.caffemodel
../../testdata/cv/3d_triplet_iter_6.caffemodel
../../testdata/cv/3d_triplet_iter_7.caffemodel
../../testdata/cv/3d_triplet_iter_8.caffemodel
../../testdata/cv/3d_triplet_iter_9.caffemodel
../../testdata/cv/3d_triplet_iter_10.caffemodel
../../testdata/cv/3d_triplet_iter_20.caffemodel
../../testdata/cv/3d_triplet_iter_30.caffemodel
../../testdata/cv/3d_triplet_iter_40.caffemodel
../../testdata/cv/3d_triplet_iter_50.caffemodel
../../testdata/cv/3d_triplet_iter_60.caffemodel
../../testdata/cv/3d_triplet_iter_70.caffemodel
../../testdata/cv/3d_triplet_iter_80.caffemodel
../../testdata/cv/3d_triplet_iter_90.caffemodel
../../testdata/cv/3d_triplet_iter_100.caffemodel
../../testdata/cv/3d_triplet_iter_200.caffemodel
../../testdata/cv/3d_triplet_iter_300.caffemodel
../../testdata/cv/3d_triplet_iter_400.caffemodel
../../testdata/cv/3d_triplet_iter_500.caffemodel
../../testdata/cv/3d_triplet_iter_600.caffemodel
../../testdata/cv/3d_triplet_iter_700.caffemodel
../../testdata/cv/3d_triplet_iter_800.caffemodel
../../testdata/cv/3d_triplet_iter_900.caffemodel
../../testdata/cv/3d_triplet_iter_1000.caffemodel
../../testdata/cv/3d_triplet_iter_2000.caffemodel
../../testdata/cv/3d_triplet_iter_3000.caffemodel
../../testdata/cv/3d_triplet_iter_4000.caffemodel
../../testdata/cv/3d_triplet_iter_5000.caffemodel
../../testdata/cv/3d_triplet_iter_6000.caffemodel
../../testdata/cv/3d_triplet_iter_7000.caffemodel
../../testdata/cv/3d_triplet_iter_8000.caffemodel
../../testdata/cv/3d_triplet_iter_9000.caffemodel
../../testdata/cv/3d_triplet_iter_10000.caffemodel
../../testdata/cv/3d_triplet_iter_20000.caffemodel
../../testdata/cv/3d_triplet_iter_30000.caffemodel
../../testdata/cv/3d_triplet_iter_40000.caffemodel
../../testdata/cv/3d_triplet_iter_50000.caffemodel
../../testdata/cv/3d_triplet_iter_60000.caffemodel
../../testdata/cv/3d_triplet_iter_70000.caffemodel
../../testdata/cv/3d_triplet_iter_110000.caffemodel
../../testdata/cv/3d_triplet_iter_120000.caffemodel
../../testdata/cv/3d_triplet_iter_130000.caffemodel
../../testdata/cv/3d_triplet_iter_140000.caffemodel
../../testdata/cv/3d_triplet_iter_150000.caffemodel
../../testdata/cv/3d_triplet_iter_160000.caffemodel
../../testdata/cv/3d_triplet_iter_170000.caffemodel
../../testdata/cv/3d_triplet_iter_180000.caffemodel
../../testdata/cv/3d_triplet_iter_190000.caffemodel
\ No newline at end of file
File mode changed from 100644 to 100755
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment