Commit 30b2a945 authored by marina.kolpakova

load from config xml and fix integral representation

parent a2382ce6
@@ -92,6 +92,8 @@ bool sft::Octave::train( const cv::Mat& trainData, const cv::Mat& _responses, co
_params.weak_count = 1;
}
std::cout << "WARNING: " << sampleIdx << std::endl;
bool update = false;
return cv::Boost::train(trainData, CV_COL_SAMPLE, _responses, varIdx, sampleIdx, varType, missingDataMask, _params,
update);
@@ -104,7 +106,7 @@ class Preprocessor
public:
Preprocessor(int shr) : shrinkage(shr) {}
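// A cv::Mat passed by value still shares its pixel buffer, but any
// reallocation inside apply() (e.g. by cv::integral when sizes differ) would
// be lost to the caller; presumably the reason for switching to a reference.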
void apply(const Mat& frame, Mat integrals)
void apply(const Mat& frame, Mat& integrals)
{
CV_Assert(frame.type() == CV_8UC3);
@@ -178,7 +180,7 @@ void sft::Octave::processPositives(const Dataset& dataset, const FeaturePool& po
dprintf("Process candidate positive image %s\n", curr.c_str());
cv::Mat sample = cv::imread(curr);
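// Each sample now occupies one row of `integrals`; reshaping that row to
// (h + 1) rows restores the 2D layout of the stacked channel integrals
// (an integral image carries one extra row and column).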
cv::Mat channels = integrals.col(total).reshape(0, h + 1);
cv::Mat channels = integrals.row(total).reshape(0, h + 1);
prepocessor.apply(sample, channels);
responses.ptr<float>(total)[0] = 1.f;
@@ -198,6 +200,9 @@ void sft::Octave::generateNegatives(const Dataset& dataset)
sft::Random::engine eng;
sft::Random::engine idxEng;
int w = 64 * pow(2, logScale) /shrinkage;
int h = 128 * pow(2, logScale) /shrinkage * 10;
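// The factor of 10 presumably accounts for the stacked feature channels
// (ICF commonly uses 10: LUV + gradient magnitude + 6 orientation bins),
// each of height 128 * 2^logScale / shrinkage.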
Preprocessor prepocessor(shrinkage);
int nimages = (int)dataset.neg.size();
@@ -215,24 +220,33 @@ void sft::Octave::generateNegatives(const Dataset& dataset)
Mat frame = cv::imread(dataset.neg[curr]);
prepocessor.apply(frame, sum);
int maxW = frame.cols - 2 * boundingBox.x - boundingBox.width;
int maxH = frame.rows - 2 * boundingBox.y - boundingBox.height;
std::cout << "WARNING: " << frame.cols << " " << frame.rows << std::endl;
std::cout << "WARNING: " << frame.cols / shrinkage << " " << frame.rows / shrinkage << std::endl;
sft::Random::uniform wRand(0, maxW);
sft::Random::uniform hRand(0, maxH);
int maxW = frame.cols / shrinkage - 2 * boundingBox.x - boundingBox.width;
int maxH = frame.rows / shrinkage - 2 * boundingBox.y - boundingBox.height;
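// The sampling range is now computed on the shrunk grid so that it is
// consistent with boundingBox, which this commit expresses in shrunk
// coordinates.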
std::cout << "WARNING: " << maxW << " " << maxH << std::endl;
sft::Random::uniform wRand(0, maxW -1);
sft::Random::uniform hRand(0, maxH -1);
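// Upper bounds are reduced by one on the assumption that
// sft::Random::uniform samples inclusively from [low, high].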
int dx = wRand(eng);
int dy = hRand(eng);
sum = sum(cv::Rect(dx, dy, boundingBox.width, boundingBox.height));
std::cout << "WARNING: " << dx << " " << dy << std::endl;
std::cout << "WARNING: " << dx + boundingBox.width + 1 << " " << dy + boundingBox.height + 1 << std::endl;
std::cout << "WARNING: " << sum.cols << " " << sum.rows << std::endl;
sum = sum(cv::Rect(dx, dy, boundingBox.width + 1, boundingBox.height * 10 + 1));
dprintf("generated %d %d\n", dx, dy);
if (predict(sum))
// if (predict(sum))
{
responses.ptr<float>(i)[0] = 0.f;
sum = sum.reshape(0, 1);
sum.copyTo(integrals.col(i));
// sum = sum.reshape(0, 1);
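// Reshaping the destination row to (h + 1) rows gives it the same 2D layout
// as `sum`, so copyTo() writes the sampled patch into row i of integrals.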
sum.copyTo(integrals.row(i).reshape(0, h + 1));
++i;
}
}
@@ -257,7 +271,7 @@ bool sft::Octave::train(const Dataset& dataset, const FeaturePool& pool)
// 3. only simple case (all samples used)
int nsamples = npositives + nnegatives;
cv::Mat sampleIdx(1, nsamples, CV_32SC1);
ptr = varIdx.ptr<int>(0);
ptr = sampleIdx.ptr<int>(0);
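// Fix: ptr previously still pointed at varIdx, so the sample indices
// overwrote varIdx and sampleIdx was left uninitialized.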
for (int x = 0; x < nsamples; ++x)
ptr[x] = x;
@@ -281,7 +295,10 @@ bool sft::Octave::train(const Dataset& dataset, const FeaturePool& pool)
cv::Mat missingMask;
return train(trainData, responses, varIdx, sampleIdx, varType, missingMask);
bool ok = train(trainData, responses, varIdx, sampleIdx, varType, missingMask);
if (!ok)
std::cout << "ERROR:tree couldnot be trained" << std::endl;
return ok;
}
@@ -44,85 +44,119 @@
#include <sft/common.hpp>
#include <sft/octave.hpp>
#include <sft/config.hpp>
int main(int argc, char** argv)
{
// hard coded now
int nfeatures = 50;
int npositives = 10;
int nnegatives = 10;
using namespace sft;
int shrinkage = 4;
int octave = 0;
const string keys =
"{help h usage ? | | print this message }"
"{config c | | path to configuration xml }"
;
int nsamples = npositives + nnegatives;
cv::Size model(64, 128);
std::string path = "/home/kellan/cuda-dev/opencv_extra/testdata/sctrain/rescaled-train-2012-10-27-19-02-52";
cv::CommandLineParser parser(argc, argv, keys);
parser.about("Soft cascade training application.");
cv::Rect boundingBox(5, 5 ,16, 32);
sft::Octave boost(boundingBox, npositives, nnegatives, octave, shrinkage);
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
sft::FeaturePool pool(model, nfeatures);
sft::Dataset dataset(path, boost.logScale);
if (!parser.check())
{
parser.printErrors();
return 1;
}
boost.train(dataset, pool);
string configPath = parser.get<string>("config");
if (configPath.empty())
{
std::cout << "Configuration file is missing or empty. Could not start training." << std::endl << std::flush;
return 0;
}
cv::Mat train_data(nfeatures, nsamples, CV_32FC1);
cv::RNG rng;
std::cout << "Read configuration from file " << configPath << std::endl;
cv::FileStorage fs(configPath, cv::FileStorage::READ);
if(!fs.isOpened())
{
std::cout << "Configuration file " << configPath << " can't be opened." << std::endl << std::flush;
return 1;
}
for (int y = 0; y < nfeatures; ++y)
for (int x = 0; x < nsamples; ++x)
train_data.at<float>(y, x) = rng.uniform(0.f, 1.f);
// +
int tflag = CV_COL_SAMPLE;
cv::Mat responses(nsamples, 1, CV_32FC1);
for (int y = 0; y < nsamples; ++y)
responses.at<float>(y, 0) = (y < npositives) ? 1.f : 0.f;
// 1. load config
sft::Config cfg;
fs["config"] >> cfg;
std::cout << std::endl << "Training will be executed for configuration:" << std::endl << cfg << std::endl;
// 2. check and open output file
cv::FileStorage fso(cfg.outXmlPath, cv::FileStorage::WRITE);
if(!fso.isOpened())
{
std::cout << "Training stopped. Output classifier XML file " << cfg.outXmlPath << " can't be opened." << std::endl << std::flush;
return 1;
}
cv::Mat var_idx(1, nfeatures, CV_32SC1);
for (int x = 0; x < nfeatures; ++x)
var_idx.at<int>(0, x) = x;
// ovector strong;
// strong.reserve(cfg.octaves.size());
// Mat sample_idx;
cv::Mat sample_idx(1, nsamples, CV_32SC1);
for (int x = 0; x < nsamples; ++x)
sample_idx.at<int>(0, x) = x;
// fso << "softcascade" << "{" << "octaves" << "[";
cv::Mat var_type(1, nfeatures + 1, CV_8UC1);
for (int x = 0; x < nfeatures; ++x)
var_type.at<uchar>(0, x) = CV_VAR_ORDERED;
// 3. Train all octaves
for (ivector::const_iterator it = cfg.octaves.begin(); it != cfg.octaves.end(); ++it)
{
int nfeatures = cfg.poolSize;
int npositives = cfg.positives;
int nnegatives = cfg.negatives;
var_type.at<uchar>(0, nfeatures) = CV_VAR_CATEGORICAL;
int shrinkage = cfg.shrinkage;
int octave = *it;
cv::Mat missing_mask;
cv::Size model = cfg.modelWinSize;
std::string path = cfg.trainPath;
CvBoostParams params;
{
params.max_categories = 10;
params.max_depth = 2;
params.min_sample_count = 2;
params.cv_folds = 0;
params.truncate_pruned_tree = false;
/// TODO: pick an appropriate regression_accuracy value
params.regression_accuracy = 0.01;
params.use_surrogates = false;
params.use_1se_rule = false;
///////// boost params
params.boost_type = CvBoost::GENTLE;
params.weak_count = 1;
params.split_criteria = CvBoost::SQERR;
params.weight_trim_rate = 0.95;
cv::Rect boundingBox(cfg.offset.x / cfg.shrinkage, cfg.offset.y / cfg.shrinkage,
cfg.modelWinSize.width / cfg.shrinkage, cfg.modelWinSize.height / cfg.shrinkage);
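// The bounding box is expressed on the shrunk grid: both the offset and the
// model window are divided by shrinkage, matching the shrunk integral
// channels used inside sft::Octave.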
sft::Octave boost(boundingBox, npositives, nnegatives, octave, shrinkage);
sft::FeaturePool pool(model, nfeatures);
sft::Dataset dataset(path, boost.logScale);
if (boost.train(dataset, pool))
{
std::cout << "Octave " << octave << " was successfully trained..." << std::endl;
}
// // d. train octave
// if (octave.train(pool, cfg.positives, cfg.negatives, cfg.weaks))
// {
// strong.push_back(octave);
// }
}
bool update = false;
// fso << "]" << "}";
// // 3. create Soft Cascade
// // sft::SCascade cascade(cfg.modelWinSize, cfg.octs, cfg.shrinkage);
// // // 4. Generate feature pool
// // std::vector<sft::ICF> pool;
// // sft::fillPool(pool, cfg.poolSize, cfg.modelWinSize / cfg.shrinkage, cfg.seed);
// // // 5. Train all octaves
// // cascade.train(cfg.trainPath);
// // // 6. Set thresholds
// // cascade.prune();
// boost.train(train_data, responses, var_idx, sample_idx, var_type, missing_mask);
// // // 7. Postprocess
// // cascade.normolize();
// CvFileStorage* fs = cvOpenFileStorage( "/home/kellan/train_res.xml", 0, CV_STORAGE_WRITE );
// boost.write(fs, "test_res");
// // // 8. Write result xml
// // cv::FileStorage ofs(cfg.outXmlPath, cv::FileStorage::WRITE);
// // ofs << cfg.cascadeName << cascade;
// cvReleaseFileStorage( &fs );
std::cout << "Training complete..." << std::endl;
return 0;
}
\ No newline at end of file