///////////////////////////////////////////////////////////////////////////////////////
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.

//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.

// This is an implementation of the Logistic Regression algorithm in C++ for OpenCV.

// AUTHOR:
// Rahul Kavi rahulkavi[at]live[at]com

// # You are free to use, change, or redistribute the code in any way you wish for
// # non-commercial purposes, but please maintain the name of the original author.
// # This code comes with no warranty of any kind.

// # Logistic Regression ALGORITHM


//                           License Agreement
//                For Open Source Computer Vision Library

// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.

// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:

//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.

//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.

//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.

// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.

#include "precomp.hpp"

using namespace std;

namespace cv {
namespace ml {

class LrParams
{
public:
    LrParams()
    {
        alpha = 0.001;
        num_iters = 1000;
        norm = LogisticRegression::REG_L2;
        train_method = LogisticRegression::BATCH;
        mini_batch_size = 1;
        term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
    }

    double alpha; //!< learning rate.
    int num_iters; //!< number of iterations.
    int norm; //!< regularization kind: REG_L1, REG_L2 or REG_DISABLE.
    int train_method; //!< optimizer: BATCH or MINI_BATCH gradient descent.
    int mini_batch_size; //!< rows per update when train_method == MINI_BATCH.
    TermCriteria term_crit;
};

class LogisticRegressionImpl CV_FINAL : public LogisticRegression
{
public:

    LogisticRegressionImpl() { }
    virtual ~LogisticRegressionImpl() {}

    inline double getLearningRate() const CV_OVERRIDE { return params.alpha; }
    inline void setLearningRate(double val) CV_OVERRIDE { params.alpha = val; }
    inline int getIterations() const CV_OVERRIDE { return params.num_iters; }
    inline void setIterations(int val) CV_OVERRIDE { params.num_iters = val; }
    inline int getRegularization() const CV_OVERRIDE { return params.norm; }
    inline void setRegularization(int val) CV_OVERRIDE { params.norm = val; }
    inline int getTrainMethod() const CV_OVERRIDE { return params.train_method; }
    inline void setTrainMethod(int val) CV_OVERRIDE { params.train_method = val; }
    inline int getMiniBatchSize() const CV_OVERRIDE { return params.mini_batch_size; }
    inline void setMiniBatchSize(int val) CV_OVERRIDE { params.mini_batch_size = val; }
    inline TermCriteria getTermCriteria() const CV_OVERRIDE { return params.term_crit; }
    inline void setTermCriteria(TermCriteria val) CV_OVERRIDE { params.term_crit = val; }

    virtual bool train( const Ptr<TrainData>& trainData, int=0 ) CV_OVERRIDE;
    virtual float predict(InputArray samples, OutputArray results, int flags=0) const CV_OVERRIDE;
    virtual void clear() CV_OVERRIDE;
    virtual void write(FileStorage& fs) const CV_OVERRIDE;
    virtual void read(const FileNode& fn) CV_OVERRIDE;
    virtual Mat get_learnt_thetas() const CV_OVERRIDE { return learnt_thetas; }
    virtual int getVarCount() const CV_OVERRIDE { return learnt_thetas.cols; }
    virtual bool isTrained() const CV_OVERRIDE { return !learnt_thetas.empty(); }
    virtual bool isClassifier() const CV_OVERRIDE { return true; }
    virtual String getDefaultName() const CV_OVERRIDE { return "opencv_ml_lr"; }
protected:
    Mat calc_sigmoid(const Mat& data) const;
    double compute_cost(const Mat& _data, const Mat& _labels, const Mat& _init_theta);
    void compute_gradient(const Mat& _data, const Mat& _labels, const Mat &_theta, const double _lambda, Mat & _gradient );
    Mat batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta);
    Mat mini_batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta);
    bool set_label_map(const Mat& _labels_i);
    Mat remap_labels(const Mat& _labels_i, const map<int, int>& lmap) const;
protected:
    LrParams params;
    Mat learnt_thetas;
    map<int, int> forward_mapper; //!< user label -> internal label
    map<int, int> reverse_mapper; //!< internal label -> user label
    Mat labels_o; //!< original user-provided labels
    Mat labels_n; //!< normalized internal labels 0..num_classes-1
};

Ptr<LogisticRegression> LogisticRegression::create()
{
    return makePtr<LogisticRegressionImpl>();
}

Ptr<LogisticRegression> LogisticRegression::load(const String& filepath, const String& nodeName)
{
    return Algorithm::load<LogisticRegression>(filepath, nodeName);
}
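
// A minimal usage sketch (illustrative only; the variable names and parameter
// values below are assumptions for demonstration, not part of this file):
//
//     Ptr<LogisticRegression> lr = LogisticRegression::create();
//     lr->setLearningRate(0.001);
//     lr->setIterations(1000);
//     lr->setRegularization(LogisticRegression::REG_L2);
//     lr->setTrainMethod(LogisticRegression::BATCH);
//     Mat samples, responses;  // CV_32F samples; CV_32F column of labels
//     lr->train(TrainData::create(samples, ROW_SAMPLE, responses));
//     Mat results;
//     lr->predict(samples, results);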


bool LogisticRegressionImpl::train(const Ptr<TrainData>& trainData, int)
{
    CV_TRACE_FUNCTION_SKIP_NESTED();
    // return value
    bool ok = false;

    if (trainData.empty()) {
        return false;
    }
    clear();
    Mat _data_i = trainData->getSamples();
    Mat _labels_i = trainData->getResponses();

    // check size and type of training data
    CV_Assert( !_labels_i.empty() && !_data_i.empty());
    if(_labels_i.cols != 1)
    {
        CV_Error( CV_StsBadArg, "labels should be a column matrix" );
    }
    if(_data_i.type() != CV_32FC1 || _labels_i.type() != CV_32FC1)
    {
        CV_Error( CV_StsBadArg, "data and labels must be floating-point matrices" );
    }
    if(_labels_i.rows != _data_i.rows)
    {
        CV_Error( CV_StsBadArg, "number of rows in data and labels should be equal" );
    }

    // class labels
    set_label_map(_labels_i);
    Mat labels_l = remap_labels(_labels_i, this->forward_mapper);
    int num_classes = (int) this->forward_mapper.size();
    if(num_classes < 2)
    {
        CV_Error( CV_StsBadArg, "data should have at least 2 classes" );
    }

    // add a column of ones to the data (bias/intercept term)
    Mat data_t;
    hconcat( cv::Mat::ones( _data_i.rows, 1, CV_32F ), _data_i, data_t );

    // coefficient matrix (zero-initialized)
    Mat thetas;
    Mat init_theta = Mat::zeros(data_t.cols, 1, CV_32F);

    // fit the model (handles binary and multiclass cases)
    Mat new_theta;
    Mat labels;
    if(num_classes == 2)
    {
        labels_l.convertTo(labels, CV_32F);
        if(this->params.train_method == LogisticRegression::BATCH)
            new_theta = batch_gradient_descent(data_t, labels, init_theta);
        else
            new_theta = mini_batch_gradient_descent(data_t, labels, init_theta);
        thetas = new_theta.t();
    }
    else
    {
        /* one-vs-rest: remap the labels once per class and train a separate
           theta per class, giving n thetas for n classes */
        thetas.create(num_classes, data_t.cols, CV_32F);
        Mat labels_binary;
        int ii = 0;
        for(map<int,int>::iterator it = this->forward_mapper.begin(); it != this->forward_mapper.end(); ++it)
        {
            // one-vs-rest (OvR) scheme
            labels_binary = (labels_l == it->second)/255;
            labels_binary.convertTo(labels, CV_32F);
            if(this->params.train_method == LogisticRegression::BATCH)
                new_theta = batch_gradient_descent(data_t, labels, init_theta);
            else
                new_theta = mini_batch_gradient_descent(data_t, labels, init_theta);
            hconcat(new_theta.t(), thetas.row(ii));
            ii += 1;
        }
    }

    // check that the estimates are stable and finite
    this->learnt_thetas = thetas.clone();
    if( cvIsNaN( (double)sum(this->learnt_thetas)[0] ) )
    {
        CV_Error( CV_StsBadArg, "check training parameters. Invalid training classifier" );
    }

    // success
    ok = true;
    return ok;
}

float LogisticRegressionImpl::predict(InputArray samples, OutputArray results, int flags) const
{
    // check that the model has been trained
    if(!this->isTrained())
    {
        CV_Error( CV_StsBadArg, "classifier should be trained first" );
    }

    // coefficient matrix
    Mat thetas;
    if ( learnt_thetas.type() == CV_32F )
    {
        thetas = learnt_thetas;
    }
    else
    {
        this->learnt_thetas.convertTo( thetas, CV_32F );
    }
    CV_Assert(thetas.rows > 0);

    // data samples
    Mat data = samples.getMat();
    if(data.type() != CV_32F)
    {
        CV_Error( CV_StsBadArg, "data must be of floating-point type" );
    }

    // add a column of ones to the data (bias/intercept term)
    Mat data_t;
    hconcat( cv::Mat::ones( data.rows, 1, CV_32F ), data, data_t );
    CV_Assert(data_t.cols == thetas.cols);

    // predict class labels for samples (handles binary and multiclass cases)
    Mat labels_c;
    Mat pred_m;
    Mat temp_pred;
    if(thetas.rows == 1)
    {
        // apply sigmoid function
        temp_pred = calc_sigmoid(data_t * thetas.t());
        CV_Assert(temp_pred.cols==1);
        pred_m = temp_pred.clone();

        // threshold at 0.5: predict class 1 if the output exceeds 0.5, else class 0
        temp_pred = (temp_pred > 0.5f) / 255;
        temp_pred.convertTo(labels_c, CV_32S);
    }
    else
    {
        // apply sigmoid function
        pred_m.create(data_t.rows, thetas.rows, data.type());
        for(int i = 0; i < thetas.rows; i++)
        {
            temp_pred = calc_sigmoid(data_t * thetas.row(i).t());
            vconcat(temp_pred, pred_m.col(i));
        }

        // predict class with the maximum output
        Point max_loc;
        Mat labels;
        for(int i = 0; i < pred_m.rows; i++)
        {
            temp_pred = pred_m.row(i);
            minMaxLoc( temp_pred, NULL, NULL, NULL, &max_loc );
            labels.push_back(max_loc.x);
        }
        labels.convertTo(labels_c, CV_32S);
    }

    // map the predicted labels back to the user's original class names
    Mat pred_labs = remap_labels(labels_c, this->reverse_mapper);
    pred_labs.convertTo(pred_labs, CV_32S);

    // return either the labels or the raw output
    if ( results.needed() )
    {
        if ( flags & StatModel::RAW_OUTPUT )
        {
            pred_m.copyTo( results );
        }
        else
        {
            pred_labs.copyTo(results);
        }
    }

    return ( pred_labs.empty() ? 0.f : static_cast<float>(pred_labs.at<int>(0)) );
}

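// element-wise logistic sigmoid: sigma(z) = 1 / (1 + exp(-z))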
Mat LogisticRegressionImpl::calc_sigmoid(const Mat& data) const
{
    CV_TRACE_FUNCTION();
    Mat dest;
    exp(-data, dest);
    return 1.0/(1.0+dest);
}

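// Regularized logistic cost over the m training rows:
//   J(theta) = -(1/m) * sum( y .* log(h) + (1-y) .* log(1-h) ) + r(theta)
// where h = sigmoid(X*theta) and r(theta) is the regularization term,
// scaled by lambda/(2m) and excluding the bias coefficient theta_0.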
double LogisticRegressionImpl::compute_cost(const Mat& _data, const Mat& _labels, const Mat& _init_theta)
{
    CV_TRACE_FUNCTION();
    float llambda = 0; /* llambda is float rather than int to avoid integer division (issue #7924) */
    int m;
    int n;
    double cost = 0;
    double rparameter = 0;
    Mat theta_b;
    Mat theta_c;
    Mat d_a;
    Mat d_b;

    m = _data.rows;
    n = _data.cols;

    theta_b = _init_theta(Range(1, n), Range::all());

    if (params.norm != REG_DISABLE)
    {
        llambda = 1;
    }

    if(this->params.norm == LogisticRegression::REG_L1)
    {
        rparameter = (llambda/(2*m)) * sum(theta_b)[0];
    }
    else
    {
        // default to L2 regularization
        multiply(theta_b, theta_b, theta_c, 1);
        rparameter = (llambda/(2*m)) * sum(theta_c)[0];
    }

    d_a = calc_sigmoid(_data * _init_theta);
    log(d_a, d_a);
    multiply(d_a, _labels, d_a);

    // use the fact that: log(1 - sigmoid(x)) = log(sigmoid(-x))
    d_b = calc_sigmoid(- _data * _init_theta);
    log(d_b, d_b);
    multiply(d_b, 1-_labels, d_b);

    cost = (-1.0/m) * (sum(d_a)[0] + sum(d_b)[0]);
    cost = cost + rparameter;

    if(cvIsNaN( cost ) == 1)
    {
        CV_Error( CV_StsBadArg, "check training parameters. Invalid training classifier" );
    }

    return cost;
}

struct LogisticRegressionImpl_ComputeGradient_Impl : ParallelLoopBody
{
    const Mat* data;
    const Mat* theta;
    const Mat* pcal_a;
    Mat* gradient;
    double lambda;

    LogisticRegressionImpl_ComputeGradient_Impl(const Mat& _data, const Mat &_theta, const Mat& _pcal_a, const double _lambda, Mat & _gradient)
        : data(&_data)
        , theta(&_theta)
        , pcal_a(&_pcal_a)
        , gradient(&_gradient)
        , lambda(_lambda)
    {

    }

    void operator()(const cv::Range& r) const CV_OVERRIDE
    {
        const Mat& _data  = *data;
        const Mat &_theta = *theta;
        Mat & _gradient   = *gradient;
        const Mat & _pcal_a = *pcal_a;
        const int m = _data.rows;
        Mat pcal_ab;

        for (int ii = r.start; ii<r.end; ii++)
        {
            Mat pcal_b = _data(Range::all(), Range(ii,ii+1));
            multiply(_pcal_a, pcal_b, pcal_ab, 1);

            _gradient.row(ii) = (1.0/m)*sum(pcal_ab)[0] + (lambda/m) * _theta.row(ii);
        }
    }
};

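// Gradient of the regularized cost:
//   grad = (1/m) * X^T * (sigmoid(X*theta) - y) + (lambda/m) * theta
// The bias row (row 0) is not regularized; the remaining rows are
// computed in parallel by the invoker above.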
void LogisticRegressionImpl::compute_gradient(const Mat& _data, const Mat& _labels, const Mat &_theta, const double _lambda, Mat & _gradient )
{
    CV_TRACE_FUNCTION();
    const int m = _data.rows;
    Mat pcal_a, pcal_b, pcal_ab;

    const Mat z = _data * _theta;

    CV_Assert( _gradient.rows == _theta.rows && _gradient.cols == _theta.cols );

    pcal_a = calc_sigmoid(z) - _labels;
    pcal_b = _data(Range::all(), Range(0,1));
    multiply(pcal_a, pcal_b, pcal_ab, 1);

    _gradient.row(0) = ((float)1/m) * sum(pcal_ab)[0];

    // compute rows 1..n-1 of the gradient in parallel
    LogisticRegressionImpl_ComputeGradient_Impl invoker(_data, _theta, pcal_a, _lambda, _gradient);
    cv::parallel_for_(cv::Range(1, _gradient.rows), invoker);
}


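// Batch gradient descent: for num_iters iterations, update
//   theta := theta - (alpha/m) * gradient
// using all m training rows in every step.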
Mat LogisticRegressionImpl::batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta)
{
    CV_TRACE_FUNCTION();
    // implements batch gradient descent
    if(this->params.alpha<=0)
    {
        CV_Error( CV_StsBadArg, "check training parameters (learning rate) for the classifier" );
    }

    if(this->params.num_iters <= 0)
    {
        CV_Error( CV_StsBadArg, "number of iterations cannot be zero or a negative number" );
    }

    int llambda = 0;
    int m;
    Mat theta_p = _init_theta.clone();
    Mat gradient( theta_p.rows, theta_p.cols, theta_p.type() );
    m = _data.rows;

    if (params.norm != REG_DISABLE)
    {
        llambda = 1;
    }

    for(int i = 0;i<this->params.num_iters;i++)
    {
        // compute_cost is called only for its side effect of raising an
        // error if the cost becomes NaN; its return value is unused here
        compute_cost(_data, _labels, theta_p);

        compute_gradient( _data, _labels, theta_p, llambda, gradient );

        theta_p = theta_p - ( static_cast<double>(this->params.alpha)/m)*gradient;
    }
    return theta_p;
}

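// Mini-batch gradient descent: each of term_crit.maxCount updates uses only
// the next mini_batch_size rows, wrapping around to the first row once all
// rows have been visited.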
Mat LogisticRegressionImpl::mini_batch_gradient_descent(const Mat& _data, const Mat& _labels, const Mat& _init_theta)
{
    // implements mini-batch gradient descent
    int lambda_l = 0;
    int m;
    int j = 0;
    int size_b = this->params.mini_batch_size;

    if(this->params.mini_batch_size <= 0 || this->params.alpha == 0)
    {
        CV_Error( CV_StsBadArg, "check training parameters for the classifier" );
    }

    if(this->params.num_iters <= 0)
    {
        CV_Error( CV_StsBadArg, "number of iterations cannot be zero or a negative number" );
    }

    Mat theta_p = _init_theta.clone();
    Mat gradient( theta_p.rows, theta_p.cols, theta_p.type() );
    Mat data_d;
    Mat labels_l;

    if (params.norm != REG_DISABLE)
    {
        lambda_l = 1;
    }

    for(int i = 0;i<this->params.term_crit.maxCount;i++)
    {
        if(j+size_b<=_data.rows)
        {
            data_d = _data(Range(j,j+size_b), Range::all());
            labels_l = _labels(Range(j,j+size_b),Range::all());
        }
        else
        {
            data_d = _data(Range(j, _data.rows), Range::all());
            labels_l = _labels(Range(j, _labels.rows),Range::all());
        }

        m = data_d.rows;

        // compute_cost is called only for its side effect of raising an
        // error if the cost becomes NaN; its return value is unused here
        compute_cost(data_d, labels_l, theta_p);

        compute_gradient(data_d, labels_l, theta_p, lambda_l, gradient);

        theta_p = theta_p - ( static_cast<double>(this->params.alpha)/m)*gradient;

        j += this->params.mini_batch_size;

        // wrap around once all rows have been visited
        if (j >= _data.rows) {
            j = 0;
        }
    }
    return theta_p;
}

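// Example: user labels {3, 7, 7, 9} are mapped forward to internal labels
// {0, 1, 1, 2}; the reverse map restores 0->3, 1->7, 2->9 at predict time.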
bool LogisticRegressionImpl::set_label_map(const Mat &_labels_i)
{
    // build two maps that translate between user-defined labels and the
    // contiguous internal labels 0..num_classes-1, in both directions
    int ii = 0;
    Mat labels;

    this->labels_o = Mat(0,1, CV_8U);
    this->labels_n = Mat(0,1, CV_8U);

    _labels_i.convertTo(labels, CV_32S);

    for(int i = 0;i<labels.rows;i++)
    {
        this->forward_mapper[labels.at<int>(i)] += 1;
    }

    for(map<int,int>::iterator it = this->forward_mapper.begin(); it != this->forward_mapper.end(); ++it)
    {
        this->forward_mapper[it->first] = ii;
        this->labels_o.push_back(it->first);
        this->labels_n.push_back(ii);
        ii += 1;
    }

    for(map<int,int>::iterator it = this->forward_mapper.begin(); it != this->forward_mapper.end(); ++it)
    {
        this->reverse_mapper[it->second] = it->first;
    }

    return true;
}

Mat LogisticRegressionImpl::remap_labels(const Mat& _labels_i, const map<int, int>& lmap) const
{
    Mat labels;
    _labels_i.convertTo(labels, CV_32S);

    Mat new_labels = Mat::zeros(labels.rows, labels.cols, labels.type());

    CV_Assert( !lmap.empty() );

    for(int i =0;i<labels.rows;i++)
    {
        map<int, int>::const_iterator val = lmap.find(labels.at<int>(i,0));
        CV_Assert(val != lmap.end());
        new_labels.at<int>(i,0) = val->second;
    }
    return new_labels;
}

void LogisticRegressionImpl::clear()
{
    this->learnt_thetas.release();
    this->labels_o.release();
    this->labels_n.release();
}

void LogisticRegressionImpl::write(FileStorage& fs) const
{
    // check that the storage is open
    if(!fs.isOpened())
    {
        CV_Error(CV_StsBadArg,"file can't be opened. Check the file path");
    }
    writeFormat(fs);
    string desc = "Logistic Regression Classifier";
    fs<<"classifier"<<desc.c_str();
    fs<<"alpha"<<this->params.alpha;
    fs<<"iterations"<<this->params.num_iters;
    fs<<"norm"<<this->params.norm;
    fs<<"train_method"<<this->params.train_method;
    if(this->params.train_method == LogisticRegression::MINI_BATCH)
    {
        fs<<"mini_batch_size"<<this->params.mini_batch_size;
    }
    fs<<"learnt_thetas"<<this->learnt_thetas;
    fs<<"n_labels"<<this->labels_n;
    fs<<"o_labels"<<this->labels_o;
}

void LogisticRegressionImpl::read(const FileNode& fn)
{
    // check if empty
    if(fn.empty())
    {
        CV_Error( CV_StsBadArg, "empty FileNode object" );
    }

    this->params.alpha = (double)fn["alpha"];
    this->params.num_iters = (int)fn["iterations"];
    this->params.norm = (int)fn["norm"];
    this->params.train_method = (int)fn["train_method"];

    if(this->params.train_method == LogisticRegression::MINI_BATCH)
    {
        this->params.mini_batch_size = (int)fn["mini_batch_size"];
    }

    fn["learnt_thetas"] >> this->learnt_thetas;
    fn["o_labels"] >> this->labels_o;
    fn["n_labels"] >> this->labels_n;

    for(int ii =0;ii<labels_o.rows;ii++)
    {
        this->forward_mapper[labels_o.at<int>(ii,0)] = labels_n.at<int>(ii,0);
        this->reverse_mapper[labels_n.at<int>(ii,0)] = labels_o.at<int>(ii,0);
    }
}

} // namespace ml
} // namespace cv

/* End of file. */