Commit 2385a587 authored by Alexander Alekhin

next(ml): eliminate dummy interface class ANN_MLP_ANNEAL

parent e567135e
......@@ -1503,33 +1503,33 @@ public:
/** ANNEAL: Update initial temperature.
It must be \>=0. Default value is 10.*/
/** @see setAnnealInitialT */
CV_WRAP double getAnnealInitialT() const;
CV_WRAP virtual double getAnnealInitialT() const = 0;
/** @copybrief getAnnealInitialT @see getAnnealInitialT */
CV_WRAP void setAnnealInitialT(double val);
CV_WRAP virtual void setAnnealInitialT(double val) = 0;
/** ANNEAL: Update final temperature.
It must be \>=0 and less than initialT. Default value is 0.1.*/
/** @see setAnnealFinalT */
CV_WRAP double getAnnealFinalT() const;
CV_WRAP virtual double getAnnealFinalT() const = 0;
/** @copybrief getAnnealFinalT @see getAnnealFinalT */
CV_WRAP void setAnnealFinalT(double val);
CV_WRAP virtual void setAnnealFinalT(double val) = 0;
/** ANNEAL: Update cooling ratio.
It must be \>0 and less than 1. Default value is 0.95.*/
/** @see setAnnealCoolingRatio */
CV_WRAP double getAnnealCoolingRatio() const;
CV_WRAP virtual double getAnnealCoolingRatio() const = 0;
/** @copybrief getAnnealCoolingRatio @see getAnnealCoolingRatio */
CV_WRAP void setAnnealCoolingRatio(double val);
CV_WRAP virtual void setAnnealCoolingRatio(double val) = 0;
/** ANNEAL: Update iteration per step.
It must be \>0 . Default value is 10.*/
/** @see setAnnealItePerStep */
CV_WRAP int getAnnealItePerStep() const;
CV_WRAP virtual int getAnnealItePerStep() const = 0;
/** @copybrief getAnnealItePerStep @see getAnnealItePerStep */
CV_WRAP void setAnnealItePerStep(int val);
CV_WRAP virtual void setAnnealItePerStep(int val) = 0;
/** @brief Set/initialize anneal RNG */
void setAnnealEnergyRNG(const RNG& rng);
virtual void setAnnealEnergyRNG(const RNG& rng) = 0;
/** possible activation functions */
enum ActivationFunctions {
......@@ -1586,6 +1586,10 @@ public:
};
#ifndef DISABLE_OPENCV_3_COMPATIBILITY
typedef ANN_MLP ANN_MLP_ANNEAL;
#endif
/****************************************************************************************\
* Logistic Regression *
\****************************************************************************************/
......@@ -1870,43 +1874,6 @@ CV_EXPORTS void randMVNormal( InputArray mean, InputArray cov, int nsamples, Out
CV_EXPORTS void createConcentricSpheresTestSet( int nsamples, int nfeatures, int nclasses,
OutputArray samples, OutputArray responses);
/** @brief Artificial Neural Networks - Multi-Layer Perceptrons.

Abstract interface exposing the simulated-annealing training parameters of ANN_MLP.
All members are pure virtual; concrete behavior lives in the implementation class.

@sa @ref ml_intro_ann
*/
class CV_EXPORTS_W ANN_MLP_ANNEAL : public ANN_MLP
{
public:
/** ANNEAL: Update initial temperature.
It must be \>=0. Default value is 10.*/
/** @see setAnnealInitialT */
CV_WRAP virtual double getAnnealInitialT() const = 0;
/** @copybrief getAnnealInitialT @see getAnnealInitialT */
CV_WRAP virtual void setAnnealInitialT(double val) = 0;
/** ANNEAL: Update final temperature.
It must be \>=0 and less than initialT. Default value is 0.1.*/
/** @see setAnnealFinalT */
CV_WRAP virtual double getAnnealFinalT() const = 0;
/** @copybrief getAnnealFinalT @see getAnnealFinalT */
CV_WRAP virtual void setAnnealFinalT(double val) = 0;
/** ANNEAL: Update cooling ratio.
It must be \>0 and less than 1. Default value is 0.95.*/
/** @see setAnnealCoolingRatio */
CV_WRAP virtual double getAnnealCoolingRatio() const = 0;
/** @copybrief getAnnealCoolingRatio @see getAnnealCoolingRatio */
CV_WRAP virtual void setAnnealCoolingRatio(double val) = 0;
/** ANNEAL: Update iteration per step.
It must be \>0 . Default value is 10.*/
/** @see setAnnealItePerStep */
CV_WRAP virtual int getAnnealItePerStep() const = 0;
/** @copybrief getAnnealItePerStep @see getAnnealItePerStep */
CV_WRAP virtual void setAnnealItePerStep(int val) = 0;
/** @brief Set/initialize anneal RNG */
virtual void setAnnealEnergyRNG(const RNG& rng) = 0;
};
/****************************************************************************************\
* Simulated annealing solver *
......
......@@ -141,79 +141,7 @@ protected:
};
double ANN_MLP::getAnnealInitialT() const
{
const ANN_MLP_ANNEAL* this_ = dynamic_cast<const ANN_MLP_ANNEAL*>(this);
if (!this_)
CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
return this_->getAnnealInitialT();
}
void ANN_MLP::setAnnealInitialT(double val)
{
ANN_MLP_ANNEAL* this_ = dynamic_cast<ANN_MLP_ANNEAL*>(this);
if (!this_)
CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
this_->setAnnealInitialT(val);
}
double ANN_MLP::getAnnealFinalT() const
{
const ANN_MLP_ANNEAL* this_ = dynamic_cast<const ANN_MLP_ANNEAL*>(this);
if (!this_)
CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
return this_->getAnnealFinalT();
}
void ANN_MLP::setAnnealFinalT(double val)
{
ANN_MLP_ANNEAL* this_ = dynamic_cast<ANN_MLP_ANNEAL*>(this);
if (!this_)
CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
this_->setAnnealFinalT(val);
}
double ANN_MLP::getAnnealCoolingRatio() const
{
const ANN_MLP_ANNEAL* this_ = dynamic_cast<const ANN_MLP_ANNEAL*>(this);
if (!this_)
CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
return this_->getAnnealCoolingRatio();
}
void ANN_MLP::setAnnealCoolingRatio(double val)
{
ANN_MLP_ANNEAL* this_ = dynamic_cast<ANN_MLP_ANNEAL*>(this);
if (!this_)
CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
this_->setAnnealCoolingRatio(val);
}
int ANN_MLP::getAnnealItePerStep() const
{
const ANN_MLP_ANNEAL* this_ = dynamic_cast<const ANN_MLP_ANNEAL*>(this);
if (!this_)
CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
return this_->getAnnealItePerStep();
}
void ANN_MLP::setAnnealItePerStep(int val)
{
ANN_MLP_ANNEAL* this_ = dynamic_cast<ANN_MLP_ANNEAL*>(this);
if (!this_)
CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
this_->setAnnealItePerStep(val);
}
void ANN_MLP::setAnnealEnergyRNG(const RNG& rng)
{
ANN_MLP_ANNEAL* this_ = dynamic_cast<ANN_MLP_ANNEAL*>(this);
if (!this_)
CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
this_->setAnnealEnergyRNG(rng);
}
class ANN_MLPImpl CV_FINAL : public ANN_MLP_ANNEAL
class ANN_MLPImpl CV_FINAL : public ANN_MLP
{
public:
ANN_MLPImpl()
......@@ -224,7 +152,7 @@ public:
setTrainMethod(ANN_MLP::RPROP, 0.1, FLT_EPSILON);
}
virtual ~ANN_MLPImpl() {}
virtual ~ANN_MLPImpl() CV_OVERRIDE {}
inline TermCriteria getTermCriteria() const CV_OVERRIDE { return params.termCrit; }
inline void setTermCriteria(TermCriteria val) CV_OVERRIDE { params.termCrit = val; }
......
......@@ -283,7 +283,7 @@ TEST_P(ML_ANN_METHOD, Test)
#ifdef GENERATE_TESTDATA
{
Ptr<ml::ANN_MLP> xx = ml::ANN_MLP_ANNEAL::create();
Ptr<ml::ANN_MLP> xx = ml::ANN_MLP::create();
Mat_<int> layerSizesXX(1, 4);
layerSizesXX(0, 0) = tdata->getNVars();
layerSizesXX(0, 1) = 30;
......@@ -303,7 +303,7 @@ TEST_P(ML_ANN_METHOD, Test)
{
FileStorage fs;
fs.open(dataname + "_init_weight.yml.gz", FileStorage::READ);
Ptr<ml::ANN_MLP> x = ml::ANN_MLP_ANNEAL::create();
Ptr<ml::ANN_MLP> x = ml::ANN_MLP::create();
x->read(fs.root());
x->setTrainMethod(methodType);
if (methodType == ml::ANN_MLP::ANNEAL)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment