submodule / opencv / Commits / e83c9b08

Commit e83c9b08, authored Jul 27, 2010 by Maria Dimashova (parent 4f3de6eb)

    replaced Calonder descriptor implementation; added windowedMatchingMask()

Showing 3 changed files with 1306 additions and 288 deletions:

    modules/features2d/include/opencv2/features2d/features2d.hpp   +259  -126
    modules/features2d/src/calonder.cpp                             +1023 -1
    modules/features2d/src/descriptors.cpp                          +24   -161
modules/features2d/include/opencv2/features2d/features2d.hpp

@@ -605,6 +605,207 @@ protected:
/****************************************************************************************\
* Calonder Classifier *
\****************************************************************************************/
struct RTreeNode;

struct CV_EXPORTS BaseKeypoint
{
    int x;
    int y;
    IplImage* image;

    BaseKeypoint()
        : x(0), y(0), image(NULL)
    {}

    BaseKeypoint(int x, int y, IplImage* image)
        : x(x), y(y), image(image)
    {}
};

class CV_EXPORTS RandomizedTree
{
public:
    friend class RTreeClassifier;

    static const int PATCH_SIZE = 32;
    static const int DEFAULT_DEPTH = 9;
    static const int DEFAULT_VIEWS = 5000;
    static const size_t DEFAULT_REDUCED_NUM_DIM = 176;
    static const float LOWER_QUANT_PERC = .03f;
    static const float UPPER_QUANT_PERC = .92f;

    RandomizedTree();
    ~RandomizedTree();

    void train(std::vector<BaseKeypoint> const& base_set, RNG &rng,
               int depth, int views, size_t reduced_num_dim, int num_quant_bits);
    void train(std::vector<BaseKeypoint> const& base_set, RNG &rng,
               PatchGenerator &make_patch, int depth, int views, size_t reduced_num_dim,
               int num_quant_bits);

    // following two funcs are EXPERIMENTAL (do not use unless you know exactly what you do)
    static void quantizeVector(float *vec, int dim, int N, float bnds[2], int clamp_mode=0);
    static void quantizeVector(float *src, int dim, int N, float bnds[2], uint8_t *dst);

    // patch_data must be a 32x32 array (no row padding)
    float* getPosterior(uchar* patch_data);
    const float* getPosterior(uchar* patch_data) const;
    uint8_t* getPosterior2(uchar* patch_data);
    const uint8_t* getPosterior2(uchar* patch_data) const;

    void read(const char* file_name, int num_quant_bits);
    void read(std::istream &is, int num_quant_bits);
    void write(const char* file_name) const;
    void write(std::ostream &os) const;

    int classes() { return classes_; }
    int depth() { return depth_; }

    //void setKeepFloatPosteriors(bool b) { keep_float_posteriors_ = b; }
    void discardFloatPosteriors() { freePosteriors(1); }

    inline void applyQuantization(int num_quant_bits) { makePosteriors2(num_quant_bits); }

    // debug
    void savePosteriors(std::string url, bool append=false);
    void savePosteriors2(std::string url, bool append=false);

private:
    int classes_;
    int depth_;
    int num_leaves_;
    std::vector<RTreeNode> nodes_;
    float **posteriors_;        // 16-bytes aligned posteriors
    uint8_t **posteriors2_;     // 16-bytes aligned posteriors
    std::vector<int> leaf_counts_;

    void createNodes(int num_nodes, RNG &rng);
    void allocPosteriorsAligned(int num_leaves, int num_classes);
    void freePosteriors(int which);    // which: 1=posteriors_, 2=posteriors2_, 3=both
    void init(int classes, int depth, RNG &rng);
    void addExample(int class_id, uchar* patch_data);
    void finalize(size_t reduced_num_dim, int num_quant_bits);
    int getIndex(uchar* patch_data) const;
    inline float* getPosteriorByIndex(int index);
    inline const float* getPosteriorByIndex(int index) const;
    inline uint8_t* getPosteriorByIndex2(int index);
    inline const uint8_t* getPosteriorByIndex2(int index) const;
    //void makeRandomMeasMatrix(float *cs_phi, PHI_DISTR_TYPE dt, size_t reduced_num_dim);
    void convertPosteriorsToChar();
    void makePosteriors2(int num_quant_bits);
    void compressLeaves(size_t reduced_num_dim);
    void estimateQuantPercForPosteriors(float perc[2]);
};

inline uchar* getData(IplImage* image)
{
    return reinterpret_cast<uchar*>(image->imageData);
}

inline float* RandomizedTree::getPosteriorByIndex(int index)
{
    return const_cast<float*>(const_cast<const RandomizedTree*>(this)->getPosteriorByIndex(index));
}

inline const float* RandomizedTree::getPosteriorByIndex(int index) const
{
    return posteriors_[index];
}

inline uint8_t* RandomizedTree::getPosteriorByIndex2(int index)
{
    return const_cast<uint8_t*>(const_cast<const RandomizedTree*>(this)->getPosteriorByIndex2(index));
}

inline const uint8_t* RandomizedTree::getPosteriorByIndex2(int index) const
{
    return posteriors2_[index];
}

struct CV_EXPORTS RTreeNode
{
    short offset1, offset2;

    RTreeNode() {}
    RTreeNode(uchar x1, uchar y1, uchar x2, uchar y2)
        : offset1(y1*RandomizedTree::PATCH_SIZE + x1),
          offset2(y2*RandomizedTree::PATCH_SIZE + x2)
    {}

    //! Left child on 0, right child on 1
    inline bool operator() (uchar* patch_data) const
    {
        return patch_data[offset1] > patch_data[offset2];
    }
};

class CV_EXPORTS RTreeClassifier
{
public:
    static const int DEFAULT_TREES = 48;
    static const size_t DEFAULT_NUM_QUANT_BITS = 4;

    RTreeClassifier();

    void train(std::vector<BaseKeypoint> const& base_set,
               RNG &rng,
               int num_trees = RTreeClassifier::DEFAULT_TREES,
               int depth = RandomizedTree::DEFAULT_DEPTH,
               int views = RandomizedTree::DEFAULT_VIEWS,
               size_t reduced_num_dim = RandomizedTree::DEFAULT_REDUCED_NUM_DIM,
               int num_quant_bits = DEFAULT_NUM_QUANT_BITS);
    void train(std::vector<BaseKeypoint> const& base_set,
               RNG &rng,
               PatchGenerator &make_patch,
               int num_trees = RTreeClassifier::DEFAULT_TREES,
               int depth = RandomizedTree::DEFAULT_DEPTH,
               int views = RandomizedTree::DEFAULT_VIEWS,
               size_t reduced_num_dim = RandomizedTree::DEFAULT_REDUCED_NUM_DIM,
               int num_quant_bits = DEFAULT_NUM_QUANT_BITS);

    // sig must point to a memory block of at least classes()*sizeof(float|uint8_t) bytes
    void getSignature(IplImage *patch, uint8_t *sig) const;
    void getSignature(IplImage *patch, float *sig) const;
    void getSparseSignature(IplImage *patch, float *sig, float thresh) const;
    // TODO: deprecated in favor of getSignature overload, remove
    void getFloatSignature(IplImage *patch, float *sig) const { getSignature(patch, sig); }

    static int countNonZeroElements(float *vec, int n, double tol=1e-10);
    static inline void safeSignatureAlloc(uint8_t **sig, int num_sig=1, int sig_len=176);
    static inline uint8_t* safeSignatureAlloc(int num_sig=1, int sig_len=176);

    inline int classes() const { return classes_; }
    inline int original_num_classes() const { return original_num_classes_; }

    void setQuantization(int num_quant_bits);
    void discardFloatPosteriors();

    void read(const char* file_name);
    void read(std::istream &is);
    void write(const char* file_name) const;
    void write(std::ostream &os) const;

    // experimental and debug
    void saveAllFloatPosteriors(std::string file_url);
    void saveAllBytePosteriors(std::string file_url);
    void setFloatPosteriorsFromTextfile_176(std::string url);
    float countZeroElements();

    std::vector<RandomizedTree> trees_;

private:
    int classes_;
    int num_quant_bits_;
    mutable uint8_t **posteriors_;
    mutable uint16_t *ptemp_;
    int original_num_classes_;
    bool keep_floats_;
};
#if 0
class CV_EXPORTS CalonderClassifier
{
public:
...
...
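A minimal usage sketch of the RTreeClassifier API declared above (not part of this commit): build a training set of BaseKeypoints, train with the default parameters, then compute a float signature for one 32x32 patch. The image, keypoint list and the requirement that keypoints lie well inside the image are assumptions for illustration only.

// Usage sketch (illustration only; variables and border assumptions are placeholders).
#include <opencv2/features2d/features2d.hpp>
#include <vector>

void trainAndQuery( IplImage* trainImage, const std::vector<cv::Point>& pts, IplImage* patch32x32 )
{
    std::vector<cv::BaseKeypoint> baseSet;
    for( size_t i = 0; i < pts.size(); i++ )
        baseSet.push_back( cv::BaseKeypoint(pts[i].x, pts[i].y, trainImage) );

    cv::RNG rng;
    cv::RTreeClassifier classifier;
    classifier.train( baseSet, rng );   // defaults: 48 trees, depth 9, 5000 views, 176 dims, 4 bits

    // sig must hold at least classes() floats (see the getSignature comment above)
    std::vector<float> sig( classifier.classes() );
    classifier.getSignature( patch32x32, &sig[0] );
}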
@@ -645,6 +846,7 @@ public:
#endif
void read( const FileNode& fn );
void read( std::istream& is );
void write( FileStorage& fs ) const;
bool empty() const;
...
...
@@ -722,6 +924,7 @@ private:
vector<uchar> quantizedPosteriors;
#endif
};
#endif
/****************************************************************************************\
* One-Way Descriptor *
...
...
@@ -1339,9 +1542,8 @@ protected:
    SURF surf;
};

#if 0
template<typename T>
-class CalonderDescriptorExtractor : public DescriptorExtractor
+class CV_EXPORTS CalonderDescriptorExtractor : public DescriptorExtractor
{
public:
    CalonderDescriptorExtractor( const string& classifierFile );
...
...
@@ -1371,15 +1573,23 @@ void CalonderDescriptorExtractor<T>::compute( const cv::Mat& image,
/// @todo Check 16-byte aligned
    descriptors.create( keypoints.size(), classifier_.classes(), cv::DataType<T>::type );

-    IplImage ipl = (IplImage)image;
    int patchSize = RandomizedTree::PATCH_SIZE;
    int offset = patchSize / 2;
    for (size_t i = 0; i < keypoints.size(); ++i)
    {
-        cv::Point2f keypt = keypoints[i].pt;
-        cv::WImageView1_b patch = features::extractPatch(&ipl, keypt);
-        classifier_.getSignature( patch.Ipl(), descriptors.ptr<T>(i));
+        cv::Point2f pt = keypoints[i].pt;
+        IplImage ipl = image( Rect(pt.x - offset, pt.y - offset, patchSize, patchSize) );
+        classifier_.getSignature( &ipl, descriptors.ptr<T>(i));
    }
}
#endif

template<typename T>
void CalonderDescriptorExtractor<T>::read( const FileNode& fn )
{}

template<typename T>
void CalonderDescriptorExtractor<T>::write( FileStorage& fs ) const
{}

CV_EXPORTS Ptr<DescriptorExtractor> createDescriptorExtractor( const string& descriptorExtractorType );
...
...
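For clarity, the Rect-based crop that replaces the old WImageView helper above can be written as a small standalone step (illustration only, not part of this commit). Note that the committed loop does not guard against keypoints closer than patchSize/2 to the image border; that requirement is an assumption the caller has to satisfy.

// Illustration of the Rect-based patch crop used above (assumed border margin).
#include <opencv2/features2d/features2d.hpp>

void signatureAt( const cv::RTreeClassifier& classifier, const cv::Mat& image,
                  const cv::KeyPoint& kp, float* sig, int patchSize = 32 )
{
    int offset = patchSize / 2;
    cv::Rect roi( cvRound(kp.pt.x) - offset, cvRound(kp.pt.y) - offset, patchSize, patchSize );
    IplImage ipl = image( roi );              // header only; shares data with 'image'
    classifier.getSignature( &ipl, sig );     // sig must hold classifier.classes() floats
}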
@@ -1478,7 +1688,7 @@ public:
/*
* Index the descriptors training set
*/
-    void index();
+    virtual void index() = 0;
/*
* Find the best match for each descriptor from a query set
...
...
@@ -1574,18 +1784,15 @@ protected:
* Find matches; match() calls this. Must be implemented by the subclass.
* The mask may be empty.
*/
-    virtual void matchImpl( const Mat& descriptors_1, const Mat& descriptors_2,
-                            const Mat& mask, vector<int>& matches ) const = 0;
+    virtual void matchImpl( const Mat& query, const Mat& mask, vector<int>& matches ) const = 0;

    /*
     * Find matches; match() calls this. Must be implemented by the subclass.
     * The mask may be empty.
     */
-    virtual void matchImpl( const Mat& descriptors_1, const Mat& descriptors_2,
-                            const Mat& mask, vector<DMatch>& matches ) const = 0;
+    virtual void matchImpl( const Mat& query, const Mat& mask, vector<DMatch>& matches ) const = 0;

-    virtual void matchImpl( const Mat& descriptors_1, const Mat& descriptors_2, const Mat& mask,
-                            vector<vector<DMatch> >& matches, float threshold ) const = 0;
+    virtual void matchImpl( const Mat& query, const Mat& mask,
+                            vector<vector<DMatch> >& matches, float threshold ) const = 0;

    static bool possibleMatch( const Mat& mask, int index_1, int index_2 )
...
...
@@ -1614,36 +1821,36 @@ inline void DescriptorMatcher::add( const Mat& descriptors )
inline void DescriptorMatcher::match( const Mat& query, vector<int>& matches ) const
{
-    matchImpl( query, train, Mat(), matches );
+    matchImpl( query, Mat(), matches );
}

inline void DescriptorMatcher::match( const Mat& query, const Mat& mask,
                                      vector<int>& matches ) const
{
-    matchImpl( query, train, mask, matches );
+    matchImpl( query, mask, matches );
}

inline void DescriptorMatcher::match( const Mat& query, vector<DMatch>& matches ) const
{
-    matchImpl( query, train, Mat(), matches );
+    matchImpl( query, Mat(), matches );
}

inline void DescriptorMatcher::match( const Mat& query, const Mat& mask,
                                      vector<DMatch>& matches ) const
{
-    matchImpl( query, train, mask, matches );
+    matchImpl( query, mask, matches );
}

inline void DescriptorMatcher::match( const Mat& query, vector<vector<DMatch> >& matches,
                                      float threshold ) const
{
-    matchImpl( query, train, Mat(), matches, threshold );
+    matchImpl( query, Mat(), matches, threshold );
}

inline void DescriptorMatcher::match( const Mat& query, const Mat& mask,
                                      vector<vector<DMatch> >& matches, float threshold ) const
{
-    matchImpl( query, train, mask, matches, threshold );
+    matchImpl( query, mask, matches, threshold );
}
...
...
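A short sketch of how the reworked query/train API above is driven (illustration only, not part of this commit): the training descriptors are registered once via add(), after which match() takes only the query set and an optional mask.

#include <opencv2/features2d/features2d.hpp>
#include <vector>

void matchDescriptors( const cv::Mat& trainDescriptors, const cv::Mat& queryDescriptors )
{
    cv::BruteForceMatcher< cv::L2<float> > matcher;
    matcher.add( trainDescriptors );

    std::vector<cv::DMatch> matches;
    matcher.match( queryDescriptors, matches );   // no mask: every (query, train) pair is considered
}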
@@ -1666,26 +1873,22 @@ class CV_EXPORTS BruteForceMatcher : public DescriptorMatcher
{
public:
    BruteForceMatcher( Distance d = Distance() ) : distance(d) {}
    virtual void index() {}

protected:
-    virtual void matchImpl( const Mat& descriptors_1, const Mat& descriptors_2,
-                            const Mat& mask, vector<int>& matches ) const;
+    virtual void matchImpl( const Mat& query, const Mat& mask, vector<int>& matches ) const;

-    virtual void matchImpl( const Mat& descriptors_1, const Mat& descriptors_2,
-                            const Mat& mask, vector<DMatch>& matches ) const;
+    virtual void matchImpl( const Mat& query, const Mat& mask, vector<DMatch>& matches ) const;

-    virtual void matchImpl( const Mat& descriptors_1, const Mat& descriptors_2, const Mat& mask,
-                            vector<vector<DMatch> >& matches, float threshold ) const;
+    virtual void matchImpl( const Mat& query, const Mat& mask,
+                            vector<vector<DMatch> >& matches, float threshold ) const;

    Distance distance;
};

template<class Distance>
-inline void BruteForceMatcher<Distance>::matchImpl( const Mat& descriptors_1, const Mat& descriptors_2,
-                                                    const Mat& mask, vector<int>& matches ) const
+inline void BruteForceMatcher<Distance>::matchImpl( const Mat& query, const Mat& mask,
+                                                    vector<int>& matches ) const
{
    vector<DMatch> matchings;
-    matchImpl( descriptors_1, descriptors_2, mask, matchings );
+    matchImpl( query, mask, matchings );
    matches.clear();
    matches.resize( matchings.size() );
    for( size_t i = 0; i < matchings.size(); i++ )
...
...
@@ -1695,33 +1898,32 @@ void BruteForceMatcher<Distance>::matchImpl( const Mat& descriptors_1, const Mat
}

template<class Distance>
-inline void BruteForceMatcher<Distance>::matchImpl( const Mat& descriptors_1, const Mat& descriptors_2,
-                                                    const Mat& mask, vector<DMatch>& matches ) const
+inline void BruteForceMatcher<Distance>::matchImpl( const Mat& query, const Mat& mask,
+                                                    vector<DMatch>& matches ) const
{
    typedef typename Distance::ValueType ValueType;
    typedef typename Distance::ResultType DistanceType;

-    assert( mask.empty() || (mask.rows == descriptors_1.rows && mask.cols == descriptors_2.rows) );
+    assert( mask.empty() || (mask.rows == query.rows && mask.cols == train.rows) );

-    assert( descriptors_1.cols == descriptors_2.cols || descriptors_1.empty() || descriptors_2.empty() );
-    assert( DataType<ValueType>::type == descriptors_1.type() || descriptors_1.empty() );
-    assert( DataType<ValueType>::type == descriptors_2.type() || descriptors_2.empty() );
+    assert( query.cols == train.cols || query.empty() || train.empty() );
+    assert( DataType<ValueType>::type == query.type() || query.empty() );
+    assert( DataType<ValueType>::type == train.type() || train.empty() );

-    int dimension = descriptors_1.cols;
+    int dimension = query.cols;
    matches.clear();
-    matches.resize( descriptors_1.rows );
+    matches.resize( query.rows );

-    for( int i = 0; i < descriptors_1.rows; i++ )
+    for( int i = 0; i < query.rows; i++ )
    {
-        const ValueType* d1 = (const ValueType*)(descriptors_1.data + descriptors_1.step*i);
+        const ValueType* d1 = (const ValueType*)(query.data + query.step*i);
        int matchIndex = -1;
        DistanceType matchDistance = std::numeric_limits<DistanceType>::max();

-        for( int j = 0; j < descriptors_2.rows; j++ )
+        for( int j = 0; j < train.rows; j++ )
        {
            if( possibleMatch(mask, i, j) )
            {
-                const ValueType* d2 = (const ValueType*)(descriptors_2.data + descriptors_2.step*j);
+                const ValueType* d2 = (const ValueType*)(train.data + train.step*j);
                DistanceType curDistance = distance(d1, d2, dimension);
                if( curDistance < matchDistance )
                {
...
...
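For reference, the loop above is a plain exhaustive nearest-neighbour search: for every query row it keeps the training row with the smallest Distance value among the pairs the mask allows. A compact sketch of the same idea outside the class (illustration only; the Distance functor and matrices are placeholders):

// Sketch of the brute-force rule implemented above (not part of this commit).
#include <opencv2/core/core.hpp>
#include <limits>

int nearestTrainRow( const cv::Mat& query, int i, const cv::Mat& train )
{
    int best = -1;
    float bestDist = std::numeric_limits<float>::max();
    for( int j = 0; j < train.rows; j++ )
    {
        float d = (float)cv::norm( query.row(i), train.row(j), cv::NORM_L2 );
        if( d < bestDist ) { bestDist = d; best = j; }
    }
    return best;
}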
@@ -1743,31 +1945,30 @@ void BruteForceMatcher<Distance>::matchImpl( const Mat& descriptors_1, const Mat
}

template<class Distance>
-inline void BruteForceMatcher<Distance>::matchImpl( const Mat& descriptors_1, const Mat& descriptors_2, const Mat& mask,
-                                                    vector<vector<DMatch> >& matches, float threshold ) const
+inline void BruteForceMatcher<Distance>::matchImpl( const Mat& query, const Mat& mask,
+                                                    vector<vector<DMatch> >& matches, float threshold ) const
{
    typedef typename Distance::ValueType ValueType;
    typedef typename Distance::ResultType DistanceType;

-    assert( mask.empty() || (mask.rows == descriptors_1.rows && mask.cols == descriptors_2.rows) );
+    assert( mask.empty() || (mask.rows == query.rows && mask.cols == train.rows) );

-    assert( descriptors_1.cols == descriptors_2.cols || descriptors_1.empty() || descriptors_2.empty() );
-    assert( DataType<ValueType>::type == descriptors_1.type() || descriptors_1.empty() );
-    assert( DataType<ValueType>::type == descriptors_2.type() || descriptors_2.empty() );
+    assert( query.cols == train.cols || query.empty() || train.empty() );
+    assert( DataType<ValueType>::type == query.type() || query.empty() );
+    assert( DataType<ValueType>::type == train.type() || train.empty() );

-    int dimension = descriptors_1.cols;
+    int dimension = query.cols;
    matches.clear();
-    matches.resize( descriptors_1.rows );
+    matches.resize( query.rows );

-    for( int i = 0; i < descriptors_1.rows; i++ )
+    for( int i = 0; i < query.rows; i++ )
    {
-        const ValueType* d1 = (const ValueType*)(descriptors_1.data + descriptors_1.step*i);
+        const ValueType* d1 = (const ValueType*)(query.data + query.step*i);

-        for( int j = 0; j < descriptors_2.rows; j++ )
+        for( int j = 0; j < train.rows; j++ )
        {
            if( possibleMatch(mask, i, j) )
            {
-                const ValueType* d2 = (const ValueType*)(descriptors_2.data + descriptors_2.step*j);
+                const ValueType* d2 = (const ValueType*)(train.data + train.step*j);
                DistanceType curDistance = distance(d1, d2, dimension);
                if( curDistance < threshold )
                {
...
...
@@ -1783,8 +1984,7 @@ void BruteForceMatcher<Distance>::matchImpl( const Mat& descriptors_1, const Mat
}

template<>
-void BruteForceMatcher<L2<float> >::matchImpl( const Mat& descriptors_1, const Mat& descriptors_2,
-                                               const Mat& mask, vector<int>& matches ) const;
+void BruteForceMatcher<L2<float> >::matchImpl( const Mat& query, const Mat& mask, vector<int>& matches ) const;

CV_EXPORTS Ptr<DescriptorMatcher> createDescriptorMatcher( const string& descriptorMatcherType );
...
...
@@ -1952,76 +2152,6 @@ protected:
    Params params;
};

/*
 * CalonderDescriptorMatch
 */
#if 0
class CV_EXPORTS CalonderDescriptorMatch : public GenericDescriptorMatch
{
public:
class Params
{
public:
static const int DEFAULT_NUM_TREES = 80;
static const int DEFAULT_DEPTH = 9;
static const int DEFAULT_VIEWS = 5000;
static const size_t DEFAULT_REDUCED_NUM_DIM = 176;
static const size_t DEFAULT_NUM_QUANT_BITS = 4;
static const int DEFAULT_PATCH_SIZE = PATCH_SIZE;
Params( const RNG& _rng = RNG(), const PatchGenerator& _patchGen = PatchGenerator(),
int _numTrees=DEFAULT_NUM_TREES,
int _depth=DEFAULT_DEPTH,
int _views=DEFAULT_VIEWS,
size_t _reducedNumDim=DEFAULT_REDUCED_NUM_DIM,
int _numQuantBits=DEFAULT_NUM_QUANT_BITS,
bool _printStatus=true,
int _patchSize=DEFAULT_PATCH_SIZE );
Params( const string& _filename );
RNG rng;
PatchGenerator patchGen;
int numTrees;
int depth;
int views;
int patchSize;
size_t reducedNumDim;
int numQuantBits;
bool printStatus;
string filename;
};
CalonderDescriptorMatch();
CalonderDescriptorMatch( const Params& _params );
virtual ~CalonderDescriptorMatch();
void initialize( const Params& _params );
virtual void add( const Mat& image, vector<KeyPoint>& keypoints );
virtual void match( const Mat& image, vector<KeyPoint>& keypoints, vector<int>& indices );
virtual void classify( const Mat& image, vector<KeyPoint>& keypoints );
virtual void clear ();
virtual void read( const FileNode &fn );
virtual void write( FileStorage& fs ) const;
protected:
void trainRTreeClassifier();
Mat extractPatch( const Mat& image, const Point& pt, int patchSize ) const;
void calcBestProbAndMatchIdx( const Mat& image, const Point& pt,
float& bestProb, int& bestMatchIdx, float* signature );
Ptr<RTreeClassifier> classifier;
Params params;
};
#endif
/*
* FernDescriptorMatch
*/
...
...
@@ -2128,6 +2258,9 @@ protected:
//vector<int> classIds;
};
CV_EXPORTS Mat windowedMatchingMask( const vector<KeyPoint>& keypoints1, const vector<KeyPoint>& keypoints2,
                                     float maxDeltaX, float maxDeltaY );

struct CV_EXPORTS DrawMatchesFlags
{
...
...
modules/features2d/src/calonder.cpp

@@ -42,12 +42,957 @@
#include "precomp.hpp"
#include <cstdio>
#include <iostream>
#include <fstream>
using namespace std;

const int progressBarSize = 50;

class CSMatrixGenerator
{
public:
    typedef enum { PDT_GAUSS=1, PDT_BERNOULLI, PDT_DBFRIENDLY } PHI_DISTR_TYPE;
    ~CSMatrixGenerator();
    static float* getCSMatrix(int m, int n, PHI_DISTR_TYPE dt);     // do NOT free returned pointer

private:
    static float *cs_phi_;    // matrix for compressive sensing
    static int cs_phi_m_, cs_phi_n_;
};

float* CSMatrixGenerator::getCSMatrix(int m, int n, PHI_DISTR_TYPE dt)
{
    assert(m <= n);

    if (cs_phi_m_ != m || cs_phi_n_ != n || cs_phi_ == NULL)
    {
        if (cs_phi_) delete [] cs_phi_;
        cs_phi_ = new float[m*n];
    }
#if 0 // debug - load the random matrix from a file (for reproducability of results)
//assert(m == 176);
//assert(n == 500);
//const char *phi = "/u/calonder/temp/dim_red/kpca_phi.txt";
const char *phi = "/u/calonder/temp/dim_red/debug_phi.txt";
std::ifstream ifs(phi);
for (size_t i=0; i<m*n; ++i) {
if (!ifs.good()) {
printf("[ERROR] RandomizedTree::makeRandomMeasMatrix: problem reading '%s'\n", phi);
exit(0);
}
ifs >> cs_phi[i];
}
ifs.close();
static bool warned=false;
if (!warned) {
printf("[NOTE] RT: reading %ix%i PHI matrix from '%s'...\n", m, n, phi);
warned=true;
}
return;
#endif
    float *cs_phi = cs_phi_;

    if (m == n)
    {
        // special case - set to 0 for safety
        memset(cs_phi, 0, m*n*sizeof(float));
        printf("[WARNING] %s:%i: square CS matrix (-> no reduction)\n", __FILE__, __LINE__);
    }
    else
    {
        cv::RNG rng(23);

        // par is distr param, cf 'Favorable JL Distributions' (Baraniuk et al, 2006)
        if (dt == PDT_GAUSS)
        {
            float par = (float)(1./m);
            for (int i=0; i<m*n; ++i)
                *cs_phi++ = rng.gaussian(par);
        }
        else if (dt == PDT_BERNOULLI)
        {
            float par = (float)(1./sqrt(m));
            for (int i=0; i<m*n; ++i)
                *cs_phi++ = (rng(2)==0 ? par : -par);
        }
        else if (dt == PDT_DBFRIENDLY)
        {
            float par = (float)sqrt(3./m);
            for (int i=0; i<m*n; ++i)
            {
                int i = rng(6);
                *cs_phi++ = (i==0 ? par : (i==1 ? -par : 0.f));
            }
        }
        else
            throw("PHI_DISTR_TYPE not implemented.");
    }

    return cs_phi_;
}

CSMatrixGenerator::~CSMatrixGenerator()
{
    if (cs_phi_) delete [] cs_phi_;
    cs_phi_ = NULL;
}

float* CSMatrixGenerator::cs_phi_   = NULL;
int    CSMatrixGenerator::cs_phi_m_ = 0;
int    CSMatrixGenerator::cs_phi_n_ = 0;

inline void addVec(int size, const float* src1, const float* src2, float* dst)
{
    while (--size >= 0)
    {
        *dst = *src1 + *src2;
        ++dst; ++src1; ++src2;
    }
}
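The matrix returned by getCSMatrix() is meant to be applied as a single matrix product that projects an n-dimensional class posterior onto m << n dimensions; this mirrors compressLeaves() further down in this file. A small sketch of that step (illustration only; the buffer names are placeholders and the function is assumed to live in this translation unit):

// Sketch of applying the compressive-sensing matrix (mirrors compressLeaves()).
static void projectPosterior( float* posterior, int n, float* reduced, int m )
{
    float* phi = CSMatrixGenerator::getCSMatrix( m, n, CSMatrixGenerator::PDT_BERNOULLI );
    cv::Mat A( m, n, CV_32FC1, phi );        // m x n random projection
    cv::Mat X( n, 1, CV_32FC1, posterior );  // original posterior
    cv::Mat Y( m, 1, CV_32FC1, reduced );    // reduced posterior (output)
    Y = A * X;                               // same pattern as used in compressLeaves()
}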
// sum up 50 byte vectors of length 176
// assume 4 bits max for input vector values
// final shift is 2 bits right
// temp buffer should be twice as long as signature
// sig and buffer need not be initialized
inline void sum_50t_176c(uint8_t **pp, uint8_t *sig, uint16_t *temp)
{
#if CV_SSE2
    __m128i acc, *acc1, *acc2, *acc3, *acc4, tzero;
    __m128i *ssig, *ttemp;

    ssig  = (__m128i *)sig;
    ttemp = (__m128i *)temp;

    // empty ttemp[]
    tzero = _mm_set_epi32(0, 0, 0, 0);
    for (int i=0; i<22; i++)
        ttemp[i] = tzero;

    for (int j=0; j<48; j+=16)
    {
        // empty ssig[]
        for (int i=0; i<11; i++)
            ssig[i] = tzero;

        for (int i=j; i<j+16; i+=4)     // 4 columns at a time, to 16
        {
            acc1 = (__m128i *)pp[i];
            acc2 = (__m128i *)pp[i+1];
            acc3 = (__m128i *)pp[i+2];
            acc4 = (__m128i *)pp[i+3];

            // add next four columns
            acc = _mm_adds_epu8(acc1[0],acc2[0]);
            acc = _mm_adds_epu8(acc,acc3[0]);
            acc = _mm_adds_epu8(acc,acc4[1]);
            ssig[0] = _mm_adds_epu8(acc,ssig[0]);
            // add four columns
            acc = _mm_adds_epu8(acc1[1],acc2[1]);
            acc = _mm_adds_epu8(acc,acc3[1]);
            acc = _mm_adds_epu8(acc,acc4[1]);
            ssig[1] = _mm_adds_epu8(acc,ssig[1]);
            // add four columns
            acc = _mm_adds_epu8(acc1[2],acc2[2]);
            acc = _mm_adds_epu8(acc,acc3[2]);
            acc = _mm_adds_epu8(acc,acc4[2]);
            ssig[2] = _mm_adds_epu8(acc,ssig[2]);
            // add four columns
            acc = _mm_adds_epu8(acc1[3],acc2[3]);
            acc = _mm_adds_epu8(acc,acc3[3]);
            acc = _mm_adds_epu8(acc,acc4[3]);
            ssig[3] = _mm_adds_epu8(acc,ssig[3]);
            // add four columns
            acc = _mm_adds_epu8(acc1[4],acc2[4]);
            acc = _mm_adds_epu8(acc,acc3[4]);
            acc = _mm_adds_epu8(acc,acc4[4]);
            ssig[4] = _mm_adds_epu8(acc,ssig[4]);
            // add four columns
            acc = _mm_adds_epu8(acc1[5],acc2[5]);
            acc = _mm_adds_epu8(acc,acc3[5]);
            acc = _mm_adds_epu8(acc,acc4[5]);
            ssig[5] = _mm_adds_epu8(acc,ssig[5]);
            // add four columns
            acc = _mm_adds_epu8(acc1[6],acc2[6]);
            acc = _mm_adds_epu8(acc,acc3[6]);
            acc = _mm_adds_epu8(acc,acc4[6]);
            ssig[6] = _mm_adds_epu8(acc,ssig[6]);
            // add four columns
            acc = _mm_adds_epu8(acc1[7],acc2[7]);
            acc = _mm_adds_epu8(acc,acc3[7]);
            acc = _mm_adds_epu8(acc,acc4[7]);
            ssig[7] = _mm_adds_epu8(acc,ssig[7]);
            // add four columns
            acc = _mm_adds_epu8(acc1[8],acc2[8]);
            acc = _mm_adds_epu8(acc,acc3[8]);
            acc = _mm_adds_epu8(acc,acc4[8]);
            ssig[8] = _mm_adds_epu8(acc,ssig[8]);
            // add four columns
            acc = _mm_adds_epu8(acc1[9],acc2[9]);
            acc = _mm_adds_epu8(acc,acc3[9]);
            acc = _mm_adds_epu8(acc,acc4[9]);
            ssig[9] = _mm_adds_epu8(acc,ssig[9]);
            // add four columns
            acc = _mm_adds_epu8(acc1[10],acc2[10]);
            acc = _mm_adds_epu8(acc,acc3[10]);
            acc = _mm_adds_epu8(acc,acc4[10]);
            ssig[10] = _mm_adds_epu8(acc,ssig[10]);
        }

        // unpack to ttemp buffer and add
        ttemp[0]  = _mm_add_epi16(_mm_unpacklo_epi8(ssig[0], tzero),  ttemp[0]);
        ttemp[1]  = _mm_add_epi16(_mm_unpackhi_epi8(ssig[0], tzero),  ttemp[1]);
        ttemp[2]  = _mm_add_epi16(_mm_unpacklo_epi8(ssig[1], tzero),  ttemp[2]);
        ttemp[3]  = _mm_add_epi16(_mm_unpackhi_epi8(ssig[1], tzero),  ttemp[3]);
        ttemp[4]  = _mm_add_epi16(_mm_unpacklo_epi8(ssig[2], tzero),  ttemp[4]);
        ttemp[5]  = _mm_add_epi16(_mm_unpackhi_epi8(ssig[2], tzero),  ttemp[5]);
        ttemp[6]  = _mm_add_epi16(_mm_unpacklo_epi8(ssig[3], tzero),  ttemp[6]);
        ttemp[7]  = _mm_add_epi16(_mm_unpackhi_epi8(ssig[3], tzero),  ttemp[7]);
        ttemp[8]  = _mm_add_epi16(_mm_unpacklo_epi8(ssig[4], tzero),  ttemp[8]);
        ttemp[9]  = _mm_add_epi16(_mm_unpackhi_epi8(ssig[4], tzero),  ttemp[9]);
        ttemp[10] = _mm_add_epi16(_mm_unpacklo_epi8(ssig[5], tzero),  ttemp[10]);
        ttemp[11] = _mm_add_epi16(_mm_unpackhi_epi8(ssig[5], tzero),  ttemp[11]);
        ttemp[12] = _mm_add_epi16(_mm_unpacklo_epi8(ssig[6], tzero),  ttemp[12]);
        ttemp[13] = _mm_add_epi16(_mm_unpackhi_epi8(ssig[6], tzero),  ttemp[13]);
        ttemp[14] = _mm_add_epi16(_mm_unpacklo_epi8(ssig[7], tzero),  ttemp[14]);
        ttemp[15] = _mm_add_epi16(_mm_unpackhi_epi8(ssig[7], tzero),  ttemp[15]);
        ttemp[16] = _mm_add_epi16(_mm_unpacklo_epi8(ssig[8], tzero),  ttemp[16]);
        ttemp[17] = _mm_add_epi16(_mm_unpackhi_epi8(ssig[8], tzero),  ttemp[17]);
        ttemp[18] = _mm_add_epi16(_mm_unpacklo_epi8(ssig[9], tzero),  ttemp[18]);
        ttemp[19] = _mm_add_epi16(_mm_unpackhi_epi8(ssig[9], tzero),  ttemp[19]);
        ttemp[20] = _mm_add_epi16(_mm_unpacklo_epi8(ssig[10], tzero), ttemp[20]);
        ttemp[21] = _mm_add_epi16(_mm_unpackhi_epi8(ssig[10], tzero), ttemp[21]);
    }

    // create ssignature from 16-bit result
    ssig[0]  = _mm_packus_epi16(_mm_srai_epi16(ttemp[0],2),  _mm_srai_epi16(ttemp[1],2));
    ssig[1]  = _mm_packus_epi16(_mm_srai_epi16(ttemp[2],2),  _mm_srai_epi16(ttemp[3],2));
    ssig[2]  = _mm_packus_epi16(_mm_srai_epi16(ttemp[4],2),  _mm_srai_epi16(ttemp[5],2));
    ssig[3]  = _mm_packus_epi16(_mm_srai_epi16(ttemp[6],2),  _mm_srai_epi16(ttemp[7],2));
    ssig[4]  = _mm_packus_epi16(_mm_srai_epi16(ttemp[8],2),  _mm_srai_epi16(ttemp[9],2));
    ssig[5]  = _mm_packus_epi16(_mm_srai_epi16(ttemp[10],2), _mm_srai_epi16(ttemp[11],2));
    ssig[6]  = _mm_packus_epi16(_mm_srai_epi16(ttemp[12],2), _mm_srai_epi16(ttemp[13],2));
    ssig[7]  = _mm_packus_epi16(_mm_srai_epi16(ttemp[14],2), _mm_srai_epi16(ttemp[15],2));
    ssig[8]  = _mm_packus_epi16(_mm_srai_epi16(ttemp[16],2), _mm_srai_epi16(ttemp[17],2));
    ssig[9]  = _mm_packus_epi16(_mm_srai_epi16(ttemp[18],2), _mm_srai_epi16(ttemp[19],2));
    ssig[10] = _mm_packus_epi16(_mm_srai_epi16(ttemp[20],2), _mm_srai_epi16(ttemp[21],2));
#else
    CV_Error( CV_StsNotImplemented, "Not supported without SSE2" );
#endif
}
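To make the data flow of the SSE routine explicit, a rough scalar equivalent is sketched below (illustration only): 48 byte posteriors of length 176 are accumulated into 16-bit temporaries, shifted right by two bits and saturated back to 8 bits. It ignores the intermediate 8-bit saturating adds the SSE version performs within each group of 16 trees.

// Rough scalar equivalent of sum_50t_176c() (illustration only).
static void sum_48t_176c_scalar( uint8_t** pp, uint8_t* sig, uint16_t* temp )
{
    for (int c = 0; c < 176; ++c)
        temp[c] = 0;
    for (int t = 0; t < 48; ++t)            // 48 trees, matching the j<48 loop above
        for (int c = 0; c < 176; ++c)
            temp[c] = (uint16_t)(temp[c] + pp[t][c]);
    for (int c = 0; c < 176; ++c)
    {
        int v = temp[c] >> 2;               // final shift is 2 bits right
        sig[c] = (uint8_t)(v > 255 ? 255 : v);
    }
}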
namespace cv
{

RandomizedTree::RandomizedTree()
  : posteriors_(NULL), posteriors2_(NULL)
{
}

RandomizedTree::~RandomizedTree()
{
    freePosteriors(3);
}

void RandomizedTree::createNodes(int num_nodes, RNG &rng)
{
    nodes_.reserve(num_nodes);
    for (int i = 0; i < num_nodes; ++i)
    {
        nodes_.push_back( RTreeNode(rng(RandomizedTree::PATCH_SIZE),
                                    rng(RandomizedTree::PATCH_SIZE),
                                    rng(RandomizedTree::PATCH_SIZE),
                                    rng(RandomizedTree::PATCH_SIZE)) );
    }
}

int RandomizedTree::getIndex(uchar* patch_data) const
{
    int index = 0;
    for (int d = 0; d < depth_; ++d)
    {
        int child_offset = nodes_[index](patch_data);
        index = 2*index + 1 + child_offset;
    }
    return index - nodes_.size();
}

void RandomizedTree::train(std::vector<BaseKeypoint> const& base_set, RNG &rng,
                           int depth, int views, size_t reduced_num_dim, int num_quant_bits)
{
    PatchGenerator make_patch;
    train(base_set, rng, make_patch, depth, views, reduced_num_dim, num_quant_bits);
}

void RandomizedTree::train(std::vector<BaseKeypoint> const& base_set, RNG &rng,
                           PatchGenerator &make_patch, int depth, int views,
                           size_t reduced_num_dim, int num_quant_bits)
{
    init(base_set.size(), depth, rng);

    Mat patch;

    // Estimate posterior probabilities using random affine views
    std::vector<BaseKeypoint>::const_iterator keypt_it;
    int class_id = 0;
    Size patchSize(PATCH_SIZE, PATCH_SIZE);
    for (keypt_it = base_set.begin(); keypt_it != base_set.end(); ++keypt_it, ++class_id)
    {
        for (int i = 0; i < views; ++i)
        {
            make_patch( Mat(keypt_it->image), Point(keypt_it->y, keypt_it->x), patch, patchSize, rng );
            IplImage iplPatch = patch;
            addExample(class_id, getData(&iplPatch));
        }
    }

    finalize(reduced_num_dim, num_quant_bits);
}

void RandomizedTree::allocPosteriorsAligned(int num_leaves, int num_classes)
{
    freePosteriors(3);

    posteriors_ = new float*[num_leaves]; //(float**) malloc(num_leaves*sizeof(float*));
    for (int i = 0; i < num_leaves; ++i)
    {
        posteriors_[i] = (float*)cvAlloc(num_classes*sizeof(posteriors_[i][0]));
        memset(posteriors_[i], 0, num_classes*sizeof(float));
    }

    posteriors2_ = new uint8_t*[num_leaves];
    for (int i = 0; i < num_leaves; ++i)
    {
        posteriors2_[i] = (uint8_t*)cvAlloc(num_classes*sizeof(posteriors2_[i][0]));
        memset(posteriors2_[i], 0, num_classes*sizeof(uint8_t));
    }

    classes_ = num_classes;
}

void RandomizedTree::freePosteriors(int which)
{
    if (posteriors_ && (which&1))
    {
        for (int i = 0; i < num_leaves_; ++i)
            if (posteriors_[i])
                cvFree( &posteriors_[i] );
        delete [] posteriors_;
        posteriors_ = NULL;
    }

    if (posteriors2_ && (which&2))
    {
        for (int i = 0; i < num_leaves_; ++i)
            cvFree( &posteriors2_[i] );
        delete [] posteriors2_;
        posteriors2_ = NULL;
    }

    classes_ = -1;
}

void RandomizedTree::init(int num_classes, int depth, RNG &rng)
{
    depth_ = depth;
    num_leaves_ = 1 << depth;        // 2**d
    int num_nodes = num_leaves_ - 1; // 2**d - 1

    // Initialize probabilities and counts to 0
    allocPosteriorsAligned(num_leaves_, num_classes);      // will set classes_ correctly
    for (int i = 0; i < num_leaves_; ++i)
        memset((void*)posteriors_[i], 0, num_classes*sizeof(float));
    leaf_counts_.resize(num_leaves_);

    for (int i = 0; i < num_leaves_; ++i)
        memset((void*)posteriors2_[i], 0, num_classes*sizeof(uint8_t));

    createNodes(num_nodes, rng);
}

void RandomizedTree::addExample(int class_id, uchar* patch_data)
{
    int index = getIndex(patch_data);
    float* posterior = getPosteriorByIndex(index);
    ++leaf_counts_[index];
    ++posterior[class_id];
}
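A worked example of the index arithmetic in getIndex() above (illustration only): the tree is stored as an implicit complete binary tree, so the children of node i sit at 2i+1 and 2i+2, and after depth_ tests the node index is shifted into leaf space by subtracting nodes_.size() = num_leaves_ - 1.

// With depth_ = 2 there are 3 internal nodes (0,1,2) and 4 leaves. Each pixel
// test contributes one bit b in {0,1}; subtracting nodes_.size() = 3 maps the
// final index to a leaf number, which is just the binary number b0b1.
static int toyLeafIndex( int b0, int b1 )
{
    int index = 0;
    index = 2*index + 1 + b0;    // 1 or 2
    index = 2*index + 1 + b1;    // 3, 4, 5 or 6
    return index - 3;            // == 2*b0 + b1
}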
// returns the p% percentile of data (length n vector)
static float percentile(float *data, int n, float p)
{
    assert(n > 0);
    assert(p >= 0 && p <= 1);
    std::vector<float> vec(data, data+n);
    sort(vec.begin(), vec.end());
    int ix = (int)(p*(n-1));
    return vec[ix];
}

void RandomizedTree::finalize(size_t reduced_num_dim, int num_quant_bits)
{
    // Normalize by number of patches to reach each leaf
    for (int index = 0; index < num_leaves_; ++index)
    {
        float* posterior = posteriors_[index];
        assert(posterior != NULL);
        int count = leaf_counts_[index];
        if (count != 0)
        {
            float normalizer = 1.0f / count;
            for (int c = 0; c < classes_; ++c)
            {
                *posterior *= normalizer;
                ++posterior;
            }
        }
    }
    leaf_counts_.clear();

    // apply compressive sensing
    if ((int)reduced_num_dim != classes_)
        compressLeaves(reduced_num_dim);
    else
    {
        static bool notified = false;
        if (!notified)
            printf("\n[OK] NO compression to leaves applied, dim=%i\n", (int)reduced_num_dim);
        notified = true;
    }

    // convert float-posteriors to char-posteriors (quantization step)
    makePosteriors2(num_quant_bits);
}

void RandomizedTree::compressLeaves(size_t reduced_num_dim)
{
    static bool warned = false;
    if (!warned)
    {
        printf("\n[OK] compressing leaves with phi %i x %i\n", (int)reduced_num_dim, (int)classes_);
        warned = true;
    }

    static bool warned2 = false;
    if ((int)reduced_num_dim == classes_)
    {
        if (!warned2)
            printf("[WARNING] RandomizedTree::compressLeaves: not compressing because reduced_dim == classes()\n");
        warned2 = true;
        return;
    }

    // DO NOT FREE RETURNED POINTER
    float *cs_phi = CSMatrixGenerator::getCSMatrix(reduced_num_dim, classes_, CSMatrixGenerator::PDT_BERNOULLI);

    float *cs_posteriors = new float[num_leaves_ * reduced_num_dim];       // temp, num_leaves_ x reduced_num_dim
    for (int i = 0; i < num_leaves_; ++i)
    {
        float *post = getPosteriorByIndex(i);
        float *prod = &cs_posteriors[i*reduced_num_dim];
        Mat A( reduced_num_dim, classes_, CV_32FC1, cs_phi );
        Mat X( classes_, 1, CV_32FC1, post );
        Mat Y( reduced_num_dim, 1, CV_32FC1, prod );
        Y = A*X;
    }

    // copy new posteriors
    freePosteriors(3);
    allocPosteriorsAligned(num_leaves_, reduced_num_dim);
    for (int i = 0; i < num_leaves_; ++i)
        memcpy(posteriors_[i], &cs_posteriors[i*reduced_num_dim], reduced_num_dim*sizeof(float));
    classes_ = reduced_num_dim;

    delete [] cs_posteriors;
}

void RandomizedTree::makePosteriors2(int num_quant_bits)
{
    int N = (1<<num_quant_bits) - 1;

    float perc[2];
    estimateQuantPercForPosteriors(perc);

    assert(posteriors_ != NULL);
    for (int i = 0; i < num_leaves_; ++i)
        quantizeVector(posteriors_[i], classes_, N, perc, posteriors2_[i]);

    // printf("makePosteriors2 quantization bounds: %.3e, %.3e (num_leaves=%i, N=%i)\n",
    //        perc[0], perc[1], num_leaves_, N);
}

void RandomizedTree::estimateQuantPercForPosteriors(float perc[2])
{
    // _estimate_ percentiles for this tree
    // TODO: do this more accurately
    assert(posteriors_ != NULL);
    perc[0] = perc[1] = .0f;
    for (int i = 0; i < num_leaves_; i++)
    {
        perc[0] += percentile(posteriors_[i], classes_, LOWER_QUANT_PERC);
        perc[1] += percentile(posteriors_[i], classes_, UPPER_QUANT_PERC);
    }
    perc[0] /= num_leaves_;
    perc[1] /= num_leaves_;
}

float* RandomizedTree::getPosterior(uchar* patch_data)
{
    return const_cast<float*>(const_cast<const RandomizedTree*>(this)->getPosterior(patch_data));
}

const float* RandomizedTree::getPosterior(uchar* patch_data) const
{
    return getPosteriorByIndex( getIndex(patch_data) );
}

uint8_t* RandomizedTree::getPosterior2(uchar* patch_data)
{
    return const_cast<uint8_t*>(const_cast<const RandomizedTree*>(this)->getPosterior2(patch_data));
}

const uint8_t* RandomizedTree::getPosterior2(uchar* patch_data) const
{
    return getPosteriorByIndex2( getIndex(patch_data) );
}
void RandomizedTree::quantizeVector(float *vec, int dim, int N, float bnds[2], int clamp_mode)
{
    float map_bnd[2] = {0.f, (float)N};   // bounds of quantized target interval we're mapping to
    for (int k=0; k<dim; ++k, ++vec)
    {
        *vec = float(int((*vec - bnds[0])/(bnds[1] - bnds[0])*(map_bnd[1] - map_bnd[0]) + map_bnd[0]));
        // 0: clamp both, lower and upper values
        if (clamp_mode == 0)
            *vec = (*vec < map_bnd[0]) ? map_bnd[0] : ((*vec > map_bnd[1]) ? map_bnd[1] : *vec);
        // 1: clamp lower values only
        else if (clamp_mode == 1)
            *vec = (*vec < map_bnd[0]) ? map_bnd[0] : *vec;
        // 2: clamp upper values only
        else if (clamp_mode == 2)
            *vec = (*vec > map_bnd[1]) ? map_bnd[1] : *vec;
        // 4: no clamping
        else if (clamp_mode == 4)
            ;   // yep, nothing
        else
        {
            printf("clamp_mode == %i is not valid (%s:%i).\n", clamp_mode, __FILE__, __LINE__);
            exit(1);
        }
    }
}

void RandomizedTree::quantizeVector(float *vec, int dim, int N, float bnds[2], uint8_t *dst)
{
    int map_bnd[2] = {0, N};   // bounds of quantized target interval we're mapping to
    int tmp;
    for (int k=0; k<dim; ++k)
    {
        tmp = int((*vec - bnds[0])/(bnds[1] - bnds[0])*(map_bnd[1] - map_bnd[0]) + map_bnd[0]);
        *dst = (uint8_t)((tmp<0) ? 0 : ((tmp>N) ? N : tmp));
        ++vec;
        ++dst;
    }
}
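A small numeric example for the byte-quantizing overload above (the input values are illustrative only): with bnds = {0.01, 0.1} and N = 15 (four quantization bits), a value x maps to int((x - 0.01) / 0.09 * 15), clamped to [0, 15].

// Numeric example (illustrative values only).
static void quantizeExample()
{
    float v[4]    = { 0.00f, 0.02f, 0.05f, 0.20f };
    float bnds[2] = { 0.01f, 0.10f };
    uint8_t q[4];
    RandomizedTree::quantizeVector( v, 4, 15, bnds, q );
    // q becomes { 0, 1, 6, 15 }: the first value clamps low, the last clamps high.
}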
void RandomizedTree::read(const char* file_name, int num_quant_bits)
{
    std::ifstream file(file_name, std::ifstream::binary);
    read(file, num_quant_bits);
    file.close();
}

void RandomizedTree::read(std::istream &is, int num_quant_bits)
{
    is.read((char*)(&classes_), sizeof(classes_));
    is.read((char*)(&depth_), sizeof(depth_));

    num_leaves_ = 1 << depth_;
    int num_nodes = num_leaves_ - 1;

    nodes_.resize(num_nodes);
    is.read((char*)(&nodes_[0]), num_nodes * sizeof(nodes_[0]));

    //posteriors_.resize(classes_ * num_leaves_);
    //freePosteriors(3);
    //printf("[DEBUG] reading: %i leaves, %i classes\n", num_leaves_, classes_);
    allocPosteriorsAligned(num_leaves_, classes_);
    for (int i=0; i<num_leaves_; i++)
        is.read((char*)posteriors_[i], classes_ * sizeof(*posteriors_[0]));

    // make char-posteriors from float-posteriors
    makePosteriors2(num_quant_bits);
}

void RandomizedTree::write(const char* file_name) const
{
    std::ofstream file(file_name, std::ofstream::binary);
    write(file);
    file.close();
}

void RandomizedTree::write(std::ostream &os) const
{
    if (!posteriors_)
    {
        printf("WARNING: Cannot write float posteriors (posteriors_ = NULL).\n");
        return;
    }

    os.write((char*)(&classes_), sizeof(classes_));
    os.write((char*)(&depth_), sizeof(depth_));

    os.write((char*)(&nodes_[0]), nodes_.size() * sizeof(nodes_[0]));
    for (int i=0; i<num_leaves_; i++)
    {
        os.write((char*)posteriors_[i], classes_ * sizeof(*posteriors_[0]));
    }
}
void RandomizedTree::savePosteriors(std::string url, bool append)
{
    std::ofstream file(url.c_str(), (append ? std::ios::app : std::ios::out));
    for (int i=0; i<num_leaves_; i++)
    {
        float *post = posteriors_[i];
        char buf[20];
        for (int i=0; i<classes_; i++)
        {
            sprintf(buf, "%.10e", *post++);
            file << buf << ((i<classes_-1) ? " " : "");
        }
        file << std::endl;
    }
    file.close();
}

void RandomizedTree::savePosteriors2(std::string url, bool append)
{
    std::ofstream file(url.c_str(), (append ? std::ios::app : std::ios::out));
    for (int i=0; i<num_leaves_; i++)
    {
        uint8_t *post = posteriors2_[i];
        for (int i=0; i<classes_; i++)
            file << int(*post++) << (i<classes_-1 ? " " : "");
        file << std::endl;
    }
    file.close();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RTreeClassifier::RTreeClassifier()
  : classes_(0)
{
    posteriors_ = NULL;
}

void RTreeClassifier::train(std::vector<BaseKeypoint> const& base_set,
                            RNG &rng, int num_trees, int depth,
                            int views, size_t reduced_num_dim, int num_quant_bits)
{
    PatchGenerator make_patch;
    train(base_set, rng, make_patch, num_trees, depth, views, reduced_num_dim, num_quant_bits);
}

// Single-threaded version of train(), with progress output
void RTreeClassifier::train(std::vector<BaseKeypoint> const& base_set,
                            RNG &rng, PatchGenerator &make_patch, int num_trees,
                            int depth, int views, size_t reduced_num_dim, int num_quant_bits)
{
    if (reduced_num_dim > base_set.size())
    {
        printf("INVALID PARAMS in RTreeClassifier::train: reduced_num_dim{%i} > base_set.size(){%i}\n",
               (int)reduced_num_dim, (int)base_set.size());
        return;
    }

    num_quant_bits_ = num_quant_bits;
    classes_ = reduced_num_dim; // base_set.size();
    original_num_classes_ = base_set.size();
    trees_.resize(num_trees);

    printf("[OK] Training trees: base size=%i, reduced size=%i\n", (int)base_set.size(), (int)reduced_num_dim);

    int count = 1;
    printf("[OK] Trained 0 / %i trees", num_trees);
    fflush(stdout);
    for (int ti = 0; ti < num_trees; ti++)
    {
        trees_[ti].train(base_set, rng, make_patch, depth, views, reduced_num_dim, num_quant_bits_);
        printf("\r[OK] Trained %i / %i trees", count++, num_trees);
        fflush(stdout);
    }

    printf("\n");
    countZeroElements();
    printf("\n\n");
}
void RTreeClassifier::getSignature(IplImage* patch, float *sig) const
{
    // Need pointer to 32x32 patch data
    uchar buffer[RandomizedTree::PATCH_SIZE * RandomizedTree::PATCH_SIZE];
    uchar* patch_data;
    if (patch->widthStep != RandomizedTree::PATCH_SIZE)
    {
        //printf("[INFO] patch is padded, data will be copied (%i/%i).\n",
        //       patch->widthStep, RandomizedTree::PATCH_SIZE);
        uchar* data = getData(patch);
        patch_data = buffer;
        for (int i = 0; i < RandomizedTree::PATCH_SIZE; ++i)
        {
            memcpy((void*)patch_data, (void*)data, RandomizedTree::PATCH_SIZE);
            data += patch->widthStep;
            patch_data += RandomizedTree::PATCH_SIZE;
        }
        patch_data = buffer;
    }
    else
    {
        patch_data = getData(patch);
    }

    memset((void*)sig, 0, classes_ * sizeof(float));
    std::vector<RandomizedTree>::const_iterator tree_it;

    // get posteriors
    float **posteriors = new float*[trees_.size()];   // TODO: move alloc outside this func
    float **pp = posteriors;
    for (tree_it = trees_.begin(); tree_it != trees_.end(); ++tree_it, pp++)
    {
        *pp = const_cast<float*>(tree_it->getPosterior(patch_data));
        assert(*pp != NULL);
    }

    // sum them up
    pp = posteriors;
    for (tree_it = trees_.begin(); tree_it != trees_.end(); ++tree_it, pp++)
        addVec(classes_, sig, *pp, sig);

    delete [] posteriors;
    posteriors = NULL;
// full quantization (experimental)
#if 0
int n_max = 1<<8 - 1;
int sum_max = (1<<4 - 1)*trees_.size();
int shift = 0;
while ((sum_max>>shift) > n_max) shift++;
for (int i = 0; i < classes_; ++i) {
sig[i] = int(sig[i] + .5) >> shift;
if (sig[i]>n_max) sig[i] = n_max;
}
static bool warned = false;
if (!warned) {
printf("[WARNING] Using full quantization (RTreeClassifier::getSignature)! shift=%i\n", shift);
warned = true;
}
#else
// TODO: get rid of this multiply (-> number of trees is known at train
// time, exploit it in RandomizedTree::finalize())
    float normalizer = 1.0f / trees_.size();
    for (int i = 0; i < classes_; ++i)
        sig[i] *= normalizer;
#endif
}
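A usage sketch for the float-signature path above (illustration only; "classifier.rtc" is a placeholder path): load a trained classifier from disk and describe a single patch. The patch may be row-padded, since getSignature() copies it into a dense 32x32 buffer when widthStep != PATCH_SIZE, as shown above.

// Usage sketch (not part of this commit).
void describeOnePatch( IplImage* patch, std::vector<float>& sig )
{
    RTreeClassifier classifier;
    classifier.read( "classifier.rtc" );           // placeholder file name
    sig.resize( classifier.classes() );
    classifier.getSignature( patch, &sig[0] );
}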
void RTreeClassifier::getSignature(IplImage* patch, uint8_t *sig) const
{
    // Need pointer to 32x32 patch data
    uchar buffer[RandomizedTree::PATCH_SIZE * RandomizedTree::PATCH_SIZE];
    uchar* patch_data;
    if (patch->widthStep != RandomizedTree::PATCH_SIZE)
    {
        //printf("[INFO] patch is padded, data will be copied (%i/%i).\n",
        //       patch->widthStep, RandomizedTree::PATCH_SIZE);
        uchar* data = getData(patch);
        patch_data = buffer;
        for (int i = 0; i < RandomizedTree::PATCH_SIZE; ++i)
        {
            memcpy((void*)patch_data, (void*)data, RandomizedTree::PATCH_SIZE);
            data += patch->widthStep;
            patch_data += RandomizedTree::PATCH_SIZE;
        }
        patch_data = buffer;
    }
    else
    {
        patch_data = getData(patch);
    }

    std::vector<RandomizedTree>::const_iterator tree_it;

    // get posteriors
    if (posteriors_ == NULL)
    {
        posteriors_ = (uint8_t**)cvAlloc( trees_.size()*sizeof(posteriors_[0]) );
        ptemp_ = (uint16_t*)cvAlloc( classes_*sizeof(ptemp_[0]) );
    }

    /// @todo What is going on in the next 4 lines?
    uint8_t **pp = posteriors_;
    for (tree_it = trees_.begin(); tree_it != trees_.end(); ++tree_it, pp++)
        *pp = const_cast<uint8_t*>(tree_it->getPosterior2(patch_data));
    pp = posteriors_;
#if 1
// SSE2 optimized code
    sum_50t_176c(pp, sig, ptemp_);    // sum them up
#else
    static bool warned = false;

    memset((void*)sig, 0, classes_ * sizeof(sig[0]));
    uint16_t *sig16 = new uint16_t[classes_];           // TODO: make member, no alloc here
    memset((void*)sig16, 0, classes_ * sizeof(sig16[0]));
    for (tree_it = trees_.begin(); tree_it != trees_.end(); ++tree_it, pp++)
        addVec(classes_, sig16, *pp, sig16);

    // squeeze signatures into an uint8_t
    const bool full_shifting = true;
    int shift;
    if (full_shifting)
    {
        float num_add_bits_f = log((float)trees_.size())/log(2.f);   // # additional bits required due to summation
        int num_add_bits = int(num_add_bits_f);
        if (num_add_bits_f != float(num_add_bits)) ++num_add_bits;
        shift = num_quant_bits_ + num_add_bits - 8*sizeof(uint8_t);
        //shift = num_quant_bits_ + num_add_bits - 2;
        //shift = 6;
        if (shift > 0)
            for (int i = 0; i < classes_; ++i)
                sig[i] = (sig16[i] >> shift);    // &3 cut off all but lowest 2 bits, 3(dec) = 11(bin)

        if (!warned)
            printf("[OK] RTC: quantizing by FULL RIGHT SHIFT, shift = %i\n", shift);
    }
    else
    {
        printf("[ERROR] RTC: not implemented!\n");
        exit(0);
    }

    if (!warned)
        printf("[WARNING] RTC: unoptimized signature computation\n");
    warned = true;
#endif
}
void RTreeClassifier::getSparseSignature(IplImage *patch, float *sig, float thresh) const
{
    getFloatSignature(patch, sig);
    for (int i=0; i<classes_; ++i, sig++)
        if (*sig < thresh) *sig = 0.f;
}

int RTreeClassifier::countNonZeroElements(float *vec, int n, double tol)
{
    int res = 0;
    while (n-- > 0)
        res += (fabs(*vec++) > tol);
    return res;
}
void RTreeClassifier::read(const char* file_name)
{
    std::ifstream file(file_name, std::ifstream::binary);
    read(file);
    file.close();
}

void RTreeClassifier::read(std::istream &is)
{
    int num_trees = 0;
    is.read((char*)(&num_trees), sizeof(num_trees));
    is.read((char*)(&classes_), sizeof(classes_));
    is.read((char*)(&original_num_classes_), sizeof(original_num_classes_));
    is.read((char*)(&num_quant_bits_), sizeof(num_quant_bits_));

    if (num_quant_bits_ < 1 || num_quant_bits_ > 8)
    {
        printf("[WARNING] RTC: suspicious value num_quant_bits_=%i found; setting to %i.\n",
               num_quant_bits_, (int)DEFAULT_NUM_QUANT_BITS);
        num_quant_bits_ = DEFAULT_NUM_QUANT_BITS;
    }

    trees_.resize(num_trees);
    std::vector<RandomizedTree>::iterator tree_it;

    for (tree_it = trees_.begin(); tree_it != trees_.end(); ++tree_it)
    {
        tree_it->read(is, num_quant_bits_);
    }

    printf("[OK] Loaded RTC, quantization=%i bits\n", num_quant_bits_);

    countZeroElements();
}

void RTreeClassifier::write(const char* file_name) const
{
    std::ofstream file(file_name, std::ofstream::binary);
    write(file);
    file.close();
}

void RTreeClassifier::write(std::ostream &os) const
{
    int num_trees = trees_.size();
    os.write((char*)(&num_trees), sizeof(num_trees));
    os.write((char*)(&classes_), sizeof(classes_));
    os.write((char*)(&original_num_classes_), sizeof(original_num_classes_));
    os.write((char*)(&num_quant_bits_), sizeof(num_quant_bits_));
    printf("RTreeClassifier::write: num_quant_bits_=%i\n", num_quant_bits_);

    std::vector<RandomizedTree>::const_iterator tree_it;
    for (tree_it = trees_.begin(); tree_it != trees_.end(); ++tree_it)
        tree_it->write(os);
}
void RTreeClassifier::saveAllFloatPosteriors(std::string url)
{
    printf("[DEBUG] writing all float posteriors to %s...\n", url.c_str());
    for (int i=0; i<(int)trees_.size(); ++i)
        trees_[i].savePosteriors(url, (i==0 ? false : true));
    printf("[DEBUG] done\n");
}

void RTreeClassifier::saveAllBytePosteriors(std::string url)
{
    printf("[DEBUG] writing all byte posteriors to %s...\n", url.c_str());
    for (int i=0; i<(int)trees_.size(); ++i)
        trees_[i].savePosteriors2(url, (i==0 ? false : true));
    printf("[DEBUG] done\n");
}

void RTreeClassifier::setFloatPosteriorsFromTextfile_176(std::string url)
{
    std::ifstream ifs(url.c_str());

    for (int i=0; i<(int)trees_.size(); ++i)
    {
        int num_classes = trees_[i].classes_;
        assert(num_classes == 176);   // TODO: remove this limitation (arose due to SSE2 optimizations)
        for (int k=0; k<trees_[i].num_leaves_; ++k)
        {
            float *post = trees_[i].getPosteriorByIndex(k);
            for (int j=0; j<num_classes; ++j, ++post)
                ifs >> *post;
            assert(ifs.good());
        }
    }
    classes_ = 176;

    //setQuantization(num_quant_bits_);

    ifs.close();
    printf("[EXPERIMENTAL] read entire tree from '%s'\n", url.c_str());
}
float RTreeClassifier::countZeroElements()
{
    int flt_zeros = 0;
    int ui8_zeros = 0;
    int num_elem = trees_[0].classes();
    for (int i=0; i<(int)trees_.size(); ++i)
        for (int k=0; k<(int)trees_[i].num_leaves_; ++k)
        {
            float *p = trees_[i].getPosteriorByIndex(k);
            uint8_t *p2 = trees_[i].getPosteriorByIndex2(k);
            assert(p);
            assert(p2);
            for (int j=0; j<num_elem; ++j, ++p, ++p2)
            {
                if (*p == 0.f) flt_zeros++;
                if (*p2 == 0) ui8_zeros++;
            }
        }
    num_elem = trees_.size()*trees_[0].num_leaves_*num_elem;
    float flt_perc = 100.*flt_zeros/num_elem;
    float ui8_perc = 100.*ui8_zeros/num_elem;
    printf("[OK] RTC: overall %i/%i (%.3f%%) zeros in float leaves\n", flt_zeros, num_elem, flt_perc);
    printf("          overall %i/%i (%.3f%%) zeros in uint8 leaves\n", ui8_zeros, num_elem, ui8_perc);

    return flt_perc;
}

void RTreeClassifier::setQuantization(int num_quant_bits)
{
    for (int i=0; i<(int)trees_.size(); ++i)
        trees_[i].applyQuantization(num_quant_bits);

    printf("[OK] signature quantization is now %i bits (before: %i)\n", num_quant_bits, num_quant_bits_);
    num_quant_bits_ = num_quant_bits;
}

void RTreeClassifier::discardFloatPosteriors()
{
    for (int i=0; i<(int)trees_.size(); ++i)
        trees_[i].discardFloatPosteriors();
    printf("[OK] RTC: discarded float posteriors of all trees\n");
}
#if 0
const int progressBarSize = 50;
CalonderClassifier::CalonderClassifier()
{
...
...
@@ -561,4 +1506,81 @@ void CalonderClassifier::write( FileStorage& fs ) const
fs << "}"; // trees
}
struct RTreeNode
{
short offset1, offset2;
};
void CalonderClassifier::read( istream &is )
{
int _patchSize, _numTrees, _treeDepth, _numViews, _signatureSize, _origNumClasses, _numQuantBits, _compressType;
_patchSize = 32;
_numViews = 0;
_compressType = COMPRESS_DISTR_BERNOULLI;
is.read((char*)(&_numTrees), sizeof(_numTrees));
is.read((char*)(&_signatureSize), sizeof(_signatureSize));
is.read((char*)(&_origNumClasses), sizeof(_origNumClasses));
is.read((char*)(&_numQuantBits), sizeof(_numQuantBits));
// 1st tree
int _classes;
is.read((char*)(&_classes), sizeof(_classes));
CV_Assert( _signatureSize == _classes );
is.read((char*)(&_treeDepth), sizeof(_treeDepth));
prepare( _patchSize, _signatureSize, _numTrees, _treeDepth, _numViews );
origNumClasses = _origNumClasses;
compressType = _compressType;
if( _numQuantBits>8 )
{
if( verbose )
cout << "[WARNING] suspicious value numQuantBits=" << numQuantBits << " found; setting to " << DEFAULT_NUM_QUANT_BITS;
_numQuantBits = DEFAULT_NUM_QUANT_BITS;
}
// 1st tree
vector<RTreeNode> rtreeNodes(numNodesPerTree);
is.read((char*)(&rtreeNodes[0]), numNodesPerTree * sizeof(rtreeNodes[0]));
for( int ni = 0; ni < numNodesPerTree; ni ++ )
{
short offset1 = rtreeNodes[ni].offset1,
offset2 = rtreeNodes[ni].offset2;
nodes[ni] = Node(offset1 % _patchSize, offset1 / _patchSize, offset2 % _patchSize, offset2 / _patchSize );
}
for( int li = 0; li < numLeavesPerTree; li++ )
is.read((char*)&posteriors[li*signatureSize], signatureSize * sizeof(float));
// other trees
for( int treeIdx = 1; treeIdx < numTrees; treeIdx++ )
{
is.read((char*)(&_classes), sizeof(_classes));
CV_Assert( _classes == signatureSize );
is.read((char*)(&_treeDepth), sizeof(_treeDepth));
CV_Assert( _treeDepth == treeDepth );
is.read((char*)(&rtreeNodes[0]), numNodesPerTree * sizeof(rtreeNodes[0]));
Node* treeNodes = &nodes[treeIdx*numNodesPerTree];
for( int ni = 0; ni < numNodesPerTree; ni ++ )
{
short offset1 = rtreeNodes[ni].offset1,
offset2 = rtreeNodes[ni].offset2;
treeNodes[ni] = Node(offset1 % _patchSize, offset1 / _patchSize, offset2 % _patchSize, offset2 / _patchSize );
}
float* treePosteriors = &posteriors[treeIdx*numLeavesPerTree*signatureSize];
for( int li = 0; li < numLeavesPerTree; li++ )
is.read((char*)&treePosteriors[li*signatureSize], signatureSize * sizeof(float));
}
#if QUANTIZATION_AVAILABLE
if( _numQuantBits )
quantizePosteriors(_numQuantBits);
#endif
}
#endif
}
modules/features2d/src/descriptors.cpp

@@ -51,6 +51,24 @@ using namespace std;
namespace cv
{

Mat windowedMatchingMask( const vector<KeyPoint>& keypoints1, const vector<KeyPoint>& keypoints2,
                          float maxDeltaX, float maxDeltaY )
{
    if( keypoints1.empty() || keypoints2.empty() )
        return Mat();

    Mat mask( keypoints1.size(), keypoints2.size(), CV_8UC1 );
    for( size_t i = 0; i < keypoints1.size(); i++ )
    {
        for( size_t j = 0; j < keypoints2.size(); j++ )
        {
            Point2f diff = keypoints2[j].pt - keypoints1[i].pt;
            mask.at<uchar>(i, j) = std::abs(diff.x) < maxDeltaX && std::abs(diff.y) < maxDeltaY;
        }
    }
    return mask;
}

void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1,
                  const Mat& img2, const vector<KeyPoint>& keypoints2,
                  const vector<int>& matches, Mat& outImg,
...
...
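A usage sketch for the new helper (not part of this commit; the 25-pixel window sizes are placeholder values). Row i / column j of the mask corresponds to query keypoint i and training keypoint j, which matches the mask checks in BruteForceMatcher::matchImpl().

// Usage sketch: restrict brute-force matching to a spatial window.
void maskedMatching( const Mat& queryDescriptors, const vector<KeyPoint>& queryKeypoints,
                     const Mat& trainDescriptors, const vector<KeyPoint>& trainKeypoints,
                     vector<DMatch>& matches )
{
    BruteForceMatcher< L2<float> > matcher;
    matcher.add( trainDescriptors );
    Mat mask = windowedMatchingMask( queryKeypoints, trainKeypoints, 25.f, 25.f );
    matcher.match( queryDescriptors, mask, matches );
}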
@@ -278,20 +296,19 @@ Ptr<DescriptorMatcher> createDescriptorMatcher( const string& descriptorMatcherT
* BruteForceMatcher L2 specialization *
\****************************************************************************************/
template<>
-void BruteForceMatcher<L2<float> >::matchImpl( const Mat& descriptors_1, const Mat& descriptors_2,
-                                               const Mat& /*mask*/, vector<int>& matches ) const
+void BruteForceMatcher<L2<float> >::matchImpl( const Mat& query, const Mat& /*mask*/, vector<int>& matches ) const
{
    matches.clear();
-    matches.reserve( descriptors_1.rows );
+    matches.reserve( query.rows );
//TODO: remove _DEBUG if bag 416 fixed
#if (defined _DEBUG || !defined HAVE_EIGEN2)
    Mat norms;
-    cv::reduce( descriptors_2.mul(descriptors_2), norms, 1, 0);
+    cv::reduce( train.mul(train), norms, 1, 0);
    norms = norms.t();
-    Mat desc_2t = descriptors_2.t();
-    for( int i=0;i<descriptors_1.rows;i++ )
+    Mat desc_2t = train.t();
+    for( int i=0;i<query.rows;i++ )
    {
-        Mat distances = (-2)*descriptors_1.row(i)*desc_2t;
+        Mat distances = (-2)*query.row(i)*desc_2t;
        distances += norms;
        Point minLoc;
        minMaxLoc( distances, 0, 0, &minLoc );
...
...
@@ -631,160 +648,6 @@ void OneWayDescriptorMatch::clear ()
    base->clear();
}
/****************************************************************************************\
* CalonderDescriptorMatch *
\****************************************************************************************/
#if 0
CalonderDescriptorMatch::Params::Params( const RNG& _rng, const PatchGenerator& _patchGen,
int _numTrees, int _depth, int _views,
size_t _reducedNumDim,
int _numQuantBits,
bool _printStatus,
int _patchSize ) :
rng(_rng), patchGen(_patchGen), numTrees(_numTrees), depth(_depth), views(_views),
patchSize(_patchSize), reducedNumDim(_reducedNumDim), numQuantBits(_numQuantBits), printStatus(_printStatus)
{}
CalonderDescriptorMatch::Params::Params( const string& _filename )
{
filename = _filename;
}
CalonderDescriptorMatch::CalonderDescriptorMatch()
{}
CalonderDescriptorMatch::CalonderDescriptorMatch( const Params& _params )
{
initialize(_params);
}
CalonderDescriptorMatch::~CalonderDescriptorMatch()
{}
void CalonderDescriptorMatch::initialize( const Params& _params )
{
classifier.release();
params = _params;
if( !params.filename.empty() )
{
classifier = new RTreeClassifier;
classifier->read( params.filename.c_str() );
}
}
void CalonderDescriptorMatch::add( const Mat& image, vector<KeyPoint>& keypoints )
{
if( params.filename.empty() )
collection.add( image, keypoints );
}
Mat CalonderDescriptorMatch::extractPatch( const Mat& image, const Point& pt, int patchSize ) const
{
const int offset = patchSize / 2;
return image( Rect(pt.x - offset, pt.y - offset, patchSize, patchSize) );
}
void CalonderDescriptorMatch::calcBestProbAndMatchIdx( const Mat& image, const Point& pt,
float& bestProb, int& bestMatchIdx, float* signature )
{
IplImage roi = extractPatch( image, pt, params.patchSize );
classifier->getSignature( &roi, signature );
bestProb = 0;
bestMatchIdx = -1;
for( int ci = 0; ci < classifier->classes(); ci++ )
{
if( signature[ci] > bestProb )
{
bestProb = signature[ci];
bestMatchIdx = ci;
}
}
}
void CalonderDescriptorMatch::trainRTreeClassifier()
{
if( classifier.empty() )
{
assert( params.filename.empty() );
classifier = new RTreeClassifier;
vector<BaseKeypoint> baseKeyPoints;
vector<IplImage> iplImages( collection.images.size() );
for( size_t imageIdx = 0; imageIdx < collection.images.size(); imageIdx++ )
{
iplImages[imageIdx] = collection.images[imageIdx];
for( size_t pointIdx = 0; pointIdx < collection.points[imageIdx].size(); pointIdx++ )
{
BaseKeypoint bkp;
KeyPoint kp = collection.points[imageIdx][pointIdx];
bkp.x = cvRound(kp.pt.x);
bkp.y = cvRound(kp.pt.y);
bkp.image = &iplImages[imageIdx];
baseKeyPoints.push_back(bkp);
}
}
classifier->train( baseKeyPoints, params.rng, params.patchGen, params.numTrees,
params.depth, params.views, params.reducedNumDim, params.numQuantBits,
params.printStatus );
}
}
void CalonderDescriptorMatch::match( const Mat& image, vector<KeyPoint>& keypoints, vector<int>& indices )
{
trainRTreeClassifier();
float bestProb = 0;
AutoBuffer<float> signature( classifier->classes() );
indices.resize( keypoints.size() );
for( size_t pi = 0; pi < keypoints.size(); pi++ )
calcBestProbAndMatchIdx( image, keypoints[pi].pt, bestProb, indices[pi], signature );
}
void CalonderDescriptorMatch::classify( const Mat& image, vector<KeyPoint>& keypoints )
{
trainRTreeClassifier();
AutoBuffer<float> signature( classifier->classes() );
for( size_t pi = 0; pi < keypoints.size(); pi++ )
{
float bestProb = 0;
int bestMatchIdx = -1;
calcBestProbAndMatchIdx( image, keypoints[pi].pt, bestProb, bestMatchIdx, signature );
keypoints[pi].class_id = collection.getKeyPoint(bestMatchIdx).class_id;
}
}
void CalonderDescriptorMatch::clear ()
{
GenericDescriptorMatch::clear();
classifier.release();
}
void CalonderDescriptorMatch::read( const FileNode &fn )
{
params.numTrees = fn["numTrees"];
params.depth = fn["depth"];
params.views = fn["views"];
params.patchSize = fn["patchSize"];
params.reducedNumDim = (int) fn["reducedNumDim"];
params.numQuantBits = fn["numQuantBits"];
params.printStatus = (int) fn["printStatus"] != 0;
}
void CalonderDescriptorMatch::write( FileStorage& fs ) const
{
fs << "numTrees" << params.numTrees;
fs << "depth" << params.depth;
fs << "views" << params.views;
fs << "patchSize" << params.patchSize;
fs << "reducedNumDim" << (int) params.reducedNumDim;
fs << "numQuantBits" << params.numQuantBits;
fs << "printStatus" << params.printStatus;
}
#endif
/****************************************************************************************\
* FernDescriptorMatch *
\****************************************************************************************/
...
...