Commit bb704fc3 authored by dmitriy.anisimov's avatar dmitriy.anisimov

removed generated document

parent 17d73988
/*
* basic.css
* ~~~~~~~~~
*
* Sphinx stylesheet -- basic theme.
*
* :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
/* -- main layout ----------------------------------------------------------- */
div.clearer {
clear: both;
}
/* -- relbar ---------------------------------------------------------------- */
div.related {
width: 100%;
font-size: 90%;
}
div.related h3 {
display: none;
}
div.related ul {
margin: 0;
padding: 0 0 0 10px;
list-style: none;
}
div.related li {
display: inline;
}
div.related li.right {
float: right;
margin-right: 5px;
}
/* -- sidebar --------------------------------------------------------------- */
div.sphinxsidebarwrapper {
padding: 10px 5px 0 10px;
}
div.sphinxsidebar {
float: left;
width: 230px;
margin-left: -100%;
font-size: 90%;
}
div.sphinxsidebar ul {
list-style: none;
}
div.sphinxsidebar ul ul,
div.sphinxsidebar ul.want-points {
margin-left: 20px;
list-style: square;
}
div.sphinxsidebar ul ul {
margin-top: 0;
margin-bottom: 0;
}
div.sphinxsidebar form {
margin-top: 10px;
}
div.sphinxsidebar input {
border: 1px solid #98dbcc;
font-family: sans-serif;
font-size: 1em;
}
div.sphinxsidebar #searchbox input[type="text"] {
width: 170px;
}
div.sphinxsidebar #searchbox input[type="submit"] {
width: 30px;
}
img {
border: 0;
max-width: 100%;
}
/* -- search page ----------------------------------------------------------- */
ul.search {
margin: 10px 0 0 20px;
padding: 0;
}
ul.search li {
padding: 5px 0 5px 20px;
background-image: url(file.png);
background-repeat: no-repeat;
background-position: 0 7px;
}
ul.search li a {
font-weight: bold;
}
ul.search li div.context {
color: #888;
margin: 2px 0 0 30px;
text-align: left;
}
ul.keywordmatches li.goodmatch a {
font-weight: bold;
}
/* -- index page ------------------------------------------------------------ */
table.contentstable {
width: 90%;
}
table.contentstable p.biglink {
line-height: 150%;
}
a.biglink {
font-size: 1.3em;
}
span.linkdescr {
font-style: italic;
padding-top: 5px;
font-size: 90%;
}
/* -- general index --------------------------------------------------------- */
table.indextable {
width: 100%;
}
table.indextable td {
text-align: left;
vertical-align: top;
}
table.indextable dl, table.indextable dd {
margin-top: 0;
margin-bottom: 0;
}
table.indextable tr.pcap {
height: 10px;
}
table.indextable tr.cap {
margin-top: 10px;
background-color: #f2f2f2;
}
img.toggler {
margin-right: 3px;
margin-top: 3px;
cursor: pointer;
}
/* Jump boxes on the module index and general index pages share identical
   styling; group the selectors instead of duplicating the declarations.
   Rendered result is unchanged. */
div.modindex-jumpbox,
div.genindex-jumpbox {
border-top: 1px solid #ddd;
border-bottom: 1px solid #ddd;
margin: 1em 0 1em 0;
padding: 0.4em;
}
/* -- general body styles --------------------------------------------------- */
a.headerlink {
visibility: hidden;
}
h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
h4:hover > a.headerlink,
h5:hover > a.headerlink,
h6:hover > a.headerlink,
dt:hover > a.headerlink {
visibility: visible;
}
div.body p.caption {
text-align: inherit;
}
div.body td {
text-align: left;
}
/* NOTE(review): this selector is declared again later in this sheet with the
   same `padding-left: 1em` plus `margin: 0`; the later rule wins for the
   shared property. Kept byte-for-byte since this is generated Sphinx output. */
.field-list ul {
padding-left: 1em;
}
.first {
margin-top: 0 !important;
}
p.rubric {
margin-top: 30px;
font-weight: bold;
}
img.align-left, .figure.align-left, object.align-left {
clear: left;
float: left;
margin-right: 1em;
}
img.align-right, .figure.align-right, object.align-right {
clear: right;
float: right;
margin-left: 1em;
}
img.align-center, .figure.align-center, object.align-center {
display: block;
margin-left: auto;
margin-right: auto;
}
.align-left {
text-align: left;
}
.align-center {
text-align: center;
}
.align-right {
text-align: right;
}
/* -- sidebars -------------------------------------------------------------- */
div.sidebar {
margin: 0 0 0.5em 1em;
border: 1px solid #ddb;
padding: 7px 7px 0 7px;
background-color: #ffe;
width: 40%;
float: right;
}
p.sidebar-title {
font-weight: bold;
}
/* -- topics ---------------------------------------------------------------- */
div.topic {
border: 1px solid #ccc;
padding: 7px 7px 0 7px;
margin: 10px 0 10px 0;
}
p.topic-title {
font-size: 1.1em;
font-weight: bold;
margin-top: 10px;
}
/* -- admonitions ----------------------------------------------------------- */
div.admonition {
margin-top: 10px;
margin-bottom: 10px;
padding: 7px;
}
div.admonition dt {
font-weight: bold;
}
div.admonition dl {
margin-bottom: 0;
}
/* Title paragraph of an admonition box (e.g. "Note"): bold, with spacing
   only on the right and below. Zero lengths are written unitless (`0`, not
   `0px`) to match the convention used throughout the rest of this sheet. */
p.admonition-title {
margin: 0 10px 5px 0;
font-weight: bold;
}
div.body p.centered {
text-align: center;
margin-top: 25px;
}
/* -- tables ---------------------------------------------------------------- */
table.docutils {
border: 0;
border-collapse: collapse;
}
table.docutils td, table.docutils th {
padding: 1px 8px 1px 5px;
border-top: 0;
border-left: 0;
border-right: 0;
border-bottom: 1px solid #aaa;
}
table.field-list td, table.field-list th {
border: 0 !important;
}
table.footnote td, table.footnote th {
border: 0 !important;
}
th {
text-align: left;
padding-right: 5px;
}
table.citation {
border-left: solid 1px gray;
margin-left: 1px;
}
table.citation td {
border-bottom: none;
}
/* -- other body styles ----------------------------------------------------- */
ol.arabic {
list-style: decimal;
}
ol.loweralpha {
list-style: lower-alpha;
}
ol.upperalpha {
list-style: upper-alpha;
}
ol.lowerroman {
list-style: lower-roman;
}
ol.upperroman {
list-style: upper-roman;
}
dl {
margin-bottom: 15px;
}
dd p {
margin-top: 0px;
}
dd ul, dd table {
margin-bottom: 10px;
}
dd {
margin-top: 3px;
margin-bottom: 10px;
margin-left: 30px;
}
dt:target, .highlighted {
background-color: #fbe54e;
}
dl.glossary dt {
font-weight: bold;
font-size: 1.1em;
}
.field-list ul {
margin: 0;
padding-left: 1em;
}
.field-list p {
margin: 0;
}
.optional {
font-size: 1.3em;
}
.versionmodified {
font-style: italic;
}
.system-message {
background-color: #fda;
padding: 5px;
border: 3px solid red;
}
.footnote:target {
background-color: #ffa;
}
.line-block {
display: block;
margin-top: 1em;
margin-bottom: 1em;
}
.line-block .line-block {
margin-top: 0;
margin-bottom: 0;
margin-left: 1.5em;
}
.guilabel, .menuselection {
font-family: sans-serif;
}
.accelerator {
text-decoration: underline;
}
.classifier {
font-style: oblique;
}
abbr, acronym {
border-bottom: dotted 1px;
cursor: help;
}
/* -- code displays --------------------------------------------------------- */
pre {
overflow: auto;
overflow-y: hidden; /* fixes display issues on Chrome browsers */
}
td.linenos pre {
padding: 5px 0px;
border: 0;
background-color: transparent;
color: #aaa;
}
table.highlighttable {
margin-left: 0.5em;
}
table.highlighttable td {
padding: 0 0.5em 0 0.5em;
}
tt.descname {
background-color: transparent;
font-weight: bold;
font-size: 1.2em;
}
tt.descclassname {
background-color: transparent;
}
tt.xref, a tt {
background-color: transparent;
font-weight: bold;
}
h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
background-color: transparent;
}
.viewcode-link {
float: right;
}
.viewcode-back {
float: right;
font-family: sans-serif;
}
div.viewcode-block:target {
margin: -1px -10px;
padding: 0 10px;
}
/* -- math display ---------------------------------------------------------- */
img.math {
vertical-align: middle;
}
div.body div.math p {
text-align: center;
}
span.eqno {
float: right;
}
/* -- printout stylesheet --------------------------------------------------- */
@media print {
div.document,
div.documentwrapper,
div.bodywrapper {
margin: 0 !important;
width: 100%;
}
div.sphinxsidebar,
div.related,
div.footer,
#top-link {
display: none;
}
}
\ No newline at end of file
/*
* default.css_t
* ~~~~~~~~~~~~~
*
* Sphinx stylesheet -- default theme.
*
* :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@import url("basic.css");
/* -- page layout ----------------------------------------------------------- */
body {
font-family: sans-serif;
font-size: 100%;
background-color: #11303d;
color: #000;
margin: 0;
padding: 0;
}
div.document {
background-color: #1c4e63;
}
div.documentwrapper {
float: left;
width: 100%;
}
div.bodywrapper {
margin: 0 0 0 230px;
}
div.body {
background-color: #ffffff;
color: #000000;
padding: 0 20px 30px 20px;
}
div.footer {
color: #ffffff;
width: 100%;
padding: 9px 0 9px 0;
text-align: center;
font-size: 75%;
}
div.footer a {
color: #ffffff;
text-decoration: underline;
}
div.related {
background-color: #133f52;
line-height: 30px;
color: #ffffff;
}
div.related a {
color: #ffffff;
}
/* Empty ruleset — presumably a placeholder left by the theme template for
   overrides (TODO confirm against default.css_t); the sidebar's actual
   layout rules come from basic.css via the @import at the top of this file. */
div.sphinxsidebar {
}
div.sphinxsidebar h3 {
font-family: 'Trebuchet MS', sans-serif;
color: #ffffff;
font-size: 1.4em;
font-weight: normal;
margin: 0;
padding: 0;
}
div.sphinxsidebar h3 a {
color: #ffffff;
}
div.sphinxsidebar h4 {
font-family: 'Trebuchet MS', sans-serif;
color: #ffffff;
font-size: 1.3em;
font-weight: normal;
margin: 5px 0 0 0;
padding: 0;
}
div.sphinxsidebar p {
color: #ffffff;
}
div.sphinxsidebar p.topless {
margin: 5px 10px 10px 10px;
}
div.sphinxsidebar ul {
margin: 10px;
padding: 0;
color: #ffffff;
}
div.sphinxsidebar a {
color: #98dbcc;
}
div.sphinxsidebar input {
border: 1px solid #98dbcc;
font-family: sans-serif;
font-size: 1em;
}
/* -- hyperlink styles ------------------------------------------------------ */
a {
color: #355f7c;
text-decoration: none;
}
a:visited {
color: #355f7c;
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
/* -- body styles ----------------------------------------------------------- */
div.body h1,
div.body h2,
div.body h3,
div.body h4,
div.body h5,
div.body h6 {
font-family: 'Trebuchet MS', sans-serif;
background-color: #f2f2f2;
font-weight: normal;
color: #20435c;
border-bottom: 1px solid #ccc;
margin: 20px -20px 10px -20px;
padding: 3px 0 3px 10px;
}
div.body h1 { margin-top: 0; font-size: 200%; }
div.body h2 { font-size: 160%; }
div.body h3 { font-size: 140%; }
div.body h4 { font-size: 120%; }
div.body h5 { font-size: 110%; }
div.body h6 { font-size: 100%; }
a.headerlink {
color: #c60f0f;
font-size: 0.8em;
padding: 0 4px 0 4px;
text-decoration: none;
}
a.headerlink:hover {
background-color: #c60f0f;
color: white;
}
div.body p, div.body dd, div.body li {
text-align: justify;
line-height: 130%;
}
div.admonition p.admonition-title + p {
display: inline;
}
div.admonition p {
margin-bottom: 5px;
}
div.admonition pre {
margin-bottom: 5px;
}
div.admonition ul, div.admonition ol {
margin-bottom: 5px;
}
div.note {
background-color: #eee;
border: 1px solid #ccc;
}
div.seealso {
background-color: #ffc;
border: 1px solid #ff6;
}
div.topic {
background-color: #eee;
}
div.warning {
background-color: #ffe4e4;
border: 1px solid #f66;
}
p.admonition-title {
display: inline;
}
p.admonition-title:after {
content: ":";
}
pre {
padding: 5px;
background-color: #eeffcc;
color: #333333;
line-height: 120%;
border: 1px solid #ac9;
border-left: none;
border-right: none;
}
tt {
background-color: #ecf0f3;
padding: 0 1px 0 1px;
font-size: 0.95em;
}
th {
background-color: #ede;
}
.warning tt {
background: #efc2c2;
}
.note tt {
background: #d6d6d6;
}
.viewcode-back {
font-family: sans-serif;
}
div.viewcode-block:target {
background-color: #f4debf;
border-top: 1px solid #ac9;
border-bottom: 1px solid #ac9;
}
\ No newline at end of file
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>datasetstools. Tools for working with different datasets. &mdash; OpenCV datasetstools 3.0 documentation</title>
<link rel="stylesheet" href="_static/default.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: './',
VERSION: '3.0',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
};
</script>
<script type="text/javascript" src="_static/jquery.js"></script>
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<link rel="top" title="OpenCV datasetstools 3.0 documentation" href="index.html" />
</head>
<body>
<div class="related">
<h3>Navigation</h3>
<ul>
<li class="right" style="margin-right: 10px">
<a href="genindex.html" title="General Index"
accesskey="I">index</a></li>
<li><a href="index.html">OpenCV datasetstools 3.0 documentation</a> &raquo;</li>
</ul>
</div>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body">
<div class="section" id="datasetstools-tools-for-working-with-different-datasets">
<h1>datasetstools. Tools for working with different datasets.<a class="headerlink" href="#datasetstools-tools-for-working-with-different-datasets" title="Permalink to this headline"></a></h1>
<p>The datasetstools module includes classes for working with different datasets.</p>
<p>First version of this module was implemented for <strong>Fall2014 OpenCV Challenge</strong>.</p>
<div class="section" id="action-recognition">
<h2>Action Recognition<a class="headerlink" href="#action-recognition" title="Permalink to this headline"></a></h2>
<div class="section" id="ar-hmdb">
<h3>ar_hmdb<a class="headerlink" href="#ar-hmdb" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="hmdb-a-large-human-motion-database">&#8220;HMDB: A Large Human Motion Database&#8221;</span>: <a class="reference external" href="http://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/">http://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset files: hmdb51_org.rar &amp; test_train_splits.rar.</li>
<li>Unpack them.</li>
<li>To load data run: ./opencv/build/bin/example_datasetstools_ar_hmdb -p=/home/user/path_to_unpacked_folders/</li>
</ol>
</div>
</div>
<div class="section" id="ar-sports">
<h3>ar_sports<a class="headerlink" href="#ar-sports" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="sports-1m-dataset">&#8220;Sports-1M Dataset&#8221;</span>: <a class="reference external" href="http://cs.stanford.edu/people/karpathy/deepvideo/">http://cs.stanford.edu/people/karpathy/deepvideo/</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset files (git clone <a class="reference external" href="https://code.google.com/p/sports-1m-dataset/">https://code.google.com/p/sports-1m-dataset/</a>).</li>
<li>To load data run: ./opencv/build/bin/example_datasetstools_ar_sports -p=/home/user/path_to_downloaded_folders/</li>
</ol>
</div>
</div>
</div>
<div class="section" id="face-recognition">
<h2>Face Recognition<a class="headerlink" href="#face-recognition" title="Permalink to this headline"></a></h2>
<div class="section" id="fr-lfw">
<h3>fr_lfw<a class="headerlink" href="#fr-lfw" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="labeled-faces-in-the-wild-a">&#8220;Labeled Faces in the Wild-a&#8221;</span>: <a class="reference external" href="http://www.openu.ac.il/home/hassner/data/lfwa/">http://www.openu.ac.il/home/hassner/data/lfwa/</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset file: lfwa.tar.gz.</li>
<li>Unpack it.</li>
<li>To load data run: ./opencv/build/bin/example_datasetstools_fr_lfw -p=/home/user/path_to_unpacked_folder/lfw2/</li>
</ol>
</div>
</div>
</div>
<div class="section" id="gesture-recognition">
<h2>Gesture Recognition<a class="headerlink" href="#gesture-recognition" title="Permalink to this headline"></a></h2>
<div class="section" id="gr-chalearn">
<h3>gr_chalearn<a class="headerlink" href="#gr-chalearn" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="chalearn-looking-at-people">&#8220;ChaLearn Looking at People&#8221;</span>: <a class="reference external" href="http://gesture.chalearn.org/">http://gesture.chalearn.org/</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>Follow the instructions on the site above and download the files for the dataset &#8220;Track 3: Gesture Recognition&#8221;: Train1.zip-Train5.zip, Validation1.zip-Validation3.zip (register on the site www.codalab.org and accept the terms and conditions of the competition: <a class="reference external" href="https://www.codalab.org/competitions/991#learn_the_details">https://www.codalab.org/competitions/991#learn_the_details</a>. There are three mirrors for downloading the dataset files; at the time of writing, only the &#8220;Universitat Oberta de Catalunya&#8221; mirror worked).</li>
<li>Unpack the training archives Train1.zip-Train5.zip into one folder (loading of the validation files is not currently implemented).</li>
<li>To load data run: ./opencv/build/bin/example_datasetstools_gr_chalearn -p=/home/user/path_to_unpacked_folder/</li>
</ol>
</div>
</div>
<div class="section" id="gr-skig">
<h3>gr_skig<a class="headerlink" href="#gr-skig" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="sheffield-kinect-gesture-dataset">&#8220;Sheffield Kinect Gesture Dataset&#8221;</span>: <a class="reference external" href="http://lshao.staff.shef.ac.uk/data/SheffieldKinectGesture.htm">http://lshao.staff.shef.ac.uk/data/SheffieldKinectGesture.htm</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset files: subject1_dep.7z-subject6_dep.7z, subject1_rgb.7z-subject6_rgb.7z.</li>
<li>Unpack them.</li>
<li>To load data run: ./opencv/build/bin/example_datasetstools_gr_skig -p=/home/user/path_to_unpacked_folders/</li>
</ol>
</div>
</div>
</div>
<div class="section" id="human-pose-estimation">
<h2>Human Pose Estimation<a class="headerlink" href="#human-pose-estimation" title="Permalink to this headline"></a></h2>
<div class="section" id="hpe-parse">
<h3>hpe_parse<a class="headerlink" href="#hpe-parse" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="parse-dataset">&#8220;PARSE Dataset&#8221;</span>: <a class="reference external" href="http://www.ics.uci.edu/~dramanan/papers/parse/">http://www.ics.uci.edu/~dramanan/papers/parse/</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset file: people.zip.</li>
<li>Unpack it.</li>
<li>To load data run: ./opencv/build/bin/example_datasetstools_hpe_parse -p=/home/user/path_to_unpacked_folder/people_all/</li>
</ol>
</div>
</div>
</div>
<div class="section" id="image-registration">
<h2>Image Registration<a class="headerlink" href="#image-registration" title="Permalink to this headline"></a></h2>
<div class="section" id="ir-affine">
<h3>ir_affine<a class="headerlink" href="#ir-affine" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="affine-covariant-regions-datasets">&#8220;Affine Covariant Regions Datasets&#8221;</span>: <a class="reference external" href="http://www.robots.ox.ac.uk/~vgg/data/data-aff.html">http://www.robots.ox.ac.uk/~vgg/data/data-aff.html</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset files: bark\bikes\boat\graf\leuven\trees\ubc\wall.tar.gz.</li>
<li>Unpack them.</li>
<li>To load data, for example, for &#8220;bark&#8221;, run: ./opencv/build/bin/example_datasetstools_ir_affine -p=/home/user/path_to_unpacked_folder/bark/</li>
</ol>
</div>
</div>
<div class="section" id="ir-robot">
<h3>ir_robot<a class="headerlink" href="#ir-robot" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="robot-data-set">&#8220;Robot Data Set&#8221;</span>: <a class="reference external" href="http://roboimagedata.compute.dtu.dk/?page_id=24">http://roboimagedata.compute.dtu.dk/?page_id=24</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download files for dataset &#8220;Point Feature Data Set – 2010&#8221;: SET001_6.tar.gz-SET055_60.tar.gz (there are two data sets: - Full resolution images (1200×1600), ~500 Gb and - Half size image (600×800), ~115 Gb.)</li>
<li>Unpack them to one folder.</li>
<li>To load data run: ./opencv/build/bin/example_datasetstools_ir_robot -p=/home/user/path_to_unpacked_folder/</li>
</ol>
</div>
</div>
</div>
<div class="section" id="image-segmentation">
<h2>Image Segmentation<a class="headerlink" href="#image-segmentation" title="Permalink to this headline"></a></h2>
<div class="section" id="is-bsds">
<h3>is_bsds<a class="headerlink" href="#is-bsds" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="the-berkeley-segmentation-dataset-and-benchmark">&#8220;The Berkeley Segmentation Dataset and Benchmark&#8221;</span>: <a class="reference external" href="https://www.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/">https://www.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset files: BSDS300-human.tgz &amp; BSDS300-images.tgz.</li>
<li>Unpack them.</li>
<li>To load data run: ./opencv/build/bin/example_datasetstools_is_bsds -p=/home/user/path_to_unpacked_folder/BSDS300/</li>
</ol>
</div>
</div>
<div class="section" id="is-weizmann">
<h3>is_weizmann<a class="headerlink" href="#is-weizmann" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="weizmann-segmentation-evaluation-database">&#8220;Weizmann Segmentation Evaluation Database&#8221;</span>: <a class="reference external" href="http://www.wisdom.weizmann.ac.il/~vision/Seg_Evaluation_DB/">http://www.wisdom.weizmann.ac.il/~vision/Seg_Evaluation_DB/</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset files: Weizmann_Seg_DB_1obj.ZIP &amp; Weizmann_Seg_DB_2obj.ZIP.</li>
<li>Unpack them.</li>
<li>To load data, for example, for 1 object dataset, run: ./opencv/build/bin/example_datasetstools_is_weizmann -p=/home/user/path_to_unpacked_folder/1obj/</li>
</ol>
</div>
</div>
</div>
<div class="section" id="multiview-stereo-matching">
<h2>Multiview Stereo Matching<a class="headerlink" href="#multiview-stereo-matching" title="Permalink to this headline"></a></h2>
<div class="section" id="msm-epfl">
<h3>msm_epfl<a class="headerlink" href="#msm-epfl" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="epfl-multi-view-stereo">&#8220;EPFL Multi-View Stereo&#8221;</span>: <a class="reference external" href="http://cvlabwww.epfl.ch/~strecha/multiview/denseMVS.html">http://cvlabwww.epfl.ch/~strecha/multiview/denseMVS.html</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset files: castle_dense\castle_dense_large\castle_entry\fountain\herzjesu_dense\herzjesu_dense_large_bounding\cameras\images\p.tar.gz.</li>
<li>Unpack them in separate folder for each object. For example, for &#8220;fountain&#8221;, in folder fountain/ : fountain_dense_bounding.tar.gz -&gt; bounding/, fountain_dense_cameras.tar.gz -&gt; camera/, fountain_dense_images.tar.gz -&gt; png/, fountain_dense_p.tar.gz -&gt; P/</li>
<li>To load data, for example, for &#8220;fountain&#8221;, run: ./opencv/build/bin/example_datasetstools_msm_epfl -p=/home/user/path_to_unpacked_folder/fountain/</li>
</ol>
</div>
</div>
<div class="section" id="msm-middlebury">
<h3>msm_middlebury<a class="headerlink" href="#msm-middlebury" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="stereo-middlebury-computer-vision">&#8220;Stereo – Middlebury Computer Vision&#8221;</span>: <a class="reference external" href="http://vision.middlebury.edu/mview/">http://vision.middlebury.edu/mview/</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset files: dino\dinoRing\dinoSparseRing\temple\templeRing\templeSparseRing.zip</li>
<li>Unpack them.</li>
<li>To load data, for example &#8220;temple&#8221; dataset, run: ./opencv/build/bin/example_datasetstools_msm_middlebury -p=/home/user/path_to_unpacked_folder/temple/</li>
</ol>
</div>
</div>
</div>
<div class="section" id="object-recognition">
<h2>Object Recognition<a class="headerlink" href="#object-recognition" title="Permalink to this headline"></a></h2>
<div class="section" id="or-imagenet">
<h3>or_imagenet<a class="headerlink" href="#or-imagenet" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="imagenet">&#8220;ImageNet&#8221;</span>: <a class="reference external" href="http://www.image-net.org/">http://www.image-net.org/</a></p>
<p>Currently, loading the full list of URLs is implemented. Loading the dataset from the ILSVRC challenge is planned.</p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset file: imagenet_fall11_urls.tgz</li>
<li>Unpack it.</li>
<li>To load data run: ./opencv/build/bin/example_datasetstools_or_imagenet -p=/home/user/path_to_unpacked_file/</li>
</ol>
</div>
</div>
<div class="section" id="or-sun">
<h3>or_sun<a class="headerlink" href="#or-sun" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="sun-database">&#8220;SUN Database&#8221;</span>: <a class="reference external" href="http://sun.cs.princeton.edu/">http://sun.cs.princeton.edu/</a></p>
<p>Currently, loading of the &#8220;Scene Recognition Benchmark. SUN397&#8221; is implemented. Support for the &#8220;Object Detection Benchmark. SUN2012&#8221; is also planned.</p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset file: SUN397.tar</li>
<li>Unpack it.</li>
<li>To load data run: ./opencv/build/bin/example_datasetstools_or_sun -p=/home/user/path_to_unpacked_folder/SUN397/</li>
</ol>
</div>
</div>
</div>
<div class="section" id="slam">
<h2>SLAM<a class="headerlink" href="#slam" title="Permalink to this headline"></a></h2>
<div class="section" id="slam-kitti">
<h3>slam_kitti<a class="headerlink" href="#slam-kitti" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="kitti-vision-benchmark">&#8220;KITTI Vision Benchmark&#8221;</span>: <a class="reference external" href="http://www.cvlibs.net/datasets/kitti/eval_odometry.php">http://www.cvlibs.net/datasets/kitti/eval_odometry.php</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download &#8220;Odometry&#8221; dataset files: data_odometry_gray\data_odometry_color\data_odometry_velodyne\data_odometry_poses\data_odometry_calib.zip.</li>
<li>Unpack data_odometry_poses.zip, it creates folder dataset/poses/. After that unpack data_odometry_gray.zip, data_odometry_color.zip, data_odometry_velodyne.zip. Folder dataset/sequences/ will be created with folders 00/..21/. Each of these folders will contain: image_0/, image_1/, image_2/, image_3/, velodyne/ and files calib.txt &amp; times.txt. These two last files will be replaced after unpacking data_odometry_calib.zip at the end.</li>
<li>To load data run: ./opencv/build/bin/example_datasetstools_slam_kitti -p=/home/user/path_to_unpacked_folder/dataset/</li>
</ol>
</div>
</div>
<div class="section" id="slam-tumindoor">
<h3>slam_tumindoor<a class="headerlink" href="#slam-tumindoor" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="tumindoor-dataset">&#8220;TUMindoor Dataset&#8221;</span>: <a class="reference external" href="http://www.navvis.lmt.ei.tum.de/dataset/">http://www.navvis.lmt.ei.tum.de/dataset/</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset files: dslr\info\ladybug\pointcloud.tar.bz2 for each dataset: 11-11-28 (1st floor)\11-12-13 (1st floor N1)\11-12-17a (4th floor)\11-12-17b (3rd floor)\11-12-17c (Ground I)\11-12-18a (Ground II)\11-12-18b (2nd floor)</li>
<li>Unpack them in separate folder for each dataset. dslr.tar.bz2 -&gt; dslr/, info.tar.bz2 -&gt; info/, ladybug.tar.bz2 -&gt; ladybug/, pointcloud.tar.bz2 -&gt; pointcloud/.</li>
<li>To load each dataset run: ./opencv/build/bin/example_datasetstools_slam_tumindoor -p=/home/user/path_to_unpacked_folders/</li>
</ol>
</div>
</div>
</div>
<div class="section" id="text-recognition">
<h2>Text Recognition<a class="headerlink" href="#text-recognition" title="Permalink to this headline"></a></h2>
<div class="section" id="tr-chars">
<h3>tr_chars<a class="headerlink" href="#tr-chars" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="the-chars74k-dataset">&#8220;The Chars74K Dataset&#8221;</span>: <a class="reference external" href="http://www.ee.surrey.ac.uk/CVSSP/demos/chars74k/">http://www.ee.surrey.ac.uk/CVSSP/demos/chars74k/</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset files: EnglishFnt\EnglishHnd\EnglishImg\KannadaHnd\KannadaImg.tgz, ListsTXT.tgz.</li>
<li>Unpack them.</li>
<li>Move <a href="#id1"><span class="problematic" id="id2">*</span></a>.m files from folder ListsTXT/ to appropriate folder. For example, English/list_English_Img.m for EnglishImg.tgz.</li>
<li>To load data, for example &#8220;EnglishImg&#8221;, run: ./opencv/build/bin/example_datasetstools_tr_chars -p=/home/user/path_to_unpacked_folder/English/</li>
</ol>
</div>
</div>
<div class="section" id="tr-svt">
<h3>tr_svt<a class="headerlink" href="#tr-svt" title="Permalink to this headline"></a></h3>
<p>Implements loading dataset:</p>
<p><span class="target" id="the-street-view-text-dataset">&#8220;The Street View Text Dataset&#8221;</span>: <a class="reference external" href="http://vision.ucsd.edu/~kai/svt/">http://vision.ucsd.edu/~kai/svt/</a></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p>Usage</p>
<ol class="last arabic simple">
<li>From link above download dataset file: svt.zip.</li>
<li>Unpack it.</li>
<li>To load data run: ./opencv/build/bin/example_datasetstools_tr_svt -p=/home/user/path_to_unpacked_folder/svt/svt1/</li>
</ol>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="sphinxsidebar">
<div class="sphinxsidebarwrapper">
<h3><a href="index.html">Table Of Contents</a></h3>
<ul>
<li><a class="reference internal" href="#">datasetstools. Tools for working with different datasets.</a><ul>
<li><a class="reference internal" href="#action-recognition">Action Recognition</a><ul>
<li><a class="reference internal" href="#ar-hmdb">ar_hmdb</a></li>
<li><a class="reference internal" href="#ar-sports">ar_sports</a></li>
</ul>
</li>
<li><a class="reference internal" href="#face-recognition">Face Recognition</a><ul>
<li><a class="reference internal" href="#fr-lfw">fr_lfw</a></li>
</ul>
</li>
<li><a class="reference internal" href="#gesture-recognition">Gesture Recognition</a><ul>
<li><a class="reference internal" href="#gr-chalearn">gr_chalearn</a></li>
<li><a class="reference internal" href="#gr-skig">gr_skig</a></li>
</ul>
</li>
<li><a class="reference internal" href="#human-pose-estimation">Human Pose Estimation</a><ul>
<li><a class="reference internal" href="#hpe-parse">hpe_parse</a></li>
</ul>
</li>
<li><a class="reference internal" href="#image-registration">Image Registration</a><ul>
<li><a class="reference internal" href="#ir-affine">ir_affine</a></li>
<li><a class="reference internal" href="#ir-robot">ir_robot</a></li>
</ul>
</li>
<li><a class="reference internal" href="#image-segmentation">Image Segmentation</a><ul>
<li><a class="reference internal" href="#is-bsds">is_bsds</a></li>
<li><a class="reference internal" href="#is-weizmann">is_weizmann</a></li>
</ul>
</li>
<li><a class="reference internal" href="#multiview-stereo-matching">Multiview Stereo Matching</a><ul>
<li><a class="reference internal" href="#msm-epfl">msm_epfl</a></li>
<li><a class="reference internal" href="#msm-middlebury">msm_middlebury</a></li>
</ul>
</li>
<li><a class="reference internal" href="#object-recognition">Object Recognition</a><ul>
<li><a class="reference internal" href="#or-imagenet">or_imagenet</a></li>
<li><a class="reference internal" href="#or-sun">or_sun</a></li>
</ul>
</li>
<li><a class="reference internal" href="#slam">SLAM</a><ul>
<li><a class="reference internal" href="#slam-kitti">slam_kitti</a></li>
<li><a class="reference internal" href="#slam-tumindoor">slam_tumindoor</a></li>
</ul>
</li>
<li><a class="reference internal" href="#text-recognition">Text Recognition</a><ul>
<li><a class="reference internal" href="#tr-chars">tr_chars</a></li>
<li><a class="reference internal" href="#tr-svt">tr_svt</a></li>
</ul>
</li>
</ul>
</li>
</ul>
<h3>This Page</h3>
<ul class="this-page-menu">
<li><a href="_sources/datasetstools.txt"
rel="nofollow">Show Source</a></li>
</ul>
<div id="searchbox" style="display: none">
<h3>Quick search</h3>
<form class="search" action="search.html" method="get">
<input type="text" name="q" />
<input type="submit" value="Go" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
<p class="searchtip" style="font-size: 90%">
Enter search terms or a module, class or function name.
</p>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="related">
<h3>Navigation</h3>
<ul>
<li class="right" style="margin-right: 10px">
<a href="genindex.html" title="General Index"
>index</a></li>
<li><a href="index.html">OpenCV datasetstools 3.0 documentation</a> &raquo;</li>
</ul>
</div>
<div class="footer">
&copy; Copyright 2014, itseez.
Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.2.2.
</div>
</body>
</html>
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment