/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

#ifdef HAVE_OPENGL
#  include "gl_core_3_1.hpp"
#  ifdef HAVE_CUDA
#    include <cuda_gl_interop.h>
#  endif
#else // HAVE_OPENGL
#  define NO_OPENGL_SUPPORT_ERROR CV_ErrorNoReturn(cv::Error::StsBadFunc, "OpenCV was built without OpenGL support")
#endif // HAVE_OPENGL

using namespace cv;
using namespace cv::cuda;

namespace
{
#ifndef HAVE_OPENGL
inline static void throw_no_ogl() { CV_Error(cv::Error::OpenGlNotSupported, "The library is compiled without OpenGL support"); }
#elif defined _DEBUG
inline static bool checkError(const char* file, const int line, const char* func = 0)
{
    GLenum err = gl::GetError();
    if (err != gl::NO_ERROR_)
    {
        const char* msg;
        switch (err)
        {
        case gl::INVALID_ENUM:
            msg = "An unacceptable value is specified for an enumerated argument";
            break;
        case gl::INVALID_VALUE:
            msg = "A numeric argument is out of range";
            break;
        case gl::INVALID_OPERATION:
            msg = "The specified operation is not allowed in the current state";
            break;
        case gl::OUT_OF_MEMORY:
            msg = "There is not enough memory left to execute the command";
            break;
        default:
            msg = "Unknown error";
        };
        cvError(CV_OpenGlApiCallError, func, msg, file, line);
        return false;
    }
    return true;
}
#endif // HAVE_OPENGL
} // namespace

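// CV_CheckGlError() expands to CV_DbgAssert, so checkError() is only invoked in
// debug builds; in release builds the macro compiles away to a no-op.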
#define CV_CheckGlError() CV_DbgAssert( (checkError(__FILE__, __LINE__, CV_Func)) )

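// gl_types maps OpenCV matrix depths (CV_8U .. CV_64F, in that order) to the
// matching OpenGL data type enums; it is indexed by Mat::depth() throughout
// this file.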
#ifdef HAVE_OPENGL
namespace
{
    const GLenum gl_types[] = { gl::UNSIGNED_BYTE, gl::BYTE, gl::UNSIGNED_SHORT, gl::SHORT, gl::INT, gl::FLOAT, gl::DOUBLE };
}
#endif

////////////////////////////////////////////////////////////////////////
// setGlDevice

void cv::cuda::setGlDevice(int device)
{
#ifndef HAVE_OPENGL
    (void) device;
    throw_no_ogl();
#else
    #ifndef HAVE_CUDA
        (void) device;
        throw_no_cuda();
    #else
        cudaSafeCall( cudaGLSetGLDevice(device) );
    #endif
#endif
}

////////////////////////////////////////////////////////////////////////
// CudaResource
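//
// Small helper that owns a cudaGraphicsResource_t registered for an OpenGL
// buffer object, so GpuMat data can be copied into or out of the buffer
// without going through host memory. registerBuffer() is a no-op when the
// same GL buffer is registered again; release() unregisters it.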

#if defined(HAVE_OPENGL) && defined(HAVE_CUDA)

namespace
{
    class CudaResource
    {
    public:
        CudaResource();
        ~CudaResource();

        void registerBuffer(GLuint buffer);
        void release();

        void copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream = 0);
        void copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream = 0);

        void* map(cudaStream_t stream = 0);
        void unmap(cudaStream_t stream = 0);

    private:
        cudaGraphicsResource_t resource_;
        GLuint buffer_;

        class GraphicsMapHolder;
    };

    CudaResource::CudaResource() : resource_(0), buffer_(0)
    {
    }

    CudaResource::~CudaResource()
    {
        release();
    }

    void CudaResource::registerBuffer(GLuint buffer)
    {
        CV_DbgAssert( buffer != 0 );

        if (buffer_ == buffer)
            return;

        cudaGraphicsResource_t resource;
        cudaSafeCall( cudaGraphicsGLRegisterBuffer(&resource, buffer, cudaGraphicsMapFlagsNone) );

        release();

        resource_ = resource;
        buffer_ = buffer;
    }

    void CudaResource::release()
    {
        if (resource_)
            cudaGraphicsUnregisterResource(resource_);

        resource_ = 0;
        buffer_ = 0;
    }

    class CudaResource::GraphicsMapHolder
    {
    public:
        GraphicsMapHolder(cudaGraphicsResource_t* resource, cudaStream_t stream);
        ~GraphicsMapHolder();

        void reset();

    private:
        cudaGraphicsResource_t* resource_;
        cudaStream_t stream_;
    };

    CudaResource::GraphicsMapHolder::GraphicsMapHolder(cudaGraphicsResource_t* resource, cudaStream_t stream) : resource_(resource), stream_(stream)
    {
        if (resource_)
            cudaSafeCall( cudaGraphicsMapResources(1, resource_, stream_) );
    }

    CudaResource::GraphicsMapHolder::~GraphicsMapHolder()
    {
        if (resource_)
            cudaGraphicsUnmapResources(1, resource_, stream_);
    }

    void CudaResource::GraphicsMapHolder::reset()
    {
        resource_ = 0;
    }

    void CudaResource::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream)
    {
        CV_DbgAssert( resource_ != 0 );

        GraphicsMapHolder h(&resource_, stream);
        (void) h;

        void* dst;
        size_t size;
        cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&dst, &size, resource_) );

        CV_DbgAssert( width * height == size );

        if (stream == 0)
            cudaSafeCall( cudaMemcpy2D(dst, width, src, spitch, width, height, cudaMemcpyDeviceToDevice) );
        else
            cudaSafeCall( cudaMemcpy2DAsync(dst, width, src, spitch, width, height, cudaMemcpyDeviceToDevice, stream) );
    }

    void CudaResource::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream)
    {
        CV_DbgAssert( resource_ != 0 );

        GraphicsMapHolder h(&resource_, stream);
        (void) h;

        void* src;
        size_t size;
        cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&src, &size, resource_) );

        CV_DbgAssert( width * height == size );

        if (stream == 0)
            cudaSafeCall( cudaMemcpy2D(dst, dpitch, src, width, width, height, cudaMemcpyDeviceToDevice) );
        else
            cudaSafeCall( cudaMemcpy2DAsync(dst, dpitch, src, width, width, height, cudaMemcpyDeviceToDevice, stream) );
    }

    void* CudaResource::map(cudaStream_t stream)
    {
        CV_DbgAssert( resource_ != 0 );

        GraphicsMapHolder h(&resource_, stream);

        void* ptr;
        size_t size;
        cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&ptr, &size, resource_) );

        h.reset();

        return ptr;
    }

    void CudaResource::unmap(cudaStream_t stream)
    {
        CV_Assert( resource_ != 0 );

        cudaGraphicsUnmapResources(1, &resource_, stream);
    }
}

#endif

////////////////////////////////////////////////////////////////////////
// ogl::Buffer
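//
// cv::ogl::Buffer hides all GL calls behind a ref-counted Impl (pimpl). When
// OpenCV is built without OpenGL the Impl is an empty stub and every public
// method throws; otherwise Impl::empty() provides a shared "null" object so a
// default-constructed Buffer never owns a GL resource.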

#ifndef HAVE_OPENGL

class cv::ogl::Buffer::Impl
{
};

#else

class cv::ogl::Buffer::Impl
{
public:
    static const Ptr<Impl>& empty();

    Impl(GLuint bufId, bool autoRelease);
    Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease);
    ~Impl();

    void bind(GLenum target) const;

    void copyFrom(GLuint srcBuf, GLsizeiptr size);

    void copyFrom(GLsizeiptr size, const GLvoid* data);
    void copyTo(GLsizeiptr size, GLvoid* data) const;

    void* mapHost(GLenum access);
    void unmapHost();

#ifdef HAVE_CUDA
    void copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream = 0);
    void copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream = 0) const;

    void* mapDevice(cudaStream_t stream = 0);
    void unmapDevice(cudaStream_t stream = 0);
#endif

    void setAutoRelease(bool flag) { autoRelease_ = flag; }

    GLuint bufId() const { return bufId_; }

private:
    Impl();

    GLuint bufId_;
    bool autoRelease_;

#ifdef HAVE_CUDA
    mutable CudaResource cudaResource_;
#endif
};

const Ptr<cv::ogl::Buffer::Impl>& cv::ogl::Buffer::Impl::empty()
{
    static Ptr<Impl> p(new Impl);
    return p;
}

cv::ogl::Buffer::Impl::Impl() : bufId_(0), autoRelease_(false)
{
}

cv::ogl::Buffer::Impl::Impl(GLuint abufId, bool autoRelease) : bufId_(abufId), autoRelease_(autoRelease)
{
    CV_Assert( gl::IsBuffer(abufId) == gl::TRUE_ );
}

cv::ogl::Buffer::Impl::Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease) : bufId_(0), autoRelease_(autoRelease)
{
    gl::GenBuffers(1, &bufId_);
    CV_CheckGlError();

    CV_Assert( bufId_ != 0 );

    gl::BindBuffer(target, bufId_);
    CV_CheckGlError();

    gl::BufferData(target, size, data, gl::DYNAMIC_DRAW);
    CV_CheckGlError();

    gl::BindBuffer(target, 0);
    CV_CheckGlError();
}

cv::ogl::Buffer::Impl::~Impl()
{
    if (autoRelease_ && bufId_)
        gl::DeleteBuffers(1, &bufId_);
}

void cv::ogl::Buffer::Impl::bind(GLenum target) const
{
    gl::BindBuffer(target, bufId_);
    CV_CheckGlError();
}

void cv::ogl::Buffer::Impl::copyFrom(GLuint srcBuf, GLsizeiptr size)
{
    gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
    CV_CheckGlError();

    gl::BindBuffer(gl::COPY_READ_BUFFER, srcBuf);
    CV_CheckGlError();

    gl::CopyBufferSubData(gl::COPY_READ_BUFFER, gl::COPY_WRITE_BUFFER, 0, 0, size);
    CV_CheckGlError();
}

void cv::ogl::Buffer::Impl::copyFrom(GLsizeiptr size, const GLvoid* data)
{
    gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
    CV_CheckGlError();

    gl::BufferSubData(gl::COPY_WRITE_BUFFER, 0, size, data);
    CV_CheckGlError();
}

void cv::ogl::Buffer::Impl::copyTo(GLsizeiptr size, GLvoid* data) const
{
    gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
    CV_CheckGlError();

    gl::GetBufferSubData(gl::COPY_READ_BUFFER, 0, size, data);
    CV_CheckGlError();
}

void* cv::ogl::Buffer::Impl::mapHost(GLenum access)
{
    gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
    CV_CheckGlError();

    GLvoid* data = gl::MapBuffer(gl::COPY_READ_BUFFER, access);
    CV_CheckGlError();

    return data;
}

void cv::ogl::Buffer::Impl::unmapHost()
{
    gl::UnmapBuffer(gl::COPY_READ_BUFFER);
}

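// The CUDA-side helpers below lazily register the GL buffer with CUDA through
// CudaResource on first use, then perform device-to-device copies or return a
// mapped device pointer.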
#ifdef HAVE_CUDA

void cv::ogl::Buffer::Impl::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream)
{
    cudaResource_.registerBuffer(bufId_);
    cudaResource_.copyFrom(src, spitch, width, height, stream);
}

void cv::ogl::Buffer::Impl::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream) const
{
    cudaResource_.registerBuffer(bufId_);
    cudaResource_.copyTo(dst, dpitch, width, height, stream);
}

void* cv::ogl::Buffer::Impl::mapDevice(cudaStream_t stream)
{
    cudaResource_.registerBuffer(bufId_);
    return cudaResource_.map(stream);
}

void cv::ogl::Buffer::Impl::unmapDevice(cudaStream_t stream)
{
    cudaResource_.unmap(stream);
}

#endif // HAVE_CUDA

#endif // HAVE_OPENGL

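// Typical host-side usage (a sketch; it assumes an OpenGL context is already
// current on this thread, e.g. one created via cv::namedWindow(name, cv::WINDOW_OPENGL)):
//
//     cv::Mat frame = ...;                                      // e.g. a CV_8UC3 image
//     cv::ogl::Buffer pbo(frame, cv::ogl::Buffer::PIXEL_UNPACK_BUFFER);
//     cv::ogl::Texture2D tex(pbo);                              // upload through the PBO
//     cv::ogl::render(tex);                                     // draw as a full-window quad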
cv::ogl::Buffer::Buffer() : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    impl_ = Impl::empty();
#endif
}

cv::ogl::Buffer::Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    (void) arows;
    (void) acols;
    (void) atype;
    (void) abufId;
    (void) autoRelease;
    throw_no_ogl();
#else
    impl_.reset(new Impl(abufId, autoRelease));
    rows_ = arows;
    cols_ = acols;
    type_ = atype;
#endif
}

cv::ogl::Buffer::Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    (void) asize;
    (void) atype;
    (void) abufId;
    (void) autoRelease;
    throw_no_ogl();
#else
    impl_.reset(new Impl(abufId, autoRelease));
    rows_ = asize.height;
    cols_ = asize.width;
    type_ = atype;
#endif
}

cv::ogl::Buffer::Buffer(InputArray arr, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
    case _InputArray::CUDA_GPU_MAT:
        copyFrom(arr, target, autoRelease);
        break;

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            const GLsizeiptr asize = mat.rows * mat.cols * mat.elemSize();
            impl_.reset(new Impl(asize, mat.data, target, autoRelease));
            rows_ = mat.rows;
            cols_ = mat.cols;
            type_ = mat.type();
            break;
        }
    }
#endif
}

void cv::ogl::Buffer::create(int arows, int acols, int atype, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arows;
    (void) acols;
    (void) atype;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
    if (rows_ != arows || cols_ != acols || type_ != atype)
    {
        const GLsizeiptr asize = arows * acols * CV_ELEM_SIZE(atype);
        impl_.reset(new Impl(asize, 0, target, autoRelease));
        rows_ = arows;
        cols_ = acols;
        type_ = atype;
    }
#endif
}

void cv::ogl::Buffer::release()
{
#ifdef HAVE_OPENGL
    if (impl_)
        impl_->setAutoRelease(true);
    impl_ = Impl::empty();
    rows_ = 0;
    cols_ = 0;
    type_ = 0;
#endif
}

void cv::ogl::Buffer::setAutoRelease(bool flag)
{
#ifndef HAVE_OPENGL
    (void) flag;
    throw_no_ogl();
#else
    impl_->setAutoRelease(flag);
#endif
}

void cv::ogl::Buffer::copyFrom(InputArray arr, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();
    create(asize, atype, target, autoRelease);

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer buf = arr.getOGlBuffer();
            impl_->copyFrom(buf.bufId(), asize.area() * CV_ELEM_SIZE(atype));
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
            #ifndef HAVE_CUDA
                throw_no_cuda();
            #else
                GpuMat dmat = arr.getGpuMat();
                impl_->copyFrom(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
            #endif

            break;
        }

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            impl_->copyFrom(asize.area() * CV_ELEM_SIZE(atype), mat.data);
        }
    }
#endif
}

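// The cuda::Stream overload below enqueues the GpuMat -> GL buffer copy
// asynchronously on the given stream; the stream should be synchronized
// before the buffer contents are consumed on the OpenGL side.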
void cv::ogl::Buffer::copyFrom(InputArray arr, cuda::Stream& stream, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) stream;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
    #ifndef HAVE_CUDA
        (void) arr;
        (void) stream;
        (void) target;
        (void) autoRelease;
        throw_no_cuda();
    #else
        GpuMat dmat = arr.getGpuMat();

        create(dmat.size(), dmat.type(), target, autoRelease);

        impl_->copyFrom(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows, cuda::StreamAccessor::getStream(stream));
    #endif
#endif
}

void cv::ogl::Buffer::copyTo(OutputArray arr) const
{
#ifndef HAVE_OPENGL
    (void) arr;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            arr.getOGlBufferRef().copyFrom(*this);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
            #ifndef HAVE_CUDA
                throw_no_cuda();
            #else
                GpuMat& dmat = arr.getGpuMatRef();
                dmat.create(rows_, cols_, type_);
                impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
            #endif

            break;
        }

    default:
        {
            arr.create(rows_, cols_, type_);
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            impl_->copyTo(mat.rows * mat.cols * mat.elemSize(), mat.data);
        }
    }
#endif
}

void cv::ogl::Buffer::copyTo(OutputArray arr, cuda::Stream& stream) const
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) stream;
    throw_no_ogl();
#else
    #ifndef HAVE_CUDA
        (void) arr;
        (void) stream;
        throw_no_cuda();
    #else
        arr.create(rows_, cols_, type_);
        GpuMat dmat = arr.getGpuMat();
        impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows, cuda::StreamAccessor::getStream(stream));
    #endif
#endif
}

cv::ogl::Buffer cv::ogl::Buffer::clone(Target target, bool autoRelease) const
{
#ifndef HAVE_OPENGL
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
    return cv::ogl::Buffer();
#else
    ogl::Buffer buf;
    buf.copyFrom(*this, target, autoRelease);
    return buf;
#endif
}

void cv::ogl::Buffer::bind(Target target) const
{
#ifndef HAVE_OPENGL
    (void) target;
    throw_no_ogl();
#else
    impl_->bind(target);
#endif
}

void cv::ogl::Buffer::unbind(Target target)
{
#ifndef HAVE_OPENGL
    (void) target;
    throw_no_ogl();
#else
    gl::BindBuffer(target, 0);
    CV_CheckGlError();
#endif
}

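// mapHost()/mapDevice() return a Mat/GpuMat header that aliases the buffer's
// memory without copying; the matching unmapHost()/unmapDevice() must be
// called before OpenGL uses the buffer again, and the returned header must
// not be accessed after unmapping.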
Mat cv::ogl::Buffer::mapHost(Access access)
{
#ifndef HAVE_OPENGL
    (void) access;
    throw_no_ogl();
    return Mat();
#else
    return Mat(rows_, cols_, type_, impl_->mapHost(access));
#endif
}

void cv::ogl::Buffer::unmapHost()
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    return impl_->unmapHost();
#endif
}

GpuMat cv::ogl::Buffer::mapDevice()
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
    return GpuMat();
#else
    #ifndef HAVE_CUDA
        throw_no_cuda();
        return GpuMat();
    #else
        return GpuMat(rows_, cols_, type_, impl_->mapDevice());
    #endif
#endif
}

void cv::ogl::Buffer::unmapDevice()
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    #ifndef HAVE_CUDA
        throw_no_cuda();
    #else
        impl_->unmapDevice();
    #endif
#endif
}

cuda::GpuMat cv::ogl::Buffer::mapDevice(cuda::Stream& stream)
{
#ifndef HAVE_OPENGL
    (void) stream;
    throw_no_ogl();
    return GpuMat();
#else
    #ifndef HAVE_CUDA
        (void) stream;
        throw_no_cuda();
        return GpuMat();
    #else
        return GpuMat(rows_, cols_, type_, impl_->mapDevice(cuda::StreamAccessor::getStream(stream)));
    #endif
#endif
}

void cv::ogl::Buffer::unmapDevice(cuda::Stream& stream)
{
#ifndef HAVE_OPENGL
    (void) stream;
    throw_no_ogl();
#else
    #ifndef HAVE_CUDA
        (void) stream;
        throw_no_cuda();
    #else
        impl_->unmapDevice(cuda::StreamAccessor::getStream(stream));
    #endif
#endif
}

unsigned int cv::ogl::Buffer::bufId() const
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
    return 0;
#else
    return impl_->bufId();
#endif
}


//////////////////////////////////////////////////////////////////////////////////////////
// ogl::Texture
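//
// cv::ogl::Texture2D wraps a GL_TEXTURE_2D object. The internal format is
// derived from the source channel count (1 -> DEPTH_COMPONENT, 3 -> RGB,
// 4 -> RGBA), and uploads from ogl::Buffer or GpuMat sources go through a
// pixel unpack buffer so the data never touches host memory.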

#ifndef HAVE_OPENGL

class cv::ogl::Texture2D::Impl
{
};

#else

class cv::ogl::Texture2D::Impl
{
public:
    static const Ptr<Impl> empty();

    Impl(GLuint texId, bool autoRelease);
    Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease);
    ~Impl();

    void copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels);
    void copyTo(GLenum format, GLenum type, GLvoid* pixels) const;

    void bind() const;

    void setAutoRelease(bool flag) { autoRelease_ = flag; }

    GLuint texId() const { return texId_; }

private:
    Impl();

    GLuint texId_;
    bool autoRelease_;
};

const Ptr<cv::ogl::Texture2D::Impl> cv::ogl::Texture2D::Impl::empty()
{
    static Ptr<Impl> p(new Impl);
    return p;
}

cv::ogl::Texture2D::Impl::Impl() : texId_(0), autoRelease_(false)
{
}

cv::ogl::Texture2D::Impl::Impl(GLuint atexId, bool autoRelease) : texId_(atexId), autoRelease_(autoRelease)
{
    CV_Assert( gl::IsTexture(atexId) == gl::TRUE_ );
}

cv::ogl::Texture2D::Impl::Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease) : texId_(0), autoRelease_(autoRelease)
{
    gl::GenTextures(1, &texId_);
    CV_CheckGlError();

    CV_Assert(texId_ != 0);

    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();

    gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);
    CV_CheckGlError();

    gl::TexImage2D(gl::TEXTURE_2D, 0, internalFormat, width, height, 0, format, type, pixels);
    CV_CheckGlError();

    gl::GenerateMipmap(gl::TEXTURE_2D);
    CV_CheckGlError();
}

cv::ogl::Texture2D::Impl::~Impl()
{
    if (autoRelease_ && texId_)
        gl::DeleteTextures(1, &texId_);
}

void cv::ogl::Texture2D::Impl::copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels)
{
    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();

    gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);
    CV_CheckGlError();

    gl::TexSubImage2D(gl::TEXTURE_2D, 0, 0, 0, width, height, format, type, pixels);
    CV_CheckGlError();

    gl::GenerateMipmap(gl::TEXTURE_2D);
    CV_CheckGlError();
}

void cv::ogl::Texture2D::Impl::copyTo(GLenum format, GLenum type, GLvoid* pixels) const
{
    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();

    gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
    CV_CheckGlError();

    gl::GetTexImage(gl::TEXTURE_2D, 0, format, type, pixels);
    CV_CheckGlError();
}

void cv::ogl::Texture2D::Impl::bind() const
{
    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();
}

#endif // HAVE_OPENGL

cv::ogl::Texture2D::Texture2D() : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    impl_ = Impl::empty();
#endif
}

cv::ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    (void) arows;
    (void) acols;
    (void) aformat;
    (void) atexId;
    (void) autoRelease;
    throw_no_ogl();
#else
    impl_.reset(new Impl(atexId, autoRelease));
    rows_ = arows;
    cols_ = acols;
    format_ = aformat;
#endif
}

cv::ogl::Texture2D::Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    (void) asize;
    (void) aformat;
    (void) atexId;
    (void) autoRelease;
    throw_no_ogl();
#else
    impl_.reset(new Impl(atexId, autoRelease));
    rows_ = asize.height;
    cols_ = asize.width;
    format_ = aformat;
#endif
}

cv::ogl::Texture2D::Texture2D(InputArray arr, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();

    const int depth = CV_MAT_DEPTH(atype);
    const int cn = CV_MAT_CN(atype);

    CV_Assert( depth <= CV_32F );
    CV_Assert( cn == 1 || cn == 3 || cn == 4 );

    const Format internalFormats[] =
    {
        NONE, DEPTH_COMPONENT, NONE, RGB, RGBA
    };
    const GLenum srcFormats[] =
    {
        0, gl::DEPTH_COMPONENT, 0, gl::BGR, gl::BGRA
    };

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer buf = arr.getOGlBuffer();
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease));
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
            #ifndef HAVE_CUDA
                throw_no_cuda();
            #else
                GpuMat dmat = arr.getGpuMat();
                ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER);
                buf.setAutoRelease(true);
                buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
                impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease));
                ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            #endif

            break;
        }

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data, autoRelease));
            break;
        }
    }

    rows_ = asize.height;
    cols_ = asize.width;
    format_ = internalFormats[cn];
#endif
}

void cv::ogl::Texture2D::create(int arows, int acols, Format aformat, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arows;
    (void) acols;
    (void) aformat;
    (void) autoRelease;
    throw_no_ogl();
#else
    if (rows_ != arows || cols_ != acols || format_ != aformat)
    {
        ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
        impl_.reset(new Impl(aformat, acols, arows, aformat, gl::FLOAT, 0, autoRelease));
        rows_ = arows;
        cols_ = acols;
        format_ = aformat;
    }
#endif
}

void cv::ogl::Texture2D::release()
{
#ifdef HAVE_OPENGL
    if (impl_)
        impl_->setAutoRelease(true);
    impl_ = Impl::empty();
    rows_ = 0;
    cols_ = 0;
    format_ = NONE;
#endif
}

void cv::ogl::Texture2D::setAutoRelease(bool flag)
{
#ifndef HAVE_OPENGL
    (void) flag;
    throw_no_ogl();
#else
    impl_->setAutoRelease(flag);
#endif
}

void cv::ogl::Texture2D::copyFrom(InputArray arr, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();

    const int depth = CV_MAT_DEPTH(atype);
    const int cn = CV_MAT_CN(atype);

    CV_Assert( depth <= CV_32F );
    CV_Assert( cn == 1 || cn == 3 || cn == 4 );

    const Format internalFormats[] =
    {
        NONE, DEPTH_COMPONENT, NONE, RGB, RGBA
    };
    const GLenum srcFormats[] =
    {
        0, gl::DEPTH_COMPONENT, 0, gl::BGR, gl::BGRA
    };

    create(asize, internalFormats[cn], autoRelease);

    switch(kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer buf = arr.getOGlBuffer();
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
            #ifndef HAVE_CUDA
                throw_no_cuda();
            #else
                GpuMat dmat = arr.getGpuMat();
                ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER);
                buf.setAutoRelease(true);
                buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
                impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
                ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            #endif

            break;
        }

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data);
        }
    }
#endif
}

void cv::ogl::Texture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) const
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) ddepth;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const int cn = format_ == DEPTH_COMPONENT ? 1: format_ == RGB ? 3 : 4;
    const GLenum dstFormat = format_ == DEPTH_COMPONENT ? gl::DEPTH_COMPONENT : format_ == RGB ? gl::BGR : gl::BGRA;

    switch(kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer& buf = arr.getOGlBufferRef();
            buf.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER, autoRelease);
            buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER);
            impl_->copyTo(dstFormat, gl_types[ddepth], 0);
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
            #ifndef HAVE_CUDA
                throw_no_cuda();
            #else
                ogl::Buffer buf(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER);
                buf.setAutoRelease(true);
                buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER);
                impl_->copyTo(dstFormat, gl_types[ddepth], 0);
                ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
                buf.copyTo(arr);
            #endif

            break;
        }

    default:
        {
            arr.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn));
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
            impl_->copyTo(dstFormat, gl_types[ddepth], mat.data);
        }
    }
#endif
}

void cv::ogl::Texture2D::bind() const
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    impl_->bind();
#endif
}

unsigned int cv::ogl::Texture2D::texId() const
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
    return 0;
#else
    return impl_->texId();
#endif
}


////////////////////////////////////////////////////////////////////////
// ogl::Arrays
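//
// cv::ogl::Arrays bundles vertex, color, normal and texture-coordinate
// buffers and binds them as client-side arrays for the fixed-function
// pipeline; every non-empty array must contain the same number of elements
// as the vertex array (checked in bind()).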

void cv::ogl::Arrays::setVertexArray(InputArray vertex)
{
    const int cn = vertex.channels();
    const int depth = vertex.depth();

    CV_Assert( cn == 2 || cn == 3 || cn == 4 );
    CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );

    if (vertex.kind() == _InputArray::OPENGL_BUFFER)
        vertex_ = vertex.getOGlBuffer();
    else
        vertex_.copyFrom(vertex);

    size_ = vertex_.size().area();
}

void cv::ogl::Arrays::resetVertexArray()
{
    vertex_.release();
    size_ = 0;
}

void cv::ogl::Arrays::setColorArray(InputArray color)
{
    const int cn = color.channels();

    CV_Assert( cn == 3 || cn == 4 );

    if (color.kind() == _InputArray::OPENGL_BUFFER)
        color_ = color.getOGlBuffer();
    else
        color_.copyFrom(color);
}

void cv::ogl::Arrays::resetColorArray()
{
    color_.release();
}

void cv::ogl::Arrays::setNormalArray(InputArray normal)
{
    const int cn = normal.channels();
    const int depth = normal.depth();

    CV_Assert( cn == 3 );
    CV_Assert( depth == CV_8S || depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );

    if (normal.kind() == _InputArray::OPENGL_BUFFER)
        normal_ = normal.getOGlBuffer();
    else
        normal_.copyFrom(normal);
}

void cv::ogl::Arrays::resetNormalArray()
{
    normal_.release();
}

void cv::ogl::Arrays::setTexCoordArray(InputArray texCoord)
{
    const int cn = texCoord.channels();
    const int depth = texCoord.depth();

    CV_Assert( cn >= 1 && cn <= 4 );
    CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );

    if (texCoord.kind() == _InputArray::OPENGL_BUFFER)
        texCoord_ = texCoord.getOGlBuffer();
    else
        texCoord_.copyFrom(texCoord);
}

void cv::ogl::Arrays::resetTexCoordArray()
{
    texCoord_.release();
}

void cv::ogl::Arrays::release()
{
    resetVertexArray();
    resetColorArray();
    resetNormalArray();
    resetTexCoordArray();
}

void cv::ogl::Arrays::setAutoRelease(bool flag)
{
    vertex_.setAutoRelease(flag);
    color_.setAutoRelease(flag);
    normal_.setAutoRelease(flag);
    texCoord_.setAutoRelease(flag);
}

void cv::ogl::Arrays::bind() const
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    CV_Assert( texCoord_.empty() || texCoord_.size().area() == size_ );
    CV_Assert( normal_.empty() || normal_.size().area() == size_ );
    CV_Assert( color_.empty() || color_.size().area() == size_ );

    if (texCoord_.empty())
    {
        gl::DisableClientState(gl::TEXTURE_COORD_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
        CV_CheckGlError();

        texCoord_.bind(ogl::Buffer::ARRAY_BUFFER);

        gl::TexCoordPointer(texCoord_.channels(), gl_types[texCoord_.depth()], 0, 0);
        CV_CheckGlError();
    }

    if (normal_.empty())
    {
        gl::DisableClientState(gl::NORMAL_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::NORMAL_ARRAY);
        CV_CheckGlError();

        normal_.bind(ogl::Buffer::ARRAY_BUFFER);

        gl::NormalPointer(gl_types[normal_.depth()], 0, 0);
        CV_CheckGlError();
    }

    if (color_.empty())
    {
        gl::DisableClientState(gl::COLOR_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::COLOR_ARRAY);
        CV_CheckGlError();

        color_.bind(ogl::Buffer::ARRAY_BUFFER);

        const int cn = color_.channels();

        gl::ColorPointer(cn, gl_types[color_.depth()], 0, 0);
        CV_CheckGlError();
    }

    if (vertex_.empty())
    {
        gl::DisableClientState(gl::VERTEX_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::VERTEX_ARRAY);
        CV_CheckGlError();

        vertex_.bind(ogl::Buffer::ARRAY_BUFFER);

        gl::VertexPointer(vertex_.channels(), gl_types[vertex_.depth()], 0, 0);
        CV_CheckGlError();
    }

    ogl::Buffer::unbind(ogl::Buffer::ARRAY_BUFFER);
#endif
}

////////////////////////////////////////////////////////////////////////
// Rendering
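//
// Typical use is from a highgui OpenGL window draw callback (a sketch; it
// assumes highgui itself was built with OpenGL support):
//
//     cv::namedWindow("win", cv::WINDOW_OPENGL);
//     cv::ogl::Texture2D tex(image);
//     cv::setOpenGlDrawCallback("win", [](void* data)
//     {
//         cv::ogl::render(*static_cast<cv::ogl::Texture2D*>(data));
//     }, &tex);
//     cv::updateWindow("win");   // triggers the callback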

void cv::ogl::render(const ogl::Texture2D& tex, Rect_<double> wndRect, Rect_<double> texRect)
{
#ifndef HAVE_OPENGL
    (void) tex;
    (void) wndRect;
    (void) texRect;
    throw_no_ogl();
#else
    if (!tex.empty())
    {
        gl::MatrixMode(gl::PROJECTION);
        gl::LoadIdentity();
        gl::Ortho(0.0, 1.0, 1.0, 0.0, -1.0, 1.0);
        CV_CheckGlError();

        gl::MatrixMode(gl::MODELVIEW);
        gl::LoadIdentity();
        CV_CheckGlError();

        gl::Disable(gl::LIGHTING);
        CV_CheckGlError();

        tex.bind();

        gl::Enable(gl::TEXTURE_2D);
        CV_CheckGlError();

        gl::TexEnvi(gl::TEXTURE_ENV, gl::TEXTURE_ENV_MODE, gl::REPLACE);
        CV_CheckGlError();

        gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR);
        CV_CheckGlError();

        const float vertex[] =
        {
            wndRect.x, wndRect.y, 0.0f,
            wndRect.x, (wndRect.y + wndRect.height), 0.0f,
            wndRect.x + wndRect.width, (wndRect.y + wndRect.height), 0.0f,
            wndRect.x + wndRect.width, wndRect.y, 0.0f
        };
        const float texCoords[] =
        {
            texRect.x, texRect.y,
            texRect.x, texRect.y + texRect.height,
            texRect.x + texRect.width, texRect.y + texRect.height,
            texRect.x + texRect.width, texRect.y
        };

        ogl::Buffer::unbind(ogl::Buffer::ARRAY_BUFFER);

        gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
        CV_CheckGlError();

        gl::TexCoordPointer(2, gl::FLOAT, 0, texCoords);
        CV_CheckGlError();

        gl::DisableClientState(gl::NORMAL_ARRAY);
        gl::DisableClientState(gl::COLOR_ARRAY);
        CV_CheckGlError();

        gl::EnableClientState(gl::VERTEX_ARRAY);
        CV_CheckGlError();

        gl::VertexPointer(3, gl::FLOAT, 0, vertex);
        CV_CheckGlError();

        gl::DrawArrays(gl::QUADS, 0, 4);
        CV_CheckGlError();
    }
#endif
}

void cv::ogl::render(const ogl::Arrays& arr, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) mode;
    (void) color;
    throw_no_ogl();
#else
    if (!arr.empty())
    {
        gl::Color3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);

        arr.bind();

        gl::DrawArrays(mode, 0, arr.size());
    }
#endif
}

void cv::ogl::render(const ogl::Arrays& arr, InputArray indices, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) indices;
    (void) mode;
    (void) color;
    throw_no_ogl();
#else
    if (!arr.empty() && !indices.empty())
    {
        gl::Color3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);

        arr.bind();

        const int kind = indices.kind();

        switch (kind)
        {
        case _InputArray::OPENGL_BUFFER :
            {
                ogl::Buffer buf = indices.getOGlBuffer();

                const int depth = buf.depth();

                CV_Assert( buf.channels() == 1 );
                CV_Assert( depth <= CV_32S );

                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                buf.bind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                gl::DrawElements(mode, buf.size().area(), type, 0);

                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                break;
            }

        default:
            {
                Mat mat = indices.getMat();

                const int depth = mat.depth();

                CV_Assert( mat.channels() == 1 );
                CV_Assert( depth <= CV_32S );
                CV_Assert( mat.isContinuous() );

                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                gl::DrawElements(mode, mat.size().area(), type, mat.data);
            }
        }
    }
#endif
}

////////////////////////////////////////////////////////////////////////
// CL-GL Interoperability
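//
// initializeContextFromGL() builds an OpenCL context that shares the current
// OpenGL context (this requires the cl_khr_gl_sharing platform extension);
// the convert*/map* helpers below then move data between UMat and GL objects
// entirely on the device.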

#ifdef HAVE_OPENCL
#  include "opencv2/core/opencl/runtime/opencl_gl.hpp"
#  ifdef cl_khr_gl_sharing
#    define HAVE_OPENCL_OPENGL_SHARING
#  else
#    define NO_OPENCL_SHARING_ERROR CV_ErrorNoReturn(cv::Error::StsBadFunc, "OpenCV was built without OpenCL/OpenGL sharing support")
#  endif
#else // HAVE_OPENCL
#  define NO_OPENCL_SUPPORT_ERROR CV_ErrorNoReturn(cv::Error::StsBadFunc, "OpenCV was built without OpenCL support")
#endif // HAVE_OPENCL

#if defined(HAVE_OPENGL)
#  if defined(__ANDROID__)
#    include <EGL/egl.h>
#  elif defined(__linux__)
#    include <GL/glx.h>
#  endif
#endif // HAVE_OPENGL

namespace cv { namespace ogl {

namespace ocl {

Context& initializeContextFromGL()
{
#if !defined(HAVE_OPENGL)
    NO_OPENGL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL_OPENGL_SHARING)
    NO_OPENCL_SHARING_ERROR;
#else
    cl_uint numPlatforms;
    cl_int status = clGetPlatformIDs(0, NULL, &numPlatforms);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get number of platforms");
    if (numPlatforms == 0)
        CV_Error(cv::Error::OpenCLInitError, "OpenCL: No available platforms");

    std::vector<cl_platform_id> platforms(numPlatforms);
    status = clGetPlatformIDs(numPlatforms, &platforms[0], NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get platforms");

    // TODO Filter platforms by name from OPENCV_OPENCL_DEVICE

    int found = -1;
    cl_device_id device = NULL;
    cl_context context = NULL;

    for (int i = 0; i < (int)numPlatforms; i++)
    {
        // query platform extension: presence of "cl_khr_gl_sharing" extension is required
        {
            AutoBuffer<char> extensionStr;

            size_t extensionSize;
            status = clGetPlatformInfo(platforms[i], CL_PLATFORM_EXTENSIONS, 0, NULL, &extensionSize);
            if (status == CL_SUCCESS)
            {
                extensionStr.allocate(extensionSize+1);
                status = clGetPlatformInfo(platforms[i], CL_PLATFORM_EXTENSIONS, extensionSize, (char*)extensionStr, NULL);
            }
            if (status != CL_SUCCESS)
                CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get platform extension string");

            if (!strstr((const char*)extensionStr, "cl_khr_gl_sharing"))
                continue;
        }

        clGetGLContextInfoKHR_fn clGetGLContextInfoKHR = (clGetGLContextInfoKHR_fn)
                clGetExtensionFunctionAddressForPlatform(platforms[i], "clGetGLContextInfoKHR");
        if (!clGetGLContextInfoKHR)
            continue;

        cl_context_properties properties[] =
        {
#if defined(_WIN32)
            CL_CONTEXT_PLATFORM, (cl_context_properties)platforms[i],
            CL_GL_CONTEXT_KHR, (cl_context_properties)wglGetCurrentContext(),
            CL_WGL_HDC_KHR, (cl_context_properties)wglGetCurrentDC(),
#elif defined(__ANDROID__)
            CL_CONTEXT_PLATFORM, (cl_context_properties)platforms[i],
            CL_GL_CONTEXT_KHR, (cl_context_properties)eglGetCurrentContext(),
            CL_EGL_DISPLAY_KHR, (cl_context_properties)eglGetCurrentDisplay(),
#elif defined(__linux__)
            CL_CONTEXT_PLATFORM, (cl_context_properties)platforms[i],
            CL_GL_CONTEXT_KHR, (cl_context_properties)glXGetCurrentContext(),
            CL_GLX_DISPLAY_KHR, (cl_context_properties)glXGetCurrentDisplay(),
#endif
            0
        };

        // query device
        device = NULL;
        status = clGetGLContextInfoKHR(properties, CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR, sizeof(cl_device_id), (void*)&device, NULL);
        if (status != CL_SUCCESS)
            continue;

        // create context
        context = clCreateContext(properties, 1, &device, NULL, NULL, &status);
        if (status != CL_SUCCESS)
        {
            clReleaseDevice(device);
        }
        else
        {
            found = i;
            break;
        }
    }

    if (found < 0)
        CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't create context for OpenGL interop");

    Context& ctx = Context::getDefault(false);
    initializeContextFromHandle(ctx, platforms[found], context, device);
    return ctx;
#endif
}

} // namespace cv::ogl::ocl

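// Copies the contents of a UMat into an already-allocated GL texture of the
// same size: the texture is wrapped as a cl_mem via clCreateFromGLTexture,
// acquired for OpenCL, filled with clEnqueueCopyBufferToImage and released
// again, so no host round trip is involved.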
void convertToGLTexture2D(InputArray src, Texture2D& texture)
{
    (void)src; (void)texture;
#if !defined(HAVE_OPENGL)
    NO_OPENGL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL_OPENGL_SHARING)
    NO_OPENCL_SHARING_ERROR;
#else
    Size srcSize = src.size();
    CV_Assert(srcSize.width == (int)texture.cols() && srcSize.height == (int)texture.rows());

    using namespace cv::ocl;
    Context& ctx = Context::getDefault();
    cl_context context = (cl_context)ctx.ptr();

    UMat u = src.getUMat();

    // TODO Add support for roi
    CV_Assert(u.offset == 0);
    CV_Assert(u.isContinuous());

    cl_int status = 0;
    cl_mem clImage = clCreateFromGLTexture(context, CL_MEM_WRITE_ONLY, gl::TEXTURE_2D, 0, texture.texId(), &status);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromGLTexture failed");

    cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ);

    cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
    status = clEnqueueAcquireGLObjects(q, 1, &clImage, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed");
    size_t offset = 0; // TODO
    size_t dst_origin[3] = {0, 0, 0};
    size_t region[3] = { (size_t)u.cols, (size_t)u.rows, 1 };
    status = clEnqueueCopyBufferToImage(q, clBuffer, clImage, offset, dst_origin, region, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyBufferToImage failed");
    status = clEnqueueReleaseGLObjects(q, 1, &clImage, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseGLObjects failed");

    status = clFinish(q); // TODO Use events
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clFinish failed");

    status = clReleaseMemObject(clImage); // TODO RAII
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMemObject failed");
#endif
}

void convertFromGLTexture2D(const Texture2D& texture, OutputArray dst)
{
    (void)texture; (void)dst;
#if !defined(HAVE_OPENGL)
    NO_OPENGL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL_OPENGL_SHARING)
    NO_OPENCL_SHARING_ERROR;
#else
    // check texture format
    const int dtype = CV_8UC4;
    CV_Assert(texture.format() == Texture2D::RGBA);

    int textureType = dtype;
    CV_Assert(textureType >= 0);

    using namespace cv::ocl;
    Context& ctx = Context::getDefault();
    cl_context context = (cl_context)ctx.ptr();

    // TODO Need to specify ACCESS_WRITE here somehow to prevent useless data copying!
    dst.create(texture.size(), textureType);
    UMat u = dst.getUMat();

    // TODO Add support for roi
    CV_Assert(u.offset == 0);
    CV_Assert(u.isContinuous());

    cl_int status = 0;
    cl_mem clImage = clCreateFromGLTexture(context, CL_MEM_READ_ONLY, gl::TEXTURE_2D, 0, texture.texId(), &status);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromGLTexture failed");

    cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ);

    cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
    status = clEnqueueAcquireGLObjects(q, 1, &clImage, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed");
    size_t offset = 0; // TODO
    size_t src_origin[3] = {0, 0, 0};
    size_t region[3] = { (size_t)u.cols, (size_t)u.rows, 1 };
    status = clEnqueueCopyImageToBuffer(q, clImage, clBuffer, src_origin, region, offset, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyImageToBuffer failed");
    status = clEnqueueReleaseGLObjects(q, 1, &clImage, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseGLObjects failed");

    status = clFinish(q); // TODO Use events
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clFinish failed");

    status = clReleaseMemObject(clImage); // TODO RAII
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMemObject failed");
#endif
}

//void mapGLBuffer(const Buffer& buffer, UMat& dst, int accessFlags)
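// Exposes a GL buffer to OpenCL kernels without copying through host memory:
// the buffer is wrapped with clCreateFromGLBuffer, acquired for OpenCL use and
// returned as a UMat; unmapGLBuffer() must be called before OpenGL touches the
// buffer again.
//
// Sketch (assumes initializeContextFromGL() has already been called):
//
//     cv::UMat src = cv::ogl::mapGLBuffer(pbo, cv::ACCESS_READ);
//     cv::UMat gray;
//     cv::cvtColor(src, gray, cv::COLOR_BGRA2GRAY);   // any OpenCL-backed call
//     cv::ogl::unmapGLBuffer(src);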
UMat mapGLBuffer(const Buffer& buffer, int accessFlags)
{
    (void)buffer; (void)accessFlags;
#if !defined(HAVE_OPENGL)
    NO_OPENGL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL_OPENGL_SHARING)
    NO_OPENCL_SHARING_ERROR;
#else
    using namespace cv::ocl;
    Context& ctx = Context::getDefault();
    cl_context context = (cl_context)ctx.ptr();
    cl_command_queue clQueue = (cl_command_queue)Queue::getDefault().ptr();

    int clAccessFlags = 0;
    switch (accessFlags & (ACCESS_READ|ACCESS_WRITE))
    {
    default:
    case ACCESS_READ|ACCESS_WRITE:
        clAccessFlags = CL_MEM_READ_WRITE;
        break;
    case ACCESS_READ:
        clAccessFlags = CL_MEM_READ_ONLY;
        break;
    case ACCESS_WRITE:
        clAccessFlags = CL_MEM_WRITE_ONLY;
        break;
    }

    cl_int status = 0;
    cl_mem clBuffer = clCreateFromGLBuffer(context, clAccessFlags, buffer.bufId(), &status);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromGLBuffer failed");

    gl::Finish();

    status = clEnqueueAcquireGLObjects(clQueue, 1, &clBuffer, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed");

    size_t step = buffer.cols() * buffer.elemSize();
    int rows = buffer.rows();
    int cols = buffer.cols();
    int type = buffer.type();

    UMat u;
    convertFromBuffer(clBuffer, step, rows, cols, type, u);
    return u;
#endif
}

void unmapGLBuffer(UMat& u)
{
    (void)u;
#if !defined(HAVE_OPENGL)
    NO_OPENGL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL_OPENGL_SHARING)
    NO_OPENCL_SHARING_ERROR;
#else
    using namespace cv::ocl;
    cl_command_queue clQueue = (cl_command_queue)Queue::getDefault().ptr();

    cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ);

    u.release();

    cl_int status = clEnqueueReleaseGLObjects(clQueue, 1, &clBuffer, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseGLObjects failed");

    status = clFinish(clQueue);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clFinish failed");

    status = clReleaseMemObject(clBuffer);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMemObject failed");
#endif
}

}} // namespace cv::ogl