opengl.cpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

#ifdef HAVE_OPENGL
#  include "gl_core_3_1.hpp"
#  ifdef HAVE_CUDA
#    include <cuda_gl_interop.h>
#  endif
#else // HAVE_OPENGL
#  define NO_OPENGL_SUPPORT_ERROR CV_ErrorNoReturn(cv::Error::StsBadFunc, "OpenCV was build without OpenGL support")
#endif // HAVE_OPENGL

using namespace cv;
using namespace cv::cuda;

namespace
{
#ifndef HAVE_OPENGL
    inline void throw_no_ogl() { CV_Error(cv::Error::OpenGlNotSupported, "The library is compiled without OpenGL support"); }
#else
    inline void throw_no_ogl() { CV_Error(cv::Error::OpenGlApiCallError, "OpenGL context doesn't exist"); }

    bool checkError(const char* file, const int line, const char* func = 0)
    {
        GLenum err = gl::GetError();

        if (err != gl::NO_ERROR_)
        {
            const char* msg;

            switch (err)
            {
            case gl::INVALID_ENUM:
                msg = "An unacceptable value is specified for an enumerated argument";
                break;

            case gl::INVALID_VALUE:
                msg = "A numeric argument is out of range";
                break;

            case gl::INVALID_OPERATION:
                msg = "The specified operation is not allowed in the current state";
                break;

            case gl::OUT_OF_MEMORY:
                msg = "There is not enough memory left to execute the command";
                break;

            default:
                msg = "Unknown error";
            };

            cvError(CV_OpenGlApiCallError, func, msg, file, line);

            return false;
        }

        return true;
    }
#endif

    #define CV_CheckGlError() CV_DbgAssert( (checkError(__FILE__, __LINE__, CV_Func)) )
} // namespace

#ifdef HAVE_OPENGL
namespace
{
    const GLenum gl_types[] = { gl::UNSIGNED_BYTE, gl::BYTE, gl::UNSIGNED_SHORT, gl::SHORT, gl::INT, gl::FLOAT, gl::DOUBLE };
}
#endif

////////////////////////////////////////////////////////////////////////
// setGlDevice

void cv::cuda::setGlDevice(int device)
{
#ifndef HAVE_OPENGL
    (void) device;
    throw_no_ogl();
#else
    #ifndef HAVE_CUDA
        (void) device;
        throw_no_cuda();
    #else
        cudaSafeCall( cudaGLSetGLDevice(device) );
    #endif
#endif
}

////////////////////////////////////////////////////////////////////////
// CudaResource

#if defined(HAVE_OPENGL) && defined(HAVE_CUDA)

namespace
{
    class CudaResource
    {
    public:
        CudaResource();
        ~CudaResource();

        void registerBuffer(GLuint buffer);
        void release();

        void copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream = 0);
        void copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream = 0);

        void* map(cudaStream_t stream = 0);
        void unmap(cudaStream_t stream = 0);

    private:
        cudaGraphicsResource_t resource_;
        GLuint buffer_;

        class GraphicsMapHolder;
    };

    CudaResource::CudaResource() : resource_(0), buffer_(0)
    {
    }

    CudaResource::~CudaResource()
    {
        release();
    }

    void CudaResource::registerBuffer(GLuint buffer)
    {
        CV_DbgAssert( buffer != 0 );

        if (buffer_ == buffer)
            return;

        cudaGraphicsResource_t resource;
        cudaSafeCall( cudaGraphicsGLRegisterBuffer(&resource, buffer, cudaGraphicsMapFlagsNone) );

        release();

        resource_ = resource;
        buffer_ = buffer;
    }

    void CudaResource::release()
    {
        if (resource_)
            cudaGraphicsUnregisterResource(resource_);

        resource_ = 0;
        buffer_ = 0;
    }

    class CudaResource::GraphicsMapHolder
    {
    public:
        GraphicsMapHolder(cudaGraphicsResource_t* resource, cudaStream_t stream);
        ~GraphicsMapHolder();

        void reset();

    private:
        cudaGraphicsResource_t* resource_;
        cudaStream_t stream_;
    };

    CudaResource::GraphicsMapHolder::GraphicsMapHolder(cudaGraphicsResource_t* resource, cudaStream_t stream) : resource_(resource), stream_(stream)
    {
        if (resource_)
            cudaSafeCall( cudaGraphicsMapResources(1, resource_, stream_) );
    }

    CudaResource::GraphicsMapHolder::~GraphicsMapHolder()
    {
        if (resource_)
            cudaGraphicsUnmapResources(1, resource_, stream_);
    }

    void CudaResource::GraphicsMapHolder::reset()
    {
        resource_ = 0;
    }

    void CudaResource::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream)
    {
        CV_DbgAssert( resource_ != 0 );

        GraphicsMapHolder h(&resource_, stream);
        (void) h;

        void* dst;
        size_t size;
        cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&dst, &size, resource_) );

        CV_DbgAssert( width * height == size );

        if (stream == 0)
            cudaSafeCall( cudaMemcpy2D(dst, width, src, spitch, width, height, cudaMemcpyDeviceToDevice) );
        else
            cudaSafeCall( cudaMemcpy2DAsync(dst, width, src, spitch, width, height, cudaMemcpyDeviceToDevice, stream) );
    }

    void CudaResource::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream)
    {
        CV_DbgAssert( resource_ != 0 );

        GraphicsMapHolder h(&resource_, stream);
        (void) h;

        void* src;
        size_t size;
        cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&src, &size, resource_) );

        CV_DbgAssert( width * height == size );

        if (stream == 0)
            cudaSafeCall( cudaMemcpy2D(dst, dpitch, src, width, width, height, cudaMemcpyDeviceToDevice) );
        else
            cudaSafeCall( cudaMemcpy2DAsync(dst, dpitch, src, width, width, height, cudaMemcpyDeviceToDevice, stream) );
    }

    void* CudaResource::map(cudaStream_t stream)
    {
        CV_DbgAssert( resource_ != 0 );

        GraphicsMapHolder h(&resource_, stream);

        void* ptr;
        size_t size;
        cudaSafeCall( cudaGraphicsResourceGetMappedPointer(&ptr, &size, resource_) );

        h.reset();

        return ptr;
    }

    void CudaResource::unmap(cudaStream_t stream)
    {
        CV_Assert( resource_ != 0 );

        cudaGraphicsUnmapResources(1, &resource_, stream);
    }
}

#endif

////////////////////////////////////////////////////////////////////////
// ogl::Buffer

#ifndef HAVE_OPENGL

class cv::ogl::Buffer::Impl
{
};

#else

class cv::ogl::Buffer::Impl
{
public:
    static const Ptr<Impl> & empty();

    Impl(GLuint bufId, bool autoRelease);
    Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease);
    ~Impl();

    void bind(GLenum target) const;

    void copyFrom(GLuint srcBuf, GLsizeiptr size);

    void copyFrom(GLsizeiptr size, const GLvoid* data);
    void copyTo(GLsizeiptr size, GLvoid* data) const;

    void* mapHost(GLenum access);
    void unmapHost();

#ifdef HAVE_CUDA
    void copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream = 0);
    void copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream = 0) const;

    void* mapDevice(cudaStream_t stream = 0);
    void unmapDevice(cudaStream_t stream = 0);
#endif

    void setAutoRelease(bool flag) { autoRelease_ = flag; }

    GLuint bufId() const { return bufId_; }

private:
    Impl();

    GLuint bufId_;
    bool autoRelease_;

#ifdef HAVE_CUDA
    mutable CudaResource cudaResource_;
#endif
};

const Ptr<cv::ogl::Buffer::Impl>& cv::ogl::Buffer::Impl::empty()
{
    static Ptr<Impl> p(new Impl);
    return p;
}

cv::ogl::Buffer::Impl::Impl() : bufId_(0), autoRelease_(false)
{
}

cv::ogl::Buffer::Impl::Impl(GLuint abufId, bool autoRelease) : bufId_(abufId), autoRelease_(autoRelease)
{
    CV_Assert( gl::IsBuffer(abufId) == gl::TRUE_ );
}

cv::ogl::Buffer::Impl::Impl(GLsizeiptr size, const GLvoid* data, GLenum target, bool autoRelease) : bufId_(0), autoRelease_(autoRelease)
{
    gl::GenBuffers(1, &bufId_);
    CV_CheckGlError();

    CV_Assert( bufId_ != 0 );

    gl::BindBuffer(target, bufId_);
    CV_CheckGlError();

    gl::BufferData(target, size, data, gl::DYNAMIC_DRAW);
    CV_CheckGlError();

    gl::BindBuffer(target, 0);
    CV_CheckGlError();
}

cv::ogl::Buffer::Impl::~Impl()
{
    if (autoRelease_ && bufId_)
        gl::DeleteBuffers(1, &bufId_);
}

void cv::ogl::Buffer::Impl::bind(GLenum target) const
{
    gl::BindBuffer(target, bufId_);
    CV_CheckGlError();
}

void cv::ogl::Buffer::Impl::copyFrom(GLuint srcBuf, GLsizeiptr size)
{
    gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
    CV_CheckGlError();

    gl::BindBuffer(gl::COPY_READ_BUFFER, srcBuf);
    CV_CheckGlError();

    gl::CopyBufferSubData(gl::COPY_READ_BUFFER, gl::COPY_WRITE_BUFFER, 0, 0, size);
    CV_CheckGlError();
}

void cv::ogl::Buffer::Impl::copyFrom(GLsizeiptr size, const GLvoid* data)
{
    gl::BindBuffer(gl::COPY_WRITE_BUFFER, bufId_);
    CV_CheckGlError();

    gl::BufferSubData(gl::COPY_WRITE_BUFFER, 0, size, data);
    CV_CheckGlError();
}

void cv::ogl::Buffer::Impl::copyTo(GLsizeiptr size, GLvoid* data) const
{
    gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
    CV_CheckGlError();

    gl::GetBufferSubData(gl::COPY_READ_BUFFER, 0, size, data);
    CV_CheckGlError();
}

void* cv::ogl::Buffer::Impl::mapHost(GLenum access)
{
    gl::BindBuffer(gl::COPY_READ_BUFFER, bufId_);
    CV_CheckGlError();

    GLvoid* data = gl::MapBuffer(gl::COPY_READ_BUFFER, access);
    CV_CheckGlError();

    return data;
}

void cv::ogl::Buffer::Impl::unmapHost()
{
    gl::UnmapBuffer(gl::COPY_READ_BUFFER);
}

#ifdef HAVE_CUDA

void cv::ogl::Buffer::Impl::copyFrom(const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream)
{
    cudaResource_.registerBuffer(bufId_);
    cudaResource_.copyFrom(src, spitch, width, height, stream);
}

void cv::ogl::Buffer::Impl::copyTo(void* dst, size_t dpitch, size_t width, size_t height, cudaStream_t stream) const
{
    cudaResource_.registerBuffer(bufId_);
    cudaResource_.copyTo(dst, dpitch, width, height, stream);
}

void* cv::ogl::Buffer::Impl::mapDevice(cudaStream_t stream)
{
    cudaResource_.registerBuffer(bufId_);
    return cudaResource_.map(stream);
}

void cv::ogl::Buffer::Impl::unmapDevice(cudaStream_t stream)
{
    cudaResource_.unmap(stream);
}

#endif // HAVE_CUDA

#endif // HAVE_OPENGL

cv::ogl::Buffer::Buffer() : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    impl_ = Impl::empty();
#endif
}

cv::ogl::Buffer::Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    (void) arows;
    (void) acols;
    (void) atype;
    (void) abufId;
    (void) autoRelease;
    throw_no_ogl();
#else
    impl_.reset(new Impl(abufId, autoRelease));
    rows_ = arows;
    cols_ = acols;
    type_ = atype;
#endif
}

cv::ogl::Buffer::Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    (void) asize;
    (void) atype;
    (void) abufId;
    (void) autoRelease;
    throw_no_ogl();
#else
    impl_.reset(new Impl(abufId, autoRelease));
    rows_ = asize.height;
    cols_ = asize.width;
    type_ = atype;
#endif
}

cv::ogl::Buffer::Buffer(InputArray arr, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
    case _InputArray::CUDA_GPU_MAT:
        copyFrom(arr, target, autoRelease);
        break;

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            const GLsizeiptr asize = mat.rows * mat.cols * mat.elemSize();
            impl_.reset(new Impl(asize, mat.data, target, autoRelease));
            rows_ = mat.rows;
            cols_ = mat.cols;
            type_ = mat.type();
            break;
        }
    }
#endif
}

void cv::ogl::Buffer::create(int arows, int acols, int atype, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arows;
    (void) acols;
    (void) atype;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
    if (rows_ != arows || cols_ != acols || type_ != atype)
    {
        const GLsizeiptr asize = arows * acols * CV_ELEM_SIZE(atype);
        impl_.reset(new Impl(asize, 0, target, autoRelease));
        rows_ = arows;
        cols_ = acols;
        type_ = atype;
    }
#endif
}

void cv::ogl::Buffer::release()
{
#ifdef HAVE_OPENGL
    if (impl_)
        impl_->setAutoRelease(true);
    impl_ = Impl::empty();
    rows_ = 0;
    cols_ = 0;
    type_ = 0;
#endif
}

void cv::ogl::Buffer::setAutoRelease(bool flag)
{
#ifndef HAVE_OPENGL
    (void) flag;
    throw_no_ogl();
#else
    impl_->setAutoRelease(flag);
#endif
}

void cv::ogl::Buffer::copyFrom(InputArray arr, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();
    create(asize, atype, target, autoRelease);

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer buf = arr.getOGlBuffer();
            impl_->copyFrom(buf.bufId(), asize.area() * CV_ELEM_SIZE(atype));
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
            #ifndef HAVE_CUDA
                throw_no_cuda();
            #else
                GpuMat dmat = arr.getGpuMat();
                impl_->copyFrom(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
            #endif

            break;
        }

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            impl_->copyFrom(asize.area() * CV_ELEM_SIZE(atype), mat.data);
        }
    }
#endif
}

void cv::ogl::Buffer::copyFrom(InputArray arr, cuda::Stream& stream, Target target, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) stream;
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
#else
    #ifndef HAVE_CUDA
        (void) arr;
        (void) stream;
        (void) target;
        (void) autoRelease;
        throw_no_cuda();
    #else
        GpuMat dmat = arr.getGpuMat();

        create(dmat.size(), dmat.type(), target, autoRelease);

        impl_->copyFrom(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows, cuda::StreamAccessor::getStream(stream));
    #endif
#endif
}

void cv::ogl::Buffer::copyTo(OutputArray arr) const
{
#ifndef HAVE_OPENGL
    (void) arr;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            arr.getOGlBufferRef().copyFrom(*this);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
            #ifndef HAVE_CUDA
                throw_no_cuda();
            #else
                GpuMat& dmat = arr.getGpuMatRef();
                dmat.create(rows_, cols_, type_);
                impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows);
            #endif

            break;
        }

    default:
        {
            arr.create(rows_, cols_, type_);
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            impl_->copyTo(mat.rows * mat.cols * mat.elemSize(), mat.data);
        }
    }
#endif
}

void cv::ogl::Buffer::copyTo(OutputArray arr, cuda::Stream& stream) const
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) stream;
    throw_no_ogl();
#else
    #ifndef HAVE_CUDA
        (void) arr;
        (void) stream;
        throw_no_cuda();
    #else
        arr.create(rows_, cols_, type_);
        GpuMat dmat = arr.getGpuMat();
        impl_->copyTo(dmat.data, dmat.step, dmat.cols * dmat.elemSize(), dmat.rows, cuda::StreamAccessor::getStream(stream));
    #endif
#endif
}

cv::ogl::Buffer cv::ogl::Buffer::clone(Target target, bool autoRelease) const
{
#ifndef HAVE_OPENGL
    (void) target;
    (void) autoRelease;
    throw_no_ogl();
    return cv::ogl::Buffer();
#else
    ogl::Buffer buf;
    buf.copyFrom(*this, target, autoRelease);
    return buf;
#endif
}

void cv::ogl::Buffer::bind(Target target) const
{
#ifndef HAVE_OPENGL
    (void) target;
    throw_no_ogl();
#else
    impl_->bind(target);
#endif
}

void cv::ogl::Buffer::unbind(Target target)
{
#ifndef HAVE_OPENGL
    (void) target;
    throw_no_ogl();
#else
    gl::BindBuffer(target, 0);
    CV_CheckGlError();
#endif
}

Mat cv::ogl::Buffer::mapHost(Access access)
{
#ifndef HAVE_OPENGL
    (void) access;
    throw_no_ogl();
    return Mat();
#else
    return Mat(rows_, cols_, type_, impl_->mapHost(access));
#endif
}

void cv::ogl::Buffer::unmapHost()
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    return impl_->unmapHost();
#endif
}

GpuMat cv::ogl::Buffer::mapDevice()
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
    return GpuMat();
#else
    #ifndef HAVE_CUDA
        throw_no_cuda();
        return GpuMat();
    #else
        return GpuMat(rows_, cols_, type_, impl_->mapDevice());
    #endif
#endif
}

void cv::ogl::Buffer::unmapDevice()
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    #ifndef HAVE_CUDA
        throw_no_cuda();
    #else
        impl_->unmapDevice();
    #endif
#endif
}

cuda::GpuMat cv::ogl::Buffer::mapDevice(cuda::Stream& stream)
{
#ifndef HAVE_OPENGL
    (void) stream;
    throw_no_ogl();
    return GpuMat();
#else
    #ifndef HAVE_CUDA
        (void) stream;
        throw_no_cuda();
        return GpuMat();
    #else
        return GpuMat(rows_, cols_, type_, impl_->mapDevice(cuda::StreamAccessor::getStream(stream)));
    #endif
#endif
}

void cv::ogl::Buffer::unmapDevice(cuda::Stream& stream)
{
#ifndef HAVE_OPENGL
    (void) stream;
    throw_no_ogl();
#else
    #ifndef HAVE_CUDA
        (void) stream;
        throw_no_cuda();
    #else
        impl_->unmapDevice(cuda::StreamAccessor::getStream(stream));
    #endif
#endif
}

unsigned int cv::ogl::Buffer::bufId() const
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
    return 0;
#else
    return impl_->bufId();
#endif
}


//////////////////////////////////////////////////////////////////////////////////////////
// ogl::Texture

#ifndef HAVE_OPENGL

class cv::ogl::Texture2D::Impl
{
};

#else

class cv::ogl::Texture2D::Impl
{
public:
    static const Ptr<Impl> empty();

    Impl(GLuint texId, bool autoRelease);
    Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease);
    ~Impl();

    void copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels);
    void copyTo(GLenum format, GLenum type, GLvoid* pixels) const;

    void bind() const;

    void setAutoRelease(bool flag) { autoRelease_ = flag; }

    GLuint texId() const { return texId_; }

private:
    Impl();

    GLuint texId_;
    bool autoRelease_;
};

const Ptr<cv::ogl::Texture2D::Impl> cv::ogl::Texture2D::Impl::empty()
{
    static Ptr<Impl> p(new Impl);
    return p;
}

cv::ogl::Texture2D::Impl::Impl() : texId_(0), autoRelease_(false)
{
}

cv::ogl::Texture2D::Impl::Impl(GLuint atexId, bool autoRelease) : texId_(atexId), autoRelease_(autoRelease)
{
    CV_Assert( gl::IsTexture(atexId) == gl::TRUE_ );
}

cv::ogl::Texture2D::Impl::Impl(GLint internalFormat, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels, bool autoRelease) : texId_(0), autoRelease_(autoRelease)
{
    gl::GenTextures(1, &texId_);
    CV_CheckGlError();

    CV_Assert(texId_ != 0);

    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();

    gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);
    CV_CheckGlError();

    gl::TexImage2D(gl::TEXTURE_2D, 0, internalFormat, width, height, 0, format, type, pixels);
    CV_CheckGlError();

    gl::GenerateMipmap(gl::TEXTURE_2D);
    CV_CheckGlError();
}

cv::ogl::Texture2D::Impl::~Impl()
{
    if (autoRelease_ && texId_)
        gl::DeleteTextures(1, &texId_);
}

void cv::ogl::Texture2D::Impl::copyFrom(GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid *pixels)
{
    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();

    gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);
    CV_CheckGlError();

    gl::TexSubImage2D(gl::TEXTURE_2D, 0, 0, 0, width, height, format, type, pixels);
    CV_CheckGlError();

    gl::GenerateMipmap(gl::TEXTURE_2D);
    CV_CheckGlError();
}

void cv::ogl::Texture2D::Impl::copyTo(GLenum format, GLenum type, GLvoid* pixels) const
{
    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();

    gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
    CV_CheckGlError();

    gl::GetTexImage(gl::TEXTURE_2D, 0, format, type, pixels);
    CV_CheckGlError();
}

void cv::ogl::Texture2D::Impl::bind() const
{
    gl::BindTexture(gl::TEXTURE_2D, texId_);
    CV_CheckGlError();
}

#endif // HAVE_OPENGL

cv::ogl::Texture2D::Texture2D() : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    impl_ = Impl::empty();
#endif
}

cv::ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    (void) arows;
    (void) acols;
    (void) aformat;
    (void) atexId;
    (void) autoRelease;
    throw_no_ogl();
#else
    impl_.reset(new Impl(atexId, autoRelease));
    rows_ = arows;
    cols_ = acols;
    format_ = aformat;
#endif
}

cv::ogl::Texture2D::Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    (void) asize;
    (void) aformat;
    (void) atexId;
    (void) autoRelease;
    throw_no_ogl();
#else
    impl_.reset(new Impl(atexId, autoRelease));
    rows_ = asize.height;
    cols_ = asize.width;
    format_ = aformat;
#endif
}

cv::ogl::Texture2D::Texture2D(InputArray arr, bool autoRelease) : rows_(0), cols_(0), format_(NONE)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();

    const int depth = CV_MAT_DEPTH(atype);
    const int cn = CV_MAT_CN(atype);

    CV_Assert( depth <= CV_32F );
    CV_Assert( cn == 1 || cn == 3 || cn == 4 );

    const Format internalFormats[] =
    {
        NONE, DEPTH_COMPONENT, NONE, RGB, RGBA
    };
    const GLenum srcFormats[] =
    {
        0, gl::DEPTH_COMPONENT, 0, gl::BGR, gl::BGRA
    };

    switch (kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer buf = arr.getOGlBuffer();
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease));
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
            #ifndef HAVE_CUDA
                throw_no_cuda();
            #else
                GpuMat dmat = arr.getGpuMat();
                ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER);
                buf.setAutoRelease(true);
                buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
                impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], 0, autoRelease));
                ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            #endif

            break;
        }

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_.reset(new Impl(internalFormats[cn], asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data, autoRelease));
            break;
        }
    }

    rows_ = asize.height;
    cols_ = asize.width;
    format_ = internalFormats[cn];
#endif
}

void cv::ogl::Texture2D::create(int arows, int acols, Format aformat, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arows;
    (void) acols;
    (void) aformat;
    (void) autoRelease;
    throw_no_ogl();
#else
    if (rows_ != arows || cols_ != acols || format_ != aformat)
    {
        ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
        impl_.reset(new Impl(aformat, acols, arows, aformat, gl::FLOAT, 0, autoRelease));
        rows_ = arows;
        cols_ = acols;
        format_ = aformat;
    }
#endif
}

void cv::ogl::Texture2D::release()
{
#ifdef HAVE_OPENGL
    if (impl_)
        impl_->setAutoRelease(true);
    impl_ = Impl::empty();
    rows_ = 0;
    cols_ = 0;
    format_ = NONE;
#endif
}

void cv::ogl::Texture2D::setAutoRelease(bool flag)
{
#ifndef HAVE_OPENGL
    (void) flag;
    throw_no_ogl();
#else
    impl_->setAutoRelease(flag);
#endif
}

void cv::ogl::Texture2D::copyFrom(InputArray arr, bool autoRelease)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const Size asize = arr.size();
    const int atype = arr.type();

    const int depth = CV_MAT_DEPTH(atype);
    const int cn = CV_MAT_CN(atype);

    CV_Assert( depth <= CV_32F );
    CV_Assert( cn == 1 || cn == 3 || cn == 4 );

    const Format internalFormats[] =
    {
        NONE, DEPTH_COMPONENT, NONE, RGB, RGBA
    };
    const GLenum srcFormats[] =
    {
        0, gl::DEPTH_COMPONENT, 0, gl::BGR, gl::BGRA
    };

    create(asize, internalFormats[cn], autoRelease);

    switch(kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer buf = arr.getOGlBuffer();
            buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
            #ifndef HAVE_CUDA
                throw_no_cuda();
            #else
                GpuMat dmat = arr.getGpuMat();
                ogl::Buffer buf(dmat, ogl::Buffer::PIXEL_UNPACK_BUFFER);
                buf.setAutoRelease(true);
                buf.bind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
                impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], 0);
                ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            #endif

            break;
        }

    default:
        {
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_UNPACK_BUFFER);
            impl_->copyFrom(asize.width, asize.height, srcFormats[cn], gl_types[depth], mat.data);
        }
    }
#endif
}

void cv::ogl::Texture2D::copyTo(OutputArray arr, int ddepth, bool autoRelease) const
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) ddepth;
    (void) autoRelease;
    throw_no_ogl();
#else
    const int kind = arr.kind();

    const int cn = format_ == DEPTH_COMPONENT ? 1: format_ == RGB ? 3 : 4;
    const GLenum dstFormat = format_ == DEPTH_COMPONENT ? gl::DEPTH_COMPONENT : format_ == RGB ? gl::BGR : gl::BGRA;

    switch(kind)
    {
    case _InputArray::OPENGL_BUFFER:
        {
            ogl::Buffer& buf = arr.getOGlBufferRef();
            buf.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER, autoRelease);
            buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER);
            impl_->copyTo(dstFormat, gl_types[ddepth], 0);
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
            break;
        }

    case _InputArray::CUDA_GPU_MAT:
        {
            #ifndef HAVE_CUDA
                throw_no_cuda();
            #else
                ogl::Buffer buf(rows_, cols_, CV_MAKE_TYPE(ddepth, cn), ogl::Buffer::PIXEL_PACK_BUFFER);
                buf.setAutoRelease(true);
                buf.bind(ogl::Buffer::PIXEL_PACK_BUFFER);
                impl_->copyTo(dstFormat, gl_types[ddepth], 0);
                ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
                buf.copyTo(arr);
            #endif

            break;
        }

    default:
        {
            arr.create(rows_, cols_, CV_MAKE_TYPE(ddepth, cn));
            Mat mat = arr.getMat();
            CV_Assert( mat.isContinuous() );
            ogl::Buffer::unbind(ogl::Buffer::PIXEL_PACK_BUFFER);
            impl_->copyTo(dstFormat, gl_types[ddepth], mat.data);
        }
    }
#endif
}

void cv::ogl::Texture2D::bind() const
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    impl_->bind();
#endif
}

unsigned int cv::ogl::Texture2D::texId() const
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
    return 0;
#else
    return impl_->texId();
#endif
}


////////////////////////////////////////////////////////////////////////
// ogl::Arrays

void cv::ogl::Arrays::setVertexArray(InputArray vertex)
{
    const int cn = vertex.channels();
    const int depth = vertex.depth();

    CV_Assert( cn == 2 || cn == 3 || cn == 4 );
    CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );

    if (vertex.kind() == _InputArray::OPENGL_BUFFER)
        vertex_ = vertex.getOGlBuffer();
    else
        vertex_.copyFrom(vertex);

    size_ = vertex_.size().area();
}

void cv::ogl::Arrays::resetVertexArray()
{
    vertex_.release();
    size_ = 0;
}

void cv::ogl::Arrays::setColorArray(InputArray color)
{
    const int cn = color.channels();

    CV_Assert( cn == 3 || cn == 4 );

    if (color.kind() == _InputArray::OPENGL_BUFFER)
        color_ = color.getOGlBuffer();
    else
        color_.copyFrom(color);
}

void cv::ogl::Arrays::resetColorArray()
{
    color_.release();
}

void cv::ogl::Arrays::setNormalArray(InputArray normal)
{
    const int cn = normal.channels();
    const int depth = normal.depth();

    CV_Assert( cn == 3 );
    CV_Assert( depth == CV_8S || depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );

    if (normal.kind() == _InputArray::OPENGL_BUFFER)
        normal_ = normal.getOGlBuffer();
    else
        normal_.copyFrom(normal);
}

void cv::ogl::Arrays::resetNormalArray()
{
    normal_.release();
}

void cv::ogl::Arrays::setTexCoordArray(InputArray texCoord)
{
    const int cn = texCoord.channels();
    const int depth = texCoord.depth();

    CV_Assert( cn >= 1 && cn <= 4 );
    CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );

    if (texCoord.kind() == _InputArray::OPENGL_BUFFER)
        texCoord_ = texCoord.getOGlBuffer();
    else
        texCoord_.copyFrom(texCoord);
}

void cv::ogl::Arrays::resetTexCoordArray()
{
    texCoord_.release();
}

void cv::ogl::Arrays::release()
{
    resetVertexArray();
    resetColorArray();
    resetNormalArray();
    resetTexCoordArray();
}

void cv::ogl::Arrays::setAutoRelease(bool flag)
{
    vertex_.setAutoRelease(flag);
    color_.setAutoRelease(flag);
    normal_.setAutoRelease(flag);
    texCoord_.setAutoRelease(flag);
}

void cv::ogl::Arrays::bind() const
{
#ifndef HAVE_OPENGL
    throw_no_ogl();
#else
    CV_Assert( texCoord_.empty() || texCoord_.size().area() == size_ );
    CV_Assert( normal_.empty() || normal_.size().area() == size_ );
    CV_Assert( color_.empty() || color_.size().area() == size_ );

    if (texCoord_.empty())
    {
        gl::DisableClientState(gl::TEXTURE_COORD_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
        CV_CheckGlError();

        texCoord_.bind(ogl::Buffer::ARRAY_BUFFER);

        gl::TexCoordPointer(texCoord_.channels(), gl_types[texCoord_.depth()], 0, 0);
        CV_CheckGlError();
    }

    if (normal_.empty())
    {
        gl::DisableClientState(gl::NORMAL_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::NORMAL_ARRAY);
        CV_CheckGlError();

        normal_.bind(ogl::Buffer::ARRAY_BUFFER);

        gl::NormalPointer(gl_types[normal_.depth()], 0, 0);
        CV_CheckGlError();
    }

    if (color_.empty())
    {
        gl::DisableClientState(gl::COLOR_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::COLOR_ARRAY);
        CV_CheckGlError();

        color_.bind(ogl::Buffer::ARRAY_BUFFER);

        const int cn = color_.channels();

        gl::ColorPointer(cn, gl_types[color_.depth()], 0, 0);
        CV_CheckGlError();
    }

    if (vertex_.empty())
    {
        gl::DisableClientState(gl::VERTEX_ARRAY);
        CV_CheckGlError();
    }
    else
    {
        gl::EnableClientState(gl::VERTEX_ARRAY);
        CV_CheckGlError();

        vertex_.bind(ogl::Buffer::ARRAY_BUFFER);

        gl::VertexPointer(vertex_.channels(), gl_types[vertex_.depth()], 0, 0);
        CV_CheckGlError();
    }

    ogl::Buffer::unbind(ogl::Buffer::ARRAY_BUFFER);
#endif
}

////////////////////////////////////////////////////////////////////////
// Rendering

void cv::ogl::render(const ogl::Texture2D& tex, Rect_<double> wndRect, Rect_<double> texRect)
{
#ifndef HAVE_OPENGL
    (void) tex;
    (void) wndRect;
    (void) texRect;
    throw_no_ogl();
#else
    if (!tex.empty())
    {
        gl::MatrixMode(gl::PROJECTION);
        gl::LoadIdentity();
        gl::Ortho(0.0, 1.0, 1.0, 0.0, -1.0, 1.0);
        CV_CheckGlError();

        gl::MatrixMode(gl::MODELVIEW);
        gl::LoadIdentity();
        CV_CheckGlError();

        gl::Disable(gl::LIGHTING);
        CV_CheckGlError();

        tex.bind();

        gl::Enable(gl::TEXTURE_2D);
        CV_CheckGlError();

        gl::TexEnvi(gl::TEXTURE_ENV, gl::TEXTURE_ENV_MODE, gl::REPLACE);
        CV_CheckGlError();

        gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR);
        CV_CheckGlError();

        const float vertex[] =
        {
            wndRect.x, wndRect.y, 0.0f,
            wndRect.x, (wndRect.y + wndRect.height), 0.0f,
            wndRect.x + wndRect.width, (wndRect.y + wndRect.height), 0.0f,
            wndRect.x + wndRect.width, wndRect.y, 0.0f
        };
        const float texCoords[] =
        {
            texRect.x, texRect.y,
            texRect.x, texRect.y + texRect.height,
            texRect.x + texRect.width, texRect.y + texRect.height,
            texRect.x + texRect.width, texRect.y
        };

        ogl::Buffer::unbind(ogl::Buffer::ARRAY_BUFFER);

        gl::EnableClientState(gl::TEXTURE_COORD_ARRAY);
        CV_CheckGlError();

        gl::TexCoordPointer(2, gl::FLOAT, 0, texCoords);
        CV_CheckGlError();

        gl::DisableClientState(gl::NORMAL_ARRAY);
        gl::DisableClientState(gl::COLOR_ARRAY);
        CV_CheckGlError();

        gl::EnableClientState(gl::VERTEX_ARRAY);
        CV_CheckGlError();

        gl::VertexPointer(3, gl::FLOAT, 0, vertex);
        CV_CheckGlError();

        gl::DrawArrays(gl::QUADS, 0, 4);
        CV_CheckGlError();
    }
#endif
}

void cv::ogl::render(const ogl::Arrays& arr, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) mode;
    (void) color;
    throw_no_ogl();
#else
    if (!arr.empty())
    {
        gl::Color3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);

        arr.bind();

        gl::DrawArrays(mode, 0, arr.size());
    }
#endif
}

void cv::ogl::render(const ogl::Arrays& arr, InputArray indices, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) indices;
    (void) mode;
    (void) color;
    throw_no_ogl();
#else
    if (!arr.empty() && !indices.empty())
    {
        gl::Color3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);

        arr.bind();

        const int kind = indices.kind();

        switch (kind)
        {
        case _InputArray::OPENGL_BUFFER :
            {
                ogl::Buffer buf = indices.getOGlBuffer();

                const int depth = buf.depth();

                CV_Assert( buf.channels() == 1 );
                CV_Assert( depth <= CV_32S );

                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                buf.bind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                gl::DrawElements(mode, buf.size().area(), type, 0);

                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                break;
            }

        default:
            {
                Mat mat = indices.getMat();

                const int depth = mat.depth();

                CV_Assert( mat.channels() == 1 );
                CV_Assert( depth <= CV_32S );
                CV_Assert( mat.isContinuous() );

                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                gl::DrawElements(mode, mat.size().area(), type, mat.data);
            }
        }
    }
#endif
}

////////////////////////////////////////////////////////////////////////
// CL-GL Interoperability

#ifdef HAVE_OPENCL
#  include "opencv2/core/opencl/runtime/opencl_gl.hpp"
#else // HAVE_OPENCL
#  define NO_OPENCL_SUPPORT_ERROR CV_ErrorNoReturn(cv::Error::StsBadFunc, "OpenCV was build without OpenCL support")
#endif // HAVE_OPENCL

#if defined(HAVE_OPENGL)
#  if defined(ANDROID)
#    include <EGL/egl.h>
#  elif defined(__linux__)
#    include <GL/glx.h>
#  endif
#endif // HAVE_OPENGL

namespace cv { namespace ogl {

namespace ocl {

Context& initializeContextFromGL()
{
#if !defined(HAVE_OPENGL)
    NO_OPENGL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#else
    cl_uint numPlatforms;
    cl_int status = clGetPlatformIDs(0, NULL, &numPlatforms);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get number of platforms");
    if (numPlatforms == 0)
        CV_Error(cv::Error::OpenCLInitError, "OpenCL: No available platforms");

    std::vector<cl_platform_id> platforms(numPlatforms);
    status = clGetPlatformIDs(numPlatforms, &platforms[0], NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get number of platforms");

    // TODO Filter platforms by name from OPENCV_OPENCL_DEVICE

    int found = -1;
    cl_device_id device = NULL;
    cl_context context = NULL;

    for (int i = 0; i < (int)numPlatforms; i++)
    {
        // query platform extension: presence of "cl_khr_gl_sharing" extension is required
        {
            AutoBuffer<char> extensionStr;

            size_t extensionSize;
            status = clGetPlatformInfo(platforms[i], CL_PLATFORM_EXTENSIONS, 0, NULL, &extensionSize);
            if (status == CL_SUCCESS)
            {
                extensionStr.allocate(extensionSize+1);
                status = clGetPlatformInfo(platforms[i], CL_PLATFORM_EXTENSIONS, extensionSize, (char*)extensionStr, NULL);
            }
            if (status != CL_SUCCESS)
                CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get platform extension string");

            if (!strstr((const char*)extensionStr, "cl_khr_gl_sharing"))
                continue;
        }

        clGetGLContextInfoKHR_fn clGetGLContextInfoKHR = (clGetGLContextInfoKHR_fn)
                clGetExtensionFunctionAddressForPlatform(platforms[i], "clGetGLContextInfoKHR");
        if (!clGetGLContextInfoKHR)
            continue;

        cl_context_properties properties[] =
        {
#if defined(WIN32) || defined(_WIN32)
            CL_CONTEXT_PLATFORM, (cl_context_properties)platforms[i],
            CL_GL_CONTEXT_KHR, (cl_context_properties)wglGetCurrentContext(),
            CL_WGL_HDC_KHR, (cl_context_properties)wglGetCurrentDC(),
#elif defined(ANDROID)
            CL_CONTEXT_PLATFORM, (cl_context_properties)platforms[i],
            CL_GL_CONTEXT_KHR, (cl_context_properties)eglGetCurrentContext(),
            CL_EGL_DISPLAY_KHR, (cl_context_properties)eglGetCurrentDisplay(),
#elif defined(__linux__)
            CL_CONTEXT_PLATFORM, (cl_context_properties)platforms[i],
            CL_GL_CONTEXT_KHR, (cl_context_properties)glXGetCurrentContext(),
            CL_GLX_DISPLAY_KHR, (cl_context_properties)glXGetCurrentDisplay(),
#endif
            0
        };

        // query device
        device = NULL;
        status = clGetGLContextInfoKHR(properties, CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR, sizeof(cl_device_id), (void*)&device, NULL);
        if (status != CL_SUCCESS)
            continue;

        // create context
        context = clCreateContext(properties, 1, &device, NULL, NULL, &status);
        if (status != CL_SUCCESS)
        {
            clReleaseDevice(device);
        }
        else
        {
            found = i;
            break;
        }
    }

    if (found < 0)
        CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't create context for OpenGL interop");

    Context& ctx = Context::getDefault(false);
    initializeContextFromHandle(ctx, platforms[found], context, device);
    return ctx;
#endif
}

} // namespace cv::ogl::ocl

void convertToGLTexture2D(InputArray src, Texture2D& texture)
{
    (void)src; (void)texture;
#if !defined(HAVE_OPENGL)
    NO_OPENGL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#else
    Size srcSize = src.size();
    CV_Assert(srcSize.width == (int)texture.cols() && srcSize.height == (int)texture.rows());

    using namespace cv::ocl;
    Context& ctx = Context::getDefault();
    cl_context context = (cl_context)ctx.ptr();

    UMat u = src.getUMat();

    // TODO Add support for roi
    CV_Assert(u.offset == 0);
    CV_Assert(u.isContinuous());

    cl_int status = 0;
    cl_mem clImage = clCreateFromGLTexture(context, CL_MEM_WRITE_ONLY, gl::TEXTURE_2D, 0, texture.texId(), &status);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromGLTexture failed");

    cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ);

    cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
    status = clEnqueueAcquireGLObjects(q, 1, &clImage, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed");
    size_t offset = 0; // TODO
    size_t dst_origin[3] = {0, 0, 0};
    size_t region[3] = {u.cols, u.rows, 1};
    status = clEnqueueCopyBufferToImage(q, clBuffer, clImage, offset, dst_origin, region, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyBufferToImage failed");
    status = clEnqueueReleaseGLObjects(q, 1, &clImage, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseGLObjects failed");

    status = clFinish(q); // TODO Use events
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clFinish failed");

    status = clReleaseMemObject(clImage); // TODO RAII
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMemObject failed");
#endif
}

void convertFromGLTexture2D(const Texture2D& texture, OutputArray dst)
{
    (void)texture; (void)dst;
#if !defined(HAVE_OPENGL)
    NO_OPENGL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#else
    // check texture format
    const int dtype = CV_8UC4;
    CV_Assert(texture.format() == Texture2D::RGBA);

    int textureType = dtype;
    CV_Assert(textureType >= 0);

    using namespace cv::ocl;
    Context& ctx = Context::getDefault();
    cl_context context = (cl_context)ctx.ptr();

    // TODO Need to specify ACCESS_WRITE here somehow to prevent useless data copying!
    dst.create(texture.size(), textureType);
    UMat u = dst.getUMat();

    // TODO Add support for roi
    CV_Assert(u.offset == 0);
    CV_Assert(u.isContinuous());

    cl_int status = 0;
    cl_mem clImage = clCreateFromGLTexture(context, CL_MEM_READ_ONLY, gl::TEXTURE_2D, 0, texture.texId(), &status);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromGLTexture failed");

    cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ);

    cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
    status = clEnqueueAcquireGLObjects(q, 1, &clImage, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed");
    size_t offset = 0; // TODO
    size_t src_origin[3] = {0, 0, 0};
    size_t region[3] = {u.cols, u.rows, 1};
    status = clEnqueueCopyImageToBuffer(q, clImage, clBuffer, src_origin, region, offset, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyImageToBuffer failed");
    status = clEnqueueReleaseGLObjects(q, 1, &clImage, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseGLObjects failed");

    status = clFinish(q); // TODO Use events
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clFinish failed");

    status = clReleaseMemObject(clImage); // TODO RAII
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMemObject failed");
#endif
}

//void mapGLBuffer(const Buffer& buffer, UMat& dst, int accessFlags)
UMat mapGLBuffer(const Buffer& buffer, int accessFlags)
{
    (void)buffer; (void)accessFlags;
#if !defined(HAVE_OPENGL)
    NO_OPENGL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#else
    using namespace cv::ocl;
    Context& ctx = Context::getDefault();
    cl_context context = (cl_context)ctx.ptr();
    cl_command_queue clQueue = (cl_command_queue)Queue::getDefault().ptr();

    int clAccessFlags = 0;
    switch (accessFlags & (ACCESS_READ|ACCESS_WRITE))
    {
    default:
    case ACCESS_READ|ACCESS_WRITE:
        clAccessFlags = CL_MEM_READ_WRITE;
        break;
    case ACCESS_READ:
        clAccessFlags = CL_MEM_READ_ONLY;
        break;
    case ACCESS_WRITE:
        clAccessFlags = CL_MEM_WRITE_ONLY;
        break;
    }

    cl_int status = 0;
    cl_mem clBuffer = clCreateFromGLBuffer(context, clAccessFlags, buffer.bufId(), &status);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromGLBuffer failed");

    gl::Finish();

    status = clEnqueueAcquireGLObjects(clQueue, 1, &clBuffer, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed");

    size_t step = buffer.cols() * buffer.elemSize();
    int rows = buffer.rows();
    int cols = buffer.cols();
    int type = buffer.type();

    UMat u;
    convertFromBuffer(clBuffer, step, rows, cols, type, u);
    return u;
#endif
}

void unmapGLBuffer(UMat & u)
{
    (void)u;
#if !defined(HAVE_OPENGL)
    NO_OPENGL_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#else
    using namespace cv::ocl;
    cl_command_queue clQueue = (cl_command_queue)Queue::getDefault().ptr();

    cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ);

    u.release();

    cl_int status = clEnqueueReleaseGLObjects(clQueue, 1, &clBuffer, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseGLObjects failed");

    status = clFinish(clQueue);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clFinish failed");

    status = clReleaseMemObject(clBuffer);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMemObject failed");
#endif
}

}} // namespace cv::ogl
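
Usage sketch (not part of opengl.cpp): a minimal example of driving the ogl::Texture2D constructor and ogl::render() defined above through highgui's OpenGL window support. It assumes OpenCV was built with HAVE_OPENGL and the highgui/imgcodecs modules; "image.png" is a hypothetical input file.

#include <opencv2/core/opengl.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>

static void onDraw(void* userdata)
{
    // Draw the texture over the whole window using cv::ogl::render() from this module.
    cv::ogl::Texture2D* tex = static_cast<cv::ogl::Texture2D*>(userdata);
    cv::ogl::render(*tex);
}

int main()
{
    cv::Mat frame = cv::imread("image.png");          // hypothetical input image
    cv::namedWindow("gl", cv::WINDOW_OPENGL);         // OpenGL-backed window provides the GL context
    cv::resizeWindow("gl", frame.cols, frame.rows);

    cv::ogl::Texture2D tex(frame, true);              // Texture2D(InputArray) ctor above uploads the Mat
    cv::setOpenGlDrawCallback("gl", onDraw, &tex);    // highgui invokes onDraw on every redraw
    cv::updateWindow("gl");                           // trigger a redraw
    cv::waitKey();
    cv::setOpenGlDrawCallback("gl", 0, 0);            // clear the callback before tex is destroyed
    return 0;
}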