cuda.inl.hpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_CORE_CUDAINL_HPP
#define OPENCV_CORE_CUDAINL_HPP

#include "opencv2/core/cuda.hpp"

//! @cond IGNORED

namespace cv { namespace cuda {

//===================================================================================
// GpuMat
//===================================================================================

inline
GpuMat::GpuMat(Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{}

inline
GpuMat::GpuMat(int rows_, int cols_, int type_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (rows_ > 0 && cols_ > 0)
        create(rows_, cols_, type_);
}

inline
GpuMat::GpuMat(Size size_, int type_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (size_.height > 0 && size_.width > 0)
        create(size_.height, size_.width, type_);
}

inline
GpuMat::GpuMat(int rows_, int cols_, int type_, Scalar s_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (rows_ > 0 && cols_ > 0)
    {
        create(rows_, cols_, type_);
        setTo(s_);
    }
}

inline
GpuMat::GpuMat(Size size_, int type_, Scalar s_, Allocator* allocator_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    if (size_.height > 0 && size_.width > 0)
    {
        create(size_.height, size_.width, type_);
        setTo(s_);
    }
}

inline
GpuMat::GpuMat(const GpuMat& m)
    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), allocator(m.allocator)
{
    if (refcount)
        CV_XADD(refcount, 1);
}

inline
GpuMat::GpuMat(InputArray arr, Allocator* allocator_) :
    flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_)
{
    upload(arr);
}

inline
GpuMat::~GpuMat()
{
    release();
}

inline
GpuMat& GpuMat::operator =(const GpuMat& m)
{
    if (this != &m)
    {
        GpuMat temp(m);
        swap(temp);
    }

    return *this;
}

inline
void GpuMat::create(Size size_, int type_)
{
    create(size_.height, size_.width, type_);
}

inline
void GpuMat::swap(GpuMat& b)
{
    std::swap(flags, b.flags);
    std::swap(rows, b.rows);
    std::swap(cols, b.cols);
    std::swap(step, b.step);
    std::swap(data, b.data);
    std::swap(datastart, b.datastart);
    std::swap(dataend, b.dataend);
    std::swap(refcount, b.refcount);
    std::swap(allocator, b.allocator);
}

inline
GpuMat GpuMat::clone() const
{
    GpuMat m;
    copyTo(m);
    return m;
}

inline
void GpuMat::copyTo(OutputArray dst, InputArray mask) const
{
    copyTo(dst, mask, Stream::Null());
}

inline
GpuMat& GpuMat::setTo(Scalar s)
{
    return setTo(s, Stream::Null());
}

inline
GpuMat& GpuMat::setTo(Scalar s, InputArray mask)
{
    return setTo(s, mask, Stream::Null());
}

inline
void GpuMat::convertTo(OutputArray dst, int rtype) const
{
    convertTo(dst, rtype, Stream::Null());
}

inline
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, double beta) const
{
    convertTo(dst, rtype, alpha, beta, Stream::Null());
}

inline
void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const
{
    convertTo(dst, rtype, alpha, 0.0, stream);
}

inline
void GpuMat::assignTo(GpuMat& m, int _type) const
{
    if (_type < 0)
        m = *this;
    else
        convertTo(m, _type);
}

inline
uchar* GpuMat::ptr(int y)
{
    CV_DbgAssert( (unsigned)y < (unsigned)rows );
    return data + step * y;
}

inline
const uchar* GpuMat::ptr(int y) const
{
    CV_DbgAssert( (unsigned)y < (unsigned)rows );
    return data + step * y;
}

template<typename _Tp> inline
_Tp* GpuMat::ptr(int y)
{
    return (_Tp*)ptr(y);
}

template<typename _Tp> inline
const _Tp* GpuMat::ptr(int y) const
{
    return (const _Tp*)ptr(y);
}

template <class T> inline
GpuMat::operator PtrStepSz<T>() const
{
    return PtrStepSz<T>(rows, cols, (T*)data, step);
}

template <class T> inline
GpuMat::operator PtrStep<T>() const
{
    return PtrStep<T>((T*)data, step);
}

inline
GpuMat GpuMat::row(int y) const
{
    return GpuMat(*this, Range(y, y+1), Range::all());
}

inline
GpuMat GpuMat::col(int x) const
{
    return GpuMat(*this, Range::all(), Range(x, x+1));
}

inline
GpuMat GpuMat::rowRange(int startrow, int endrow) const
{
    return GpuMat(*this, Range(startrow, endrow), Range::all());
}

inline
GpuMat GpuMat::rowRange(Range r) const
{
    return GpuMat(*this, r, Range::all());
}

inline
GpuMat GpuMat::colRange(int startcol, int endcol) const
{
    return GpuMat(*this, Range::all(), Range(startcol, endcol));
}

inline
GpuMat GpuMat::colRange(Range r) const
{
    return GpuMat(*this, Range::all(), r);
}

inline
GpuMat GpuMat::operator ()(Range rowRange_, Range colRange_) const
{
    return GpuMat(*this, rowRange_, colRange_);
}

inline
GpuMat GpuMat::operator ()(Rect roi) const
{
    return GpuMat(*this, roi);
}

inline
bool GpuMat::isContinuous() const
{
    return (flags & Mat::CONTINUOUS_FLAG) != 0;
}

inline
size_t GpuMat::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}

inline
size_t GpuMat::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}

inline
int GpuMat::type() const
{
    return CV_MAT_TYPE(flags);
}

inline
int GpuMat::depth() const
{
    return CV_MAT_DEPTH(flags);
}

inline
int GpuMat::channels() const
{
    return CV_MAT_CN(flags);
}

inline
size_t GpuMat::step1() const
{
    return step / elemSize1();
}

inline
Size GpuMat::size() const
{
    return Size(cols, rows);
}

inline
bool GpuMat::empty() const
{
    return data == 0;
}

static inline
GpuMat createContinuous(int rows, int cols, int type)
{
    GpuMat m;
    createContinuous(rows, cols, type, m);
    return m;
}

static inline
void createContinuous(Size size, int type, OutputArray arr)
{
    createContinuous(size.height, size.width, type, arr);
}

static inline
GpuMat createContinuous(Size size, int type)
{
    GpuMat m;
    createContinuous(size, type, m);
    return m;
}

static inline
void ensureSizeIsEnough(Size size, int type, OutputArray arr)
{
    ensureSizeIsEnough(size.height, size.width, type, arr);
}

static inline
void swap(GpuMat& a, GpuMat& b)
{
    a.swap(b);
}

//===================================================================================
// HostMem
//===================================================================================

inline
HostMem::HostMem(AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
}

inline
HostMem::HostMem(const HostMem& m)
    : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), alloc_type(m.alloc_type)
{
    if( refcount )
        CV_XADD(refcount, 1);
}

inline
HostMem::HostMem(int rows_, int cols_, int type_, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    if (rows_ > 0 && cols_ > 0)
        create(rows_, cols_, type_);
}

inline
HostMem::HostMem(Size size_, int type_, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    if (size_.height > 0 && size_.width > 0)
        create(size_.height, size_.width, type_);
}

inline
HostMem::HostMem(InputArray arr, AllocType alloc_type_)
    : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_)
{
    arr.getMat().copyTo(*this);
}

inline
HostMem::~HostMem()
{
    release();
}

inline
HostMem& HostMem::operator =(const HostMem& m)
{
    if (this != &m)
    {
        HostMem temp(m);
        swap(temp);
    }

    return *this;
}

inline
void HostMem::swap(HostMem& b)
{
    std::swap(flags, b.flags);
    std::swap(rows, b.rows);
    std::swap(cols, b.cols);
    std::swap(step, b.step);
    std::swap(data, b.data);
    std::swap(datastart, b.datastart);
    std::swap(dataend, b.dataend);
    std::swap(refcount, b.refcount);
    std::swap(alloc_type, b.alloc_type);
}

inline
HostMem HostMem::clone() const
{
    HostMem m(size(), type(), alloc_type);
    createMatHeader().copyTo(m);
    return m;
}

inline
void HostMem::create(Size size_, int type_)
{
    create(size_.height, size_.width, type_);
}

inline
Mat HostMem::createMatHeader() const
{
    return Mat(size(), type(), data, step);
}

inline
bool HostMem::isContinuous() const
{
    return (flags & Mat::CONTINUOUS_FLAG) != 0;
}

inline
size_t HostMem::elemSize() const
{
    return CV_ELEM_SIZE(flags);
}

inline
size_t HostMem::elemSize1() const
{
    return CV_ELEM_SIZE1(flags);
}

inline
int HostMem::type() const
{
    return CV_MAT_TYPE(flags);
}

inline
int HostMem::depth() const
{
    return CV_MAT_DEPTH(flags);
}

inline
int HostMem::channels() const
{
    return CV_MAT_CN(flags);
}

inline
size_t HostMem::step1() const
{
    return step / elemSize1();
}

inline
Size HostMem::size() const
{
    return Size(cols, rows);
}

inline
bool HostMem::empty() const
{
    return data == 0;
}

static inline
void swap(HostMem& a, HostMem& b)
{
    a.swap(b);
}

//===================================================================================
// Stream
//===================================================================================

inline
Stream::Stream(const Ptr<Impl>& impl)
    : impl_(impl)
{
}

//===================================================================================
// Event
//===================================================================================

inline
Event::Event(const Ptr<Impl>& impl)
    : impl_(impl)
{
}

//===================================================================================
// Initialization & Info
//===================================================================================

inline
bool TargetArchs::has(int major, int minor)
{
    return hasPtx(major, minor) || hasBin(major, minor);
}

inline
bool TargetArchs::hasEqualOrGreater(int major, int minor)
{
    return hasEqualOrGreaterPtx(major, minor) || hasEqualOrGreaterBin(major, minor);
}

inline
DeviceInfo::DeviceInfo()
{
    device_id_ = getDevice();
}

inline
DeviceInfo::DeviceInfo(int device_id)
{
    CV_Assert( device_id >= 0 && device_id < getCudaEnabledDeviceCount() );
    device_id_ = device_id;
}

inline
int DeviceInfo::deviceID() const
{
    return device_id_;
}

inline
size_t DeviceInfo::freeMemory() const
{
    size_t _totalMemory = 0, _freeMemory = 0;
    queryMemory(_totalMemory, _freeMemory);
    return _freeMemory;
}

inline
size_t DeviceInfo::totalMemory() const
{
    size_t _totalMemory = 0, _freeMemory = 0;
    queryMemory(_totalMemory, _freeMemory);
    return _totalMemory;
}

inline
bool DeviceInfo::supports(FeatureSet feature_set) const
{
    int version = majorVersion() * 10 + minorVersion();
    return version >= feature_set;
}


}} // namespace cv { namespace cuda {

//===================================================================================
// Mat
//===================================================================================

namespace cv {

inline
Mat::Mat(const cuda::GpuMat& m)
    : flags(0), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows)
{
    m.download(*this);
}

}

//! @endcond

#endif // OPENCV_CORE_CUDAINL_HPP
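The inline definitions above are thin wrappers: constructors delegate to create(), upload(), or setTo(); the single-argument copyTo/setTo/convertTo overloads forward to the stream-aware versions with Stream::Null(); and the Mat(const cuda::GpuMat&) constructor downloads device data back to the host. The snippet below is a minimal, illustrative usage sketch, not part of cuda.inl.hpp; it assumes an OpenCV build with CUDA support and at least one CUDA-capable device, and the matrix sizes and values are arbitrary.

// Usage sketch (illustrative only, not part of the header above).
#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>
#include <iostream>

int main()
{
    if (cv::cuda::getCudaEnabledDeviceCount() == 0)
    {
        std::cout << "No CUDA device available" << std::endl;
        return 0;
    }

    cv::cuda::DeviceInfo info;                              // default ctor queries the current device
    std::cout << "Device " << info.deviceID() << ": "
              << info.freeMemory() / (1 << 20) << " MiB free" << std::endl;

    cv::Mat host(480, 640, CV_8UC1, cv::Scalar(0));

    cv::cuda::GpuMat dev(host);                             // GpuMat(InputArray) -> upload()
    dev.setTo(cv::Scalar(255));                             // setTo(s) -> setTo(s, Stream::Null())

    cv::cuda::GpuMat devF;
    dev.convertTo(devF, CV_32F);                            // convertTo(dst, rtype) on the default stream

    cv::cuda::GpuMat roi = dev(cv::Rect(0, 0, 320, 240));   // operator()(Rect) -> sub-matrix header

    cv::Mat result(dev);                                    // Mat(const cuda::GpuMat&) -> download()
    return 0;
}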