scan.hpp

/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#pragma once

#ifndef OPENCV_CUDEV_BLOCK_SCAN_HPP
#define OPENCV_CUDEV_BLOCK_SCAN_HPP

#include "../common.hpp"
#include "../warp/scan.hpp"
#include "../warp/warp.hpp"

namespace cv { namespace cudev {

//! @addtogroup cudev
//! @{

#if __CUDACC_VER_MAJOR__ >= 9

// Usage Note
// - THREADS_NUM should be equal to the number of threads in this block.
// - smem must be able to contain at least n elements of type T, where n is equal to the number
//   of warps in this block, i.e. n = divUp(THREADS_NUM, WARP_SIZE).
//
// Dev Note
// - Starting from CUDA 9.0, support for Fermi is dropped, so CV_CUDEV_ARCH >= 300 is implied.
// - "For Pascal and earlier architectures (CV_CUDEV_ARCH < 700), all threads in mask must execute
//   the same warp intrinsic instruction in convergence, and the union of all values in mask must
//   be equal to the warp's active mask."
//   (https://docs.nvidia.com/cuda/archive/10.0/cuda-c-programming-guide#independent-thread-scheduling-7-x)
// - The above restriction does not apply starting from Volta (CV_CUDEV_ARCH >= 700). There we only
//   need to make sure that "all non-exited threads named in mask must execute the same intrinsic
//   with the same mask."
//   (https://docs.nvidia.com/cuda/archive/10.0/cuda-c-programming-guide#warp-description)
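//
// Illustrative usage sketch (not part of the original header). The kernel and buffer names below
// are hypothetical; the sketch only shows how THREADS_NUM, the shared-memory buffer and the thread
// id are expected to relate, assuming the constraints from the Usage Note above. Note that every
// thread of the block must call blockScanInclusive (it contains __syncthreads()), so out-of-range
// threads contribute a neutral value instead of returning early:
//
//     template <int THREADS_NUM>
//     __global__ void inclusiveScanKernel(const int* in, int* out, int n)
//     {
//         // one element per warp, as required by the Usage Note (divUp(THREADS_NUM, WARP_SIZE))
//         __shared__ int smem[(THREADS_NUM + WARP_SIZE - 1) / WARP_SIZE];
//
//         const unsigned int tid = threadIdx.x;
//         const int idx = blockIdx.x * THREADS_NUM + tid;
//         const int val = (idx < n) ? in[idx] : 0;
//
//         const int res = blockScanInclusive<THREADS_NUM>(val, smem, tid);
//
//         if (idx < n)
//             out[idx] = res;
//     }
//
//     // launched, e.g., as: inclusiveScanKernel<256><<<gridSize, 256>>>(d_in, d_out, n);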

template <int THREADS_NUM, typename T>
__device__ T blockScanInclusive(T data, volatile T* smem, uint tid)
{
    const int residual = THREADS_NUM & (WARP_SIZE - 1);

#if CV_CUDEV_ARCH < 700
    const uint residual_mask = (1U << residual) - 1;
#endif

    if (THREADS_NUM > WARP_SIZE)
    {
        // bottom-level inclusive warp scan
    #if CV_CUDEV_ARCH >= 700
        T warpResult = warpScanInclusive(0xFFFFFFFFU, data);
    #else
        T warpResult;

        if (0 == residual)
            warpResult = warpScanInclusive(0xFFFFFFFFU, data);
        else
        {
            const int n_warps = divUp(THREADS_NUM, WARP_SIZE);
            const int warp_num = Warp::warpId();

            if (warp_num < n_warps - 1)
                warpResult = warpScanInclusive(0xFFFFFFFFU, data);
            else
            {
                // we are in the last warp of a block whose number of threads
                // is not a multiple of the warp size
                warpResult = warpScanInclusive(residual_mask, data);
            }
        }
    #endif

        __syncthreads();

        // save top elements of each warp for exclusive warp scan
        // sync to wait for warp scans to complete (because smem is being overwritten)
        if ((tid & (WARP_SIZE - 1)) == (WARP_SIZE - 1))
        {
            smem[tid >> LOG_WARP_SIZE] = warpResult;
        }

        __syncthreads();

        int quot = THREADS_NUM / WARP_SIZE;

        if (tid < quot)
        {
            // grab top warp elements
            T val = smem[tid];

            uint mask = (1LLU << quot) - 1;

            if (0 == residual)
            {
                // calculate exclusive scan and write back to shared memory
                smem[tid] = warpScanExclusive(mask, val);
            }
            else
            {
                // The read from smem[tid] (T val = smem[tid]) and the write to smem[tid + 1]
                // (smem[tid + 1] = warpScanInclusive(mask, val)) must be explicitly fenced by
                // "__syncwarp" to get rid of "cuda-memcheck --tool racecheck" warnings.
                __syncwarp(mask);

                // calculate inclusive scan and write back to shared memory with offset 1
                smem[tid + 1] = warpScanInclusive(mask, val);

                if (tid == 0)
                    smem[0] = 0;
            }
        }

        __syncthreads();

        // return updated warp scans
        return warpResult + smem[tid >> LOG_WARP_SIZE];
    }
    else
    {
    #if CV_CUDEV_ARCH >= 700
        return warpScanInclusive(0xFFFFFFFFU, data);
    #else
        if (THREADS_NUM == WARP_SIZE)
            return warpScanInclusive(0xFFFFFFFFU, data);
        else
            return warpScanInclusive(residual_mask, data);
    #endif
    }
}

template <int THREADS_NUM, typename T>
__device__ __forceinline__ T blockScanExclusive(T data, volatile T* smem, uint tid)
{
    return blockScanInclusive<THREADS_NUM>(data, smem, tid) - data;
}
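
// Note (illustrative, not part of the original header): the exclusive scan is obtained from the
// inclusive one by subtracting each thread's own contribution. For example, for per-thread data
// {3, 1, 4} the inclusive scan is {3, 4, 8}, so the exclusive scan is {0, 3, 4}.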

#else // __CUDACC_VER_MAJOR__ >= 9

// Usage Note
// - THREADS_NUM should be equal to the number of threads in this block.
// - (>= Kepler) smem must be able to contain at least n elements of type T, where n is equal to the number
//   of warps in this block, i.e. n = divUp(THREADS_NUM, WARP_SIZE).
// - (Fermi) smem must be able to contain at least n elements of type T, where n is equal to the number
//   of threads in this block, i.e. n = THREADS_NUM.
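//
// Illustrative sizing sketch (not part of the original header), assuming a 256-thread block and
// T = int; the buffer name is hypothetical:
//
//     // Kepler and newer: one element per warp
//     __shared__ int smem[(256 + WARP_SIZE - 1) / WARP_SIZE];
//
//     // Fermi: one element per thread
//     __shared__ int smem[256];
//
//     const int res = blockScanInclusive<256>(val, smem, threadIdx.x);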

template <int THREADS_NUM, typename T>
__device__ T blockScanInclusive(T data, volatile T* smem, uint tid)
{
    if (THREADS_NUM > WARP_SIZE)
    {
        // bottom-level inclusive warp scan
        T warpResult = warpScanInclusive(data, smem, tid);

        __syncthreads();

        // save top elements of each warp for exclusive warp scan
        // sync to wait for warp scans to complete (because smem is being overwritten)
        if ((tid & (WARP_SIZE - 1)) == (WARP_SIZE - 1))
        {
            smem[tid >> LOG_WARP_SIZE] = warpResult;
        }

        __syncthreads();

        int quot = THREADS_NUM / WARP_SIZE;

        T val;

        if (tid < quot)
        {
            // grab top warp elements
            val = smem[tid];
        }

        __syncthreads();

        if (tid < quot)
        {
            if (0 == (THREADS_NUM & (WARP_SIZE - 1)))
            {
                // calculate exclusive scan and write back to shared memory
                smem[tid] = warpScanExclusive(val, smem, tid);
            }
            else
            {
                // calculate inclusive scan and write back to shared memory with offset 1
                smem[tid + 1] = warpScanInclusive(val, smem, tid);

                if (tid == 0)
                    smem[0] = 0;
            }
        }

        __syncthreads();

        // return updated warp scans
        return warpResult + smem[tid >> LOG_WARP_SIZE];
    }
    else
    {
        return warpScanInclusive(data, smem, tid);
    }
}

template <int THREADS_NUM, typename T>
__device__ __forceinline__ T blockScanExclusive(T data, volatile T* smem, uint tid)
{
    return blockScanInclusive<THREADS_NUM>(data, smem, tid) - data;
}

#endif // __CUDACC_VER_MAJOR__ >= 9

//! @}

}}

#endif