reduce_to_column.hpp

/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#pragma once

#ifndef OPENCV_CUDEV_GRID_REDUCE_TO_COLUMN_DETAIL_HPP
#define OPENCV_CUDEV_GRID_REDUCE_TO_COLUMN_DETAIL_HPP

#include "../../common.hpp"
#include "../../util/saturate_cast.hpp"
#include "../../block/reduce.hpp"

namespace cv { namespace cudev {

namespace grid_reduce_to_vec_detail
{
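    // Per-channel block-wide reduction helper. The accumulator work_type may be a
    // 1- to 4-channel vector type; each specialization reduces the corresponding
    // number of channels through one shared-memory buffer per channel.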
    template <int BLOCK_SIZE, typename work_type, typename work_elem_type, class Reductor, int cn> struct Reduce;

    template <int BLOCK_SIZE, typename work_type, typename work_elem_type, class Reductor> struct Reduce<BLOCK_SIZE, work_type, work_elem_type, Reductor, 1>
    {
        __device__ __forceinline__ static void call(work_elem_type smem[1][BLOCK_SIZE], work_type& myVal)
        {
            typename Reductor::template rebind<work_elem_type>::other op;
            blockReduce<BLOCK_SIZE>(smem[0], myVal, threadIdx.x, op);
        }
    };

    template <int BLOCK_SIZE, typename work_type, typename work_elem_type, class Reductor> struct Reduce<BLOCK_SIZE, work_type, work_elem_type, Reductor, 2>
    {
        __device__ __forceinline__ static void call(work_elem_type smem[2][BLOCK_SIZE], work_type& myVal)
        {
            typename Reductor::template rebind<work_elem_type>::other op;
            blockReduce<BLOCK_SIZE>(smem_tuple(smem[0], smem[1]), tie(myVal.x, myVal.y), threadIdx.x, make_tuple(op, op));
        }
    };

    template <int BLOCK_SIZE, typename work_type, typename work_elem_type, class Reductor> struct Reduce<BLOCK_SIZE, work_type, work_elem_type, Reductor, 3>
    {
        __device__ __forceinline__ static void call(work_elem_type smem[3][BLOCK_SIZE], work_type& myVal)
        {
            typename Reductor::template rebind<work_elem_type>::other op;
            blockReduce<BLOCK_SIZE>(smem_tuple(smem[0], smem[1], smem[2]), tie(myVal.x, myVal.y, myVal.z), threadIdx.x, make_tuple(op, op, op));
        }
    };

    template <int BLOCK_SIZE, typename work_type, typename work_elem_type, class Reductor> struct Reduce<BLOCK_SIZE, work_type, work_elem_type, Reductor, 4>
    {
        __device__ __forceinline__ static void call(work_elem_type smem[4][BLOCK_SIZE], work_type& myVal)
        {
            typename Reductor::template rebind<work_elem_type>::other op;
            blockReduce<BLOCK_SIZE>(smem_tuple(smem[0], smem[1], smem[2], smem[3]), tie(myVal.x, myVal.y, myVal.z, myVal.w), threadIdx.x, make_tuple(op, op, op, op));
        }
    };
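    // Kernel: the grid is launched with one block per row of src; block y reduces
    // the masked elements of that row into a single dst[y].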
    template <class Reductor, int BLOCK_SIZE, class SrcPtr, typename ResType, class MaskPtr>
    __global__ void reduceToColumn(const SrcPtr src, ResType* dst, const MaskPtr mask, const int cols)
    {
        typedef typename Reductor::work_type work_type;
        typedef typename VecTraits<work_type>::elem_type work_elem_type;
        const int cn = VecTraits<work_type>::cn;

        __shared__ work_elem_type smem[cn][BLOCK_SIZE];

        const int y = blockIdx.x;

        work_type myVal = Reductor::initialValue();

        Reductor op;

        // each thread accumulates a BLOCK_SIZE-strided subset of the row's columns
        for (int x = threadIdx.x; x < cols; x += BLOCK_SIZE)
        {
            if (mask(y, x))
            {
                myVal = op(myVal, saturate_cast<work_type>(src(y, x)));
            }
        }

        // combine the per-thread partial values across the block
        Reduce<BLOCK_SIZE, work_type, work_elem_type, Reductor, cn>::call(smem, myVal);

        // thread 0 holds the block-wide result; the Reductor may post-process it
        // (e.g. divide by the element count) before the final store
        if (threadIdx.x == 0)
            dst[y] = saturate_cast<ResType>(Reductor::result(myVal, cols));
    }
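    // Host-side launcher: the Policy's 2-D block shape is flattened into a single
    // BLOCK_SIZE-thread block and one block is launched per row. A caller such as
    // gridReduceToColumn in reduce_to_vec.hpp is expected to supply the Reductor,
    // Policy, pointers and stream; a minimal sketch, assuming the public cudev
    // helpers Sum<>, globPtr<>() and WithOutMask plus a hypothetical 256-thread
    // policy:
    //
    //   struct MyPolicy { enum { block_size_x = 32, block_size_y = 8 }; };
    //
    //   // dst_dev points to src.rows ints on the device, one result per row
    //   grid_reduce_to_vec_detail::reduceToColumn<Sum<int>, MyPolicy>(
    //       globPtr<uchar>(src), dst_dev, WithOutMask(),
    //       src.rows, src.cols, StreamAccessor::getStream(stream));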
    template <class Reductor, class Policy, class SrcPtr, typename ResType, class MaskPtr>
    __host__ void reduceToColumn(const SrcPtr& src, ResType* dst, const MaskPtr& mask, int rows, int cols, cudaStream_t stream)
    {
        const int BLOCK_SIZE_X = Policy::block_size_x;
        const int BLOCK_SIZE_Y = Policy::block_size_y;

        const int BLOCK_SIZE = BLOCK_SIZE_X * BLOCK_SIZE_Y;

        const dim3 block(BLOCK_SIZE);
        const dim3 grid(rows);

        reduceToColumn<Reductor, BLOCK_SIZE><<<grid, block, 0, stream>>>(src, dst, mask, cols);
        CV_CUDEV_SAFE_CALL( cudaGetLastError() );

        if (stream == 0)
            CV_CUDEV_SAFE_CALL( cudaDeviceSynchronize() );
    }
}

}}

#endif