Skip to content

Commit 370e0f9

Browse files
committed
Add reduction example using global memory
1 parent 211d8f5 commit 370e0f9

File tree

2 files changed

+137
-0
lines changed

2 files changed

+137
-0
lines changed

examples/basic/CMakeLists.txt

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,3 +32,14 @@ target_sources(${_TARGET_RANDOM_2D_MATRIX}
3232
set_target_properties(${_TARGET_RANDOM_2D_MATRIX} PROPERTIES
    # CUDA_CXX_STANDARD is not a CMake target property and is silently
    # ignored; CUDA_STANDARD is the property that selects the C++ standard
    # for CUDA sources
    CUDA_STANDARD 17
)
35+
36+
set(_TARGET_REDUCE reduce)

add_executable(${_TARGET_REDUCE})
target_sources(${_TARGET_REDUCE}
    PRIVATE
    reduce.cu)
set_target_properties(${_TARGET_REDUCE} PROPERTIES
    CXX_STANDARD 17
    # CUDA_STANDARD (not CUDA_CXX_STANDARD, which is ignored as an unknown
    # property) selects the C++ standard used to compile CUDA sources
    CUDA_STANDARD 17
)

examples/basic/reduce.cu

Lines changed: 126 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,126 @@
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <numeric>
#include <random>
#include <vector>
// The wrapper macro is required so that __FILE__/__LINE__ refer to the call
// site where the check fails, not to this helper.
// do { } while (0) makes the macro safe inside if/else without braces
// (a bare { } followed by the caller's ';' would break an else branch).
#define checkCudaError(ans)                            \
    do                                                 \
    {                                                  \
        checkCudaErrorFunc((ans), __FILE__, __LINE__); \
    } while (0)

// Checks a CUDA runtime status code. On error, prints the file/line of the
// call site and the CUDA error string, then terminates the process with a
// failure exit code (the original exited with 0, signalling success).
inline void checkCudaErrorFunc(cudaError_t err, const char *file, int line)
{
    if (err != cudaSuccess)
    {
        std::cout << "\r" << file << ":" << line << " -> Cuda Error " << err << ": " << cudaGetErrorString(err) << std::endl;
        std::cout << "Aborting..." << std::endl;
        std::exit(EXIT_FAILURE);
    }
}
21+
22+
// Reduce `size` elements of `input` into per-logical-block partial sums.
//
// The input is divided into logical blocks with the size of a cuda block
// (blockDim.x). Each logical block is reduced in place to a single element,
// which is written to `output`. A grid stride loop maps the logical blocks
// onto the cuda blocks, so the kernel is correct for any grid size.
//
// Preconditions:
// - blockDim.x is a power of two (the tree reduction halves it each step)
// - output holds at least ceil(size / blockDim.x) elements
// - input is modified (partial sums are accumulated in place)
__global__ void reduce_gm(unsigned int const size, unsigned int *const input, unsigned int *const output)
{
    // number of logical blocks, rounded up to cover a partial tail block
    unsigned int const num_logical_blocks = (size + blockDim.x - 1) / blockDim.x;

    // Grid stride loop over logical blocks. The loop bound is uniform for all
    // threads of a cuda block, so every thread reaches the __syncthreads()
    // calls below the same number of times. (The original per-thread bound
    // `block_offset_id < size` diverged on the partial tail block, making
    // some threads skip a __syncthreads() that others executed.)
    for (unsigned int virtual_block_id = blockIdx.x; virtual_block_id < num_logical_blocks; virtual_block_id += gridDim.x)
    {
        unsigned int const block_offset_id = virtual_block_id * blockDim.x + threadIdx.x;

        // Tree reduction: halve the number of active threads each step.
        for (unsigned int active_threads = blockDim.x / 2; active_threads > 0; active_threads /= 2)
        {
            // The second condition guards the partial tail block against
            // reading past the end of input (out-of-bounds in the original);
            // skipped pairs lie entirely outside the valid range, so the sum
            // of the valid elements is still accumulated into the first slot.
            if (threadIdx.x < active_threads && block_offset_id + active_threads < size)
            {
                input[block_offset_id] += input[block_offset_id + active_threads];
            }
            // barrier is outside the divergent branch -> reached by all threads
            __syncthreads();
        }
        if (threadIdx.x == 0)
        {
            // write the single reduced element of this logical block;
            // thread 0's offset is always < size because virtual_block_id
            // is < num_logical_blocks
            output[virtual_block_id] = input[block_offset_id];
        }
        // protect input[] from being overwritten by the next iteration
        // before every thread has finished reading it
        __syncthreads();
    }
}
50+
51+
// Print all elements of a vector separated by single spaces, followed by a
// trailing space and a newline, to std::cout.
// Helper function -> should be replaced by html visualization ;-)
// Takes the vector by const reference to avoid copying it on every call
// (the original took it by value).
template <typename T>
void print_vec(std::vector<T> const &vec)
{
    for (auto const &v : vec)
    {
        std::cout << v << " ";
    }
    std::cout << std::endl;
}
61+
62+
// Driver: fills a host buffer with random integers, reduces it on the GPU
// with reduce_gm, finishes the reduction of the per-block partial sums on
// the CPU, and verifies the result against std::reduce.
int main(int argc, char **argv)
{
    int const blocks = 10;
    int const threads = 32;

    // number of input elements
    unsigned int const size = 1000;
    size_t const data_size_byte = sizeof(unsigned int) * size;

    // number of logical blocks
    size_t output_elements = size / threads;
    // add an extra element, if the logical blocks do not divide size evenly
    output_elements += (size % threads == 0) ? 0 : 1;
    size_t const output_size_byte = sizeof(unsigned int) * output_elements;

    std::vector<unsigned int> h_data(size);
    std::vector<unsigned int> h_output(output_elements, 0);

    // initialize data with random numbers between 0 and 10
    std::uniform_int_distribution<unsigned int> distribution(
        0,
        10);
    std::default_random_engine generator;
    std::generate(
        h_data.begin(),
        h_data.end(),
        [&distribution, &generator]()
        { return distribution(generator); });

    // calculate the result on the CPU for verification
    unsigned int const expected_result = std::reduce(h_data.begin(), h_data.end());

    unsigned int *d_data = nullptr;
    unsigned int *d_output = nullptr;

    checkCudaError(cudaMalloc((void **)&d_data, data_size_byte));
    checkCudaError(cudaMalloc((void **)&d_output, output_size_byte));
    checkCudaError(cudaMemcpy(d_data, h_data.data(), data_size_byte, cudaMemcpyHostToDevice));

    reduce_gm<<<blocks, threads>>>(size, d_data, d_output);
    // a kernel launch returns no status itself -> query launch errors explicitly
    checkCudaError(cudaGetLastError());

    // blocking copy; also synchronizes with the kernel before reading d_output
    checkCudaError(cudaMemcpy(h_output.data(), d_output, output_size_byte, cudaMemcpyDeviceToHost));

    // release device memory (was leaked in the original)
    checkCudaError(cudaFree(d_data));
    checkCudaError(cudaFree(d_output));

    unsigned int sum = 0;

    // Reduce all partial sums of the logical blocks on the CPU.
    // Otherwise a second kernel or cuda cooperative groups would be required
    // to perform a grid-wide synchronization.
    for (unsigned int const v : h_output)
    {
        sum += v;
    }

    if (sum == expected_result)
    {
        std::cout << "reduction kernel works correctly" << std::endl;
    }
    else
    {
        std::cout << "sum: " << sum << std::endl;
        std::cout << "expected result: " << expected_result << std::endl;
    }

    return 0;
}

0 commit comments

Comments
 (0)