从初始块读取内存的连续块
Successive blocks reading memory from initial blocks
所以这是我程序的一部分,我对两个类进行了归约和。我用共享数组__shared__ int nrules[max_threads * MAX_CLASSES];
的一半对类进行索引,所以第一个类从nrules[0]
开始,第二个类在nrules[blockDim.x or max_threads]
开始。两半都减少了。总和保留在作为参数传递的全局数组中,该数组将保留每个块的总和,因此由blockIdx.x
索引。
我有一个测试用例的大小,它由MAX_SIZE
表示,所有测试首先从1到MAX_SIZE
进行处理,并且在全局数组中为每个块累加和。
我想调用一个块数等于我的测试数(10000)的内核,但在求和方面存在一些问题,所以我改为分步调用。
我找不到解决方案,但每当我调用一个块数超过max_threads
的内核时,它就会开始从初始块开始求和。如果您执行代码,您将看到它将打印每个块的值,在本例中为64,每个块有64个线程。如果我再执行至少一个块,它的总和将是128。这是头等舱的总和。就好像偏移量变量什么都不做,而写入再次发生在第一个块上。并且在MAX_SIZE
=3的情况下,第一块的第二类的和被改变为192。这里的CUDA计算能力(compute capability)是2.0,一张GT 520卡。用CUDA 6.5编译。
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Wraps a CUDA runtime call and forwards its status plus call site to gpuAssert.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA runtime call (error string, file, line) on stderr and,
// when `abort` is true (the default), terminates the process with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        // Fixed: the original format string was "%dn" — the '\n' escape was lost,
        // gluing consecutive error reports together.
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        // Fixed: `abort` was previously ignored, so a failure was printed but
        // execution continued with the CUDA context in a sticky error state,
        // making every later call fail mysteriously.
        if (abort) exit(code);
    }
}
#define MAX_CLASSES 2
#define max_threads 64
//#define MAX_FEATURES 65
__device__ __constant__ int d_MAX_SIZE;
__device__ __constant__ int offset;
// Block-level tree reduction of two shared arrays (points: float, nrules: int).
// Each array is laid out as MAX_CLASSES contiguous halves of blockDim.x elements:
// class i occupies indices [i*blockDim.x, (i+1)*blockDim.x). Both classes are
// reduced simultaneously; on return, slot i*blockDim.x of each array holds the
// block-wide sum for class i.
// NOTE(review): the warp-synchronous tail (threadIdx.x < 32) reads/writes the
// shared arrays without `volatile` and without __syncwarp(); the volatile
// aliases below are commented out. This relies on pre-Volta implicit warp
// lockstep — confirm the target architecture before reusing this code.
__device__ void rules_points_reduction(float points[max_threads * MAX_CLASSES], int nrules[max_threads * MAX_CLASSES]){
// Per-thread running partials, one per class, kept in registers.
float psum[MAX_CLASSES];
int nsum[MAX_CLASSES];
for (int i = 0; i < MAX_CLASSES; i++){
psum[i] = points[threadIdx.x + i * blockDim.x];
nsum[i] = nrules[threadIdx.x + i * blockDim.x];
}
__syncthreads();
// Classic halving reduction, fully unrolled on blockDim.x: at each stage the
// lower half of the active threads folds in the upper half's element, for
// every class section. Each stage guards on blockDim.x so the same code
// works for any power-of-two block size up to 1024.
if (blockDim.x >= 1024) {
if (threadIdx.x < 512) {
for (int i = 0; i < MAX_CLASSES; i++){
points[threadIdx.x + i * blockDim.x] = psum[i] = psum[i] + points[threadIdx.x + 512 + i * blockDim.x];
nrules[threadIdx.x + i * blockDim.x] = nsum[i] = nsum[i] + nrules[threadIdx.x + 512 + i * blockDim.x];
}
} __syncthreads();
}
if (blockDim.x >= 512) {
if (threadIdx.x < 256) {
for (int i = 0; i < MAX_CLASSES; i++){
points[threadIdx.x + i * blockDim.x] = psum[i] = psum[i] + points[threadIdx.x + 256 + i * blockDim.x];
nrules[threadIdx.x + i * blockDim.x] = nsum[i] = nsum[i] + nrules[threadIdx.x + 256 + i * blockDim.x];
}
} __syncthreads();
}
if (blockDim.x >= 256) {
if (threadIdx.x < 128) {
for (int i = 0; i < MAX_CLASSES; i++){
points[threadIdx.x + i * blockDim.x] = psum[i] = psum[i] + points[threadIdx.x + 128 + i * blockDim.x];
nrules[threadIdx.x + i * blockDim.x] = nsum[i] = nsum[i] + nrules[threadIdx.x + 128 + i * blockDim.x];
}
} __syncthreads();
}
if (blockDim.x >= 128) {
if (threadIdx.x < 64) {
for (int i = 0; i < MAX_CLASSES; i++){
points[threadIdx.x + i * blockDim.x] = psum[i] = psum[i] + points[threadIdx.x + 64 + i * blockDim.x];
nrules[threadIdx.x + i * blockDim.x] = nsum[i] = nsum[i] + nrules[threadIdx.x + 64 + i * blockDim.x];
}
} __syncthreads();
}
// Final warp: no __syncthreads() between stages from here on — the code
// assumes all 32 lanes advance together (warp-synchronous programming).
if (threadIdx.x < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
//volatile int* smem = nrules;
//volatile float* smemf = points;
if (blockDim.x >= 64) {
for (int i = 0; i < MAX_CLASSES; i++){
points[threadIdx.x + i * blockDim.x] = psum[i] = psum[i] + points[threadIdx.x + 32 + i * blockDim.x];
nrules[threadIdx.x + i * blockDim.x] = nsum[i] = nsum[i] + nrules[threadIdx.x + 32 + i * blockDim.x];
}
}
if (blockDim.x >= 32) {
for (int i = 0; i < MAX_CLASSES; i++){
points[threadIdx.x + i * blockDim.x] = psum[i] = psum[i] + points[threadIdx.x + 16 + i * blockDim.x];
nrules[threadIdx.x + i * blockDim.x] = nsum[i] = nsum[i] + nrules[threadIdx.x + 16 + i * blockDim.x];
}
}
if (blockDim.x >= 16) {
for (int i = 0; i < MAX_CLASSES; i++){
points[threadIdx.x + i * blockDim.x] = psum[i] = psum[i] + points[threadIdx.x + 8 + i * blockDim.x];
nrules[threadIdx.x + i * blockDim.x] = nsum[i] = nsum[i] + nrules[threadIdx.x + 8 + i * blockDim.x];
}
}
if (blockDim.x >= 8) {
for (int i = 0; i < MAX_CLASSES; i++){
points[threadIdx.x + i * blockDim.x] = psum[i] = psum[i] + points[threadIdx.x + 4 + i * blockDim.x];
nrules[threadIdx.x + i * blockDim.x] = nsum[i] = nsum[i] + nrules[threadIdx.x + 4 + i * blockDim.x];
}
}
if (blockDim.x >= 4) {
for (int i = 0; i < MAX_CLASSES; i++){
points[threadIdx.x + i * blockDim.x] = psum[i] = psum[i] + points[threadIdx.x + 2 + i * blockDim.x];
nrules[threadIdx.x + i * blockDim.x] = nsum[i] = nsum[i] + nrules[threadIdx.x + 2 + i * blockDim.x];
}
}
if (blockDim.x >= 2) {
for (int i = 0; i < MAX_CLASSES; i++){
points[threadIdx.x + i * blockDim.x] = psum[i] = psum[i] + points[threadIdx.x + 1 + i * blockDim.x];
nrules[threadIdx.x + i * blockDim.x] = nsum[i] = nsum[i] + nrules[threadIdx.x + 1 + i * blockDim.x];
}
}
}
}
// Per-block device routine: fills both class halves of the shared arrays,
// reduces them block-wide, and thread 0 accumulates the two class sums into
// the global accumulators `finalpoints` / `gn_rules`.
//
// Global layout: each launch of `step` blocks (= max_threads) consumes a chunk
// of MAX_CLASSES * blockDim.x slots. FIX: the chunk base is
// blockIdx.x + offset * MAX_CLASSES — the original used `offset` alone, so a
// second launch (offset = 64) landed inside the first launch's chunk and the
// first blocks' sums kept growing (64 -> 128, second-class slot -> 192).
__device__ void d_get_THE_prediction(short k, float* finalpoints, int* gn_rules)
{
    // Currently-unused scaffolding kept for the commented-out rule-induction path.
    int max;
    short true_label, n_items;
    __shared__ float points[max_threads * MAX_CLASSES];
    __shared__ int nrules[max_threads * MAX_CLASSES];
    //__shared__ short items[MAX_FEATURES], ele[MAX_FEATURES];
    __shared__ int max2;
    // Seed every slot of both class halves with 1, so each class reduces to
    // blockDim.x in this stripped-down test harness.
    for (int i = 0; i < MAX_CLASSES; i++)
    {
        points[threadIdx.x + i * blockDim.x] = 1;
        nrules[threadIdx.x + i * blockDim.x] = 1;
    }
    if (threadIdx.x == 0) {
        if (k == 1){
            nrules[0] = 1;
            nrules[blockDim.x] = 1;
        }
        //max2 = GetBinCoeff_l_d(n_items, k);
    }
    __syncthreads();
    //max = max2;
    //d_induce_rules(items, ele, n_items, k, max, nrules, points);
    __syncthreads();
    rules_points_reduction(points, nrules);
    if (threadIdx.x == 0){
        // Scale the launch offset by MAX_CLASSES so each launch writes its own
        // MAX_CLASSES * blockDim.x chunk of the global arrays.
        int base = blockIdx.x + offset * MAX_CLASSES;
        for (int i = 0; i < MAX_CLASSES; i++){
            gn_rules[base + i * blockDim.x] += nrules[i * blockDim.x];
            finalpoints[base + i * blockDim.x] += points[i * blockDim.x];
        }
        // Fixed format string: was "%dn" (lost '\n' escape).
        printf("block %d k%d %f %f %d %d\n", base, k, finalpoints[base],
               finalpoints[base + blockDim.x], gn_rules[base], gn_rules[base + blockDim.x]);
    }
}
// Kernel entry point: thin wrapper that forwards the launch parameters to the
// device-side prediction routine. Launched with max_threads threads per block;
// each block accumulates its two class sums into the global arrays.
__global__ void lazy_supervised_classification_kernel(int k, float* finalpoints, int* n_rules){
d_get_THE_prediction( k, finalpoints, n_rules);
}
// Test driver: allocates one (finalpoints, nrules) slot pair per class per
// test on the device, then launches the kernel in steps of `max_threads`
// blocks, passing the running block offset through __constant__ `offset`.
int main() {
    //freopen("output.txt", "w", stdout);
    int N_TESTS = 10000;
    int MAX_SIZE = 3;
    // Host mirrors of the device accumulators (allocated but not read back here).
    float *finalpoints = (float*)calloc(MAX_CLASSES * N_TESTS, sizeof(float));
    float *d_finalpoints = 0;
    int *d_nruls = 0;
    int *nruls = (int*)calloc(MAX_CLASSES * N_TESTS, sizeof(int));
    gpuErrchk(cudaMalloc(&d_finalpoints, MAX_CLASSES * N_TESTS * sizeof(float)));
    gpuErrchk(cudaMemset(d_finalpoints, 0, MAX_CLASSES * N_TESTS * sizeof(float)));
    gpuErrchk(cudaMalloc(&d_nruls, MAX_CLASSES * N_TESTS * sizeof(int)));
    gpuErrchk(cudaMemset(d_nruls, 0, MAX_CLASSES * N_TESTS * sizeof(int)));
    gpuErrchk(cudaMemcpyToSymbol(d_MAX_SIZE, &MAX_SIZE, sizeof(int), 0, cudaMemcpyHostToDevice));
    int step = max_threads, ofset = 0;
    for (int k = 1; k < MAX_SIZE; k++){
        //N_TESTS-step
        for (ofset = 0; ofset < max_threads; ofset += step){
            // Publish the block offset for this launch via constant memory.
            gpuErrchk(cudaMemcpyToSymbol(offset, &ofset, sizeof(int), 0, cudaMemcpyHostToDevice));
            lazy_supervised_classification_kernel<<<step, max_threads>>>(k, d_finalpoints, d_nruls);
            gpuErrchk(cudaGetLastError());       // catch launch-configuration errors
            gpuErrchk(cudaDeviceSynchronize());  // serialize so `offset` can be safely rewritten
        }
        gpuErrchk(cudaMemcpyToSymbol(offset, &ofset, sizeof(int), 0, cudaMemcpyHostToDevice));//comment these lines
        //N_TESTS - step
        // Fixed: launch chevrons were garbled as ">> >" in the original.
        lazy_supervised_classification_kernel<<<3, max_threads>>>(k, d_finalpoints, d_nruls);//
        gpuErrchk(cudaGetLastError());
        gpuErrchk(cudaDeviceSynchronize());//
    }
    gpuErrchk(cudaFree(d_finalpoints));
    gpuErrchk(cudaFree(d_nruls));
    free(finalpoints);
    free(nruls);
    gpuErrchk(cudaDeviceReset());
    return(0);
}
我不相信这个索引是你想要的:
gn_rules[(blockIdx.x + offset) + i * blockDim.x] += ...;
finalpoints[(blockIdx.x + offset) + i * blockDim.x] += ...;
对于MAX_CLASSES
=2,每个块需要存储2个finalpoints
值和2个gn_rules
值。因此,当offset
为非零时,它需要按MAX_CLASSES
值进行缩放,以便索引到该块的正确存储的开始。
因此,如果您将上述代码行更改为:
gn_rules[(blockIdx.x + (offset*MAX_CLASSES)) + i * blockDim.x] += nrules[i * blockDim.x];
finalpoints[(blockIdx.x + (offset*MAX_CLASSES)) + i * blockDim.x] += points[i * blockDim.x];
我相信你会得到你期望的结果。
相关文章:
- 当需要超过16GB的连续内存时,内存分配失败
- C++,您能否设计一种数据结构,将指针保存在连续内存中并且不会使它们失效?
- 使用连续内存实现多态性
- 我可以使用哪种数据结构来释放连续内存中的内存?
- std::array与C样式数组用于连续内存
- 如何创建非 POD 类型的连续内存池?
- C++ 中连续内存中的模板化不同大小的结构
- 连续内存分配
- std::vector 如何支持未知大小的自定义对象的连续内存
- 持续的时间访问是否在某个时候意味着连续内存
- std::vector 的连续内存分配
- C++连续内存操作
- 是否可以将指向已知大小的连续内存的指针转换为结构
- 确定将范围中的元素放置在连续内存中
- 未键入的连续内存容器
- 分配大块连续内存 - 做还是不做?
- 为什么数组中的指针不存储在连续内存中
- C++ 向量不分配连续内存
- std::bitset 是否保证连续内存以及结构中的恒定大小(以避免填充?
- 使用单个连续内存块为三维数组编制索引