大图像的 CUDA 内存分配问题

CUDA memory allocation issues with large images

本文关键字:分配 问题 内存 CUDA 图像      更新时间:2023-10-16

我有一个从图像生成直方图的函数(这是家庭作业,题目已给出顺序版本):

// Host-side setup: allocate and zero a host histogram, copy the grayscale
// image to the device, allocate/zero the device histogram, run the kernel,
// and copy the bins back.
// NOTE(review): gpuImage and cuda_err are declared/allocated outside this
// snippet; grayImage appears to convert implicitly to unsigned char*
// (CImg data pointer) -- confirm against the full file.
CImg< unsigned char > histogramImage = CImg< unsigned char >(BAR_WIDTH * HISTOGRAM_SIZE, HISTOGRAM_SIZE, 1, 1);
unsigned int *histogram;
// Host histogram buffer, zero-initialised.
// NOTE(review): the malloc result is not checked for NULL, and neither
// histogram nor the device buffers are freed anywhere in this snippet.
histogram = (unsigned int *)malloc(HISTOGRAM_SIZE * sizeof(unsigned int));
 memset(reinterpret_cast< void * >(histogram), 0, HISTOGRAM_SIZE * sizeof(unsigned int));
// Zero the device image, then overwrite it with the host image.
// NOTE(review): this cudaMemset is redundant (the cudaMemcpy below fills
// the entire buffer) and its return code is not checked.
cudaMemset(gpuImage, 0, grayImage.width() * grayImage.height() * sizeof(unsigned char));
cuda_err = cudaMemcpy(gpuImage, grayImage, grayImage.width() * grayImage.height() * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cuda_err != cudaSuccess)
{
    std::cout << "ERROR: Failed cudaMemcpy" << std::endl;
   return -1;
}
// Device histogram buffer: HISTOGRAM_SIZE bins of unsigned int.
unsigned int *gpuhistogram;
cuda_err = cudaMalloc((void **)(&gpuhistogram), HISTOGRAM_SIZE * sizeof(unsigned int));
if (cuda_err != cudaSuccess)
{
    // NOTE(review): execution continues with an invalid gpuhistogram
    // pointer after this failure -- an early return would be safer.
    std::cout << "ERROR: Failed cudaMalloc" << std::endl;
}
// Bins must start at zero because the kernel accumulates with atomicAdd.
cudaMemset (gpuhistogram, 0, HISTOGRAM_SIZE * sizeof(unsigned int));
histogram1D(gpuImage, histogramImage, grayImage.width(), grayImage.height(), gpuhistogram, HISTOGRAM_SIZE, BAR_WIDTH, total, gridSize, blockSize);
// Blocking copy of the accumulated bins back to the host (also
// synchronizes with the asynchronous kernel launch above).
cuda_err = cudaMemcpy(histogram, gpuhistogram, HISTOGRAM_SIZE * sizeof(unsigned int), cudaMemcpyDeviceToHost);
if (cuda_err != cudaSuccess)
{
    std::cout << "ERROR: Failed cudaMemcpy" << std::endl;
}

上面的代码调用了如下函数:

/**
 * Launches the 2D histo kernel over the grayscale image and prints the
 * kernel execution time.
 *
 * @param grayImage       device pointer to width*height grayscale pixels
 * @param histogramImage  host render buffer (unused in this function)
 * @param width,height    image dimensions
 * @param histogram       device pointer to HISTOGRAM_SIZE bins, zeroed by caller
 * @param grid_size,block_size  launch configuration; must tile the image
 *                        exactly, since the kernel has no bounds check
 * (HISTOGRAM_SIZE, BAR_WIDTH and timer are kept for interface compatibility.)
 */
void histogram1D(unsigned char *grayImage, unsigned char *histogramImage, const int width, const int height, unsigned int *histogram, const unsigned int HISTOGRAM_SIZE, const unsigned int BAR_WIDTH, NSTimer &timer, dim3 grid_size, dim3 block_size) {
    NSTimer kernelTime = NSTimer("kernelTime", false, false);
    kernelTime.start();
    histo <<< grid_size, block_size >>> (grayImage, histogram, width);
    // A kernel launch returns no status itself; a launch-configuration
    // error (e.g. a grid dimension above the device limit, which large
    // images can trigger) is only visible via cudaGetLastError(). Without
    // this check the kernel silently never runs and the histogram stays 0.
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess)
    {
        std::cout << "ERROR: kernel launch failed: " << cudaGetErrorString(launch_err) << std::endl;
    }
    // The launch is asynchronous: synchronize so the timer measures the
    // kernel's execution, not just the launch overhead.
    cudaDeviceSynchronize();
    kernelTime.stop();
    cout << fixed << setprecision(6);
    // FIX: "tt" in the original output was a mangled "\t\t" escape.
    cout << "histogram1D (kernel): \t\t" << kernelTime.getElapsed() << " seconds." << endl;
}

内核函数为

// Naive per-pixel histogram kernel: each thread reads exactly one pixel and
// atomically increments the matching bin in global memory.
// Precondition: a 2D launch whose grid tiles the image EXACTLY -- there is
// no bounds check, so gridDim.x*blockDim.x must equal width and
// gridDim.y*blockDim.y must equal height, or out-of-bounds pixels are read.
// NOTE(review): for very large images (e.g. 8192x8192) the required grid
// dimensions can exceed the device's per-dimension grid limit (65535 on
// older compute capabilities, per the quoted table below); the launch then
// fails silently unless cudaGetLastError() is checked -- the likely cause
// of the all-zero histogram described in the question. TODO confirm the
// gridSize/blockSize values used by the caller.
__global__ void histo(unsigned char *inputImage, unsigned int *histogram, int width)
{
// Global 2D pixel coordinates of this thread.
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
// The 8-bit pixel value selects the histogram bin.
unsigned int index = static_cast< unsigned int >(inputImage[(y * width) + x]);
// Atomic: many threads may increment the same bin concurrently.
atomicAdd(&histogram[index],1);
}

我遇到的问题是:当我用 1024x1024 到 3543x2480 的图像调用它时,它可以正常工作。但是,当我使用一张 8192x8192 的图像时,函数返回后 *histogram 中的值仍然全部为 0。我的试验似乎表明问题与 *gpuhistogram 的内存分配有关(unsigned int 不应该足够大吗?毕竟顺序版本是可以正常工作的)。如何解决这个问题?有什么想法吗?

  1. 检查您的卡。来自维基百科:

    技术规格按计算能力(版本)划分:线程块网格的最大维数——1.0/1.1/1.2/1.3 为 2 维,2.x/3.0/3.5 为 3 维;网格的最大 x 维度——1.0 至 2.x 为 65535,3.0/3.5 为 2³¹−1。

  2. 我怀疑你这个直方图实现的性能会比 CPU 代码还差。请尝试使用共享内存之类的手段,并假设只有 256 个灰度值:诀窍是每个块为每个 bin 分配一个线程(即每个块 256 个线程)。我不想损害作者的收入,具体请参阅《CUDA by Example》(2010)。

只是想补充一下; 这就是我现在正在做的事情,按照米哈伊尔的回答;

/**
 * Launches the shared-memory histo kernel (grid-stride version) and prints
 * the kernel execution time in milliseconds.
 *
 * @param grayImage   device pointer to width*height grayscale pixels
 * @param histogram   device pointer to 256 bins, zeroed by the caller
 * Note: grid_size/block_size parameters are intentionally ignored here --
 * the answer hardcodes 15*2 blocks (15 SMs on the author's device) of
 * 256 threads, as the kernel requires blockDim.x == 256.
 */
void histogram1D(unsigned char *grayImage, unsigned char *histogramImage, const int width, const int height, unsigned int *histogram, const unsigned int HISTOGRAM_SIZE, const unsigned int BAR_WIDTH, NSTimer &timer, dim3 grid_size, dim3 block_size) {
    NSTimer kernelTime = NSTimer("kernelTime", false, false);

    kernelTime.start();
    // Kernel: 15 is the number of SMs on my device.
    histo <<< 15*2, 256 >>> (grayImage, histogram, width, height);
    // Launch errors (bad configuration) only surface through this call.
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess)
    {
        std::cout << "ERROR: kernel launch failed: " << cudaGetErrorString(launch_err) << std::endl;
    }
    // FIX: the synchronize was commented out in the original because timings
    // "looked slow" -- but the launch is asynchronous, so without it the
    // timer only measures launch overhead, not the kernel. The sync is
    // required for a meaningful measurement.
    cudaDeviceSynchronize();
    kernelTime.stop();
    cout << fixed << setprecision(6);
    // FIX: "tt" in the original output was a mangled "\t\t" escape.
    cout << "histogram1D (kernel): \t\t" << kernelTime.getElapsed()*1000 << " milliseconds." << endl;
}

内核代码;

// Shared-memory histogram kernel using a grid-stride loop.
// Preconditions: launched 1D with blockDim.x == 256 (one thread per bin;
// temp[] and the final merge both index by threadIdx.x), and histogram
// points to 256 bins zeroed before launch (results are accumulated with
// atomicAdd, not overwritten).
__global__ void histo(unsigned char *inputImage, unsigned int *histogram, int width, int height)
{
    // Per-block private histogram: keeps the heavily contended atomics in
    // fast shared memory instead of global memory.
    __shared__ unsigned int temp[256];
    temp[threadIdx.x] = 0;
    __syncthreads(); // all bins zeroed before any thread accumulates

    // Grid-stride loop over all width*height pixels.
    // FIX: the stride must be the total number of launched threads,
    // blockDim.x * gridDim.x. The original used blockDim.y * gridDim.x,
    // which equals gridDim.x for this 1D launch (blockDim.y == 1), so
    // threads revisited pixels already counted by other threads and the
    // bin counts came out wrong.
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    while (i < width * height)
    {
        atomicAdd(&temp[inputImage[i]], 1);
        i += stride;
    }
    __syncthreads(); // block-local histogram complete before the merge

    // Merge this block's partial histogram into the global result:
    // one atomic per bin per block instead of one per pixel.
    atomicAdd(&(histogram[threadIdx.x]), temp[threadIdx.x]);
}