CUDA计算后数组中存在重复值
Duplicate values in array after CUDA calculations
我复制了一个异步CUDA/C++示例,并对其进行了修改以评估素性。我的问题是,对于每个打印的素数,数组中的下一个值都是该值的副本。这是有意的行为,还是我编程示例的方式有问题?
代码:
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//
// This sample illustrates the usage of CUDA events for both GPU timing and
// overlapping CPU and GPU execution. Events are inserted into a stream
// of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can
// perform computations while GPU is executing (including DMA memcopies
// between the host and device). CPU can query CUDA events to determine
// whether GPU has completed tasks.
//
// includes, system
#include <stdio.h>
// includes CUDA Runtime
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper utility functions
//set matrix to possible prime values
//evaluate if input is prime, sets variable to 0 if not prime
// Primality-test kernel (QUESTION'S VERSION — reproduced verbatim, including
// the defect the question asks about). Each thread generates one candidate
// number, stores it into g_data[idx], and overwrites it with 0 if composite.
// Expected launch: 1-D grid with gridDim.x * blockDim.x == array length
// (there is no bounds check on idx).
__global__ void testPrimality(int * g_data) {
// Canonical globally-unique 1-D thread index: 0, 1, 2, 3, ...
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// BUG (the subject of this Q&A): integer division. idx / 2 yields
// 0,0,1,1,2,2,... so every candidate value 3 + idx/2 appears TWICE in the
// array — hence the duplicated primes in the output. The accepted fix
// (see the corrected listing later in this document) is 3 + idx * 2.
g_data[idx] = 3 + idx / 2;
if (g_data[idx] <= 3) {
// Candidates 2 and 3 are prime; only values <= 1 are rejected here.
if (g_data[idx] <= 1) {
g_data[idx] = 0;
}
}
// Multiples of 2 or 3 are composite (values > 3 only reach this branch).
else if (g_data[idx] % 2 == 0 || g_data[idx] % 3 == 0) {
g_data[idx] = 0;
}
else {
// Trial division by 6k +/- 1 candidates: 5, 7, 11, 13, 17, 19, ...
// Note the loop re-reads g_data[idx] from global memory every iteration,
// and exits after a factor is found only because the stored 0 makes the
// loop condition false.
// NOTE(review): unsigned short i means i*i (promoted to int) can overflow
// for candidates larger than ~2^31 / 1 — safe for the sizes used here,
// but fragile if the candidate range grows.
for (unsigned short i = 5; i * i <= g_data[idx]; i += 6) {
if (g_data[idx] % i == 0 || g_data[idx] % (i + 2) == 0) {
g_data[idx] = 0;
}
}
}
}
// Verify that every element of data[0..n-1] equals the reference value x.
// Prints the first mismatch (index, actual, expected) and returns false;
// returns true when all n elements match (trivially true for n == 0).
bool correct_output(int *data, const int n, const int x)
{
    for (int i = 0; i < n; i++) {
        if (data[i] != x) {
            // Fixed: the transcribed source had "%dn" — the backslash of the
            // newline escape was lost, so the message printed a literal 'n'.
            printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
            return false;
        }
    }
    return true;
}
// Driver (QUESTION'S VERSION — reproduced verbatim). Based on the CUDA
// asyncAPI sample: issues H2D copy, kernel, D2H copy asynchronously on the
// default stream, busy-waits on a CUDA event while counting CPU iterations,
// then prints every nonzero (prime) result.
// NOTE(review): the printf format strings below end in a bare 'n' — the
// '\n' backslashes were almost certainly lost in transcription.
int main(int argc, char *argv[])
{
int devID;
cudaDeviceProp deviceProps;
printf("[%s] - Starting...n", argv[0]);
// This will pick the best possible CUDA capable device
devID = findCudaDevice(argc, (const char **)argv);
// get device name
checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s]n", deviceProps.name);
// 16M candidates, one per thread.
const int n = 16 * 1024 * 1024;
int nbytes = n * sizeof(int);
// Leftover from the original increment sample; unused here except by the
// commented-out correct_output call below.
int value = 1;
// allocate host memory (pinned, so the async memcpys below can overlap)
int *a = 0;
checkCudaErrors(cudaMallocHost((void **)&a, nbytes));
memset(a, 0, nbytes);
// allocate device memory
int *d_a=0;
checkCudaErrors(cudaMalloc((void **)&d_a, nbytes));
// Byte-wise fill with 0xFF; the kernel overwrites every element anyway.
checkCudaErrors(cudaMemset(d_a, 255, nbytes));
// set kernel launch configuration
// n is a multiple of 512, so the grid exactly covers the array (the kernel
// has no bounds check).
dim3 threads = dim3(512, 1);
dim3 blocks = dim3(n / threads.x, 1);
// create cuda event handles
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
checkCudaErrors(cudaDeviceSynchronize());
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
cudaEventRecord(start, 0);
cudaMemcpyAsync(d_a, a, nbytes, cudaMemcpyHostToDevice, 0);
//increment_kernel<<<blocks, threads, 0, 0>>>(d_a);
testPrimality<<<blocks, threads, 0, 0 >>>(d_a);
cudaMemcpyAsync(a, d_a, nbytes, cudaMemcpyDeviceToHost, 0);
cudaEventRecord(stop, 0);
// The CPU timer stops here: it measures only the (asynchronous) call
// issue time, not GPU execution time.
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter=0;
// Busy-poll until the stop event has been reached; after this loop the
// D2H copy into 'a' is complete, so reading a[] below is safe.
while (cudaEventQuery(stop) == cudaErrorNotReady)
{
counter++;
}
checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.2fn", gpu_time);
printf("time spent by CPU in CUDA calls: %.2fn", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finishn", counter);
//print values for all allocated memory space
// Nonzero entries are the primes found. Because of the idx/2 bug in the
// kernel, each prime appears at two consecutive indices — the duplication
// the question asks about.
// NOTE(review): std::cout is used but <iostream> is not included above;
// presumably it is pulled in transitively via helper_functions.h — confirm.
for (int i = 0; i < n; i++) {
if (a[i] != 0) {
std::cout << a[i]<< " : " << i << std::endl;
}
}
// check the output for correctness
//bool bFinalResults = correct_output(a, n, value);
bool bFinalResults = true;
// release resources
// NOTE(review): the sdk timer created above is never released with
// sdkDeleteTimer — a small host-side leak.
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaFreeHost(a));
checkCudaErrors(cudaFree(d_a));
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
重复是由操作的实际"输入"值引起的。我不清楚你想要什么数字序列,但这行代码:
g_data[idx] = 3 + idx / 2;
执行的是整数除法(idx 属于 int 类型,g_data[idx] 也是)。
整数除以 2 意味着"输入"中的每个值都会出现两次,因此输出中的每个数值也会重复。如果您想查看输入值,请把上面那条 cout 语句修改成如下形式:
std::cout << a[i]<< " : " << i << " " << 3+i/2 << std::endl;
以"模拟"您在内核中进行的输入数据生成。如果你这样做,你会在最后一列数字中看到重复。
编辑:根据下面的评论,关于 idx 变量如何生成数字似乎存在一些疑问。以下是生成全局唯一线程 ID 的规范方法:
int idx = blockIdx.x * blockDim.x + threadIdx.x;
在典型的使用中,每个线程都会得到一个唯一的正索引,该索引比"前一个"线程高一个:
0,1,2,3,...
似乎想要的情况是创建一个看起来像这样的"输入"数据集:
3,5,7,9,...
因此,正确的算法取代了这一点:
g_data[idx] = 3 + idx / 2;
替换为这一行:
g_data[idx] = 3 + idx * 2;
以下是一个包含了该更改的完整示例,其中也包括我前面建议的 cout 修改:
$ cat t1119.cu
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//
// This sample illustrates the usage of CUDA events for both GPU timing and
// overlapping CPU and GPU execution. Events are inserted into a stream
// of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can
// perform computations while GPU is executing (including DMA memcopies
// between the host and device). CPU can query CUDA events to determine
// whether GPU has completed tasks.
//
// includes, system
#include <stdio.h>
#include <cstdlib>   // exit, EXIT_SUCCESS / EXIT_FAILURE
#include <cstring>   // memset
#include <iostream>  // std::cout in the result-printing loop
// includes CUDA Runtime
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper utility functions
//set matrix to possible prime values
//evaluate if input is prime, sets variable to 0 if not prime
// Primality-test kernel (corrected version): thread idx tests the candidate
// 3 + 2*idx, i.e. the odd numbers 3, 5, 7, 9, ... — one unique candidate per
// thread, so the output contains no duplicates.
// Writes the candidate to g_data[idx] if it is prime, 0 otherwise.
// Expected launch: 1-D grid with gridDim.x * blockDim.x == array length
// (no bounds check on idx).
__global__ void testPrimality(int * g_data) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Keep the candidate and result in registers and write global memory
    // exactly once, instead of re-reading/re-writing g_data[idx] throughout.
    int candidate = 3 + idx * 2;
    int result = candidate;
    if (candidate <= 3) {
        // 2 and 3 are prime; only values <= 1 would be rejected (unreachable
        // here since candidate >= 3, but kept for a self-contained test).
        if (candidate <= 1) {
            result = 0;
        }
    }
    else if (candidate % 2 == 0 || candidate % 3 == 0) {
        result = 0;
    }
    else {
        // Trial division by 6k +/- 1. Use int (not unsigned short) for the
        // loop variable and widen i*i to long long so the comparison cannot
        // overflow for candidates anywhere up to INT_MAX.
        for (int i = 5; (long long)i * i <= candidate; i += 6) {
            if (candidate % i == 0 || candidate % (i + 2) == 0) {
                result = 0;
                break;  // factor found — no need to keep scanning
            }
        }
    }
    g_data[idx] = result;
}
// Verify that every element of data[0..n-1] equals the reference value x.
// Prints the first mismatch (index, actual, expected) and returns false;
// returns true when all n elements match (trivially true for n == 0).
bool correct_output(int *data, const int n, const int x)
{
    for (int i = 0; i < n; i++) {
        if (data[i] != x) {
            // Fixed: the transcribed source had "%dn" — the backslash of the
            // newline escape was lost, so the message printed a literal 'n'.
            printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
            return false;
        }
    }
    return true;
}
// Driver (corrected version). Based on the CUDA asyncAPI sample: issues
// H2D copy, kernel, D2H copy asynchronously on the default stream, busy-waits
// on a CUDA event while counting CPU iterations, then prints every nonzero
// (prime) result together with the candidate each index generated.
// Fixes relative to the transcribed original: '\n' restored in printf format
// strings, CUDA calls checked, launch errors caught via cudaGetLastError,
// and the sdk timer is released.
int main(int argc, char *argv[])
{
    int devID;
    cudaDeviceProp deviceProps;
    printf("[%s] - Starting...\n", argv[0]);
    // This will pick the best possible CUDA capable device
    devID = findCudaDevice(argc, (const char **)argv);
    // get device name
    checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
    printf("CUDA device [%s]\n", deviceProps.name);
    //const int n = 16 * 1024 * 1024;
    const int n = 1024;            // number of candidates (one per thread)
    int nbytes = n * sizeof(int);
    //int value = 1;
    // allocate pinned host memory so the async copies below can overlap
    int *a = 0;
    checkCudaErrors(cudaMallocHost((void **)&a, nbytes));
    memset(a, 0, nbytes);
    // allocate device memory
    int *d_a = 0;
    checkCudaErrors(cudaMalloc((void **)&d_a, nbytes));
    checkCudaErrors(cudaMemset(d_a, 255, nbytes));
    // set kernel launch configuration
    // n must stay a multiple of threads.x: the kernel has no bounds check,
    // so the grid must cover the array exactly.
    dim3 threads = dim3(512, 1);
    dim3 blocks = dim3(n / threads.x, 1);
    // create cuda event handles
    cudaEvent_t start, stop;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));
    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkResetTimer(&timer);
    checkCudaErrors(cudaDeviceSynchronize());
    float gpu_time = 0.0f;
    // asynchronously issue work to the GPU (all to stream 0)
    sdkStartTimer(&timer);
    checkCudaErrors(cudaEventRecord(start, 0));
    checkCudaErrors(cudaMemcpyAsync(d_a, a, nbytes, cudaMemcpyHostToDevice, 0));
    //increment_kernel<<<blocks, threads, 0, 0>>>(d_a);
    testPrimality<<<blocks, threads, 0, 0 >>>(d_a);
    checkCudaErrors(cudaGetLastError());   // catch launch-configuration errors
    checkCudaErrors(cudaMemcpyAsync(a, d_a, nbytes, cudaMemcpyDeviceToHost, 0));
    checkCudaErrors(cudaEventRecord(stop, 0));
    // The CPU timer measures only asynchronous call-issue time, not GPU time.
    sdkStopTimer(&timer);
    // have CPU do some work while waiting for stage 1 to finish
    unsigned long int counter = 0;
    // After this loop the D2H copy into 'a' has completed, so reading a[]
    // below is safe.
    while (cudaEventQuery(stop) == cudaErrorNotReady)
    {
        counter++;
    }
    checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
    // print the cpu and gpu times
    printf("time spent executing by the GPU: %.2f\n", gpu_time);
    printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
    printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
    // Print every prime found: value, array index, and the candidate that
    // index i generated (3 + i*2) — demonstrating the input sequence itself
    // has no duplicates.
    for (int i = 0; i < n; i++) {
        if (a[i] != 0) {
            std::cout << a[i] << " : " << i << " " << 3 + i * 2 << std::endl;
        }
    }
    // check the output for correctness
    //bool bFinalResults = correct_output(a, n, value);
    bool bFinalResults = true;
    // release resources
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));
    sdkDeleteTimer(&timer);   // release the helper timer (leaked before)
    checkCudaErrors(cudaFreeHost(a));
    checkCudaErrors(cudaFree(d_a));
    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits
    cudaDeviceReset();
    exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
$ nvcc -I/usr/local/cuda/samples/common/inc t1119.cu -o t1119
$ cuda-memcheck ./t1119
(excerpted output:)
337 : 167 337
347 : 172 347
349 : 173 349
353 : 175 353
359 : 178 359
367 : 182 367
373 : 185 373
379 : 188 379
383 : 190 383
389 : 193 389
397 : 197 397
401 : 199 401
409 : 203 409
419 : 208 419
421 : 209 421
431 : 214 431
433 : 215 433
439 : 218 439
443 : 220 443
449 : 223 449
457 : 227 457
461 : 229 461
463 : 230 463
467 : 232 467
479 : 238 479
487 : 242 487
491 : 244 491
499 : 248 499
503 : 250 503
509 : 253 509
521 : 259 521
523 : 260 523
541 : 269 541
547 : 272 547
557 : 277 557
563 : 280 563
569 : 283 569
571 : 284 571
577 : 287 577
587 : 292 587
593 : 295 593
599 : 298 599
601 : 299 601
607 : 302 607
613 : 305 613
617 : 307 617
619 : 308 619
如上所述,输出序列中没有重复项。
相关文章:
- 为什么"do while"循环不断退出,即使条件计算结果为 false?
- C++模板来检查友元函数的存在
- 递归函数计算序列中的平方和(并输出过程)
- (C++)分析树以计算返回错误值的简单算术表达式
- 我的字符计数代码计算错误.为什么
- 在计算中使用二的幂有多有利可图
- 既然存在危险,为什么项目要使用-I include开关
- 我们可以访问一个不存在的联盟的成员吗
- 如何计算文件中的"columns"数?
- C++:对不存在的命名空间使用命名空间指令
- 计算排序向量的向量中唯一值的计数
- 计算数组中存在其总和的对数的算法
- 此外,在计算大斐波那契数时存在精度误差
- 动态规划:计算集合中存在多少个升序子集
- Kahan求和算法在GCC编译时存在较大的计算错误
- CUDA计算后数组中存在重复值
- 在标头不存在的计算机上运行已编译的 C 程序
- 我的iOS金属计算内核是否存在编译器错误,或者我遗漏了什么
- 是否存在用于C或C++中的矩阵计算的开源模板库
- 不明白tm_struct (C++) 计算 - 是否存在某种偏移量?