thrust::raw_pointer_cast和多个GPU,奇怪的行为
thrust::raw_pointer_cast and multiple GPU, weird behaviour
我在我的代码中使用了大量的推力,因为它是一个很好的包装器,提供了非常有用的实用程序,我更相信,因为异步行为的支持已经添加。
我的代码使用cuda推力工作得很好,直到我最近试图在我的应用程序中添加多gpu支持。
CUDA运行时API错误77:遇到非法内存访问
在我的代码的一部分,从来没有显示任何边界问题。
我在我的代码中添加了冗长的内容,似乎我的thrust::device_vector指针地址在执行过程中发生了变化,没有明显的原因,在手写内核中产生了错误77。
我可能误解了UVA的概念及其最终的"副作用",但是,我仍然对理解导致指针更新的过程感兴趣。
我无法准确地重现我的问题,在这个问题中,我不使用临时主机变量来存储cuda内存指针,而只在内核包装器调用需要时动态地使用thrust::raw_pointer_cast。
但是我写了一个小程序,显示了我可能遇到的问题,注意这不是很健壮,你需要在你的系统上至少有2个gpu来运行它:
/********************************************************************************************
** Compile using nvcc ./test.cu -gencode arch=compute_35,code=sm_35 -std=c++11 -o test.exe **
********************************************************************************************/
//Standard Library
#include <iostream>
#include <vector>
//Cuda
#include "cuda_runtime.h"
//Thrust
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
// Report a failed CUDA runtime call with file/line context.
// Deliberately non-fatal: execution continues so the demo's later output
// can still be observed after an error is reported.
inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
if( err != cudaSuccess )
{
// fixed: format string ended in a literal " n" instead of "\n";
// also route diagnostics to stderr rather than stdout
fprintf(stderr, "%s(%i) : CUDA Runtime API error %i : %s\n", file, line, (int)err, cudaGetErrorString(err) );
}
}
// Convenience wrapper that captures the call site automatically.
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Debug kernel: prints the device pointer it received, then writes `value`
// into buf[0]. Launched <<<1,1>>> in this demo; buf must be a device pointer
// valid on the device current at launch time.
__global__ void write_memory( float* buf, float value )
{
// fixed: "%p n" printed a literal n with no newline
printf("GPU TALK: Raw pointer is %p\n",buf);
buf[0] = value;
}
// Demonstrates the bug: a raw pointer captured from a thrust::device_vector
// becomes stale after the enclosing std::vector reallocates.
int main( int argc, char* argv[] )
{
//declare a vector of vector
//NOTE(review): std::vector may reallocate on emplace_back; its elements
//(thrust::device_vector) are then moved/copied, which can allocate fresh
//device storage on whatever device is CURRENT at that moment. Any raw
//pointer captured before the reallocation is left dangling — that is the
//failure this program demonstrates.
std::vector<thrust::device_vector<float> > v;
float test;
float* tmp;
//Initialize first vector on GPU 0
cudaSetDevice( 0 );
v.emplace_back( 65536, 1.0f );
//Capture the raw device pointer NOW; it stays valid only while v.at(0)
//keeps its current storage.
tmp = thrust::raw_pointer_cast( v.at(0).data() );
std::cout << " Host TALK: Raw pointer of vector 0 at step 0 " << (void*)tmp << std::endl;
//Try to use it raw pointer
write_memory<<<1,1,0,0>>>( tmp, 2.0f );
checkCudaErrors( cudaStreamSynchronize( NULL ) );
test = v.at(0)[0];
std::cout << " Host TALK: After first kernel launch, value is " << test << std::endl;
//Initialize second vector on GPU 1, but we do not use it
//NOTE(review): this emplace_back can force v to reallocate, relocating
//v.at(0)'s device storage while device 1 is current; the sample output
//confirms vector 0's address changes here, so tmp now dangles.
cudaSetDevice( 1 );
v.emplace_back( 65536, 1.0f );
std::cout << " Host TALK: Raw pointer of vector 0 at step 1 is now " << (void*)thrust::raw_pointer_cast( v.at(0).data() ) << " != " << (void*)tmp << std::endl;
std::cout << " Host TALK: Raw pointer of vector 1 at step 1 is " << (void*)thrust::raw_pointer_cast( v.at(1).data() ) << std::endl;
//Try to use the first vector : No segmentation fault ?
//Re-reading the pointer via raw_pointer_cast picks up the NEW address,
//so this launch succeeds even though the old pointer is stale.
test = v.at(0)[0];
std::cout << " Host TALK: Before second kernel launch, value is " << test << std::endl;
write_memory<<<1,1,0,0>>>( thrust::raw_pointer_cast( v.at(0).data() ), 3.0f );
checkCudaErrors( cudaStreamSynchronize( NULL ) );
test = v.at(0)[0];
std::cout << " Host TALK: After second kernel launch, value is " << test << std::endl;
//Raw pointer stored elsewhere: generates a segmentation fault
//tmp still holds the pre-reallocation address -> illegal memory access
//(CUDA error 77), as shown in the sample output.
write_memory<<<1,1,0,0>>>( tmp, 4.0f );
checkCudaErrors( cudaStreamSynchronize( NULL ) );
test = v.at(0)[0];
std::cout << " Host TALK: After third kernel launch, value is " << test << std::endl;
return 0;
}
下面是它在我的机器上产生的输出的一个例子:
主机TALK: 0步矢量0的原始指针0xb043c0000
GPU TALK: Raw指针是0xb043c0000
Host TALK:第一次内核启动后,值为2
主机对话:第一步矢量0的原始指针现在是0xb080000000 != 0xb043c0000
主机对话:第一步矢量1的原始指针为0xb07fc0000
Host TALK:在第二次内核启动之前,值是2
GPU TALK: Raw指针是0xb080000000
Host TALK:第二次内核启动后,值为3
GPU TALK: Raw指针是0xb043c0000
./test.cu(68): CUDA运行时API错误77:遇到非法内存访问。随后程序因抛出 'thrust::system::system_error' 实例而调用 terminate,what() 信息为:遇到非法内存访问
提前感谢您的帮助,我也可以在thrust的github上问这个问题。
编辑:多亏了ms和Hiura,下面的代码按预期工作:
/********************************************************************************************
** Compile using nvcc ./test.cu -gencode arch=compute_35,code=sm_35 -std=c++11 -o test.exe **
********************************************************************************************/
//Standard Library
#include <iostream>
#include <vector>
//Cuda
#include "cuda_runtime.h"
//Thrust
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
// Report a failed CUDA runtime call with file/line context.
// Deliberately non-fatal: execution continues so the demo's later output
// can still be observed after an error is reported.
inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
if( err != cudaSuccess )
{
// fixed: format string ended in a literal " n" instead of "\n";
// also route diagnostics to stderr rather than stdout
fprintf(stderr, "%s(%i) : CUDA Runtime API error %i : %s\n", file, line, (int)err, cudaGetErrorString(err) );
}
}
// Convenience wrapper that captures the call site automatically.
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Debug kernel: prints the device pointer it received, then writes `value`
// into buf[0]. Launched <<<1,1>>> in this demo; buf must be a device pointer
// valid on the device current at launch time.
__global__ void write_memory( float* buf, float value )
{
// fixed: "%p n" printed a literal n with no newline
printf("GPU TALK: Raw pointer is %p\n",buf);
buf[0] = value;
}
// Corrected version: device_vector storage is never relocated, and kernels
// always run on the device that owns the memory they touch.
int main( int argc, char* argv[] )
{
//declare a vector of vector
std::vector<thrust::device_vector<float> > v;
//FIX 1: reserving capacity up front prevents std::vector from reallocating
//on emplace_back, so the device_vector elements (and their device storage)
//are never moved and raw pointers captured earlier remain valid.
v.reserve(2);
float test;
float* tmp;
//Initialize first vector on GPU 0
cudaSetDevice( 0 );
v.emplace_back( 65536, 1.0f );
tmp = thrust::raw_pointer_cast( v.at(0).data() );
std::cout << " Host TALK: Raw pointer of vector 0 at step 0 " << (void*)tmp << std::endl;
//Try to use it raw pointer
write_memory<<<1,1,0,0>>>( tmp, 2.0f );
checkCudaErrors( cudaStreamSynchronize( NULL ) );
test = v.at(0)[0];
std::cout << " Host TALK: After first kernel launch, value is " << test << std::endl;
//Initialize second vector on GPU 1, but we do not use it
cudaSetDevice( 1 );
v.emplace_back( 65536, 1.0f );
//With reserve(2) in place the sample output shows vector 0's address is
//unchanged here, so tmp is still valid.
std::cout << " Host TALK: Raw pointer of vector 0 at step 1 is now " << (void*)thrust::raw_pointer_cast( v.at(0).data() ) << " != " << (void*)tmp << std::endl;
std::cout << " Host TALK: Raw pointer of vector 1 at step 1 is " << (void*)thrust::raw_pointer_cast( v.at(1).data() ) << std::endl;
//Try to use the first vector : No segmentation fault ?
//FIX 2: switch back to the device that owns vector 0 before using it.
cudaSetDevice( 0 );
test = v.at(0)[0];
std::cout << " Host TALK: Before second kernel launch, value is " << test << std::endl;
write_memory<<<1,1,0,0>>>( thrust::raw_pointer_cast( v.at(0).data() ), 3.0f );
checkCudaErrors( cudaStreamSynchronize( NULL ) );
test = v.at(0)[0];
std::cout << " Host TALK: After second kernel launch, value is " << test << std::endl;
//Raw pointer stored elsewhere: generates a segmentation fault
//No longer a problem: tmp still points at vector 0's (unmoved) storage,
//so this third launch now succeeds (value becomes 4).
write_memory<<<1,1,0,0>>>( tmp, 4.0f );
checkCudaErrors( cudaStreamSynchronize( NULL ) );
test = v.at(0)[0];
std::cout << " Host TALK: After third kernel launch, value is " << test << std::endl;
return 0;
}
这是我代码中最后一个仍然使用简单对象的向量、而不是指向对象的指针的向量的地方,现在我明白应当改用后者,以避免这些恼人的移动/复制问题…
现在输出为:
主机TALK: 0步矢量0的原始指针0xb043c0000
GPU TALK: Raw指针是0xb043c0000
Host TALK:第一次内核启动后,值为2
主机对话:第一步矢量0的原始指针现在是0xb043c0000 != 0xb043c0000
主机对话:第一步矢量1的原始指针为0xb07fc0000
Host TALK:在第二次内核启动之前,值是2
GPU TALK: Raw指针是0xb043c0000
Host TALK:第二次内核启动后,值为3
GPU TALK: Raw指针是0xb043c0000
Host TALK:第三次内核启动后,值为4
所以我很快安装了CUDA来测试我的假设:添加reserve
语句可以保留地址。
//declare a vector of vector
std::vector<thrust::device_vector<float> > v;
v.reserve(2); // <<-- HERE
float test;
float* tmp;
和输出,首先没有补丁。
$ nvcc thrust.cu -std=c++11 -o test
$ ./test
Host TALK: Raw pointer of vector 0 at step 0 0x700ca0000
GPU TALK: Raw pointer is 0x700ca0000
Host TALK: After first kernel launch, value is 2
Host TALK: Raw pointer of vector 0 at step 1 is now 0x700d20000 != 0x700ca0000
Host TALK: Raw pointer of vector 1 at step 1 is 0x700ce0000
Host TALK: Before second kernel launch, value is 2
GPU TALK: Raw pointer is 0x700d20000
Host TALK: After second kernel launch, value is 3
GPU TALK: Raw pointer is 0x700ca0000
Host TALK: After third kernel launch, value is 3
与补丁:
$ nvcc thrust.cu -std=c++11 -o test
$ ./test
Host TALK: Raw pointer of vector 0 at step 0 0x700ca0000
GPU TALK: Raw pointer is 0x700ca0000
Host TALK: After first kernel launch, value is 2
Host TALK: Raw pointer of vector 0 at step 1 is now 0x700ca0000 != 0x700ca0000
Host TALK: Raw pointer of vector 1 at step 1 is 0x700ce0000
Host TALK: Before second kernel launch, value is 2
GPU TALK: Raw pointer is 0x700ca0000
Host TALK: After second kernel launch, value is 3
GPU TALK: Raw pointer is 0x700ca0000
Host TALK: After third kernel launch, value is 4
- C++ - "!pointer"和"pointer == nullptr"的区别?
- 在使用GPU支持编译Tensorflow时,会遇到CUDA_TOOLKIT_PATH未绑定变量
- 有没有办法简单地从 GPU 调用多个 cpp 输出文件?
- 在 DirectX 11 中从 GPU 读回顶点缓冲区(并获取顶点)
- 跨平台 GPU 计算
- C++:从GPU内存(cudaMemcpy2D)获取BGR图像(cv::Mat)
- 请求最简单的 OpenMP 目标 GPU 示例
- "No-Const Pointer to Const "调用功能
- "owned pointer"和 std::shared_ptr 的"stored pointer"有什么区别?
- DirectX 11 如何处理来自 GPU 上的 sharedHandle 的图像
- 删除分配的 (?) 指针时"Pointer being freed was not allocated"
- 编译 GPU 的张量流示例自定义操作
- 多 GPU 批处理 1D FFT:似乎只有一个 GPU 可以工作
- 如何在GPU支持下编译tflite?
- OpenCL 在 NVIDIA 和 Intel GPU 上启动内核时CL_INVALID_COMMAND_QUEUE
- OpenGL glGetUniformBlockIndex 在 nvidea GPU 上返回INVALID_INDEX
- SDL GPU 为什么将两个图像分成两个单独的循环更快?
- 使 C++ Pi 近似在 GPU Nvidia 970M CUDA 上的 Paralell 中运行
- 具有 GPU 时间表的卤化物产生黑色图像
- 如何在openACC中复制gpu vector-of-vector-pointer-memory