GPU与CPU编程:处理时间的不一致

GPU vs CPU programming: inconsistencies in processing times

本文关键字:时间 不一致 处理 CPU 编程 GPU      更新时间:2023-10-16

我目前正在研究图像跟踪:借助一个摄像头,我正在跟踪与Android系统交互的手指触摸。图像处理是在使用OpenCL的GPU上完成的:我将相机输出转换为黑白帧,以便获得白色的斑点。此方法的处理时间为65ms。由于我的目标是使程序更流畅,因此我使用OpenCV方法在CPU上执行了相同的操作。这样处理时间为115毫秒。问题是,使用OpenCV方法时,程序反而感觉反应更快、更流畅,我不明白在这种情况下处理时间更长是怎么回事:这对我来说似乎是矛盾的。我的测量方式如下:

start= clock();
finish = clock();
double time =((double)finish -start)/CLOCKS_PER_SEC;
std::cout<<"process time : "<< time<<std::endl;
下面是我的代码:
// NOTE(review): this is the body of the capture/processing routine; its
// signature lies above the quoted excerpt and is not visible here.
static cv::Mat              original_Right,binary_Right;
static cv::Mat              original_Left, binary_Left;
int                 width, height;
// clock()/clock_t measure CPU time, not wall time — see timing note below.
clock_t                 start,finish;
double time = 0.0;
// Query the right camera's frame geometry and allocate its BGR frame buffer.
width = (int) this->camera_Right.getCapture().get(cv::CAP_PROP_FRAME_WIDTH);
height = (int) this->camera_Right.getCapture().get(cv::CAP_PROP_FRAME_HEIGHT);
original_Right.create(height, width, CV_8UC3);

//--------------------------- Camera 2 ---------------------------------
int width_2 = (int) this->camera_Left.getCapture().get(cv::CAP_PROP_FRAME_WIDTH);
int height_2 = (int) this->camera_Left.getCapture().get(cv::CAP_PROP_FRAME_HEIGHT);
original_Left.create(height_2, width_2, CV_8UC3);

// Output masks: float for the GPU kernel's output, 8-bit for the
// (commented-out) CPU path.
binary_Right.create(height, width, CV_32F); // FOR GPU
binary_Left.create(height_2, width_2, CV_32F); // FOR GPU
//binary_Right.create(height, width, CV_8UC1); // FOR CPU
//binary_Left.create(height_2, width_2, CV_8UC1); // FOR CPU
Core::running_ = true;

//------------------------------------ SET UP THE GPU -----------------------------------------
cl_context              context;
cl_context_properties   properties [3];
cl_kernel               kernel;
cl_command_queue        command_queue;
cl_program              program;
cl_int                  err;
cl_uint                 num_of_platforms=0;
cl_platform_id          platform_id;
cl_device_id            device_id;
cl_uint                 num_of_devices=0;
cl_mem                  input, output;
size_t                  global;
// Bytes per input frame: width * height * 3 channels (CV_8UC3).
int                     data_size =height*width*3;

//load opencl source
FILE *fp;
char fileName[] = "./helloTedKrissV2.cl";
char *source_str;
 //Load the source code containing the kernel
fp = fopen(fileName, "r");
if (!fp) {
// NOTE(review): "n" in the literal was probably meant to be "\n" (missing escape).
fprintf(stderr, "Failed to load kernel.n");
exit(1);
}
// MAX_SOURCE_SIZE is defined elsewhere in the project (not visible here).
source_str = (char*)malloc(MAX_SOURCE_SIZE);
// 'global' temporarily holds the number of bytes read; it is overwritten
// below where it becomes the NDRange work size.
global = fread(source_str, 1, MAX_SOURCE_SIZE, fp);
fclose(fp);

//retreives a list of platforms available
if(clGetPlatformIDs(1,&platform_id, &num_of_platforms)!=CL_SUCCESS){
    std::cout<<"unable to get a platform_id"<<std::endl;
};
// to get a supported GPU device
if(clGetDeviceIDs(platform_id,CL_DEVICE_TYPE_GPU,1,&device_id, &num_of_devices)!= CL_SUCCESS){
    std::cout<<"unable to get a device_id"<<std::endl;      
};
//context properties list - must be terminated with 0
properties[0]=CL_CONTEXT_PLATFORM;
properties[1]=(cl_context_properties) platform_id;
properties[2]=0;
// create a context with the gpu device
context = clCreateContext(properties,1,&device_id,NULL,NULL,&err);
//create command queue using the context and device
command_queue = clCreateCommandQueue(context,device_id,0,&err);
//create a program from the kernel source code
program= clCreateProgramWithSource(context,1,(const char **) &source_str, NULL,&err);
// compile the program
if(clBuildProgram(program,0,NULL,NULL,NULL,NULL)!=CL_SUCCESS){
    size_t length;
    std::cout<<"Error building program"<<std::endl;
    // Dump the build log so kernel compile errors are visible.
    char buffer[4096];
    clGetProgramBuildInfo(program,device_id,CL_PROGRAM_BUILD_LOG, sizeof(buffer),buffer,&length);
    std::cout<< buffer <<std::endl;
}
//specify which kernel from the program to execute
kernel = clCreateKernel(program,"imageProcessing",&err);


// Main capture/process/track/send loop — one iteration per stereo frame pair.
while (this->isRunning() == true) { 
    // NOTE(review): clock() measures CPU time only; GPU kernel time and
    // driver blocking are mostly invisible to it, so this under-reports
    // the GPU path's real (wall-clock) latency.
    start= clock(); //--------------------- START----------------------
    //----------------------FRAME---------------------
    this->camera_Right.readFrame(original_Right);
    if (original_Right.empty() == true ) {
        std::cerr << "[Core/Error] Original  frame is empty." << std::endl;
        break;
    }
    this->camera_Left.readFrame(original_Left);
    if (original_Left.empty() == true ) {
        std::cerr << "[Core/Error] Original 2  frame is empty." << std::endl;
        break;
    }
    //----------------------FRAME---------------------

  //------------------------------------------------IMP GPU ------------------------------------------------------
    // NOTE(review): both cl_mem buffers are created and released on EVERY
    // iteration even though their sizes are constant; creating them once
    // outside the loop would avoid per-frame allocation (and, per the
    // discussion, the bandwidth cost that slows frame grabbing).
    input = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR  , sizeof(unsigned char)*data_size,NULL,NULL);
    output =clCreateBuffer(context,CL_MEM_READ_WRITE   | CL_MEM_ALLOC_HOST_PTR, sizeof(float)*data_size/3,NULL,NULL);
   // Blocking upload of the right frame's pixels; the error, if any, is ignored.
   if(clEnqueueWriteBuffer(command_queue,input,CL_TRUE,0,sizeof(unsigned char)*data_size, original_Right.data ,0,NULL,NULL )!= CL_SUCCESS){};
    //set the argument list for the kernel command
    clSetKernelArg(kernel,0,sizeof(cl_mem), &input);
    clSetKernelArg(kernel,1,sizeof(cl_mem), &output);
    // One work-item per input byte (width*height*3).
    global = data_size  ;
    //enqueue the kernel command for execution
    clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, &global, NULL,0,NULL,NULL);
    // Blocks the host until the kernel finishes; the blocking read below
    // would already guarantee ordering, so this stall is redundant.
    clFinish(command_queue);
    //copy the results from out of the  output buffer
    if(clEnqueueReadBuffer(command_queue,output,CL_TRUE ,0,sizeof(float)*data_size/3,binary_Right.data,0,NULL,NULL )!= CL_SUCCESS){};
    clReleaseMemObject(input);
    clReleaseMemObject(output);
    //------------------------------------------------IMP GPU ------------------------------------------------------
    // Same upload/process/download sequence for the left camera frame.
    input = clCreateBuffer(context, CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR  , sizeof(unsigned char)*data_size,NULL,NULL);
    output =clCreateBuffer(context,CL_MEM_READ_WRITE   | CL_MEM_ALLOC_HOST_PTR, sizeof(float)*data_size/3,NULL,NULL);
   if(clEnqueueWriteBuffer(command_queue,input,CL_TRUE,0,sizeof(unsigned char)*data_size, original_Left.data ,0,NULL,NULL )!= CL_SUCCESS){};
    //set the argument list for the kernel command
    clSetKernelArg(kernel,0,sizeof(cl_mem), &input);
    clSetKernelArg(kernel,1,sizeof(cl_mem), &output);
    global = data_size  ;
    //enqueue the kernel command for execution
    clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, &global, NULL,0,NULL,NULL);
    clFinish(command_queue);
    //copy the results from out of the  output buffer
    // NOTE(review): uses the right camera's data_size for the left frame —
    // only safe while both cameras deliver identical resolutions; confirm.
    if(clEnqueueReadBuffer(command_queue,output,CL_TRUE ,0,sizeof(float)*data_size/3,binary_Left.data,0,NULL,NULL )!= CL_SUCCESS){};
   clReleaseMemObject(input);
   clReleaseMemObject(output);
    //------------------------------------------------IMP GPU ------------------------------------------------------
  // CPU METHOD
  // adok::processing::doImageProcessing(original_Right, binary_Right);
  // adok::processing::doImageProcessing(original_Left, binary_Left);
    //-------------------------------------------------------------- TRACKING ------------------------------------------------------
adok::tracking::doFingerContoursTracking(binary_Right,binary_Left, this->fingerContours, this->perspective_Right,this->perspective_Left, this->distortion_Right,this->distortion_Left, this);
    //------------------------------------------- TRACKING -----------------------------------------
 //------------------------------SEND COORDINATES TO ANDROID BOARD--------------------
// Dispatch finger coordinates over UART depending on which side(s) saw touches.
if (getSideRight() && !getSideLeft() ) {
        std::cout<<"RIGHT : "<<std::endl;
        this->uart_.sendAll(this->fingerContours, this->perspective_Right.getPerspectiveMatrix(), RIGHT);
    }else if (!getSideRight() && getSideLeft() ){
        std::cout<<"LEFT : "<<std::endl;
        this->uart_.sendAll(this->fingerContours, this->perspective_Left.getPerspectiveMatrix(), LEFT);
    }else if (getSideRight() && getSideLeft() ){
        std::cout<<"RIGHT & LEFT : "<<std::endl;
        this->uart_.sendAll(this->fingerContours, this->perspective_Right.getPerspectiveMatrix(), this->perspective_Left.getPerspectiveMatrix());
    }
// Reset side flags for the next frame.
this->setSideRight(0);
this->setSideLeft(0);
finish = clock();
time =(double)(finish - start)/CLOCKS_PER_SEC;
std::cout << "Time: " << time << std::endl; // ------------END-----------
}
// Release OpenCL objects once the loop exits. (source_str from malloc is
// never freed — a one-shot leak at shutdown.)
clReleaseCommandQueue(command_queue);
clReleaseProgram(program);
clReleaseKernel(kernel);
clReleaseContext(context);
this->stop();

}

还有一些奇怪的事情,当我在CPU上抓取一帧的时间是5ms,而在GPU上是15ms,我不知道为什么它会增加。

我正在开发android xu4。

GPU计算中有时可能比CPU计算花费更多的时间。因为,GPU计算时,主进程将数据发送到GPU内存,GPU经过数学计算后将数据发送回CPU。因此,数据传输和接收回CPU需要时间。如果计算的缓冲区大小越大,传输时间越长,则GPU计算所需的时间越长。CUDNN库以及GPU处理器使其速度提高了许多倍。所以,如果你的程序没有使用CUDNN,它可能会变慢。

您可以尝试使用event来查看写入数据所需的时间和处理数据所需的时间。
一般来说,使用clFinish不是一个好主意。当您从Enqueue命令中获取事件并将其传递给Read Data时,读取的数据将在进程完成时立即发生。另一个问题是,你不必每次都重新创建缓冲区对象,只要你有相同的数据大小,你可以创建它一次,并保持重用它。

谢谢你的回答!!我发现了为什么感觉停顿:这是因为抓取帧的时间从5毫秒变成了15毫秒。这可能是因为我创建了一个缓冲区,减少了带宽。在GPU上编程比在CPU上快,但它影响图像/秒。这样做的原因是因为我做了两次(对于每个相机):

    // NOTE(review): quoted excerpt of the per-camera GPU sequence (run twice
    // per frame, once per camera); surrounding code is not visible here.
    // Blocking upload of the frame; any error is silently ignored.
    if(clEnqueueWriteBuffer(command_queue,input,CL_TRUE,0,sizeof(unsigned char)*data_size, original_Right.data ,0,NULL,NULL )!= CL_SUCCESS){};
    //set the argument list for the kernel command
    clSetKernelArg(kernel,0,sizeof(cl_mem), &input);
    clSetKernelArg(kernel,1,sizeof(cl_mem), &output);
    // One work-item per input byte.
    global = data_size  ;
    //enqueue the kernel command for execution
    clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, &global, NULL,0,NULL,NULL);
    // Host stall; the blocking read below already enforces completion.
    clFinish(command_queue);
    //copy the results from out of the  output buffer
    // NOTE(review): unlike the main loop (which reads sizeof(float)*data_size/3
    // into binary_Right), this reads sizeof(unsigned char)*data_size back into
    // original_Right.data — confirm the intended output size and destination.
    if(clEnqueueReadBuffer(command_queue,output,CL_TRUE ,0,sizeof(unsigned char)*data_size,original_Right.data,0,NULL,NULL )!= CL_SUCCESS){};