Multithreading, OpenMP,C
Multithreading, OpenMP,C
我在论坛上看到了下面的一段代码,但当我开始编译它时,我遇到了一些错误。。我想把从#pragma scop
到#pragma endscop
的区域平行。
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_fdtd_2d(int tmax,
int nx,
int ny,
DATA_TYPE POLYBENCH_2D(ex,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_2D(ey,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_2D(hz,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(_fict_,TMAX,tmax))
{
int t, i, j;
#pragma scop
#pragma omp parallel private (t,i,j)
{
#pragma omp master
{
for (t = 0; t < _PB_TMAX; t++)
{
#pragma omp for
for (j = 0; j < _PB_NY; j++)
ey[0][j] = _fict_[t];
#pragma omp barrier
#pragma omp for collapse(2) schedule(static)
for (i = 1; i < _PB_NX; i++)
for (j = 0; j < _PB_NY; j++)
ey[i][j] = ey[i][j] - 0.5*(hz[i][j]-hz[i-1][j]);
#pragma omp barrier
#pragma omp for collapse(2) schedule(static)
for (i = 0; i < _PB_NX; i++)
for (j = 1; j < _PB_NY; j++)
ex[i][j] = ex[i][j] - 0.5*(hz[i][j]-hz[i][j-1]);
#pragma omp barrier
#pragma omp for collapse(2) schedule(static)
for (i = 0; i < _PB_NX - 1; i++)
for (j = 0; j < _PB_NY - 1; j++)
hz[i][j] = hz[i][j] - 0.7* (ex[i][j+1] - ex[i][j] + ey[i+1][j] - ey[i][j]);
#pragma omp barrier
}
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int tmax = TMAX;
int nx = NX;
int ny = NY;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(ex,DATA_TYPE,NX,NY,nx,ny);
POLYBENCH_2D_ARRAY_DECL(ey,DATA_TYPE,NX,NY,nx,ny);
POLYBENCH_2D_ARRAY_DECL(hz,DATA_TYPE,NX,NY,nx,ny);
POLYBENCH_1D_ARRAY_DECL(_fict_,DATA_TYPE,TMAX,tmax);
/* Initialize array(s). */
init_array (tmax, nx, ny,
POLYBENCH_ARRAY(ex),
POLYBENCH_ARRAY(ey),
POLYBENCH_ARRAY(hz),
POLYBENCH_ARRAY(_fict_));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_fdtd_2d (tmax, nx, ny,
POLYBENCH_ARRAY(ex),
POLYBENCH_ARRAY(ey),
POLYBENCH_ARRAY(hz),
POLYBENCH_ARRAY(_fict_));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(nx, ny, POLYBENCH_ARRAY(ex),
POLYBENCH_ARRAY(ey),
POLYBENCH_ARRAY(hz)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(ex);
POLYBENCH_FREE_ARRAY(ey);
POLYBENCH_FREE_ARRAY(hz);
POLYBENCH_FREE_ARRAY(_fict_);
return 0;
}
错误如下:
stencils/fdtd-2d/fdtd-2dp.c:80:9: error: work-sharing region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp for
^
stencils/fdtd-2d/fdtd-2dp.c:83:9: error: barrier region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp barrier
^
stencils/fdtd-2d/fdtd-2dp.c:84:9: error: work-sharing region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp for collapse(2) schedule(static)
^
stencils/fdtd-2d/fdtd-2dp.c:88:9: error: barrier region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp barrier
^
stencils/fdtd-2d/fdtd-2dp.c:89:9: error: work-sharing region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp for collapse(2) schedule(static)
^
stencils/fdtd-2d/fdtd-2dp.c:93:9: error: barrier region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp barrier
^
stencils/fdtd-2d/fdtd-2dp.c:94:9: error: work-sharing region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp for collapse(2) schedule(static)
^
stencils/fdtd-2d/fdtd-2dp.c:98:9: error: barrier region may not be closely nested inside of work-sharing, critical, ordered, master or explicit task region
#pragma omp barrier
^
感谢您对我如何编译这篇文章的帮助。。
老实说,这是非常糟糕的OpenMP代码。它不考虑整个算法中的数据使用情况。你可能想要的是:
int t, i, j;
#pragma omp parallel private (t,i,j)
{
for (t = 0; t < _PB_TMAX; t++)
{
#pragma omp for nowait
for (j = 0; j < _PB_NY; j++)
ey[0][j] = _fict_[t];
#pragma omp for collapse(2) nowait schedule(static)
for (i = 1; i < _PB_NX; i++)
for (j = 0; j < _PB_NY; j++)
ey[i][j] = ey[i][j] - 0.5*(hz[i][j]-hz[i-1][j]);
#pragma omp for collapse(2) schedule(static)
for (i = 0; i < _PB_NX; i++)
for (j = 1; j < _PB_NY; j++)
ex[i][j] = ex[i][j] - 0.5*(hz[i][j]-hz[i][j-1]);
// #pragma omp barrier <- Implicit if nowait not specified
#pragma omp for collapse(2) schedule(static)
for (i = 0; i < _PB_NX - 1; i++)
for (j = 0; j < _PB_NY - 1; j++)
hz[i][j] = hz[i][j] - 0.7*(ex[i][j+1] - ex[i][j] + ey[i+1][j] - ey[i][j]);
// #pragma omp barrier <- Implicit if nowait not specified
}
}
应移除屏障,因为在没有指定nowait
的情况下for循环结束后,它们是隐式的。
此外,我认为前两个障碍应该完全消除,因为前三个循环之间没有线程依赖性——如果一个线程完成了它的部分循环,并立即开始了下一个循环的一部分,那么就不可能出现竞争条件。您可以添加nowait
子句来覆盖omp for
指令末尾的隐式屏障。
最后,如果_PB_NX
和_PB_NY
很大,那么您不太可能通过折叠嵌套循环来获得任何好处。我想去掉collapse(2)
可以稍微提高整个功能的性能。
希望这能有所帮助。
从代码中删除#pragma omp master
语句。这将解决编译问题。您可能不想"只"在主线程中运行该块,因为那样使用Open MP.
- OpenMP阵列性能较差
- OpenMP卸载说'fatal error: could not find accel/nvptx-none/mkoffload'
- 使用 GCC 卸载的 OpenMP 卸载失败,并出现"Ptx assembly aborted due to errors"
- OpenMP:并行更新数组总是需要减少数组吗
- 如何使用OpenMP并行这两个循环
- 从python调用openMP共享库时,未定义opnMP函数
- 如何使用OpenMP并行化此矩阵时间矢量运算
- 如何使用OpenMP使这个循环并行
- 如何通过替换顺序代码的while循环来添加OpenMP for循环
- 查找最近配对时的OpenMP竞赛条件
- 使用输入打破 OpenMP 中的循环
- 为什么 openmp 的并行不适用于矢量化色彩空间转换?
- 在 openmp 中,omp_get_thread_num是否绑定到物理线程?
- 在C++中使用并行化的预期速度是多少(不是 OpenMp,而是 <thread>)
- OpenMP 加上unordered_map<字符串、双字符串的缩减>
- OpenMP 与有序和关键指令并行
- 我使用 OpenMP 的线程越多,执行时间就越长,这是怎么回事?
- OpenMP for 循环并行性问题
- 两个连续的 OpenMP 并行区域会相互减慢速度
- 读取文件时无法使用 OpenMP 获得加速