
OpenCV - how does the filter2D() method actually work?


I did look up the source code for filter2D but could not find it, and neither could Visual C++. Is there anyone here who knows the filter2D algorithm well? I know how it is supposed to work, but not how it actually works. I wrote my own filter2d() function to test things, and its results differ significantly from OpenCV's filter2D(). Here is my code:

Mat myfilter2d(Mat input, Mat filter){
    Mat dst = input.clone();
    cout << " filter data successfully found.  Rows:" << filter.rows << " cols:" << filter.cols << " channels:" << filter.channels() << "\n";
    cout << " input data successfully found.  Rows:" << input.rows << " cols:" << input.cols << " channels:" << input.channels() << "\n";
    for (int i = 0-(filter.rows/2); i < input.rows-(filter.rows/2); i++){
        for (int j = 0-(filter.cols/2); j < input.cols-(filter.cols/2); j++){  //adding k and l to i and j will make up the difference and allow us to process the whole image
            float filtertotal = 0;
            for (int k = 0; k < filter.rows; k++){
                for (int l = 0; l < filter.cols; l++){
                    if(i+k >= 0 && i+k < input.rows && j+l >= 0 && j+l < input.cols){  //don't try to process pixels off the edge of the map
                        float a = input.at<uchar>(i+k,j+l);
                        float b = filter.at<float>(k,l);
                        float product = a * b;
                        filtertotal += product;
                    }
                }
            }
            //filter all processed for this pixel, write it to dst
            dst.at<uchar>(i+(filter.rows/2),j+(filter.cols/2)) = filtertotal;
        }
    }
    return dst;
}

Does anyone see anything wrong with my implementation? (apart from it being slow)

Here is how I run it:

cvtColor(src,src_grey,CV_BGR2GRAY);
Mat dst = myfilter2d(src_grey,filter);
imshow("myfilter2d",dst);
filter2D(src_grey,dst2,-1,filter);
imshow("filter2d",dst2);

Here is my kernel:

float megapixelarray[basesize][basesize] = {
{1,1,-1,1,1},
{1,1,-1,1,1},
{1,1,1,1,1},
{1,1,-1,1,1},
{1,1,-1,1,1}
};
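
The question does not show how this array is turned into the Mat that both filter functions expect; one way to wrap it, assuming basesize is 5 (the Mat header shares the array's memory rather than copying it):

// Sketch: wrap the raw float array in a CV_32F Mat header (no data is copied,
// so megapixelarray must outlive filter).
Mat filter(basesize, basesize, CV_32F, megapixelarray);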

Here are the (substantially different) results:

Thoughts, anyone?

EDIT: Thanks to Brians' answer, I added this code:

//normalize the kernel so its sum = 1
Scalar mysum = sum(dst);
dst = dst / mysum[0];   //make sure it's not 0
dst = dst * -1;  //show negative

and filter2D worked better. Some filters gave an exact match, while others, like Sobel, failed miserably.

I am getting closer to the actual algorithm, but not there yet. Does anyone else have any ideas?
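
(A side note on the Sobel case: a Sobel kernel sums to zero, so dividing by its sum is not meaningful, and many of its responses are negative, which an unsigned 8-bit destination cannot represent. One sketch of how such a kernel can still be compared, by filtering into a float image and rescaling only for display; src_grey and filter are the objects from above:)

// Sketch: for zero-sum kernels such as Sobel, keep the signed responses in a
// float image, then take absolute values and saturate to 8 bit for display.
Mat response, display;
filter2D(src_grey, response, CV_32F, filter);
convertScaleAbs(response, display);
imshow("signed filter response", display);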

I think the problem is probably one of scale: if your input image is an 8-bit image, most of the time the convolution will produce a value that overflows the maximum value of 255.

In your implementation it looks like you are getting the wrapped-around value, while most OpenCV functions handle overflow by saturating to the maximum (or minimum) value. That explains why most of the output of the OpenCV function is white, and also why you get concentric shapes in your output.
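
For reference, the clamping the OpenCV functions apply is exposed as cv::saturate_cast; a minimal sketch of the difference (the plain assignment in the question relies on an ordinary float-to-uchar conversion instead):

// Sketch: ordinary conversion vs. OpenCV's saturating conversion.
float filtertotal = 300.0f;                            // too large for an 8-bit pixel
uchar plain     = static_cast<uchar>(filtertotal);     // out of range: result is not clamped
uchar saturated = saturate_cast<uchar>(filtertotal);   // clamped to 255, as filter2D does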

To account for this, normalize your megapixelarray filter by dividing every value by the sum of the whole filter (i.e. make sure that the sum of the filter values is 1):

For example, instead of this filter (sum = 10):

1 1 1
1 2 1
1 1 1

try this filter (sum = 1):

0.1 0.1 0.1
0.1 0.2 0.1
0.1 0.1 0.1
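
The same normalization can be done on the kernel Mat itself rather than by hand; a sketch, assuming kernel is a floating-point Mat with a non-zero sum:

// Sketch: divide the kernel by its own sum so the coefficients add up to 1.
double s = sum(kernel)[0];
if (s != 0)
    kernel /= s;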

Here is my solution for recreating filter2D manually:

#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
using namespace std;

int main(int argc, const char * argv[]) {
    Mat img;
    Mat img_conv;
    Mat my_kernel;
    Mat my_conv;
    // Check that the image is loaded correctly
    img = imread("my_image.jpg", CV_LOAD_IMAGE_COLOR);
    if (!img.data)
    {
        cout << "Could not open or find the image" << std::endl;
        return -1;
    }
    imshow("original image", img);
    img.convertTo(img, CV_64FC3);
    int kernel_size;   // permitted sizes: 3, 5, 7, 9 etc
    cout << "Select the size of kernel (it should be an odd number from 3 onwards): \n" << endl;
    cin >> kernel_size;
    // Defining the kernel here (note: the initializer lists below supply 9 values,
    // so they only fill the kernel completely when kernel_size == 3)
    int selection;
    cout << "Select the type of kernel:\n" << "1. Identity Operator \n2. Mean Filter \n3. Spatial shift \n4. Sharpening\n-> ";
    cin >> selection;
    switch (selection){
        case 1:
            my_kernel = (Mat_<double>(kernel_size, kernel_size) << 0, 0, 0, 0, 1, 0, 0, 0, 0);
            break;
        case 2:
            my_kernel = (Mat_<double>(kernel_size, kernel_size) << 1, 1, 1, 1, 1, 1, 1, 1, 1) / (kernel_size * kernel_size);
            break;
        case 3:
            my_kernel = (Mat_<double>(kernel_size, kernel_size) << 0, 0, 0, 0, 0, 1, 0, 0, 0);
            break;
        case 4:
            my_kernel = (Mat_<double>(kernel_size, kernel_size) << -1, -1, -1, -1, 17, -1, -1, -1, -1) / (kernel_size * kernel_size);
            break;
        default:
            cerr << "Invalid selection";
            return 1;
    }
    cout << "my kernel:\n " << my_kernel << endl;
    // Add a border of zeros around the original image, to avoid border problems during convolution
    img_conv = Mat(img.rows + my_kernel.rows - 1, img.cols + my_kernel.cols - 1, CV_64FC3, CV_RGB(0,0,0));
    for (int x = 0; x < img.rows; x++) {
        for (int y = 0; y < img.cols; y++) {
            img_conv.at<Vec3d>(x+1, y+1)[0] = img.at<Vec3d>(x,y)[0];
            img_conv.at<Vec3d>(x+1, y+1)[1] = img.at<Vec3d>(x,y)[1];
            img_conv.at<Vec3d>(x+1, y+1)[2] = img.at<Vec3d>(x,y)[2];
        }
    }
    // Perform the convolution
    my_conv = Mat(img.rows, img.cols, CV_64FC3, CV_RGB(0,0,0));
    for (int x = (my_kernel.rows-1)/2; x < img_conv.rows - ((my_kernel.rows-1)/2); x++) {
        for (int y = (my_kernel.cols-1)/2; y < img_conv.cols - ((my_kernel.cols-1)/2); y++) {
            double comp_1 = 0;
            double comp_2 = 0;
            double comp_3 = 0;
            for (int u = -(my_kernel.rows-1)/2; u <= (my_kernel.rows-1)/2; u++) {
                for (int v = -(my_kernel.cols-1)/2; v <= (my_kernel.cols-1)/2; v++) {
                    comp_1 = comp_1 + (img_conv.at<Vec3d>(x+u, y+v)[0] * my_kernel.at<double>(u + ((my_kernel.rows-1)/2), v + ((my_kernel.cols-1)/2)));
                    comp_2 = comp_2 + (img_conv.at<Vec3d>(x+u, y+v)[1] * my_kernel.at<double>(u + ((my_kernel.rows-1)/2), v + ((my_kernel.cols-1)/2)));
                    comp_3 = comp_3 + (img_conv.at<Vec3d>(x+u, y+v)[2] * my_kernel.at<double>(u + ((my_kernel.rows-1)/2), v + ((my_kernel.cols-1)/2)));
                }
            }
            my_conv.at<Vec3d>(x - ((my_kernel.rows-1)/2), y - (my_kernel.cols-1)/2)[0] = comp_1;
            my_conv.at<Vec3d>(x - ((my_kernel.rows-1)/2), y - (my_kernel.cols-1)/2)[1] = comp_2;
            my_conv.at<Vec3d>(x - ((my_kernel.rows-1)/2), y - (my_kernel.cols-1)/2)[2] = comp_3;
        }
    }
    my_conv.convertTo(my_conv, CV_8UC3);
    imshow("convolution - manual", my_conv);
    // Perform the filtering using the OpenCV function
    Mat dst;
    filter2D(img, dst, -1, my_kernel, Point(-1, -1), 0, BORDER_DEFAULT);
    dst.convertTo(dst, CV_8UC3);
    imshow("convolution - opencv", dst);

    waitKey();
    return 0;
}
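
One last point that bears on the original question: despite its name, filter2D computes correlation rather than convolution, i.e. the kernel is not flipped, and the manual loop above does the same thing, which is why the two results agree. The OpenCV documentation notes that a true convolution can be obtained by flipping the kernel and adjusting the anchor; a sketch using the names from the code above:

// Sketch: true convolution with filter2D = flip the kernel around both axes
// and mirror the anchor point (as described in the filter2D documentation).
Mat flipped;
flip(my_kernel, flipped, -1);   // -1 flips around both axes
Point anchor(my_kernel.cols - 1 - my_kernel.cols / 2,
             my_kernel.rows - 1 - my_kernel.rows / 2);
filter2D(img, dst, -1, flipped, anchor, 0, BORDER_DEFAULT);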