How to resize without interpolation (zero-padding) in OpenCV?

Is there an efficient way to resize an image in OpenCV without using any interpolation? Instead of the classic resize, I want my image to remap its pixels into a larger image, with everything else padded with 0.

That is, remap all the pixels into a larger image rather than doing a conventional "resize".

E.g. expanding img1 below by a factor of 2 into img2:

img1 = [ 1, 2, 3,
         4, 5, 6,
         7, 8, 9 ]
cv::resize(img1, img2, cv::Size(6, 6));
img2 = [ 1, 0, 2, 0, 3, 0,
         0, 0, 0, 0, 0, 0,
         4, 0, 5, 0, 6, 0,
         0, 0, 0, 0, 0, 0,
         7, 0, 8, 0, 9, 0,
         0, 0, 0, 0, 0, 0 ]

I know the obvious way is to just use a loop, but I'm wondering whether there is a more efficient way using OpenCV calls?
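For concreteness, a minimal sketch of that straightforward loop (assuming a fixed 2x scale and a CV_8UC1 image; expand_with_zeros is just a placeholder name):

#include <opencv2/opencv.hpp>
#include <cstdint>

cv::Mat expand_with_zeros(const cv::Mat& src)
{
    // Allocate the zero-filled destination and copy each source pixel
    // into every second row and column.
    cv::Mat dst = cv::Mat::zeros(src.rows * 2, src.cols * 2, CV_8UC1);
    for (int r = 0; r < src.rows; ++r) {
        for (int c = 0; c < src.cols; ++c) {
            dst.at<uint8_t>(r * 2, c * 2) = src.at<uint8_t>(r, c);
        }
    }
    return dst;
}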

One option that comes to mind would be to use cv::resize with INTER_NEAREST and then mask out the unwanted pixels.

Example:

#include <opencv2/opencv.hpp>
#include <cstdint>
#include <iostream>
int main()
{
    cv::Mat m1((cv::Mat_<uint8_t>(3, 3) << 1, 2, 3, 4, 5, 6, 7, 8, 9));
    std::cout << "Input:n" << m1 << "nn";
    cv::Mat mask((cv::Mat_<uint8_t>(2, 2) << 255, 0, 0, 0));
    mask = cv::repeat(mask, m1.rows, m1.cols);
    std::cout << "Mask:n" << mask << "nn";
    cv::Mat m2;
    cv::resize(m1, m2, cv::Size(), 2, 2, cv::INTER_NEAREST);
    std::cout << "Resized:n" << m2 << "nn";
    cv::bitwise_and(m2, mask, m2);
    std::cout << "Masked:n" << m2 << "nn";
}

Console output:

Input:
[  1,   2,   3;
   4,   5,   6;
   7,   8,   9]
Mask:
[255,   0, 255,   0, 255,   0;
   0,   0,   0,   0,   0,   0;
 255,   0, 255,   0, 255,   0;
   0,   0,   0,   0,   0,   0;
 255,   0, 255,   0, 255,   0;
   0,   0,   0,   0,   0,   0]
Resized:
[  1,   1,   2,   2,   3,   3;
   1,   1,   2,   2,   3,   3;
   4,   4,   5,   5,   6,   6;
   4,   4,   5,   5,   6,   6;
   7,   7,   8,   8,   9,   9;
   7,   7,   8,   8,   9,   9]
Masked:
[  1,   0,   2,   0,   3,   0;
   0,   0,   0,   0,   0,   0;
   4,   0,   5,   0,   6,   0;
   0,   0,   0,   0,   0,   0;
   7,   0,   8,   0,   9,   0;
   0,   0,   0,   0,   0,   0]

Update

If we eliminate the part of Miki's code that is unnecessary for our particular case, we pretty much reduce it to a simple loop.

Doing some quick comparisons, it turns out this is somewhat faster.

#include <opencv2/opencv.hpp>
#include <chrono>
#include <cstdint>
#include <iostream>
cv::Mat resize_1(cv::Mat image)
{
    cv::Mat result(cv::Mat::zeros(image.rows * 2, image.cols * 2, CV_8UC1));
    for (int ra(0); ra < image.rows; ++ra) {
        for (int ca = 0; ca < image.cols; ++ca) {
            result.at<uint8_t>(ra * 2, ca * 2) = image.at<uint8_t>(ra, ca);
        }
    }
    return result;
}
cv::Mat resize_2(cv::Mat image)
{
    cv::Mat mask((cv::Mat_<uint8_t>(2, 2) << 255, 0, 0, 0));
    mask = cv::repeat(mask, image.rows, image.cols);
    cv::Mat result;
    cv::resize(image, result, cv::Size(), 2, 2, cv::INTER_NEAREST);
    cv::bitwise_and(result, mask, result);
    return result;
}
template<typename T>
void timeit(T f)
{
    using std::chrono::high_resolution_clock;
    using std::chrono::duration_cast;
    using std::chrono::microseconds;
    cv::Mat m1((cv::Mat_<uint8_t>(3, 3) << 1, 2, 3, 4, 5, 6, 7, 8, 9));
    m1 = cv::repeat(m1, 1024, 1024);
    high_resolution_clock::time_point t1 = high_resolution_clock::now();
    for (uint32_t i(0); i < 256; ++i) {
        cv::Mat result = f(m1);
    }
    high_resolution_clock::time_point t2 = high_resolution_clock::now();
    auto duration = duration_cast<microseconds>(t2 - t1).count();
    double t_ms(static_cast<double>(duration) / 1000.0);
    std::cout
        << "Total = " << t_ms << " msn"
        << "Iteration = " << (t_ms / 256) << " msn"
        << "FPS = " << (256 / t_ms * 1000.0) << "n";
}
int main()
{
    timeit(&resize_1);
    timeit(&resize_2);
}

Timings:

resize_1

Total = 6344.86 ms
Iteration = 24.7846 ms
FPS = 40.3476

resize_2

Total = 7271.31 ms
Iteration = 28.4036 ms
FPS = 35.2068

Update 2

A parallelized version:

class ResizeInvoker : public cv::ParallelLoopBody
{
public:
    ResizeInvoker(cv::Mat const& src, cv::Mat& dst)
        : image(src)
        , result(dst)
    {
    }
    void operator()(const cv::Range& range) const
    {
        for (int y(range.start); y < (range.end); ++y) {
            for (int x(0); x < image.cols; ++x) {
                result.at<uint8_t>(y * 2, x * 2) = image.at<uint8_t>(y, x);
            }
        }
    }
    cv::Mat const& image;
    cv::Mat& result;
};
cv::Mat resize_3(cv::Mat image)
{
    cv::Mat result(cv::Mat::zeros(image.rows * 2, image.cols * 2, CV_8UC1));
    ResizeInvoker loop_body(image, result);
    cv::parallel_for_(cv::Range(0, image.rows)
        , loop_body
        , result.total() / (double)(1 << 16));
    return result;
}

Timings:

resize_3

Total = 3876.63 ms
Iteration = 15.1431 ms
FPS = 66.0367

Update 3

We can do a little better if we use raw pointers in the invoker:

void operator()(const cv::Range& range) const
{
    for (int y(range.start); y < (range.end); ++y) {
        uint8_t* D = result.data + result.step * y * 2;
        uint8_t const* S = image.data + image.step * y;
        for (int x(0); x < image.cols; ++x) {
            D[x * 2] = S[x];
        }
    }
}

Timings:

Total = 3387.87 ms
Iteration = 13.2339 ms
FPS = 75.5636

You can use the Kronecker product of your image and a pattern like:

1, 0
0, 0

The result is:

Input:
[1, 2, 3;
 4, 5, 6;
 7, 8, 9]
Output:
[1, 0, 2, 0, 3, 0;
 0, 0, 0, 0, 0, 0;
 4, 0, 5, 0, 6, 0;
 0, 0, 0, 0, 0, 0;
 7, 0, 8, 0, 9, 0;
 0, 0, 0, 0, 0, 0]

Code:

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
Mat1b kron(const Mat1b& A, const Mat1b& B)
{
    Mat1b K(A.rows * B.rows, A.cols * B.cols, uchar(0));
    for (int ra = 0; ra < A.rows; ++ra)
    {
        for (int ca = 0; ca < A.cols; ++ca)
        {
            K(Range(ra*B.rows, (ra + 1)*B.rows), Range(ca*B.cols, (ca + 1)*B.cols)) = B.mul(A(ra, ca));
        }
    }
    return K;
}
int main()
{
    Mat1b img = (Mat1b(3, 3) << 1, 2, 3, 4, 5, 6, 7, 8, 9);
    std::cout << "Input:n" << img << "nn";
    // Define the pattern
    Mat1b pattern = (Mat1b(2, 2) << 1, 0, 
                                    0, 0);
    Mat1b out = kron(img, pattern);
    std::cout << "Output:n" << out << "nn";
    return 0;
}

OpenCV doesn't implement the Kronecker product, so you need to write a custom function. For a more general implementation that works with all (single-channel) data types, have a look here.
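In case it helps, here is one possible sketch of such a generalization for any single-channel depth (kron_generic is just an illustrative name, not the linked implementation; it reads the scale factors through a CV_64F copy of A):

#include <opencv2/opencv.hpp>

cv::Mat kron_generic(const cv::Mat& A, const cv::Mat& B)
{
    CV_Assert(A.channels() == 1 && B.channels() == 1);
    // Read A's elements through a CV_64F copy so any input depth is handled uniformly.
    cv::Mat Ad;
    A.convertTo(Ad, CV_64F);
    cv::Mat K = cv::Mat::zeros(A.rows * B.rows, A.cols * B.cols, B.type());
    for (int ra = 0; ra < A.rows; ++ra)
    {
        for (int ca = 0; ca < A.cols; ++ca)
        {
            // convertTo with scale factor A(ra, ca) writes A(ra, ca) * B into the
            // matching block of K; sizes and types match, so the ROI is not reallocated.
            cv::Mat block = K(cv::Rect(ca * B.cols, ra * B.rows, B.cols, B.rows));
            B.convertTo(block, B.type(), Ad.at<double>(ra, ca));
        }
    }
    return K;
}

With the 2x2 pattern above, kron_generic(img, pattern) gives the same zero-padded output as kron.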


I found @Dan Masek's approach to be faster. That's because my kron implementation is not optimized. I expect this approach can work very well if implemented efficiently.

Thought of sharing a somewhat different approach. I don't know how efficient this would be compared to the other methods, but at least it uses OpenCV calls without any explicit loops, and you can easily use arbitrary scale factors for x and y.

First convert the image to a floating-point type, then scale it up using warpAffine (with linear interpolation). Resize the same image with nearest-neighbor interpolation. Compare the two results element-wise to obtain a mask, and use that mask to copy the relevant elements from either of the resized images.

Here is the code and some results I got:

uchar data[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
Mat im(3, 3, CV_8U, data);
im.convertTo(im, CV_32F);
// x and y scale
int xscale = 2, yscale = 2;
Size size(im.cols * xscale, im.rows * yscale);
float tr[] = {(float)xscale, 0, 0, 0, (float)yscale, 0};
Mat m(2, 3, CV_32F, tr);    // transformation matrix
Mat resized1, resized2;
warpAffine(im, resized1, m, size);  // affine scaling with linear interpolation
resize(im, resized2, size, 0, 0, INTER_NEAREST);    // resize with nearest neighbor
// get the mask
Mat resized = resized1 == resized2;
// copy the pixels
resized1.copyTo(resized, resized);
cout << "image:n" << im << endl;
cout << "M:n" << m << endl;
cout << "affine(scaled):n" << resized1 << endl;
cout << "resized:n" << resized2 << endl;
cout << "mask:n" << resized << endl;
cout << "output:n" << resized << endl;

For xscale = 2, yscale = 2:

image:
[1, 2, 3;
  4, 5, 6;
  7, 8, 9]
M:
[2, 0, 0;
  0, 2, 0]
affine(scaled):
[1, 1.5, 2, 2.5, 3, 1.5;
  2.5, 3, 3.5, 4, 4.5, 2.25;
  4, 4.5, 5, 5.5, 6, 3;
  5.5, 6, 6.5, 7, 7.5, 3.75;
  7, 7.5, 8, 8.5, 9, 4.5;
  3.5, 3.75, 4, 4.25, 4.5, 2.25]
resized:
[1, 1, 2, 2, 3, 3;
  1, 1, 2, 2, 3, 3;
  4, 4, 5, 5, 6, 6;
  4, 4, 5, 5, 6, 6;
  7, 7, 8, 8, 9, 9;
  7, 7, 8, 8, 9, 9]
mask:
[1, 0, 2, 0, 3, 0;
  0, 0, 0, 0, 0, 0;
  4, 0, 5, 0, 6, 0;
  0, 0, 0, 0, 0, 0;
  7, 0, 8, 0, 9, 0;
  0, 0, 0, 0, 0, 0]
output:
[1, 0, 2, 0, 3, 0;
  0, 0, 0, 0, 0, 0;
  4, 0, 5, 0, 6, 0;
  0, 0, 0, 0, 0, 0;
  7, 0, 8, 0, 9, 0;
  0, 0, 0, 0, 0, 0]

For xscale = 4, yscale = 3:

output:
[1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0;
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0;
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0;
  4, 0, 0, 0, 5, 0, 0, 0, 6, 0, 0, 0;
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0;
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0;
  7, 0, 0, 0, 8, 0, 0, 0, 9, 0, 0, 0;
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0;
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]