使用waveIn和waveOut在窗口中进行音频录制和实时播放
audio recording and real-time playing in windows using waveIn and waveOut
我想录制麦克风的音频输入,并在稍有延迟的情况下立即播放录制的声音。这将使用一个缓冲队列连续完成。
我让代码运行到几乎连续播放麦克风音频输入的地步,但在使用waveOut的整个音频输出中,有非常短但仍然明显的重复暂停。是什么导致了这些烦人的停顿?以及如何移除它们?
另一个问题是,我没有使用任何像互斥锁这样的东西,我依赖于这样一个事实,即waveIn和waveOut具有相同的采样率和相同的数据量,所以希望waveOut总是跟在waveIn后面,而waveIn不会写入正在播放的缓冲区。这会是个问题吗?
这是代码,它应该编译并运行。我只是运行了代码,而且它写得还远远不够好。任何关于改进代码的评论都是非常受欢迎的。
#include "stdafx.h"
#include <Windows.h>
#pragma comment(lib, "winmm.lib")
#include <iostream>
#include <fstream>
#include <sstream>
using namespace std;
// --- State shared between main(), the waveIn/waveOut callbacks and the two
// --- worker threads.  NOTE(review): accessed from several threads with no
// --- synchronization; see comments on the individual fields.
HANDLE hEvent_BufferReady;      // auto-reset: set by myWaveInProc on WIM_DATA (capture buffer full)
HANDLE hEvent_FinishedPlaying;  // auto-reset: set by waveOutProc on WOM_DONE (playback buffer done)
#define Samplerate 44100        // capture/playback sample rate in Hz
#define nSec 1                  // NOTE(review): unused in this listing
int _iBuf;                      // index of the buffer currently being captured (ring position)
int _iplaying;                  // index of the buffer currently being played back
unsigned long result;           // last MMRESULT; shared, overwritten by every call — value is unreliable
HWAVEIN hWaveIn;                // capture device handle
HWAVEOUT hWaveOut;              // playback device handle
WAVEFORMATEX pFormat;           // PCM format shared by capture and playback
enum { NUM_BUF = 3 };           // number of buffers in the capture/playback ring
WAVEHDR _header [NUM_BUF];      // one WAVEHDR per ring buffer, reused for both waveIn and waveOut
// Worker thread: waits for the "capture buffer full" event, hands the freshly
// filled buffer to waveOut for playback, then queues the next ring buffer for
// capture.  ivalue is unused (standard CreateThread signature).
DWORD WINAPI RecordingWaitingThread(LPVOID ivalue)
{
while(1)
{
// Signalled by myWaveInProc each time waveIn fills _header[_iBuf] (WIM_DATA).
WaitForSingleObject(hEvent_BufferReady,INFINITE);
// Release the header from the input device before reusing it for output.
result = waveInUnprepareHeader (hWaveIn, &_header[_iBuf], sizeof (WAVEHDR));
// NOTE(review): _iBuf/_iplaying are shared with the callbacks and
// PlayingWaitingThread with no synchronization — a data race.  It only works
// as long as playback strictly trails capture by less than one buffer.
_iplaying = _iBuf;
result = waveOutPrepareHeader(hWaveOut, &_header[_iBuf], sizeof(WAVEHDR));
result = waveOutWrite(hWaveOut, &_header[_iBuf], sizeof(WAVEHDR)); // play audio
// Advance the ring and queue the next buffer for capture.
++_iBuf;
if (_iBuf == NUM_BUF) _iBuf = 0;
result = waveInPrepareHeader(hWaveIn, & _header[_iBuf], sizeof(WAVEHDR));
result = waveInAddBuffer (hWaveIn, & _header[_iBuf], sizeof (WAVEHDR));
// NOTE(review): between WIM_DATA firing and the waveInAddBuffer above, the
// capture device has NO buffer queued — that gap is a likely cause of the
// short periodic dropouts the question describes.  Keeping at least two
// buffers queued with the driver at all times avoids it.
}
// Unreachable: the loop above never exits.
return 0;
}
// Worker thread: waits until waveOut reports a buffer finished (signalled by
// waveOutProc on WOM_DONE) and releases that buffer's output preparation.
// ivalue is unused (standard CreateThread signature).
DWORD WINAPI PlayingWaitingThread(LPVOID ivalue)
{
while(1){
WaitForSingleObject(hEvent_FinishedPlaying,INFINITE);
// NOTE(review): _iplaying is written by RecordingWaitingThread with no
// synchronization; if playback ever lags by more than one buffer this
// unprepares the wrong header.  The finished WAVEHDR* is delivered to the
// waveOutProc callback in dwParam1 — forwarding that would be race-free.
waveOutUnprepareHeader(hWaveOut, &_header[_iplaying], sizeof(WAVEHDR));
}
return 0; // BUG FIX: function is declared to return DWORD but had no return
          // statement ("not all control paths return a value"); unreachable.
}
// waveOut callback: signals PlayingWaitingThread when a buffer finishes
// playing.  Runs in the driver's callback context, so it does nothing but
// SetEvent (which is explicitly allowed there).
// BUG FIX: the instance/param arguments of a waveOutProc are DWORD_PTR, not
// DWORD — declaring them DWORD truncates pointer-sized values on 64-bit
// builds and makes the function-pointer cast at waveOutOpen undefined.
static void CALLBACK waveOutProc(HWAVEOUT hWaveOut, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2 )
{
if(uMsg != WOM_DONE)
return;
SetEvent(hEvent_FinishedPlaying);
}
// waveIn callback: signals RecordingWaitingThread when a capture buffer has
// been filled (WIM_DATA).  Only SetEvent is called here, which is one of the
// few calls permitted inside a waveform callback.
// BUG FIX: the instance/param arguments of a waveInProc are DWORD_PTR, not
// DWORD — DWORD truncates pointer-sized values on 64-bit builds.
void CALLBACK myWaveInProc(HWAVEIN hwi, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2)
{
if(uMsg != WIM_DATA)
return;
SetEvent(hEvent_BufferReady);
}
int main(int argc, _TCHAR* argv[])
{
hEvent_BufferReady=CreateEvent(NULL,FALSE, FALSE, NULL);
hEvent_FinishedPlaying = CreateEvent(NULL,FALSE, FALSE, NULL);
pFormat.wFormatTag = WAVE_FORMAT_PCM; // simple, uncompressed format
pFormat.nChannels = 1; // 1=mono, 2=stereo
pFormat.nSamplesPerSec = Samplerate;
pFormat.wBitsPerSample = 16; // 16 for high quality, 8 for telephone-grade
pFormat.nBlockAlign = pFormat.nChannels*pFormat.wBitsPerSample/8;
pFormat.nAvgBytesPerSec = (pFormat.nSamplesPerSec)*(pFormat.nChannels)*(pFormat.wBitsPerSample)/8;
pFormat.cbSize=0;
short int *_pBuf;
size_t bpbuff =4000;//= (pFormat.nSamplesPerSec) * (pFormat.nChannels) * (pFormat.wBitsPerSample)/8;
_pBuf = new short int [bpbuff * NUM_BUF];
result = waveInOpen(&hWaveIn, WAVE_MAPPER,&pFormat, (DWORD)myWaveInProc, 0L, CALLBACK_FUNCTION);
result = waveOutOpen(&hWaveOut, WAVE_MAPPER, &pFormat, (DWORD_PTR)waveOutProc, 0, CALLBACK_FUNCTION);
// initialize all headers in the queue
for ( int i = 0; i < NUM_BUF; i++ )
{
_header[i].lpData = (LPSTR)&_pBuf [i * bpbuff];
_header[i].dwBufferLength = bpbuff*sizeof(*_pBuf);
_header[i].dwFlags = 0L;
_header[i].dwLoops = 0L;
}
DWORD myThreadID;
DWORD myThreadIDPlay;
HANDLE hThread;
HANDLE hThreadPlay;
hThread = CreateThread(NULL, 0, RecordingWaitingThread,NULL,0,&myThreadID);
hThreadPlay = CreateThread(NULL, 0, PlayingWaitingThread,NULL,0,&myThreadIDPlay);
_iBuf = 0;
waveInPrepareHeader(hWaveIn, & _header[_iBuf], sizeof(WAVEHDR));
waveInAddBuffer (hWaveIn, & _header[_iBuf], sizeof (WAVEHDR));
waveInStart(hWaveIn);
getchar();
waveInClose(hWaveIn);
waveOutClose(hWaveOut);
CloseHandle(hThread);
CloseHandle(hThreadPlay);
CloseHandle(hEvent_BufferReady);
CloseHandle(hEvent_FinishedPlaying);
return 0;
}
程序的问题在于你声明的 bpbuff 太小。只要改用 size_t bpbuff = 4410; 来声明它，就能消除音频流中那些重复出现的中断。
顺便说一句,我认为你可以摆脱这些线程方法,让你的代码变得更简单,就像下面这样:
#include "stdafx.h"
#include <Windows.h>
#pragma comment(lib, "winmm.lib")
#include <iostream>
#include <fstream>
#include <sstream>
using namespace std;
// --- Shared state for the simplified (callback-only) version ---
#define Samplerate 44100          // capture/playback sample rate in Hz
static HWAVEIN hWaveIn;           // capture device handle (used inside myWaveInProc)
static HWAVEOUT hWaveOut;         // playback device handle (used inside myWaveInProc)
enum { NUM_BUF = 3 };             // number of buffers in the capture/playback ring
WAVEHDR _header [NUM_BUF];        // one WAVEHDR per ring buffer, shared by waveIn and waveOut
// waveIn callback for the simplified version: every time a capture buffer is
// filled, immediately hand it to waveOut for playback and queue the next ring
// buffer for capture.
// BUG FIX: params are DWORD_PTR on Win64, and — more importantly — the driver
// also delivers WIM_OPEN and WIM_CLOSE to this callback; the original called
// waveOutWrite unconditionally, playing a buffer that was never filled.
void CALLBACK myWaveInProc(HWAVEIN hwi, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2)
{
static int _iBuf; // ring position; zero-initialized (static storage)
if(uMsg != WIM_DATA)
return;
// NOTE(review): MSDN forbids calling other waveform functions from inside a
// waveform callback — waveOutWrite/waveInAddBuffer here risk deadlock.  A
// safer design sets an event and does this work on a worker thread (as in
// the question's original code).
waveOutWrite(hWaveOut, &_header[_iBuf], sizeof(WAVEHDR)); // play audio
++_iBuf;
if (_iBuf == NUM_BUF) _iBuf = 0;
waveInAddBuffer (hWaveIn, & _header[_iBuf], sizeof (WAVEHDR));
}
int main(int argc, _TCHAR* argv[])
{
WAVEFORMATEX pFormat;
pFormat.wFormatTag = WAVE_FORMAT_PCM; // simple, uncompressed format
pFormat.nChannels = 1; // 1=mono, 2=stereo
pFormat.nSamplesPerSec = Samplerate;
pFormat.wBitsPerSample = 16; // 16 for high quality, 8 for telephone-grade
pFormat.nBlockAlign = pFormat.nChannels*pFormat.wBitsPerSample/8;
pFormat.nAvgBytesPerSec = (pFormat.nSamplesPerSec)*(pFormat.nChannels)*(pFormat.wBitsPerSample)/8;
pFormat.cbSize=0;
short int *_pBuf;
size_t bpbuff = 4410;//= (pFormat.nSamplesPerSec) * (pFormat.nChannels) * (pFormat.wBitsPerSample)/8;
_pBuf = new short int [bpbuff * NUM_BUF];
waveInOpen(&hWaveIn, WAVE_MAPPER,&pFormat, (DWORD)myWaveInProc, 0L, CALLBACK_FUNCTION);
waveOutOpen(&hWaveOut, WAVE_MAPPER, &pFormat, (DWORD_PTR)nullptr, 0, CALLBACK_FUNCTION);
// initialize all headers in the queue
for ( int i = 0; i < NUM_BUF; i++ )
{
_header[i].lpData = (LPSTR)&_pBuf [i * bpbuff];
_header[i].dwBufferLength = bpbuff*sizeof(*_pBuf);
_header[i].dwFlags = 0L;
_header[i].dwLoops = 0L;
waveInPrepareHeader(hWaveIn, & _header[i], sizeof(WAVEHDR));
}
waveInAddBuffer (hWaveIn, & _header[0], sizeof (WAVEHDR));
waveInStart(hWaveIn);
getchar();
waveInClose(hWaveIn);
waveOutClose(hWaveOut);
delete _pBuf;
return 0;
}
这段代码用更少的代码执行完全相同的任务。干杯,伙计!
相关文章:
- 媒体基金会:WavSink以比实时消耗率更快的速度处理音频-是否可以实时限制后台处理
- C# 的垃圾回收会给实时音频应用程序带来问题吗?
- C++从字节数组实时播放音频
- 实时更改音频文件的速度
- 如何在SDL2音频流数据上执行实时FFT
- C++精确的44100Hz时钟,用于实时音频合成
- VoIP:如何从QT多媒体中的相机中捕获实时音频/视频流字节
- 需要一个音频分析库来从音频文件中创建实时反馈
- C++ 是使用 OpenAL 进行实时 PCM fft 音频处理
- 使用waveIn和waveOut在窗口中进行音频录制和实时播放
- 实时录制/转换音频数据到WAV
- 实时音频应用程序,提高性能
- 在哪里使用c++和qt将Wav标头添加到实时音频服务器中的数据
- Live555可在一个RTSP流中流式传输实时视频和音频
- 很好的实时音频库(发送和接收)
- 如何从Tizen上的实时FFT中获得最准确的音频数据?
- 哪些实时c++音频库与Xcode 4一起工作
- 实时音频处理
- 实时音频处理
- 实时音频,快速循环中的临时缓冲区,不同的方法