以下是在 Windows 平台上使用 WASAPI 播放 PCM 文件的示例代码:
#include <windows.h>      // COM init, CreateEvent / WaitForSingleObject, Sleep
#include <mmdeviceapi.h>  // IMMDeviceEnumerator / IMMDevice / eRender (was missing)
#include <Audioclient.h>
#include <avrt.h>         // AvSetMmThreadCharacteristics (was missing)

#include <algorithm>
#include <cstring>
#include <fstream>
#include <iostream>
#include <vector>
// Audio stream parameters for the raw PCM file (16-bit samples are assumed
// by the WAVEFORMATEX setup in main()). constexpr instead of #define: typed,
// scoped, debugger-visible.
constexpr unsigned SAMPLE_RATE = 44100; // sample rate in Hz
constexpr unsigned CHANNELS = 2;        // channel count (stereo)
int main() {
// 打开PCM文件
std::ifstream pcmFile("audio.pcm", std::ios::binary);
if (!pcmFile.is_open()) {
std::cout << "Failed to open PCM file." << std::endl;
return -1;
}
// 读取PCM数据到缓冲区
pcmFile.seekg(0, std::ios_base::end);
std::streampos fileSize = pcmFile.tellg();
pcmFile.seekg(0, std::ios_base::beg);
std::vector<char> buffer(fileSize);
pcmFile.read(buffer.data(), fileSize);
// 初始化COM组件
CoInitialize(nullptr);
// 创建音频客户端对象
IAudioClient* audioClient = nullptr;
HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL,
__uuidof(IMMDeviceEnumerator), (void**)&enumerator);
if (FAILED(hr)) {
std::cout << "Failed to create instance of MMDeviceEnumerator." << std::endl;
return -1;
}
hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);
if (FAILED(hr)) {
enumerator->Release();
std::cout << "Failed to get default audio endpoint." << std::endl;
return -1;
}
hr = device->Activate(__uuidof(IAudioClient), CLSCTX_ALL, nullptr, (void**)&audioClient);
if (FAILED(hr)) {
enumerator->Release();
device->Release();
std::cout << "Failed to activate audio client." << std::endl;
return -1;
}
// 配置音频格式
WAVEFORMATEX format = {};
format.wFormatTag = WAVE_FORMAT_PCM;
format.nChannels = CHANNELS;
format.nSamplesPerSec = SAMPLE_RATE;
format.wBitsPerSample = 16; // 16位
format.nBlockAlign = format.nChannels * (format.wBitsPerSample / 8);
format.nAvgBytesPerSec = format.nSamplesPerSec * format.nBlockAlign;
hr = audioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
0, 0, &format, nullptr);
if (FAILED(hr)) {
audioClient->Release();
enumerator->Release();
device->Release();
std::cout << "Failed to initialize audio client." << std::endl;
return -1;
}
// 获取缓冲区大小
UINT32 bufferFrameCount;
hr = audioClient->GetBufferSize(&bufferFrameCount);
if (FAILED(hr)) {
audioClient->Release();
enumerator->Release();
device->Release();
std::cout << "Failed to get buffer size." << std::endl;
return -1;
}
// 获取渲染器接口
IAudioRenderClient* renderClient = nullptr;
hr = audioClient->GetService(__uuidof(IAudioRenderClient), (void**)&renderClient);
if (FAILED(hr)) {
audioClient->Release();
enumerator->Release();
device->Release();
std::cout << "Failed to get render client." << std::endl;
return -1;
}
// 启动音频流
hr = audioClient->Start();
if (FAILED(hr)) {
renderClient->Release();
audioClient->Release();
enumerator->Release();
device->Release();
std::cout << "Failed to start audio client." << std::endl;
return -1;
}
// 写入PCM数据到缓冲区
BYTE* data;
hr = renderClient->GetBuffer(bufferFrameCount, &data);
if (SUCCEEDED(hr)) {
memcpy(data, buffer.data(), buffer.size());
hr = renderClient->ReleaseBuffer(bufferFrameCount, 0);
}
// 等待直到播放完毕
DWORD taskIndex = 0;
HANDLE taskHandle = AvSetMmThreadCharacteristics(L"Audio", &taskIndex);
HANDLE eventHandle = CreateEvent(NULL, FALSE, FALSE, NULL);
while (true) {
DWORD waitResult = WaitForSingleObject(eventHandle, INFINITE);
if (waitResult == WAIT_OBJECT_0) {
break;
}
}
CloseHandle(eventHandle);
// 停止音频流
audioClient->Stop();
// 释放资源
renderClient->Release();
audioClient->Release();
device->Release();
enumerator->Release();
CoUninitialize();
return 0;
}
在上述示例中,我们使用了WASAPI API来创建音频客户端对象并初始化音频流。然后,我们从PCM文件中读取数据,并将其写入音频缓冲区。最后,我们启动音频流并等待直到播放完毕。
请确保在编译时链接 Ole32.lib(COM)与 Avrt.lib(AvSetMmThreadCharacteristics 所需),
并将 audio.pcm 替换为实际的 PCM 文件路径(需为 16 位、44100 Hz、双声道的原始 PCM 数据,与代码中的格式设置一致)。
内容由零声教学AI助手提供,问题来源于学员提问