Cross-compiling webrtc-audio-processing (PulseAudio, latest version 1.0) for ARM


Recently I have been studying the AEC3 in the latest release; the old 0.3.1 is just too dated. Searching Baidu and Google only turns up material based on 0.3.1, and I could not find a demo for version 1.0 either (the official site does not provide one at the moment). Having finished the task at work, I am writing this post to document the process and save other developers some of the pitfalls.

1. Download the source code and set up the development environment:

Building webrtc-audio-processing depends on linking against the abseil-cpp library.

git clone https://github.com/abseil/abseil-cpp.git

git clone https://gitlab.freedesktop.org/pulseaudio/webrtc-audio-processing.git
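These commands pull the current development branches. If you want to build exactly the 1.0 release this post is about, and an abseil-cpp revision that still builds with -std=c++14 (newer abseil releases have raised the minimum C++ standard), check out older tags; the tag names below are my best guesses, so confirm them with git tag first:

cd webrtc-audio-processing && git checkout v1.0 && cd ..
cd abseil-cpp && git checkout 20220623.1 && cd ..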

Check the versions of your build tools: cmake, meson and ninja.

The cross-compiler must support C++14. Instructions for installing these tools are easy to find online.
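A quick way to confirm the tools are present and recent enough (nothing project-specific here, just the standard version flags):

cmake --version
meson --version
ninja --version
arm-linux-gnueabihf-g++ --version   # GCC 5 or newer has full C++14 support; mine is 6.3.1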

2. Building abseil-cpp:

Edit the CMakeLists.txt file and add the following (xxx is a directory of my own whose real name I cannot publish here; change it to wherever your cross-compilation toolchain lives):

set(CMAKE_SYSTEM_NAME Linux)
SET(CMAKE_BUILD_TYPE "Release")
SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
set(CMAKE_CROSSCOMPILING ON)
set(ABSL_PROPAGATE_CXX_STD ON)
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14")

# arm 32
SET(CMAKE_C_COMPILER   /work/xxx/host-tools/gcc/gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf-gcc)
SET(CMAKE_CXX_COMPILER /work/xxx/host-tools/gcc/gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf-g++)
SET(CMAKE_FIND_ROOT_PATH /work/xxx/ramdisk/sysroot/sysroot-glibc-linaro-2.23-2017.05-arm-linux-gnueabihf/)
set(CMAKE_SYSTEM_PROCESSOR arm.v7)

# aarch64
#SET(CMAKE_C_COMPILER   /work/xxx/host-tools/gcc/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-gcc)
#SET(CMAKE_CXX_COMPILER /work/xxx/host-tools/gcc/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-g++)
#SET(CMAKE_FIND_ROOT_PATH /work/xxx/ramdisk/sysroot/sysroot-glibc-linaro-2.23-2017.05-aarch64-linux-gnu/)
#set(CMAKE_SYSTEM_PROCESSOR aarch64)
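An equivalent and somewhat cleaner option, if you prefer not to patch abseil's own CMakeLists.txt, is the standard CMake toolchain-file mechanism: put the same set(...) lines shown above into a separate file and pass it on the command line. This is only a sketch and the file name is my own:

# arm32-toolchain.cmake contains the same set(...) lines as above
cmake .. -DCMAKE_TOOLCHAIN_FILE=../arm32-toolchain.cmake -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_CXX_STANDARD=14 -DCMAKE_POSITION_INDEPENDENT_CODE=ON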
cd abseil-cpp/
mkdir build
cd build

For arm32 run:

cmake .. -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_CXX_STANDARD=14 -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SYSTEM_NAME=Linux -DCMAKE_SYSTEM_PROCESSOR=arm.v7

For aarch64:

cmake .. -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_CXX_STANDARD=14 -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SYSTEM_NAME=Linux -DCMAKE_SYSTEM_PROCESSOR=aarch64

Build and install:

make -j8 && make install
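Before installing, it is worth checking that the libraries really were built for ARM rather than for the host. From inside the build directory, something like the following should report ARM (or AArch64) as the machine; the library path is what a recent abseil CMake build lays out in its build tree, so adjust it if yours differs:

arm-linux-gnueabihf-readelf -h absl/base/libabsl_base.a | grep -m1 Machine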

Everything here is installed under /usr/local, and I suggest readers use the same directory. This cost me a lot of time: at first I installed into /usr, or into my own /work/install directory, and the webrtc-audio-processing build then failed. It may also depend on each system's environment variables; you would have to dig into meson and ninja to find the root cause.
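My best guess at the root cause (not verified in depth): the meson build resolves the abseil dependency through pkg-config and/or CMake's find_package, and both of those search /usr and /usr/local by default, so a custom prefix is simply never consulted. If you do need a custom prefix, exporting the standard search-path variables before running meson (or adding the prefix to pkg_config_libdir in the cross file) should be enough; the /work/install path below is only an example:

export PKG_CONFIG_PATH=/work/install/lib/pkgconfig:$PKG_CONFIG_PATH
export CMAKE_PREFIX_PATH=/work/install:$CMAKE_PREFIX_PATH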

3. Building pulseaudio webrtc-audio-processing

First, the build script:

#!/bin/bash
set -e
meson arm-build --prefix=/work/arm/webrtc-audio-processing/install --cross-file cross_file.txt
ninja -C arm-build
DESTDIR=/work/arm/webrtc-audio-processing/install ninja -C arm-build install
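One detail to be aware of with this script: ninja install prepends DESTDIR to the meson prefix rather than replacing it, so with both set to the same directory the installed files end up under a doubled path, roughly:

/work/arm/webrtc-audio-processing/install/work/arm/webrtc-audio-processing/install/lib/libwebrtc-audio-processing-1.so.1

If that is not what you want, drop the DESTDIR= part, or set the meson prefix to /usr and keep DESTDIR as a staging root.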

Now the cross-compilation file cross_file.txt (for a 64-bit build you need to change it yourself to cpu_family = 'aarch64', cpu = 'armv8a'):

[binaries]
c = 'arm-linux-gnueabihf-gcc'
cpp = 'arm-linux-gnueabihf-g++'
ar = 'arm-linux-gnueabihf-ar'
ld = 'arm-linux-gnueabihf-ld'
strip = 'arm-linux-gnueabihf-strip'
sys_root = '/work/xxx/ramdisk/sysroot/sysroot-glibc-linaro-2.23-2017.05-arm-linux-gnueabihf'
pkg_config_libdir = '/work/xxx/ramdisk/sysroot/sysroot-glibc-linaro-2.23-2017.05-arm-linux-gnueabihf/usr/lib/pkgconfig'
# This line is required, otherwise the build fails!
cmake = 'cmake'

[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7l'
endian = 'little'

[target_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7l'
endian = 'little'

[build_machine]
system = 'linux'
cpu_family = 'x86_64'
cpu = 'i686'
endian = 'little'
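For reference, the 64-bit variant of the same file would look roughly like this; the binary names follow the Linaro aarch64 toolchain from section 2, and meson mostly cares about cpu_family rather than the free-form cpu string:

[binaries]
c = 'aarch64-linux-gnu-gcc'
cpp = 'aarch64-linux-gnu-g++'
ar = 'aarch64-linux-gnu-ar'
strip = 'aarch64-linux-gnu-strip'
sys_root = '/work/xxx/ramdisk/sysroot/sysroot-glibc-linaro-2.23-2017.05-aarch64-linux-gnu'
pkg_config_libdir = '/work/xxx/ramdisk/sysroot/sysroot-glibc-linaro-2.23-2017.05-aarch64-linux-gnu/usr/lib/pkgconfig'
cmake = 'cmake'

[host_machine]
system = 'linux'
cpu_family = 'aarch64'
cpu = 'armv8a'
endian = 'little'

with [target_machine] changed the same way and [build_machine] left as above.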

Finally, edit meson.build and change the default build type option to 'buildtype=release' (alternatively, pass --buildtype=release on the meson command line).

Run the build script in the source directory and the build should succeed. After it finishes, run

arm-linux-gnueabihf-strip libwebrtc-audio-processing-1.so.1

to reduce the size of the .so file.

4. Copy all the header files from /usr/local/include/absl/ and from the include directory of the webrtc-audio-processing install into a directory of your own.

Test code (the paths after -I and -L depend on where you copied the absl and webrtc install files to):

/*
Build command:
arm-linux-gnueabihf-g++ -o demo demo.cc -I ./include/webrtc-audio-processing-1/ -I ./include/webrtc-audio-processing-1/modules/ -L ./lib/ -I /work/arm/abseil-cpp/install/include/ -lwebrtc-audio-processing-1

Running it on the board:
[root@xxx]/mnt/nfs# cp libwebrtc-audio-processing-1.so.1 /mnt/system/usr/lib/
[root@xxx]/mnt/nfs# ./demo aec_source_far16k.pcm aec_source_near16k.pcm webrtc.pcm
samples_per_frame =160
bytes_per_frame =320
delay_ms =95
*/
#include "api/audio/echo_canceller3_config.h"
#include "api/audio/echo_control.h"
#include "audio_processing/include/audio_processing.h"
#include <iostream>

using namespace webrtc;
using namespace std;

int main(int argc, char* argv[])
{
    FILE *fd_far  = NULL;
    FILE *fd_near = NULL;
    FILE *fd_out  = NULL;
    constexpr int16_t kAudioLevel = 10000;
    constexpr int kSampleRateHz = 16000;
    constexpr int kNumChannels = 1;

    fd_far  = fopen(argv[1], "rb");
    fd_near = fopen(argv[2], "rb");
    fd_out  = fopen(argv[3], "wb");
    if (!fd_far || !fd_near || !fd_out) {
        cout << "fopen file fail!" << endl;
    }
#if 1
    // APM accepts only linear PCM audio data in chunks of 10 ms.
    int samples_per_frame = kSampleRateHz / 100;  // 160 = 16000 / 1000 * 10
    int bits_per_sample = 16;
    int bytes_per_frame = samples_per_frame * bits_per_sample / 8;
    int NN = samples_per_frame;
    int delay_ms = 95;
    int analog_level = 60;
    cout << "samples_per_frame =" << samples_per_frame << endl;
    cout << "bytes_per_frame =" << bytes_per_frame << endl;
    cout << "delay_ms =" << delay_ms << endl;

    int16_t *render_frame      = (int16_t*)malloc(1024);  // > bytes_per_frame
    int16_t *render_frame_out  = (int16_t*)malloc(1024);
    int16_t *capture_frame     = (int16_t*)malloc(1024);
    int16_t *capture_frame_out = (int16_t*)malloc(1024);

    webrtc::StreamConfig inStreamConfig  = webrtc::StreamConfig(kSampleRateHz, kNumChannels, false);
    webrtc::StreamConfig outStreamConfig = webrtc::StreamConfig(kSampleRateHz, kNumChannels, false);

    AudioProcessing* apm = AudioProcessingBuilder().Create();
    AudioProcessing::Config config;
    config.echo_canceller.enabled = true;
    config.echo_canceller.mobile_mode = false;
    config.gain_controller1.enabled = true;
    config.gain_controller1.mode = AudioProcessing::Config::GainController1::kAdaptiveAnalog;
    config.gain_controller1.analog_level_minimum = 0;
    config.gain_controller1.analog_level_maximum = 255;
    config.gain_controller2.enabled = true;
    config.high_pass_filter.enabled = true;
    config.voice_detection.enabled = true;
    apm->ApplyConfig(config);

    while (1) {
        if (NN == fread(render_frame, sizeof(int16_t), NN, fd_far)) {
            fread(capture_frame, sizeof(int16_t), NN, fd_near);
            // ... Render frame arrives bound for the audio HAL ...
            //apm->ProcessReverseStream(render_frame);
            apm->ProcessReverseStream(render_frame, inStreamConfig, outStreamConfig, NULL);
            apm->ProcessStream(capture_frame, inStreamConfig, outStreamConfig, capture_frame_out);
            fwrite(capture_frame_out, sizeof(int16_t), NN, fd_out);
        } else {
            cout << "read far file end NULL" << endl;
            break;
        }
    }

    delete apm;
    free(capture_frame);
    free(capture_frame_out);
    free(render_frame);
    free(render_frame_out);
    fclose(fd_far);
    fclose(fd_near);
    fclose(fd_out);
#endif
    return 0;
}
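One thing this demo does not do is feed delay_ms and analog_level back into the APM; it only prints them. In the upstream AudioProcessing API the capture side is expected to report these around each ProcessStream() call. A sketch of what that would look like inside the loop above, assuming the 1.0 headers expose these methods (check your audio_processing.h):

// Before processing each 10 ms capture frame:
apm->set_stream_delay_ms(delay_ms);          // estimated render-to-capture delay
apm->set_stream_analog_level(analog_level);  // current mic analog gain (0..255 range configured above)
apm->ProcessStream(capture_frame, inStreamConfig, outStreamConfig, capture_frame_out);
// Afterwards, apply the AGC recommendation to the mic gain:
analog_level = apm->recommended_stream_analog_level();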

Here is a screenshot of the result:

The initial results are quite acceptable. The next step is to hook this into the pcm_read/pcm_write threads to implement the full 3A processing for calls (see the threading sketch after the example below).

Here is a real intercom example (the complete source code and audio files can be downloaded from: https://download.csdn.net/download/longruic/21342708):

/*
Build:
arm-linux-gnueabihf-g++ -o xxx_audio_webrtc_3a cvi_audio_webrtc_3a.c -I ./include/webrtc-audio-processing-1/ -I ./include/webrtc-audio-processing-1/modules/ -I ./include/ -I ./include/webrtc-audio-processing-1/ -lwebrtc-audio-processing-1 -L /work/install_32/lib/ -ltinyalsa -L ../../lib/

Test run:
[root@crl6]/mnt/nfs# ./xxx_audio_webrtc_3a ./aec_source_far16k.pcm ./yuan.pcm ./test.pcm
*/
#include "api/audio/echo_canceller3_config.h"
#include "api/audio/echo_control.h"
#include "audio_processing/include/audio_processing.h"
#include "asoundlib.h"
#include <iostream>

using namespace webrtc;
using namespace std;

struct pcm *capture_handle;
struct pcm *playback_handle;
constexpr int16_t kAudioLevel = 10000;
constexpr int kSampleRateHz = 16000;
constexpr int kNumChannels = 1;
int NN = 320;
int pcm_write_size = 0;

void initCapture()
{
    int period_size = NN;
    int s32sample_rate = 0;
    int record_second = 12;
    int period_count = 2;
    int channel = kNumChannels;
    struct pcm_config capture_config;
    enum pcm_format format = PCM_FORMAT_S16_LE;
    int card = 0;
    int device = 0;

    memset(&capture_config, 0, sizeof(capture_config));
    capture_config.channels = channel;
    capture_config.rate = kSampleRateHz;
    printf("check period_size period cnt[%d][%d]\n", period_size, period_count);
    capture_config.period_size = period_size;
    capture_config.period_count = period_count;
    capture_config.format = format;
    capture_config.start_threshold = 0;
    capture_config.stop_threshold = 13245;
    capture_config.silence_threshold = 0;

    capture_handle = pcm_open(card, device, PCM_IN, &capture_config);
    if (!capture_handle || !pcm_is_ready(capture_handle)) {
        printf("Unable to open PCM device (%s)\n", pcm_get_error(capture_handle));
    }
    int size = pcm_frames_to_bytes(capture_handle, pcm_get_buffer_size(capture_handle));
    printf("[pcm_get_frame_size][%d]\n", size);
    size = pcm_frames_to_bytes(capture_handle, period_size);
}

void initPlayback()
{
    struct pcm_config pcm_config;
    int card = 1;
    int device = 0;

    memset(&pcm_config, 0, sizeof(pcm_config));
    pcm_config.channels = kNumChannels;
    pcm_config.rate = kSampleRateHz;
    pcm_config.period_size = NN;
    pcm_config.period_count = 4;
    pcm_config.format = PCM_FORMAT_S16_LE;
    pcm_config.start_threshold = 0;
    pcm_config.stop_threshold = 0;
    pcm_config.silence_threshold = 0;

    playback_handle = pcm_open(card, device, PCM_OUT, &pcm_config);
    if (!playback_handle || !pcm_is_ready(playback_handle)) {
        printf("Unable to open PCM card %d device %u (%s)\n", card, device, pcm_get_error(playback_handle));
        return;
    }
    pcm_write_size = pcm_frames_to_bytes(playback_handle, pcm_get_buffer_size(playback_handle));
}

int main(int argc, char* argv[])
{
    FILE *fd_far      = NULL;
    FILE *fd_yuan_out = NULL;
    FILE *fd_out      = NULL;
    int err = -1;

    fd_far      = fopen(argv[1], "rb");
    fd_yuan_out = fopen(argv[2], "wb");
    fd_out      = fopen(argv[3], "wb");
    if (!fd_far || !fd_yuan_out || !fd_out) {
        cout << "fopen file fail!" << endl;
    }

    int samples_per_frame = kSampleRateHz / 100;
    int bits_per_sample = 16;
    int bytes_per_frame = samples_per_frame * bits_per_sample / 8;
    int delay_ms = 95;
    int analog_level = 60;

    initCapture();
    initPlayback();

    cout << "samples_per_frame =" << samples_per_frame << endl;
    cout << "bytes_per_frame =" << bytes_per_frame << endl;
    cout << "delay_ms =" << delay_ms << endl;

    int16_t *render_frame      = (int16_t*)malloc(321);
    int16_t *capture_frame     = (int16_t*)malloc(321);
    int16_t *capture_frame_out = (int16_t*)malloc(321);

    webrtc::StreamConfig inStreamConfig  = webrtc::StreamConfig(kSampleRateHz, kNumChannels, false);
    webrtc::StreamConfig outStreamConfig = webrtc::StreamConfig(kSampleRateHz, kNumChannels, false);

    AudioProcessing* apm = AudioProcessingBuilder().Create();
    AudioProcessing::Config config;
    config.echo_canceller.enabled = true;
    config.echo_canceller.mobile_mode = false;
    config.gain_controller1.enabled = true;
    config.gain_controller1.mode = AudioProcessing::Config::GainController1::kAdaptiveAnalog;
    config.gain_controller1.analog_level_minimum = 0;
    config.gain_controller1.analog_level_maximum = 255;
    config.gain_controller2.enabled = true;
    config.high_pass_filter.enabled = true;
    config.voice_detection.enabled = true;
    apm->ApplyConfig(config);

    while (1) {
        pcm_write_size = NN;
        if (pcm_write_size == fread((char*)render_frame, 1, pcm_write_size, fd_far)) {
            // Play the far-end frame, then feed it to APM as the render (reference) stream.
            err = pcm_write(playback_handle, (char*)render_frame, pcm_write_size);
            apm->ProcessReverseStream(render_frame, inStreamConfig, outStreamConfig, NULL);
            // Capture the near-end frame and run it through the 3A processing.
            err = pcm_read(capture_handle, (char*)capture_frame, pcm_write_size);
            if (err < 0) {  // pcm_read() returns 0 on success, negative on error
                printf("[%s]\n", pcm_get_error(capture_handle));
            }
            apm->ProcessStream(capture_frame, inStreamConfig, outStreamConfig, capture_frame_out);
            fwrite((char*)capture_frame, 1, pcm_write_size, fd_yuan_out);
            fwrite((char*)capture_frame_out, 1, pcm_write_size, fd_out);
        } else {
            cout << "read far file end" << endl;
            break;
        }
    }

    delete apm;
    free(capture_frame);
    free(capture_frame_out);
    free(render_frame);
    fclose(fd_far);
    fclose(fd_yuan_out);
    fclose(fd_out);
    pcm_close(capture_handle);
    pcm_close(playback_handle);
    return 0;
}
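In this demo everything runs in a single loop, and the fixed pcm_write-then-pcm_read ordering is what roughly produces the 95 ms render-to-capture delay. For the real pcm_read/pcm_write integration mentioned above, the usual structure is one playback thread and one capture thread sharing a single APM instance; the upstream API is designed so that render-side and capture-side calls can come from different threads. A rough sketch only; readFarEndFrame() and sendProcessedFrame() are hypothetical placeholders, not functions from the code above:

#include <thread>
#include <atomic>

std::atomic<bool> g_running{true};

bool readFarEndFrame(int16_t* frame);          // hypothetical: fill one 10 ms far-end frame
void sendProcessedFrame(const int16_t* frame); // hypothetical: encode / transmit the clean frame

// Playback thread: write the far-end signal to the speaker and feed the same
// 10 ms frame to APM as the render (reference) stream.
void playbackLoop(webrtc::AudioProcessing* apm, webrtc::StreamConfig cfg) {
    int16_t frame[160], unused[160];
    while (g_running) {
        if (!readFarEndFrame(frame))
            break;
        pcm_write(playback_handle, (char*)frame, sizeof(frame));
        apm->ProcessReverseStream(frame, cfg, cfg, unused);
    }
}

// Capture thread: read the mic, run the 3A processing, then hand the cleaned
// frame to the encoder / network side.
void captureLoop(webrtc::AudioProcessing* apm, webrtc::StreamConfig cfg) {
    int16_t in[160], out[160];
    while (g_running) {
        if (pcm_read(capture_handle, (char*)in, sizeof(in)) < 0)
            break;
        apm->set_stream_delay_ms(95);   // measured render-to-capture delay
        apm->ProcessStream(in, cfg, cfg, out);
        sendProcessedFrame(out);
    }
}

// Started from main() with something like:
//   std::thread t1(playbackLoop, apm, inStreamConfig);
//   std::thread t2(captureLoop,  apm, inStreamConfig);
//   ... t1.join(); t2.join();

The two loops only share the apm pointer, and each sticks to its own side of the API (render calls in one thread, capture calls in the other), which is the split the library is built around.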

