Developing Audio Call Functionality

In an audio call scenario, audio output (playing the remote peer's voice) and audio input (recording the local voice) run simultaneously. An application can use AudioRenderer for audio output and AudioCapturer for audio input; using the two together implements the audio call feature.

When an audio call starts and ends, the application can check the current audio scene mode and ringer mode on its own, so that it can apply an appropriate audio management and prompting strategy.
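For example, the check below is a minimal sketch assuming API version 9 of @ohos.multimedia.audio: it queries the audio scene via AudioManager.getAudioScene() and the ringer mode via the default volume group's AudioVolumeGroupManager.getRingerMode(). The helper name checkCallAudioEnvironment is hypothetical.

```ts
import audio from '@ohos.multimedia.audio';

// Hypothetical helper: inspect the audio environment before starting or after ending a call.
async function checkCallAudioEnvironment() {
  let audioManager = audio.getAudioManager();
  // Query the current audio scene (e.g. AUDIO_SCENE_DEFAULT, AUDIO_SCENE_VOICE_CHAT)
  let scene = await audioManager.getAudioScene();
  console.info(`current audio scene: ${scene}`);
  // Query the ringer mode (RINGER_MODE_NORMAL / RINGER_MODE_SILENT / RINGER_MODE_VIBRATE)
  // via the default volume group manager (API 9)
  let volumeManager = audioManager.getVolumeManager();
  let groupManager = await volumeManager.getVolumeGroupManager(audio.DEFAULT_VOLUME_GROUP_ID);
  let ringerMode = await groupManager.getRingerMode();
  console.info(`current ringer mode: ${ringerMode}`);
}
```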

The following code demonstrates the basic workflow of using AudioRenderer and AudioCapturer together to implement an audio call. It does not cover the transmission of call data. In real development, the peer's call data received over the network must be decoded and played; here this is replaced by reading data from an audio file. Likewise, the locally recorded call data must be encoded, packetized, and sent to the peer over the network; here this is replaced by writing the data to an audio file.

Using AudioRenderer to Play the Peer's Call Audio

This workflow is similar to developing audio playback with AudioRenderer. The key differences lie in the audioRendererInfo parameter and the source of the audio data: in audioRendererInfo, the content type must be set to speech (CONTENT_TYPE_SPEECH), and the stream usage must be set to voice communication (STREAM_USAGE_VOICE_COMMUNICATION).
```ts
import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';

const TAG = 'VoiceCallDemoForAudioRenderer';

// Similar to the audio playback workflow with AudioRenderer; the key differences
// are the audioRendererInfo parameters and the source of the audio data.
export default class VoiceCallDemoForAudioRenderer {
  private renderModel = undefined;
  private audioStreamInfo = {
    samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // Sampling rate
    channels: audio.AudioChannel.CHANNEL_2, // Channel count
    sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // Sample format
    encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // Encoding type
  }
  private audioRendererInfo = {
    // Use the parameters appropriate for the voice-call scenario
    content: audio.ContentType.CONTENT_TYPE_SPEECH, // Content type: speech
    usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, // Stream usage: voice communication
    rendererFlags: 0 // Renderer flags: the default 0 is fine
  }
  private audioRendererOptions = {
    streamInfo: this.audioStreamInfo,
    rendererInfo: this.audioRendererInfo
  }
  // Initialize: create the instance and register event listeners
  init() {
    audio.createAudioRenderer(this.audioRendererOptions, (err, renderer) => { // Create an AudioRenderer instance
      if (!err) {
        console.info(`${TAG}: creating AudioRenderer success`);
        this.renderModel = renderer;
        this.renderModel.on('stateChange', (state) => { // Register a listener that fires when the renderer enters the given state
          if (state == audio.AudioState.STATE_PREPARED) {
            console.info('audio renderer state is: STATE_PREPARED');
          }
          if (state == audio.AudioState.STATE_RUNNING) {
            console.info('audio renderer state is: STATE_RUNNING');
          }
        });
        this.renderModel.on('markReach', 1000, (position) => { // Subscribe to markReach: the callback fires when 1000 frames have been rendered
          if (position == 1000) {
            console.info('ON Triggered successfully');
          }
        });
      } else {
        console.info(`${TAG}: creating AudioRenderer failed, error: ${err.message}`);
      }
    });
  }
  // Start one rendering pass
  async start() {
    let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(this.renderModel.state) === -1) { // Rendering can start only from STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED
      console.error(`${TAG}: start failed`);
      return;
    }
    await this.renderModel.start(); // Start rendering
    const bufferSize = await this.renderModel.getBufferSize();
    // Reading from an audio file is only an example; in a real audio call,
    // the data to render is the audio transmitted by the remote peer
    let context = getContext(this);
    let path = context.filesDir;
    const filePath = path + '/voice_call_data.wav'; // Sandbox path; the physical path is /data/storage/el2/base/haps/entry/files/voice_call_data.wav
    let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
    let stat = await fs.stat(filePath);
    let buf = new ArrayBuffer(bufferSize);
    let len = stat.size % bufferSize === 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
    for (let i = 0; i < len; i++) {
      let options = {
        offset: i * bufferSize,
        length: bufferSize
      };
      let readsize = await fs.read(file.fd, buf, options);
      // buf holds the audio data to write into the buffer. It can be pre-processed before
      // calling AudioRenderer.write() to customize playback; AudioRenderer reads the data
      // written into the buffer and renders it.
      let writeSize = await new Promise((resolve, reject) => {
        this.renderModel.write(buf, (err, writeSize) => {
          if (err) {
            reject(err);
          } else {
            resolve(writeSize);
          }
        });
      });
      if (this.renderModel.state === audio.AudioState.STATE_RELEASED) { // Stop rendering if the renderer has been released
        fs.close(file);
        await this.renderModel.stop();
      }
      if (this.renderModel.state === audio.AudioState.STATE_RUNNING) {
        if (i === len - 1) { // Stop rendering once the whole file has been read
          fs.close(file);
          await this.renderModel.stop();
        }
      }
    }
  }
  // Pause rendering
  async pause() {
    // Pausing is possible only in STATE_RUNNING
    if (this.renderModel.state !== audio.AudioState.STATE_RUNNING) {
      console.info('Renderer is not running');
      return;
    }
    await this.renderModel.pause(); // Pause rendering
    if (this.renderModel.state === audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is paused.');
    } else {
      console.error('Pausing renderer failed.');
    }
  }
  // Stop rendering
  async stop() {
    // Stopping is possible only in STATE_RUNNING or STATE_PAUSED
    if (this.renderModel.state !== audio.AudioState.STATE_RUNNING && this.renderModel.state !== audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is not running or paused.');
      return;
    }
    await this.renderModel.stop(); // Stop rendering
    if (this.renderModel.state === audio.AudioState.STATE_STOPPED) {
      console.info('Renderer stopped.');
    } else {
      console.error('Stopping renderer failed.');
    }
  }
  // Destroy the instance and release resources
  async release() {
    // release() is valid only when the renderer is not already in STATE_RELEASED
    if (this.renderModel.state === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer already released');
      return;
    }
    await this.renderModel.release(); // Release resources
    if (this.renderModel.state === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer released');
    } else {
      console.error('Renderer release failed.');
    }
  }
}
```
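A typical way to drive this class is sketched below; runRendererDemo is a hypothetical caller, and real code should wait for the 'stateChange' callback to report STATE_PREPARED before starting, since init() creates the renderer asynchronously.

```ts
// Hypothetical driver: exercise the renderer through a full lifecycle.
async function runRendererDemo() {
  const demo = new VoiceCallDemoForAudioRenderer();
  demo.init(); // Creates the AudioRenderer asynchronously and registers listeners
  // In real code, wait for the 'stateChange' STATE_PREPARED callback before starting
  await demo.start();   // Renders until the source data is exhausted, then stops itself
  await demo.release(); // Free the underlying stream
}
```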

Using AudioCapturer to Record the Local Call Audio

This workflow is similar to developing audio recording with AudioCapturer. The key differences lie in the audioCapturerInfo parameter and the destination of the audio data: in audioCapturerInfo, the source type must be set to voice communication (SOURCE_TYPE_VOICE_COMMUNICATION).
```ts
import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';

const TAG = 'VoiceCallDemoForAudioCapturer';

// Similar to the audio recording workflow with AudioCapturer; the key differences
// are the audioCapturerInfo parameters and where the audio data flows.
export default class VoiceCallDemoForAudioCapturer {
  private audioCapturer = undefined;
  private audioStreamInfo = {
    samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, // Sampling rate
    channels: audio.AudioChannel.CHANNEL_1, // Channel count
    sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // Sample format
    encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // Encoding type
  }
  private audioCapturerInfo = {
    // Use the parameters appropriate for the voice-call scenario
    source: audio.SourceType.SOURCE_TYPE_VOICE_COMMUNICATION, // Source type: voice communication
    capturerFlags: 0 // Capturer flags: the default 0 is fine
  }
  private audioCapturerOptions = {
    streamInfo: this.audioStreamInfo,
    capturerInfo: this.audioCapturerInfo
  }
  // Initialize: create the instance and register event listeners
  init() {
    audio.createAudioCapturer(this.audioCapturerOptions, (err, capturer) => { // Create an AudioCapturer instance
      if (err) {
        console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`);
        return;
      }
      console.info(`${TAG}: create AudioCapturer success`);
      this.audioCapturer = capturer;
      this.audioCapturer.on('markReach', 1000, (position) => { // Subscribe to markReach: the callback fires when 1000 frames have been captured
        if (position === 1000) {
          console.info('ON Triggered successfully');
        }
      });
      this.audioCapturer.on('periodReach', 2000, (position) => { // Subscribe to periodReach: the callback fires every 2000 captured frames
        if (position === 2000) {
          console.info('ON Triggered successfully');
        }
      });
    });
  }
  // Start one capture pass
  async start() {
    let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(this.audioCapturer.state) === -1) { // Capturing can start only from STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED
      console.error(`${TAG}: start failed`);
      return;
    }
    await this.audioCapturer.start(); // Start capturing
    // Writing the captured audio to a file is only an example; in a real audio call,
    // the locally captured data must be encoded, packetized, and sent to the remote peer
    let context = getContext(this);
    const path = context.filesDir + '/voice_call_data.wav'; // Path where the captured audio file is stored
    let file = fs.openSync(path, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE); // Create the file if it does not exist
    let fd = file.fd;
    let numBuffersToCapture = 150; // Write 150 buffers in a loop
    let count = 0;
    while (numBuffersToCapture) {
      let bufferSize = await this.audioCapturer.getBufferSize();
      let buffer = await this.audioCapturer.read(bufferSize, true);
      let options = {
        offset: count * bufferSize,
        length: bufferSize
      };
      if (buffer === undefined) {
        console.error(`${TAG}: read buffer failed`);
      } else {
        let number = fs.writeSync(fd, buffer, options);
        console.info(`${TAG}: write data: ${number}`);
      }
      numBuffersToCapture--;
      count++;
    }
  }
  // Stop capturing
  async stop() {
    // Stopping is possible only in STATE_RUNNING or STATE_PAUSED
    if (this.audioCapturer.state !== audio.AudioState.STATE_RUNNING && this.audioCapturer.state !== audio.AudioState.STATE_PAUSED) {
      console.info('Capturer is not running or paused');
      return;
    }
    await this.audioCapturer.stop(); // Stop capturing
    if (this.audioCapturer.state === audio.AudioState.STATE_STOPPED) {
      console.info('Capturer stopped');
    } else {
      console.error('Capturer stop failed');
    }
  }
  // Destroy the instance and release resources
  async release() {
    // release() is valid only when the capturer is not in STATE_RELEASED or STATE_NEW
    if (this.audioCapturer.state === audio.AudioState.STATE_RELEASED || this.audioCapturer.state === audio.AudioState.STATE_NEW) {
      console.info('Capturer already released');
      return;
    }
    await this.audioCapturer.release(); // Release resources
    if (this.audioCapturer.state == audio.AudioState.STATE_RELEASED) {
      console.info('Capturer released');
    } else {
      console.error('Capturer release failed');
    }
  }
}
```
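Putting the two classes together, a call session could be driven as in the following sketch; runVoiceCall is a hypothetical wrapper, and in a real call the loops inside both start() methods would be fed by and drained to the network rather than a local file.

```ts
// Hypothetical sketch of a call session running playback and capture concurrently.
async function runVoiceCall() {
  const playback = new VoiceCallDemoForAudioRenderer();
  const capture = new VoiceCallDemoForAudioCapturer();
  playback.init(); // Both init() calls create their streams asynchronously;
  capture.init();  // wait for their creation callbacks before starting in real code
  // Run audio output and input in parallel for the duration of the call
  await Promise.all([playback.start(), capture.start()]);
  // When the call ends, stop and release both streams
  await capture.stop();
  await capture.release();
  await playback.release(); // start() already stopped the renderer at end of data
}
```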