
I am creating a VoIP application for iOS in Objective-C. At the moment I am working on the audio part: recording audio data from the microphone, encoding it with Opus, decoding it, and then playing it back. For recording and playback I use AudioUnit. I also made a buffer implementation that allocates memory slots, each with an initially configured size. There are three main methods:

- setBufferSize - sets the size of the buffer's sub-allocated slots.
- writeDataToBuffer - creates a new slot if needed and fills the current write slot with data.
- readDataFromBuffer - reads data from the current read slot.
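To make the description concrete, here is a hypothetical sketch of that interface; the real implementation is not shown in this question, only the three methods listed above:

    #include <cstddef>

    // Hypothetical interface for the buffer described above; only the
    // three methods named in the question are sketched.
    class VoipBuffer
    {
    public:
        // Sets the size of each sub-allocated slot.
        void setBufferSize(size_t slotSize);

        // Fills the current write slot, allocating a new slot when needed.
        void writeDataToBuffer(const void* data, size_t dataSize);

        // Returns the current read slot, or nullptr when nothing is ready.
        void* readDataFromBuffer();
    };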

I use the buffer to store the audio data. It works well; I have tested it. Also, if I use it without Opus, just reading the audio data, storing it in the buffer, reading it back from the buffer and playing it, everything is fine. But the problem comes when I include Opus. It does actually encode and decode the audio data, but the quality is not very good and there is some crackling. I would like to know what I am doing wrong. Here are my code snippets:

The Audio Unit setup:

OSStatus status;


m_sAudioDescription.componentType = kAudioUnitType_Output;
m_sAudioDescription.componentSubType = kAudioUnitSubType_VoiceProcessingIO/*kAudioUnitSubType_RemoteIO*/;
m_sAudioDescription.componentFlags = 0;
m_sAudioDescription.componentFlagsMask = 0;
m_sAudioDescription.componentManufacturer = kAudioUnitManufacturer_Apple;

AudioComponent inputComponent = AudioComponentFindNext(NULL, &m_sAudioDescription);

status = AudioComponentInstanceNew(inputComponent, &m_audioUnit);


// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(m_audioUnit,
                              kAudioOutputUnitProperty_EnableIO,
                              kAudioUnitScope_Input,
                              VOIP_AUDIO_INPUT_ELEMENT,
                              &flag,
                              sizeof(flag));

// Enable IO for playback
status = AudioUnitSetProperty(m_audioUnit,
                              kAudioOutputUnitProperty_EnableIO,
                              kAudioUnitScope_Output,
                              VOIP_AUDIO_OUTPUT_ELEMENT,
                              &flag,
                              sizeof(flag));

// Describe format
m_sAudioFormat.mSampleRate          = 48000.00; /*44100.00*/
m_sAudioFormat.mFormatID            = kAudioFormatLinearPCM;
m_sAudioFormat.mFormatFlags         = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked/* | kAudioFormatFlagsCanonical*/;
m_sAudioFormat.mFramesPerPacket     = 1;
m_sAudioFormat.mChannelsPerFrame    = 1;
m_sAudioFormat.mBitsPerChannel      = 16; //8 * bytesPerSample
m_sAudioFormat.mBytesPerFrame       = /*(UInt32)bytesPerSample;*/2; //bitsPerChannel / 8 * channelsPerFrame
m_sAudioFormat.mBytesPerPacket      = 2; //bytesPerFrame * framesPerPacket


// Apply format
status = AudioUnitSetProperty(m_audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Output,
                              VOIP_AUDIO_INPUT_ELEMENT,
                              &m_sAudioFormat,
                              sizeof(m_sAudioFormat));

status = AudioUnitSetProperty(m_audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Input,
                              VOIP_AUDIO_OUTPUT_ELEMENT,
                              &m_sAudioFormat,
                              sizeof(m_sAudioFormat));


// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = inputRenderCallback;
callbackStruct.inputProcRefCon = this;
status = AudioUnitSetProperty(m_audioUnit,
                              kAudioOutputUnitProperty_SetInputCallback,
                              kAudioUnitScope_Global,
                              VOIP_AUDIO_INPUT_ELEMENT,
                              &callbackStruct,
                              sizeof(callbackStruct));

// Set output callback
callbackStruct.inputProc = outputRenderCallback;
callbackStruct.inputProcRefCon = this;
status = AudioUnitSetProperty(m_audioUnit,
                              kAudioUnitProperty_SetRenderCallback,
                              kAudioUnitScope_Global,
                              VOIP_AUDIO_OUTPUT_ELEMENT,
                              &callbackStruct,
                              sizeof(callbackStruct));

//Enable echo cancellation:
this->_setEchoCancelation(true);

//Disable automatic gain control:
this->_setAGC(false);

// Initialise
status = AudioUnitInitialize(m_audioUnit);

return noErr;

Input buffer allocation and setting the size of the storage buffers:

void VoipAudio::_allocBuffer()
{
   UInt32 numFramesPerBuffer;
   UInt32 size = sizeof(/*VoipUInt32*/VoipInt16);
   AudioUnitGetProperty(m_audioUnit,
                        kAudioUnitProperty_MaximumFramesPerSlice,
                        kAudioUnitScope_Global,
                        VOIP_AUDIO_OUTPUT_ELEMENT,
                        &numFramesPerBuffer,
                        &size);

   UInt32 inputBufferListSize = offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * m_sAudioFormat.mChannelsPerFrame);
   inputBuffer = (AudioBufferList *)malloc(inputBufferListSize);
   inputBuffer->mNumberBuffers = m_sAudioFormat.mChannelsPerFrame;

   //pre-malloc buffers for AudioBufferLists
   for(VoipUInt32 tmp_int1 = 0; tmp_int1 < inputBuffer->mNumberBuffers; tmp_int1++)
   {
      inputBuffer->mBuffers[tmp_int1].mNumberChannels = 1;
      inputBuffer->mBuffers[tmp_int1].mDataByteSize = 2048;
      inputBuffer->mBuffers[tmp_int1].mData = malloc(2048);
      memset(inputBuffer->mBuffers[tmp_int1].mData, 0, 2048);
   }

   this->m_oAudioBuffer = new VoipBuffer();
   this->m_oAudioBuffer->setBufferSize(2048);

   this->m_oAudioReadBuffer = new VoipBuffer();
   this->m_oAudioReadBuffer->setBufferSize(2880);
 }

The recording callback:

this->m_oAudioReadBuffer->writeDataToBuffer(samples, samplesSize);
void* tmp_buffer = this->m_oAudioReadBuffer->readDataFromBuffer();
if (tmp_buffer != nullptr)
{
   sVoipAudioCodecOpusEncodedResult* encodedSamples = VoipAudioCodecs::Opus_Encode((VoipInt16*)tmp_buffer, 2880);

   sVoipAudioCodecOpusDecodedResult* decodedSamples = VoipAudioCodecs::Opus_Decode(encodedSamples->m_data, encodedSamples->m_dataSize);


   this->m_oAudioBuffer->writeDataToBuffer(decodedSamples->m_data, decodedSamples->m_dataSize);

   free(encodedSamples->m_data);
   free(encodedSamples);
   free(decodedSamples->m_data);
   free(decodedSamples);
}

The playback callback:

void* tmp_buffer = this->m_oAudioBuffer->readDataFromBuffer();

if (tmp_buffer != nullptr)
{
   memset(buffer->mBuffers[0].mData, 0, 2048);
   memcpy(buffer->mBuffers[0].mData, tmp_buffer, 2048);
   buffer->mBuffers[0].mDataByteSize = 2048;
} else {
   memset(buffer->mBuffers[0].mData, 0, 2048);
   buffer->mBuffers[0].mDataByteSize = 2048;
}

Opus initialization code:

int _error = 0;

VoipAudioCodecs::m_oEncoder = opus_encoder_create(SAMPLE_RATE, CHANNELS, APPLICATION, &_error);
if (_error < 0)
{
    fprintf(stderr, "VoipAudioCodecs error: failed to create an encoder: %s\n", opus_strerror(_error));

    return;
}

_error = opus_encoder_ctl(VoipAudioCodecs::m_oEncoder, OPUS_SET_BITRATE(BITRATE/*OPUS_BITRATE_MAX*/));
if (_error < 0)
{
    fprintf(stderr, "VoipAudioCodecs error: failed to set bitrate: %s\n", opus_strerror(_error));

    return;
}

VoipAudioCodecs::m_oDecoder = opus_decoder_create(SAMPLE_RATE, CHANNELS, &_error);
if (_error < 0)
{
    fprintf(stderr, "VoipAudioCodecs error: failed to create decoder: %s\n", opus_strerror(_error));

    return;
}

Opus encoding/decoding:

sVoipAudioCodecOpusEncodedResult* VoipAudioCodecs::Opus_Encode(VoipInt16* number, int samplesCount)
{
   unsigned char cbits[MAX_PACKET_SIZE];
   VoipInt32 nbBytes;

   nbBytes = opus_encode(VoipAudioCodecs::m_oEncoder, number, FRAME_SIZE, cbits, MAX_PACKET_SIZE);
   if (nbBytes < 0)
   {
      fprintf(stderr, "VoipAudioCodecs error: encode failed: %s\n", opus_strerror(nbBytes));

      return nullptr;
   }    

   sVoipAudioCodecOpusEncodedResult* result = (sVoipAudioCodecOpusEncodedResult* )malloc(sizeof(sVoipAudioCodecOpusEncodedResult));

   result->m_data = (unsigned char*)malloc(nbBytes);
   memcpy(result->m_data, cbits, nbBytes);
   result->m_dataSize = nbBytes;

   return result;
}

sVoipAudioCodecOpusDecodedResult* VoipAudioCodecs::Opus_Decode(void* encoded, VoipInt32 nbBytes)
{
    VoipInt16 decodedPacket[MAX_FRAME_SIZE];


    int frame_size = opus_decode(VoipAudioCodecs::m_oDecoder, (const unsigned char*)encoded, nbBytes, decodedPacket, MAX_FRAME_SIZE, 0);

    if (frame_size < 0)
    {
       fprintf(stderr, "VoipAudioCodecs error: decoder failed: %s\n", opus_strerror(frame_size));

       return nullptr;
    }

    sVoipAudioCodecOpusDecodedResult* result = (sVoipAudioCodecOpusDecodedResult* )malloc(sizeof(sVoipAudioCodecOpusDecodedResult));

    result->m_data = (VoipInt16*)malloc(frame_size / sizeof(VoipInt16));
    memcpy(result->m_data, decodedPacket, (frame_size / sizeof(VoipInt16)));
    result->m_dataSize = frame_size / sizeof(VoipInt16);

    return result;
 }
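A note on units that is easy to miss when reading the two functions above: opus_encode() takes its frame size in samples per channel and returns a byte count, while opus_decode() returns samples per channel. A minimal round-trip sketch under the same 48 kHz mono setup (a hypothetical helper, using the constants listed just below; error handling trimmed):

    #include <opus/opus.h> // header path may differ depending on the build setup

    // Hypothetical sketch of one encode/decode round trip.
    static void opusRoundTripSketch(OpusEncoder* enc, OpusDecoder* dec,
                                    const opus_int16* pcmIn /* FRAME_SIZE samples */)
    {
        unsigned char packet[MAX_PACKET_SIZE];
        opus_int16 pcmOut[MAX_FRAME_SIZE];

        // opus_encode() takes the frame size in samples per channel and
        // returns the packet length in bytes.
        opus_int32 nbBytes = opus_encode(enc, pcmIn, FRAME_SIZE,
                                         packet, MAX_PACKET_SIZE);
        if (nbBytes < 0) return;

        // opus_decode() returns the frame size in samples per channel, so
        // the decoded payload occupies sampleCount * sizeof(opus_int16) bytes.
        int sampleCount = opus_decode(dec, packet, nbBytes,
                                      pcmOut, MAX_FRAME_SIZE, 0);
        if (sampleCount < 0) return;

        size_t decodedBytes = (size_t)sampleCount * sizeof(opus_int16);
        (void)decodedBytes;
    }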

Here are some of the constants I use:

#define FRAME_SIZE 2880 //120, 240, 480, 960, 1920, 2880 
#define SAMPLE_RATE 48000
#define CHANNELS 1
#define APPLICATION OPUS_APPLICATION_VOIP//OPUS_APPLICATION_AUDIO
#define BITRATE 64000
#define MAX_FRAME_SIZE 4096
#define MAX_PACKET_SIZE (3*1276)
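For reference, with SAMPLE_RATE 48000 the FRAME_SIZE of 2880 corresponds to 2880 / 48000 = 60 ms of audio per Opus frame, the longest frame duration Opus supports; MAX_PACKET_SIZE (3*1276) is the same value used in the official Opus example code.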

Can you help me?


2 Answers


Your audio callback time probably needs to be increased. Try increasing your session's setPreferredIOBufferDuration time. I have used Opus on iOS and measured the decoding time: decoding about 240 frames of data takes 2 to 3 ms. There is a very good chance you are missing subsequent callbacks because decoding the audio takes too long.
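For reference, the IO buffer duration is requested through AVAudioSession; a minimal Objective-C sketch (the 0.02 s value is only an illustrative choice, and the system treats the request as a hint):

    #import <AVFoundation/AVFoundation.h>

    NSError *error = nil;
    AVAudioSession *session = [AVAudioSession sharedInstance];

    // Ask for a larger IO buffer so each callback delivers more audio and
    // the codec gets more time per cycle; the OS may grant a different value.
    if (![session setPreferredIOBufferDuration:0.02 error:&error]) {
        NSLog(@"setPreferredIOBufferDuration failed: %@", error);
    }
    NSLog(@"Granted IO buffer duration: %f", session.IOBufferDuration);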

answered Nov 13, 2015 at 14:35

I ran into the same problem in my project. The problem was that iOS gave me unstable frame sizes; I tried both Audio Queue Services and Audio Unit, and they gave the same result (a crackling sound). All you have to do is save the samples from the audio callback into a ring buffer, and then, in a separate thread, do the audio processing that builds a fixed-size frame on each round. For example: the AudioUnit gives you frames or samples like this: [2048 .. 2048 .. 2048], while the Opus codec needs 2880 frames per packet, so you need to take 2048 from the first buffer and the remaining 832 frames from the next buffer to get a fixed frame size to send to the Opus encoder.

This is the function I use in my project:

    func audioProcessing(){
        DispatchQueue.global(qos: .default).async {
             
             // this saves the leftover data from the ring buffer between rounds
             var remainData:NSMutableData = NSMutableData()
             var remainDataSize = 0
             
             while self.room_oppened{
                
                 // here we define the fixed frame we want to use in our opus encoder
                 
                 var packetOffset = 0
                 let fixedFrameSize:Int     = 5760 // presumably 5760 bytes = 2880 16-bit mono samples
                 var dataToGetFullFrame:Int = 5760
                 let packetData:NSMutableData = NSMutableData(length: fixedFrameSize)! // this needs to be filled with data
                 

                 if remainDataSize > 0 {
                     if remainDataSize < fixedFrameSize{
                         memcpy(packetData.mutableBytes.advanced(by: packetOffset), remainData.mutableBytes.advanced(by: 0), remainDataSize) // copy in the leftover data
                         dataToGetFullFrame = dataToGetFullFrame - remainDataSize
                         packetOffset = packetOffset + remainDataSize
                     }else{
                         memcpy(packetData.mutableBytes.advanced(by: packetOffset), remainData.mutableBytes.advanced(by: 0), fixedFrameSize) // copy in the leftover data
                         dataToGetFullFrame = 0
                     }
                     remainDataSize = 0
                 }
                                  
                 
                 // if the packet isn't full yet, we need to get more data from the ring buffer
                 if dataToGetFullFrame > 0 {
                     
                     while dataToGetFullFrame > 0 {
                         
                         let bufferData = self.ringBufferEncodedAudio.read() // read a chunk of data from the ring buffer
                         
                         if bufferData != nil{
                                      
                             
                             var chunkOffset = 0
                             
                             if dataToGetFullFrame > bufferData!.length{
                                 memcpy(packetData.mutableBytes.advanced(by: packetOffset) , bufferData!.mutableBytes , bufferData!.length)
                                 chunkOffset = bufferData!.length // this is how much data we read
                                 dataToGetFullFrame = dataToGetFullFrame - bufferData!.length // how much data we still need to fill the packet
                                 packetOffset = packetOffset + bufferData!.length
                             }else{
                                 memcpy(packetData.mutableBytes.advanced(by: packetOffset) , bufferData!.mutableBytes , dataToGetFullFrame)
                                 chunkOffset = dataToGetFullFrame // this is how much data we read
                                 packetOffset = packetOffset + dataToGetFullFrame
                                 dataToGetFullFrame = 0 // the packet is now full
                             }
                             
                             
                             if dataToGetFullFrame <= 0 {
                                 let size       = bufferData!.length - chunkOffset
                                 remainData     = NSMutableData(bytes: bufferData?.mutableBytes.advanced(by: chunkOffset), length: size)
                                 remainDataSize = size
                             }

        
                         }
                     
                         usleep(useconds_t(8 * 1000))
                         
                     }
                                                           
                 }
                 
                 // send packet to encoder
                if self.enable_streaming {
                    let dataToEncode:Data = packetData as Data
                    let packet = OpusSwiftPort.shared.encodeData(dataToEncode)
                                    
                    if packet != nil{
                        self.sendAudioPacket(packet: packet!) // <--- this goes out to the network
                    }
                }
                
              
             }
         }
     }

After I added this audio processing I got very clear audio. I hope this helps you.

answered Nov 28, 2020 at 20:07