我正在尝试对来自录制回调的缓冲区进行实时编码,但我似乎并不真正了解如何做到这一点,以及它是如何工作的。事实上,我花了几个小时阅读 Apple 的参考资料,但我就是不明白。
我想在这里通过各种线程,但对我仍然没有好处。
我有这个录音回调:
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// the data gets rendered here
AudioBuffer buffer;
// a variable where we check the status
OSStatus status;
/**
This is the reference to the object who owns the callback.
*/
AudioProcessor *audioProcessor = (__bridge AudioProcessor*) inRefCon;
/**
on this point we define the number of channels, which is mono
for the iphone. the number of frames is usally 512 or 1024.
*/
buffer.mDataByteSize = inNumberFrames * 2; // sample size
buffer.mNumberChannels = 1; // one channel
buffer.mData = malloc( inNumberFrames * 2 ); // buffer size
// we put our buffer into a bufferlist array for rendering
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0] = buffer;
// render input and check for error
status = AudioUnitRender([audioProcessor audioUnit], ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &bufferList);
[audioProcessor hasError:status:__FILE__:__LINE__];
// process the bufferlist in the audio processor
[audioProcessor processBuffer:&bufferList];
[audioProcessor convertBuffer:&bufferList]; //trying to set the buffer to converting function
// clean up the buffer
free(bufferList.mBuffers[0].mData);
return noErr;
}
convertBuffer看起来像这样 - 它返回
被释放的指针未被分配
-(void)convertBuffer: (AudioBufferList*) audioBufferList
{
unsigned char** stream = audioBufferList->mBuffers[0].mData;
UInt32 *streamSize = &audioBufferList->mBuffers[0].mDataByteSize;
// describe the input format's description
AudioStreamBasicDescription inputDescription;
inputDescription.mSampleRate = SAMPLE_RATE;
inputDescription.mFormatID = kAudioFormatLinearPCM;
inputDescription.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
inputDescription.mFramesPerPacket = 1;
inputDescription.mChannelsPerFrame = 1;
inputDescription.mBitsPerChannel = 16;
inputDescription.mBytesPerPacket = 2;
inputDescription.mBytesPerFrame = 2;
// copy conversion output format's description from the
// output audio unit's description.
// then adjust framesPerPacket to match the input we'll be passing.
// framecount of our input stream is based on the input bytecount.
// output stream will have same number of frames, but different
// number of bytes.
AudioStreamBasicDescription outputDescription;
outputDescription.mSampleRate = 44100.0;
outputDescription.mFormatID = kAudioFormatMPEG4AAC;
outputDescription.mFormatFlags = kMPEG4Object_AAC_Main;
outputDescription.mChannelsPerFrame = 1;
outputDescription.mBytesPerPacket = 1;
outputDescription.mBytesPerFrame = 0;
outputDescription.mFramesPerPacket = 1024;
outputDescription.mBitsPerChannel = 0;
outputDescription.mReserved = 0;
// create an audio converter
AudioConverterRef audioConverter;
OSStatus acCreationResult = AudioConverterNew(&inputDescription, &outputDescription, &audioConverter);
if(!audioConverter)
{
// bail out
free(*stream);
*streamSize = 0;
*stream = (unsigned char*)malloc(0);
return;
}
// calculate number of bytes required for output of input stream.
// allocate buffer of adequate size.
UInt32 outputBytes = outputDescription.mBytesPerPacket * (*streamSize / inputDescription.mBytesPerPacket); // outputDescription.mFramesPerPacket * outputDescription.mBytesPerFrame;
unsigned char *outputBuffer = (unsigned char*)malloc(outputBytes);
memset(outputBuffer, 0, outputBytes);
// describe input data we'll pass into converter
AudioBuffer inputBuffer;
inputBuffer.mNumberChannels = inputDescription.mChannelsPerFrame;
inputBuffer.mDataByteSize = *streamSize;
inputBuffer.mData = *stream;
// describe output data buffers into which we can receive data.
AudioBufferList outputBufferList;
outputBufferList.mNumberBuffers = 1;
outputBufferList.mBuffers[0].mNumberChannels = outputDescription.mChannelsPerFrame;
outputBufferList.mBuffers[0].mDataByteSize = outputBytes;
outputBufferList.mBuffers[0].mData = outputBuffer;
// set output data packet size
UInt32 outputDataPacketSize = outputBytes / outputDescription.mBytesPerPacket;
// fill class members with data that we'll pass into
// the InputDataProc
//_converter_currentBuffer = &inputBuffer;
//_converter_currentInputDescription = inputDescription;
// convert
OSStatus result = AudioConverterFillComplexBuffer(audioConverter, /* AudioConverterRef inAudioConverter */
nil, /* AudioConverterComplexInputDataProc inInputDataProc */
nil, /* void *inInputDataProcUserData */
&outputDataPacketSize, /* UInt32 *ioOutputDataPacketSize */
&outputBufferList, /* AudioBufferList *outOutputData */
NULL /* AudioStreamPacketDescription *outPacketDescription */
);
NSLog([NSString stringWithFormat:@"Err : %ld",result ]);
// change "stream" to describe our output buffer.
// even if error occured, we'd rather have silence than unconverted audio.
free(*stream);
*stream = outputBuffer;
*streamSize = outputBytes;
// dispose of the audio converter
AudioConverterDispose(audioConverter);
}
基本上就是这样。无法让它工作。另外,回调函数应该怎么看?我现在将它设置为 nil。
非常感谢任何帮助。我被困了好几天。