3

我是 ios 开发的新手。我在 iOS 中将 LinearPCM 编码为 MP3。我正在尝试使用 AudioToolbox 框架和 Lame 将原始 PCM 数据从麦克风编码到 MP3。尽管如果我录制 .caf 格式,一切似乎都运行良好. 我只得到编码流中存在的噪声和失真。我不确定我是否正确设置了 AudioQueue,也不确定我是否以正确的方式处理编码缓冲区...我设置音频录制的代码:

示例项目https://github.com/vecter/Audio-Queue-Services-Example

- (void)setupAudioFormat:(AudioStreamBasicDescription*)format 
{
format->mSampleRate = 16000;
format->mFormatID = kAudioFormatLinearPCM;
format->mFramesPerPacket = 1;
format->mChannelsPerFrame = 1;
format->mBytesPerFrame = 2;
format->mBytesPerPacket = 2;
format->mBitsPerChannel = 16;
format->mReserved = 0;
format->mFormatFlags = kLinearPCMFormatFlagIsBigEndian     |
                       kLinearPCMFormatFlagIsSignedInteger |
                       kLinearPCMFormatFlagIsPacked;
}
- (void)recordPressed:(id)sender
{
if (!playState.playing)
{
    if (!recordState.recording)
    {
        printf("Starting recording\n");
          self.mergedData =[[NSMutableData alloc] init];
        [self startRecording];
    }
    else
    {
        printf("Stopping recording\n");
        [self stopRecording];
    }
}
else
{
    printf("Can't start recording, currently playing\n");
}
}

- (void)startRecording
{
[self setupAudioFormat:&recordState.dataFormat];

recordState.currentPacket = 0;
recordState.pThis=self;

OSStatus status;
status = AudioQueueNewInput(&recordState.dataFormat,
                            AudioInputCallback,
                            &recordState,
                            CFRunLoopGetCurrent(),
                            kCFRunLoopCommonModes,
                            0,
                            &recordState.queue);

if (status == 0)
{
    // Prime recording buffers with empty data
    for (int i = 0; i < NUM_BUFFERS; i++)
    {
        AudioQueueAllocateBuffer(recordState.queue, 16000, &recordState.buffers[i]);
        AudioQueueEnqueueBuffer (recordState.queue, recordState.buffers[i], 0, NULL);
    }

    status = AudioFileCreateWithURL(fileURL,
                                    kAudioFileAIFFType,
                                    &recordState.dataFormat,
                                    kAudioFileFlags_EraseFile,
                                    &recordState.audioFile);

    gfp = lame_init();
    lame_set_num_channels(gfp, 1);
    lame_set_in_samplerate(gfp, recordState.dataFormat.mSampleRate);
    lame_set_VBR(gfp, vbr_default);
    lame_init_params(gfp);
    if (status == 0)
    {
        recordState.recording = true;        
        status = AudioQueueStart(recordState.queue, NULL);
        if (status == 0)
        {
            mergeData =[[NSMutableData alloc]init];
            labelStatus.text = @"Recording";
        }
    }
}

if (status != 0)
{
    [self stopRecording];
    labelStatus.text = @"Record Failed";
}
}


- (void)stopRecording
{
recordState.recording = false;

AudioQueueStop(recordState.queue, true);
for(int i = 0; i < NUM_BUFFERS; i++)
{
    AudioQueueFreeBuffer(recordState.queue, recordState.buffers[i]);
}

AudioQueueDispose(recordState.queue, true);
AudioFileClose(recordState.audioFile);
labelStatus.text = @"Idle";
}

然后AudioQueue回调函数调用lame_encode_buffer,然后将编码后的缓冲区写入文件:

void AudioInputCallback(void * inUserData, 
                    AudioQueueRef inAQ, 
                    AudioQueueBufferRef inBuffer, 
                    const AudioTimeStamp * inStartTime, 
                    UInt32 inNumberPacketDescriptions, 
                    const AudioStreamPacketDescription * inPacketDescs)
 {
RecordState * recordState = (RecordState*)inUserData;
if (!recordState->recording)
{
    printf("Not recording, returning\n");
}

printf("Writing buffer %lld\n", recordState->currentPacket);
OSStatus status = AudioFileWritePackets(recordState->audioFile,
                                        false,
                                        inBuffer->mAudioDataByteSize,
                                        inPacketDescs,
                                        recordState->currentPacket,
                                        &inNumberPacketDescriptions,
                                        inBuffer->mAudioData);
if (status == 0)
{
    recordState->currentPacket += inNumberPacketDescriptions;
}

AudioRecorderAppDelegate *this = recordState->pThis;

const int MP3_BUFFER_SIZE=inBuffer->mAudioDataByteSize*4;
unsigned char mEncodedBuffer[MP3_BUFFER_SIZE];

 int encodedBytes=lame_encode_buffer_interleaved(this->gfp, (short int *)inBuffer->mAudioData , inNumberPacketDescriptions, mEncodedBuffer, MP3_BUFFER_SIZE);
NSData* data = [NSData dataWithBytes:mEncodedBuffer length:encodedBytes];
[this writeData:data];
lame_encode_flush(this->gfp, mEncodedBuffer, MP3_BUFFER_SIZE);


memset(&mEncodedBuffer, 0, sizeof(mEncodedBuffer));
AudioQueueEnqueueBuffer(recordState->queue, inBuffer, 0, NULL);

 }

附加数据

- (void) writeData:(NSData *)data
{ 
[mergeData appendData:data];
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,
                                                     NSUserDomainMask, YES);
NSString* docDir = [paths objectAtIndex:0];

NSString* file = [docDir stringByAppendingString:@"/lame.mp3"];
[mergeData writeToFile:file atomically:YES];
NSLog(@"%@",file);
}

有人可以建议这里有什么问题吗?

否则发布已经完成的示例项目?

4

2 回答 2

1

就我而言,这个逻辑有效:

int encodedBytes=lame_encode_buffer_interleaved(lame, (short int *)inBuffer->mAudioData , inNumPackets, mp3_buffer, MP3_SIZE);

NSMutableData *data1=[[NSMutableData alloc]initWithBytes:mp3_buffer length:encodedBytes];

[this writeData:data];
于 2013-08-16T09:53:19.480 回答
1

试试这个

void AQRecorder::MyInputBufferHandler(  void *                              inUserData,
                                    AudioQueueRef                       inAQ,
                                    AudioQueueBufferRef                 inBuffer,
                                    const AudioTimeStamp *              inStartTime,
                                    UInt32                              inNumPackets,
                                    const AudioStreamPacketDescription* inPacketDesc)
{
  AQRecorder *aqr = (AQRecorder *)inUserData;
//    NSLog(@"%f",inStartTime->mSampleTime);
try
{
        if (inNumPackets > 0)
        {
            AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize, inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData);

            aqr->mRecordPacket += inNumPackets;

            int MP3_SIZE =inBuffer->mAudioDataByteSize * 4;
            unsigned char mp3_buffer[MP3_SIZE];
            AppDelegate *delegate =[[UIApplication sharedApplication]delegate];
            lame_t lame = lame_init();
            lame_set_in_samplerate(lame, 44100);
            lame_set_VBR(lame, vbr_default);
            lame_init_params(lame);

      //                int encodedBytes=lame_encode_buffer_interleaved(lame, (short int *)inBuffer->mAudioData , inNumPackets, mp3_buffer, MP3_SIZE);


            int encodedBytes = lame_encode_buffer(lame, (short*)inBuffer->mAudioData,  (short*)inBuffer->mAudioData, inNumPackets, mp3_buffer, MP3_SIZE);

            [delegate.mp3AudioData appendBytes:mp3_buffer length:encodedBytes];

            if (inBuffer->mAudioDataByteSize != 0) {
            }
            else
            {
                int encode=lame_encode_flush(lame, mp3_buffer, MP3_SIZE);
                [delegate.mp3AudioData appendBytes:mp3_buffer length:encode];
            }
            lame_close(lame);
        }

        if (aqr->IsRunning())
        {
            AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
        }
} catch (CAXException e)
{
    char buf[256];
    fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
于 2013-08-20T12:12:01.703 回答