
I'm new to streaming applications. I create NSData from an AudioBuffer and send that NSData to the client (receiver), but I don't know how to convert the NSData back into an AudioBuffer.

I'm using the following code to convert the AudioBuffer to NSData (this part works fine):

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    // Note: this ASBD is built but never used in this callback; it describes the
    // intended wire format (16-bit signed packed linear PCM, mono, 8 kHz).
    AudioStreamBasicDescription audioFormat;
    memset(&audioFormat, 0, sizeof(audioFormat));
    audioFormat.mSampleRate = 8000.0;
    audioFormat.mFormatID = kAudioFormatLinearPCM; // was kAudioFormatiLBC, which contradicts the linear PCM flags below
    audioFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
    audioFormat.mFramesPerPacket = 1;
    audioFormat.mChannelsPerFrame = 1;
    audioFormat.mBitsPerChannel = 16;
    audioFormat.mReserved = 0;
    audioFormat.mBytesPerFrame = audioFormat.mBytesPerPacket = audioFormat.mChannelsPerFrame * sizeof(SInt16);

    AudioBufferList audioBufferList;
    NSMutableData *data = [[NSMutableData alloc] init];
    CMBlockBufferRef blockBuffer;
    CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);
    for (int y = 0; y < audioBufferList.mNumberBuffers; y++)
    {
        AudioBuffer audioBuffer = audioBufferList.mBuffers[y];
        // mData is just raw sample bytes; append them all to the NSData.
        [data appendBytes:audioBuffer.mData length:audioBuffer.mDataByteSize];
    }
    CFRelease(blockBuffer); // the block buffer was retained for us and must be released
}

If this isn't the right approach, please point me in the right direction. Thanks.


4 Answers


You can use the following code to create NSData from the CMSampleBufferRef and then play it with AVAudioPlayer.

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {

    AudioBufferList audioBufferList;
    NSMutableData *data = [NSMutableData data];
    CMBlockBufferRef blockBuffer;
    CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);

    // Concatenate the raw bytes of every buffer in the list.
    for (int y = 0; y < audioBufferList.mNumberBuffers; y++) {
        AudioBuffer audioBuffer = audioBufferList.mBuffers[y];
        [data appendBytes:audioBuffer.mData length:audioBuffer.mDataByteSize];
    }

    CFRelease(blockBuffer); // release the retained block buffer

    NSError *error = nil;
    AVAudioPlayer *player = [[AVAudioPlayer alloc] initWithData:data error:&error];
    [player play];
}
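Note that AVAudioPlayer expects a complete audio file (WAV, CAF, MP3, ...) rather than headerless PCM, so initWithData: may fail on the raw bytes above. Here is a minimal sketch that wraps the PCM in a 44-byte WAV header first; the WAVDataFromPCM helper is mine, and the 8 kHz / mono / 16-bit parameters are assumptions you should match to your actual capture format:

#import <AVFoundation/AVFoundation.h>

// Hypothetical helper: wraps raw 16-bit signed little-endian PCM in a minimal WAV container.
static NSData *WAVDataFromPCM(NSData *pcm, UInt32 sampleRate, UInt16 channels)
{
    UInt16 bitsPerSample = 16;
    UInt32 byteRate = sampleRate * channels * bitsPerSample / 8;
    UInt16 blockAlign = channels * bitsPerSample / 8;
    UInt32 dataSize = (UInt32)pcm.length;
    UInt32 riffChunkSize = 36 + dataSize;
    UInt32 fmtChunkSize = 16;
    UInt16 formatTag = 1; // 1 == integer PCM

    NSMutableData *wav = [NSMutableData dataWithCapacity:44 + pcm.length];
    [wav appendBytes:"RIFF" length:4];
    [wav appendBytes:&riffChunkSize length:4];
    [wav appendBytes:"WAVEfmt " length:8];
    [wav appendBytes:&fmtChunkSize length:4];
    [wav appendBytes:&formatTag length:2];
    [wav appendBytes:&channels length:2];
    [wav appendBytes:&sampleRate length:4];
    [wav appendBytes:&byteRate length:4];
    [wav appendBytes:&blockAlign length:2];
    [wav appendBytes:&bitsPerSample length:2];
    [wav appendBytes:"data" length:4];
    [wav appendBytes:&dataSize length:4];
    [wav appendData:pcm];
    return wav;
}

// Usage with the data collected above:
NSError *error = nil;
AVAudioPlayer *player = [[AVAudioPlayer alloc] initWithData:WAVDataFromPCM(data, 8000, 1) error:&error];
[player play];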
answered 2014-09-19T06:14:08.010

Here's how I did it, in case anyone else gets stuck on the same problem. You don't need to pull the data out of the AudioBufferList; just use it as-is. In order to recreate the AudioBufferList from the NSData later, I also needed the number-of-samples information, so I prepend it to the actual data.

Here's how you get the data out of the CMSampleBufferRef:

AudioBufferList audioBufferList;
CMBlockBufferRef blockBuffer;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);
CMItemCount numSamples = CMSampleBufferGetNumSamples(sampleBuffer);
NSUInteger size = sizeof(audioBufferList);
char buffer[size + 4];
((int *)buffer)[0] = (int)numSamples; // prepend the sample count in the first 4 bytes
memcpy(buffer + 4, &audioBufferList, size);
// This is the audio data. Note that this copies the AudioBufferList struct,
// whose mData fields are pointers into blockBuffer, so it only round-trips
// within the same process and while blockBuffer is still alive.
NSData *bufferData = [NSData dataWithBytes:buffer length:size + 4];

And this is how you create a CMSampleBufferRef from that data:

const char *buffer = (const char *)[bufferData bytes];

CMSampleBufferRef sampleBuffer = NULL;
OSStatus status = -1;

/* Format description: 16-bit signed packed linear PCM, mono, 44.1 kHz */
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; // == 0xc
audioFormat.mBytesPerPacket = 2;
audioFormat.mFramesPerPacket = 1;
audioFormat.mBytesPerFrame = 2;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mReserved = 0;

CMFormatDescriptionRef format = NULL;
status = CMAudioFormatDescriptionCreate(kCFAllocatorDefault, &audioFormat, 0, NULL, 0, NULL, NULL, &format);
if (status != noErr)
{
    NSLog(@"Error in CMAudioFormatDescriptionCreate");
    return;
}

/* Create the sample buffer (the data itself is attached below) */
CMSampleTimingInfo timing = {.duration = CMTimeMake(1, 44100), .presentationTimeStamp = kCMTimeZero, .decodeTimeStamp = kCMTimeInvalid};
CMItemCount framesCount = ((const int *)buffer)[0]; // sample count prepended on the sending side

status = CMSampleBufferCreate(kCFAllocatorDefault, NULL, NO, NULL, NULL, format, framesCount, 1, &timing, 0, NULL, &sampleBuffer);
CFRelease(format);

if (status != noErr)
{
    NSLog(@"Error in CMSampleBufferCreate");
    return;
}

/* Copy the AudioBufferList into the sample buffer */
AudioBufferList receivedAudioBufferList;
memcpy(&receivedAudioBufferList, buffer + 4, sizeof(receivedAudioBufferList));

status = CMSampleBufferSetDataBufferFromAudioBufferList(sampleBuffer, kCFAllocatorDefault, kCFAllocatorDefault, 0, &receivedAudioBufferList);
if (status != noErr) {
    NSLog(@"Error in CMSampleBufferSetDataBufferFromAudioBufferList");
    return;
}
// Use your sampleBuffer.
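For completeness, one way to actually hear the recreated buffer is AVSampleBufferAudioRenderer with AVSampleBufferRenderSynchronizer (iOS 11+). This is only a minimal sketch assuming the sampleBuffer built above; a real app would keep the renderer alive and feed it from requestMediaDataWhenReadyOnQueue:usingBlock::

#import <AVFoundation/AVFoundation.h>

// Minimal playback sketch (iOS 11+): a renderer plus a synchronizer to drive its clock.
AVSampleBufferAudioRenderer *renderer = [[AVSampleBufferAudioRenderer alloc] init];
AVSampleBufferRenderSynchronizer *synchronizer = [[AVSampleBufferRenderSynchronizer alloc] init];
[synchronizer addRenderer:renderer];

if (renderer.isReadyForMoreMediaData) {
    [renderer enqueueSampleBuffer:sampleBuffer]; // the buffer recreated above
}
[synchronizer setRate:1.0 time:kCMTimeZero]; // start playback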

Let me know if you run into any issues.

answered 2015-06-17T08:06:21.993

I used the following snippet to convert NSData (800-byte packets in my case, but arguably any size) into an AudioBufferList:

- (AudioBufferList *)getBufferListFromData:(NSData *)data
{
    if (data.length > 0)
    {
        NSUInteger len = [data length];
        // Byte *, void *, or Float32 * would all work here; mData is just a void pointer.
        Byte *byteData = (Byte *)malloc(len);
        memcpy(byteData, [data bytes], len);
        if (byteData)
        {
            AudioBufferList *theDataBuffer = (AudioBufferList *)malloc(sizeof(AudioBufferList));
            theDataBuffer->mNumberBuffers = 1;
            theDataBuffer->mBuffers[0].mDataByteSize = (UInt32)len;
            theDataBuffer->mBuffers[0].mNumberChannels = 1;
            theDataBuffer->mBuffers[0].mData = byteData;
            // Read the data into an AudioBufferList
            return theDataBuffer;
        }
    }
    return NULL;
}
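Since both the returned list and its mData are malloc'd, the caller has to free them when done. Here is a minimal usage sketch under that assumption; the FreeBufferList helper and the packetData variable are just illustrative names:

// Hypothetical cleanup helper matching getBufferListFromData: above.
static void FreeBufferList(AudioBufferList *list)
{
    if (list == NULL) return;
    for (UInt32 i = 0; i < list->mNumberBuffers; i++) {
        free(list->mBuffers[i].mData); // free each malloc'd data block
    }
    free(list); // then the list itself
}

AudioBufferList *bufferList = [self getBufferListFromData:packetData];
// ... consume bufferList, e.g. hand it to a render callback ...
FreeBufferList(bufferList);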
answered 2014-09-04T19:20:56.187

This is the code I use to convert audio data (an audio file) into a floating-point representation and save it into an array. First I get the audio data into an AudioBufferList, then read out the float values. Check the code below if it helps:

- (void)PrintFloatDataFromAudioFile {

    NSString *name = @"Filename"; // YOUR FILE NAME
    NSString *source = [[NSBundle mainBundle] pathForResource:name ofType:@"m4a"]; // SPECIFY YOUR FILE FORMAT

    const char *cString = [source cStringUsingEncoding:NSASCIIStringEncoding];

    CFStringRef str = CFStringCreateWithCString(NULL, cString, kCFStringEncodingMacRoman);
    CFURLRef inputFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, str, kCFURLPOSIXPathStyle, false);

    ExtAudioFileRef fileRef;
    ExtAudioFileOpenURL(inputFileURL, &fileRef);

    // Client format: mono 32-bit float linear PCM.
    AudioStreamBasicDescription audioFormat;
    audioFormat.mSampleRate = 44100; // GIVE YOUR SAMPLING RATE
    audioFormat.mFormatID = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat;
    audioFormat.mBitsPerChannel = sizeof(Float32) * 8;
    audioFormat.mChannelsPerFrame = 1; // Mono
    audioFormat.mBytesPerFrame = audioFormat.mChannelsPerFrame * sizeof(Float32); // == sizeof(Float32)
    audioFormat.mFramesPerPacket = 1;
    audioFormat.mBytesPerPacket = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame; // == sizeof(Float32)

    // Apply the client format to the Extended Audio File, so reads are converted for us.
    ExtAudioFileSetProperty(fileRef,
                            kExtAudioFileProperty_ClientDataFormat,
                            sizeof(AudioStreamBasicDescription),
                            &audioFormat);

    int numSamples = 1024; // How many samples to read in at a time
    UInt32 sizePerPacket = audioFormat.mBytesPerPacket; // == sizeof(Float32) == 4 bytes
    UInt32 packetsPerBuffer = numSamples;
    UInt32 outputBufferSize = packetsPerBuffer * sizePerPacket;

    // Reserve space for one read's worth of converted samples.
    UInt8 *outputBuffer = (UInt8 *)malloc(outputBufferSize);

    AudioBufferList convertedData;
    convertedData.mNumberBuffers = 1; // Set this to 1 for mono
    convertedData.mBuffers[0].mNumberChannels = audioFormat.mChannelsPerFrame; // also == 1
    convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
    convertedData.mBuffers[0].mData = outputBuffer;

    UInt32 frameCount = numSamples;
    float *samplesAsCArray;
    int j = 0;
    // SPECIFY YOUR DATA LIMIT; mine was 882000, it should be equal to or more than
    // the total sample count. static (not stack) storage, since ~7 MB of doubles
    // would overflow the stack.
    static double floatDataArray[882000];

    while (frameCount > 0) {
        convertedData.mBuffers[0].mDataByteSize = outputBufferSize; // reset before each read
        ExtAudioFileRead(fileRef, &frameCount, &convertedData);
        if (frameCount > 0) {
            AudioBuffer audioBuffer = convertedData.mBuffers[0];
            samplesAsCArray = (float *)audioBuffer.mData; // cast mData to float

            for (UInt32 i = 0; i < frameCount; i++) { // the last read may return fewer than numSamples frames
                floatDataArray[j] = (double)samplesAsCArray[i]; // store the sample in the float array
                printf("\n%f", floatDataArray[j]); // samples range from -1 to +1
                j++;
            }
        }
    }

    free(outputBuffer);
    ExtAudioFileDispose(fileRef);
    CFRelease(inputFileURL);
    CFRelease(str);
}
answered 2014-01-20T13:03:04.553