I am using AVAssetReader and AVAssetWriter to reverse an audio file, but the reversed audio that comes out is very choppy.
What is the best approach to reversing an audio file?
Any help is greatly appreciated.
- (void)reverseAudio:(NSURL *)videoURL andVideoAsset:(AVURLAsset *)videoAsset {
    // Set up a reader that decodes the asset's audio track to 16-bit interleaved linear PCM.
    AVAssetReader *video2AssetReader = [[AVAssetReader alloc] initWithAsset:videoAsset error:nil];
    video2AssetReader.timeRange = CMTimeRangeFromTimeToTime(kCMTimeZero, [videoAsset duration]);

    NSArray *audioTracks = [videoAsset tracksWithMediaType:AVMediaTypeAudio];
    AVAssetTrack *audioTrack = [audioTracks objectAtIndex:0];

    NSDictionary *outputSettingsDict = [[NSDictionary alloc] initWithObjectsAndKeys:
                                        [NSNumber numberWithInt:kAudioFormatLinearPCM], AVFormatIDKey,
                                        [NSNumber numberWithInt:16], AVLinearPCMBitDepthKey,
                                        [NSNumber numberWithBool:NO], AVLinearPCMIsBigEndianKey,
                                        [NSNumber numberWithBool:NO], AVLinearPCMIsFloatKey,
                                        [NSNumber numberWithBool:NO], AVLinearPCMIsNonInterleaved,
                                        nil];

    AVAssetReaderTrackOutput *readerAudioTrackOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:audioTrack outputSettings:outputSettingsDict];
    [video2AssetReader addOutput:readerAudioTrackOutput];
    [video2AssetReader startReading];
    // Read all of the sample buffers into memory so they can be written back out in reverse order.
    NSMutableArray *audioSamples = [[NSMutableArray alloc] init];
    CMSampleBufferRef audioSample;
    while ((audioSample = [readerAudioTrackOutput copyNextSampleBuffer])) {
        [audioSamples addObject:(__bridge id)audioSample];
        CFRelease(audioSample);
    }

    videoReverseProcess3TotalFrames = audioSamples.count;
    NSLog(@"AUDIO SAMPLES COUNT = %lu", (unsigned long)audioSamples.count);
    [video2AssetReader cancelReading];
    NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *documentsDirectory = [paths objectAtIndex:0];
    NSString *videoPath = [documentsDirectory stringByAppendingPathComponent:@"videoReverseAudioFile.m4a"];

    NSError *error = nil;
    if ([[NSFileManager defaultManager] fileExistsAtPath:videoPath]) {
        [[NSFileManager defaultManager] removeItemAtPath:videoPath error:&error];
        if (error) {
            NSLog(@"VIDEO DELETE FAILED");
        }
        else {
            NSLog(@"VIDEO DELETED");
        }
    }

    NSURL *audioExportURL = [[NSURL alloc] initFileURLWithPath:videoPath];
    AVAssetWriter *writer = [[AVAssetWriter alloc] initWithURL:audioExportURL fileType:AVFileTypeAppleM4A error:&error];
    AudioChannelLayout channelLayout;
    memset(&channelLayout, 0, sizeof(AudioChannelLayout));
    channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo;

    NSDictionary *audioCompressionSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                                              [NSNumber numberWithInt:kAudioFormatMPEG4AAC], AVFormatIDKey,
                                              [NSNumber numberWithFloat:44100.0], AVSampleRateKey,
                                              [NSNumber numberWithInt:2], AVNumberOfChannelsKey,
                                              [NSNumber numberWithInt:128000], AVEncoderBitRateKey,
                                              [NSData dataWithBytes:&channelLayout length:sizeof(AudioChannelLayout)], AVChannelLayoutKey,
                                              nil];

    AVAssetWriterInput *writerAudioInput = [[AVAssetWriterInput alloc] initWithMediaType:AVMediaTypeAudio outputSettings:audioCompressionSettings];
    writerAudioInput.expectsMediaDataInRealTime = NO;
    if ([writer canAddInput:writerAudioInput]) {
        [writer addInput:writerAudioInput];
    }
    else {
        NSLog(@"ERROR ADDING AUDIO");
    }
    [writer startWriting];
    CMTime timeStamp = CMSampleBufferGetPresentationTimeStamp((__bridge CMSampleBufferRef)audioSamples[0]);
    [writer startSessionAtSourceTime:timeStamp];

    // Append the sample buffers in reverse order (last buffer first).
    while (audioSamples.count > 0) {
        if (writer && writerAudioInput.readyForMoreMediaData) {
            CMSampleBufferRef audioBufferRef = (__bridge CMSampleBufferRef)audioSamples[audioSamples.count - 1];
            [writerAudioInput appendSampleBuffer:audioBufferRef];
            [audioSamples removeObjectAtIndex:audioSamples.count - 1];
        }
        else {
            [NSThread sleepForTimeInterval:0.2];
        }
    }

    if (writer.status != AVAssetWriterStatusCancelled) {
        [writerAudioInput markAsFinished];
        [writer finishWritingWithCompletionHandler:^{
        }];
    }
}
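
I am wondering whether the problem is that the samples inside each buffer are still in forward order even though the buffers themselves are appended in reverse. Below is a rough sketch of the kind of per-buffer reversal I have in mind (assuming the 16-bit interleaved stereo PCM configured above; reversedPCMData and bytesPerFrame are just illustrative names, not code I am actually running):

#import <CoreMedia/CoreMedia.h>

// Rough sketch: copy the PCM bytes out of one CMSampleBuffer and reverse them frame by frame.
// bytesPerFrame would be 4 for 16-bit interleaved stereo (2 bytes x 2 channels).
static NSData *reversedPCMData(CMSampleBufferRef sampleBuffer, size_t bytesPerFrame)
{
    CMBlockBufferRef blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
    size_t length = CMBlockBufferGetDataLength(blockBuffer);

    NSMutableData *data = [NSMutableData dataWithLength:length];
    CMBlockBufferCopyDataBytes(blockBuffer, 0, length, data.mutableBytes);

    // Swap whole frames (both channels together) end for end so the audio plays backwards.
    uint8_t *bytes = data.mutableBytes;
    size_t frameCount = length / bytesPerFrame;
    for (size_t i = 0; i < frameCount / 2; i++) {
        size_t j = frameCount - 1 - i;
        uint8_t tmp[bytesPerFrame];
        memcpy(tmp, bytes + i * bytesPerFrame, bytesPerFrame);
        memcpy(bytes + i * bytesPerFrame, bytes + j * bytesPerFrame, bytesPerFrame);
        memcpy(bytes + j * bytesPerFrame, tmp, bytesPerFrame);
    }
    return data;
}

Would something along these lines, together with recomputed presentation timestamps for the appended buffers, be the right direction?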