5

我试图在 wav 文件中添加淡入淡出,然后使用 AVAssetExportSession 导出添加了淡入淡出效果的新文件。我见过的所有示例都是导出为 m4a 的。究竟能不能用 wav 或 aif 格式来执行此操作?

我得到的错误是:

AVAssetExportSessionStatusFailed Error Domain=AVFoundationErrorDomain Code=-11822 "Cannot Open" UserInfo=0x1f01c9f0 {NSLocalizedDescription=Cannot Open, NSLocalizedFailureReason=This media format is not supported.} 

我的代码如下所示

 // Build the input path <path>/<file> and the output path <path>/<basename>1t.wav.
 NSString *inpath = [path stringByAppendingFormat:@"/%@",file];

    NSString *ename = [file stringByDeletingPathExtension];
    // Append "1t" to the base name to mark the trimmed/faded copy.
    NSString *incname = [ename stringByAppendingString:@"1t"];
    NSString *outname = [incname stringByAppendingPathExtension:@"wav"];
    NSString *outpath = [path stringByAppendingFormat:@"/%@",outname];

    NSURL *urlpath = [NSURL fileURLWithPath:inpath];
    NSURL *urlout = [NSURL fileURLWithPath:outpath];



    // Ask for precise duration/timing so the trim and fade ranges are sample-accurate.
    NSDictionary *options = [NSDictionary dictionaryWithObject:[NSNumber numberWithBool:YES]
                                                        forKey:AVURLAssetPreferPreciseDurationAndTimingKey];
    // NOTE(review): under MRC this alloc'd asset is never released (leak); fine under ARC -- confirm which the project uses.
    AVURLAsset *anAsset = [[AVURLAsset alloc] initWithURL:urlpath options:options];


    // Bail out unless the sound file is at least 50 seconds long
    // (the trim below reads up to the 50-second mark).
    CMTime assetTime = [anAsset duration];
    Float64 duration = CMTimeGetSeconds(assetTime);
    if (duration < 50.0) return NO;

    // Get the first audio track; bail out if the asset has no audio.
    NSArray *tracks = [anAsset tracksWithMediaType:AVMediaTypeAudio];
    if ([tracks count] == 0) return NO;

    AVAssetTrack *track = [tracks objectAtIndex:0];

    // Trim time range: 20 seconds, starting 30 seconds into the asset (30s..50s).
    CMTime startTime = CMTimeMake(30, 1);
    CMTime stopTime = CMTimeMake(50, 1);
    CMTimeRange exportTimeRange = CMTimeRangeFromTimeToTime(startTime, stopTime);

    // Fade-in time range: the first 10 seconds of the trimmed span (30s..40s),
    // expressed in the asset's (untrimmed) timeline.
    CMTime startFadeInTime = startTime;
    CMTime endFadeInTime = CMTimeMake(40, 1);
    CMTimeRange fadeInTimeRange = CMTimeRangeFromTimeToTime(startFadeInTime,
                                                            endFadeInTime);

    // Set up an audio mix that ramps volume 0.0 -> 1.0 over the fade-in range.
    AVMutableAudioMix *exportAudioMix = [AVMutableAudioMix audioMix];
    AVMutableAudioMixInputParameters *exportAudioMixInputParameters =
    [AVMutableAudioMixInputParameters audioMixInputParametersWithTrack:track];
    [exportAudioMixInputParameters setVolumeRampFromStartVolume:0.0 toEndVolume:1.0 timeRange:fadeInTimeRange];

    exportAudioMix.inputParameters = [NSArray arrayWithObject:exportAudioMixInputParameters];

    // NOTE(review): AVAssetExportPresetPassthrough keeps the input format
    // unchanged and does not re-encode, so the audioMix set below is not
    // applied, and WAV/AIFF is not an accepted output container for an export
    // session -- this matches the -11822 "This media format is not supported"
    // failure reported for this code. An AVAssetReader/AVAssetWriter pair is
    // needed to decode to PCM, apply the fade, and write an uncompressed file.
    AVAssetExportSession *exportSession = [AVAssetExportSession
                                           exportSessionWithAsset:anAsset presetName:AVAssetExportPresetPassthrough];


    // Debug aid: list the export presets that are actually compatible with this asset.
    //NSArray *listof = [AVAssetExportSession exportPresetsCompatibleWithAsset:anAsset];
    //NSLog(@"LISTOF %@",listof);

    // Inspect the track's first format description to pick an output UTI and
    // file extension that match the source encoding.
    id desc = [track.formatDescriptions objectAtIndex:0];
    const AudioStreamBasicDescription *audioDesc = CMAudioFormatDescriptionGetStreamBasicDescription((CMAudioFormatDescriptionRef)desc);
    FourCharCode formatID = audioDesc->mFormatID;

    NSString *fileType = nil;
    NSString *ex = nil;

    switch (formatID) {

        case kAudioFormatLinearPCM:
        {
            // Big-endian PCM is conventionally AIFF; little-endian is WAV.
            UInt32 flags = audioDesc->mFormatFlags;
            if (flags & kAudioFormatFlagIsBigEndian) {
                fileType = @"public.aiff-audio";
                ex = @"aif";
            } else {
                fileType = @"com.microsoft.waveform-audio";
                ex = @"wav";
            }
        }
            break;

        case kAudioFormatMPEGLayer3:
            // NOTE(review): pairing the QuickTime-movie UTI with an "mp3"
            // extension looks inconsistent -- AVAssetExportSession cannot write
            // raw .mp3 files; presumably the mp3 stream is meant to be wrapped
            // in a movie container. Verify the intended container here.
            fileType = @"com.apple.quicktime-movie";
            ex = @"mp3";
            break;

        case kAudioFormatMPEG4AAC:
            fileType = @"com.apple.m4a-audio";
            ex = @"m4a";
            break;

        case kAudioFormatAppleLossless:
            fileType = @"com.apple.m4a-audio";
            ex = @"m4a";
            break;

        default:
            // NOTE(review): fileType/ex stay nil for any other format, so
            // outputFileType is set to nil below and the export will fail.
            // Consider returning NO (or choosing a fallback) here instead.
            break;
    }



    exportSession.outputFileType = fileType;
    exportSession.outputURL = urlout;

    //exportSession.outputFileType = AVFileTypeWAVE; // output file type
    exportSession.timeRange = exportTimeRange; // trim time range
    exportSession.audioMix = exportAudioMix; // fade-in audio mix (ignored by the Passthrough preset -- see note above)


    // Kick off the export; the completion handler runs on an arbitrary queue.
    [exportSession exportAsynchronouslyWithCompletionHandler:^{

        if (AVAssetExportSessionStatusCompleted == exportSession.status) {
            NSLog(@"AVAssetExportSessionStatusCompleted");
        } else if (AVAssetExportSessionStatusFailed == exportSession.status) {
            // a failure may happen because of an event out of your control
            // for example, an interruption like a phone call comming in
            // make sure and handle this case appropriately
            NSLog(@"AVAssetExportSessionStatusFailed %@",exportSession.error);
        } else {
            NSLog(@"Export Session Status: %d", exportSession.status);
        }
    }];

    // YES only means the export was started; the actual outcome is reported
    // asynchronously in the completion handler above.
    return YES;
    }
4

1 回答 1

2

您不能使用 AVAssetExportSession 来做到这一点,因为它的预设用法非常固定。AVAssetExportPresetPassthrough 预设会让输出保持与输入完全相同的格式,不会重新编码,也不会应用音频混合。

由于您的任务需要直接操作音频样本缓冲区,您应该使用 AVFoundation 提供的第二种方案:配对使用 AVAssetReader 和 AVAssetWriter。您可以在 Apple 开发者示例代码(例如 AVReaderWriterOSX)中找到合适的示例。虽然 iOS 上可用的 I/O 格式设置有所不同,但这种方法同样适用于 iOS。它应该能够把音频解压缩为 PCM,再写回未压缩的 .wav 文件。

于 2013-06-05T12:14:03.310 回答