We are using Audio Units on iOS as part of a VoIP application. We can already play audio successfully, but now we want to control the playback speed and pitch in real time. The live audio bytes arrive from a UDP socket.
Here is the Audio Unit initialization code used for playback:
init(_ client: UDPClient, _ tcpClient: TCPClient, _ opusHelper: OpusHelper, _ tvTemp: UILabel) {
    super.init()

    let success = initCircularBuffer(&circularBuffer, 4096)
    if success {
        print("Circular buffer init was successful")
    } else {
        print("Circular buffer init not successful")
    }

    self.tvTemp = tvTemp
    self.opusHelper = opusHelper
    monotonicTimer = MonotonicTimer()
    udpClient = client
    self.tcpClient = tcpClient

    var desc = AudioComponentDescription(
        componentType: OSType(kAudioUnitType_Output),
        componentSubType: OSType(kAudioUnitSubType_VoiceProcessingIO),
        componentManufacturer: OSType(kAudioUnitManufacturer_Apple),
        componentFlags: 0,
        componentFlagsMask: 0
    )

    let inputComponent = AudioComponentFindNext(nil, &desc)

    status = AudioComponentInstanceNew(inputComponent!, &audioUnit)
    if status != noErr {
        print("Audio component instance new error \(status!)")
    }

    // Enable IO for playback on the output bus
    var flag: UInt32 = 1
    status = AudioUnitSetProperty(
        audioUnit!,
        kAudioOutputUnitProperty_EnableIO,
        kAudioUnitScope_Output,
        kOutputBus,
        &flag,
        MemoryLayoutStride.SizeOf32(flag)
    )
    if status != noErr {
        print("Enable IO for playback error \(status!)")
    }

    // 48 kHz, mono, 16-bit integer PCM stream format on the output bus's input scope
    var ioFormat = CAStreamBasicDescription(
        sampleRate: 48000.0,
        numChannels: 1,
        pcmf: .int16,
        isInterleaved: false
    )
    status = AudioUnitSetProperty(
        audioUnit!,
        AudioUnitPropertyID(kAudioUnitProperty_StreamFormat),
        AudioUnitScope(kAudioUnitScope_Input),
        0,
        &ioFormat!,
        MemoryLayoutStride.SizeOf32(ioFormat)
    )
    if status != noErr {
        print("Unable to set stream format input to output \(status!)")
    }

    // Render callback that pulls decoded audio out of the circular buffer
    var playbackCallback = AURenderCallbackStruct(
        inputProc: AudioController_PlaybackCallback,
        inputProcRefCon: UnsafeMutableRawPointer(Unmanaged.passUnretained(self).toOpaque())
    )
    status = AudioUnitSetProperty(
        audioUnit!,
        AudioUnitPropertyID(kAudioUnitProperty_SetRenderCallback),
        AudioUnitScope(kAudioUnitScope_Input),
        kOutputBus,
        &playbackCallback,
        MemoryLayout<AURenderCallbackStruct>.size.ui
    )
    if status != noErr {
        print("Failed to set playback render callback \(status!)")
    }

    status = AudioUnitInitialize(audioUnit!)
    if status != noErr {
        print("Failed to initialize audio unit \(status!)")
    }
}
We put the audio data arriving from UDP into a TPCircularBuffer:
let decodedData = self.opusHelper?.decodeStream(of: self.jitterGet.buffer)
// decodedData holds 16-bit PCM samples, so the byte count is sample count * 2
let _ = TPCircularBufferProduceBytes(&self.circularBuffer, decodedData, UInt32(decodedData!.count * 2))
This is how we play back the audio:
func performPlayback(
    _ ioActionFlags: UnsafeMutablePointer<AudioUnitRenderActionFlags>,
    inTimeStamp: UnsafePointer<AudioTimeStamp>,
    inBufNumber: UInt32,
    inNumberFrames: UInt32,
    ioData: UnsafeMutablePointer<AudioBufferList>
) -> OSStatus {
    let buffer = ioData[0].mBuffers
    let bytesToCopy = ioData[0].mBuffers.mDataByteSize
    var bufferTail: UnsafeMutableRawPointer?
    // print("BYTES TO COPY: \(bytesToCopy)")

    // Pull as much decoded audio from the circular buffer as the output asks for
    self.availableBytes = 0
    bufferTail = TPCircularBufferTail(&self.circularBuffer, &self.availableBytes)
    bytesToWrite = min(bytesToCopy, self.availableBytes)
    print("BYTES TO WRITE: \(bytesToWrite)")

    if bytesToWrite >= 3840 {
        memcpy(buffer.mData, bufferTail, Int(bytesToWrite))
        TPCircularBufferConsume(&self.circularBuffer, bytesToWrite)
    } else {
        // Not enough data buffered yet: output silence for this render cycle
        let silence = [Int16](repeating: 0, count: Int(bytesToCopy))
        memcpy(buffer.mData, silence, Int(bytesToCopy))
    }
    return noErr
}
Now we want to change the SPEED and PITCH of the played audio. Could you guide us on how to integrate Varispeed and TimePitch into our current setup? We found that these audio units might help us.
https://stackoverflow.com/a/59061396/12020007 @hotpaw2 Your answer pointed us in the right direction. Now we are looking to change the speed and pitch of playback.
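For reference, below is a rough, untested sketch of the wiring we are considering, not working code from our project: our existing render callback would feed a kAudioUnitType_FormatConverter node (kAudioUnitSubType_NewTimePitch, or alternatively kAudioUnitSubType_Varispeed), whose output connects to the VoiceProcessingIO unit through an AUGraph. All variable names here are placeholders, and we are not sure about the stream format the converter expects (it may require Float32 rather than our Int16 mono format), so an extra format conversion step might be needed.

import AudioToolbox

var graph: AUGraph?
var status = NewAUGraph(&graph)

// Output node: same VoiceProcessingIO description as in our init code above
var ioDesc = AudioComponentDescription(
    componentType: OSType(kAudioUnitType_Output),
    componentSubType: OSType(kAudioUnitSubType_VoiceProcessingIO),
    componentManufacturer: OSType(kAudioUnitManufacturer_Apple),
    componentFlags: 0,
    componentFlagsMask: 0
)
var ioNode = AUNode()
status = AUGraphAddNode(graph!, &ioDesc, &ioNode)

// Format-converter node: NewTimePitch (or kAudioUnitSubType_Varispeed)
var pitchDesc = AudioComponentDescription(
    componentType: OSType(kAudioUnitType_FormatConverter),
    componentSubType: OSType(kAudioUnitSubType_NewTimePitch),
    componentManufacturer: OSType(kAudioUnitManufacturer_Apple),
    componentFlags: 0,
    componentFlagsMask: 0
)
var pitchNode = AUNode()
status = AUGraphAddNode(graph!, &pitchDesc, &pitchNode)

status = AUGraphOpen(graph!)

var ioUnit: AudioUnit?
var pitchUnit: AudioUnit?
status = AUGraphNodeInfo(graph!, ioNode, nil, &ioUnit)
status = AUGraphNodeInfo(graph!, pitchNode, nil, &pitchUnit)

// Our existing render callback would now feed the converter node
// instead of being set directly on the output unit.
// (The stream format on the converter's input would presumably also
// need to be set explicitly; omitted here.)
var playbackCallback = AURenderCallbackStruct(
    inputProc: AudioController_PlaybackCallback,
    inputProcRefCon: UnsafeMutableRawPointer(Unmanaged.passUnretained(self).toOpaque())
)
status = AUGraphSetNodeInputCallback(graph!, pitchNode, 0, &playbackCallback)

// Converter output -> VoiceProcessingIO input on element 0 (speaker path)
status = AUGraphConnectNodeInput(graph!, pitchNode, 0, ioNode, 0)

status = AUGraphInitialize(graph!)
status = AUGraphStart(graph!)

// Runtime control of speed / pitch on the converter unit
AudioUnitSetParameter(pitchUnit!, kNewTimePitchParam_Rate, kAudioUnitScope_Global, 0, 1.5, 0)   // 1.5x speed
AudioUnitSetParameter(pitchUnit!, kNewTimePitchParam_Pitch, kAudioUnitScope_Global, 0, 300, 0)  // +300 cents
// With Varispeed, the parameters would instead be
// kVarispeedParam_PlaybackRate / kVarispeedParam_PlaybackCents.

As far as we understand, NewTimePitch lets rate and pitch be changed independently, while Varispeed couples them (changing the rate also shifts the pitch). Is this the right direction for our configuration, or should the converter be connected differently?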