If your goal is simply to end up with AVAudioPCMBuffers that contain audio in your desired format, you can convert the buffers returned in the tap block using AVAudioConverter. That way, you don't need to know or care what the inputNode's format is.
import AVFoundation

class MyBufferRecorder {

    private let audioEngine = AVAudioEngine()
    private var inputNode: AVAudioInputNode!
    private let audioQueue = DispatchQueue(label: "Audio Queue 5000")
    private var isRecording = false

    func startRecording() {
        if isRecording {
            return
        }
        isRecording = true

        // The input format is unknown until runtime, so we convert whatever
        // the hardware gives us to our desired format in the tap block
        inputNode = audioEngine.inputNode
        let inputFormat = inputNode.outputFormat(forBus: 0)

        // 9600 is somewhat arbitrary... the minimum seems to be 4800, the maximum 19200.
        // The exact value doesn't matter much because we never reuse it -- we query the
        // buffer delivered to the tap block for its true length instead.
        inputNode.installTap(onBus: 0, bufferSize: AVAudioFrameCount(9600), format: inputFormat) { [weak self] (buffer, time) in
            // [weak self] keeps the tap block from retaining the recorder
            guard let self = self else { return }
            if !self.isRecording {
                print("\nDEBUG - rejecting callback, not recording")
                return
            }
            // Hop off the tap's callback thread before doing any heavier work
            self.audioQueue.async {
                // Convert the recorded buffer to our preferred format
                let convertedPCMBuffer = AudioUtils.convertPCMBuffer(bufferToConvert: buffer,
                                                                     fromFormat: inputFormat,
                                                                     toFormat: AudioUtils.desiredFormat)
                // do something with the converted buffer
            }
        }

        do {
            // The tap must be installed before the engine is started
            try audioEngine.start()
        } catch {
            print("\nDEBUG - couldn't start engine!")
            return
        }
    }

    func stopRecording() {
        print("\nDEBUG - recording stopped")
        isRecording = false
        inputNode.removeTap(onBus: 0)
        audioEngine.stop()
    }
}
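One thing the class above doesn't handle: on iOS, the input node only reports a usable format once the app's audio session is configured for recording (and the user has granted microphone permission). Here's a minimal sketch of the kind of pre-flight setup you'd run once before startRecording() -- the category and mode choices are assumptions, so adjust them for your app:

import AVFoundation

// Hypothetical pre-flight for iOS; call once before startRecording().
// .record is an assumption -- use .playAndRecord if you also need playback.
func configureAudioSessionForRecording() throws {
    let session = AVAudioSession.sharedInstance()
    try session.setCategory(.record, mode: .default, options: [])
    try session.setActive(true)
}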
The conversion helper lives in a separate class:
import Foundation
import AVFoundation

// Assumes we want 16-bit, mono, 44100 Hz -- change to whatever you need
class AudioUtils {

    static let desiredFormat: AVAudioFormat! = AVAudioFormat(commonFormat: .pcmFormatInt16,
                                                             sampleRate: 44100.0,
                                                             channels: 1,
                                                             interleaved: false)

    // PCM <--> PCM
    static func convertPCMBuffer(bufferToConvert: AVAudioPCMBuffer, fromFormat: AVAudioFormat, toFormat: AVAudioFormat) -> AVAudioPCMBuffer {
        // Scale the capacity by the sample-rate ratio, otherwise the output buffer
        // is too small (or wastefully large) whenever the two rates differ
        let ratio = toFormat.sampleRate / fromFormat.sampleRate
        let capacity = AVAudioFrameCount((Double(bufferToConvert.frameLength) * ratio).rounded(.up))
        let convertedPCMBuffer = AVAudioPCMBuffer(pcmFormat: toFormat, frameCapacity: capacity)!
        var error: NSError? = nil
        // Hand the source buffer to the converter exactly once. During sample-rate
        // conversion the converter may call this block more than once per convert(),
        // and returning the same buffer again would duplicate samples.
        var haveSuppliedBuffer = false
        let inputBlock: AVAudioConverterInputBlock = { inNumPackets, outStatus in
            if haveSuppliedBuffer {
                outStatus.pointee = .noDataNow
                return nil
            }
            haveSuppliedBuffer = true
            outStatus.pointee = .haveData
            return bufferToConvert
        }
        // Note: creating a fresh converter per buffer is simple, but it throws away
        // the converter's internal state between buffers; for glitch-free rate
        // conversion across a stream, create one AVAudioConverter and reuse it.
        let formatConverter = AVAudioConverter(from: fromFormat, to: toFormat)!
        formatConverter.convert(to: convertedPCMBuffer, error: &error, withInputFrom: inputBlock)
        if let error = error {
            print("\nDEBUG - \(error.localizedDescription)")
        }
        return convertedPCMBuffer
    }
}
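For completeness, a minimal sketch of driving the recorder from somewhere like a view controller (the property name and call sites are just illustrative, not part of the classes above):

let recorder = MyBufferRecorder()
// e.g. from a record button
recorder.startRecording()
// ... and later, from a stop button
recorder.stopRecording()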
This is by no means production-ready code -- I'm also learning iOS audio -- so please, please let me know about any errors, best practices, or dangerous things going on in that code and I'll keep this answer updated.