Entrada / salida de baja latencia AudioQueue

Tengo dos AudioQueues de iOS: una entrada que alimenta las muestras directamente a una salida. Desafortunadamente, hay un efecto de eco que es bastante notable :(

¿Es posible hacer audio de baja latencia usando AudioQueues o realmente necesito usar AudioUnits? (He probado el marco Novocaine que usa AudioUnits y aquí la latencia es mucho más pequeña. También he notado que este marco parece usar menos recursos de CPU. Desafortunadamente, no pude usar este marco en mi proyecto Swift sin cambios importantes en él.)

Aquí hay algunos extractos de mi código, que se realiza principalmente en Swift, excepto las devoluciones de llamada que deben implementarse en C.

// 16 kHz, mono, native-endian packed 32-bit float linear PCM.
// One frame per packet, so mBytesPerPacket == mBytesPerFrame == 4.
private let audioStreamBasicDescription = AudioStreamBasicDescription(
    mSampleRate: 16000,
    mFormatID: AudioFormatID(kAudioFormatLinearPCM),
    mFormatFlags: AudioFormatFlags(kAudioFormatFlagsNativeFloatPacked),
    mBytesPerPacket: 4,
    mFramesPerPacket: 1,
    mBytesPerFrame: 4,
    mChannelsPerFrame: 1,
    mBitsPerChannel: 32,
    mReserved: 0)

// Buffer pool configuration. 256 bytes == 64 float frames == 4 ms at 16 kHz.
// NOTE(review): 80 buffers enqueued on the input queue can hold ~320 ms of
// audio; this pool depth looks like a likely contributor to the echo/latency
// described above — confirm by shrinking numberOfBuffers.
private let numberOfBuffers = 80
private let bufferSize: UInt32 = 256

// Flipped to true in start(); both callbacks do nothing while this is false.
private var active = false

private var inputQueue: AudioQueueRef = nil
private var outputQueue: AudioQueueRef = nil

// All allocated buffers, retained so they outlive the queues' use of them.
private var inputBuffers = [AudioQueueBufferRef]()
private var outputBuffers = [AudioQueueBufferRef]()
// Head of an intrusive singly linked free list of output buffers: each free
// buffer's mUserData points at the next free buffer (nil terminates).
private var headOfFreeOutputBuffers: AudioQueueBufferRef = nil

// callbacks implemented in Swift
/// Input-queue callback: copies the freshly captured buffer into a free
/// output buffer (if one is available), enqueues that copy for playback,
/// then re-arms the input buffer on the input queue.
/// NOTE(review): headOfFreeOutputBuffers is read/written here and in
/// audioQueueOutputCallback with no synchronization — presumably both run on
/// the queues' internal callback threads; verify this is actually safe.
private func audioQueueInputCallback(inputBuffer: AudioQueueBufferRef) {
    if active {
        if headOfFreeOutputBuffers != nil {
            // Pop the free-list head; the next free buffer is threaded
            // through mUserData (pushed back in audioQueueOutputCallback).
            let outputBuffer = headOfFreeOutputBuffers
            headOfFreeOutputBuffers = AudioQueueBufferRef(outputBuffer.memory.mUserData)
            outputBuffer.memory.mAudioDataByteSize = inputBuffer.memory.mAudioDataByteSize
            memcpy(outputBuffer.memory.mAudioData, inputBuffer.memory.mAudioData, Int(inputBuffer.memory.mAudioDataByteSize))
            assert(AudioQueueEnqueueBuffer(outputQueue, outputBuffer, 0, nil) == 0)
        } else {
            // No free output buffer: this block of captured audio is dropped.
            println(__FUNCTION__ + ": out-of-output-buffers!")
        }

        // Hand the input buffer back to the input queue for reuse.
        assert(AudioQueueEnqueueBuffer(inputQueue, inputBuffer, 0, nil) == 0)
    }
}

/// Output-queue callback: the queue has finished playing this buffer; push
/// it back onto the free list by threading the current head through its
/// mUserData field and making it the new head.
/// NOTE(review): when !active the buffer is never returned to the free list,
/// so it is effectively lost until the pool is rebuilt — confirm intended.
private func audioQueueOutputCallback(outputBuffer: AudioQueueBufferRef) {
    if active {
        outputBuffer.memory.mUserData = UnsafeMutablePointer<Void>(headOfFreeOutputBuffers)
        headOfFreeOutputBuffers = outputBuffer
    }
}

/// Configures the audio session, creates the input/output queues with their
/// C-bridged callbacks, builds the buffer pools, and starts both queues.
/// Errors from the session API are only logged via dumpError; queue-API
/// failures abort via assert (debug builds only).
func start() {
    var error: NSError?
    // Play-and-record session; prefer the queues' native 16 kHz and ~5 ms
    // hardware I/O buffers. Preferences only — the system may ignore them.
    audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord, withOptions: .allZeros, error: &error)
    dumpError(error, functionName: "AVAudioSessionCategoryPlayAndRecord")
    audioSession.setPreferredSampleRate(16000, error: &error)
    dumpError(error, functionName: "setPreferredSampleRate")
    audioSession.setPreferredIOBufferDuration(0.005, error: &error)
    dumpError(error, functionName: "setPreferredIOBufferDuration")

    audioSession.setActive(true, error: &error)
    dumpError(error, functionName: "setActive(true)")

    // Must not already be running; enable the callbacks before buffers flow.
    assert(active == false)
    active = true

    // cannot provide callbacks to AudioQueueNewInput/AudioQueueNewOutput from Swift and so need to interface C functions
    assert(MyAudioQueueConfigureInputQueueAndCallback(audioStreamBasicDescription, &inputQueue, audioQueueInputCallback) == 0)
    assert(MyAudioQueueConfigureOutputQueueAndCallback(audioStreamBasicDescription, &outputQueue, audioQueueOutputCallback) == 0)

    for (var i = 0; i < numberOfBuffers; i++) {
        var audioQueueBufferRef: AudioQueueBufferRef = nil
        // Input buffers are enqueued immediately so capture can fill them.
        assert(AudioQueueAllocateBuffer(inputQueue, bufferSize, &audioQueueBufferRef) == 0)
        assert(AudioQueueEnqueueBuffer(inputQueue, audioQueueBufferRef, 0, nil) == 0)
        inputBuffers.append(audioQueueBufferRef)

        // Output buffers start on the free list (linked through mUserData)
        // rather than enqueued — there is nothing to play yet.
        assert(AudioQueueAllocateBuffer(outputQueue, bufferSize, &audioQueueBufferRef) == 0)
        outputBuffers.append(audioQueueBufferRef)

        audioQueueBufferRef.memory.mUserData = UnsafeMutablePointer<Void>(headOfFreeOutputBuffers)
        headOfFreeOutputBuffers = audioQueueBufferRef
    }

    assert(AudioQueueStart(inputQueue, nil) == 0)
    assert(AudioQueueStart(outputQueue, nil) == 0)
}

Y luego mi código C para configurar las devoluciones de llamada a Swift:

// Input trampoline: recovers the retained Swift closure from the queue's
// user-data pointer and forwards the filled capture buffer to it. The
// timestamp/packet arguments are required by the callback signature but
// are not used here.
static void MyAudioQueueAudioInputCallback(void * inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer, const AudioTimeStamp * inStartTime,
                                   UInt32 inNumberPacketDescriptions, const AudioStreamPacketDescription * inPacketDescs) {
    typedef void (^BufferHandler)(AudioQueueBufferRef);
    BufferHandler handler = (__bridge BufferHandler)inUserData;
    handler(inBuffer);
}

// Output trampoline: recovers the retained Swift closure from the queue's
// user-data pointer and hands it the buffer that just finished playing.
static void MyAudioQueueAudioOutputCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer) {
    typedef void (^BufferHandler)(AudioQueueBufferRef);
    BufferHandler handler = (__bridge BufferHandler)inUserData;
    handler(inBuffer);
}

// Creates a recording AudioQueue whose C callback forwards each filled
// buffer to the given Swift closure. The closure is copied to the heap and
// ownership is transferred (+1) into the queue's user-data pointer, so it
// stays valid for the queue's lifetime; nothing here releases it.
OSStatus MyAudioQueueConfigureInputQueueAndCallback(AudioStreamBasicDescription inFormat, AudioQueueRef *inAQ, void(^callback)(AudioQueueBufferRef)) {
    void (^heapCopy)(AudioQueueBufferRef) = [callback copy];
    void *userData = (__bridge_retained void *)heapCopy;
    return AudioQueueNewInput(&inFormat, MyAudioQueueAudioInputCallback, userData, nil, nil, 0, inAQ);
}

// Creates a playback AudioQueue whose C callback forwards each spent buffer
// to the given Swift closure. As with the input variant, the copied closure
// is retained (+1) into the user-data pointer and never released here.
OSStatus MyAudioQueueConfigureOutputQueueAndCallback(AudioStreamBasicDescription inFormat, AudioQueueRef *inAQ, void(^callback)(AudioQueueBufferRef)) {
    void (^heapCopy)(AudioQueueBufferRef) = [callback copy];
    void *userData = (__bridge_retained void *)heapCopy;
    return AudioQueueNewOutput(&inFormat, MyAudioQueueAudioOutputCallback, userData, nil, nil, 0, inAQ);
}

Respuestas a la pregunta (2)

Su respuesta a la pregunta