2

When I use AudioQueue to record voice to a file, everything works fine.

In the MyInputBufferHandler function, I tried using

AudioQueueBufferRef->mAudioData

and I can get the raw data, but inside this MyInputBufferHandler function I can't call other objects, like oStream.

I want to get the AudioQueue buffer's raw data and send it over the internet. How can I do that?

mick
  • 35
  • 2
  • 7

3 Answers

0

See the answer to this question, which gives you raw data. You can then bundle it as NSData or whatever, zip and upload.

Community
  • 1
  • 1
buildsucceeded
  • 4,203
  • 4
  • 34
  • 72
0

You need to modify some code in myInputBufferHandler. I created an Objective-C object to adapt the C++ code from Apple's SpeakHere sample.

Please feel free to use it: MIP_StreamAudioRecorder.h

//
//  MIP_StreamAudioRecorder.h
//
//  Created by Dennies Chang on 12/10/3.
//  Copyright (c) 2012年 Dennies Chang. All rights reserved.
//

#import <Foundation/Foundation.h>
#include <AudioToolbox/AudioToolbox.h>
#include <Foundation/Foundation.h>
#include <libkern/OSAtomic.h>

#include "CAStreamBasicDescription.h"
#include "CAXException.h"

#define kNumberRecordBuffers    3

@protocol MIP_StreamAudioRecorderDelegate;

/// Records microphone audio with an AudioQueue and forwards each captured
/// buffer to its delegate as NSData, instead of writing it to a file.
/// Adapted from Apple's SpeakHere sample; must be compiled as Objective-C++
/// (.mm) because it relies on the CAStreamBasicDescription / CAXException
/// C++ helper classes.
/// NOTE(review): pre-ARC (MRC) code — the delegate is `assign`, so the
/// delegate object must outlive this recorder or clear the reference.
@interface MIP_StreamAudioRecorder : NSObject {
    CAStreamBasicDescription    mRecordFormat;                  // stream format negotiated with the queue
    AudioQueueRef               mQueue;                         // the recording audio queue
    AudioQueueBufferRef         mBuffers[kNumberRecordBuffers]; // reusable capture buffers, re-enqueued by the callback
    BOOL                        mIsRunning;                     // YES between startRecord and stopRecord

    id <MIP_StreamAudioRecorderDelegate> delegate;
}
@property (nonatomic, assign) id <MIP_StreamAudioRecorderDelegate> delegate;
@property (nonatomic, readonly) BOOL mIsRunning;

// Fills mRecordFormat for the given AudioFormatID (e.g. kAudioFormatLinearPCM).
- (void)SetupAudioFormat:(UInt32) inFormatID;
// Creates the queue, allocates/enqueues buffers, and starts recording.
- (void)startRecord;
// Stops the queue synchronously and disposes it.
- (void)stopRecord;
// Returns the buffer size in bytes needed to hold `second` seconds of audio
// in `format` (0 on error).
- (int)computeRecordBufferSize:(AudioStreamBasicDescription *)format duration:(float)second;

@end


/// Delegate callback delivering each captured audio buffer as raw bytes.
@protocol MIP_StreamAudioRecorderDelegate <NSObject>
@optional
// Called from the AudioQueue's callback context with a copy of the
// buffer's raw audio bytes.
- (void)gotAudioData:(NSData *)audioData;

@end

And .mm file : MIP_StreamAudioRecorder.mm

//
//  MIP_StreamAudioRecorder.mm
//
//  Created by Dennies Chang on 12/10/3.
//  Copyright (c) 2012年 Dennies Chang. All rights reserved.
//


#import "MIP_StreamAudioRecorder.h"

@implementation MIP_StreamAudioRecorder
@synthesize delegate;
@synthesize mIsRunning;

- (id)init {
    self = [super init];

    return self;
}

- (void)dealloc {
    // MRC (pre-ARC) class: the delegate is `assign`, so there is nothing to
    // release here. NOTE(review): if stopRecord was never called, mQueue is
    // not disposed — callers should stopRecord before releasing.
    [super dealloc];
}

/// Fills mRecordFormat for the requested AudioFormatID.
/// The hardware sample rate and input channel count are queried first, but
/// for linear PCM they are deliberately overridden with a fixed mono,
/// 16-bit, 8 kHz, packed little-endian layout suited to network streaming.
- (void)SetupAudioFormat:(UInt32) inFormatID {
    memset(&mRecordFormat, 0, sizeof(mRecordFormat));

    UInt32 size = sizeof(mRecordFormat.mSampleRate);
    XThrowIfError(AudioSessionGetProperty(  kAudioSessionProperty_CurrentHardwareSampleRate,
                                          &size,
                                          &mRecordFormat.mSampleRate), "couldn't get hardware sample rate");

    size = sizeof(mRecordFormat.mChannelsPerFrame);
    XThrowIfError(AudioSessionGetProperty(  kAudioSessionProperty_CurrentHardwareInputNumberChannels,
                                      &size,
                                      &mRecordFormat.mChannelsPerFrame), "couldn't get input channel count");

    mRecordFormat.mFormatID = inFormatID;
    if (inFormatID == kAudioFormatLinearPCM)
    {
        // For PCM, override the hardware values: signed 16-bit little-endian,
        // mono, 8 kHz — change these to match your streaming requirements.
        mRecordFormat.mChannelsPerFrame = 1;
        mRecordFormat.mSampleRate = 8000;

        mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        mRecordFormat.mBitsPerChannel = 16;
        mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel / 8) * mRecordFormat.mChannelsPerFrame;
        mRecordFormat.mFramesPerPacket = 1;
    }
}

/// Computes the buffer size in bytes needed to hold `second` seconds of audio
/// described by `format`. Returns 0 if the queue property query fails.
- (int)computeRecordBufferSize:(AudioStreamBasicDescription *)format duration:(float)second {
    int packets, frames, bytes = 0;
    try {
        frames = (int)ceil(second * format->mSampleRate);

        if (format->mBytesPerFrame > 0)
            bytes = frames * format->mBytesPerFrame;
        else {
            UInt32 maxPacketSize;
            if (format->mBytesPerPacket > 0)
                maxPacketSize = format->mBytesPerPacket;    // constant packet size
            else {
                // VBR: ask the queue for its worst-case packet size.
                UInt32 propertySize = sizeof(maxPacketSize);
                XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_MaximumOutputPacketSize, &maxPacketSize,
                                                    &propertySize), "couldn't get queue's maximum output packet size");
            }
            if (format->mFramesPerPacket > 0)
                packets = frames / format->mFramesPerPacket;
            else
                packets = frames;   // worst-case scenario: 1 frame in a packet
            if (packets == 0)       // sanity check
                packets = 1;
            // FIX: this line always executes; the original indentation made it
            // look conditional on the sanity check above.
            bytes = packets * maxPacketSize;
        }
    } catch (CAXException e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        return 0;
    }   
    return bytes;
}

/// AudioQueue input callback (runs on the queue's internal thread).
/// Copies the captured bytes into an NSData and hands it to the delegate,
/// then re-enqueues the buffer while recording is still running.
void MyInputBufferHandler(  void *                              inUserData,
                                  AudioQueueRef                     inAQ,
                                  AudioQueueBufferRef                   inBuffer,
                                  const AudioTimeStamp *                inStartTime,
                                  UInt32                                inNumPackets,
                                  const AudioStreamPacketDescription*   inPacketDesc)
{

    MIP_StreamAudioRecorder *THIS = (MIP_StreamAudioRecorder *)inUserData;
    try {
        if (inNumPackets > 0) {
            // Hand the captured bytes to the delegate instead of writing them
            // to a file. FIX: only allocate the NSData when the delegate can
            // actually receive it (the original allocated unconditionally).
            if (THIS.delegate && [THIS.delegate respondsToSelector:@selector(gotAudioData:)]) {
                NSData *data = [[NSData alloc] initWithBytes:inBuffer->mAudioData
                                                      length:inBuffer->mAudioDataByteSize];
                [THIS.delegate gotAudioData:data];
                [data release];
            }
        }

        // if we're not stopping, re-enqueue the buffer so that it gets filled again
        if (THIS->mIsRunning)
            XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
    } catch (CAXException e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
}

/// Creates the input queue, allocates and enqueues the capture buffers,
/// and starts recording mono 16-bit 8 kHz PCM.
- (void)startRecord {
    int i, bufferByteSize;

    try {
        [self SetupAudioFormat:kAudioFormatLinearPCM];

        // create the queue
        XThrowIfError(AudioQueueNewInput(
                                         &mRecordFormat,
                                         MyInputBufferHandler,
                                         self /* userData */,
                                         NULL /* run loop */, NULL /* run loop mode */,
                                         0 /* flags */, &mQueue), "AudioQueueNewInput failed");

        // get the record format back from the queue's audio converter --
        // the file may require a more specific stream description than was necessary to create the encoder.

        UInt32 size = sizeof(mRecordFormat);
        XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
                                            &mRecordFormat, &size), "couldn't get queue's format");


        // allocate and enqueue buffers
        // NOTE(review): kBufferDurationSeconds is defined elsewhere in the project.
        bufferByteSize = [self computeRecordBufferSize:&mRecordFormat duration:kBufferDurationSeconds]; // enough bytes for half a second
        for (i = 0; i < kNumberRecordBuffers; ++i) {
            XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
                          "AudioQueueAllocateBuffer failed");
            XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
                          "AudioQueueEnqueueBuffer failed");
        }
        // start the queue
        mIsRunning = true;
        XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
    }
    catch (CAXException &e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
    catch (...) {
        fprintf(stderr, "An unknown error occurred\n");
    }
}

/// Stops the queue synchronously (no further callbacks fire) and disposes it.
- (void)stopRecord {
    // FIX: clear the running flag (the original never did), so the readonly
    // mIsRunning property reports the true state and the callback stops
    // re-enqueuing buffers during teardown.
    mIsRunning = false;
    XThrowIfError(AudioQueueStop(mQueue, true), "AudioQueueStop failed");
    AudioQueueDispose(mQueue, true);
}

@end

Please note that you should change the sample rate and the related conditions as needed; I set it to mono (1 channel), 16-bit, 8 kHz for recording.

You can receive the raw data in the Objective-C object that implements MIP_StreamAudioRecorderDelegate, and then send it over a network channel or save it to a file.

Best Regard, Dennies.

Dennies Chang
  • 564
  • 5
  • 15
  • Hi Dennies! Can you please explain a bit how this works!? Can have the data/volume of a stream while recording?? thanks! – Frade Oct 25 '12 at 11:57
0

You need to tell the AudioQueue the format in which you want to receive data; refer to the following function:

http://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/CoreAudioDataTypesRef/Reference/reference.html

One example,

FillOutASBDForLPCM (sRecordFormat,
                    16000,
                    1,
                    8,
                    8,
                    false,
                    false
                    );
Amitg2k12
  • 3,765
  • 10
  • 48
  • 97