//
//  playAudio.m
//  myhome
//
//  Created by user on 12-11-20.
//  Copyright (c) 2012 __MyCompanyName__. All rights reserved.
//

#import "playAudio.h"
#import "AppDelegate.h"

#define SAMPLE_RATE (8000)
#define WEBRTC_AUDIO_BUF_SIZE (160*3*2) // 30 ms of 16-bit samples (10 ms = 160 samples at 8 kHz)
@implementation playAudio {
AppDelegate * app; // cached app delegate; the render callback reads app.TalkSendAudio
}

@synthesize Abuf;     // G.711 ring-buffer storage (ABUF_NUM slots, malloc'd)
@synthesize Abuf_p;   // ring read cursor (slot index)
@synthesize Abuf_len; // number of filled slots still pending playback
@synthesize Version;
@synthesize vol;

// Bytes handed to the AudioQueue per buffer refill; set by the Start methods.
static int BYTES_PER_SAMPLES;
// Set by AudioEnd so callbacks still in flight stop re-enqueueing buffers.
static bool isAudioStop;
|
||
// Designated initializer: resets the ring-buffer cursor and pre-allocates
// the G.711 ring buffer (ABUF_NUM slots of 64 bytes each).
//
// NOTE(review): AudioPlayStartWithMode:/AudioStartWithRate:... malloc Abuf
// again without freeing this allocation, so this initial buffer leaks once
// a Start method runs — consider freeing before re-allocating there.
- (instancetype)init {
    // Assign state only after [super init] succeeds (the original wrote the
    // ivars unconditionally, before the superclass was initialized).
    if (self = [super init]) {
        Abuf_p = 0;
        Abuf_len = 0;
        Abuf = (Byte *)malloc(ABUF_NUM * 64);
    }
    return self;
}
|
||
|
||
/* max number of samples per frame (= 60 ms frame) */

// AudioQueue output callback. Fired each time the queue finishes playing a
// buffer; bridges back into the playAudio instance so it can refill and
// re-enqueue that buffer with the next frame of audio.
void buffer_callback(void *inUserData, AudioQueueRef inQueue, AudioQueueBufferRef inBuffer) {
    playAudio *player = (__bridge playAudio *)inUserData;
    [player audioQueueOutputWithQueue:inQueue queueBuffer:inBuffer];
}
|
||
|
||
// Decodes one G.711 mu-law byte into a 16-bit linear PCM sample
// (standard CCITT mu-law expansion; pure function, no instance state).
- (short)G711Decode_u_law_2_linear:(Byte)b {
    enum { kMuLawBias = 0x84 };              // mu-law bias (132)
    Byte u = ~b;                             // wire format is one's-complemented
    short magnitude = (short)(((u & 0x0F) << 3) + kMuLawBias);
    magnitude <<= ((unsigned)u & 0x70) >> 4; // 3-bit segment scales the mantissa
    // Top bit (after complement) selects the sign of the sample.
    if (u & 0x80) {
        return kMuLawBias - magnitude;
    }
    return magnitude - kMuLawBias;
}
|
||
|
||
// Decodes one G.711 A-law byte into a 16-bit linear PCM sample
// (standard CCITT A-law expansion; pure function, no instance state).
- (short)G711Decode_a_law_2_linear:(Byte)b {
    Byte a = b ^ 0x55;                        // A-law stores even bits inverted
    int magnitude = (a & 0x0F) << 4;          // 4-bit mantissa
    int segment = ((unsigned)a & 0x70) >> 4;  // 3-bit segment (chord)

    if (segment == 0) {
        magnitude += 8;
    } else if (segment == 1) {
        magnitude += 0x108;
    } else {
        magnitude = (magnitude + 0x108) << (segment - 1);
    }

    // In A-law a set sign bit means a positive sample.
    return (a & 0x80) ? magnitude : -magnitude;
}
|
||
|
||
// Refills one AudioQueue buffer and re-enqueues it.
//
// For the G.711 path this drains one 64-byte slot from the Abuf ring,
// expanding each mu-law byte to a 16-bit linear sample; when the ring is
// empty it outputs silence so the queue keeps running. Buffers are only
// re-enqueued while playback is active and the app is not sending talk-back
// audio. (For the iSAC path the buffer is enqueued untouched here.)
- (void)audioQueueOutputWithQueue:(AudioQueueRef)q queueBuffer:(AudioQueueBufferRef)buf {
    if (_AudiodecodeType == k711) {
        buf->mAudioDataByteSize = BYTES_PER_SAMPLES;
        short *samples = (short *)buf->mAudioData;
        const int sampleCount = BYTES_PER_SAMPLES / 2;

        if (Abuf_len > 0) {
            // Consume the slot at the read cursor (64 G.711 bytes per slot).
            int pos = Abuf_p * 64;
            for (int i = 0; i < sampleCount; i++) {
                samples[i] = [self G711Decode_u_law_2_linear:Abuf[pos + i]];
            }
            Abuf_len--;
            // BUGFIX: wrap with the ring's actual capacity instead of a
            // hard-coded 100, keeping the cursor in step with the
            // ABUF_NUM-slot allocation (no-op when ABUF_NUM == 100).
            Abuf_p = (Abuf_p + 1) % ABUF_NUM;
        } else {
            // Underrun: play silence.
            memset(samples, 0, BYTES_PER_SAMPLES);
        }
    }

    // While stopping, this callback can still fire; don't re-enqueue then,
    // nor while the app is capturing/sending talk audio.
    if (!isAudioStop && !app.TalkSendAudio) {
        OSStatus ret = AudioQueueEnqueueBuffer(q, buf, 0, NULL);
        if (ret) {
            NSLog(@"enqueue error:%d", (int)ret);
        }
    }
}
|
||
|
||
// Starts G.711 playback through a new AudioQueue output.
//
// Configures an 8 kHz, mono, 16-bit signed linear PCM queue fed by
// buffer_callback, primes NUM_BUFFERS buffers, and starts it.
// (mod is currently unused; kept for API compatibility.)
- (void)AudioPlayStartWithMode:(int)mod {
    app = (AppDelegate *)[[UIApplication sharedApplication] delegate];

    int channels = 1;  // mono output
    Abuf_p = 0;
    Abuf_len = 0;
    isAudioStop = NO;

    _AudiodecodeType = k711;
    // FIXME(review): the buffer allocated in -init (or by a previous Start)
    // is leaked here; free the old Abuf before re-allocating.
    Abuf = malloc(64 * ABUF_NUM);
    // Each 64-byte G.711 ring slot decodes to 64 16-bit samples.
    BYTES_PER_SAMPLES = 64 * 2 * channels;

    dataformat.mFormatID = kAudioFormatLinearPCM;
    dataformat.mSampleRate = SAMPLE_RATE;
    dataformat.mBitsPerChannel = 16;
    dataformat.mChannelsPerFrame = 1;
    dataformat.mFramesPerPacket = 1;
    dataformat.mBytesPerFrame = (dataformat.mBitsPerChannel / 8) * dataformat.mChannelsPerFrame;
    dataformat.mBytesPerPacket = dataformat.mBytesPerFrame * dataformat.mFramesPerPacket;
    dataformat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;

    // BUGFIX: the queue-creation result was previously ignored; on failure
    // the code went on to use an invalid queue.
    OSStatus err = AudioQueueNewOutput(&dataformat, buffer_callback, (__bridge void *)(self), NULL,
                                       kCFRunLoopCommonModes, 0, &queue);
    if (err) {
        NSLog(@"AudioQueueNewOutput error:%d", (int)err);
        return;
    }
    AudioQueueSetParameter(queue, kAudioQueueParam_Volume, 1.0);

    // Allocate each buffer and prime it via the refill callback.
    for (int i = 0; i < NUM_BUFFERS; i++) {
        err = AudioQueueAllocateBuffer(queue, BYTES_PER_SAMPLES, &buffers[i]);
        if (err) {
            NSLog(@"AudioQueueAllocateBuffer error:%d", (int)err);
            return;
        }
        [self audioQueueOutputWithQueue:queue queueBuffer:buffers[i]];
    }

    AudioQueueStart(queue, NULL);
}
|
||
|
||
// Starts playback for the WebRTC/iSAC path.
//
// NOTE: rate and samples are currently ignored — the queue is hard-coded to
// 8 kHz, mono, 16-bit signed linear PCM. buf_samples sizes the per-callback
// refill unit via BYTES_PER_SAMPLES; buffers themselves are sized for a
// 30 ms WebRTC frame (WEBRTC_AUDIO_BUF_SIZE).
//
// Returns 0 on success, -1 if the output queue could not be created,
// -2 if a queue buffer could not be allocated.
- (int)AudioStartWithRate:(int)rate setChannels:(int)channels setSamples:(int)samples setBuf_samples:(int)buf_samples setVersion:(int)v {
    app = (AppDelegate *)[[UIApplication sharedApplication] delegate];

    _AudiodecodeType = kIsac;
    Abuf_p = 0;
    Abuf_len = 0;
    // BUGFIX: reset the stop flag like AudioPlayStartWithMode: does;
    // otherwise a restart after AudioEnd never re-enqueues buffers and
    // plays silence.
    isAudioStop = NO;
    // FIXME(review): any previously allocated Abuf is leaked here; free it
    // before re-allocating. 160 bytes per ring slot (10 ms at 8 kHz).
    Abuf = malloc(160 * ABUF_NUM);

    BYTES_PER_SAMPLES = buf_samples * 2 * channels;

    dataformat.mFormatID = kAudioFormatLinearPCM;
    dataformat.mSampleRate = 8000;  // fixed; the rate parameter is not honored
    dataformat.mBitsPerChannel = 16;
    dataformat.mChannelsPerFrame = 1;
    dataformat.mFramesPerPacket = 1;
    dataformat.mBytesPerFrame = (dataformat.mBitsPerChannel / 8) * dataformat.mChannelsPerFrame;
    dataformat.mBytesPerPacket = dataformat.mBytesPerFrame * dataformat.mFramesPerPacket;
    dataformat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;

    NSLog(@"aoutinit1:%d:%d:%d:%d", rate, channels, samples, buf_samples);

    OSStatus err = AudioQueueNewOutput(&dataformat, buffer_callback, (__bridge void *)(self), NULL,
                                       kCFRunLoopCommonModes, 0, &queue);
    if (err) {
        return -1;
    }
    AudioQueueSetParameter(queue, kAudioQueueParam_Volume, 1.0);

    // Allocate buffers big enough for one 30 ms WebRTC frame and prime them.
    for (int i = 0; i < NUM_BUFFERS; i++) {
        err = AudioQueueAllocateBuffer(queue, WEBRTC_AUDIO_BUF_SIZE, &buffers[i]);
        if (err) {
            return -2;
        }
        [self audioQueueOutputWithQueue:queue queueBuffer:buffers[i]];
    }

    AudioQueueStart(queue, NULL);
    return 0;
}
|
||
|
||
// Sets the playback gain. v is expected in [0.0, 1.0]
// (kAudioQueueParam_Volume takes a Float32 in that range).
- (void)setVolume:(float)v {
    NSLog(@"setv:%.2f", v);
    // BUGFIX: pass the float through unmodified — the old (int)v cast
    // truncated every fractional volume (e.g. 0.5 became 0 = mute).
    AudioQueueSetParameter(queue, kAudioQueueParam_Volume, v);
}
|
||
|
||
// Stops playback immediately and tears down the queue and ring buffer.
- (void)AudioEnd {
    // Flag first: buffer_callback can still fire while the queue stops and
    // must not re-enqueue buffers during teardown.
    isAudioStop = YES;
    OSStatus err = AudioQueueStop(queue, TRUE);  // TRUE = stop synchronously
    Abuf_p = 0;
    Abuf_len = 0;
    for (NSInteger i = 0; i < NUM_BUFFERS; i++) {
        AudioQueueFreeBuffer(queue, buffers[i]);
    }
    AudioQueueDispose(queue, TRUE);
    free(Abuf);
    // BUGFIX: clear the dangling pointer so a second AudioEnd (or anything
    // touching Abuf after teardown) cannot double-free or use freed memory.
    Abuf = NULL;
    if (err) {
        NSLog(@"AudioQueue1End error");
    }
}
|
||
|
||
// Configures the shared AVAudioSession for simultaneous playback and
// recording, then activates it.
+ (void)initAudioSession {
    NSError *err = nil;
    // BUGFIX: check the results instead of passing error:nil and ignoring
    // them; the original also activated the session twice — once suffices.
    if (![[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord
                                                error:&err]) {
        NSLog(@"AVAudioSession setCategory error: %@", err);
    }
    if (![[AVAudioSession sharedInstance] setActive:YES error:&err]) {
        NSLog(@"AVAudioSession setActive error: %@", err);
    }
}
|
||
|
||
|
||
@end
|