fix: do not play audio while recording
This commit is contained in:
parent
22887310c0
commit
cafe132afc
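In short: audio receive and playback are now gated on state.isRecordingAudio across ImageTransmissionLogic, TalkViewNativeDecodeLogic, and TalkViewLogic. Incoming G711 frames are neither buffered nor played while the microphone is recording, _playAudioData gains a matching not-recording guard, the first-frame delay-compensation block is dropped from the G711 dispatch path, and the remaining hunks are formatter-style line wrapping.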
@@ -110,22 +110,8 @@ class ImageTransmissionLogic extends BaseGetXController {
     // Check the data type and dispatch accordingly
     switch (contentType) {
       case TalkData_ContentTypeE.G711:
-        // Record the start time when the first audio frame arrives
-        if (_isFirstAudioFrame) {
-          _startAudioTime = currentTime;
-          _isFirstAudioFrame = false;
-        }
-
-        // Compute the audio delay
-        final expectedTime = _startAudioTime + talkData.durationMs;
-        final audioDelay = currentTime - expectedTime;
-
-        // If the delay is too large, clear the buffer and play immediately
-        if (audioDelay > 500) {
-          state.audioBuffer.clear();
-          if (state.isOpenVoice.value) {
-            _playAudioFrames();
-          }
+        // Do not buffer or play audio while voice is off and recording is active
+        if (!state.isOpenVoice.value && state.isRecordingAudio.value) {
+          return;
         }
         if (state.audioBuffer.length >= audioBufferSize) {
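The new guard above is the core of the fix: while the microphone is recording and voice output is off, incoming G711 frames are dropped instead of buffered. A minimal runnable sketch of the same gating logic, with plain bools standing in for the controller's RxBool observables (AudioGateDemo and handleG711Frame are hypothetical names, not the app's API):

class AudioGateDemo {
  bool isOpenVoice = false;
  bool isRecordingAudio = true;
  final List<int> audioBuffer = [];

  /// Returns true when the incoming frame was dropped.
  bool handleG711Frame(List<int> frame) {
    // Voice off while the mic records: neither buffer nor play.
    if (!isOpenVoice && isRecordingAudio) return true;
    audioBuffer.addAll(frame);
    return false;
  }
}

void main() {
  final gate = AudioGateDemo();
  print(gate.handleG711Frame([1, 2, 3])); // true: dropped while recording
  gate
    ..isRecordingAudio = false
    ..isOpenVoice = true;
  print(gate.handleG711Frame([4, 5, 6])); // false: buffered for playback
}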
@@ -212,7 +198,8 @@ class ImageTransmissionLogic extends BaseGetXController {

   /// Play audio data
   void _playAudioData(TalkData talkData) async {
-    if (state.isOpenVoice.value) {
+    if (state.isOpenVoice.value &&
+        state.isRecordingAudio.value == false) {
       final list =
           G711().decodeAndDenoise(talkData.content, true, 8000, 300, 150);
       // Convert the PCM data to PcmArrayInt16
@@ -565,7 +552,6 @@ class ImageTransmissionLogic extends BaseGetXController {

   // Start recording
   Future<void> startProcessingAudio() async {
-
     try {
       if (await state.voiceProcessor?.hasRecordAudioPermission() ?? false) {
         await state.voiceProcessor?.start(state.frameLength, state.sampleRate);
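startProcessingAudio starts capture only after a runtime permission check. A sketch of that flow under an assumed plugin-like interface (VoiceProcessorLike and _FakeProcessor are hypothetical; the method names mirror the calls in the diff, and 512/16000 are placeholder values for state.frameLength and state.sampleRate):

abstract class VoiceProcessorLike {
  Future<bool> hasRecordAudioPermission();
  Future<void> start(int frameLength, int sampleRate);
}

Future<void> startProcessingAudio(VoiceProcessorLike? voiceProcessor) async {
  try {
    // Begin capture only when the record-audio permission is granted.
    if (await voiceProcessor?.hasRecordAudioPermission() ?? false) {
      await voiceProcessor?.start(512, 16000);
    }
  } catch (e) {
    // The real controller logs through AppLog; print keeps the sketch standalone.
    print('failed to start recording: $e');
  }
}

class _FakeProcessor implements VoiceProcessorLike {
  @override
  Future<bool> hasRecordAudioPermission() async => true;

  @override
  Future<void> start(int frameLength, int sampleRate) async {
    print('capture started: $frameLength samples @ $sampleRate Hz');
  }
}

void main() => startProcessingAudio(_FakeProcessor());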
@@ -656,12 +642,14 @@ class ImageTransmissionLogic extends BaseGetXController {
     List<int> encodedData = G711Tool.encode(applyGain, 0); // 0 means A-law
     _bufferedAudioFrames.addAll(encodedData);

     // Start the periodic sender (only once)
-    if (_startProcessingAudioTimer == null && _bufferedAudioFrames.length > chunkSize) {
-      _startProcessingAudioTimer = Timer.periodic(Duration(milliseconds: intervalMs), _sendAudioChunk);
+    if (_startProcessingAudioTimer == null &&
+        _bufferedAudioFrames.length > chunkSize) {
+      _startProcessingAudioTimer =
+          Timer.periodic(Duration(milliseconds: intervalMs), _sendAudioChunk);
     }
   }

   // Error listener
   void _onError(VoiceProcessorException error) {
     AppLog.log(error.message!);
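The lazily created Timer.periodic above drives chunked sending of the encoded audio. A self-contained sketch of that pattern (ChunkedAudioSender and the send callback are hypothetical names; chunkSize and intervalMs mirror the fields referenced in the diff):

import 'dart:async';

class ChunkedAudioSender {
  ChunkedAudioSender({
    required this.chunkSize,
    required this.intervalMs,
    required this.send,
  });

  final int chunkSize;
  final int intervalMs;
  final void Function(List<int> chunk) send;

  final List<int> _bufferedAudioFrames = [];
  Timer? _timer;

  void addEncoded(List<int> encoded) {
    _bufferedAudioFrames.addAll(encoded);
    // Start the periodic sender once, only after enough data has buffered.
    if (_timer == null && _bufferedAudioFrames.length > chunkSize) {
      _timer = Timer.periodic(Duration(milliseconds: intervalMs), _sendChunk);
    }
  }

  void _sendChunk(Timer _) {
    if (_bufferedAudioFrames.length < chunkSize) return; // underrun: wait
    send(_bufferedAudioFrames.sublist(0, chunkSize));
    _bufferedAudioFrames.removeRange(0, chunkSize);
  }

  void stop() {
    _timer?.cancel();
    _timer = null;
  }
}

void main() {
  final sender = ChunkedAudioSender(
    chunkSize: 320, // e.g. 40 ms of 8 kHz A-law
    intervalMs: 40,
    send: (chunk) => print('sent ${chunk.length} bytes'),
  );
  sender.addEncoded(List<int>.filled(400, 0));
  // Give the timer a couple of ticks, then stop.
  Future.delayed(const Duration(milliseconds: 100), sender.stop);
}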
@@ -493,7 +493,9 @@ class TalkViewNativeDecodeLogic extends BaseGetXController {

   /// Play audio data
   void _playAudioData(TalkData talkData) async {
-    if (state.isOpenVoice.value && state.isLoading.isFalse) {
+    if (state.isOpenVoice.value &&
+        state.isLoading.isFalse &&
+        state.isRecordingAudio.value == false) {
       List<int> encodedData = G711Tool.decode(talkData.content, 0); // 0 means A-law
       // Convert the PCM data to PcmArrayInt16
       final PcmArrayInt16 fromList = PcmArrayInt16.fromList(encodedData);
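In TalkViewNativeDecodeLogic the playback gate now has three conditions: voice enabled, loading finished, microphone idle. A compact sketch of the predicate (shouldPlay is a hypothetical helper; the real code reads the GetX observables inline):

bool shouldPlay({
  required bool isOpenVoice,
  required bool isLoading,
  required bool isRecordingAudio,
}) {
  // Play only when voice is on, loading is done, and the mic is idle.
  return isOpenVoice && !isLoading && !isRecordingAudio;
}

void main() {
  print(shouldPlay(
      isOpenVoice: true, isLoading: false, isRecordingAudio: true)); // false: mic busy
  print(shouldPlay(
      isOpenVoice: true, isLoading: false, isRecordingAudio: false)); // true
  print(shouldPlay(
      isOpenVoice: false, isLoading: false, isRecordingAudio: false)); // false: voice off
}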
@@ -970,7 +972,8 @@ class TalkViewNativeDecodeLogic extends BaseGetXController {
     // Check the data type and dispatch accordingly
     switch (contentType) {
       case TalkData_ContentTypeE.G711:
-        if (!state.isOpenVoice.value) {
+        // Do not buffer or play audio while voice is off and recording is active
+        if (!state.isOpenVoice.value && state.isRecordingAudio.value) {
           return;
         }
         if (state.audioBuffer.length >= audioBufferSize) {
@@ -109,22 +109,8 @@ class TalkViewLogic extends BaseGetXController {
     // Check the data type and dispatch accordingly
     switch (contentType) {
       case TalkData_ContentTypeE.G711:
-        // Record the start time when the first audio frame arrives
-        if (_isFirstAudioFrame) {
-          _startAudioTime = currentTime;
-          _isFirstAudioFrame = false;
-        }
-
-        // Compute the audio delay
-        final expectedTime = _startAudioTime + talkData.durationMs;
-        final audioDelay = currentTime - expectedTime;
-
-        // If the delay is too large, clear the buffer and play immediately
-        if (audioDelay > 500) {
-          state.audioBuffer.clear();
-          if (state.isOpenVoice.value) {
-            _playAudioFrames();
-          }
+        // Do not buffer or play audio while voice is off and recording is active
+        if (!state.isOpenVoice.value && state.isRecordingAudio.value) {
+          return;
         }
         if (state.audioBuffer.length >= audioBufferSize) {
@@ -388,9 +374,11 @@ class TalkViewLogic extends BaseGetXController {
     if (state.videoBuffer.isNotEmpty) {
       final TalkData oldestFrame = state.videoBuffer.removeAt(0);
       if (oldestFrame.content.isNotEmpty) {
-        state.listData.value = Uint8List.fromList(oldestFrame.content); // Back up the raw data
+        state.listData.value =
+            Uint8List.fromList(oldestFrame.content); // Back up the raw data
         final int decodeStart = DateTime.now().millisecondsSinceEpoch;
-        decodeImageFromList(Uint8List.fromList(oldestFrame.content)).then((ui.Image img) {
+        decodeImageFromList(Uint8List.fromList(oldestFrame.content))
+            .then((ui.Image img) {
           final int decodeEnd = DateTime.now().millisecondsSinceEpoch;
           state.currentImage.value = img;
           _renderedFrameCount++;
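The reflowed decode call also brackets decodeImageFromList with millisecond timestamps to measure per-frame decode latency. A pure-Dart sketch of that timing pattern (fakeDecode is a hypothetical stand-in for Flutter's decodeImageFromList, which resolves to a ui.Image):

import 'dart:async';

Future<List<int>> fakeDecode(List<int> bytes) =>
    Future.delayed(const Duration(milliseconds: 16), () => bytes);

void main() {
  final int decodeStart = DateTime.now().millisecondsSinceEpoch;
  fakeDecode([0xFF, 0xD8, 0xFF]).then((img) {
    final int decodeEnd = DateTime.now().millisecondsSinceEpoch;
    // Per-frame decode latency, as measured in the controller.
    print('decode took ${decodeEnd - decodeStart} ms');
  });
}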
@@ -524,7 +512,7 @@ class TalkViewLogic extends BaseGetXController {

     final lockPeerId = StartChartManage().lockPeerId;
     final LockListInfoGroupEntity? lockListInfoGroupEntity =
-            await Storage.getLockMainListData();
+        await Storage.getLockMainListData();
     if (lockListInfoGroupEntity != null) {
       lockListInfoGroupEntity!.groupList?.forEach((element) {
         final lockList = element.lockList;
@@ -562,7 +550,6 @@ class TalkViewLogic extends BaseGetXController {

   // Start recording
   Future<void> startProcessingAudio() async {
-
     try {
       if (await state.voiceProcessor?.hasRecordAudioPermission() ?? false) {
         await state.voiceProcessor?.start(state.frameLength, state.sampleRate);
@@ -653,10 +640,11 @@ class TalkViewLogic extends BaseGetXController {
     List<int> encodedData = G711Tool.encode(applyGain, 0); // 0 means A-law
     _bufferedAudioFrames.addAll(encodedData);

-
     // Start the periodic sender (only once)
-    if (_startProcessingAudioTimer == null && _bufferedAudioFrames.length > chunkSize) {
-      _startProcessingAudioTimer = Timer.periodic(Duration(milliseconds: intervalMs), _sendAudioChunk);
+    if (_startProcessingAudioTimer == null &&
+        _bufferedAudioFrames.length > chunkSize) {
+      _startProcessingAudioTimer =
+          Timer.periodic(Duration(milliseconds: intervalMs), _sendAudioChunk);
     }
   }