fix: do not play incoming audio while recording

liyi 2025-08-28 13:41:53 +08:00
parent 22887310c0
commit cafe132afc
3 changed files with 25 additions and 46 deletions


@@ -110,22 +110,8 @@ class ImageTransmissionLogic extends BaseGetXController {
     // Dispatch the incoming frame by content type
     switch (contentType) {
       case TalkData_ContentTypeE.G711:
-        // Track the arrival time of the first audio frame
-        if (_isFirstAudioFrame) {
-          _startAudioTime = currentTime;
-          _isFirstAudioFrame = false;
-        }
-        // Compare the expected playback time with the actual arrival time
-        final expectedTime = _startAudioTime + talkData.durationMs;
-        final audioDelay = currentTime - expectedTime;
-        // Flush the buffer when audio falls more than 500 ms behind
-        if (audioDelay > 500) {
-          state.audioBuffer.clear();
-          if (state.isOpenVoice.value) {
-            _playAudioFrames();
-          }
+        // Do not queue incoming audio for playback while recording
+        if (!state.isOpenVoice.value && state.isRecordingAudio.value) {
           return;
         }
         if (state.audioBuffer.length >= audioBufferSize) {
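
The hunk above is the heart of the commit: the old code stamped the first audio frame, computed how far playback lagged behind (_startAudioTime + talkData.durationMs versus currentTime), and flushed the buffer past a 500 ms delay; the new code replaces all of that with a single guard that discards incoming G.711 frames while a recording is in progress. A minimal sketch of the new receive path; the enclosing handler shape and the drop-oldest buffer policy are assumptions, since the hunk is truncated after the length check:

// Sketch only: handleTalkData and the buffer policy are illustrative.
// state.isOpenVoice, state.isRecordingAudio, and state.audioBuffer are
// the observables visible in the diff.
void handleTalkData(TalkData talkData) {
  switch (talkData.contentType) {
    case TalkData_ContentTypeE.G711:
      // While recording (with voice playback off), drop the frame so
      // nothing is queued or played back during the recording.
      if (!state.isOpenVoice.value && state.isRecordingAudio.value) {
        return;
      }
      // Keep the playback buffer bounded (drop-oldest is assumed here).
      if (state.audioBuffer.length >= audioBufferSize) {
        state.audioBuffer.removeAt(0);
      }
      state.audioBuffer.add(talkData);
      break;
    default:
      break;
  }
}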
@@ -212,7 +198,8 @@ class ImageTransmissionLogic extends BaseGetXController {
   /// Plays the received audio data.
   void _playAudioData(TalkData talkData) async {
-    if (state.isOpenVoice.value) {
+    if (state.isOpenVoice.value &&
+        state.isRecordingAudio.value == false) {
       final list =
           G711().decodeAndDenoise(talkData.content, true, 8000, 300, 150);
       // Wrap the decoded PCM data in a PcmArrayInt16
@@ -565,7 +552,6 @@ class ImageTransmissionLogic extends BaseGetXController {
   // Start capturing and processing microphone audio
   Future<void> startProcessingAudio() async {
     try {
-
       if (await state.voiceProcessor?.hasRecordAudioPermission() ?? false) {
         await state.voiceProcessor?.start(state.frameLength, state.sampleRate);
@@ -656,12 +642,14 @@ class ImageTransmissionLogic extends BaseGetXController {
       List<int> encodedData = G711Tool.encode(applyGain, 0); // 0 = A-law
       _bufferedAudioFrames.addAll(encodedData);
       // Start the periodic sender once enough audio has been buffered
-      if (_startProcessingAudioTimer == null && _bufferedAudioFrames.length > chunkSize) {
-        _startProcessingAudioTimer = Timer.periodic(Duration(milliseconds: intervalMs), _sendAudioChunk);
+      if (_startProcessingAudioTimer == null &&
+          _bufferedAudioFrames.length > chunkSize) {
+        _startProcessingAudioTimer =
+            Timer.periodic(Duration(milliseconds: intervalMs), _sendAudioChunk);
       }
     }
   // Error callback from the voice processor
   void _onError(VoiceProcessorException error) {
     AppLog.log(error.message!);
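
The last hunk only rewraps long lines, but it shows the shape of the send path: encoded frames accumulate in _bufferedAudioFrames, and a Timer.periodic drain is started lazily once more than chunkSize bytes are queued. A self-contained sketch of that pattern; the transport callback and the drain policy inside the tick are assumptions, since the diff does not show the body of _sendAudioChunk:

import 'dart:async';

// Sketch of the buffered chunk sender seen above; the chunk size,
// interval, and transport are assumptions.
class AudioChunkSender {
  final List<int> _buffered = [];
  Timer? _timer;
  final int chunkSize;
  final int intervalMs;
  final void Function(List<int> chunk) send; // e.g. a network write

  AudioChunkSender(this.chunkSize, this.intervalMs, this.send);

  // Called for every G.711-encoded frame from the recorder.
  void addEncodedFrame(List<int> encoded) {
    _buffered.addAll(encoded);
    // Start the periodic drain lazily, once enough data is queued,
    // mirroring the _startProcessingAudioTimer == null check above.
    if (_timer == null && _buffered.length > chunkSize) {
      _timer = Timer.periodic(Duration(milliseconds: intervalMs), _tick);
    }
  }

  void _tick(Timer t) {
    if (_buffered.length >= chunkSize) {
      send(_buffered.sublist(0, chunkSize));
      _buffered.removeRange(0, chunkSize);
    } else {
      // Buffer drained: stop until more audio arrives.
      t.cancel();
      _timer = null;
    }
  }
}

For example, an instance with chunkSize 320 and intervalMs 40 would ship 40 ms of 8 kHz A-law audio per tick (8000 samples/s x 0.04 s = 320 bytes).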


@@ -493,7 +493,9 @@ class TalkViewNativeDecodeLogic extends BaseGetXController {
   /// Plays the received audio data.
   void _playAudioData(TalkData talkData) async {
-    if (state.isOpenVoice.value && state.isLoading.isFalse) {
+    if (state.isOpenVoice.value &&
+        state.isLoading.isFalse &&
+        state.isRecordingAudio.value == false) {
       List<int> encodedData = G711Tool.decode(talkData.content, 0); // 0 = A-law
       // Wrap the decoded PCM data in a PcmArrayInt16
       final PcmArrayInt16 fromList = PcmArrayInt16.fromList(encodedData);
@@ -970,7 +972,8 @@ class TalkViewNativeDecodeLogic extends BaseGetXController {
     // Dispatch the incoming frame by content type
     switch (contentType) {
       case TalkData_ContentTypeE.G711:
-        if (!state.isOpenVoice.value) {
+        // Do not queue incoming audio for playback while recording
+        if (!state.isOpenVoice.value && state.isRecordingAudio.value) {
           return;
         }
         if (state.audioBuffer.length >= audioBufferSize) {
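
Both hunks in this file guard the A-law path (G711Tool.decode with mode 0). For reference, the standard ITU-T G.711 A-law expansion from an encoded byte to a 16-bit PCM sample looks like the following; this is the textbook algorithm, not necessarily what G711Tool implements internally:

// Standard G.711 A-law byte -> 16-bit linear PCM sample (reference sketch).
int alawToLinear(int aVal) {
  aVal ^= 0x55; // undo the even-bit inversion applied on encode
  int t = (aVal & 0x0F) << 4; // quantized mantissa
  final int seg = (aVal & 0x70) >> 4; // segment (exponent)
  if (seg == 0) {
    t += 8;
  } else if (seg == 1) {
    t += 0x108;
  } else {
    t += 0x108;
    t <<= seg - 1;
  }
  return (aVal & 0x80) != 0 ? t : -t; // sign bit
}

// Decodes a whole A-law payload, analogous to G711Tool.decode(content, 0).
// e.g. decodeALaw([0xD5, 0x55]) yields [8, -8], the two near-zero codes.
List<int> decodeALaw(List<int> content) => content.map(alawToLinear).toList();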


@@ -109,22 +109,8 @@ class TalkViewLogic extends BaseGetXController {
     // Dispatch the incoming frame by content type
     switch (contentType) {
       case TalkData_ContentTypeE.G711:
-        // Track the arrival time of the first audio frame
-        if (_isFirstAudioFrame) {
-          _startAudioTime = currentTime;
-          _isFirstAudioFrame = false;
-        }
-        // Compare the expected playback time with the actual arrival time
-        final expectedTime = _startAudioTime + talkData.durationMs;
-        final audioDelay = currentTime - expectedTime;
-        // Flush the buffer when audio falls more than 500 ms behind
-        if (audioDelay > 500) {
-          state.audioBuffer.clear();
-          if (state.isOpenVoice.value) {
-            _playAudioFrames();
-          }
+        // Do not queue incoming audio for playback while recording
+        if (!state.isOpenVoice.value && state.isRecordingAudio.value) {
           return;
         }
         if (state.audioBuffer.length >= audioBufferSize) {
@@ -388,9 +374,11 @@ class TalkViewLogic extends BaseGetXController {
     if (state.videoBuffer.isNotEmpty) {
       final TalkData oldestFrame = state.videoBuffer.removeAt(0);
       if (oldestFrame.content.isNotEmpty) {
-        state.listData.value = Uint8List.fromList(oldestFrame.content); //
+        state.listData.value =
+            Uint8List.fromList(oldestFrame.content); //
         final int decodeStart = DateTime.now().millisecondsSinceEpoch;
-        decodeImageFromList(Uint8List.fromList(oldestFrame.content)).then((ui.Image img) {
+        decodeImageFromList(Uint8List.fromList(oldestFrame.content))
+            .then((ui.Image img) {
           final int decodeEnd = DateTime.now().millisecondsSinceEpoch;
           state.currentImage.value = img;
           _renderedFrameCount++;
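
This hunk is pure line rewrapping, but the surrounding pattern is the video render loop: pop the oldest buffered frame, publish its raw bytes, and time Flutter's decodeImageFromList call. A standalone version of the timing step; the logging is a placeholder:

import 'dart:typed_data';
import 'dart:ui' as ui;
import 'package:flutter/painting.dart' show decodeImageFromList;

// Sketch: decode one buffered frame and report how long the decode took.
// frameBytes stands in for oldestFrame.content from the diff.
Future<ui.Image> decodeTimed(Uint8List frameBytes) async {
  final int decodeStart = DateTime.now().millisecondsSinceEpoch;
  final ui.Image img = await decodeImageFromList(frameBytes);
  final int decodeEnd = DateTime.now().millisecondsSinceEpoch;
  print('frame decoded in ${decodeEnd - decodeStart} ms'); // placeholder log
  return img;
}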
@@ -524,7 +512,7 @@ class TalkViewLogic extends BaseGetXController {
     final lockPeerId = StartChartManage().lockPeerId;
     final LockListInfoGroupEntity? lockListInfoGroupEntity =
         await Storage.getLockMainListData();
     if (lockListInfoGroupEntity != null) {
       lockListInfoGroupEntity!.groupList?.forEach((element) {
         final lockList = element.lockList;
@@ -562,7 +550,6 @@ class TalkViewLogic extends BaseGetXController {
   // Start capturing and processing microphone audio
   Future<void> startProcessingAudio() async {
     try {
-
       if (await state.voiceProcessor?.hasRecordAudioPermission() ?? false) {
         await state.voiceProcessor?.start(state.frameLength, state.sampleRate);
@@ -653,10 +640,11 @@ class TalkViewLogic extends BaseGetXController {
       List<int> encodedData = G711Tool.encode(applyGain, 0); // 0 = A-law
       _bufferedAudioFrames.addAll(encodedData);
       // Start the periodic sender once enough audio has been buffered
-      if (_startProcessingAudioTimer == null && _bufferedAudioFrames.length > chunkSize) {
-        _startProcessingAudioTimer = Timer.periodic(Duration(milliseconds: intervalMs), _sendAudioChunk);
+      if (_startProcessingAudioTimer == null &&
+          _bufferedAudioFrames.length > chunkSize) {
+        _startProcessingAudioTimer =
+            Timer.periodic(Duration(milliseconds: intervalMs), _sendAudioChunk);
       }
     }
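
Finally, both startProcessingAudio hunks show the capture entry point: check the record-audio permission, then start the voice processor with the configured frame length and sample rate. A sketch of that flow, assuming VoiceProcessor and VoiceProcessorException come from the flutter_voice_processor package; the concrete frame length and sample rate are illustrative, since the diff only shows them being read from state:

import 'package:flutter_voice_processor/flutter_voice_processor.dart';

// Sketch of the capture start-up seen in startProcessingAudio().
Future<void> startCapture(VoiceProcessor? voiceProcessor) async {
  const int frameLength = 512; // assumed; the diff reads state.frameLength
  const int sampleRate = 8000; // assumed; matches the 8000 Hz G.711 path
  try {
    // Only start recording if the microphone permission is granted.
    if (await voiceProcessor?.hasRecordAudioPermission() ?? false) {
      await voiceProcessor?.start(frameLength, sampleRate);
    }
  } catch (e) {
    // Runtime errors are reported separately through the _onError
    // listener shown in the diff (VoiceProcessorException -> AppLog.log).
    print(e);
  }
}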