fix: 调整 iOS 的解码流程

This commit is contained in:
liyi 2025-06-25 18:47:40 +08:00
parent 5dfbd190fd
commit 1370536da9
2 changed files with 351 additions and 160 deletions

View File

@ -42,9 +42,11 @@ public class VideoDecodePlugin: NSObject, FlutterPlugin, FlutterTexture {
let width = args["width"] as? Int,
let height = args["height"] as? Int,
let codecType = args["codecType"] as? String else {
print("[VideoDecodePlugin][错误] 参数解析失败:\(String(describing: call.arguments))")
result(FlutterError(code: "INVALID_ARGS", message: "参数错误", details: nil))
return
}
//
decoder?.release()
decoder = nil
@ -52,13 +54,17 @@ public class VideoDecodePlugin: NSObject, FlutterPlugin, FlutterTexture {
textureRegistry?.unregisterTexture(tid)
textureId = nil
}
// Flutter
guard let registry = textureRegistry else {
print("[VideoDecodePlugin][错误] 无法获取纹理注册表")
result(FlutterError(code: "NO_TEXTURE_REGISTRY", message: "无法获取纹理注册表", details: nil))
return
}
let textureId = registry.register(self)
self.textureId = textureId
//
let decoder = VideoDecoder(width: width, height: height, codecType: codecType)
self.decoder = decoder
@ -79,32 +85,94 @@ public class VideoDecodePlugin: NSObject, FlutterPlugin, FlutterTexture {
result(textureId)
}
/// Strips a leading Annex-B start code (`00 00 00 01` or `00 00 01`) from a NALU.
/// - Parameter data: Raw NALU bytes, possibly prefixed with an Annex-B start code.
/// - Returns: The NALU payload without the start code; the input unchanged when no
///   start code is present.
/// Logs a warning when the stripped payload is shorter than 3 bytes or its NALU
/// type is not SPS(7)/PPS(8) — this helper is used on the SPS/PPS caching path.
private func stripStartCode(_ data: Data) -> Data {
    var stripped: Data = data
    if data.count > 4 && data[0] == 0x00 && data[1] == 0x00 && data[2] == 0x00 && data[3] == 0x01 {
        // subdata(in:) rebases indices to 0, unlike a plain slice.
        stripped = data.subdata(in: 4..<data.count)
    } else if data.count > 3 && data[0] == 0x00 && data[1] == 0x00 && data[2] == 0x01 {
        stripped = data.subdata(in: 3..<data.count)
    }
    let strippedLen = stripped.count
    // Low 5 bits of the first payload byte are the H.264 NALU type.
    let strippedType: UInt8 = stripped.count > 0 ? (stripped[0] & 0x1F) : 0
    if strippedLen < 3 || (strippedType != 7 && strippedType != 8) {
        print("[VideoDecodePlugin][警告] strip后NALU长度或类型异常type=", strippedType, "len=", strippedLen)
    }
    return stripped
}
/// Heuristic guard against NALUs that likely carry appended side data.
/// Flags an I-frame NALU (type 5) larger than 150 KB, or any other NALU
/// larger than 30 KB; returns false for NALUs within the size budget.
private func checkNaluForSideData(_ nalu: Data, naluType: UInt8) -> Bool {
    let threshold: Int
    if naluType == 5 {
        threshold = 150_000   // I-frames get the larger budget
    } else {
        threshold = 30_000    // P/B and parameter sets stay small
    }
    guard nalu.count > threshold else { return false }
    print("[VideoDecodePlugin][警告] NALU长度异常可能包含side_datatype=\(naluType)len=\(nalu.count)")
    return true
}
/// Extracts the first complete Annex-B NALU (including its start code) from `nalu`.
/// - Parameter nalu: Buffer that may contain one or more Annex-B framed NALUs.
/// - Returns: The first NALU with its start code, rebased to index 0; empty `Data`
///   when no start code is found or the NALU looks like it carries side data.
private func extractFirstValidNalu(_ nalu: Data) -> Data {
    var start = -1
    var startCodeLength = 0
    // Fast path: start code at offset 0 (4-byte, then 3-byte form).
    if nalu.count >= 4 && nalu[0] == 0x00 && nalu[1] == 0x00 && nalu[2] == 0x00 && nalu[3] == 0x01 {
        start = 0
        startCodeLength = 4
    } else if nalu.count >= 3 && nalu[0] == 0x00 && nalu[1] == 0x00 && nalu[2] == 0x01 {
        start = 0
        startCodeLength = 3
    } else if nalu.count >= 4 {
        // Scan for a start code deeper in the buffer. The `>= 4` guard keeps the
        // range valid — the original `0..<(nalu.count - 4)` crashed for inputs
        // shorter than 5 bytes (negative upper bound).
        for i in 0..<(nalu.count - 3) {
            if nalu[i] == 0x00 && nalu[i + 1] == 0x00 {
                if i + 3 < nalu.count && nalu[i + 2] == 0x00 && nalu[i + 3] == 0x01 {
                    start = i
                    startCodeLength = 4
                    break
                } else if nalu[i + 2] == 0x01 {
                    start = i
                    startCodeLength = 3
                    break
                }
            }
        }
    }
    guard start >= 0 else {
        print("[VideoDecodePlugin][警告] NALU无AnnexB起始码丢弃该帧")
        return Data()
    }
    // Bound this NALU at the next start code, if one follows.
    var end = nalu.count
    let searchStart = start + startCodeLength
    // Guard keeps the range valid — the original loop crashed when the payload
    // after the start code was shorter than 3 bytes.
    if searchStart < nalu.count - 3 {
        for i in searchStart..<(nalu.count - 3) {
            if nalu[i] == 0x00 && nalu[i + 1] == 0x00 {
                if i + 3 < nalu.count && nalu[i + 2] == 0x00 && nalu[i + 3] == 0x01 {
                    end = i
                    break
                } else if nalu[i + 2] == 0x01 {
                    end = i
                    break
                }
            }
        }
    }
    // Rebase to index 0: Data slices keep the parent's indices, so returning
    // `nalu[start..<end]` directly misreads `extractedNalu[startCodeLength]`
    // below when start > 0, and breaks callers that index from 0.
    let extractedNalu = Data(nalu[start..<end])
    if extractedNalu.count >= startCodeLength + 1 {
        let naluType = extractedNalu[startCodeLength] & 0x1F
        if checkNaluForSideData(extractedNalu, naluType: naluType) {
            return Data()
        }
    }
    return extractedNalu
}
///
private func handleDecodeFrame(call: FlutterMethodCall, result: @escaping FlutterResult) {
guard let args = call.arguments as? [String: Any],
@ -128,30 +196,32 @@ public class VideoDecodePlugin: NSObject, FlutterPlugin, FlutterTexture {
return 0
}()
print("[VideoDecodePlugin][调试] handleDecodeFrame: frameType=\(frameType), naluType=\(naluType), cachedSpsLen=\(cachedSps?.count ?? 0), cachedPpsLen=\(cachedPps?.count ?? 0)")
// SPS/PPS
// SPS/PPS
if naluType == 7 { // SPS
//
cachedSps = stripStartCode(data)
result(true)
return
} else if naluType == 8 { // PPS
//
cachedPps = stripStartCode(data)
result(true)
return
} else if naluType == 5 { // IDR/I
// NALU
// NALU
let firstNalu = extractFirstValidNalu(data)
if firstNalu.isEmpty { return }
print("[VideoDecodePlugin] 发送I帧, 长度: \(firstNalu.count), 头部: \(firstNalu.prefix(8).map { String(format: "%02X", $0) }.joined(separator: " ")), cachedSps长度: \(cachedSps?.count ?? 0), cachedPps长度: \(cachedPps?.count ?? 0)")
if firstNalu.isEmpty {
result(false)
return
}
decoder?.decodeFrame(frameData: firstNalu, frameType: frameType, timestamp: Int64(timestamp), frameSeq: frameSeq, refIFrameSeq: refIFrameSeq, sps: cachedSps, pps: cachedPps)
} else {
// NALU
// NALU
let firstNalu = extractFirstValidNalu(data)
if firstNalu.isEmpty { return }
print("[VideoDecodePlugin] 发送P/B帧, 长度: \(firstNalu.count), 头部: \(firstNalu.prefix(8).map { String(format: "%02X", $0) }.joined(separator: " "))")
if firstNalu.isEmpty {
result(false)
return
}
decoder?.decodeFrame(frameData: firstNalu, frameType: frameType, timestamp: Int64(timestamp), frameSeq: frameSeq, refIFrameSeq: refIFrameSeq)
}
result(true)
@ -182,28 +252,4 @@ public class VideoDecodePlugin: NSObject, FlutterPlugin, FlutterTexture {
}
return nil
}
/// Heuristic check for NALUs that may contain side data: any NALU over 10 KB
/// that is not SPS(7)/PPS(8) is treated as suspicious and logged.
private func checkNaluForSideData(_ nalu: Data, naluType: UInt8) -> Bool {
    guard nalu.count > 10000 else { return false }
    let suspicious = naluType == 5 || (naluType != 7 && naluType != 8)
    guard suspicious else { return false }
    print("[VideoDecodePlugin][警告] NALU长度异常可能包含side_datatype=\(naluType)len=\(nalu.count)")
    return true
}
// Extracts the first NALU delimited by 4-byte Annex-B start codes (00 00 00 01).
// Returns empty Data when no start code is found; otherwise returns a slice from
// the first start code up to (not including) the next one, or to the end of the
// buffer. NOTE(review): the returned value is a Data *slice*, which keeps the
// parent's indices — callers that index it from 0 may misread; verify callers.
private func extractFirstValidNalu(_ nalu: Data) -> Data {
    guard let start = nalu.range(of: Data([0x00, 0x00, 0x00, 0x01]))?.lowerBound else {
        print("[VideoDecodePlugin][警告] NALU无AnnexB起始码丢弃该帧")
        return Data()
    }
    // Search for the next start code after this one's 4-byte prefix.
    let searchRange = (start+4)..<nalu.count
    if let next = nalu[searchRange].range(of: Data([0x00, 0x00, 0x00, 0x01]))?.lowerBound {
        // NOTE(review): range(of:) on a Data slice appears to return indices in the
        // slice's (parent-shared) index space; adding searchRange.lowerBound here
        // may double-offset `end` when start > 0 — confirm against Foundation docs.
        let end = searchRange.lowerBound + next
        return nalu[start..<end]
    } else {
        // No following start code: take everything to the end of the buffer.
        return nalu[start..<nalu.count]
    }
}
}

View File

@ -35,49 +35,49 @@ class VideoDecoder {
///
private var frameSeqSet = Set<Int>()
///
private let maxAllowedDelayMs: Int = 350
private let maxAllowedDelayMs: Int64 = 750 // Android
///
private var timestampBaseMs: Int64?
///
private var firstFrameRelativeTimestamp: Int64?
// ====== ======
// ====== ======
/// 线
private let inputQueue = DispatchQueue(label: "video_decode_plugin.input.queue", attributes: .concurrent)
private var inputBuffer: [(frameData: Data, frameType: Int, timestamp: Int64, frameSeq: Int, refIFrameSeq: Int?, sps: Data?, pps: Data?)] = []
private let inputBufferSemaphore = DispatchSemaphore(value: 1)
private let inputBufferMaxCount = 15
private let inputBufferMaxCount = 100 // Android
/// 线
private let outputQueue = DispatchQueue(label: "video_decode_plugin.output.queue", attributes: .concurrent)
private var outputBuffer: [(pixelBuffer: CVPixelBuffer, timestamp: Int64)] = []
private let outputBufferSemaphore = DispatchSemaphore(value: 1)
private let outputBufferMaxCount = 15
/// 线
private var renderThread: Thread?
private let outputBufferMaxCount = 100 // Android
///
private var renderTimer: DispatchSourceTimer?
/// 线
private var renderThreadRunning = false
///
private var hasNotifiedFlutter = false
///
private var renderFps: Int = 15
/// EMA
private var smoothedFps: Double = 15.0
/// EMA
private let alpha: Double = 0.2
///
private let minFps: Double = 8.0
///
private let maxFps: Double = 30.0
///
private let maxStep: Double = 2.0
///
private var renderedTimestamps: [Int64] = [] // ms
///
private let renderedTimestampsMaxCount = 20
///
private var renderedFrameCount = 0
/// N
private let fpsAdjustInterval = 10
private var renderFps: Int = 20 // Android
///
private var renderIntervalMs: Int64 = 0
///
private let renderJitterMs: Int64 = 2
///
private var lastRenderTimeMs: Int64 = 0
///
private var renderStarted = false
// ====== ======
///
private var reorderBuffer: [Int: (frameData: Data, frameType: Int, timestamp: Int64, frameSeq: Int, refIFrameSeq: Int?, sps: Data?, pps: Data?)] = [:]
/// I
private var receivedIFrames = Set<Int>()
///
private let reorderLock = NSLock()
///
private let maxReorderBufferSize = 100 //
/// CVPixelBuffer
var onFrameDecoded: ((CVPixelBuffer, Int64) -> Void)? = { _, _ in }
@ -87,7 +87,9 @@ class VideoDecoder {
self.width = width
self.height = height
self.codecType = CodecType(rawValue: codecType.lowercased()) ?? .h264
startRenderThread()
self.renderIntervalMs = Int64(1000.0 / Double(renderFps))
startRenderTimer()
print("[VideoDecoder] 初始化解码器: width=\(width), height=\(height)")
}
// ====== ======
@ -95,7 +97,8 @@ class VideoDecoder {
private func enqueueInput(_ item: (Data, Int, Int64, Int, Int?, Data?, Data?)) {
inputQueue.async(flags: .barrier) {
if self.inputBuffer.count >= self.inputBufferMaxCount {
self.inputBuffer.removeFirst()
self.inputBuffer.removeFirst() //
print("[VideoDecoder][警告] 输入缓冲区满,丢弃最旧帧")
}
self.inputBuffer.append(item)
}
@ -115,7 +118,8 @@ class VideoDecoder {
private func enqueueOutput(_ item: (CVPixelBuffer, Int64)) {
outputQueue.async(flags: .barrier) {
if self.outputBuffer.count >= self.outputBufferMaxCount {
self.outputBuffer.removeFirst()
self.outputBuffer.removeFirst() //
print("[VideoDecoder][警告] 输出缓冲区满,丢弃最旧帧")
}
self.outputBuffer.append(item)
}
@ -130,72 +134,141 @@ class VideoDecoder {
}
return item
}
// ====== 线 ======
/// 线FlutterEMA
private func startRenderThread() {
renderThreadRunning = true
renderThread = Thread { [weak self] in
guard let self = self else { return }
while self.renderThreadRunning {
let frameIntervalMs = Int(1000.0 / self.smoothedFps)
let loopStart = Date().timeIntervalSince1970 * 1000.0
if let (pixelBuffer, timestamp) = self.dequeueOutput() {
// Flutter
DispatchQueue.main.async {
self.onFrameDecoded?(pixelBuffer, timestamp)
}
// Flutter
if !self.hasNotifiedFlutter {
self.hasNotifiedFlutter = true
// onFrameRendered
}
//
self.renderedTimestamps.append(Int64(Date().timeIntervalSince1970 * 1000))
if self.renderedTimestamps.count > self.renderedTimestampsMaxCount {
self.renderedTimestamps.removeFirst()
}
self.renderedFrameCount += 1
if self.renderedFrameCount % self.fpsAdjustInterval == 0 {
let measuredFps = self.calculateDecodeFps()
let newFps = self.updateSmoothedFps(measuredFps)
self.renderFps = newFps
}
// ====== ======
///
private func handleFrameReordering(frameData: Data, frameType: Int, timestamp: Int64, frameSeq: Int, refIFrameSeq: Int?, sps: Data?, pps: Data?) -> Bool {
reorderLock.lock()
defer { reorderLock.unlock() }
// 1.
let now = Int64(Date().timeIntervalSince1970 * 1000)
let base = timestampBaseMs ?? 0
let firstRel = firstFrameRelativeTimestamp ?? 0
let absTimestamp = base + (timestamp - firstRel)
if absTimestamp < now - maxAllowedDelayMs {
print("[VideoDecoder][警告] 丢弃延迟帧: type=\(frameType), seq=\(frameSeq), delay=\(now - absTimestamp)ms")
return false
}
// 2.
if frameType == 0 { // I
receivedIFrames.insert(frameSeq)
lastIFrameSeq = frameSeq
// I
enqueueInput((frameData, frameType, timestamp, frameSeq, refIFrameSeq, sps, pps))
// IP
let readyPFrames = reorderBuffer.values
.filter { $0.refIFrameSeq == frameSeq }
.sorted { $0.frameSeq < $1.frameSeq }
for pFrame in readyPFrames {
enqueueInput(pFrame)
reorderBuffer.removeValue(forKey: pFrame.frameSeq)
}
if !readyPFrames.isEmpty {
print("[VideoDecoder] I帧\(frameSeq)释放\(readyPFrames.count)个P帧")
}
//
if reorderBuffer.count > maxReorderBufferSize {
let toRemove = reorderBuffer.keys.sorted().prefix(reorderBuffer.count - maxReorderBufferSize)
for seq in toRemove {
reorderBuffer.removeValue(forKey: seq)
}
//
let loopCost = Int(Date().timeIntervalSince1970 * 1000.0 - loopStart)
let sleepMs = frameIntervalMs - loopCost
if sleepMs > 0 {
Thread.sleep(forTimeInterval: Double(sleepMs) / 1000.0)
print("[VideoDecoder][警告] 重排序缓冲区溢出,清理\(toRemove.count)个P帧")
}
return true
} else { // P
// P
if let refIFrameSeq = refIFrameSeq, receivedIFrames.contains(refIFrameSeq) {
// I
enqueueInput((frameData, frameType, timestamp, frameSeq, refIFrameSeq, sps, pps))
return true
} else {
// IP
reorderBuffer[frameSeq] = (frameData, frameType, timestamp, frameSeq, refIFrameSeq, sps, pps)
print("[VideoDecoder] P帧\(frameSeq)缓存等待I帧\(refIFrameSeq ?? -1)")
//
if reorderBuffer.count > maxReorderBufferSize {
let toRemove = reorderBuffer.keys.sorted().prefix(reorderBuffer.count - maxReorderBufferSize)
for seq in toRemove {
reorderBuffer.removeValue(forKey: seq)
}
print("[VideoDecoder][警告] 重排序缓冲区溢出,清理\(toRemove.count)个P帧")
}
return false
}
}
}
// ====== ======
/// Flutter
private func startRenderTimer() {
renderThreadRunning = true
let timer = DispatchSource.makeTimerSource(queue: DispatchQueue.global())
timer.schedule(deadline: .now(), repeating: .milliseconds(Int(renderIntervalMs / 2)))
timer.setEventHandler { [weak self] in
guard let self = self else { return }
let now = Int64(Date().timeIntervalSince1970 * 1000)
//
let timeSinceLastRender = now - self.lastRenderTimeMs
if timeSinceLastRender < self.renderIntervalMs - self.renderJitterMs {
return
}
//
if !self.renderStarted {
var outputCount = 0
self.outputQueue.sync { outputCount = self.outputBuffer.count }
if outputCount >= Int(Double(self.outputBufferMaxCount) * 0.15) {
self.renderStarted = true
print("[VideoDecoder] 渲染启动outputBuffer已达低水位: \(outputCount)")
} else {
//
return
}
}
if let (pixelBuffer, timestamp) = self.dequeueOutput() {
//
let now = Int64(Date().timeIntervalSince1970 * 1000)
let base = timestampBaseMs ?? 0
let firstRel = firstFrameRelativeTimestamp ?? 0
let absTimestamp = base + (timestamp - firstRel)
if absTimestamp < now - self.maxAllowedDelayMs {
print("[VideoDecoder][警告] 丢弃延迟渲染帧: delay=\(now - absTimestamp)ms")
return
}
DispatchQueue.main.async {
self.onFrameDecoded?(pixelBuffer, timestamp)
}
self.lastRenderTimeMs = now
if !self.hasNotifiedFlutter {
self.hasNotifiedFlutter = true
}
}
}
renderThread?.start()
timer.resume()
renderTimer = timer
}
/// 线
private func stopRenderThread() {
///
private func stopRenderTimer() {
renderThreadRunning = false
renderThread?.cancel()
renderThread = nil
}
// ====== EMA ======
/// N
private func calculateDecodeFps() -> Double {
guard renderedTimestamps.count >= 2 else { return smoothedFps }
let first = renderedTimestamps.first!
let last = renderedTimestamps.last!
let frameCount = renderedTimestamps.count - 1
let durationMs = max(last - first, 1)
return Double(frameCount) * 1000.0 / Double(durationMs)
}
/// EMA
private func updateSmoothedFps(_ measuredFps: Double) -> Int {
let safeFps = min(max(measuredFps, minFps), maxFps)
let targetFps = alpha * safeFps + (1 - alpha) * smoothedFps
let delta = targetFps - smoothedFps
let step = min(max(delta, -maxStep), maxStep)
smoothedFps = min(max(smoothedFps + step, minFps), maxFps)
return Int(smoothedFps)
renderTimer?.cancel()
renderTimer = nil
}
/// I
private func setupSession(sps: Data?, pps: Data?) -> Bool {
//
@ -206,9 +279,10 @@ class VideoDecoder {
formatDesc = nil
isSessionReady = false
guard let sps = sps, let pps = pps else {
print("[VideoDecoder] 缺少SPS/PPS无法初始化解码会话")
print("[VideoDecoder][错误] 缺少SPS/PPS无法初始化解码会话")
return false
}
// SPS/PPS
let spsType: UInt8 = sps.count > 0 ? (sps[0] & 0x1F) : 0
let ppsType: UInt8 = pps.count > 0 ? (pps[0] & 0x1F) : 0
@ -220,6 +294,7 @@ class VideoDecoder {
print("[VideoDecoder][错误] PPS内容异常len=\(pps.count), type=\(ppsType)")
return false
}
var success = false
sps.withUnsafeBytes { spsPtr in
pps.withUnsafeBytes { ppsPtr in
@ -237,7 +312,7 @@ class VideoDecoder {
formatDescriptionOut: &formatDesc
)
if status != noErr {
print("[VideoDecoder] 创建FormatDescription失败: \(status)")
print("[VideoDecoder][错误] 创建FormatDescription失败: \(status)")
success = false
} else {
success = true
@ -252,7 +327,7 @@ class VideoDecoder {
// 线
decoder.enqueueOutput((pixelBuffer, Int64(pts.seconds * 1000)))
} else {
print("[VideoDecoder] 解码回调失败, status=\(status)")
print("[VideoDecoder][错误] 解码回调失败: \(status)")
}
},
decompressionOutputRefCon: UnsafeMutableRawPointer(Unmanaged.passUnretained(self).toOpaque())
@ -261,8 +336,10 @@ class VideoDecoder {
kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
kCVPixelBufferWidthKey: width,
kCVPixelBufferHeightKey: height,
kCVPixelBufferOpenGLESCompatibilityKey: true
kCVPixelBufferOpenGLESCompatibilityKey: true,
kCVPixelBufferIOSurfacePropertiesKey: [:]
]
let status2 = VTDecompressionSessionCreate(
allocator: kCFAllocatorDefault,
formatDescription: formatDesc!,
@ -272,7 +349,7 @@ class VideoDecoder {
decompressionSessionOut: &decompressionSession
)
if status2 != noErr {
print("[VideoDecoder] 创建解码会话失败: \(status2)")
print("[VideoDecoder][错误] 创建解码会话失败: \(status2)")
return false
}
isSessionReady = true
@ -282,38 +359,91 @@ class VideoDecoder {
///
func decodeFrame(frameData: Data, frameType: Int, timestamp: Int64, frameSeq: Int, refIFrameSeq: Int?, sps: Data? = nil, pps: Data? = nil) {
enqueueInput((frameData, frameType, timestamp, frameSeq, refIFrameSeq, sps, pps))
// 线
// 1.
if timestampBaseMs == nil {
timestampBaseMs = Int64(Date().timeIntervalSince1970 * 1000)
firstFrameRelativeTimestamp = timestamp
print("[VideoDecoder] 设置时间戳基准: base=\(timestampBaseMs!), firstRel=\(firstFrameRelativeTimestamp!)")
}
// 2.
if !handleFrameReordering(frameData: frameData, frameType: frameType, timestamp: timestamp, frameSeq: frameSeq, refIFrameSeq: refIFrameSeq, sps: sps, pps: pps) {
return
}
// 3. inputQueue
decodeQueue.async { [weak self] in
guard let self = self else { return }
guard let (frameData, frameType, timestamp, frameSeq, refIFrameSeq, sps, pps) = self.dequeueInput() else { return }
if !self.isSessionReady, let sps = sps, let pps = pps {
guard self.setupSession(sps: sps, pps: pps) else { return }
}
guard let session = self.decompressionSession else { return }
guard frameData.count > 4 else { return }
var avccData = frameData
let naluLen = UInt32(frameData.count - 4).bigEndian
if avccData.count >= 4 {
avccData.replaceSubrange(0..<4, with: withUnsafeBytes(of: naluLen) { Data($0) })
guard frameData.count > 3 else { return }
// AnnexB
var startCodeSize = 0
var naluData: Data
if frameData.count >= 4 && frameData[0] == 0x00 && frameData[1] == 0x00 && frameData[2] == 0x00 && frameData[3] == 0x01 {
startCodeSize = 4
naluData = frameData.subdata(in: 4..<frameData.count)
} else if frameData.count >= 3 && frameData[0] == 0x00 && frameData[1] == 0x00 && frameData[2] == 0x01 {
startCodeSize = 3
naluData = frameData.subdata(in: 3..<frameData.count)
} else {
return
print("[VideoDecoder][警告] 未找到起始码")
naluData = frameData
}
// AVCC
let naluLength = UInt32(naluData.count).bigEndian
var avccData = Data(capacity: naluData.count + 4)
withUnsafeBytes(of: naluLength) { ptr in
avccData.append(ptr.baseAddress!.assumingMemoryBound(to: UInt8.self), count: 4)
}
avccData.append(naluData)
var blockBuffer: CMBlockBuffer?
let status = CMBlockBufferCreateWithMemoryBlock(
allocator: kCFAllocatorDefault,
memoryBlock: UnsafeMutableRawPointer(mutating: (avccData as NSData).bytes),
memoryBlock: nil,
blockLength: avccData.count,
blockAllocator: kCFAllocatorNull,
blockAllocator: nil,
customBlockSource: nil,
offsetToData: 0,
dataLength: avccData.count,
flags: 0,
flags: kCMBlockBufferAssureMemoryNowFlag,
blockBufferOut: &blockBuffer
)
if status != kCMBlockBufferNoErr { return }
if status != kCMBlockBufferNoErr {
print("[VideoDecoder][错误] 创建BlockBuffer失败: \(status)")
return
}
// BlockBuffer
if let blockBuffer = blockBuffer {
let status2 = avccData.withUnsafeBytes { ptr in
CMBlockBufferReplaceDataBytes(
with: ptr.baseAddress!,
blockBuffer: blockBuffer,
offsetIntoDestination: 0,
dataLength: avccData.count
)
}
if status2 != kCMBlockBufferNoErr {
print("[VideoDecoder][错误] 复制数据到BlockBuffer失败: \(status2)")
return
}
}
var sampleBuffer: CMSampleBuffer?
var timing = CMSampleTimingInfo(duration: .invalid, presentationTimeStamp: CMTime(value: timestamp, timescale: 1000), decodeTimeStamp: .invalid)
var timing = CMSampleTimingInfo(
duration: .invalid,
presentationTimeStamp: CMTime(value: timestamp, timescale: 1000),
decodeTimeStamp: .invalid
)
let status2 = CMSampleBufferCreate(
allocator: kCFAllocatorDefault,
dataBuffer: blockBuffer,
@ -328,8 +458,12 @@ class VideoDecoder {
sampleSizeArray: [avccData.count],
sampleBufferOut: &sampleBuffer
)
if status2 != noErr { return }
let decodeFlags: VTDecodeFrameFlags = []
if status2 != noErr {
print("[VideoDecoder][错误] 创建SampleBuffer失败: \(status2)")
return
}
let decodeFlags: VTDecodeFrameFlags = [._EnableAsynchronousDecompression]
var infoFlags = VTDecodeInfoFlags()
let status3 = VTDecompressionSessionDecodeFrame(
session,
@ -339,14 +473,19 @@ class VideoDecoder {
infoFlagsOut: &infoFlags
)
if status3 != noErr {
print("[VideoDecoder] 解码失败: \(status3)")
print("[VideoDecoder][错误] 解码失败: \(status3)")
if status3 == -6661 {
print(" - 错误类型: kVTInvalidSessionErr (解码会话无效)")
print(" - 会话状态: \(self.isSessionReady ? "就绪" : "未就绪")")
print(" - formatDesc: \(self.formatDesc != nil ? "有效" : "无效")")
}
}
}
}
///
func release() {
stopRenderThread()
stopRenderTimer()
decodeQueue.sync {
if let session = decompressionSession {
VTDecompressionSessionInvalidate(session)
@ -356,6 +495,12 @@ class VideoDecoder {
isSessionReady = false
frameSeqSet.removeAll()
lastIFrameSeq = nil
//
reorderLock.lock()
reorderBuffer.removeAll()
receivedIFrames.removeAll()
reorderLock.unlock()
}
inputQueue.async(flags: .barrier) { self.inputBuffer.removeAll() }
outputQueue.async(flags: .barrier) { self.outputBuffer.removeAll() }