AVAssetWriter can't record audio along with video | Crash

Nov 25 2020

I'm trying to capture video and audio frames as CMSampleBuffers, but I'm not getting a proper video recording.

Expected output: a video file in .mp4 format containing both audio (from the mic) and video frames.

Current output: an empty directory / a video file with no audio.

Crash at runtime: Media type of sample buffer must match receiver's media type ("soun")

I've tried pretty much everything online to fix this. My deadline is coming up, and I'm pulling my hair out trying to figure out exactly what's going on. Any help/pointers would be greatly appreciated.

Below is the source.

CameraController.swift

import UIKit
import AVFoundation

class CameraController: UIViewController, SFrameCaptureDelegate {
    
    override func viewDidLoad() {
        super.viewDidLoad()
        setupUI()
        
        assetWriter = AssetManager(filename: UUID().uuidString.appending(".mp4"))
        frameBuffer.delegate = self
        frameBuffer.startSession()
        
    }
    
    
    var previewView: PreviewView = {
        let instance = PreviewView()
        return instance
    }()
    
    var frameBuffer = FrameCapture(type: .AudioVideo)
    
    var captureButton: UIButton = {
        let instance = UIButton()
        instance.setTitle("Capture", for: .normal)
        instance.backgroundColor = .white
        return instance
    }()
 
    // if the user is recording the frames from the phone
    var frameCaptureRunning = false
    
    var assetWriter : AssetManager!
    
    var videoDirectoryPath = SFileManager.shared.getDocumentDirectory()
    
    func setupUI() {
        
        view.addSubview(previewView)
        previewView.top(to: view)
        previewView.left(to: view)
        previewView.right(to: view)
        previewView.height(view.frame.height)
        
        previewView.session = frameBuffer.session
        
        
        view.addSubview(captureButton)
        captureButton.size(CGSize(width: 100, height: 100))
        captureButton.centerX(to: view)
        captureButton.bottom(to: view, offset: -20)
        captureButton.addTarget(self, action: #selector(startpic), for: .touchDown)
        captureButton.addTarget(self, action: #selector(stopic), for: .touchUpInside)
    }
    
    @objc func startpic() {
        frameCaptureRunning = true
        assetWriter.isRecording = true
    }
    
    @objc func stopic() {
        frameCaptureRunning = false
        assetWriter.isRecording = false
        assetWriter.finish {
            DispatchQueue.main.async {
                let activity = UIActivityViewController(activityItems: [self.assetWriter.url!], applicationActivities: nil)
                self.present(activity, animated: true, completion: nil)
            }
            print("This -- ",self.assetWriter.url.path)
            do {
                let attr = try FileManager.default.attributesOfItem(atPath: self.assetWriter.url.path)
                let fileSize = attr[FileAttributeKey.size] as! UInt64
                print("H264 file size = \(fileSize)")

                DispatchQueue.main.async {
                    let player = AVPlayer(url: self.assetWriter.url)
                    let playerLayer = AVPlayerLayer(player: player)
                    playerLayer.videoGravity = .resizeAspectFill
                    playerLayer.frame = self.view.bounds
                    playerLayer.backgroundColor = UIColor.red.cgColor
                    self.view.layer.addSublayer(playerLayer)
                    player.play()
                }
            }catch{
                print("issues with finishing")
            }
        }
        
        
    }
    
    func capturedFrame(buffers: CMSampleBuffer) {
        
        if !frameCaptureRunning { return }
        assetWriter.write(buffer: buffers)
        
    }
    
}

FrameCapture.swift

import AVFoundation

protocol SFrameCaptureDelegate: class {
    func capturedFrame(buffers: CMSampleBuffer)
}

class FrameCapture: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {
    
    init(type: SessionType) {
        super.init()
        print("SFC - Frame Buffers initialized with Config - ", type.self)
        sessionType = type
    }
    
    func startSession() {
        print("SFC - Frame Buffers Session Starting")
        sessionQueue.async {
            self.configureSession(type: self.sessionType)
            self.session.startRunning()
        }
    }
    
    weak var delegate: SFrameCaptureDelegate?
    
    enum SessionSetupResult {
        case success
        case notAuthorized
        case configurationFailed
    }
    
    enum SessionType {
        case Audio
        case Video
        case AudioVideo
    }
    
    let session = AVCaptureSession()
    let sessionQueue = DispatchQueue(label: "sessionQueue", qos: .userInitiated)
    let videoQueue = DispatchQueue(label: "videoQueue", qos: .userInitiated)
    let audioQueue = DispatchQueue(label: "audioQueue", qos: .userInitiated)
    var setupResult: SessionSetupResult = .success
    var sessionType: SessionType = .Video
    
    @objc dynamic var videoDeviceInput: AVCaptureDeviceInput!
    let videoOutput = AVCaptureVideoDataOutput()
    let audioOutput = AVCaptureAudioDataOutput()
    var photoQualityPrioritizationMode: AVCapturePhotoOutput.QualityPrioritization = .balanced
    
// MARK: SessionConfig
    func configureSession(type: SessionType) {
        
        if setupResult != .success { return }
        
        session.beginConfiguration()
        session.sessionPreset = .high
        
        do {
            var defaultVideoDevice: AVCaptureDevice?
            
            if let dualCameraDevice = AVCaptureDevice.default(.builtInDualWideCamera, for: .video, position: .back) {
                defaultVideoDevice = dualCameraDevice
            } else if let backCameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back){
                defaultVideoDevice = backCameraDevice
            } else if let frontCameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front){
                defaultVideoDevice = frontCameraDevice
            }
            
            guard let videoDevice = defaultVideoDevice else {
                print("CAM - Camera unavailable")
                setupResult = .configurationFailed
                session.commitConfiguration()
                return
            }
            
            let videoInputDevice = try AVCaptureDeviceInput(device: videoDevice)
            
            if session.canAddInput(videoInputDevice) {
                session.addInput(videoInputDevice)
                videoDeviceInput = videoInputDevice
            } else {
                print("CAM - Couldn't add input to the session")
                setupResult = .configurationFailed
                session.commitConfiguration()
                return
            }
        } catch {
            print("CAM - Couldn't create device input. Error - ", error.localizedDescription)
            setupResult = .configurationFailed
            session.commitConfiguration()
            return
        }
        
        if sessionType == .AudioVideo {
            do {
                let audioDevice = AVCaptureDevice.default(for: .audio)
                let audioDeviceInput = try AVCaptureDeviceInput(device: audioDevice!)
                print("SFC - in audio device input")
                if session.canAddInput(audioDeviceInput) {
                    session.addInput(audioDeviceInput)
                } else { print("CAM - Couldn't add audio input device to session.") }
            } catch { print("couldn't create audio input device. Error - ",error.localizedDescription) }
        }
        
        
        videoOutput.setSampleBufferDelegate(self, queue: videoQueue)
        if session.canAddOutput(videoOutput) {
            session.addOutput(videoOutput)
            photoQualityPrioritizationMode = .balanced
        } else {
            print("Could not add photo output to the session")
            setupResult = .configurationFailed
            session.commitConfiguration()
            return
        }
        
        if sessionType == .AudioVideo {
            audioOutput.setSampleBufferDelegate(self, queue: audioQueue)
            if session.canAddOutput(audioOutput) {
                session.addOutput(audioOutput)
            } else {
                print("Couldn't add audio output")
                setupResult = .configurationFailed
                session.commitConfiguration()
            }
        }
        
        videoOutput.connections.first?.videoOrientation = .portrait
        videoOutput.videoSettings = [ kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA ]
        videoOutput.alwaysDiscardsLateVideoFrames = true
        
        session.commitConfiguration()

    }
    
// MARK: CMSampleBufferDelegate
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
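        // NOTE: both videoOutput and audioOutput deliver their sample buffers
        // here, so audio and video CMSampleBuffers arrive interleaved.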
        self.delegate?.capturedFrame(buffers: sampleBuffer)
    }
    
}

AssetManager.swift

import AVFoundation

class AssetManager: NSObject {
    
    private var assetWriter: AVAssetWriter?
    private var videoInput: AVAssetWriterInput?
    private var audioInput: AVAssetWriterInput?
    var url: URL!
    
    let writerQueue = DispatchQueue(label: "writerQueue", qos: .utility)
    
    var isRecording = false
    var video_frames_written = false
    
    init(filename: String) {
        super.init()
        self.videoDirectory.appendPathComponent(filename)
        self.url = self.videoDirectory
        
    }

    private var videoDirectory = SFileManager.shared.getDocumentDirectory()

    
    private func setupWriter() {
        
        SFileManager.shared.clearPreviousFiles(withPath: videoDirectory.path)
        SFileManager.shared.createNewDirectory(withPath: videoDirectory.path)
        printLog(item: self.videoDirectory)
        
        
        self.assetWriter = try? AVAssetWriter(outputURL: self.videoDirectory, fileType: AVFileType.mp4)
        
        let videoOutputSettings = [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoHeightKey: 1280,
            AVVideoWidthKey:720
        ] as [String : Any]
        
        
        self.videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoOutputSettings)
        self.videoInput?.expectsMediaDataInRealTime = true
        if let videoInput = self.videoInput, (self.assetWriter?.canAdd(videoInput))! {
            self.assetWriter?.add(videoInput)
        }
        
        
        let audioOutputSettings = [
            AVFormatIDKey: kAudioFormatMPEG4AAC,
            AVNumberOfChannelsKey: 1,
            AVSampleRateKey: 44100,
            AVEncoderBitRateKey: 64000
        ] as [String: Any]
        
        
        self.audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioOutputSettings)
        self.audioInput?.expectsMediaDataInRealTime = true
        if let audioInput = self.audioInput, (self.assetWriter?.canAdd(audioInput))! {
            self.assetWriter?.add(audioInput)
            printDone(item: "Asset writer added, \(String(describing: self.audioInput))")
        } else {
            printError(item: "No audio Input")
        }
        
        
    }
    
    
    public func write(buffer: CMSampleBuffer) {
        writerQueue.sync {
            
            if assetWriter == nil { self.setupWriter() }

            if self.assetWriter?.status == .unknown {
                self.assetWriter?.startWriting()
                self.assetWriter?.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(buffer))
                printDone(item: "Started AssetWriter")
            }

            if self.assetWriter?.status == .failed {
                printError(item: "Asset Writer Failed with Error: \(String(describing: self.assetWriter?.error))")
            }

            if CMSampleBufferDataIsReady(buffer) {
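                // NOTE: the same buffer is offered to both inputs below,
                // regardless of whether it carries audio or video samples.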

                if let videoInput = self.videoInput, videoInput.isReadyForMoreMediaData {
                    videoInput.append(buffer)
                }
                
                if let audioInput = self.audioInput, audioInput.isReadyForMoreMediaData {
                    audioInput.append(buffer) // Crashes at this line
                }
            }
        }
    }
    
    public func finish(completion: @escaping (() -> Void)) {
        writerQueue.async {
            self.assetWriter?.finishWriting(completionHandler: { [self] in
                printDone(item: "Finished Writing")
                completion()
            })
        }
    }
}

Answers

1 RhythmicFistman Nov 27 2020 at 06:02

You're writing video buffers to the audioInput and audio buffers to the videoInput, depending on how the buffers happen to arrive.

In your case each CMSampleBuffer contains either audio or video, so you need to append audio buffers to the audioInput and video buffers to the videoInput.

You can distinguish the two types of buffer by comparing the output parameter of captureOutput(_:didOutput:from:) against your audioOutput and videoOutput, or by looking at the buffer's CMSampleBufferGetFormatDescription()'s CMFormatDescriptionGetMediaType(), but that's a bit more complicated.
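
A minimal sketch of the first approach, reusing the names from the question's code. The isVideo parameter and the write(buffer:isVideo:) signature are additions made for this sketch, not existing API:

// FrameCapture.swift — pass along which output produced the buffer.
protocol SFrameCaptureDelegate: class {
    func capturedFrame(buffers: CMSampleBuffer, isVideo: Bool)
}

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // Comparing the output object tells us which kind of buffer this is.
    self.delegate?.capturedFrame(buffers: sampleBuffer, isVideo: output == videoOutput)
}

// CameraController.swift
func capturedFrame(buffers: CMSampleBuffer, isVideo: Bool) {
    if !frameCaptureRunning { return }
    assetWriter.write(buffer: buffers, isVideo: isVideo)
}

// AssetManager.swift — append each buffer only to the input that matches it.
public func write(buffer: CMSampleBuffer, isVideo: Bool) {
    writerQueue.sync {
        if assetWriter == nil { self.setupWriter() }

        if self.assetWriter?.status == .unknown {
            self.assetWriter?.startWriting()
            self.assetWriter?.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(buffer))
        }

        if CMSampleBufferDataIsReady(buffer) {
            if isVideo, let videoInput = self.videoInput, videoInput.isReadyForMoreMediaData {
                videoInput.append(buffer)
            } else if !isVideo, let audioInput = self.audioInput, audioInput.isReadyForMoreMediaData {
                audioInput.append(buffer)
            }
        }
    }
}

If you'd rather inspect the buffer itself (the second approach), a small helper along these lines should work — kCMMediaType_Audio is the "soun" four-char code from the crash message:

// Hypothetical helper: classify a buffer by its format description.
func isVideoBuffer(_ buffer: CMSampleBuffer) -> Bool {
    guard let format = CMSampleBufferGetFormatDescription(buffer) else { return false }
    return CMFormatDescriptionGetMediaType(format) == kCMMediaType_Video
}

Comparing the outputs is the cheaper and simpler option, though, since it avoids touching the format description on every buffer.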