|
|
|
// Copyright 2018 Citra Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
|
|
|
#pragma once
|
|
|
|
|
|
|
|
#include <atomic>
|
|
|
|
#include <condition_variable>
|
|
|
|
#include <limits>
|
|
|
|
#include <memory>
|
|
|
|
#include <mutex>
|
|
|
|
#include <thread>
|
|
|
|
#include <vector>
|
|
|
|
#include "common/common_types.h"
|
|
|
|
#include "common/thread.h"
|
|
|
|
#include "common/threadsafe_queue.h"
|
|
|
|
#include "core/dumping/backend.h"
|
|
|
|
|
|
|
|
extern "C" {
|
|
|
|
#include <libavcodec/avcodec.h>
|
|
|
|
#include <libavformat/avformat.h>
|
|
|
|
#include <libswresample/swresample.h>
|
|
|
|
#include <libswscale/swscale.h>
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace VideoDumper {
|
|
|
|
|
|
|
|
using VariableAudioFrame = std::vector<s16>;
|
|
|
|
|
|
|
|
void InitFFmpegLibraries();
|
|
|
|
|
|
|
|
/**
 * Wrapper around FFmpeg AVCodecContext + AVStream.
 * Rescales/Resamples, encodes and writes a frame.
 */
class FFmpegStream {
public:
    /// Sets up the codec context and stream on the given (borrowed) muxer
    /// format context. Returns true on success.
    bool Init(AVFormatContext* format_context);
    /// Releases the codec context and associated resources.
    void Free();
    /// Signals end-of-stream to the encoder and writes out any delayed packets.
    void Flush();

protected:
    // Protected and non-virtual on purpose: instances are only destroyed
    // through a concrete derived type, never through a base pointer.
    ~FFmpegStream();

    /// Writes one encoded packet to the output via the format context.
    void WritePacket(AVPacket& packet);
    /// Sends one frame to the encoder (nullptr presumably flushes — confirm
    /// in the implementation) and writes the resulting packets.
    void SendFrame(AVFrame* frame);

    /// Deleter so an AVCodecContext can be owned by std::unique_ptr.
    struct AVCodecContextDeleter {
        void operator()(AVCodecContext* codec_context) const {
            avcodec_free_context(&codec_context);
        }
    };

    /// Deleter so an AVFrame can be owned by std::unique_ptr.
    struct AVFrameDeleter {
        void operator()(AVFrame* frame) const {
            av_frame_free(&frame);
        }
    };

    AVFormatContext* format_context{}; ///< Non-owning; the muxer owns it (see FFmpegMuxer)
    std::unique_ptr<AVCodecContext, AVCodecContextDeleter> codec_context{}; ///< Encoder context
    AVStream* stream{}; ///< Owned by format_context; freed together with it
};
|
|
|
|
|
|
|
|
/**
 * A FFmpegStream used for video data.
 * Rescales, encodes and writes a frame.
 */
class FFmpegVideoStream : public FFmpegStream {
public:
    ~FFmpegVideoStream();

    /// Sets up the video encoder for the given output format and framebuffer
    /// layout. Returns true on success.
    /// NOTE: hides FFmpegStream::Init (different signature) — intentional.
    bool Init(AVFormatContext* format_context, AVOutputFormat* output_format,
              const Layout::FramebufferLayout& layout);
    /// Releases frames, the rescaler and base-class resources.
    void Free();
    /// Rescales (BGRA -> encoder pixel format) and encodes a single frame.
    void ProcessFrame(VideoFrame& frame);

private:
    /// Deleter so a SwsContext (software rescaler) can be owned by std::unique_ptr.
    struct SwsContextDeleter {
        void operator()(SwsContext* sws_context) const {
            sws_freeContext(sws_context);
        }
    };

    u64 frame_count{}; ///< Number of video frames processed so far

    std::unique_ptr<AVFrame, AVFrameDeleter> current_frame{}; ///< Holds the incoming frame data
    std::unique_ptr<AVFrame, AVFrameDeleter> scaled_frame{};  ///< Holds the rescaled frame for the encoder
    std::unique_ptr<SwsContext, SwsContextDeleter> sws_context{}; ///< Pixel format / size converter
    Layout::FramebufferLayout layout; ///< Layout captured at Init; fixes the output dimensions

    /// The pixel format the frames are stored in
    static constexpr AVPixelFormat pixel_format = AVPixelFormat::AV_PIX_FMT_BGRA;
};
|
|
|
|
|
|
|
|
/**
 * A FFmpegStream used for audio data.
 * Resamples (converts), encodes and writes a frame.
 * This also temporarily stores resampled audio data before there are enough to form a frame.
 */
|
|
|
|
class FFmpegAudioStream : public FFmpegStream {
public:
    ~FFmpegAudioStream();

    /// Sets up the audio encoder and resampler. Returns true on success.
    bool Init(AVFormatContext* format_context);
    /// Releases the frame, resampler, buffers and base-class resources.
    void Free();
    /// Resamples and encodes one stereo chunk (one vector per channel).
    void ProcessFrame(const VariableAudioFrame& channel0, const VariableAudioFrame& channel1);
    /// Drains buffered resampled samples, then flushes the encoder.
    /// NOTE: hides FFmpegStream::Flush — intentional.
    void Flush();

private:
    /// Deleter so a SwrContext (software resampler) can be owned by std::unique_ptr.
    struct SwrContextDeleter {
        void operator()(SwrContext* swr_context) const {
            swr_free(&swr_context);
        }
    };

    u64 frame_size{};  ///< Samples per encoded frame, as required by the encoder
    u64 frame_count{}; ///< Number of audio frames encoded so far

    std::unique_ptr<AVFrame, AVFrameDeleter> audio_frame{}; ///< Reused frame sent to the encoder
    std::unique_ptr<SwrContext, SwrContextDeleter> swr_context{}; ///< Sample rate / format converter

    // Raw per-channel resample output buffer. NOTE(review): raw owning pointer;
    // presumably allocated/freed in Init()/Free() — confirm in the implementation.
    u8** resampled_data{};
    u64 offset{}; // Number of output samples that are currently in resampled_data.
};
|
|
|
|
|
|
|
|
/**
 * Wrapper around FFmpeg AVFormatContext.
 * Manages the video and audio streams, and accepts video and audio data.
 */
class FFmpegMuxer {
public:
    ~FFmpegMuxer();

    /// Opens the output file at `path` and initializes both streams for the
    /// given framebuffer layout. Returns true on success.
    bool Init(const std::string& path, const Layout::FramebufferLayout& layout);
    /// Frees both streams and the format context (closes the output file).
    void Free();
    /// Forwards a video frame to the video stream.
    void ProcessVideoFrame(VideoFrame& frame);
    /// Forwards stereo audio data to the audio stream.
    void ProcessAudioFrame(const VariableAudioFrame& channel0, const VariableAudioFrame& channel1);
    /// Flushes any delayed video packets.
    void FlushVideo();
    /// Flushes buffered audio samples and delayed audio packets.
    void FlushAudio();
    /// Writes the container trailer; call after all streams are flushed.
    void WriteTrailer();

private:
    /// Deleter so an AVFormatContext (and its open AVIO handle) can be owned
    /// by std::unique_ptr.
    struct AVFormatContextDeleter {
        void operator()(AVFormatContext* format_context) const {
            avio_closep(&format_context->pb); // close the output file first
            avformat_free_context(format_context);
        }
    };

    FFmpegAudioStream audio_stream{};
    FFmpegVideoStream video_stream{};
    std::unique_ptr<AVFormatContext, AVFormatContextDeleter> format_context{}; ///< Owns the container; streams borrow it
};
|
|
|
|
|
|
|
|
/**
 * FFmpeg video dumping backend.
 * This class implements a double buffer.
 */
|
|
|
|
class FFmpegBackend : public Backend {
public:
    FFmpegBackend();
    ~FFmpegBackend() override;
    /// Opens the output file and starts the video/audio processing threads.
    /// Returns true on success.
    bool StartDumping(const std::string& path, const Layout::FramebufferLayout& layout) override;
    /// Queues one video frame for encoding (double-buffered, see below).
    void AddVideoFrame(VideoFrame frame) override;
    /// Queues one stereo audio frame for encoding.
    void AddAudioFrame(AudioCore::StereoFrame16 frame) override;
    /// Queues a single stereo sample pair for encoding.
    void AddAudioSample(const std::array<s16, 2>& sample) override;
    /// Stops dumping and joins the processing threads.
    void StopDumping() override;
    bool IsDumping() const override;
    Layout::FramebufferLayout GetLayout() const override;

private:
    /// Finalizes the output (flush + trailer) and tears down dumping state.
    void EndDumping();

    std::atomic_bool is_dumping = false; ///< Whether the backend is currently dumping

    FFmpegMuxer ffmpeg{};

    Layout::FramebufferLayout video_layout; ///< Layout captured when dumping started

    // Video double buffer: the producer fills one buffer while the processing
    // thread encodes the other; event1/event2 coordinate the swap.
    std::array<VideoFrame, 2> video_frame_buffers;
    u32 current_buffer = 0, next_buffer = 1; ///< Indices into video_frame_buffers
    Common::Event event1, event2;
    std::thread video_processing_thread;

    // One lock-free queue per audio channel, drained by the audio thread.
    std::array<Common::SPSCQueue<VariableAudioFrame>, 2> audio_frame_queues;
    std::thread audio_processing_thread;

    Common::Event processing_ended; ///< Signaled when the processing threads finish
};
|
|
|
|
|
|
|
|
} // namespace VideoDumper
|