// VideoState.cpp

#include "VideoState.h"

#include "../video/VideoPlayer.h"
#include "../audio/Audio.h"
#include "../core/state/StateManager.h"

#include <SDL3/SDL.h>

#include <algorithm>
#include <cmath>
#include <cstring>
#include <cstdint>

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/channel_layout.h>
#include <libswresample/swresample.h>
}
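
// VideoState drives the intro video: fade in over the first decoded frame,
// play the clip with its audio, then fade back to black and hand control to
// the menu state via the StateManager.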

VideoState::VideoState(StateContext& ctx)
    : State(ctx)
    , m_player(std::make_unique<VideoPlayer>())
{
}

VideoState::~VideoState() {
    onExit();
}
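
// Expected call pattern (illustrative only; the real call site lives outside
// this file, and the asset path shown here is hypothetical):
//
//   auto video = std::make_unique<VideoState>(ctx);
//   if (video->begin(renderer, "assets/intro.mp4")) {
//       // enter the state; otherwise skip straight to the menu
//   }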

bool VideoState::begin(SDL_Renderer* renderer, const std::string& path) {
    m_path = path;

    if (!m_player) {
        m_player = std::make_unique<VideoPlayer>();
    }

    if (!m_player->open(m_path, renderer)) {
        SDL_LogWarn(SDL_LOG_CATEGORY_APPLICATION, "[VideoState] Failed to open intro video: %s", m_path.c_str());
        return false;
    }

    if (!m_player->decodeFirstFrame()) {
        SDL_LogWarn(SDL_LOG_CATEGORY_APPLICATION, "[VideoState] Failed to decode first frame: %s", m_path.c_str());
        // Still allow entering; we will likely render black.
    }

    return true;
}
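
// onEnter kicks off audio decoding on a background std::jthread while the
// first frame fades in. The worker publishes its result through m_audioDecoded
// (release store) and the main thread only touches m_audioPcm after an acquire
// load of the same flag, so no additional locking is needed.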

void VideoState::onEnter() {
    m_phase = Phase::FadeInFirstFrame;
    m_phaseClockMs = 0.0;
    m_blackOverlayAlpha = 1.0f;

    m_audioDecoded.store(false);
    m_audioDecodeFailed.store(false);
    m_audioStarted = false;
    m_audioPcm.clear();
    m_audioRate = 44100;
    m_audioChannels = 2;

    // Decode audio in the background during fade-in.
    m_audioThread = std::make_unique<std::jthread>([this](std::stop_token st) {
        (void)st;
        std::vector<int16_t> pcm;
        int rate = 44100;
        int channels = 2;

        const bool ok = decodeAudioPcm16Stereo44100(m_path, pcm, rate, channels);
        if (!ok) {
            m_audioDecodeFailed.store(true);
            m_audioDecoded.store(true, std::memory_order_release);
            return;
        }

        // Transfer results.
        m_audioRate = rate;
        m_audioChannels = channels;
        m_audioPcm = std::move(pcm);
        m_audioDecoded.store(true, std::memory_order_release);
    });
}
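
// Resetting the std::jthread below runs its destructor, which requests stop
// and joins. The decode worker ignores the stop token, so this simply blocks
// until decoding has finished.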

void VideoState::onExit() {
    stopAudio();

    if (m_audioThread) {
        // Request stop and join.
        m_audioThread.reset();
    }
}

void VideoState::handleEvent(const SDL_Event& e) {
    (void)e;
}

void VideoState::startAudioIfReady() {
    if (m_audioStarted) return;
    if (!m_audioDecoded.load(std::memory_order_acquire)) return;
    if (m_audioDecodeFailed.load()) return;
    if (m_audioPcm.empty()) return;

    // Use the existing audio output path (same device as music/SFX).
    Audio::instance().playSfx(m_audioPcm, m_audioChannels, m_audioRate, 1.0f);
    m_audioStarted = true;
}

void VideoState::stopAudio() {
    // We currently feed intro audio as an SFX buffer into the mixer.
    // It will naturally end; no explicit stop is required.
}
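
// update() advances a small state machine:
//   FadeInFirstFrame -> Playing -> FadeOutToBlack -> Done
// The fade durations FADE_IN_MS and FADE_OUT_MS are assumed to be constants
// declared in VideoState.h; t is the normalized [0, 1] progress through the
// current fade and drives the black overlay alpha.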

void VideoState::update(double frameMs) {
    switch (m_phase) {
    case Phase::FadeInFirstFrame: {
        m_phaseClockMs += frameMs;
        const float t = (FADE_IN_MS > 0.0) ? float(std::clamp(m_phaseClockMs / FADE_IN_MS, 0.0, 1.0)) : 1.0f;
        m_blackOverlayAlpha = 1.0f - t;

        if (t >= 1.0f) {
            m_phase = Phase::Playing;
            m_phaseClockMs = 0.0;
            if (m_player) {
                m_player->start();
            }
            startAudioIfReady();
        }
        break;
    }
    case Phase::Playing: {
        startAudioIfReady();
        if (m_player) {
            m_player->update(frameMs);
            if (m_player->isFinished()) {
                m_phase = Phase::FadeOutToBlack;
                m_phaseClockMs = 0.0;
                m_blackOverlayAlpha = 0.0f;
            }
        } else {
            m_phase = Phase::FadeOutToBlack;
            m_phaseClockMs = 0.0;
            m_blackOverlayAlpha = 0.0f;
        }
        break;
    }
    case Phase::FadeOutToBlack: {
        m_phaseClockMs += frameMs;
        const float t = (FADE_OUT_MS > 0.0) ? float(std::clamp(m_phaseClockMs / FADE_OUT_MS, 0.0, 1.0)) : 1.0f;
        m_blackOverlayAlpha = t;
        if (t >= 1.0f) {
            // Switch to MAIN (Menu) with a fade-in from black.
            if (ctx.startupFadeAlpha) {
                *ctx.startupFadeAlpha = 1.0f;
            }
            if (ctx.startupFadeActive) {
                *ctx.startupFadeActive = true;
            }
            if (ctx.stateManager) {
                ctx.stateManager->setState(AppState::Menu);
            }
            m_phase = Phase::Done;
        }
        break;
    }
    case Phase::Done:
    default:
        break;
    }
}
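
// render() ignores the logical viewport/scale used elsewhere and draws the
// video at the raw renderer output size, then composites a black quad on top
// whose alpha tracks m_blackOverlayAlpha for the fade in/out.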

void VideoState::render(SDL_Renderer* renderer, float logicalScale, SDL_Rect logicalVP) {
    (void)logicalScale;
    (void)logicalVP;

    if (!renderer) return;

    int winW = 0, winH = 0;
    SDL_GetRenderOutputSize(renderer, &winW, &winH);

    // Draw video fullscreen if available.
    if (m_player && m_player->isTextureReady()) {
        SDL_SetRenderViewport(renderer, nullptr);
        SDL_SetRenderScale(renderer, 1.0f, 1.0f);
        m_player->render(renderer, winW, winH);
    } else {
        SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
        SDL_FRect r{0.f, 0.f, (float)winW, (float)winH};
        SDL_RenderFillRect(renderer, &r);
    }

    // Apply fade overlay (black).
    if (m_blackOverlayAlpha > 0.0f) {
        const Uint8 a = (Uint8)std::clamp((int)std::lround(m_blackOverlayAlpha * 255.0f), 0, 255);
        SDL_SetRenderDrawBlendMode(renderer, SDL_BLENDMODE_BLEND);
        SDL_SetRenderDrawColor(renderer, 0, 0, 0, a);
        SDL_FRect full{0.f, 0.f, (float)winW, (float)winH};
        SDL_RenderFillRect(renderer, &full);
    }
}
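
// Decodes the file's first audio stream into interleaved signed 16-bit PCM at
// 44.1 kHz stereo, using libavformat/libavcodec for demuxing and decoding and
// libswresample for sample-format and rate conversion. The whole track is
// decoded into memory, which is acceptable for a short intro clip.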

bool VideoState::decodeAudioPcm16Stereo44100(
    const std::string& path,
    std::vector<int16_t>& outPcm,
    int& outRate,
    int& outChannels
) {
    outPcm.clear();
    outRate = 44100;
    outChannels = 2;

    AVFormatContext* fmt = nullptr;
    if (avformat_open_input(&fmt, path.c_str(), nullptr, nullptr) != 0) {
        return false;
    }

    if (avformat_find_stream_info(fmt, nullptr) < 0) {
        avformat_close_input(&fmt);
        return false;
    }

    int audioStream = -1;
    for (unsigned i = 0; i < fmt->nb_streams; ++i) {
        if (fmt->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audioStream = (int)i;
            break;
        }
    }
    if (audioStream < 0) {
        avformat_close_input(&fmt);
        return false;
    }

    AVCodecParameters* codecpar = fmt->streams[audioStream]->codecpar;
    const AVCodec* codec = avcodec_find_decoder(codecpar->codec_id);
    if (!codec) {
        avformat_close_input(&fmt);
        return false;
    }

    AVCodecContext* dec = avcodec_alloc_context3(codec);
    if (!dec) {
        avformat_close_input(&fmt);
        return false;
    }

    if (avcodec_parameters_to_context(dec, codecpar) < 0) {
        avcodec_free_context(&dec);
        avformat_close_input(&fmt);
        return false;
    }

    if (avcodec_open2(dec, codec, nullptr) < 0) {
        avcodec_free_context(&dec);
        avformat_close_input(&fmt);
        return false;
    }
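
    // Resampler setup uses FFmpeg's AVChannelLayout API (swr_alloc_set_opts2):
    // output is fixed to stereo S16 at 44100 Hz, the input layout is copied
    // from the decoder, and we fall back to a default stereo layout if the
    // decoder reports no usable channel layout.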
|
|
|
|
AVChannelLayout outLayout{};
|
|
av_channel_layout_default(&outLayout, 2);
|
|
|
|
AVChannelLayout inLayout{};
|
|
if (av_channel_layout_copy(&inLayout, &dec->ch_layout) < 0 || inLayout.nb_channels <= 0) {
|
|
av_channel_layout_uninit(&inLayout);
|
|
av_channel_layout_default(&inLayout, 2);
|
|
}
|
|
|
|
SwrContext* swr = nullptr;
|
|
if (swr_alloc_set_opts2(
|
|
&swr,
|
|
&outLayout,
|
|
AV_SAMPLE_FMT_S16,
|
|
44100,
|
|
&inLayout,
|
|
dec->sample_fmt,
|
|
dec->sample_rate,
|
|
0,
|
|
nullptr
|
|
) < 0) {
|
|
av_channel_layout_uninit(&inLayout);
|
|
av_channel_layout_uninit(&outLayout);
|
|
avcodec_free_context(&dec);
|
|
avformat_close_input(&fmt);
|
|
return false;
|
|
}
|
|
|
|
if (swr_init(swr) < 0) {
|
|
swr_free(&swr);
|
|
av_channel_layout_uninit(&inLayout);
|
|
av_channel_layout_uninit(&outLayout);
|
|
avcodec_free_context(&dec);
|
|
avformat_close_input(&fmt);
|
|
return false;
|
|
}
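
    // Allocate a reusable packet/frame pair, then run the main decode loop:
    // demux packets for the chosen stream, feed them to the decoder, and
    // resample every received frame. The destination buffer is sized with
    // swr_get_delay() + av_rescale_rnd() so samples still buffered inside the
    // resampler from earlier calls also fit.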

    AVPacket* pkt = av_packet_alloc();
    AVFrame* frame = av_frame_alloc();
    if (!pkt || !frame) {
        if (pkt) av_packet_free(&pkt);
        if (frame) av_frame_free(&frame);
        swr_free(&swr);
        av_channel_layout_uninit(&inLayout);
        av_channel_layout_uninit(&outLayout);
        avcodec_free_context(&dec);
        avformat_close_input(&fmt);
        return false;
    }

    const int outRateConst = 44100;
    const int outCh = 2;

    while (av_read_frame(fmt, pkt) >= 0) {
        if (pkt->stream_index != audioStream) {
            av_packet_unref(pkt);
            continue;
        }

        if (avcodec_send_packet(dec, pkt) < 0) {
            av_packet_unref(pkt);
            continue;
        }
        av_packet_unref(pkt);

        while (true) {
            const int rr = avcodec_receive_frame(dec, frame);
            if (rr == AVERROR(EAGAIN) || rr == AVERROR_EOF) {
                break;
            }
            if (rr < 0) {
                break;
            }

            const int64_t delay = swr_get_delay(swr, dec->sample_rate);
            const int dstNbSamples = (int)av_rescale_rnd(delay + frame->nb_samples, outRateConst, dec->sample_rate, AV_ROUND_UP);

            std::vector<uint8_t> outBytes;
            outBytes.resize((size_t)dstNbSamples * (size_t)outCh * sizeof(int16_t));

            uint8_t* outData[1] = { outBytes.data() };
            const uint8_t** inData = (const uint8_t**)frame->data;

            const int converted = swr_convert(swr, outData, dstNbSamples, inData, frame->nb_samples);
            if (converted > 0) {
                const size_t samplesOut = (size_t)converted * (size_t)outCh;
                const int16_t* asS16 = (const int16_t*)outBytes.data();
                const size_t oldSize = outPcm.size();
                outPcm.resize(oldSize + samplesOut);
                std::memcpy(outPcm.data() + oldSize, asS16, samplesOut * sizeof(int16_t));
            }

            av_frame_unref(frame);
        }
    }

    // Flush the decoder: a null packet puts it into drain mode so any frames
    // still buffered internally are returned and converted as well.
    avcodec_send_packet(dec, nullptr);
    while (avcodec_receive_frame(dec, frame) >= 0) {
        const int64_t delay = swr_get_delay(swr, dec->sample_rate);
        const int dstNbSamples = (int)av_rescale_rnd(delay + frame->nb_samples, outRateConst, dec->sample_rate, AV_ROUND_UP);
        std::vector<uint8_t> outBytes;
        outBytes.resize((size_t)dstNbSamples * (size_t)outCh * sizeof(int16_t));
        uint8_t* outData[1] = { outBytes.data() };
        const uint8_t** inData = (const uint8_t**)frame->data;
        const int converted = swr_convert(swr, outData, dstNbSamples, inData, frame->nb_samples);
        if (converted > 0) {
            const size_t samplesOut = (size_t)converted * (size_t)outCh;
            const int16_t* asS16 = (const int16_t*)outBytes.data();
            const size_t oldSize = outPcm.size();
            outPcm.resize(oldSize + samplesOut);
            std::memcpy(outPcm.data() + oldSize, asS16, samplesOut * sizeof(int16_t));
        }
        av_frame_unref(frame);
    }

    av_frame_free(&frame);
    av_packet_free(&pkt);
    swr_free(&swr);
    av_channel_layout_uninit(&inLayout);
    av_channel_layout_uninit(&outLayout);
    avcodec_free_context(&dec);
    avformat_close_input(&fmt);

    outRate = outRateConst;
    outChannels = outCh;

    return !outPcm.empty();
}