suyu/src/audio_core/audio_renderer.cpp
David Marcec 42250427c5 audren: Implement RendererInfo
Fixes ZLA softlock
2020-06-13 14:04:28 +10:00

// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "audio_core/algorithm/interpolate.h"
#include "audio_core/audio_out.h"
#include "audio_core/audio_renderer.h"
#include "audio_core/codec.h"
#include "audio_core/common.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/writable_event.h"
#include "core/memory.h"
namespace AudioCore {
constexpr u32 STREAM_SAMPLE_RATE{48000};
constexpr u32 STREAM_NUM_CHANNELS{2};
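
// Each voice references one VoiceResourceInformation entry per channel, up to the six
// channels of a 5.1 source.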
using VoiceChannelHolder = std::array<VoiceResourceInformation*, 6>;
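
// Tracks the mixing state of a single voice: which wave buffer is active, the decoded
// PCM16 samples that have not been mixed yet, and the status reported back to the guest.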
class AudioRenderer::VoiceState {
public:
    bool IsPlaying() const {
        return is_in_use && info.play_state == PlayState::Started;
    }

    const VoiceOutStatus& GetOutStatus() const {
        return out_status;
    }

    const VoiceInfo& GetInfo() const {
        return info;
    }

    VoiceInfo& GetInfo() {
        return info;
    }

    void SetWaveIndex(std::size_t index);
    std::vector<s16> DequeueSamples(std::size_t sample_count, Core::Memory::Memory& memory,
                                    const VoiceChannelHolder& voice_resources);
    void UpdateState();
    void RefreshBuffer(Core::Memory::Memory& memory, const VoiceChannelHolder& voice_resources);

private:
    bool is_in_use{};
    bool is_refresh_pending{};
    std::size_t wave_index{};
    std::size_t offset{};
    Codec::ADPCMState adpcm_state{};
    InterpolationState interp_state{};
    std::vector<s16> samples;
    VoiceOutStatus out_status{};
    VoiceInfo info{};
};
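
// Tracks the state of a single effect slot. Only the bookkeeping needed to report status
// transitions back to the guest is handled here.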
class AudioRenderer::EffectState {
public:
    const EffectOutStatus& GetOutStatus() const {
        return out_status;
    }

    const EffectInStatus& GetInfo() const {
        return info;
    }

    EffectInStatus& GetInfo() {
        return info;
    }

    void UpdateState(Core::Memory::Memory& memory);

private:
    EffectOutStatus out_status{};
    EffectInStatus info{};
};
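
// Construction opens a fixed 48 kHz stereo AudioOut stream and primes it with three
// mixed buffers before the guest submits its first update.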
AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_,
                             AudioRendererParameter params,
                             std::shared_ptr<Kernel::WritableEvent> buffer_event,
                             std::size_t instance_number)
    : worker_params{params}, buffer_event{buffer_event}, voices(params.voice_count),
      voice_resources(params.voice_count), effects(params.effect_count), memory{memory_} {
    behavior_info.SetUserRevision(params.revision);
    audio_out = std::make_unique<AudioCore::AudioOut>();
    stream = audio_out->OpenStream(core_timing, STREAM_SAMPLE_RATE, STREAM_NUM_CHANNELS,
                                   fmt::format("AudioRenderer-Instance{}", instance_number),
                                   [=]() { buffer_event->Signal(); });
    audio_out->StartStream(stream);

    QueueMixedBuffer(0);
    QueueMixedBuffer(1);
    QueueMixedBuffer(2);
}

AudioRenderer::~AudioRenderer() = default;

u32 AudioRenderer::GetSampleRate() const {
    return worker_params.sample_rate;
}

u32 AudioRenderer::GetSampleCount() const {
    return worker_params.sample_count;
}

u32 AudioRenderer::GetMixBufferCount() const {
    return worker_params.mix_buffer_count;
}

Stream::State AudioRenderer::GetStreamState() const {
    return stream->GetState();
}
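
// Applies a guest update request. The input blob is a series of sections (behavior,
// memory pools, voice resources, voices, effects, ...) packed after an UpdateDataHeader,
// and the response is assembled with the same section layout.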
ResultVal<std::vector<u8>> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params) {
    // Copy UpdateDataHeader struct
    UpdateDataHeader config{};
    std::memcpy(&config, input_params.data(), sizeof(UpdateDataHeader));

    u32 memory_pool_count = worker_params.effect_count + (worker_params.voice_count * 4);

    if (!behavior_info.UpdateInput(input_params, sizeof(UpdateDataHeader))) {
        LOG_ERROR(Audio, "Failed to update behavior info input parameters");
        return Audren::ERR_INVALID_PARAMETERS;
    }

    // Copy MemoryPoolInfo structs
    std::vector<MemoryPoolInfo> mem_pool_info(memory_pool_count);
    std::memcpy(mem_pool_info.data(),
                input_params.data() + sizeof(UpdateDataHeader) + config.behavior_size,
                memory_pool_count * sizeof(MemoryPoolInfo));

    // Copy voice resources
    const std::size_t voice_resource_offset{sizeof(UpdateDataHeader) + config.behavior_size +
                                            config.memory_pools_size};
    std::memcpy(voice_resources.data(), input_params.data() + voice_resource_offset,
                sizeof(VoiceResourceInformation) * voice_resources.size());

    // Copy VoiceInfo structs
    std::size_t voice_offset{sizeof(UpdateDataHeader) + config.behavior_size +
                             config.memory_pools_size + config.voice_resource_size};
    for (auto& voice : voices) {
        std::memcpy(&voice.GetInfo(), input_params.data() + voice_offset, sizeof(VoiceInfo));
        voice_offset += sizeof(VoiceInfo);
    }
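
    // Copy EffectInStatus structs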
    std::size_t effect_offset{sizeof(UpdateDataHeader) + config.behavior_size +
                              config.memory_pools_size + config.voice_resource_size +
                              config.voices_size};
    for (auto& effect : effects) {
        std::memcpy(&effect.GetInfo(), input_params.data() + effect_offset, sizeof(EffectInStatus));
        effect_offset += sizeof(EffectInStatus);
    }

    // Update memory pool state
    std::vector<MemoryPoolEntry> memory_pool(memory_pool_count);
    for (std::size_t index = 0; index < memory_pool.size(); ++index) {
        if (mem_pool_info[index].pool_state == MemoryPoolStates::RequestAttach) {
            memory_pool[index].state = MemoryPoolStates::Attached;
        } else if (mem_pool_info[index].pool_state == MemoryPoolStates::RequestDetach) {
            memory_pool[index].state = MemoryPoolStates::Detached;
        }
    }

    // Update voices
    for (auto& voice : voices) {
        voice.UpdateState();
        if (!voice.GetInfo().is_in_use) {
            continue;
        }
        if (voice.GetInfo().is_new) {
            voice.SetWaveIndex(voice.GetInfo().wave_buffer_head);
        }
    }

    for (auto& effect : effects) {
        effect.UpdateState(memory);
    }

    // Release previous buffers and queue next ones for playback
    ReleaseAndQueueBuffers();

    // Copy output header
    UpdateDataHeader response_data{worker_params};
    if (behavior_info.IsElapsedFrameCountSupported()) {
        response_data.render_info = sizeof(RendererInfo);
        response_data.total_size += sizeof(RendererInfo);
    }
    std::vector<u8> output_params(response_data.total_size);
    std::memcpy(output_params.data(), &response_data, sizeof(UpdateDataHeader));

    // Copy output memory pool entries
    std::memcpy(output_params.data() + sizeof(UpdateDataHeader), memory_pool.data(),
                response_data.memory_pools_size);

    // Copy output voice status
    std::size_t voice_out_status_offset{sizeof(UpdateDataHeader) + response_data.memory_pools_size};
    for (const auto& voice : voices) {
        std::memcpy(output_params.data() + voice_out_status_offset, &voice.GetOutStatus(),
                    sizeof(VoiceOutStatus));
        voice_out_status_offset += sizeof(VoiceOutStatus);
    }
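
    // Copy output effect status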
    std::size_t effect_out_status_offset{
        sizeof(UpdateDataHeader) + response_data.memory_pools_size + response_data.voices_size +
        response_data.voice_resource_size};
    for (const auto& effect : effects) {
        std::memcpy(output_params.data() + effect_out_status_offset, &effect.GetOutStatus(),
                    sizeof(EffectOutStatus));
        effect_out_status_offset += sizeof(EffectOutStatus);
    }

    // Update behavior info output
    const std::size_t behavior_out_status_offset{
        sizeof(UpdateDataHeader) + response_data.memory_pools_size + response_data.voices_size +
        response_data.effects_size + response_data.sinks_size +
        response_data.performance_manager_size};
    if (!behavior_info.UpdateOutput(output_params, behavior_out_status_offset)) {
        LOG_ERROR(Audio, "Failed to update behavior info output parameters");
        return Audren::ERR_INVALID_PARAMETERS;
    }
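
    // Guest revisions that support the elapsed frame count expect a RendererInfo block
    // appended after the behavior section; omitting it is known to soft-lock some titles.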
    if (behavior_info.IsElapsedFrameCountSupported()) {
        const std::size_t renderer_info_offset{
            sizeof(UpdateDataHeader) + response_data.memory_pools_size + response_data.voices_size +
            response_data.effects_size + response_data.sinks_size +
            response_data.performance_manager_size + response_data.behavior_size};
        RendererInfo renderer_info{};
        renderer_info.elasped_frame_count = elapsed_frame_count;
        std::memcpy(output_params.data() + renderer_info_offset, &renderer_info,
                    sizeof(RendererInfo));
    }

    return MakeResult(output_params);
}
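
// Selecting a wave buffer wraps the index to the four available slots and marks the
// voice so the next DequeueSamples call re-reads and re-decodes the buffer.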
void AudioRenderer::VoiceState::SetWaveIndex(std::size_t index) {
    wave_index = index & 3;
    is_refresh_pending = true;
}
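
// Returns up to sample_count frames of interleaved stereo samples from the current wave
// buffer, advancing to the next buffer (or pausing the voice) once it is exhausted.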
std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(
    std::size_t sample_count, Core::Memory::Memory& memory,
    const VoiceChannelHolder& voice_resources) {
    if (!IsPlaying()) {
        return {};
    }

    if (is_refresh_pending) {
        RefreshBuffer(memory, voice_resources);
    }

    const std::size_t max_size{samples.size() - offset};
    const std::size_t dequeue_offset{offset};
    std::size_t size{sample_count * STREAM_NUM_CHANNELS};
    if (size > max_size) {
        size = max_size;
    }

    out_status.played_sample_count += size / STREAM_NUM_CHANNELS;
    offset += size;

    const auto& wave_buffer{info.wave_buffer[wave_index]};
    if (offset == samples.size()) {
        offset = 0;

        if (!wave_buffer.is_looping && wave_buffer.buffer_sz) {
            SetWaveIndex(wave_index + 1);
        }

        if (wave_buffer.buffer_sz) {
            out_status.wave_buffer_consumed++;
        }

        if (wave_buffer.end_of_stream || wave_buffer.buffer_sz == 0) {
            info.play_state = PlayState::Paused;
        }
    }

    return {samples.begin() + dequeue_offset, samples.begin() + dequeue_offset + size};
}
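
// Resets the local bookkeeping when the guest releases a voice that was previously in use.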
void AudioRenderer::VoiceState::UpdateState() {
    if (is_in_use && !info.is_in_use) {
        // No longer in use, reset state
        is_refresh_pending = true;
        wave_index = 0;
        offset = 0;
        out_status = {};
    }
    is_in_use = info.is_in_use;
}
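
// Reads the active wave buffer from guest memory, decodes it to PCM16 if necessary,
// applies the per-channel mix volumes and the voice volume, converts the result to
// interleaved stereo, and resamples it to the 48 kHz output rate when needed.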
void AudioRenderer::VoiceState::RefreshBuffer(Core::Memory::Memory& memory,
                                              const VoiceChannelHolder& voice_resources) {
    const auto wave_buffer_address = info.wave_buffer[wave_index].buffer_addr;
    const auto wave_buffer_size = info.wave_buffer[wave_index].buffer_sz;
    std::vector<s16> new_samples(wave_buffer_size / sizeof(s16));
    memory.ReadBlock(wave_buffer_address, new_samples.data(), wave_buffer_size);

    switch (static_cast<Codec::PcmFormat>(info.sample_format)) {
    case Codec::PcmFormat::Int16: {
        // PCM16 is played as-is
        break;
    }
    case Codec::PcmFormat::Adpcm: {
        // Decode ADPCM to PCM16
        Codec::ADPCM_Coeff coeffs;
        memory.ReadBlock(info.additional_params_addr, coeffs.data(), sizeof(Codec::ADPCM_Coeff));
        new_samples = Codec::DecodeADPCM(reinterpret_cast<u8*>(new_samples.data()),
                                         new_samples.size() * sizeof(s16), coeffs, adpcm_state);
        break;
    }
    default:
        UNIMPLEMENTED_MSG("Unimplemented sample_format={}", info.sample_format);
        break;
    }

    switch (info.channel_count) {
    case 1: {
        // Mono sources are duplicated into both stereo channels
        samples.resize(new_samples.size() * 2);
        for (std::size_t index = 0; index < new_samples.size(); ++index) {
            auto sample = static_cast<float>(new_samples[index]);
            if (voice_resources[0]->in_use) {
                sample *= voice_resources[0]->mix_volumes[0];
            }

            samples[index * 2] = static_cast<s16>(sample * info.volume);
            samples[index * 2 + 1] = static_cast<s16>(sample * info.volume);
        }
        break;
    }
    case 2: {
        // Stereo sources are passed through, with per-channel mix volumes applied
        samples = std::move(new_samples);
        const std::size_t sample_count = (samples.size() / 2);
        for (std::size_t index = 0; index < sample_count; ++index) {
            const std::size_t index_l = index * 2;
            const std::size_t index_r = index * 2 + 1;

            auto sample_l = static_cast<float>(samples[index_l]);
            auto sample_r = static_cast<float>(samples[index_r]);

            if (voice_resources[0]->in_use) {
                sample_l *= voice_resources[0]->mix_volumes[0];
            }
            if (voice_resources[1]->in_use) {
                sample_r *= voice_resources[1]->mix_volumes[1];
            }

            samples[index_l] = static_cast<s16>(sample_l * info.volume);
            samples[index_r] = static_cast<s16>(sample_r * info.volume);
        }
        break;
    }
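    // 5.1 sources are down-mixed to stereo with fixed coefficients; the LFE channel
    // (index 3) is dropped entirely.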
    case 6: {
        samples.resize((new_samples.size() / 6) * 2);
        const std::size_t sample_count = samples.size() / 2;

        for (std::size_t index = 0; index < sample_count; ++index) {
            auto FL = static_cast<float>(new_samples[index * 6]);
            auto FR = static_cast<float>(new_samples[index * 6 + 1]);
            auto FC = static_cast<float>(new_samples[index * 6 + 2]);
            auto BL = static_cast<float>(new_samples[index * 6 + 4]);
            auto BR = static_cast<float>(new_samples[index * 6 + 5]);

            if (voice_resources[0]->in_use) {
                FL *= voice_resources[0]->mix_volumes[0];
            }
            if (voice_resources[1]->in_use) {
                FR *= voice_resources[1]->mix_volumes[1];
            }
            if (voice_resources[2]->in_use) {
                FC *= voice_resources[2]->mix_volumes[2];
            }
            if (voice_resources[4]->in_use) {
                BL *= voice_resources[4]->mix_volumes[4];
            }
            if (voice_resources[5]->in_use) {
                BR *= voice_resources[5]->mix_volumes[5];
            }

            samples[index * 2] =
                static_cast<s16>((0.3694f * FL + 0.2612f * FC + 0.3694f * BL) * info.volume);
            samples[index * 2 + 1] =
                static_cast<s16>((0.3694f * FR + 0.2612f * FC + 0.3694f * BR) * info.volume);
        }
        break;
    }
    default:
        UNIMPLEMENTED_MSG("Unimplemented channel_count={}", info.channel_count);
        break;
    }

    // Only interpolate when necessary, as it is expensive
    if (GetInfo().sample_rate != STREAM_SAMPLE_RATE) {
        samples = Interpolate(interp_state, std::move(samples), GetInfo().sample_rate,
                              STREAM_SAMPLE_RATE);
    }

    is_refresh_pending = false;
}
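
// Effect processing is not implemented. Newly added effects are acknowledged, and aux
// effect buffers are asserted to be unused so unexpected usage is surfaced early.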
void AudioRenderer::EffectState::UpdateState(Core::Memory::Memory& memory) {
    if (info.is_new) {
        out_status.state = EffectStatus::New;
    } else {
        if (info.type == Effect::Aux) {
            ASSERT_MSG(memory.Read32(info.aux_info.return_buffer_info) == 0,
                       "Aux buffers tried to update");
            ASSERT_MSG(memory.Read32(info.aux_info.send_buffer_info) == 0,
                       "Aux buffers tried to update");
            ASSERT_MSG(memory.Read32(info.aux_info.return_buffer_base) == 0,
                       "Aux buffers tried to update");
            ASSERT_MSG(memory.Read32(info.aux_info.send_buffer_base) == 0,
                       "Aux buffers tried to update");
        }
    }
}

static constexpr s16 ClampToS16(s32 value) {
    return static_cast<s16>(std::clamp(value, -32768, 32767));
}
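
// Mixes up to 512 frames from every playing voice into one stereo buffer using a
// saturating add, then submits the buffer to the output stream.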
void AudioRenderer::QueueMixedBuffer(Buffer::Tag tag) {
    constexpr std::size_t BUFFER_SIZE{512};
    std::vector<s16> buffer(BUFFER_SIZE * stream->GetNumChannels());

    for (auto& voice : voices) {
        if (!voice.IsPlaying()) {
            continue;
        }

        // Gather the voice resources assigned to each of this voice's channels
        VoiceChannelHolder resources{};
        for (u32 channel = 0; channel < voice.GetInfo().channel_count; channel++) {
            const auto channel_resource_id = voice.GetInfo().voice_channel_resource_ids[channel];
            resources[channel] = &voice_resources[channel_resource_id];
        }

        std::size_t offset{};
        s64 samples_remaining{BUFFER_SIZE};
        while (samples_remaining > 0) {
            const std::vector<s16> samples{
                voice.DequeueSamples(samples_remaining, memory, resources)};

            if (samples.empty()) {
                break;
            }

            samples_remaining -= samples.size() / stream->GetNumChannels();

            for (const auto& sample : samples) {
                const s32 buffer_sample{buffer[offset]};
                buffer[offset++] =
                    ClampToS16(buffer_sample + static_cast<s32>(sample * voice.GetInfo().volume));
            }
        }
    }
    audio_out->QueueBuffer(stream, tag, std::move(buffer));
    elapsed_frame_count++;
}
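
// Queues a freshly mixed buffer for every buffer the backend has finished playing.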
void AudioRenderer::ReleaseAndQueueBuffers() {
    const auto released_buffers{audio_out->GetTagsAndReleaseBuffers(stream, 2)};
    for (const auto& tag : released_buffers) {
        QueueMixedBuffer(tag);
    }
}

} // namespace AudioCore