From 4a811008d1763dd295d78fdfc8fa9bdf6b1f1d98 Mon Sep 17 00:00:00 2001 From: MerryMage Date: Mon, 25 Jan 2016 22:14:01 +0000 Subject: [PATCH] DSP/Audio: Reorganization, cleanup and use ExtractFromMemory --- src/core/audio/audio.cpp | 206 +++++++++++---------- src/core/audio/audio.h | 35 ++-- src/core/hle/service/dsp_dsp.cpp | 309 +++++++++++++++++-------------- 3 files changed, 297 insertions(+), 253 deletions(-) diff --git a/src/core/audio/audio.cpp b/src/core/audio/audio.cpp index 53e006ebf..84b7507b0 100644 --- a/src/core/audio/audio.cpp +++ b/src/core/audio/audio.cpp @@ -1,29 +1,39 @@ +// Copyright 2016 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. -#include "AL/al.h" -#include "AL/alc.h" -#include "AL/alext.h" +#include +#include +#include #include "common/logging/log.h" +#include "common/math_util.h" #include "core/audio/audio.h" #include #include +#include #include namespace Audio { +using ALCDevicePointer = std::unique_ptr; +using ALCContextPointer = std::unique_ptr; + +static ALCDevicePointer device = ALCDevicePointer(nullptr, nullptr); +static ALCContextPointer context = ALCContextPointer(nullptr, nullptr); + static const int BASE_SAMPLE_RATE = 22050; struct Buffer { u16 id; ///< buffer_id that userland gives us ALuint buffer; bool is_looping; - bool operator < (const Buffer& other) const { // We want things with lower id to appear first, unless we have wraparound. // priority_queue puts a before b when b < a. - // Should perhaps be a instead. + // Should perhaps be a functor instead. if ((other.id - id) > 1000) return true; if ((id - other.id) > 1000) return false; return id > other.id; @@ -39,17 +49,17 @@ struct AdpcmState { // GC-ADPCM with scale factor and variable coefficients. // Frames are 8 bytes long containing 14 samples each. // Samples are 4 bits (one nybble) long. -std::vector DecodeADPCM(const u8 * const data, const size_t sample_count, const std::array& adpcm_coeff, AdpcmState& state) { - const size_t FRAME_LEN = 8; - const size_t SAMPLES_PER_FRAME = 14; - const static int SIGNED_NYBBLES[16] = { 0,1,2,3,4,5,6,7,-8,-7,-6,-5,-4,-3,-2,-1 }; +static std::vector DecodeADPCM(const u8 * const data, const size_t sample_count, const std::array& adpcm_coeff, AdpcmState& state) { + constexpr size_t FRAME_LEN = 8; + constexpr size_t SAMPLES_PER_FRAME = 14; + constexpr int SIGNED_NYBBLES[16] = {0,1,2,3,4,5,6,7,-8,-7,-6,-5,-4,-3,-2,-1}; std::vector ret(sample_count); - int yn1 = 0, yn2 = 0;// state.yn1, yn2 = state.yn2; + int yn1 = state.yn1, yn2 = state.yn2; const int NUM_FRAMES = (sample_count + (SAMPLES_PER_FRAME-1)) / SAMPLES_PER_FRAME; // Round up. - for (int frameno = 0; frameno < NUM_FRAMES; frameno++) { - int frame_header = data[frameno * FRAME_LEN]; + for (int framei = 0; framei < NUM_FRAMES; framei++) { + int frame_header = data[framei * FRAME_LEN]; int scale = 1 << (frame_header & 0xF); int idx = (frame_header >> 4) & 0x7; @@ -57,26 +67,26 @@ std::vector DecodeADPCM(const u8 * const data, const size_t sample_count, c int coef1 = adpcm_coeff[idx * 2 + 0]; int coef2 = adpcm_coeff[idx * 2 + 1]; - auto process_nybble = [&](int nybble) -> s16 { + // Decodes an audio sample. One nybble produces one s16 sample. + auto decode_sample = [&](int nybble) -> s16 { int xn = nybble * scale; // We first transform everything into 11 bit fixed point, perform the second order digital filter, then transform back. // 0x400 == 0.5 in 11 bit fixed point. 
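+            // The coefficients are presumably signed fixed point with 11 fractional bits,
+            // so the coef * y[n] products are already in 11 bit fixed point and the
+            // final >> 11 converts the result back to an integer sample.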
// Filter: y[n] = x[n] + 0.5 + c1 * y[n-1] + c2 * y[n-2] int val = ((xn << 11) + 0x400 + coef1 * yn1 + coef2 * yn2) >> 11; // Clamp to output range. - if (val >= 32767) val = 32767; - if (val <= -32768) val = -32768; + val = MathUtil::Clamp(val, -32768, 32767); // Advance output feedback. yn2 = yn1; yn1 = val; return (s16)val; }; - int outputi = frameno * SAMPLES_PER_FRAME; - int datai = frameno * FRAME_LEN + 1; + int outputi = framei * SAMPLES_PER_FRAME; + int datai = framei * FRAME_LEN + 1; for (int i = 0; i < SAMPLES_PER_FRAME && outputi < sample_count; i += 2) { - ret[outputi++] = process_nybble(SIGNED_NYBBLES[data[datai] & 0xF]); - ret[outputi++] = process_nybble(SIGNED_NYBBLES[data[datai] >> 4]); + ret[outputi++] = decode_sample(SIGNED_NYBBLES[data[datai] & 0xF]); + ret[outputi++] = decode_sample(SIGNED_NYBBLES[data[datai] >> 4]); datai++; } } @@ -88,55 +98,67 @@ std::vector DecodeADPCM(const u8 * const data, const size_t sample_count, c } struct OutputChannel { + ~OutputChannel() { + alDeleteSources(1, &source); + while (!queue.empty()) { + alDeleteBuffers(1, &queue.top().buffer); + queue.pop(); + } + while (!playing.empty()) { + alDeleteBuffers(1, &playing.front().buffer); + playing.pop(); + } + } + ALuint source; ///< Each channel has it's own output, we lean on OpenAL to do our mixing. // Configuration int mono_or_stereo; ///< Value from userland. 1 == mono, 2 == stereo, other == ??? Format format; bool enabled; ///< Userland wants us to remind them we have enabled this channel. + bool was_fed_data; ///< Userland wants to know if we have been fed data. // Buffer management std::priority_queue queue; ///< Things we have gotten from userland we haven't queued onto `source` yet. std::queue playing; ///< Things we have queued onto `source`. - u16 last_bufid; ///< Userland wants us to report back what was the thing we last played. + u16 last_buffer_id; ///< Userland wants us to report back what was the thing we last played. // For ADPCM decoding use. 
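+    // adpcm_coeffs holds the 16 predictor coefficients last supplied via UpdateAdpcm;
+    // adpcm_state carries the y[n-1]/y[n-2] filter history between calls to DecodeADPCM.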
std::array adpcm_coeffs; AdpcmState adpcm_state; }; -OutputChannel chans[24]; +static std::array chans; int InitAL() { - ALCdevice *device = alcOpenDevice(nullptr); + device = ALCDevicePointer(alcOpenDevice(nullptr), &alcCloseDevice); if (!device) { LOG_CRITICAL(Audio, "Could not open a device!"); return 1; } - ALCcontext *ctx = alcCreateContext(device, nullptr); - if (ctx == nullptr || alcMakeContextCurrent(ctx) == ALC_FALSE) { - if (ctx != nullptr) { - alcDestroyContext(ctx); + context = ALCContextPointer(alcCreateContext(device.get(), nullptr), &alcDestroyContext); + if (context == nullptr || alcMakeContextCurrent(context.get()) == ALC_FALSE) { + if (context != nullptr) { + context = nullptr; } - alcCloseDevice(device); + device = nullptr; LOG_CRITICAL(Audio, "Could not set a context!"); return 1; } - LOG_INFO(Audio, "Audio output is on \"%s\"", alcGetString(device, ALC_DEVICE_SPECIFIER)); + LOG_INFO(Audio, "Audio output is on \"%s\"", alcGetString(device.get(), ALC_DEVICE_SPECIFIER)); return 0; } -ALCint dev_rate; ///< Native sample rate of our output device -std::array silence; ///< Some silence, used if an audio error occurs +static ALCint dev_rate; ///< Native sample rate of our output device +static std::array silence = {}; ///< Some silence, used if an audio error occurs void Init() { InitAL(); - ALCdevice *device = alcGetContextsDevice(alcGetCurrentContext()); - alcGetIntegerv(device, ALC_FREQUENCY, 1, &dev_rate); - if (alcGetError(device) != ALC_NO_ERROR) { + alcGetIntegerv(device.get(), ALC_FREQUENCY, 1, &dev_rate); + if (alcGetError(device.get()) != ALC_NO_ERROR) { LOG_CRITICAL(Audio, "Failed to get device sample rate"); } LOG_INFO(Audio, "Device Frequency: %i", dev_rate); @@ -147,58 +169,38 @@ void Init() { LOG_CRITICAL(Audio, "Channel %i: Failed to setup sound source", i); } } - - silence.fill(0); } void Shutdown() { - ALCcontext *ctx = alcGetCurrentContext(); - if (ctx == nullptr) { - return; - } - - ALCdevice* dev = alcGetContextsDevice(ctx); - - for (int i = 0; i < 24; i++) { - alDeleteSources(1, &chans[i].source); - while (!chans[i].queue.empty()) { - alDeleteBuffers(1, &chans[i].queue.top().buffer); - chans[i].queue.pop(); - } - while (!chans[i].playing.empty()) { - alDeleteBuffers(1, &chans[i].playing.front().buffer); - chans[i].playing.pop(); - } - } - alcMakeContextCurrent(nullptr); - alcDestroyContext(ctx); - alcCloseDevice(dev); } -void UpdateFormat(int chanid, int mono_or_stereo, Format format) { - chans[chanid].mono_or_stereo = mono_or_stereo; - chans[chanid].format = format; +void UpdateFormat(int channel_id, int mono_or_stereo, Format format) { + chans[channel_id].mono_or_stereo = mono_or_stereo; + chans[channel_id].format = format; } -void UpdateAdpcm(int chanid, s16 coeffs[16]) { - LOG_DEBUG(Audio, "Channel %i: ADPCM Coeffs updated", chanid); - std::copy(coeffs, coeffs+16, std::begin(chans[chanid].adpcm_coeffs)); +void UpdateAdpcm(int channel_id, s16 coeffs[16]) { + LOG_DEBUG(Audio, "Channel %i: ADPCM Coeffs updated", channel_id); + std::copy(coeffs, coeffs+16, std::begin(chans[channel_id].adpcm_coeffs)); } -void EnqueueBuffer(int chanid, u16 buffer_id, void* data, int sample_count, bool is_looping) { - LOG_DEBUG(Audio, "Channel %i: Buffer %i: Enqueued (size %i)", chanid, buffer_id, sample_count); +void EnqueueBuffer(int channel_id, u16 buffer_id, void* data, int sample_count, bool is_looping) { + LOG_DEBUG(Audio, "Channel %i: Buffer %i: Enqueued (size %i)", channel_id, buffer_id, sample_count); if (is_looping) { - LOG_WARNING(Audio, "Channel %i: Buffer %i: 
Looped buffers are unimplemented", chanid, buffer_id); + LOG_WARNING(Audio, "Channel %i: Buffer %i: Looped buffers are unimplemented", channel_id, buffer_id); } + auto& c = chans[channel_id]; + c.was_fed_data = true; + ALuint b; alGenBuffers(1, &b); - switch(chans[chanid].format) { - case FORMAT_PCM16: - switch (chans[chanid].mono_or_stereo) { + switch(c.format) { + case Format::PCM16: + switch (c.mono_or_stereo) { case 2: alBufferData(b, AL_FORMAT_STEREO16, data, sample_count * 4, BASE_SAMPLE_RATE); break; @@ -211,8 +213,8 @@ void EnqueueBuffer(int chanid, u16 buffer_id, void* data, int sample_count, bool if (alGetError() != AL_NO_ERROR) goto do_silence; break; - case FORMAT_PCM8: - switch (chans[chanid].mono_or_stereo) { + case Format::PCM8: + switch (c.mono_or_stereo) { case 2: alBufferData(b, AL_FORMAT_STEREO8, data, sample_count * 2, BASE_SAMPLE_RATE); break; @@ -225,12 +227,12 @@ void EnqueueBuffer(int chanid, u16 buffer_id, void* data, int sample_count, bool if (alGetError() != AL_NO_ERROR) goto do_silence; break; - case FORMAT_ADPCM: { - if (chans[chanid].mono_or_stereo != 1) { - LOG_ERROR(Audio, "Channel %i: Buffer %i: Being fed non-mono ADPCM (size: %i samples)", chanid, buffer_id, sample_count); + case Format::ADPCM: { + if (c.mono_or_stereo != 1) { + LOG_ERROR(Audio, "Channel %i: Buffer %i: Being fed non-mono ADPCM (size: %i samples)", channel_id, buffer_id, sample_count); } - std::vector decoded = DecodeADPCM((u8*)data, sample_count, chans[chanid].adpcm_coeffs, chans[chanid].adpcm_state); + std::vector decoded = DecodeADPCM((u8*)data, sample_count, c.adpcm_coeffs, c.adpcm_state); alBufferData(b, AL_FORMAT_STEREO16, decoded.data(), decoded.size() * 2, BASE_SAMPLE_RATE); if (alGetError() != AL_NO_ERROR) goto do_silence; @@ -238,43 +240,44 @@ void EnqueueBuffer(int chanid, u16 buffer_id, void* data, int sample_count, bool break; } default: - LOG_ERROR(Audio, "Channel %i: Buffer %i: Unrecognised audio format (size: %i samples)", chanid, buffer_id, sample_count); + LOG_ERROR(Audio, "Channel %i: Buffer %i: Unrecognised audio format (size: %i samples)", channel_id, buffer_id, sample_count); do_silence: if (alGetError() != AL_NO_ERROR) { - LOG_CRITICAL(Audio, "Channel %i: Buffer %i: OpenAL says \"%s\"", chanid, buffer_id, alGetString(alGetError())); + LOG_CRITICAL(Audio, "Channel %i: Buffer %i: OpenAL says \"%s\"", channel_id, buffer_id, alGetString(alGetError())); } alBufferData(b, AL_FORMAT_MONO8, silence.data(), silence.size(), BASE_SAMPLE_RATE); if (alGetError() != AL_NO_ERROR) { - LOG_CRITICAL(Audio, "Channel %i: Failed to init silence buffer!!! (%s)", chanid, alGetString(alGetError())); + LOG_CRITICAL(Audio, "Channel %i: Failed to init silence buffer!!! 
(%s)", channel_id, alGetString(alGetError())); } break; } - chans[chanid].queue.emplace( Buffer { buffer_id, b, is_looping }); + c.queue.emplace( Buffer { buffer_id, b, is_looping }); - if (chans[chanid].queue.size() > 10) { - LOG_ERROR(Audio, "We have far far too many buffers enqueued on channel %i (%i of them)", chanid, chans[chanid].queue.size()); + if (c.queue.size() > 10) { + LOG_ERROR(Audio, "We have far far too many buffers enqueued on channel %i (%i of them)", channel_id, c.queue.size()); } } -void Play(int chanid, bool play) { +void Play(int channel_id, bool play) { if (play) { - LOG_INFO(Audio, "Channel %i: Enabled", chanid); + LOG_INFO(Audio, "Channel %i: Enabled", channel_id); } else { - LOG_INFO(Audio, "Channel %i: Disabled", chanid); + LOG_INFO(Audio, "Channel %i: Disabled", channel_id); + chans[channel_id].was_fed_data = false; } - chans[chanid].enabled = play; + chans[channel_id].enabled = play; } -void Tick(int chanid) { - auto& c = chans[chanid]; +void Tick(int channel_id) { + auto& c = chans[channel_id]; if (!c.queue.empty()) { while (!c.queue.empty()) { alSourceQueueBuffers(c.source, 1, &c.queue.top().buffer); if (alGetError() != AL_NO_ERROR) { alDeleteBuffers(1, &c.queue.top().buffer); - LOG_CRITICAL(Audio, "Channel %i: Buffer %i: Failed to enqueue : %s", chanid, c.queue.top().id, alGetString(alGetError())); + LOG_CRITICAL(Audio, "Channel %i: Buffer %i: Failed to enqueue : %s", channel_id, c.queue.top().id, alGetString(alGetError())); c.queue.pop(); continue; } @@ -290,8 +293,8 @@ void Tick(int chanid) { } } - if (chans[chanid].playing.size() > 10) { - LOG_ERROR(Audio, "Channel %i: We have far far too many buffers enqueued (%i of them)", chanid, chans[chanid].playing.size()); + if (chans[channel_id].playing.size() > 10) { + LOG_ERROR(Audio, "Channel %i: We have far far too many buffers enqueued (%i of them)", channel_id, chans[channel_id].playing.size()); } ALint processed; @@ -303,37 +306,38 @@ void Tick(int chanid) { if (!c.playing.empty()) { if (c.playing.front().buffer != buf) { - LOG_CRITICAL(Audio, "Channel %i: Play queue desynced with OpenAL queue. (buf???)", chanid); + LOG_CRITICAL(Audio, "Channel %i: Play queue desynced with OpenAL queue. (buf???)", channel_id); } else { - LOG_DEBUG(Audio, "Channel %i: Buffer %i: Finished playing", chanid, c.playing.front().id); + LOG_DEBUG(Audio, "Channel %i: Buffer %i: Finished playing", channel_id, c.playing.front().id); } - c.last_bufid = c.playing.front().id; + c.last_buffer_id = c.playing.front().id; c.playing.pop(); } else { - LOG_CRITICAL(Audio, "Channel %i: Play queue desynced with OpenAL queue. (empty)", chanid); + LOG_CRITICAL(Audio, "Channel %i: Play queue desynced with OpenAL queue. 
(empty)", channel_id); } alDeleteBuffers(1, &buf); } if (!c.playing.empty()) { - c.last_bufid = c.playing.front().id; + c.last_buffer_id = c.playing.front().id; } } -std::tuple GetStatus(int chanid) { - auto& c = chans[chanid]; +ChannelStatus GetStatus(int channel_id) { + auto& c = chans[channel_id]; - bool isplaying = c.enabled; - u16 bufid = c.last_bufid; - u32 pos; + ChannelStatus ret; + ret.is_enabled = c.enabled; + ret.most_recent_buffer_id = c.last_buffer_id; + ret.was_fed_data = c.was_fed_data; ALint state, samples; alGetSourcei(c.source, AL_SOURCE_STATE, &state); alGetSourcei(c.source, AL_SAMPLE_OFFSET, &samples); - pos = samples; + ret.sample_position = samples; - return std::make_tuple(isplaying, bufid, pos); + return ret; } }; \ No newline at end of file diff --git a/src/core/audio/audio.h b/src/core/audio/audio.h index 4b5fcaeef..4d6bf9ae4 100644 --- a/src/core/audio/audio.h +++ b/src/core/audio/audio.h @@ -1,8 +1,8 @@ -#pragma once +// Copyright 2016 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. -#include "AL/al.h" -#include "AL/alc.h" -#include "AL/alext.h" +#pragma once #include "common/common_types.h" @@ -13,25 +13,28 @@ namespace Audio { void Init(); void Shutdown(); -enum Format : u16 { - FORMAT_PCM8 = 0, - FORMAT_PCM16 = 1, - FORMAT_ADPCM = 2 +enum class Format : u16 { + PCM8 = 0, + PCM16 = 1, + ADPCM = 2 }; void UpdateFormat(int chanid, int mono_or_stereo, Format format); void UpdateAdpcm(int chanid, s16 coeffs[16]); -void Play(int chanid, bool play); +void Play(int channel_id, bool play); -void EnqueueBuffer(int chanid, u16 buffer_id, void* data, int sample_count, bool is_looping); +void EnqueueBuffer(int channel_id, u16 buffer_id, void* data, int sample_count, bool is_looping); -void Tick(int chanid); +void Tick(int channel_id); -// Return values: -// <1>: is_enabled -// <2>: prev buffer_id -// <3>: current sample position in current buffer -std::tuple GetStatus(int chanid); +struct ChannelStatus { + bool is_enabled; + bool was_fed_data; ///< Have we been fed data since being enabled? + u16 most_recent_buffer_id; + u32 sample_position; ///< Play position in current buffer +}; + +ChannelStatus GetStatus(int channel_id); }; \ No newline at end of file diff --git a/src/core/hle/service/dsp_dsp.cpp b/src/core/hle/service/dsp_dsp.cpp index 00511d858..5072a1691 100644 --- a/src/core/hle/service/dsp_dsp.cpp +++ b/src/core/hle/service/dsp_dsp.cpp @@ -66,20 +66,22 @@ static constexpr VAddr DspAddrToVAddr(VAddr base, DspRegion dsp_addr) { /** * dsp_u32: - * Care must be taken when reading/writing 32-bit values as the words are not in the expected order. + * Care must be taken when reading/writing 32-bit values in the DSP shared memory region + * as the byte order for 32-bit values is middle endian. + * This is presumably because the DSP is big endian with a 16 bit wordsize. 
*/ struct dsp_u32 { - operator u32() { + operator u32() const { return Convert(storage); } - void operator=(u32 newvalue) { - storage = Convert(newvalue); + void operator=(u32 new_value) { + storage = Convert(new_value); } private: static constexpr u32 Convert(u32 value) { return ((value & 0x0000FFFF) << 16) | ((value & 0xFFFF0000) >> 16); } - u32 storage; + u32 storage = 0; }; #define INSERT_PADDING_DSPWORDS(num_words) u16 CONCAT2(pad, __LINE__)[(num_words)] @@ -160,144 +162,179 @@ struct AdpcmCoefficients { }; ASSERT_STRUCT(AdpcmCoefficients, 32); -template -static inline bool TestAndUnsetBit(T& value, size_t bitno) { - T mask = 1 << bitno; - bool ret = (value & mask) == mask; - value &= ~mask; +// Temporary, switch ChannelContext::dirty to using BitFlags later. +template +static bool TestAndUnsetBit(T& value) { + auto& field = *reinterpret_cast*>(&value); + bool ret = field; + field = 0; return ret; } -static void AudioTick(u64, int cycles_late) { - VAddr current_base; +static VAddr GetCurrentBase() { + // Frame IDs. + int id0 = (int)Memory::Read16(DspAddrToVAddr(BASE_ADDR_0, DSPADDR0)); + int id1 = (int)Memory::Read16(DspAddrToVAddr(BASE_ADDR_1, DSPADDR0)); - { - // Frame IDs. - int id0 = (int)Memory::Read16(DspAddrToVAddr(BASE_ADDR_0, DSPADDR0)); - int id1 = (int)Memory::Read16(DspAddrToVAddr(BASE_ADDR_1, DSPADDR0)); + // The frame id increments once per audio frame, with wraparound at 65,535. + // I am uncertain whether the real DSP actually does something like this, + // or merely checks for a certan id for wraparound. TODO: Verify. + if (id1 - id0 > 10000 && id0 < 10) { + return BASE_ADDR_0; + } else if (id0 - id1 > 10000 && id1 < 10) { + return BASE_ADDR_1; + } else if (id1 > id0) { + return BASE_ADDR_1; + } else { + return BASE_ADDR_0; + } +} - // The frame id increments once per audio frame, with wraparound at 65,535. - // I am uncertain whether the real DSP actually does something like this, - // or merely checks for a certan id for wraparound. TODO: Verify. - if (id1 - id0 > 10000 && id0 < 10) { - current_base = BASE_ADDR_0; - } else if (id0 - id1 > 10000 && id1 < 10) { - current_base = BASE_ADDR_1; - } else if (id1 > id0) { - current_base = BASE_ADDR_1; - } else { - current_base = BASE_ADDR_0; - } +// Last recorded sync count from ChannelContext. 
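+// Indexed by channel_id; the stored value is written into the sync field of both
+// ChannelStatus regions on every audio tick (see UpdateChannelStatus below).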
+static std::array syncs; + +static ChannelContext GetChannelContext(VAddr base, int channel_id) { + ChannelContext ctx; + + if (!Memory::ExtractFromMemory(DspAddrToVAddr(base, DSPADDR1) + channel_id * sizeof(ChannelContext), ctx)) { + LOG_CRITICAL(Service_DSP, "ExtractFromMemory for DSPADDR1 failed"); } - auto channel_contexes = (ChannelContext*) Memory::GetPointer(DspAddrToVAddr(current_base, DSPADDR1)); - auto channel_contex0 = (ChannelContext*)Memory::GetPointer(DspAddrToVAddr(BASE_ADDR_0, DSPADDR1)); - auto channel_contex1 = (ChannelContext*)Memory::GetPointer(DspAddrToVAddr(BASE_ADDR_1, DSPADDR1)); - auto channel_status0 = (ChannelStatus*)Memory::GetPointer(DspAddrToVAddr(BASE_ADDR_0, DSPADDR2)); - auto channel_status1 = (ChannelStatus*)Memory::GetPointer(DspAddrToVAddr(BASE_ADDR_1, DSPADDR2)); - auto channel_adpcm_coeffs = (AdpcmCoefficients*) Memory::GetPointer(DspAddrToVAddr(current_base, DSPADDR3)); + return ctx; +} - for (int chanid=0; chanid(ctx.dirty)) { + // First time init + LOG_DEBUG(Service_DSP, "Channel %i: First Time Init", channel_id); + } + + if (TestAndUnsetBit<2>(ctx.dirty)) { + // Update ADPCM coefficients + AdpcmCoefficients coeff; + if (!Memory::ExtractFromMemory(DspAddrToVAddr(current_base, DSPADDR3) + channel_id * sizeof(coeff), coeff)) { + LOG_CRITICAL(Service_DSP, "ExtractFromMemory for DSPADDR3 failed"); + return; + } + Audio::UpdateAdpcm(channel_id, coeff.coeff); + } + + if (TestAndUnsetBit<17>(ctx.dirty)) { + // Interpolation type + LOG_WARNING(Service_DSP, "Channel %i: Unimplemented dirty bit 17", channel_id); + } + + if (TestAndUnsetBit<18>(ctx.dirty)) { + // Rate + LOG_WARNING(Service_DSP, "Channel %i: Unimplemented Rate %f", channel_id, ctx.rate); + } + + if (TestAndUnsetBit<22>(ctx.dirty)) { + // IIR + LOG_WARNING(Service_DSP, "Channel %i: Unimplemented IIR %x", channel_id, ctx.iirfilter_type); + } + + if (TestAndUnsetBit<28>(ctx.dirty)) { + // Sync count + LOG_DEBUG(Service_DSP, "Channel %i: Update Sync Count"); + syncs[channel_id] = ctx.sync; + } + + if (TestAndUnsetBit<25>(ctx.dirty) | TestAndUnsetBit<26>(ctx.dirty) | TestAndUnsetBit<27>(ctx.dirty)) { + // Mix + for (int i = 0; i < 12; i++) + LOG_DEBUG(Service_DSP, "Channel %i: mix[%i] %f", channel_id, i, ctx.mix[i]); + } + + if (TestAndUnsetBit<4>(ctx.dirty) | TestAndUnsetBit<21>(ctx.dirty) | TestAndUnsetBit<30>(ctx.dirty)) { + // TODO(merry): One of these bits might merely signify an update to the format. Verify this. 
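+        // Until that is verified, any of bits 4, 21 or 30 both refreshes the channel
+        // format and re-enqueues the embedded buffer below.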
+ + // Format updated + Audio::UpdateFormat(channel_id, ctx.mono_or_stereo, ctx.format); + + // Synchronise flags + /* + auto ctx0 = GetChannelContext(BASE_ADDR_0, channel_id); + auto ctx1 = GetChannelContext(BASE_ADDR_1, channel_id); + ctx0.flags1_raw = ctx1.flags1_raw = ctx.flags1_raw; + ctx0.flags2_raw = ctx1.flags2_raw = ctx.flags2_raw; + SetChannelContext(BASE_ADDR_0, channel_id, ctx0); + SetChannelContext(BASE_ADDR_1, channel_id, ctx1); + */ + + // Embedded Buffer Changed + Audio::EnqueueBuffer(channel_id, ctx.buffer_id, Memory::GetPhysicalPointer(ctx.physical_address), ctx.sample_count, ctx.is_looping); + } + + if (TestAndUnsetBit<19>(ctx.dirty)) { + // Buffer queue + for (int i = 0; i < 4; i++) { + if (ctx.buffers_dirty & (1 << i)) { + auto& b = ctx.buffers[i]; + Audio::EnqueueBuffer(channel_id, b.buffer_id, Memory::GetPhysicalPointer(b.physical_address), b.sample_count, b.is_looping); } - - if (TestAndUnsetBit(ctx.dirty, 2)) { - // Update ADPCM coefficients - Audio::UpdateAdpcm(chanid, channel_adpcm_coeffs[chanid].coeff); - } - - if (TestAndUnsetBit(ctx.dirty, 17)) { - // Interpolation type - LOG_WARNING(Service_DSP, "Channel %i: Unimplemented dirty bit 17", chanid); - } - - if (TestAndUnsetBit(ctx.dirty, 18)) { - // Rate - LOG_WARNING(Service_DSP, "Channel %i: Unimplemented Rate %f", chanid, ctx.rate); - } - - if (TestAndUnsetBit(ctx.dirty, 22)) { - // IIR - LOG_WARNING(Service_DSP, "Channel %i: Unimplemented IIR %x", chanid, ctx.iirfilter_type); - } - - if (TestAndUnsetBit(ctx.dirty, 28)) { - // Sync count - LOG_DEBUG(Service_DSP, "Channel %i: Update Sync Count"); - status0.sync = ctx.sync; - status1.sync = ctx.sync; - } - - if (TestAndUnsetBit(ctx.dirty, 25) | TestAndUnsetBit(ctx.dirty, 26) | TestAndUnsetBit(ctx.dirty, 27)) { - // Mix - for (int i = 0; i < 12; i++) - LOG_DEBUG(Service_DSP, "Channel %i: mix[%i] %f", chanid, i, ctx.mix[i]); - } - - if (TestAndUnsetBit(ctx.dirty, 4) | TestAndUnsetBit(ctx.dirty, 21) | TestAndUnsetBit(ctx.dirty, 30)) { - // TODO(merry): One of these bits might merely signify an update to the format. Verify this. - - // Format updated - Audio::UpdateFormat(chanid, ctx.mono_or_stereo, ctx.format); - channel_contex0[chanid].flags1_raw = channel_contex1[chanid].flags1_raw = ctx.flags1_raw; - channel_contex0[chanid].flags2_raw = channel_contex1[chanid].flags2_raw = ctx.flags2_raw; - - // Embedded Buffer Changed - Audio::EnqueueBuffer(chanid, ctx.buffer_id, Memory::GetPhysicalPointer(ctx.physical_address), ctx.sample_count, ctx.is_looping); - - status0.is_playing |= 0x100; // TODO: This is supposed to flicker on then turn off. - } - - if (TestAndUnsetBit(ctx.dirty, 19)) { - // Buffer queue - for (int i = 0; i < 4; i++) { - if (TestAndUnsetBit(ctx.buffers_dirty, i)) { - auto& b = ctx.buffers[i]; - Audio::EnqueueBuffer(chanid, b.buffer_id, Memory::GetPhysicalPointer(b.physical_address), b.sample_count, b.is_looping); - } - } - - if (ctx.buffers_dirty) { - LOG_ERROR(Service_DSP, "Channel %i: Unknown channel buffer dirty bits: 0x%04x", chanid, ctx.buffers_dirty); - } - - ctx.buffers_dirty = 0; - - status0.is_playing |= 0x100; // TODO: This is supposed to flicker on then turn off. - } - - if (TestAndUnsetBit(ctx.dirty, 16)) { - // Is Active? - Audio::Play(chanid, (ctx.is_active & 0xFF) != 0); - } - - if (ctx.dirty) { - LOG_ERROR(Service_DSP, "Channel %i: Unknown channel dirty bits: 0x%08x", chanid, ctx.dirty); - } - - ctx.dirty = 0; } - // TODO: Detect any change to the structures without a dirty flag update to identify what the other bits do. 
- - Audio::Tick(chanid); - - // Update channel status - bool playing = false; - std::tie(playing, status0.current_buffer_id, status0.buffer_position) = Audio::GetStatus(chanid); - if (playing) { - status0.is_playing |= 1; - } else { - status0.is_playing = 0; + if (ctx.buffers_dirty & ~(u32)0xF) { + LOG_ERROR(Service_DSP, "Channel %i: Unknown channel buffer dirty bits: 0x%04x", channel_id, ctx.buffers_dirty); } - status1 = status0; + + ctx.buffers_dirty = 0; + } + + if (TestAndUnsetBit<16>(ctx.dirty)) { + // Is Active? + Audio::Play(channel_id, (ctx.is_active & 0xFF) != 0); + } + + if (ctx.dirty) { + LOG_ERROR(Service_DSP, "Channel %i: Unknown channel dirty bits: 0x%08x", channel_id, ctx.dirty); + } + + ctx.dirty = 0; + SetChannelContext(current_base, channel_id, ctx); +} + +static void UpdateChannelStatus(int channel_id) { + auto audio_status = Audio::GetStatus(channel_id); + + ChannelStatus status; + status.sync = syncs[channel_id]; + status.current_buffer_id = audio_status.most_recent_buffer_id; + status.buffer_position = audio_status.sample_position; + status.is_playing = 0; + if (audio_status.is_enabled) status.is_playing |= 1; + if (audio_status.was_fed_data) status.is_playing |= 0x100; + + bool success = true; + success &= Memory::InjectIntoMemory(DspAddrToVAddr(BASE_ADDR_0, DSPADDR2) + channel_id * sizeof(ChannelStatus), status); + success &= Memory::InjectIntoMemory(DspAddrToVAddr(BASE_ADDR_1, DSPADDR2) + channel_id * sizeof(ChannelStatus), status); + if (!success) { + LOG_CRITICAL(Service_DSP, "InjectIntoMemory for DSPADDR2 failed"); + } +} + +static void AudioTick(u64, int cycles_late) { + VAddr current_base = GetCurrentBase(); + + for (int channel_id = 0; channel_id < NUM_CHANNELS; channel_id++) { + ReadChannelContext(current_base, channel_id); + + Audio::Tick(channel_id); + + UpdateChannelStatus(channel_id); } for (auto interrupt_event : interrupt_events) @@ -585,12 +622,12 @@ static void GetHeadphoneStatus(Service::Interface* self) { static void RecvData(Service::Interface* self) { u32* cmd_buff = Kernel::GetCommandBuffer(); - u32 registerNo = cmd_buff[1]; + u32 register_number = cmd_buff[1]; cmd_buff[1] = RESULT_SUCCESS.raw; // No error cmd_buff[2] = 1; - LOG_WARNING(Service_DSP, "(STUBBED) called register=%u", registerNo); + LOG_WARNING(Service_DSP, "(STUBBED) called register=%u", register_number); } /** @@ -606,17 +643,17 @@ static void RecvData(Service::Interface* self) { static void RecvDataIsReady(Service::Interface* self) { u32* cmd_buff = Kernel::GetCommandBuffer(); - u32 registerNo = cmd_buff[1]; + u32 register_number = cmd_buff[1]; cmd_buff[1] = RESULT_SUCCESS.raw; // No error cmd_buff[2] = 1; - LOG_WARNING(Service_DSP, "(STUBBED) called register=%u", registerNo); + LOG_WARNING(Service_DSP, "(STUBBED) called register=%u", register_number); } const Interface::FunctionInfo FunctionTable[] = { - {0x00010040, nullptr, "RecvData"}, - {0x00020040, nullptr, "RecvDataIsReady"}, + {0x00010040, RecvData, "RecvData"}, + {0x00020040, RecvDataIsReady, "RecvDataIsReady"}, {0x00030080, nullptr, "SendData"}, {0x00040040, nullptr, "SendDataIsEmpty"}, {0x000500C2, nullptr, "SendFifoEx"},