mirror of
https://git.suyu.dev/suyu/suyu.git
synced 2024-12-20 23:40:56 +01:00
Remove references to PICA and rasterizers in video_core
This commit is contained in:
parent
ebf9a784a9
commit
1d28b2e142
77 changed files with 4 additions and 16444 deletions
|
@ -41,8 +41,6 @@ set(SRCS
|
|||
hle/service/am/applet_oe.cpp
|
||||
hle/service/aoc/aoc_u.cpp
|
||||
hle/service/apm/apm.cpp
|
||||
hle/service/dsp_dsp.cpp
|
||||
hle/service/gsp_gpu.cpp
|
||||
hle/service/hid/hid.cpp
|
||||
hle/service/lm/lm.cpp
|
||||
hle/service/nvdrv/devices/nvdisp_disp0.cpp
|
||||
|
@ -58,10 +56,6 @@ set(SRCS
|
|||
hle/service/vi/vi.cpp
|
||||
hle/service/vi/vi_m.cpp
|
||||
hle/shared_page.cpp
|
||||
hw/aes/arithmetic128.cpp
|
||||
hw/aes/ccm.cpp
|
||||
hw/aes/key.cpp
|
||||
hw/gpu.cpp
|
||||
hw/hw.cpp
|
||||
hw/lcd.cpp
|
||||
loader/elf.cpp
|
||||
|
@ -130,8 +124,6 @@ set(HEADERS
|
|||
hle/service/am/applet_oe.h
|
||||
hle/service/aoc/aoc_u.h
|
||||
hle/service/apm/apm.h
|
||||
hle/service/dsp_dsp.h
|
||||
hle/service/gsp_gpu.h
|
||||
hle/service/hid/hid.h
|
||||
hle/service/lm/lm.h
|
||||
hle/service/nvdrv/devices/nvdevice.h
|
||||
|
@ -148,10 +140,6 @@ set(HEADERS
|
|||
hle/service/vi/vi.h
|
||||
hle/service/vi/vi_m.h
|
||||
hle/shared_page.h
|
||||
hw/aes/arithmetic128.h
|
||||
hw/aes/ccm.h
|
||||
hw/aes/key.h
|
||||
hw/gpu.h
|
||||
hw/hw.h
|
||||
hw/lcd.h
|
||||
loader/elf.h
|
||||
|
@ -171,8 +159,5 @@ set(HEADERS
|
|||
|
||||
create_directory_groups(${SRCS} ${HEADERS})
|
||||
add_library(core STATIC ${SRCS} ${HEADERS})
|
||||
target_link_libraries(core PUBLIC common PRIVATE audio_core dynarmic network video_core)
|
||||
target_link_libraries(core PUBLIC common PRIVATE dynarmic video_core)
|
||||
target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt lz4_static unicorn)
|
||||
if (ENABLE_WEB_SERVICE)
|
||||
target_link_libraries(core PUBLIC json-headers web_service)
|
||||
endif()
|
||||
|
|
|
@ -1,17 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "audio_core/hle/pipe.h"
|
||||
#include "core/hle/service/dsp_dsp.h"
|
||||
|
||||
using DspPipe = DSP::HLE::DspPipe;
|
||||
|
||||
namespace Service {
|
||||
namespace DSP_DSP {
|
||||
|
||||
void SignalPipeInterrupt(DspPipe pipe) {
|
||||
}
|
||||
|
||||
} // namespace DSP_DSP
|
||||
} // namespace Service
|
|
@ -1,26 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include "core/hle/service/service.h"
|
||||
|
||||
namespace DSP {
|
||||
namespace HLE {
|
||||
enum class DspPipe;
|
||||
}
|
||||
}
|
||||
|
||||
namespace Service {
|
||||
namespace DSP_DSP {
|
||||
|
||||
/**
|
||||
* Signal a specific DSP related interrupt of type == InterruptType::Pipe, pipe == pipe.
|
||||
* @param pipe The DSP pipe for which to signal an interrupt for.
|
||||
*/
|
||||
void SignalPipeInterrupt(DSP::HLE::DspPipe pipe);
|
||||
|
||||
} // namespace DSP_DSP
|
||||
} // namespace Service
|
|
@ -1,11 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "core/hle/service/gsp_gpu.h"
|
||||
|
||||
namespace Service {
|
||||
namespace GSP {
|
||||
|
||||
} // namespace GSP
|
||||
} // namespace Service
|
|
@ -1,195 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstddef>
|
||||
#include <string>
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/result.h"
|
||||
#include "core/hle/service/service.h"
|
||||
|
||||
namespace Service {
|
||||
namespace GSP {
|
||||
|
||||
/// GSP interrupt ID
|
||||
enum class InterruptId : u8 {
|
||||
PSC0 = 0x00,
|
||||
PSC1 = 0x01,
|
||||
PDC0 = 0x02, // Seems called every vertical screen line
|
||||
PDC1 = 0x03, // Seems called every frame
|
||||
PPF = 0x04,
|
||||
P3D = 0x05,
|
||||
DMA = 0x06,
|
||||
};
|
||||
|
||||
/// GSP command ID
|
||||
enum class CommandId : u32 {
|
||||
REQUEST_DMA = 0x00,
|
||||
/// Submits a commandlist for execution by the GPU.
|
||||
SUBMIT_GPU_CMDLIST = 0x01,
|
||||
|
||||
// Fills a given memory range with a particular value
|
||||
SET_MEMORY_FILL = 0x02,
|
||||
|
||||
// Copies an image and optionally performs color-conversion or scaling.
|
||||
// This is highly similar to the GameCube's EFB copy feature
|
||||
SET_DISPLAY_TRANSFER = 0x03,
|
||||
|
||||
// Conceptionally similar to SET_DISPLAY_TRANSFER and presumable uses the same hardware path
|
||||
SET_TEXTURE_COPY = 0x04,
|
||||
/// Flushes up to 3 cache regions in a single command.
|
||||
CACHE_FLUSH = 0x05,
|
||||
};
|
||||
|
||||
/// GSP thread interrupt relay queue
|
||||
struct InterruptRelayQueue {
|
||||
// Index of last interrupt in the queue
|
||||
u8 index;
|
||||
// Number of interrupts remaining to be processed by the userland code
|
||||
u8 number_interrupts;
|
||||
// Error code - zero on success, otherwise an error has occurred
|
||||
u8 error_code;
|
||||
u8 padding1;
|
||||
|
||||
u32 missed_PDC0;
|
||||
u32 missed_PDC1;
|
||||
|
||||
InterruptId slot[0x34]; ///< Interrupt ID slots
|
||||
};
|
||||
static_assert(sizeof(InterruptRelayQueue) == 0x40, "InterruptRelayQueue struct has incorrect size");
|
||||
|
||||
struct FrameBufferInfo {
|
||||
BitField<0, 1, u32> active_fb; // 0 = first, 1 = second
|
||||
|
||||
u32 address_left;
|
||||
u32 address_right;
|
||||
u32 stride; // maps to 0x1EF00X90 ?
|
||||
u32 format; // maps to 0x1EF00X70 ?
|
||||
u32 shown_fb; // maps to 0x1EF00X78 ?
|
||||
u32 unknown;
|
||||
};
|
||||
static_assert(sizeof(FrameBufferInfo) == 0x1c, "Struct has incorrect size");
|
||||
|
||||
struct FrameBufferUpdate {
|
||||
BitField<0, 1, u8> index; // Index used for GSP::SetBufferSwap
|
||||
BitField<0, 1, u8> is_dirty; // true if GSP should update GPU framebuffer registers
|
||||
u16 pad1;
|
||||
|
||||
FrameBufferInfo framebuffer_info[2];
|
||||
|
||||
u32 pad2;
|
||||
};
|
||||
static_assert(sizeof(FrameBufferUpdate) == 0x40, "Struct has incorrect size");
|
||||
// TODO: Not sure if this padding is correct.
|
||||
// Chances are the second block is stored at offset 0x24 rather than 0x20.
|
||||
#ifndef _MSC_VER
|
||||
static_assert(offsetof(FrameBufferUpdate, framebuffer_info[1]) == 0x20,
|
||||
"FrameBufferInfo element has incorrect alignment");
|
||||
#endif
|
||||
|
||||
/// GSP command
|
||||
struct Command {
|
||||
BitField<0, 8, CommandId> id;
|
||||
|
||||
union {
|
||||
struct {
|
||||
u32 source_address;
|
||||
u32 dest_address;
|
||||
u32 size;
|
||||
} dma_request;
|
||||
|
||||
struct {
|
||||
u32 address;
|
||||
u32 size;
|
||||
u32 flags;
|
||||
u32 unused[3];
|
||||
u32 do_flush;
|
||||
} submit_gpu_cmdlist;
|
||||
|
||||
struct {
|
||||
u32 start1;
|
||||
u32 value1;
|
||||
u32 end1;
|
||||
|
||||
u32 start2;
|
||||
u32 value2;
|
||||
u32 end2;
|
||||
|
||||
u16 control1;
|
||||
u16 control2;
|
||||
} memory_fill;
|
||||
|
||||
struct {
|
||||
u32 in_buffer_address;
|
||||
u32 out_buffer_address;
|
||||
u32 in_buffer_size;
|
||||
u32 out_buffer_size;
|
||||
u32 flags;
|
||||
} display_transfer;
|
||||
|
||||
struct {
|
||||
u32 in_buffer_address;
|
||||
u32 out_buffer_address;
|
||||
u32 size;
|
||||
u32 in_width_gap;
|
||||
u32 out_width_gap;
|
||||
u32 flags;
|
||||
} texture_copy;
|
||||
|
||||
struct {
|
||||
struct {
|
||||
u32 address;
|
||||
u32 size;
|
||||
} regions[3];
|
||||
} cache_flush;
|
||||
|
||||
u8 raw_data[0x1C];
|
||||
};
|
||||
};
|
||||
static_assert(sizeof(Command) == 0x20, "Command struct has incorrect size");
|
||||
|
||||
/// GSP shared memory GX command buffer header
|
||||
struct CommandBuffer {
|
||||
union {
|
||||
u32 hex;
|
||||
|
||||
// Current command index. This index is updated by GSP module after loading the command
|
||||
// data, right before the command is processed. When this index is updated by GSP module,
|
||||
// the total commands field is decreased by one as well.
|
||||
BitField<0, 8, u32> index;
|
||||
|
||||
// Total commands to process, must not be value 0 when GSP module handles commands. This
|
||||
// must be <=15 when writing a command to shared memory. This is incremented by the
|
||||
// application when writing a command to shared memory, after increasing this value
|
||||
// TriggerCmdReqQueue is only used if this field is value 1.
|
||||
BitField<8, 8, u32> number_commands;
|
||||
};
|
||||
|
||||
u32 unk[7];
|
||||
|
||||
Command commands[0xF];
|
||||
};
|
||||
static_assert(sizeof(CommandBuffer) == 0x200, "CommandBuffer struct has incorrect size");
|
||||
|
||||
/**
|
||||
* Signals that the specified interrupt type has occurred to userland code
|
||||
* @param interrupt_id ID of interrupt that is being signalled
|
||||
*/
|
||||
void SignalInterrupt(InterruptId interrupt_id);
|
||||
|
||||
ResultCode SetBufferSwap(u32 screen_id, const FrameBufferInfo& info);
|
||||
|
||||
/**
|
||||
* Retrieves the framebuffer info stored in the GSP shared memory for the
|
||||
* specified screen index and thread id.
|
||||
* @param thread_id GSP thread id of the process that accesses the structure that we are requesting.
|
||||
* @param screen_index Index of the screen we are requesting (Top = 0, Bottom = 1).
|
||||
* @returns FramebufferUpdate Information about the specified framebuffer.
|
||||
*/
|
||||
FrameBufferUpdate* GetFrameBufferInfo(u32 thread_id, u32 screen_index);
|
||||
|
||||
} // namespace GSP
|
||||
} // namespace Service
|
|
@ -1,47 +0,0 @@
|
|||
// Copyright 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <functional>
|
||||
#include "core/hw/aes/arithmetic128.h"
|
||||
|
||||
namespace HW {
|
||||
namespace AES {
|
||||
|
||||
AESKey Lrot128(const AESKey& in, u32 rot) {
|
||||
AESKey out;
|
||||
rot %= 128;
|
||||
const u32 byte_shift = rot / 8;
|
||||
const u32 bit_shift = rot % 8;
|
||||
|
||||
for (u32 i = 0; i < 16; i++) {
|
||||
const u32 wrap_index_a = (i + byte_shift) % 16;
|
||||
const u32 wrap_index_b = (i + byte_shift + 1) % 16;
|
||||
out[i] = ((in[wrap_index_a] << bit_shift) | (in[wrap_index_b] >> (8 - bit_shift))) & 0xFF;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
AESKey Add128(const AESKey& a, const AESKey& b) {
|
||||
AESKey out;
|
||||
u32 carry = 0;
|
||||
u32 sum = 0;
|
||||
|
||||
for (int i = 15; i >= 0; i--) {
|
||||
sum = a[i] + b[i] + carry;
|
||||
carry = sum >> 8;
|
||||
out[i] = static_cast<u8>(sum & 0xff);
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
AESKey Xor128(const AESKey& a, const AESKey& b) {
|
||||
AESKey out;
|
||||
std::transform(a.cbegin(), a.cend(), b.cbegin(), out.begin(), std::bit_xor<>());
|
||||
return out;
|
||||
}
|
||||
|
||||
} // namespace AES
|
||||
} // namespace HW
|
|
@ -1,17 +0,0 @@
|
|||
// Copyright 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "core/hw/aes/key.h"
|
||||
|
||||
namespace HW {
|
||||
namespace AES {
|
||||
AESKey Lrot128(const AESKey& in, u32 rot);
|
||||
AESKey Add128(const AESKey& a, const AESKey& b);
|
||||
AESKey Xor128(const AESKey& a, const AESKey& b);
|
||||
|
||||
} // namspace AES
|
||||
} // namespace HW
|
|
@ -1,40 +0,0 @@
|
|||
// Copyright 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <cstddef>
|
||||
#include <vector>
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace HW {
|
||||
namespace AES {
|
||||
|
||||
constexpr size_t CCM_NONCE_SIZE = 12;
|
||||
constexpr size_t CCM_MAC_SIZE = 16;
|
||||
|
||||
using CCMNonce = std::array<u8, CCM_NONCE_SIZE>;
|
||||
|
||||
/**
|
||||
* Encrypts and adds a MAC to the given data using AES-CCM algorithm.
|
||||
* @param pdata The plain text data to encrypt
|
||||
* @param nonce The nonce data to use for encryption
|
||||
* @param slot_id The slot ID of the key to use for encryption
|
||||
* @returns a vector of u8 containing the encrypted data with MAC at the end
|
||||
*/
|
||||
std::vector<u8> EncryptSignCCM(const std::vector<u8>& pdata, const CCMNonce& nonce, size_t slot_id);
|
||||
|
||||
/**
|
||||
* Decrypts and verify the MAC of the given data using AES-CCM algorithm.
|
||||
* @param cipher The cipher text data to decrypt, with MAC at the end to verify
|
||||
* @param nonce The nonce data to use for decryption
|
||||
* @param slot_id The slot ID of the key to use for decryption
|
||||
* @returns a vector of u8 containing the decrypted data; an empty vector if the verification fails
|
||||
*/
|
||||
std::vector<u8> DecryptVerifyCCM(const std::vector<u8>& cipher, const CCMNonce& nonce,
|
||||
size_t slot_id);
|
||||
|
||||
} // namespace AES
|
||||
} // namespace HW
|
|
@ -1,173 +0,0 @@
|
|||
// Copyright 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <exception>
|
||||
#include <sstream>
|
||||
#include <boost/optional.hpp>
|
||||
#include "common/common_paths.h"
|
||||
#include "common/file_util.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/string_util.h"
|
||||
#include "core/hw/aes/arithmetic128.h"
|
||||
#include "core/hw/aes/key.h"
|
||||
|
||||
namespace HW {
|
||||
namespace AES {
|
||||
|
||||
namespace {
|
||||
|
||||
boost::optional<AESKey> generator_constant;
|
||||
|
||||
struct KeySlot {
|
||||
boost::optional<AESKey> x;
|
||||
boost::optional<AESKey> y;
|
||||
boost::optional<AESKey> normal;
|
||||
|
||||
void SetKeyX(const AESKey& key) {
|
||||
x = key;
|
||||
if (y && generator_constant) {
|
||||
GenerateNormalKey();
|
||||
}
|
||||
}
|
||||
|
||||
void SetKeyY(const AESKey& key) {
|
||||
y = key;
|
||||
if (x && generator_constant) {
|
||||
GenerateNormalKey();
|
||||
}
|
||||
}
|
||||
|
||||
void SetNormalKey(const AESKey& key) {
|
||||
normal = key;
|
||||
}
|
||||
|
||||
void GenerateNormalKey() {
|
||||
normal = Lrot128(Add128(Xor128(Lrot128(*x, 2), *y), *generator_constant), 87);
|
||||
}
|
||||
|
||||
void Clear() {
|
||||
x.reset();
|
||||
y.reset();
|
||||
normal.reset();
|
||||
}
|
||||
};
|
||||
|
||||
std::array<KeySlot, KeySlotID::MaxKeySlotID> key_slots;
|
||||
|
||||
void ClearAllKeys() {
|
||||
for (KeySlot& slot : key_slots) {
|
||||
slot.Clear();
|
||||
}
|
||||
generator_constant.reset();
|
||||
}
|
||||
|
||||
AESKey HexToKey(const std::string& hex) {
|
||||
if (hex.size() < 32) {
|
||||
throw std::invalid_argument("hex string is too short");
|
||||
}
|
||||
|
||||
AESKey key;
|
||||
for (size_t i = 0; i < key.size(); ++i) {
|
||||
key[i] = static_cast<u8>(std::stoi(hex.substr(i * 2, 2), 0, 16));
|
||||
}
|
||||
|
||||
return key;
|
||||
}
|
||||
|
||||
void LoadPresetKeys() {
|
||||
const std::string filepath = FileUtil::GetUserPath(D_SYSDATA_IDX) + AES_KEYS;
|
||||
FileUtil::CreateFullPath(filepath); // Create path if not already created
|
||||
std::ifstream file;
|
||||
OpenFStream(file, filepath, std::ios_base::in);
|
||||
if (!file) {
|
||||
return;
|
||||
}
|
||||
|
||||
while (!file.eof()) {
|
||||
std::string line;
|
||||
std::getline(file, line);
|
||||
std::vector<std::string> parts;
|
||||
Common::SplitString(line, '=', parts);
|
||||
if (parts.size() != 2) {
|
||||
LOG_ERROR(HW_AES, "Failed to parse %s", line.c_str());
|
||||
continue;
|
||||
}
|
||||
|
||||
const std::string& name = parts[0];
|
||||
AESKey key;
|
||||
try {
|
||||
key = HexToKey(parts[1]);
|
||||
} catch (const std::logic_error& e) {
|
||||
LOG_ERROR(HW_AES, "Invalid key %s: %s", parts[1].c_str(), e.what());
|
||||
continue;
|
||||
}
|
||||
|
||||
if (name == "generator") {
|
||||
generator_constant = key;
|
||||
continue;
|
||||
}
|
||||
|
||||
size_t slot_id;
|
||||
char key_type;
|
||||
if (std::sscanf(name.c_str(), "slot0x%zXKey%c", &slot_id, &key_type) != 2) {
|
||||
LOG_ERROR(HW_AES, "Invalid key name %s", name.c_str());
|
||||
continue;
|
||||
}
|
||||
|
||||
if (slot_id >= MaxKeySlotID) {
|
||||
LOG_ERROR(HW_AES, "Out of range slot ID 0x%zX", slot_id);
|
||||
continue;
|
||||
}
|
||||
|
||||
switch (key_type) {
|
||||
case 'X':
|
||||
key_slots.at(slot_id).SetKeyX(key);
|
||||
break;
|
||||
case 'Y':
|
||||
key_slots.at(slot_id).SetKeyY(key);
|
||||
break;
|
||||
case 'N':
|
||||
key_slots.at(slot_id).SetNormalKey(key);
|
||||
break;
|
||||
default:
|
||||
LOG_ERROR(HW_AES, "Invalid key type %c", key_type);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
void InitKeys() {
|
||||
ClearAllKeys();
|
||||
LoadPresetKeys();
|
||||
}
|
||||
|
||||
void SetGeneratorConstant(const AESKey& key) {
|
||||
generator_constant = key;
|
||||
}
|
||||
|
||||
void SetKeyX(size_t slot_id, const AESKey& key) {
|
||||
key_slots.at(slot_id).SetKeyX(key);
|
||||
}
|
||||
|
||||
void SetKeyY(size_t slot_id, const AESKey& key) {
|
||||
key_slots.at(slot_id).SetKeyY(key);
|
||||
}
|
||||
|
||||
void SetNormalKey(size_t slot_id, const AESKey& key) {
|
||||
key_slots.at(slot_id).SetNormalKey(key);
|
||||
}
|
||||
|
||||
bool IsNormalKeyAvailable(size_t slot_id) {
|
||||
return key_slots.at(slot_id).normal.is_initialized();
|
||||
}
|
||||
|
||||
AESKey GetNormalKey(size_t slot_id) {
|
||||
return key_slots.at(slot_id).normal.value_or(AESKey{});
|
||||
}
|
||||
|
||||
} // namespace AES
|
||||
} // namespace HW
|
|
@ -1,37 +0,0 @@
|
|||
// Copyright 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <cstddef>
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace HW {
|
||||
namespace AES {
|
||||
|
||||
enum KeySlotID : size_t {
|
||||
// AES Keyslot used to generate the UDS data frame CCMP key.
|
||||
UDSDataKey = 0x2D,
|
||||
APTWrap = 0x31,
|
||||
|
||||
MaxKeySlotID = 0x40,
|
||||
};
|
||||
|
||||
constexpr size_t AES_BLOCK_SIZE = 16;
|
||||
|
||||
using AESKey = std::array<u8, AES_BLOCK_SIZE>;
|
||||
|
||||
void InitKeys();
|
||||
|
||||
void SetGeneratorConstant(const AESKey& key);
|
||||
void SetKeyX(size_t slot_id, const AESKey& key);
|
||||
void SetKeyY(size_t slot_id, const AESKey& key);
|
||||
void SetNormalKey(size_t slot_id, const AESKey& key);
|
||||
|
||||
bool IsNormalKeyAvailable(size_t slot_id);
|
||||
AESKey GetNormalKey(size_t slot_id);
|
||||
|
||||
} // namspace AES
|
||||
} // namespace HW
|
|
@ -1,573 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <cstring>
|
||||
#include <numeric>
|
||||
#include <type_traits>
|
||||
#include "common/alignment.h"
|
||||
#include "common/color.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "core/core_timing.h"
|
||||
#include "core/hle/service/gsp_gpu.h"
|
||||
#include "core/hw/gpu.h"
|
||||
#include "core/hw/hw.h"
|
||||
#include "core/memory.h"
|
||||
#include "core/tracer/recorder.h"
|
||||
#include "video_core/command_processor.h"
|
||||
#include "video_core/debug_utils/debug_utils.h"
|
||||
#include "video_core/rasterizer_interface.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
#include "video_core/utils.h"
|
||||
#include "video_core/video_core.h"
|
||||
|
||||
namespace GPU {
|
||||
|
||||
Regs g_regs;
|
||||
|
||||
/// 268MHz CPU clocks / 60Hz frames per second
|
||||
const u64 frame_ticks = static_cast<u64>(BASE_CLOCK_RATE / SCREEN_REFRESH_RATE);
|
||||
/// Event id for CoreTiming
|
||||
static CoreTiming::EventType* vblank_event;
|
||||
|
||||
template <typename T>
|
||||
inline void Read(T& var, const u32 raw_addr) {
|
||||
u32 addr = raw_addr - HW::VADDR_GPU;
|
||||
u32 index = addr / 4;
|
||||
|
||||
// Reads other than u32 are untested, so I'd rather have them abort than silently fail
|
||||
if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
|
||||
LOG_ERROR(HW_GPU, "unknown Read%lu @ 0x%08X", sizeof(var) * 8, addr);
|
||||
return;
|
||||
}
|
||||
|
||||
var = g_regs[addr / 4];
|
||||
}
|
||||
|
||||
static Math::Vec4<u8> DecodePixel(Regs::PixelFormat input_format, const u8* src_pixel) {
|
||||
switch (input_format) {
|
||||
case Regs::PixelFormat::RGBA8:
|
||||
return Color::DecodeRGBA8(src_pixel);
|
||||
|
||||
case Regs::PixelFormat::RGB8:
|
||||
return Color::DecodeRGB8(src_pixel);
|
||||
|
||||
case Regs::PixelFormat::RGB565:
|
||||
return Color::DecodeRGB565(src_pixel);
|
||||
|
||||
case Regs::PixelFormat::RGB5A1:
|
||||
return Color::DecodeRGB5A1(src_pixel);
|
||||
|
||||
case Regs::PixelFormat::RGBA4:
|
||||
return Color::DecodeRGBA4(src_pixel);
|
||||
|
||||
default:
|
||||
LOG_ERROR(HW_GPU, "Unknown source framebuffer format %x", input_format);
|
||||
return {0, 0, 0, 0};
|
||||
}
|
||||
}
|
||||
|
||||
MICROPROFILE_DEFINE(GPU_DisplayTransfer, "GPU", "DisplayTransfer", MP_RGB(100, 100, 255));
|
||||
MICROPROFILE_DEFINE(GPU_CmdlistProcessing, "GPU", "Cmdlist Processing", MP_RGB(100, 255, 100));
|
||||
|
||||
static void MemoryFill(const Regs::MemoryFillConfig& config) {
|
||||
const PAddr start_addr = config.GetStartAddress();
|
||||
const PAddr end_addr = config.GetEndAddress();
|
||||
|
||||
// TODO: do hwtest with these cases
|
||||
if (!Memory::IsValidPhysicalAddress(start_addr)) {
|
||||
LOG_CRITICAL(HW_GPU, "invalid start address 0x%08X", start_addr);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!Memory::IsValidPhysicalAddress(end_addr)) {
|
||||
LOG_CRITICAL(HW_GPU, "invalid end address 0x%08X", end_addr);
|
||||
return;
|
||||
}
|
||||
|
||||
if (end_addr <= start_addr) {
|
||||
LOG_CRITICAL(HW_GPU, "invalid memory range from 0x%08X to 0x%08X", start_addr, end_addr);
|
||||
return;
|
||||
}
|
||||
|
||||
u8* start = Memory::GetPhysicalPointer(start_addr);
|
||||
u8* end = Memory::GetPhysicalPointer(end_addr);
|
||||
|
||||
// TODO: Consider always accelerating and returning vector of
|
||||
// regions that the accelerated fill did not cover to
|
||||
// reduce/eliminate the fill that the cpu has to do.
|
||||
// This would also mean that the flush below is not needed.
|
||||
// Fill should first flush all surfaces that touch but are
|
||||
// not completely within the fill range.
|
||||
// Then fill all completely covered surfaces, and return the
|
||||
// regions that were between surfaces or within the touching
|
||||
// ones for cpu to manually fill here.
|
||||
if (VideoCore::g_renderer->Rasterizer()->AccelerateFill(config))
|
||||
return;
|
||||
|
||||
Memory::RasterizerFlushAndInvalidateRegion(config.GetStartAddress(),
|
||||
config.GetEndAddress() - config.GetStartAddress());
|
||||
|
||||
if (config.fill_24bit) {
|
||||
// fill with 24-bit values
|
||||
for (u8* ptr = start; ptr < end; ptr += 3) {
|
||||
ptr[0] = config.value_24bit_r;
|
||||
ptr[1] = config.value_24bit_g;
|
||||
ptr[2] = config.value_24bit_b;
|
||||
}
|
||||
} else if (config.fill_32bit) {
|
||||
// fill with 32-bit values
|
||||
if (end > start) {
|
||||
u32 value = config.value_32bit;
|
||||
size_t len = (end - start) / sizeof(u32);
|
||||
for (size_t i = 0; i < len; ++i)
|
||||
memcpy(&start[i * sizeof(u32)], &value, sizeof(u32));
|
||||
}
|
||||
} else {
|
||||
// fill with 16-bit values
|
||||
u16 value_16bit = config.value_16bit.Value();
|
||||
for (u8* ptr = start; ptr < end; ptr += sizeof(u16))
|
||||
memcpy(ptr, &value_16bit, sizeof(u16));
|
||||
}
|
||||
}
|
||||
|
||||
static void DisplayTransfer(const Regs::DisplayTransferConfig& config) {
|
||||
const PAddr src_addr = config.GetPhysicalInputAddress();
|
||||
const PAddr dst_addr = config.GetPhysicalOutputAddress();
|
||||
|
||||
// TODO: do hwtest with these cases
|
||||
if (!Memory::IsValidPhysicalAddress(src_addr)) {
|
||||
LOG_CRITICAL(HW_GPU, "invalid input address 0x%08X", src_addr);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!Memory::IsValidPhysicalAddress(dst_addr)) {
|
||||
LOG_CRITICAL(HW_GPU, "invalid output address 0x%08X", dst_addr);
|
||||
return;
|
||||
}
|
||||
|
||||
if (config.input_width == 0) {
|
||||
LOG_CRITICAL(HW_GPU, "zero input width");
|
||||
return;
|
||||
}
|
||||
|
||||
if (config.input_height == 0) {
|
||||
LOG_CRITICAL(HW_GPU, "zero input height");
|
||||
return;
|
||||
}
|
||||
|
||||
if (config.output_width == 0) {
|
||||
LOG_CRITICAL(HW_GPU, "zero output width");
|
||||
return;
|
||||
}
|
||||
|
||||
if (config.output_height == 0) {
|
||||
LOG_CRITICAL(HW_GPU, "zero output height");
|
||||
return;
|
||||
}
|
||||
|
||||
if (VideoCore::g_renderer->Rasterizer()->AccelerateDisplayTransfer(config))
|
||||
return;
|
||||
|
||||
u8* src_pointer = Memory::GetPhysicalPointer(src_addr);
|
||||
u8* dst_pointer = Memory::GetPhysicalPointer(dst_addr);
|
||||
|
||||
if (config.scaling > config.ScaleXY) {
|
||||
LOG_CRITICAL(HW_GPU, "Unimplemented display transfer scaling mode %u",
|
||||
config.scaling.Value());
|
||||
UNIMPLEMENTED();
|
||||
return;
|
||||
}
|
||||
|
||||
if (config.input_linear && config.scaling != config.NoScale) {
|
||||
LOG_CRITICAL(HW_GPU, "Scaling is only implemented on tiled input");
|
||||
UNIMPLEMENTED();
|
||||
return;
|
||||
}
|
||||
|
||||
int horizontal_scale = config.scaling != config.NoScale ? 1 : 0;
|
||||
int vertical_scale = config.scaling == config.ScaleXY ? 1 : 0;
|
||||
|
||||
u32 output_width = config.output_width >> horizontal_scale;
|
||||
u32 output_height = config.output_height >> vertical_scale;
|
||||
|
||||
u32 input_size =
|
||||
config.input_width * config.input_height * GPU::Regs::BytesPerPixel(config.input_format);
|
||||
u32 output_size = output_width * output_height * GPU::Regs::BytesPerPixel(config.output_format);
|
||||
|
||||
Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(), input_size);
|
||||
Memory::RasterizerFlushAndInvalidateRegion(config.GetPhysicalOutputAddress(), output_size);
|
||||
|
||||
for (u32 y = 0; y < output_height; ++y) {
|
||||
for (u32 x = 0; x < output_width; ++x) {
|
||||
Math::Vec4<u8> src_color;
|
||||
|
||||
// Calculate the [x,y] position of the input image
|
||||
// based on the current output position and the scale
|
||||
u32 input_x = x << horizontal_scale;
|
||||
u32 input_y = y << vertical_scale;
|
||||
|
||||
u32 output_y;
|
||||
if (config.flip_vertically) {
|
||||
// Flip the y value of the output data,
|
||||
// we do this after calculating the [x,y] position of the input image
|
||||
// to account for the scaling options.
|
||||
output_y = output_height - y - 1;
|
||||
} else {
|
||||
output_y = y;
|
||||
}
|
||||
|
||||
u32 dst_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.output_format);
|
||||
u32 src_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.input_format);
|
||||
u32 src_offset;
|
||||
u32 dst_offset;
|
||||
|
||||
if (config.input_linear) {
|
||||
if (!config.dont_swizzle) {
|
||||
// Interpret the input as linear and the output as tiled
|
||||
u32 coarse_y = output_y & ~7;
|
||||
u32 stride = output_width * dst_bytes_per_pixel;
|
||||
|
||||
src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
|
||||
dst_offset = VideoCore::GetMortonOffset(x, output_y, dst_bytes_per_pixel) +
|
||||
coarse_y * stride;
|
||||
} else {
|
||||
// Both input and output are linear
|
||||
src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
|
||||
dst_offset = (x + output_y * output_width) * dst_bytes_per_pixel;
|
||||
}
|
||||
} else {
|
||||
if (!config.dont_swizzle) {
|
||||
// Interpret the input as tiled and the output as linear
|
||||
u32 coarse_y = input_y & ~7;
|
||||
u32 stride = config.input_width * src_bytes_per_pixel;
|
||||
|
||||
src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) +
|
||||
coarse_y * stride;
|
||||
dst_offset = (x + output_y * output_width) * dst_bytes_per_pixel;
|
||||
} else {
|
||||
// Both input and output are tiled
|
||||
u32 out_coarse_y = output_y & ~7;
|
||||
u32 out_stride = output_width * dst_bytes_per_pixel;
|
||||
|
||||
u32 in_coarse_y = input_y & ~7;
|
||||
u32 in_stride = config.input_width * src_bytes_per_pixel;
|
||||
|
||||
src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) +
|
||||
in_coarse_y * in_stride;
|
||||
dst_offset = VideoCore::GetMortonOffset(x, output_y, dst_bytes_per_pixel) +
|
||||
out_coarse_y * out_stride;
|
||||
}
|
||||
}
|
||||
|
||||
const u8* src_pixel = src_pointer + src_offset;
|
||||
src_color = DecodePixel(config.input_format, src_pixel);
|
||||
if (config.scaling == config.ScaleX) {
|
||||
Math::Vec4<u8> pixel =
|
||||
DecodePixel(config.input_format, src_pixel + src_bytes_per_pixel);
|
||||
src_color = ((src_color + pixel) / 2).Cast<u8>();
|
||||
} else if (config.scaling == config.ScaleXY) {
|
||||
Math::Vec4<u8> pixel1 =
|
||||
DecodePixel(config.input_format, src_pixel + 1 * src_bytes_per_pixel);
|
||||
Math::Vec4<u8> pixel2 =
|
||||
DecodePixel(config.input_format, src_pixel + 2 * src_bytes_per_pixel);
|
||||
Math::Vec4<u8> pixel3 =
|
||||
DecodePixel(config.input_format, src_pixel + 3 * src_bytes_per_pixel);
|
||||
src_color = (((src_color + pixel1) + (pixel2 + pixel3)) / 4).Cast<u8>();
|
||||
}
|
||||
|
||||
u8* dst_pixel = dst_pointer + dst_offset;
|
||||
switch (config.output_format) {
|
||||
case Regs::PixelFormat::RGBA8:
|
||||
Color::EncodeRGBA8(src_color, dst_pixel);
|
||||
break;
|
||||
|
||||
case Regs::PixelFormat::RGB8:
|
||||
Color::EncodeRGB8(src_color, dst_pixel);
|
||||
break;
|
||||
|
||||
case Regs::PixelFormat::RGB565:
|
||||
Color::EncodeRGB565(src_color, dst_pixel);
|
||||
break;
|
||||
|
||||
case Regs::PixelFormat::RGB5A1:
|
||||
Color::EncodeRGB5A1(src_color, dst_pixel);
|
||||
break;
|
||||
|
||||
case Regs::PixelFormat::RGBA4:
|
||||
Color::EncodeRGBA4(src_color, dst_pixel);
|
||||
break;
|
||||
|
||||
default:
|
||||
LOG_ERROR(HW_GPU, "Unknown destination framebuffer format %x",
|
||||
config.output_format.Value());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void TextureCopy(const Regs::DisplayTransferConfig& config) {
|
||||
const PAddr src_addr = config.GetPhysicalInputAddress();
|
||||
const PAddr dst_addr = config.GetPhysicalOutputAddress();
|
||||
|
||||
// TODO: do hwtest with invalid addresses
|
||||
if (!Memory::IsValidPhysicalAddress(src_addr)) {
|
||||
LOG_CRITICAL(HW_GPU, "invalid input address 0x%08X", src_addr);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!Memory::IsValidPhysicalAddress(dst_addr)) {
|
||||
LOG_CRITICAL(HW_GPU, "invalid output address 0x%08X", dst_addr);
|
||||
return;
|
||||
}
|
||||
|
||||
if (VideoCore::g_renderer->Rasterizer()->AccelerateTextureCopy(config))
|
||||
return;
|
||||
|
||||
u8* src_pointer = Memory::GetPhysicalPointer(src_addr);
|
||||
u8* dst_pointer = Memory::GetPhysicalPointer(dst_addr);
|
||||
|
||||
u32 remaining_size = Common::AlignDown(config.texture_copy.size, 16);
|
||||
|
||||
if (remaining_size == 0) {
|
||||
LOG_CRITICAL(HW_GPU, "zero size. Real hardware freezes on this.");
|
||||
return;
|
||||
}
|
||||
|
||||
u32 input_gap = config.texture_copy.input_gap * 16;
|
||||
u32 output_gap = config.texture_copy.output_gap * 16;
|
||||
|
||||
// Zero gap means contiguous input/output even if width = 0. To avoid infinite loop below, width
|
||||
// is assigned with the total size if gap = 0.
|
||||
u32 input_width = input_gap == 0 ? remaining_size : config.texture_copy.input_width * 16;
|
||||
u32 output_width = output_gap == 0 ? remaining_size : config.texture_copy.output_width * 16;
|
||||
|
||||
if (input_width == 0) {
|
||||
LOG_CRITICAL(HW_GPU, "zero input width. Real hardware freezes on this.");
|
||||
return;
|
||||
}
|
||||
|
||||
if (output_width == 0) {
|
||||
LOG_CRITICAL(HW_GPU, "zero output width. Real hardware freezes on this.");
|
||||
return;
|
||||
}
|
||||
|
||||
size_t contiguous_input_size =
|
||||
config.texture_copy.size / input_width * (input_width + input_gap);
|
||||
Memory::RasterizerFlushRegion(config.GetPhysicalInputAddress(),
|
||||
static_cast<u32>(contiguous_input_size));
|
||||
|
||||
size_t contiguous_output_size =
|
||||
config.texture_copy.size / output_width * (output_width + output_gap);
|
||||
Memory::RasterizerFlushAndInvalidateRegion(config.GetPhysicalOutputAddress(),
|
||||
static_cast<u32>(contiguous_output_size));
|
||||
|
||||
u32 remaining_input = input_width;
|
||||
u32 remaining_output = output_width;
|
||||
while (remaining_size > 0) {
|
||||
u32 copy_size = std::min({remaining_input, remaining_output, remaining_size});
|
||||
|
||||
std::memcpy(dst_pointer, src_pointer, copy_size);
|
||||
src_pointer += copy_size;
|
||||
dst_pointer += copy_size;
|
||||
|
||||
remaining_input -= copy_size;
|
||||
remaining_output -= copy_size;
|
||||
remaining_size -= copy_size;
|
||||
|
||||
if (remaining_input == 0) {
|
||||
remaining_input = input_width;
|
||||
src_pointer += input_gap;
|
||||
}
|
||||
if (remaining_output == 0) {
|
||||
remaining_output = output_width;
|
||||
dst_pointer += output_gap;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline void Write(u32 addr, const T data) {
|
||||
addr -= HW::VADDR_GPU;
|
||||
u32 index = addr / 4;
|
||||
|
||||
// Writes other than u32 are untested, so I'd rather have them abort than silently fail
|
||||
if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
|
||||
LOG_ERROR(HW_GPU, "unknown Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, addr);
|
||||
return;
|
||||
}
|
||||
|
||||
g_regs[index] = static_cast<u32>(data);
|
||||
|
||||
switch (index) {
|
||||
|
||||
// Memory fills are triggered once the fill value is written.
|
||||
case GPU_REG_INDEX_WORKAROUND(memory_fill_config[0].trigger, 0x00004 + 0x3):
|
||||
case GPU_REG_INDEX_WORKAROUND(memory_fill_config[1].trigger, 0x00008 + 0x3): {
|
||||
const bool is_second_filler = (index != GPU_REG_INDEX(memory_fill_config[0].trigger));
|
||||
auto& config = g_regs.memory_fill_config[is_second_filler];
|
||||
|
||||
if (config.trigger) {
|
||||
MemoryFill(config);
|
||||
LOG_TRACE(HW_GPU, "MemoryFill from 0x%08x to 0x%08x", config.GetStartAddress(),
|
||||
config.GetEndAddress());
|
||||
|
||||
// It seems that it won't signal interrupt if "address_start" is zero.
|
||||
// TODO: hwtest this
|
||||
if (config.GetStartAddress() != 0) {
|
||||
if (!is_second_filler) {
|
||||
//Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PSC0);
|
||||
} else {
|
||||
//Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PSC1);
|
||||
}
|
||||
}
|
||||
|
||||
// Reset "trigger" flag and set the "finish" flag
|
||||
// NOTE: This was confirmed to happen on hardware even if "address_start" is zero.
|
||||
config.trigger.Assign(0);
|
||||
config.finished.Assign(1);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case GPU_REG_INDEX(display_transfer_config.trigger): {
|
||||
MICROPROFILE_SCOPE(GPU_DisplayTransfer);
|
||||
|
||||
const auto& config = g_regs.display_transfer_config;
|
||||
if (config.trigger & 1) {
|
||||
|
||||
if (Pica::g_debug_context)
|
||||
Pica::g_debug_context->OnEvent(Pica::DebugContext::Event::IncomingDisplayTransfer,
|
||||
nullptr);
|
||||
|
||||
if (config.is_texture_copy) {
|
||||
TextureCopy(config);
|
||||
LOG_TRACE(HW_GPU, "TextureCopy: 0x%X bytes from 0x%08X(%u+%u)-> "
|
||||
"0x%08X(%u+%u), flags 0x%08X",
|
||||
config.texture_copy.size, config.GetPhysicalInputAddress(),
|
||||
config.texture_copy.input_width * 16, config.texture_copy.input_gap * 16,
|
||||
config.GetPhysicalOutputAddress(), config.texture_copy.output_width * 16,
|
||||
config.texture_copy.output_gap * 16, config.flags);
|
||||
} else {
|
||||
DisplayTransfer(config);
|
||||
LOG_TRACE(HW_GPU, "DisplayTransfer: 0x%08x(%ux%u)-> "
|
||||
"0x%08x(%ux%u), dst format %x, flags 0x%08X",
|
||||
config.GetPhysicalInputAddress(), config.input_width.Value(),
|
||||
config.input_height.Value(), config.GetPhysicalOutputAddress(),
|
||||
config.output_width.Value(), config.output_height.Value(),
|
||||
config.output_format.Value(), config.flags);
|
||||
}
|
||||
|
||||
g_regs.display_transfer_config.trigger = 0;
|
||||
//Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PPF);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
// Seems like writing to this register triggers processing
|
||||
case GPU_REG_INDEX(command_processor_config.trigger): {
|
||||
const auto& config = g_regs.command_processor_config;
|
||||
if (config.trigger & 1) {
|
||||
MICROPROFILE_SCOPE(GPU_CmdlistProcessing);
|
||||
|
||||
u32* buffer = (u32*)Memory::GetPhysicalPointer(config.GetPhysicalAddress());
|
||||
|
||||
if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
|
||||
Pica::g_debug_context->recorder->MemoryAccessed((u8*)buffer, config.size,
|
||||
config.GetPhysicalAddress());
|
||||
}
|
||||
|
||||
Pica::CommandProcessor::ProcessCommandList(buffer, config.size);
|
||||
|
||||
g_regs.command_processor_config.trigger = 0;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
// Notify tracer about the register write
|
||||
// This is happening *after* handling the write to make sure we properly catch all memory reads.
|
||||
if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
|
||||
// addr + GPU VBase - IO VBase + IO PBase
|
||||
Pica::g_debug_context->recorder->RegisterWritten<T>(
|
||||
addr + 0x1EF00000 - 0x1EC00000 + 0x10100000, data);
|
||||
}
|
||||
}
|
||||
|
||||
// Explicitly instantiate template functions because we aren't defining this in the header:
|
||||
|
||||
template void Read<u64>(u64& var, const u32 addr);
|
||||
template void Read<u32>(u32& var, const u32 addr);
|
||||
template void Read<u16>(u16& var, const u32 addr);
|
||||
template void Read<u8>(u8& var, const u32 addr);
|
||||
|
||||
template void Write<u64>(u32 addr, const u64 data);
|
||||
template void Write<u32>(u32 addr, const u32 data);
|
||||
template void Write<u16>(u32 addr, const u16 data);
|
||||
template void Write<u8>(u32 addr, const u8 data);
|
||||
|
||||
/// Update hardware
|
||||
static void VBlankCallback(u64 userdata, int cycles_late) {
|
||||
//VideoCore::g_renderer->SwapBuffers();
|
||||
|
||||
//// Signal to GSP that GPU interrupt has occurred
|
||||
//// TODO(yuriks): hwtest to determine if PDC0 is for the Top screen and PDC1 for the Sub
|
||||
//// screen, or if both use the same interrupts and these two instead determine the
|
||||
//// beginning and end of the VBlank period. If needed, split the interrupt firing into
|
||||
//// two different intervals.
|
||||
//Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PDC0);
|
||||
//Service::GSP::SignalInterrupt(Service::GSP::InterruptId::PDC1);
|
||||
|
||||
// Reschedule recurrent event
|
||||
CoreTiming::ScheduleEvent(frame_ticks - cycles_late, vblank_event);
|
||||
}
|
||||
|
||||
/// Initialize hardware
|
||||
void Init() {
|
||||
memset(&g_regs, 0, sizeof(g_regs));
|
||||
|
||||
auto& framebuffer_top = g_regs.framebuffer_config[0];
|
||||
auto& framebuffer_sub = g_regs.framebuffer_config[1];
|
||||
|
||||
// Setup default framebuffer addresses (located in VRAM)
|
||||
// .. or at least these are the ones used by system applets.
|
||||
// There's probably a smarter way to come up with addresses
|
||||
// like this which does not require hardcoding.
|
||||
framebuffer_top.address_left1 = 0x181E6000;
|
||||
framebuffer_top.address_left2 = 0x1822C800;
|
||||
framebuffer_top.address_right1 = 0x18273000;
|
||||
framebuffer_top.address_right2 = 0x182B9800;
|
||||
framebuffer_sub.address_left1 = 0x1848F000;
|
||||
framebuffer_sub.address_left2 = 0x184C7800;
|
||||
|
||||
framebuffer_top.width.Assign(240);
|
||||
framebuffer_top.height.Assign(400);
|
||||
framebuffer_top.stride = 3 * 240;
|
||||
framebuffer_top.color_format.Assign(Regs::PixelFormat::RGB8);
|
||||
framebuffer_top.active_fb = 0;
|
||||
|
||||
framebuffer_sub.width.Assign(240);
|
||||
framebuffer_sub.height.Assign(320);
|
||||
framebuffer_sub.stride = 3 * 240;
|
||||
framebuffer_sub.color_format.Assign(Regs::PixelFormat::RGB8);
|
||||
framebuffer_sub.active_fb = 0;
|
||||
|
||||
vblank_event = CoreTiming::RegisterEvent("GPU::VBlankCallback", VBlankCallback);
|
||||
CoreTiming::ScheduleEvent(frame_ticks, vblank_event);
|
||||
|
||||
LOG_DEBUG(HW_GPU, "initialized OK");
|
||||
}
|
||||
|
||||
/// Shutdown hardware
|
||||
void Shutdown() {
|
||||
LOG_DEBUG(HW_GPU, "shutdown OK");
|
||||
}
|
||||
|
||||
} // namespace
|
|
@ -1,334 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstddef>
|
||||
#include <type_traits>
|
||||
#include "common/assert.h"
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace GPU {
|
||||
|
||||
constexpr float SCREEN_REFRESH_RATE = 60;
|
||||
|
||||
// Returns index corresponding to the Regs member labeled by field_name
|
||||
// TODO: Due to Visual studio bug 209229, offsetof does not return constant expressions
|
||||
// when used with array elements (e.g. GPU_REG_INDEX(memory_fill_config[0])).
|
||||
// For details cf.
|
||||
// https://connect.microsoft.com/VisualStudio/feedback/details/209229/offsetof-does-not-produce-a-constant-expression-for-array-members
|
||||
// Hopefully, this will be fixed sometime in the future.
|
||||
// For lack of better alternatives, we currently hardcode the offsets when constant
|
||||
// expressions are needed via GPU_REG_INDEX_WORKAROUND (on sane compilers, static_asserts
|
||||
// will then make sure the offsets indeed match the automatically calculated ones).
|
||||
#define GPU_REG_INDEX(field_name) (offsetof(GPU::Regs, field_name) / sizeof(u32))
|
||||
#if defined(_MSC_VER)
|
||||
#define GPU_REG_INDEX_WORKAROUND(field_name, backup_workaround_index) (backup_workaround_index)
|
||||
#else
|
||||
// NOTE: Yeah, hacking in a static_assert here just to workaround the lacking MSVC compiler
|
||||
// really is this annoying. This macro just forwards its first argument to GPU_REG_INDEX
|
||||
// and then performs a (no-op) cast to size_t iff the second argument matches the expected
|
||||
// field offset. Otherwise, the compiler will fail to compile this code.
|
||||
#define GPU_REG_INDEX_WORKAROUND(field_name, backup_workaround_index) \
|
||||
((typename std::enable_if<backup_workaround_index == GPU_REG_INDEX(field_name), size_t>::type) \
|
||||
GPU_REG_INDEX(field_name))
|
||||
#endif
|
||||
|
||||
// MMIO region 0x1EFxxxxx
|
||||
struct Regs {
|
||||
|
||||
// helper macro to make sure the defined structures are of the expected size.
|
||||
#if defined(_MSC_VER)
|
||||
// TODO: MSVC does not support using sizeof() on non-static data members even though this
|
||||
// is technically allowed since C++11. This macro should be enabled once MSVC adds
|
||||
// support for that.
|
||||
#define ASSERT_MEMBER_SIZE(name, size_in_bytes)
|
||||
#else
|
||||
#define ASSERT_MEMBER_SIZE(name, size_in_bytes) \
|
||||
static_assert(sizeof(name) == size_in_bytes, \
|
||||
"Structure size and register block length don't match")
|
||||
#endif
|
||||
|
||||
// Components are laid out in reverse byte order, most significant bits first.
|
||||
enum class PixelFormat : u32 {
|
||||
RGBA8 = 0,
|
||||
RGB8 = 1,
|
||||
RGB565 = 2,
|
||||
RGB5A1 = 3,
|
||||
RGBA4 = 4,
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns the number of bytes per pixel.
|
||||
*/
|
||||
static int BytesPerPixel(PixelFormat format) {
|
||||
switch (format) {
|
||||
case PixelFormat::RGBA8:
|
||||
return 4;
|
||||
case PixelFormat::RGB8:
|
||||
return 3;
|
||||
case PixelFormat::RGB565:
|
||||
case PixelFormat::RGB5A1:
|
||||
case PixelFormat::RGBA4:
|
||||
return 2;
|
||||
}
|
||||
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
INSERT_PADDING_WORDS(0x4);
|
||||
|
||||
struct MemoryFillConfig {
|
||||
u32 address_start;
|
||||
u32 address_end;
|
||||
|
||||
union {
|
||||
u32 value_32bit;
|
||||
|
||||
BitField<0, 16, u32> value_16bit;
|
||||
|
||||
// TODO: Verify component order
|
||||
BitField<0, 8, u32> value_24bit_r;
|
||||
BitField<8, 8, u32> value_24bit_g;
|
||||
BitField<16, 8, u32> value_24bit_b;
|
||||
};
|
||||
|
||||
union {
|
||||
u32 control;
|
||||
|
||||
// Setting this field to 1 triggers the memory fill.
|
||||
// This field also acts as a status flag, and gets reset to 0 upon completion.
|
||||
BitField<0, 1, u32> trigger;
|
||||
|
||||
// Set to 1 upon completion.
|
||||
BitField<1, 1, u32> finished;
|
||||
|
||||
// If both of these bits are unset, then it will fill the memory with a 16 bit value
|
||||
// 1: fill with 24-bit wide values
|
||||
BitField<8, 1, u32> fill_24bit;
|
||||
// 1: fill with 32-bit wide values
|
||||
BitField<9, 1, u32> fill_32bit;
|
||||
};
|
||||
|
||||
inline u32 GetStartAddress() const {
|
||||
return DecodeAddressRegister(address_start);
|
||||
}
|
||||
|
||||
inline u32 GetEndAddress() const {
|
||||
return DecodeAddressRegister(address_end);
|
||||
}
|
||||
} memory_fill_config[2];
|
||||
ASSERT_MEMBER_SIZE(memory_fill_config[0], 0x10);
|
||||
|
||||
INSERT_PADDING_WORDS(0x10b);
|
||||
|
||||
struct FramebufferConfig {
|
||||
union {
|
||||
u32 size;
|
||||
|
||||
BitField<0, 16, u32> width;
|
||||
BitField<16, 16, u32> height;
|
||||
};
|
||||
|
||||
INSERT_PADDING_WORDS(0x2);
|
||||
|
||||
u32 address_left1;
|
||||
u32 address_left2;
|
||||
|
||||
union {
|
||||
u32 format;
|
||||
|
||||
BitField<0, 3, PixelFormat> color_format;
|
||||
};
|
||||
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
|
||||
union {
|
||||
u32 active_fb;
|
||||
|
||||
// 0: Use parameters ending with "1"
|
||||
// 1: Use parameters ending with "2"
|
||||
BitField<0, 1, u32> second_fb_active;
|
||||
};
|
||||
|
||||
INSERT_PADDING_WORDS(0x5);
|
||||
|
||||
// Distance between two pixel rows, in bytes
|
||||
u32 stride;
|
||||
|
||||
u32 address_right1;
|
||||
u32 address_right2;
|
||||
|
||||
INSERT_PADDING_WORDS(0x30);
|
||||
} framebuffer_config[2];
|
||||
ASSERT_MEMBER_SIZE(framebuffer_config[0], 0x100);
|
||||
|
||||
INSERT_PADDING_WORDS(0x169);
|
||||
|
||||
struct DisplayTransferConfig {
|
||||
u32 input_address;
|
||||
u32 output_address;
|
||||
|
||||
inline u32 GetPhysicalInputAddress() const {
|
||||
return DecodeAddressRegister(input_address);
|
||||
}
|
||||
|
||||
inline u32 GetPhysicalOutputAddress() const {
|
||||
return DecodeAddressRegister(output_address);
|
||||
}
|
||||
|
||||
union {
|
||||
u32 output_size;
|
||||
|
||||
BitField<0, 16, u32> output_width;
|
||||
BitField<16, 16, u32> output_height;
|
||||
};
|
||||
|
||||
union {
|
||||
u32 input_size;
|
||||
|
||||
BitField<0, 16, u32> input_width;
|
||||
BitField<16, 16, u32> input_height;
|
||||
};
|
||||
|
||||
enum ScalingMode : u32 {
|
||||
NoScale = 0, // Doesn't scale the image
|
||||
ScaleX = 1, // Downscales the image in half in the X axis and applies a box filter
|
||||
ScaleXY =
|
||||
2, // Downscales the image in half in both the X and Y axes and applies a box filter
|
||||
};
|
||||
|
||||
union {
|
||||
u32 flags;
|
||||
|
||||
BitField<0, 1, u32> flip_vertically; // flips input data vertically
|
||||
BitField<1, 1, u32> input_linear; // Converts from linear to tiled format
|
||||
BitField<2, 1, u32> crop_input_lines;
|
||||
BitField<3, 1, u32> is_texture_copy; // Copies the data without performing any
|
||||
// processing and respecting texture copy fields
|
||||
BitField<5, 1, u32> dont_swizzle;
|
||||
BitField<8, 3, PixelFormat> input_format;
|
||||
BitField<12, 3, PixelFormat> output_format;
|
||||
/// Uses some kind of 32x32 block swizzling mode, instead of the usual 8x8 one.
|
||||
BitField<16, 1, u32> block_32; // TODO(yuriks): unimplemented
|
||||
BitField<24, 2, ScalingMode> scaling; // Determines the scaling mode of the transfer
|
||||
};
|
||||
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
|
||||
// it seems that writing to this field triggers the display transfer
|
||||
u32 trigger;
|
||||
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
|
||||
struct {
|
||||
u32 size; // The lower 4 bits are ignored
|
||||
|
||||
union {
|
||||
u32 input_size;
|
||||
|
||||
BitField<0, 16, u32> input_width;
|
||||
BitField<16, 16, u32> input_gap;
|
||||
};
|
||||
|
||||
union {
|
||||
u32 output_size;
|
||||
|
||||
BitField<0, 16, u32> output_width;
|
||||
BitField<16, 16, u32> output_gap;
|
||||
};
|
||||
} texture_copy;
|
||||
} display_transfer_config;
|
||||
ASSERT_MEMBER_SIZE(display_transfer_config, 0x2c);
|
||||
|
||||
INSERT_PADDING_WORDS(0x32D);
|
||||
|
||||
struct {
|
||||
// command list size (in bytes)
|
||||
u32 size;
|
||||
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
|
||||
// command list address
|
||||
u32 address;
|
||||
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
|
||||
// it seems that writing to this field triggers command list processing
|
||||
u32 trigger;
|
||||
|
||||
inline u32 GetPhysicalAddress() const {
|
||||
return DecodeAddressRegister(address);
|
||||
}
|
||||
} command_processor_config;
|
||||
ASSERT_MEMBER_SIZE(command_processor_config, 0x14);
|
||||
|
||||
INSERT_PADDING_WORDS(0x9c3);
|
||||
|
||||
static constexpr size_t NumIds() {
|
||||
return sizeof(Regs) / sizeof(u32);
|
||||
}
|
||||
|
||||
const u32& operator[](int index) const {
|
||||
const u32* content = reinterpret_cast<const u32*>(this);
|
||||
return content[index];
|
||||
}
|
||||
|
||||
u32& operator[](int index) {
|
||||
u32* content = reinterpret_cast<u32*>(this);
|
||||
return content[index];
|
||||
}
|
||||
|
||||
#undef ASSERT_MEMBER_SIZE
|
||||
|
||||
private:
|
||||
/*
|
||||
* Most physical addresses which GPU registers refer to are 8-byte aligned.
|
||||
* This function should be used to get the address from a raw register value.
|
||||
*/
|
||||
static inline u32 DecodeAddressRegister(u32 register_value) {
|
||||
return register_value * 8;
|
||||
}
|
||||
};
|
||||
static_assert(std::is_standard_layout<Regs>::value, "Structure does not use standard layout");
|
||||
|
||||
// TODO: MSVC does not support using offsetof() on non-static data members even though this
|
||||
// is technically allowed since C++11. This macro should be enabled once MSVC adds
|
||||
// support for that.
|
||||
#ifndef _MSC_VER
|
||||
#define ASSERT_REG_POSITION(field_name, position) \
|
||||
static_assert(offsetof(Regs, field_name) == position * 4, \
|
||||
"Field " #field_name " has invalid position")
|
||||
|
||||
ASSERT_REG_POSITION(memory_fill_config[0], 0x00004);
|
||||
ASSERT_REG_POSITION(memory_fill_config[1], 0x00008);
|
||||
ASSERT_REG_POSITION(framebuffer_config[0], 0x00117);
|
||||
ASSERT_REG_POSITION(framebuffer_config[1], 0x00157);
|
||||
ASSERT_REG_POSITION(display_transfer_config, 0x00300);
|
||||
ASSERT_REG_POSITION(command_processor_config, 0x00638);
|
||||
|
||||
#undef ASSERT_REG_POSITION
|
||||
#endif // !defined(_MSC_VER)
|
||||
|
||||
// The total number of registers is chosen arbitrarily, but let's make sure it's not some odd value
|
||||
// anyway.
|
||||
static_assert(sizeof(Regs) == 0x1000 * sizeof(u32), "Invalid total size of register set");
|
||||
|
||||
extern Regs g_regs;
|
||||
|
||||
template <typename T>
|
||||
void Read(T& var, const u32 addr);
|
||||
|
||||
template <typename T>
|
||||
void Write(u32 addr, const T data);
|
||||
|
||||
/// Initialize hardware
|
||||
void Init();
|
||||
|
||||
/// Shutdown hardware
|
||||
void Shutdown();
|
||||
|
||||
} // namespace
|
|
@ -2,7 +2,6 @@
|
|||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "audio_core/audio_core.h"
|
||||
#include "core/gdbstub/gdbstub.h"
|
||||
#include "core/hle/service/hid/hid.h"
|
||||
#include "core/settings.h"
|
||||
|
@ -19,8 +18,6 @@ void Apply() {
|
|||
GDBStub::SetServerPort(values.gdbstub_port);
|
||||
GDBStub::ToggleServer(values.use_gdbstub);
|
||||
|
||||
VideoCore::g_hw_renderer_enabled = values.use_hw_renderer;
|
||||
VideoCore::g_shader_jit_enabled = values.use_shader_jit;
|
||||
VideoCore::g_toggle_framelimit_enabled = values.toggle_framelimit;
|
||||
|
||||
if (VideoCore::g_emu_window) {
|
||||
|
@ -28,9 +25,6 @@ void Apply() {
|
|||
VideoCore::g_emu_window->UpdateCurrentFramebufferLayout(layout.width, layout.height);
|
||||
}
|
||||
|
||||
AudioCore::SelectSink(values.sink_id);
|
||||
AudioCore::EnableStretching(values.enable_audio_stretching);
|
||||
|
||||
Service::HID::ReloadInputDevices();
|
||||
}
|
||||
|
||||
|
|
|
@ -1,96 +1,23 @@
|
|||
set(SRCS
|
||||
command_processor.cpp
|
||||
debug_utils/debug_utils.cpp
|
||||
geometry_pipeline.cpp
|
||||
pica.cpp
|
||||
primitive_assembly.cpp
|
||||
regs.cpp
|
||||
renderer_base.cpp
|
||||
renderer_opengl/gl_rasterizer.cpp
|
||||
renderer_opengl/gl_rasterizer_cache.cpp
|
||||
renderer_opengl/gl_shader_gen.cpp
|
||||
renderer_opengl/gl_shader_util.cpp
|
||||
renderer_opengl/gl_state.cpp
|
||||
renderer_opengl/renderer_opengl.cpp
|
||||
shader/shader.cpp
|
||||
shader/shader_interpreter.cpp
|
||||
swrasterizer/clipper.cpp
|
||||
swrasterizer/framebuffer.cpp
|
||||
swrasterizer/lighting.cpp
|
||||
swrasterizer/proctex.cpp
|
||||
swrasterizer/rasterizer.cpp
|
||||
swrasterizer/swrasterizer.cpp
|
||||
swrasterizer/texturing.cpp
|
||||
texture/etc1.cpp
|
||||
texture/texture_decode.cpp
|
||||
vertex_loader.cpp
|
||||
video_core.cpp
|
||||
)
|
||||
|
||||
set(HEADERS
|
||||
command_processor.h
|
||||
debug_utils/debug_utils.h
|
||||
geometry_pipeline.h
|
||||
gpu_debugger.h
|
||||
pica.h
|
||||
pica_state.h
|
||||
pica_types.h
|
||||
primitive_assembly.h
|
||||
rasterizer_interface.h
|
||||
regs.h
|
||||
regs_framebuffer.h
|
||||
regs_lighting.h
|
||||
regs_pipeline.h
|
||||
regs_rasterizer.h
|
||||
regs_shader.h
|
||||
regs_texturing.h
|
||||
renderer_base.h
|
||||
renderer_opengl/gl_rasterizer.h
|
||||
renderer_opengl/gl_rasterizer_cache.h
|
||||
renderer_opengl/gl_resource_manager.h
|
||||
renderer_opengl/gl_shader_gen.h
|
||||
renderer_opengl/gl_shader_util.h
|
||||
renderer_opengl/gl_state.h
|
||||
renderer_opengl/pica_to_gl.h
|
||||
renderer_opengl/renderer_opengl.h
|
||||
shader/debug_data.h
|
||||
shader/shader.h
|
||||
shader/shader_interpreter.h
|
||||
swrasterizer/clipper.h
|
||||
swrasterizer/framebuffer.h
|
||||
swrasterizer/lighting.h
|
||||
swrasterizer/proctex.h
|
||||
swrasterizer/rasterizer.h
|
||||
swrasterizer/swrasterizer.h
|
||||
swrasterizer/texturing.h
|
||||
texture/etc1.h
|
||||
texture/texture_decode.h
|
||||
utils.h
|
||||
vertex_loader.h
|
||||
video_core.h
|
||||
)
|
||||
|
||||
if(ARCHITECTURE_x86_64)
|
||||
set(SRCS ${SRCS}
|
||||
shader/shader_jit_x64.cpp
|
||||
shader/shader_jit_x64_compiler.cpp)
|
||||
|
||||
set(HEADERS ${HEADERS}
|
||||
shader/shader_jit_x64.h
|
||||
shader/shader_jit_x64_compiler.h)
|
||||
endif()
|
||||
|
||||
create_directory_groups(${SRCS} ${HEADERS})
|
||||
|
||||
add_library(video_core STATIC ${SRCS} ${HEADERS})
|
||||
target_link_libraries(video_core PUBLIC common core)
|
||||
target_link_libraries(video_core PRIVATE glad nihstro-headers)
|
||||
|
||||
if (ARCHITECTURE_x86_64)
|
||||
target_link_libraries(video_core PRIVATE xbyak)
|
||||
endif()
|
||||
|
||||
if (PNG_FOUND)
|
||||
target_link_libraries(video_core PRIVATE PNG::PNG)
|
||||
target_compile_definitions(video_core PRIVATE HAVE_PNG)
|
||||
endif()
|
||||
target_link_libraries(video_core PRIVATE glad)
|
||||
|
|
|
@ -1,647 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <array>
|
||||
#include <cstddef>
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "core/hle/service/gsp_gpu.h"
|
||||
#include "core/hw/gpu.h"
|
||||
#include "core/memory.h"
|
||||
#include "core/tracer/recorder.h"
|
||||
#include "video_core/command_processor.h"
|
||||
#include "video_core/debug_utils/debug_utils.h"
|
||||
#include "video_core/pica_state.h"
|
||||
#include "video_core/pica_types.h"
|
||||
#include "video_core/primitive_assembly.h"
|
||||
#include "video_core/rasterizer_interface.h"
|
||||
#include "video_core/regs.h"
|
||||
#include "video_core/regs_pipeline.h"
|
||||
#include "video_core/regs_texturing.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
#include "video_core/vertex_loader.h"
|
||||
#include "video_core/video_core.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
namespace CommandProcessor {
|
||||
|
||||
static int vs_float_regs_counter = 0;
|
||||
static u32 vs_uniform_write_buffer[4];
|
||||
|
||||
static int gs_float_regs_counter = 0;
|
||||
static u32 gs_uniform_write_buffer[4];
|
||||
|
||||
static int default_attr_counter = 0;
|
||||
static u32 default_attr_write_buffer[3];
|
||||
|
||||
// Expand a 4-bit mask to 4-byte mask, e.g. 0b0101 -> 0x00FF00FF
|
||||
static const u32 expand_bits_to_bytes[] = {
|
||||
0x00000000, 0x000000ff, 0x0000ff00, 0x0000ffff, 0x00ff0000, 0x00ff00ff, 0x00ffff00, 0x00ffffff,
|
||||
0xff000000, 0xff0000ff, 0xff00ff00, 0xff00ffff, 0xffff0000, 0xffff00ff, 0xffffff00, 0xffffffff,
|
||||
};
|
||||
|
||||
MICROPROFILE_DEFINE(GPU_Drawing, "GPU", "Drawing", MP_RGB(50, 50, 240));
|
||||
|
||||
static const char* GetShaderSetupTypeName(Shader::ShaderSetup& setup) {
|
||||
if (&setup == &g_state.vs) {
|
||||
return "vertex shader";
|
||||
}
|
||||
if (&setup == &g_state.gs) {
|
||||
return "geometry shader";
|
||||
}
|
||||
return "unknown shader";
|
||||
}
|
||||
|
||||
static void WriteUniformBoolReg(Shader::ShaderSetup& setup, u32 value) {
|
||||
for (unsigned i = 0; i < setup.uniforms.b.size(); ++i)
|
||||
setup.uniforms.b[i] = (value & (1 << i)) != 0;
|
||||
}
|
||||
|
||||
static void WriteUniformIntReg(Shader::ShaderSetup& setup, unsigned index,
|
||||
const Math::Vec4<u8>& values) {
|
||||
ASSERT(index < setup.uniforms.i.size());
|
||||
setup.uniforms.i[index] = values;
|
||||
LOG_TRACE(HW_GPU, "Set %s integer uniform %d to %02x %02x %02x %02x",
|
||||
GetShaderSetupTypeName(setup), index, values.x, values.y, values.z, values.w);
|
||||
}
|
||||
|
||||
static void WriteUniformFloatReg(ShaderRegs& config, Shader::ShaderSetup& setup,
|
||||
int& float_regs_counter, u32 uniform_write_buffer[4], u32 value) {
|
||||
auto& uniform_setup = config.uniform_setup;
|
||||
|
||||
// TODO: Does actual hardware indeed keep an intermediate buffer or does
|
||||
// it directly write the values?
|
||||
uniform_write_buffer[float_regs_counter++] = value;
|
||||
|
||||
// Uniforms are written in a packed format such that four float24 values are encoded in
|
||||
// three 32-bit numbers. We write to internal memory once a full such vector is
|
||||
// written.
|
||||
if ((float_regs_counter >= 4 && uniform_setup.IsFloat32()) ||
|
||||
(float_regs_counter >= 3 && !uniform_setup.IsFloat32())) {
|
||||
float_regs_counter = 0;
|
||||
|
||||
auto& uniform = setup.uniforms.f[uniform_setup.index];
|
||||
|
||||
if (uniform_setup.index >= 96) {
|
||||
LOG_ERROR(HW_GPU, "Invalid %s float uniform index %d", GetShaderSetupTypeName(setup),
|
||||
(int)uniform_setup.index);
|
||||
} else {
|
||||
|
||||
// NOTE: The destination component order indeed is "backwards"
|
||||
if (uniform_setup.IsFloat32()) {
|
||||
for (auto i : {0, 1, 2, 3})
|
||||
uniform[3 - i] = float24::FromFloat32(*(float*)(&uniform_write_buffer[i]));
|
||||
} else {
|
||||
// TODO: Untested
|
||||
uniform.w = float24::FromRaw(uniform_write_buffer[0] >> 8);
|
||||
uniform.z = float24::FromRaw(((uniform_write_buffer[0] & 0xFF) << 16) |
|
||||
((uniform_write_buffer[1] >> 16) & 0xFFFF));
|
||||
uniform.y = float24::FromRaw(((uniform_write_buffer[1] & 0xFFFF) << 8) |
|
||||
((uniform_write_buffer[2] >> 24) & 0xFF));
|
||||
uniform.x = float24::FromRaw(uniform_write_buffer[2] & 0xFFFFFF);
|
||||
}
|
||||
|
||||
LOG_TRACE(HW_GPU, "Set %s float uniform %x to (%f %f %f %f)",
|
||||
GetShaderSetupTypeName(setup), (int)uniform_setup.index,
|
||||
uniform.x.ToFloat32(), uniform.y.ToFloat32(), uniform.z.ToFloat32(),
|
||||
uniform.w.ToFloat32());
|
||||
|
||||
// TODO: Verify that this actually modifies the register!
|
||||
uniform_setup.index.Assign(uniform_setup.index + 1);
|
||||
}
|
||||
}
|
||||
}
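// Illustrative sketch (not part of the removed file): the packed-uniform path
// above decodes three 32-bit words into four raw 24-bit values in w, z, y, x
// order. As a standalone helper (hypothetical name), the same unpacking is:
static std::array<u32, 4> UnpackFloat24Words(const u32 b[3]) {
    return {{
        b[0] >> 8,                                       // w
        ((b[0] & 0xFF) << 16) | ((b[1] >> 16) & 0xFFFF), // z
        ((b[1] & 0xFFFF) << 8) | ((b[2] >> 24) & 0xFF),  // y
        b[2] & 0xFFFFFF,                                 // x
    }};
}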
|
||||
|
||||
static void LoadDefaultVertexAttributes(u32 register_value) {
|
||||
auto& regs = g_state.regs;
|
||||
|
||||
// TODO: Does actual hardware indeed keep an intermediate buffer or does
|
||||
// it directly write the values?
|
||||
default_attr_write_buffer[default_attr_counter++] = register_value;
|
||||
|
||||
// Default attributes are written in a packed format such that four float24 values are encoded
|
||||
// in three 32-bit numbers.
|
||||
// We write to internal memory once a full such vector is written.
|
||||
if (default_attr_counter >= 3) {
|
||||
default_attr_counter = 0;
|
||||
|
||||
auto& setup = regs.pipeline.vs_default_attributes_setup;
|
||||
|
||||
if (setup.index >= 16) {
|
||||
LOG_ERROR(HW_GPU, "Invalid VS default attribute index %d", (int)setup.index);
|
||||
return;
|
||||
}
|
||||
|
||||
Math::Vec4<float24> attribute;
|
||||
|
||||
// NOTE: The destination component order indeed is "backwards"
|
||||
attribute.w = float24::FromRaw(default_attr_write_buffer[0] >> 8);
|
||||
attribute.z = float24::FromRaw(((default_attr_write_buffer[0] & 0xFF) << 16) |
|
||||
((default_attr_write_buffer[1] >> 16) & 0xFFFF));
|
||||
attribute.y = float24::FromRaw(((default_attr_write_buffer[1] & 0xFFFF) << 8) |
|
||||
((default_attr_write_buffer[2] >> 24) & 0xFF));
|
||||
attribute.x = float24::FromRaw(default_attr_write_buffer[2] & 0xFFFFFF);
|
||||
|
||||
LOG_TRACE(HW_GPU, "Set default VS attribute %x to (%f %f %f %f)", (int)setup.index,
|
||||
attribute.x.ToFloat32(), attribute.y.ToFloat32(), attribute.z.ToFloat32(),
|
||||
attribute.w.ToFloat32());
|
||||
|
||||
// TODO: Verify that this actually modifies the register!
|
||||
if (setup.index < 15) {
|
||||
g_state.input_default_attributes.attr[setup.index] = attribute;
|
||||
setup.index++;
|
||||
} else {
|
||||
// Put each attribute into an immediate input buffer. When all specified immediate
|
||||
// attributes are present, the Vertex Shader is invoked and everything is sent to
|
||||
// the primitive assembler.
|
||||
|
||||
auto& immediate_input = g_state.immediate.input_vertex;
|
||||
auto& immediate_attribute_id = g_state.immediate.current_attribute;
|
||||
|
||||
immediate_input.attr[immediate_attribute_id] = attribute;
|
||||
|
||||
if (immediate_attribute_id < regs.pipeline.max_input_attrib_index) {
|
||||
immediate_attribute_id += 1;
|
||||
} else {
|
||||
MICROPROFILE_SCOPE(GPU_Drawing);
|
||||
immediate_attribute_id = 0;
|
||||
|
||||
auto* shader_engine = Shader::GetEngine();
|
||||
shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);
|
||||
|
||||
// Send to vertex shader
|
||||
if (g_debug_context)
|
||||
g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
|
||||
static_cast<void*>(&immediate_input));
|
||||
Shader::UnitState shader_unit;
|
||||
Shader::AttributeBuffer output{};
|
||||
|
||||
shader_unit.LoadInput(regs.vs, immediate_input);
|
||||
shader_engine->Run(g_state.vs, shader_unit);
|
||||
shader_unit.WriteOutput(regs.vs, output);
|
||||
|
||||
// Send to geometry pipeline
|
||||
if (g_state.immediate.reset_geometry_pipeline) {
|
||||
g_state.geometry_pipeline.Reconfigure();
|
||||
g_state.immediate.reset_geometry_pipeline = false;
|
||||
}
|
||||
ASSERT(!g_state.geometry_pipeline.NeedIndexInput());
|
||||
g_state.geometry_pipeline.Setup(shader_engine);
|
||||
g_state.geometry_pipeline.SubmitVertex(output);
|
||||
|
||||
// TODO: If drawing after every immediate mode triangle kills performance,
|
||||
// change it to flush triangles whenever a drawing config register changes
|
||||
// See: https://github.com/citra-emu/citra/pull/2866#issuecomment-327011550
|
||||
VideoCore::g_renderer->Rasterizer()->DrawTriangles();
|
||||
if (g_debug_context) {
|
||||
g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void Draw(u32 command_id) {
|
||||
MICROPROFILE_SCOPE(GPU_Drawing);
|
||||
auto& regs = g_state.regs;
|
||||
|
||||
#if PICA_LOG_TEV
|
||||
DebugUtils::DumpTevStageConfig(regs.GetTevStages());
|
||||
#endif
|
||||
if (g_debug_context)
|
||||
g_debug_context->OnEvent(DebugContext::Event::IncomingPrimitiveBatch, nullptr);
|
||||
|
||||
// Processes information about internal vertex attributes to figure out how a vertex is
|
||||
// loaded.
|
||||
// Later, these can be compiled and cached.
|
||||
const u32 base_address = regs.pipeline.vertex_attributes.GetPhysicalBaseAddress();
|
||||
VertexLoader loader(regs.pipeline);
|
||||
|
||||
// Load vertices
|
||||
bool is_indexed = (command_id == PICA_REG_INDEX(pipeline.trigger_draw_indexed));
|
||||
|
||||
const auto& index_info = regs.pipeline.index_array;
|
||||
const u8* index_address_8 = Memory::GetPhysicalPointer(base_address + index_info.offset);
|
||||
const u16* index_address_16 = reinterpret_cast<const u16*>(index_address_8);
|
||||
bool index_u16 = index_info.format != 0;
|
||||
|
||||
PrimitiveAssembler<Shader::OutputVertex>& primitive_assembler = g_state.primitive_assembler;
|
||||
|
||||
if (g_debug_context && g_debug_context->recorder) {
|
||||
for (int i = 0; i < 3; ++i) {
|
||||
const auto texture = regs.texturing.GetTextures()[i];
|
||||
if (!texture.enabled)
|
||||
continue;
|
||||
|
||||
u8* texture_data = Memory::GetPhysicalPointer(texture.config.GetPhysicalAddress());
|
||||
g_debug_context->recorder->MemoryAccessed(
|
||||
texture_data, Pica::TexturingRegs::NibblesPerPixel(texture.format) *
|
||||
texture.config.width / 2 * texture.config.height,
|
||||
texture.config.GetPhysicalAddress());
|
||||
}
|
||||
}
|
||||
|
||||
DebugUtils::MemoryAccessTracker memory_accesses;
|
||||
|
||||
// Simple circular-replacement vertex cache
|
||||
// The size has been tuned for optimal balance between hit-rate and the cost of lookup
|
||||
const size_t VERTEX_CACHE_SIZE = 32;
|
||||
std::array<u16, VERTEX_CACHE_SIZE> vertex_cache_ids;
|
||||
std::array<Shader::AttributeBuffer, VERTEX_CACHE_SIZE> vertex_cache;
|
||||
Shader::AttributeBuffer vs_output;
|
||||
|
||||
unsigned int vertex_cache_pos = 0;
|
||||
vertex_cache_ids.fill(-1);
|
||||
|
||||
auto* shader_engine = Shader::GetEngine();
|
||||
Shader::UnitState shader_unit;
|
||||
|
||||
shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);
|
||||
|
||||
g_state.geometry_pipeline.Reconfigure();
|
||||
g_state.geometry_pipeline.Setup(shader_engine);
|
||||
if (g_state.geometry_pipeline.NeedIndexInput())
|
||||
ASSERT(is_indexed);
|
||||
|
||||
for (unsigned int index = 0; index < regs.pipeline.num_vertices; ++index) {
|
||||
// Indexed rendering doesn't use the start offset
|
||||
unsigned int vertex = is_indexed
|
||||
? (index_u16 ? index_address_16[index] : index_address_8[index])
|
||||
: (index + regs.pipeline.vertex_offset);
|
||||
|
||||
// -1 is a common special value used for primitive restart. Since it's unknown if
|
||||
// the PICA supports it, and it would mess up the caching, guard against it here.
|
||||
ASSERT(vertex != -1);
|
||||
|
||||
bool vertex_cache_hit = false;
|
||||
|
||||
if (is_indexed) {
|
||||
if (g_state.geometry_pipeline.NeedIndexInput()) {
|
||||
g_state.geometry_pipeline.SubmitIndex(vertex);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (g_debug_context && Pica::g_debug_context->recorder) {
|
||||
int size = index_u16 ? 2 : 1;
|
||||
memory_accesses.AddAccess(base_address + index_info.offset + size * index, size);
|
||||
}
|
||||
|
||||
for (unsigned int i = 0; i < VERTEX_CACHE_SIZE; ++i) {
|
||||
if (vertex == vertex_cache_ids[i]) {
|
||||
vs_output = vertex_cache[i];
|
||||
vertex_cache_hit = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!vertex_cache_hit) {
|
||||
// Initialize data for the current vertex
|
||||
Shader::AttributeBuffer input;
|
||||
loader.LoadVertex(base_address, index, vertex, input, memory_accesses);
|
||||
|
||||
// Send to vertex shader
|
||||
if (g_debug_context)
|
||||
g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
|
||||
(void*)&input);
|
||||
shader_unit.LoadInput(regs.vs, input);
|
||||
shader_engine->Run(g_state.vs, shader_unit);
|
||||
shader_unit.WriteOutput(regs.vs, vs_output);
|
||||
|
||||
if (is_indexed) {
|
||||
vertex_cache[vertex_cache_pos] = vs_output;
|
||||
vertex_cache_ids[vertex_cache_pos] = vertex;
|
||||
vertex_cache_pos = (vertex_cache_pos + 1) % VERTEX_CACHE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
// Send to geometry pipeline
|
||||
g_state.geometry_pipeline.SubmitVertex(vs_output);
|
||||
}
|
||||
|
||||
for (auto& range : memory_accesses.ranges) {
|
||||
g_debug_context->recorder->MemoryAccessed(Memory::GetPhysicalPointer(range.first),
|
||||
range.second, range.first);
|
||||
}
|
||||
|
||||
VideoCore::g_renderer->Rasterizer()->DrawTriangles();
|
||||
if (g_debug_context) {
|
||||
g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
static void WritePicaReg(u32 id, u32 value, u32 mask) {
|
||||
auto& regs = g_state.regs;
|
||||
|
||||
if (id >= Regs::NUM_REGS) {
|
||||
LOG_ERROR(HW_GPU,
|
||||
"Commandlist tried to write to invalid register 0x%03X (value: %08X, mask: %X)",
|
||||
id, value, mask);
|
||||
return;
|
||||
}
|
||||
|
||||
// TODO: Figure out how register masking acts on e.g. vs.uniform_setup.set_value
|
||||
u32 old_value = regs.reg_array[id];
|
||||
|
||||
const u32 write_mask = expand_bits_to_bytes[mask];
|
||||
|
||||
regs.reg_array[id] = (old_value & ~write_mask) | (value & write_mask);
|
||||
|
||||
// Double check for is_pica_tracing to avoid call overhead
|
||||
if (DebugUtils::IsPicaTracing()) {
|
||||
DebugUtils::OnPicaRegWrite({(u16)id, (u16)mask, regs.reg_array[id]});
|
||||
}
|
||||
|
||||
if (g_debug_context)
|
||||
g_debug_context->OnEvent(DebugContext::Event::PicaCommandLoaded,
|
||||
reinterpret_cast<void*>(&id));
|
||||
|
||||
switch (id) {
|
||||
// Trigger IRQ
|
||||
case PICA_REG_INDEX(trigger_irq):
|
||||
//Service::GSP::SignalInterrupt(Service::GSP::InterruptId::P3D);
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX(pipeline.triangle_topology):
|
||||
g_state.primitive_assembler.Reconfigure(regs.pipeline.triangle_topology);
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX(pipeline.restart_primitive):
|
||||
g_state.primitive_assembler.Reset();
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX(pipeline.vs_default_attributes_setup.index):
|
||||
g_state.immediate.current_attribute = 0;
|
||||
g_state.immediate.reset_geometry_pipeline = true;
|
||||
default_attr_counter = 0;
|
||||
break;
|
||||
|
||||
// Load default vertex input attributes
|
||||
case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[0], 0x233):
|
||||
case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[1], 0x234):
|
||||
case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[2], 0x235):
|
||||
LoadDefaultVertexAttributes(value);
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX(pipeline.gpu_mode):
|
||||
// This register likely just enables vertex processing and doesn't need any special handling
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX_WORKAROUND(pipeline.command_buffer.trigger[0], 0x23c):
|
||||
case PICA_REG_INDEX_WORKAROUND(pipeline.command_buffer.trigger[1], 0x23d): {
|
||||
unsigned index =
|
||||
static_cast<unsigned>(id - PICA_REG_INDEX(pipeline.command_buffer.trigger[0]));
|
||||
u32* head_ptr = (u32*)Memory::GetPhysicalPointer(
|
||||
regs.pipeline.command_buffer.GetPhysicalAddress(index));
|
||||
g_state.cmd_list.head_ptr = g_state.cmd_list.current_ptr = head_ptr;
|
||||
g_state.cmd_list.length = regs.pipeline.command_buffer.GetSize(index) / sizeof(u32);
|
||||
break;
|
||||
}
|
||||
|
||||
// It seems like these trigger vertex rendering
|
||||
case PICA_REG_INDEX(pipeline.trigger_draw):
|
||||
case PICA_REG_INDEX(pipeline.trigger_draw_indexed):
|
||||
Draw(id);
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX(gs.bool_uniforms):
|
||||
WriteUniformBoolReg(g_state.gs, g_state.regs.gs.bool_uniforms.Value());
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.int_uniforms[0], 0x281):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.int_uniforms[1], 0x282):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.int_uniforms[2], 0x283):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.int_uniforms[3], 0x284): {
|
||||
unsigned index = (id - PICA_REG_INDEX_WORKAROUND(gs.int_uniforms[0], 0x281));
|
||||
auto values = regs.gs.int_uniforms[index];
|
||||
WriteUniformIntReg(g_state.gs, index,
|
||||
Math::Vec4<u8>(values.x, values.y, values.z, values.w));
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[0], 0x291):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[1], 0x292):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[2], 0x293):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[3], 0x294):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[4], 0x295):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[5], 0x296):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[6], 0x297):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.uniform_setup.set_value[7], 0x298): {
|
||||
WriteUniformFloatReg(g_state.regs.gs, g_state.gs, gs_float_regs_counter,
|
||||
gs_uniform_write_buffer, value);
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[0], 0x29c):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[1], 0x29d):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[2], 0x29e):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[3], 0x29f):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[4], 0x2a0):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[5], 0x2a1):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[6], 0x2a2):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.program.set_word[7], 0x2a3): {
|
||||
u32& offset = g_state.regs.gs.program.offset;
|
||||
if (offset >= 4096) {
|
||||
LOG_ERROR(HW_GPU, "Invalid GS program offset %u", offset);
|
||||
} else {
|
||||
g_state.gs.program_code[offset] = value;
|
||||
offset++;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[0], 0x2a6):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[1], 0x2a7):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[2], 0x2a8):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[3], 0x2a9):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[4], 0x2aa):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[5], 0x2ab):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[6], 0x2ac):
|
||||
case PICA_REG_INDEX_WORKAROUND(gs.swizzle_patterns.set_word[7], 0x2ad): {
|
||||
u32& offset = g_state.regs.gs.swizzle_patterns.offset;
|
||||
if (offset >= g_state.gs.swizzle_data.size()) {
|
||||
LOG_ERROR(HW_GPU, "Invalid GS swizzle pattern offset %u", offset);
|
||||
} else {
|
||||
g_state.gs.swizzle_data[offset] = value;
|
||||
offset++;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX(vs.bool_uniforms):
|
||||
// TODO (wwylele): does regs.pipeline.gs_unit_exclusive_configuration affect this?
|
||||
WriteUniformBoolReg(g_state.vs, g_state.regs.vs.bool_uniforms.Value());
|
||||
break;
|
||||
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.int_uniforms[0], 0x2b1):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.int_uniforms[1], 0x2b2):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.int_uniforms[2], 0x2b3):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.int_uniforms[3], 0x2b4): {
|
||||
// TODO (wwylele): does regs.pipeline.gs_unit_exclusive_configuration affect this?
|
||||
unsigned index = (id - PICA_REG_INDEX_WORKAROUND(vs.int_uniforms[0], 0x2b1));
|
||||
auto values = regs.vs.int_uniforms[index];
|
||||
WriteUniformIntReg(g_state.vs, index,
|
||||
Math::Vec4<u8>(values.x, values.y, values.z, values.w));
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[0], 0x2c1):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[1], 0x2c2):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[2], 0x2c3):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[3], 0x2c4):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[4], 0x2c5):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[5], 0x2c6):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[6], 0x2c7):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.uniform_setup.set_value[7], 0x2c8): {
|
||||
// TODO (wwylele): does regs.pipeline.gs_unit_exclusive_configuration affect this?
|
||||
WriteUniformFloatReg(g_state.regs.vs, g_state.vs, vs_float_regs_counter,
|
||||
vs_uniform_write_buffer, value);
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[0], 0x2cc):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[1], 0x2cd):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[2], 0x2ce):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[3], 0x2cf):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[4], 0x2d0):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[5], 0x2d1):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[6], 0x2d2):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.program.set_word[7], 0x2d3): {
|
||||
u32& offset = g_state.regs.vs.program.offset;
|
||||
if (offset >= 512) {
|
||||
LOG_ERROR(HW_GPU, "Invalid VS program offset %u", offset);
|
||||
} else {
|
||||
g_state.vs.program_code[offset] = value;
|
||||
if (!g_state.regs.pipeline.gs_unit_exclusive_configuration) {
|
||||
g_state.gs.program_code[offset] = value;
|
||||
}
|
||||
offset++;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[0], 0x2d6):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[1], 0x2d7):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[2], 0x2d8):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[3], 0x2d9):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[4], 0x2da):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[5], 0x2db):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[6], 0x2dc):
|
||||
case PICA_REG_INDEX_WORKAROUND(vs.swizzle_patterns.set_word[7], 0x2dd): {
|
||||
u32& offset = g_state.regs.vs.swizzle_patterns.offset;
|
||||
if (offset >= g_state.vs.swizzle_data.size()) {
|
||||
LOG_ERROR(HW_GPU, "Invalid VS swizzle pattern offset %u", offset);
|
||||
} else {
|
||||
g_state.vs.swizzle_data[offset] = value;
|
||||
if (!g_state.regs.pipeline.gs_unit_exclusive_configuration) {
|
||||
g_state.gs.swizzle_data[offset] = value;
|
||||
}
|
||||
offset++;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[0], 0x1c8):
|
||||
case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[1], 0x1c9):
|
||||
case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[2], 0x1ca):
|
||||
case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[3], 0x1cb):
|
||||
case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[4], 0x1cc):
|
||||
case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[5], 0x1cd):
|
||||
case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[6], 0x1ce):
|
||||
case PICA_REG_INDEX_WORKAROUND(lighting.lut_data[7], 0x1cf): {
|
||||
auto& lut_config = regs.lighting.lut_config;
|
||||
|
||||
ASSERT_MSG(lut_config.index < 256, "lut_config.index exceeded maximum value of 255!");
|
||||
|
||||
g_state.lighting.luts[lut_config.type][lut_config.index].raw = value;
|
||||
lut_config.index.Assign(lut_config.index + 1);
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[0], 0xe8):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[1], 0xe9):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[2], 0xea):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[3], 0xeb):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[4], 0xec):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[5], 0xed):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[6], 0xee):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.fog_lut_data[7], 0xef): {
|
||||
g_state.fog.lut[regs.texturing.fog_lut_offset % 128].raw = value;
|
||||
regs.texturing.fog_lut_offset.Assign(regs.texturing.fog_lut_offset + 1);
|
||||
break;
|
||||
}
|
||||
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[0], 0xb0):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[1], 0xb1):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[2], 0xb2):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[3], 0xb3):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[4], 0xb4):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[5], 0xb5):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[6], 0xb6):
|
||||
case PICA_REG_INDEX_WORKAROUND(texturing.proctex_lut_data[7], 0xb7): {
|
||||
auto& index = regs.texturing.proctex_lut_config.index;
|
||||
auto& pt = g_state.proctex;
|
||||
|
||||
switch (regs.texturing.proctex_lut_config.ref_table.Value()) {
|
||||
case TexturingRegs::ProcTexLutTable::Noise:
|
||||
pt.noise_table[index % pt.noise_table.size()].raw = value;
|
||||
break;
|
||||
case TexturingRegs::ProcTexLutTable::ColorMap:
|
||||
pt.color_map_table[index % pt.color_map_table.size()].raw = value;
|
||||
break;
|
||||
case TexturingRegs::ProcTexLutTable::AlphaMap:
|
||||
pt.alpha_map_table[index % pt.alpha_map_table.size()].raw = value;
|
||||
break;
|
||||
case TexturingRegs::ProcTexLutTable::Color:
|
||||
pt.color_table[index % pt.color_table.size()].raw = value;
|
||||
break;
|
||||
case TexturingRegs::ProcTexLutTable::ColorDiff:
|
||||
pt.color_diff_table[index % pt.color_diff_table.size()].raw = value;
|
||||
break;
|
||||
}
|
||||
index.Assign(index + 1);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
VideoCore::g_renderer->Rasterizer()->NotifyPicaRegisterChanged(id);
|
||||
|
||||
if (g_debug_context)
|
||||
g_debug_context->OnEvent(DebugContext::Event::PicaCommandProcessed,
|
||||
reinterpret_cast<void*>(&id));
|
||||
}
|
||||
|
||||
void ProcessCommandList(const u32* list, u32 size) {
|
||||
g_state.cmd_list.head_ptr = g_state.cmd_list.current_ptr = list;
|
||||
g_state.cmd_list.length = size / sizeof(u32);
|
||||
|
||||
while (g_state.cmd_list.current_ptr < g_state.cmd_list.head_ptr + g_state.cmd_list.length) {
|
||||
|
||||
// Align read pointer to 8 bytes
|
||||
if ((g_state.cmd_list.head_ptr - g_state.cmd_list.current_ptr) % 2 != 0)
|
||||
++g_state.cmd_list.current_ptr;
|
||||
|
||||
u32 value = *g_state.cmd_list.current_ptr++;
|
||||
const CommandHeader header = {*g_state.cmd_list.current_ptr++};
|
||||
|
||||
WritePicaReg(header.cmd_id, value, header.parameter_mask);
|
||||
|
||||
for (unsigned i = 0; i < header.extra_data_length; ++i) {
|
||||
u32 cmd = header.cmd_id + (header.group_commands ? i + 1 : 0);
|
||||
WritePicaReg(cmd, *g_state.cmd_list.current_ptr++, header.parameter_mask);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace CommandProcessor
|
||||
|
||||
} // namespace Pica

@ -1,41 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <type_traits>
#include "common/bit_field.h"
#include "common/common_types.h"

namespace Pica {

namespace CommandProcessor {

union CommandHeader {
    u32 hex;

    BitField<0, 16, u32> cmd_id;

    // parameter_mask:
    // Mask applied to the input value to make it possible to update
    // parts of a register without overwriting its other fields.
    // first bit: 0x000000FF
    // second bit: 0x0000FF00
    // third bit: 0x00FF0000
    // fourth bit: 0xFF000000
    BitField<16, 4, u32> parameter_mask;

    BitField<20, 11, u32> extra_data_length;

    BitField<31, 1, u32> group_commands;
};
static_assert(std::is_standard_layout<CommandHeader>::value == true,
              "CommandHeader does not use standard layout");
static_assert(sizeof(CommandHeader) == sizeof(u32), "CommandHeader has incorrect size!");

void ProcessCommandList(const u32* list, u32 size);

} // namespace

} // namespace
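For reference, a minimal sketch of how the 4-bit parameter_mask described above is applied when a PICA register is written, mirroring the expand_bits_to_bytes table and the masked write in the removed command_processor.cpp (the helper names below are illustrative, not part of the removed sources):

#include <cstdint>

// Expand each of the four mask bits to a full byte, e.g. 0b0101 -> 0x00FF00FF,
// equivalent to the expand_bits_to_bytes lookup table in command_processor.cpp.
static std::uint32_t ExpandMaskToBytes(std::uint32_t mask4) {
    std::uint32_t write_mask = 0;
    for (int i = 0; i < 4; ++i) {
        if (mask4 & (1u << i))
            write_mask |= 0xFFu << (8 * i);
    }
    return write_mask;
}

// Only the bytes selected by the mask are taken from the new value; the rest
// of the register keeps its old contents.
static std::uint32_t MaskedRegisterWrite(std::uint32_t old_value, std::uint32_t value,
                                         std::uint32_t mask4) {
    const std::uint32_t write_mask = ExpandMaskToBytes(mask4);
    return (old_value & ~write_mask) | (value & write_mask);
}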

@ -1,577 +0,0 @@
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <condition_variable>
|
||||
#include <cstdint>
|
||||
#include <cstring>
|
||||
#include <fstream>
|
||||
#include <map>
|
||||
#include <mutex>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
|
||||
#ifdef HAVE_PNG
|
||||
#include <png.h>
|
||||
#include <setjmp.h>
|
||||
#endif
|
||||
|
||||
#include <nihstro/bit_field.h>
|
||||
#include <nihstro/float24.h>
|
||||
#include <nihstro/shader_binary.h>
|
||||
#include "common/assert.h"
|
||||
#include "common/bit_field.h"
|
||||
#include "common/color.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/file_util.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/math_util.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "video_core/debug_utils/debug_utils.h"
|
||||
#include "video_core/pica_state.h"
|
||||
#include "video_core/pica_types.h"
|
||||
#include "video_core/rasterizer_interface.h"
|
||||
#include "video_core/regs_rasterizer.h"
|
||||
#include "video_core/regs_shader.h"
|
||||
#include "video_core/regs_texturing.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
#include "video_core/texture/texture_decode.h"
|
||||
#include "video_core/utils.h"
|
||||
#include "video_core/video_core.h"
|
||||
|
||||
using nihstro::DVLBHeader;
|
||||
using nihstro::DVLEHeader;
|
||||
using nihstro::DVLPHeader;
|
||||
|
||||
namespace Pica {
|
||||
|
||||
void DebugContext::DoOnEvent(Event event, void* data) {
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(breakpoint_mutex);
|
||||
|
||||
// Commit the rasterizer's caches so framebuffers, render targets, etc. will show on debug
|
||||
// widgets
|
||||
VideoCore::g_renderer->Rasterizer()->FlushAll();
|
||||
|
||||
// TODO: Should stop the CPU thread here once we multithread emulation.
|
||||
|
||||
active_breakpoint = event;
|
||||
at_breakpoint = true;
|
||||
|
||||
// Tell all observers that we hit a breakpoint
|
||||
for (auto& breakpoint_observer : breakpoint_observers) {
|
||||
breakpoint_observer->OnPicaBreakPointHit(event, data);
|
||||
}
|
||||
|
||||
// Wait until another thread tells us to Resume()
|
||||
resume_from_breakpoint.wait(lock, [&] { return !at_breakpoint; });
|
||||
}
|
||||
}
|
||||
|
||||
void DebugContext::Resume() {
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(breakpoint_mutex);
|
||||
|
||||
// Tell all observers that we are about to resume
|
||||
for (auto& breakpoint_observer : breakpoint_observers) {
|
||||
breakpoint_observer->OnPicaResume();
|
||||
}
|
||||
|
||||
// Resume the waiting thread (i.e. OnEvent())
|
||||
at_breakpoint = false;
|
||||
}
|
||||
|
||||
resume_from_breakpoint.notify_one();
|
||||
}
|
||||
|
||||
std::shared_ptr<DebugContext> g_debug_context; // TODO: Get rid of this global
|
||||
|
||||
namespace DebugUtils {
|
||||
|
||||
void DumpShader(const std::string& filename, const ShaderRegs& config,
|
||||
const Shader::ShaderSetup& setup,
|
||||
const RasterizerRegs::VSOutputAttributes* output_attributes) {
|
||||
struct StuffToWrite {
|
||||
const u8* pointer;
|
||||
u32 size;
|
||||
};
|
||||
std::vector<StuffToWrite> writing_queue;
|
||||
u32 write_offset = 0;
|
||||
|
||||
auto QueueForWriting = [&writing_queue, &write_offset](const u8* pointer, u32 size) {
|
||||
writing_queue.push_back({pointer, size});
|
||||
u32 old_write_offset = write_offset;
|
||||
write_offset += size;
|
||||
return old_write_offset;
|
||||
};
|
||||
|
||||
// First off, try to translate Pica state (one enum for output attribute type and component)
|
||||
// into shbin format (separate type and component mask).
|
||||
union OutputRegisterInfo {
|
||||
enum Type : u64 {
|
||||
POSITION = 0,
|
||||
QUATERNION = 1,
|
||||
COLOR = 2,
|
||||
TEXCOORD0 = 3,
|
||||
TEXCOORD1 = 5,
|
||||
TEXCOORD2 = 6,
|
||||
|
||||
VIEW = 8,
|
||||
};
|
||||
|
||||
BitField<0, 64, u64> hex;
|
||||
|
||||
BitField<0, 16, Type> type;
|
||||
BitField<16, 16, u64> id;
|
||||
BitField<32, 4, u64> component_mask;
|
||||
};
|
||||
|
||||
// This is put into a try-catch block to make sure we notice unknown configurations.
|
||||
std::vector<OutputRegisterInfo> output_info_table;
|
||||
for (unsigned i = 0; i < 7; ++i) {
|
||||
using OutputAttributes = Pica::RasterizerRegs::VSOutputAttributes;
|
||||
|
||||
// TODO: It's still unclear how the attribute components map to the register!
|
||||
// Once we know that, this code probably will not make much sense anymore.
|
||||
std::map<OutputAttributes::Semantic, std::pair<OutputRegisterInfo::Type, u32>> map = {
|
||||
{OutputAttributes::POSITION_X, {OutputRegisterInfo::POSITION, 1}},
|
||||
{OutputAttributes::POSITION_Y, {OutputRegisterInfo::POSITION, 2}},
|
||||
{OutputAttributes::POSITION_Z, {OutputRegisterInfo::POSITION, 4}},
|
||||
{OutputAttributes::POSITION_W, {OutputRegisterInfo::POSITION, 8}},
|
||||
{OutputAttributes::QUATERNION_X, {OutputRegisterInfo::QUATERNION, 1}},
|
||||
{OutputAttributes::QUATERNION_Y, {OutputRegisterInfo::QUATERNION, 2}},
|
||||
{OutputAttributes::QUATERNION_Z, {OutputRegisterInfo::QUATERNION, 4}},
|
||||
{OutputAttributes::QUATERNION_W, {OutputRegisterInfo::QUATERNION, 8}},
|
||||
{OutputAttributes::COLOR_R, {OutputRegisterInfo::COLOR, 1}},
|
||||
{OutputAttributes::COLOR_G, {OutputRegisterInfo::COLOR, 2}},
|
||||
{OutputAttributes::COLOR_B, {OutputRegisterInfo::COLOR, 4}},
|
||||
{OutputAttributes::COLOR_A, {OutputRegisterInfo::COLOR, 8}},
|
||||
{OutputAttributes::TEXCOORD0_U, {OutputRegisterInfo::TEXCOORD0, 1}},
|
||||
{OutputAttributes::TEXCOORD0_V, {OutputRegisterInfo::TEXCOORD0, 2}},
|
||||
{OutputAttributes::TEXCOORD1_U, {OutputRegisterInfo::TEXCOORD1, 1}},
|
||||
{OutputAttributes::TEXCOORD1_V, {OutputRegisterInfo::TEXCOORD1, 2}},
|
||||
{OutputAttributes::TEXCOORD2_U, {OutputRegisterInfo::TEXCOORD2, 1}},
|
||||
{OutputAttributes::TEXCOORD2_V, {OutputRegisterInfo::TEXCOORD2, 2}},
|
||||
{OutputAttributes::VIEW_X, {OutputRegisterInfo::VIEW, 1}},
|
||||
{OutputAttributes::VIEW_Y, {OutputRegisterInfo::VIEW, 2}},
|
||||
{OutputAttributes::VIEW_Z, {OutputRegisterInfo::VIEW, 4}},
|
||||
};
|
||||
|
||||
for (const auto& semantic : std::vector<OutputAttributes::Semantic>{
|
||||
output_attributes[i].map_x, output_attributes[i].map_y, output_attributes[i].map_z,
|
||||
output_attributes[i].map_w}) {
|
||||
if (semantic == OutputAttributes::INVALID)
|
||||
continue;
|
||||
|
||||
try {
|
||||
OutputRegisterInfo::Type type = map.at(semantic).first;
|
||||
u32 component_mask = map.at(semantic).second;
|
||||
|
||||
auto it = std::find_if(output_info_table.begin(), output_info_table.end(),
|
||||
[&i, &type](const OutputRegisterInfo& info) {
|
||||
return info.id == i && info.type == type;
|
||||
});
|
||||
|
||||
if (it == output_info_table.end()) {
|
||||
output_info_table.emplace_back();
|
||||
output_info_table.back().type.Assign(type);
|
||||
output_info_table.back().component_mask.Assign(component_mask);
|
||||
output_info_table.back().id.Assign(i);
|
||||
} else {
|
||||
it->component_mask.Assign(it->component_mask | component_mask);
|
||||
}
|
||||
} catch (const std::out_of_range&) {
|
||||
DEBUG_ASSERT_MSG(false, "Unknown output attribute mapping");
|
||||
LOG_ERROR(HW_GPU, "Unknown output attribute mapping: %03x, %03x, %03x, %03x",
|
||||
(int)output_attributes[i].map_x.Value(),
|
||||
(int)output_attributes[i].map_y.Value(),
|
||||
(int)output_attributes[i].map_z.Value(),
|
||||
(int)output_attributes[i].map_w.Value());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct {
|
||||
DVLBHeader header;
|
||||
u32 dvle_offset;
|
||||
} dvlb{{DVLBHeader::MAGIC_WORD, 1}}; // 1 DVLE
|
||||
|
||||
DVLPHeader dvlp{DVLPHeader::MAGIC_WORD};
|
||||
DVLEHeader dvle{DVLEHeader::MAGIC_WORD};
|
||||
|
||||
QueueForWriting(reinterpret_cast<const u8*>(&dvlb), sizeof(dvlb));
|
||||
u32 dvlp_offset = QueueForWriting(reinterpret_cast<const u8*>(&dvlp), sizeof(dvlp));
|
||||
dvlb.dvle_offset = QueueForWriting(reinterpret_cast<const u8*>(&dvle), sizeof(dvle));
|
||||
|
||||
// TODO: Reduce the amount of binary code written to relevant portions
|
||||
dvlp.binary_offset = write_offset - dvlp_offset;
|
||||
dvlp.binary_size_words = static_cast<uint32_t>(setup.program_code.size());
|
||||
QueueForWriting(reinterpret_cast<const u8*>(setup.program_code.data()),
|
||||
static_cast<u32>(setup.program_code.size()) * sizeof(u32));
|
||||
|
||||
dvlp.swizzle_info_offset = write_offset - dvlp_offset;
|
||||
dvlp.swizzle_info_num_entries = static_cast<uint32_t>(setup.swizzle_data.size());
|
||||
u32 dummy = 0;
|
||||
for (unsigned int i = 0; i < setup.swizzle_data.size(); ++i) {
|
||||
QueueForWriting(reinterpret_cast<const u8*>(&setup.swizzle_data[i]),
|
||||
sizeof(setup.swizzle_data[i]));
|
||||
QueueForWriting(reinterpret_cast<const u8*>(&dummy), sizeof(dummy));
|
||||
}
|
||||
|
||||
dvle.main_offset_words = config.main_offset;
|
||||
dvle.output_register_table_offset = write_offset - dvlb.dvle_offset;
|
||||
dvle.output_register_table_size = static_cast<u32>(output_info_table.size());
|
||||
QueueForWriting(reinterpret_cast<const u8*>(output_info_table.data()),
|
||||
static_cast<u32>(output_info_table.size() * sizeof(OutputRegisterInfo)));
|
||||
|
||||
// TODO: Create a label table for "main"
|
||||
|
||||
std::vector<nihstro::ConstantInfo> constant_table;
|
||||
for (unsigned i = 0; i < setup.uniforms.b.size(); ++i) {
|
||||
nihstro::ConstantInfo constant;
|
||||
memset(&constant, 0, sizeof(constant));
|
||||
constant.type = nihstro::ConstantInfo::Bool;
|
||||
constant.regid = i;
|
||||
constant.b = setup.uniforms.b[i];
|
||||
constant_table.emplace_back(constant);
|
||||
}
|
||||
for (unsigned i = 0; i < setup.uniforms.i.size(); ++i) {
|
||||
nihstro::ConstantInfo constant;
|
||||
memset(&constant, 0, sizeof(constant));
|
||||
constant.type = nihstro::ConstantInfo::Int;
|
||||
constant.regid = i;
|
||||
constant.i.x = setup.uniforms.i[i].x;
|
||||
constant.i.y = setup.uniforms.i[i].y;
|
||||
constant.i.z = setup.uniforms.i[i].z;
|
||||
constant.i.w = setup.uniforms.i[i].w;
|
||||
constant_table.emplace_back(constant);
|
||||
}
|
||||
for (unsigned i = 0; i < sizeof(setup.uniforms.f) / sizeof(setup.uniforms.f[0]); ++i) {
|
||||
nihstro::ConstantInfo constant;
|
||||
memset(&constant, 0, sizeof(constant));
|
||||
constant.type = nihstro::ConstantInfo::Float;
|
||||
constant.regid = i;
|
||||
constant.f.x = nihstro::to_float24(setup.uniforms.f[i].x.ToFloat32());
|
||||
constant.f.y = nihstro::to_float24(setup.uniforms.f[i].y.ToFloat32());
|
||||
constant.f.z = nihstro::to_float24(setup.uniforms.f[i].z.ToFloat32());
|
||||
constant.f.w = nihstro::to_float24(setup.uniforms.f[i].w.ToFloat32());
|
||||
|
||||
// Store constant if it's different from zero..
|
||||
if (setup.uniforms.f[i].x.ToFloat32() != 0.0 || setup.uniforms.f[i].y.ToFloat32() != 0.0 ||
|
||||
setup.uniforms.f[i].z.ToFloat32() != 0.0 || setup.uniforms.f[i].w.ToFloat32() != 0.0)
|
||||
constant_table.emplace_back(constant);
|
||||
}
|
||||
dvle.constant_table_offset = write_offset - dvlb.dvle_offset;
|
||||
dvle.constant_table_size = static_cast<uint32_t>(constant_table.size());
|
||||
for (const auto& constant : constant_table) {
|
||||
QueueForWriting(reinterpret_cast<const u8*>(&constant), sizeof(constant));
|
||||
}
|
||||
|
||||
// Write data to file
|
||||
std::ofstream file(filename, std::ios_base::out | std::ios_base::binary);
|
||||
|
||||
for (const auto& chunk : writing_queue) {
|
||||
file.write(reinterpret_cast<const char*>(chunk.pointer), chunk.size);
|
||||
}
|
||||
}
|
||||
|
||||
static std::unique_ptr<PicaTrace> pica_trace;
|
||||
static std::mutex pica_trace_mutex;
|
||||
bool g_is_pica_tracing = false;
|
||||
|
||||
void StartPicaTracing() {
|
||||
if (g_is_pica_tracing) {
|
||||
LOG_WARNING(HW_GPU, "StartPicaTracing called even though tracing already running!");
|
||||
return;
|
||||
}
|
||||
|
||||
std::lock_guard<std::mutex> lock(pica_trace_mutex);
|
||||
pica_trace = std::make_unique<PicaTrace>();
|
||||
|
||||
g_is_pica_tracing = true;
|
||||
}
|
||||
|
||||
void OnPicaRegWrite(PicaTrace::Write write) {
|
||||
std::lock_guard<std::mutex> lock(pica_trace_mutex);
|
||||
|
||||
if (!g_is_pica_tracing)
|
||||
return;
|
||||
|
||||
pica_trace->writes.push_back(write);
|
||||
}
|
||||
|
||||
std::unique_ptr<PicaTrace> FinishPicaTracing() {
|
||||
if (!g_is_pica_tracing) {
|
||||
LOG_WARNING(HW_GPU, "FinishPicaTracing called even though tracing isn't running!");
|
||||
return {};
|
||||
}
|
||||
|
||||
// signalize that no further tracing should be performed
|
||||
g_is_pica_tracing = false;
|
||||
|
||||
// Wait until running tracing is finished
|
||||
std::lock_guard<std::mutex> lock(pica_trace_mutex);
|
||||
std::unique_ptr<PicaTrace> ret(std::move(pica_trace));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef HAVE_PNG
|
||||
// Adapter functions to libpng to write/flush to File::IOFile instances.
|
||||
static void WriteIOFile(png_structp png_ptr, png_bytep data, png_size_t length) {
|
||||
auto* fp = static_cast<FileUtil::IOFile*>(png_get_io_ptr(png_ptr));
|
||||
if (!fp->WriteBytes(data, length))
|
||||
png_error(png_ptr, "Failed to write to output PNG file.");
|
||||
}
|
||||
|
||||
static void FlushIOFile(png_structp png_ptr) {
|
||||
auto* fp = static_cast<FileUtil::IOFile*>(png_get_io_ptr(png_ptr));
|
||||
if (!fp->Flush())
|
||||
png_error(png_ptr, "Failed to flush to output PNG file.");
|
||||
}
|
||||
#endif
|
||||
|
||||
void DumpTexture(const TexturingRegs::TextureConfig& texture_config, u8* data) {
|
||||
#ifndef HAVE_PNG
|
||||
return;
|
||||
#else
|
||||
if (!data)
|
||||
return;
|
||||
|
||||
// Write data to file
|
||||
static int dump_index = 0;
|
||||
std::string filename =
|
||||
std::string("texture_dump") + std::to_string(++dump_index) + std::string(".png");
|
||||
u32 row_stride = texture_config.width * 3;
|
||||
|
||||
u8* buf;
|
||||
|
||||
char title[] = "Citra texture dump";
|
||||
char title_key[] = "Title";
|
||||
png_structp png_ptr = nullptr;
|
||||
png_infop info_ptr = nullptr;
|
||||
|
||||
// Open file for writing (binary mode)
|
||||
FileUtil::IOFile fp(filename, "wb");
|
||||
|
||||
// Initialize write structure
|
||||
png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr);
|
||||
if (png_ptr == nullptr) {
|
||||
LOG_ERROR(Debug_GPU, "Could not allocate write struct");
|
||||
goto finalise;
|
||||
}
|
||||
|
||||
// Initialize info structure
|
||||
info_ptr = png_create_info_struct(png_ptr);
|
||||
if (info_ptr == nullptr) {
|
||||
LOG_ERROR(Debug_GPU, "Could not allocate info struct");
|
||||
goto finalise;
|
||||
}
|
||||
|
||||
// Setup Exception handling
|
||||
if (setjmp(png_jmpbuf(png_ptr))) {
|
||||
LOG_ERROR(Debug_GPU, "Error during png creation");
|
||||
goto finalise;
|
||||
}
|
||||
|
||||
png_set_write_fn(png_ptr, static_cast<void*>(&fp), WriteIOFile, FlushIOFile);
|
||||
|
||||
// Write header (8 bit color depth)
|
||||
png_set_IHDR(png_ptr, info_ptr, texture_config.width, texture_config.height, 8,
|
||||
PNG_COLOR_TYPE_RGB /*_ALPHA*/, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE,
|
||||
PNG_FILTER_TYPE_BASE);
|
||||
|
||||
png_text title_text;
|
||||
title_text.compression = PNG_TEXT_COMPRESSION_NONE;
|
||||
title_text.key = title_key;
|
||||
title_text.text = title;
|
||||
png_set_text(png_ptr, info_ptr, &title_text, 1);
|
||||
|
||||
png_write_info(png_ptr, info_ptr);
|
||||
|
||||
buf = new u8[row_stride * texture_config.height];
|
||||
for (unsigned y = 0; y < texture_config.height; ++y) {
|
||||
for (unsigned x = 0; x < texture_config.width; ++x) {
|
||||
Pica::Texture::TextureInfo info;
|
||||
info.width = texture_config.width;
|
||||
info.height = texture_config.height;
|
||||
info.stride = row_stride;
|
||||
info.format = g_state.regs.texturing.texture0_format;
|
||||
Math::Vec4<u8> texture_color = Pica::Texture::LookupTexture(data, x, y, info);
|
||||
buf[3 * x + y * row_stride] = texture_color.r();
|
||||
buf[3 * x + y * row_stride + 1] = texture_color.g();
|
||||
buf[3 * x + y * row_stride + 2] = texture_color.b();
|
||||
}
|
||||
}
|
||||
|
||||
// Write image data
|
||||
for (unsigned y = 0; y < texture_config.height; ++y) {
|
||||
u8* row_ptr = (u8*)buf + y * row_stride;
|
||||
png_write_row(png_ptr, row_ptr);
|
||||
}
|
||||
|
||||
delete[] buf;
|
||||
|
||||
// End write
|
||||
png_write_end(png_ptr, nullptr);
|
||||
|
||||
finalise:
|
||||
if (info_ptr != nullptr)
|
||||
png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
|
||||
if (png_ptr != nullptr)
|
||||
png_destroy_write_struct(&png_ptr, (png_infopp) nullptr);
|
||||
#endif
|
||||
}
|
||||
|
||||
static std::string ReplacePattern(const std::string& input, const std::string& pattern,
|
||||
const std::string& replacement) {
|
||||
size_t start = input.find(pattern);
|
||||
if (start == std::string::npos)
|
||||
return input;
|
||||
|
||||
std::string ret = input;
|
||||
ret.replace(start, pattern.length(), replacement);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static std::string GetTevStageConfigSourceString(
|
||||
const TexturingRegs::TevStageConfig::Source& source) {
|
||||
|
||||
using Source = TexturingRegs::TevStageConfig::Source;
|
||||
static const std::map<Source, std::string> source_map = {
|
||||
{Source::PrimaryColor, "PrimaryColor"},
|
||||
{Source::PrimaryFragmentColor, "PrimaryFragmentColor"},
|
||||
{Source::SecondaryFragmentColor, "SecondaryFragmentColor"},
|
||||
{Source::Texture0, "Texture0"},
|
||||
{Source::Texture1, "Texture1"},
|
||||
{Source::Texture2, "Texture2"},
|
||||
{Source::Texture3, "Texture3"},
|
||||
{Source::PreviousBuffer, "PreviousBuffer"},
|
||||
{Source::Constant, "Constant"},
|
||||
{Source::Previous, "Previous"},
|
||||
};
|
||||
|
||||
const auto src_it = source_map.find(source);
|
||||
if (src_it == source_map.end())
|
||||
return "Unknown";
|
||||
|
||||
return src_it->second;
|
||||
}
|
||||
|
||||
static std::string GetTevStageConfigColorSourceString(
|
||||
const TexturingRegs::TevStageConfig::Source& source,
|
||||
const TexturingRegs::TevStageConfig::ColorModifier modifier) {
|
||||
|
||||
using ColorModifier = TexturingRegs::TevStageConfig::ColorModifier;
|
||||
static const std::map<ColorModifier, std::string> color_modifier_map = {
|
||||
{ColorModifier::SourceColor, "%source.rgb"},
|
||||
{ColorModifier::OneMinusSourceColor, "(1.0 - %source.rgb)"},
|
||||
{ColorModifier::SourceAlpha, "%source.aaa"},
|
||||
{ColorModifier::OneMinusSourceAlpha, "(1.0 - %source.aaa)"},
|
||||
{ColorModifier::SourceRed, "%source.rrr"},
|
||||
{ColorModifier::OneMinusSourceRed, "(1.0 - %source.rrr)"},
|
||||
{ColorModifier::SourceGreen, "%source.ggg"},
|
||||
{ColorModifier::OneMinusSourceGreen, "(1.0 - %source.ggg)"},
|
||||
{ColorModifier::SourceBlue, "%source.bbb"},
|
||||
{ColorModifier::OneMinusSourceBlue, "(1.0 - %source.bbb)"},
|
||||
};
|
||||
|
||||
auto src_str = GetTevStageConfigSourceString(source);
|
||||
auto modifier_it = color_modifier_map.find(modifier);
|
||||
std::string modifier_str = "%source.????";
|
||||
if (modifier_it != color_modifier_map.end())
|
||||
modifier_str = modifier_it->second;
|
||||
|
||||
return ReplacePattern(modifier_str, "%source", src_str);
|
||||
}
|
||||
|
||||
static std::string GetTevStageConfigAlphaSourceString(
|
||||
const TexturingRegs::TevStageConfig::Source& source,
|
||||
const TexturingRegs::TevStageConfig::AlphaModifier modifier) {
|
||||
|
||||
using AlphaModifier = TexturingRegs::TevStageConfig::AlphaModifier;
|
||||
static const std::map<AlphaModifier, std::string> alpha_modifier_map = {
|
||||
{AlphaModifier::SourceAlpha, "%source.a"},
|
||||
{AlphaModifier::OneMinusSourceAlpha, "(1.0 - %source.a)"},
|
||||
{AlphaModifier::SourceRed, "%source.r"},
|
||||
{AlphaModifier::OneMinusSourceRed, "(1.0 - %source.r)"},
|
||||
{AlphaModifier::SourceGreen, "%source.g"},
|
||||
{AlphaModifier::OneMinusSourceGreen, "(1.0 - %source.g)"},
|
||||
{AlphaModifier::SourceBlue, "%source.b"},
|
||||
{AlphaModifier::OneMinusSourceBlue, "(1.0 - %source.b)"},
|
||||
};
|
||||
|
||||
auto src_str = GetTevStageConfigSourceString(source);
|
||||
auto modifier_it = alpha_modifier_map.find(modifier);
|
||||
std::string modifier_str = "%source.????";
|
||||
if (modifier_it != alpha_modifier_map.end())
|
||||
modifier_str = modifier_it->second;
|
||||
|
||||
return ReplacePattern(modifier_str, "%source", src_str);
|
||||
}
|
||||
|
||||
static std::string GetTevStageConfigOperationString(
|
||||
const TexturingRegs::TevStageConfig::Operation& operation) {
|
||||
|
||||
using Operation = TexturingRegs::TevStageConfig::Operation;
|
||||
static const std::map<Operation, std::string> combiner_map = {
|
||||
{Operation::Replace, "%source1"},
|
||||
{Operation::Modulate, "(%source1 * %source2)"},
|
||||
{Operation::Add, "(%source1 + %source2)"},
|
||||
{Operation::AddSigned, "(%source1 + %source2) - 0.5"},
|
||||
{Operation::Lerp, "lerp(%source1, %source2, %source3)"},
|
||||
{Operation::Subtract, "(%source1 - %source2)"},
|
||||
{Operation::Dot3_RGB, "dot(%source1, %source2)"},
|
||||
{Operation::MultiplyThenAdd, "((%source1 * %source2) + %source3)"},
|
||||
{Operation::AddThenMultiply, "((%source1 + %source2) * %source3)"},
|
||||
};
|
||||
|
||||
const auto op_it = combiner_map.find(operation);
|
||||
if (op_it == combiner_map.end())
|
||||
return "Unknown op (%source1, %source2, %source3)";
|
||||
|
||||
return op_it->second;
|
||||
}
|
||||
|
||||
std::string GetTevStageConfigColorCombinerString(const TexturingRegs::TevStageConfig& tev_stage) {
|
||||
auto op_str = GetTevStageConfigOperationString(tev_stage.color_op);
|
||||
op_str = ReplacePattern(
|
||||
op_str, "%source1",
|
||||
GetTevStageConfigColorSourceString(tev_stage.color_source1, tev_stage.color_modifier1));
|
||||
op_str = ReplacePattern(
|
||||
op_str, "%source2",
|
||||
GetTevStageConfigColorSourceString(tev_stage.color_source2, tev_stage.color_modifier2));
|
||||
return ReplacePattern(
|
||||
op_str, "%source3",
|
||||
GetTevStageConfigColorSourceString(tev_stage.color_source3, tev_stage.color_modifier3));
|
||||
}
|
||||
|
||||
std::string GetTevStageConfigAlphaCombinerString(const TexturingRegs::TevStageConfig& tev_stage) {
|
||||
auto op_str = GetTevStageConfigOperationString(tev_stage.alpha_op);
|
||||
op_str = ReplacePattern(
|
||||
op_str, "%source1",
|
||||
GetTevStageConfigAlphaSourceString(tev_stage.alpha_source1, tev_stage.alpha_modifier1));
|
||||
op_str = ReplacePattern(
|
||||
op_str, "%source2",
|
||||
GetTevStageConfigAlphaSourceString(tev_stage.alpha_source2, tev_stage.alpha_modifier2));
|
||||
return ReplacePattern(
|
||||
op_str, "%source3",
|
||||
GetTevStageConfigAlphaSourceString(tev_stage.alpha_source3, tev_stage.alpha_modifier3));
|
||||
}
|
||||
|
||||
void DumpTevStageConfig(const std::array<TexturingRegs::TevStageConfig, 6>& stages) {
|
||||
std::string stage_info = "Tev setup:\n";
|
||||
for (size_t index = 0; index < stages.size(); ++index) {
|
||||
const auto& tev_stage = stages[index];
|
||||
stage_info += "Stage " + std::to_string(index) + ": " +
|
||||
GetTevStageConfigColorCombinerString(tev_stage) + " " +
|
||||
GetTevStageConfigAlphaCombinerString(tev_stage) + "\n";
|
||||
}
|
||||
LOG_TRACE(HW_GPU, "%s", stage_info.c_str());
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
} // namespace

@ -1,251 +0,0 @@
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <condition_variable>
|
||||
#include <iterator>
|
||||
#include <list>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
#include "common/common_types.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "video_core/regs_rasterizer.h"
|
||||
#include "video_core/regs_shader.h"
|
||||
#include "video_core/regs_texturing.h"
|
||||
|
||||
namespace CiTrace {
|
||||
class Recorder;
|
||||
}
|
||||
|
||||
namespace Pica {
|
||||
|
||||
namespace Shader {
|
||||
struct ShaderSetup;
|
||||
}
|
||||
|
||||
class DebugContext {
|
||||
public:
|
||||
enum class Event {
|
||||
FirstEvent = 0,
|
||||
|
||||
PicaCommandLoaded = FirstEvent,
|
||||
PicaCommandProcessed,
|
||||
IncomingPrimitiveBatch,
|
||||
FinishedPrimitiveBatch,
|
||||
VertexShaderInvocation,
|
||||
IncomingDisplayTransfer,
|
||||
GSPCommandProcessed,
|
||||
BufferSwapped,
|
||||
|
||||
NumEvents
|
||||
};
|
||||
|
||||
/**
|
||||
* Inherit from this class to be notified of events registered to some debug context.
|
||||
* Most importantly this is used for our debugger GUI.
|
||||
*
|
||||
* To implement event handling, override the OnPicaBreakPointHit and OnPicaResume methods.
|
||||
* @warning All BreakPointObservers need to be on the same thread to guarantee thread-safe state
|
||||
* access
|
||||
* @todo Evaluate an alternative interface, in which there is only one managing observer and
|
||||
* multiple child observers running (by design) on the same thread.
|
||||
*/
|
||||
class BreakPointObserver {
|
||||
public:
|
||||
/// Constructs the object such that it observes events of the given DebugContext.
|
||||
BreakPointObserver(std::shared_ptr<DebugContext> debug_context)
|
||||
: context_weak(debug_context) {
|
||||
std::unique_lock<std::mutex> lock(debug_context->breakpoint_mutex);
|
||||
debug_context->breakpoint_observers.push_back(this);
|
||||
}
|
||||
|
||||
virtual ~BreakPointObserver() {
|
||||
auto context = context_weak.lock();
|
||||
if (context) {
|
||||
std::unique_lock<std::mutex> lock(context->breakpoint_mutex);
|
||||
context->breakpoint_observers.remove(this);
|
||||
|
||||
// If we are the last observer to be destroyed, tell the debugger context that
|
||||
// it is free to continue. In particular, this is required for a proper Citra
|
||||
// shutdown, when the emulation thread is waiting at a breakpoint.
|
||||
if (context->breakpoint_observers.empty())
|
||||
context->Resume();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Action to perform when a breakpoint was reached.
|
||||
* @param event Type of event which triggered the breakpoint
|
||||
* @param data Optional data pointer (if unused, this is a nullptr)
|
||||
* @note This function will perform nothing unless it is overridden in the child class.
|
||||
*/
|
||||
virtual void OnPicaBreakPointHit(Event event, void* data) {}
|
||||
|
||||
/**
|
||||
* Action to perform when emulation is resumed from a breakpoint.
|
||||
* @note This function will perform nothing unless it is overridden in the child class.
|
||||
*/
|
||||
virtual void OnPicaResume() {}
|
||||
|
||||
protected:
|
||||
/**
|
||||
* Weak context pointer. This need not be valid, so when requesting a shared_ptr via
|
||||
* context_weak.lock(), always compare the result against nullptr.
|
||||
*/
|
||||
std::weak_ptr<DebugContext> context_weak;
|
||||
};
|
||||
|
||||
/**
|
||||
* Simple structure defining a breakpoint state
|
||||
*/
|
||||
struct BreakPoint {
|
||||
bool enabled = false;
|
||||
};
|
||||
|
||||
/**
|
||||
* Static constructor used to create a shared_ptr of a DebugContext.
|
||||
*/
|
||||
static std::shared_ptr<DebugContext> Construct() {
|
||||
return std::shared_ptr<DebugContext>(new DebugContext);
|
||||
}
|
||||
|
||||
/**
|
||||
* Used by the emulation core when a given event has happened. If a breakpoint has been set
|
||||
* for this event, OnEvent calls the event handlers of the registered breakpoint observers.
|
||||
* The current thread then is halted until Resume() is called from another thread (or until
|
||||
* emulation is stopped).
|
||||
* @param event Event which has happened
|
||||
* @param data Optional data pointer (pass nullptr if unused). Needs to remain valid until
|
||||
* Resume() is called.
|
||||
*/
|
||||
void OnEvent(Event event, void* data) {
|
||||
// This check is left in the header to allow the compiler to inline it.
|
||||
if (!breakpoints[(int)event].enabled)
|
||||
return;
|
||||
// For the rest of event handling, call a separate function.
|
||||
DoOnEvent(event, data);
|
||||
}
|
||||
|
||||
void DoOnEvent(Event event, void* data);
|
||||
|
||||
/**
|
||||
* Resume from the current breakpoint.
|
||||
* @warning Calling this from the same thread that OnEvent was called in will cause a deadlock.
|
||||
* Calling from any other thread is safe.
|
||||
*/
|
||||
void Resume();
|
||||
|
||||
/**
|
||||
* Delete all set breakpoints and resume emulation.
|
||||
*/
|
||||
void ClearBreakpoints() {
|
||||
for (auto& bp : breakpoints) {
|
||||
bp.enabled = false;
|
||||
}
|
||||
Resume();
|
||||
}
|
||||
|
||||
// TODO: Evaluate whether access to these members should be hidden behind a public interface.
|
||||
std::array<BreakPoint, (int)Event::NumEvents> breakpoints;
|
||||
Event active_breakpoint;
|
||||
bool at_breakpoint = false;
|
||||
|
||||
std::shared_ptr<CiTrace::Recorder> recorder = nullptr;
|
||||
|
||||
private:
|
||||
/**
|
||||
* Private default constructor to make sure people always construct this through Construct()
|
||||
* instead.
|
||||
*/
|
||||
DebugContext() = default;
|
||||
|
||||
/// Mutex protecting current breakpoint state and the observer list.
|
||||
std::mutex breakpoint_mutex;
|
||||
|
||||
/// Used by OnEvent to wait for resumption.
|
||||
std::condition_variable resume_from_breakpoint;
|
||||
|
||||
/// List of registered observers
|
||||
std::list<BreakPointObserver*> breakpoint_observers;
|
||||
};
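A minimal sketch of how a frontend could hook into this interface, assuming the DebugContext declarations above; the LoggingObserver name and the wiring below are hypothetical, not part of the original source:

class LoggingObserver : public DebugContext::BreakPointObserver {
public:
    explicit LoggingObserver(std::shared_ptr<DebugContext> context)
        : BreakPointObserver(context) {}

    void OnPicaBreakPointHit(DebugContext::Event, void*) override {
        // Runs on the observer's thread; the emulation thread stays halted until Resume().
        ++hits;
    }

    void OnPicaResume() override {}

    int hits = 0;
};

// Typical wiring:
//   auto context = DebugContext::Construct();
//   LoggingObserver observer(context);
//   context->breakpoints[(int)DebugContext::Event::BufferSwapped].enabled = true;
//   // ... emulation calls OnEvent(Event::BufferSwapped, nullptr) and blocks ...
//   context->Resume();  // must come from a different thread than the one inside OnEvent()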
|
||||
|
||||
extern std::shared_ptr<DebugContext> g_debug_context; // TODO: Get rid of this global
|
||||
|
||||
namespace DebugUtils {
|
||||
|
||||
#define PICA_DUMP_TEXTURES 0
|
||||
#define PICA_LOG_TEV 0
|
||||
|
||||
void DumpShader(const std::string& filename, const ShaderRegs& config,
|
||||
const Shader::ShaderSetup& setup,
|
||||
const RasterizerRegs::VSOutputAttributes* output_attributes);
|
||||
|
||||
// Utility class to log Pica commands.
|
||||
struct PicaTrace {
|
||||
struct Write {
|
||||
u16 cmd_id;
|
||||
u16 mask;
|
||||
u32 value;
|
||||
};
|
||||
std::vector<Write> writes;
|
||||
};
|
||||
|
||||
extern bool g_is_pica_tracing;
|
||||
|
||||
void StartPicaTracing();
|
||||
inline bool IsPicaTracing() {
|
||||
return g_is_pica_tracing;
|
||||
}
|
||||
void OnPicaRegWrite(PicaTrace::Write write);
|
||||
std::unique_ptr<PicaTrace> FinishPicaTracing();
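A small usage sketch, assuming the declarations above; the function name is hypothetical:

inline std::size_t CountPicaWritesExample() {
    StartPicaTracing();
    // ... run the GPU command processor; every register write is reported via OnPicaRegWrite() ...
    const std::unique_ptr<PicaTrace> trace = FinishPicaTracing();
    return trace->writes.size();
}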
|
||||
|
||||
void DumpTexture(const TexturingRegs::TextureConfig& texture_config, u8* data);
|
||||
|
||||
std::string GetTevStageConfigColorCombinerString(const TexturingRegs::TevStageConfig& tev_stage);
|
||||
std::string GetTevStageConfigAlphaCombinerString(const TexturingRegs::TevStageConfig& tev_stage);
|
||||
|
||||
/// Dumps the Tev stage config to log at trace level
|
||||
void DumpTevStageConfig(const std::array<TexturingRegs::TevStageConfig, 6>& stages);
|
||||
|
||||
/**
|
||||
* Used in the vertex loader to merge access records. TODO: Investigate if actually useful.
|
||||
*/
|
||||
class MemoryAccessTracker {
|
||||
/// Combine overlapping and close ranges
|
||||
void SimplifyRanges() {
|
||||
for (auto it = ranges.begin(); it != ranges.end(); ++it) {
|
||||
// NOTE: We add 32 to the range end address to make sure "close" ranges are combined,
|
||||
// too
|
||||
auto it2 = std::next(it);
|
||||
while (it2 != ranges.end() && it->first + it->second + 32 >= it2->first) {
|
||||
it->second = std::max(it->second, it2->first + it2->second - it->first);
|
||||
it2 = ranges.erase(it2);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
/// Record a particular memory access in the list
|
||||
void AddAccess(u32 paddr, u32 size) {
|
||||
// Create new range or extend existing one
|
||||
ranges[paddr] = std::max(ranges[paddr], size);
|
||||
|
||||
// Simplify ranges...
|
||||
SimplifyRanges();
|
||||
}
|
||||
|
||||
/// Map of accessed ranges (mapping start address to range size)
|
||||
std::map<u32, u32> ranges;
|
||||
};
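For illustration (not part of the original header), overlapping or nearby accesses collapse into a single range:

inline void MemoryAccessTrackerExample() {
    MemoryAccessTracker tracker;
    tracker.AddAccess(0x1000, 0x20); // covers [0x1000, 0x1020)
    tracker.AddAccess(0x1010, 0x40); // overlaps; merged into [0x1000, 0x1050)
    // tracker.ranges now holds the single entry {0x1000 -> 0x50}
}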
|
||||
|
||||
} // namespace
|
||||
|
||||
} // namespace
|
|
@ -1,274 +0,0 @@
|
|||
// Copyright 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "video_core/geometry_pipeline.h"
|
||||
#include "video_core/pica_state.h"
|
||||
#include "video_core/regs.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
#include "video_core/video_core.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
/// An attribute buffering interface for different pipeline modes
|
||||
class GeometryPipelineBackend {
|
||||
public:
|
||||
virtual ~GeometryPipelineBackend() = default;
|
||||
|
||||
/// Returns true if there is no incomplete data transfer pending
|
||||
virtual bool IsEmpty() const = 0;
|
||||
|
||||
/// Checks if the pipeline needs a direct input from index buffer
|
||||
virtual bool NeedIndexInput() const = 0;
|
||||
|
||||
/// Submits an index from index buffer
|
||||
virtual void SubmitIndex(unsigned int val) = 0;
|
||||
|
||||
/**
|
||||
* Submits vertex attributes
|
||||
* @param input attributes of a vertex output from vertex shader
|
||||
* @return whether the buffer is full and the geometry shader should be invoked
|
||||
*/
|
||||
virtual bool SubmitVertex(const Shader::AttributeBuffer& input) = 0;
|
||||
};
|
||||
|
||||
// In Point mode, vertex attributes are sent to the input registers of the geometry shader unit.
// The sizes of the vertex shader output and the geometry shader input are constant. The geometry
// shader is invoked once the input buffer has been filled with vertex shader outputs. For example,
// if a geometry shader takes 6 inputs and the vertex shader outputs 2 attributes, it takes 3
// vertices to trigger one geometry shader invocation.
|
||||
// TODO: what happens when the input size is not divisible by the output size?
|
||||
class GeometryPipeline_Point : public GeometryPipelineBackend {
|
||||
public:
|
||||
GeometryPipeline_Point(const Regs& regs, Shader::GSUnitState& unit) : regs(regs), unit(unit) {
|
||||
ASSERT(regs.pipeline.variable_primitive == 0);
|
||||
ASSERT(regs.gs.input_to_uniform == 0);
|
||||
vs_output_num = regs.pipeline.vs_outmap_total_minus_1_a + 1;
|
||||
size_t gs_input_num = regs.gs.max_input_attribute_index + 1;
|
||||
ASSERT(gs_input_num % vs_output_num == 0);
|
||||
buffer_cur = attribute_buffer.attr;
|
||||
buffer_end = attribute_buffer.attr + gs_input_num;
|
||||
}
|
||||
|
||||
bool IsEmpty() const override {
|
||||
return buffer_cur == attribute_buffer.attr;
|
||||
}
|
||||
|
||||
bool NeedIndexInput() const override {
|
||||
return false;
|
||||
}
|
||||
|
||||
void SubmitIndex(unsigned int val) override {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
bool SubmitVertex(const Shader::AttributeBuffer& input) override {
|
||||
buffer_cur = std::copy(input.attr, input.attr + vs_output_num, buffer_cur);
|
||||
if (buffer_cur == buffer_end) {
|
||||
buffer_cur = attribute_buffer.attr;
|
||||
unit.LoadInput(regs.gs, attribute_buffer);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private:
|
||||
const Regs& regs;
|
||||
Shader::GSUnitState& unit;
|
||||
Shader::AttributeBuffer attribute_buffer;
|
||||
Math::Vec4<float24>* buffer_cur;
|
||||
Math::Vec4<float24>* buffer_end;
|
||||
unsigned int vs_output_num;
|
||||
};
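// Illustrative note (not in the original file): with vs_output_num == 2 and gs_input_num == 6,
// SubmitVertex() returns false for the first two vertices and true on the third, so one
// geometry shader invocation is triggered for every three submitted vertices.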
|
||||
|
||||
// In VariablePrimitive mode, vertex attributes are buffered into the uniform registers of the
// geometry shader unit. The number of vertices is variable and is specified by the first index
// value in the batch. This mode is usually used for subdivision.
|
||||
class GeometryPipeline_VariablePrimitive : public GeometryPipelineBackend {
|
||||
public:
|
||||
GeometryPipeline_VariablePrimitive(const Regs& regs, Shader::ShaderSetup& setup)
|
||||
: regs(regs), setup(setup) {
|
||||
ASSERT(regs.pipeline.variable_primitive == 1);
|
||||
ASSERT(regs.gs.input_to_uniform == 1);
|
||||
vs_output_num = regs.pipeline.vs_outmap_total_minus_1_a + 1;
|
||||
}
|
||||
|
||||
bool IsEmpty() const override {
|
||||
return need_index;
|
||||
}
|
||||
|
||||
bool NeedIndexInput() const override {
|
||||
return need_index;
|
||||
}
|
||||
|
||||
void SubmitIndex(unsigned int val) override {
|
||||
DEBUG_ASSERT(need_index);
|
||||
|
||||
// The number of input vertices is written to the first uniform register
|
||||
float24 vertex_num = float24::FromFloat32(static_cast<float>(val));
|
||||
setup.uniforms.f[0] = Math::MakeVec(vertex_num, vertex_num, vertex_num, vertex_num);
|
||||
|
||||
// The second and subsequent uniform registers receive the input vertices
|
||||
buffer_cur = setup.uniforms.f + 1;
|
||||
|
||||
main_vertex_num = regs.pipeline.variable_vertex_main_num_minus_1 + 1;
|
||||
total_vertex_num = val;
|
||||
need_index = false;
|
||||
}
|
||||
|
||||
bool SubmitVertex(const Shader::AttributeBuffer& input) override {
|
||||
DEBUG_ASSERT(!need_index);
|
||||
if (main_vertex_num != 0) {
|
||||
// For main vertices, receive all attributes
|
||||
buffer_cur = std::copy(input.attr, input.attr + vs_output_num, buffer_cur);
|
||||
--main_vertex_num;
|
||||
} else {
|
||||
// For other vertices, only receive the first attribute (usually the position)
|
||||
*(buffer_cur++) = input.attr[0];
|
||||
}
|
||||
--total_vertex_num;
|
||||
|
||||
if (total_vertex_num == 0) {
|
||||
need_index = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
private:
|
||||
bool need_index = true;
|
||||
const Regs& regs;
|
||||
Shader::ShaderSetup& setup;
|
||||
unsigned int main_vertex_num;
|
||||
unsigned int total_vertex_num;
|
||||
Math::Vec4<float24>* buffer_cur;
|
||||
unsigned int vs_output_num;
|
||||
};
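// Illustrative note (not in the original file): the caller first submits the vertex count via
// SubmitIndex(n) (which also lands in uniform c0); the following n SubmitVertex() calls buffer
// the data, and the n-th call returns true to trigger the geometry shader.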
|
||||
|
||||
// In FixedPrimitive mode, vertex attributes are buffered into the uniform registers of the geometry
// shader unit. The number of vertices per shader invocation is constant. This mode is usually used
// for particle systems.
|
||||
class GeometryPipeline_FixedPrimitive : public GeometryPipelineBackend {
|
||||
public:
|
||||
GeometryPipeline_FixedPrimitive(const Regs& regs, Shader::ShaderSetup& setup)
|
||||
: regs(regs), setup(setup) {
|
||||
ASSERT(regs.pipeline.variable_primitive == 0);
|
||||
ASSERT(regs.gs.input_to_uniform == 1);
|
||||
vs_output_num = regs.pipeline.vs_outmap_total_minus_1_a + 1;
|
||||
ASSERT(vs_output_num == regs.pipeline.gs_config.stride_minus_1 + 1);
|
||||
size_t vertex_num = regs.pipeline.gs_config.fixed_vertex_num_minus_1 + 1;
|
||||
buffer_cur = buffer_begin = setup.uniforms.f + regs.pipeline.gs_config.start_index;
|
||||
buffer_end = buffer_begin + vs_output_num * vertex_num;
|
||||
}
|
||||
|
||||
bool IsEmpty() const override {
|
||||
return buffer_cur == buffer_begin;
|
||||
}
|
||||
|
||||
bool NeedIndexInput() const override {
|
||||
return false;
|
||||
}
|
||||
|
||||
void SubmitIndex(unsigned int val) override {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
bool SubmitVertex(const Shader::AttributeBuffer& input) override {
|
||||
buffer_cur = std::copy(input.attr, input.attr + vs_output_num, buffer_cur);
|
||||
if (buffer_cur == buffer_end) {
|
||||
buffer_cur = buffer_begin;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private:
|
||||
const Regs& regs;
|
||||
Shader::ShaderSetup& setup;
|
||||
Math::Vec4<float24>* buffer_begin;
|
||||
Math::Vec4<float24>* buffer_cur;
|
||||
Math::Vec4<float24>* buffer_end;
|
||||
unsigned int vs_output_num;
|
||||
};
|
||||
|
||||
GeometryPipeline::GeometryPipeline(State& state) : state(state) {}
|
||||
|
||||
GeometryPipeline::~GeometryPipeline() = default;
|
||||
|
||||
void GeometryPipeline::SetVertexHandler(Shader::VertexHandler vertex_handler) {
|
||||
this->vertex_handler = vertex_handler;
|
||||
}
|
||||
|
||||
void GeometryPipeline::Setup(Shader::ShaderEngine* shader_engine) {
|
||||
if (!backend)
|
||||
return;
|
||||
|
||||
this->shader_engine = shader_engine;
|
||||
shader_engine->SetupBatch(state.gs, state.regs.gs.main_offset);
|
||||
}
|
||||
|
||||
void GeometryPipeline::Reconfigure() {
|
||||
ASSERT(!backend || backend->IsEmpty());
|
||||
|
||||
if (state.regs.pipeline.use_gs == PipelineRegs::UseGS::No) {
|
||||
backend = nullptr;
|
||||
return;
|
||||
}
|
||||
|
||||
ASSERT(state.regs.pipeline.use_gs == PipelineRegs::UseGS::Yes);
|
||||
|
||||
// The following assumes that when the geometry shader is in use, shader unit 3 is configured as
|
||||
// a geometry shader unit.
|
||||
// TODO: what happens if this is not true?
|
||||
ASSERT(state.regs.pipeline.gs_unit_exclusive_configuration == 1);
|
||||
ASSERT(state.regs.gs.shader_mode == ShaderRegs::ShaderMode::GS);
|
||||
|
||||
state.gs_unit.ConfigOutput(state.regs.gs);
|
||||
|
||||
ASSERT(state.regs.pipeline.vs_outmap_total_minus_1_a ==
|
||||
state.regs.pipeline.vs_outmap_total_minus_1_b);
|
||||
|
||||
switch (state.regs.pipeline.gs_config.mode) {
|
||||
case PipelineRegs::GSMode::Point:
|
||||
backend = std::make_unique<GeometryPipeline_Point>(state.regs, state.gs_unit);
|
||||
break;
|
||||
case PipelineRegs::GSMode::VariablePrimitive:
|
||||
backend = std::make_unique<GeometryPipeline_VariablePrimitive>(state.regs, state.gs);
|
||||
break;
|
||||
case PipelineRegs::GSMode::FixedPrimitive:
|
||||
backend = std::make_unique<GeometryPipeline_FixedPrimitive>(state.regs, state.gs);
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
|
||||
|
||||
bool GeometryPipeline::NeedIndexInput() const {
|
||||
if (!backend)
|
||||
return false;
|
||||
return backend->NeedIndexInput();
|
||||
}
|
||||
|
||||
void GeometryPipeline::SubmitIndex(unsigned int val) {
|
||||
backend->SubmitIndex(val);
|
||||
}
|
||||
|
||||
void GeometryPipeline::SubmitVertex(const Shader::AttributeBuffer& input) {
|
||||
if (!backend) {
|
||||
// No backend means the geometry shader is disabled, so we send the vertex shader output
|
||||
// directly to the primitive assembler.
|
||||
vertex_handler(input);
|
||||
} else {
|
||||
if (backend->SubmitVertex(input)) {
|
||||
shader_engine->Run(state.gs, state.gs_unit);
|
||||
|
||||
// Uniform b15 is set to true after every geometry shader invocation. If the program sets b15
// to false beforehand, the shader can use it to tell whether this is the first invocation in
// a batch.
|
||||
state.gs.uniforms.b[15] = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace Pica
|
|
@ -1,49 +0,0 @@
|
|||
// Copyright 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include "video_core/shader/shader.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
struct State;
|
||||
|
||||
class GeometryPipelineBackend;
|
||||
|
||||
/// A pipeline that receives vertex shader output and feeds it to the geometry shader and primitive assembler
|
||||
class GeometryPipeline {
|
||||
public:
|
||||
explicit GeometryPipeline(State& state);
|
||||
~GeometryPipeline();
|
||||
|
||||
/// Sets the handler for receiving vertex outputs from vertex shader
|
||||
void SetVertexHandler(Shader::VertexHandler vertex_handler);
|
||||
|
||||
/**
|
||||
* Sets up the geometry shader unit if it is in use
|
||||
* @param shader_engine the shader engine for the geometry shader to run
|
||||
*/
|
||||
void Setup(Shader::ShaderEngine* shader_engine);
|
||||
|
||||
/// Reconfigures the pipeline according to current register settings
|
||||
void Reconfigure();
|
||||
|
||||
/// Checks if the pipeline needs a direct input from index buffer
|
||||
bool NeedIndexInput() const;
|
||||
|
||||
/// Submits an index from index buffer. Call this only when NeedIndexInput returns true
|
||||
void SubmitIndex(unsigned int val);
|
||||
|
||||
/// Submits vertex attributes output from vertex shader
|
||||
void SubmitVertex(const Shader::AttributeBuffer& input);
|
||||
|
||||
private:
|
||||
Shader::VertexHandler vertex_handler;
|
||||
Shader::ShaderEngine* shader_engine;
|
||||
std::unique_ptr<GeometryPipelineBackend> backend;
|
||||
State& state;
|
||||
};
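A rough sketch of the intended call sequence, assuming a fully configured Pica state; engine, index and vs_output are placeholders, not part of the original source:

// GeometryPipeline& pipeline = state.geometry_pipeline;
// pipeline.Reconfigure();            // choose a backend from the current register settings
// pipeline.Setup(engine);            // bind the geometry shader engine, if a backend exists
// if (pipeline.NeedIndexInput())
//     pipeline.SubmitIndex(index);   // VariablePrimitive mode expects the vertex count first
// pipeline.SubmitVertex(vs_output);  // buffered, or forwarded straight to the vertex handler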
|
||||
} // namespace Pica
|
|
@ -1,85 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <algorithm>
|
||||
#include <functional>
|
||||
#include <vector>
|
||||
#include "core/hle/service/gsp_gpu.h"
|
||||
|
||||
class GraphicsDebugger {
|
||||
public:
|
||||
// Base class for all objects which need to be notified about GPU events
|
||||
class DebuggerObserver {
|
||||
public:
|
||||
DebuggerObserver() : observed(nullptr) {}
|
||||
|
||||
virtual ~DebuggerObserver() {
|
||||
if (observed)
|
||||
observed->UnregisterObserver(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called when a GX command has been processed and is ready to be
|
||||
* read via GraphicsDebugger::ReadGXCommandHistory.
|
||||
* @param total_command_count Total number of commands in the GX history
|
||||
* @note All methods in this class are called from the GSP thread
|
||||
*/
|
||||
virtual void GXCommandProcessed(int total_command_count) {
|
||||
const Service::GSP::Command& cmd =
|
||||
observed->ReadGXCommandHistory(total_command_count - 1);
|
||||
LOG_TRACE(Debug_GPU, "Received command: id=%x", (int)cmd.id.Value());
|
||||
}
|
||||
|
||||
protected:
|
||||
const GraphicsDebugger* GetDebugger() const {
|
||||
return observed;
|
||||
}
|
||||
|
||||
private:
|
||||
GraphicsDebugger* observed;
|
||||
|
||||
friend class GraphicsDebugger;
|
||||
};
|
||||
|
||||
void GXCommandProcessed(u8* command_data) {
|
||||
if (observers.empty())
|
||||
return;
|
||||
|
||||
gx_command_history.emplace_back();
|
||||
Service::GSP::Command& cmd = gx_command_history.back();
|
||||
|
||||
memcpy(&cmd, command_data, sizeof(Service::GSP::Command));
|
||||
|
||||
ForEachObserver([this](DebuggerObserver* observer) {
|
||||
observer->GXCommandProcessed(static_cast<int>(this->gx_command_history.size()));
|
||||
});
|
||||
}
|
||||
|
||||
const Service::GSP::Command& ReadGXCommandHistory(int index) const {
|
||||
// TODO: Is this thread-safe?
|
||||
return gx_command_history[index];
|
||||
}
|
||||
|
||||
void RegisterObserver(DebuggerObserver* observer) {
|
||||
// TODO: Check for duplicates
|
||||
observers.push_back(observer);
|
||||
observer->observed = this;
|
||||
}
|
||||
|
||||
void UnregisterObserver(DebuggerObserver* observer) {
|
||||
observers.erase(std::remove(observers.begin(), observers.end(), observer), observers.end());
|
||||
observer->observed = nullptr;
|
||||
}
|
||||
|
||||
private:
|
||||
void ForEachObserver(std::function<void(DebuggerObserver*)> func) {
|
||||
std::for_each(observers.begin(), observers.end(), func);
|
||||
}
|
||||
|
||||
std::vector<DebuggerObserver*> observers;
|
||||
|
||||
std::vector<Service::GSP::Command> gx_command_history;
|
||||
};
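A short usage sketch, assuming the class above; MyObserver and raw_cmd are hypothetical:

// GraphicsDebugger debugger;
// MyObserver observer;                  // some DebuggerObserver subclass
// debugger.RegisterObserver(&observer);
// debugger.GXCommandProcessed(raw_cmd); // raw_cmd points at a Service::GSP::Command
// const Service::GSP::Command& last =
//     debugger.ReadGXCommandHistory(0); // observer was notified with total_command_count == 1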
|
|
@ -1,54 +0,0 @@
|
|||
// Copyright 2015 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <cstring>
|
||||
#include "video_core/geometry_pipeline.h"
|
||||
#include "video_core/pica.h"
|
||||
#include "video_core/pica_state.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
#include "video_core/video_core.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
State g_state;
|
||||
|
||||
void Init() {
|
||||
g_state.Reset();
|
||||
}
|
||||
|
||||
void Shutdown() {
|
||||
Shader::Shutdown();
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void Zero(T& o) {
|
||||
memset(&o, 0, sizeof(o));
|
||||
}
|
||||
|
||||
State::State() : geometry_pipeline(*this) {
|
||||
auto SubmitVertex = [this](const Shader::AttributeBuffer& vertex) {
|
||||
using Pica::Shader::OutputVertex;
|
||||
auto AddTriangle = [this](const OutputVertex& v0, const OutputVertex& v1,
|
||||
const OutputVertex& v2) {
|
||||
VideoCore::g_renderer->Rasterizer()->AddTriangle(v0, v1, v2);
|
||||
};
|
||||
primitive_assembler.SubmitVertex(
|
||||
Shader::OutputVertex::FromAttributeBuffer(regs.rasterizer, vertex), AddTriangle);
|
||||
};
|
||||
|
||||
auto SetWinding = [this]() { primitive_assembler.SetWinding(); };
|
||||
|
||||
g_state.gs_unit.SetVertexHandler(SubmitVertex, SetWinding);
|
||||
g_state.geometry_pipeline.SetVertexHandler(SubmitVertex);
|
||||
}
|
||||
|
||||
void State::Reset() {
|
||||
Zero(regs);
|
||||
Zero(vs);
|
||||
Zero(gs);
|
||||
Zero(cmd_list);
|
||||
Zero(immediate);
|
||||
primitive_assembler.Reconfigure(PipelineRegs::TriangleTopology::List);
|
||||
}
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "video_core/regs_texturing.h"
|
||||
namespace Pica {
|
||||
|
||||
/// Initialize Pica state
|
||||
void Init();
|
||||
|
||||
/// Shutdown Pica state
|
||||
void Shutdown();
|
||||
|
||||
} // namespace
|
|
@ -1,159 +0,0 @@
|
|||
// Copyright 2016 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "video_core/geometry_pipeline.h"
|
||||
#include "video_core/primitive_assembly.h"
|
||||
#include "video_core/regs.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
/// Struct used to describe current Pica state
|
||||
struct State {
|
||||
State();
|
||||
void Reset();
|
||||
|
||||
/// Pica registers
|
||||
Regs regs;
|
||||
|
||||
Shader::ShaderSetup vs;
|
||||
Shader::ShaderSetup gs;
|
||||
|
||||
Shader::AttributeBuffer input_default_attributes;
|
||||
|
||||
struct ProcTex {
|
||||
union ValueEntry {
|
||||
u32 raw;
|
||||
|
||||
// LUT value, encoded as 12-bit fixed point, with 12 fraction bits
|
||||
BitField<0, 12, u32> value; // 0.0.12 fixed point
|
||||
|
||||
// Difference between two entry values. Used for efficient interpolation.
|
||||
// 0.0.12 fixed point with two's complement. The range is [-0.5, 0.5).
|
||||
// Note: the type of this field differs from that of the lighting LUT entries
|
||||
BitField<12, 12, s32> difference;
|
||||
|
||||
float ToFloat() const {
|
||||
return static_cast<float>(value) / 4095.f;
|
||||
}
|
||||
|
||||
float DiffToFloat() const {
|
||||
return static_cast<float>(difference) / 4095.f;
|
||||
}
|
||||
};
|
||||
|
||||
union ColorEntry {
|
||||
u32 raw;
|
||||
BitField<0, 8, u32> r;
|
||||
BitField<8, 8, u32> g;
|
||||
BitField<16, 8, u32> b;
|
||||
BitField<24, 8, u32> a;
|
||||
|
||||
Math::Vec4<u8> ToVector() const {
|
||||
return {static_cast<u8>(r), static_cast<u8>(g), static_cast<u8>(b),
|
||||
static_cast<u8>(a)};
|
||||
}
|
||||
};
|
||||
|
||||
union ColorDifferenceEntry {
|
||||
u32 raw;
|
||||
BitField<0, 8, s32> r; // half of the difference between two ColorEntry values
|
||||
BitField<8, 8, s32> g;
|
||||
BitField<16, 8, s32> b;
|
||||
BitField<24, 8, s32> a;
|
||||
|
||||
Math::Vec4<s32> ToVector() const {
|
||||
return Math::Vec4<s32>{r, g, b, a} * 2;
|
||||
}
|
||||
};
|
||||
|
||||
std::array<ValueEntry, 128> noise_table;
|
||||
std::array<ValueEntry, 128> color_map_table;
|
||||
std::array<ValueEntry, 128> alpha_map_table;
|
||||
std::array<ColorEntry, 256> color_table;
|
||||
std::array<ColorDifferenceEntry, 256> color_diff_table;
|
||||
} proctex;
|
||||
|
||||
struct Lighting {
|
||||
union LutEntry {
|
||||
// Used for raw access
|
||||
u32 raw;
|
||||
|
||||
// LUT value, encoded as 12-bit fixed point, with 12 fraction bits
|
||||
BitField<0, 12, u32> value; // 0.0.12 fixed point
|
||||
|
||||
// Used for efficient interpolation.
|
||||
BitField<12, 11, u32> difference; // 0.0.11 fixed point
|
||||
BitField<23, 1, u32> neg_difference;
|
||||
|
||||
float ToFloat() const {
|
||||
return static_cast<float>(value) / 4095.f;
|
||||
}
|
||||
|
||||
float DiffToFloat() const {
|
||||
float diff = static_cast<float>(difference) / 2047.f;
|
||||
return neg_difference ? -diff : diff;
|
||||
}
|
||||
};
|
||||
|
||||
std::array<std::array<LutEntry, 256>, 24> luts;
|
||||
} lighting;
|
||||
|
||||
struct {
|
||||
union LutEntry {
|
||||
// Used for raw access
|
||||
u32 raw;
|
||||
|
||||
BitField<0, 13, s32> difference; // 1.1.11 fixed point
|
||||
BitField<13, 11, u32> value; // 0.0.11 fixed point
|
||||
|
||||
float ToFloat() const {
|
||||
return static_cast<float>(value) / 2047.0f;
|
||||
}
|
||||
|
||||
float DiffToFloat() const {
|
||||
return static_cast<float>(difference) / 2047.0f;
|
||||
}
|
||||
};
|
||||
|
||||
std::array<LutEntry, 128> lut;
|
||||
} fog;
|
||||
|
||||
/// Current Pica command list
|
||||
struct {
|
||||
const u32* head_ptr;
|
||||
const u32* current_ptr;
|
||||
u32 length;
|
||||
} cmd_list;
|
||||
|
||||
/// Struct used to describe immediate mode rendering state
|
||||
struct ImmediateModeState {
|
||||
// Used to buffer partial vertices for immediate-mode rendering.
|
||||
Shader::AttributeBuffer input_vertex;
|
||||
// Index of the next attribute to be loaded into `input_vertex`.
|
||||
u32 current_attribute = 0;
|
||||
// Indicates that immediate mode has just started and the geometry pipeline needs to be reconfigured
|
||||
bool reset_geometry_pipeline = true;
|
||||
} immediate;
|
||||
|
||||
// The geometry shader setup needs to be kept in the global state because some shaders rely on
// register values being preserved across shader invocations.
|
||||
// TODO: also bring the three vertex shader units here and implement the shader scheduler.
|
||||
Shader::GSUnitState gs_unit;
|
||||
|
||||
GeometryPipeline geometry_pipeline;
|
||||
|
||||
// This is constructed with a dummy triangle topology
|
||||
PrimitiveAssembler<Shader::OutputVertex> primitive_assembler;
|
||||
};
|
||||
|
||||
extern State g_state; ///< Current Pica state
|
||||
|
||||
} // namespace
|
|
@ -1,143 +0,0 @@
|
|||
// Copyright 2015 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cmath>
|
||||
#include <cstring>
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
/**
|
||||
* Template class for converting arbitrary Pica float types to IEEE 754 32-bit single-precision
|
||||
* floating point.
|
||||
*
|
||||
* When decoding, format is as follows:
|
||||
* - The first `M` bits are the mantissa
|
||||
* - The next `E` bits are the exponent
|
||||
* - The last bit is the sign bit
|
||||
*
|
||||
* @todo Verify on HW if this conversion is sufficiently accurate.
|
||||
*/
|
||||
template <unsigned M, unsigned E>
|
||||
struct Float {
|
||||
public:
|
||||
static Float<M, E> FromFloat32(float val) {
|
||||
Float<M, E> ret;
|
||||
ret.value = val;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static Float<M, E> FromRaw(u32 hex) {
|
||||
Float<M, E> res;
|
||||
|
||||
const int width = M + E + 1;
|
||||
const int bias = 128 - (1 << (E - 1));
|
||||
const int exponent = (hex >> M) & ((1 << E) - 1);
|
||||
const unsigned mantissa = hex & ((1 << M) - 1);
|
||||
|
||||
if (hex & ((1 << (width - 1)) - 1))
|
||||
hex = ((hex >> (E + M)) << 31) | (mantissa << (23 - M)) | ((exponent + bias) << 23);
|
||||
else
|
||||
hex = ((hex >> (E + M)) << 31);
|
||||
|
||||
std::memcpy(&res.value, &hex, sizeof(float));
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
static Float<M, E> Zero() {
|
||||
return FromFloat32(0.f);
|
||||
}
|
||||
|
||||
// Not recommended for anything but logging
|
||||
float ToFloat32() const {
|
||||
return value;
|
||||
}
|
||||
|
||||
Float<M, E> operator*(const Float<M, E>& flt) const {
|
||||
float result = value * flt.ToFloat32();
|
||||
// PICA gives 0 instead of NaN when multiplying by inf
|
||||
if (!std::isnan(value) && !std::isnan(flt.ToFloat32()))
|
||||
if (std::isnan(result))
|
||||
result = 0.f;
|
||||
return Float<M, E>::FromFloat32(result);
|
||||
}
|
||||
|
||||
Float<M, E> operator/(const Float<M, E>& flt) const {
|
||||
return Float<M, E>::FromFloat32(ToFloat32() / flt.ToFloat32());
|
||||
}
|
||||
|
||||
Float<M, E> operator+(const Float<M, E>& flt) const {
|
||||
return Float<M, E>::FromFloat32(ToFloat32() + flt.ToFloat32());
|
||||
}
|
||||
|
||||
Float<M, E> operator-(const Float<M, E>& flt) const {
|
||||
return Float<M, E>::FromFloat32(ToFloat32() - flt.ToFloat32());
|
||||
}
|
||||
|
||||
Float<M, E>& operator*=(const Float<M, E>& flt) {
|
||||
value = operator*(flt).value;
|
||||
return *this;
|
||||
}
|
||||
|
||||
Float<M, E>& operator/=(const Float<M, E>& flt) {
|
||||
value /= flt.ToFloat32();
|
||||
return *this;
|
||||
}
|
||||
|
||||
Float<M, E>& operator+=(const Float<M, E>& flt) {
|
||||
value += flt.ToFloat32();
|
||||
return *this;
|
||||
}
|
||||
|
||||
Float<M, E>& operator-=(const Float<M, E>& flt) {
|
||||
value -= flt.ToFloat32();
|
||||
return *this;
|
||||
}
|
||||
|
||||
Float<M, E> operator-() const {
|
||||
return Float<M, E>::FromFloat32(-ToFloat32());
|
||||
}
|
||||
|
||||
bool operator<(const Float<M, E>& flt) const {
|
||||
return ToFloat32() < flt.ToFloat32();
|
||||
}
|
||||
|
||||
bool operator>(const Float<M, E>& flt) const {
|
||||
return ToFloat32() > flt.ToFloat32();
|
||||
}
|
||||
|
||||
bool operator>=(const Float<M, E>& flt) const {
|
||||
return ToFloat32() >= flt.ToFloat32();
|
||||
}
|
||||
|
||||
bool operator<=(const Float<M, E>& flt) const {
|
||||
return ToFloat32() <= flt.ToFloat32();
|
||||
}
|
||||
|
||||
bool operator==(const Float<M, E>& flt) const {
|
||||
return ToFloat32() == flt.ToFloat32();
|
||||
}
|
||||
|
||||
bool operator!=(const Float<M, E>& flt) const {
|
||||
return ToFloat32() != flt.ToFloat32();
|
||||
}
|
||||
|
||||
private:
|
||||
static const unsigned MASK = (1 << (M + E + 1)) - 1;
|
||||
static const unsigned MANTISSA_MASK = (1 << M) - 1;
|
||||
static const unsigned EXPONENT_MASK = (1 << E) - 1;
|
||||
|
||||
// Stored as a regular float, merely for convenience
|
||||
// TODO: Perform proper arithmetic on this!
|
||||
float value;
|
||||
};
|
||||
|
||||
using float24 = Float<16, 7>;
|
||||
using float20 = Float<12, 7>;
|
||||
using float16 = Float<10, 5>;
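A brief sketch of how these types behave, assuming the template above; the function name is hypothetical:

inline void Float24Example() {
    const float24 a = float24::FromFloat32(1.5f);
    const float24 b = float24::FromFloat32(2.0f);
    const float24 c = a * b;                      // ToFloat32() == 3.0f
    // Raw 24-bit register data: exponent field 63 + bias 64 gives IEEE exponent 127, i.e. 1.0f.
    const float24 d = float24::FromRaw(0x3F0000); // ToFloat32() == 1.0f
    (void)c;
    (void)d;
}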
|
||||
|
||||
} // namespace Pica
|
|
@ -1,77 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/logging/log.h"
|
||||
#include "video_core/primitive_assembly.h"
|
||||
#include "video_core/regs_pipeline.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
template <typename VertexType>
|
||||
PrimitiveAssembler<VertexType>::PrimitiveAssembler(PipelineRegs::TriangleTopology topology)
|
||||
: topology(topology), buffer_index(0) {}
|
||||
|
||||
template <typename VertexType>
|
||||
void PrimitiveAssembler<VertexType>::SubmitVertex(const VertexType& vtx,
|
||||
TriangleHandler triangle_handler) {
|
||||
switch (topology) {
|
||||
case PipelineRegs::TriangleTopology::List:
|
||||
case PipelineRegs::TriangleTopology::Shader:
|
||||
if (buffer_index < 2) {
|
||||
buffer[buffer_index++] = vtx;
|
||||
} else {
|
||||
buffer_index = 0;
|
||||
if (topology == PipelineRegs::TriangleTopology::Shader && winding) {
|
||||
triangle_handler(buffer[1], buffer[0], vtx);
|
||||
winding = false;
|
||||
} else {
|
||||
triangle_handler(buffer[0], buffer[1], vtx);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case PipelineRegs::TriangleTopology::Strip:
|
||||
case PipelineRegs::TriangleTopology::Fan:
|
||||
if (strip_ready)
|
||||
triangle_handler(buffer[0], buffer[1], vtx);
|
||||
|
||||
buffer[buffer_index] = vtx;
|
||||
|
||||
strip_ready |= (buffer_index == 1);
|
||||
|
||||
if (topology == PipelineRegs::TriangleTopology::Strip)
|
||||
buffer_index = !buffer_index;
|
||||
else if (topology == PipelineRegs::TriangleTopology::Fan)
|
||||
buffer_index = 1;
|
||||
break;
|
||||
|
||||
default:
|
||||
LOG_ERROR(HW_GPU, "Unknown triangle topology %x:", (int)topology);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
template <typename VertexType>
|
||||
void PrimitiveAssembler<VertexType>::SetWinding() {
|
||||
winding = true;
|
||||
}
|
||||
|
||||
template <typename VertexType>
|
||||
void PrimitiveAssembler<VertexType>::Reset() {
|
||||
buffer_index = 0;
|
||||
strip_ready = false;
|
||||
winding = false;
|
||||
}
|
||||
|
||||
template <typename VertexType>
|
||||
void PrimitiveAssembler<VertexType>::Reconfigure(PipelineRegs::TriangleTopology topology) {
|
||||
Reset();
|
||||
this->topology = topology;
|
||||
}
|
||||
|
||||
// explicitly instantiate use cases
|
||||
template struct PrimitiveAssembler<Shader::OutputVertex>;
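A minimal sketch of the List topology behaviour, assuming the definitions above; int stands in for a vertex type purely for illustration and the function name is hypothetical:

inline void PrimitiveAssemblerListExample() {
    PrimitiveAssembler<int> assembler(PipelineRegs::TriangleTopology::List);
    int triangles = 0;
    for (int v = 0; v < 6; ++v)
        assembler.SubmitVertex(v, [&](const int&, const int&, const int&) { ++triangles; });
    // triangles == 2: every third submitted vertex completes a triangle.
}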
|
||||
|
||||
} // namespace
|
|
@ -1,57 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <functional>
|
||||
#include "video_core/regs_pipeline.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
/*
|
||||
* Utility class to build triangles from a series of vertices,
|
||||
* according to a given triangle topology.
|
||||
*/
|
||||
template <typename VertexType>
|
||||
struct PrimitiveAssembler {
|
||||
using TriangleHandler =
|
||||
std::function<void(const VertexType& v0, const VertexType& v1, const VertexType& v2)>;
|
||||
|
||||
PrimitiveAssembler(
|
||||
PipelineRegs::TriangleTopology topology = PipelineRegs::TriangleTopology::List);
|
||||
|
||||
/*
|
||||
* Queues a vertex, builds primitives from the vertex queue according to the given
|
||||
* triangle topology, and calls triangle_handler for each generated primitive.
|
||||
* NOTE: We could specify the triangle handler in the constructor, but this way we can
|
||||
* keep event and handler code next to each other.
|
||||
*/
|
||||
void SubmitVertex(const VertexType& vtx, TriangleHandler triangle_handler);
|
||||
|
||||
/**
|
||||
* Inverts the vertex order of the next triangle. Called by the geometry shader emitter.
|
||||
* This only takes effect for TriangleTopology::Shader.
|
||||
*/
|
||||
void SetWinding();
|
||||
|
||||
/**
|
||||
* Resets the internal state of the PrimitiveAssembler.
|
||||
*/
|
||||
void Reset();
|
||||
|
||||
/**
|
||||
* Reconfigures the PrimitiveAssembler to use a different triangle topology.
|
||||
*/
|
||||
void Reconfigure(PipelineRegs::TriangleTopology topology);
|
||||
|
||||
private:
|
||||
PipelineRegs::TriangleTopology topology;
|
||||
|
||||
int buffer_index;
|
||||
VertexType buffer[2];
|
||||
bool strip_ready = false;
|
||||
bool winding = false;
|
||||
};
|
||||
|
||||
} // namespace
|
|
@ -1,67 +0,0 @@
|
|||
// Copyright 2015 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "core/hw/gpu.h"
|
||||
|
||||
struct ScreenInfo;
|
||||
|
||||
namespace Pica {
|
||||
namespace Shader {
|
||||
struct OutputVertex;
|
||||
}
|
||||
}
|
||||
|
||||
namespace VideoCore {
|
||||
|
||||
class RasterizerInterface {
|
||||
public:
|
||||
virtual ~RasterizerInterface() {}
|
||||
|
||||
/// Queues the primitive formed by the given vertices for rendering
|
||||
virtual void AddTriangle(const Pica::Shader::OutputVertex& v0,
|
||||
const Pica::Shader::OutputVertex& v1,
|
||||
const Pica::Shader::OutputVertex& v2) = 0;
|
||||
|
||||
/// Draw the current batch of triangles
|
||||
virtual void DrawTriangles() = 0;
|
||||
|
||||
/// Notify rasterizer that the specified PICA register has been changed
|
||||
virtual void NotifyPicaRegisterChanged(u32 id) = 0;
|
||||
|
||||
/// Notify rasterizer that all caches should be flushed to 3DS memory
|
||||
virtual void FlushAll() = 0;
|
||||
|
||||
/// Notify rasterizer that any caches of the specified region should be flushed to 3DS memory
|
||||
virtual void FlushRegion(PAddr addr, u64 size) = 0;
|
||||
|
||||
/// Notify rasterizer that any caches of the specified region should be flushed to 3DS memory
|
||||
/// and invalidated
|
||||
virtual void FlushAndInvalidateRegion(PAddr addr, u64 size) = 0;
|
||||
|
||||
/// Attempt to use a faster method to perform a display transfer with is_texture_copy = 0
|
||||
virtual bool AccelerateDisplayTransfer(const GPU::Regs::DisplayTransferConfig& config) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// Attempt to use a faster method to perform a display transfer with is_texture_copy = 1
|
||||
virtual bool AccelerateTextureCopy(const GPU::Regs::DisplayTransferConfig& config) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// Attempt to use a faster method to fill a region
|
||||
virtual bool AccelerateFill(const GPU::Regs::MemoryFillConfig& config) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// Attempt to use a faster method to display the framebuffer to screen
|
||||
virtual bool AccelerateDisplay(const GPU::Regs::FramebufferConfig& config,
|
||||
PAddr framebuffer_addr, u32 pixel_stride,
|
||||
ScreenInfo& screen_info) {
|
||||
return false;
|
||||
}
|
||||
};
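A minimal sketch of an implementation that satisfies the pure virtual interface above (a do-nothing backend); the NullRasterizer name is hypothetical:

class NullRasterizer final : public RasterizerInterface {
public:
    void AddTriangle(const Pica::Shader::OutputVertex&, const Pica::Shader::OutputVertex&,
                     const Pica::Shader::OutputVertex&) override {}
    void DrawTriangles() override {}
    void NotifyPicaRegisterChanged(u32) override {}
    void FlushAll() override {}
    void FlushRegion(PAddr, u64) override {}
    void FlushAndInvalidateRegion(PAddr, u64) override {}
};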
|
||||
}
|
|
@ -1,488 +0,0 @@
|
|||
// Copyright 2015 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <iterator>
|
||||
#include <utility>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "video_core/regs.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
static const std::pair<u16, const char*> register_names[] = {
|
||||
{0x010, "GPUREG_FINALIZE"},
|
||||
|
||||
{0x040, "GPUREG_FACECULLING_CONFIG"},
|
||||
{0x041, "GPUREG_VIEWPORT_WIDTH"},
|
||||
{0x042, "GPUREG_VIEWPORT_INVW"},
|
||||
{0x043, "GPUREG_VIEWPORT_HEIGHT"},
|
||||
{0x044, "GPUREG_VIEWPORT_INVH"},
|
||||
|
||||
{0x047, "GPUREG_FRAGOP_CLIP"},
|
||||
{0x048, "GPUREG_FRAGOP_CLIP_DATA0"},
|
||||
{0x049, "GPUREG_FRAGOP_CLIP_DATA1"},
|
||||
{0x04A, "GPUREG_FRAGOP_CLIP_DATA2"},
|
||||
{0x04B, "GPUREG_FRAGOP_CLIP_DATA3"},
|
||||
|
||||
{0x04D, "GPUREG_DEPTHMAP_SCALE"},
|
||||
{0x04E, "GPUREG_DEPTHMAP_OFFSET"},
|
||||
{0x04F, "GPUREG_SH_OUTMAP_TOTAL"},
|
||||
{0x050, "GPUREG_SH_OUTMAP_O0"},
|
||||
{0x051, "GPUREG_SH_OUTMAP_O1"},
|
||||
{0x052, "GPUREG_SH_OUTMAP_O2"},
|
||||
{0x053, "GPUREG_SH_OUTMAP_O3"},
|
||||
{0x054, "GPUREG_SH_OUTMAP_O4"},
|
||||
{0x055, "GPUREG_SH_OUTMAP_O5"},
|
||||
{0x056, "GPUREG_SH_OUTMAP_O6"},
|
||||
|
||||
{0x061, "GPUREG_EARLYDEPTH_FUNC"},
|
||||
{0x062, "GPUREG_EARLYDEPTH_TEST1"},
|
||||
{0x063, "GPUREG_EARLYDEPTH_CLEAR"},
|
||||
{0x064, "GPUREG_SH_OUTATTR_MODE"},
|
||||
{0x065, "GPUREG_SCISSORTEST_MODE"},
|
||||
{0x066, "GPUREG_SCISSORTEST_POS"},
|
||||
{0x067, "GPUREG_SCISSORTEST_DIM"},
|
||||
{0x068, "GPUREG_VIEWPORT_XY"},
|
||||
|
||||
{0x06A, "GPUREG_EARLYDEPTH_DATA"},
|
||||
|
||||
{0x06D, "GPUREG_DEPTHMAP_ENABLE"},
|
||||
{0x06E, "GPUREG_RENDERBUF_DIM"},
|
||||
{0x06F, "GPUREG_SH_OUTATTR_CLOCK"},
|
||||
|
||||
{0x080, "GPUREG_TEXUNIT_CONFIG"},
|
||||
{0x081, "GPUREG_TEXUNIT0_BORDER_COLOR"},
|
||||
{0x082, "GPUREG_TEXUNIT0_DIM"},
|
||||
{0x083, "GPUREG_TEXUNIT0_PARAM"},
|
||||
{0x084, "GPUREG_TEXUNIT0_LOD"},
|
||||
{0x085, "GPUREG_TEXUNIT0_ADDR1"},
|
||||
{0x086, "GPUREG_TEXUNIT0_ADDR2"},
|
||||
{0x087, "GPUREG_TEXUNIT0_ADDR3"},
|
||||
{0x088, "GPUREG_TEXUNIT0_ADDR4"},
|
||||
{0x089, "GPUREG_TEXUNIT0_ADDR5"},
|
||||
{0x08A, "GPUREG_TEXUNIT0_ADDR6"},
|
||||
{0x08B, "GPUREG_TEXUNIT0_SHADOW"},
|
||||
|
||||
{0x08E, "GPUREG_TEXUNIT0_TYPE"},
|
||||
{0x08F, "GPUREG_LIGHTING_ENABLE0"},
|
||||
|
||||
{0x091, "GPUREG_TEXUNIT1_BORDER_COLOR"},
|
||||
{0x092, "GPUREG_TEXUNIT1_DIM"},
|
||||
{0x093, "GPUREG_TEXUNIT1_PARAM"},
|
||||
{0x094, "GPUREG_TEXUNIT1_LOD"},
|
||||
{0x095, "GPUREG_TEXUNIT1_ADDR"},
|
||||
{0x096, "GPUREG_TEXUNIT1_TYPE"},
|
||||
|
||||
{0x099, "GPUREG_TEXUNIT2_BORDER_COLOR"},
|
||||
{0x09A, "GPUREG_TEXUNIT2_DIM"},
|
||||
{0x09B, "GPUREG_TEXUNIT2_PARAM"},
|
||||
{0x09C, "GPUREG_TEXUNIT2_LOD"},
|
||||
{0x09D, "GPUREG_TEXUNIT2_ADDR"},
|
||||
{0x09E, "GPUREG_TEXUNIT2_TYPE"},
|
||||
|
||||
{0x0A8, "GPUREG_TEXUNIT3_PROCTEX0"},
|
||||
{0x0A9, "GPUREG_TEXUNIT3_PROCTEX1"},
|
||||
{0x0AA, "GPUREG_TEXUNIT3_PROCTEX2"},
|
||||
{0x0AB, "GPUREG_TEXUNIT3_PROCTEX3"},
|
||||
{0x0AC, "GPUREG_TEXUNIT3_PROCTEX4"},
|
||||
{0x0AD, "GPUREG_TEXUNIT3_PROCTEX5"},
|
||||
|
||||
{0x0AF, "GPUREG_PROCTEX_LUT"},
|
||||
{0x0B0, "GPUREG_PROCTEX_LUT_DATA0"},
|
||||
{0x0B1, "GPUREG_PROCTEX_LUT_DATA1"},
|
||||
{0x0B2, "GPUREG_PROCTEX_LUT_DATA2"},
|
||||
{0x0B3, "GPUREG_PROCTEX_LUT_DATA3"},
|
||||
{0x0B4, "GPUREG_PROCTEX_LUT_DATA4"},
|
||||
{0x0B5, "GPUREG_PROCTEX_LUT_DATA5"},
|
||||
{0x0B6, "GPUREG_PROCTEX_LUT_DATA6"},
|
||||
{0x0B7, "GPUREG_PROCTEX_LUT_DATA7"},
|
||||
|
||||
{0x0C0, "GPUREG_TEXENV0_SOURCE"},
|
||||
{0x0C1, "GPUREG_TEXENV0_OPERAND"},
|
||||
{0x0C2, "GPUREG_TEXENV0_COMBINER"},
|
||||
{0x0C3, "GPUREG_TEXENV0_COLOR"},
|
||||
{0x0C4, "GPUREG_TEXENV0_SCALE"},
|
||||
|
||||
{0x0C8, "GPUREG_TEXENV1_SOURCE"},
|
||||
{0x0C9, "GPUREG_TEXENV1_OPERAND"},
|
||||
{0x0CA, "GPUREG_TEXENV1_COMBINER"},
|
||||
{0x0CB, "GPUREG_TEXENV1_COLOR"},
|
||||
{0x0CC, "GPUREG_TEXENV1_SCALE"},
|
||||
|
||||
{0x0D0, "GPUREG_TEXENV2_SOURCE"},
|
||||
{0x0D1, "GPUREG_TEXENV2_OPERAND"},
|
||||
{0x0D2, "GPUREG_TEXENV2_COMBINER"},
|
||||
{0x0D3, "GPUREG_TEXENV2_COLOR"},
|
||||
{0x0D4, "GPUREG_TEXENV2_SCALE"},
|
||||
|
||||
{0x0D8, "GPUREG_TEXENV3_SOURCE"},
|
||||
{0x0D9, "GPUREG_TEXENV3_OPERAND"},
|
||||
{0x0DA, "GPUREG_TEXENV3_COMBINER"},
|
||||
{0x0DB, "GPUREG_TEXENV3_COLOR"},
|
||||
{0x0DC, "GPUREG_TEXENV3_SCALE"},
|
||||
|
||||
{0x0E0, "GPUREG_TEXENV_UPDATE_BUFFER"},
|
||||
{0x0E1, "GPUREG_FOG_COLOR"},
|
||||
|
||||
{0x0E4, "GPUREG_GAS_ATTENUATION"},
|
||||
{0x0E5, "GPUREG_GAS_ACCMAX"},
|
||||
{0x0E6, "GPUREG_FOG_LUT_INDEX"},
|
||||
|
||||
{0x0E8, "GPUREG_FOG_LUT_DATA0"},
|
||||
{0x0E9, "GPUREG_FOG_LUT_DATA1"},
|
||||
{0x0EA, "GPUREG_FOG_LUT_DATA2"},
|
||||
{0x0EB, "GPUREG_FOG_LUT_DATA3"},
|
||||
{0x0EC, "GPUREG_FOG_LUT_DATA4"},
|
||||
{0x0ED, "GPUREG_FOG_LUT_DATA5"},
|
||||
{0x0EE, "GPUREG_FOG_LUT_DATA6"},
|
||||
{0x0EF, "GPUREG_FOG_LUT_DATA7"},
|
||||
{0x0F0, "GPUREG_TEXENV4_SOURCE"},
|
||||
{0x0F1, "GPUREG_TEXENV4_OPERAND"},
|
||||
{0x0F2, "GPUREG_TEXENV4_COMBINER"},
|
||||
{0x0F3, "GPUREG_TEXENV4_COLOR"},
|
||||
{0x0F4, "GPUREG_TEXENV4_SCALE"},
|
||||
|
||||
{0x0F8, "GPUREG_TEXENV5_SOURCE"},
|
||||
{0x0F9, "GPUREG_TEXENV5_OPERAND"},
|
||||
{0x0FA, "GPUREG_TEXENV5_COMBINER"},
|
||||
{0x0FB, "GPUREG_TEXENV5_COLOR"},
|
||||
{0x0FC, "GPUREG_TEXENV5_SCALE"},
|
||||
{0x0FD, "GPUREG_TEXENV_BUFFER_COLOR"},
|
||||
|
||||
{0x100, "GPUREG_COLOR_OPERATION"},
|
||||
{0x101, "GPUREG_BLEND_FUNC"},
|
||||
{0x102, "GPUREG_LOGIC_OP"},
|
||||
{0x103, "GPUREG_BLEND_COLOR"},
|
||||
{0x104, "GPUREG_FRAGOP_ALPHA_TEST"},
|
||||
{0x105, "GPUREG_STENCIL_TEST"},
|
||||
{0x106, "GPUREG_STENCIL_OP"},
|
||||
{0x107, "GPUREG_DEPTH_COLOR_MASK"},
|
||||
|
||||
{0x110, "GPUREG_FRAMEBUFFER_INVALIDATE"},
|
||||
{0x111, "GPUREG_FRAMEBUFFER_FLUSH"},
|
||||
{0x112, "GPUREG_COLORBUFFER_READ"},
|
||||
{0x113, "GPUREG_COLORBUFFER_WRITE"},
|
||||
{0x114, "GPUREG_DEPTHBUFFER_READ"},
|
||||
{0x115, "GPUREG_DEPTHBUFFER_WRITE"},
|
||||
{0x116, "GPUREG_DEPTHBUFFER_FORMAT"},
|
||||
{0x117, "GPUREG_COLORBUFFER_FORMAT"},
|
||||
{0x118, "GPUREG_EARLYDEPTH_TEST2"},
|
||||
|
||||
{0x11B, "GPUREG_FRAMEBUFFER_BLOCK32"},
|
||||
{0x11C, "GPUREG_DEPTHBUFFER_LOC"},
|
||||
{0x11D, "GPUREG_COLORBUFFER_LOC"},
|
||||
{0x11E, "GPUREG_FRAMEBUFFER_DIM"},
|
||||
|
||||
{0x120, "GPUREG_GAS_LIGHT_XY"},
|
||||
{0x121, "GPUREG_GAS_LIGHT_Z"},
|
||||
{0x122, "GPUREG_GAS_LIGHT_Z_COLOR"},
|
||||
{0x123, "GPUREG_GAS_LUT_INDEX"},
|
||||
{0x124, "GPUREG_GAS_LUT_DATA"},
|
||||
|
||||
{0x126, "GPUREG_GAS_DELTAZ_DEPTH"},
|
||||
|
||||
{0x130, "GPUREG_FRAGOP_SHADOW"},
|
||||
|
||||
{0x140, "GPUREG_LIGHT0_SPECULAR0"},
|
||||
{0x141, "GPUREG_LIGHT0_SPECULAR1"},
|
||||
{0x142, "GPUREG_LIGHT0_DIFFUSE"},
|
||||
{0x143, "GPUREG_LIGHT0_AMBIENT"},
|
||||
{0x144, "GPUREG_LIGHT0_XY"},
|
||||
{0x145, "GPUREG_LIGHT0_Z"},
|
||||
{0x146, "GPUREG_LIGHT0_SPOTDIR_XY"},
|
||||
{0x147, "GPUREG_LIGHT0_SPOTDIR_Z"},
|
||||
|
||||
{0x149, "GPUREG_LIGHT0_CONFIG"},
|
||||
{0x14A, "GPUREG_LIGHT0_ATTENUATION_BIAS"},
|
||||
{0x14B, "GPUREG_LIGHT0_ATTENUATION_SCALE"},
|
||||
|
||||
{0x150, "GPUREG_LIGHT1_SPECULAR0"},
|
||||
{0x151, "GPUREG_LIGHT1_SPECULAR1"},
|
||||
{0x152, "GPUREG_LIGHT1_DIFFUSE"},
|
||||
{0x153, "GPUREG_LIGHT1_AMBIENT"},
|
||||
{0x154, "GPUREG_LIGHT1_XY"},
|
||||
{0x155, "GPUREG_LIGHT1_Z"},
|
||||
{0x156, "GPUREG_LIGHT1_SPOTDIR_XY"},
|
||||
{0x157, "GPUREG_LIGHT1_SPOTDIR_Z"},
|
||||
|
||||
{0x159, "GPUREG_LIGHT1_CONFIG"},
|
||||
{0x15A, "GPUREG_LIGHT1_ATTENUATION_BIAS"},
|
||||
{0x15B, "GPUREG_LIGHT1_ATTENUATION_SCALE"},
|
||||
|
||||
{0x160, "GPUREG_LIGHT2_SPECULAR0"},
|
||||
{0x161, "GPUREG_LIGHT2_SPECULAR1"},
|
||||
{0x162, "GPUREG_LIGHT2_DIFFUSE"},
|
||||
{0x163, "GPUREG_LIGHT2_AMBIENT"},
|
||||
{0x164, "GPUREG_LIGHT2_XY"},
|
||||
{0x165, "GPUREG_LIGHT2_Z"},
|
||||
{0x166, "GPUREG_LIGHT2_SPOTDIR_XY"},
|
||||
{0x167, "GPUREG_LIGHT2_SPOTDIR_Z"},
|
||||
|
||||
{0x169, "GPUREG_LIGHT2_CONFIG"},
|
||||
{0x16A, "GPUREG_LIGHT2_ATTENUATION_BIAS"},
|
||||
{0x16B, "GPUREG_LIGHT2_ATTENUATION_SCALE"},
|
||||
|
||||
{0x170, "GPUREG_LIGHT3_SPECULAR0"},
|
||||
{0x171, "GPUREG_LIGHT3_SPECULAR1"},
|
||||
{0x172, "GPUREG_LIGHT3_DIFFUSE"},
|
||||
{0x173, "GPUREG_LIGHT3_AMBIENT"},
|
||||
{0x174, "GPUREG_LIGHT3_XY"},
|
||||
{0x175, "GPUREG_LIGHT3_Z"},
|
||||
{0x176, "GPUREG_LIGHT3_SPOTDIR_XY"},
|
||||
{0x177, "GPUREG_LIGHT3_SPOTDIR_Z"},
|
||||
|
||||
{0x179, "GPUREG_LIGHT3_CONFIG"},
|
||||
{0x17A, "GPUREG_LIGHT3_ATTENUATION_BIAS"},
|
||||
{0x17B, "GPUREG_LIGHT3_ATTENUATION_SCALE"},
|
||||
|
||||
{0x180, "GPUREG_LIGHT4_SPECULAR0"},
|
||||
{0x181, "GPUREG_LIGHT4_SPECULAR1"},
|
||||
{0x182, "GPUREG_LIGHT4_DIFFUSE"},
|
||||
{0x183, "GPUREG_LIGHT4_AMBIENT"},
|
||||
{0x184, "GPUREG_LIGHT4_XY"},
|
||||
{0x185, "GPUREG_LIGHT4_Z"},
|
||||
{0x186, "GPUREG_LIGHT4_SPOTDIR_XY"},
|
||||
{0x187, "GPUREG_LIGHT4_SPOTDIR_Z"},
|
||||
|
||||
{0x189, "GPUREG_LIGHT4_CONFIG"},
|
||||
{0x18A, "GPUREG_LIGHT4_ATTENUATION_BIAS"},
|
||||
{0x18B, "GPUREG_LIGHT4_ATTENUATION_SCALE"},
|
||||
|
||||
{0x190, "GPUREG_LIGHT5_SPECULAR0"},
|
||||
{0x191, "GPUREG_LIGHT5_SPECULAR1"},
|
||||
{0x192, "GPUREG_LIGHT5_DIFFUSE"},
|
||||
{0x193, "GPUREG_LIGHT5_AMBIENT"},
|
||||
{0x194, "GPUREG_LIGHT5_XY"},
|
||||
{0x195, "GPUREG_LIGHT5_Z"},
|
||||
{0x196, "GPUREG_LIGHT5_SPOTDIR_XY"},
|
||||
{0x197, "GPUREG_LIGHT5_SPOTDIR_Z"},
|
||||
|
||||
{0x199, "GPUREG_LIGHT5_CONFIG"},
|
||||
{0x19A, "GPUREG_LIGHT5_ATTENUATION_BIAS"},
|
||||
{0x19B, "GPUREG_LIGHT5_ATTENUATION_SCALE"},
|
||||
|
||||
{0x1A0, "GPUREG_LIGHT6_SPECULAR0"},
|
||||
{0x1A1, "GPUREG_LIGHT6_SPECULAR1"},
|
||||
{0x1A2, "GPUREG_LIGHT6_DIFFUSE"},
|
||||
{0x1A3, "GPUREG_LIGHT6_AMBIENT"},
|
||||
{0x1A4, "GPUREG_LIGHT6_XY"},
|
||||
{0x1A5, "GPUREG_LIGHT6_Z"},
|
||||
{0x1A6, "GPUREG_LIGHT6_SPOTDIR_XY"},
|
||||
{0x1A7, "GPUREG_LIGHT6_SPOTDIR_Z"},
|
||||
|
||||
{0x1A9, "GPUREG_LIGHT6_CONFIG"},
|
||||
{0x1AA, "GPUREG_LIGHT6_ATTENUATION_BIAS"},
|
||||
{0x1AB, "GPUREG_LIGHT6_ATTENUATION_SCALE"},
|
||||
|
||||
{0x1B0, "GPUREG_LIGHT7_SPECULAR0"},
|
||||
{0x1B1, "GPUREG_LIGHT7_SPECULAR1"},
|
||||
{0x1B2, "GPUREG_LIGHT7_DIFFUSE"},
|
||||
{0x1B3, "GPUREG_LIGHT7_AMBIENT"},
|
||||
{0x1B4, "GPUREG_LIGHT7_XY"},
|
||||
{0x1B5, "GPUREG_LIGHT7_Z"},
|
||||
{0x1B6, "GPUREG_LIGHT7_SPOTDIR_XY"},
|
||||
{0x1B7, "GPUREG_LIGHT7_SPOTDIR_Z"},
|
||||
|
||||
{0x1B9, "GPUREG_LIGHT7_CONFIG"},
|
||||
{0x1BA, "GPUREG_LIGHT7_ATTENUATION_BIAS"},
|
||||
{0x1BB, "GPUREG_LIGHT7_ATTENUATION_SCALE"},
|
||||
|
||||
{0x1C0, "GPUREG_LIGHTING_AMBIENT"},
|
||||
|
||||
{0x1C2, "GPUREG_LIGHTING_NUM_LIGHTS"},
|
||||
{0x1C3, "GPUREG_LIGHTING_CONFIG0"},
|
||||
{0x1C4, "GPUREG_LIGHTING_CONFIG1"},
|
||||
{0x1C5, "GPUREG_LIGHTING_LUT_INDEX"},
|
||||
{0x1C6, "GPUREG_LIGHTING_ENABLE1"},
|
||||
|
||||
{0x1C8, "GPUREG_LIGHTING_LUT_DATA0"},
|
||||
{0x1C9, "GPUREG_LIGHTING_LUT_DATA1"},
|
||||
{0x1CA, "GPUREG_LIGHTING_LUT_DATA2"},
|
||||
{0x1CB, "GPUREG_LIGHTING_LUT_DATA3"},
|
||||
{0x1CC, "GPUREG_LIGHTING_LUT_DATA4"},
|
||||
{0x1CD, "GPUREG_LIGHTING_LUT_DATA5"},
|
||||
{0x1CE, "GPUREG_LIGHTING_LUT_DATA6"},
|
||||
{0x1CF, "GPUREG_LIGHTING_LUT_DATA7"},
|
||||
{0x1D0, "GPUREG_LIGHTING_LUTINPUT_ABS"},
|
||||
{0x1D1, "GPUREG_LIGHTING_LUTINPUT_SELECT"},
|
||||
{0x1D2, "GPUREG_LIGHTING_LUTINPUT_SCALE"},
|
||||
|
||||
{0x1D9, "GPUREG_LIGHTING_LIGHT_PERMUTATION"},
|
||||
|
||||
{0x200, "GPUREG_ATTRIBBUFFERS_LOC"},
|
||||
{0x201, "GPUREG_ATTRIBBUFFERS_FORMAT_LOW"},
|
||||
{0x202, "GPUREG_ATTRIBBUFFERS_FORMAT_HIGH"},
|
||||
{0x203, "GPUREG_ATTRIBBUFFER0_OFFSET"},
|
||||
{0x204, "GPUREG_ATTRIBBUFFER0_CONFIG1"},
|
||||
{0x205, "GPUREG_ATTRIBBUFFER0_CONFIG2"},
|
||||
{0x206, "GPUREG_ATTRIBBUFFER1_OFFSET"},
|
||||
{0x207, "GPUREG_ATTRIBBUFFER1_CONFIG1"},
|
||||
{0x208, "GPUREG_ATTRIBBUFFER1_CONFIG2"},
|
||||
{0x209, "GPUREG_ATTRIBBUFFER2_OFFSET"},
|
||||
{0x20A, "GPUREG_ATTRIBBUFFER2_CONFIG1"},
|
||||
{0x20B, "GPUREG_ATTRIBBUFFER2_CONFIG2"},
|
||||
{0x20C, "GPUREG_ATTRIBBUFFER3_OFFSET"},
|
||||
{0x20D, "GPUREG_ATTRIBBUFFER3_CONFIG1"},
|
||||
{0x20E, "GPUREG_ATTRIBBUFFER3_CONFIG2"},
|
||||
{0x20F, "GPUREG_ATTRIBBUFFER4_OFFSET"},
|
||||
{0x210, "GPUREG_ATTRIBBUFFER4_CONFIG1"},
|
||||
{0x211, "GPUREG_ATTRIBBUFFER4_CONFIG2"},
|
||||
{0x212, "GPUREG_ATTRIBBUFFER5_OFFSET"},
|
||||
{0x213, "GPUREG_ATTRIBBUFFER5_CONFIG1"},
|
||||
{0x214, "GPUREG_ATTRIBBUFFER5_CONFIG2"},
|
||||
{0x215, "GPUREG_ATTRIBBUFFER6_OFFSET"},
|
||||
{0x216, "GPUREG_ATTRIBBUFFER6_CONFIG1"},
|
||||
{0x217, "GPUREG_ATTRIBBUFFER6_CONFIG2"},
|
||||
{0x218, "GPUREG_ATTRIBBUFFER7_OFFSET"},
|
||||
{0x219, "GPUREG_ATTRIBBUFFER7_CONFIG1"},
|
||||
{0x21A, "GPUREG_ATTRIBBUFFER7_CONFIG2"},
|
||||
{0x21B, "GPUREG_ATTRIBBUFFER8_OFFSET"},
|
||||
{0x21C, "GPUREG_ATTRIBBUFFER8_CONFIG1"},
|
||||
{0x21D, "GPUREG_ATTRIBBUFFER8_CONFIG2"},
|
||||
{0x21E, "GPUREG_ATTRIBBUFFER9_OFFSET"},
|
||||
{0x21F, "GPUREG_ATTRIBBUFFER9_CONFIG1"},
|
||||
{0x220, "GPUREG_ATTRIBBUFFER9_CONFIG2"},
|
||||
{0x221, "GPUREG_ATTRIBBUFFER10_OFFSET"},
|
||||
{0x222, "GPUREG_ATTRIBBUFFER10_CONFIG1"},
|
||||
{0x223, "GPUREG_ATTRIBBUFFER10_CONFIG2"},
|
||||
{0x224, "GPUREG_ATTRIBBUFFER11_OFFSET"},
|
||||
{0x225, "GPUREG_ATTRIBBUFFER11_CONFIG1"},
|
||||
{0x226, "GPUREG_ATTRIBBUFFER11_CONFIG2"},
|
||||
{0x227, "GPUREG_INDEXBUFFER_CONFIG"},
|
||||
{0x228, "GPUREG_NUMVERTICES"},
|
||||
{0x229, "GPUREG_GEOSTAGE_CONFIG"},
|
||||
{0x22A, "GPUREG_VERTEX_OFFSET"},
|
||||
|
||||
{0x22D, "GPUREG_POST_VERTEX_CACHE_NUM"},
|
||||
{0x22E, "GPUREG_DRAWARRAYS"},
|
||||
{0x22F, "GPUREG_DRAWELEMENTS"},
|
||||
|
||||
{0x231, "GPUREG_VTX_FUNC"},
|
||||
{0x232, "GPUREG_FIXEDATTRIB_INDEX"},
|
||||
{0x233, "GPUREG_FIXEDATTRIB_DATA0"},
|
||||
{0x234, "GPUREG_FIXEDATTRIB_DATA1"},
|
||||
{0x235, "GPUREG_FIXEDATTRIB_DATA2"},
|
||||
|
||||
{0x238, "GPUREG_CMDBUF_SIZE0"},
|
||||
{0x239, "GPUREG_CMDBUF_SIZE1"},
|
||||
{0x23A, "GPUREG_CMDBUF_ADDR0"},
|
||||
{0x23B, "GPUREG_CMDBUF_ADDR1"},
|
||||
{0x23C, "GPUREG_CMDBUF_JUMP0"},
|
||||
{0x23D, "GPUREG_CMDBUF_JUMP1"},
|
||||
|
||||
{0x242, "GPUREG_VSH_NUM_ATTR"},
|
||||
|
||||
{0x244, "GPUREG_VSH_COM_MODE"},
|
||||
{0x245, "GPUREG_START_DRAW_FUNC0"},
|
||||
|
||||
{0x24A, "GPUREG_VSH_OUTMAP_TOTAL1"},
|
||||
|
||||
{0x251, "GPUREG_VSH_OUTMAP_TOTAL2"},
|
||||
{0x252, "GPUREG_GSH_MISC0"},
|
||||
{0x253, "GPUREG_GEOSTAGE_CONFIG2"},
|
||||
{0x254, "GPUREG_GSH_MISC1"},
|
||||
|
||||
{0x25E, "GPUREG_PRIMITIVE_CONFIG"},
|
||||
{0x25F, "GPUREG_RESTART_PRIMITIVE"},
|
||||
|
||||
{0x280, "GPUREG_GSH_BOOLUNIFORM"},
|
||||
{0x281, "GPUREG_GSH_INTUNIFORM_I0"},
|
||||
{0x282, "GPUREG_GSH_INTUNIFORM_I1"},
|
||||
{0x283, "GPUREG_GSH_INTUNIFORM_I2"},
|
||||
{0x284, "GPUREG_GSH_INTUNIFORM_I3"},
|
||||
|
||||
{0x289, "GPUREG_GSH_INPUTBUFFER_CONFIG"},
|
||||
{0x28A, "GPUREG_GSH_ENTRYPOINT"},
|
||||
{0x28B, "GPUREG_GSH_ATTRIBUTES_PERMUTATION_LOW"},
|
||||
{0x28C, "GPUREG_GSH_ATTRIBUTES_PERMUTATION_HIGH"},
|
||||
{0x28D, "GPUREG_GSH_OUTMAP_MASK"},
|
||||
|
||||
{0x28F, "GPUREG_GSH_CODETRANSFER_END"},
|
||||
{0x290, "GPUREG_GSH_FLOATUNIFORM_INDEX"},
|
||||
{0x291, "GPUREG_GSH_FLOATUNIFORM_DATA0"},
|
||||
{0x292, "GPUREG_GSH_FLOATUNIFORM_DATA1"},
|
||||
{0x293, "GPUREG_GSH_FLOATUNIFORM_DATA2"},
|
||||
{0x294, "GPUREG_GSH_FLOATUNIFORM_DATA3"},
|
||||
{0x295, "GPUREG_GSH_FLOATUNIFORM_DATA4"},
|
||||
{0x296, "GPUREG_GSH_FLOATUNIFORM_DATA5"},
|
||||
{0x297, "GPUREG_GSH_FLOATUNIFORM_DATA6"},
|
||||
{0x298, "GPUREG_GSH_FLOATUNIFORM_DATA7"},
|
||||
|
||||
{0x29B, "GPUREG_GSH_CODETRANSFER_INDEX"},
|
||||
{0x29C, "GPUREG_GSH_CODETRANSFER_DATA0"},
|
||||
{0x29D, "GPUREG_GSH_CODETRANSFER_DATA1"},
|
||||
{0x29E, "GPUREG_GSH_CODETRANSFER_DATA2"},
|
||||
{0x29F, "GPUREG_GSH_CODETRANSFER_DATA3"},
|
||||
{0x2A0, "GPUREG_GSH_CODETRANSFER_DATA4"},
|
||||
{0x2A1, "GPUREG_GSH_CODETRANSFER_DATA5"},
|
||||
{0x2A2, "GPUREG_GSH_CODETRANSFER_DATA6"},
|
||||
{0x2A3, "GPUREG_GSH_CODETRANSFER_DATA7"},
|
||||
|
||||
{0x2A5, "GPUREG_GSH_OPDESCS_INDEX"},
|
||||
{0x2A6, "GPUREG_GSH_OPDESCS_DATA0"},
|
||||
{0x2A7, "GPUREG_GSH_OPDESCS_DATA1"},
|
||||
{0x2A8, "GPUREG_GSH_OPDESCS_DATA2"},
|
||||
{0x2A9, "GPUREG_GSH_OPDESCS_DATA3"},
|
||||
{0x2AA, "GPUREG_GSH_OPDESCS_DATA4"},
|
||||
{0x2AB, "GPUREG_GSH_OPDESCS_DATA5"},
|
||||
{0x2AC, "GPUREG_GSH_OPDESCS_DATA6"},
|
||||
{0x2AD, "GPUREG_GSH_OPDESCS_DATA7"},
|
||||
|
||||
{0x2B0, "GPUREG_VSH_BOOLUNIFORM"},
|
||||
{0x2B1, "GPUREG_VSH_INTUNIFORM_I0"},
|
||||
{0x2B2, "GPUREG_VSH_INTUNIFORM_I1"},
|
||||
{0x2B3, "GPUREG_VSH_INTUNIFORM_I2"},
|
||||
{0x2B4, "GPUREG_VSH_INTUNIFORM_I3"},
|
||||
|
||||
{0x2B9, "GPUREG_VSH_INPUTBUFFER_CONFIG"},
|
||||
{0x2BA, "GPUREG_VSH_ENTRYPOINT"},
|
||||
{0x2BB, "GPUREG_VSH_ATTRIBUTES_PERMUTATION_LOW"},
|
||||
{0x2BC, "GPUREG_VSH_ATTRIBUTES_PERMUTATION_HIGH"},
|
||||
{0x2BD, "GPUREG_VSH_OUTMAP_MASK"},
|
||||
|
||||
{0x2BF, "GPUREG_VSH_CODETRANSFER_END"},
|
||||
{0x2C0, "GPUREG_VSH_FLOATUNIFORM_INDEX"},
|
||||
{0x2C1, "GPUREG_VSH_FLOATUNIFORM_DATA0"},
|
||||
{0x2C2, "GPUREG_VSH_FLOATUNIFORM_DATA1"},
|
||||
{0x2C3, "GPUREG_VSH_FLOATUNIFORM_DATA2"},
|
||||
{0x2C4, "GPUREG_VSH_FLOATUNIFORM_DATA3"},
|
||||
{0x2C5, "GPUREG_VSH_FLOATUNIFORM_DATA4"},
|
||||
{0x2C6, "GPUREG_VSH_FLOATUNIFORM_DATA5"},
|
||||
{0x2C7, "GPUREG_VSH_FLOATUNIFORM_DATA6"},
|
||||
{0x2C8, "GPUREG_VSH_FLOATUNIFORM_DATA7"},
|
||||
|
||||
{0x2CB, "GPUREG_VSH_CODETRANSFER_INDEX"},
|
||||
{0x2CC, "GPUREG_VSH_CODETRANSFER_DATA0"},
|
||||
{0x2CD, "GPUREG_VSH_CODETRANSFER_DATA1"},
|
||||
{0x2CE, "GPUREG_VSH_CODETRANSFER_DATA2"},
|
||||
{0x2CF, "GPUREG_VSH_CODETRANSFER_DATA3"},
|
||||
{0x2D0, "GPUREG_VSH_CODETRANSFER_DATA4"},
|
||||
{0x2D1, "GPUREG_VSH_CODETRANSFER_DATA5"},
|
||||
{0x2D2, "GPUREG_VSH_CODETRANSFER_DATA6"},
|
||||
{0x2D3, "GPUREG_VSH_CODETRANSFER_DATA7"},
|
||||
|
||||
{0x2D5, "GPUREG_VSH_OPDESCS_INDEX"},
|
||||
{0x2D6, "GPUREG_VSH_OPDESCS_DATA0"},
|
||||
{0x2D7, "GPUREG_VSH_OPDESCS_DATA1"},
|
||||
{0x2D8, "GPUREG_VSH_OPDESCS_DATA2"},
|
||||
{0x2D9, "GPUREG_VSH_OPDESCS_DATA3"},
|
||||
{0x2DA, "GPUREG_VSH_OPDESCS_DATA4"},
|
||||
{0x2DB, "GPUREG_VSH_OPDESCS_DATA5"},
|
||||
{0x2DC, "GPUREG_VSH_OPDESCS_DATA6"},
|
||||
{0x2DD, "GPUREG_VSH_OPDESCS_DATA7"},
|
||||
};
|
||||
|
||||
const char* Regs::GetRegisterName(u16 index) {
|
||||
auto found = std::lower_bound(std::begin(register_names), std::end(register_names), index,
|
||||
[](auto p, auto i) { return p.first < i; });
|
||||
if (found != std::end(register_names) && found->first == index) {
|
||||
return found->second;
|
||||
} else {
|
||||
// Return an empty string if no match is found
|
||||
return "";
|
||||
}
|
||||
}
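// Illustrative examples (not in the original file): GetRegisterName(0x041) yields
// "GPUREG_VIEWPORT_WIDTH", while an index that is absent from the table (e.g. 0x0FF) or past
// its last entry returns an empty string thanks to the end() check above.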
|
||||
|
||||
} // namespace Pica
|
|
@ -1,149 +0,0 @@
|
|||
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <cstddef>
#include <string>
#ifndef _MSC_VER
#include <type_traits> // for std::enable_if
#endif

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/regs_framebuffer.h"
#include "video_core/regs_lighting.h"
#include "video_core/regs_pipeline.h"
#include "video_core/regs_rasterizer.h"
#include "video_core/regs_shader.h"
#include "video_core/regs_texturing.h"

namespace Pica {

// Returns index corresponding to the Regs member labeled by field_name
// TODO: Due to Visual studio bug 209229, offsetof does not return constant expressions
// when used with array elements (e.g. PICA_REG_INDEX(vs_uniform_setup.set_value[1])).
// For details cf.
// https://connect.microsoft.com/VisualStudio/feedback/details/209229/offsetof-does-not-produce-a-constant-expression-for-array-members
// Hopefully, this will be fixed sometime in the future.
// For lack of better alternatives, we currently hardcode the offsets when constant
// expressions are needed via PICA_REG_INDEX_WORKAROUND (on sane compilers, static_asserts
// will then make sure the offsets indeed match the automatically calculated ones).
#define PICA_REG_INDEX(field_name) (offsetof(Pica::Regs, field_name) / sizeof(u32))
#if defined(_MSC_VER)
#define PICA_REG_INDEX_WORKAROUND(field_name, backup_workaround_index) (backup_workaround_index)
#else
// NOTE: Yeah, hacking in a static_assert here just to workaround the lacking MSVC compiler
// really is this annoying. This macro just forwards its first argument to PICA_REG_INDEX
// and then performs a (no-op) cast to size_t iff the second argument matches the expected
// field offset. Otherwise, the compiler will fail to compile this code.
#define PICA_REG_INDEX_WORKAROUND(field_name, backup_workaround_index) \
    ((typename std::enable_if<backup_workaround_index == PICA_REG_INDEX(field_name), \
                              size_t>::type)PICA_REG_INDEX(field_name))
#endif // _MSC_VER

struct Regs {
    static constexpr size_t NUM_REGS = 0x300;

    union {
        struct {
            INSERT_PADDING_WORDS(0x10);
            u32 trigger_irq;
            INSERT_PADDING_WORDS(0x2f);
            RasterizerRegs rasterizer;
            TexturingRegs texturing;
            FramebufferRegs framebuffer;
            LightingRegs lighting;
            PipelineRegs pipeline;
            ShaderRegs gs;
            ShaderRegs vs;
            INSERT_PADDING_WORDS(0x20);
        };
        std::array<u32, NUM_REGS> reg_array;
    };

    /// Map register indices to names readable by humans
    static const char* GetRegisterName(u16 index);
};

static_assert(sizeof(Regs) == Regs::NUM_REGS * sizeof(u32), "Regs struct has wrong size");

// TODO: MSVC does not support using offsetof() on non-static data members even though this
// is technically allowed since C++11. This macro should be enabled once MSVC adds
// support for that.
#ifndef _MSC_VER
#define ASSERT_REG_POSITION(field_name, position) \
    static_assert(offsetof(Regs, field_name) == position * 4, \
                  "Field " #field_name " has invalid position")

ASSERT_REG_POSITION(trigger_irq, 0x10);

ASSERT_REG_POSITION(rasterizer, 0x40);
ASSERT_REG_POSITION(rasterizer.cull_mode, 0x40);
ASSERT_REG_POSITION(rasterizer.viewport_size_x, 0x41);
ASSERT_REG_POSITION(rasterizer.viewport_size_y, 0x43);
ASSERT_REG_POSITION(rasterizer.viewport_depth_range, 0x4d);
ASSERT_REG_POSITION(rasterizer.viewport_depth_near_plane, 0x4e);
ASSERT_REG_POSITION(rasterizer.vs_output_attributes[0], 0x50);
ASSERT_REG_POSITION(rasterizer.vs_output_attributes[1], 0x51);
ASSERT_REG_POSITION(rasterizer.scissor_test, 0x65);
ASSERT_REG_POSITION(rasterizer.viewport_corner, 0x68);
ASSERT_REG_POSITION(rasterizer.depthmap_enable, 0x6D);

ASSERT_REG_POSITION(texturing, 0x80);
ASSERT_REG_POSITION(texturing.main_config, 0x80);
ASSERT_REG_POSITION(texturing.texture0, 0x81);
ASSERT_REG_POSITION(texturing.texture0_format, 0x8e);
ASSERT_REG_POSITION(texturing.fragment_lighting_enable, 0x8f);
ASSERT_REG_POSITION(texturing.texture1, 0x91);
ASSERT_REG_POSITION(texturing.texture1_format, 0x96);
ASSERT_REG_POSITION(texturing.texture2, 0x99);
ASSERT_REG_POSITION(texturing.texture2_format, 0x9e);
ASSERT_REG_POSITION(texturing.proctex, 0xa8);
ASSERT_REG_POSITION(texturing.proctex_noise_u, 0xa9);
ASSERT_REG_POSITION(texturing.proctex_noise_v, 0xaa);
ASSERT_REG_POSITION(texturing.proctex_noise_frequency, 0xab);
ASSERT_REG_POSITION(texturing.proctex_lut, 0xac);
ASSERT_REG_POSITION(texturing.proctex_lut_offset, 0xad);
ASSERT_REG_POSITION(texturing.proctex_lut_config, 0xaf);
ASSERT_REG_POSITION(texturing.tev_stage0, 0xc0);
ASSERT_REG_POSITION(texturing.tev_stage1, 0xc8);
ASSERT_REG_POSITION(texturing.tev_stage2, 0xd0);
ASSERT_REG_POSITION(texturing.tev_stage3, 0xd8);
ASSERT_REG_POSITION(texturing.tev_combiner_buffer_input, 0xe0);
ASSERT_REG_POSITION(texturing.fog_mode, 0xe0);
ASSERT_REG_POSITION(texturing.fog_color, 0xe1);
ASSERT_REG_POSITION(texturing.fog_lut_offset, 0xe6);
ASSERT_REG_POSITION(texturing.fog_lut_data, 0xe8);
ASSERT_REG_POSITION(texturing.tev_stage4, 0xf0);
ASSERT_REG_POSITION(texturing.tev_stage5, 0xf8);
ASSERT_REG_POSITION(texturing.tev_combiner_buffer_color, 0xfd);

ASSERT_REG_POSITION(framebuffer, 0x100);
ASSERT_REG_POSITION(framebuffer.output_merger, 0x100);
ASSERT_REG_POSITION(framebuffer.framebuffer, 0x110);

ASSERT_REG_POSITION(lighting, 0x140);

ASSERT_REG_POSITION(pipeline, 0x200);
ASSERT_REG_POSITION(pipeline.vertex_attributes, 0x200);
ASSERT_REG_POSITION(pipeline.index_array, 0x227);
ASSERT_REG_POSITION(pipeline.num_vertices, 0x228);
ASSERT_REG_POSITION(pipeline.vertex_offset, 0x22a);
ASSERT_REG_POSITION(pipeline.trigger_draw, 0x22e);
ASSERT_REG_POSITION(pipeline.trigger_draw_indexed, 0x22f);
ASSERT_REG_POSITION(pipeline.vs_default_attributes_setup, 0x232);
ASSERT_REG_POSITION(pipeline.command_buffer, 0x238);
ASSERT_REG_POSITION(pipeline.gpu_mode, 0x245);
ASSERT_REG_POSITION(pipeline.triangle_topology, 0x25e);
ASSERT_REG_POSITION(pipeline.restart_primitive, 0x25f);

ASSERT_REG_POSITION(gs, 0x280);
ASSERT_REG_POSITION(vs, 0x2b0);

#undef ASSERT_REG_POSITION
#endif // !defined(_MSC_VER)

} // namespace Pica
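(Orientation note, again not part of the commit: the union above lets the command processor write registers by raw index while the rest of the code reads them through named fields. A minimal sketch of that equivalence, assuming the Regs layout and PICA_REG_INDEX macro shown above; the header path and example function are hypothetical.)

// Illustrative sketch only.
#include "video_core/regs.h" // assumed header name for the Regs struct above

static void WriteByIndexExample() {
    Pica::Regs regs{};
    // A raw indexed write, as a GPU command list would perform it...
    regs.reg_array[PICA_REG_INDEX(trigger_irq)] = 1;
    // ...is observable through the corresponding named member of the union.
    u32 value = regs.trigger_irq; // value == 1
    (void)value;
}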
@ -1,283 +0,0 @@
// Copyright 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
struct FramebufferRegs {
|
||||
enum class LogicOp : u32 {
|
||||
Clear = 0,
|
||||
And = 1,
|
||||
AndReverse = 2,
|
||||
Copy = 3,
|
||||
Set = 4,
|
||||
CopyInverted = 5,
|
||||
NoOp = 6,
|
||||
Invert = 7,
|
||||
Nand = 8,
|
||||
Or = 9,
|
||||
Nor = 10,
|
||||
Xor = 11,
|
||||
Equiv = 12,
|
||||
AndInverted = 13,
|
||||
OrReverse = 14,
|
||||
OrInverted = 15,
|
||||
};
|
||||
|
||||
enum class BlendEquation : u32 {
|
||||
Add = 0,
|
||||
Subtract = 1,
|
||||
ReverseSubtract = 2,
|
||||
Min = 3,
|
||||
Max = 4,
|
||||
};
|
||||
|
||||
enum class BlendFactor : u32 {
|
||||
Zero = 0,
|
||||
One = 1,
|
||||
SourceColor = 2,
|
||||
OneMinusSourceColor = 3,
|
||||
DestColor = 4,
|
||||
OneMinusDestColor = 5,
|
||||
SourceAlpha = 6,
|
||||
OneMinusSourceAlpha = 7,
|
||||
DestAlpha = 8,
|
||||
OneMinusDestAlpha = 9,
|
||||
ConstantColor = 10,
|
||||
OneMinusConstantColor = 11,
|
||||
ConstantAlpha = 12,
|
||||
OneMinusConstantAlpha = 13,
|
||||
SourceAlphaSaturate = 14,
|
||||
};
|
||||
|
||||
enum class CompareFunc : u32 {
|
||||
Never = 0,
|
||||
Always = 1,
|
||||
Equal = 2,
|
||||
NotEqual = 3,
|
||||
LessThan = 4,
|
||||
LessThanOrEqual = 5,
|
||||
GreaterThan = 6,
|
||||
GreaterThanOrEqual = 7,
|
||||
};
|
||||
|
||||
enum class StencilAction : u32 {
|
||||
Keep = 0,
|
||||
Zero = 1,
|
||||
Replace = 2,
|
||||
Increment = 3,
|
||||
Decrement = 4,
|
||||
Invert = 5,
|
||||
IncrementWrap = 6,
|
||||
DecrementWrap = 7,
|
||||
};
|
||||
|
||||
struct {
|
||||
union {
|
||||
// If false, logic blending is used
|
||||
BitField<8, 1, u32> alphablend_enable;
|
||||
};
|
||||
|
||||
union {
|
||||
BitField<0, 3, BlendEquation> blend_equation_rgb;
|
||||
BitField<8, 3, BlendEquation> blend_equation_a;
|
||||
|
||||
BitField<16, 4, BlendFactor> factor_source_rgb;
|
||||
BitField<20, 4, BlendFactor> factor_dest_rgb;
|
||||
|
||||
BitField<24, 4, BlendFactor> factor_source_a;
|
||||
BitField<28, 4, BlendFactor> factor_dest_a;
|
||||
} alpha_blending;
|
||||
|
||||
union {
|
||||
BitField<0, 4, LogicOp> logic_op;
|
||||
};
|
||||
|
||||
union {
|
||||
u32 raw;
|
||||
BitField<0, 8, u32> r;
|
||||
BitField<8, 8, u32> g;
|
||||
BitField<16, 8, u32> b;
|
||||
BitField<24, 8, u32> a;
|
||||
} blend_const;
|
||||
|
||||
union {
|
||||
BitField<0, 1, u32> enable;
|
||||
BitField<4, 3, CompareFunc> func;
|
||||
BitField<8, 8, u32> ref;
|
||||
} alpha_test;
|
||||
|
||||
struct {
|
||||
union {
|
||||
// Raw value of this register
|
||||
u32 raw_func;
|
||||
|
||||
// If true, enable stencil testing
|
||||
BitField<0, 1, u32> enable;
|
||||
|
||||
// Comparison operation for stencil testing
|
||||
BitField<4, 3, CompareFunc> func;
|
||||
|
||||
// Mask used to control writing to the stencil buffer
|
||||
BitField<8, 8, u32> write_mask;
|
||||
|
||||
// Value to compare against for stencil testing
|
||||
BitField<16, 8, u32> reference_value;
|
||||
|
||||
// Mask to apply on stencil test inputs
|
||||
BitField<24, 8, u32> input_mask;
|
||||
};
|
||||
|
||||
union {
|
||||
// Raw value of this register
|
||||
u32 raw_op;
|
||||
|
||||
// Action to perform when the stencil test fails
|
||||
BitField<0, 3, StencilAction> action_stencil_fail;
|
||||
|
||||
// Action to perform when stencil testing passed but depth testing fails
|
||||
BitField<4, 3, StencilAction> action_depth_fail;
|
||||
|
||||
// Action to perform when both stencil and depth testing pass
|
||||
BitField<8, 3, StencilAction> action_depth_pass;
|
||||
};
|
||||
} stencil_test;
|
||||
|
||||
union {
|
||||
BitField<0, 1, u32> depth_test_enable;
|
||||
BitField<4, 3, CompareFunc> depth_test_func;
|
||||
BitField<8, 1, u32> red_enable;
|
||||
BitField<9, 1, u32> green_enable;
|
||||
BitField<10, 1, u32> blue_enable;
|
||||
BitField<11, 1, u32> alpha_enable;
|
||||
BitField<12, 1, u32> depth_write_enable;
|
||||
};
|
||||
|
||||
INSERT_PADDING_WORDS(0x8);
|
||||
} output_merger;
|
||||
|
||||
// Components are laid out in reverse byte order, most significant bits first.
|
||||
enum class ColorFormat : u32 {
|
||||
RGBA8 = 0,
|
||||
RGB8 = 1,
|
||||
RGB5A1 = 2,
|
||||
RGB565 = 3,
|
||||
RGBA4 = 4,
|
||||
};
|
||||
|
||||
enum class DepthFormat : u32 {
|
||||
D16 = 0,
|
||||
D24 = 2,
|
||||
D24S8 = 3,
|
||||
};
|
||||
|
||||
// Returns the number of bytes in the specified color format
|
||||
static unsigned BytesPerColorPixel(ColorFormat format) {
|
||||
switch (format) {
|
||||
case ColorFormat::RGBA8:
|
||||
return 4;
|
||||
case ColorFormat::RGB8:
|
||||
return 3;
|
||||
case ColorFormat::RGB5A1:
|
||||
case ColorFormat::RGB565:
|
||||
case ColorFormat::RGBA4:
|
||||
return 2;
|
||||
default:
|
||||
LOG_CRITICAL(HW_GPU, "Unknown color format %u", format);
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
}
|
||||
|
||||
struct FramebufferConfig {
|
||||
INSERT_PADDING_WORDS(0x3);
|
||||
|
||||
union {
|
||||
BitField<0, 4, u32> allow_color_write; // 0 = disable, else enable
|
||||
};
|
||||
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
|
||||
union {
|
||||
BitField<0, 2, u32> allow_depth_stencil_write; // 0 = disable, else enable
|
||||
};
|
||||
|
||||
BitField<0, 2, DepthFormat> depth_format;
|
||||
|
||||
BitField<16, 3, ColorFormat> color_format;
|
||||
|
||||
INSERT_PADDING_WORDS(0x4);
|
||||
|
||||
BitField<0, 28, u32> depth_buffer_address;
|
||||
BitField<0, 28, u32> color_buffer_address;
|
||||
|
||||
union {
|
||||
// Apparently, the framebuffer width is stored as expected,
|
||||
// while the height is stored as the actual height minus one.
|
||||
// Hence, don't access these fields directly but use the accessors
|
||||
// GetWidth() and GetHeight() instead.
|
||||
BitField<0, 11, u32> width;
|
||||
BitField<12, 10, u32> height;
|
||||
};
|
||||
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
|
||||
inline PAddr GetColorBufferPhysicalAddress() const {
|
||||
return color_buffer_address * 8;
|
||||
}
|
||||
inline PAddr GetDepthBufferPhysicalAddress() const {
|
||||
return depth_buffer_address * 8;
|
||||
}
|
||||
|
||||
inline u32 GetWidth() const {
|
||||
return width;
|
||||
}
|
||||
|
||||
inline u32 GetHeight() const {
|
||||
return height + 1;
|
||||
}
|
||||
} framebuffer;
|
||||
|
||||
// Returns the number of bytes in the specified depth format
|
||||
static u32 BytesPerDepthPixel(DepthFormat format) {
|
||||
switch (format) {
|
||||
case DepthFormat::D16:
|
||||
return 2;
|
||||
case DepthFormat::D24:
|
||||
return 3;
|
||||
case DepthFormat::D24S8:
|
||||
return 4;
|
||||
}
|
||||
|
||||
ASSERT_MSG(false, "Unknown depth format %u", format);
|
||||
}
|
||||
|
||||
// Returns the number of bits per depth component of the specified depth format
|
||||
static u32 DepthBitsPerPixel(DepthFormat format) {
|
||||
switch (format) {
|
||||
case DepthFormat::D16:
|
||||
return 16;
|
||||
case DepthFormat::D24:
|
||||
case DepthFormat::D24S8:
|
||||
return 24;
|
||||
}
|
||||
|
||||
ASSERT_MSG(false, "Unknown depth format %u", format);
|
||||
}
|
||||
|
||||
INSERT_PADDING_WORDS(0x20);
|
||||
};
|
||||
|
||||
static_assert(sizeof(FramebufferRegs) == 0x40 * sizeof(u32),
|
||||
"FramebufferRegs struct has incorrect size");
|
||||
|
||||
} // namespace Pica
|
|
@ -1,321 +0,0 @@
|
|||
// Copyright 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/vector_math.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
struct LightingRegs {
|
||||
enum class LightingSampler {
|
||||
Distribution0 = 0,
|
||||
Distribution1 = 1,
|
||||
Fresnel = 3,
|
||||
ReflectBlue = 4,
|
||||
ReflectGreen = 5,
|
||||
ReflectRed = 6,
|
||||
SpotlightAttenuation = 8,
|
||||
DistanceAttenuation = 16,
|
||||
};
|
||||
|
||||
static constexpr unsigned NumLightingSampler = 24;
|
||||
|
||||
static LightingSampler SpotlightAttenuationSampler(unsigned index) {
|
||||
return static_cast<LightingSampler>(
|
||||
static_cast<unsigned>(LightingSampler::SpotlightAttenuation) + index);
|
||||
}
|
||||
|
||||
static LightingSampler DistanceAttenuationSampler(unsigned index) {
|
||||
return static_cast<LightingSampler>(
|
||||
static_cast<unsigned>(LightingSampler::DistanceAttenuation) + index);
|
||||
}
|
||||
|
||||
/**
|
||||
* Pica fragment lighting supports using different LUTs for each lighting component: Reflectance
|
||||
* R, G, and B channels, distribution function for specular components 0 and 1, fresnel factor,
|
||||
* and spotlight attenuation. Furthermore, which LUTs are used for each channel (or whether a
|
||||
* channel is enabled at all) is specified by various pre-defined lighting configurations. With
|
||||
* configurations that require more LUTs, more cycles are required on HW to perform lighting
|
||||
* computations.
|
||||
*/
|
||||
enum class LightingConfig : u32 {
|
||||
Config0 = 0, ///< Reflect Red, Distribution 0, Spotlight
|
||||
Config1 = 1, ///< Reflect Red, Fresnel, Spotlight
|
||||
Config2 = 2, ///< Reflect Red, Distribution 0/1
|
||||
Config3 = 3, ///< Distribution 0/1, Fresnel
|
||||
Config4 = 4, ///< Reflect Red/Green/Blue, Distribution 0/1, Spotlight
|
||||
Config5 = 5, ///< Reflect Red/Green/Blue, Distribution 0, Fresnel, Spotlight
|
||||
Config6 = 6, ///< Reflect Red, Distribution 0/1, Fresnel, Spotlight
|
||||
|
||||
Config7 = 8, ///< Reflect Red/Green/Blue, Distribution 0/1, Fresnel, Spotlight
|
||||
///< NOTE: '8' is intentional, '7' does not appear to be a valid configuration
|
||||
};
|
||||
|
||||
/// Selects which lighting components are affected by fresnel
|
||||
enum class LightingFresnelSelector : u32 {
|
||||
None = 0, ///< Fresnel is disabled
|
||||
PrimaryAlpha = 1, ///< Primary (diffuse) lighting alpha is affected by fresnel
|
||||
SecondaryAlpha = 2, ///< Secondary (specular) lighting alpha is affected by fresnel
|
||||
Both =
|
||||
PrimaryAlpha |
|
||||
SecondaryAlpha, ///< Both primary and secondary lighting alphas are affected by fresnel
|
||||
};
|
||||
|
||||
/// Factor used to scale the output of a lighting LUT
|
||||
enum class LightingScale : u32 {
|
||||
Scale1 = 0, ///< Scale is 1x
|
||||
Scale2 = 1, ///< Scale is 2x
|
||||
Scale4 = 2, ///< Scale is 4x
|
||||
Scale8 = 3, ///< Scale is 8x
|
||||
|
||||
Scale1_4 = 6, ///< Scale is 0.25x
|
||||
Scale1_2 = 7, ///< Scale is 0.5x
|
||||
};
|
||||
|
||||
enum class LightingLutInput : u32 {
|
||||
NH = 0, // Cosine of the angle between the normal and half-angle vectors
|
||||
VH = 1, // Cosine of the angle between the view and half-angle vectors
|
||||
NV = 2, // Cosine of the angle between the normal and the view vector
|
||||
LN = 3, // Cosine of the angle between the light and the normal vectors
|
||||
SP = 4, // Cosine of the angle between the light and the inverse spotlight vectors
|
||||
CP = 5, // Cosine of the angle between the tangent and projection of half-angle vectors
|
||||
};
|
||||
|
||||
enum class LightingBumpMode : u32 {
|
||||
None = 0,
|
||||
NormalMap = 1,
|
||||
TangentMap = 2,
|
||||
};
|
||||
|
||||
union LightColor {
|
||||
BitField<0, 10, u32> b;
|
||||
BitField<10, 10, u32> g;
|
||||
BitField<20, 10, u32> r;
|
||||
|
||||
Math::Vec3f ToVec3f() const {
|
||||
// These fields are 10 bits wide, however 255 corresponds to 1.0f for each color
|
||||
// component
|
||||
return Math::MakeVec((f32)r / 255.f, (f32)g / 255.f, (f32)b / 255.f);
|
||||
}
|
||||
};
|
||||
|
||||
/// Returns true if the specified lighting sampler is supported by the current Pica lighting
|
||||
/// configuration
|
||||
static bool IsLightingSamplerSupported(LightingConfig config, LightingSampler sampler) {
|
||||
switch (sampler) {
|
||||
case LightingSampler::Distribution0:
|
||||
return (config != LightingConfig::Config1);
|
||||
|
||||
case LightingSampler::Distribution1:
|
||||
return (config != LightingConfig::Config0) && (config != LightingConfig::Config1) &&
|
||||
(config != LightingConfig::Config5);
|
||||
|
||||
case LightingSampler::SpotlightAttenuation:
|
||||
return (config != LightingConfig::Config2) && (config != LightingConfig::Config3);
|
||||
|
||||
case LightingSampler::Fresnel:
|
||||
return (config != LightingConfig::Config0) && (config != LightingConfig::Config2) &&
|
||||
(config != LightingConfig::Config4);
|
||||
|
||||
case LightingSampler::ReflectRed:
|
||||
return (config != LightingConfig::Config3);
|
||||
|
||||
case LightingSampler::ReflectGreen:
|
||||
case LightingSampler::ReflectBlue:
|
||||
return (config == LightingConfig::Config4) || (config == LightingConfig::Config5) ||
|
||||
(config == LightingConfig::Config7);
|
||||
default:
|
||||
UNREACHABLE_MSG("Regs::IsLightingSamplerSupported: Reached unreachable section, "
|
||||
"sampler should be one of Distribution0, Distribution1, "
|
||||
"SpotlightAttenuation, Fresnel, ReflectRed, ReflectGreen or "
|
||||
"ReflectBlue, instead got %i",
|
||||
static_cast<int>(config));
|
||||
}
|
||||
}
|
||||
|
||||
struct LightSrc {
|
||||
LightColor specular_0; // material.specular_0 * light.specular_0
|
||||
LightColor specular_1; // material.specular_1 * light.specular_1
|
||||
LightColor diffuse; // material.diffuse * light.diffuse
|
||||
LightColor ambient; // material.ambient * light.ambient
|
||||
|
||||
// Encoded as 16-bit floating point
|
||||
union {
|
||||
BitField<0, 16, u32> x;
|
||||
BitField<16, 16, u32> y;
|
||||
};
|
||||
union {
|
||||
BitField<0, 16, u32> z;
|
||||
};
|
||||
|
||||
// inverse spotlight direction vector, encoded as fixed1.1.11
|
||||
union {
|
||||
BitField<0, 13, s32> spot_x;
|
||||
BitField<16, 13, s32> spot_y;
|
||||
};
|
||||
union {
|
||||
BitField<0, 13, s32> spot_z;
|
||||
};
|
||||
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
|
||||
union {
|
||||
BitField<0, 1, u32> directional;
|
||||
BitField<1, 1, u32> two_sided_diffuse; // When disabled, clamp dot-product to 0
|
||||
BitField<2, 1, u32> geometric_factor_0;
|
||||
BitField<3, 1, u32> geometric_factor_1;
|
||||
} config;
|
||||
|
||||
BitField<0, 20, u32> dist_atten_bias;
|
||||
BitField<0, 20, u32> dist_atten_scale;
|
||||
|
||||
INSERT_PADDING_WORDS(0x4);
|
||||
};
|
||||
static_assert(sizeof(LightSrc) == 0x10 * sizeof(u32), "LightSrc structure must be 0x10 words");
|
||||
|
||||
LightSrc light[8];
|
||||
LightColor global_ambient; // Emission + (material.ambient * lighting.ambient)
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
BitField<0, 3, u32> max_light_index; // Number of enabled lights - 1
|
||||
|
||||
union {
|
||||
BitField<2, 2, LightingFresnelSelector> fresnel_selector;
|
||||
BitField<4, 4, LightingConfig> config;
|
||||
BitField<22, 2, u32> bump_selector; // 0: Texture 0, 1: Texture 1, 2: Texture 2
|
||||
BitField<27, 1, u32> clamp_highlights;
|
||||
BitField<28, 2, LightingBumpMode> bump_mode;
|
||||
BitField<30, 1, u32> disable_bump_renorm;
|
||||
} config0;
|
||||
|
||||
union {
|
||||
u32 raw;
|
||||
|
||||
// Each bit specifies whether spot light attenuation should be applied for the corresponding
|
||||
// light.
|
||||
BitField<8, 8, u32> disable_spot_atten;
|
||||
|
||||
BitField<16, 1, u32> disable_lut_d0;
|
||||
BitField<17, 1, u32> disable_lut_d1;
|
||||
// Note: by intuition, BitField<18, 1, u32> should be disable_lut_sp, but it is actually a
|
||||
// dummy bit which is always set as 1.
|
||||
BitField<19, 1, u32> disable_lut_fr;
|
||||
BitField<20, 1, u32> disable_lut_rr;
|
||||
BitField<21, 1, u32> disable_lut_rg;
|
||||
BitField<22, 1, u32> disable_lut_rb;
|
||||
|
||||
// Each bit specifies whether distance attenuation should be applied for the corresponding
|
||||
// light.
|
||||
BitField<24, 8, u32> disable_dist_atten;
|
||||
} config1;
|
||||
|
||||
bool IsDistAttenDisabled(unsigned index) const {
|
||||
return (config1.disable_dist_atten & (1 << index)) != 0;
|
||||
}
|
||||
|
||||
bool IsSpotAttenDisabled(unsigned index) const {
|
||||
return (config1.disable_spot_atten & (1 << index)) != 0;
|
||||
}
|
||||
|
||||
union {
|
||||
BitField<0, 8, u32> index; ///< Index at which to set data in the LUT
|
||||
BitField<8, 5, u32> type; ///< Type of LUT for which to set data
|
||||
} lut_config;
|
||||
|
||||
BitField<0, 1, u32> disable;
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
|
||||
// When data is written to any of these registers, it gets written to the lookup table of the
|
||||
// selected type at the selected index, specified above in the `lut_config` register. With each
|
||||
// write, `lut_config.index` is incremented. It does not matter which of these registers is
|
||||
// written to, the behavior will be the same.
|
||||
u32 lut_data[8];
|
||||
|
||||
// These are used to specify if absolute (abs) value should be used for each LUT index. When
|
||||
// abs mode is disabled, LUT indexes are in the range of (-1.0, 1.0). Otherwise, they are in
|
||||
// the range of (0.0, 1.0).
|
||||
union {
|
||||
BitField<1, 1, u32> disable_d0;
|
||||
BitField<5, 1, u32> disable_d1;
|
||||
BitField<9, 1, u32> disable_sp;
|
||||
BitField<13, 1, u32> disable_fr;
|
||||
BitField<17, 1, u32> disable_rb;
|
||||
BitField<21, 1, u32> disable_rg;
|
||||
BitField<25, 1, u32> disable_rr;
|
||||
} abs_lut_input;
|
||||
|
||||
union {
|
||||
BitField<0, 3, LightingLutInput> d0;
|
||||
BitField<4, 3, LightingLutInput> d1;
|
||||
BitField<8, 3, LightingLutInput> sp;
|
||||
BitField<12, 3, LightingLutInput> fr;
|
||||
BitField<16, 3, LightingLutInput> rb;
|
||||
BitField<20, 3, LightingLutInput> rg;
|
||||
BitField<24, 3, LightingLutInput> rr;
|
||||
} lut_input;
|
||||
|
||||
union {
|
||||
BitField<0, 3, LightingScale> d0;
|
||||
BitField<4, 3, LightingScale> d1;
|
||||
BitField<8, 3, LightingScale> sp;
|
||||
BitField<12, 3, LightingScale> fr;
|
||||
BitField<16, 3, LightingScale> rb;
|
||||
BitField<20, 3, LightingScale> rg;
|
||||
BitField<24, 3, LightingScale> rr;
|
||||
|
||||
static float GetScale(LightingScale scale) {
|
||||
switch (scale) {
|
||||
case LightingScale::Scale1:
|
||||
return 1.0f;
|
||||
case LightingScale::Scale2:
|
||||
return 2.0f;
|
||||
case LightingScale::Scale4:
|
||||
return 4.0f;
|
||||
case LightingScale::Scale8:
|
||||
return 8.0f;
|
||||
case LightingScale::Scale1_4:
|
||||
return 0.25f;
|
||||
case LightingScale::Scale1_2:
|
||||
return 0.5f;
|
||||
}
|
||||
return 0.0f;
|
||||
}
|
||||
} lut_scale;
|
||||
|
||||
INSERT_PADDING_WORDS(0x6);
|
||||
|
||||
union {
|
||||
// There are 8 light enable "slots", corresponding to the total number of lights supported
|
||||
// by Pica. For N enabled lights (specified by register 0x1c2, or 'src_num' above), the
|
||||
// first N slots below will be set to integers within the range of 0-7, corresponding to the
|
||||
// actual light that is enabled for each slot.
|
||||
|
||||
BitField<0, 3, u32> slot_0;
|
||||
BitField<4, 3, u32> slot_1;
|
||||
BitField<8, 3, u32> slot_2;
|
||||
BitField<12, 3, u32> slot_3;
|
||||
BitField<16, 3, u32> slot_4;
|
||||
BitField<20, 3, u32> slot_5;
|
||||
BitField<24, 3, u32> slot_6;
|
||||
BitField<28, 3, u32> slot_7;
|
||||
|
||||
unsigned GetNum(unsigned index) const {
|
||||
const unsigned enable_slots[] = {slot_0, slot_1, slot_2, slot_3,
|
||||
slot_4, slot_5, slot_6, slot_7};
|
||||
return enable_slots[index];
|
||||
}
|
||||
} light_enable;
|
||||
|
||||
INSERT_PADDING_WORDS(0x26);
|
||||
};
|
||||
|
||||
static_assert(sizeof(LightingRegs) == 0xC0 * sizeof(u32), "LightingRegs struct has incorrect size");
|
||||
|
||||
} // namespace Pica
|
|
@ -1,269 +0,0 @@
|
|||
// Copyright 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
struct PipelineRegs {
|
||||
enum class VertexAttributeFormat : u32 {
|
||||
BYTE = 0,
|
||||
UBYTE = 1,
|
||||
SHORT = 2,
|
||||
FLOAT = 3,
|
||||
};
|
||||
|
||||
struct {
|
||||
BitField<1, 28, u32> base_address;
|
||||
|
||||
PAddr GetPhysicalBaseAddress() const {
|
||||
return base_address * 16;
|
||||
}
|
||||
|
||||
// Descriptor for internal vertex attributes
|
||||
union {
|
||||
BitField<0, 2, VertexAttributeFormat> format0; // size of one element
|
||||
BitField<2, 2, u32> size0; // number of elements minus 1
|
||||
BitField<4, 2, VertexAttributeFormat> format1;
|
||||
BitField<6, 2, u32> size1;
|
||||
BitField<8, 2, VertexAttributeFormat> format2;
|
||||
BitField<10, 2, u32> size2;
|
||||
BitField<12, 2, VertexAttributeFormat> format3;
|
||||
BitField<14, 2, u32> size3;
|
||||
BitField<16, 2, VertexAttributeFormat> format4;
|
||||
BitField<18, 2, u32> size4;
|
||||
BitField<20, 2, VertexAttributeFormat> format5;
|
||||
BitField<22, 2, u32> size5;
|
||||
BitField<24, 2, VertexAttributeFormat> format6;
|
||||
BitField<26, 2, u32> size6;
|
||||
BitField<28, 2, VertexAttributeFormat> format7;
|
||||
BitField<30, 2, u32> size7;
|
||||
};
|
||||
|
||||
union {
|
||||
BitField<0, 2, VertexAttributeFormat> format8;
|
||||
BitField<2, 2, u32> size8;
|
||||
BitField<4, 2, VertexAttributeFormat> format9;
|
||||
BitField<6, 2, u32> size9;
|
||||
BitField<8, 2, VertexAttributeFormat> format10;
|
||||
BitField<10, 2, u32> size10;
|
||||
BitField<12, 2, VertexAttributeFormat> format11;
|
||||
BitField<14, 2, u32> size11;
|
||||
|
||||
BitField<16, 12, u32> attribute_mask;
|
||||
|
||||
// number of total attributes minus 1
|
||||
BitField<28, 4, u32> max_attribute_index;
|
||||
};
|
||||
|
||||
inline VertexAttributeFormat GetFormat(int n) const {
|
||||
VertexAttributeFormat formats[] = {format0, format1, format2, format3,
|
||||
format4, format5, format6, format7,
|
||||
format8, format9, format10, format11};
|
||||
return formats[n];
|
||||
}
|
||||
|
||||
inline int GetNumElements(int n) const {
|
||||
u32 sizes[] = {size0, size1, size2, size3, size4, size5,
|
||||
size6, size7, size8, size9, size10, size11};
|
||||
return (int)sizes[n] + 1;
|
||||
}
|
||||
|
||||
inline int GetElementSizeInBytes(int n) const {
|
||||
return (GetFormat(n) == VertexAttributeFormat::FLOAT)
|
||||
? 4
|
||||
: (GetFormat(n) == VertexAttributeFormat::SHORT) ? 2 : 1;
|
||||
}
|
||||
|
||||
inline int GetStride(int n) const {
|
||||
return GetNumElements(n) * GetElementSizeInBytes(n);
|
||||
}
|
||||
|
||||
inline bool IsDefaultAttribute(int id) const {
|
||||
return (id >= 12) || (attribute_mask & (1ULL << id)) != 0;
|
||||
}
|
||||
|
||||
inline int GetNumTotalAttributes() const {
|
||||
return (int)max_attribute_index + 1;
|
||||
}
|
||||
|
||||
// Attribute loaders map the source vertex data to input attributes
|
||||
// This e.g. allows to load different attributes from different memory locations
|
||||
struct {
|
||||
// Source attribute data offset from the base address
|
||||
BitField<0, 28, u32> data_offset;
|
||||
|
||||
union {
|
||||
BitField<0, 4, u32> comp0;
|
||||
BitField<4, 4, u32> comp1;
|
||||
BitField<8, 4, u32> comp2;
|
||||
BitField<12, 4, u32> comp3;
|
||||
BitField<16, 4, u32> comp4;
|
||||
BitField<20, 4, u32> comp5;
|
||||
BitField<24, 4, u32> comp6;
|
||||
BitField<28, 4, u32> comp7;
|
||||
};
|
||||
|
||||
union {
|
||||
BitField<0, 4, u32> comp8;
|
||||
BitField<4, 4, u32> comp9;
|
||||
BitField<8, 4, u32> comp10;
|
||||
BitField<12, 4, u32> comp11;
|
||||
|
||||
// bytes for a single vertex in this loader
|
||||
BitField<16, 8, u32> byte_count;
|
||||
|
||||
BitField<28, 4, u32> component_count;
|
||||
};
|
||||
|
||||
inline int GetComponent(int n) const {
|
||||
u32 components[] = {comp0, comp1, comp2, comp3, comp4, comp5,
|
||||
comp6, comp7, comp8, comp9, comp10, comp11};
|
||||
return (int)components[n];
|
||||
}
|
||||
} attribute_loaders[12];
|
||||
} vertex_attributes;
|
||||
|
||||
struct {
|
||||
enum IndexFormat : u32 {
|
||||
BYTE = 0,
|
||||
SHORT = 1,
|
||||
};
|
||||
|
||||
union {
|
||||
BitField<0, 31, u32> offset; // relative to base attribute address
|
||||
BitField<31, 1, IndexFormat> format;
|
||||
};
|
||||
} index_array;
|
||||
|
||||
// Number of vertices to render
|
||||
u32 num_vertices;
|
||||
|
||||
enum class UseGS : u32 {
|
||||
No = 0,
|
||||
Yes = 2,
|
||||
};
|
||||
|
||||
union {
|
||||
BitField<0, 2, UseGS> use_gs;
|
||||
BitField<31, 1, u32> variable_primitive;
|
||||
};
|
||||
|
||||
// The index of the first vertex to render
|
||||
u32 vertex_offset;
|
||||
|
||||
INSERT_PADDING_WORDS(0x3);
|
||||
|
||||
// These two trigger rendering of triangles
|
||||
u32 trigger_draw;
|
||||
u32 trigger_draw_indexed;
|
||||
|
||||
INSERT_PADDING_WORDS(0x2);
|
||||
|
||||
// These registers are used to setup the default "fall-back" vertex shader attributes
|
||||
struct {
|
||||
// Index of the current default attribute
|
||||
u32 index;
|
||||
|
||||
// Writing to these registers sets the "current" default attribute.
|
||||
u32 set_value[3];
|
||||
} vs_default_attributes_setup;
|
||||
|
||||
INSERT_PADDING_WORDS(0x2);
|
||||
|
||||
struct {
|
||||
// There are two channels that can be used to configure the next command buffer, which can
|
||||
// be then executed by writing to the "trigger" registers. There are two reasons why a game
|
||||
// might use this feature:
|
||||
// 1) With this, an arbitrary number of additional command buffers may be executed in
|
||||
// sequence without requiring any intervention of the CPU after the initial one is
|
||||
// kicked off.
|
||||
// 2) Games can configure these registers to provide a command list subroutine mechanism.
|
||||
|
||||
// TODO: verify the bit length of these two fields
|
||||
// According to 3dbrew, the bit length of them are 21 and 29, respectively
|
||||
BitField<0, 20, u32> size[2]; ///< Size (in bytes / 8) of each channel's command buffer
|
||||
BitField<0, 28, u32> addr[2]; ///< Physical address / 8 of each channel's command buffer
|
||||
u32 trigger[2]; ///< Triggers execution of the channel's command buffer when written to
|
||||
|
||||
unsigned GetSize(unsigned index) const {
|
||||
ASSERT(index < 2);
|
||||
return 8 * size[index];
|
||||
}
|
||||
|
||||
PAddr GetPhysicalAddress(unsigned index) const {
|
||||
ASSERT(index < 2);
|
||||
return (PAddr)(8 * addr[index]);
|
||||
}
|
||||
} command_buffer;
|
||||
|
||||
INSERT_PADDING_WORDS(4);
|
||||
|
||||
/// Number of input attributes to the vertex shader minus 1
|
||||
BitField<0, 4, u32> max_input_attrib_index;
|
||||
|
||||
INSERT_PADDING_WORDS(1);
|
||||
|
||||
// The shader unit 3, which can be used for both vertex and geometry shader, gets its
|
||||
// configuration depending on this register. If this is not set, unit 3 will share some
|
||||
// configuration with other units. It is known that program code and swizzle pattern uploaded
|
||||
// via regs.vs will be also uploaded to unit 3 if this is not set. Although very likely, it is
|
||||
// still unclear whether uniforms and other configuration can be also shared.
|
||||
BitField<0, 1, u32> gs_unit_exclusive_configuration;
|
||||
|
||||
enum class GPUMode : u32 {
|
||||
Drawing = 0,
|
||||
Configuring = 1,
|
||||
};
|
||||
|
||||
GPUMode gpu_mode;
|
||||
|
||||
INSERT_PADDING_WORDS(0x4);
|
||||
BitField<0, 4, u32> vs_outmap_total_minus_1_a;
|
||||
INSERT_PADDING_WORDS(0x6);
|
||||
BitField<0, 4, u32> vs_outmap_total_minus_1_b;
|
||||
|
||||
enum class GSMode : u32 {
|
||||
Point = 0,
|
||||
VariablePrimitive = 1,
|
||||
FixedPrimitive = 2,
|
||||
};
|
||||
|
||||
union {
|
||||
BitField<0, 8, GSMode> mode;
|
||||
BitField<8, 4, u32> fixed_vertex_num_minus_1;
|
||||
BitField<12, 4, u32> stride_minus_1;
|
||||
BitField<16, 4, u32> start_index;
|
||||
} gs_config;
|
||||
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
|
||||
u32 variable_vertex_main_num_minus_1;
|
||||
|
||||
INSERT_PADDING_WORDS(0x9);
|
||||
|
||||
enum class TriangleTopology : u32 {
|
||||
List = 0,
|
||||
Strip = 1,
|
||||
Fan = 2,
|
||||
Shader = 3, // Programmable setup unit implemented in a geometry shader
|
||||
};
|
||||
|
||||
BitField<8, 2, TriangleTopology> triangle_topology;
|
||||
|
||||
u32 restart_primitive;
|
||||
|
||||
INSERT_PADDING_WORDS(0x20);
|
||||
};
|
||||
|
||||
static_assert(sizeof(PipelineRegs) == 0x80 * sizeof(u32), "PipelineRegs struct has incorrect size");
|
||||
|
||||
} // namespace Pica
|
|
@ -1,139 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/pica_types.h"

namespace Pica {

struct RasterizerRegs {
    enum class CullMode : u32 {
        // Select which polygons are considered to be "frontfacing".
        KeepAll = 0,
        KeepClockWise = 1,
        KeepCounterClockWise = 2,
        // TODO: What does the third value imply?
    };

    union {
        BitField<0, 2, CullMode> cull_mode;
    };

    BitField<0, 24, u32> viewport_size_x;

    INSERT_PADDING_WORDS(0x1);

    BitField<0, 24, u32> viewport_size_y;

    INSERT_PADDING_WORDS(0x3);

    BitField<0, 1, u32> clip_enable;
    BitField<0, 24, u32> clip_coef[4]; // float24

    Math::Vec4<float24> GetClipCoef() const {
        return {float24::FromRaw(clip_coef[0]), float24::FromRaw(clip_coef[1]),
                float24::FromRaw(clip_coef[2]), float24::FromRaw(clip_coef[3])};
    }

    INSERT_PADDING_WORDS(0x1);

    BitField<0, 24, u32> viewport_depth_range;      // float24
    BitField<0, 24, u32> viewport_depth_near_plane; // float24

    BitField<0, 3, u32> vs_output_total;

    union VSOutputAttributes {
        // Maps components of output vertex attributes to semantics
        enum Semantic : u32 {
            POSITION_X = 0,
            POSITION_Y = 1,
            POSITION_Z = 2,
            POSITION_W = 3,

            QUATERNION_X = 4,
            QUATERNION_Y = 5,
            QUATERNION_Z = 6,
            QUATERNION_W = 7,

            COLOR_R = 8,
            COLOR_G = 9,
            COLOR_B = 10,
            COLOR_A = 11,

            TEXCOORD0_U = 12,
            TEXCOORD0_V = 13,
            TEXCOORD1_U = 14,
            TEXCOORD1_V = 15,

            TEXCOORD0_W = 16,

            VIEW_X = 18,
            VIEW_Y = 19,
            VIEW_Z = 20,

            TEXCOORD2_U = 22,
            TEXCOORD2_V = 23,

            INVALID = 31,
        };

        BitField<0, 5, Semantic> map_x;
        BitField<8, 5, Semantic> map_y;
        BitField<16, 5, Semantic> map_z;
        BitField<24, 5, Semantic> map_w;
    } vs_output_attributes[7];

    INSERT_PADDING_WORDS(0xe);

    enum class ScissorMode : u32 {
        Disabled = 0,
        Exclude = 1, // Exclude pixels inside the scissor box

        Include = 3 // Exclude pixels outside the scissor box
    };

    struct {
        BitField<0, 2, ScissorMode> mode;

        union {
            BitField<0, 10, u32> x1;
            BitField<16, 10, u32> y1;
        };

        union {
            BitField<0, 10, u32> x2;
            BitField<16, 10, u32> y2;
        };
    } scissor_test;

    union {
        BitField<0, 10, s32> x;
        BitField<16, 10, s32> y;
    } viewport_corner;

    INSERT_PADDING_WORDS(0x1);

    // TODO: early depth
    INSERT_PADDING_WORDS(0x1);

    INSERT_PADDING_WORDS(0x2);

    enum DepthBuffering : u32 {
        WBuffering = 0,
        ZBuffering = 1,
    };
    BitField<0, 1, DepthBuffering> depthmap_enable;

    INSERT_PADDING_WORDS(0x12);
};

static_assert(sizeof(RasterizerRegs) == 0x40 * sizeof(u32),
              "RasterizerRegs struct has incorrect size");

} // namespace Pica
@ -1,111 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>

#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"

namespace Pica {

struct ShaderRegs {
    BitField<0, 16, u32> bool_uniforms;

    union {
        BitField<0, 8, u32> x;
        BitField<8, 8, u32> y;
        BitField<16, 8, u32> z;
        BitField<24, 8, u32> w;
    } int_uniforms[4];

    INSERT_PADDING_WORDS(0x4);

    enum ShaderMode {
        GS = 0x08,
        VS = 0xA0,
    };

    union {
        // Number of input attributes to shader unit - 1
        BitField<0, 4, u32> max_input_attribute_index;
        BitField<8, 8, u32> input_to_uniform;
        BitField<24, 8, ShaderMode> shader_mode;
    };

    // Offset to shader program entry point (in words)
    BitField<0, 16, u32> main_offset;

    /// Maps input attributes to registers. 4-bits per attribute, specifying a register index
    u32 input_attribute_to_register_map_low;
    u32 input_attribute_to_register_map_high;

    unsigned int GetRegisterForAttribute(unsigned int attribute_index) const {
        u64 map = ((u64)input_attribute_to_register_map_high << 32) |
                  (u64)input_attribute_to_register_map_low;
        return (map >> (attribute_index * 4)) & 0b1111;
    }

    BitField<0, 16, u32> output_mask;

    // 0x28E, CODETRANSFER_END
    INSERT_PADDING_WORDS(0x2);

    struct {
        enum Format : u32 {
            FLOAT24 = 0,
            FLOAT32 = 1,
        };

        bool IsFloat32() const {
            return format == FLOAT32;
        }

        union {
            // Index of the next uniform to write to
            // TODO: ctrulib uses 8 bits for this, however that seems to yield lots of invalid
            // indices
            // TODO: Maybe the uppermost index is for the geometry shader? Investigate!
            BitField<0, 7, u32> index;

            BitField<31, 1, Format> format;
        };

        // Writing to these registers sets the current uniform.
        u32 set_value[8];

    } uniform_setup;

    INSERT_PADDING_WORDS(0x2);

    struct {
        // Offset of the next instruction to write code to.
        // Incremented with each instruction write.
        u32 offset;

        // Writing to these registers sets the "current" word in the shader program.
        u32 set_word[8];
    } program;

    INSERT_PADDING_WORDS(0x1);

    // This register group is used to load an internal table of swizzling patterns,
    // which are indexed by each shader instruction to specify vector component swizzling.
    struct {
        // Offset of the next swizzle pattern to write code to.
        // Incremented with each instruction write.
        u32 offset;

        // Writing to these registers sets the current swizzle pattern in the table.
        u32 set_word[8];
    } swizzle_patterns;

    INSERT_PADDING_WORDS(0x2);
};

static_assert(sizeof(ShaderRegs) == 0x30 * sizeof(u32), "ShaderRegs struct has incorrect size");

} // namespace Pica
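(Orientation note, not part of the commit: the two 32-bit attribute-permutation words above pack one 4-bit register index per input attribute, which is exactly what GetRegisterForAttribute() extracts. A standalone sketch of the same unpacking, with hypothetical variable names.)

// Illustrative sketch only; mirrors ShaderRegs::GetRegisterForAttribute().
#include <cstdint>

static unsigned AttributeRegister(uint32_t map_low, uint32_t map_high, unsigned attribute_index) {
    const uint64_t map = (static_cast<uint64_t>(map_high) << 32) | map_low;
    // Each attribute occupies a 4-bit slot; sixteen slots fit in the 64-bit map.
    return static_cast<unsigned>((map >> (attribute_index * 4)) & 0xF);
}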
@ -1,452 +0,0 @@
|
|||
// Copyright 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
struct TexturingRegs {
|
||||
struct TextureConfig {
|
||||
enum TextureType : u32 {
|
||||
Texture2D = 0,
|
||||
TextureCube = 1,
|
||||
Shadow2D = 2,
|
||||
Projection2D = 3,
|
||||
ShadowCube = 4,
|
||||
Disabled = 5,
|
||||
};
|
||||
|
||||
enum WrapMode : u32 {
|
||||
ClampToEdge = 0,
|
||||
ClampToBorder = 1,
|
||||
Repeat = 2,
|
||||
MirroredRepeat = 3,
|
||||
// Mode 4-7 produces some weird result and may be just invalid:
|
||||
ClampToEdge2 = 4, // Positive coord: clamp to edge; negative coord: repeat
|
||||
ClampToBorder2 = 5, // Positive coord: clamp to border; negative coord: repeat
|
||||
Repeat2 = 6, // Same as Repeat
|
||||
Repeat3 = 7, // Same as Repeat
|
||||
};
|
||||
|
||||
enum TextureFilter : u32 {
|
||||
Nearest = 0,
|
||||
Linear = 1,
|
||||
};
|
||||
|
||||
union {
|
||||
u32 raw;
|
||||
BitField<0, 8, u32> r;
|
||||
BitField<8, 8, u32> g;
|
||||
BitField<16, 8, u32> b;
|
||||
BitField<24, 8, u32> a;
|
||||
} border_color;
|
||||
|
||||
union {
|
||||
BitField<0, 11, u32> height;
|
||||
BitField<16, 11, u32> width;
|
||||
};
|
||||
|
||||
union {
|
||||
BitField<1, 1, TextureFilter> mag_filter;
|
||||
BitField<2, 1, TextureFilter> min_filter;
|
||||
BitField<8, 3, WrapMode> wrap_t;
|
||||
BitField<12, 3, WrapMode> wrap_s;
|
||||
/// @note Only valid for texture 0 according to 3DBrew.
|
||||
BitField<28, 3, TextureType> type;
|
||||
};
|
||||
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
|
||||
BitField<0, 28, u32> address;
|
||||
|
||||
PAddr GetPhysicalAddress() const {
|
||||
return address * 8;
|
||||
}
|
||||
|
||||
// texture1 and texture2 store the texture format directly after the address
|
||||
// whereas texture0 inserts some additional flags inbetween.
|
||||
// Hence, we store the format separately so that all other parameters can be described
|
||||
// in a single structure.
|
||||
};
|
||||
|
||||
enum class TextureFormat : u32 {
|
||||
RGBA8 = 0,
|
||||
RGB8 = 1,
|
||||
RGB5A1 = 2,
|
||||
RGB565 = 3,
|
||||
RGBA4 = 4,
|
||||
IA8 = 5,
|
||||
RG8 = 6, ///< @note Also called HILO8 in 3DBrew.
|
||||
I8 = 7,
|
||||
A8 = 8,
|
||||
IA4 = 9,
|
||||
I4 = 10,
|
||||
A4 = 11,
|
||||
ETC1 = 12, // compressed
|
||||
ETC1A4 = 13, // compressed
|
||||
};
|
||||
|
||||
static unsigned NibblesPerPixel(TextureFormat format) {
|
||||
switch (format) {
|
||||
case TextureFormat::RGBA8:
|
||||
return 8;
|
||||
|
||||
case TextureFormat::RGB8:
|
||||
return 6;
|
||||
|
||||
case TextureFormat::RGB5A1:
|
||||
case TextureFormat::RGB565:
|
||||
case TextureFormat::RGBA4:
|
||||
case TextureFormat::IA8:
|
||||
case TextureFormat::RG8:
|
||||
return 4;
|
||||
|
||||
case TextureFormat::I4:
|
||||
case TextureFormat::A4:
|
||||
return 1;
|
||||
|
||||
case TextureFormat::I8:
|
||||
case TextureFormat::A8:
|
||||
case TextureFormat::IA4:
|
||||
|
||||
default: // placeholder for yet unknown formats
|
||||
UNIMPLEMENTED();
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
union {
|
||||
BitField<0, 1, u32> texture0_enable;
|
||||
BitField<1, 1, u32> texture1_enable;
|
||||
BitField<2, 1, u32> texture2_enable;
|
||||
BitField<8, 2, u32> texture3_coordinates;
|
||||
BitField<10, 1, u32> texture3_enable;
|
||||
BitField<13, 1, u32> texture2_use_coord1;
|
||||
BitField<16, 1, u32> clear_texture_cache; // TODO: unimplemented
|
||||
} main_config;
|
||||
TextureConfig texture0;
|
||||
|
||||
enum class CubeFace {
|
||||
PositiveX = 0,
|
||||
NegativeX = 1,
|
||||
PositiveY = 2,
|
||||
NegativeY = 3,
|
||||
PositiveZ = 4,
|
||||
NegativeZ = 5,
|
||||
};
|
||||
|
||||
BitField<0, 22, u32> cube_address[5];
|
||||
|
||||
PAddr GetCubePhysicalAddress(CubeFace face) const {
|
||||
PAddr address = texture0.address;
|
||||
if (face != CubeFace::PositiveX) {
|
||||
// Bits [22:27] from the main texture address is shared with all cubemap additional
|
||||
// addresses.
|
||||
auto& face_addr = cube_address[static_cast<size_t>(face) - 1];
|
||||
address &= ~face_addr.mask;
|
||||
address |= face_addr;
|
||||
}
|
||||
// A multiplier of 8 is also needed in the same way as the main address.
|
||||
return address * 8;
|
||||
}
|
||||
|
||||
INSERT_PADDING_WORDS(0x3);
|
||||
BitField<0, 4, TextureFormat> texture0_format;
|
||||
BitField<0, 1, u32> fragment_lighting_enable;
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
TextureConfig texture1;
|
||||
BitField<0, 4, TextureFormat> texture1_format;
|
||||
INSERT_PADDING_WORDS(0x2);
|
||||
TextureConfig texture2;
|
||||
BitField<0, 4, TextureFormat> texture2_format;
|
||||
INSERT_PADDING_WORDS(0x9);
|
||||
|
||||
struct FullTextureConfig {
|
||||
const bool enabled;
|
||||
const TextureConfig config;
|
||||
const TextureFormat format;
|
||||
};
|
||||
const std::array<FullTextureConfig, 3> GetTextures() const {
|
||||
return {{
|
||||
{main_config.texture0_enable.ToBool(), texture0, texture0_format},
|
||||
{main_config.texture1_enable.ToBool(), texture1, texture1_format},
|
||||
{main_config.texture2_enable.ToBool(), texture2, texture2_format},
|
||||
}};
|
||||
}
|
||||
|
||||
// 0xa8-0xad: ProcTex Config
|
||||
enum class ProcTexClamp : u32 {
|
||||
ToZero = 0,
|
||||
ToEdge = 1,
|
||||
SymmetricalRepeat = 2,
|
||||
MirroredRepeat = 3,
|
||||
Pulse = 4,
|
||||
};
|
||||
|
||||
enum class ProcTexCombiner : u32 {
|
||||
U = 0, // u
|
||||
U2 = 1, // u * u
|
||||
V = 2, // v
|
||||
V2 = 3, // v * v
|
||||
Add = 4, // (u + v) / 2
|
||||
Add2 = 5, // (u * u + v * v) / 2
|
||||
SqrtAdd2 = 6, // sqrt(u * u + v * v)
|
||||
Min = 7, // min(u, v)
|
||||
Max = 8, // max(u, v)
|
||||
RMax = 9, // Average of Max and SqrtAdd2
|
||||
};
|
||||
|
||||
enum class ProcTexShift : u32 {
|
||||
None = 0,
|
||||
Odd = 1,
|
||||
Even = 2,
|
||||
};
|
||||
|
||||
union {
|
||||
BitField<0, 3, ProcTexClamp> u_clamp;
|
||||
BitField<3, 3, ProcTexClamp> v_clamp;
|
||||
BitField<6, 4, ProcTexCombiner> color_combiner;
|
||||
BitField<10, 4, ProcTexCombiner> alpha_combiner;
|
||||
BitField<14, 1, u32> separate_alpha;
|
||||
BitField<15, 1, u32> noise_enable;
|
||||
BitField<16, 2, ProcTexShift> u_shift;
|
||||
BitField<18, 2, ProcTexShift> v_shift;
|
||||
BitField<20, 8, u32> bias_low; // float16 TODO: unimplemented
|
||||
} proctex;
|
||||
|
||||
union ProcTexNoiseConfig {
|
||||
BitField<0, 16, s32> amplitude; // fixed1.3.12
|
||||
BitField<16, 16, u32> phase; // float16
|
||||
};
|
||||
|
||||
ProcTexNoiseConfig proctex_noise_u;
|
||||
ProcTexNoiseConfig proctex_noise_v;
|
||||
|
||||
union {
|
||||
BitField<0, 16, u32> u; // float16
|
||||
BitField<16, 16, u32> v; // float16
|
||||
} proctex_noise_frequency;
|
||||
|
||||
enum class ProcTexFilter : u32 {
|
||||
Nearest = 0,
|
||||
Linear = 1,
|
||||
NearestMipmapNearest = 2,
|
||||
LinearMipmapNearest = 3,
|
||||
NearestMipmapLinear = 4,
|
||||
LinearMipmapLinear = 5,
|
||||
};
|
||||
|
||||
union {
|
||||
BitField<0, 3, ProcTexFilter> filter;
|
||||
BitField<11, 8, u32> width;
|
||||
BitField<19, 8, u32> bias_high; // TODO: unimplemented
|
||||
} proctex_lut;
|
||||
|
||||
BitField<0, 8, u32> proctex_lut_offset;
|
||||
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
|
||||
// 0xaf-0xb7: ProcTex LUT
|
||||
enum class ProcTexLutTable : u32 {
|
||||
Noise = 0,
|
||||
ColorMap = 2,
|
||||
AlphaMap = 3,
|
||||
Color = 4,
|
||||
ColorDiff = 5,
|
||||
};
|
||||
|
||||
union {
|
||||
BitField<0, 8, u32> index;
|
||||
BitField<8, 4, ProcTexLutTable> ref_table;
|
||||
} proctex_lut_config;
|
||||
|
||||
u32 proctex_lut_data[8];
|
||||
|
||||
INSERT_PADDING_WORDS(0x8);
|
||||
|
||||
// 0xc0-0xff: Texture Combiner (akin to glTexEnv)
|
||||
struct TevStageConfig {
|
||||
enum class Source : u32 {
|
||||
PrimaryColor = 0x0,
|
||||
PrimaryFragmentColor = 0x1,
|
||||
SecondaryFragmentColor = 0x2,
|
||||
|
||||
Texture0 = 0x3,
|
||||
Texture1 = 0x4,
|
||||
Texture2 = 0x5,
|
||||
Texture3 = 0x6,
|
||||
|
||||
PreviousBuffer = 0xd,
|
||||
Constant = 0xe,
|
||||
Previous = 0xf,
|
||||
};
|
||||
|
||||
enum class ColorModifier : u32 {
|
||||
SourceColor = 0x0,
|
||||
OneMinusSourceColor = 0x1,
|
||||
SourceAlpha = 0x2,
|
||||
OneMinusSourceAlpha = 0x3,
|
||||
SourceRed = 0x4,
|
||||
OneMinusSourceRed = 0x5,
|
||||
|
||||
SourceGreen = 0x8,
|
||||
OneMinusSourceGreen = 0x9,
|
||||
|
||||
SourceBlue = 0xc,
|
||||
OneMinusSourceBlue = 0xd,
|
||||
};
|
||||
|
||||
enum class AlphaModifier : u32 {
|
||||
SourceAlpha = 0x0,
|
||||
OneMinusSourceAlpha = 0x1,
|
||||
SourceRed = 0x2,
|
||||
OneMinusSourceRed = 0x3,
|
||||
SourceGreen = 0x4,
|
||||
OneMinusSourceGreen = 0x5,
|
||||
SourceBlue = 0x6,
|
||||
OneMinusSourceBlue = 0x7,
|
||||
};
|
||||
|
||||
enum class Operation : u32 {
|
||||
Replace = 0,
|
||||
Modulate = 1,
|
||||
Add = 2,
|
||||
AddSigned = 3,
|
||||
Lerp = 4,
|
||||
Subtract = 5,
|
||||
Dot3_RGB = 6,
|
||||
Dot3_RGBA = 7,
|
||||
MultiplyThenAdd = 8,
|
||||
AddThenMultiply = 9,
|
||||
};
|
||||
|
||||
union {
|
||||
u32 sources_raw;
|
||||
BitField<0, 4, Source> color_source1;
|
||||
BitField<4, 4, Source> color_source2;
|
||||
BitField<8, 4, Source> color_source3;
|
||||
BitField<16, 4, Source> alpha_source1;
|
||||
BitField<20, 4, Source> alpha_source2;
|
||||
BitField<24, 4, Source> alpha_source3;
|
||||
};
|
||||
|
||||
union {
|
||||
u32 modifiers_raw;
|
||||
BitField<0, 4, ColorModifier> color_modifier1;
|
||||
BitField<4, 4, ColorModifier> color_modifier2;
|
||||
BitField<8, 4, ColorModifier> color_modifier3;
|
||||
BitField<12, 3, AlphaModifier> alpha_modifier1;
|
||||
BitField<16, 3, AlphaModifier> alpha_modifier2;
|
||||
BitField<20, 3, AlphaModifier> alpha_modifier3;
|
||||
};
|
||||
|
||||
union {
|
||||
u32 ops_raw;
|
||||
BitField<0, 4, Operation> color_op;
|
||||
BitField<16, 4, Operation> alpha_op;
|
||||
};
|
||||
|
||||
union {
|
||||
u32 const_color;
|
||||
BitField<0, 8, u32> const_r;
|
||||
BitField<8, 8, u32> const_g;
|
||||
BitField<16, 8, u32> const_b;
|
||||
BitField<24, 8, u32> const_a;
|
||||
};
|
||||
|
||||
union {
|
||||
u32 scales_raw;
|
||||
BitField<0, 2, u32> color_scale;
|
||||
BitField<16, 2, u32> alpha_scale;
|
||||
};
|
||||
|
||||
inline unsigned GetColorMultiplier() const {
|
||||
return (color_scale < 3) ? (1 << color_scale) : 1;
|
||||
}
|
||||
|
||||
inline unsigned GetAlphaMultiplier() const {
|
||||
return (alpha_scale < 3) ? (1 << alpha_scale) : 1;
|
||||
}
|
||||
};
|
||||
|
||||
TevStageConfig tev_stage0;
|
||||
INSERT_PADDING_WORDS(0x3);
|
||||
TevStageConfig tev_stage1;
|
||||
INSERT_PADDING_WORDS(0x3);
|
||||
TevStageConfig tev_stage2;
|
||||
INSERT_PADDING_WORDS(0x3);
|
||||
TevStageConfig tev_stage3;
|
||||
INSERT_PADDING_WORDS(0x3);
|
||||
|
||||
enum class FogMode : u32 {
|
||||
None = 0,
|
||||
Fog = 5,
|
||||
Gas = 7,
|
||||
};
|
||||
|
||||
union {
|
||||
BitField<0, 3, FogMode> fog_mode;
|
||||
BitField<16, 1, u32> fog_flip;
|
||||
|
||||
union {
|
||||
// Tev stages 0-3 write their output to the combiner buffer if the corresponding bit in
|
||||
// these masks are set
|
||||
BitField<8, 4, u32> update_mask_rgb;
|
||||
BitField<12, 4, u32> update_mask_a;
|
||||
|
||||
bool TevStageUpdatesCombinerBufferColor(unsigned stage_index) const {
|
||||
return (stage_index < 4) && (update_mask_rgb & (1 << stage_index));
|
||||
}
|
||||
|
||||
bool TevStageUpdatesCombinerBufferAlpha(unsigned stage_index) const {
|
||||
return (stage_index < 4) && (update_mask_a & (1 << stage_index));
|
||||
}
|
||||
} tev_combiner_buffer_input;
|
||||
};
|
||||
|
||||
union {
|
||||
u32 raw;
|
||||
BitField<0, 8, u32> r;
|
||||
BitField<8, 8, u32> g;
|
||||
BitField<16, 8, u32> b;
|
||||
} fog_color;
|
||||
|
||||
INSERT_PADDING_WORDS(0x4);
|
||||
|
||||
BitField<0, 16, u32> fog_lut_offset;
|
||||
|
||||
INSERT_PADDING_WORDS(0x1);
|
||||
|
||||
u32 fog_lut_data[8];
|
||||
|
||||
TevStageConfig tev_stage4;
|
||||
INSERT_PADDING_WORDS(0x3);
|
||||
TevStageConfig tev_stage5;
|
||||
|
||||
union {
|
||||
u32 raw;
|
||||
BitField<0, 8, u32> r;
|
||||
BitField<8, 8, u32> g;
|
||||
BitField<16, 8, u32> b;
|
||||
BitField<24, 8, u32> a;
|
||||
} tev_combiner_buffer_color;
|
||||
|
||||
INSERT_PADDING_WORDS(0x2);
|
||||
|
||||
const std::array<TevStageConfig, 6> GetTevStages() const {
|
||||
return {{tev_stage0, tev_stage1, tev_stage2, tev_stage3, tev_stage4, tev_stage5}};
|
||||
};
|
||||
};
|
||||
|
||||
static_assert(sizeof(TexturingRegs) == 0x80 * sizeof(u32),
|
||||
"TexturingRegs struct has incorrect size");
|
||||
|
||||
} // namespace Pica
|
|
@ -5,19 +5,6 @@
#include <atomic>
#include <memory>
#include "video_core/renderer_base.h"
#include "video_core/renderer_opengl/gl_rasterizer.h"
#include "video_core/swrasterizer/swrasterizer.h"
#include "video_core/video_core.h"

void RendererBase::RefreshRasterizerSetting() {
    bool hw_renderer_enabled = VideoCore::g_hw_renderer_enabled;
    if (rasterizer == nullptr || opengl_rasterizer_active != hw_renderer_enabled) {
        opengl_rasterizer_active = hw_renderer_enabled;

        if (hw_renderer_enabled) {
            rasterizer = std::make_unique<RasterizerOpenGL>();
        } else {
            rasterizer = std::make_unique<VideoCore::SWRasterizer>();
        }
    }
}
void RendererBase::RefreshRasterizerSetting() {}
@ -5,8 +5,8 @@
#pragma once

#include <memory>
#include "common/assert.h"
#include "common/common_types.h"
#include "video_core/rasterizer_interface.h"

class EmuWindow;

@ -72,14 +72,9 @@ public:
        return m_current_frame;
    }

    VideoCore::RasterizerInterface* Rasterizer() const {
        return rasterizer.get();
    }

    void RefreshRasterizerSetting();

protected:
    std::unique_ptr<VideoCore::RasterizerInterface> rasterizer;
    f32 m_current_fps = 0.0f; ///< Current framerate, should be set by the renderer
    int m_current_frame = 0;  ///< Current frame, should be set by the renderer
File diff suppressed because it is too large
@ -1,316 +0,0 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <cstddef>
#include <cstring>
#include <memory>
#include <unordered_map>
#include <vector>
#include <glad/glad.h>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "common/hash.h"
#include "common/vector_math.h"
#include "core/hw/gpu.h"
#include "video_core/pica_state.h"
#include "video_core/pica_types.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/regs_framebuffer.h"
#include "video_core/regs_lighting.h"
#include "video_core/regs_rasterizer.h"
#include "video_core/regs_texturing.h"
#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_shader_gen.h"
#include "video_core/renderer_opengl/gl_state.h"
#include "video_core/renderer_opengl/pica_to_gl.h"
#include "video_core/shader/shader.h"

struct ScreenInfo;

class RasterizerOpenGL : public VideoCore::RasterizerInterface {
public:
    RasterizerOpenGL();
    ~RasterizerOpenGL() override;

    void AddTriangle(const Pica::Shader::OutputVertex& v0, const Pica::Shader::OutputVertex& v1,
                     const Pica::Shader::OutputVertex& v2) override;
    void DrawTriangles() override;
    void NotifyPicaRegisterChanged(u32 id) override;
    void FlushAll() override;
    void FlushRegion(PAddr addr, u64 size) override;
    void FlushAndInvalidateRegion(PAddr addr, u64 size) override;
    bool AccelerateDisplayTransfer(const GPU::Regs::DisplayTransferConfig& config) override;
    bool AccelerateTextureCopy(const GPU::Regs::DisplayTransferConfig& config) override;
    bool AccelerateFill(const GPU::Regs::MemoryFillConfig& config) override;
    bool AccelerateDisplay(const GPU::Regs::FramebufferConfig& config, PAddr framebuffer_addr,
                           u32 pixel_stride, ScreenInfo& screen_info) override;

    /// OpenGL shader generated for a given Pica register state
    struct PicaShader {
        /// OpenGL shader resource
        OGLShader shader;
    };

private:
    struct SamplerInfo {
        using TextureConfig = Pica::TexturingRegs::TextureConfig;

        OGLSampler sampler;

        /// Creates the sampler object, initializing its state so that it's in sync with the
        /// SamplerInfo struct.
        void Create();
        /// Syncs the sampler object with the config, updating any necessary state.
        void SyncWithConfig(const TextureConfig& config);

    private:
        TextureConfig::TextureFilter mag_filter;
        TextureConfig::TextureFilter min_filter;
        TextureConfig::WrapMode wrap_s;
        TextureConfig::WrapMode wrap_t;
        u32 border_color;
    };

    /// Structure that the hardware rendered vertices are composed of
    struct HardwareVertex {
        HardwareVertex(const Pica::Shader::OutputVertex& v, bool flip_quaternion) {
            position[0] = v.pos.x.ToFloat32();
            position[1] = v.pos.y.ToFloat32();
            position[2] = v.pos.z.ToFloat32();
            position[3] = v.pos.w.ToFloat32();
            color[0] = v.color.x.ToFloat32();
            color[1] = v.color.y.ToFloat32();
            color[2] = v.color.z.ToFloat32();
            color[3] = v.color.w.ToFloat32();
            tex_coord0[0] = v.tc0.x.ToFloat32();
            tex_coord0[1] = v.tc0.y.ToFloat32();
            tex_coord1[0] = v.tc1.x.ToFloat32();
            tex_coord1[1] = v.tc1.y.ToFloat32();
            tex_coord2[0] = v.tc2.x.ToFloat32();
            tex_coord2[1] = v.tc2.y.ToFloat32();
            tex_coord0_w = v.tc0_w.ToFloat32();
            normquat[0] = v.quat.x.ToFloat32();
            normquat[1] = v.quat.y.ToFloat32();
            normquat[2] = v.quat.z.ToFloat32();
            normquat[3] = v.quat.w.ToFloat32();
            view[0] = v.view.x.ToFloat32();
            view[1] = v.view.y.ToFloat32();
            view[2] = v.view.z.ToFloat32();

            if (flip_quaternion) {
                for (float& x : normquat) {
                    x = -x;
                }
            }
}
|
||||
|
||||
GLfloat position[4];
|
||||
GLfloat color[4];
|
||||
GLfloat tex_coord0[2];
|
||||
GLfloat tex_coord1[2];
|
||||
GLfloat tex_coord2[2];
|
||||
GLfloat tex_coord0_w;
|
||||
GLfloat normquat[4];
|
||||
GLfloat view[3];
|
||||
};
|
||||
|
||||
struct LightSrc {
|
||||
alignas(16) GLvec3 specular_0;
|
||||
alignas(16) GLvec3 specular_1;
|
||||
alignas(16) GLvec3 diffuse;
|
||||
alignas(16) GLvec3 ambient;
|
||||
alignas(16) GLvec3 position;
|
||||
alignas(16) GLvec3 spot_direction; // negated
|
||||
GLfloat dist_atten_bias;
|
||||
GLfloat dist_atten_scale;
|
||||
};
|
||||
|
||||
/// Uniform structure for the Uniform Buffer Object, all vectors must be 16-byte aligned
|
||||
// NOTE: Always keep a vec4 at the end. The GL spec is not clear whether the alignment at
|
||||
// the end of a uniform block is included in UNIFORM_BLOCK_DATA_SIZE or not.
|
||||
// Not following that rule will cause problems on some AMD drivers.
|
||||
struct UniformData {
|
||||
alignas(8) GLvec2 framebuffer_scale;
|
||||
GLint alphatest_ref;
|
||||
GLfloat depth_scale;
|
||||
GLfloat depth_offset;
|
||||
GLint scissor_x1;
|
||||
GLint scissor_y1;
|
||||
GLint scissor_x2;
|
||||
GLint scissor_y2;
|
||||
alignas(16) GLvec3 fog_color;
|
||||
alignas(8) GLvec2 proctex_noise_f;
|
||||
alignas(8) GLvec2 proctex_noise_a;
|
||||
alignas(8) GLvec2 proctex_noise_p;
|
||||
alignas(16) GLvec3 lighting_global_ambient;
|
||||
LightSrc light_src[8];
|
||||
alignas(16) GLvec4 const_color[6]; // A vec4 color for each of the six tev stages
|
||||
alignas(16) GLvec4 tev_combiner_buffer_color;
|
||||
alignas(16) GLvec4 clip_coef;
|
||||
};
|
||||
|
||||
static_assert(
|
||||
sizeof(UniformData) == 0x470,
|
||||
"The size of the UniformData structure has changed, update the structure in the shader");
|
||||
static_assert(sizeof(UniformData) < 16384,
|
||||
"UniformData structure must be less than 16kb as per the OpenGL spec");
|
||||
|
||||
/// Syncs the clip enabled status to match the PICA register
|
||||
void SyncClipEnabled();
|
||||
|
||||
/// Syncs the clip coefficients to match the PICA register
|
||||
void SyncClipCoef();
|
||||
|
||||
/// Sets the OpenGL shader in accordance with the current PICA register state
|
||||
void SetShader();
|
||||
|
||||
/// Syncs the cull mode to match the PICA register
|
||||
void SyncCullMode();
|
||||
|
||||
/// Syncs the depth scale to match the PICA register
|
||||
void SyncDepthScale();
|
||||
|
||||
/// Syncs the depth offset to match the PICA register
|
||||
void SyncDepthOffset();
|
||||
|
||||
/// Syncs the blend enabled status to match the PICA register
|
||||
void SyncBlendEnabled();
|
||||
|
||||
/// Syncs the blend functions to match the PICA register
|
||||
void SyncBlendFuncs();
|
||||
|
||||
/// Syncs the blend color to match the PICA register
|
||||
void SyncBlendColor();
|
||||
|
||||
/// Syncs the fog states to match the PICA register
|
||||
void SyncFogColor();
|
||||
void SyncFogLUT();
|
||||
|
||||
/// Sync the procedural texture noise configuration to match the PICA register
|
||||
void SyncProcTexNoise();
|
||||
|
||||
/// Sync the procedural texture lookup tables
|
||||
void SyncProcTexNoiseLUT();
|
||||
void SyncProcTexColorMap();
|
||||
void SyncProcTexAlphaMap();
|
||||
void SyncProcTexLUT();
|
||||
void SyncProcTexDiffLUT();
|
||||
|
||||
/// Syncs the alpha test states to match the PICA register
|
||||
void SyncAlphaTest();
|
||||
|
||||
/// Syncs the logic op states to match the PICA register
|
||||
void SyncLogicOp();
|
||||
|
||||
/// Syncs the color write mask to match the PICA register state
|
||||
void SyncColorWriteMask();
|
||||
|
||||
/// Syncs the stencil write mask to match the PICA register state
|
||||
void SyncStencilWriteMask();
|
||||
|
||||
/// Syncs the depth write mask to match the PICA register state
|
||||
void SyncDepthWriteMask();
|
||||
|
||||
/// Syncs the stencil test states to match the PICA register
|
||||
void SyncStencilTest();
|
||||
|
||||
/// Syncs the depth test states to match the PICA register
|
||||
void SyncDepthTest();
|
||||
|
||||
/// Syncs the TEV combiner color buffer to match the PICA register
|
||||
void SyncCombinerColor();
|
||||
|
||||
/// Syncs the TEV constant color to match the PICA register
|
||||
void SyncTevConstColor(int tev_index, const Pica::TexturingRegs::TevStageConfig& tev_stage);
|
||||
|
||||
/// Syncs the lighting global ambient color to match the PICA register
|
||||
void SyncGlobalAmbient();
|
||||
|
||||
/// Syncs the lighting lookup tables
|
||||
void SyncLightingLUT(unsigned index);
|
||||
|
||||
/// Syncs the specified light's specular 0 color to match the PICA register
|
||||
void SyncLightSpecular0(int light_index);
|
||||
|
||||
/// Syncs the specified light's specular 1 color to match the PICA register
|
||||
void SyncLightSpecular1(int light_index);
|
||||
|
||||
/// Syncs the specified light's diffuse color to match the PICA register
|
||||
void SyncLightDiffuse(int light_index);
|
||||
|
||||
/// Syncs the specified light's ambient color to match the PICA register
|
||||
void SyncLightAmbient(int light_index);
|
||||
|
||||
/// Syncs the specified light's position to match the PICA register
|
||||
void SyncLightPosition(int light_index);
|
||||
|
||||
/// Syncs the specified spot light's direction to match the PICA register
|
||||
void SyncLightSpotDirection(int light_index);
|
||||
|
||||
/// Syncs the specified light's distance attenuation bias to match the PICA register
|
||||
void SyncLightDistanceAttenuationBias(int light_index);
|
||||
|
||||
/// Syncs the specified light's distance attenuation scale to match the PICA register
|
||||
void SyncLightDistanceAttenuationScale(int light_index);
|
||||
|
||||
OpenGLState state;
|
||||
|
||||
RasterizerCacheOpenGL res_cache;
|
||||
|
||||
std::vector<HardwareVertex> vertex_batch;
|
||||
|
||||
std::unordered_map<GLShader::PicaShaderConfig, std::unique_ptr<PicaShader>> shader_cache;
|
||||
const PicaShader* current_shader = nullptr;
|
||||
bool shader_dirty;
|
||||
|
||||
struct {
|
||||
UniformData data;
|
||||
std::array<bool, Pica::LightingRegs::NumLightingSampler> lut_dirty;
|
||||
bool fog_lut_dirty;
|
||||
bool proctex_noise_lut_dirty;
|
||||
bool proctex_color_map_dirty;
|
||||
bool proctex_alpha_map_dirty;
|
||||
bool proctex_lut_dirty;
|
||||
bool proctex_diff_lut_dirty;
|
||||
bool dirty;
|
||||
} uniform_block_data = {};
|
||||
|
||||
std::array<SamplerInfo, 3> texture_samplers;
|
||||
OGLVertexArray vertex_array;
|
||||
OGLBuffer vertex_buffer;
|
||||
OGLBuffer uniform_buffer;
|
||||
OGLFramebuffer framebuffer;
|
||||
|
||||
OGLBuffer lighting_lut_buffer;
|
||||
OGLTexture lighting_lut;
|
||||
std::array<std::array<GLvec2, 256>, Pica::LightingRegs::NumLightingSampler> lighting_lut_data{};
|
||||
|
||||
OGLBuffer fog_lut_buffer;
|
||||
OGLTexture fog_lut;
|
||||
std::array<GLvec2, 128> fog_lut_data{};
|
||||
|
||||
OGLBuffer proctex_noise_lut_buffer;
|
||||
OGLTexture proctex_noise_lut;
|
||||
std::array<GLvec2, 128> proctex_noise_lut_data{};
|
||||
|
||||
OGLBuffer proctex_color_map_buffer;
|
||||
OGLTexture proctex_color_map;
|
||||
std::array<GLvec2, 128> proctex_color_map_data{};
|
||||
|
||||
OGLBuffer proctex_alpha_map_buffer;
|
||||
OGLTexture proctex_alpha_map;
|
||||
std::array<GLvec2, 128> proctex_alpha_map_data{};
|
||||
|
||||
OGLBuffer proctex_lut_buffer;
|
||||
OGLTexture proctex_lut;
|
||||
std::array<GLvec4, 256> proctex_lut_data{};
|
||||
|
||||
OGLBuffer proctex_diff_lut_buffer;
|
||||
OGLTexture proctex_diff_lut;
|
||||
std::array<GLvec4, 256> proctex_diff_lut_data{};
|
||||
};
|
|
@ -1,799 +0,0 @@
|
|||
// Copyright 2015 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <atomic>
|
||||
#include <cstring>
|
||||
#include <iterator>
|
||||
#include <unordered_set>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
#include <glad/glad.h>
|
||||
#include "common/bit_field.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/math_util.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "core/frontend/emu_window.h"
|
||||
#include "core/memory.h"
|
||||
#include "core/settings.h"
|
||||
#include "video_core/pica_state.h"
|
||||
#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
|
||||
#include "video_core/renderer_opengl/gl_state.h"
|
||||
#include "video_core/texture/texture_decode.h"
|
||||
#include "video_core/utils.h"
|
||||
#include "video_core/video_core.h"
|
||||
|
||||
struct FormatTuple {
|
||||
GLint internal_format;
|
||||
GLenum format;
|
||||
GLenum type;
|
||||
};
|
||||
|
||||
static const std::array<FormatTuple, 5> fb_format_tuples = {{
|
||||
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8}, // RGBA8
|
||||
{GL_RGB8, GL_BGR, GL_UNSIGNED_BYTE}, // RGB8
|
||||
{GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_5_5_5_1}, // RGB5A1
|
||||
{GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5}, // RGB565
|
||||
{GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4}, // RGBA4
|
||||
}};
|
||||
|
||||
static const std::array<FormatTuple, 4> depth_format_tuples = {{
|
||||
{GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT}, // D16
|
||||
{},
|
||||
{GL_DEPTH_COMPONENT24, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT}, // D24
|
||||
{GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // D24S8
|
||||
}};
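// Note: the empty tuple above covers the unused value between D16 (14) and D24 (16) in
// CachedSurface::PixelFormat, which is why the depth/stencil lookups below index this
// array with (size_t)pixel_format - 14.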
|
||||
|
||||
RasterizerCacheOpenGL::RasterizerCacheOpenGL() {
|
||||
transfer_framebuffers[0].Create();
|
||||
transfer_framebuffers[1].Create();
|
||||
}
|
||||
|
||||
RasterizerCacheOpenGL::~RasterizerCacheOpenGL() {
|
||||
FlushAll();
|
||||
}
|
||||
|
||||
static void MortonCopyPixels(CachedSurface::PixelFormat pixel_format, u32 width, u32 height,
|
||||
u32 bytes_per_pixel, u32 gl_bytes_per_pixel, u8* morton_data,
|
||||
u8* gl_data, bool morton_to_gl) {
|
||||
using PixelFormat = CachedSurface::PixelFormat;
|
||||
|
||||
u8* data_ptrs[2];
|
||||
u32 depth_stencil_shifts[2] = {24, 8};
|
||||
|
||||
if (morton_to_gl) {
|
||||
std::swap(depth_stencil_shifts[0], depth_stencil_shifts[1]);
|
||||
}
|
||||
|
||||
if (pixel_format == PixelFormat::D24S8) {
|
||||
for (unsigned y = 0; y < height; ++y) {
|
||||
for (unsigned x = 0; x < width; ++x) {
|
||||
const u32 coarse_y = y & ~7;
|
||||
u32 morton_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) +
|
||||
coarse_y * width * bytes_per_pixel;
|
||||
u32 gl_pixel_index = (x + (height - 1 - y) * width) * gl_bytes_per_pixel;
|
||||
|
||||
data_ptrs[morton_to_gl] = morton_data + morton_offset;
|
||||
data_ptrs[!morton_to_gl] = &gl_data[gl_pixel_index];
|
||||
|
||||
// Swap depth and stencil value ordering since 3DS does not match OpenGL
|
||||
u32 depth_stencil;
|
||||
memcpy(&depth_stencil, data_ptrs[1], sizeof(u32));
|
||||
depth_stencil = (depth_stencil << depth_stencil_shifts[0]) |
|
||||
(depth_stencil >> depth_stencil_shifts[1]);
|
||||
|
||||
memcpy(data_ptrs[0], &depth_stencil, sizeof(u32));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (unsigned y = 0; y < height; ++y) {
|
||||
for (unsigned x = 0; x < width; ++x) {
|
||||
const u32 coarse_y = y & ~7;
|
||||
u32 morton_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) +
|
||||
coarse_y * width * bytes_per_pixel;
|
||||
u32 gl_pixel_index = (x + (height - 1 - y) * width) * gl_bytes_per_pixel;
|
||||
|
||||
data_ptrs[morton_to_gl] = morton_data + morton_offset;
|
||||
data_ptrs[!morton_to_gl] = &gl_data[gl_pixel_index];
|
||||
|
||||
memcpy(data_ptrs[0], data_ptrs[1], bytes_per_pixel);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
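As a side note, VideoCore::GetMortonOffset (presumably declared in "video_core/utils.h", which this file includes) resolves a pixel's byte offset within the 3DS's 8x8-tile swizzled layout. A minimal, hypothetical sketch of the Z-order interleave such a helper performs, shown here only for illustration and not part of this diff:

#include <cstdint>

// Interleave the low three bits of x and y into a Z-order index, giving the
// pixel's position (0..63) inside its 8x8 tile; multiplying by the bytes per
// pixel would then yield the intra-tile byte offset.
static std::uint32_t MortonInterleave8x8(std::uint32_t x, std::uint32_t y) {
    std::uint32_t i = 0;
    for (std::uint32_t bit = 0; bit < 3; ++bit) {
        i |= ((x >> bit) & 1u) << (2 * bit);     // x bits occupy even positions
        i |= ((y >> bit) & 1u) << (2 * bit + 1); // y bits occupy odd positions
    }
    return i;
}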
|
||||
|
||||
void RasterizerCacheOpenGL::BlitTextures(GLuint src_tex, GLuint dst_tex,
|
||||
CachedSurface::SurfaceType type,
|
||||
const MathUtil::Rectangle<int>& src_rect,
|
||||
const MathUtil::Rectangle<int>& dst_rect) {
|
||||
using SurfaceType = CachedSurface::SurfaceType;
|
||||
|
||||
OpenGLState cur_state = OpenGLState::GetCurState();
|
||||
|
||||
// Make sure textures aren't bound to texture units, since we are about to bind them to
// framebuffer components
|
||||
OpenGLState::ResetTexture(src_tex);
|
||||
OpenGLState::ResetTexture(dst_tex);
|
||||
|
||||
// Keep track of previous framebuffer bindings
|
||||
GLuint old_fbs[2] = {cur_state.draw.read_framebuffer, cur_state.draw.draw_framebuffer};
|
||||
cur_state.draw.read_framebuffer = transfer_framebuffers[0].handle;
|
||||
cur_state.draw.draw_framebuffer = transfer_framebuffers[1].handle;
|
||||
cur_state.Apply();
|
||||
|
||||
u32 buffers = 0;
|
||||
|
||||
if (type == SurfaceType::Color || type == SurfaceType::Texture) {
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, src_tex,
|
||||
0);
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
|
||||
0);
|
||||
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, dst_tex,
|
||||
0);
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
|
||||
0);
|
||||
|
||||
buffers = GL_COLOR_BUFFER_BIT;
|
||||
} else if (type == SurfaceType::Depth) {
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, src_tex, 0);
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
|
||||
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, dst_tex, 0);
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
|
||||
|
||||
buffers = GL_DEPTH_BUFFER_BIT;
|
||||
} else if (type == SurfaceType::DepthStencil) {
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
|
||||
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
|
||||
src_tex, 0);
|
||||
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
|
||||
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
|
||||
dst_tex, 0);
|
||||
|
||||
buffers = GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
|
||||
}
|
||||
|
||||
glBlitFramebuffer(src_rect.left, src_rect.top, src_rect.right, src_rect.bottom, dst_rect.left,
|
||||
dst_rect.top, dst_rect.right, dst_rect.bottom, buffers,
|
||||
buffers == GL_COLOR_BUFFER_BIT ? GL_LINEAR : GL_NEAREST);
|
||||
|
||||
// Restore previous framebuffer bindings
|
||||
cur_state.draw.read_framebuffer = old_fbs[0];
|
||||
cur_state.draw.draw_framebuffer = old_fbs[1];
|
||||
cur_state.Apply();
|
||||
}
|
||||
|
||||
bool RasterizerCacheOpenGL::TryBlitSurfaces(CachedSurface* src_surface,
|
||||
const MathUtil::Rectangle<int>& src_rect,
|
||||
CachedSurface* dst_surface,
|
||||
const MathUtil::Rectangle<int>& dst_rect) {
|
||||
|
||||
if (!CachedSurface::CheckFormatsBlittable(src_surface->pixel_format,
|
||||
dst_surface->pixel_format)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
BlitTextures(src_surface->texture.handle, dst_surface->texture.handle,
|
||||
CachedSurface::GetFormatType(src_surface->pixel_format), src_rect, dst_rect);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void AllocateSurfaceTexture(GLuint texture, CachedSurface::PixelFormat pixel_format,
|
||||
u32 width, u32 height) {
|
||||
// Allocate an uninitialized texture of appropriate size and format for the surface
|
||||
using SurfaceType = CachedSurface::SurfaceType;
|
||||
|
||||
OpenGLState cur_state = OpenGLState::GetCurState();
|
||||
|
||||
// Keep track of previous texture bindings
|
||||
GLuint old_tex = cur_state.texture_units[0].texture_2d;
|
||||
cur_state.texture_units[0].texture_2d = texture;
|
||||
cur_state.Apply();
|
||||
glActiveTexture(GL_TEXTURE0);
|
||||
|
||||
SurfaceType type = CachedSurface::GetFormatType(pixel_format);
|
||||
|
||||
FormatTuple tuple;
|
||||
if (type == SurfaceType::Color) {
|
||||
ASSERT((size_t)pixel_format < fb_format_tuples.size());
|
||||
tuple = fb_format_tuples[(unsigned int)pixel_format];
|
||||
} else if (type == SurfaceType::Depth || type == SurfaceType::DepthStencil) {
|
||||
size_t tuple_idx = (size_t)pixel_format - 14;
|
||||
ASSERT(tuple_idx < depth_format_tuples.size());
|
||||
tuple = depth_format_tuples[tuple_idx];
|
||||
} else {
|
||||
tuple = {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE};
|
||||
}
|
||||
|
||||
glTexImage2D(GL_TEXTURE_2D, 0, tuple.internal_format, width, height, 0, tuple.format,
|
||||
tuple.type, nullptr);
|
||||
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
|
||||
// Restore previous texture bindings
|
||||
cur_state.texture_units[0].texture_2d = old_tex;
|
||||
cur_state.Apply();
|
||||
}
|
||||
|
||||
MICROPROFILE_DEFINE(OpenGL_SurfaceUpload, "OpenGL", "Surface Upload", MP_RGB(128, 64, 192));
|
||||
CachedSurface* RasterizerCacheOpenGL::GetSurface(const CachedSurface& params, bool match_res_scale,
|
||||
bool load_if_create) {
|
||||
using PixelFormat = CachedSurface::PixelFormat;
|
||||
using SurfaceType = CachedSurface::SurfaceType;
|
||||
|
||||
if (params.addr == 0) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
u32 params_size =
|
||||
params.width * params.height * CachedSurface::GetFormatBpp(params.pixel_format) / 8;
|
||||
|
||||
// Check for an exact match in existing surfaces
|
||||
CachedSurface* best_exact_surface = nullptr;
|
||||
float exact_surface_goodness = -1.f;
|
||||
|
||||
auto surface_interval =
|
||||
boost::icl::interval<PAddr>::right_open(params.addr, params.addr + params_size);
|
||||
auto range = surface_cache.equal_range(surface_interval);
|
||||
for (auto it = range.first; it != range.second; ++it) {
|
||||
for (auto it2 = it->second.begin(); it2 != it->second.end(); ++it2) {
|
||||
CachedSurface* surface = it2->get();
|
||||
|
||||
// Check if the request matches the surface exactly
|
||||
if (params.addr == surface->addr && params.width == surface->width &&
|
||||
params.height == surface->height && params.pixel_format == surface->pixel_format) {
|
||||
// Make sure optional param-matching criteria are fulfilled
|
||||
bool tiling_match = (params.is_tiled == surface->is_tiled);
|
||||
bool res_scale_match = (params.res_scale_width == surface->res_scale_width &&
|
||||
params.res_scale_height == surface->res_scale_height);
|
||||
if (!match_res_scale || res_scale_match) {
|
||||
// Prioritize same-tiling and highest resolution surfaces
|
||||
float match_goodness =
|
||||
(float)tiling_match + surface->res_scale_width * surface->res_scale_height;
|
||||
if (match_goodness > exact_surface_goodness || surface->dirty) {
|
||||
exact_surface_goodness = match_goodness;
|
||||
best_exact_surface = surface;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Return the best exact surface if found
|
||||
if (best_exact_surface != nullptr) {
|
||||
return best_exact_surface;
|
||||
}
|
||||
|
||||
// No matching surfaces found, so create a new one
|
||||
u8* texture_src_data = Memory::GetPhysicalPointer(params.addr);
|
||||
if (texture_src_data == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
MICROPROFILE_SCOPE(OpenGL_SurfaceUpload);
|
||||
|
||||
// Stride only applies to linear images.
|
||||
ASSERT(params.pixel_stride == 0 || !params.is_tiled);
|
||||
|
||||
std::shared_ptr<CachedSurface> new_surface = std::make_shared<CachedSurface>();
|
||||
|
||||
new_surface->addr = params.addr;
|
||||
new_surface->size = params_size;
|
||||
|
||||
new_surface->texture.Create();
|
||||
new_surface->width = params.width;
|
||||
new_surface->height = params.height;
|
||||
new_surface->pixel_stride = params.pixel_stride;
|
||||
new_surface->res_scale_width = params.res_scale_width;
|
||||
new_surface->res_scale_height = params.res_scale_height;
|
||||
|
||||
new_surface->is_tiled = params.is_tiled;
|
||||
new_surface->pixel_format = params.pixel_format;
|
||||
new_surface->dirty = false;
|
||||
|
||||
if (!load_if_create) {
|
||||
// Don't load any data; just allocate the surface's texture
|
||||
AllocateSurfaceTexture(new_surface->texture.handle, new_surface->pixel_format,
|
||||
new_surface->GetScaledWidth(), new_surface->GetScaledHeight());
|
||||
} else {
|
||||
// TODO: Consider attempting subrect match in existing surfaces and direct blit here instead
|
||||
// of memory upload below if that's a common scenario in some game
|
||||
|
||||
Memory::RasterizerFlushRegion(params.addr, params_size);
|
||||
|
||||
// Load data from memory to the new surface
|
||||
OpenGLState cur_state = OpenGLState::GetCurState();
|
||||
|
||||
GLuint old_tex = cur_state.texture_units[0].texture_2d;
|
||||
cur_state.texture_units[0].texture_2d = new_surface->texture.handle;
|
||||
cur_state.Apply();
|
||||
glActiveTexture(GL_TEXTURE0);
|
||||
|
||||
if (!new_surface->is_tiled) {
|
||||
// TODO: Ensure this will always be a color format, not a depth or other format
|
||||
ASSERT((size_t)new_surface->pixel_format < fb_format_tuples.size());
|
||||
const FormatTuple& tuple = fb_format_tuples[(unsigned int)params.pixel_format];
|
||||
|
||||
glPixelStorei(GL_UNPACK_ROW_LENGTH, (GLint)new_surface->pixel_stride);
|
||||
glTexImage2D(GL_TEXTURE_2D, 0, tuple.internal_format, params.width, params.height, 0,
|
||||
tuple.format, tuple.type, texture_src_data);
|
||||
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
|
||||
} else {
|
||||
SurfaceType type = CachedSurface::GetFormatType(new_surface->pixel_format);
|
||||
if (type != SurfaceType::Depth && type != SurfaceType::DepthStencil) {
|
||||
FormatTuple tuple;
|
||||
if ((size_t)params.pixel_format < fb_format_tuples.size()) {
|
||||
tuple = fb_format_tuples[(unsigned int)params.pixel_format];
|
||||
} else {
|
||||
// Texture
|
||||
tuple = {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE};
|
||||
}
|
||||
|
||||
std::vector<Math::Vec4<u8>> tex_buffer(params.width * params.height);
|
||||
|
||||
Pica::Texture::TextureInfo tex_info;
|
||||
tex_info.width = params.width;
|
||||
tex_info.height = params.height;
|
||||
tex_info.format = (Pica::TexturingRegs::TextureFormat)params.pixel_format;
|
||||
tex_info.SetDefaultStride();
|
||||
tex_info.physical_address = params.addr;
|
||||
|
||||
for (unsigned y = 0; y < params.height; ++y) {
|
||||
for (unsigned x = 0; x < params.width; ++x) {
|
||||
tex_buffer[x + params.width * y] = Pica::Texture::LookupTexture(
|
||||
texture_src_data, x, params.height - 1 - y, tex_info);
|
||||
}
|
||||
}
|
||||
|
||||
glTexImage2D(GL_TEXTURE_2D, 0, tuple.internal_format, params.width, params.height,
|
||||
0, GL_RGBA, GL_UNSIGNED_BYTE, tex_buffer.data());
|
||||
} else {
|
||||
// Depth/Stencil formats need special treatment since they aren't sampleable using
|
||||
// LookupTexture and can't use RGBA format
|
||||
size_t tuple_idx = (size_t)params.pixel_format - 14;
|
||||
ASSERT(tuple_idx < depth_format_tuples.size());
|
||||
const FormatTuple& tuple = depth_format_tuples[tuple_idx];
|
||||
|
||||
u32 bytes_per_pixel = CachedSurface::GetFormatBpp(params.pixel_format) / 8;
|
||||
|
||||
// OpenGL needs 4 bpp alignment for D24 since using GL_UNSIGNED_INT as type
|
||||
bool use_4bpp = (params.pixel_format == PixelFormat::D24);
|
||||
|
||||
u32 gl_bytes_per_pixel = use_4bpp ? 4 : bytes_per_pixel;
|
||||
|
||||
std::vector<u8> temp_fb_depth_buffer(params.width * params.height *
|
||||
gl_bytes_per_pixel);
|
||||
|
||||
u8* temp_fb_depth_buffer_ptr =
|
||||
use_4bpp ? temp_fb_depth_buffer.data() + 1 : temp_fb_depth_buffer.data();
|
||||
|
||||
MortonCopyPixels(params.pixel_format, params.width, params.height, bytes_per_pixel,
|
||||
gl_bytes_per_pixel, texture_src_data, temp_fb_depth_buffer_ptr,
|
||||
true);
|
||||
|
||||
glTexImage2D(GL_TEXTURE_2D, 0, tuple.internal_format, params.width, params.height,
|
||||
0, tuple.format, tuple.type, temp_fb_depth_buffer.data());
|
||||
}
|
||||
}
|
||||
|
||||
// If not 1x scale, blit 1x texture to a new scaled texture and replace texture in surface
|
||||
if (new_surface->res_scale_width != 1.f || new_surface->res_scale_height != 1.f) {
|
||||
OGLTexture scaled_texture;
|
||||
scaled_texture.Create();
|
||||
|
||||
AllocateSurfaceTexture(scaled_texture.handle, new_surface->pixel_format,
|
||||
new_surface->GetScaledWidth(), new_surface->GetScaledHeight());
|
||||
BlitTextures(new_surface->texture.handle, scaled_texture.handle,
|
||||
CachedSurface::GetFormatType(new_surface->pixel_format),
|
||||
MathUtil::Rectangle<int>(0, 0, new_surface->width, new_surface->height),
|
||||
MathUtil::Rectangle<int>(0, 0, new_surface->GetScaledWidth(),
|
||||
new_surface->GetScaledHeight()));
|
||||
|
||||
new_surface->texture.Release();
|
||||
new_surface->texture.handle = scaled_texture.handle;
|
||||
scaled_texture.handle = 0;
|
||||
cur_state.texture_units[0].texture_2d = new_surface->texture.handle;
|
||||
cur_state.Apply();
|
||||
}
|
||||
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
|
||||
|
||||
cur_state.texture_units[0].texture_2d = old_tex;
|
||||
cur_state.Apply();
|
||||
}
|
||||
|
||||
Memory::RasterizerMarkRegionCached(new_surface->addr, new_surface->size, 1);
|
||||
surface_cache.add(std::make_pair(boost::icl::interval<PAddr>::right_open(
|
||||
new_surface->addr, new_surface->addr + new_surface->size),
|
||||
std::set<std::shared_ptr<CachedSurface>>({new_surface})));
|
||||
return new_surface.get();
|
||||
}
|
||||
|
||||
CachedSurface* RasterizerCacheOpenGL::GetSurfaceRect(const CachedSurface& params,
|
||||
bool match_res_scale, bool load_if_create,
|
||||
MathUtil::Rectangle<int>& out_rect) {
|
||||
if (params.addr == 0) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
u32 total_pixels = params.width * params.height;
|
||||
u32 params_size = total_pixels * CachedSurface::GetFormatBpp(params.pixel_format) / 8;
|
||||
|
||||
// Attempt to find encompassing surfaces
|
||||
CachedSurface* best_subrect_surface = nullptr;
|
||||
float subrect_surface_goodness = -1.f;
|
||||
|
||||
auto surface_interval =
|
||||
boost::icl::interval<PAddr>::right_open(params.addr, params.addr + params_size);
|
||||
auto cache_upper_bound = surface_cache.upper_bound(surface_interval);
|
||||
for (auto it = surface_cache.lower_bound(surface_interval); it != cache_upper_bound; ++it) {
|
||||
for (auto it2 = it->second.begin(); it2 != it->second.end(); ++it2) {
|
||||
CachedSurface* surface = it2->get();
|
||||
|
||||
// Check if the request is contained in the surface
|
||||
if (params.addr >= surface->addr &&
|
||||
params.addr + params_size - 1 <= surface->addr + surface->size - 1 &&
|
||||
params.pixel_format == surface->pixel_format) {
|
||||
// Make sure optional param-matching criteria are fulfilled
|
||||
bool tiling_match = (params.is_tiled == surface->is_tiled);
|
||||
bool res_scale_match = (params.res_scale_width == surface->res_scale_width &&
|
||||
params.res_scale_height == surface->res_scale_height);
|
||||
if (!match_res_scale || res_scale_match) {
|
||||
// Prioritize same-tiling and highest resolution surfaces
|
||||
float match_goodness =
|
||||
(float)tiling_match + surface->res_scale_width * surface->res_scale_height;
|
||||
if (match_goodness > subrect_surface_goodness || surface->dirty) {
|
||||
subrect_surface_goodness = match_goodness;
|
||||
best_subrect_surface = surface;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Return the best subrect surface if found
|
||||
if (best_subrect_surface != nullptr) {
|
||||
unsigned int bytes_per_pixel =
|
||||
(CachedSurface::GetFormatBpp(best_subrect_surface->pixel_format) / 8);
|
||||
|
||||
int x0, y0;
|
||||
|
||||
if (!params.is_tiled) {
|
||||
u32 begin_pixel_index = (params.addr - best_subrect_surface->addr) / bytes_per_pixel;
|
||||
x0 = begin_pixel_index % best_subrect_surface->width;
|
||||
y0 = begin_pixel_index / best_subrect_surface->width;
|
||||
|
||||
out_rect = MathUtil::Rectangle<int>(x0, y0, x0 + params.width, y0 + params.height);
|
||||
} else {
|
||||
u32 bytes_per_tile = 8 * 8 * bytes_per_pixel;
|
||||
u32 tiles_per_row = best_subrect_surface->width / 8;
|
||||
|
||||
u32 begin_tile_index = (params.addr - best_subrect_surface->addr) / bytes_per_tile;
|
||||
x0 = begin_tile_index % tiles_per_row * 8;
|
||||
y0 = begin_tile_index / tiles_per_row * 8;
|
||||
|
||||
// Tiled surfaces are flipped vertically in the rasterizer vs. 3DS memory.
|
||||
out_rect =
|
||||
MathUtil::Rectangle<int>(x0, best_subrect_surface->height - y0, x0 + params.width,
|
||||
best_subrect_surface->height - (y0 + params.height));
|
||||
}
|
||||
|
||||
out_rect.left = (int)(out_rect.left * best_subrect_surface->res_scale_width);
|
||||
out_rect.right = (int)(out_rect.right * best_subrect_surface->res_scale_width);
|
||||
out_rect.top = (int)(out_rect.top * best_subrect_surface->res_scale_height);
|
||||
out_rect.bottom = (int)(out_rect.bottom * best_subrect_surface->res_scale_height);
|
||||
|
||||
return best_subrect_surface;
|
||||
}
|
||||
|
||||
// No subrect found - create and return a new surface
|
||||
if (!params.is_tiled) {
|
||||
out_rect = MathUtil::Rectangle<int>(0, 0, (int)(params.width * params.res_scale_width),
|
||||
(int)(params.height * params.res_scale_height));
|
||||
} else {
|
||||
out_rect = MathUtil::Rectangle<int>(0, (int)(params.height * params.res_scale_height),
|
||||
(int)(params.width * params.res_scale_width), 0);
|
||||
}
|
||||
|
||||
return GetSurface(params, match_res_scale, load_if_create);
|
||||
}
|
||||
|
||||
CachedSurface* RasterizerCacheOpenGL::GetTextureSurface(
|
||||
const Pica::TexturingRegs::FullTextureConfig& config) {
|
||||
|
||||
Pica::Texture::TextureInfo info =
|
||||
Pica::Texture::TextureInfo::FromPicaRegister(config.config, config.format);
|
||||
|
||||
CachedSurface params;
|
||||
params.addr = info.physical_address;
|
||||
params.width = info.width;
|
||||
params.height = info.height;
|
||||
params.is_tiled = true;
|
||||
params.pixel_format = CachedSurface::PixelFormatFromTextureFormat(info.format);
|
||||
return GetSurface(params, false, true);
|
||||
}
|
||||
|
||||
std::tuple<CachedSurface*, CachedSurface*, MathUtil::Rectangle<int>>
|
||||
RasterizerCacheOpenGL::GetFramebufferSurfaces(
|
||||
const Pica::FramebufferRegs::FramebufferConfig& config) {
|
||||
|
||||
const auto& regs = Pica::g_state.regs;
|
||||
|
||||
// Make sure that framebuffers don't overlap if both color and depth are being used
|
||||
u32 fb_area = config.GetWidth() * config.GetHeight();
|
||||
bool framebuffers_overlap =
|
||||
config.GetColorBufferPhysicalAddress() != 0 &&
|
||||
config.GetDepthBufferPhysicalAddress() != 0 &&
|
||||
MathUtil::IntervalsIntersect(
|
||||
config.GetColorBufferPhysicalAddress(),
|
||||
fb_area * GPU::Regs::BytesPerPixel(GPU::Regs::PixelFormat(config.color_format.Value())),
|
||||
config.GetDepthBufferPhysicalAddress(),
|
||||
fb_area * Pica::FramebufferRegs::BytesPerDepthPixel(config.depth_format));
|
||||
bool using_color_fb = config.GetColorBufferPhysicalAddress() != 0;
|
||||
bool depth_write_enable = regs.framebuffer.output_merger.depth_write_enable &&
|
||||
regs.framebuffer.framebuffer.allow_depth_stencil_write;
|
||||
bool using_depth_fb = config.GetDepthBufferPhysicalAddress() != 0 &&
|
||||
(regs.framebuffer.output_merger.depth_test_enable || depth_write_enable ||
|
||||
!framebuffers_overlap);
|
||||
|
||||
if (framebuffers_overlap && using_color_fb && using_depth_fb) {
|
||||
LOG_CRITICAL(Render_OpenGL, "Color and depth framebuffer memory regions overlap; "
|
||||
"overlapping framebuffers not supported!");
|
||||
using_depth_fb = false;
|
||||
}
|
||||
|
||||
// get color and depth surfaces
|
||||
CachedSurface color_params;
|
||||
CachedSurface depth_params;
|
||||
color_params.width = depth_params.width = config.GetWidth();
|
||||
color_params.height = depth_params.height = config.GetHeight();
|
||||
color_params.is_tiled = depth_params.is_tiled = true;
|
||||
|
||||
// Set the internal resolution, assume the same scaling factor for top and bottom screens
|
||||
float resolution_scale_factor = Settings::values.resolution_factor;
|
||||
if (resolution_scale_factor == 0.0f) {
|
||||
// Auto - scale resolution to the window size
|
||||
resolution_scale_factor = VideoCore::g_emu_window->GetFramebufferLayout().GetScalingRatio();
|
||||
}
|
||||
// Scale the resolution by the specified factor
|
||||
color_params.res_scale_width = resolution_scale_factor;
|
||||
depth_params.res_scale_width = resolution_scale_factor;
|
||||
color_params.res_scale_height = resolution_scale_factor;
|
||||
depth_params.res_scale_height = resolution_scale_factor;
|
||||
|
||||
color_params.addr = config.GetColorBufferPhysicalAddress();
|
||||
color_params.pixel_format = CachedSurface::PixelFormatFromColorFormat(config.color_format);
|
||||
|
||||
depth_params.addr = config.GetDepthBufferPhysicalAddress();
|
||||
depth_params.pixel_format = CachedSurface::PixelFormatFromDepthFormat(config.depth_format);
|
||||
|
||||
MathUtil::Rectangle<int> color_rect;
|
||||
CachedSurface* color_surface =
|
||||
using_color_fb ? GetSurfaceRect(color_params, true, true, color_rect) : nullptr;
|
||||
|
||||
MathUtil::Rectangle<int> depth_rect;
|
||||
CachedSurface* depth_surface =
|
||||
using_depth_fb ? GetSurfaceRect(depth_params, true, true, depth_rect) : nullptr;
|
||||
|
||||
// Sanity check to make sure found surfaces aren't the same
|
||||
if (using_depth_fb && using_color_fb && color_surface == depth_surface) {
|
||||
LOG_CRITICAL(
|
||||
Render_OpenGL,
|
||||
"Color and depth framebuffer surfaces overlap; overlapping surfaces not supported!");
|
||||
using_depth_fb = false;
|
||||
depth_surface = nullptr;
|
||||
}
|
||||
|
||||
MathUtil::Rectangle<int> rect;
|
||||
|
||||
if (color_surface != nullptr && depth_surface != nullptr &&
|
||||
(depth_rect.left != color_rect.left || depth_rect.top != color_rect.top)) {
|
||||
// Can't specify separate color and depth viewport offsets in OpenGL, so re-zero both if
|
||||
// they don't match
|
||||
if (color_rect.left != 0 || color_rect.top != 0) {
|
||||
color_surface = GetSurface(color_params, true, true);
|
||||
}
|
||||
|
||||
if (depth_rect.left != 0 || depth_rect.top != 0) {
|
||||
depth_surface = GetSurface(depth_params, true, true);
|
||||
}
|
||||
|
||||
if (!color_surface->is_tiled) {
|
||||
rect = MathUtil::Rectangle<int>(
|
||||
0, 0, (int)(color_params.width * color_params.res_scale_width),
|
||||
(int)(color_params.height * color_params.res_scale_height));
|
||||
} else {
|
||||
rect = MathUtil::Rectangle<int>(
|
||||
0, (int)(color_params.height * color_params.res_scale_height),
|
||||
(int)(color_params.width * color_params.res_scale_width), 0);
|
||||
}
|
||||
} else if (color_surface != nullptr) {
|
||||
rect = color_rect;
|
||||
} else if (depth_surface != nullptr) {
|
||||
rect = depth_rect;
|
||||
} else {
|
||||
rect = MathUtil::Rectangle<int>(0, 0, 0, 0);
|
||||
}
|
||||
|
||||
return std::make_tuple(color_surface, depth_surface, rect);
|
||||
}
|
||||
|
||||
CachedSurface* RasterizerCacheOpenGL::TryGetFillSurface(const GPU::Regs::MemoryFillConfig& config) {
|
||||
auto surface_interval =
|
||||
boost::icl::interval<PAddr>::right_open(config.GetStartAddress(), config.GetEndAddress());
|
||||
auto range = surface_cache.equal_range(surface_interval);
|
||||
for (auto it = range.first; it != range.second; ++it) {
|
||||
for (auto it2 = it->second.begin(); it2 != it->second.end(); ++it2) {
|
||||
int bits_per_value = 0;
|
||||
if (config.fill_24bit) {
|
||||
bits_per_value = 24;
|
||||
} else if (config.fill_32bit) {
|
||||
bits_per_value = 32;
|
||||
} else {
|
||||
bits_per_value = 16;
|
||||
}
|
||||
|
||||
CachedSurface* surface = it2->get();
|
||||
|
||||
if (surface->addr == config.GetStartAddress() &&
|
||||
CachedSurface::GetFormatBpp(surface->pixel_format) == bits_per_value &&
|
||||
(surface->width * surface->height *
|
||||
CachedSurface::GetFormatBpp(surface->pixel_format) / 8) ==
|
||||
(config.GetEndAddress() - config.GetStartAddress())) {
|
||||
return surface;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
MICROPROFILE_DEFINE(OpenGL_SurfaceDownload, "OpenGL", "Surface Download", MP_RGB(128, 192, 64));
|
||||
void RasterizerCacheOpenGL::FlushSurface(CachedSurface* surface) {
|
||||
using PixelFormat = CachedSurface::PixelFormat;
|
||||
using SurfaceType = CachedSurface::SurfaceType;
|
||||
|
||||
if (!surface->dirty) {
|
||||
return;
|
||||
}
|
||||
|
||||
MICROPROFILE_SCOPE(OpenGL_SurfaceDownload);
|
||||
|
||||
u8* dst_buffer = Memory::GetPhysicalPointer(surface->addr);
|
||||
if (dst_buffer == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
OpenGLState cur_state = OpenGLState::GetCurState();
|
||||
GLuint old_tex = cur_state.texture_units[0].texture_2d;
|
||||
|
||||
OGLTexture unscaled_tex;
|
||||
GLuint texture_to_flush = surface->texture.handle;
|
||||
|
||||
// If not 1x scale, blit scaled texture to a new 1x texture and use that to flush
|
||||
if (surface->res_scale_width != 1.f || surface->res_scale_height != 1.f) {
|
||||
unscaled_tex.Create();
|
||||
|
||||
AllocateSurfaceTexture(unscaled_tex.handle, surface->pixel_format, surface->width,
|
||||
surface->height);
|
||||
BlitTextures(
|
||||
surface->texture.handle, unscaled_tex.handle,
|
||||
CachedSurface::GetFormatType(surface->pixel_format),
|
||||
MathUtil::Rectangle<int>(0, 0, surface->GetScaledWidth(), surface->GetScaledHeight()),
|
||||
MathUtil::Rectangle<int>(0, 0, surface->width, surface->height));
|
||||
|
||||
texture_to_flush = unscaled_tex.handle;
|
||||
}
|
||||
|
||||
cur_state.texture_units[0].texture_2d = texture_to_flush;
|
||||
cur_state.Apply();
|
||||
glActiveTexture(GL_TEXTURE0);
|
||||
|
||||
if (!surface->is_tiled) {
|
||||
// TODO: Ensure this will always be a color format, not a depth or other format
|
||||
ASSERT((size_t)surface->pixel_format < fb_format_tuples.size());
|
||||
const FormatTuple& tuple = fb_format_tuples[(unsigned int)surface->pixel_format];
|
||||
|
||||
glPixelStorei(GL_PACK_ROW_LENGTH, (GLint)surface->pixel_stride);
|
||||
glGetTexImage(GL_TEXTURE_2D, 0, tuple.format, tuple.type, dst_buffer);
|
||||
glPixelStorei(GL_PACK_ROW_LENGTH, 0);
|
||||
} else {
|
||||
SurfaceType type = CachedSurface::GetFormatType(surface->pixel_format);
|
||||
if (type != SurfaceType::Depth && type != SurfaceType::DepthStencil) {
|
||||
ASSERT((size_t)surface->pixel_format < fb_format_tuples.size());
|
||||
const FormatTuple& tuple = fb_format_tuples[(unsigned int)surface->pixel_format];
|
||||
|
||||
u32 bytes_per_pixel = CachedSurface::GetFormatBpp(surface->pixel_format) / 8;
|
||||
|
||||
std::vector<u8> temp_gl_buffer(surface->width * surface->height * bytes_per_pixel);
|
||||
|
||||
glGetTexImage(GL_TEXTURE_2D, 0, tuple.format, tuple.type, temp_gl_buffer.data());
|
||||
|
||||
// Directly copy pixels. Internal OpenGL color formats are consistent so no conversion
|
||||
// is necessary.
|
||||
MortonCopyPixels(surface->pixel_format, surface->width, surface->height,
|
||||
bytes_per_pixel, bytes_per_pixel, dst_buffer, temp_gl_buffer.data(),
|
||||
false);
|
||||
} else {
|
||||
// Depth/Stencil formats need special treatment since they aren't sampleable using
|
||||
// LookupTexture and can't use RGBA format
|
||||
size_t tuple_idx = (size_t)surface->pixel_format - 14;
|
||||
ASSERT(tuple_idx < depth_format_tuples.size());
|
||||
const FormatTuple& tuple = depth_format_tuples[tuple_idx];
|
||||
|
||||
u32 bytes_per_pixel = CachedSurface::GetFormatBpp(surface->pixel_format) / 8;
|
||||
|
||||
// OpenGL needs 4 bpp alignment for D24 since using GL_UNSIGNED_INT as type
|
||||
bool use_4bpp = (surface->pixel_format == PixelFormat::D24);
|
||||
|
||||
u32 gl_bytes_per_pixel = use_4bpp ? 4 : bytes_per_pixel;
|
||||
|
||||
std::vector<u8> temp_gl_buffer(surface->width * surface->height * gl_bytes_per_pixel);
|
||||
|
||||
glGetTexImage(GL_TEXTURE_2D, 0, tuple.format, tuple.type, temp_gl_buffer.data());
|
||||
|
||||
u8* temp_gl_buffer_ptr = use_4bpp ? temp_gl_buffer.data() + 1 : temp_gl_buffer.data();
|
||||
|
||||
MortonCopyPixels(surface->pixel_format, surface->width, surface->height,
|
||||
bytes_per_pixel, gl_bytes_per_pixel, dst_buffer, temp_gl_buffer_ptr,
|
||||
false);
|
||||
}
|
||||
}
|
||||
|
||||
surface->dirty = false;
|
||||
|
||||
cur_state.texture_units[0].texture_2d = old_tex;
|
||||
cur_state.Apply();
|
||||
}
|
||||
|
||||
void RasterizerCacheOpenGL::FlushRegion(PAddr addr, u32 size, const CachedSurface* skip_surface,
|
||||
bool invalidate) {
|
||||
if (size == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Gather up unique surfaces that touch the region
|
||||
std::unordered_set<std::shared_ptr<CachedSurface>> touching_surfaces;
|
||||
|
||||
auto surface_interval = boost::icl::interval<PAddr>::right_open(addr, addr + size);
|
||||
auto cache_upper_bound = surface_cache.upper_bound(surface_interval);
|
||||
for (auto it = surface_cache.lower_bound(surface_interval); it != cache_upper_bound; ++it) {
|
||||
std::copy_if(it->second.begin(), it->second.end(),
|
||||
std::inserter(touching_surfaces, touching_surfaces.end()),
|
||||
[skip_surface](std::shared_ptr<CachedSurface> surface) {
|
||||
return (surface.get() != skip_surface);
|
||||
});
|
||||
}
|
||||
|
||||
// Flush and invalidate surfaces
|
||||
for (auto surface : touching_surfaces) {
|
||||
FlushSurface(surface.get());
|
||||
if (invalidate) {
|
||||
Memory::RasterizerMarkRegionCached(surface->addr, surface->size, -1);
|
||||
surface_cache.subtract(
|
||||
std::make_pair(boost::icl::interval<PAddr>::right_open(
|
||||
surface->addr, surface->addr + surface->size),
|
||||
std::set<std::shared_ptr<CachedSurface>>({surface})));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void RasterizerCacheOpenGL::FlushAll() {
|
||||
for (auto& surfaces : surface_cache) {
|
||||
for (auto& surface : surfaces.second) {
|
||||
FlushSurface(surface.get());
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,239 +0,0 @@
|
|||
// Copyright 2015 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <memory>
|
||||
#include <set>
|
||||
#include <tuple>
|
||||
#ifdef __GNUC__
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wunused-local-typedef"
|
||||
#endif
|
||||
#include <boost/icl/interval_map.hpp>
|
||||
#ifdef __GNUC__
|
||||
#pragma GCC diagnostic pop
|
||||
#endif
|
||||
#include <glad/glad.h>
|
||||
#include "common/assert.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "core/hw/gpu.h"
|
||||
#include "video_core/regs_framebuffer.h"
|
||||
#include "video_core/regs_texturing.h"
|
||||
#include "video_core/renderer_opengl/gl_resource_manager.h"
|
||||
|
||||
namespace MathUtil {
|
||||
template <class T>
|
||||
struct Rectangle;
|
||||
}
|
||||
|
||||
struct CachedSurface;
|
||||
|
||||
using SurfaceCache = boost::icl::interval_map<PAddr, std::set<std::shared_ptr<CachedSurface>>>;
|
||||
|
||||
struct CachedSurface {
|
||||
enum class PixelFormat {
|
||||
// First 5 formats are shared between textures and color buffers
|
||||
RGBA8 = 0,
|
||||
RGB8 = 1,
|
||||
RGB5A1 = 2,
|
||||
RGB565 = 3,
|
||||
RGBA4 = 4,
|
||||
|
||||
// Texture-only formats
|
||||
IA8 = 5,
|
||||
RG8 = 6,
|
||||
I8 = 7,
|
||||
A8 = 8,
|
||||
IA4 = 9,
|
||||
I4 = 10,
|
||||
A4 = 11,
|
||||
ETC1 = 12,
|
||||
ETC1A4 = 13,
|
||||
|
||||
// Depth buffer-only formats
|
||||
D16 = 14,
|
||||
// gap
|
||||
D24 = 16,
|
||||
D24S8 = 17,
|
||||
|
||||
Invalid = 255,
|
||||
};
|
||||
|
||||
enum class SurfaceType {
|
||||
Color = 0,
|
||||
Texture = 1,
|
||||
Depth = 2,
|
||||
DepthStencil = 3,
|
||||
Invalid = 4,
|
||||
};
|
||||
|
||||
static unsigned int GetFormatBpp(CachedSurface::PixelFormat format) {
|
||||
static const std::array<unsigned int, 18> bpp_table = {
|
||||
32, // RGBA8
|
||||
24, // RGB8
|
||||
16, // RGB5A1
|
||||
16, // RGB565
|
||||
16, // RGBA4
|
||||
16, // IA8
|
||||
16, // RG8
|
||||
8, // I8
|
||||
8, // A8
|
||||
8, // IA4
|
||||
4, // I4
|
||||
4, // A4
|
||||
4, // ETC1
|
||||
8, // ETC1A4
|
||||
16, // D16
|
||||
0,
|
||||
24, // D24
|
||||
32, // D24S8
|
||||
};
|
||||
|
||||
ASSERT((unsigned int)format < ARRAY_SIZE(bpp_table));
|
||||
return bpp_table[(unsigned int)format];
|
||||
}
|
||||
|
||||
static PixelFormat PixelFormatFromTextureFormat(Pica::TexturingRegs::TextureFormat format) {
|
||||
return ((unsigned int)format < 14) ? (PixelFormat)format : PixelFormat::Invalid;
|
||||
}
|
||||
|
||||
static PixelFormat PixelFormatFromColorFormat(Pica::FramebufferRegs::ColorFormat format) {
|
||||
return ((unsigned int)format < 5) ? (PixelFormat)format : PixelFormat::Invalid;
|
||||
}
|
||||
|
||||
static PixelFormat PixelFormatFromDepthFormat(Pica::FramebufferRegs::DepthFormat format) {
|
||||
return ((unsigned int)format < 4) ? (PixelFormat)((unsigned int)format + 14)
|
||||
: PixelFormat::Invalid;
|
||||
}
|
||||
|
||||
static PixelFormat PixelFormatFromGPUPixelFormat(GPU::Regs::PixelFormat format) {
|
||||
switch (format) {
|
||||
// RGB565 and RGB5A1 are switched in PixelFormat compared to ColorFormat
|
||||
case GPU::Regs::PixelFormat::RGB565:
|
||||
return PixelFormat::RGB565;
|
||||
case GPU::Regs::PixelFormat::RGB5A1:
|
||||
return PixelFormat::RGB5A1;
|
||||
default:
|
||||
return ((unsigned int)format < 5) ? (PixelFormat)format : PixelFormat::Invalid;
|
||||
}
|
||||
}
|
||||
|
||||
static bool CheckFormatsBlittable(PixelFormat pixel_format_a, PixelFormat pixel_format_b) {
|
||||
SurfaceType a_type = GetFormatType(pixel_format_a);
|
||||
SurfaceType b_type = GetFormatType(pixel_format_b);
|
||||
|
||||
if ((a_type == SurfaceType::Color || a_type == SurfaceType::Texture) &&
|
||||
(b_type == SurfaceType::Color || b_type == SurfaceType::Texture)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (a_type == SurfaceType::Depth && b_type == SurfaceType::Depth) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (a_type == SurfaceType::DepthStencil && b_type == SurfaceType::DepthStencil) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static SurfaceType GetFormatType(PixelFormat pixel_format) {
|
||||
if ((unsigned int)pixel_format < 5) {
|
||||
return SurfaceType::Color;
|
||||
}
|
||||
|
||||
if ((unsigned int)pixel_format < 14) {
|
||||
return SurfaceType::Texture;
|
||||
}
|
||||
|
||||
if (pixel_format == PixelFormat::D16 || pixel_format == PixelFormat::D24) {
|
||||
return SurfaceType::Depth;
|
||||
}
|
||||
|
||||
if (pixel_format == PixelFormat::D24S8) {
|
||||
return SurfaceType::DepthStencil;
|
||||
}
|
||||
|
||||
return SurfaceType::Invalid;
|
||||
}
|
||||
|
||||
u32 GetScaledWidth() const {
|
||||
return (u32)(width * res_scale_width);
|
||||
}
|
||||
|
||||
u32 GetScaledHeight() const {
|
||||
return (u32)(height * res_scale_height);
|
||||
}
|
||||
|
||||
PAddr addr;
|
||||
u32 size;
|
||||
|
||||
PAddr min_valid;
|
||||
PAddr max_valid;
|
||||
|
||||
OGLTexture texture;
|
||||
u32 width;
|
||||
u32 height;
|
||||
/// Stride between lines, in pixels. Only valid for images in linear format.
|
||||
u32 pixel_stride = 0;
|
||||
float res_scale_width = 1.f;
|
||||
float res_scale_height = 1.f;
|
||||
|
||||
bool is_tiled;
|
||||
PixelFormat pixel_format;
|
||||
bool dirty;
|
||||
};
|
||||
|
||||
class RasterizerCacheOpenGL : NonCopyable {
|
||||
public:
|
||||
RasterizerCacheOpenGL();
|
||||
~RasterizerCacheOpenGL();
|
||||
|
||||
/// Blits one texture to another
|
||||
void BlitTextures(GLuint src_tex, GLuint dst_tex, CachedSurface::SurfaceType type,
|
||||
const MathUtil::Rectangle<int>& src_rect,
|
||||
const MathUtil::Rectangle<int>& dst_rect);
|
||||
|
||||
/// Attempt to blit one surface's texture to another
|
||||
bool TryBlitSurfaces(CachedSurface* src_surface, const MathUtil::Rectangle<int>& src_rect,
|
||||
CachedSurface* dst_surface, const MathUtil::Rectangle<int>& dst_rect);
|
||||
|
||||
/// Loads a texture from 3DS memory to OpenGL and caches it (if not already cached)
|
||||
CachedSurface* GetSurface(const CachedSurface& params, bool match_res_scale,
|
||||
bool load_if_create);
|
||||
|
||||
/// Attempt to find a subrect (resolution scaled) of a surface, otherwise loads a texture from
|
||||
/// 3DS memory to OpenGL and caches it (if not already cached)
|
||||
CachedSurface* GetSurfaceRect(const CachedSurface& params, bool match_res_scale,
|
||||
bool load_if_create, MathUtil::Rectangle<int>& out_rect);
|
||||
|
||||
/// Gets a surface based on the texture configuration
|
||||
CachedSurface* GetTextureSurface(const Pica::TexturingRegs::FullTextureConfig& config);
|
||||
|
||||
/// Gets the color and depth surfaces and rect (resolution scaled) based on the framebuffer
|
||||
/// configuration
|
||||
std::tuple<CachedSurface*, CachedSurface*, MathUtil::Rectangle<int>> GetFramebufferSurfaces(
|
||||
const Pica::FramebufferRegs::FramebufferConfig& config);
|
||||
|
||||
/// Attempt to get a surface that exactly matches the fill region and format
|
||||
CachedSurface* TryGetFillSurface(const GPU::Regs::MemoryFillConfig& config);
|
||||
|
||||
/// Write the surface back to memory
|
||||
void FlushSurface(CachedSurface* surface);
|
||||
|
||||
/// Write any cached resources overlapping the region back to memory (if dirty) and optionally
|
||||
/// invalidate them in the cache
|
||||
void FlushRegion(PAddr addr, u32 size, const CachedSurface* skip_surface, bool invalidate);
|
||||
|
||||
/// Flush all cached resources tracked by this cache manager
|
||||
void FlushAll();
|
||||
|
||||
private:
|
||||
SurfaceCache surface_cache;
|
||||
OGLFramebuffer transfer_framebuffers[2];
|
||||
};
|
File diff suppressed because it is too large
|
@@ -1,162 +0,0 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <cstring>
#include <functional>
#include <string>
#include <type_traits>
#include "video_core/regs.h"

namespace GLShader {

enum Attributes {
    ATTRIBUTE_POSITION,
    ATTRIBUTE_COLOR,
    ATTRIBUTE_TEXCOORD0,
    ATTRIBUTE_TEXCOORD1,
    ATTRIBUTE_TEXCOORD2,
    ATTRIBUTE_TEXCOORD0_W,
    ATTRIBUTE_NORMQUAT,
    ATTRIBUTE_VIEW,
};

/**
 * This struct contains all state used to generate the GLSL shader program that emulates the current
 * Pica register configuration. This struct is used as a cache key for generated GLSL shader
 * programs. The functions in gl_shader_gen.cpp should retrieve state from this struct only, not by
 * directly accessing Pica registers. This should reduce the risk of bugs in shader generation where
 * Pica state is not being captured in the shader cache key, thereby resulting in (what should be)
 * two separate shaders sharing the same key.
 *
 * We use a union because "implicitly-defined copy/move constructor for a union X copies the object
 * representation of X." and "implicitly-defined copy assignment operator for a union X copies the
 * object representation (3.9) of X." = Bytewise copy instead of memberwise copy. This is important
 * because the padding bytes are included in the hash and comparison between objects.
 */
union PicaShaderConfig {

    /// Construct a PicaShaderConfig with the given Pica register configuration.
    static PicaShaderConfig BuildFromRegs(const Pica::Regs& regs);

    bool TevStageUpdatesCombinerBufferColor(unsigned stage_index) const {
        return (stage_index < 4) && (state.combiner_buffer_input & (1 << stage_index));
    }

    bool TevStageUpdatesCombinerBufferAlpha(unsigned stage_index) const {
        return (stage_index < 4) && ((state.combiner_buffer_input >> 4) & (1 << stage_index));
    }

    bool operator==(const PicaShaderConfig& o) const {
        return std::memcmp(&state, &o.state, sizeof(PicaShaderConfig::State)) == 0;
    };

    // NOTE: MSVC15 (Update 2) doesn't think `delete`'d constructors and operators are TC.
    // This makes BitField not TC when used in a union or struct so we have to resort
    // to this ugly hack.
    // Once that bug is fixed we can use Pica::Regs::TevStageConfig here.
    // Doesn't include const_color because we don't sync it, see comment in BuildFromRegs()
    struct TevStageConfigRaw {
        u32 sources_raw;
        u32 modifiers_raw;
        u32 ops_raw;
        u32 scales_raw;
        explicit operator Pica::TexturingRegs::TevStageConfig() const noexcept {
            Pica::TexturingRegs::TevStageConfig stage;
            stage.sources_raw = sources_raw;
            stage.modifiers_raw = modifiers_raw;
            stage.ops_raw = ops_raw;
            stage.const_color = 0;
            stage.scales_raw = scales_raw;
            return stage;
        }
    };

    struct State {
        Pica::FramebufferRegs::CompareFunc alpha_test_func;
        Pica::RasterizerRegs::ScissorMode scissor_test_mode;
        Pica::TexturingRegs::TextureConfig::TextureType texture0_type;
        bool texture2_use_coord1;
        std::array<TevStageConfigRaw, 6> tev_stages;
        u8 combiner_buffer_input;

        Pica::RasterizerRegs::DepthBuffering depthmap_enable;
        Pica::TexturingRegs::FogMode fog_mode;
        bool fog_flip;

        struct {
            struct {
                unsigned num;
                bool directional;
                bool two_sided_diffuse;
                bool dist_atten_enable;
                bool spot_atten_enable;
                bool geometric_factor_0;
                bool geometric_factor_1;
            } light[8];

            bool enable;
            unsigned src_num;
            Pica::LightingRegs::LightingBumpMode bump_mode;
            unsigned bump_selector;
            bool bump_renorm;
            bool clamp_highlights;

            Pica::LightingRegs::LightingConfig config;
            Pica::LightingRegs::LightingFresnelSelector fresnel_selector;

            struct {
                bool enable;
                bool abs_input;
                Pica::LightingRegs::LightingLutInput type;
                float scale;
            } lut_d0, lut_d1, lut_sp, lut_fr, lut_rr, lut_rg, lut_rb;
        } lighting;

        struct {
            bool enable;
            u32 coord;
            Pica::TexturingRegs::ProcTexClamp u_clamp, v_clamp;
            Pica::TexturingRegs::ProcTexCombiner color_combiner, alpha_combiner;
            bool separate_alpha;
            bool noise_enable;
            Pica::TexturingRegs::ProcTexShift u_shift, v_shift;
            u32 lut_width;
            u32 lut_offset;
            Pica::TexturingRegs::ProcTexFilter lut_filter;
        } proctex;

    } state;
};
#if (__GNUC__ >= 5) || defined(__clang__) || defined(_MSC_VER)
static_assert(std::is_trivially_copyable<PicaShaderConfig::State>::value,
              "PicaShaderConfig::State must be trivially copyable");
#endif

/**
 * Generates the GLSL vertex shader program source code for the current Pica state
 * @returns String of the shader source code
 */
std::string GenerateVertexShader();

/**
 * Generates the GLSL fragment shader program source code for the current Pica state
 * @param config ShaderCacheKey object generated for the current Pica state, used for the shader
 * configuration (NOTE: Use state in this struct only, not the Pica registers!)
 * @returns String of the shader source code
 */
std::string GenerateFragmentShader(const PicaShaderConfig& config);

} // namespace GLShader

namespace std {
template <>
struct hash<GLShader::PicaShaderConfig> {
    size_t operator()(const GLShader::PicaShaderConfig& k) const {
        return Common::ComputeHash64(&k.state, sizeof(GLShader::PicaShaderConfig::State));
    }
};
} // namespace std
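The header removed above keys the GLSL program cache on a trivially copyable State struct, compares keys with memcmp, and hashes the raw object bytes, which is why it zero-fills unused fields and asserts is_trivially_copyable. A minimal standalone sketch of the same cache-key pattern, with generic names that are not from the codebase and a plain FNV-1a loop standing in for Common::ComputeHash64:

#include <cstddef>
#include <cstring>
#include <type_traits>
#include <unordered_map>

struct ShaderKey {
    // Must stay trivially copyable: hashing and equality below look at raw bytes,
    // so the struct should be zero-initialized before fields are filled in.
    struct State {
        unsigned alpha_test_func;
        unsigned scissor_mode;
        bool texture2_use_coord1;
    } state{};

    bool operator==(const ShaderKey& o) const {
        return std::memcmp(&state, &o.state, sizeof(State)) == 0;
    }
};
static_assert(std::is_trivially_copyable<ShaderKey::State>::value,
              "ShaderKey::State must be trivially copyable");

struct ShaderKeyHash {
    std::size_t operator()(const ShaderKey& k) const {
        // FNV-1a over the object representation, in place of ComputeHash64.
        std::size_t h = 14695981039346656037ull;
        const unsigned char* p = reinterpret_cast<const unsigned char*>(&k.state);
        for (std::size_t i = 0; i < sizeof(ShaderKey::State); ++i)
            h = (h ^ p[i]) * 1099511628211ull;
        return h;
    }
};

// Usage: a program cache keyed on register-derived state, e.g.
// std::unordered_map<ShaderKey, unsigned /*GL program*/, ShaderKeyHash> shader_cache;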
@@ -1,235 +0,0 @@
|
|||
// Copyright 2015 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <cstddef>
|
||||
#include <glad/glad.h>
|
||||
#include "common/assert.h"
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "video_core/regs_framebuffer.h"
|
||||
#include "video_core/regs_lighting.h"
|
||||
#include "video_core/regs_texturing.h"
|
||||
|
||||
using GLvec2 = std::array<GLfloat, 2>;
|
||||
using GLvec3 = std::array<GLfloat, 3>;
|
||||
using GLvec4 = std::array<GLfloat, 4>;
|
||||
|
||||
namespace PicaToGL {
|
||||
|
||||
inline GLenum TextureFilterMode(Pica::TexturingRegs::TextureConfig::TextureFilter mode) {
|
||||
static const GLenum filter_mode_table[] = {
|
||||
GL_NEAREST, // TextureFilter::Nearest
|
||||
GL_LINEAR, // TextureFilter::Linear
|
||||
};
|
||||
|
||||
// Range check table for input
|
||||
if (static_cast<size_t>(mode) >= ARRAY_SIZE(filter_mode_table)) {
|
||||
LOG_CRITICAL(Render_OpenGL, "Unknown texture filtering mode %d", mode);
|
||||
UNREACHABLE();
|
||||
|
||||
return GL_LINEAR;
|
||||
}
|
||||
|
||||
GLenum gl_mode = filter_mode_table[mode];
|
||||
|
||||
// Check for dummy values indicating an unknown mode
|
||||
if (gl_mode == 0) {
|
||||
LOG_CRITICAL(Render_OpenGL, "Unknown texture filtering mode %d", mode);
|
||||
UNIMPLEMENTED();
|
||||
|
||||
return GL_LINEAR;
|
||||
}
|
||||
|
||||
return gl_mode;
|
||||
}
|
||||
|
||||
inline GLenum WrapMode(Pica::TexturingRegs::TextureConfig::WrapMode mode) {
|
||||
static const GLenum wrap_mode_table[] = {
|
||||
GL_CLAMP_TO_EDGE, // WrapMode::ClampToEdge
|
||||
GL_CLAMP_TO_BORDER, // WrapMode::ClampToBorder
|
||||
GL_REPEAT, // WrapMode::Repeat
|
||||
GL_MIRRORED_REPEAT, // WrapMode::MirroredRepeat
|
||||
// TODO(wwylele): ClampToEdge2 and ClampToBorder2 are not properly implemented here. See the
|
||||
// comments in enum WrapMode.
|
||||
GL_CLAMP_TO_EDGE, // WrapMode::ClampToEdge2
|
||||
GL_CLAMP_TO_BORDER, // WrapMode::ClampToBorder2
|
||||
GL_REPEAT, // WrapMode::Repeat2
|
||||
GL_REPEAT, // WrapMode::Repeat3
|
||||
};
|
||||
|
||||
// Range check table for input
|
||||
if (static_cast<size_t>(mode) >= ARRAY_SIZE(wrap_mode_table)) {
|
||||
LOG_CRITICAL(Render_OpenGL, "Unknown texture wrap mode %d", mode);
|
||||
UNREACHABLE();
|
||||
|
||||
return GL_CLAMP_TO_EDGE;
|
||||
}
|
||||
|
||||
if (static_cast<u32>(mode) > 3) {
|
||||
Core::Telemetry().AddField(Telemetry::FieldType::Session,
|
||||
"VideoCore_Pica_UnsupportedTextureWrapMode",
|
||||
static_cast<u32>(mode));
|
||||
LOG_WARNING(Render_OpenGL, "Using texture wrap mode %u", static_cast<u32>(mode));
|
||||
}
|
||||
|
||||
GLenum gl_mode = wrap_mode_table[mode];
|
||||
|
||||
// Check for dummy values indicating an unknown mode
|
||||
if (gl_mode == 0) {
|
||||
LOG_CRITICAL(Render_OpenGL, "Unknown texture wrap mode %d", mode);
|
||||
UNIMPLEMENTED();
|
||||
|
||||
return GL_CLAMP_TO_EDGE;
|
||||
}
|
||||
|
||||
return gl_mode;
|
||||
}
|
||||
|
||||
inline GLenum BlendEquation(Pica::FramebufferRegs::BlendEquation equation) {
|
||||
static const GLenum blend_equation_table[] = {
|
||||
GL_FUNC_ADD, // BlendEquation::Add
|
||||
GL_FUNC_SUBTRACT, // BlendEquation::Subtract
|
||||
GL_FUNC_REVERSE_SUBTRACT, // BlendEquation::ReverseSubtract
|
||||
GL_MIN, // BlendEquation::Min
|
||||
GL_MAX, // BlendEquation::Max
|
||||
};
|
||||
|
||||
// Range check table for input
|
||||
if (static_cast<size_t>(equation) >= ARRAY_SIZE(blend_equation_table)) {
|
||||
LOG_CRITICAL(Render_OpenGL, "Unknown blend equation %d", equation);
|
||||
UNREACHABLE();
|
||||
|
||||
return GL_FUNC_ADD;
|
||||
}
|
||||
|
||||
return blend_equation_table[(unsigned)equation];
|
||||
}
|
||||
|
||||
inline GLenum BlendFunc(Pica::FramebufferRegs::BlendFactor factor) {
|
||||
static const GLenum blend_func_table[] = {
|
||||
GL_ZERO, // BlendFactor::Zero
|
||||
GL_ONE, // BlendFactor::One
|
||||
GL_SRC_COLOR, // BlendFactor::SourceColor
|
||||
GL_ONE_MINUS_SRC_COLOR, // BlendFactor::OneMinusSourceColor
|
||||
GL_DST_COLOR, // BlendFactor::DestColor
|
||||
GL_ONE_MINUS_DST_COLOR, // BlendFactor::OneMinusDestColor
|
||||
GL_SRC_ALPHA, // BlendFactor::SourceAlpha
|
||||
GL_ONE_MINUS_SRC_ALPHA, // BlendFactor::OneMinusSourceAlpha
|
||||
GL_DST_ALPHA, // BlendFactor::DestAlpha
|
||||
GL_ONE_MINUS_DST_ALPHA, // BlendFactor::OneMinusDestAlpha
|
||||
GL_CONSTANT_COLOR, // BlendFactor::ConstantColor
|
||||
GL_ONE_MINUS_CONSTANT_COLOR, // BlendFactor::OneMinusConstantColor
|
||||
GL_CONSTANT_ALPHA, // BlendFactor::ConstantAlpha
|
||||
GL_ONE_MINUS_CONSTANT_ALPHA, // BlendFactor::OneMinusConstantAlpha
|
||||
GL_SRC_ALPHA_SATURATE, // BlendFactor::SourceAlphaSaturate
|
||||
};
|
||||
|
||||
// Range check table for input
|
||||
if (static_cast<size_t>(factor) >= ARRAY_SIZE(blend_func_table)) {
|
||||
LOG_CRITICAL(Render_OpenGL, "Unknown blend factor %d", factor);
|
||||
UNREACHABLE();
|
||||
|
||||
return GL_ONE;
|
||||
}
|
||||
|
||||
return blend_func_table[(unsigned)factor];
|
||||
}
|
||||
|
||||
inline GLenum LogicOp(Pica::FramebufferRegs::LogicOp op) {
|
||||
static const GLenum logic_op_table[] = {
|
||||
GL_CLEAR, // Clear
|
||||
GL_AND, // And
|
||||
GL_AND_REVERSE, // AndReverse
|
||||
GL_COPY, // Copy
|
||||
GL_SET, // Set
|
||||
GL_COPY_INVERTED, // CopyInverted
|
||||
GL_NOOP, // NoOp
|
||||
GL_INVERT, // Invert
|
||||
GL_NAND, // Nand
|
||||
GL_OR, // Or
|
||||
GL_NOR, // Nor
|
||||
GL_XOR, // Xor
|
||||
GL_EQUIV, // Equiv
|
||||
GL_AND_INVERTED, // AndInverted
|
||||
GL_OR_REVERSE, // OrReverse
|
||||
GL_OR_INVERTED, // OrInverted
|
||||
};
|
||||
|
||||
// Range check table for input
|
||||
if (static_cast<size_t>(op) >= ARRAY_SIZE(logic_op_table)) {
|
||||
LOG_CRITICAL(Render_OpenGL, "Unknown logic op %d", op);
|
||||
UNREACHABLE();
|
||||
|
||||
return GL_COPY;
|
||||
}
|
||||
|
||||
return logic_op_table[(unsigned)op];
|
||||
}
|
||||
|
||||
inline GLenum CompareFunc(Pica::FramebufferRegs::CompareFunc func) {
|
||||
static const GLenum compare_func_table[] = {
|
||||
GL_NEVER, // CompareFunc::Never
|
||||
GL_ALWAYS, // CompareFunc::Always
|
||||
GL_EQUAL, // CompareFunc::Equal
|
||||
GL_NOTEQUAL, // CompareFunc::NotEqual
|
||||
GL_LESS, // CompareFunc::LessThan
|
||||
GL_LEQUAL, // CompareFunc::LessThanOrEqual
|
||||
GL_GREATER, // CompareFunc::GreaterThan
|
||||
GL_GEQUAL, // CompareFunc::GreaterThanOrEqual
|
||||
};
|
||||
|
||||
// Range check table for input
|
||||
if (static_cast<size_t>(func) >= ARRAY_SIZE(compare_func_table)) {
|
||||
LOG_CRITICAL(Render_OpenGL, "Unknown compare function %d", func);
|
||||
UNREACHABLE();
|
||||
|
||||
return GL_ALWAYS;
|
||||
}
|
||||
|
||||
return compare_func_table[(unsigned)func];
|
||||
}
|
||||
|
||||
inline GLenum StencilOp(Pica::FramebufferRegs::StencilAction action) {
|
||||
static const GLenum stencil_op_table[] = {
|
||||
GL_KEEP, // StencilAction::Keep
|
||||
GL_ZERO, // StencilAction::Zero
|
||||
GL_REPLACE, // StencilAction::Replace
|
||||
GL_INCR, // StencilAction::Increment
|
||||
GL_DECR, // StencilAction::Decrement
|
||||
GL_INVERT, // StencilAction::Invert
|
||||
GL_INCR_WRAP, // StencilAction::IncrementWrap
|
||||
GL_DECR_WRAP, // StencilAction::DecrementWrap
|
||||
};
|
||||
|
||||
// Range check table for input
|
||||
if (static_cast<size_t>(action) >= ARRAY_SIZE(stencil_op_table)) {
|
||||
LOG_CRITICAL(Render_OpenGL, "Unknown stencil op %d", action);
|
||||
UNREACHABLE();
|
||||
|
||||
return GL_KEEP;
|
||||
}
|
||||
|
||||
return stencil_op_table[(unsigned)action];
|
||||
}
|
||||
|
||||
inline GLvec4 ColorRGBA8(const u32 color) {
|
||||
return {{
|
||||
(color >> 0 & 0xFF) / 255.0f, (color >> 8 & 0xFF) / 255.0f, (color >> 16 & 0xFF) / 255.0f,
|
||||
(color >> 24 & 0xFF) / 255.0f,
|
||||
}};
|
||||
}
|
||||
|
||||
inline std::array<GLfloat, 3> LightColor(const Pica::LightingRegs::LightColor& color) {
|
||||
return {{
|
||||
color.r / 255.0f, color.g / 255.0f, color.b / 255.0f,
|
||||
}};
|
||||
}
|
||||
|
||||
} // namespace
|
|
@@ -13,14 +13,11 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/frontend/emu_window.h"
#include "core/hw/gpu.h"
#include "core/hw/hw.h"
#include "core/hw/lcd.h"
#include "core/memory.h"
#include "core/settings.h"
#include "core/tracer/recorder.h"
#include "video_core/debug_utils/debug_utils.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_opengl/renderer_opengl.h"
#include "video_core/video_core.h"

@@ -128,10 +125,6 @@ void RendererOpenGL::SwapBuffers(const FramebufferInfo& framebuffer_info) {

    prev_state.Apply();
    RefreshRasterizerSetting();

    if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
        Pica::g_debug_context->recorder->FrameFinished();
    }
}

static inline u32 MortonInterleave128(u32 x, u32 y) {

@@ -8,7 +8,6 @@
#include <glad/glad.h>
#include "common/common_types.h"
#include "common/math_util.h"
#include "core/hw/gpu.h"
#include "video_core/renderer_base.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_state.h"
@@ -1,186 +0,0 @@
|
|||
// Copyright 2016 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
#include "common/common_types.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "video_core/pica_types.h"
|
||||
|
||||
namespace Pica {
|
||||
namespace Shader {
|
||||
|
||||
/// Helper structure used to keep track of data useful for inspection of shader emulation
|
||||
template <bool full_debugging>
|
||||
struct DebugData;
|
||||
|
||||
template <>
|
||||
struct DebugData<false> {
|
||||
// TODO: Hide these behind and interface and move them to DebugData<true>
|
||||
u32 max_offset = 0; ///< maximum program counter ever reached
|
||||
u32 max_opdesc_id = 0; ///< maximum swizzle pattern index ever used
|
||||
};
|
||||
|
||||
template <>
|
||||
struct DebugData<true> {
|
||||
/// Records store the input and output operands of a particular instruction.
|
||||
struct Record {
|
||||
enum Type {
|
||||
// Floating point arithmetic operands
|
||||
SRC1 = 0x1,
|
||||
SRC2 = 0x2,
|
||||
SRC3 = 0x4,
|
||||
|
||||
// Initial and final output operand value
|
||||
DEST_IN = 0x8,
|
||||
DEST_OUT = 0x10,
|
||||
|
||||
// Current and next instruction offset (in words)
|
||||
CUR_INSTR = 0x20,
|
||||
NEXT_INSTR = 0x40,
|
||||
|
||||
// Output address register value
|
||||
ADDR_REG_OUT = 0x80,
|
||||
|
||||
// Result of a comparison instruction
|
||||
CMP_RESULT = 0x100,
|
||||
|
||||
// Input values for conditional flow control instructions
|
||||
COND_BOOL_IN = 0x200,
|
||||
COND_CMP_IN = 0x400,
|
||||
|
||||
// Input values for a loop
|
||||
LOOP_INT_IN = 0x800,
|
||||
};
|
||||
|
||||
Math::Vec4<float24> src1;
|
||||
Math::Vec4<float24> src2;
|
||||
Math::Vec4<float24> src3;
|
||||
|
||||
Math::Vec4<float24> dest_in;
|
||||
Math::Vec4<float24> dest_out;
|
||||
|
||||
s32 address_registers[2];
|
||||
bool conditional_code[2];
|
||||
bool cond_bool;
|
||||
bool cond_cmp[2];
|
||||
Math::Vec4<u8> loop_int;
|
||||
|
||||
u32 instruction_offset;
|
||||
u32 next_instruction;
|
||||
|
||||
/// set of enabled fields (as a combination of Type flags)
|
||||
unsigned mask = 0;
|
||||
};
|
||||
|
||||
u32 max_offset = 0; ///< maximum program counter ever reached
|
||||
u32 max_opdesc_id = 0; ///< maximum swizzle pattern index ever used
|
||||
|
||||
/// List of records for each executed shader instruction
|
||||
std::vector<DebugData<true>::Record> records;
|
||||
};
|
||||
|
||||
/// Type alias for better readability
|
||||
using DebugDataRecord = DebugData<true>::Record;
|
||||
|
||||
/// Helper function to set a DebugData<true>::Record field based on the template enum parameter.
|
||||
template <DebugDataRecord::Type type, typename ValueType>
|
||||
inline void SetField(DebugDataRecord& record, ValueType value);
|
||||
|
||||
template <>
|
||||
inline void SetField<DebugDataRecord::SRC1>(DebugDataRecord& record, float24* value) {
|
||||
record.src1.x = value[0];
|
||||
record.src1.y = value[1];
|
||||
record.src1.z = value[2];
|
||||
record.src1.w = value[3];
|
||||
}
|
||||
|
||||
template <>
|
||||
inline void SetField<DebugDataRecord::SRC2>(DebugDataRecord& record, float24* value) {
|
||||
record.src2.x = value[0];
|
||||
record.src2.y = value[1];
|
||||
record.src2.z = value[2];
|
||||
record.src2.w = value[3];
|
||||
}
|
||||
|
||||
template <>
|
||||
inline void SetField<DebugDataRecord::SRC3>(DebugDataRecord& record, float24* value) {
|
||||
record.src3.x = value[0];
|
||||
record.src3.y = value[1];
|
||||
record.src3.z = value[2];
|
||||
record.src3.w = value[3];
|
||||
}
|
||||
|
||||
template <>
|
||||
inline void SetField<DebugDataRecord::DEST_IN>(DebugDataRecord& record, float24* value) {
|
||||
record.dest_in.x = value[0];
|
||||
record.dest_in.y = value[1];
|
||||
record.dest_in.z = value[2];
|
||||
record.dest_in.w = value[3];
|
||||
}
|
||||
|
||||
template <>
|
||||
inline void SetField<DebugDataRecord::DEST_OUT>(DebugDataRecord& record, float24* value) {
|
||||
record.dest_out.x = value[0];
|
||||
record.dest_out.y = value[1];
|
||||
record.dest_out.z = value[2];
|
||||
record.dest_out.w = value[3];
|
||||
}
|
||||
|
||||
template <>
|
||||
inline void SetField<DebugDataRecord::ADDR_REG_OUT>(DebugDataRecord& record, s32* value) {
|
||||
record.address_registers[0] = value[0];
|
||||
record.address_registers[1] = value[1];
|
||||
}
|
||||
|
||||
template <>
|
||||
inline void SetField<DebugDataRecord::CMP_RESULT>(DebugDataRecord& record, bool* value) {
|
||||
record.conditional_code[0] = value[0];
|
||||
record.conditional_code[1] = value[1];
|
||||
}
|
||||
|
||||
template <>
|
||||
inline void SetField<DebugDataRecord::COND_BOOL_IN>(DebugDataRecord& record, bool value) {
|
||||
record.cond_bool = value;
|
||||
}
|
||||
|
||||
template <>
|
||||
inline void SetField<DebugDataRecord::COND_CMP_IN>(DebugDataRecord& record, bool* value) {
|
||||
record.cond_cmp[0] = value[0];
|
||||
record.cond_cmp[1] = value[1];
|
||||
}
|
||||
|
||||
template <>
|
||||
inline void SetField<DebugDataRecord::LOOP_INT_IN>(DebugDataRecord& record, Math::Vec4<u8> value) {
|
||||
record.loop_int = value;
|
||||
}
|
||||
|
||||
template <>
|
||||
inline void SetField<DebugDataRecord::CUR_INSTR>(DebugDataRecord& record, u32 value) {
|
||||
record.instruction_offset = value;
|
||||
}
|
||||
|
||||
template <>
|
||||
inline void SetField<DebugDataRecord::NEXT_INSTR>(DebugDataRecord& record, u32 value) {
|
||||
record.next_instruction = value;
|
||||
}
|
||||
|
||||
/// Helper function to set debug information on the current shader iteration.
|
||||
template <DebugDataRecord::Type type, typename ValueType>
|
||||
inline void Record(DebugData<false>& debug_data, u32 offset, ValueType value) {
|
||||
// Debugging disabled => nothing to do
|
||||
}
|
||||
|
||||
template <DebugDataRecord::Type type, typename ValueType>
|
||||
inline void Record(DebugData<true>& debug_data, u32 offset, ValueType value) {
|
||||
if (offset >= debug_data.records.size())
|
||||
debug_data.records.resize(offset + 1);
|
||||
|
||||
SetField<type, ValueType>(debug_data.records[offset], value);
|
||||
debug_data.records[offset].mask |= type;
|
||||
}
|
||||
|
||||
} // namespace Shader
|
||||
} // namespace Pica
|
|
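The debug_data.h header deleted above makes shader tracing free in normal runs by overloading Record() on DebugData<false> so the call compiles to nothing; only the DebugData<true> overload stores anything. A reduced sketch of that pattern, using illustrative names rather than the codebase's:

#include <cstdint>
#include <vector>

template <bool FullDebugging>
struct TraceData;

template <>
struct TraceData<false> {}; // tracing disabled: nothing is recorded

template <>
struct TraceData<true> {
    std::vector<std::uint32_t> visited_offsets; // one entry per executed instruction
};

// Disabled flavour: an empty inline function the optimizer removes entirely.
inline void RecordOffset(TraceData<false>&, std::uint32_t) {}

// Enabled flavour: actually store the program counter.
inline void RecordOffset(TraceData<true>& data, std::uint32_t offset) {
    data.visited_offsets.push_back(offset);
}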
@@ -1,154 +0,0 @@
|
|||
// Copyright 2015 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <cmath>
|
||||
#include <cstring>
|
||||
#include "common/bit_set.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "video_core/pica_state.h"
|
||||
#include "video_core/regs_rasterizer.h"
|
||||
#include "video_core/regs_shader.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
#include "video_core/shader/shader_interpreter.h"
|
||||
#ifdef ARCHITECTURE_x86_64
|
||||
#include "video_core/shader/shader_jit_x64.h"
|
||||
#endif // ARCHITECTURE_x86_64
|
||||
#include "video_core/video_core.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
namespace Shader {
|
||||
|
||||
OutputVertex OutputVertex::FromAttributeBuffer(const RasterizerRegs& regs,
|
||||
const AttributeBuffer& input) {
|
||||
// Setup output data
|
||||
union {
|
||||
OutputVertex ret{};
|
||||
std::array<float24, 24> vertex_slots;
|
||||
};
|
||||
static_assert(sizeof(vertex_slots) == sizeof(ret), "Struct and array have different sizes.");
|
||||
|
||||
unsigned int num_attributes = regs.vs_output_total;
|
||||
ASSERT(num_attributes <= 7);
|
||||
for (unsigned int i = 0; i < num_attributes; ++i) {
|
||||
const auto& output_register_map = regs.vs_output_attributes[i];
|
||||
|
||||
RasterizerRegs::VSOutputAttributes::Semantic semantics[4] = {
|
||||
output_register_map.map_x, output_register_map.map_y, output_register_map.map_z,
|
||||
output_register_map.map_w};
|
||||
|
||||
for (unsigned comp = 0; comp < 4; ++comp) {
|
||||
RasterizerRegs::VSOutputAttributes::Semantic semantic = semantics[comp];
|
||||
if (semantic < vertex_slots.size()) {
|
||||
vertex_slots[semantic] = input.attr[i][comp];
|
||||
} else if (semantic != RasterizerRegs::VSOutputAttributes::INVALID) {
|
||||
LOG_ERROR(HW_GPU, "Invalid/unknown semantic id: %u", (unsigned int)semantic);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The hardware takes the absolute and saturates vertex colors like this, *before* doing
|
||||
// interpolation
|
||||
for (unsigned i = 0; i < 4; ++i) {
|
||||
float c = std::fabs(ret.color[i].ToFloat32());
|
||||
ret.color[i] = float24::FromFloat32(c < 1.0f ? c : 1.0f);
|
||||
}
|
||||
|
||||
LOG_TRACE(HW_GPU, "Output vertex: pos(%.2f, %.2f, %.2f, %.2f), quat(%.2f, %.2f, %.2f, %.2f), "
|
||||
"col(%.2f, %.2f, %.2f, %.2f), tc0(%.2f, %.2f), view(%.2f, %.2f, %.2f)",
|
||||
ret.pos.x.ToFloat32(), ret.pos.y.ToFloat32(), ret.pos.z.ToFloat32(),
|
||||
ret.pos.w.ToFloat32(), ret.quat.x.ToFloat32(), ret.quat.y.ToFloat32(),
|
||||
ret.quat.z.ToFloat32(), ret.quat.w.ToFloat32(), ret.color.x.ToFloat32(),
|
||||
ret.color.y.ToFloat32(), ret.color.z.ToFloat32(), ret.color.w.ToFloat32(),
|
||||
ret.tc0.u().ToFloat32(), ret.tc0.v().ToFloat32(), ret.view.x.ToFloat32(),
|
||||
ret.view.y.ToFloat32(), ret.view.z.ToFloat32());
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void UnitState::LoadInput(const ShaderRegs& config, const AttributeBuffer& input) {
|
||||
const unsigned max_attribute = config.max_input_attribute_index;
|
||||
|
||||
for (unsigned attr = 0; attr <= max_attribute; ++attr) {
|
||||
unsigned reg = config.GetRegisterForAttribute(attr);
|
||||
registers.input[reg] = input.attr[attr];
|
||||
}
|
||||
}
|
||||
|
||||
void UnitState::WriteOutput(const ShaderRegs& config, AttributeBuffer& output) {
|
||||
unsigned int output_i = 0;
|
||||
for (unsigned int reg : Common::BitSet<u32>(config.output_mask)) {
|
||||
output.attr[output_i++] = registers.output[reg];
|
||||
}
|
||||
}
|
||||
|
||||
UnitState::UnitState(GSEmitter* emitter) : emitter_ptr(emitter) {}
|
||||
|
||||
GSEmitter::GSEmitter() {
|
||||
handlers = new Handlers;
|
||||
}
|
||||
|
||||
GSEmitter::~GSEmitter() {
|
||||
delete handlers;
|
||||
}
|
||||
|
||||
void GSEmitter::Emit(Math::Vec4<float24> (&vertex)[16]) {
|
||||
ASSERT(vertex_id < 3);
|
||||
std::copy(std::begin(vertex), std::end(vertex), buffer[vertex_id].begin());
|
||||
if (prim_emit) {
|
||||
if (winding)
|
||||
handlers->winding_setter();
|
||||
for (size_t i = 0; i < buffer.size(); ++i) {
|
||||
AttributeBuffer output;
|
||||
unsigned int output_i = 0;
|
||||
for (unsigned int reg : Common::BitSet<u32>(output_mask)) {
|
||||
output.attr[output_i++] = buffer[i][reg];
|
||||
}
|
||||
handlers->vertex_handler(output);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GSUnitState::GSUnitState() : UnitState(&emitter) {}
|
||||
|
||||
void GSUnitState::SetVertexHandler(VertexHandler vertex_handler, WindingSetter winding_setter) {
|
||||
emitter.handlers->vertex_handler = std::move(vertex_handler);
|
||||
emitter.handlers->winding_setter = std::move(winding_setter);
|
||||
}
|
||||
|
||||
void GSUnitState::ConfigOutput(const ShaderRegs& config) {
|
||||
emitter.output_mask = config.output_mask;
|
||||
}
|
||||
|
||||
MICROPROFILE_DEFINE(GPU_Shader, "GPU", "Shader", MP_RGB(50, 50, 240));
|
||||
|
||||
#ifdef ARCHITECTURE_x86_64
|
||||
static std::unique_ptr<JitX64Engine> jit_engine;
|
||||
#endif // ARCHITECTURE_x86_64
|
||||
static InterpreterEngine interpreter_engine;
|
||||
|
||||
ShaderEngine* GetEngine() {
|
||||
#ifdef ARCHITECTURE_x86_64
|
||||
// TODO(yuriks): Re-initialize on each change rather than being persistent
|
||||
if (VideoCore::g_shader_jit_enabled) {
|
||||
if (jit_engine == nullptr) {
|
||||
jit_engine = std::make_unique<JitX64Engine>();
|
||||
}
|
||||
return jit_engine.get();
|
||||
}
|
||||
#endif // ARCHITECTURE_x86_64
|
||||
|
||||
return &interpreter_engine;
|
||||
}
|
||||
|
||||
void Shutdown() {
|
||||
#ifdef ARCHITECTURE_x86_64
|
||||
jit_engine = nullptr;
|
||||
#endif // ARCHITECTURE_x86_64
|
||||
}
|
||||
|
||||
} // namespace Shader
|
||||
|
||||
} // namespace Pica
|
|
@@ -1,233 +0,0 @@
|
|||
// Copyright 2015 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <cstddef>
|
||||
#include <functional>
|
||||
#include <type_traits>
|
||||
#include <nihstro/shader_bytecode.h>
|
||||
#include "common/assert.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "video_core/pica_types.h"
|
||||
#include "video_core/regs_rasterizer.h"
|
||||
#include "video_core/regs_shader.h"
|
||||
|
||||
using nihstro::RegisterType;
|
||||
using nihstro::SourceRegister;
|
||||
using nihstro::DestRegister;
|
||||
|
||||
namespace Pica {
|
||||
|
||||
namespace Shader {
|
||||
|
||||
constexpr unsigned MAX_PROGRAM_CODE_LENGTH = 4096;
|
||||
constexpr unsigned MAX_SWIZZLE_DATA_LENGTH = 4096;
|
||||
|
||||
struct AttributeBuffer {
|
||||
alignas(16) Math::Vec4<float24> attr[16];
|
||||
};
|
||||
|
||||
/// Handler type for receiving vertex outputs from vertex shader or geometry shader
|
||||
using VertexHandler = std::function<void(const AttributeBuffer&)>;
|
||||
|
||||
/// Handler type for signaling to invert the vertex order of the next triangle
|
||||
using WindingSetter = std::function<void()>;
|
||||
|
||||
struct OutputVertex {
|
||||
Math::Vec4<float24> pos;
|
||||
Math::Vec4<float24> quat;
|
||||
Math::Vec4<float24> color;
|
||||
Math::Vec2<float24> tc0;
|
||||
Math::Vec2<float24> tc1;
|
||||
float24 tc0_w;
|
||||
INSERT_PADDING_WORDS(1);
|
||||
Math::Vec3<float24> view;
|
||||
INSERT_PADDING_WORDS(1);
|
||||
Math::Vec2<float24> tc2;
|
||||
|
||||
static OutputVertex FromAttributeBuffer(const RasterizerRegs& regs,
|
||||
const AttributeBuffer& output);
|
||||
};
|
||||
#define ASSERT_POS(var, pos) \
|
||||
static_assert(offsetof(OutputVertex, var) == pos * sizeof(float24), "Semantic at wrong " \
|
||||
"offset.")
|
||||
ASSERT_POS(pos, RasterizerRegs::VSOutputAttributes::POSITION_X);
|
||||
ASSERT_POS(quat, RasterizerRegs::VSOutputAttributes::QUATERNION_X);
|
||||
ASSERT_POS(color, RasterizerRegs::VSOutputAttributes::COLOR_R);
|
||||
ASSERT_POS(tc0, RasterizerRegs::VSOutputAttributes::TEXCOORD0_U);
|
||||
ASSERT_POS(tc1, RasterizerRegs::VSOutputAttributes::TEXCOORD1_U);
|
||||
ASSERT_POS(tc0_w, RasterizerRegs::VSOutputAttributes::TEXCOORD0_W);
|
||||
ASSERT_POS(view, RasterizerRegs::VSOutputAttributes::VIEW_X);
|
||||
ASSERT_POS(tc2, RasterizerRegs::VSOutputAttributes::TEXCOORD2_U);
|
||||
#undef ASSERT_POS
|
||||
static_assert(std::is_pod<OutputVertex>::value, "Structure is not POD");
|
||||
static_assert(sizeof(OutputVertex) == 24 * sizeof(float), "OutputVertex has invalid size");
|
||||
|
||||
/**
|
||||
* This structure contains state information for primitive emitting in geometry shader.
|
||||
*/
|
||||
struct GSEmitter {
|
||||
std::array<std::array<Math::Vec4<float24>, 16>, 3> buffer;
|
||||
u8 vertex_id;
|
||||
bool prim_emit;
|
||||
bool winding;
|
||||
u32 output_mask;
|
||||
|
||||
// Function objects are hidden behind a raw pointer to make the structure standard layout type,
|
||||
// for JIT to use offsetof to access other members.
|
||||
struct Handlers {
|
||||
VertexHandler vertex_handler;
|
||||
WindingSetter winding_setter;
|
||||
} * handlers;
|
||||
|
||||
GSEmitter();
|
||||
~GSEmitter();
|
||||
void Emit(Math::Vec4<float24> (&vertex)[16]);
|
||||
};
|
||||
static_assert(std::is_standard_layout<GSEmitter>::value, "GSEmitter is not standard layout type");
|
||||
|
||||
/**
|
||||
* This structure contains the state information that needs to be unique for a shader unit. The 3DS
|
||||
* has four shader units that process shaders in parallel. At the present, Citra only implements a
|
||||
* single shader unit that processes all shaders serially. Putting the state information in a struct
|
||||
* here will make it easier for us to parallelize the shader processing later.
|
||||
*/
|
||||
struct UnitState {
|
||||
explicit UnitState(GSEmitter* emitter = nullptr);
|
||||
struct Registers {
|
||||
// The registers are accessed by the shader JIT using SSE instructions, and are therefore
|
||||
// required to be 16-byte aligned.
|
||||
alignas(16) Math::Vec4<float24> input[16];
|
||||
alignas(16) Math::Vec4<float24> temporary[16];
|
||||
alignas(16) Math::Vec4<float24> output[16];
|
||||
} registers;
|
||||
static_assert(std::is_pod<Registers>::value, "Structure is not POD");
|
||||
|
||||
bool conditional_code[2];
|
||||
|
||||
// Two Address registers and one loop counter
|
||||
// TODO: How many bits do these actually have?
|
||||
s32 address_registers[3];
|
||||
|
||||
GSEmitter* emitter_ptr;
|
||||
|
||||
static size_t InputOffset(const SourceRegister& reg) {
|
||||
switch (reg.GetRegisterType()) {
|
||||
case RegisterType::Input:
|
||||
return offsetof(UnitState, registers.input) +
|
||||
reg.GetIndex() * sizeof(Math::Vec4<float24>);
|
||||
|
||||
case RegisterType::Temporary:
|
||||
return offsetof(UnitState, registers.temporary) +
|
||||
reg.GetIndex() * sizeof(Math::Vec4<float24>);
|
||||
|
||||
default:
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static size_t OutputOffset(const DestRegister& reg) {
|
||||
switch (reg.GetRegisterType()) {
|
||||
case RegisterType::Output:
|
||||
return offsetof(UnitState, registers.output) +
|
||||
reg.GetIndex() * sizeof(Math::Vec4<float24>);
|
||||
|
||||
case RegisterType::Temporary:
|
||||
return offsetof(UnitState, registers.temporary) +
|
||||
reg.GetIndex() * sizeof(Math::Vec4<float24>);
|
||||
|
||||
default:
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads the unit state with an input vertex.
|
||||
*
|
||||
* @param config Shader configuration registers corresponding to the unit.
|
||||
* @param input Attribute buffer to load into the input registers.
|
||||
*/
|
||||
void LoadInput(const ShaderRegs& config, const AttributeBuffer& input);
|
||||
|
||||
void WriteOutput(const ShaderRegs& config, AttributeBuffer& output);
|
||||
};
|
||||
|
||||
/**
|
||||
* This is an extended shader unit state that represents the special unit that can run both vertex
|
||||
* shader and geometry shader. It contains an additional primitive emitter and utilities for
|
||||
* geometry shader.
|
||||
*/
|
||||
struct GSUnitState : public UnitState {
|
||||
GSUnitState();
|
||||
void SetVertexHandler(VertexHandler vertex_handler, WindingSetter winding_setter);
|
||||
void ConfigOutput(const ShaderRegs& config);
|
||||
|
||||
GSEmitter emitter;
|
||||
};
|
||||
|
||||
struct ShaderSetup {
|
||||
struct {
|
||||
// The float uniforms are accessed by the shader JIT using SSE instructions, and are
|
||||
// therefore required to be 16-byte aligned.
|
||||
alignas(16) Math::Vec4<float24> f[96];
|
||||
|
||||
std::array<bool, 16> b;
|
||||
std::array<Math::Vec4<u8>, 4> i;
|
||||
} uniforms;
|
||||
|
||||
static size_t GetFloatUniformOffset(unsigned index) {
|
||||
return offsetof(ShaderSetup, uniforms.f) + index * sizeof(Math::Vec4<float24>);
|
||||
}
|
||||
|
||||
static size_t GetBoolUniformOffset(unsigned index) {
|
||||
return offsetof(ShaderSetup, uniforms.b) + index * sizeof(bool);
|
||||
}
|
||||
|
||||
static size_t GetIntUniformOffset(unsigned index) {
|
||||
return offsetof(ShaderSetup, uniforms.i) + index * sizeof(Math::Vec4<u8>);
|
||||
}
|
||||
|
||||
std::array<u32, MAX_PROGRAM_CODE_LENGTH> program_code;
|
||||
std::array<u32, MAX_SWIZZLE_DATA_LENGTH> swizzle_data;
|
||||
|
||||
/// Data private to ShaderEngines
|
||||
struct EngineData {
|
||||
unsigned int entry_point;
|
||||
/// Used by the JIT, points to a compiled shader object.
|
||||
const void* cached_shader = nullptr;
|
||||
} engine_data;
|
||||
};
|
||||
|
||||
class ShaderEngine {
|
||||
public:
|
||||
virtual ~ShaderEngine() = default;
|
||||
|
||||
/**
|
||||
* Performs any shader unit setup that only needs to happen once per shader (as opposed to once
|
||||
* per vertex, which would happen within the `Run` function).
|
||||
*/
|
||||
virtual void SetupBatch(ShaderSetup& setup, unsigned int entry_point) = 0;
|
||||
|
||||
/**
|
||||
* Runs the currently setup shader.
|
||||
*
|
||||
* @param setup Shader engine state, must be setup with SetupBatch on each shader change.
|
||||
* @param state Shader unit state, must be setup with input data before each shader invocation.
|
||||
*/
|
||||
virtual void Run(const ShaderSetup& setup, UnitState& state) const = 0;
|
||||
};
|
||||
|
||||
// TODO(yuriks): Remove and make it non-global state somewhere
|
||||
ShaderEngine* GetEngine();
|
||||
void Shutdown();
|
||||
|
||||
} // namespace Shader
|
||||
|
||||
} // namespace Pica
|
|
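The shader.h removed above computes register addresses for the JIT with offsetof (InputOffset/OutputOffset), which is why UnitState keeps its register file in a plain standard-layout struct and hides std::function handlers behind a raw pointer. A reduced illustration of the idea; the type and field names here are placeholders, not the emulator's:

#include <cstddef>

struct Registers {
    float input[16][4];
    float temporary[16][4];
};

struct Unit {
    Registers registers;
};

// A JIT can bake this byte offset into generated loads/stores relative to a
// base pointer to Unit, instead of resolving members at runtime.
inline std::size_t InputOffset(unsigned index) {
    return offsetof(Unit, registers.input) + index * sizeof(float[4]);
}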
@@ -1,701 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <cmath>
|
||||
#include <numeric>
|
||||
#include <boost/container/static_vector.hpp>
|
||||
#include <boost/range/algorithm/fill.hpp>
|
||||
#include <nihstro/shader_bytecode.h>
|
||||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "video_core/pica_state.h"
|
||||
#include "video_core/pica_types.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
#include "video_core/shader/shader_interpreter.h"
|
||||
|
||||
using nihstro::OpCode;
|
||||
using nihstro::Instruction;
|
||||
using nihstro::RegisterType;
|
||||
using nihstro::SourceRegister;
|
||||
using nihstro::SwizzlePattern;
|
||||
|
||||
namespace Pica {
|
||||
|
||||
namespace Shader {
|
||||
|
||||
struct CallStackElement {
|
||||
u32 final_address; // Address upon which we jump to return_address
|
||||
u32 return_address; // Where to jump when leaving scope
|
||||
u8 repeat_counter; // How often to repeat until this call stack element is removed
|
||||
u8 loop_increment; // Which value to add to the loop counter after an iteration
|
||||
// TODO: Should this be a signed value? Does it even matter?
|
||||
u32 loop_address; // The address where we'll return to after each loop iteration
|
||||
};
|
||||
|
||||
template <bool Debug>
|
||||
static void RunInterpreter(const ShaderSetup& setup, UnitState& state, DebugData<Debug>& debug_data,
|
||||
unsigned offset) {
|
||||
// TODO: Is there a maximal size for this?
|
||||
boost::container::static_vector<CallStackElement, 16> call_stack;
|
||||
u32 program_counter = offset;
|
||||
|
||||
state.conditional_code[0] = false;
|
||||
state.conditional_code[1] = false;
|
||||
|
||||
auto call = [&program_counter, &call_stack](u32 offset, u32 num_instructions, u32 return_offset,
|
||||
u8 repeat_count, u8 loop_increment) {
|
||||
// -1 to make sure when incrementing the PC we end up at the correct offset
|
||||
program_counter = offset - 1;
|
||||
ASSERT(call_stack.size() < call_stack.capacity());
|
||||
call_stack.push_back(
|
||||
{offset + num_instructions, return_offset, repeat_count, loop_increment, offset});
|
||||
};
|
||||
|
||||
auto evaluate_condition = [&state](Instruction::FlowControlType flow_control) {
|
||||
using Op = Instruction::FlowControlType::Op;
|
||||
|
||||
bool result_x = flow_control.refx.Value() == state.conditional_code[0];
|
||||
bool result_y = flow_control.refy.Value() == state.conditional_code[1];
|
||||
|
||||
switch (flow_control.op) {
|
||||
case Op::Or:
|
||||
return result_x || result_y;
|
||||
case Op::And:
|
||||
return result_x && result_y;
|
||||
case Op::JustX:
|
||||
return result_x;
|
||||
case Op::JustY:
|
||||
return result_y;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
const auto& uniforms = setup.uniforms;
|
||||
const auto& swizzle_data = setup.swizzle_data;
|
||||
const auto& program_code = setup.program_code;
|
||||
|
||||
// Placeholder for invalid inputs
|
||||
static float24 dummy_vec4_float24[4];
|
||||
|
||||
unsigned iteration = 0;
|
||||
bool exit_loop = false;
|
||||
while (!exit_loop) {
|
||||
if (!call_stack.empty()) {
|
||||
auto& top = call_stack.back();
|
||||
if (program_counter == top.final_address) {
|
||||
state.address_registers[2] += top.loop_increment;
|
||||
|
||||
if (top.repeat_counter-- == 0) {
|
||||
program_counter = top.return_address;
|
||||
call_stack.pop_back();
|
||||
} else {
|
||||
program_counter = top.loop_address;
|
||||
}
|
||||
|
||||
// TODO: Is "trying again" accurate to hardware?
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
const Instruction instr = {program_code[program_counter]};
|
||||
const SwizzlePattern swizzle = {swizzle_data[instr.common.operand_desc_id]};
|
||||
|
||||
Record<DebugDataRecord::CUR_INSTR>(debug_data, iteration, program_counter);
|
||||
if (iteration > 0)
|
||||
Record<DebugDataRecord::NEXT_INSTR>(debug_data, iteration - 1, program_counter);
|
||||
|
||||
debug_data.max_offset = std::max<u32>(debug_data.max_offset, 1 + program_counter);
|
||||
|
||||
auto LookupSourceRegister = [&](const SourceRegister& source_reg) -> const float24* {
|
||||
switch (source_reg.GetRegisterType()) {
|
||||
case RegisterType::Input:
|
||||
return &state.registers.input[source_reg.GetIndex()].x;
|
||||
|
||||
case RegisterType::Temporary:
|
||||
return &state.registers.temporary[source_reg.GetIndex()].x;
|
||||
|
||||
case RegisterType::FloatUniform:
|
||||
return &uniforms.f[source_reg.GetIndex()].x;
|
||||
|
||||
default:
|
||||
return dummy_vec4_float24;
|
||||
}
|
||||
};
|
||||
|
||||
switch (instr.opcode.Value().GetInfo().type) {
|
||||
case OpCode::Type::Arithmetic: {
|
||||
const bool is_inverted =
|
||||
(0 != (instr.opcode.Value().GetInfo().subtype & OpCode::Info::SrcInversed));
|
||||
|
||||
const int address_offset =
|
||||
(instr.common.address_register_index == 0)
|
||||
? 0
|
||||
: state.address_registers[instr.common.address_register_index - 1];
|
||||
|
||||
const float24* src1_ = LookupSourceRegister(instr.common.GetSrc1(is_inverted) +
|
||||
(is_inverted ? 0 : address_offset));
|
||||
const float24* src2_ = LookupSourceRegister(instr.common.GetSrc2(is_inverted) +
|
||||
(is_inverted ? address_offset : 0));
|
||||
|
||||
const bool negate_src1 = ((bool)swizzle.negate_src1 != false);
|
||||
const bool negate_src2 = ((bool)swizzle.negate_src2 != false);
|
||||
|
||||
float24 src1[4] = {
|
||||
src1_[(int)swizzle.src1_selector_0.Value()],
|
||||
src1_[(int)swizzle.src1_selector_1.Value()],
|
||||
src1_[(int)swizzle.src1_selector_2.Value()],
|
||||
src1_[(int)swizzle.src1_selector_3.Value()],
|
||||
};
|
||||
if (negate_src1) {
|
||||
src1[0] = -src1[0];
|
||||
src1[1] = -src1[1];
|
||||
src1[2] = -src1[2];
|
||||
src1[3] = -src1[3];
|
||||
}
|
||||
float24 src2[4] = {
|
||||
src2_[(int)swizzle.src2_selector_0.Value()],
|
||||
src2_[(int)swizzle.src2_selector_1.Value()],
|
||||
src2_[(int)swizzle.src2_selector_2.Value()],
|
||||
src2_[(int)swizzle.src2_selector_3.Value()],
|
||||
};
|
||||
if (negate_src2) {
|
||||
src2[0] = -src2[0];
|
||||
src2[1] = -src2[1];
|
||||
src2[2] = -src2[2];
|
||||
src2[3] = -src2[3];
|
||||
}
|
||||
|
||||
float24* dest =
|
||||
(instr.common.dest.Value() < 0x10)
|
||||
? &state.registers.output[instr.common.dest.Value().GetIndex()][0]
|
||||
: (instr.common.dest.Value() < 0x20)
|
||||
? &state.registers.temporary[instr.common.dest.Value().GetIndex()][0]
|
||||
: dummy_vec4_float24;
|
||||
|
||||
debug_data.max_opdesc_id =
|
||||
std::max<u32>(debug_data.max_opdesc_id, 1 + instr.common.operand_desc_id);
|
||||
|
||||
switch (instr.opcode.Value().EffectiveOpCode()) {
|
||||
case OpCode::Id::ADD: {
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
dest[i] = src1[i] + src2[i];
|
||||
}
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
break;
|
||||
}
|
||||
|
||||
case OpCode::Id::MUL: {
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
dest[i] = src1[i] * src2[i];
|
||||
}
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
break;
|
||||
}
|
||||
|
||||
case OpCode::Id::FLR:
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
dest[i] = float24::FromFloat32(std::floor(src1[i].ToFloat32()));
|
||||
}
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
break;
|
||||
|
||||
case OpCode::Id::MAX:
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
// NOTE: Exact form required to match NaN semantics to hardware:
|
||||
// max(0, NaN) -> NaN
|
||||
// max(NaN, 0) -> 0
|
||||
dest[i] = (src1[i] > src2[i]) ? src1[i] : src2[i];
|
||||
}
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
break;
|
||||
|
||||
case OpCode::Id::MIN:
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
// NOTE: Exact form required to match NaN semantics to hardware:
|
||||
// min(0, NaN) -> NaN
|
||||
// min(NaN, 0) -> 0
|
||||
dest[i] = (src1[i] < src2[i]) ? src1[i] : src2[i];
|
||||
}
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
break;
|
||||
|
||||
case OpCode::Id::DP3:
|
||||
case OpCode::Id::DP4:
|
||||
case OpCode::Id::DPH:
|
||||
case OpCode::Id::DPHI: {
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
|
||||
OpCode::Id opcode = instr.opcode.Value().EffectiveOpCode();
|
||||
if (opcode == OpCode::Id::DPH || opcode == OpCode::Id::DPHI)
|
||||
src1[3] = float24::FromFloat32(1.0f);
|
||||
|
||||
int num_components = (opcode == OpCode::Id::DP3) ? 3 : 4;
|
||||
float24 dot = std::inner_product(src1, src1 + num_components, src2,
|
||||
float24::FromFloat32(0.f));
|
||||
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
dest[i] = dot;
|
||||
}
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
break;
|
||||
}
|
||||
|
||||
// Reciprocal
|
||||
case OpCode::Id::RCP: {
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
float24 rcp_res = float24::FromFloat32(1.0f / src1[0].ToFloat32());
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
dest[i] = rcp_res;
|
||||
}
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
break;
|
||||
}
|
||||
|
||||
// Reciprocal Square Root
|
||||
case OpCode::Id::RSQ: {
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
float24 rsq_res = float24::FromFloat32(1.0f / std::sqrt(src1[0].ToFloat32()));
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
dest[i] = rsq_res;
|
||||
}
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
break;
|
||||
}
|
||||
|
||||
case OpCode::Id::MOVA: {
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
// TODO: Figure out how the rounding is done on hardware
|
||||
state.address_registers[i] = static_cast<s32>(src1[i].ToFloat32());
|
||||
}
|
||||
Record<DebugDataRecord::ADDR_REG_OUT>(debug_data, iteration,
|
||||
state.address_registers);
|
||||
break;
|
||||
}
|
||||
|
||||
case OpCode::Id::MOV: {
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
dest[i] = src1[i];
|
||||
}
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
break;
|
||||
}
|
||||
|
||||
case OpCode::Id::SGE:
|
||||
case OpCode::Id::SGEI:
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
dest[i] = (src1[i] >= src2[i]) ? float24::FromFloat32(1.0f)
|
||||
: float24::FromFloat32(0.0f);
|
||||
}
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
break;
|
||||
|
||||
case OpCode::Id::SLT:
|
||||
case OpCode::Id::SLTI:
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
dest[i] = (src1[i] < src2[i]) ? float24::FromFloat32(1.0f)
|
||||
: float24::FromFloat32(0.0f);
|
||||
}
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
break;
|
||||
|
||||
case OpCode::Id::CMP:
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
// TODO: Can you restrict to one compare via dest masking?
|
||||
|
||||
auto compare_op = instr.common.compare_op;
|
||||
auto op = (i == 0) ? compare_op.x.Value() : compare_op.y.Value();
|
||||
|
||||
switch (op) {
|
||||
case Instruction::Common::CompareOpType::Equal:
|
||||
state.conditional_code[i] = (src1[i] == src2[i]);
|
||||
break;
|
||||
|
||||
case Instruction::Common::CompareOpType::NotEqual:
|
||||
state.conditional_code[i] = (src1[i] != src2[i]);
|
||||
break;
|
||||
|
||||
case Instruction::Common::CompareOpType::LessThan:
|
||||
state.conditional_code[i] = (src1[i] < src2[i]);
|
||||
break;
|
||||
|
||||
case Instruction::Common::CompareOpType::LessEqual:
|
||||
state.conditional_code[i] = (src1[i] <= src2[i]);
|
||||
break;
|
||||
|
||||
case Instruction::Common::CompareOpType::GreaterThan:
|
||||
state.conditional_code[i] = (src1[i] > src2[i]);
|
||||
break;
|
||||
|
||||
case Instruction::Common::CompareOpType::GreaterEqual:
|
||||
state.conditional_code[i] = (src1[i] >= src2[i]);
|
||||
break;
|
||||
|
||||
default:
|
||||
LOG_ERROR(HW_GPU, "Unknown compare mode %x", static_cast<int>(op));
|
||||
break;
|
||||
}
|
||||
}
|
||||
Record<DebugDataRecord::CMP_RESULT>(debug_data, iteration, state.conditional_code);
|
||||
break;
|
||||
|
||||
case OpCode::Id::EX2: {
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
|
||||
// EX2 only takes first component exp2 and writes it to all dest components
|
||||
float24 ex2_res = float24::FromFloat32(std::exp2(src1[0].ToFloat32()));
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
dest[i] = ex2_res;
|
||||
}
|
||||
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
break;
|
||||
}
|
||||
|
||||
case OpCode::Id::LG2: {
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
|
||||
// LG2 only takes the first component log2 and writes it to all dest components
|
||||
float24 lg2_res = float24::FromFloat32(std::log2(src1[0].ToFloat32()));
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
dest[i] = lg2_res;
|
||||
}
|
||||
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
LOG_ERROR(HW_GPU, "Unhandled arithmetic instruction: 0x%02x (%s): 0x%08x",
|
||||
(int)instr.opcode.Value().EffectiveOpCode(),
|
||||
instr.opcode.Value().GetInfo().name, instr.hex);
|
||||
DEBUG_ASSERT(false);
|
||||
break;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
case OpCode::Type::MultiplyAdd: {
|
||||
if ((instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MAD) ||
|
||||
(instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MADI)) {
|
||||
const SwizzlePattern& swizzle = *reinterpret_cast<const SwizzlePattern*>(
|
||||
&swizzle_data[instr.mad.operand_desc_id]);
|
||||
|
||||
bool is_inverted = (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MADI);
|
||||
|
||||
const int address_offset =
|
||||
(instr.mad.address_register_index == 0)
|
||||
? 0
|
||||
: state.address_registers[instr.mad.address_register_index - 1];
|
||||
|
||||
const float24* src1_ = LookupSourceRegister(instr.mad.GetSrc1(is_inverted));
|
||||
const float24* src2_ = LookupSourceRegister(instr.mad.GetSrc2(is_inverted) +
|
||||
(!is_inverted * address_offset));
|
||||
const float24* src3_ = LookupSourceRegister(instr.mad.GetSrc3(is_inverted) +
|
||||
(is_inverted * address_offset));
|
||||
|
||||
const bool negate_src1 = ((bool)swizzle.negate_src1 != false);
|
||||
const bool negate_src2 = ((bool)swizzle.negate_src2 != false);
|
||||
const bool negate_src3 = ((bool)swizzle.negate_src3 != false);
|
||||
|
||||
float24 src1[4] = {
|
||||
src1_[(int)swizzle.src1_selector_0.Value()],
|
||||
src1_[(int)swizzle.src1_selector_1.Value()],
|
||||
src1_[(int)swizzle.src1_selector_2.Value()],
|
||||
src1_[(int)swizzle.src1_selector_3.Value()],
|
||||
};
|
||||
if (negate_src1) {
|
||||
src1[0] = -src1[0];
|
||||
src1[1] = -src1[1];
|
||||
src1[2] = -src1[2];
|
||||
src1[3] = -src1[3];
|
||||
}
|
||||
float24 src2[4] = {
|
||||
src2_[(int)swizzle.src2_selector_0.Value()],
|
||||
src2_[(int)swizzle.src2_selector_1.Value()],
|
||||
src2_[(int)swizzle.src2_selector_2.Value()],
|
||||
src2_[(int)swizzle.src2_selector_3.Value()],
|
||||
};
|
||||
if (negate_src2) {
|
||||
src2[0] = -src2[0];
|
||||
src2[1] = -src2[1];
|
||||
src2[2] = -src2[2];
|
||||
src2[3] = -src2[3];
|
||||
}
|
||||
float24 src3[4] = {
|
||||
src3_[(int)swizzle.src3_selector_0.Value()],
|
||||
src3_[(int)swizzle.src3_selector_1.Value()],
|
||||
src3_[(int)swizzle.src3_selector_2.Value()],
|
||||
src3_[(int)swizzle.src3_selector_3.Value()],
|
||||
};
|
||||
if (negate_src3) {
|
||||
src3[0] = -src3[0];
|
||||
src3[1] = -src3[1];
|
||||
src3[2] = -src3[2];
|
||||
src3[3] = -src3[3];
|
||||
}
|
||||
|
||||
float24* dest =
|
||||
(instr.mad.dest.Value() < 0x10)
|
||||
? &state.registers.output[instr.mad.dest.Value().GetIndex()][0]
|
||||
: (instr.mad.dest.Value() < 0x20)
|
||||
? &state.registers.temporary[instr.mad.dest.Value().GetIndex()][0]
|
||||
: dummy_vec4_float24;
|
||||
|
||||
Record<DebugDataRecord::SRC1>(debug_data, iteration, src1);
|
||||
Record<DebugDataRecord::SRC2>(debug_data, iteration, src2);
|
||||
Record<DebugDataRecord::SRC3>(debug_data, iteration, src3);
|
||||
Record<DebugDataRecord::DEST_IN>(debug_data, iteration, dest);
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
if (!swizzle.DestComponentEnabled(i))
|
||||
continue;
|
||||
|
||||
dest[i] = src1[i] * src2[i] + src3[i];
|
||||
}
|
||||
Record<DebugDataRecord::DEST_OUT>(debug_data, iteration, dest);
|
||||
} else {
|
||||
LOG_ERROR(HW_GPU, "Unhandled multiply-add instruction: 0x%02x (%s): 0x%08x",
|
||||
(int)instr.opcode.Value().EffectiveOpCode(),
|
||||
instr.opcode.Value().GetInfo().name, instr.hex);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
default: {
|
||||
// Handle each instruction on its own
|
||||
switch (instr.opcode.Value()) {
|
||||
case OpCode::Id::END:
|
||||
exit_loop = true;
|
||||
break;
|
||||
|
||||
case OpCode::Id::JMPC:
|
||||
Record<DebugDataRecord::COND_CMP_IN>(debug_data, iteration, state.conditional_code);
|
||||
if (evaluate_condition(instr.flow_control)) {
|
||||
program_counter = instr.flow_control.dest_offset - 1;
|
||||
}
|
||||
break;
|
||||
|
||||
case OpCode::Id::JMPU:
|
||||
Record<DebugDataRecord::COND_BOOL_IN>(
|
||||
debug_data, iteration, uniforms.b[instr.flow_control.bool_uniform_id]);
|
||||
|
||||
if (uniforms.b[instr.flow_control.bool_uniform_id] ==
|
||||
!(instr.flow_control.num_instructions & 1)) {
|
||||
program_counter = instr.flow_control.dest_offset - 1;
|
||||
}
|
||||
break;
|
||||
|
||||
case OpCode::Id::CALL:
|
||||
call(instr.flow_control.dest_offset, instr.flow_control.num_instructions,
|
||||
program_counter + 1, 0, 0);
|
||||
break;
|
||||
|
||||
case OpCode::Id::CALLU:
|
||||
Record<DebugDataRecord::COND_BOOL_IN>(
|
||||
debug_data, iteration, uniforms.b[instr.flow_control.bool_uniform_id]);
|
||||
if (uniforms.b[instr.flow_control.bool_uniform_id]) {
|
||||
call(instr.flow_control.dest_offset, instr.flow_control.num_instructions,
|
||||
program_counter + 1, 0, 0);
|
||||
}
|
||||
break;
|
||||
|
||||
case OpCode::Id::CALLC:
|
||||
Record<DebugDataRecord::COND_CMP_IN>(debug_data, iteration, state.conditional_code);
|
||||
if (evaluate_condition(instr.flow_control)) {
|
||||
call(instr.flow_control.dest_offset, instr.flow_control.num_instructions,
|
||||
program_counter + 1, 0, 0);
|
||||
}
|
||||
break;
|
||||
|
||||
case OpCode::Id::NOP:
|
||||
break;
|
||||
|
||||
case OpCode::Id::IFU:
|
||||
Record<DebugDataRecord::COND_BOOL_IN>(
|
||||
debug_data, iteration, uniforms.b[instr.flow_control.bool_uniform_id]);
|
||||
if (uniforms.b[instr.flow_control.bool_uniform_id]) {
|
||||
call(program_counter + 1, instr.flow_control.dest_offset - program_counter - 1,
|
||||
instr.flow_control.dest_offset + instr.flow_control.num_instructions, 0,
|
||||
0);
|
||||
} else {
|
||||
call(instr.flow_control.dest_offset, instr.flow_control.num_instructions,
|
||||
instr.flow_control.dest_offset + instr.flow_control.num_instructions, 0,
|
||||
0);
|
||||
}
|
||||
|
||||
break;
|
||||
|
||||
case OpCode::Id::IFC: {
|
||||
// TODO: Do we need to consider swizzlers here?
|
||||
|
||||
Record<DebugDataRecord::COND_CMP_IN>(debug_data, iteration, state.conditional_code);
|
||||
if (evaluate_condition(instr.flow_control)) {
|
||||
call(program_counter + 1, instr.flow_control.dest_offset - program_counter - 1,
|
||||
instr.flow_control.dest_offset + instr.flow_control.num_instructions, 0,
|
||||
0);
|
||||
} else {
|
||||
call(instr.flow_control.dest_offset, instr.flow_control.num_instructions,
|
||||
instr.flow_control.dest_offset + instr.flow_control.num_instructions, 0,
|
||||
0);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
case OpCode::Id::LOOP: {
|
||||
Math::Vec4<u8> loop_param(uniforms.i[instr.flow_control.int_uniform_id].x,
|
||||
uniforms.i[instr.flow_control.int_uniform_id].y,
|
||||
uniforms.i[instr.flow_control.int_uniform_id].z,
|
||||
uniforms.i[instr.flow_control.int_uniform_id].w);
|
||||
state.address_registers[2] = loop_param.y;
|
||||
|
||||
Record<DebugDataRecord::LOOP_INT_IN>(debug_data, iteration, loop_param);
|
||||
call(program_counter + 1, instr.flow_control.dest_offset - program_counter,
|
||||
instr.flow_control.dest_offset + 1, loop_param.x, loop_param.z);
|
||||
break;
|
||||
}
|
||||
|
||||
case OpCode::Id::EMIT: {
|
||||
GSEmitter* emitter = state.emitter_ptr;
|
||||
ASSERT_MSG(emitter, "Execute EMIT on VS");
|
||||
emitter->Emit(state.registers.output);
|
||||
break;
|
||||
}
|
||||
|
||||
case OpCode::Id::SETEMIT: {
|
||||
GSEmitter* emitter = state.emitter_ptr;
|
||||
ASSERT_MSG(emitter, "Execute SETEMIT on VS");
|
||||
emitter->vertex_id = instr.setemit.vertex_id;
|
||||
emitter->prim_emit = instr.setemit.prim_emit != 0;
|
||||
emitter->winding = instr.setemit.winding != 0;
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
LOG_ERROR(HW_GPU, "Unhandled instruction: 0x%02x (%s): 0x%08x",
|
||||
(int)instr.opcode.Value().EffectiveOpCode(),
|
||||
instr.opcode.Value().GetInfo().name, instr.hex);
|
||||
break;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
++program_counter;
|
||||
++iteration;
|
||||
}
|
||||
}
|
||||
|
||||
void InterpreterEngine::SetupBatch(ShaderSetup& setup, unsigned int entry_point) {
|
||||
ASSERT(entry_point < MAX_PROGRAM_CODE_LENGTH);
|
||||
setup.engine_data.entry_point = entry_point;
|
||||
}
|
||||
|
||||
MICROPROFILE_DECLARE(GPU_Shader);
|
||||
|
||||
void InterpreterEngine::Run(const ShaderSetup& setup, UnitState& state) const {
|
||||
|
||||
MICROPROFILE_SCOPE(GPU_Shader);
|
||||
|
||||
DebugData<false> dummy_debug_data;
|
||||
RunInterpreter(setup, state, dummy_debug_data, setup.engine_data.entry_point);
|
||||
}
|
||||
|
||||
DebugData<true> InterpreterEngine::ProduceDebugInfo(const ShaderSetup& setup,
|
||||
const AttributeBuffer& input,
|
||||
const ShaderRegs& config) const {
|
||||
UnitState state;
|
||||
DebugData<true> debug_data;
|
||||
|
||||
// Setup input register table
|
||||
boost::fill(state.registers.input, Math::Vec4<float24>::AssignToAll(float24::Zero()));
|
||||
state.LoadInput(config, input);
|
||||
RunInterpreter(setup, state, debug_data, setup.engine_data.entry_point);
|
||||
return debug_data;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
} // namespace
|
|
@ -1,32 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "video_core/shader/debug_data.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
|
||||
namespace Pica {
|
||||
|
||||
namespace Shader {
|
||||
|
||||
class InterpreterEngine final : public ShaderEngine {
|
||||
public:
|
||||
void SetupBatch(ShaderSetup& setup, unsigned int entry_point) override;
|
||||
void Run(const ShaderSetup& setup, UnitState& state) const override;
|
||||
|
||||
/**
|
||||
* Produce debug information based on the given shader and input vertex
|
||||
* @param setup Shader engine state
|
||||
* @param input Input vertex into the shader
|
||||
* @param config Configuration object for the shader pipeline
|
||||
* @return Debug information for this shader with regards to the given vertex
|
||||
*/
|
||||
DebugData<true> ProduceDebugInfo(const ShaderSetup& setup, const AttributeBuffer& input,
|
||||
const ShaderRegs& config) const;
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
} // namespace
|
|
@ -1,48 +0,0 @@
|
|||
// Copyright 2016 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/hash.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
#include "video_core/shader/shader_jit_x64.h"
|
||||
#include "video_core/shader/shader_jit_x64_compiler.h"
|
||||
|
||||
namespace Pica {
|
||||
namespace Shader {
|
||||
|
||||
JitX64Engine::JitX64Engine() = default;
|
||||
JitX64Engine::~JitX64Engine() = default;
|
||||
|
||||
void JitX64Engine::SetupBatch(ShaderSetup& setup, unsigned int entry_point) {
|
||||
ASSERT(entry_point < MAX_PROGRAM_CODE_LENGTH);
|
||||
setup.engine_data.entry_point = entry_point;
|
||||
|
||||
u64 code_hash = Common::ComputeHash64(&setup.program_code, sizeof(setup.program_code));
|
||||
u64 swizzle_hash = Common::ComputeHash64(&setup.swizzle_data, sizeof(setup.swizzle_data));
|
||||
|
||||
u64 cache_key = code_hash ^ swizzle_hash;
|
||||
auto iter = cache.find(cache_key);
|
||||
if (iter != cache.end()) {
|
||||
setup.engine_data.cached_shader = iter->second.get();
|
||||
} else {
|
||||
auto shader = std::make_unique<JitShader>();
|
||||
shader->Compile(&setup.program_code, &setup.swizzle_data);
|
||||
setup.engine_data.cached_shader = shader.get();
|
||||
cache.emplace_hint(iter, cache_key, std::move(shader));
|
||||
}
|
||||
}
|
||||
|
||||
MICROPROFILE_DECLARE(GPU_Shader);
|
||||
|
||||
void JitX64Engine::Run(const ShaderSetup& setup, UnitState& state) const {
|
||||
ASSERT(setup.engine_data.cached_shader != nullptr);
|
||||
|
||||
MICROPROFILE_SCOPE(GPU_Shader);
|
||||
|
||||
const JitShader* shader = static_cast<const JitShader*>(setup.engine_data.cached_shader);
|
||||
shader->Run(setup, state, setup.engine_data.entry_point);
|
||||
}
|
||||
|
||||
} // namespace Shader
|
||||
} // namespace Pica
|
|
@ -1,30 +0,0 @@
|
|||
// Copyright 2016 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <unordered_map>
|
||||
#include "common/common_types.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
|
||||
namespace Pica {
|
||||
namespace Shader {
|
||||
|
||||
class JitShader;
|
||||
|
||||
class JitX64Engine final : public ShaderEngine {
|
||||
public:
|
||||
JitX64Engine();
|
||||
~JitX64Engine() override;
|
||||
|
||||
void SetupBatch(ShaderSetup& setup, unsigned int entry_point) override;
|
||||
void Run(const ShaderSetup& setup, UnitState& state) const override;
|
||||
|
||||
private:
|
||||
std::unordered_map<u64, std::unique_ptr<JitShader>> cache;
|
||||
};
|
||||
|
||||
} // namespace Shader
|
||||
} // namespace Pica
|
|
@ -1,942 +0,0 @@
|
|||
// Copyright 2015 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
#include <cstdint>
|
||||
#include <nihstro/shader_bytecode.h>
|
||||
#include <smmintrin.h>
|
||||
#include <xmmintrin.h>
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "common/x64/cpu_detect.h"
|
||||
#include "common/x64/xbyak_abi.h"
|
||||
#include "common/x64/xbyak_util.h"
|
||||
#include "video_core/pica_state.h"
|
||||
#include "video_core/pica_types.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
#include "video_core/shader/shader_jit_x64_compiler.h"
|
||||
|
||||
using namespace Common::X64;
|
||||
using namespace Xbyak::util;
|
||||
using Xbyak::Label;
|
||||
using Xbyak::Reg32;
|
||||
using Xbyak::Reg64;
|
||||
using Xbyak::Xmm;
|
||||
|
||||
namespace Pica {
|
||||
|
||||
namespace Shader {
|
||||
|
||||
typedef void (JitShader::*JitFunction)(Instruction instr);
|
||||
|
||||
const JitFunction instr_table[64] = {
|
||||
&JitShader::Compile_ADD, // add
|
||||
&JitShader::Compile_DP3, // dp3
|
||||
&JitShader::Compile_DP4, // dp4
|
||||
&JitShader::Compile_DPH, // dph
|
||||
nullptr, // unknown
|
||||
&JitShader::Compile_EX2, // ex2
|
||||
&JitShader::Compile_LG2, // lg2
|
||||
nullptr, // unknown
|
||||
&JitShader::Compile_MUL, // mul
|
||||
&JitShader::Compile_SGE, // sge
|
||||
&JitShader::Compile_SLT, // slt
|
||||
&JitShader::Compile_FLR, // flr
|
||||
&JitShader::Compile_MAX, // max
|
||||
&JitShader::Compile_MIN, // min
|
||||
&JitShader::Compile_RCP, // rcp
|
||||
&JitShader::Compile_RSQ, // rsq
|
||||
nullptr, // unknown
|
||||
nullptr, // unknown
|
||||
&JitShader::Compile_MOVA, // mova
|
||||
&JitShader::Compile_MOV, // mov
|
||||
nullptr, // unknown
|
||||
nullptr, // unknown
|
||||
nullptr, // unknown
|
||||
nullptr, // unknown
|
||||
&JitShader::Compile_DPH, // dphi
|
||||
nullptr, // unknown
|
||||
&JitShader::Compile_SGE, // sgei
|
||||
&JitShader::Compile_SLT, // slti
|
||||
nullptr, // unknown
|
||||
nullptr, // unknown
|
||||
nullptr, // unknown
|
||||
nullptr, // unknown
|
||||
nullptr, // unknown
|
||||
&JitShader::Compile_NOP, // nop
|
||||
&JitShader::Compile_END, // end
|
||||
nullptr, // break
|
||||
&JitShader::Compile_CALL, // call
|
||||
&JitShader::Compile_CALLC, // callc
|
||||
&JitShader::Compile_CALLU, // callu
|
||||
&JitShader::Compile_IF, // ifu
|
||||
&JitShader::Compile_IF, // ifc
|
||||
&JitShader::Compile_LOOP, // loop
|
||||
&JitShader::Compile_EMIT, // emit
|
||||
&JitShader::Compile_SETE, // sete
|
||||
&JitShader::Compile_JMP, // jmpc
|
||||
&JitShader::Compile_JMP, // jmpu
|
||||
&JitShader::Compile_CMP, // cmp
|
||||
&JitShader::Compile_CMP, // cmp
|
||||
&JitShader::Compile_MAD, // madi
|
||||
&JitShader::Compile_MAD, // madi
|
||||
&JitShader::Compile_MAD, // madi
|
||||
&JitShader::Compile_MAD, // madi
|
||||
&JitShader::Compile_MAD, // madi
|
||||
&JitShader::Compile_MAD, // madi
|
||||
&JitShader::Compile_MAD, // madi
|
||||
&JitShader::Compile_MAD, // madi
|
||||
&JitShader::Compile_MAD, // mad
|
||||
&JitShader::Compile_MAD, // mad
|
||||
&JitShader::Compile_MAD, // mad
|
||||
&JitShader::Compile_MAD, // mad
|
||||
&JitShader::Compile_MAD, // mad
|
||||
&JitShader::Compile_MAD, // mad
|
||||
&JitShader::Compile_MAD, // mad
|
||||
&JitShader::Compile_MAD, // mad
|
||||
};
|
||||
|
||||
// The following is used to alias some commonly used registers. Generally, RAX-RDX and XMM0-XMM3 can
|
||||
// be used as scratch registers within a compiler function. The other registers have designated
|
||||
// purposes, as documented below:
|
||||
|
||||
/// Pointer to the uniform memory
|
||||
static const Reg64 SETUP = r9;
|
||||
/// The two 32-bit VS address offset registers set by the MOVA instruction
|
||||
static const Reg64 ADDROFFS_REG_0 = r10;
|
||||
static const Reg64 ADDROFFS_REG_1 = r11;
|
||||
/// VS loop count register (Multiplied by 16)
|
||||
static const Reg32 LOOPCOUNT_REG = r12d;
|
||||
/// Current VS loop iteration number (we could probably use LOOPCOUNT_REG, but this is quicker)
|
||||
static const Reg32 LOOPCOUNT = esi;
|
||||
/// Number to increment LOOPCOUNT_REG by on each loop iteration (Multiplied by 16)
|
||||
static const Reg32 LOOPINC = edi;
|
||||
/// Result of the previous CMP instruction for the X-component comparison
|
||||
static const Reg64 COND0 = r13;
|
||||
/// Result of the previous CMP instruction for the Y-component comparison
|
||||
static const Reg64 COND1 = r14;
|
||||
/// Pointer to the UnitState instance for the current VS unit
|
||||
static const Reg64 STATE = r15;
|
||||
/// SIMD scratch register
|
||||
static const Xmm SCRATCH = xmm0;
|
||||
/// Loaded with the first swizzled source register, otherwise can be used as a scratch register
|
||||
static const Xmm SRC1 = xmm1;
|
||||
/// Loaded with the second swizzled source register, otherwise can be used as a scratch register
|
||||
static const Xmm SRC2 = xmm2;
|
||||
/// Loaded with the third swizzled source register, otherwise can be used as a scratch register
|
||||
static const Xmm SRC3 = xmm3;
|
||||
/// Additional scratch register
|
||||
static const Xmm SCRATCH2 = xmm4;
|
||||
/// Constant vector of [1.0f, 1.0f, 1.0f, 1.0f], used to efficiently set a vector to one
|
||||
static const Xmm ONE = xmm14;
|
||||
/// Constant vector of [-0.f, -0.f, -0.f, -0.f], used to efficiently negate a vector with XOR
|
||||
static const Xmm NEGBIT = xmm15;
|
||||
|
||||
// State registers that must not be modified by external functions calls
|
||||
// Scratch registers, e.g., SRC1 and SCRATCH, have to be saved on the side if needed
|
||||
static const BitSet32 persistent_regs = BuildRegSet({
|
||||
// Pointers to register blocks
|
||||
SETUP, STATE,
|
||||
// Cached registers
|
||||
ADDROFFS_REG_0, ADDROFFS_REG_1, LOOPCOUNT_REG, COND0, COND1,
|
||||
// Constants
|
||||
ONE, NEGBIT,
|
||||
// Loop variables
|
||||
LOOPCOUNT, LOOPINC,
|
||||
});
|
||||
|
||||
/// Raw constant for the source register selector that indicates no swizzling is performed
|
||||
static const u8 NO_SRC_REG_SWIZZLE = 0x1b;
|
||||
/// Raw constant for the destination register enable mask that indicates all components are enabled
|
||||
static const u8 NO_DEST_REG_MASK = 0xf;
|
||||
|
||||
static void LogCritical(const char* msg) {
|
||||
LOG_CRITICAL(HW_GPU, "%s", msg);
|
||||
}
|
||||
|
||||
void JitShader::Compile_Assert(bool condition, const char* msg) {
|
||||
if (!condition) {
|
||||
mov(ABI_PARAM1, reinterpret_cast<size_t>(msg));
|
||||
CallFarFunction(*this, LogCritical);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads and swizzles a source register into the specified XMM register.
|
||||
* @param instr VS instruction, used for determining how to load the source register
|
||||
* @param src_num Number indicating which source register to load (1 = src1, 2 = src2, 3 = src3)
|
||||
* @param src_reg SourceRegister object corresponding to the source register to load
|
||||
* @param dest Destination XMM register to store the loaded, swizzled source register
|
||||
*/
|
||||
void JitShader::Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRegister src_reg,
|
||||
Xmm dest) {
|
||||
Reg64 src_ptr;
|
||||
size_t src_offset;
|
||||
|
||||
if (src_reg.GetRegisterType() == RegisterType::FloatUniform) {
|
||||
src_ptr = SETUP;
|
||||
src_offset = ShaderSetup::GetFloatUniformOffset(src_reg.GetIndex());
|
||||
} else {
|
||||
src_ptr = STATE;
|
||||
src_offset = UnitState::InputOffset(src_reg);
|
||||
}
|
||||
|
||||
int src_offset_disp = (int)src_offset;
|
||||
ASSERT_MSG(src_offset == src_offset_disp, "Source register offset too large for int type");
|
||||
|
||||
unsigned operand_desc_id;
|
||||
|
||||
const bool is_inverted =
|
||||
(0 != (instr.opcode.Value().GetInfo().subtype & OpCode::Info::SrcInversed));
|
||||
|
||||
unsigned address_register_index;
|
||||
unsigned offset_src;
|
||||
|
||||
if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MAD ||
|
||||
instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MADI) {
|
||||
operand_desc_id = instr.mad.operand_desc_id;
|
||||
offset_src = is_inverted ? 3 : 2;
|
||||
address_register_index = instr.mad.address_register_index;
|
||||
} else {
|
||||
operand_desc_id = instr.common.operand_desc_id;
|
||||
offset_src = is_inverted ? 2 : 1;
|
||||
address_register_index = instr.common.address_register_index;
|
||||
}
|
||||
|
||||
if (src_num == offset_src && address_register_index != 0) {
|
||||
switch (address_register_index) {
|
||||
case 1: // address offset 1
|
||||
movaps(dest, xword[src_ptr + ADDROFFS_REG_0 + src_offset_disp]);
|
||||
break;
|
||||
case 2: // address offset 2
|
||||
movaps(dest, xword[src_ptr + ADDROFFS_REG_1 + src_offset_disp]);
|
||||
break;
|
||||
case 3: // address offset 3
|
||||
movaps(dest, xword[src_ptr + LOOPCOUNT_REG.cvt64() + src_offset_disp]);
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
// Load the source
|
||||
movaps(dest, xword[src_ptr + src_offset_disp]);
|
||||
}
|
||||
|
||||
SwizzlePattern swiz = {(*swizzle_data)[operand_desc_id]};
|
||||
|
||||
// Generate instructions for source register swizzling as needed
|
||||
u8 sel = swiz.GetRawSelector(src_num);
|
||||
if (sel != NO_SRC_REG_SWIZZLE) {
|
||||
// Selector component order needs to be reversed for the SHUFPS instruction
|
||||
sel = ((sel & 0xc0) >> 6) | ((sel & 3) << 6) | ((sel & 0xc) << 2) | ((sel & 0x30) >> 2);
|
||||
|
||||
// Shuffle inputs for swizzle
|
||||
shufps(dest, dest, sel);
|
||||
}
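// Worked example of the reversal above: the no-swizzle selector 0x1b
// (0b00'01'10'11) maps to 0b11'10'01'00 = 0xE4, which is exactly the SHUFPS
// identity shuffle _MM_SHUFFLE(3, 2, 1, 0); that is why this branch can be
// skipped entirely when sel == NO_SRC_REG_SWIZZLE.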
|
||||
|
||||
// If the source register should be negated, flip the negative bit using XOR
|
||||
const bool negate[] = {swiz.negate_src1, swiz.negate_src2, swiz.negate_src3};
|
||||
if (negate[src_num - 1]) {
|
||||
xorps(dest, NEGBIT);
|
||||
}
|
||||
}
|
||||
|
||||
void JitShader::Compile_DestEnable(Instruction instr, Xmm src) {
|
||||
DestRegister dest;
|
||||
unsigned operand_desc_id;
|
||||
if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MAD ||
|
||||
instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MADI) {
|
||||
operand_desc_id = instr.mad.operand_desc_id;
|
||||
dest = instr.mad.dest.Value();
|
||||
} else {
|
||||
operand_desc_id = instr.common.operand_desc_id;
|
||||
dest = instr.common.dest.Value();
|
||||
}
|
||||
|
||||
SwizzlePattern swiz = {(*swizzle_data)[operand_desc_id]};
|
||||
|
||||
size_t dest_offset_disp = UnitState::OutputOffset(dest);
|
||||
|
||||
// If all components are enabled, write the result to the destination register
|
||||
if (swiz.dest_mask == NO_DEST_REG_MASK) {
|
||||
// Store dest back to memory
|
||||
movaps(xword[STATE + dest_offset_disp], src);
|
||||
|
||||
} else {
|
||||
// Not all components are enabled, so mask the result when storing to the destination
|
||||
// register...
|
||||
movaps(SCRATCH, xword[STATE + dest_offset_disp]);
|
||||
|
||||
if (Common::GetCPUCaps().sse4_1) {
|
||||
u8 mask = ((swiz.dest_mask & 1) << 3) | ((swiz.dest_mask & 8) >> 3) |
|
||||
((swiz.dest_mask & 2) << 1) | ((swiz.dest_mask & 4) >> 1);
|
||||
blendps(SCRATCH, src, mask);
|
||||
} else {
|
||||
movaps(SCRATCH2, src);
|
||||
unpckhps(SCRATCH2, SCRATCH); // Unpack X/Y components of source and destination
|
||||
unpcklps(SCRATCH, src); // Unpack Z/W components of source and destination
|
||||
|
||||
// Compute selector to selectively copy source components to destination for SHUFPS
|
||||
// instruction
|
||||
u8 sel = ((swiz.DestComponentEnabled(0) ? 1 : 0) << 0) |
|
||||
((swiz.DestComponentEnabled(1) ? 3 : 2) << 2) |
|
||||
((swiz.DestComponentEnabled(2) ? 0 : 1) << 4) |
|
||||
((swiz.DestComponentEnabled(3) ? 2 : 3) << 6);
|
||||
shufps(SCRATCH, SCRATCH2, sel);
|
||||
}
|
||||
|
||||
// Store dest back to memory
|
||||
movaps(xword[STATE + dest_offset_disp], SCRATCH);
|
||||
}
|
||||
}
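// A minimal scalar sketch of the masked store above (hypothetical helper, not
// part of the JIT itself), assuming the nihstro dest_mask layout where bit 3
// enables x and bit 0 enables w. Only enabled components take the new value;
// the rest keep whatever was already in the output register.
static void MaskedWriteExample(float dst[4], const float src[4], unsigned dest_mask) {
    for (unsigned i = 0; i < 4; ++i) {
        if (dest_mask & (8u >> i)) // bit 3 = x, bit 2 = y, bit 1 = z, bit 0 = w
            dst[i] = src[i];
    }
}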
|
||||
|
||||
void JitShader::Compile_SanitizedMul(Xmm src1, Xmm src2, Xmm scratch) {
|
||||
// 0 * inf and inf * 0 in the PICA should return 0 instead of NaN. This can be implemented by
|
||||
// checking for NaNs before and after the multiplication. If the multiplication result is NaN
|
||||
// where neither source was, this NaN was generated by a 0 * inf multiplication, and so the
|
||||
// result should be transformed to 0 to match PICA fp rules.
|
||||
|
||||
// Set scratch to mask of (src1 != NaN and src2 != NaN)
|
||||
movaps(scratch, src1);
|
||||
cmpordps(scratch, src2);
|
||||
|
||||
mulps(src1, src2);
|
||||
|
||||
// Set src2 to mask of (result == NaN)
|
||||
movaps(src2, src1);
|
||||
cmpunordps(src2, src2);
|
||||
|
||||
// Clear components where scratch != src2 (i.e. if result is NaN where neither source was NaN)
|
||||
xorps(scratch, src2);
|
||||
andps(src1, scratch);
|
||||
}
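// A minimal scalar sketch of the masking above (hypothetical helper, assuming
// IEEE-754 host floats); the JIT performs the same steps per SSE lane.
static float SanitizedMulExample(float a, float b) {
    const bool inputs_ordered = (a == a) && (b == b); // cmpordps: neither input is NaN
    const float product = a * b;                      // mulps
    const bool product_nan = (product != product);    // cmpunordps on the result
    // A NaN product from two ordered inputs can only come from 0 * inf, which the
    // PICA defines as 0, so mask it away; NaN inputs still propagate unchanged.
    return (inputs_ordered && product_nan) ? 0.0f : product;
}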
|
||||
|
||||
void JitShader::Compile_EvaluateCondition(Instruction instr) {
|
||||
// Note: NXOR is used below to check for equality
|
||||
switch (instr.flow_control.op) {
|
||||
case Instruction::FlowControlType::Or:
|
||||
mov(eax, COND0);
|
||||
mov(ebx, COND1);
|
||||
xor_(eax, (instr.flow_control.refx.Value() ^ 1));
|
||||
xor_(ebx, (instr.flow_control.refy.Value() ^ 1));
|
||||
or_(eax, ebx);
|
||||
break;
|
||||
|
||||
case Instruction::FlowControlType::And:
|
||||
mov(eax, COND0);
|
||||
mov(ebx, COND1);
|
||||
xor_(eax, (instr.flow_control.refx.Value() ^ 1));
|
||||
xor_(ebx, (instr.flow_control.refy.Value() ^ 1));
|
||||
and_(eax, ebx);
|
||||
break;
|
||||
|
||||
case Instruction::FlowControlType::JustX:
|
||||
mov(eax, COND0);
|
||||
xor_(eax, (instr.flow_control.refx.Value() ^ 1));
|
||||
break;
|
||||
|
||||
case Instruction::FlowControlType::JustY:
|
||||
mov(eax, COND1);
|
||||
xor_(eax, (instr.flow_control.refy.Value() ^ 1));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void JitShader::Compile_UniformCondition(Instruction instr) {
|
||||
size_t offset = ShaderSetup::GetBoolUniformOffset(instr.flow_control.bool_uniform_id);
|
||||
cmp(byte[SETUP + offset], 0);
|
||||
}
|
||||
|
||||
BitSet32 JitShader::PersistentCallerSavedRegs() {
|
||||
return persistent_regs & ABI_ALL_CALLER_SAVED;
|
||||
}
|
||||
|
||||
void JitShader::Compile_ADD(Instruction instr) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
|
||||
addps(SRC1, SRC2);
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_DP3(Instruction instr) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
|
||||
|
||||
Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
|
||||
|
||||
movaps(SRC2, SRC1);
|
||||
shufps(SRC2, SRC2, _MM_SHUFFLE(1, 1, 1, 1));
|
||||
|
||||
movaps(SRC3, SRC1);
|
||||
shufps(SRC3, SRC3, _MM_SHUFFLE(2, 2, 2, 2));
|
||||
|
||||
shufps(SRC1, SRC1, _MM_SHUFFLE(0, 0, 0, 0));
|
||||
addps(SRC1, SRC2);
|
||||
addps(SRC1, SRC3);
|
||||
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_DP4(Instruction instr) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
|
||||
|
||||
Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
|
||||
|
||||
movaps(SRC2, SRC1);
|
||||
shufps(SRC1, SRC1, _MM_SHUFFLE(2, 3, 0, 1)); // XYZW -> ZWXY
|
||||
addps(SRC1, SRC2);
|
||||
|
||||
movaps(SRC2, SRC1);
|
||||
shufps(SRC1, SRC1, _MM_SHUFFLE(0, 1, 2, 3)); // XYZW -> WZYX
|
||||
addps(SRC1, SRC2);
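// After the two shuffle+add passes every lane of SRC1 holds the full sum:
// (x, y, z, w) -> (x+z, y+w, x+z, y+w) -> (x+y+z+w) in all four components.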
|
||||
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_DPH(Instruction instr) {
|
||||
if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::DPHI) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1i, SRC1);
|
||||
Compile_SwizzleSrc(instr, 2, instr.common.src2i, SRC2);
|
||||
} else {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
|
||||
}
|
||||
|
||||
if (Common::GetCPUCaps().sse4_1) {
|
||||
// Set 4th component to 1.0
|
||||
blendps(SRC1, ONE, 0b1000);
|
||||
} else {
|
||||
// Set 4th component to 1.0
|
||||
movaps(SCRATCH, SRC1);
|
||||
unpckhps(SCRATCH, ONE); // XYZW, 1111 -> Z1__
|
||||
unpcklpd(SRC1, SCRATCH); // XYZW, Z1__ -> XYZ1
|
||||
}
|
||||
|
||||
Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
|
||||
|
||||
movaps(SRC2, SRC1);
|
||||
shufps(SRC1, SRC1, _MM_SHUFFLE(2, 3, 0, 1)); // XYZW -> ZWXY
|
||||
addps(SRC1, SRC2);
|
||||
|
||||
movaps(SRC2, SRC1);
|
||||
shufps(SRC1, SRC1, _MM_SHUFFLE(0, 1, 2, 3)); // XYZW -> WZYX
|
||||
addps(SRC1, SRC2);
|
||||
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_EX2(Instruction instr) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
movss(xmm0, SRC1); // ABI_PARAM1
|
||||
|
||||
ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
|
||||
CallFarFunction(*this, exp2f);
|
||||
ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
|
||||
|
||||
shufps(xmm0, xmm0, _MM_SHUFFLE(0, 0, 0, 0)); // ABI_RETURN
|
||||
movaps(SRC1, xmm0);
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_LG2(Instruction instr) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
movss(xmm0, SRC1); // ABI_PARAM1
|
||||
|
||||
ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
|
||||
CallFarFunction(*this, log2f);
|
||||
ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
|
||||
|
||||
shufps(xmm0, xmm0, _MM_SHUFFLE(0, 0, 0, 0)); // ABI_RETURN
|
||||
movaps(SRC1, xmm0);
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_MUL(Instruction instr) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
|
||||
Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_SGE(Instruction instr) {
|
||||
if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::SGEI) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1i, SRC1);
|
||||
Compile_SwizzleSrc(instr, 2, instr.common.src2i, SRC2);
|
||||
} else {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
|
||||
}
|
||||
|
||||
cmpleps(SRC2, SRC1);
|
||||
andps(SRC2, ONE);
|
||||
|
||||
Compile_DestEnable(instr, SRC2);
|
||||
}
|
||||
|
||||
void JitShader::Compile_SLT(Instruction instr) {
|
||||
if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::SLTI) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1i, SRC1);
|
||||
Compile_SwizzleSrc(instr, 2, instr.common.src2i, SRC2);
|
||||
} else {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
|
||||
}
|
||||
|
||||
cmpltps(SRC1, SRC2);
|
||||
andps(SRC1, ONE);
|
||||
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_FLR(Instruction instr) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
|
||||
if (Common::GetCPUCaps().sse4_1) {
|
||||
roundps(SRC1, SRC1, _MM_FROUND_FLOOR);
|
||||
} else {
|
||||
cvttps2dq(SRC1, SRC1);
|
||||
cvtdq2ps(SRC1, SRC1);
|
||||
}
|
||||
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_MAX(Instruction instr) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
|
||||
// SSE semantics match PICA200 ones: In case of NaN, SRC2 is returned.
|
||||
maxps(SRC1, SRC2);
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_MIN(Instruction instr) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
|
||||
// SSE semantics match PICA200 ones: In case of NaN, SRC2 is returned.
|
||||
minps(SRC1, SRC2);
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_MOVA(Instruction instr) {
|
||||
SwizzlePattern swiz = {(*swizzle_data)[instr.common.operand_desc_id]};
|
||||
|
||||
if (!swiz.DestComponentEnabled(0) && !swiz.DestComponentEnabled(1)) {
|
||||
return; // NoOp
|
||||
}
|
||||
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
|
||||
// Convert floats to integers using truncation (only care about X and Y components)
|
||||
cvttps2dq(SRC1, SRC1);
|
||||
|
||||
// Get result
|
||||
movq(rax, SRC1);
|
||||
|
||||
// Handle destination enable
|
||||
if (swiz.DestComponentEnabled(0) && swiz.DestComponentEnabled(1)) {
|
||||
// Move and sign-extend low 32 bits
|
||||
movsxd(ADDROFFS_REG_0, eax);
|
||||
|
||||
// Move and sign-extend high 32 bits
|
||||
shr(rax, 32);
|
||||
movsxd(ADDROFFS_REG_1, eax);
|
||||
|
||||
// Multiply by 16 to be used as an offset later
|
||||
shl(ADDROFFS_REG_0, 4);
|
||||
shl(ADDROFFS_REG_1, 4);
|
||||
} else {
|
||||
if (swiz.DestComponentEnabled(0)) {
|
||||
// Move and sign-extend low 32 bits
|
||||
movsxd(ADDROFFS_REG_0, eax);
|
||||
|
||||
// Multiply by 16 to be used as an offset later
|
||||
shl(ADDROFFS_REG_0, 4);
|
||||
} else if (swiz.DestComponentEnabled(1)) {
|
||||
// Move and sign-extend high 32 bits
|
||||
shr(rax, 32);
|
||||
movsxd(ADDROFFS_REG_1, eax);
|
||||
|
||||
// Multiply by 16 to be used as an offset later
|
||||
shl(ADDROFFS_REG_1, 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void JitShader::Compile_MOV(Instruction instr) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_RCP(Instruction instr) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
|
||||
// TODO(bunnei): RCPSS is a pretty rough approximation, this might cause problems if Pica
|
||||
// performs this operation more accurately. This should be checked on hardware.
|
||||
rcpss(SRC1, SRC1);
|
||||
shufps(SRC1, SRC1, _MM_SHUFFLE(0, 0, 0, 0)); // XYZW -> XXXX
|
||||
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_RSQ(Instruction instr) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
|
||||
// TODO(bunnei): RSQRTSS is a pretty rough approximation, this might cause problems if Pica
|
||||
// performs this operation more accurately. This should be checked on hardware.
|
||||
rsqrtss(SRC1, SRC1);
|
||||
shufps(SRC1, SRC1, _MM_SHUFFLE(0, 0, 0, 0)); // XYZW -> XXXX
|
||||
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_NOP(Instruction instr) {}
|
||||
|
||||
void JitShader::Compile_END(Instruction instr) {
|
||||
ABI_PopRegistersAndAdjustStack(*this, ABI_ALL_CALLEE_SAVED, 8, 16);
|
||||
ret();
|
||||
}
|
||||
|
||||
void JitShader::Compile_CALL(Instruction instr) {
|
||||
// Push offset of the return
|
||||
push(qword, (instr.flow_control.dest_offset + instr.flow_control.num_instructions));
|
||||
|
||||
// Call the subroutine
|
||||
call(instruction_labels[instr.flow_control.dest_offset]);
|
||||
|
||||
// Skip over the return offset that's on the stack
|
||||
add(rsp, 8);
|
||||
}
|
||||
|
||||
void JitShader::Compile_CALLC(Instruction instr) {
|
||||
Compile_EvaluateCondition(instr);
|
||||
Label b;
|
||||
jz(b);
|
||||
Compile_CALL(instr);
|
||||
L(b);
|
||||
}
|
||||
|
||||
void JitShader::Compile_CALLU(Instruction instr) {
|
||||
Compile_UniformCondition(instr);
|
||||
Label b;
|
||||
jz(b);
|
||||
Compile_CALL(instr);
|
||||
L(b);
|
||||
}
|
||||
|
||||
void JitShader::Compile_CMP(Instruction instr) {
|
||||
using Op = Instruction::Common::CompareOpType::Op;
|
||||
Op op_x = instr.common.compare_op.x;
|
||||
Op op_y = instr.common.compare_op.y;
|
||||
|
||||
Compile_SwizzleSrc(instr, 1, instr.common.src1, SRC1);
|
||||
Compile_SwizzleSrc(instr, 2, instr.common.src2, SRC2);
|
||||
|
||||
// SSE doesn't have greater-than (GT) or greater-equal (GE) comparison operators. You need to
|
||||
// emulate them by swapping the lhs and rhs and using LT and LE. NLT and NLE can't be used here
|
||||
// because they don't match when used with NaNs.
|
||||
static const u8 cmp[] = {CMP_EQ, CMP_NEQ, CMP_LT, CMP_LE, CMP_LT, CMP_LE};
|
||||
|
||||
bool invert_op_x = (op_x == Op::GreaterThan || op_x == Op::GreaterEqual);
|
||||
Xmm lhs_x = invert_op_x ? SRC2 : SRC1;
|
||||
Xmm rhs_x = invert_op_x ? SRC1 : SRC2;
|
||||
|
||||
if (op_x == op_y) {
|
||||
// Compare X-component and Y-component together
|
||||
cmpps(lhs_x, rhs_x, cmp[op_x]);
|
||||
movq(COND0, lhs_x);
|
||||
|
||||
mov(COND1, COND0);
|
||||
} else {
|
||||
bool invert_op_y = (op_y == Op::GreaterThan || op_y == Op::GreaterEqual);
|
||||
Xmm lhs_y = invert_op_y ? SRC2 : SRC1;
|
||||
Xmm rhs_y = invert_op_y ? SRC1 : SRC2;
|
||||
|
||||
// Compare X-component
|
||||
movaps(SCRATCH, lhs_x);
|
||||
cmpss(SCRATCH, rhs_x, cmp[op_x]);
|
||||
|
||||
// Compare Y-component
|
||||
cmpps(lhs_y, rhs_y, cmp[op_y]);
|
||||
|
||||
movq(COND0, SCRATCH);
|
||||
movq(COND1, lhs_y);
|
||||
}
|
||||
|
||||
shr(COND0.cvt32(), 31); // ignores upper 32 bits in source
|
||||
shr(COND1, 63);
|
||||
}
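// Example of the swap above: "src1 > src2" has no direct SSE predicate, so it
// is evaluated as "src2 < src1" (CMP_LT with lhs/rhs exchanged). Using NLT/NLE
// instead would wrongly report true whenever either operand is NaN.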
|
||||
|
||||
void JitShader::Compile_MAD(Instruction instr) {
|
||||
Compile_SwizzleSrc(instr, 1, instr.mad.src1, SRC1);
|
||||
|
||||
if (instr.opcode.Value().EffectiveOpCode() == OpCode::Id::MADI) {
|
||||
Compile_SwizzleSrc(instr, 2, instr.mad.src2i, SRC2);
|
||||
Compile_SwizzleSrc(instr, 3, instr.mad.src3i, SRC3);
|
||||
} else {
|
||||
Compile_SwizzleSrc(instr, 2, instr.mad.src2, SRC2);
|
||||
Compile_SwizzleSrc(instr, 3, instr.mad.src3, SRC3);
|
||||
}
|
||||
|
||||
Compile_SanitizedMul(SRC1, SRC2, SCRATCH);
|
||||
addps(SRC1, SRC3);
|
||||
|
||||
Compile_DestEnable(instr, SRC1);
|
||||
}
|
||||
|
||||
void JitShader::Compile_IF(Instruction instr) {
|
||||
Compile_Assert(instr.flow_control.dest_offset >= program_counter,
|
||||
"Backwards if-statements not supported");
|
||||
Label l_else, l_endif;
|
||||
|
||||
// Evaluate the "IF" condition
|
||||
if (instr.opcode.Value() == OpCode::Id::IFU) {
|
||||
Compile_UniformCondition(instr);
|
||||
} else if (instr.opcode.Value() == OpCode::Id::IFC) {
|
||||
Compile_EvaluateCondition(instr);
|
||||
}
|
||||
jz(l_else, T_NEAR);
|
||||
|
||||
// Compile the code that corresponds to the condition evaluating as true
|
||||
Compile_Block(instr.flow_control.dest_offset);
|
||||
|
||||
// If there isn't an "ELSE" condition, we are done here
|
||||
if (instr.flow_control.num_instructions == 0) {
|
||||
L(l_else);
|
||||
return;
|
||||
}
|
||||
|
||||
jmp(l_endif, T_NEAR);
|
||||
|
||||
L(l_else);
|
||||
// This code corresponds to the "ELSE" condition
|
||||
// Compile the code that corresponds to the condition evaluating as false
|
||||
Compile_Block(instr.flow_control.dest_offset + instr.flow_control.num_instructions);
|
||||
|
||||
L(l_endif);
|
||||
}
|
||||
|
||||
void JitShader::Compile_LOOP(Instruction instr) {
|
||||
Compile_Assert(instr.flow_control.dest_offset >= program_counter,
|
||||
"Backwards loops not supported");
|
||||
Compile_Assert(!looping, "Nested loops not supported");
|
||||
|
||||
looping = true;
|
||||
|
||||
// This decodes the fields from the integer uniform at index instr.flow_control.int_uniform_id.
|
||||
// The Y (LOOPCOUNT_REG) and Z (LOOPINC) component are kept multiplied by 16 (Left shifted by
|
||||
// 4 bits) to be used as an offset into the 16-byte vector registers later
|
||||
size_t offset = ShaderSetup::GetIntUniformOffset(instr.flow_control.int_uniform_id);
|
||||
mov(LOOPCOUNT, dword[SETUP + offset]);
|
||||
mov(LOOPCOUNT_REG, LOOPCOUNT);
|
||||
shr(LOOPCOUNT_REG, 4);
|
||||
and_(LOOPCOUNT_REG, 0xFF0); // Y-component is the start
|
||||
mov(LOOPINC, LOOPCOUNT);
|
||||
shr(LOOPINC, 12);
|
||||
and_(LOOPINC, 0xFF0); // Z-component is the incrementer
|
||||
movzx(LOOPCOUNT, LOOPCOUNT.cvt8()); // X-component is iteration count
|
||||
add(LOOPCOUNT, 1); // Iteration count is X-component + 1
|
||||
|
||||
Label l_loop_start;
|
||||
L(l_loop_start);
|
||||
|
||||
Compile_Block(instr.flow_control.dest_offset + 1);
|
||||
|
||||
add(LOOPCOUNT_REG, LOOPINC); // Increment LOOPCOUNT_REG by Z-component
|
||||
sub(LOOPCOUNT, 1); // Decrement the remaining iteration count by 1
|
||||
jnz(l_loop_start); // Loop while the remaining count is not zero
|
||||
|
||||
looping = false;
|
||||
}
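// A scalar sketch of the uniform decoding above (hypothetical helper, assuming
// the integer uniform packs the x/y/z/w bytes little-endian, x in the low byte),
// mirroring what the generated shr/and/movzx sequence computes:
struct LoopParamsExample {
    unsigned iterations; // x component + 1
    unsigned start;      // y component, pre-multiplied by 16 for register addressing
    unsigned increment;  // z component, pre-multiplied by 16
};
static LoopParamsExample DecodeLoopUniformExample(u32 raw) {
    LoopParamsExample p;
    p.iterations = (raw & 0xFF) + 1;
    p.start = ((raw >> 8) & 0xFF) * 16;
    p.increment = ((raw >> 16) & 0xFF) * 16;
    return p;
}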
|
||||
|
||||
void JitShader::Compile_JMP(Instruction instr) {
|
||||
if (instr.opcode.Value() == OpCode::Id::JMPC)
|
||||
Compile_EvaluateCondition(instr);
|
||||
else if (instr.opcode.Value() == OpCode::Id::JMPU)
|
||||
Compile_UniformCondition(instr);
|
||||
else
|
||||
UNREACHABLE();
|
||||
|
||||
bool inverted_condition =
|
||||
(instr.opcode.Value() == OpCode::Id::JMPU) && (instr.flow_control.num_instructions & 1);
|
||||
|
||||
Label& b = instruction_labels[instr.flow_control.dest_offset];
|
||||
if (inverted_condition) {
|
||||
jz(b, T_NEAR);
|
||||
} else {
|
||||
jnz(b, T_NEAR);
|
||||
}
|
||||
}
|
||||
|
||||
static void Emit(GSEmitter* emitter, Math::Vec4<float24> (*output)[16]) {
|
||||
emitter->Emit(*output);
|
||||
}
|
||||
|
||||
void JitShader::Compile_EMIT(Instruction instr) {
|
||||
Label have_emitter, end;
|
||||
mov(rax, qword[STATE + offsetof(UnitState, emitter_ptr)]);
|
||||
test(rax, rax);
|
||||
jnz(have_emitter);
|
||||
|
||||
ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
|
||||
mov(ABI_PARAM1, reinterpret_cast<size_t>("Execute EMIT on VS"));
|
||||
CallFarFunction(*this, LogCritical);
|
||||
ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
|
||||
jmp(end);
|
||||
|
||||
L(have_emitter);
|
||||
ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
|
||||
mov(ABI_PARAM1, rax);
|
||||
mov(ABI_PARAM2, STATE);
|
||||
add(ABI_PARAM2, static_cast<Xbyak::uint32>(offsetof(UnitState, registers.output)));
|
||||
CallFarFunction(*this, Emit);
|
||||
ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
|
||||
L(end);
|
||||
}
|
||||
|
||||
void JitShader::Compile_SETE(Instruction instr) {
|
||||
Label have_emitter, end;
|
||||
mov(rax, qword[STATE + offsetof(UnitState, emitter_ptr)]);
|
||||
test(rax, rax);
|
||||
jnz(have_emitter);
|
||||
|
||||
ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
|
||||
mov(ABI_PARAM1, reinterpret_cast<size_t>("Execute SETEMIT on VS"));
|
||||
CallFarFunction(*this, LogCritical);
|
||||
ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
|
||||
jmp(end);
|
||||
|
||||
L(have_emitter);
|
||||
mov(byte[rax + offsetof(GSEmitter, vertex_id)], instr.setemit.vertex_id);
|
||||
mov(byte[rax + offsetof(GSEmitter, prim_emit)], instr.setemit.prim_emit);
|
||||
mov(byte[rax + offsetof(GSEmitter, winding)], instr.setemit.winding);
|
||||
L(end);
|
||||
}
|
||||
|
||||
void JitShader::Compile_Block(unsigned end) {
|
||||
while (program_counter < end) {
|
||||
Compile_NextInstr();
|
||||
}
|
||||
}
|
||||
|
||||
void JitShader::Compile_Return() {
|
||||
// Peek return offset on the stack and check if we're at that offset
|
||||
mov(rax, qword[rsp + 8]);
|
||||
cmp(eax, (program_counter));
|
||||
|
||||
// If so, jump back to before CALL
|
||||
Label b;
|
||||
jnz(b);
|
||||
ret();
|
||||
L(b);
|
||||
}
|
||||
|
||||
void JitShader::Compile_NextInstr() {
|
||||
if (std::binary_search(return_offsets.begin(), return_offsets.end(), program_counter)) {
|
||||
Compile_Return();
|
||||
}
|
||||
|
||||
L(instruction_labels[program_counter]);
|
||||
|
||||
Instruction instr = {(*program_code)[program_counter++]};
|
||||
|
||||
OpCode::Id opcode = instr.opcode.Value();
|
||||
auto instr_func = instr_table[static_cast<unsigned>(opcode)];
|
||||
|
||||
if (instr_func) {
|
||||
// JIT the instruction!
|
||||
((*this).*instr_func)(instr);
|
||||
} else {
|
||||
// Unhandled instruction
|
||||
LOG_CRITICAL(HW_GPU, "Unhandled instruction: 0x%02x (0x%08x)",
|
||||
instr.opcode.Value().EffectiveOpCode(), instr.hex);
|
||||
}
|
||||
}
|
||||
|
||||
void JitShader::FindReturnOffsets() {
|
||||
return_offsets.clear();
|
||||
|
||||
for (size_t offset = 0; offset < program_code->size(); ++offset) {
|
||||
Instruction instr = {(*program_code)[offset]};
|
||||
|
||||
switch (instr.opcode.Value()) {
|
||||
case OpCode::Id::CALL:
|
||||
case OpCode::Id::CALLC:
|
||||
case OpCode::Id::CALLU:
|
||||
return_offsets.push_back(instr.flow_control.dest_offset +
|
||||
instr.flow_control.num_instructions);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Sort for efficient binary search later
|
||||
std::sort(return_offsets.begin(), return_offsets.end());
|
||||
}
|
||||
|
||||
void JitShader::Compile(const std::array<u32, MAX_PROGRAM_CODE_LENGTH>* program_code_,
|
||||
const std::array<u32, MAX_SWIZZLE_DATA_LENGTH>* swizzle_data_) {
|
||||
program_code = program_code_;
|
||||
swizzle_data = swizzle_data_;
|
||||
|
||||
// Reset flow control state
|
||||
program = (CompiledShader*)getCurr();
|
||||
program_counter = 0;
|
||||
looping = false;
|
||||
instruction_labels.fill(Xbyak::Label());
|
||||
|
||||
// Find all `CALL` instructions and identify return locations
|
||||
FindReturnOffsets();
|
||||
|
||||
// The stack pointer is 8 modulo 16 at the entry of a procedure
|
||||
// We reserve 16 bytes and assign a dummy value to the first 8 bytes, to catch any potential
|
||||
// return checks (see Compile_Return) that happen in shader main routine.
|
||||
ABI_PushRegistersAndAdjustStack(*this, ABI_ALL_CALLEE_SAVED, 8, 16);
|
||||
mov(qword[rsp + 8], 0xFFFFFFFFFFFFFFFFULL);
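// The dummy value can never equal a real program counter (which is bounded by
// MAX_PROGRAM_CODE_LENGTH), so the comparison emitted by Compile_Return always
// falls through while the main routine is executing.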
|
||||
|
||||
mov(SETUP, ABI_PARAM1);
|
||||
mov(STATE, ABI_PARAM2);
|
||||
|
||||
// Zero address/loop registers
|
||||
xor_(ADDROFFS_REG_0.cvt32(), ADDROFFS_REG_0.cvt32());
|
||||
xor_(ADDROFFS_REG_1.cvt32(), ADDROFFS_REG_1.cvt32());
|
||||
xor_(LOOPCOUNT_REG, LOOPCOUNT_REG);
|
||||
|
||||
// Used to set a register to one
|
||||
static const __m128 one = {1.f, 1.f, 1.f, 1.f};
|
||||
mov(rax, reinterpret_cast<size_t>(&one));
|
||||
movaps(ONE, xword[rax]);
|
||||
|
||||
// Used to negate registers
|
||||
static const __m128 neg = {-0.f, -0.f, -0.f, -0.f};
|
||||
mov(rax, reinterpret_cast<size_t>(&neg));
|
||||
movaps(NEGBIT, xword[rax]);
|
||||
|
||||
// Jump to start of the shader program
|
||||
jmp(ABI_PARAM3);
|
||||
|
||||
// Compile entire program
|
||||
Compile_Block(static_cast<unsigned>(program_code->size()));
|
||||
|
||||
// Free memory that's no longer needed
|
||||
program_code = nullptr;
|
||||
swizzle_data = nullptr;
|
||||
return_offsets.clear();
|
||||
return_offsets.shrink_to_fit();
|
||||
|
||||
ready();
|
||||
|
||||
ASSERT_MSG(getSize() <= MAX_SHADER_SIZE, "Compiled a shader that exceeds the allocated size!");
|
||||
LOG_DEBUG(HW_GPU, "Compiled shader size=%lu", getSize());
|
||||
}
|
||||
|
||||
JitShader::JitShader() : Xbyak::CodeGenerator(MAX_SHADER_SIZE) {}
|
||||
|
||||
} // namespace Shader
|
||||
|
||||
} // namespace Pica
|
|
@ -1,127 +0,0 @@
|
|||
// Copyright 2015 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <cstddef>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
#include <nihstro/shader_bytecode.h>
|
||||
#include <xbyak.h>
|
||||
#include "common/bit_set.h"
|
||||
#include "common/common_types.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
|
||||
using nihstro::Instruction;
|
||||
using nihstro::OpCode;
|
||||
using nihstro::SwizzlePattern;
|
||||
|
||||
namespace Pica {
|
||||
|
||||
namespace Shader {
|
||||
|
||||
/// Memory allocated for each compiled shader
|
||||
constexpr size_t MAX_SHADER_SIZE = MAX_PROGRAM_CODE_LENGTH * 64;
|
||||
|
||||
/**
|
||||
* This class implements the shader JIT compiler. It recompiles a Pica shader program into x86_64
|
||||
* code that can be executed on the host machine directly.
|
||||
*/
|
||||
class JitShader : public Xbyak::CodeGenerator {
|
||||
public:
|
||||
JitShader();
|
||||
|
||||
void Run(const ShaderSetup& setup, UnitState& state, unsigned offset) const {
|
||||
program(&setup, &state, instruction_labels[offset].getAddress());
|
||||
}
|
||||
|
||||
void Compile(const std::array<u32, MAX_PROGRAM_CODE_LENGTH>* program_code,
|
||||
const std::array<u32, MAX_SWIZZLE_DATA_LENGTH>* swizzle_data);
|
||||
|
||||
void Compile_ADD(Instruction instr);
|
||||
void Compile_DP3(Instruction instr);
|
||||
void Compile_DP4(Instruction instr);
|
||||
void Compile_DPH(Instruction instr);
|
||||
void Compile_EX2(Instruction instr);
|
||||
void Compile_LG2(Instruction instr);
|
||||
void Compile_MUL(Instruction instr);
|
||||
void Compile_SGE(Instruction instr);
|
||||
void Compile_SLT(Instruction instr);
|
||||
void Compile_FLR(Instruction instr);
|
||||
void Compile_MAX(Instruction instr);
|
||||
void Compile_MIN(Instruction instr);
|
||||
void Compile_RCP(Instruction instr);
|
||||
void Compile_RSQ(Instruction instr);
|
||||
void Compile_MOVA(Instruction instr);
|
||||
void Compile_MOV(Instruction instr);
|
||||
void Compile_NOP(Instruction instr);
|
||||
void Compile_END(Instruction instr);
|
||||
void Compile_CALL(Instruction instr);
|
||||
void Compile_CALLC(Instruction instr);
|
||||
void Compile_CALLU(Instruction instr);
|
||||
void Compile_IF(Instruction instr);
|
||||
void Compile_LOOP(Instruction instr);
|
||||
void Compile_JMP(Instruction instr);
|
||||
void Compile_CMP(Instruction instr);
|
||||
void Compile_MAD(Instruction instr);
|
||||
void Compile_EMIT(Instruction instr);
|
||||
void Compile_SETE(Instruction instr);
|
||||
|
||||
private:
|
||||
void Compile_Block(unsigned end);
|
||||
void Compile_NextInstr();
|
||||
|
||||
void Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRegister src_reg,
|
||||
Xbyak::Xmm dest);
|
||||
void Compile_DestEnable(Instruction instr, Xbyak::Xmm dest);
|
||||
|
||||
/**
|
||||
* Compiles a `MUL src1, src2` operation, properly handling the PICA semantics when multiplying
|
||||
* zero by inf. Clobbers `src2` and `scratch`.
|
||||
*/
|
||||
void Compile_SanitizedMul(Xbyak::Xmm src1, Xbyak::Xmm src2, Xbyak::Xmm scratch);
|
||||
|
||||
void Compile_EvaluateCondition(Instruction instr);
|
||||
void Compile_UniformCondition(Instruction instr);
|
||||
|
||||
/**
|
||||
* Emits the code to conditionally return from a subroutine invoked by the `CALL` instruction.
|
||||
*/
|
||||
void Compile_Return();
|
||||
|
||||
BitSet32 PersistentCallerSavedRegs();
|
||||
|
||||
/**
|
||||
* Assertion evaluated at compile-time, but only triggered if executed at runtime.
|
||||
* @param condition Condition to be evaluated.
|
||||
* @param msg Message to be logged if the assertion fails.
|
||||
*/
|
||||
void Compile_Assert(bool condition, const char* msg);
|
||||
|
||||
/**
|
||||
* Analyzes the entire shader program for `CALL` instructions before emitting any code,
|
||||
* identifying the locations where a return needs to be inserted.
|
||||
*/
|
||||
void FindReturnOffsets();
|
||||
|
||||
const std::array<u32, MAX_PROGRAM_CODE_LENGTH>* program_code = nullptr;
|
||||
const std::array<u32, MAX_SWIZZLE_DATA_LENGTH>* swizzle_data = nullptr;
|
||||
|
||||
/// Mapping of Pica VS instructions to pointers in the emitted code
|
||||
std::array<Xbyak::Label, MAX_PROGRAM_CODE_LENGTH> instruction_labels;
|
||||
|
||||
/// Offsets in code where a return needs to be inserted
|
||||
std::vector<unsigned> return_offsets;
|
||||
|
||||
unsigned program_counter = 0; ///< Offset of the next instruction to decode
|
||||
bool looping = false; ///< True if compiling a loop, used to check for nested loops
|
||||
|
||||
using CompiledShader = void(const void* setup, void* state, const u8* start_addr);
|
||||
CompiledShader* program = nullptr;
|
||||
};
|
||||
|
||||
} // Shader
|
||||
|
||||
} // Pica
|
|
@ -1,197 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <cstddef>
|
||||
#include <boost/container/static_vector.hpp>
|
||||
#include <boost/container/vector.hpp>
|
||||
#include "common/bit_field.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "video_core/pica_state.h"
|
||||
#include "video_core/pica_types.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
#include "video_core/swrasterizer/clipper.h"
|
||||
#include "video_core/swrasterizer/rasterizer.h"
|
||||
|
||||
using Pica::Rasterizer::Vertex;
|
||||
|
||||
namespace Pica {
|
||||
|
||||
namespace Clipper {
|
||||
|
||||
struct ClippingEdge {
|
||||
public:
|
||||
ClippingEdge(Math::Vec4<float24> coeffs, Math::Vec4<float24> bias = Math::Vec4<float24>(
|
||||
float24::FromFloat32(0), float24::FromFloat32(0),
|
||||
float24::FromFloat32(0), float24::FromFloat32(0)))
|
||||
: coeffs(coeffs), bias(bias) {}
|
||||
|
||||
bool IsInside(const Vertex& vertex) const {
|
||||
return Math::Dot(vertex.pos + bias, coeffs) >= float24::FromFloat32(0);
|
||||
}
|
||||
|
||||
bool IsOutSide(const Vertex& vertex) const {
|
||||
return !IsInside(vertex);
|
||||
}
|
||||
|
||||
Vertex GetIntersection(const Vertex& v0, const Vertex& v1) const {
|
||||
float24 dp = Math::Dot(v0.pos + bias, coeffs);
|
||||
float24 dp_prev = Math::Dot(v1.pos + bias, coeffs);
|
||||
float24 factor = dp_prev / (dp_prev - dp);
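// factor is chosen so that interpolating v0 and v1 with it lands exactly on
// the clipping plane, i.e. where the signed distance used above becomes zero.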
|
||||
|
||||
return Vertex::Lerp(factor, v0, v1);
|
||||
}
|
||||
|
||||
private:
|
||||
float24 pos;
|
||||
Math::Vec4<float24> coeffs;
|
||||
Math::Vec4<float24> bias;
|
||||
};
|
||||
|
||||
static void InitScreenCoordinates(Vertex& vtx) {
|
||||
struct {
|
||||
float24 halfsize_x;
|
||||
float24 offset_x;
|
||||
float24 halfsize_y;
|
||||
float24 offset_y;
|
||||
float24 zscale;
|
||||
float24 offset_z;
|
||||
} viewport;
|
||||
|
||||
const auto& regs = g_state.regs;
|
||||
viewport.halfsize_x = float24::FromRaw(regs.rasterizer.viewport_size_x);
|
||||
viewport.halfsize_y = float24::FromRaw(regs.rasterizer.viewport_size_y);
|
||||
viewport.offset_x = float24::FromFloat32(static_cast<float>(regs.rasterizer.viewport_corner.x));
|
||||
viewport.offset_y = float24::FromFloat32(static_cast<float>(regs.rasterizer.viewport_corner.y));
|
||||
|
||||
float24 inv_w = float24::FromFloat32(1.f) / vtx.pos.w;
|
||||
vtx.pos.w = inv_w;
|
||||
vtx.quat *= inv_w;
|
||||
vtx.color *= inv_w;
|
||||
vtx.tc0 *= inv_w;
|
||||
vtx.tc1 *= inv_w;
|
||||
vtx.tc0_w *= inv_w;
|
||||
vtx.view *= inv_w;
|
||||
vtx.tc2 *= inv_w;
|
||||
|
||||
vtx.screenpos[0] =
|
||||
(vtx.pos.x * inv_w + float24::FromFloat32(1.0)) * viewport.halfsize_x + viewport.offset_x;
|
||||
vtx.screenpos[1] =
|
||||
(vtx.pos.y * inv_w + float24::FromFloat32(1.0)) * viewport.halfsize_y + viewport.offset_y;
|
||||
vtx.screenpos[2] = vtx.pos.z * inv_w;
|
||||
}
|
||||
|
||||
void ProcessTriangle(const OutputVertex& v0, const OutputVertex& v1, const OutputVertex& v2) {
|
||||
using boost::container::static_vector;
|
||||
|
||||
// Clipping a planar n-gon against a plane will remove at least 1 vertex and introduces 2 at
|
||||
// the new edge (or less in degenerate cases). As such, we can say that each clipping plane
|
||||
// introduces at most 1 new vertex to the polygon. Since we start with a triangle and have a
|
||||
// fixed 6 clipping planes, the maximum number of vertices of the clipped polygon is 3 + 6 = 9.
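// For example, a triangle with exactly one vertex outside a plane loses that
// vertex and gains the two edge/plane intersection points, going from 3 to 4
// vertices; each additional plane can add at most one more.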
|
||||
static const size_t MAX_VERTICES = 9;
|
||||
static_vector<Vertex, MAX_VERTICES> buffer_a = {v0, v1, v2};
|
||||
static_vector<Vertex, MAX_VERTICES> buffer_b;
|
||||
|
||||
auto FlipQuaternionIfOpposite = [](auto& a, const auto& b) {
|
||||
if (Math::Dot(a, b) < float24::Zero())
|
||||
a = a * float24::FromFloat32(-1.0f);
|
||||
};
|
||||
|
||||
// Flip the quaternions if they are opposite to prevent interpolating them over the wrong
|
||||
// direction.
|
||||
FlipQuaternionIfOpposite(buffer_a[1].quat, buffer_a[0].quat);
|
||||
FlipQuaternionIfOpposite(buffer_a[2].quat, buffer_a[0].quat);
|
||||
|
||||
auto* output_list = &buffer_a;
|
||||
auto* input_list = &buffer_b;
|
||||
|
||||
// NOTE: We clip against a w=epsilon plane to guarantee that the output has a positive w value.
|
||||
// TODO: Not sure if this is a valid approach. Also should probably instead use the smallest
|
||||
// epsilon possible within float24 accuracy.
|
||||
static const float24 EPSILON = float24::FromFloat32(0.00001f);
|
||||
static const float24 f0 = float24::FromFloat32(0.0);
|
||||
static const float24 f1 = float24::FromFloat32(1.0);
|
||||
static const std::array<ClippingEdge, 7> clipping_edges = {{
|
||||
{Math::MakeVec(-f1, f0, f0, f1)}, // x = +w
|
||||
{Math::MakeVec(f1, f0, f0, f1)}, // x = -w
|
||||
{Math::MakeVec(f0, -f1, f0, f1)}, // y = +w
|
||||
{Math::MakeVec(f0, f1, f0, f1)}, // y = -w
|
||||
{Math::MakeVec(f0, f0, -f1, f0)}, // z = 0
|
||||
{Math::MakeVec(f0, f0, f1, f1)}, // z = -w
|
||||
{Math::MakeVec(f0, f0, f0, f1), Math::Vec4<float24>(f0, f0, f0, EPSILON)}, // w = EPSILON
|
||||
}};
|
||||
|
||||
// Simple implementation of the Sutherland-Hodgman clipping algorithm.
|
||||
// TODO: Make this less inefficient (currently lots of useless buffering overhead happens here)
|
||||
auto Clip = [&](const ClippingEdge& edge) {
|
||||
std::swap(input_list, output_list);
|
||||
output_list->clear();
|
||||
|
||||
const Vertex* reference_vertex = &input_list->back();
|
||||
|
||||
for (const auto& vertex : *input_list) {
|
||||
// NOTE: This algorithm changes vertex order in some cases!
|
||||
if (edge.IsInside(vertex)) {
|
||||
if (edge.IsOutSide(*reference_vertex)) {
|
||||
output_list->push_back(edge.GetIntersection(vertex, *reference_vertex));
|
||||
}
|
||||
|
||||
output_list->push_back(vertex);
|
||||
} else if (edge.IsInside(*reference_vertex)) {
|
||||
output_list->push_back(edge.GetIntersection(vertex, *reference_vertex));
|
||||
}
|
||||
reference_vertex = &vertex;
|
||||
}
|
||||
};
|
||||
|
||||
for (auto edge : clipping_edges) {
|
||||
Clip(edge);
|
||||
|
||||
// Need to have at least a full triangle to continue...
|
||||
if (output_list->size() < 3)
|
||||
return;
|
||||
}
|
||||
|
||||
if (g_state.regs.rasterizer.clip_enable) {
|
||||
ClippingEdge custom_edge{g_state.regs.rasterizer.GetClipCoef()};
|
||||
Clip(custom_edge);
|
||||
|
||||
if (output_list->size() < 3)
|
||||
return;
|
||||
}
|
||||
|
||||
InitScreenCoordinates((*output_list)[0]);
|
||||
InitScreenCoordinates((*output_list)[1]);
|
||||
|
||||
for (size_t i = 0; i < output_list->size() - 2; i++) {
|
||||
Vertex& vtx0 = (*output_list)[0];
|
||||
Vertex& vtx1 = (*output_list)[i + 1];
|
||||
Vertex& vtx2 = (*output_list)[i + 2];
|
||||
|
||||
InitScreenCoordinates(vtx2);
|
||||
|
||||
LOG_TRACE(Render_Software,
|
||||
"Triangle %lu/%lu at position (%.3f, %.3f, %.3f, %.3f), "
|
||||
"(%.3f, %.3f, %.3f, %.3f), (%.3f, %.3f, %.3f, %.3f) and "
|
||||
"screen position (%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f), (%.2f, %.2f, %.2f)",
|
||||
i + 1, output_list->size() - 2, vtx0.pos.x.ToFloat32(), vtx0.pos.y.ToFloat32(),
|
||||
vtx0.pos.z.ToFloat32(), vtx0.pos.w.ToFloat32(), vtx1.pos.x.ToFloat32(),
|
||||
vtx1.pos.y.ToFloat32(), vtx1.pos.z.ToFloat32(), vtx1.pos.w.ToFloat32(),
|
||||
vtx2.pos.x.ToFloat32(), vtx2.pos.y.ToFloat32(), vtx2.pos.z.ToFloat32(),
|
||||
vtx2.pos.w.ToFloat32(), vtx0.screenpos.x.ToFloat32(),
|
||||
vtx0.screenpos.y.ToFloat32(), vtx0.screenpos.z.ToFloat32(),
|
||||
vtx1.screenpos.x.ToFloat32(), vtx1.screenpos.y.ToFloat32(),
|
||||
vtx1.screenpos.z.ToFloat32(), vtx2.screenpos.x.ToFloat32(),
|
||||
vtx2.screenpos.y.ToFloat32(), vtx2.screenpos.z.ToFloat32());
|
||||
|
||||
Rasterizer::ProcessTriangle(vtx0, vtx1, vtx2);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
} // namespace
|
|
@ -1,21 +0,0 @@
|
|||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
namespace Pica {
|
||||
|
||||
namespace Shader {
|
||||
struct OutputVertex;
|
||||
}
|
||||
|
||||
namespace Clipper {
|
||||
|
||||
using Shader::OutputVertex;
|
||||
|
||||
void ProcessTriangle(const OutputVertex& v0, const OutputVertex& v1, const OutputVertex& v2);
|
||||
|
||||
} // namespace
|
||||
|
||||
} // namespace
|
|
@ -1,360 +0,0 @@
|
|||
// Copyright 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/color.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/math_util.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "core/hw/gpu.h"
|
||||
#include "core/memory.h"
|
||||
#include "video_core/pica_state.h"
|
||||
#include "video_core/regs_framebuffer.h"
|
||||
#include "video_core/swrasterizer/framebuffer.h"
|
||||
#include "video_core/utils.h"
|
||||
|
||||
namespace Pica {
|
||||
namespace Rasterizer {
|
||||
|
||||
void DrawPixel(int x, int y, const Math::Vec4<u8>& color) {
|
||||
const auto& framebuffer = g_state.regs.framebuffer.framebuffer;
|
||||
const PAddr addr = framebuffer.GetColorBufferPhysicalAddress();
|
||||
|
||||
// Similarly to textures, the render framebuffer is laid out from bottom to top, too.
|
||||
// NOTE: The framebuffer height register contains the actual FB height minus one.
|
||||
y = framebuffer.height - y;
|
||||
|
||||
const u32 coarse_y = y & ~7;
|
||||
u32 bytes_per_pixel =
|
||||
GPU::Regs::BytesPerPixel(GPU::Regs::PixelFormat(framebuffer.color_format.Value()));
|
||||
u32 dst_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) +
|
||||
coarse_y * framebuffer.width * bytes_per_pixel;
|
||||
u8* dst_pixel = Memory::GetPhysicalPointer(addr) + dst_offset;
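// Example with assumed values: for an RGBA8 target (4 bytes per pixel) of
// width 320, a pixel whose flipped y is 12 falls in the 8x8 tile row starting
// at coarse_y = 8, so dst_offset = GetMortonOffset(x, 12, 4) + 8 * 320 * 4.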
|
||||
|
||||
switch (framebuffer.color_format) {
|
||||
case FramebufferRegs::ColorFormat::RGBA8:
|
||||
Color::EncodeRGBA8(color, dst_pixel);
|
||||
break;
|
||||
|
||||
case FramebufferRegs::ColorFormat::RGB8:
|
||||
Color::EncodeRGB8(color, dst_pixel);
|
||||
break;
|
||||
|
||||
case FramebufferRegs::ColorFormat::RGB5A1:
|
||||
Color::EncodeRGB5A1(color, dst_pixel);
|
||||
break;
|
||||
|
||||
case FramebufferRegs::ColorFormat::RGB565:
|
||||
Color::EncodeRGB565(color, dst_pixel);
|
||||
break;
|
||||
|
||||
case FramebufferRegs::ColorFormat::RGBA4:
|
||||
Color::EncodeRGBA4(color, dst_pixel);
|
||||
break;
|
||||
|
||||
default:
|
||||
LOG_CRITICAL(Render_Software, "Unknown framebuffer color format %x",
|
||||
framebuffer.color_format.Value());
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
}
|
||||
|
||||
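Both DrawPixel and the accessors that follow lean on the same address computation: the color buffer is stored bottom-up in 8x8 tiles, pixels are swizzled in Morton (Z-order) fashion inside each tile, and coarse_y * width * bytes_per_pixel skips whole rows of tiles. As a rough, hypothetical illustration of that kind of swizzle (not the actual VideoCore::GetMortonOffset implementation), interleaving the low three bits of x and y gives a pixel's index inside one 8x8 tile:

#include <cstdint>

// Editor's sketch: index of (x, y) within an 8x8 tile, assuming a plain
// Morton/Z-order interleave of the low three bits of each coordinate.
static std::uint32_t MortonIndexWithin8x8Tile(std::uint32_t x, std::uint32_t y) {
    std::uint32_t index = 0;
    for (std::uint32_t bit = 0; bit < 3; ++bit) {
        index |= ((x >> bit) & 1u) << (2 * bit);     // x bits land in even positions
        index |= ((y >> bit) & 1u) << (2 * bit + 1); // y bits land in odd positions
    }
    return index; // 0..63; multiply by bytes_per_pixel for a byte offset
}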
const Math::Vec4<u8> GetPixel(int x, int y) {
|
||||
const auto& framebuffer = g_state.regs.framebuffer.framebuffer;
|
||||
const PAddr addr = framebuffer.GetColorBufferPhysicalAddress();
|
||||
|
||||
y = framebuffer.height - y;
|
||||
|
||||
const u32 coarse_y = y & ~7;
|
||||
u32 bytes_per_pixel =
|
||||
GPU::Regs::BytesPerPixel(GPU::Regs::PixelFormat(framebuffer.color_format.Value()));
|
||||
u32 src_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) +
|
||||
coarse_y * framebuffer.width * bytes_per_pixel;
|
||||
u8* src_pixel = Memory::GetPhysicalPointer(addr) + src_offset;
|
||||
|
||||
switch (framebuffer.color_format) {
|
||||
case FramebufferRegs::ColorFormat::RGBA8:
|
||||
return Color::DecodeRGBA8(src_pixel);
|
||||
|
||||
case FramebufferRegs::ColorFormat::RGB8:
|
||||
return Color::DecodeRGB8(src_pixel);
|
||||
|
||||
case FramebufferRegs::ColorFormat::RGB5A1:
|
||||
return Color::DecodeRGB5A1(src_pixel);
|
||||
|
||||
case FramebufferRegs::ColorFormat::RGB565:
|
||||
return Color::DecodeRGB565(src_pixel);
|
||||
|
||||
case FramebufferRegs::ColorFormat::RGBA4:
|
||||
return Color::DecodeRGBA4(src_pixel);
|
||||
|
||||
default:
|
||||
LOG_CRITICAL(Render_Software, "Unknown framebuffer color format %x",
|
||||
framebuffer.color_format.Value());
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
return {0, 0, 0, 0};
|
||||
}
|
||||
|
||||
u32 GetDepth(int x, int y) {
|
||||
const auto& framebuffer = g_state.regs.framebuffer.framebuffer;
|
||||
const PAddr addr = framebuffer.GetDepthBufferPhysicalAddress();
|
||||
u8* depth_buffer = Memory::GetPhysicalPointer(addr);
|
||||
|
||||
y = framebuffer.height - y;
|
||||
|
||||
const u32 coarse_y = y & ~7;
|
||||
u32 bytes_per_pixel = FramebufferRegs::BytesPerDepthPixel(framebuffer.depth_format);
|
||||
u32 stride = framebuffer.width * bytes_per_pixel;
|
||||
|
||||
u32 src_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) + coarse_y * stride;
|
||||
u8* src_pixel = depth_buffer + src_offset;
|
||||
|
||||
switch (framebuffer.depth_format) {
|
||||
case FramebufferRegs::DepthFormat::D16:
|
||||
return Color::DecodeD16(src_pixel);
|
||||
case FramebufferRegs::DepthFormat::D24:
|
||||
return Color::DecodeD24(src_pixel);
|
||||
case FramebufferRegs::DepthFormat::D24S8:
|
||||
return Color::DecodeD24S8(src_pixel).x;
|
||||
default:
|
||||
LOG_CRITICAL(HW_GPU, "Unimplemented depth format %u", framebuffer.depth_format);
|
||||
UNIMPLEMENTED();
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
u8 GetStencil(int x, int y) {
|
||||
const auto& framebuffer = g_state.regs.framebuffer.framebuffer;
|
||||
const PAddr addr = framebuffer.GetDepthBufferPhysicalAddress();
|
||||
u8* depth_buffer = Memory::GetPhysicalPointer(addr);
|
||||
|
||||
y = framebuffer.height - y;
|
||||
|
||||
const u32 coarse_y = y & ~7;
|
||||
u32 bytes_per_pixel = Pica::FramebufferRegs::BytesPerDepthPixel(framebuffer.depth_format);
|
||||
u32 stride = framebuffer.width * bytes_per_pixel;
|
||||
|
||||
u32 src_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) + coarse_y * stride;
|
||||
u8* src_pixel = depth_buffer + src_offset;
|
||||
|
||||
switch (framebuffer.depth_format) {
|
||||
case FramebufferRegs::DepthFormat::D24S8:
|
||||
return Color::DecodeD24S8(src_pixel).y;
|
||||
|
||||
default:
|
||||
LOG_WARNING(
|
||||
HW_GPU,
|
||||
"GetStencil called for function which doesn't have a stencil component (format %u)",
|
||||
framebuffer.depth_format);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
void SetDepth(int x, int y, u32 value) {
|
||||
const auto& framebuffer = g_state.regs.framebuffer.framebuffer;
|
||||
const PAddr addr = framebuffer.GetDepthBufferPhysicalAddress();
|
||||
u8* depth_buffer = Memory::GetPhysicalPointer(addr);
|
||||
|
||||
y = framebuffer.height - y;
|
||||
|
||||
const u32 coarse_y = y & ~7;
|
||||
u32 bytes_per_pixel = FramebufferRegs::BytesPerDepthPixel(framebuffer.depth_format);
|
||||
u32 stride = framebuffer.width * bytes_per_pixel;
|
||||
|
||||
u32 dst_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) + coarse_y * stride;
|
||||
u8* dst_pixel = depth_buffer + dst_offset;
|
||||
|
||||
switch (framebuffer.depth_format) {
|
||||
case FramebufferRegs::DepthFormat::D16:
|
||||
Color::EncodeD16(value, dst_pixel);
|
||||
break;
|
||||
|
||||
case FramebufferRegs::DepthFormat::D24:
|
||||
Color::EncodeD24(value, dst_pixel);
|
||||
break;
|
||||
|
||||
case FramebufferRegs::DepthFormat::D24S8:
|
||||
Color::EncodeD24X8(value, dst_pixel);
|
||||
break;
|
||||
|
||||
default:
|
||||
LOG_CRITICAL(HW_GPU, "Unimplemented depth format %u", framebuffer.depth_format);
|
||||
UNIMPLEMENTED();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void SetStencil(int x, int y, u8 value) {
|
||||
const auto& framebuffer = g_state.regs.framebuffer.framebuffer;
|
||||
const PAddr addr = framebuffer.GetDepthBufferPhysicalAddress();
|
||||
u8* depth_buffer = Memory::GetPhysicalPointer(addr);
|
||||
|
||||
y = framebuffer.height - y;
|
||||
|
||||
const u32 coarse_y = y & ~7;
|
||||
u32 bytes_per_pixel = Pica::FramebufferRegs::BytesPerDepthPixel(framebuffer.depth_format);
|
||||
u32 stride = framebuffer.width * bytes_per_pixel;
|
||||
|
||||
u32 dst_offset = VideoCore::GetMortonOffset(x, y, bytes_per_pixel) + coarse_y * stride;
|
||||
u8* dst_pixel = depth_buffer + dst_offset;
|
||||
|
||||
switch (framebuffer.depth_format) {
|
||||
case Pica::FramebufferRegs::DepthFormat::D16:
|
||||
case Pica::FramebufferRegs::DepthFormat::D24:
|
||||
// Nothing to do
|
||||
break;
|
||||
|
||||
case Pica::FramebufferRegs::DepthFormat::D24S8:
|
||||
Color::EncodeX24S8(value, dst_pixel);
|
||||
break;
|
||||
|
||||
default:
|
||||
LOG_CRITICAL(HW_GPU, "Unimplemented depth format %u", framebuffer.depth_format);
|
||||
UNIMPLEMENTED();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
u8 PerformStencilAction(FramebufferRegs::StencilAction action, u8 old_stencil, u8 ref) {
    switch (action) {
    case FramebufferRegs::StencilAction::Keep:
        return old_stencil;

    case FramebufferRegs::StencilAction::Zero:
        return 0;

    case FramebufferRegs::StencilAction::Replace:
        return ref;

    case FramebufferRegs::StencilAction::Increment:
        // Saturated increment
        return std::min<u8>(old_stencil, 254) + 1;

    case FramebufferRegs::StencilAction::Decrement:
        // Saturated decrement
        return std::max<u8>(old_stencil, 1) - 1;

    case FramebufferRegs::StencilAction::Invert:
        return ~old_stencil;

    case FramebufferRegs::StencilAction::IncrementWrap:
        return old_stencil + 1;

    case FramebufferRegs::StencilAction::DecrementWrap:
        return old_stencil - 1;

    default:
        LOG_CRITICAL(HW_GPU, "Unknown stencil action %x", (int)action);
        UNIMPLEMENTED();
        return 0;
    }
}
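The Increment and Decrement actions above saturate at the ends of the 8-bit range rather than wrapping, unlike their *Wrap counterparts. A minimal standalone check of that arithmetic (editor's sketch, standard library only):

#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
    auto saturating_inc = [](std::uint8_t s) {
        return static_cast<std::uint8_t>(std::min<std::uint8_t>(s, 254) + 1);
    };
    auto saturating_dec = [](std::uint8_t s) {
        return static_cast<std::uint8_t>(std::max<std::uint8_t>(s, 1) - 1);
    };

    assert(saturating_inc(10) == 11);
    assert(saturating_inc(255) == 255); // clamps instead of wrapping to 0
    assert(saturating_dec(10) == 9);
    assert(saturating_dec(0) == 0);     // clamps instead of wrapping to 255
    return 0;
}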
|
||||
|
||||
Math::Vec4<u8> EvaluateBlendEquation(const Math::Vec4<u8>& src, const Math::Vec4<u8>& srcfactor,
                                     const Math::Vec4<u8>& dest, const Math::Vec4<u8>& destfactor,
                                     FramebufferRegs::BlendEquation equation) {
    Math::Vec4<int> result;

    auto src_result = (src * srcfactor).Cast<int>();
    auto dst_result = (dest * destfactor).Cast<int>();

    switch (equation) {
    case FramebufferRegs::BlendEquation::Add:
        result = (src_result + dst_result) / 255;
        break;

    case FramebufferRegs::BlendEquation::Subtract:
        result = (src_result - dst_result) / 255;
        break;

    case FramebufferRegs::BlendEquation::ReverseSubtract:
        result = (dst_result - src_result) / 255;
        break;

    // TODO: How do these two actually work? OpenGL doesn't include the blend factors in the
    // min/max computations, but is this what the 3DS actually does?
    case FramebufferRegs::BlendEquation::Min:
        result.r() = std::min(src.r(), dest.r());
        result.g() = std::min(src.g(), dest.g());
        result.b() = std::min(src.b(), dest.b());
        result.a() = std::min(src.a(), dest.a());
        break;

    case FramebufferRegs::BlendEquation::Max:
        result.r() = std::max(src.r(), dest.r());
        result.g() = std::max(src.g(), dest.g());
        result.b() = std::max(src.b(), dest.b());
        result.a() = std::max(src.a(), dest.a());
        break;

    default:
        LOG_CRITICAL(HW_GPU, "Unknown RGB blend equation %x", equation);
        UNIMPLEMENTED();
    }

    return Math::Vec4<u8>(MathUtil::Clamp(result.r(), 0, 255), MathUtil::Clamp(result.g(), 0, 255),
                          MathUtil::Clamp(result.b(), 0, 255), MathUtil::Clamp(result.a(), 0, 255));
};
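For the Add case above, each channel works out to src*srcfactor/255 + dest*destfactor/255, clamped to [0, 255] on return. A small standalone sketch of that per-channel arithmetic (editor's illustration with plain ints, not the Math::Vec4 types used here):

#include <algorithm>
#include <cstdio>

// One channel of the "Add" blend equation: (src*srcfactor + dest*destfactor) / 255, clamped.
static int BlendAddChannel(int src, int srcfactor, int dest, int destfactor) {
    int result = (src * srcfactor + dest * destfactor) / 255;
    return std::min(std::max(result, 0), 255);
}

int main() {
    // E.g. a half-weighted source channel blended with a half-weighted destination channel.
    std::printf("%d\n", BlendAddChannel(200, 128, 100, 127)); // prints 150
    return 0;
}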
|
||||
|
||||
u8 LogicOp(u8 src, u8 dest, FramebufferRegs::LogicOp op) {
|
||||
switch (op) {
|
||||
case FramebufferRegs::LogicOp::Clear:
|
||||
return 0;
|
||||
|
||||
case FramebufferRegs::LogicOp::And:
|
||||
return src & dest;
|
||||
|
||||
case FramebufferRegs::LogicOp::AndReverse:
|
||||
return src & ~dest;
|
||||
|
||||
case FramebufferRegs::LogicOp::Copy:
|
||||
return src;
|
||||
|
||||
case FramebufferRegs::LogicOp::Set:
|
||||
return 255;
|
||||
|
||||
case FramebufferRegs::LogicOp::CopyInverted:
|
||||
return ~src;
|
||||
|
||||
case FramebufferRegs::LogicOp::NoOp:
|
||||
return dest;
|
||||
|
||||
case FramebufferRegs::LogicOp::Invert:
|
||||
return ~dest;
|
||||
|
||||
case FramebufferRegs::LogicOp::Nand:
|
||||
return ~(src & dest);
|
||||
|
||||
case FramebufferRegs::LogicOp::Or:
|
||||
return src | dest;
|
||||
|
||||
case FramebufferRegs::LogicOp::Nor:
|
||||
return ~(src | dest);
|
||||
|
||||
case FramebufferRegs::LogicOp::Xor:
|
||||
return src ^ dest;
|
||||
|
||||
case FramebufferRegs::LogicOp::Equiv:
|
||||
return ~(src ^ dest);
|
||||
|
||||
case FramebufferRegs::LogicOp::AndInverted:
|
||||
return ~src & dest;
|
||||
|
||||
case FramebufferRegs::LogicOp::OrReverse:
|
||||
return src | ~dest;
|
||||
|
||||
case FramebufferRegs::LogicOp::OrInverted:
|
||||
return ~src | dest;
|
||||
}
|
||||
|
||||
UNREACHABLE();
|
||||
};
|
||||
|
||||
} // namespace Rasterizer
|
||||
} // namespace Pica
|
|
@@ -1,29 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"
#include "common/vector_math.h"
#include "video_core/regs_framebuffer.h"

namespace Pica {
namespace Rasterizer {

void DrawPixel(int x, int y, const Math::Vec4<u8>& color);
const Math::Vec4<u8> GetPixel(int x, int y);
u32 GetDepth(int x, int y);
u8 GetStencil(int x, int y);
void SetDepth(int x, int y, u32 value);
void SetStencil(int x, int y, u8 value);
u8 PerformStencilAction(FramebufferRegs::StencilAction action, u8 old_stencil, u8 ref);

Math::Vec4<u8> EvaluateBlendEquation(const Math::Vec4<u8>& src, const Math::Vec4<u8>& srcfactor,
                                     const Math::Vec4<u8>& dest, const Math::Vec4<u8>& destfactor,
                                     FramebufferRegs::BlendEquation equation);

u8 LogicOp(u8 src, u8 dest, FramebufferRegs::LogicOp op);

} // namespace Rasterizer
} // namespace Pica
@@ -1,308 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/math_util.h"
#include "video_core/swrasterizer/lighting.h"

namespace Pica {

static float LookupLightingLut(const Pica::State::Lighting& lighting, size_t lut_index, u8 index,
                               float delta) {
    ASSERT_MSG(lut_index < lighting.luts.size(), "Out of range lut");
    ASSERT_MSG(index < lighting.luts[lut_index].size(), "Out of range index");

    const auto& lut = lighting.luts[lut_index][index];

    float lut_value = lut.ToFloat();
    float lut_diff = lut.DiffToFloat();

    return lut_value + lut_diff * delta;
}

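Each lighting LUT entry stores a base value plus a per-entry difference, so a lookup is simply value + diff * delta, where delta is the fractional position between two entries. A minimal sketch of that interpolation over a plain array (editor's illustration with a hypothetical Entry type, not the actual State::Lighting layout):

#include <array>
#include <cstdio>

struct Entry {
    float value; // sample stored at this index
    float diff;  // difference to the next sample
};

// Interpolated lookup, equivalent in spirit to LookupLightingLut above.
static float LookupLut(const std::array<Entry, 256>& lut, unsigned index, float delta) {
    return lut[index].value + lut[index].diff * delta;
}

int main() {
    std::array<Entry, 256> lut{};
    lut[10] = {0.50f, 0.04f};
    std::printf("%f\n", LookupLut(lut, 10, 0.25f)); // 0.50 + 0.04 * 0.25 = 0.51
    return 0;
}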
std::tuple<Math::Vec4<u8>, Math::Vec4<u8>> ComputeFragmentsColors(
|
||||
const Pica::LightingRegs& lighting, const Pica::State::Lighting& lighting_state,
|
||||
const Math::Quaternion<float>& normquat, const Math::Vec3<float>& view,
|
||||
const Math::Vec4<u8> (&texture_color)[4]) {
|
||||
|
||||
Math::Vec3<float> surface_normal;
|
||||
Math::Vec3<float> surface_tangent;
|
||||
|
||||
if (lighting.config0.bump_mode != LightingRegs::LightingBumpMode::None) {
|
||||
Math::Vec3<float> perturbation =
|
||||
texture_color[lighting.config0.bump_selector].xyz().Cast<float>() / 127.5f -
|
||||
Math::MakeVec(1.0f, 1.0f, 1.0f);
|
||||
if (lighting.config0.bump_mode == LightingRegs::LightingBumpMode::NormalMap) {
|
||||
if (!lighting.config0.disable_bump_renorm) {
|
||||
const float z_square = 1 - perturbation.xy().Length2();
|
||||
perturbation.z = std::sqrt(std::max(z_square, 0.0f));
|
||||
}
|
||||
surface_normal = perturbation;
|
||||
surface_tangent = Math::MakeVec(1.0f, 0.0f, 0.0f);
|
||||
} else if (lighting.config0.bump_mode == LightingRegs::LightingBumpMode::TangentMap) {
|
||||
surface_normal = Math::MakeVec(0.0f, 0.0f, 1.0f);
|
||||
surface_tangent = perturbation;
|
||||
} else {
|
||||
LOG_ERROR(HW_GPU, "Unknown bump mode %u", lighting.config0.bump_mode.Value());
|
||||
}
|
||||
} else {
|
||||
surface_normal = Math::MakeVec(0.0f, 0.0f, 1.0f);
|
||||
surface_tangent = Math::MakeVec(1.0f, 0.0f, 0.0f);
|
||||
}
|
||||
|
||||
    // Use the normalized quaternion when performing the rotation
|
||||
auto normal = Math::QuaternionRotate(normquat, surface_normal);
|
||||
auto tangent = Math::QuaternionRotate(normquat, surface_tangent);
|
||||
|
||||
Math::Vec4<float> diffuse_sum = {0.0f, 0.0f, 0.0f, 1.0f};
|
||||
Math::Vec4<float> specular_sum = {0.0f, 0.0f, 0.0f, 1.0f};
|
||||
|
||||
for (unsigned light_index = 0; light_index <= lighting.max_light_index; ++light_index) {
|
||||
unsigned num = lighting.light_enable.GetNum(light_index);
|
||||
const auto& light_config = lighting.light[num];
|
||||
|
||||
Math::Vec3<float> refl_value = {};
|
||||
Math::Vec3<float> position = {float16::FromRaw(light_config.x).ToFloat32(),
|
||||
float16::FromRaw(light_config.y).ToFloat32(),
|
||||
float16::FromRaw(light_config.z).ToFloat32()};
|
||||
Math::Vec3<float> light_vector;
|
||||
|
||||
if (light_config.config.directional)
|
||||
light_vector = position;
|
||||
else
|
||||
light_vector = position + view;
|
||||
|
||||
light_vector.Normalize();
|
||||
|
||||
Math::Vec3<float> norm_view = view.Normalized();
|
||||
Math::Vec3<float> half_vector = norm_view + light_vector;
|
||||
|
||||
float dist_atten = 1.0f;
|
||||
if (!lighting.IsDistAttenDisabled(num)) {
|
||||
auto distance = (-view - position).Length();
|
||||
float scale = Pica::float20::FromRaw(light_config.dist_atten_scale).ToFloat32();
|
||||
float bias = Pica::float20::FromRaw(light_config.dist_atten_bias).ToFloat32();
|
||||
size_t lut =
|
||||
static_cast<size_t>(LightingRegs::LightingSampler::DistanceAttenuation) + num;
|
||||
|
||||
float sample_loc = MathUtil::Clamp(scale * distance + bias, 0.0f, 1.0f);
|
||||
|
||||
u8 lutindex =
|
||||
static_cast<u8>(MathUtil::Clamp(std::floor(sample_loc * 256.0f), 0.0f, 255.0f));
|
||||
float delta = sample_loc * 256 - lutindex;
|
||||
dist_atten = LookupLightingLut(lighting_state, lut, lutindex, delta);
|
||||
}
|
||||
|
||||
auto GetLutValue = [&](LightingRegs::LightingLutInput input, bool abs,
|
||||
LightingRegs::LightingScale scale_enum,
|
||||
LightingRegs::LightingSampler sampler) {
|
||||
float result = 0.0f;
|
||||
|
||||
switch (input) {
|
||||
case LightingRegs::LightingLutInput::NH:
|
||||
result = Math::Dot(normal, half_vector.Normalized());
|
||||
break;
|
||||
|
||||
case LightingRegs::LightingLutInput::VH:
|
||||
result = Math::Dot(norm_view, half_vector.Normalized());
|
||||
break;
|
||||
|
||||
case LightingRegs::LightingLutInput::NV:
|
||||
result = Math::Dot(normal, norm_view);
|
||||
break;
|
||||
|
||||
case LightingRegs::LightingLutInput::LN:
|
||||
result = Math::Dot(light_vector, normal);
|
||||
break;
|
||||
|
||||
case LightingRegs::LightingLutInput::SP: {
|
||||
Math::Vec3<s32> spot_dir{light_config.spot_x.Value(), light_config.spot_y.Value(),
|
||||
light_config.spot_z.Value()};
|
||||
result = Math::Dot(light_vector, spot_dir.Cast<float>() / 2047.0f);
|
||||
break;
|
||||
}
|
||||
case LightingRegs::LightingLutInput::CP:
|
||||
if (lighting.config0.config == LightingRegs::LightingConfig::Config7) {
|
||||
const Math::Vec3<float> norm_half_vector = half_vector.Normalized();
|
||||
const Math::Vec3<float> half_vector_proj =
|
||||
norm_half_vector - normal * Math::Dot(normal, norm_half_vector);
|
||||
result = Math::Dot(half_vector_proj, tangent);
|
||||
} else {
|
||||
result = 0.0f;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
LOG_CRITICAL(HW_GPU, "Unknown lighting LUT input %u\n", static_cast<u32>(input));
|
||||
UNIMPLEMENTED();
|
||||
result = 0.0f;
|
||||
}
|
||||
|
||||
u8 index;
|
||||
float delta;
|
||||
|
||||
if (abs) {
|
||||
if (light_config.config.two_sided_diffuse)
|
||||
result = std::abs(result);
|
||||
else
|
||||
result = std::max(result, 0.0f);
|
||||
|
||||
float flr = std::floor(result * 256.0f);
|
||||
index = static_cast<u8>(MathUtil::Clamp(flr, 0.0f, 255.0f));
|
||||
delta = result * 256 - index;
|
||||
} else {
|
||||
float flr = std::floor(result * 128.0f);
|
||||
s8 signed_index = static_cast<s8>(MathUtil::Clamp(flr, -128.0f, 127.0f));
|
||||
delta = result * 128.0f - signed_index;
|
||||
index = static_cast<u8>(signed_index);
|
||||
}
|
||||
|
||||
float scale = lighting.lut_scale.GetScale(scale_enum);
|
||||
return scale *
|
||||
LookupLightingLut(lighting_state, static_cast<size_t>(sampler), index, delta);
|
||||
};
|
||||
|
||||
// If enabled, compute spot light attenuation value
|
||||
float spot_atten = 1.0f;
|
||||
if (!lighting.IsSpotAttenDisabled(num) &&
|
||||
LightingRegs::IsLightingSamplerSupported(
|
||||
lighting.config0.config, LightingRegs::LightingSampler::SpotlightAttenuation)) {
|
||||
auto lut = LightingRegs::SpotlightAttenuationSampler(num);
|
||||
spot_atten = GetLutValue(lighting.lut_input.sp, lighting.abs_lut_input.disable_sp == 0,
|
||||
lighting.lut_scale.sp, lut);
|
||||
}
|
||||
|
||||
// Specular 0 component
|
||||
float d0_lut_value = 1.0f;
|
||||
if (lighting.config1.disable_lut_d0 == 0 &&
|
||||
LightingRegs::IsLightingSamplerSupported(
|
||||
lighting.config0.config, LightingRegs::LightingSampler::Distribution0)) {
|
||||
d0_lut_value =
|
||||
GetLutValue(lighting.lut_input.d0, lighting.abs_lut_input.disable_d0 == 0,
|
||||
lighting.lut_scale.d0, LightingRegs::LightingSampler::Distribution0);
|
||||
}
|
||||
|
||||
Math::Vec3<float> specular_0 = d0_lut_value * light_config.specular_0.ToVec3f();
|
||||
|
||||
// If enabled, lookup ReflectRed value, otherwise, 1.0 is used
|
||||
if (lighting.config1.disable_lut_rr == 0 &&
|
||||
LightingRegs::IsLightingSamplerSupported(lighting.config0.config,
|
||||
LightingRegs::LightingSampler::ReflectRed)) {
|
||||
refl_value.x =
|
||||
GetLutValue(lighting.lut_input.rr, lighting.abs_lut_input.disable_rr == 0,
|
||||
lighting.lut_scale.rr, LightingRegs::LightingSampler::ReflectRed);
|
||||
} else {
|
||||
refl_value.x = 1.0f;
|
||||
}
|
||||
|
||||
// If enabled, lookup ReflectGreen value, otherwise, ReflectRed value is used
|
||||
if (lighting.config1.disable_lut_rg == 0 &&
|
||||
LightingRegs::IsLightingSamplerSupported(lighting.config0.config,
|
||||
LightingRegs::LightingSampler::ReflectGreen)) {
|
||||
refl_value.y =
|
||||
GetLutValue(lighting.lut_input.rg, lighting.abs_lut_input.disable_rg == 0,
|
||||
lighting.lut_scale.rg, LightingRegs::LightingSampler::ReflectGreen);
|
||||
} else {
|
||||
refl_value.y = refl_value.x;
|
||||
}
|
||||
|
||||
// If enabled, lookup ReflectBlue value, otherwise, ReflectRed value is used
|
||||
if (lighting.config1.disable_lut_rb == 0 &&
|
||||
LightingRegs::IsLightingSamplerSupported(lighting.config0.config,
|
||||
LightingRegs::LightingSampler::ReflectBlue)) {
|
||||
refl_value.z =
|
||||
GetLutValue(lighting.lut_input.rb, lighting.abs_lut_input.disable_rb == 0,
|
||||
lighting.lut_scale.rb, LightingRegs::LightingSampler::ReflectBlue);
|
||||
} else {
|
||||
refl_value.z = refl_value.x;
|
||||
}
|
||||
|
||||
// Specular 1 component
|
||||
float d1_lut_value = 1.0f;
|
||||
if (lighting.config1.disable_lut_d1 == 0 &&
|
||||
LightingRegs::IsLightingSamplerSupported(
|
||||
lighting.config0.config, LightingRegs::LightingSampler::Distribution1)) {
|
||||
d1_lut_value =
|
||||
GetLutValue(lighting.lut_input.d1, lighting.abs_lut_input.disable_d1 == 0,
|
||||
lighting.lut_scale.d1, LightingRegs::LightingSampler::Distribution1);
|
||||
}
|
||||
|
||||
Math::Vec3<float> specular_1 =
|
||||
d1_lut_value * refl_value * light_config.specular_1.ToVec3f();
|
||||
|
||||
// Fresnel
|
||||
// Note: only the last entry in the light slots applies the Fresnel factor
|
||||
if (light_index == lighting.max_light_index && lighting.config1.disable_lut_fr == 0 &&
|
||||
LightingRegs::IsLightingSamplerSupported(lighting.config0.config,
|
||||
LightingRegs::LightingSampler::Fresnel)) {
|
||||
|
||||
float lut_value =
|
||||
GetLutValue(lighting.lut_input.fr, lighting.abs_lut_input.disable_fr == 0,
|
||||
lighting.lut_scale.fr, LightingRegs::LightingSampler::Fresnel);
|
||||
|
||||
// Enabled for diffuse lighting alpha component
|
||||
if (lighting.config0.fresnel_selector ==
|
||||
LightingRegs::LightingFresnelSelector::PrimaryAlpha ||
|
||||
lighting.config0.fresnel_selector == LightingRegs::LightingFresnelSelector::Both) {
|
||||
diffuse_sum.a() = lut_value;
|
||||
}
|
||||
|
||||
// Enabled for the specular lighting alpha component
|
||||
if (lighting.config0.fresnel_selector ==
|
||||
LightingRegs::LightingFresnelSelector::SecondaryAlpha ||
|
||||
lighting.config0.fresnel_selector == LightingRegs::LightingFresnelSelector::Both) {
|
||||
specular_sum.a() = lut_value;
|
||||
}
|
||||
}
|
||||
|
||||
auto dot_product = Math::Dot(light_vector, normal);
|
||||
|
||||
// Calculate clamp highlights before applying the two-sided diffuse configuration to the dot
|
||||
// product.
|
||||
float clamp_highlights = 1.0f;
|
||||
if (lighting.config0.clamp_highlights) {
|
||||
if (dot_product <= 0.0f)
|
||||
clamp_highlights = 0.0f;
|
||||
else
|
||||
clamp_highlights = 1.0f;
|
||||
}
|
||||
|
||||
if (light_config.config.two_sided_diffuse)
|
||||
dot_product = std::abs(dot_product);
|
||||
else
|
||||
dot_product = std::max(dot_product, 0.0f);
|
||||
|
||||
if (light_config.config.geometric_factor_0 || light_config.config.geometric_factor_1) {
|
||||
float geo_factor = half_vector.Length2();
|
||||
geo_factor = geo_factor == 0.0f ? 0.0f : std::min(dot_product / geo_factor, 1.0f);
|
||||
if (light_config.config.geometric_factor_0) {
|
||||
specular_0 *= geo_factor;
|
||||
}
|
||||
if (light_config.config.geometric_factor_1) {
|
||||
specular_1 *= geo_factor;
|
||||
}
|
||||
}
|
||||
|
||||
auto diffuse =
|
||||
light_config.diffuse.ToVec3f() * dot_product + light_config.ambient.ToVec3f();
|
||||
diffuse_sum += Math::MakeVec(diffuse * dist_atten * spot_atten, 0.0f);
|
||||
|
||||
specular_sum += Math::MakeVec(
|
||||
(specular_0 + specular_1) * clamp_highlights * dist_atten * spot_atten, 0.0f);
|
||||
}
|
||||
|
||||
diffuse_sum += Math::MakeVec(lighting.global_ambient.ToVec3f(), 0.0f);
|
||||
|
||||
auto diffuse = Math::MakeVec<float>(MathUtil::Clamp(diffuse_sum.x, 0.0f, 1.0f) * 255,
|
||||
MathUtil::Clamp(diffuse_sum.y, 0.0f, 1.0f) * 255,
|
||||
MathUtil::Clamp(diffuse_sum.z, 0.0f, 1.0f) * 255,
|
||||
MathUtil::Clamp(diffuse_sum.w, 0.0f, 1.0f) * 255)
|
||||
.Cast<u8>();
|
||||
auto specular = Math::MakeVec<float>(MathUtil::Clamp(specular_sum.x, 0.0f, 1.0f) * 255,
|
||||
MathUtil::Clamp(specular_sum.y, 0.0f, 1.0f) * 255,
|
||||
MathUtil::Clamp(specular_sum.z, 0.0f, 1.0f) * 255,
|
||||
MathUtil::Clamp(specular_sum.w, 0.0f, 1.0f) * 255)
|
||||
.Cast<u8>();
|
||||
return std::make_tuple(diffuse, specular);
|
||||
}
|
||||
|
||||
} // namespace Pica
|
|
@@ -1,19 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <tuple>
#include "common/quaternion.h"
#include "common/vector_math.h"
#include "video_core/pica_state.h"

namespace Pica {

std::tuple<Math::Vec4<u8>, Math::Vec4<u8>> ComputeFragmentsColors(
    const Pica::LightingRegs& lighting, const Pica::State::Lighting& lighting_state,
    const Math::Quaternion<float>& normquat, const Math::Vec3<float>& view,
    const Math::Vec4<u8> (&texture_color)[4]);

} // namespace Pica
@@ -1,223 +0,0 @@
// Copyright 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <array>
|
||||
#include <cmath>
|
||||
#include "common/math_util.h"
|
||||
#include "video_core/swrasterizer/proctex.h"
|
||||
|
||||
namespace Pica {
|
||||
namespace Rasterizer {
|
||||
|
||||
using ProcTexClamp = TexturingRegs::ProcTexClamp;
|
||||
using ProcTexShift = TexturingRegs::ProcTexShift;
|
||||
using ProcTexCombiner = TexturingRegs::ProcTexCombiner;
|
||||
using ProcTexFilter = TexturingRegs::ProcTexFilter;
|
||||
|
||||
static float LookupLUT(const std::array<State::ProcTex::ValueEntry, 128>& lut, float coord) {
|
||||
// For NoiseLUT/ColorMap/AlphaMap, coord=0.0 is lut[0], coord=127.0/128.0 is lut[127] and
|
||||
// coord=1.0 is lut[127]+lut_diff[127]. For other indices, the result is interpolated using
|
||||
// value entries and difference entries.
|
||||
coord *= 128;
|
||||
const int index_int = std::min(static_cast<int>(coord), 127);
|
||||
const float frac = coord - index_int;
|
||||
return lut[index_int].ToFloat() + frac * lut[index_int].DiffToFloat();
|
||||
}
|
||||
|
||||
// These functions are used to generate random noise for procedural textures. Their results are
// verified against real hardware, but it's not known if the algorithm is the same as the hardware's.
|
||||
static unsigned int NoiseRand1D(unsigned int v) {
|
||||
static constexpr std::array<unsigned int, 16> table{
|
||||
{0, 4, 10, 8, 4, 9, 7, 12, 5, 15, 13, 14, 11, 15, 2, 11}};
|
||||
return ((v % 9 + 2) * 3 & 0xF) ^ table[(v / 9) & 0xF];
|
||||
}
|
||||
|
||||
static float NoiseRand2D(unsigned int x, unsigned int y) {
|
||||
static constexpr std::array<unsigned int, 16> table{
|
||||
{10, 2, 15, 8, 0, 7, 4, 5, 5, 13, 2, 6, 13, 9, 3, 14}};
|
||||
unsigned int u2 = NoiseRand1D(x);
|
||||
unsigned int v2 = NoiseRand1D(y);
|
||||
v2 += ((u2 & 3) == 1) ? 4 : 0;
|
||||
v2 ^= (u2 & 1) * 6;
|
||||
v2 += 10 + u2;
|
||||
v2 &= 0xF;
|
||||
v2 ^= table[u2];
|
||||
return -1.0f + v2 * 2.0f / 15.0f;
|
||||
}
|
||||
|
||||
static float NoiseCoef(float u, float v, TexturingRegs regs, State::ProcTex state) {
|
||||
const float freq_u = float16::FromRaw(regs.proctex_noise_frequency.u).ToFloat32();
|
||||
const float freq_v = float16::FromRaw(regs.proctex_noise_frequency.v).ToFloat32();
|
||||
const float phase_u = float16::FromRaw(regs.proctex_noise_u.phase).ToFloat32();
|
||||
const float phase_v = float16::FromRaw(regs.proctex_noise_v.phase).ToFloat32();
|
||||
const float x = 9 * freq_u * std::abs(u + phase_u);
|
||||
const float y = 9 * freq_v * std::abs(v + phase_v);
|
||||
const int x_int = static_cast<int>(x);
|
||||
const int y_int = static_cast<int>(y);
|
||||
const float x_frac = x - x_int;
|
||||
const float y_frac = y - y_int;
|
||||
|
||||
const float g0 = NoiseRand2D(x_int, y_int) * (x_frac + y_frac);
|
||||
const float g1 = NoiseRand2D(x_int + 1, y_int) * (x_frac + y_frac - 1);
|
||||
const float g2 = NoiseRand2D(x_int, y_int + 1) * (x_frac + y_frac - 1);
|
||||
const float g3 = NoiseRand2D(x_int + 1, y_int + 1) * (x_frac + y_frac - 2);
|
||||
const float x_noise = LookupLUT(state.noise_table, x_frac);
|
||||
const float y_noise = LookupLUT(state.noise_table, y_frac);
|
||||
return Math::BilinearInterp(g0, g1, g2, g3, x_noise, y_noise);
|
||||
}
|
||||
|
||||
static float GetShiftOffset(float v, ProcTexShift mode, ProcTexClamp clamp_mode) {
|
||||
const float offset = (clamp_mode == ProcTexClamp::MirroredRepeat) ? 1 : 0.5f;
|
||||
switch (mode) {
|
||||
case ProcTexShift::None:
|
||||
return 0;
|
||||
case ProcTexShift::Odd:
|
||||
return offset * (((int)v / 2) % 2);
|
||||
case ProcTexShift::Even:
|
||||
return offset * ((((int)v + 1) / 2) % 2);
|
||||
default:
|
||||
LOG_CRITICAL(HW_GPU, "Unknown shift mode %u", static_cast<u32>(mode));
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
|
||||
static void ClampCoord(float& coord, ProcTexClamp mode) {
|
||||
switch (mode) {
|
||||
case ProcTexClamp::ToZero:
|
||||
if (coord > 1.0f)
|
||||
coord = 0.0f;
|
||||
break;
|
||||
case ProcTexClamp::ToEdge:
|
||||
coord = std::min(coord, 1.0f);
|
||||
break;
|
||||
case ProcTexClamp::SymmetricalRepeat:
|
||||
coord = coord - std::floor(coord);
|
||||
break;
|
||||
case ProcTexClamp::MirroredRepeat: {
|
||||
int integer = static_cast<int>(coord);
|
||||
float frac = coord - integer;
|
||||
coord = (integer % 2) == 0 ? frac : (1.0f - frac);
|
||||
break;
|
||||
}
|
||||
case ProcTexClamp::Pulse:
|
||||
if (coord <= 0.5f)
|
||||
coord = 0.0f;
|
||||
else
|
||||
coord = 1.0f;
|
||||
break;
|
||||
default:
|
||||
LOG_CRITICAL(HW_GPU, "Unknown clamp mode %u", static_cast<u32>(mode));
|
||||
coord = std::min(coord, 1.0f);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
float CombineAndMap(float u, float v, ProcTexCombiner combiner,
|
||||
const std::array<State::ProcTex::ValueEntry, 128>& map_table) {
|
||||
float f;
|
||||
switch (combiner) {
|
||||
case ProcTexCombiner::U:
|
||||
f = u;
|
||||
break;
|
||||
case ProcTexCombiner::U2:
|
||||
f = u * u;
|
||||
break;
|
||||
case TexturingRegs::ProcTexCombiner::V:
|
||||
f = v;
|
||||
break;
|
||||
case TexturingRegs::ProcTexCombiner::V2:
|
||||
f = v * v;
|
||||
break;
|
||||
case TexturingRegs::ProcTexCombiner::Add:
|
||||
f = (u + v) * 0.5f;
|
||||
break;
|
||||
case TexturingRegs::ProcTexCombiner::Add2:
|
||||
f = (u * u + v * v) * 0.5f;
|
||||
break;
|
||||
case TexturingRegs::ProcTexCombiner::SqrtAdd2:
|
||||
f = std::min(std::sqrt(u * u + v * v), 1.0f);
|
||||
break;
|
||||
case TexturingRegs::ProcTexCombiner::Min:
|
||||
f = std::min(u, v);
|
||||
break;
|
||||
case TexturingRegs::ProcTexCombiner::Max:
|
||||
f = std::max(u, v);
|
||||
break;
|
||||
case TexturingRegs::ProcTexCombiner::RMax:
|
||||
f = std::min(((u + v) * 0.5f + std::sqrt(u * u + v * v)) * 0.5f, 1.0f);
|
||||
break;
|
||||
default:
|
||||
LOG_CRITICAL(HW_GPU, "Unknown combiner %u", static_cast<u32>(combiner));
|
||||
f = 0.0f;
|
||||
break;
|
||||
}
|
||||
return LookupLUT(map_table, f);
|
||||
}
|
||||
|
||||
Math::Vec4<u8> ProcTex(float u, float v, TexturingRegs regs, State::ProcTex state) {
|
||||
u = std::abs(u);
|
||||
v = std::abs(v);
|
||||
|
||||
// Get shift offset before noise generation
|
||||
const float u_shift = GetShiftOffset(v, regs.proctex.u_shift, regs.proctex.u_clamp);
|
||||
const float v_shift = GetShiftOffset(u, regs.proctex.v_shift, regs.proctex.v_clamp);
|
||||
|
||||
// Generate noise
|
||||
if (regs.proctex.noise_enable) {
|
||||
float noise = NoiseCoef(u, v, regs, state);
|
||||
u += noise * regs.proctex_noise_u.amplitude / 4095.0f;
|
||||
v += noise * regs.proctex_noise_v.amplitude / 4095.0f;
|
||||
u = std::abs(u);
|
||||
v = std::abs(v);
|
||||
}
|
||||
|
||||
// Shift
|
||||
u += u_shift;
|
||||
v += v_shift;
|
||||
|
||||
// Clamp
|
||||
ClampCoord(u, regs.proctex.u_clamp);
|
||||
ClampCoord(v, regs.proctex.v_clamp);
|
||||
|
||||
// Combine and map
|
||||
const float lut_coord = CombineAndMap(u, v, regs.proctex.color_combiner, state.color_map_table);
|
||||
|
||||
// Look up the color
|
||||
// For the color lut, coord=0.0 is lut[offset] and coord=1.0 is lut[offset+width-1]
|
||||
const u32 offset = regs.proctex_lut_offset;
|
||||
const u32 width = regs.proctex_lut.width;
|
||||
const float index = offset + (lut_coord * (width - 1));
|
||||
Math::Vec4<u8> final_color;
|
||||
// TODO(wwylele): implement mipmap
|
||||
switch (regs.proctex_lut.filter) {
|
||||
case ProcTexFilter::Linear:
|
||||
case ProcTexFilter::LinearMipmapLinear:
|
||||
case ProcTexFilter::LinearMipmapNearest: {
|
||||
const int index_int = static_cast<int>(index);
|
||||
const float frac = index - index_int;
|
||||
const auto color_value = state.color_table[index_int].ToVector().Cast<float>();
|
||||
const auto color_diff = state.color_diff_table[index_int].ToVector().Cast<float>();
|
||||
final_color = (color_value + frac * color_diff).Cast<u8>();
|
||||
break;
|
||||
}
|
||||
case ProcTexFilter::Nearest:
|
||||
case ProcTexFilter::NearestMipmapLinear:
|
||||
case ProcTexFilter::NearestMipmapNearest:
|
||||
final_color = state.color_table[static_cast<int>(std::round(index))].ToVector();
|
||||
break;
|
||||
}
|
||||
|
||||
if (regs.proctex.separate_alpha) {
|
||||
// Note: in separate alpha mode, the alpha channel skips the color LUT look up stage. It
|
||||
// uses the output of CombineAndMap directly instead.
|
||||
const float final_alpha =
|
||||
CombineAndMap(u, v, regs.proctex.alpha_combiner, state.alpha_map_table);
|
||||
return Math::MakeVec<u8>(final_color.rgb(), static_cast<u8>(final_alpha * 255));
|
||||
} else {
|
||||
return final_color;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace Rasterizer
|
||||
} // namespace Pica
|
|
@@ -1,16 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/common_types.h"
#include "common/vector_math.h"
#include "video_core/pica_state.h"

namespace Pica {
namespace Rasterizer {

/// Generates procedural texture color for the given coordinates
Math::Vec4<u8> ProcTex(float u, float v, TexturingRegs regs, State::ProcTex state);

} // namespace Rasterizer
} // namespace Pica
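The 128-entry ProcTex LUTs consumed by LookupLUT in proctex.cpp above store a value and a difference per entry, with coord 0.0 mapping to entry 0 and coord 1.0 mapping to entry 127 plus its difference. A standalone sketch of that mapping (editor's illustration using plain float arrays rather than State::ProcTex::ValueEntry):

#include <algorithm>
#include <array>
#include <cstdio>

// coord in [0, 1]; returns value[i] + frac * diff[i], mirroring the LookupLUT comment above.
static float SampleProcTexLut(const std::array<float, 128>& value,
                              const std::array<float, 128>& diff, float coord) {
    coord *= 128.0f;
    const int index = std::min(static_cast<int>(coord), 127);
    const float frac = coord - static_cast<float>(index);
    return value[index] + frac * diff[index];
}

int main() {
    std::array<float, 128> value{};
    std::array<float, 128> diff{};
    value[127] = 0.9f;
    diff[127] = 0.1f;
    std::printf("%f\n", SampleProcTexLut(value, diff, 1.0f)); // 0.9 + 1.0 * 0.1 = 1.0
    return 0;
}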
@@ -1,853 +0,0 @@
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <cmath>
|
||||
#include <tuple>
|
||||
#include "common/assert.h"
|
||||
#include "common/bit_field.h"
|
||||
#include "common/color.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/math_util.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/quaternion.h"
|
||||
#include "common/vector_math.h"
|
||||
#include "core/hw/gpu.h"
|
||||
#include "core/memory.h"
|
||||
#include "video_core/debug_utils/debug_utils.h"
|
||||
#include "video_core/pica_state.h"
|
||||
#include "video_core/pica_types.h"
|
||||
#include "video_core/regs_framebuffer.h"
|
||||
#include "video_core/regs_rasterizer.h"
|
||||
#include "video_core/regs_texturing.h"
|
||||
#include "video_core/shader/shader.h"
|
||||
#include "video_core/swrasterizer/framebuffer.h"
|
||||
#include "video_core/swrasterizer/lighting.h"
|
||||
#include "video_core/swrasterizer/proctex.h"
|
||||
#include "video_core/swrasterizer/rasterizer.h"
|
||||
#include "video_core/swrasterizer/texturing.h"
|
||||
#include "video_core/texture/texture_decode.h"
|
||||
#include "video_core/utils.h"
|
||||
|
||||
namespace Pica {
|
||||
namespace Rasterizer {
|
||||
|
||||
// NOTE: Assuming that rasterizer coordinates are 12.4 fixed-point values
|
||||
struct Fix12P4 {
|
||||
Fix12P4() {}
|
||||
Fix12P4(u16 val) : val(val) {}
|
||||
|
||||
static u16 FracMask() {
|
||||
return 0xF;
|
||||
}
|
||||
static u16 IntMask() {
|
||||
return (u16)~0xF;
|
||||
}
|
||||
|
||||
operator u16() const {
|
||||
return val;
|
||||
}
|
||||
|
||||
bool operator<(const Fix12P4& oth) const {
|
||||
return (u16) * this < (u16)oth;
|
||||
}
|
||||
|
||||
private:
|
||||
u16 val;
|
||||
};
|
||||
|
||||
/**
 * Calculate signed area of the triangle spanned by the three argument vertices.
 * The sign denotes an orientation.
 *
 * @todo define orientation concretely.
 */
static int SignedArea(const Math::Vec2<Fix12P4>& vtx1, const Math::Vec2<Fix12P4>& vtx2,
                      const Math::Vec2<Fix12P4>& vtx3) {
    const auto vec1 = Math::MakeVec(vtx2 - vtx1, 0);
    const auto vec2 = Math::MakeVec(vtx3 - vtx1, 0);
    // TODO: There is a very small chance this will overflow for sizeof(int) == 4
    return Math::Cross(vec1, vec2).z;
};

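SignedArea is the standard 2D cross-product test: with the usual y-up convention, a positive result means the vertices are wound counter-clockwise, a negative one clockwise, and zero marks a degenerate triangle; the culling logic below uses exactly this sign to decide whether to flip the vertex order. A standalone integer version of the same test (editor's sketch, without the Fix12P4 wrapper):

#include <cstdio>

// Twice the signed area of triangle (a, b, c); the sign encodes the winding order.
static int SignedArea2D(int ax, int ay, int bx, int by, int cx, int cy) {
    return (bx - ax) * (cy - ay) - (by - ay) * (cx - ax);
}

int main() {
    std::printf("%d\n", SignedArea2D(0, 0, 4, 0, 0, 4)); // +16: counter-clockwise
    std::printf("%d\n", SignedArea2D(0, 0, 0, 4, 4, 0)); // -16: clockwise
    return 0;
}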
/// Convert a 3D vector for cube map coordinates to 2D texture coordinates along with the face name
|
||||
static std::tuple<float24, float24, PAddr> ConvertCubeCoord(float24 u, float24 v, float24 w,
|
||||
const TexturingRegs& regs) {
|
||||
const float abs_u = std::abs(u.ToFloat32());
|
||||
const float abs_v = std::abs(v.ToFloat32());
|
||||
const float abs_w = std::abs(w.ToFloat32());
|
||||
float24 x, y, z;
|
||||
PAddr addr;
|
||||
if (abs_u > abs_v && abs_u > abs_w) {
|
||||
if (u > float24::FromFloat32(0)) {
|
||||
addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::PositiveX);
|
||||
y = -v;
|
||||
} else {
|
||||
addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::NegativeX);
|
||||
y = v;
|
||||
}
|
||||
x = -w;
|
||||
z = u;
|
||||
} else if (abs_v > abs_w) {
|
||||
if (v > float24::FromFloat32(0)) {
|
||||
addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::PositiveY);
|
||||
x = u;
|
||||
} else {
|
||||
addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::NegativeY);
|
||||
x = -u;
|
||||
}
|
||||
y = w;
|
||||
z = v;
|
||||
} else {
|
||||
if (w > float24::FromFloat32(0)) {
|
||||
addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::PositiveZ);
|
||||
y = -v;
|
||||
} else {
|
||||
addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::NegativeZ);
|
||||
y = v;
|
||||
}
|
||||
x = u;
|
||||
z = w;
|
||||
}
|
||||
const float24 half = float24::FromFloat32(0.5f);
|
||||
return std::make_tuple(x / z * half + half, y / z * half + half, addr);
|
||||
}
|
||||
|
||||
MICROPROFILE_DEFINE(GPU_Rasterization, "GPU", "Rasterization", MP_RGB(50, 50, 240));
|
||||
|
||||
/**
|
||||
* Helper function for ProcessTriangle with the "reversed" flag to allow for implementing
|
||||
* culling via recursion.
|
||||
*/
|
||||
static void ProcessTriangleInternal(const Vertex& v0, const Vertex& v1, const Vertex& v2,
|
||||
bool reversed = false) {
|
||||
const auto& regs = g_state.regs;
|
||||
MICROPROFILE_SCOPE(GPU_Rasterization);
|
||||
|
||||
// vertex positions in rasterizer coordinates
|
||||
static auto FloatToFix = [](float24 flt) {
|
||||
        // TODO: Rounding here is necessary to prevent garbage pixels at
        // triangle borders. Is that the correct solution, though?
|
||||
return Fix12P4(static_cast<unsigned short>(round(flt.ToFloat32() * 16.0f)));
|
||||
};
|
||||
static auto ScreenToRasterizerCoordinates = [](const Math::Vec3<float24>& vec) {
|
||||
return Math::Vec3<Fix12P4>{FloatToFix(vec.x), FloatToFix(vec.y), FloatToFix(vec.z)};
|
||||
};
|
||||
|
||||
Math::Vec3<Fix12P4> vtxpos[3]{ScreenToRasterizerCoordinates(v0.screenpos),
|
||||
ScreenToRasterizerCoordinates(v1.screenpos),
|
||||
ScreenToRasterizerCoordinates(v2.screenpos)};
|
||||
|
||||
if (regs.rasterizer.cull_mode == RasterizerRegs::CullMode::KeepAll) {
|
||||
// Make sure we always end up with a triangle wound counter-clockwise
|
||||
if (!reversed && SignedArea(vtxpos[0].xy(), vtxpos[1].xy(), vtxpos[2].xy()) <= 0) {
|
||||
ProcessTriangleInternal(v0, v2, v1, true);
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
if (!reversed && regs.rasterizer.cull_mode == RasterizerRegs::CullMode::KeepClockWise) {
|
||||
// Reverse vertex order and use the CCW code path.
|
||||
ProcessTriangleInternal(v0, v2, v1, true);
|
||||
return;
|
||||
}
|
||||
|
||||
// Cull away triangles which are wound clockwise.
|
||||
if (SignedArea(vtxpos[0].xy(), vtxpos[1].xy(), vtxpos[2].xy()) <= 0)
|
||||
return;
|
||||
}
|
||||
|
||||
u16 min_x = std::min({vtxpos[0].x, vtxpos[1].x, vtxpos[2].x});
|
||||
u16 min_y = std::min({vtxpos[0].y, vtxpos[1].y, vtxpos[2].y});
|
||||
u16 max_x = std::max({vtxpos[0].x, vtxpos[1].x, vtxpos[2].x});
|
||||
u16 max_y = std::max({vtxpos[0].y, vtxpos[1].y, vtxpos[2].y});
|
||||
|
||||
// Convert the scissor box coordinates to 12.4 fixed point
|
||||
u16 scissor_x1 = (u16)(regs.rasterizer.scissor_test.x1 << 4);
|
||||
u16 scissor_y1 = (u16)(regs.rasterizer.scissor_test.y1 << 4);
|
||||
// x2,y2 have +1 added to cover the entire sub-pixel area
|
||||
u16 scissor_x2 = (u16)((regs.rasterizer.scissor_test.x2 + 1) << 4);
|
||||
u16 scissor_y2 = (u16)((regs.rasterizer.scissor_test.y2 + 1) << 4);
|
||||
|
||||
if (regs.rasterizer.scissor_test.mode == RasterizerRegs::ScissorMode::Include) {
|
||||
// Calculate the new bounds
|
||||
min_x = std::max(min_x, scissor_x1);
|
||||
min_y = std::max(min_y, scissor_y1);
|
||||
max_x = std::min(max_x, scissor_x2);
|
||||
max_y = std::min(max_y, scissor_y2);
|
||||
}
|
||||
|
||||
min_x &= Fix12P4::IntMask();
|
||||
min_y &= Fix12P4::IntMask();
|
||||
max_x = ((max_x + Fix12P4::FracMask()) & Fix12P4::IntMask());
|
||||
max_y = ((max_y + Fix12P4::FracMask()) & Fix12P4::IntMask());
|
||||
|
||||
// Triangle filling rules: Pixels on the right-sided edge or on flat bottom edges are not
|
||||
// drawn. Pixels on any other triangle border are drawn. This is implemented with three bias
|
||||
// values which are added to the barycentric coordinates w0, w1 and w2, respectively.
|
||||
// NOTE: These are the PSP filling rules. Not sure if the 3DS uses the same ones...
|
||||
auto IsRightSideOrFlatBottomEdge = [](const Math::Vec2<Fix12P4>& vtx,
|
||||
const Math::Vec2<Fix12P4>& line1,
|
||||
const Math::Vec2<Fix12P4>& line2) {
|
||||
if (line1.y == line2.y) {
|
||||
// just check if vertex is above us => bottom line parallel to x-axis
|
||||
return vtx.y < line1.y;
|
||||
} else {
|
||||
// check if vertex is on our left => right side
|
||||
// TODO: Not sure how likely this is to overflow
|
||||
return (int)vtx.x < (int)line1.x +
|
||||
((int)line2.x - (int)line1.x) * ((int)vtx.y - (int)line1.y) /
|
||||
((int)line2.y - (int)line1.y);
|
||||
}
|
||||
};
|
||||
int bias0 =
|
||||
IsRightSideOrFlatBottomEdge(vtxpos[0].xy(), vtxpos[1].xy(), vtxpos[2].xy()) ? -1 : 0;
|
||||
int bias1 =
|
||||
IsRightSideOrFlatBottomEdge(vtxpos[1].xy(), vtxpos[2].xy(), vtxpos[0].xy()) ? -1 : 0;
|
||||
int bias2 =
|
||||
IsRightSideOrFlatBottomEdge(vtxpos[2].xy(), vtxpos[0].xy(), vtxpos[1].xy()) ? -1 : 0;
|
||||
|
||||
auto w_inverse = Math::MakeVec(v0.pos.w, v1.pos.w, v2.pos.w);
|
||||
|
||||
auto textures = regs.texturing.GetTextures();
|
||||
auto tev_stages = regs.texturing.GetTevStages();
|
||||
|
||||
bool stencil_action_enable =
|
||||
g_state.regs.framebuffer.output_merger.stencil_test.enable &&
|
||||
g_state.regs.framebuffer.framebuffer.depth_format == FramebufferRegs::DepthFormat::D24S8;
|
||||
const auto stencil_test = g_state.regs.framebuffer.output_merger.stencil_test;
|
||||
|
||||
// Enter rasterization loop, starting at the center of the topleft bounding box corner.
|
||||
// TODO: Not sure if looping through x first might be faster
|
||||
for (u16 y = min_y + 8; y < max_y; y += 0x10) {
|
||||
for (u16 x = min_x + 8; x < max_x; x += 0x10) {
|
||||
|
||||
// Do not process the pixel if it's inside the scissor box and the scissor mode is set
|
||||
// to Exclude
|
||||
if (regs.rasterizer.scissor_test.mode == RasterizerRegs::ScissorMode::Exclude) {
|
||||
if (x >= scissor_x1 && x < scissor_x2 && y >= scissor_y1 && y < scissor_y2)
|
||||
continue;
|
||||
}
|
||||
|
||||
// Calculate the barycentric coordinates w0, w1 and w2
|
||||
int w0 = bias0 + SignedArea(vtxpos[1].xy(), vtxpos[2].xy(), {x, y});
|
||||
int w1 = bias1 + SignedArea(vtxpos[2].xy(), vtxpos[0].xy(), {x, y});
|
||||
int w2 = bias2 + SignedArea(vtxpos[0].xy(), vtxpos[1].xy(), {x, y});
|
||||
int wsum = w0 + w1 + w2;
|
||||
|
||||
// If current pixel is not covered by the current primitive
|
||||
if (w0 < 0 || w1 < 0 || w2 < 0)
|
||||
continue;
|
||||
|
||||
auto baricentric_coordinates =
|
||||
Math::MakeVec(float24::FromFloat32(static_cast<float>(w0)),
|
||||
float24::FromFloat32(static_cast<float>(w1)),
|
||||
float24::FromFloat32(static_cast<float>(w2)));
|
||||
float24 interpolated_w_inverse =
|
||||
float24::FromFloat32(1.0f) / Math::Dot(w_inverse, baricentric_coordinates);
|
||||
|
||||
// interpolated_z = z / w
|
||||
float interpolated_z_over_w =
|
||||
(v0.screenpos[2].ToFloat32() * w0 + v1.screenpos[2].ToFloat32() * w1 +
|
||||
v2.screenpos[2].ToFloat32() * w2) /
|
||||
wsum;
|
||||
|
||||
// Not fully accurate. About 3 bits in precision are missing.
|
||||
// Z-Buffer (z / w * scale + offset)
|
||||
float depth_scale = float24::FromRaw(regs.rasterizer.viewport_depth_range).ToFloat32();
|
||||
float depth_offset =
|
||||
float24::FromRaw(regs.rasterizer.viewport_depth_near_plane).ToFloat32();
|
||||
float depth = interpolated_z_over_w * depth_scale + depth_offset;
|
||||
|
||||
// Potentially switch to W-Buffer
|
||||
if (regs.rasterizer.depthmap_enable ==
|
||||
Pica::RasterizerRegs::DepthBuffering::WBuffering) {
|
||||
// W-Buffer (z * scale + w * offset = (z / w * scale + offset) * w)
|
||||
depth *= interpolated_w_inverse.ToFloat32() * wsum;
|
||||
}
|
||||
|
||||
// Clamp the result
|
||||
depth = MathUtil::Clamp(depth, 0.0f, 1.0f);
|
||||
|
||||
            // Perspective-correct attribute interpolation:
            // Attribute values cannot be calculated by simple linear interpolation since
            // they are not linear in screen space. For example, when interpolating a
            // texture coordinate across two vertices, something simple like
            //     u = (u0*w0 + u1*w1)/(w0+w1)
            // will not work. However, the attribute value divided by the
            // clipspace w-coordinate (u/w) and the inverse w-coordinate (1/w) are linear
            // in screenspace. Hence, we can linearly interpolate these two independently and
            // calculate the interpolated attribute by dividing the results.
            // I.e.
            //     u_over_w   = ((u0/v0.pos.w)*w0 + (u1/v1.pos.w)*w1)/(w0+w1)
            //     one_over_w = (( 1/v0.pos.w)*w0 + ( 1/v1.pos.w)*w1)/(w0+w1)
            //     u          = u_over_w / one_over_w
            //
            // The generalization to three vertices is straightforward in barycentric coordinates.
            auto GetInterpolatedAttribute = [&](float24 attr0, float24 attr1, float24 attr2) {
                auto attr_over_w = Math::MakeVec(attr0, attr1, attr2);
                float24 interpolated_attr_over_w = Math::Dot(attr_over_w, baricentric_coordinates);
                return interpolated_attr_over_w * interpolated_w_inverse;
            };
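            // Worked example (editor's illustration, not part of the original source):
            // for one attribute u with u0 = 0.0, u1 = 1.0 and clip-space w values 1 and 4,
            // sampled halfway between the two vertices in screen space (w0 = w1 = 0.5):
            //     u_over_w   = (0.0/1)*0.5 + (1.0/4)*0.5 = 0.125
            //     one_over_w = (1.0/1)*0.5 + (1.0/4)*0.5 = 0.625
            //     u          = 0.125 / 0.625            = 0.2
            // The result is biased toward the vertex with the smaller w (the nearer one),
            // which is exactly what GetInterpolatedAttribute reproduces per pixel.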
|
||||
|
||||
Math::Vec4<u8> primary_color{
|
||||
(u8)(
|
||||
GetInterpolatedAttribute(v0.color.r(), v1.color.r(), v2.color.r()).ToFloat32() *
|
||||
255),
|
||||
(u8)(
|
||||
GetInterpolatedAttribute(v0.color.g(), v1.color.g(), v2.color.g()).ToFloat32() *
|
||||
255),
|
||||
(u8)(
|
||||
GetInterpolatedAttribute(v0.color.b(), v1.color.b(), v2.color.b()).ToFloat32() *
|
||||
255),
|
||||
(u8)(
|
||||
GetInterpolatedAttribute(v0.color.a(), v1.color.a(), v2.color.a()).ToFloat32() *
|
||||
255),
|
||||
};
|
||||
|
||||
Math::Vec2<float24> uv[3];
|
||||
uv[0].u() = GetInterpolatedAttribute(v0.tc0.u(), v1.tc0.u(), v2.tc0.u());
|
||||
uv[0].v() = GetInterpolatedAttribute(v0.tc0.v(), v1.tc0.v(), v2.tc0.v());
|
||||
uv[1].u() = GetInterpolatedAttribute(v0.tc1.u(), v1.tc1.u(), v2.tc1.u());
|
||||
uv[1].v() = GetInterpolatedAttribute(v0.tc1.v(), v1.tc1.v(), v2.tc1.v());
|
||||
uv[2].u() = GetInterpolatedAttribute(v0.tc2.u(), v1.tc2.u(), v2.tc2.u());
|
||||
uv[2].v() = GetInterpolatedAttribute(v0.tc2.v(), v1.tc2.v(), v2.tc2.v());
|
||||
|
||||
Math::Vec4<u8> texture_color[4]{};
|
||||
for (int i = 0; i < 3; ++i) {
|
||||
const auto& texture = textures[i];
|
||||
if (!texture.enabled)
|
||||
continue;
|
||||
|
||||
DEBUG_ASSERT(0 != texture.config.address);
|
||||
|
||||
int coordinate_i =
|
||||
(i == 2 && regs.texturing.main_config.texture2_use_coord1) ? 1 : i;
|
||||
float24 u = uv[coordinate_i].u();
|
||||
float24 v = uv[coordinate_i].v();
|
||||
|
||||
// Only unit 0 respects the texturing type (according to 3DBrew)
|
||||
// TODO: Refactor so cubemaps and shadowmaps can be handled
|
||||
PAddr texture_address = texture.config.GetPhysicalAddress();
|
||||
if (i == 0) {
|
||||
switch (texture.config.type) {
|
||||
case TexturingRegs::TextureConfig::Texture2D:
|
||||
break;
|
||||
case TexturingRegs::TextureConfig::TextureCube: {
|
||||
auto w = GetInterpolatedAttribute(v0.tc0_w, v1.tc0_w, v2.tc0_w);
|
||||
std::tie(u, v, texture_address) = ConvertCubeCoord(u, v, w, regs.texturing);
|
||||
break;
|
||||
}
|
||||
case TexturingRegs::TextureConfig::Projection2D: {
|
||||
auto tc0_w = GetInterpolatedAttribute(v0.tc0_w, v1.tc0_w, v2.tc0_w);
|
||||
u /= tc0_w;
|
||||
v /= tc0_w;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
// TODO: Change to LOG_ERROR when more types are handled.
|
||||
LOG_DEBUG(HW_GPU, "Unhandled texture type %x", (int)texture.config.type);
|
||||
UNIMPLEMENTED();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int s = (int)(u * float24::FromFloat32(static_cast<float>(texture.config.width)))
|
||||
.ToFloat32();
|
||||
int t = (int)(v * float24::FromFloat32(static_cast<float>(texture.config.height)))
|
||||
.ToFloat32();
|
||||
|
||||
bool use_border_s = false;
|
||||
bool use_border_t = false;
|
||||
|
||||
if (texture.config.wrap_s == TexturingRegs::TextureConfig::ClampToBorder) {
|
||||
use_border_s = s < 0 || s >= static_cast<int>(texture.config.width);
|
||||
} else if (texture.config.wrap_s == TexturingRegs::TextureConfig::ClampToBorder2) {
|
||||
use_border_s = s >= static_cast<int>(texture.config.width);
|
||||
}
|
||||
|
||||
if (texture.config.wrap_t == TexturingRegs::TextureConfig::ClampToBorder) {
|
||||
use_border_t = t < 0 || t >= static_cast<int>(texture.config.height);
|
||||
} else if (texture.config.wrap_t == TexturingRegs::TextureConfig::ClampToBorder2) {
|
||||
use_border_t = t >= static_cast<int>(texture.config.height);
|
||||
}
|
||||
|
||||
if (use_border_s || use_border_t) {
|
||||
auto border_color = texture.config.border_color;
|
||||
texture_color[i] = {border_color.r, border_color.g, border_color.b,
|
||||
border_color.a};
|
||||
} else {
|
||||
// Textures are laid out from bottom to top, hence we invert the t coordinate.
|
||||
// NOTE: This may not be the right place for the inversion.
|
||||
// TODO: Check if this applies to ETC textures, too.
|
||||
s = GetWrappedTexCoord(texture.config.wrap_s, s, texture.config.width);
|
||||
t = texture.config.height - 1 -
|
||||
GetWrappedTexCoord(texture.config.wrap_t, t, texture.config.height);
|
||||
|
||||
const u8* texture_data = Memory::GetPhysicalPointer(texture_address);
|
||||
auto info =
|
||||
Texture::TextureInfo::FromPicaRegister(texture.config, texture.format);
|
||||
|
||||
// TODO: Apply the min and mag filters to the texture
|
||||
texture_color[i] = Texture::LookupTexture(texture_data, s, t, info);
|
||||
#if PICA_DUMP_TEXTURES
|
||||
DebugUtils::DumpTexture(texture.config, texture_data);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
// sample procedural texture
|
||||
if (regs.texturing.main_config.texture3_enable) {
|
||||
const auto& proctex_uv = uv[regs.texturing.main_config.texture3_coordinates];
|
||||
texture_color[3] = ProcTex(proctex_uv.u().ToFloat32(), proctex_uv.v().ToFloat32(),
|
||||
g_state.regs.texturing, g_state.proctex);
|
||||
}
|
||||
|
||||
// Texture environment - consists of 6 stages of color and alpha combining.
|
||||
//
|
||||
// Color combiners take three input color values from some source (e.g. interpolated
|
||||
// vertex color, texture color, previous stage, etc), perform some very simple
|
||||
// operations on each of them (e.g. inversion) and then calculate the output color
|
||||
// with some basic arithmetic. Alpha combiners can be configured separately but work
|
||||
// analogously.
|
||||
Math::Vec4<u8> combiner_output;
|
||||
Math::Vec4<u8> combiner_buffer = {0, 0, 0, 0};
|
||||
Math::Vec4<u8> next_combiner_buffer = {
|
||||
regs.texturing.tev_combiner_buffer_color.r,
|
||||
regs.texturing.tev_combiner_buffer_color.g,
|
||||
regs.texturing.tev_combiner_buffer_color.b,
|
||||
regs.texturing.tev_combiner_buffer_color.a,
|
||||
};
|
||||
|
||||
Math::Vec4<u8> primary_fragment_color = {0, 0, 0, 0};
|
||||
Math::Vec4<u8> secondary_fragment_color = {0, 0, 0, 0};
|
||||
|
||||
if (!g_state.regs.lighting.disable) {
|
||||
Math::Quaternion<float> normquat = Math::Quaternion<float>{
|
||||
{GetInterpolatedAttribute(v0.quat.x, v1.quat.x, v2.quat.x).ToFloat32(),
|
||||
GetInterpolatedAttribute(v0.quat.y, v1.quat.y, v2.quat.y).ToFloat32(),
|
||||
GetInterpolatedAttribute(v0.quat.z, v1.quat.z, v2.quat.z).ToFloat32()},
|
||||
GetInterpolatedAttribute(v0.quat.w, v1.quat.w, v2.quat.w).ToFloat32(),
|
||||
}.Normalized();

Math::Vec3<float> view{
GetInterpolatedAttribute(v0.view.x, v1.view.x, v2.view.x).ToFloat32(),
GetInterpolatedAttribute(v0.view.y, v1.view.y, v2.view.y).ToFloat32(),
GetInterpolatedAttribute(v0.view.z, v1.view.z, v2.view.z).ToFloat32(),
};
std::tie(primary_fragment_color, secondary_fragment_color) = ComputeFragmentsColors(
g_state.regs.lighting, g_state.lighting, normquat, view, texture_color);
}

for (unsigned tev_stage_index = 0; tev_stage_index < tev_stages.size();
++tev_stage_index) {
const auto& tev_stage = tev_stages[tev_stage_index];
using Source = TexturingRegs::TevStageConfig::Source;

auto GetSource = [&](Source source) -> Math::Vec4<u8> {
switch (source) {
case Source::PrimaryColor:
return primary_color;

case Source::PrimaryFragmentColor:
return primary_fragment_color;

case Source::SecondaryFragmentColor:
return secondary_fragment_color;

case Source::Texture0:
return texture_color[0];

case Source::Texture1:
return texture_color[1];

case Source::Texture2:
return texture_color[2];

case Source::Texture3:
return texture_color[3];

case Source::PreviousBuffer:
return combiner_buffer;

case Source::Constant:
return {tev_stage.const_r, tev_stage.const_g, tev_stage.const_b,
tev_stage.const_a};

case Source::Previous:
return combiner_output;

default:
LOG_ERROR(HW_GPU, "Unknown color combiner source %d", (int)source);
UNIMPLEMENTED();
return {0, 0, 0, 0};
}
};

// color combiner
// NOTE: Not sure if the alpha combiner might use the color output of the previous
// stage as input. Hence, we currently don't directly write the result to
// combiner_output.rgb(), but instead store it in a temporary variable until
// alpha combining has been done.
Math::Vec3<u8> color_result[3] = {
GetColorModifier(tev_stage.color_modifier1, GetSource(tev_stage.color_source1)),
GetColorModifier(tev_stage.color_modifier2, GetSource(tev_stage.color_source2)),
GetColorModifier(tev_stage.color_modifier3, GetSource(tev_stage.color_source3)),
};
auto color_output = ColorCombine(tev_stage.color_op, color_result);

u8 alpha_output;
if (tev_stage.color_op == TexturingRegs::TevStageConfig::Operation::Dot3_RGBA) {
// result of Dot3_RGBA operation is also placed to the alpha component
alpha_output = color_output.x;
} else {
// alpha combiner
std::array<u8, 3> alpha_result = {{
GetAlphaModifier(tev_stage.alpha_modifier1,
GetSource(tev_stage.alpha_source1)),
GetAlphaModifier(tev_stage.alpha_modifier2,
GetSource(tev_stage.alpha_source2)),
GetAlphaModifier(tev_stage.alpha_modifier3,
GetSource(tev_stage.alpha_source3)),
}};
alpha_output = AlphaCombine(tev_stage.alpha_op, alpha_result);
}

combiner_output[0] =
std::min((unsigned)255, color_output.r() * tev_stage.GetColorMultiplier());
combiner_output[1] =
std::min((unsigned)255, color_output.g() * tev_stage.GetColorMultiplier());
combiner_output[2] =
std::min((unsigned)255, color_output.b() * tev_stage.GetColorMultiplier());
combiner_output[3] =
std::min((unsigned)255, alpha_output * tev_stage.GetAlphaMultiplier());

combiner_buffer = next_combiner_buffer;

if (regs.texturing.tev_combiner_buffer_input.TevStageUpdatesCombinerBufferColor(
tev_stage_index)) {
next_combiner_buffer.r() = combiner_output.r();
next_combiner_buffer.g() = combiner_output.g();
next_combiner_buffer.b() = combiner_output.b();
}

if (regs.texturing.tev_combiner_buffer_input.TevStageUpdatesCombinerBufferAlpha(
tev_stage_index)) {
next_combiner_buffer.a() = combiner_output.a();
}
}

const auto& output_merger = regs.framebuffer.output_merger;
// TODO: Does alpha testing happen before or after stencil?
if (output_merger.alpha_test.enable) {
bool pass = false;

switch (output_merger.alpha_test.func) {
case FramebufferRegs::CompareFunc::Never:
pass = false;
break;

case FramebufferRegs::CompareFunc::Always:
pass = true;
break;

case FramebufferRegs::CompareFunc::Equal:
pass = combiner_output.a() == output_merger.alpha_test.ref;
break;

case FramebufferRegs::CompareFunc::NotEqual:
pass = combiner_output.a() != output_merger.alpha_test.ref;
break;

case FramebufferRegs::CompareFunc::LessThan:
pass = combiner_output.a() < output_merger.alpha_test.ref;
break;

case FramebufferRegs::CompareFunc::LessThanOrEqual:
pass = combiner_output.a() <= output_merger.alpha_test.ref;
break;

case FramebufferRegs::CompareFunc::GreaterThan:
pass = combiner_output.a() > output_merger.alpha_test.ref;
break;

case FramebufferRegs::CompareFunc::GreaterThanOrEqual:
pass = combiner_output.a() >= output_merger.alpha_test.ref;
break;
}

if (!pass)
continue;
}

// Apply fog combiner
// Not fully accurate. We'd have to know what data type is used to
// store the depth etc. Using float for now until we know more
// about Pica datatypes
if (regs.texturing.fog_mode == TexturingRegs::FogMode::Fog) {
const Math::Vec3<u8> fog_color = {
static_cast<u8>(regs.texturing.fog_color.r.Value()),
static_cast<u8>(regs.texturing.fog_color.g.Value()),
static_cast<u8>(regs.texturing.fog_color.b.Value()),
};

// Get index into fog LUT
float fog_index;
if (g_state.regs.texturing.fog_flip) {
fog_index = (1.0f - depth) * 128.0f;
} else {
fog_index = depth * 128.0f;
}

// Generate clamped fog factor from LUT for given fog index
float fog_i = MathUtil::Clamp(floorf(fog_index), 0.0f, 127.0f);
float fog_f = fog_index - fog_i;
const auto& fog_lut_entry = g_state.fog.lut[static_cast<unsigned int>(fog_i)];
float fog_factor = fog_lut_entry.ToFloat() + fog_lut_entry.DiffToFloat() * fog_f;
fog_factor = MathUtil::Clamp(fog_factor, 0.0f, 1.0f);

// Blend the fog
for (unsigned i = 0; i < 3; i++) {
combiner_output[i] = static_cast<u8>(fog_factor * combiner_output[i] +
(1.0f - fog_factor) * fog_color[i]);
}
}

u8 old_stencil = 0;

auto UpdateStencil = [stencil_test, x, y,
&old_stencil](Pica::FramebufferRegs::StencilAction action) {
u8 new_stencil =
PerformStencilAction(action, old_stencil, stencil_test.reference_value);
if (g_state.regs.framebuffer.framebuffer.allow_depth_stencil_write != 0)
SetStencil(x >> 4, y >> 4, (new_stencil & stencil_test.write_mask) |
(old_stencil & ~stencil_test.write_mask));
};

if (stencil_action_enable) {
old_stencil = GetStencil(x >> 4, y >> 4);
u8 dest = old_stencil & stencil_test.input_mask;
u8 ref = stencil_test.reference_value & stencil_test.input_mask;

bool pass = false;
switch (stencil_test.func) {
case FramebufferRegs::CompareFunc::Never:
pass = false;
break;

case FramebufferRegs::CompareFunc::Always:
pass = true;
break;

case FramebufferRegs::CompareFunc::Equal:
pass = (ref == dest);
break;

case FramebufferRegs::CompareFunc::NotEqual:
pass = (ref != dest);
break;

case FramebufferRegs::CompareFunc::LessThan:
pass = (ref < dest);
break;

case FramebufferRegs::CompareFunc::LessThanOrEqual:
pass = (ref <= dest);
break;

case FramebufferRegs::CompareFunc::GreaterThan:
pass = (ref > dest);
break;

case FramebufferRegs::CompareFunc::GreaterThanOrEqual:
pass = (ref >= dest);
break;
}

if (!pass) {
UpdateStencil(stencil_test.action_stencil_fail);
continue;
}
}

// Convert float to integer
unsigned num_bits =
FramebufferRegs::DepthBitsPerPixel(regs.framebuffer.framebuffer.depth_format);
u32 z = (u32)(depth * ((1 << num_bits) - 1));

if (output_merger.depth_test_enable) {
u32 ref_z = GetDepth(x >> 4, y >> 4);

bool pass = false;

switch (output_merger.depth_test_func) {
case FramebufferRegs::CompareFunc::Never:
pass = false;
break;

case FramebufferRegs::CompareFunc::Always:
pass = true;
break;

case FramebufferRegs::CompareFunc::Equal:
pass = z == ref_z;
break;

case FramebufferRegs::CompareFunc::NotEqual:
pass = z != ref_z;
break;

case FramebufferRegs::CompareFunc::LessThan:
pass = z < ref_z;
break;

case FramebufferRegs::CompareFunc::LessThanOrEqual:
pass = z <= ref_z;
break;

case FramebufferRegs::CompareFunc::GreaterThan:
pass = z > ref_z;
break;

case FramebufferRegs::CompareFunc::GreaterThanOrEqual:
pass = z >= ref_z;
break;
}

if (!pass) {
if (stencil_action_enable)
UpdateStencil(stencil_test.action_depth_fail);
continue;
}
}

if (regs.framebuffer.framebuffer.allow_depth_stencil_write != 0 &&
output_merger.depth_write_enable) {

SetDepth(x >> 4, y >> 4, z);
}

// The stencil depth_pass action is executed even if depth testing is disabled
if (stencil_action_enable)
UpdateStencil(stencil_test.action_depth_pass);

auto dest = GetPixel(x >> 4, y >> 4);
Math::Vec4<u8> blend_output = combiner_output;

if (output_merger.alphablend_enable) {
auto params = output_merger.alpha_blending;

auto LookupFactor = [&](unsigned channel,
FramebufferRegs::BlendFactor factor) -> u8 {
DEBUG_ASSERT(channel < 4);

const Math::Vec4<u8> blend_const = {
static_cast<u8>(output_merger.blend_const.r),
static_cast<u8>(output_merger.blend_const.g),
static_cast<u8>(output_merger.blend_const.b),
static_cast<u8>(output_merger.blend_const.a),
};

switch (factor) {
case FramebufferRegs::BlendFactor::Zero:
return 0;

case FramebufferRegs::BlendFactor::One:
return 255;

case FramebufferRegs::BlendFactor::SourceColor:
return combiner_output[channel];

case FramebufferRegs::BlendFactor::OneMinusSourceColor:
return 255 - combiner_output[channel];

case FramebufferRegs::BlendFactor::DestColor:
return dest[channel];

case FramebufferRegs::BlendFactor::OneMinusDestColor:
return 255 - dest[channel];

case FramebufferRegs::BlendFactor::SourceAlpha:
return combiner_output.a();

case FramebufferRegs::BlendFactor::OneMinusSourceAlpha:
return 255 - combiner_output.a();

case FramebufferRegs::BlendFactor::DestAlpha:
return dest.a();

case FramebufferRegs::BlendFactor::OneMinusDestAlpha:
return 255 - dest.a();

case FramebufferRegs::BlendFactor::ConstantColor:
return blend_const[channel];

case FramebufferRegs::BlendFactor::OneMinusConstantColor:
return 255 - blend_const[channel];

case FramebufferRegs::BlendFactor::ConstantAlpha:
return blend_const.a();

case FramebufferRegs::BlendFactor::OneMinusConstantAlpha:
return 255 - blend_const.a();

case FramebufferRegs::BlendFactor::SourceAlphaSaturate:
// Returns 1.0 for the alpha channel
if (channel == 3)
return 255;
return std::min(combiner_output.a(), static_cast<u8>(255 - dest.a()));

default:
LOG_CRITICAL(HW_GPU, "Unknown blend factor %x", factor);
UNIMPLEMENTED();
break;
}

return combiner_output[channel];
};

auto srcfactor = Math::MakeVec(LookupFactor(0, params.factor_source_rgb),
LookupFactor(1, params.factor_source_rgb),
LookupFactor(2, params.factor_source_rgb),
LookupFactor(3, params.factor_source_a));

auto dstfactor = Math::MakeVec(LookupFactor(0, params.factor_dest_rgb),
LookupFactor(1, params.factor_dest_rgb),
LookupFactor(2, params.factor_dest_rgb),
LookupFactor(3, params.factor_dest_a));

blend_output = EvaluateBlendEquation(combiner_output, srcfactor, dest, dstfactor,
params.blend_equation_rgb);
blend_output.a() = EvaluateBlendEquation(combiner_output, srcfactor, dest,
dstfactor, params.blend_equation_a)
.a();
} else {
blend_output =
Math::MakeVec(LogicOp(combiner_output.r(), dest.r(), output_merger.logic_op),
LogicOp(combiner_output.g(), dest.g(), output_merger.logic_op),
LogicOp(combiner_output.b(), dest.b(), output_merger.logic_op),
LogicOp(combiner_output.a(), dest.a(), output_merger.logic_op));
}

const Math::Vec4<u8> result = {
output_merger.red_enable ? blend_output.r() : dest.r(),
output_merger.green_enable ? blend_output.g() : dest.g(),
output_merger.blue_enable ? blend_output.b() : dest.b(),
output_merger.alpha_enable ? blend_output.a() : dest.a(),
};

if (regs.framebuffer.framebuffer.allow_color_write != 0)
DrawPixel(x >> 4, y >> 4, result);
}
}
}

void ProcessTriangle(const Vertex& v0, const Vertex& v1, const Vertex& v2) {
ProcessTriangleInternal(v0, v1, v2);
}

} // namespace Rasterizer

} // namespace Pica
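For reference, a minimal standalone sketch of the per-channel fog blend used above (the helper name and the use of std::clamp are illustrative assumptions, not part of the removed file): each colour channel is a linear interpolation between the combiner output and the fog colour, weighted by the LUT-derived fog factor.

#include <algorithm>
#include <cstdint>

// Blend one 8-bit channel towards the fog colour; fog_factor is in [0, 1],
// where 1.0 keeps the fragment colour and 0.0 yields pure fog, mirroring the
// three-channel loop in the rasterizer code above.
static std::uint8_t BlendFogChannel(std::uint8_t src, std::uint8_t fog, float fog_factor) {
    const float blended = fog_factor * src + (1.0f - fog_factor) * fog;
    return static_cast<std::uint8_t>(std::clamp(blended, 0.0f, 255.0f));
}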
@ -1,48 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "video_core/shader/shader.h"

namespace Pica {

namespace Rasterizer {

struct Vertex : Shader::OutputVertex {
Vertex(const OutputVertex& v) : OutputVertex(v) {}

// Attributes used to store intermediate results
// position after perspective divide
Math::Vec3<float24> screenpos;

// Linear interpolation
// factor: 0=this, 1=vtx
// Note: This function cannot be called after perspective divide
void Lerp(float24 factor, const Vertex& vtx) {
pos = pos * factor + vtx.pos * (float24::FromFloat32(1) - factor);
quat = quat * factor + vtx.quat * (float24::FromFloat32(1) - factor);
color = color * factor + vtx.color * (float24::FromFloat32(1) - factor);
tc0 = tc0 * factor + vtx.tc0 * (float24::FromFloat32(1) - factor);
tc1 = tc1 * factor + vtx.tc1 * (float24::FromFloat32(1) - factor);
tc0_w = tc0_w * factor + vtx.tc0_w * (float24::FromFloat32(1) - factor);
view = view * factor + vtx.view * (float24::FromFloat32(1) - factor);
tc2 = tc2 * factor + vtx.tc2 * (float24::FromFloat32(1) - factor);
}

// Linear interpolation
// factor: 0=v0, 1=v1
// Note: This function cannot be called after perspective divide
static Vertex Lerp(float24 factor, const Vertex& v0, const Vertex& v1) {
Vertex ret = v0;
ret.Lerp(factor, v1);
return ret;
}
};

void ProcessTriangle(const Vertex& v0, const Vertex& v1, const Vertex& v2);

} // namespace Rasterizer

} // namespace Pica
@ -1,15 +0,0 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "video_core/swrasterizer/clipper.h"
#include "video_core/swrasterizer/swrasterizer.h"

namespace VideoCore {

void SWRasterizer::AddTriangle(const Pica::Shader::OutputVertex& v0,
const Pica::Shader::OutputVertex& v1,
const Pica::Shader::OutputVertex& v2) {
Pica::Clipper::ProcessTriangle(v0, v1, v2);
}
}
@ -1,27 +0,0 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"
#include "video_core/rasterizer_interface.h"

namespace Pica {
namespace Shader {
struct OutputVertex;
}
}

namespace VideoCore {

class SWRasterizer : public RasterizerInterface {
void AddTriangle(const Pica::Shader::OutputVertex& v0, const Pica::Shader::OutputVertex& v1,
const Pica::Shader::OutputVertex& v2) override;
void DrawTriangles() override {}
void NotifyPicaRegisterChanged(u32 id) override {}
void FlushAll() override {}
void FlushRegion(PAddr addr, u64 size) override {}
void FlushAndInvalidateRegion(PAddr addr, u64 size) override {}
};
}
@ -1,244 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>

#include "common/assert.h"
#include "common/common_types.h"
#include "common/math_util.h"
#include "common/vector_math.h"
#include "video_core/regs_texturing.h"
#include "video_core/swrasterizer/texturing.h"

namespace Pica {
namespace Rasterizer {

using TevStageConfig = TexturingRegs::TevStageConfig;

int GetWrappedTexCoord(TexturingRegs::TextureConfig::WrapMode mode, int val, unsigned size) {
switch (mode) {
case TexturingRegs::TextureConfig::ClampToEdge2:
// For negative coordinate, ClampToEdge2 behaves the same as Repeat
if (val < 0) {
return static_cast<int>(static_cast<unsigned>(val) % size);
}
// [[fallthrough]]
case TexturingRegs::TextureConfig::ClampToEdge:
val = std::max(val, 0);
val = std::min(val, static_cast<int>(size) - 1);
return val;

case TexturingRegs::TextureConfig::ClampToBorder:
return val;

case TexturingRegs::TextureConfig::ClampToBorder2:
// For ClampToBorder2, the case of positive coordinate beyond the texture size is already
// handled outside. Here we only handle the negative coordinate in the same way as Repeat.
case TexturingRegs::TextureConfig::Repeat2:
case TexturingRegs::TextureConfig::Repeat3:
case TexturingRegs::TextureConfig::Repeat:
return static_cast<int>(static_cast<unsigned>(val) % size);

case TexturingRegs::TextureConfig::MirroredRepeat: {
unsigned int coord = (static_cast<unsigned>(val) % (2 * size));
if (coord >= size)
coord = 2 * size - 1 - coord;
return static_cast<int>(coord);
}

default:
LOG_ERROR(HW_GPU, "Unknown texture coordinate wrapping mode %x", (int)mode);
UNIMPLEMENTED();
return 0;
}
};

Math::Vec3<u8> GetColorModifier(TevStageConfig::ColorModifier factor,
const Math::Vec4<u8>& values) {
using ColorModifier = TevStageConfig::ColorModifier;

switch (factor) {
case ColorModifier::SourceColor:
return values.rgb();

case ColorModifier::OneMinusSourceColor:
return (Math::Vec3<u8>(255, 255, 255) - values.rgb()).Cast<u8>();

case ColorModifier::SourceAlpha:
return values.aaa();

case ColorModifier::OneMinusSourceAlpha:
return (Math::Vec3<u8>(255, 255, 255) - values.aaa()).Cast<u8>();

case ColorModifier::SourceRed:
return values.rrr();

case ColorModifier::OneMinusSourceRed:
return (Math::Vec3<u8>(255, 255, 255) - values.rrr()).Cast<u8>();

case ColorModifier::SourceGreen:
return values.ggg();

case ColorModifier::OneMinusSourceGreen:
return (Math::Vec3<u8>(255, 255, 255) - values.ggg()).Cast<u8>();

case ColorModifier::SourceBlue:
return values.bbb();

case ColorModifier::OneMinusSourceBlue:
return (Math::Vec3<u8>(255, 255, 255) - values.bbb()).Cast<u8>();
}

UNREACHABLE();
};

u8 GetAlphaModifier(TevStageConfig::AlphaModifier factor, const Math::Vec4<u8>& values) {
using AlphaModifier = TevStageConfig::AlphaModifier;

switch (factor) {
case AlphaModifier::SourceAlpha:
return values.a();

case AlphaModifier::OneMinusSourceAlpha:
return 255 - values.a();

case AlphaModifier::SourceRed:
return values.r();

case AlphaModifier::OneMinusSourceRed:
return 255 - values.r();

case AlphaModifier::SourceGreen:
return values.g();

case AlphaModifier::OneMinusSourceGreen:
return 255 - values.g();

case AlphaModifier::SourceBlue:
return values.b();

case AlphaModifier::OneMinusSourceBlue:
return 255 - values.b();
}

UNREACHABLE();
};

Math::Vec3<u8> ColorCombine(TevStageConfig::Operation op, const Math::Vec3<u8> input[3]) {
using Operation = TevStageConfig::Operation;

switch (op) {
case Operation::Replace:
return input[0];

case Operation::Modulate:
return ((input[0] * input[1]) / 255).Cast<u8>();

case Operation::Add: {
auto result = input[0] + input[1];
result.r() = std::min(255, result.r());
result.g() = std::min(255, result.g());
result.b() = std::min(255, result.b());
return result.Cast<u8>();
}

case Operation::AddSigned: {
// TODO(bunnei): Verify that the color conversion from (float) 0.5f to
// (byte) 128 is correct
auto result =
input[0].Cast<int>() + input[1].Cast<int>() - Math::MakeVec<int>(128, 128, 128);
result.r() = MathUtil::Clamp<int>(result.r(), 0, 255);
result.g() = MathUtil::Clamp<int>(result.g(), 0, 255);
result.b() = MathUtil::Clamp<int>(result.b(), 0, 255);
return result.Cast<u8>();
}

case Operation::Lerp:
return ((input[0] * input[2] +
input[1] * (Math::MakeVec<u8>(255, 255, 255) - input[2]).Cast<u8>()) /
255)
.Cast<u8>();

case Operation::Subtract: {
auto result = input[0].Cast<int>() - input[1].Cast<int>();
result.r() = std::max(0, result.r());
result.g() = std::max(0, result.g());
result.b() = std::max(0, result.b());
return result.Cast<u8>();
}

case Operation::MultiplyThenAdd: {
auto result = (input[0] * input[1] + 255 * input[2].Cast<int>()) / 255;
result.r() = std::min(255, result.r());
result.g() = std::min(255, result.g());
result.b() = std::min(255, result.b());
return result.Cast<u8>();
}

case Operation::AddThenMultiply: {
auto result = input[0] + input[1];
result.r() = std::min(255, result.r());
result.g() = std::min(255, result.g());
result.b() = std::min(255, result.b());
result = (result * input[2].Cast<int>()) / 255;
return result.Cast<u8>();
}
case Operation::Dot3_RGB:
case Operation::Dot3_RGBA: {
// Not fully accurate. Worst case scenario seems to yield a +/-3 error. Some HW results
// indicate that the per-component computation can't have a higher precision than 1/256,
// while dot3_rgb((0x80,g0,b0), (0x7F,g1,b1)) and dot3_rgb((0x80,g0,b0), (0x80,g1,b1)) give
// different results.
int result = ((input[0].r() * 2 - 255) * (input[1].r() * 2 - 255) + 128) / 256 +
((input[0].g() * 2 - 255) * (input[1].g() * 2 - 255) + 128) / 256 +
((input[0].b() * 2 - 255) * (input[1].b() * 2 - 255) + 128) / 256;
result = std::max(0, std::min(255, result));
return {(u8)result, (u8)result, (u8)result};
}
default:
LOG_ERROR(HW_GPU, "Unknown color combiner operation %d", (int)op);
UNIMPLEMENTED();
return {0, 0, 0};
}
};

u8 AlphaCombine(TevStageConfig::Operation op, const std::array<u8, 3>& input) {
switch (op) {
using Operation = TevStageConfig::Operation;
case Operation::Replace:
return input[0];

case Operation::Modulate:
return input[0] * input[1] / 255;

case Operation::Add:
return std::min(255, input[0] + input[1]);

case Operation::AddSigned: {
// TODO(bunnei): Verify that the color conversion from (float) 0.5f to (byte) 128 is correct
auto result = static_cast<int>(input[0]) + static_cast<int>(input[1]) - 128;
return static_cast<u8>(MathUtil::Clamp<int>(result, 0, 255));
}

case Operation::Lerp:
return (input[0] * input[2] + input[1] * (255 - input[2])) / 255;

case Operation::Subtract:
return std::max(0, (int)input[0] - (int)input[1]);

case Operation::MultiplyThenAdd:
return std::min(255, (input[0] * input[1] + 255 * input[2]) / 255);

case Operation::AddThenMultiply:
return (std::min(255, (input[0] + input[1])) * input[2]) / 255;

default:
LOG_ERROR(HW_GPU, "Unknown alpha combiner operation %d", (int)op);
UNIMPLEMENTED();
return 0;
}
};

} // namespace Rasterizer
} // namespace Pica
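For reference, a self-contained sketch of two of the TEV operations above on single 8-bit channels (TevLerp and TevAddSigned are illustrative helper names; the removed file operates on Math::Vec3<u8> and std::array<u8, 3> instead):

#include <algorithm>
#include <cstdint>

// Lerp: out = (a * t + b * (255 - t)) / 255, matching Operation::Lerp above.
static std::uint8_t TevLerp(std::uint8_t a, std::uint8_t b, std::uint8_t t) {
    return static_cast<std::uint8_t>((a * t + b * (255 - t)) / 255);
}

// AddSigned: out = clamp(a + b - 128, 0, 255), i.e. 0.5 (encoded as 128) acts as the zero point.
static std::uint8_t TevAddSigned(std::uint8_t a, std::uint8_t b) {
    return static_cast<std::uint8_t>(std::clamp(static_cast<int>(a) + b - 128, 0, 255));
}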
@ -1,28 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"
#include "common/vector_math.h"
#include "video_core/regs_texturing.h"

namespace Pica {
namespace Rasterizer {

int GetWrappedTexCoord(TexturingRegs::TextureConfig::WrapMode mode, int val, unsigned size);

Math::Vec3<u8> GetColorModifier(TexturingRegs::TevStageConfig::ColorModifier factor,
const Math::Vec4<u8>& values);

u8 GetAlphaModifier(TexturingRegs::TevStageConfig::AlphaModifier factor,
const Math::Vec4<u8>& values);

Math::Vec3<u8> ColorCombine(TexturingRegs::TevStageConfig::Operation op,
const Math::Vec3<u8> input[3]);

u8 AlphaCombine(TexturingRegs::TevStageConfig::Operation op, const std::array<u8, 3>& input);

} // namespace Rasterizer
} // namespace Pica
@ -1,122 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <array>
#include "common/bit_field.h"
#include "common/color.h"
#include "common/common_types.h"
#include "common/math_util.h"
#include "common/vector_math.h"
#include "video_core/texture/etc1.h"

namespace Pica {
namespace Texture {

namespace {

constexpr std::array<std::array<u8, 2>, 8> etc1_modifier_table = {{
{2, 8}, {5, 17}, {9, 29}, {13, 42}, {18, 60}, {24, 80}, {33, 106}, {47, 183},
}};

union ETC1Tile {
u64 raw;

// Each of these two is a collection of 16 bits (one per lookup value)
BitField<0, 16, u64> table_subindexes;
BitField<16, 16, u64> negation_flags;

unsigned GetTableSubIndex(unsigned index) const {
return (table_subindexes >> index) & 1;
}

bool GetNegationFlag(unsigned index) const {
return ((negation_flags >> index) & 1) == 1;
}

BitField<32, 1, u64> flip;
BitField<33, 1, u64> differential_mode;

BitField<34, 3, u64> table_index_2;
BitField<37, 3, u64> table_index_1;

union {
// delta value + base value
BitField<40, 3, s64> db;
BitField<43, 5, u64> b;

BitField<48, 3, s64> dg;
BitField<51, 5, u64> g;

BitField<56, 3, s64> dr;
BitField<59, 5, u64> r;
} differential;

union {
BitField<40, 4, u64> b2;
BitField<44, 4, u64> b1;

BitField<48, 4, u64> g2;
BitField<52, 4, u64> g1;

BitField<56, 4, u64> r2;
BitField<60, 4, u64> r1;
} separate;

const Math::Vec3<u8> GetRGB(unsigned int x, unsigned int y) const {
int texel = 4 * x + y;

if (flip)
std::swap(x, y);

// Lookup base value
Math::Vec3<int> ret;
if (differential_mode) {
ret.r() = static_cast<int>(differential.r);
ret.g() = static_cast<int>(differential.g);
ret.b() = static_cast<int>(differential.b);
if (x >= 2) {
ret.r() += static_cast<int>(differential.dr);
ret.g() += static_cast<int>(differential.dg);
ret.b() += static_cast<int>(differential.db);
}
ret.r() = Color::Convert5To8(ret.r());
ret.g() = Color::Convert5To8(ret.g());
ret.b() = Color::Convert5To8(ret.b());
} else {
if (x < 2) {
ret.r() = Color::Convert4To8(static_cast<u8>(separate.r1));
ret.g() = Color::Convert4To8(static_cast<u8>(separate.g1));
ret.b() = Color::Convert4To8(static_cast<u8>(separate.b1));
} else {
ret.r() = Color::Convert4To8(static_cast<u8>(separate.r2));
ret.g() = Color::Convert4To8(static_cast<u8>(separate.g2));
ret.b() = Color::Convert4To8(static_cast<u8>(separate.b2));
}
}

// Add modifier
unsigned table_index =
static_cast<int>((x < 2) ? table_index_1.Value() : table_index_2.Value());

int modifier = etc1_modifier_table[table_index][GetTableSubIndex(texel)];
if (GetNegationFlag(texel))
modifier *= -1;

ret.r() = MathUtil::Clamp(ret.r() + modifier, 0, 255);
ret.g() = MathUtil::Clamp(ret.g() + modifier, 0, 255);
ret.b() = MathUtil::Clamp(ret.b() + modifier, 0, 255);

return ret.Cast<u8>();
}
};

} // anonymous namespace

Math::Vec3<u8> SampleETC1Subtile(u64 value, unsigned int x, unsigned int y) {
ETC1Tile tile{value};
return tile.GetRGB(x, y);
}

} // namespace Texture
} // namespace Pica
@ -1,16 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"
#include "common/vector_math.h"

namespace Pica {
namespace Texture {

Math::Vec3<u8> SampleETC1Subtile(u64 value, unsigned int x, unsigned int y);

} // namespace Texture
} // namespace Pica
@ -1,227 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "common/color.h"
#include "common/logging/log.h"
#include "common/math_util.h"
#include "common/swap.h"
#include "common/vector_math.h"
#include "video_core/regs_texturing.h"
#include "video_core/texture/etc1.h"
#include "video_core/texture/texture_decode.h"
#include "video_core/utils.h"

using TextureFormat = Pica::TexturingRegs::TextureFormat;

namespace Pica {
namespace Texture {

constexpr size_t TILE_SIZE = 8 * 8;
constexpr size_t ETC1_SUBTILES = 2 * 2;

size_t CalculateTileSize(TextureFormat format) {
switch (format) {
case TextureFormat::RGBA8:
return 4 * TILE_SIZE;

case TextureFormat::RGB8:
return 3 * TILE_SIZE;

case TextureFormat::RGB5A1:
case TextureFormat::RGB565:
case TextureFormat::RGBA4:
case TextureFormat::IA8:
case TextureFormat::RG8:
return 2 * TILE_SIZE;

case TextureFormat::I8:
case TextureFormat::A8:
case TextureFormat::IA4:
return 1 * TILE_SIZE;

case TextureFormat::I4:
case TextureFormat::A4:
return TILE_SIZE / 2;

case TextureFormat::ETC1:
return ETC1_SUBTILES * 8;

case TextureFormat::ETC1A4:
return ETC1_SUBTILES * 16;

default: // placeholder for yet unknown formats
UNIMPLEMENTED();
return 0;
}
}

Math::Vec4<u8> LookupTexture(const u8* source, unsigned int x, unsigned int y,
const TextureInfo& info, bool disable_alpha) {
// Coordinate in tiles
const unsigned int coarse_x = x / 8;
const unsigned int coarse_y = y / 8;

// Coordinate inside the tile
const unsigned int fine_x = x % 8;
const unsigned int fine_y = y % 8;

const u8* line = source + coarse_y * info.stride;
const u8* tile = line + coarse_x * CalculateTileSize(info.format);
return LookupTexelInTile(tile, fine_x, fine_y, info, disable_alpha);
}

Math::Vec4<u8> LookupTexelInTile(const u8* source, unsigned int x, unsigned int y,
const TextureInfo& info, bool disable_alpha) {
DEBUG_ASSERT(x < 8);
DEBUG_ASSERT(y < 8);

using VideoCore::MortonInterleave;

switch (info.format) {
case TextureFormat::RGBA8: {
auto res = Color::DecodeRGBA8(source + MortonInterleave(x, y) * 4);
return {res.r(), res.g(), res.b(), static_cast<u8>(disable_alpha ? 255 : res.a())};
}

case TextureFormat::RGB8: {
auto res = Color::DecodeRGB8(source + MortonInterleave(x, y) * 3);
return {res.r(), res.g(), res.b(), 255};
}

case TextureFormat::RGB5A1: {
auto res = Color::DecodeRGB5A1(source + MortonInterleave(x, y) * 2);
return {res.r(), res.g(), res.b(), static_cast<u8>(disable_alpha ? 255 : res.a())};
}

case TextureFormat::RGB565: {
auto res = Color::DecodeRGB565(source + MortonInterleave(x, y) * 2);
return {res.r(), res.g(), res.b(), 255};
}

case TextureFormat::RGBA4: {
auto res = Color::DecodeRGBA4(source + MortonInterleave(x, y) * 2);
return {res.r(), res.g(), res.b(), static_cast<u8>(disable_alpha ? 255 : res.a())};
}

case TextureFormat::IA8: {
const u8* source_ptr = source + MortonInterleave(x, y) * 2;

if (disable_alpha) {
// Show intensity as red, alpha as green
return {source_ptr[1], source_ptr[0], 0, 255};
} else {
return {source_ptr[1], source_ptr[1], source_ptr[1], source_ptr[0]};
}
}

case TextureFormat::RG8: {
auto res = Color::DecodeRG8(source + MortonInterleave(x, y) * 2);
return {res.r(), res.g(), 0, 255};
}

case TextureFormat::I8: {
const u8* source_ptr = source + MortonInterleave(x, y);
return {*source_ptr, *source_ptr, *source_ptr, 255};
}

case TextureFormat::A8: {
const u8* source_ptr = source + MortonInterleave(x, y);

if (disable_alpha) {
return {*source_ptr, *source_ptr, *source_ptr, 255};
} else {
return {0, 0, 0, *source_ptr};
}
}

case TextureFormat::IA4: {
const u8* source_ptr = source + MortonInterleave(x, y);

u8 i = Color::Convert4To8(((*source_ptr) & 0xF0) >> 4);
u8 a = Color::Convert4To8((*source_ptr) & 0xF);

if (disable_alpha) {
// Show intensity as red, alpha as green
return {i, a, 0, 255};
} else {
return {i, i, i, a};
}
}

case TextureFormat::I4: {
u32 morton_offset = MortonInterleave(x, y);
const u8* source_ptr = source + morton_offset / 2;

u8 i = (morton_offset % 2) ? ((*source_ptr & 0xF0) >> 4) : (*source_ptr & 0xF);
i = Color::Convert4To8(i);

return {i, i, i, 255};
}

case TextureFormat::A4: {
u32 morton_offset = MortonInterleave(x, y);
const u8* source_ptr = source + morton_offset / 2;

u8 a = (morton_offset % 2) ? ((*source_ptr & 0xF0) >> 4) : (*source_ptr & 0xF);
a = Color::Convert4To8(a);

if (disable_alpha) {
return {a, a, a, 255};
} else {
return {0, 0, 0, a};
}
}

case TextureFormat::ETC1:
case TextureFormat::ETC1A4: {
bool has_alpha = (info.format == TextureFormat::ETC1A4);
size_t subtile_size = has_alpha ? 16 : 8;

// ETC1 further subdivides each 8x8 tile into four 4x4 subtiles
constexpr unsigned int subtile_width = 4;
constexpr unsigned int subtile_height = 4;

unsigned int subtile_index = (x / subtile_width) + 2 * (y / subtile_height);
x %= subtile_width;
y %= subtile_height;

const u8* subtile_ptr = source + subtile_index * subtile_size;

u8 alpha = 255;
if (has_alpha) {
u64_le packed_alpha;
memcpy(&packed_alpha, subtile_ptr, sizeof(u64));
subtile_ptr += sizeof(u64);

alpha = Color::Convert4To8((packed_alpha >> (4 * (x * subtile_width + y))) & 0xF);
}

u64_le subtile_data;
memcpy(&subtile_data, subtile_ptr, sizeof(u64));

return Math::MakeVec(SampleETC1Subtile(subtile_data, x, y),
disable_alpha ? (u8)255 : alpha);
}

default:
LOG_ERROR(HW_GPU, "Unknown texture format: %x", (u32)info.format);
DEBUG_ASSERT(false);
return {};
}
}

TextureInfo TextureInfo::FromPicaRegister(const TexturingRegs::TextureConfig& config,
const TexturingRegs::TextureFormat& format) {
TextureInfo info;
info.physical_address = config.GetPhysicalAddress();
info.width = config.width;
info.height = config.height;
info.format = format;
info.SetDefaultStride();
return info;
}

} // namespace Texture
} // namespace Pica
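For reference, a standalone sketch of the in-tile texel addressing used by LookupTexelInTile above, assuming the usual Z-order (Morton) interleave of the low three bits of x and y with x occupying the even bit positions; MortonInterleave8x8 is an illustrative stand-in for VideoCore::MortonInterleave, whose definition is not part of this diff.

#include <cstdint>

// Z-order (Morton) offset of a texel inside an 8x8 tile: the three low bits of x
// go to the even bit positions and the three low bits of y to the odd ones, so
// neighbouring texels stay close together in memory.
static std::uint32_t MortonInterleave8x8(std::uint32_t x, std::uint32_t y) {
    std::uint32_t offset = 0;
    for (std::uint32_t bit = 0; bit < 3; ++bit) {
        offset |= ((x >> bit) & 1) << (2 * bit);
        offset |= ((y >> bit) & 1) << (2 * bit + 1);
    }
    return offset;
}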
@ -1,60 +0,0 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"
#include "common/vector_math.h"
#include "video_core/regs_texturing.h"

namespace Pica {
namespace Texture {

/// Returns the byte size of a 8*8 tile of the specified texture format.
size_t CalculateTileSize(TexturingRegs::TextureFormat format);

struct TextureInfo {
PAddr physical_address;
unsigned int width;
unsigned int height;
ptrdiff_t stride;
TexturingRegs::TextureFormat format;

static TextureInfo FromPicaRegister(const TexturingRegs::TextureConfig& config,
const TexturingRegs::TextureFormat& format);

/// Calculates stride from format and width, assuming that the entire texture is contiguous.
void SetDefaultStride() {
stride = CalculateTileSize(format) * (width / 8);
}
};

/**
* Lookup texel located at the given coordinates and return an RGBA vector of its color.
* @param source Source pointer to read data from
* @param x,y Texture coordinates to read from
* @param info TextureInfo object describing the texture setup
* @param disable_alpha This is used for debug widgets which use this method to display textures
* without providing a good way to visualize alpha by themselves. If true, this will return 255 for
* the alpha component, and either drop the information entirely or store it in an "unused" color
* channel.
* @todo Eventually we should get rid of the disable_alpha parameter.
*/
Math::Vec4<u8> LookupTexture(const u8* source, unsigned int x, unsigned int y,
const TextureInfo& info, bool disable_alpha = false);

/**
* Looks up a texel from a single 8x8 texture tile.
*
* @param source Pointer to the beginning of the tile.
* @param x, y In-tile coordinates to read from. Must be < 8.
* @param info TextureInfo describing the texture format.
* @param disable_alpha Used for debugging. Sets the result alpha to 255 and either discards the
* real alpha or inserts it in an otherwise unused channel.
*/
Math::Vec4<u8> LookupTexelInTile(const u8* source, unsigned int x, unsigned int y,
const TextureInfo& info, bool disable_alpha);

} // namespace Texture
} // namespace Pica
@ -1,160 +0,0 @@
#include <memory>
#include <boost/range/algorithm/fill.hpp>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/bit_field.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/vector_math.h"
#include "core/memory.h"
#include "video_core/debug_utils/debug_utils.h"
#include "video_core/pica_state.h"
#include "video_core/pica_types.h"
#include "video_core/regs_pipeline.h"
#include "video_core/shader/shader.h"
#include "video_core/vertex_loader.h"

namespace Pica {

void VertexLoader::Setup(const PipelineRegs& regs) {
ASSERT_MSG(!is_setup, "VertexLoader is not intended to be setup more than once.");

const auto& attribute_config = regs.vertex_attributes;
num_total_attributes = attribute_config.GetNumTotalAttributes();

boost::fill(vertex_attribute_sources, 0xdeadbeef);

for (int i = 0; i < 16; i++) {
vertex_attribute_is_default[i] = attribute_config.IsDefaultAttribute(i);
}

// Setup attribute data from loaders
for (int loader = 0; loader < 12; ++loader) {
const auto& loader_config = attribute_config.attribute_loaders[loader];

u32 offset = 0;

// TODO: What happens if a loader overwrites a previous one's data?
for (unsigned component = 0; component < loader_config.component_count; ++component) {
if (component >= 12) {
LOG_ERROR(HW_GPU,
"Overflow in the vertex attribute loader %u trying to load component %u",
loader, component);
continue;
}

u32 attribute_index = loader_config.GetComponent(component);
if (attribute_index < 12) {
offset = Common::AlignUp(offset,
attribute_config.GetElementSizeInBytes(attribute_index));
vertex_attribute_sources[attribute_index] = loader_config.data_offset + offset;
vertex_attribute_strides[attribute_index] =
static_cast<u32>(loader_config.byte_count);
vertex_attribute_formats[attribute_index] =
attribute_config.GetFormat(attribute_index);
vertex_attribute_elements[attribute_index] =
attribute_config.GetNumElements(attribute_index);
offset += attribute_config.GetStride(attribute_index);
} else if (attribute_index < 16) {
// Attribute ids 12, 13, 14 and 15 signify 4, 8, 12 and 16-byte paddings,
// respectively
offset = Common::AlignUp(offset, 4);
offset += (attribute_index - 11) * 4;
} else {
UNREACHABLE(); // This is truly unreachable due to the number of bits for each
// component
}
}
}

is_setup = true;
}

void VertexLoader::LoadVertex(u32 base_address, int index, int vertex,
Shader::AttributeBuffer& input,
DebugUtils::MemoryAccessTracker& memory_accesses) {
ASSERT_MSG(is_setup, "A VertexLoader needs to be setup before loading vertices.");

for (int i = 0; i < num_total_attributes; ++i) {
if (vertex_attribute_elements[i] != 0) {
// Load per-vertex data from the loader arrays
u32 source_addr =
base_address + vertex_attribute_sources[i] + vertex_attribute_strides[i] * vertex;

if (g_debug_context && Pica::g_debug_context->recorder) {
memory_accesses.AddAccess(
source_addr,
vertex_attribute_elements[i] *
((vertex_attribute_formats[i] == PipelineRegs::VertexAttributeFormat::FLOAT)
? 4
: (vertex_attribute_formats[i] ==
PipelineRegs::VertexAttributeFormat::SHORT)
? 2
: 1));
}

switch (vertex_attribute_formats[i]) {
case PipelineRegs::VertexAttributeFormat::BYTE: {
const s8* srcdata =
reinterpret_cast<const s8*>(Memory::GetPhysicalPointer(source_addr));
for (unsigned int comp = 0; comp < vertex_attribute_elements[i]; ++comp) {
input.attr[i][comp] = float24::FromFloat32(srcdata[comp]);
}
break;
}
case PipelineRegs::VertexAttributeFormat::UBYTE: {
const u8* srcdata =
reinterpret_cast<const u8*>(Memory::GetPhysicalPointer(source_addr));
for (unsigned int comp = 0; comp < vertex_attribute_elements[i]; ++comp) {
input.attr[i][comp] = float24::FromFloat32(srcdata[comp]);
}
break;
}
case PipelineRegs::VertexAttributeFormat::SHORT: {
const s16* srcdata =
reinterpret_cast<const s16*>(Memory::GetPhysicalPointer(source_addr));
for (unsigned int comp = 0; comp < vertex_attribute_elements[i]; ++comp) {
input.attr[i][comp] = float24::FromFloat32(srcdata[comp]);
}
break;
}
case PipelineRegs::VertexAttributeFormat::FLOAT: {
const float* srcdata =
reinterpret_cast<const float*>(Memory::GetPhysicalPointer(source_addr));
for (unsigned int comp = 0; comp < vertex_attribute_elements[i]; ++comp) {
input.attr[i][comp] = float24::FromFloat32(srcdata[comp]);
}
break;
}
}

// Default attribute values set if array elements have < 4 components. This
// is *not* carried over from the default attribute settings even if they're
// enabled for this attribute.
for (unsigned int comp = vertex_attribute_elements[i]; comp < 4; ++comp) {
input.attr[i][comp] =
comp == 3 ? float24::FromFloat32(1.0f) : float24::FromFloat32(0.0f);
}

LOG_TRACE(HW_GPU, "Loaded %d components of attribute %x for vertex %x (index %x) from "
"0x%08x + 0x%08x + 0x%04x: %f %f %f %f",
vertex_attribute_elements[i], i, vertex, index, base_address,
vertex_attribute_sources[i], vertex_attribute_strides[i] * vertex,
input.attr[i][0].ToFloat32(), input.attr[i][1].ToFloat32(),
input.attr[i][2].ToFloat32(), input.attr[i][3].ToFloat32());
} else if (vertex_attribute_is_default[i]) {
// Load the default attribute if we're configured to do so
input.attr[i] = g_state.input_default_attributes.attr[i];
LOG_TRACE(HW_GPU,
"Loaded default attribute %x for vertex %x (index %x): (%f, %f, %f, %f)", i,
vertex, index, input.attr[i][0].ToFloat32(), input.attr[i][1].ToFloat32(),
input.attr[i][2].ToFloat32(), input.attr[i][3].ToFloat32());
} else {
// TODO(yuriks): In this case, no data gets loaded and the vertex
// remains with the last value it had. This isn't currently maintained
// as global state, however, and so won't work in Citra yet.
}
}
}

} // namespace Pica
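For reference, a standalone sketch of the padding rule applied in VertexLoader::Setup above, where attribute ids 12 through 15 denote 4, 8, 12 and 16-byte paddings rather than real attributes (the helper name is illustrative):

#include <cstdint>

// Mirrors the "offset += (attribute_index - 11) * 4" step: ids 12-15 skip
// 4, 8, 12 or 16 bytes in the loader stream; other ids contribute no padding.
static std::uint32_t PaddingBytesForAttributeId(std::uint32_t attribute_index) {
    return (attribute_index >= 12 && attribute_index <= 15) ? (attribute_index - 11) * 4 : 0;
}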
@ -1,42 +0,0 @@
#pragma once

#include <array>
#include "common/common_types.h"
#include "video_core/regs_pipeline.h"

namespace Pica {

namespace DebugUtils {
class MemoryAccessTracker;
}

namespace Shader {
struct AttributeBuffer;
}

class VertexLoader {
public:
VertexLoader() = default;
explicit VertexLoader(const PipelineRegs& regs) {
Setup(regs);
}

void Setup(const PipelineRegs& regs);
void LoadVertex(u32 base_address, int index, int vertex, Shader::AttributeBuffer& input,
DebugUtils::MemoryAccessTracker& memory_accesses);

int GetNumTotalAttributes() const {
return num_total_attributes;
}

private:
std::array<u32, 16> vertex_attribute_sources;
std::array<u32, 16> vertex_attribute_strides{};
std::array<PipelineRegs::VertexAttributeFormat, 16> vertex_attribute_formats;
std::array<u32, 16> vertex_attribute_elements{};
std::array<bool, 16> vertex_attribute_is_default;
int num_total_attributes = 0;
bool is_setup = false;
};

} // namespace Pica
@ -4,7 +4,6 @@
#include <memory>
#include "common/logging/log.h"
#include "video_core/pica.h"
#include "video_core/renderer_base.h"
#include "video_core/renderer_opengl/renderer_opengl.h"
#include "video_core/video_core.h"
@ -24,8 +23,6 @@ std::atomic<bool> g_toggle_framelimit_enabled;
/// Initialize the video core
bool Init(EmuWindow* emu_window) {
Pica::Init();

g_emu_window = emu_window;
g_renderer = std::make_unique<RendererOpenGL>();
g_renderer->SetWindow(g_emu_window);
@ -40,8 +37,6 @@ bool Init(EmuWindow* emu_window) {
/// Shutdown the video core
void Shutdown() {
Pica::Shutdown();

g_renderer.reset();

LOG_DEBUG(Render, "shutdown OK");