citra/src/video_core/swrasterizer/rasterizer.cpp


// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <array>
#include <cmath>
#include <tuple>
#include "common/assert.h"
#include "common/bit_field.h"
#include "common/color.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/math_util.h"
#include "common/microprofile.h"
#include "common/quaternion.h"
#include "common/vector_math.h"
#include "core/hw/gpu.h"
#include "core/memory.h"
#include "video_core/debug_utils/debug_utils.h"
#include "video_core/pica_state.h"
#include "video_core/pica_types.h"
#include "video_core/regs_framebuffer.h"
#include "video_core/regs_rasterizer.h"
#include "video_core/regs_texturing.h"
#include "video_core/shader/shader.h"
#include "video_core/swrasterizer/framebuffer.h"
#include "video_core/swrasterizer/proctex.h"
#include "video_core/swrasterizer/rasterizer.h"
#include "video_core/swrasterizer/texturing.h"
#include "video_core/texture/texture_decode.h"
#include "video_core/utils.h"
namespace Pica {
namespace Rasterizer {
// NOTE: Assuming that rasterizer coordinates are 12.4 fixed-point values
struct Fix12P4 {
Fix12P4() {}
Fix12P4(u16 val) : val(val) {}
static u16 FracMask() {
return 0xF;
}
static u16 IntMask() {
return (u16)~0xF;
}
operator u16() const {
return val;
}
bool operator<(const Fix12P4& oth) const {
return (u16)*this < (u16)oth;
}
private:
u16 val;
};
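// Example: 1.5 pixels is encoded as Fix12P4(0x18), i.e. 1.5 * 16; the low 4 bits (FracMask)
// hold the fractional part in 1/16ths of a pixel and the upper 12 bits (IntMask) hold the
// integer part.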
/**
* Calculate signed area of the triangle spanned by the three argument vertices.
* The sign denotes an orientation.
*
* @todo define orientation concretely.
*/
static int SignedArea(const Math::Vec2<Fix12P4>& vtx1, const Math::Vec2<Fix12P4>& vtx2,
const Math::Vec2<Fix12P4>& vtx3) {
const auto vec1 = Math::MakeVec(vtx2 - vtx1, 0);
const auto vec2 = Math::MakeVec(vtx3 - vtx1, 0);
// TODO: There is a very small chance this will overflow for sizeof(int) == 4
return Math::Cross(vec1, vec2).z;
}
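// Note: the returned value is the z component of the cross product, i.e. twice the signed area
// of the triangle in raw 12.4 units; with the conventions used by ProcessTriangleInternal below,
// a positive value corresponds to a counter-clockwise wound triangle.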
/// Convert a 3D cube map coordinate vector to 2D texture coordinates on the selected face,
/// along with the physical address of that face
static std::tuple<float24, float24, PAddr> ConvertCubeCoord(float24 u, float24 v, float24 w,
const TexturingRegs& regs) {
const float abs_u = std::abs(u.ToFloat32());
const float abs_v = std::abs(v.ToFloat32());
const float abs_w = std::abs(w.ToFloat32());
float24 x, y, z;
PAddr addr;
if (abs_u > abs_v && abs_u > abs_w) {
if (u > float24::FromFloat32(0)) {
addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::PositiveX);
y = -v;
} else {
addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::NegativeX);
y = v;
}
x = -w;
z = u;
} else if (abs_v > abs_w) {
if (v > float24::FromFloat32(0)) {
addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::PositiveY);
x = u;
} else {
addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::NegativeY);
x = -u;
}
y = w;
z = v;
} else {
if (w > float24::FromFloat32(0)) {
addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::PositiveZ);
y = -v;
} else {
addr = regs.GetCubePhysicalAddress(TexturingRegs::CubeFace::NegativeZ);
y = v;
}
x = u;
z = w;
}
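// Project onto the selected face: divide the two in-plane coordinates by the major-axis
// component (stored in z) and remap the result from [-1, 1] to the [0, 1] texture range.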
const float24 half = float24::FromFloat32(0.5f);
return std::make_tuple(x / z * half + half, y / z * half + half, addr);
}
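/**
 * Sample a 256-entry lighting LUT with linear interpolation: each entry stores a base value and
 * a difference term, and the result is value + diff * delta, where delta is the fractional
 * position between this entry and the next.
 */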
float LookupLightingLut(size_t lut_index, u8 index, float delta) {
ASSERT_MSG(lut_index < g_state.lighting.luts.size(), "Out of range lut");
ASSERT_MSG(index < g_state.lighting.luts[0].size(), "Out of range index");
float lut_value = g_state.lighting.luts[lut_index][index].ToFloat();
float lut_diff = g_state.lighting.luts[lut_index][index].DiffToFloat();
return lut_value + lut_diff * delta;
}
std::tuple<Math::Vec4<u8>, Math::Vec4<u8>> ComputeFragmentsColors(const Math::Quaternion<float>& normquat, const Math::Vec3<float>& view) {
const auto& lighting = g_state.regs.lighting;
if (lighting.disable)
return {{}, {}};
// TODO(Subv): Bump mapping
Math::Vec3<float> surface_normal = {0.0f, 0.0f, 1.0f};
if (lighting.config0.bump_mode != LightingRegs::LightingBumpMode::None) {
LOG_CRITICAL(HW_GPU, "unimplemented bump mapping");
UNIMPLEMENTED();
}
// Use the normalized quaternion when performing the rotation
auto normal = Math::QuaternionRotate(normquat.Normalized(), surface_normal);
Math::Vec3<float> light_vector = {};
Math::Vec4<float> diffuse_sum = {0.f, 0.f, 0.f, 1.f};
Math::Vec4<float> specular_sum = {0.f, 0.f, 0.f, 1.f};
Math::Vec3<float> refl_value = {};
for (unsigned light_index = 0; light_index <= lighting.max_light_index; ++light_index) {
unsigned num = lighting.light_enable.GetNum(light_index);
const auto& light_config = g_state.regs.lighting.light[num];
Math::Vec3<float> position = {float16::FromRaw(light_config.x).ToFloat32(), float16::FromRaw(light_config.y).ToFloat32(), float16::FromRaw(light_config.z).ToFloat32()};
if (light_config.config.directional)
light_vector = position;
else
light_vector = position + view;
light_vector.Normalize();
auto LV_N = Math::Dot(light_vector, normal);
auto dot_product = LV_N;
if (light_config.config.two_sided_diffuse)
dot_product = std::abs(dot_product);
else
dot_product = std::max(dot_product, 0.0f);
float dist_atten = 1.0f;
if (!lighting.IsDistAttenDisabled(num)) {
auto distance = (-view - position).Length();
float scale = Pica::float20::FromRaw(light_config.dist_atten_scale).ToFloat32();
float bias = Pica::float20::FromRaw(light_config.dist_atten_bias).ToFloat32();
size_t lut = static_cast<size_t>(LightingRegs::LightingSampler::DistanceAttenuation) + num;
float sample_loc = scale * distance + bias;
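// sample_loc nominally lies in [0, 1]; the integer part of sample_loc * 256 selects the LUT
// entry and the fractional part interpolates towards the next entry.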
u8 lutindex = static_cast<u8>(MathUtil::Clamp(std::floor(sample_loc * 256.f), 0.0f, 255.0f));
float delta = sample_loc * 256 - lutindex;
dist_atten = LookupLightingLut(lut, lutindex, delta);
}
float clamp_highlights = 1.0f;
if (lighting.config0.clamp_highlights) {
if (LV_N <= 0.f)
clamp_highlights = 0.f;
else
clamp_highlights = 1.f;
}
auto GetLutIndex = [&](unsigned num, LightingRegs::LightingLutInput input,
bool abs) -> std::tuple<u8, float> {
Math::Vec3<float> norm_view = view.Normalized();
Math::Vec3<float> half_angle = (norm_view + light_vector).Normalized();
float result = 0.0f;
switch (input) {
case LightingRegs::LightingLutInput::NH:
result = Math::Dot(normal, half_angle);
break;
case LightingRegs::LightingLutInput::VH:
result = Math::Dot(norm_view, half_angle);
break;
case LightingRegs::LightingLutInput::NV:
result = Math::Dot(normal, norm_view);
break;
case LightingRegs::LightingLutInput::LN:
result = Math::Dot(light_vector, normal);
break;
default:
LOG_CRITICAL(HW_GPU, "Unknown lighting LUT input %d\n", (int)input);
UNIMPLEMENTED();
result = 0.f;
}
if (abs) {
if (light_config.config.two_sided_diffuse)
result = std::abs(result);
else
result = std::max(result, 0.0f);
u8 lutindex = static_cast<u8>(MathUtil::Clamp(std::floor(result * 256.f), 0.0f, 255.0f));
float delta = result * 256 - lutindex;
return { lutindex, delta };
} else {
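// Inputs that can be negative effectively use a signed 1.7 fixed-point index; masking the
// two's-complement value to 8 bits maps negative inputs to the upper half of the 256-entry LUT.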
float flr = std::floor(result * 128.f);
s8 tmpi = static_cast<s8>(MathUtil::Clamp(flr, -128.0f, 127.0f));
float delta = result * 128.f - tmpi;
return { static_cast<u8>(tmpi & 0xFF), delta };
}
};
// Specular 0 component
float d0_lut_value = 1.0f;
if (lighting.config1.disable_lut_d0 == 0 &&
LightingRegs::IsLightingSamplerSupported(
lighting.config0.config, LightingRegs::LightingSampler::Distribution0)) {
// Lookup specular "distribution 0" LUT value
u8 index;
float delta;
std::tie(index, delta) = GetLutIndex(num, lighting.lut_input.d0.Value(), lighting.abs_lut_input.disable_d0 == 0);
float scale = lighting.lut_scale.GetScale(lighting.lut_scale.d0);
d0_lut_value = scale * LookupLightingLut(static_cast<size_t>(LightingRegs::LightingSampler::Distribution0), index, delta);
}
Math::Vec3<float> specular_0 = d0_lut_value * light_config.specular_0.ToVec3f();
// If enabled, lookup ReflectRed value, otherwise, 1.0 is used
if (lighting.config1.disable_lut_rr == 0 &&
LightingRegs::IsLightingSamplerSupported(lighting.config0.config,
LightingRegs::LightingSampler::ReflectRed)) {
u8 index;
float delta;
std::tie(index, delta) = GetLutIndex(num, lighting.lut_input.rr, lighting.abs_lut_input.disable_rr == 0);
float scale = lighting.lut_scale.GetScale(lighting.lut_scale.rr);
refl_value.x = scale * LookupLightingLut(static_cast<size_t>(LightingRegs::LightingSampler::ReflectRed), index, delta);
} else {
refl_value.x = 1.0f;
}
// If enabled, lookup ReflectGreen value, otherwise, ReflectRed value is used
if (lighting.config1.disable_lut_rg == 0 &&
LightingRegs::IsLightingSamplerSupported(lighting.config0.config,
LightingRegs::LightingSampler::ReflectGreen)) {
u8 index;
float delta;
std::tie(index, delta) = GetLutIndex(num, lighting.lut_input.rg, lighting.abs_lut_input.disable_rg == 0);
float scale = lighting.lut_scale.GetScale(lighting.lut_scale.rg);
refl_value.y = scale * LookupLightingLut(static_cast<size_t>(LightingRegs::LightingSampler::ReflectGreen), index, delta);
} else {
refl_value.y = refl_value.x;
}
// If enabled, lookup ReflectBlue value, otherwise, ReflectRed value is used
if (lighting.config1.disable_lut_rb == 0 &&
LightingRegs::IsLightingSamplerSupported(lighting.config0.config,
LightingRegs::LightingSampler::ReflectBlue)) {
u8 index;
float delta;
std::tie(index, delta) = GetLutIndex(num, lighting.lut_input.rb, lighting.abs_lut_input.disable_rb == 0);
float scale = lighting.lut_scale.GetScale(lighting.lut_scale.rb);
refl_value.z = scale * LookupLightingLut(static_cast<size_t>(LightingRegs::LightingSampler::ReflectBlue), index, delta);
} else {
refl_value.z = refl_value.x;
}
float d1_lut_value = 1.0f;
if (lighting.config1.disable_lut_d1 == 0 &&
LightingRegs::IsLightingSamplerSupported(
lighting.config0.config, LightingRegs::LightingSampler::Distribution1)) {
// Lookup specular "distribution 1" LUT value
u8 index;
float delta;
std::tie(index, delta) = GetLutIndex(num, lighting.lut_input.d1.Value(), lighting.abs_lut_input.disable_d1 == 0);
float scale = lighting.lut_scale.GetScale(lighting.lut_scale.d1);
d1_lut_value = scale * LookupLightingLut(static_cast<size_t>(LightingRegs::LightingSampler::Distribution1), index, delta);
}
Math::Vec3<float> specular_1 = d1_lut_value * refl_value * light_config.specular_1.ToVec3f();
if (lighting.config1.disable_lut_fr == 0 &&
LightingRegs::IsLightingSamplerSupported(
lighting.config0.config, LightingRegs::LightingSampler::Fresnel)) {
// Lookup fresnel LUT value
u8 index;
float delta;
std::tie(index, delta) = GetLutIndex(num, lighting.lut_input.fr.Value(), lighting.abs_lut_input.disable_fr == 0);
float scale = lighting.lut_scale.GetScale(lighting.lut_scale.fr);
float lut_value = scale * LookupLightingLut(static_cast<size_t>(LightingRegs::LightingSampler::Fresnel), index, delta);
// Enabled for diffuse lighting alpha component
if (lighting.config0.fresnel_selector == LightingRegs::LightingFresnelSelector::PrimaryAlpha ||
lighting.config0.fresnel_selector == LightingRegs::LightingFresnelSelector::Both) {
diffuse_sum.a() *= lut_value;
}
// Enabled for the specular lighting alpha component
if (lighting.config0.fresnel_selector ==
LightingRegs::LightingFresnelSelector::SecondaryAlpha ||
lighting.config0.fresnel_selector == LightingRegs::LightingFresnelSelector::Both) {
specular_sum.a() *= lut_value;
}
}
auto diffuse = light_config.diffuse.ToVec3f() * dot_product + light_config.ambient.ToVec3f();
diffuse_sum += Math::MakeVec(diffuse * dist_atten, 0.0f);
specular_sum += Math::MakeVec((specular_0 + specular_1) * clamp_highlights * dist_atten, 0.f);
}
diffuse_sum += Math::MakeVec(lighting.global_ambient.ToVec3f(), 0.0f);
return {
Math::MakeVec<float>(MathUtil::Clamp(diffuse_sum.x, 0.0f, 1.0f) * 255,
                     MathUtil::Clamp(diffuse_sum.y, 0.0f, 1.0f) * 255,
                     MathUtil::Clamp(diffuse_sum.z, 0.0f, 1.0f) * 255,
                     MathUtil::Clamp(diffuse_sum.w, 0.0f, 1.0f) * 255)
    .Cast<u8>(),
Math::MakeVec<float>(MathUtil::Clamp(specular_sum.x, 0.0f, 1.0f) * 255,
                     MathUtil::Clamp(specular_sum.y, 0.0f, 1.0f) * 255,
                     MathUtil::Clamp(specular_sum.z, 0.0f, 1.0f) * 255,
                     MathUtil::Clamp(specular_sum.w, 0.0f, 1.0f) * 255)
    .Cast<u8>()
};
}
static bool AreQuaternionsOpposite(Math::Vec4<Pica::float24> qa, Math::Vec4<Pica::float24> qb) {
Math::Vec4f a{ qa.x.ToFloat32(), qa.y.ToFloat32(), qa.z.ToFloat32(), qa.w.ToFloat32() };
Math::Vec4f b{ qb.x.ToFloat32(), qb.y.ToFloat32(), qb.z.ToFloat32(), qb.w.ToFloat32() };
return (Math::Dot(a, b) < 0.f);
}
MICROPROFILE_DEFINE(GPU_Rasterization, "GPU", "Rasterization", MP_RGB(50, 50, 240));
/**
* Helper function for ProcessTriangle with the "reversed" flag to allow for implementing
* culling via recursion.
*/
static void ProcessTriangleInternal(const Vertex& v0, const Vertex& v1, const Vertex& v2,
bool reversed = false) {
const auto& regs = g_state.regs;
MICROPROFILE_SCOPE(GPU_Rasterization);
// vertex positions in rasterizer coordinates
static auto FloatToFix = [](float24 flt) {
// TODO: Rounding here is necessary to prevent garbage pixels at
// triangle borders. Is that the correct solution, though?
return Fix12P4(static_cast<unsigned short>(std::round(flt.ToFloat32() * 16.0f)));
};
static auto ScreenToRasterizerCoordinates = [](const Math::Vec3<float24>& vec) {
return Math::Vec3<Fix12P4>{FloatToFix(vec.x), FloatToFix(vec.y), FloatToFix(vec.z)};
};
Math::Vec3<Fix12P4> vtxpos[3]{ScreenToRasterizerCoordinates(v0.screenpos),
ScreenToRasterizerCoordinates(v1.screenpos),
ScreenToRasterizerCoordinates(v2.screenpos)};
if (regs.rasterizer.cull_mode == RasterizerRegs::CullMode::KeepAll) {
// Make sure we always end up with a triangle wound counter-clockwise
if (!reversed && SignedArea(vtxpos[0].xy(), vtxpos[1].xy(), vtxpos[2].xy()) <= 0) {
ProcessTriangleInternal(v0, v2, v1, true);
return;
}
} else {
if (!reversed && regs.rasterizer.cull_mode == RasterizerRegs::CullMode::KeepClockWise) {
// Reverse vertex order and use the CCW code path.
ProcessTriangleInternal(v0, v2, v1, true);
return;
}
// Cull away triangles which are wound clockwise.
if (SignedArea(vtxpos[0].xy(), vtxpos[1].xy(), vtxpos[2].xy()) <= 0)
return;
}
u16 min_x = std::min({vtxpos[0].x, vtxpos[1].x, vtxpos[2].x});
u16 min_y = std::min({vtxpos[0].y, vtxpos[1].y, vtxpos[2].y});
u16 max_x = std::max({vtxpos[0].x, vtxpos[1].x, vtxpos[2].x});
u16 max_y = std::max({vtxpos[0].y, vtxpos[1].y, vtxpos[2].y});
// Convert the scissor box coordinates to 12.4 fixed point
u16 scissor_x1 = (u16)(regs.rasterizer.scissor_test.x1 << 4);
u16 scissor_y1 = (u16)(regs.rasterizer.scissor_test.y1 << 4);
// x2,y2 have +1 added to cover the entire sub-pixel area
u16 scissor_x2 = (u16)((regs.rasterizer.scissor_test.x2 + 1) << 4);
u16 scissor_y2 = (u16)((regs.rasterizer.scissor_test.y2 + 1) << 4);
if (regs.rasterizer.scissor_test.mode == RasterizerRegs::ScissorMode::Include) {
// Calculate the new bounds
min_x = std::max(min_x, scissor_x1);
min_y = std::max(min_y, scissor_y1);
max_x = std::min(max_x, scissor_x2);
max_y = std::min(max_y, scissor_y2);
}
min_x &= Fix12P4::IntMask();
min_y &= Fix12P4::IntMask();
max_x = ((max_x + Fix12P4::FracMask()) & Fix12P4::IntMask());
max_y = ((max_y + Fix12P4::FracMask()) & Fix12P4::IntMask());
// Triangle filling rules: Pixels on the right-sided edge or on flat bottom edges are not
// drawn. Pixels on any other triangle border are drawn. This is implemented with three bias
// values which are added to the barycentric coordinates w0, w1 and w2, respectively.
// NOTE: These are the PSP filling rules. Not sure if the 3DS uses the same ones...
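// A bias of -1 on an edge turns the (w >= 0) coverage test below into a strict (w > 0) test
// for that edge, so pixels lying exactly on it are not drawn.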
auto IsRightSideOrFlatBottomEdge = [](const Math::Vec2<Fix12P4>& vtx,
const Math::Vec2<Fix12P4>& line1,
const Math::Vec2<Fix12P4>& line2) {
if (line1.y == line2.y) {
// just check if vertex is above us => bottom line parallel to x-axis
return vtx.y < line1.y;
} else {
// check if vertex is on our left => right side
// TODO: Not sure how likely this is to overflow
return (int)vtx.x < (int)line1.x +
((int)line2.x - (int)line1.x) * ((int)vtx.y - (int)line1.y) /
((int)line2.y - (int)line1.y);
}
};
int bias0 =
IsRightSideOrFlatBottomEdge(vtxpos[0].xy(), vtxpos[1].xy(), vtxpos[2].xy()) ? -1 : 0;
int bias1 =
IsRightSideOrFlatBottomEdge(vtxpos[1].xy(), vtxpos[2].xy(), vtxpos[0].xy()) ? -1 : 0;
int bias2 =
IsRightSideOrFlatBottomEdge(vtxpos[2].xy(), vtxpos[0].xy(), vtxpos[1].xy()) ? -1 : 0;
// Flip the quaternions if they are opposite to prevent interpolating them over the wrong direction.
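// q and -q describe the same rotation, so negating one of them keeps the per-pixel
// interpolation on the shorter arc between the two vertex orientations.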
auto v1_quat = v1.quat;
auto v2_quat = v2.quat;
if (AreQuaternionsOpposite(v0.quat, v1.quat))
v1_quat = v1_quat * float24::FromFloat32(-1.0f);
if (AreQuaternionsOpposite(v0.quat, v2.quat))
v2_quat = v2_quat * float24::FromFloat32(-1.0f);
auto w_inverse = Math::MakeVec(v0.pos.w, v1.pos.w, v2.pos.w);
auto textures = regs.texturing.GetTextures();
auto tev_stages = regs.texturing.GetTevStages();
bool stencil_action_enable =
g_state.regs.framebuffer.output_merger.stencil_test.enable &&
g_state.regs.framebuffer.framebuffer.depth_format == FramebufferRegs::DepthFormat::D24S8;
const auto stencil_test = g_state.regs.framebuffer.output_merger.stencil_test;
// Enter the rasterization loop, starting at the pixel center of the bounding box's top-left corner.
// TODO: Not sure if looping through x first might be faster
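// min/max are pixel-aligned 12.4 coordinates; adding 8 (0.5 in 12.4) samples each pixel at
// its center, and the 0x10 step advances exactly one pixel per iteration.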
for (u16 y = min_y + 8; y < max_y; y += 0x10) {
for (u16 x = min_x + 8; x < max_x; x += 0x10) {
// Do not process the pixel if it's inside the scissor box and the scissor mode is set
// to Exclude
if (regs.rasterizer.scissor_test.mode == RasterizerRegs::ScissorMode::Exclude) {
if (x >= scissor_x1 && x < scissor_x2 && y >= scissor_y1 && y < scissor_y2)
continue;
}
// Calculate the barycentric coordinates w0, w1 and w2
int w0 = bias0 + SignedArea(vtxpos[1].xy(), vtxpos[2].xy(), {x, y});
int w1 = bias1 + SignedArea(vtxpos[2].xy(), vtxpos[0].xy(), {x, y});
int w2 = bias2 + SignedArea(vtxpos[0].xy(), vtxpos[1].xy(), {x, y});
int wsum = w0 + w1 + w2;
// If current pixel is not covered by the current primitive
if (w0 < 0 || w1 < 0 || w2 < 0)
continue;
auto barycentric_coordinates =
Math::MakeVec(float24::FromFloat32(static_cast<float>(w0)),
float24::FromFloat32(static_cast<float>(w1)),
float24::FromFloat32(static_cast<float>(w2)));
float24 interpolated_w_inverse =
float24::FromFloat32(1.0f) / Math::Dot(w_inverse, barycentric_coordinates);
// interpolated_z = z / w
float interpolated_z_over_w =
(v0.screenpos[2].ToFloat32() * w0 + v1.screenpos[2].ToFloat32() * w1 +
v2.screenpos[2].ToFloat32() * w2) /
wsum;
// Not fully accurate. About 3 bits of precision are missing.
// Z-Buffer (z / w * scale + offset)
float depth_scale = float24::FromRaw(regs.rasterizer.viewport_depth_range).ToFloat32();
float depth_offset =
float24::FromRaw(regs.rasterizer.viewport_depth_near_plane).ToFloat32();
float depth = interpolated_z_over_w * depth_scale + depth_offset;
// Potentially switch to W-Buffer
if (regs.rasterizer.depthmap_enable ==
Pica::RasterizerRegs::DepthBuffering::WBuffering) {
// W-Buffer (z * scale + w * offset = (z / w * scale + offset) * w)
depth *= interpolated_w_inverse.ToFloat32() * wsum;
}
// Clamp the result
depth = MathUtil::Clamp(depth, 0.0f, 1.0f);
// Perspective correct attribute interpolation:
// Attribute values cannot be calculated by simple linear interpolation since
// they are not linear in screen space. For example, when interpolating a
// texture coordinate across two vertices, something simple like
// u = (u0*w0 + u1*w1)/(w0+w1)
// will not work. However, the attribute value divided by the
// clip-space w-coordinate (u/w) and the inverse w-coordinate (1/w) are linear
// in screenspace. Hence, we can linearly interpolate these two independently and
// calculate the interpolated attribute by dividing the results.
// I.e.
// u_over_w = ((u0/v0.pos.w)*w0 + (u1/v1.pos.w)*w1)/(w0+w1)
// one_over_w = (( 1/v0.pos.w)*w0 + ( 1/v1.pos.w)*w1)/(w0+w1)
// u = u_over_w / one_over_w
//
// The generalization to three vertices is straightforward in barycentric coordinates.
auto GetInterpolatedAttribute = [&](float24 attr0, float24 attr1, float24 attr2) {
auto attr_over_w = Math::MakeVec(attr0, attr1, attr2);
float24 interpolated_attr_over_w = Math::Dot(attr_over_w, barycentric_coordinates);
return interpolated_attr_over_w * interpolated_w_inverse;
};
Math::Vec4<u8> primary_color{
(u8)(
GetInterpolatedAttribute(v0.color.r(), v1.color.r(), v2.color.r()).ToFloat32() *
255),
(u8)(
GetInterpolatedAttribute(v0.color.g(), v1.color.g(), v2.color.g()).ToFloat32() *
255),
(u8)(
GetInterpolatedAttribute(v0.color.b(), v1.color.b(), v2.color.b()).ToFloat32() *
255),
(u8)(
GetInterpolatedAttribute(v0.color.a(), v1.color.a(), v2.color.a()).ToFloat32() *
255),
};
Math::Quaternion<float> normquat{
{
GetInterpolatedAttribute(v0.quat.x, v1_quat.x, v2_quat.x).ToFloat32(),
GetInterpolatedAttribute(v0.quat.y, v1_quat.y, v2_quat.y).ToFloat32(),
GetInterpolatedAttribute(v0.quat.z, v1_quat.z, v2_quat.z).ToFloat32()
},
GetInterpolatedAttribute(v0.quat.w, v1_quat.w, v2_quat.w).ToFloat32(),
};
Math::Vec3<float> fragment_position{
GetInterpolatedAttribute(v0.view.x, v1.view.x, v2.view.x).ToFloat32(),
GetInterpolatedAttribute(v0.view.y, v1.view.y, v2.view.y).ToFloat32(),
GetInterpolatedAttribute(v0.view.z, v1.view.z, v2.view.z).ToFloat32()
};
Math::Vec2<float24> uv[3];
uv[0].u() = GetInterpolatedAttribute(v0.tc0.u(), v1.tc0.u(), v2.tc0.u());
uv[0].v() = GetInterpolatedAttribute(v0.tc0.v(), v1.tc0.v(), v2.tc0.v());
uv[1].u() = GetInterpolatedAttribute(v0.tc1.u(), v1.tc1.u(), v2.tc1.u());
uv[1].v() = GetInterpolatedAttribute(v0.tc1.v(), v1.tc1.v(), v2.tc1.v());
uv[2].u() = GetInterpolatedAttribute(v0.tc2.u(), v1.tc2.u(), v2.tc2.u());
uv[2].v() = GetInterpolatedAttribute(v0.tc2.v(), v1.tc2.v(), v2.tc2.v());
Math::Vec4<u8> texture_color[4]{};
for (int i = 0; i < 3; ++i) {
const auto& texture = textures[i];
if (!texture.enabled)
continue;
DEBUG_ASSERT(0 != texture.config.address);
int coordinate_i =
(i == 2 && regs.texturing.main_config.texture2_use_coord1) ? 1 : i;
float24 u = uv[coordinate_i].u();
float24 v = uv[coordinate_i].v();
// Only unit 0 respects the texturing type (according to 3DBrew)
// TODO: Refactor so cubemaps and shadowmaps can be handled
PAddr texture_address = texture.config.GetPhysicalAddress();
if (i == 0) {
switch (texture.config.type) {
case TexturingRegs::TextureConfig::Texture2D:
break;
case TexturingRegs::TextureConfig::TextureCube: {
auto w = GetInterpolatedAttribute(v0.tc0_w, v1.tc0_w, v2.tc0_w);
std::tie(u, v, texture_address) = ConvertCubeCoord(u, v, w, regs.texturing);
break;
}
case TexturingRegs::TextureConfig::Projection2D: {
auto tc0_w = GetInterpolatedAttribute(v0.tc0_w, v1.tc0_w, v2.tc0_w);
u /= tc0_w;
v /= tc0_w;
break;
}
default:
// TODO: Change to LOG_ERROR when more types are handled.
LOG_DEBUG(HW_GPU, "Unhandled texture type %x", (int)texture.config.type);
UNIMPLEMENTED();
break;
}
}
int s = (int)(u * float24::FromFloat32(static_cast<float>(texture.config.width)))
.ToFloat32();
int t = (int)(v * float24::FromFloat32(static_cast<float>(texture.config.height)))
.ToFloat32();
bool use_border_s = false;
bool use_border_t = false;
if (texture.config.wrap_s == TexturingRegs::TextureConfig::ClampToBorder) {
use_border_s = s < 0 || s >= static_cast<int>(texture.config.width);
} else if (texture.config.wrap_s == TexturingRegs::TextureConfig::ClampToBorder2) {
use_border_s = s >= static_cast<int>(texture.config.width);
}
if (texture.config.wrap_t == TexturingRegs::TextureConfig::ClampToBorder) {
use_border_t = t < 0 || t >= static_cast<int>(texture.config.height);
} else if (texture.config.wrap_t == TexturingRegs::TextureConfig::ClampToBorder2) {
use_border_t = t >= static_cast<int>(texture.config.height);
}
if (use_border_s || use_border_t) {
auto border_color = texture.config.border_color;
texture_color[i] = {border_color.r, border_color.g, border_color.b,
border_color.a};
} else {
// Textures are laid out from bottom to top, hence we invert the t coordinate.
// NOTE: This may not be the right place for the inversion.
// TODO: Check if this applies to ETC textures, too.
s = GetWrappedTexCoord(texture.config.wrap_s, s, texture.config.width);
t = texture.config.height - 1 -
GetWrappedTexCoord(texture.config.wrap_t, t, texture.config.height);
const u8* texture_data = Memory::GetPhysicalPointer(texture_address);
auto info =
Texture::TextureInfo::FromPicaRegister(texture.config, texture.format);
// TODO: Apply the min and mag filters to the texture
texture_color[i] = Texture::LookupTexture(texture_data, s, t, info);
#if PICA_DUMP_TEXTURES
DebugUtils::DumpTexture(texture.config, texture_data);
#endif
}
}
// sample procedural texture
if (regs.texturing.main_config.texture3_enable) {
const auto& proctex_uv = uv[regs.texturing.main_config.texture3_coordinates];
texture_color[3] = ProcTex(proctex_uv.u().ToFloat32(), proctex_uv.v().ToFloat32(),
g_state.regs.texturing, g_state.proctex);
}
// Texture environment - consists of 6 stages of color and alpha combining.
//
// Color combiners take three input color values from some source (e.g. interpolated
// vertex color, texture color, previous stage, etc), perform some very simple
// operations on each of them (e.g. inversion) and then calculate the output color
// with some basic arithmetic. Alpha combiners can be configured separately but work
// analogously.
Math::Vec4<u8> combiner_output;
Math::Vec4<u8> combiner_buffer = {0, 0, 0, 0};
Math::Vec4<u8> next_combiner_buffer = {
regs.texturing.tev_combiner_buffer_color.r,
regs.texturing.tev_combiner_buffer_color.g,
regs.texturing.tev_combiner_buffer_color.b,
regs.texturing.tev_combiner_buffer_color.a,
};
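// combiner_buffer is what a stage reads through Source::PreviousBuffer, while
// next_combiner_buffer collects the outputs of stages configured to update the buffer; the
// copy at the end of each stage iteration delays those updates so they only become visible to
// later stages.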
Math::Vec4<u8> primary_fragment_color;
Math::Vec4<u8> secondary_fragment_color;
std::tie(primary_fragment_color, secondary_fragment_color) = ComputeFragmentsColors(normquat, fragment_position);
for (unsigned tev_stage_index = 0; tev_stage_index < tev_stages.size();
++tev_stage_index) {
const auto& tev_stage = tev_stages[tev_stage_index];
using Source = TexturingRegs::TevStageConfig::Source;
auto GetSource = [&](Source source) -> Math::Vec4<u8> {
switch (source) {
case Source::PrimaryColor:
return primary_color;
case Source::PrimaryFragmentColor:
return primary_fragment_color;
case Source::SecondaryFragmentColor:
return secondary_fragment_color;
case Source::Texture0:
return texture_color[0];
case Source::Texture1:
return texture_color[1];
case Source::Texture2:
return texture_color[2];
case Source::Texture3:
return texture_color[3];
case Source::PreviousBuffer:
return combiner_buffer;
case Source::Constant:
return {tev_stage.const_r, tev_stage.const_g, tev_stage.const_b,
tev_stage.const_a};
case Source::Previous:
return combiner_output;
default:
LOG_ERROR(HW_GPU, "Unknown color combiner source %d", (int)source);
UNIMPLEMENTED();
return {0, 0, 0, 0};
}
};
// color combiner
// NOTE: Not sure if the alpha combiner might use the color output of the previous
// stage as input. Hence, we currently don't directly write the result to
// combiner_output.rgb(), but instead store it in a temporary variable until
// alpha combining has been done.
Math::Vec3<u8> color_result[3] = {
GetColorModifier(tev_stage.color_modifier1, GetSource(tev_stage.color_source1)),
GetColorModifier(tev_stage.color_modifier2, GetSource(tev_stage.color_source2)),
GetColorModifier(tev_stage.color_modifier3, GetSource(tev_stage.color_source3)),
};
auto color_output = ColorCombine(tev_stage.color_op, color_result);
u8 alpha_output;
if (tev_stage.color_op == TexturingRegs::TevStageConfig::Operation::Dot3_RGBA) {
// The result of the Dot3_RGBA operation is also placed in the alpha component
alpha_output = color_output.x;
} else {
// alpha combiner
std::array<u8, 3> alpha_result = {{
GetAlphaModifier(tev_stage.alpha_modifier1,
GetSource(tev_stage.alpha_source1)),
GetAlphaModifier(tev_stage.alpha_modifier2,
GetSource(tev_stage.alpha_source2)),
GetAlphaModifier(tev_stage.alpha_modifier3,
GetSource(tev_stage.alpha_source3)),
}};
alpha_output = AlphaCombine(tev_stage.alpha_op, alpha_result);
}
combiner_output[0] =
std::min((unsigned)255, color_output.r() * tev_stage.GetColorMultiplier());
combiner_output[1] =
std::min((unsigned)255, color_output.g() * tev_stage.GetColorMultiplier());
combiner_output[2] =
std::min((unsigned)255, color_output.b() * tev_stage.GetColorMultiplier());
combiner_output[3] =
std::min((unsigned)255, alpha_output * tev_stage.GetAlphaMultiplier());
combiner_buffer = next_combiner_buffer;
if (regs.texturing.tev_combiner_buffer_input.TevStageUpdatesCombinerBufferColor(
tev_stage_index)) {
next_combiner_buffer.r() = combiner_output.r();
next_combiner_buffer.g() = combiner_output.g();
next_combiner_buffer.b() = combiner_output.b();
}
if (regs.texturing.tev_combiner_buffer_input.TevStageUpdatesCombinerBufferAlpha(
tev_stage_index)) {
next_combiner_buffer.a() = combiner_output.a();
}
}
const auto& output_merger = regs.framebuffer.output_merger;
// TODO: Does alpha testing happen before or after stencil?
if (output_merger.alpha_test.enable) {
bool pass = false;
switch (output_merger.alpha_test.func) {
case FramebufferRegs::CompareFunc::Never:
pass = false;
break;
case FramebufferRegs::CompareFunc::Always:
pass = true;
break;
case FramebufferRegs::CompareFunc::Equal:
pass = combiner_output.a() == output_merger.alpha_test.ref;
break;
case FramebufferRegs::CompareFunc::NotEqual:
pass = combiner_output.a() != output_merger.alpha_test.ref;
break;
case FramebufferRegs::CompareFunc::LessThan:
pass = combiner_output.a() < output_merger.alpha_test.ref;
break;
case FramebufferRegs::CompareFunc::LessThanOrEqual:
pass = combiner_output.a() <= output_merger.alpha_test.ref;
break;
case FramebufferRegs::CompareFunc::GreaterThan:
pass = combiner_output.a() > output_merger.alpha_test.ref;
break;
case FramebufferRegs::CompareFunc::GreaterThanOrEqual:
pass = combiner_output.a() >= output_merger.alpha_test.ref;
break;
}
if (!pass)
continue;
}
// Apply fog combiner
// Not fully accurate. We'd have to know what data type is used to
// store the depth etc. Using float for now until we know more
// about Pica datatypes
if (regs.texturing.fog_mode == TexturingRegs::FogMode::Fog) {
const Math::Vec3<u8> fog_color = {
static_cast<u8>(regs.texturing.fog_color.r.Value()),
static_cast<u8>(regs.texturing.fog_color.g.Value()),
static_cast<u8>(regs.texturing.fog_color.b.Value()),
};
// Get index into fog LUT
float fog_index;
if (g_state.regs.texturing.fog_flip) {
fog_index = (1.0f - depth) * 128.0f;
} else {
fog_index = depth * 128.0f;
}
// Generate clamped fog factor from LUT for given fog index
float fog_i = MathUtil::Clamp(floorf(fog_index), 0.0f, 127.0f);
float fog_f = fog_index - fog_i;
const auto& fog_lut_entry = g_state.fog.lut[static_cast<unsigned int>(fog_i)];
float fog_factor = fog_lut_entry.ToFloat() + fog_lut_entry.DiffToFloat() * fog_f;
fog_factor = MathUtil::Clamp(fog_factor, 0.0f, 1.0f);
// Blend the fog
for (unsigned i = 0; i < 3; i++) {
combiner_output[i] = static_cast<u8>(fog_factor * combiner_output[i] +
(1.0f - fog_factor) * fog_color[i]);
}
}
u8 old_stencil = 0;
auto UpdateStencil = [stencil_test, x, y,
&old_stencil](Pica::FramebufferRegs::StencilAction action) {
u8 new_stencil =
PerformStencilAction(action, old_stencil, stencil_test.reference_value);
if (g_state.regs.framebuffer.framebuffer.allow_depth_stencil_write != 0)
SetStencil(x >> 4, y >> 4, (new_stencil & stencil_test.write_mask) |
(old_stencil & ~stencil_test.write_mask));
};
if (stencil_action_enable) {
old_stencil = GetStencil(x >> 4, y >> 4);
u8 dest = old_stencil & stencil_test.input_mask;
u8 ref = stencil_test.reference_value & stencil_test.input_mask;
bool pass = false;
switch (stencil_test.func) {
case FramebufferRegs::CompareFunc::Never:
pass = false;
break;
case FramebufferRegs::CompareFunc::Always:
pass = true;
break;
case FramebufferRegs::CompareFunc::Equal:
pass = (ref == dest);
break;
case FramebufferRegs::CompareFunc::NotEqual:
pass = (ref != dest);
break;
case FramebufferRegs::CompareFunc::LessThan:
pass = (ref < dest);
break;
case FramebufferRegs::CompareFunc::LessThanOrEqual:
pass = (ref <= dest);
break;
case FramebufferRegs::CompareFunc::GreaterThan:
pass = (ref > dest);
break;
case FramebufferRegs::CompareFunc::GreaterThanOrEqual:
pass = (ref >= dest);
break;
}
if (!pass) {
UpdateStencil(stencil_test.action_stencil_fail);
continue;
}
}
// Convert the floating-point depth in [0, 1] to an integer with the framebuffer's depth bit width
unsigned num_bits =
FramebufferRegs::DepthBitsPerPixel(regs.framebuffer.framebuffer.depth_format);
u32 z = (u32)(depth * ((1 << num_bits) - 1));
if (output_merger.depth_test_enable) {
u32 ref_z = GetDepth(x >> 4, y >> 4);
bool pass = false;
switch (output_merger.depth_test_func) {
case FramebufferRegs::CompareFunc::Never:
pass = false;
break;
case FramebufferRegs::CompareFunc::Always:
pass = true;
break;
case FramebufferRegs::CompareFunc::Equal:
pass = z == ref_z;
break;
case FramebufferRegs::CompareFunc::NotEqual:
pass = z != ref_z;
break;
case FramebufferRegs::CompareFunc::LessThan:
pass = z < ref_z;
break;
case FramebufferRegs::CompareFunc::LessThanOrEqual:
pass = z <= ref_z;
break;
case FramebufferRegs::CompareFunc::GreaterThan:
pass = z > ref_z;
break;
case FramebufferRegs::CompareFunc::GreaterThanOrEqual:
pass = z >= ref_z;
break;
}
if (!pass) {
if (stencil_action_enable)
UpdateStencil(stencil_test.action_depth_fail);
continue;
}
}
if (regs.framebuffer.framebuffer.allow_depth_stencil_write != 0 &&
output_merger.depth_write_enable) {
SetDepth(x >> 4, y >> 4, z);
}
// The stencil depth_pass action is executed even if depth testing is disabled
if (stencil_action_enable)
UpdateStencil(stencil_test.action_depth_pass);
auto dest = GetPixel(x >> 4, y >> 4);
Math::Vec4<u8> blend_output = combiner_output;
if (output_merger.alphablend_enable) {
auto params = output_merger.alpha_blending;
auto LookupFactor = [&](unsigned channel,
FramebufferRegs::BlendFactor factor) -> u8 {
DEBUG_ASSERT(channel < 4);
const Math::Vec4<u8> blend_const = {
static_cast<u8>(output_merger.blend_const.r),
static_cast<u8>(output_merger.blend_const.g),
static_cast<u8>(output_merger.blend_const.b),
static_cast<u8>(output_merger.blend_const.a),
};
switch (factor) {
case FramebufferRegs::BlendFactor::Zero:
return 0;
case FramebufferRegs::BlendFactor::One:
return 255;
case FramebufferRegs::BlendFactor::SourceColor:
return combiner_output[channel];
case FramebufferRegs::BlendFactor::OneMinusSourceColor:
return 255 - combiner_output[channel];
case FramebufferRegs::BlendFactor::DestColor:
return dest[channel];
case FramebufferRegs::BlendFactor::OneMinusDestColor:
return 255 - dest[channel];
case FramebufferRegs::BlendFactor::SourceAlpha:
return combiner_output.a();
case FramebufferRegs::BlendFactor::OneMinusSourceAlpha:
return 255 - combiner_output.a();
case FramebufferRegs::BlendFactor::DestAlpha:
return dest.a();
case FramebufferRegs::BlendFactor::OneMinusDestAlpha:
return 255 - dest.a();
case FramebufferRegs::BlendFactor::ConstantColor:
return blend_const[channel];
case FramebufferRegs::BlendFactor::OneMinusConstantColor:
return 255 - blend_const[channel];
case FramebufferRegs::BlendFactor::ConstantAlpha:
return blend_const.a();
case FramebufferRegs::BlendFactor::OneMinusConstantAlpha:
return 255 - blend_const.a();
case FramebufferRegs::BlendFactor::SourceAlphaSaturate:
// Returns 1.0 for the alpha channel
if (channel == 3)
return 255;
return std::min(combiner_output.a(), static_cast<u8>(255 - dest.a()));
default:
LOG_CRITICAL(HW_GPU, "Unknown blend factor %x", factor);
UNIMPLEMENTED();
break;
}
return combiner_output[channel];
};
auto srcfactor = Math::MakeVec(LookupFactor(0, params.factor_source_rgb),
LookupFactor(1, params.factor_source_rgb),
LookupFactor(2, params.factor_source_rgb),
LookupFactor(3, params.factor_source_a));
auto dstfactor = Math::MakeVec(LookupFactor(0, params.factor_dest_rgb),
LookupFactor(1, params.factor_dest_rgb),
LookupFactor(2, params.factor_dest_rgb),
LookupFactor(3, params.factor_dest_a));
blend_output = EvaluateBlendEquation(combiner_output, srcfactor, dest, dstfactor,
params.blend_equation_rgb);
blend_output.a() = EvaluateBlendEquation(combiner_output, srcfactor, dest,
dstfactor, params.blend_equation_a)
.a();
} else {
blend_output =
Math::MakeVec(LogicOp(combiner_output.r(), dest.r(), output_merger.logic_op),
LogicOp(combiner_output.g(), dest.g(), output_merger.logic_op),
LogicOp(combiner_output.b(), dest.b(), output_merger.logic_op),
LogicOp(combiner_output.a(), dest.a(), output_merger.logic_op));
}
const Math::Vec4<u8> result = {
output_merger.red_enable ? blend_output.r() : dest.r(),
output_merger.green_enable ? blend_output.g() : dest.g(),
output_merger.blue_enable ? blend_output.b() : dest.b(),
output_merger.alpha_enable ? blend_output.a() : dest.a(),
};
if (regs.framebuffer.framebuffer.allow_color_write != 0)
DrawPixel(x >> 4, y >> 4, result);
}
}
}
void ProcessTriangle(const Vertex& v0, const Vertex& v1, const Vertex& v2) {
ProcessTriangleInternal(v0, v1, v2);
}
} // namespace Rasterizer
} // namespace Pica