spirv: Replace Constant/ConstantComposite with Const helper
parent 2999028976
commit 5b8afed871
12 changed files with 100 additions and 111 deletions
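The change is mechanical: call sites of the form ctx.Constant(ctx.U32[1], x) or ctx.Constant(ctx.F32[1], x) become ctx.Const(x), and small ConstantComposite builds go through the composite Const overloads. For orientation, the overload set this relies on presumably looks roughly like the sketch below; it is reconstructed from the call sites in this diff rather than copied from the actual header, so treat the exact signatures as assumptions.

    // Assumed shape of the EmitContext helpers that the converted call sites use.
    Id Const(u32 value) {
        return Constant(U32[1], value);
    }
    Id Const(f32 value) {
        return Constant(F32[1], value);
    }
    Id Const(u32 element_1, u32 element_2) {
        return ConstantComposite(U32[2], Const(element_1), Const(element_2));
    }
    Id Const(u32 element_1, u32 element_2, u32 element_3, u32 element_4) {
        return ConstantComposite(U32[4], Const(element_1), Const(element_2), Const(element_3),
                                 Const(element_4));
    }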
@@ -131,13 +131,13 @@ Id DefineInput(EmitContext& ctx, Id type, bool per_invocation,
     case Stage::TessellationControl:
     case Stage::TessellationEval:
         if (per_invocation) {
-            type = ctx.TypeArray(type, ctx.Constant(ctx.U32[1], 32u));
+            type = ctx.TypeArray(type, ctx.Const(32u));
         }
         break;
     case Stage::Geometry:
         if (per_invocation) {
             const u32 num_vertices{NumVertices(ctx.profile.input_topology)};
-            type = ctx.TypeArray(type, ctx.Constant(ctx.U32[1], num_vertices));
+            type = ctx.TypeArray(type, ctx.Const(num_vertices));
         }
         break;
     default:

@@ -149,7 +149,7 @@ Id DefineInput(EmitContext& ctx, Id type, bool per_invocation,
 Id DefineOutput(EmitContext& ctx, Id type, std::optional<u32> invocations,
                 std::optional<spv::BuiltIn> builtin = std::nullopt) {
     if (invocations && ctx.stage == Stage::TessellationControl) {
-        type = ctx.TypeArray(type, ctx.Constant(ctx.U32[1], *invocations));
+        type = ctx.TypeArray(type, ctx.Const(*invocations));
     }
     return DefineVariable(ctx, type, builtin, spv::StorageClass::Output);
 }

@@ -224,7 +224,7 @@ std::optional<AttrInfo> AttrTypes(EmitContext& ctx, u32 index) {
 
 void DefineConstBuffers(EmitContext& ctx, const Info& info, Id UniformDefinitions::*member_type,
                         u32 binding, Id type, char type_char, u32 element_size) {
-    const Id array_type{ctx.TypeArray(type, ctx.Constant(ctx.U32[1], 65536U / element_size))};
+    const Id array_type{ctx.TypeArray(type, ctx.Const(65536U / element_size))};
     ctx.Decorate(array_type, spv::Decoration::ArrayStride, element_size);
 
     const Id struct_type{ctx.TypeStruct(array_type)};

@@ -328,7 +328,7 @@ Id CasLoop(EmitContext& ctx, Operation operation, Id array_pointer, Id element_p
     const bool is_struct{!is_shared || ctx.profile.support_explicit_workgroup_layout};
     const Id cas_func{CasFunction(ctx, operation, value_type)};
     const Id zero{ctx.u32_zero_value};
-    const Id scope_id{ctx.Constant(ctx.U32[1], static_cast<u32>(scope))};
+    const Id scope_id{ctx.Const(static_cast<u32>(scope))};
 
     const Id loop_header{ctx.OpLabel()};
     const Id continue_block{ctx.OpLabel()};

@@ -428,11 +428,11 @@ Id EmitContext::Def(const IR::Value& value) {
     case IR::Type::U1:
         return value.U1() ? true_value : false_value;
     case IR::Type::U32:
-        return Constant(U32[1], value.U32());
+        return Const(value.U32());
     case IR::Type::U64:
         return Constant(U64, value.U64());
     case IR::Type::F32:
-        return Constant(F32[1], value.F32());
+        return Const(value.F32());
     case IR::Type::F64:
         return Constant(F64[1], value.F64());
     case IR::Type::Label:

@@ -486,8 +486,8 @@ void EmitContext::DefineCommonTypes(const Info& info) {
 void EmitContext::DefineCommonConstants() {
     true_value = ConstantTrue(U1);
     false_value = ConstantFalse(U1);
-    u32_zero_value = Constant(U32[1], 0U);
-    f32_zero_value = Constant(F32[1], 0.0f);
+    u32_zero_value = Const(0U);
+    f32_zero_value = Const(0.0f);
 }
 
 void EmitContext::DefineInterfaces(const IR::Program& program) {

@@ -500,7 +500,7 @@ void EmitContext::DefineLocalMemory(const IR::Program& program) {
         return;
     }
     const u32 num_elements{Common::DivCeil(program.local_memory_size, 4U)};
-    const Id type{TypeArray(U32[1], Constant(U32[1], num_elements))};
+    const Id type{TypeArray(U32[1], Const(num_elements))};
     const Id pointer{TypePointer(spv::StorageClass::Private, type)};
     local_memory = AddGlobalVariable(pointer, spv::StorageClass::Private);
     if (profile.supported_spirv >= 0x00010400) {

@@ -514,7 +514,7 @@ void EmitContext::DefineSharedMemory(const IR::Program& program) {
     }
     const auto make{[&](Id element_type, u32 element_size) {
         const u32 num_elements{Common::DivCeil(program.shared_memory_size, element_size)};
-        const Id array_type{TypeArray(element_type, Constant(U32[1], num_elements))};
+        const Id array_type{TypeArray(element_type, Const(num_elements))};
         Decorate(array_type, spv::Decoration::ArrayStride, element_size);
 
         const Id struct_type{TypeStruct(array_type)};

@@ -549,7 +549,7 @@ void EmitContext::DefineSharedMemory(const IR::Program& program) {
         return;
     }
     const u32 num_elements{Common::DivCeil(program.shared_memory_size, 4U)};
-    const Id type{TypeArray(U32[1], Constant(U32[1], num_elements))};
+    const Id type{TypeArray(U32[1], Const(num_elements))};
     shared_memory_u32_type = TypePointer(spv::StorageClass::Workgroup, type);
 
     shared_u32 = TypePointer(spv::StorageClass::Workgroup, U32[1]);

@@ -569,10 +569,10 @@ void EmitContext::DefineSharedMemory(const IR::Program& program) {
         OpBranch(loop_header);
 
         AddLabel(loop_header);
-        const Id word_offset{OpShiftRightArithmetic(U32[1], offset, Constant(U32[1], 2U))};
-        const Id shift_offset{OpShiftLeftLogical(U32[1], offset, Constant(U32[1], 3U))};
-        const Id bit_offset{OpBitwiseAnd(U32[1], shift_offset, Constant(U32[1], mask))};
-        const Id count{Constant(U32[1], size)};
+        const Id word_offset{OpShiftRightArithmetic(U32[1], offset, Const(2U))};
+        const Id shift_offset{OpShiftLeftLogical(U32[1], offset, Const(3U))};
+        const Id bit_offset{OpBitwiseAnd(U32[1], shift_offset, Const(mask))};
+        const Id count{Const(size)};
         OpLoopMerge(merge_block, continue_block, spv::LoopControlMask::MaskNone);
         OpBranch(continue_block);
 

@@ -580,9 +580,8 @@ void EmitContext::DefineSharedMemory(const IR::Program& program) {
         const Id word_pointer{OpAccessChain(shared_u32, shared_memory_u32, word_offset)};
         const Id old_value{OpLoad(U32[1], word_pointer)};
         const Id new_value{OpBitFieldInsert(U32[1], old_value, insert_value, bit_offset, count)};
-        const Id atomic_res{OpAtomicCompareExchange(U32[1], word_pointer, Constant(U32[1], 1U),
-                                                    u32_zero_value, u32_zero_value, new_value,
-                                                    old_value)};
+        const Id atomic_res{OpAtomicCompareExchange(U32[1], word_pointer, Const(1U), u32_zero_value,
+                                                    u32_zero_value, new_value, old_value)};
         const Id success{OpIEqual(U1, atomic_res, old_value)};
         OpBranchConditional(success, merge_block, loop_header);
 

@@ -623,9 +622,9 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
         const Id vertex{is_array ? OpFunctionParameter(U32[1]) : Id{}};
 
         AddLabel();
-        const Id base_index{OpShiftRightArithmetic(U32[1], offset, Constant(U32[1], 2U))};
-        const Id masked_index{OpBitwiseAnd(U32[1], base_index, Constant(U32[1], 3U))};
-        const Id compare_index{OpShiftRightArithmetic(U32[1], base_index, Constant(U32[1], 2U))};
+        const Id base_index{OpShiftRightArithmetic(U32[1], offset, Const(2U))};
+        const Id masked_index{OpBitwiseAnd(U32[1], base_index, Const(3U))};
+        const Id compare_index{OpShiftRightArithmetic(U32[1], base_index, Const(2U))};
         std::vector<Sirit::Literal> literals;
         std::vector<Id> labels;
         if (info.loads_position) {

@@ -643,7 +642,7 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
         OpSelectionMerge(end_block, spv::SelectionControlMask::MaskNone);
         OpSwitch(compare_index, default_label, literals, labels);
         AddLabel(default_label);
-        OpReturnValue(Constant(F32[1], 0.0f));
+        OpReturnValue(Const(0.0f));
         size_t label_index{0};
         if (info.loads_position) {
             AddLabel(labels[label_index]);

@@ -661,7 +660,7 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
             AddLabel(labels[label_index]);
             const auto type{AttrTypes(*this, static_cast<u32>(i))};
             if (!type) {
-                OpReturnValue(Constant(F32[1], 0.0f));
+                OpReturnValue(Const(0.0f));
                 ++label_index;
                 continue;
             }

@@ -688,9 +687,9 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
         const Id offset{OpFunctionParameter(U32[1])};
         const Id store_value{OpFunctionParameter(F32[1])};
         AddLabel();
-        const Id base_index{OpShiftRightArithmetic(U32[1], offset, Constant(U32[1], 2U))};
-        const Id masked_index{OpBitwiseAnd(U32[1], base_index, Constant(U32[1], 3U))};
-        const Id compare_index{OpShiftRightArithmetic(U32[1], base_index, Constant(U32[1], 2U))};
+        const Id base_index{OpShiftRightArithmetic(U32[1], offset, Const(2U))};
+        const Id masked_index{OpBitwiseAnd(U32[1], base_index, Const(3U))};
+        const Id compare_index{OpShiftRightArithmetic(U32[1], base_index, Const(2U))};
         std::vector<Sirit::Literal> literals;
         std::vector<Id> labels;
         if (info.stores_position) {

@@ -744,7 +743,7 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
             OpReturn();
             ++label_index;
             AddLabel(labels[label_index]);
-            const Id fixed_index{OpIAdd(U32[1], masked_index, Constant(U32[1], 4))};
+            const Id fixed_index{OpIAdd(U32[1], masked_index, Const(4U))};
             const Id pointer2{OpAccessChain(output_f32, clip_distances, fixed_index)};
             OpStore(pointer2, store_value);
             OpReturn();

@@ -1018,9 +1017,9 @@ void EmitContext::DefineInputs(const Info& info) {
         DefineInput(*this, U32[1], false, spv::BuiltIn::SubgroupLocalInvocationId);
     }
     if (info.uses_fswzadd) {
-        const Id f32_one{Constant(F32[1], 1.0f)};
-        const Id f32_minus_one{Constant(F32[1], -1.0f)};
-        const Id f32_zero{Constant(F32[1], 0.0f)};
+        const Id f32_one{Const(1.0f)};
+        const Id f32_minus_one{Const(-1.0f)};
+        const Id f32_zero{Const(0.0f)};
         fswzadd_lut_a = ConstantComposite(F32[4], f32_minus_one, f32_one, f32_minus_one, f32_zero);
         fswzadd_lut_b =
             ConstantComposite(F32[4], f32_minus_one, f32_minus_one, f32_one, f32_minus_one);

@@ -1118,7 +1117,7 @@ void EmitContext::DefineOutputs(const IR::Program& program) {
         if (stage == Stage::Fragment) {
            throw NotImplementedException("Storing ClipDistance in fragment stage");
         }
-        const Id type{TypeArray(F32[1], Constant(U32[1], 8U))};
+        const Id type{TypeArray(F32[1], Const(8U))};
         clip_distances = DefineOutput(*this, type, invocations, spv::BuiltIn::ClipDistance);
     }
     if (info.stores_layer &&

@@ -1136,7 +1135,7 @@ void EmitContext::DefineOutputs(const IR::Program& program) {
         viewport_index = DefineOutput(*this, U32[1], invocations, spv::BuiltIn::ViewportIndex);
     }
     if (info.stores_viewport_mask && profile.support_viewport_mask) {
-        viewport_mask = DefineOutput(*this, TypeArray(U32[1], Constant(U32[1], 1u)), std::nullopt);
+        viewport_mask = DefineOutput(*this, TypeArray(U32[1], Const(1u)), std::nullopt);
     }
     for (size_t index = 0; index < info.stores_generics.size(); ++index) {
         if (info.stores_generics[index]) {

@@ -1146,13 +1145,13 @@ void EmitContext::DefineOutputs(const IR::Program& program) {
     switch (stage) {
     case Stage::TessellationControl:
         if (info.stores_tess_level_outer) {
-            const Id type{TypeArray(F32[1], Constant(U32[1], 4))};
+            const Id type{TypeArray(F32[1], Const(4U))};
             output_tess_level_outer =
                 DefineOutput(*this, type, std::nullopt, spv::BuiltIn::TessLevelOuter);
             Decorate(output_tess_level_outer, spv::Decoration::Patch);
         }
         if (info.stores_tess_level_inner) {
-            const Id type{TypeArray(F32[1], Constant(U32[1], 2))};
+            const Id type{TypeArray(F32[1], Const(2U))};
             output_tess_level_inner =
                 DefineOutput(*this, type, std::nullopt, spv::BuiltIn::TessLevelInner);
             Decorate(output_tess_level_inner, spv::Decoration::Patch);

@@ -114,7 +114,7 @@ public:
     }
 
     Id Const(u32 element_1, u32 element_2, u32 element_3, u32 element_4) {
-        return ConstantComposite(U32[2], Const(element_1), Const(element_2), Const(element_3),
+        return ConstantComposite(U32[4], Const(element_1), Const(element_2), Const(element_3),
                                  Const(element_4));
     }
 
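Note that besides the call-site conversion, the hunk above also corrects the four-element overload, which previously requested the two-component type U32[2] for its composite. A hypothetical call site for illustration (the variable name is invented):

    // After the fix this yields an OpConstantComposite of U32[4] (a uvec4);
    // before, the composite type was mistakenly the two-component U32[2].
    const Id example_offsets{ctx.Const(0u, 1u, 2u, 3u)};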
@@ -7,10 +7,10 @@
 namespace Shader::Backend::SPIRV {
 namespace {
 Id SharedPointer(EmitContext& ctx, Id offset, u32 index_offset = 0) {
-    const Id shift_id{ctx.Constant(ctx.U32[1], 2U)};
+    const Id shift_id{ctx.Const(2U)};
     Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
     if (index_offset > 0) {
-        index = ctx.OpIAdd(ctx.U32[1], index, ctx.Constant(ctx.U32[1], index_offset));
+        index = ctx.OpIAdd(ctx.U32[1], index, ctx.Const(index_offset));
     }
     return ctx.profile.support_explicit_workgroup_layout
               ? ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, ctx.u32_zero_value, index)

@@ -20,14 +20,14 @@ Id SharedPointer(EmitContext& ctx, Id offset, u32 index_offset = 0) {
 Id StorageIndex(EmitContext& ctx, const IR::Value& offset, size_t element_size) {
     if (offset.IsImmediate()) {
         const u32 imm_offset{static_cast<u32>(offset.U32() / element_size)};
-        return ctx.Constant(ctx.U32[1], imm_offset);
+        return ctx.Const(imm_offset);
     }
     const u32 shift{static_cast<u32>(std::countr_zero(element_size))};
     const Id index{ctx.Def(offset)};
     if (shift == 0) {
         return index;
     }
-    const Id shift_id{ctx.Constant(ctx.U32[1], shift)};
+    const Id shift_id{ctx.Const(shift)};
     return ctx.OpShiftRightLogical(ctx.U32[1], index, shift_id);
 }
 

@@ -43,7 +43,7 @@ Id StoragePointer(EmitContext& ctx, const StorageTypeDefinition& type_def,
 }
 
 std::pair<Id, Id> AtomicArgs(EmitContext& ctx) {
-    const Id scope{ctx.Constant(ctx.U32[1], static_cast<u32>(spv::Scope::Device))};
+    const Id scope{ctx.Const(static_cast<u32>(spv::Scope::Device))};
     const Id semantics{ctx.u32_zero_value};
     return {scope, semantics};
 }

@@ -103,13 +103,13 @@ Id EmitSharedAtomicUMax32(EmitContext& ctx, Id offset, Id value) {
 }
 
 Id EmitSharedAtomicInc32(EmitContext& ctx, Id offset, Id value) {
-    const Id shift_id{ctx.Constant(ctx.U32[1], 2U)};
+    const Id shift_id{ctx.Const(2U)};
     const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
     return ctx.OpFunctionCall(ctx.U32[1], ctx.increment_cas_shared, index, value);
 }
 
 Id EmitSharedAtomicDec32(EmitContext& ctx, Id offset, Id value) {
-    const Id shift_id{ctx.Constant(ctx.U32[1], 2U)};
+    const Id shift_id{ctx.Const(2U)};
     const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
     return ctx.OpFunctionCall(ctx.U32[1], ctx.decrement_cas_shared, index, value);
 }

@@ -132,7 +132,7 @@ Id EmitSharedAtomicExchange32(EmitContext& ctx, Id offset, Id value) {
 
 Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
     if (ctx.profile.support_int64_atomics && ctx.profile.support_explicit_workgroup_layout) {
-        const Id shift_id{ctx.Constant(ctx.U32[1], 3U)};
+        const Id shift_id{ctx.Const(3U)};
         const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
         const Id pointer{
             ctx.OpAccessChain(ctx.shared_u64, ctx.shared_memory_u64, ctx.u32_zero_value, index)};

@@ -12,8 +12,7 @@ void MemoryBarrier(EmitContext& ctx, spv::Scope scope) {
         spv::MemorySemanticsMask::AcquireRelease | spv::MemorySemanticsMask::UniformMemory |
         spv::MemorySemanticsMask::WorkgroupMemory | spv::MemorySemanticsMask::AtomicCounterMemory |
         spv::MemorySemanticsMask::ImageMemory};
-    ctx.OpMemoryBarrier(ctx.Constant(ctx.U32[1], static_cast<u32>(scope)),
-                        ctx.Constant(ctx.U32[1], static_cast<u32>(semantics)));
+    ctx.OpMemoryBarrier(ctx.Const(static_cast<u32>(scope)), ctx.Const(static_cast<u32>(semantics)));
 }
 } // Anonymous namespace
 

@@ -22,9 +21,9 @@ void EmitBarrier(EmitContext& ctx) {
     const auto memory{spv::Scope::Workgroup};
     const auto memory_semantics{spv::MemorySemanticsMask::AcquireRelease |
                                 spv::MemorySemanticsMask::WorkgroupMemory};
-    ctx.OpControlBarrier(ctx.Constant(ctx.U32[1], static_cast<u32>(execution)),
-                         ctx.Constant(ctx.U32[1], static_cast<u32>(memory)),
-                         ctx.Constant(ctx.U32[1], static_cast<u32>(memory_semantics)));
+    ctx.OpControlBarrier(ctx.Const(static_cast<u32>(execution)),
+                         ctx.Const(static_cast<u32>(memory)),
+                         ctx.Const(static_cast<u32>(memory_semantics)));
 }
 
 void EmitWorkgroupMemoryBarrier(EmitContext& ctx) {

@@ -69,7 +69,7 @@ std::optional<OutAttr> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
             return info.id;
         } else {
             const u32 index_element{element - info.first_element};
-            const Id index_id{ctx.Constant(ctx.U32[1], index_element)};
+            const Id index_id{ctx.Const(index_element)};
             return OutputAccessChain(ctx, ctx.output_f32, info.id, index_id);
         }
     }

@@ -81,7 +81,7 @@ std::optional<OutAttr> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
     case IR::Attribute::PositionZ:
     case IR::Attribute::PositionW: {
         const u32 element{static_cast<u32>(attr) % 4};
-        const Id element_id{ctx.Constant(ctx.U32[1], element)};
+        const Id element_id{ctx.Const(element)};
         return OutputAccessChain(ctx, ctx.output_f32, ctx.output_position, element_id);
     }
     case IR::Attribute::ClipDistance0:

@@ -94,7 +94,7 @@ std::optional<OutAttr> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
     case IR::Attribute::ClipDistance7: {
         const u32 base{static_cast<u32>(IR::Attribute::ClipDistance0)};
         const u32 index{static_cast<u32>(attr) - base};
-        const Id clip_num{ctx.Constant(ctx.U32[1], index)};
+        const Id clip_num{ctx.Const(index)};
         return OutputAccessChain(ctx, ctx.output_f32, ctx.clip_distances, clip_num);
     }
     case IR::Attribute::Layer:

@@ -131,7 +131,7 @@ Id GetCbuf(EmitContext& ctx, Id result_type, Id UniformDefinitions::*member_ptr,
         Id index{ctx.Def(offset)};
         if (element_size > 1) {
             const u32 log2_element_size{static_cast<u32>(std::countr_zero(element_size))};
-            const Id shift{ctx.Constant(ctx.U32[1], log2_element_size)};
+            const Id shift{ctx.Const(log2_element_size)};
             index = ctx.OpShiftRightArithmetic(ctx.U32[1], ctx.Def(offset), shift);
         }
         const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, index)};

@@ -140,7 +140,7 @@ Id GetCbuf(EmitContext& ctx, Id result_type, Id UniformDefinitions::*member_ptr,
     if (offset.U32() % element_size != 0) {
         throw NotImplementedException("Unaligned immediate constant buffer load");
     }
-    const Id imm_offset{ctx.Constant(ctx.U32[1], offset.U32() / element_size)};
+    const Id imm_offset{ctx.Const(offset.U32() / element_size)};
     const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, imm_offset)};
     return ctx.OpLoad(result_type, access_chain);
 }

@@ -212,13 +212,13 @@ Id EmitGetCbufU32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value&
 
 Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
     const u32 element{static_cast<u32>(attr) % 4};
-    const auto element_id{[&] { return ctx.Constant(ctx.U32[1], element); }};
+    const auto element_id{[&] { return ctx.Const(element); }};
     if (IR::IsGeneric(attr)) {
         const u32 index{IR::GenericAttributeIndex(attr)};
         const std::optional<AttrInfo> type{AttrTypes(ctx, index)};
         if (!type) {
             // Attribute is disabled
-            return ctx.Constant(ctx.F32[1], 0.0f);
+            return ctx.Const(0.0f);
         }
         const Id generic_id{ctx.input_generics.at(index)};
         const Id pointer{AttrPointer(ctx, type->pointer, vertex, generic_id, element_id())};

@@ -252,20 +252,19 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
     }
     case IR::Attribute::FrontFace:
         return ctx.OpSelect(ctx.U32[1], ctx.OpLoad(ctx.U1, ctx.front_face),
-                            ctx.Constant(ctx.U32[1], std::numeric_limits<u32>::max()),
-                            ctx.u32_zero_value);
+                            ctx.Const(std::numeric_limits<u32>::max()), ctx.u32_zero_value);
     case IR::Attribute::PointSpriteS:
         return ctx.OpLoad(ctx.F32[1],
                           ctx.OpAccessChain(ctx.input_f32, ctx.point_coord, ctx.u32_zero_value));
     case IR::Attribute::PointSpriteT:
-        return ctx.OpLoad(ctx.F32[1], ctx.OpAccessChain(ctx.input_f32, ctx.point_coord,
-                                                        ctx.Constant(ctx.U32[1], 1U)));
+        return ctx.OpLoad(ctx.F32[1],
+                          ctx.OpAccessChain(ctx.input_f32, ctx.point_coord, ctx.Const(1U)));
     case IR::Attribute::TessellationEvaluationPointU:
         return ctx.OpLoad(ctx.F32[1],
                           ctx.OpAccessChain(ctx.input_f32, ctx.tess_coord, ctx.u32_zero_value));
     case IR::Attribute::TessellationEvaluationPointV:
-        return ctx.OpLoad(ctx.F32[1], ctx.OpAccessChain(ctx.input_f32, ctx.tess_coord,
-                                                        ctx.Constant(ctx.U32[1], 1U)));
+        return ctx.OpLoad(ctx.F32[1],
+                          ctx.OpAccessChain(ctx.input_f32, ctx.tess_coord, ctx.Const(1U)));
 
     default:
         throw NotImplementedException("Read attribute {}", attr);

@@ -303,7 +302,7 @@ Id EmitGetPatch(EmitContext& ctx, IR::Patch patch) {
         throw NotImplementedException("Non-generic patch load");
     }
     const u32 index{IR::GenericPatchIndex(patch)};
-    const Id element{ctx.Constant(ctx.U32[1], IR::GenericPatchElement(patch))};
+    const Id element{ctx.Const(IR::GenericPatchElement(patch))};
     const Id pointer{ctx.OpAccessChain(ctx.input_f32, ctx.patches.at(index), element)};
     return ctx.OpLoad(ctx.F32[1], pointer);
 }

@@ -312,7 +311,7 @@ void EmitSetPatch(EmitContext& ctx, IR::Patch patch, Id value) {
     const Id pointer{[&] {
         if (IR::IsGeneric(patch)) {
             const u32 index{IR::GenericPatchIndex(patch)};
-            const Id element{ctx.Constant(ctx.U32[1], IR::GenericPatchElement(patch))};
+            const Id element{ctx.Const(IR::GenericPatchElement(patch))};
             return ctx.OpAccessChain(ctx.output_f32, ctx.patches.at(index), element);
         }
         switch (patch) {

@@ -321,15 +320,14 @@ void EmitSetPatch(EmitContext& ctx, IR::Patch patch, Id value) {
         case IR::Patch::TessellationLodTop:
         case IR::Patch::TessellationLodBottom: {
             const u32 index{static_cast<u32>(patch) - u32(IR::Patch::TessellationLodLeft)};
-            const Id index_id{ctx.Constant(ctx.U32[1], index)};
+            const Id index_id{ctx.Const(index)};
             return ctx.OpAccessChain(ctx.output_f32, ctx.output_tess_level_outer, index_id);
         }
         case IR::Patch::TessellationLodInteriorU:
             return ctx.OpAccessChain(ctx.output_f32, ctx.output_tess_level_inner,
                                      ctx.u32_zero_value);
         case IR::Patch::TessellationLodInteriorV:
-            return ctx.OpAccessChain(ctx.output_f32, ctx.output_tess_level_inner,
-                                     ctx.Constant(ctx.U32[1], 1u));
+            return ctx.OpAccessChain(ctx.output_f32, ctx.output_tess_level_inner, ctx.Const(1u));
         default:
             throw NotImplementedException("Patch {}", patch);
         }

@@ -338,7 +336,7 @@ void EmitSetPatch(EmitContext& ctx, IR::Patch patch, Id value) {
 }
 
 void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, Id value) {
-    const Id component_id{ctx.Constant(ctx.U32[1], component)};
+    const Id component_id{ctx.Const(component)};
     const Id pointer{ctx.OpAccessChain(ctx.output_f32, ctx.frag_color.at(index), component_id)};
     ctx.OpStore(pointer, value);
 }

@@ -404,7 +402,7 @@ Id EmitIsHelperInvocation(EmitContext& ctx) {
 }
 
 Id EmitYDirection(EmitContext& ctx) {
-    return ctx.Constant(ctx.F32[1], ctx.profile.y_negate ? -1.0f : 1.0f);
+    return ctx.Const(ctx.profile.y_negate ? -1.0f : 1.0f);
 }
 
 Id EmitLoadLocal(EmitContext& ctx, Id word_offset) {

@@ -117,7 +117,7 @@ Id EmitFPLog2(EmitContext& ctx, Id value) {
 }
 
 Id EmitFPRecip32(EmitContext& ctx, Id value) {
-    return ctx.OpFDiv(ctx.F32[1], ctx.Constant(ctx.F32[1], 1.0f), value);
+    return ctx.OpFDiv(ctx.F32[1], ctx.Const(1.0f), value);
 }
 
 Id EmitFPRecip64(EmitContext& ctx, Id value) {

@@ -143,8 +143,8 @@ Id EmitFPSaturate16(EmitContext& ctx, Id value) {
 }
 
 Id EmitFPSaturate32(EmitContext& ctx, Id value) {
-    const Id zero{ctx.Constant(ctx.F32[1], f32{0.0})};
-    const Id one{ctx.Constant(ctx.F32[1], f32{1.0})};
+    const Id zero{ctx.Const(f32{0.0})};
+    const Id one{ctx.Const(f32{1.0})};
     return Clamp(ctx, ctx.F32[1], value, zero, one);
 }
 

@@ -45,16 +45,12 @@ public:
         if (opcode != values[1]->GetOpcode() || opcode != IR::Opcode::CompositeConstructU32x4) {
             throw LogicError("Invalid PTP arguments");
         }
-        auto read{[&](unsigned int a, unsigned int b) {
-            return ctx.Constant(ctx.U32[1], values[a]->Arg(b).U32());
-        }};
+        auto read{[&](unsigned int a, unsigned int b) { return values[a]->Arg(b).U32(); }};
 
-        const Id offsets{
-            ctx.ConstantComposite(ctx.TypeArray(ctx.U32[2], ctx.Constant(ctx.U32[1], 4)),
-                                  ctx.ConstantComposite(ctx.U32[2], read(0, 0), read(0, 1)),
-                                  ctx.ConstantComposite(ctx.U32[2], read(0, 2), read(0, 3)),
-                                  ctx.ConstantComposite(ctx.U32[2], read(1, 0), read(1, 1)),
-                                  ctx.ConstantComposite(ctx.U32[2], read(1, 2), read(1, 3)))};
+        const Id offsets{ctx.ConstantComposite(
+            ctx.TypeArray(ctx.U32[2], ctx.Const(4U)), ctx.Const(read(0, 0), read(0, 1)),
+            ctx.Const(read(0, 2), read(0, 3)), ctx.Const(read(1, 0), read(1, 1)),
+            ctx.Const(read(1, 2), read(1, 3)))};
         Add(spv::ImageOperandsMask::ConstOffsets, offsets);
     }
 

@@ -108,7 +104,7 @@ private:
             return;
         }
         if (offset.IsImmediate()) {
-            Add(spv::ImageOperandsMask::ConstOffset, ctx.Constant(ctx.U32[1], offset.U32()));
+            Add(spv::ImageOperandsMask::ConstOffset, ctx.Const(offset.U32()));
             return;
         }
         IR::Inst* const inst{offset.InstRecursive()};

@@ -361,9 +357,8 @@ Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id
     const auto info{inst->Flags<IR::TextureInstInfo>()};
     const ImageOperands operands(ctx, offset, offset2);
     return Emit(&EmitContext::OpImageSparseGather, &EmitContext::OpImageGather, ctx, inst,
-                ctx.F32[4], Texture(ctx, index), coords,
-                ctx.Constant(ctx.U32[1], info.gather_component.Value()), operands.Mask(),
-                operands.Span());
+                ctx.F32[4], Texture(ctx, index), coords, ctx.Const(info.gather_component),
+                operands.Mask(), operands.Span());
 }
 
 Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,

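The PTP offsets hunk above is the one spot where the composite overloads do more than shorten a line: the read lambda now returns plain u32 values and the four ConstantComposite(ctx.U32[2], ...) builds collapse into two-argument Const calls. A standalone sketch of the same construction, with made-up literal offsets, assuming the two-argument overload from the sketch near the top of this page:

    // Builds an array of four uvec2 constants, as used for the ConstOffsets operand.
    const Id offsets_type{ctx.TypeArray(ctx.U32[2], ctx.Const(4U))};
    const Id example_offsets{ctx.ConstantComposite(offsets_type, ctx.Const(0u, 0u),
                                                   ctx.Const(1u, 0u), ctx.Const(0u, 1u),
                                                   ctx.Const(1u, 1u))};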
@@ -44,7 +44,7 @@ Id EmitIAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) {
         // https://stackoverflow.com/questions/55468823/how-to-detect-integer-overflow-in-c
         constexpr u32 s32_max{static_cast<u32>(std::numeric_limits<s32>::max())};
         const Id is_positive{ctx.OpSGreaterThanEqual(ctx.U1, a, ctx.u32_zero_value)};
-        const Id sub_a{ctx.OpISub(ctx.U32[1], ctx.Constant(ctx.U32[1], s32_max), a)};
+        const Id sub_a{ctx.OpISub(ctx.U32[1], ctx.Const(s32_max), a)};
 
         const Id positive_test{ctx.OpSGreaterThan(ctx.U1, b, sub_a)};
         const Id negative_test{ctx.OpSLessThan(ctx.U1, b, sub_a)};

@@ -11,14 +11,14 @@ namespace {
 Id StorageIndex(EmitContext& ctx, const IR::Value& offset, size_t element_size) {
     if (offset.IsImmediate()) {
         const u32 imm_offset{static_cast<u32>(offset.U32() / element_size)};
-        return ctx.Constant(ctx.U32[1], imm_offset);
+        return ctx.Const(imm_offset);
     }
     const u32 shift{static_cast<u32>(std::countr_zero(element_size))};
     const Id index{ctx.Def(offset)};
     if (shift == 0) {
         return index;
     }
-    const Id shift_id{ctx.Constant(ctx.U32[1], shift)};
+    const Id shift_id{ctx.Const(shift)};
     return ctx.OpShiftRightLogical(ctx.U32[1], index, shift_id);
 }
 

@@ -7,22 +7,22 @@
 namespace Shader::Backend::SPIRV {
 namespace {
 Id Pointer(EmitContext& ctx, Id pointer_type, Id array, Id offset, u32 shift) {
-    const Id shift_id{ctx.Constant(ctx.U32[1], shift)};
+    const Id shift_id{ctx.Const(shift)};
     const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
     return ctx.OpAccessChain(pointer_type, array, ctx.u32_zero_value, index);
 }
 
 Id Word(EmitContext& ctx, Id offset) {
-    const Id shift_id{ctx.Constant(ctx.U32[1], 2U)};
+    const Id shift_id{ctx.Const(2U)};
     const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
     const Id pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, index)};
     return ctx.OpLoad(ctx.U32[1], pointer);
 }
 
 std::pair<Id, Id> ExtractArgs(EmitContext& ctx, Id offset, u32 mask, u32 count) {
-    const Id shift{ctx.OpShiftLeftLogical(ctx.U32[1], offset, ctx.Constant(ctx.U32[1], 3U))};
-    const Id bit{ctx.OpBitwiseAnd(ctx.U32[1], shift, ctx.Constant(ctx.U32[1], mask))};
-    const Id count_id{ctx.Constant(ctx.U32[1], count)};
+    const Id shift{ctx.OpShiftLeftLogical(ctx.U32[1], offset, ctx.Const(3U))};
+    const Id bit{ctx.OpBitwiseAnd(ctx.U32[1], shift, ctx.Const(mask))};
+    const Id count_id{ctx.Const(count)};
     return {bit, count_id};
 }
 } // Anonymous namespace

@@ -83,9 +83,9 @@ Id EmitLoadSharedU64(EmitContext& ctx, Id offset) {
         const Id pointer{Pointer(ctx, ctx.shared_u32x2, ctx.shared_memory_u32x2, offset, 3)};
         return ctx.OpLoad(ctx.U32[2], pointer);
     } else {
-        const Id shift_id{ctx.Constant(ctx.U32[1], 2U)};
+        const Id shift_id{ctx.Const(2U)};
         const Id base_index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
-        const Id next_index{ctx.OpIAdd(ctx.U32[1], base_index, ctx.Constant(ctx.U32[1], 1U))};
+        const Id next_index{ctx.OpIAdd(ctx.U32[1], base_index, ctx.Const(1U))};
         const Id lhs_pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, base_index)};
         const Id rhs_pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, next_index)};
         return ctx.OpCompositeConstruct(ctx.U32[2], ctx.OpLoad(ctx.U32[1], lhs_pointer),

@@ -98,12 +98,11 @@ Id EmitLoadSharedU128(EmitContext& ctx, Id offset) {
         const Id pointer{Pointer(ctx, ctx.shared_u32x4, ctx.shared_memory_u32x4, offset, 4)};
         return ctx.OpLoad(ctx.U32[4], pointer);
     }
-    const Id shift_id{ctx.Constant(ctx.U32[1], 2U)};
+    const Id shift_id{ctx.Const(2U)};
     const Id base_index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
     std::array<Id, 4> values{};
     for (u32 i = 0; i < 4; ++i) {
-        const Id index{i == 0 ? base_index
-                              : ctx.OpIAdd(ctx.U32[1], base_index, ctx.Constant(ctx.U32[1], i))};
+        const Id index{i == 0 ? base_index : ctx.OpIAdd(ctx.U32[1], base_index, ctx.Const(i))};
         const Id pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, index)};
         values[i] = ctx.OpLoad(ctx.U32[1], pointer);
     }

@@ -134,7 +133,7 @@ void EmitWriteSharedU32(EmitContext& ctx, Id offset, Id value) {
     if (ctx.profile.support_explicit_workgroup_layout) {
         pointer = Pointer(ctx, ctx.shared_u32, ctx.shared_memory_u32, offset, 2);
     } else {
-        const Id shift{ctx.Constant(ctx.U32[1], 2U)};
+        const Id shift{ctx.Const(2U)};
         const Id word_offset{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift)};
         pointer = ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, word_offset);
     }

@@ -147,9 +146,9 @@ void EmitWriteSharedU64(EmitContext& ctx, Id offset, Id value) {
         ctx.OpStore(pointer, value);
         return;
     }
-    const Id shift{ctx.Constant(ctx.U32[1], 2U)};
+    const Id shift{ctx.Const(2U)};
     const Id word_offset{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift)};
-    const Id next_offset{ctx.OpIAdd(ctx.U32[1], word_offset, ctx.Constant(ctx.U32[1], 1U))};
+    const Id next_offset{ctx.OpIAdd(ctx.U32[1], word_offset, ctx.Const(1U))};
    const Id lhs_pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, word_offset)};
    const Id rhs_pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, next_offset)};
    ctx.OpStore(lhs_pointer, ctx.OpCompositeExtract(ctx.U32[1], value, 0U));

@@ -162,11 +161,10 @@ void EmitWriteSharedU128(EmitContext& ctx, Id offset, Id value) {
         ctx.OpStore(pointer, value);
         return;
     }
-    const Id shift{ctx.Constant(ctx.U32[1], 2U)};
+    const Id shift{ctx.Const(2U)};
     const Id base_index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift)};
     for (u32 i = 0; i < 4; ++i) {
-        const Id index{i == 0 ? base_index
-                              : ctx.OpIAdd(ctx.U32[1], base_index, ctx.Constant(ctx.U32[1], i))};
+        const Id index{i == 0 ? base_index : ctx.OpIAdd(ctx.U32[1], base_index, ctx.Const(i))};
         const Id pointer{ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, index)};
         ctx.OpStore(pointer, ctx.OpCompositeExtract(ctx.U32[1], value, i));
     }

@@ -19,7 +19,7 @@ void ConvertDepthMode(EmitContext& ctx) {
 void SetFixedPipelinePointSize(EmitContext& ctx) {
     if (ctx.profile.fixed_state_point_size) {
         const float point_size{*ctx.profile.fixed_state_point_size};
-        ctx.OpStore(ctx.output_point_size, ctx.Constant(ctx.F32[1], point_size));
+        ctx.OpStore(ctx.output_point_size, ctx.Const(point_size));
     }
 }
 

@@ -75,7 +75,7 @@ void AlphaTest(EmitContext& ctx) {
 
     const Id true_label{ctx.OpLabel()};
     const Id discard_label{ctx.OpLabel()};
-    const Id alpha_reference{ctx.Constant(ctx.F32[1], ctx.profile.alpha_test_reference)};
+    const Id alpha_reference{ctx.Const(ctx.profile.alpha_test_reference)};
     const Id condition{ComparisonFunction(ctx, comparison, alpha, alpha_reference)};
 
     ctx.OpSelectionMerge(true_label, spv::SelectionControlMask::MaskNone);

@@ -88,8 +88,8 @@
 
 void EmitPrologue(EmitContext& ctx) {
     if (ctx.stage == Stage::VertexB) {
-        const Id zero{ctx.Constant(ctx.F32[1], 0.0f)};
-        const Id one{ctx.Constant(ctx.F32[1], 1.0f)};
+        const Id zero{ctx.Const(0.0f)};
+        const Id one{ctx.Const(1.0f)};
         const Id default_vector{ctx.ConstantComposite(ctx.F32[4], zero, zero, zero, one)};
         ctx.OpStore(ctx.output_position, default_vector);
         for (const auto& info : ctx.output_generics) {

@@ -54,7 +54,7 @@ Id EmitLaneId(EmitContext& ctx) {
     if (!ctx.profile.warp_size_potentially_larger_than_guest) {
         return id;
     }
-    return ctx.OpBitwiseAnd(ctx.U32[1], id, ctx.Constant(ctx.U32[1], 31U));
+    return ctx.OpBitwiseAnd(ctx.U32[1], id, ctx.Const(31U));
 }
 
 Id EmitVoteAll(EmitContext& ctx, Id pred) {

@@ -168,10 +168,10 @@ Id EmitShuffleButterfly(EmitContext& ctx, IR::Inst* inst, Id value, Id index, Id
 }
 
 Id EmitFSwizzleAdd(EmitContext& ctx, Id op_a, Id op_b, Id swizzle) {
-    const Id three{ctx.Constant(ctx.U32[1], 3)};
+    const Id three{ctx.Const(3U)};
     Id mask{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
     mask = ctx.OpBitwiseAnd(ctx.U32[1], mask, three);
-    mask = ctx.OpShiftLeftLogical(ctx.U32[1], mask, ctx.Constant(ctx.U32[1], 1));
+    mask = ctx.OpShiftLeftLogical(ctx.U32[1], mask, ctx.Const(1U));
     mask = ctx.OpShiftRightLogical(ctx.U32[1], swizzle, mask);
     mask = ctx.OpBitwiseAnd(ctx.U32[1], mask, three);
 