Mirror of https://git.suyu.dev/suyu/suyu.git
Merge pull request #7806 from ameerj/atomic64-fallbacks
shaders: Implement U32x2 atomic fallbacks when device does not support int64
This commit is contained in: commit 09400e4f4e
11 changed files with 582 additions and 3 deletions
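All three shader backends (GLASM, GLSL, SPIR-V) use the same fallback shape: when the device lacks 64-bit atomic support, the 64-bit operand is treated as a pair of 32-bit words (U32x2) that are read, combined, and written back without atomicity, returning the pre-op value just as the atomic would. A minimal C++ sketch of that shape; Word2 and AtomicFallback64 are illustrative names, not from the codebase:

#include <cstdint>

// Word2 is a hypothetical stand-in for a 64-bit value held as two 32-bit
// words; the real backends operate on IR values, not host memory.
struct Word2 {
    std::uint32_t lo;
    std::uint32_t hi;
};

// Shape of the fallback: read both words, apply the operation without any
// atomicity, write the result back, and return the pre-op value, which is
// what the original 64-bit atomic instruction yields.
template <typename Op>
Word2 AtomicFallback64(Word2& memory, Word2 value, Op op) {
    const Word2 original = memory; // non-atomic read
    memory = op(original, value);  // non-atomic modify + write
    return original;
}

Racing invocations can interleave between the read and the write, which is why the emitters below log a warning rather than fail outright.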
@@ -372,6 +372,8 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, ScalarU32 poin
                                 ScalarU32 value);
 void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
                                 Register value);
+void EmitSharedAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
+                                  Register value);
 void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              ScalarU32 offset, ScalarU32 value);
 void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,

@@ -412,6 +414,24 @@ void EmitStorageAtomicXor64(EmitContext& ctx, IR::Inst& inst, const IR::Value& b
                             ScalarU32 offset, Register value);
 void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                                  ScalarU32 offset, Register value);
+void EmitStorageAtomicIAdd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               ScalarU32 offset, Register value);
+void EmitStorageAtomicSMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               ScalarU32 offset, Register value);
+void EmitStorageAtomicUMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               ScalarU32 offset, Register value);
+void EmitStorageAtomicSMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               ScalarU32 offset, Register value);
+void EmitStorageAtomicUMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               ScalarU32 offset, Register value);
+void EmitStorageAtomicAnd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                              ScalarU32 offset, Register value);
+void EmitStorageAtomicOr32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                             ScalarU32 offset, Register value);
+void EmitStorageAtomicXor32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                              ScalarU32 offset, Register value);
+void EmitStorageAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                                   ScalarU32 offset, Register value);
 void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              ScalarU32 offset, ScalarF32 value);
 void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,

@@ -448,6 +468,17 @@ void EmitGlobalAtomicAnd64(EmitContext& ctx);
 void EmitGlobalAtomicOr64(EmitContext& ctx);
 void EmitGlobalAtomicXor64(EmitContext& ctx);
 void EmitGlobalAtomicExchange64(EmitContext& ctx);
+void EmitGlobalAtomicIAdd32x2(EmitContext& ctx);
+void EmitGlobalAtomicSMin32x2(EmitContext& ctx);
+void EmitGlobalAtomicUMin32x2(EmitContext& ctx);
+void EmitGlobalAtomicSMax32x2(EmitContext& ctx);
+void EmitGlobalAtomicUMax32x2(EmitContext& ctx);
+void EmitGlobalAtomicInc32x2(EmitContext& ctx);
+void EmitGlobalAtomicDec32x2(EmitContext& ctx);
+void EmitGlobalAtomicAnd32x2(EmitContext& ctx);
+void EmitGlobalAtomicOr32x2(EmitContext& ctx);
+void EmitGlobalAtomicXor32x2(EmitContext& ctx);
+void EmitGlobalAtomicExchange32x2(EmitContext& ctx);
 void EmitGlobalAtomicAddF32(EmitContext& ctx);
 void EmitGlobalAtomicAddF16x2(EmitContext& ctx);
 void EmitGlobalAtomicAddF32x2(EmitContext& ctx);

@@ -311,6 +311,13 @@ void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, ScalarU32 poin
     ctx.LongAdd("ATOMS.EXCH.U64 {}.x,{},shared_mem[{}];", inst, value, pointer_offset);
 }
 
+void EmitSharedAtomicExchange32x2([[maybe_unused]] EmitContext& ctx,
+                                  [[maybe_unused]] IR::Inst& inst,
+                                  [[maybe_unused]] ScalarU32 pointer_offset,
+                                  [[maybe_unused]] Register value) {
+    throw NotImplementedException("GLASM instruction");
+}
+
 void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              ScalarU32 offset, ScalarU32 value) {
     Atom(ctx, inst, binding, offset, value, "ADD", "U32");

@@ -411,6 +418,62 @@ void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Val
     Atom(ctx, inst, binding, offset, value, "EXCH", "U64");
 }
 
+void EmitStorageAtomicIAdd32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
+                               [[maybe_unused]] const IR::Value& binding,
+                               [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitStorageAtomicSMin32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
+                               [[maybe_unused]] const IR::Value& binding,
+                               [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitStorageAtomicUMin32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
+                               [[maybe_unused]] const IR::Value& binding,
+                               [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitStorageAtomicSMax32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
+                               [[maybe_unused]] const IR::Value& binding,
+                               [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitStorageAtomicUMax32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
+                               [[maybe_unused]] const IR::Value& binding,
+                               [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitStorageAtomicAnd32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
+                              [[maybe_unused]] const IR::Value& binding,
+                              [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitStorageAtomicOr32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
+                             [[maybe_unused]] const IR::Value& binding,
+                             [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitStorageAtomicXor32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
+                              [[maybe_unused]] const IR::Value& binding,
+                              [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitStorageAtomicExchange32x2([[maybe_unused]] EmitContext& ctx,
+                                   [[maybe_unused]] IR::Inst& inst,
+                                   [[maybe_unused]] const IR::Value& binding,
+                                   [[maybe_unused]] ScalarU32 offset,
+                                   [[maybe_unused]] Register value) {
+    throw NotImplementedException("GLASM instruction");
+}
+
 void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              ScalarU32 offset, ScalarF32 value) {
     Atom(ctx, inst, binding, offset, value, "ADD", "F32");

@@ -537,6 +600,50 @@ void EmitGlobalAtomicExchange64(EmitContext&) {
     throw NotImplementedException("GLASM instruction");
 }
 
+void EmitGlobalAtomicIAdd32x2(EmitContext&) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitGlobalAtomicSMin32x2(EmitContext&) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitGlobalAtomicUMin32x2(EmitContext&) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitGlobalAtomicSMax32x2(EmitContext&) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitGlobalAtomicUMax32x2(EmitContext&) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitGlobalAtomicInc32x2(EmitContext&) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitGlobalAtomicDec32x2(EmitContext&) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitGlobalAtomicAnd32x2(EmitContext&) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitGlobalAtomicOr32x2(EmitContext&) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitGlobalAtomicXor32x2(EmitContext&) {
+    throw NotImplementedException("GLASM instruction");
+}
+
+void EmitGlobalAtomicExchange32x2(EmitContext&) {
+    throw NotImplementedException("GLASM instruction");
+}
+
 void EmitGlobalAtomicAddF32(EmitContext&) {
     throw NotImplementedException("GLASM instruction");
 }

@@ -105,6 +105,13 @@ void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_vi
             pointer_offset, value, pointer_offset, value);
 }
 
+void EmitSharedAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
+                                  std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
+    ctx.AddU32x2("{}=uvec2(smem[{}>>2],smem[({}+4)>>2]);", inst, pointer_offset, pointer_offset);
+    ctx.Add("smem[{}>>2]={}.x;smem[({}+4)>>2]={}.y;", pointer_offset, value, pointer_offset, value);
+}
+
 void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
     ctx.AddU32("{}=atomicAdd({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(),

@@ -265,6 +272,97 @@ void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Val
                ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
 }
 
+void EmitStorageAtomicIAdd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
+    ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
+                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
+                 ctx.var_alloc.Consume(offset));
+    ctx.Add("{}_ssbo{}[{}>>2]+={}.x;{}_ssbo{}[({}>>2)+1]+={}.y;", ctx.stage_name, binding.U32(),
+            ctx.var_alloc.Consume(offset), value, ctx.stage_name, binding.U32(),
+            ctx.var_alloc.Consume(offset), value);
+}
+
+void EmitStorageAtomicSMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
+    ctx.AddU32x2("{}=ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
+                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
+                 ctx.var_alloc.Consume(offset));
+    ctx.Add("for(int "
+            "i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=uint(min(int({}_ssbo{}[({}>>2)+i]),int({}[i])));}}",
+            ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
+            binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
+
+void EmitStorageAtomicUMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
+    ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
+                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
+                 ctx.var_alloc.Consume(offset));
+    ctx.Add("for(int i=0;i<2;++i){{ "
+            "{}_ssbo{}[({}>>2)+i]=min({}_ssbo{}[({}>>2)+i],{}[i]);}}",
+            ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
+            binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
+
+void EmitStorageAtomicSMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
+    ctx.AddU32x2("{}=ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
+                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
+                 ctx.var_alloc.Consume(offset));
+    ctx.Add("for(int "
+            "i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=uint(max(int({}_ssbo{}[({}>>2)+i]),int({}[i])));}}",
+            ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
+            binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
+
+void EmitStorageAtomicUMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
+    ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
+                 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
+                 ctx.var_alloc.Consume(offset));
+    ctx.Add("for(int i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=max({}_ssbo{}[({}>>2)+i],{}[i]);}}",
+            ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
+            binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
+
+void EmitStorageAtomicAnd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                              const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
+    ctx.AddU32x2("{}=uvec2(atomicAnd({}_ssbo{}[{}>>2],{}.x),atomicAnd({}_ssbo{}[({}>>2)+1],{}.y));",
+                 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
+                 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
+
+void EmitStorageAtomicOr32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                             const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
+    ctx.AddU32x2("{}=uvec2(atomicOr({}_ssbo{}[{}>>2],{}.x),atomicOr({}_ssbo{}[({}>>2)+1],{}.y));",
+                 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
+                 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
+
+void EmitStorageAtomicXor32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                              const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
+    ctx.AddU32x2("{}=uvec2(atomicXor({}_ssbo{}[{}>>2],{}.x),atomicXor({}_ssbo{}[({}>>2)+1],{}.y));",
+                 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
+                 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
+
+void EmitStorageAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                                   const IR::Value& offset, std::string_view value) {
+    LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
+    ctx.AddU32x2("{}=uvec2(atomicExchange({}_ssbo{}[{}>>2],{}.x),atomicExchange({}_ssbo{}[({}>>2)+"
+                 "1],{}.y));",
+                 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
+                 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
+}
+
 void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value) {
     SsboCasFunctionF32(ctx, inst, binding, offset, value, "CasFloatAdd");

@@ -388,6 +486,50 @@ void EmitGlobalAtomicExchange64(EmitContext&) {
     throw NotImplementedException("GLSL Instruction");
 }
 
+void EmitGlobalAtomicIAdd32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicSMin32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicUMin32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicSMax32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicUMax32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicInc32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicDec32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicAnd32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicOr32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicXor32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitGlobalAtomicExchange32x2(EmitContext&) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
 void EmitGlobalAtomicAddF32(EmitContext&) {
     throw NotImplementedException("GLSL Instruction");
 }

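For reference, a sketch of how the fmt-style format strings in the GLSL fallbacks above expand: the `{}` placeholders receive the stage name, SSBO binding index, offset expression, and value variable, while `{{`/`}}` escape literal braces in the generated GLSL. The names gs, off, and val below are invented for illustration:

#include <fmt/format.h>

#include <iostream>
#include <string>

int main() {
    // Mirrors the ctx.Add(...) call in EmitStorageAtomicIAdd32x2 above, with
    // hypothetical stage name "gs", binding 0, offset "off", and value "val".
    const std::string glsl =
        fmt::format("{}_ssbo{}[{}>>2]+={}.x;{}_ssbo{}[({}>>2)+1]+={}.y;",
                    "gs", 0, "off", "val", "gs", 0, "off", "val");
    std::cout << glsl << '\n';
    // Prints: gs_ssbo0[off>>2]+=val.x;gs_ssbo0[(off>>2)+1]+=val.y;
}

The two `+=` statements are separate, non-atomic buffer writes, which is exactly the caveat the LOG_WARNING calls flag.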
@@ -442,6 +442,8 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_vi
                                 std::string_view value);
 void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
                                 std::string_view value);
+void EmitSharedAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
+                                  std::string_view value);
 void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value);
 void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,

@@ -482,6 +484,24 @@ void EmitStorageAtomicXor64(EmitContext& ctx, IR::Inst& inst, const IR::Value& b
                             const IR::Value& offset, std::string_view value);
 void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                                  const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicIAdd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicSMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicUMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicSMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicUMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                               const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicAnd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                              const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicOr32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                             const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicXor32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                              const IR::Value& offset, std::string_view value);
+void EmitStorageAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
+                                   const IR::Value& offset, std::string_view value);
 void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                              const IR::Value& offset, std::string_view value);
 void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,

@@ -518,6 +538,17 @@ void EmitGlobalAtomicAnd64(EmitContext& ctx);
 void EmitGlobalAtomicOr64(EmitContext& ctx);
 void EmitGlobalAtomicXor64(EmitContext& ctx);
 void EmitGlobalAtomicExchange64(EmitContext& ctx);
+void EmitGlobalAtomicIAdd32x2(EmitContext& ctx);
+void EmitGlobalAtomicSMin32x2(EmitContext& ctx);
+void EmitGlobalAtomicUMin32x2(EmitContext& ctx);
+void EmitGlobalAtomicSMax32x2(EmitContext& ctx);
+void EmitGlobalAtomicUMax32x2(EmitContext& ctx);
+void EmitGlobalAtomicInc32x2(EmitContext& ctx);
+void EmitGlobalAtomicDec32x2(EmitContext& ctx);
+void EmitGlobalAtomicAnd32x2(EmitContext& ctx);
+void EmitGlobalAtomicOr32x2(EmitContext& ctx);
+void EmitGlobalAtomicXor32x2(EmitContext& ctx);
+void EmitGlobalAtomicExchange32x2(EmitContext& ctx);
 void EmitGlobalAtomicAddF32(EmitContext& ctx);
 void EmitGlobalAtomicAddF16x2(EmitContext& ctx);
 void EmitGlobalAtomicAddF32x2(EmitContext& ctx);

@@ -82,6 +82,17 @@ Id StorageAtomicU64(EmitContext& ctx, const IR::Value& binding, const IR::Value&
     ctx.OpStore(pointer, ctx.OpBitcast(ctx.U32[2], result));
     return original_value;
 }
+
+Id StorageAtomicU32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id value,
+                      Id (Sirit::Module::*non_atomic_func)(Id, Id, Id)) {
+    LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
+    const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
+                                    binding, offset, sizeof(u32[2]))};
+    const Id original_value{ctx.OpLoad(ctx.U32[2], pointer)};
+    const Id result{(ctx.*non_atomic_func)(ctx.U32[2], value, original_value)};
+    ctx.OpStore(pointer, result);
+    return original_value;
+}
 } // Anonymous namespace
 
 Id EmitSharedAtomicIAdd32(EmitContext& ctx, Id offset, Id value) {
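The `Id (Sirit::Module::*non_atomic_func)(Id, Id, Id)` parameter of StorageAtomicU32x2 is a pointer to a member function of the SPIR-V builder, which lets one helper serve OpIAdd, OpSMin, OpUMin, and the rest. A self-contained sketch of the idiom; Builder and Apply are toy stand-ins, not the Sirit API:

// Toy stand-ins for Sirit::Module and Id; only the call syntax is the point.
struct Builder {
    int OpIAdd(int type, int a, int b) { return a + b; }
    int OpUMin(int type, int a, int b) { return a < b ? a : b; }
};

using BinaryOp = int (Builder::*)(int, int, int);

// Same call shape as (ctx.*non_atomic_func)(ctx.U32[2], value, original_value).
int Apply(Builder& builder, BinaryOp op, int type, int lhs, int rhs) {
    return (builder.*op)(type, lhs, rhs);
}

// Usage: Apply(builder, &Builder::OpIAdd, 0, 1, 2) yields 3.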
@@ -141,7 +152,7 @@ Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
         const auto [scope, semantics]{AtomicArgs(ctx)};
         return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
     }
-    LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
+    LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
     const Id pointer_1{SharedPointer(ctx, offset, 0)};
     const Id pointer_2{SharedPointer(ctx, offset, 1)};
     const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};

@@ -152,6 +163,18 @@ Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
     return ctx.OpBitcast(ctx.U64, ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2));
 }
 
+Id EmitSharedAtomicExchange32x2(EmitContext& ctx, Id offset, Id value) {
+    LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
+    const Id pointer_1{SharedPointer(ctx, offset, 0)};
+    const Id pointer_2{SharedPointer(ctx, offset, 1)};
+    const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
+    const Id value_2{ctx.OpLoad(ctx.U32[1], pointer_2)};
+    const Id new_vector{ctx.OpBitcast(ctx.U32[2], value)};
+    ctx.OpStore(pointer_1, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 0U));
+    ctx.OpStore(pointer_2, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 1U));
+    return ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2);
+}
+
 Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value) {
     return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicIAdd);

@@ -275,6 +298,56 @@ Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const
     return original;
 }
 
+Id EmitStorageAtomicIAdd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                             Id value) {
+    return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpIAdd);
+}
+
+Id EmitStorageAtomicSMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                             Id value) {
+    return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpSMin);
+}
+
+Id EmitStorageAtomicUMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                             Id value) {
+    return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpUMin);
+}
+
+Id EmitStorageAtomicSMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                             Id value) {
+    return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpSMax);
+}
+
+Id EmitStorageAtomicUMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                             Id value) {
+    return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpUMax);
+}
+
+Id EmitStorageAtomicAnd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                            Id value) {
+    return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpBitwiseAnd);
+}
+
+Id EmitStorageAtomicOr32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                           Id value) {
+    return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpBitwiseOr);
+}
+
+Id EmitStorageAtomicXor32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                            Id value) {
+    return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpBitwiseXor);
+}
+
+Id EmitStorageAtomicExchange32x2(EmitContext& ctx, const IR::Value& binding,
+                                 const IR::Value& offset, Id value) {
+    LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
+    const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
+                                    binding, offset, sizeof(u32[2]))};
+    const Id original{ctx.OpLoad(ctx.U32[2], pointer)};
+    ctx.OpStore(pointer, value);
+    return original;
+}
+
 Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value) {
     const Id ssbo{ctx.ssbos[binding.U32()].U32};

@@ -418,6 +491,50 @@ Id EmitGlobalAtomicExchange64(EmitContext&) {
     throw NotImplementedException("SPIR-V Instruction");
 }
 
+Id EmitGlobalAtomicIAdd32x2(EmitContext&) {
+    throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicSMin32x2(EmitContext&) {
+    throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicUMin32x2(EmitContext&) {
+    throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicSMax32x2(EmitContext&) {
+    throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicUMax32x2(EmitContext&) {
+    throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicInc32x2(EmitContext&) {
+    throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicDec32x2(EmitContext&) {
+    throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicAnd32x2(EmitContext&) {
+    throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicOr32x2(EmitContext&) {
+    throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicXor32x2(EmitContext&) {
+    throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitGlobalAtomicExchange32x2(EmitContext&) {
+    throw NotImplementedException("SPIR-V Instruction");
+}
+
 Id EmitGlobalAtomicAddF32(EmitContext&) {
     throw NotImplementedException("SPIR-V Instruction");
 }

@@ -335,6 +335,7 @@ Id EmitSharedAtomicOr32(EmitContext& ctx, Id pointer_offset, Id value);
 Id EmitSharedAtomicXor32(EmitContext& ctx, Id pointer_offset, Id value);
 Id EmitSharedAtomicExchange32(EmitContext& ctx, Id pointer_offset, Id value);
 Id EmitSharedAtomicExchange64(EmitContext& ctx, Id pointer_offset, Id value);
+Id EmitSharedAtomicExchange32x2(EmitContext& ctx, Id pointer_offset, Id value);
 Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value);
 Id EmitStorageAtomicSMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,

@@ -375,6 +376,24 @@ Id EmitStorageAtomicXor64(EmitContext& ctx, const IR::Value& binding, const IR::
                           Id value);
 Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                                Id value);
+Id EmitStorageAtomicIAdd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                             Id value);
+Id EmitStorageAtomicSMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                             Id value);
+Id EmitStorageAtomicUMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                             Id value);
+Id EmitStorageAtomicSMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                             Id value);
+Id EmitStorageAtomicUMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                             Id value);
+Id EmitStorageAtomicAnd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                            Id value);
+Id EmitStorageAtomicOr32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                           Id value);
+Id EmitStorageAtomicXor32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                            Id value);
+Id EmitStorageAtomicExchange32x2(EmitContext& ctx, const IR::Value& binding,
+                                 const IR::Value& offset, Id value);
 Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                            Id value);
 Id EmitStorageAtomicAddF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,

@@ -411,6 +430,17 @@ Id EmitGlobalAtomicAnd64(EmitContext& ctx);
 Id EmitGlobalAtomicOr64(EmitContext& ctx);
 Id EmitGlobalAtomicXor64(EmitContext& ctx);
 Id EmitGlobalAtomicExchange64(EmitContext& ctx);
+Id EmitGlobalAtomicIAdd32x2(EmitContext& ctx);
+Id EmitGlobalAtomicSMin32x2(EmitContext& ctx);
+Id EmitGlobalAtomicUMin32x2(EmitContext& ctx);
+Id EmitGlobalAtomicSMax32x2(EmitContext& ctx);
+Id EmitGlobalAtomicUMax32x2(EmitContext& ctx);
+Id EmitGlobalAtomicInc32x2(EmitContext& ctx);
+Id EmitGlobalAtomicDec32x2(EmitContext& ctx);
+Id EmitGlobalAtomicAnd32x2(EmitContext& ctx);
+Id EmitGlobalAtomicOr32x2(EmitContext& ctx);
+Id EmitGlobalAtomicXor32x2(EmitContext& ctx);
+Id EmitGlobalAtomicExchange32x2(EmitContext& ctx);
 Id EmitGlobalAtomicAddF32(EmitContext& ctx);
 Id EmitGlobalAtomicAddF16x2(EmitContext& ctx);
 Id EmitGlobalAtomicAddF32x2(EmitContext& ctx);

@@ -118,6 +118,7 @@ bool Inst::MayHaveSideEffects() const noexcept {
     case Opcode::SharedAtomicXor32:
    case Opcode::SharedAtomicExchange32:
     case Opcode::SharedAtomicExchange64:
+    case Opcode::SharedAtomicExchange32x2:
     case Opcode::GlobalAtomicIAdd32:
     case Opcode::GlobalAtomicSMin32:
     case Opcode::GlobalAtomicUMin32:

@@ -138,6 +139,15 @@ bool Inst::MayHaveSideEffects() const noexcept {
     case Opcode::GlobalAtomicOr64:
     case Opcode::GlobalAtomicXor64:
     case Opcode::GlobalAtomicExchange64:
+    case Opcode::GlobalAtomicIAdd32x2:
+    case Opcode::GlobalAtomicSMin32x2:
+    case Opcode::GlobalAtomicUMin32x2:
+    case Opcode::GlobalAtomicSMax32x2:
+    case Opcode::GlobalAtomicUMax32x2:
+    case Opcode::GlobalAtomicAnd32x2:
+    case Opcode::GlobalAtomicOr32x2:
+    case Opcode::GlobalAtomicXor32x2:
+    case Opcode::GlobalAtomicExchange32x2:
     case Opcode::GlobalAtomicAddF32:
     case Opcode::GlobalAtomicAddF16x2:
     case Opcode::GlobalAtomicAddF32x2:

@@ -165,6 +175,15 @@ bool Inst::MayHaveSideEffects() const noexcept {
     case Opcode::StorageAtomicOr64:
     case Opcode::StorageAtomicXor64:
     case Opcode::StorageAtomicExchange64:
+    case Opcode::StorageAtomicIAdd32x2:
+    case Opcode::StorageAtomicSMin32x2:
+    case Opcode::StorageAtomicUMin32x2:
+    case Opcode::StorageAtomicSMax32x2:
+    case Opcode::StorageAtomicUMax32x2:
+    case Opcode::StorageAtomicAnd32x2:
+    case Opcode::StorageAtomicOr32x2:
+    case Opcode::StorageAtomicXor32x2:
+    case Opcode::StorageAtomicExchange32x2:
     case Opcode::StorageAtomicAddF32:
     case Opcode::StorageAtomicAddF16x2:
     case Opcode::StorageAtomicAddF32x2:

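Listing the new opcodes in MayHaveSideEffects() is what keeps dead-code elimination from dropping an atomic whose return value happens to be unused: the instruction still writes memory. A hedged sketch of that check with simplified stand-in types, not the real IR classes:

// Simplified stand-ins; the real pass walks IR blocks and queries Inst.
struct Inst {
    bool has_uses;               // is the result consumed anywhere?
    bool may_have_side_effects;  // true for the atomic opcodes listed above
};

// An instruction is only removable when nothing reads its result AND it
// cannot affect memory or other observable state.
bool CanBeEliminated(const Inst& inst) {
    return !inst.has_uses && !inst.may_have_side_effects;
}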
@@ -341,6 +341,7 @@ OPCODE(SharedAtomicOr32, U32, U32,
 OPCODE(SharedAtomicXor32,           U32,        U32,        U32,        )
 OPCODE(SharedAtomicExchange32,      U32,        U32,        U32,        )
 OPCODE(SharedAtomicExchange64,      U64,        U32,        U64,        )
+OPCODE(SharedAtomicExchange32x2,    U32x2,      U32,        U32x2,      )
 
 OPCODE(GlobalAtomicIAdd32,          U32,        U64,        U32,        )
 OPCODE(GlobalAtomicSMin32,          U32,        U64,        U32,        )

@@ -362,6 +363,15 @@ OPCODE(GlobalAtomicAnd64, U64, U64,
 OPCODE(GlobalAtomicOr64,            U64,        U64,        U64,        )
 OPCODE(GlobalAtomicXor64,           U64,        U64,        U64,        )
 OPCODE(GlobalAtomicExchange64,      U64,        U64,        U64,        )
+OPCODE(GlobalAtomicIAdd32x2,        U32x2,      U32x2,      U32x2,      )
+OPCODE(GlobalAtomicSMin32x2,        U32x2,      U32x2,      U32x2,      )
+OPCODE(GlobalAtomicUMin32x2,        U32x2,      U32x2,      U32x2,      )
+OPCODE(GlobalAtomicSMax32x2,        U32x2,      U32x2,      U32x2,      )
+OPCODE(GlobalAtomicUMax32x2,        U32x2,      U32x2,      U32x2,      )
+OPCODE(GlobalAtomicAnd32x2,         U32x2,      U32x2,      U32x2,      )
+OPCODE(GlobalAtomicOr32x2,          U32x2,      U32x2,      U32x2,      )
+OPCODE(GlobalAtomicXor32x2,         U32x2,      U32x2,      U32x2,      )
+OPCODE(GlobalAtomicExchange32x2,    U32x2,      U32x2,      U32x2,      )
 OPCODE(GlobalAtomicAddF32,          F32,        U64,        F32,        )
 OPCODE(GlobalAtomicAddF16x2,        U32,        U64,        F16x2,      )
 OPCODE(GlobalAtomicAddF32x2,        U32,        U64,        F32x2,      )

@@ -390,6 +400,15 @@ OPCODE(StorageAtomicAnd64, U64, U32,
 OPCODE(StorageAtomicOr64,           U64,        U32,        U32,        U64,        )
 OPCODE(StorageAtomicXor64,          U64,        U32,        U32,        U64,        )
 OPCODE(StorageAtomicExchange64,     U64,        U32,        U32,        U64,        )
+OPCODE(StorageAtomicIAdd32x2,       U32x2,      U32,        U32,        U32x2,      )
+OPCODE(StorageAtomicSMin32x2,       U32x2,      U32,        U32,        U32x2,      )
+OPCODE(StorageAtomicUMin32x2,       U32x2,      U32,        U32,        U32x2,      )
+OPCODE(StorageAtomicSMax32x2,       U32x2,      U32,        U32,        U32x2,      )
+OPCODE(StorageAtomicUMax32x2,       U32x2,      U32,        U32,        U32x2,      )
+OPCODE(StorageAtomicAnd32x2,        U32x2,      U32,        U32,        U32x2,      )
+OPCODE(StorageAtomicOr32x2,         U32x2,      U32,        U32,        U32x2,      )
+OPCODE(StorageAtomicXor32x2,        U32x2,      U32,        U32,        U32x2,      )
+OPCODE(StorageAtomicExchange32x2,   U32x2,      U32,        U32,        U32x2,      )
 OPCODE(StorageAtomicAddF32,         F32,        U32,        U32,        F32,        )
 OPCODE(StorageAtomicAddF16x2,       U32,        U32,        U32,        F16x2,      )
 OPCODE(StorageAtomicAddF32x2,       U32,        U32,        U32,        F32x2,      )

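The OPCODE rows above follow the usual X-macro table pattern: each row carries the opcode name, its result type, then its operand types (for the storage forms: binding, offset, value). A minimal sketch of the pattern, assuming a simplified macro rather than the project's real one:

// Each V(...) row carries the opcode name, result type, then operand types.
#define OPCODE_LIST(V)                                                         \
    V(SharedAtomicExchange64, U64, U32, U64)                                   \
    V(SharedAtomicExchange32x2, U32x2, U32, U32x2)

#define AS_ENUM(name, ...) name,
enum class Opcode {
    OPCODE_LIST(AS_ENUM)
};
#undef AS_ENUM
#undef OPCODE_LIST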
@@ -360,6 +360,15 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     case IR::Opcode::GlobalAtomicOr64:
     case IR::Opcode::GlobalAtomicXor64:
     case IR::Opcode::GlobalAtomicExchange64:
+    case IR::Opcode::GlobalAtomicIAdd32x2:
+    case IR::Opcode::GlobalAtomicSMin32x2:
+    case IR::Opcode::GlobalAtomicUMin32x2:
+    case IR::Opcode::GlobalAtomicSMax32x2:
+    case IR::Opcode::GlobalAtomicUMax32x2:
+    case IR::Opcode::GlobalAtomicAnd32x2:
+    case IR::Opcode::GlobalAtomicOr32x2:
+    case IR::Opcode::GlobalAtomicXor32x2:
+    case IR::Opcode::GlobalAtomicExchange32x2:
     case IR::Opcode::GlobalAtomicAddF32:
     case IR::Opcode::GlobalAtomicAddF16x2:
     case IR::Opcode::GlobalAtomicAddF32x2:

@@ -597,6 +606,15 @@ void VisitUsages(Info& info, IR::Inst& inst) {
         break;
     case IR::Opcode::LoadStorage64:
     case IR::Opcode::WriteStorage64:
+    case IR::Opcode::StorageAtomicIAdd32x2:
+    case IR::Opcode::StorageAtomicSMin32x2:
+    case IR::Opcode::StorageAtomicUMin32x2:
+    case IR::Opcode::StorageAtomicSMax32x2:
+    case IR::Opcode::StorageAtomicUMax32x2:
+    case IR::Opcode::StorageAtomicAnd32x2:
+    case IR::Opcode::StorageAtomicOr32x2:
+    case IR::Opcode::StorageAtomicXor32x2:
+    case IR::Opcode::StorageAtomicExchange32x2:
         info.used_storage_buffer_types |= IR::Type::U32x2;
         break;
     case IR::Opcode::LoadStorage128:

@@ -92,6 +92,15 @@ bool IsGlobalMemory(const IR::Inst& inst) {
     case IR::Opcode::GlobalAtomicOr64:
     case IR::Opcode::GlobalAtomicXor64:
     case IR::Opcode::GlobalAtomicExchange64:
+    case IR::Opcode::GlobalAtomicIAdd32x2:
+    case IR::Opcode::GlobalAtomicSMin32x2:
+    case IR::Opcode::GlobalAtomicUMin32x2:
+    case IR::Opcode::GlobalAtomicSMax32x2:
+    case IR::Opcode::GlobalAtomicUMax32x2:
+    case IR::Opcode::GlobalAtomicAnd32x2:
+    case IR::Opcode::GlobalAtomicOr32x2:
+    case IR::Opcode::GlobalAtomicXor32x2:
+    case IR::Opcode::GlobalAtomicExchange32x2:
     case IR::Opcode::GlobalAtomicAddF32:
     case IR::Opcode::GlobalAtomicAddF16x2:
     case IR::Opcode::GlobalAtomicAddF32x2:

@@ -135,6 +144,15 @@ bool IsGlobalMemoryWrite(const IR::Inst& inst) {
     case IR::Opcode::GlobalAtomicOr64:
     case IR::Opcode::GlobalAtomicXor64:
     case IR::Opcode::GlobalAtomicExchange64:
+    case IR::Opcode::GlobalAtomicIAdd32x2:
+    case IR::Opcode::GlobalAtomicSMin32x2:
+    case IR::Opcode::GlobalAtomicUMin32x2:
+    case IR::Opcode::GlobalAtomicSMax32x2:
+    case IR::Opcode::GlobalAtomicUMax32x2:
+    case IR::Opcode::GlobalAtomicAnd32x2:
+    case IR::Opcode::GlobalAtomicOr32x2:
+    case IR::Opcode::GlobalAtomicXor32x2:
+    case IR::Opcode::GlobalAtomicExchange32x2:
     case IR::Opcode::GlobalAtomicAddF32:
     case IR::Opcode::GlobalAtomicAddF16x2:
     case IR::Opcode::GlobalAtomicAddF32x2:

@@ -199,6 +217,8 @@ IR::Opcode GlobalToStorage(IR::Opcode opcode) {
         return IR::Opcode::StorageAtomicOr32;
     case IR::Opcode::GlobalAtomicXor32:
         return IR::Opcode::StorageAtomicXor32;
+    case IR::Opcode::GlobalAtomicExchange32:
+        return IR::Opcode::StorageAtomicExchange32;
     case IR::Opcode::GlobalAtomicIAdd64:
         return IR::Opcode::StorageAtomicIAdd64;
     case IR::Opcode::GlobalAtomicSMin64:

@@ -215,10 +235,26 @@ IR::Opcode GlobalToStorage(IR::Opcode opcode) {
         return IR::Opcode::StorageAtomicOr64;
     case IR::Opcode::GlobalAtomicXor64:
         return IR::Opcode::StorageAtomicXor64;
-    case IR::Opcode::GlobalAtomicExchange32:
-        return IR::Opcode::StorageAtomicExchange32;
     case IR::Opcode::GlobalAtomicExchange64:
         return IR::Opcode::StorageAtomicExchange64;
+    case IR::Opcode::GlobalAtomicIAdd32x2:
+        return IR::Opcode::StorageAtomicIAdd32x2;
+    case IR::Opcode::GlobalAtomicSMin32x2:
+        return IR::Opcode::StorageAtomicSMin32x2;
+    case IR::Opcode::GlobalAtomicUMin32x2:
+        return IR::Opcode::StorageAtomicUMin32x2;
+    case IR::Opcode::GlobalAtomicSMax32x2:
+        return IR::Opcode::StorageAtomicSMax32x2;
+    case IR::Opcode::GlobalAtomicUMax32x2:
+        return IR::Opcode::StorageAtomicUMax32x2;
+    case IR::Opcode::GlobalAtomicAnd32x2:
+        return IR::Opcode::StorageAtomicAnd32x2;
+    case IR::Opcode::GlobalAtomicOr32x2:
+        return IR::Opcode::StorageAtomicOr32x2;
+    case IR::Opcode::GlobalAtomicXor32x2:
+        return IR::Opcode::StorageAtomicXor32x2;
+    case IR::Opcode::GlobalAtomicExchange32x2:
+        return IR::Opcode::StorageAtomicExchange32x2;
     case IR::Opcode::GlobalAtomicAddF32:
         return IR::Opcode::StorageAtomicAddF32;
     case IR::Opcode::GlobalAtomicAddF16x2:

@@ -454,6 +490,15 @@ void Replace(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
     case IR::Opcode::GlobalAtomicOr64:
     case IR::Opcode::GlobalAtomicXor64:
     case IR::Opcode::GlobalAtomicExchange64:
+    case IR::Opcode::GlobalAtomicIAdd32x2:
+    case IR::Opcode::GlobalAtomicSMin32x2:
+    case IR::Opcode::GlobalAtomicUMin32x2:
+    case IR::Opcode::GlobalAtomicSMax32x2:
+    case IR::Opcode::GlobalAtomicUMax32x2:
+    case IR::Opcode::GlobalAtomicAnd32x2:
+    case IR::Opcode::GlobalAtomicOr32x2:
+    case IR::Opcode::GlobalAtomicXor32x2:
+    case IR::Opcode::GlobalAtomicExchange32x2:
     case IR::Opcode::GlobalAtomicAddF32:
     case IR::Opcode::GlobalAtomicAddF16x2:
     case IR::Opcode::GlobalAtomicAddF32x2:

@@ -199,6 +199,26 @@ void Lower(IR::Block& block, IR::Inst& inst) {
         return ShiftRightLogical64To32(block, inst);
     case IR::Opcode::ShiftRightArithmetic64:
         return ShiftRightArithmetic64To32(block, inst);
+    case IR::Opcode::SharedAtomicExchange64:
+        return inst.ReplaceOpcode(IR::Opcode::SharedAtomicExchange32x2);
+    case IR::Opcode::GlobalAtomicIAdd64:
+        return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicIAdd32x2);
+    case IR::Opcode::GlobalAtomicSMin64:
+        return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicSMin32x2);
+    case IR::Opcode::GlobalAtomicUMin64:
+        return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicUMin32x2);
+    case IR::Opcode::GlobalAtomicSMax64:
+        return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicSMax32x2);
+    case IR::Opcode::GlobalAtomicUMax64:
+        return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicUMax32x2);
+    case IR::Opcode::GlobalAtomicAnd64:
+        return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicAnd32x2);
+    case IR::Opcode::GlobalAtomicOr64:
+        return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicOr32x2);
+    case IR::Opcode::GlobalAtomicXor64:
+        return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicXor32x2);
+    case IR::Opcode::GlobalAtomicExchange64:
+        return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicExchange32x2);
     default:
         break;
     }
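The lowering pass above is pure opcode substitution: with int64 support absent, each 64-bit atomic is retagged to its 32x2 twin, and the backend emitters shown earlier take it from there as a pair of 32-bit words. A simplified sketch with stand-in IR types, not the real classes:

// Stand-ins for the real IR classes; ReplaceOpcode in the actual pass also
// keeps the instruction's operands and uses intact.
enum class Opcode {
    SharedAtomicExchange64,
    SharedAtomicExchange32x2,
};

struct Inst {
    Opcode op;
    void ReplaceOpcode(Opcode new_op) { op = new_op; }
};

void Lower(Inst& inst) {
    switch (inst.op) {
    case Opcode::SharedAtomicExchange64:
        return inst.ReplaceOpcode(Opcode::SharedAtomicExchange32x2);
    default:
        break;
    }
}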