Implement Fast Paths for most A32 SIMD instructions (#952)

* Begin work on A32 SIMD Intrinsics

* More instructions, some cleanup.

* Intrinsics for Move instructions (zip etc)

These pass the existing tests.

* Intrinsics for some of the Cvt instructions

While doing this I noticed that the conversion for int/fp was incorrect
in the slow path. I'll fix this in the original repo.

* Intrinsics for more Arithmetic instructions.

* Intrinsics for Vext

* Fix VEXT Intrinsic for double words.

* Use InsertPs to move scalar values.
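
A minimal sketch of what this looks like with the emitter API used throughout this diff (the wrapper name here is illustrative, not from the change itself). INSERTPS encodes the destination lane in bits [5:4] of its immediate, so element 0 of `value` lands in lane `index` of `target`:

    // Hypothetical wrapper: insert element 0 of `value` into single-precision lane `index` of `target`.
    // INSERTPS imm8: bits [7:6] = source lane, [5:4] = destination lane, [3:0] = zero mask.
    private static Operand EmitInsertPs(ArmEmitterContext context, Operand target, Operand value, int index)
    {
        return context.AddIntrinsic(Intrinsic.X86Insertps, target, value, Const(index << 4));
    }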

* Cleanup; fix VPADD.F32 and signed-integer VMIN.
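
A sketch of the SSE2 pairwise-float pattern this relies on, following the EmitSse2VectorPairwiseOpF32 helper later in the diff (shown here specialized to VPADD.F32 with X86Addps):

    // Interleave the low doublewords of n and m: { n0, m0, n1, m1 }.
    Operand unpck = context.AddIntrinsic(Intrinsic.X86Unpcklps, n, m);
    // Pull the high half down: { n1, m1, n1, m1 }.
    Operand high = context.AddIntrinsic(Intrinsic.X86Movhlps, unpck, unpck);
    // A lane-wise add then gives the pairwise sums: { n0 + n1, m0 + m1, ... }.
    Operand res = context.AddIntrinsic(Intrinsic.X86Addps, unpck, high);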

* Cleanup, add SSE2 support for scalar insert.

Works similarly to the IR scalar insert, but obviously this one works
directly on V128.
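
A rough sketch of that SSE2 fallback, in terms of the EmitSwapScalar and X86Movss pieces this change adds (single-precision case; the SSE4.1 path uses INSERTPS instead):

    // Bring lane `index` of the destination vector down to element 0,
    // overwrite element 0 with the new scalar, then restore the original lane order.
    target = EmitSwapScalar(context, target, index, doubleWidth: false);
    target = context.AddIntrinsic(Intrinsic.X86Movss, target, value);
    return EmitSwapScalar(context, target, index, doubleWidth: false);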

* Minor cleanup.

* Enable intrinsic for FP64 to integer conversion.

* Address feedback apart from splitting out intrinsic float abs

Also: treat bad VREV encodings as undefined rather than throwing during translation.

* Move float abs to a helper; fix a bug with cvt.

* Rename opc2 & opc3 to match the A32 docs; use ArgumentOutOfRangeException where appropriate.

* Get the variable name at compile time rather than via a string literal.
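
That is, use nameof so the exception message tracks renames at compile time, as the new RMToRoundMode helper in this diff does:

    default:
        throw new ArgumentOutOfRangeException(nameof(rm));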

* Use correct double sign mask.
jduncanator 2020-03-05 11:41:33 +11:00 committed by GitHub
parent d9ed827696
commit 68e15c1a74
12 changed files with 2077 additions and 400 deletions

View file

@ -52,6 +52,7 @@ namespace ARMeilleure.CodeGen.X86
Add(Intrinsic.X86Divss, new IntrinsicInfo(X86Instruction.Divss, IntrinsicType.Binary));
Add(Intrinsic.X86Haddpd, new IntrinsicInfo(X86Instruction.Haddpd, IntrinsicType.Binary));
Add(Intrinsic.X86Haddps, new IntrinsicInfo(X86Instruction.Haddps, IntrinsicType.Binary));
Add(Intrinsic.X86Insertps, new IntrinsicInfo(X86Instruction.Insertps, IntrinsicType.TernaryImm));
Add(Intrinsic.X86Maxpd, new IntrinsicInfo(X86Instruction.Maxpd, IntrinsicType.Binary));
Add(Intrinsic.X86Maxps, new IntrinsicInfo(X86Instruction.Maxps, IntrinsicType.Binary));
Add(Intrinsic.X86Maxsd, new IntrinsicInfo(X86Instruction.Maxsd, IntrinsicType.Binary));
@ -62,6 +63,7 @@ namespace ARMeilleure.CodeGen.X86
Add(Intrinsic.X86Minss, new IntrinsicInfo(X86Instruction.Minss, IntrinsicType.Binary));
Add(Intrinsic.X86Movhlps, new IntrinsicInfo(X86Instruction.Movhlps, IntrinsicType.Binary));
Add(Intrinsic.X86Movlhps, new IntrinsicInfo(X86Instruction.Movlhps, IntrinsicType.Binary));
Add(Intrinsic.X86Movss, new IntrinsicInfo(X86Instruction.Movss, IntrinsicType.Binary));
Add(Intrinsic.X86Mulpd, new IntrinsicInfo(X86Instruction.Mulpd, IntrinsicType.Binary));
Add(Intrinsic.X86Mulps, new IntrinsicInfo(X86Instruction.Mulps, IntrinsicType.Binary));
Add(Intrinsic.X86Mulsd, new IntrinsicInfo(X86Instruction.Mulsd, IntrinsicType.Binary));

View file

@ -4,6 +4,12 @@
{
public OpCode32SimdRev(InstDescriptor inst, ulong address, int opCode) : base(inst, address, opCode)
{
if (Opc + Size >= 3)
{
Instruction = InstDescriptor.Undefined;
return;
}
// Currently, this instruction is treated as though its opcode were the true size,
// which lets us deal with reversing vectors on a single-element basis (e.g. math magic on an I64 rather than inserting lots of I8s).
int tempSize = Size;

View file

@ -186,9 +186,7 @@ namespace ARMeilleure.Instructions
{
Operand res = context.AddIntrinsic(Intrinsic.X86Subss, GetVec(op.Rn), GetVec(op.Rm));
Operand mask = X86GetScalar(context, -0f);
res = context.AddIntrinsic(Intrinsic.X86Andnps, mask, res);
res = EmitFloatAbs(context, res, true, false);
context.Copy(GetVec(op.Rd), context.VectorZeroUpper96(res));
}
@ -196,9 +194,7 @@ namespace ARMeilleure.Instructions
{
Operand res = context.AddIntrinsic(Intrinsic.X86Subsd, GetVec(op.Rn), GetVec(op.Rm));
Operand mask = X86GetScalar(context, -0d);
res = context.AddIntrinsic(Intrinsic.X86Andnpd, mask, res);
res = EmitFloatAbs(context, res, false, false);
context.Copy(GetVec(op.Rd), context.VectorZeroUpper64(res));
}
@ -226,9 +222,7 @@ namespace ARMeilleure.Instructions
{
Operand res = context.AddIntrinsic(Intrinsic.X86Subps, GetVec(op.Rn), GetVec(op.Rm));
Operand mask = X86GetAllElements(context, -0f);
res = context.AddIntrinsic(Intrinsic.X86Andnps, mask, res);
res = EmitFloatAbs(context, res, true, true);
if (op.RegisterSize == RegisterSize.Simd64)
{
@ -241,9 +235,7 @@ namespace ARMeilleure.Instructions
{
Operand res = context.AddIntrinsic(Intrinsic.X86Subpd, GetVec(op.Rn), GetVec(op.Rm));
Operand mask = X86GetAllElements(context, -0d);
res = context.AddIntrinsic(Intrinsic.X86Andnpd, mask, res);
res = EmitFloatAbs(context, res, false, true);
context.Copy(GetVec(op.Rd), res);
}
@ -267,17 +259,13 @@ namespace ARMeilleure.Instructions
if (op.Size == 0)
{
Operand mask = X86GetScalar(context, -0f);
Operand res = context.AddIntrinsic(Intrinsic.X86Andnps, mask, GetVec(op.Rn));
Operand res = EmitFloatAbs(context, GetVec(op.Rn), true, false);
context.Copy(GetVec(op.Rd), context.VectorZeroUpper96(res));
}
else /* if (op.Size == 1) */
{
Operand mask = X86GetScalar(context, -0d);
Operand res = context.AddIntrinsic(Intrinsic.X86Andnpd, mask, GetVec(op.Rn));
Operand res = EmitFloatAbs(context, GetVec(op.Rn), false, false);
context.Copy(GetVec(op.Rd), context.VectorZeroUpper64(res));
}
@ -299,11 +287,9 @@ namespace ARMeilleure.Instructions
int sizeF = op.Size & 1;
if (sizeF == 0)
if (sizeF == 0)
{
Operand mask = X86GetAllElements(context, -0f);
Operand res = context.AddIntrinsic(Intrinsic.X86Andnps, mask, GetVec(op.Rn));
Operand res = EmitFloatAbs(context, GetVec(op.Rn), true, true);
if (op.RegisterSize == RegisterSize.Simd64)
{
@ -314,9 +300,7 @@ namespace ARMeilleure.Instructions
}
else /* if (sizeF == 1) */
{
Operand mask = X86GetAllElements(context, -0d);
Operand res = context.AddIntrinsic(Intrinsic.X86Andnpd, mask, GetVec(op.Rn));
Operand res = EmitFloatAbs(context, GetVec(op.Rn), false, true);
context.Copy(GetVec(op.Rd), res);
}
@ -3121,7 +3105,7 @@ namespace ARMeilleure.Instructions
context.Copy(GetVec(op.Rd), res);
}
private static Operand EmitSse2VectorIsQNaNOpF(ArmEmitterContext context, Operand opF)
public static Operand EmitSse2VectorIsQNaNOpF(ArmEmitterContext context, Operand opF)
{
IOpCodeSimd op = (IOpCodeSimd)context.CurrOp;

File diff suppressed because it is too large Load diff

View file

@ -5,6 +5,7 @@ using ARMeilleure.Translation;
using System;
using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper32;
using static ARMeilleure.IntermediateRepresentation.OperandHelper;
@ -16,7 +17,14 @@ namespace ARMeilleure.Instructions
{
public static void Vceq_V(ArmEmitterContext context)
{
EmitCmpOpF32(context, SoftFloat32.FPCompareEQFpscr, SoftFloat64.FPCompareEQFpscr, false);
if (Optimizations.FastFP && Optimizations.UseSse2)
{
EmitSse2CmpOpF32(context, CmpCondition.Equal, false);
}
else
{
EmitCmpOpF32(context, SoftFloat32.FPCompareEQFpscr, SoftFloat64.FPCompareEQFpscr, false);
}
}
public static void Vceq_I(ArmEmitterContext context)
@ -30,7 +38,14 @@ namespace ARMeilleure.Instructions
if (op.F)
{
EmitCmpOpF32(context, SoftFloat32.FPCompareEQFpscr, SoftFloat64.FPCompareEQFpscr, true);
if (Optimizations.FastFP && Optimizations.UseSse2)
{
EmitSse2CmpOpF32(context, CmpCondition.Equal, true);
}
else
{
EmitCmpOpF32(context, SoftFloat32.FPCompareEQFpscr, SoftFloat64.FPCompareEQFpscr, true);
}
}
else
{
@ -40,7 +55,14 @@ namespace ARMeilleure.Instructions
public static void Vcge_V(ArmEmitterContext context)
{
EmitCmpOpF32(context, SoftFloat32.FPCompareGEFpscr, SoftFloat64.FPCompareGEFpscr, false);
if (Optimizations.FastFP && Optimizations.UseSse2)
{
EmitSse2CmpOpF32(context, CmpCondition.GreaterThanOrEqual, false);
}
else
{
EmitCmpOpF32(context, SoftFloat32.FPCompareGEFpscr, SoftFloat64.FPCompareGEFpscr, false);
}
}
public static void Vcge_I(ArmEmitterContext context)
@ -56,7 +78,14 @@ namespace ARMeilleure.Instructions
if (op.F)
{
EmitCmpOpF32(context, SoftFloat32.FPCompareGEFpscr, SoftFloat64.FPCompareGEFpscr, true);
if (Optimizations.FastFP && Optimizations.UseSse2)
{
EmitSse2CmpOpF32(context, CmpCondition.GreaterThanOrEqual, true);
}
else
{
EmitCmpOpF32(context, SoftFloat32.FPCompareGEFpscr, SoftFloat64.FPCompareGEFpscr, true);
}
}
else
{
@ -66,7 +95,14 @@ namespace ARMeilleure.Instructions
public static void Vcgt_V(ArmEmitterContext context)
{
EmitCmpOpF32(context, SoftFloat32.FPCompareGTFpscr, SoftFloat64.FPCompareGTFpscr, false);
if (Optimizations.FastFP && Optimizations.UseSse2)
{
EmitSse2CmpOpF32(context, CmpCondition.GreaterThan, false);
}
else
{
EmitCmpOpF32(context, SoftFloat32.FPCompareGTFpscr, SoftFloat64.FPCompareGTFpscr, false);
}
}
public static void Vcgt_I(ArmEmitterContext context)
@ -82,7 +118,14 @@ namespace ARMeilleure.Instructions
if (op.F)
{
EmitCmpOpF32(context, SoftFloat32.FPCompareGTFpscr, SoftFloat64.FPCompareGTFpscr, true);
if (Optimizations.FastFP && Optimizations.UseSse2)
{
EmitSse2CmpOpF32(context, CmpCondition.GreaterThan, true);
}
else
{
EmitCmpOpF32(context, SoftFloat32.FPCompareGTFpscr, SoftFloat64.FPCompareGTFpscr, true);
}
}
else
{
@ -96,7 +139,14 @@ namespace ARMeilleure.Instructions
if (op.F)
{
EmitCmpOpF32(context, SoftFloat32.FPCompareLEFpscr, SoftFloat64.FPCompareLEFpscr, true);
if (Optimizations.FastFP && Optimizations.UseSse2)
{
EmitSse2CmpOpF32(context, CmpCondition.LessThanOrEqual, true);
}
else
{
EmitCmpOpF32(context, SoftFloat32.FPCompareLEFpscr, SoftFloat64.FPCompareLEFpscr, true);
}
}
else
{
@ -110,7 +160,14 @@ namespace ARMeilleure.Instructions
if (op.F)
{
EmitCmpOpF32(context, SoftFloat32.FPCompareLTFpscr, SoftFloat64.FPCompareLTFpscr, true);
if (Optimizations.FastFP && Optimizations.UseSse2)
{
EmitSse2CmpOpF32(context, CmpCondition.LessThan, true);
}
else
{
EmitCmpOpF32(context, SoftFloat32.FPCompareLTFpscr, SoftFloat64.FPCompareLTFpscr, true);
}
}
else
{
@ -224,23 +281,77 @@ namespace ARMeilleure.Instructions
OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;
bool cmpWithZero = (op.Opc & 2) != 0;
int sizeF = op.Size & 1;
if (Optimizations.FastFP && (signalNaNs ? Optimizations.UseAvx : Optimizations.UseSse2))
{
int fSize = op.Size & 1;
OperandType type = fSize != 0 ? OperandType.FP64 : OperandType.FP32;
CmpCondition cmpOrdered = signalNaNs ? CmpCondition.OrderedS : CmpCondition.OrderedQ;
bool doubleSize = sizeF != 0;
int shift = doubleSize ? 1 : 2;
Operand m = GetVecA32(op.Vm >> shift);
Operand n = GetVecA32(op.Vd >> shift);
n = EmitSwapScalar(context, n, op.Vd, doubleSize);
m = cmpWithZero ? context.VectorZero() : EmitSwapScalar(context, m, op.Vm, doubleSize);
Operand lblNaN = Label();
Operand lblEnd = Label();
if (!doubleSize)
{
Operand ordMask = context.AddIntrinsic(Intrinsic.X86Cmpss, n, m, Const((int)cmpOrdered));
Operand isOrdered = context.AddIntrinsicInt(Intrinsic.X86Cvtsi2si, ordMask);
context.BranchIfFalse(lblNaN, isOrdered);
Operand cf = context.AddIntrinsicInt(Intrinsic.X86Comissge, n, m);
Operand zf = context.AddIntrinsicInt(Intrinsic.X86Comisseq, n, m);
Operand nf = context.AddIntrinsicInt(Intrinsic.X86Comisslt, n, m);
EmitSetFPSCRFlags(context, nf, zf, cf, Const(0));
}
else
{
Operand ordMask = context.AddIntrinsic(Intrinsic.X86Cmpsd, n, m, Const((int)cmpOrdered));
Operand isOrdered = context.AddIntrinsicLong(Intrinsic.X86Cvtsi2si, ordMask);
context.BranchIfFalse(lblNaN, isOrdered);
Operand cf = context.AddIntrinsicInt(Intrinsic.X86Comisdge, n, m);
Operand zf = context.AddIntrinsicInt(Intrinsic.X86Comisdeq, n, m);
Operand nf = context.AddIntrinsicInt(Intrinsic.X86Comisdlt, n, m);
EmitSetFPSCRFlags(context, nf, zf, cf, Const(0));
}
context.Branch(lblEnd);
context.MarkLabel(lblNaN);
EmitSetFPSCRFlags(context, Const(3));
context.MarkLabel(lblEnd);
}
else
{
OperandType type = sizeF != 0 ? OperandType.FP64 : OperandType.FP32;
Operand ne = ExtractScalar(context, type, op.Vd);
Operand me;
if (cmpWithZero)
{
me = fSize == 0 ? ConstF(0f) : ConstF(0d);
me = sizeF == 0 ? ConstF(0f) : ConstF(0d);
}
else
{
me = ExtractScalar(context, type, op.Vm);
}
Delegate dlg = fSize != 0
Delegate dlg = sizeF != 0
? (Delegate)new _S32_F64_F64_Bool(SoftFloat64.FPCompare)
: (Delegate)new _S32_F32_F32_Bool(SoftFloat32.FPCompare);
@ -269,5 +380,36 @@ namespace ARMeilleure.Instructions
SetFpFlag(context, FPState.ZFlag, Extract(nzcv, 2));
SetFpFlag(context, FPState.NFlag, Extract(nzcv, 3));
}
private static void EmitSetFPSCRFlags(ArmEmitterContext context, Operand n, Operand z, Operand c, Operand v)
{
SetFpFlag(context, FPState.VFlag, v);
SetFpFlag(context, FPState.CFlag, c);
SetFpFlag(context, FPState.ZFlag, z);
SetFpFlag(context, FPState.NFlag, n);
}
private static void EmitSse2CmpOpF32(ArmEmitterContext context, CmpCondition cond, bool zero)
{
OpCode32Simd op = (OpCode32Simd)context.CurrOp;
int sizeF = op.Size & 1;
Intrinsic inst = (sizeF == 0) ? Intrinsic.X86Cmpps : Intrinsic.X86Cmppd;
if (zero)
{
EmitVectorUnaryOpSimd32(context, (m) =>
{
return context.AddIntrinsic(inst, m, context.VectorZero(), Const((int)cond));
});
}
else
{
EmitVectorBinaryOpSimd32(context, (n, m) =>
{
return context.AddIntrinsic(inst, n, m, Const((int)cond));
});
}
}
}
}

View file

@ -869,7 +869,7 @@ namespace ARMeilleure.Instructions
}
}
private static Operand EmitSse2CvtDoubleToInt64OpF(ArmEmitterContext context, Operand opF, bool scalar)
public static Operand EmitSse2CvtDoubleToInt64OpF(ArmEmitterContext context, Operand opF, bool scalar)
{
Debug.Assert(opF.Type == OperandType.V128);

View file

@ -1,9 +1,11 @@
using ARMeilleure.Decoders;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.State;
using ARMeilleure.Translation;
using System;
using System.Diagnostics;
using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper32;
using static ARMeilleure.IntermediateRepresentation.OperandHelper;
@ -63,21 +65,56 @@ namespace ARMeilleure.Instructions
if (toInteger)
{
EmitVectorUnaryOpF32(context, (op1) =>
if (Optimizations.UseSse41)
{
return EmitSaturateFloatToInt(context, op1, unsigned);
});
EmitSse41ConvertVector32(context, FPRoundingMode.TowardsZero, !unsigned);
}
else
{
EmitVectorUnaryOpF32(context, (op1) =>
{
return EmitSaturateFloatToInt(context, op1, unsigned);
});
}
}
else
{
if (unsigned)
if (Optimizations.UseSse2)
{
EmitVectorUnaryOpZx32(context, (op1) => EmitFPConvert(context, op1, floatSize, false));
}
EmitVectorUnaryOpSimd32(context, (n) =>
{
if (unsigned)
{
Operand mask = X86GetAllElements(context, 0x47800000);
Operand res = context.AddIntrinsic(Intrinsic.X86Psrld, n, Const(16));
res = context.AddIntrinsic(Intrinsic.X86Cvtdq2ps, res);
res = context.AddIntrinsic(Intrinsic.X86Mulps, res, mask);
Operand res2 = context.AddIntrinsic(Intrinsic.X86Pslld, n, Const(16));
res2 = context.AddIntrinsic(Intrinsic.X86Psrld, res2, Const(16));
res2 = context.AddIntrinsic(Intrinsic.X86Cvtdq2ps, res2);
return context.AddIntrinsic(Intrinsic.X86Addps, res, res2);
}
else
{
return context.AddIntrinsic(Intrinsic.X86Cvtdq2ps, n);
}
});
}
else
{
EmitVectorUnaryOpSx32(context, (op1) => EmitFPConvert(context, op1, floatSize, true));
if (unsigned)
{
EmitVectorUnaryOpZx32(context, (op1) => EmitFPConvert(context, op1, floatSize, false));
}
else
{
EmitVectorUnaryOpSx32(context, (op1) => EmitFPConvert(context, op1, floatSize, true));
}
}
}
}
@ -123,44 +160,51 @@ namespace ARMeilleure.Instructions
bool unsigned = (op.Opc2 & 1) == 0;
bool roundWithFpscr = op.Opc != 1;
Operand toConvert = ExtractScalar(context, floatSize, op.Vm);
Operand asInteger;
// TODO: Fast Path.
if (roundWithFpscr)
if (!roundWithFpscr && Optimizations.UseSse41)
{
// These need to get the FPSCR value, so it's worth noting we'd need to do a C# call at some point.
if (floatSize == OperandType.FP64)
{
if (unsigned)
{
asInteger = context.Call(new _U32_F64(SoftFallback.DoubleToUInt32), toConvert);
}
else
{
asInteger = context.Call(new _S32_F64(SoftFallback.DoubleToInt32), toConvert);
}
}
else
{
if (unsigned)
{
asInteger = context.Call(new _U32_F32(SoftFallback.FloatToUInt32), toConvert);
}
else
{
asInteger = context.Call(new _S32_F32(SoftFallback.FloatToInt32), toConvert);
}
}
}
EmitSse41ConvertInt32(context, FPRoundingMode.TowardsZero, !unsigned);
}
else
{
// Round towards zero.
asInteger = EmitSaturateFloatToInt(context, toConvert, unsigned);
}
Operand toConvert = ExtractScalar(context, floatSize, op.Vm);
InsertScalar(context, op.Vd, asInteger);
Operand asInteger;
// TODO: Fast Path.
if (roundWithFpscr)
{
if (floatSize == OperandType.FP64)
{
if (unsigned)
{
asInteger = context.Call(new _U32_F64(SoftFallback.DoubleToUInt32), toConvert);
}
else
{
asInteger = context.Call(new _S32_F64(SoftFallback.DoubleToInt32), toConvert);
}
}
else
{
if (unsigned)
{
asInteger = context.Call(new _U32_F32(SoftFallback.FloatToUInt32), toConvert);
}
else
{
asInteger = context.Call(new _S32_F32(SoftFallback.FloatToInt32), toConvert);
}
}
}
else
{
// Round towards zero.
asInteger = EmitSaturateFloatToInt(context, toConvert, unsigned);
}
InsertScalar(context, op.Vd, asInteger);
}
}
else
{
@ -192,6 +236,26 @@ namespace ARMeilleure.Instructions
return context.Call(dlg, n, Const((int)roundMode));
}
private static FPRoundingMode RMToRoundMode(int rm)
{
FPRoundingMode roundMode;
switch (rm)
{
case 0b01:
roundMode = FPRoundingMode.ToNearest;
break;
case 0b10:
roundMode = FPRoundingMode.TowardsPlusInfinity;
break;
case 0b11:
roundMode = FPRoundingMode.TowardsMinusInfinity;
break;
default:
throw new ArgumentOutOfRangeException(nameof(rm));
}
return roundMode;
}
public static void Vcvt_R(ArmEmitterContext context)
{
OpCode32SimdCvtFI op = (OpCode32SimdCvtFI)context.CurrOp;
@ -199,30 +263,38 @@ namespace ARMeilleure.Instructions
OperandType floatSize = op.RegisterSize == RegisterSize.Int64 ? OperandType.FP64 : OperandType.FP32;
bool unsigned = (op.Opc & 1) == 0;
int rm = op.Opc2 & 3;
Operand toConvert = ExtractScalar(context, floatSize, op.Vm);
switch (op.Opc2)
if (Optimizations.UseSse41 && rm != 0b00)
{
case 0b00: // Away
toConvert = EmitRoundMathCall(context, MidpointRounding.AwayFromZero, toConvert);
break;
case 0b01: // Nearest
toConvert = EmitRoundMathCall(context, MidpointRounding.ToEven, toConvert);
break;
case 0b10: // Towards positive infinity
toConvert = EmitUnaryMathCall(context, MathF.Ceiling, Math.Ceiling, toConvert);
break;
case 0b11: // Towards negative infinity
toConvert = EmitUnaryMathCall(context, MathF.Floor, Math.Floor, toConvert);
break;
EmitSse41ConvertInt32(context, RMToRoundMode(rm), !unsigned);
}
else
{
Operand toConvert = ExtractScalar(context, floatSize, op.Vm);
Operand asInteger;
switch (rm)
{
case 0b00: // Away
toConvert = EmitRoundMathCall(context, MidpointRounding.AwayFromZero, toConvert);
break;
case 0b01: // Nearest
toConvert = EmitRoundMathCall(context, MidpointRounding.ToEven, toConvert);
break;
case 0b10: // Towards positive infinity
toConvert = EmitUnaryMathCall(context, MathF.Ceiling, Math.Ceiling, toConvert);
break;
case 0b11: // Towards negative infinity
toConvert = EmitUnaryMathCall(context, MathF.Floor, Math.Floor, toConvert);
break;
}
asInteger = EmitSaturateFloatToInt(context, toConvert, unsigned);
Operand asInteger;
InsertScalar(context, op.Vd, asInteger);
asInteger = EmitSaturateFloatToInt(context, toConvert, unsigned);
InsertScalar(context, op.Vd, asInteger);
}
}
public static void Vrint_RM(ArmEmitterContext context)
@ -231,30 +303,59 @@ namespace ARMeilleure.Instructions
OperandType floatSize = op.RegisterSize == RegisterSize.Int64 ? OperandType.FP64 : OperandType.FP32;
Operand toConvert = ExtractScalar(context, floatSize, op.Vm);
int rm = op.Opc2 & 3;
switch (op.Opc2)
if (Optimizations.UseSse2 && rm != 0b00)
{
case 0b00: // Away
toConvert = EmitRoundMathCall(context, MidpointRounding.AwayFromZero, toConvert);
break;
case 0b01: // Nearest
toConvert = EmitRoundMathCall(context, MidpointRounding.ToEven, toConvert);
break;
case 0b10: // Towards positive infinity
toConvert = EmitUnaryMathCall(context, MathF.Ceiling, Math.Ceiling, toConvert);
break;
case 0b11: // Towards negative infinity
toConvert = EmitUnaryMathCall(context, MathF.Floor, Math.Floor, toConvert);
break;
}
EmitScalarUnaryOpSimd32(context, (m) =>
{
Intrinsic inst = (op.Size & 1) == 0 ? Intrinsic.X86Roundss : Intrinsic.X86Roundsd;
InsertScalar(context, op.Vd, toConvert);
FPRoundingMode roundMode = RMToRoundMode(rm);
return context.AddIntrinsic(inst, m, Const(X86GetRoundControl(roundMode)));
});
}
else
{
Operand toConvert = ExtractScalar(context, floatSize, op.Vm);
switch (rm)
{
case 0b00: // Away
toConvert = EmitRoundMathCall(context, MidpointRounding.AwayFromZero, toConvert);
break;
case 0b01: // Nearest
toConvert = EmitRoundMathCall(context, MidpointRounding.ToEven, toConvert);
break;
case 0b10: // Towards positive infinity
toConvert = EmitUnaryMathCall(context, MathF.Ceiling, Math.Ceiling, toConvert);
break;
case 0b11: // Towards negative infinity
toConvert = EmitUnaryMathCall(context, MathF.Floor, Math.Floor, toConvert);
break;
}
InsertScalar(context, op.Vd, toConvert);
}
}
public static void Vrint_Z(ArmEmitterContext context)
{
EmitScalarUnaryOpF32(context, (op1) => EmitUnaryMathCall(context, MathF.Truncate, Math.Truncate, op1));
IOpCodeSimd op = (IOpCodeSimd)context.CurrOp;
if (Optimizations.UseSse2)
{
EmitScalarUnaryOpSimd32(context, (m) =>
{
Intrinsic inst = (op.Size & 1) == 0 ? Intrinsic.X86Roundss : Intrinsic.X86Roundsd;
return context.AddIntrinsic(inst, m, Const(X86GetRoundControl(FPRoundingMode.TowardsZero)));
});
}
else
{
EmitScalarUnaryOpF32(context, (op1) => EmitUnaryMathCall(context, MathF.Truncate, Math.Truncate, op1));
}
}
private static Operand EmitFPConvert(ArmEmitterContext context, Operand value, OperandType type, bool signed)
@ -270,5 +371,211 @@ namespace ARMeilleure.Instructions
return context.ConvertToFPUI(type, value);
}
}
private static void EmitSse41ConvertInt32(ArmEmitterContext context, FPRoundingMode roundMode, bool signed)
{
// A port of the similar round function in InstEmitSimdCvt.
OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;
bool doubleSize = (op.Size & 1) != 0;
int shift = doubleSize ? 1 : 2;
Operand n = GetVecA32(op.Vm >> shift);
n = EmitSwapScalar(context, n, op.Vm, doubleSize);
if (!doubleSize)
{
Operand nRes = context.AddIntrinsic(Intrinsic.X86Cmpss, n, n, Const((int)CmpCondition.OrderedQ));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, n);
nRes = context.AddIntrinsic(Intrinsic.X86Roundss, nRes, Const(X86GetRoundControl(roundMode)));
Operand zero = context.VectorZero();
Operand nCmp;
Operand nIntOrLong2 = null;
if (!signed)
{
nCmp = context.AddIntrinsic(Intrinsic.X86Cmpss, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
}
int fpMaxVal = 0x4F000000; // 2.14748365E9f (2147483648)
Operand fpMaxValMask = X86GetScalar(context, fpMaxVal);
Operand nIntOrLong = context.AddIntrinsicInt(Intrinsic.X86Cvtss2si, nRes);
if (!signed)
{
nRes = context.AddIntrinsic(Intrinsic.X86Subss, nRes, fpMaxValMask);
nCmp = context.AddIntrinsic(Intrinsic.X86Cmpss, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
nIntOrLong2 = context.AddIntrinsicInt(Intrinsic.X86Cvtss2si, nRes);
}
nRes = context.AddIntrinsic(Intrinsic.X86Cmpss, nRes, fpMaxValMask, Const((int)CmpCondition.NotLessThan));
Operand nInt = context.AddIntrinsicInt(Intrinsic.X86Cvtsi2si, nRes);
Operand dRes;
if (signed)
{
dRes = context.BitwiseExclusiveOr(nIntOrLong, nInt);
}
else
{
dRes = context.BitwiseExclusiveOr(nIntOrLong2, nInt);
dRes = context.Add(dRes, nIntOrLong);
}
InsertScalar(context, op.Vd, dRes);
}
else
{
Operand nRes = context.AddIntrinsic(Intrinsic.X86Cmpsd, n, n, Const((int)CmpCondition.OrderedQ));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, n);
nRes = context.AddIntrinsic(Intrinsic.X86Roundsd, nRes, Const(X86GetRoundControl(roundMode)));
Operand zero = context.VectorZero();
Operand nCmp;
Operand nIntOrLong2 = null;
if (!signed)
{
nCmp = context.AddIntrinsic(Intrinsic.X86Cmpsd, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
}
long fpMaxVal = 0x41E0000000000000L; // 2147483648.0000000d (2147483648)
Operand fpMaxValMask = X86GetScalar(context, fpMaxVal);
Operand nIntOrLong = context.AddIntrinsicInt(Intrinsic.X86Cvtsd2si, nRes);
if (!signed)
{
nRes = context.AddIntrinsic(Intrinsic.X86Subsd, nRes, fpMaxValMask);
nCmp = context.AddIntrinsic(Intrinsic.X86Cmpsd, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
nIntOrLong2 = context.AddIntrinsicInt(Intrinsic.X86Cvtsd2si, nRes);
}
nRes = context.AddIntrinsic(Intrinsic.X86Cmpsd, nRes, fpMaxValMask, Const((int)CmpCondition.NotLessThan));
Operand nLong = context.AddIntrinsicLong(Intrinsic.X86Cvtsi2si, nRes);
nLong = context.ConvertI64ToI32(nLong);
Operand dRes;
if (signed)
{
dRes = context.BitwiseExclusiveOr(nIntOrLong, nLong);
}
else
{
dRes = context.BitwiseExclusiveOr(nIntOrLong2, nLong);
dRes = context.Add(dRes, nIntOrLong);
}
InsertScalar(context, op.Vd, dRes);
}
}
private static void EmitSse41ConvertVector32(ArmEmitterContext context, FPRoundingMode roundMode, bool signed)
{
OpCode32Simd op = (OpCode32Simd)context.CurrOp;
EmitVectorUnaryOpSimd32(context, (n) =>
{
int sizeF = op.Size & 1;
if (sizeF == 0)
{
Operand nRes = context.AddIntrinsic(Intrinsic.X86Cmpps, n, n, Const((int)CmpCondition.OrderedQ));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, n);
nRes = context.AddIntrinsic(Intrinsic.X86Roundps, nRes, Const(X86GetRoundControl(roundMode)));
Operand zero = context.VectorZero();
Operand nCmp;
if (!signed)
{
nCmp = context.AddIntrinsic(Intrinsic.X86Cmpps, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
}
Operand fpMaxValMask = X86GetAllElements(context, 0x4F000000); // 2.14748365E9f (2147483648)
Operand nInt = context.AddIntrinsic(Intrinsic.X86Cvtps2dq, nRes);
Operand nInt2 = null;
if (!signed)
{
nRes = context.AddIntrinsic(Intrinsic.X86Subps, nRes, fpMaxValMask);
nCmp = context.AddIntrinsic(Intrinsic.X86Cmpps, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
nInt2 = context.AddIntrinsic(Intrinsic.X86Cvtps2dq, nRes);
}
nRes = context.AddIntrinsic(Intrinsic.X86Cmpps, nRes, fpMaxValMask, Const((int)CmpCondition.NotLessThan));
if (signed)
{
return context.AddIntrinsic(Intrinsic.X86Pxor, nInt, nRes);
}
else
{
Operand dRes = context.AddIntrinsic(Intrinsic.X86Pxor, nInt2, nRes);
return context.AddIntrinsic(Intrinsic.X86Paddd, dRes, nInt);
}
}
else /* if (sizeF == 1) */
{
Operand nRes = context.AddIntrinsic(Intrinsic.X86Cmppd, n, n, Const((int)CmpCondition.OrderedQ));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, n);
nRes = context.AddIntrinsic(Intrinsic.X86Roundpd, nRes, Const(X86GetRoundControl(roundMode)));
Operand zero = context.VectorZero();
Operand nCmp;
if (!signed)
{
nCmp = context.AddIntrinsic(Intrinsic.X86Cmppd, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
}
Operand fpMaxValMask = X86GetAllElements(context, 0x43E0000000000000L); // 9.2233720368547760E18d (9223372036854775808)
Operand nLong = InstEmit.EmitSse2CvtDoubleToInt64OpF(context, nRes, false);
Operand nLong2 = null;
if (!signed)
{
nRes = context.AddIntrinsic(Intrinsic.X86Subpd, nRes, fpMaxValMask);
nCmp = context.AddIntrinsic(Intrinsic.X86Cmppd, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
nLong2 = InstEmit.EmitSse2CvtDoubleToInt64OpF(context, nRes, false);
}
nRes = context.AddIntrinsic(Intrinsic.X86Cmppd, nRes, fpMaxValMask, Const((int)CmpCondition.NotLessThan));
if (signed)
{
return context.AddIntrinsic(Intrinsic.X86Pxor, nLong, nRes);
}
else
{
Operand dRes = context.AddIntrinsic(Intrinsic.X86Pxor, nLong2, nRes);
return context.AddIntrinsic(Intrinsic.X86Paddq, dRes, nLong);
}
}
});
}
}
}

View file

@ -31,7 +31,7 @@ namespace ARMeilleure.Instructions
15L << 56 | 14L << 48 | 13L << 40 | 12L << 32 | 07L << 24 | 06L << 16 | 05L << 8 | 04L << 0 // S
};
private static readonly long _zeroMask = 128L << 56 | 128L << 48 | 128L << 40 | 128L << 32 | 128L << 24 | 128L << 16 | 128L << 8 | 128L << 0;
public static readonly long ZeroMask = 128L << 56 | 128L << 48 | 128L << 40 | 128L << 32 | 128L << 24 | 128L << 16 | 128L << 8 | 128L << 0;
#endregion
#region "X86 SSE Intrinsics"
@ -1026,8 +1026,8 @@ namespace ARMeilleure.Instructions
if (op.RegisterSize == RegisterSize.Simd64)
{
Operand zeroEvenMask = X86GetElements(context, _zeroMask, EvenMasks[op.Size]);
Operand zeroOddMask = X86GetElements(context, _zeroMask, OddMasks [op.Size]);
Operand zeroEvenMask = X86GetElements(context, ZeroMask, EvenMasks[op.Size]);
Operand zeroOddMask = X86GetElements(context, ZeroMask, OddMasks [op.Size]);
Operand mN = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, n, m); // m:n
@ -1467,6 +1467,21 @@ namespace ARMeilleure.Instructions
return context.Call(dlg, op1, op2);
}
public static Operand EmitFloatAbs(ArmEmitterContext context, Operand value, bool single, bool vector)
{
Operand mask;
if (single)
{
mask = vector ? X86GetAllElements(context, -0f) : X86GetScalar(context, -0f);
}
else
{
mask = vector ? X86GetAllElements(context, -0d) : X86GetScalar(context, -0d);
}
return context.AddIntrinsic(single ? Intrinsic.X86Andnps : Intrinsic.X86Andnpd, mask, value);
}
public static Operand EmitVectorExtractSx(ArmEmitterContext context, int reg, int index, int size)
{
return EmitVectorExtract(context, reg, index, size, true);

View file

@ -473,6 +473,446 @@ namespace ARMeilleure.Instructions
context.Copy(GetVecA32(op.Qd), res);
}
// Intrinsic Helpers
public static Operand EmitMoveDoubleWordToSide(ArmEmitterContext context, Operand input, int originalV, int targetV)
{
Debug.Assert(input.Type == OperandType.V128);
int originalSide = originalV & 1;
int targetSide = targetV & 1;
if (originalSide == targetSide)
{
return input;
}
if (targetSide == 1)
{
return context.AddIntrinsic(Intrinsic.X86Movlhps, input, input); // Low to high.
}
else
{
return context.AddIntrinsic(Intrinsic.X86Movhlps, input, input); // High to low.
}
}
public static Operand EmitDoubleWordInsert(ArmEmitterContext context, Operand target, Operand value, int targetV)
{
Debug.Assert(target.Type == OperandType.V128 && value.Type == OperandType.V128);
int targetSide = targetV & 1;
int shuffleMask = 2;
if (targetSide == 1)
{
return context.AddIntrinsic(Intrinsic.X86Shufpd, target, value, Const(shuffleMask));
}
else
{
return context.AddIntrinsic(Intrinsic.X86Shufpd, value, target, Const(shuffleMask));
}
}
public static Operand EmitScalarInsert(ArmEmitterContext context, Operand target, Operand value, int reg, bool doubleWidth)
{
Debug.Assert(target.Type == OperandType.V128 && value.Type == OperandType.V128);
// Insert from index 0 in value to index in target.
int index = reg & (doubleWidth ? 1 : 3);
if (doubleWidth)
{
if (index == 1)
{
return context.AddIntrinsic(Intrinsic.X86Movlhps, target, value); // Low to high.
}
else
{
return context.AddIntrinsic(Intrinsic.X86Shufpd, value, target, Const(2)); // Low to low, keep high from original.
}
}
else
{
if (Optimizations.UseSse41)
{
return context.AddIntrinsic(Intrinsic.X86Insertps, target, value, Const(index << 4));
}
else
{
target = EmitSwapScalar(context, target, index, doubleWidth); // Swap value to replace into element 0.
target = context.AddIntrinsic(Intrinsic.X86Movss, target, value); // Move the value into element 0 of the vector.
return EmitSwapScalar(context, target, index, doubleWidth); // Swap new value back to the correct index.
}
}
}
public static Operand EmitSwapScalar(ArmEmitterContext context, Operand target, int reg, bool doubleWidth)
{
// Index into 0, 0 into index. This swap happens at the start of an A32 scalar op if required.
int index = reg & (doubleWidth ? 1 : 3);
if (index == 0) return target;
if (doubleWidth)
{
int shuffleMask = 1; // Swap top and bottom. (b0 = 1, b1 = 0)
return context.AddIntrinsic(Intrinsic.X86Shufpd, target, target, Const(shuffleMask));
}
else
{
int shuffleMask = (3 << 6) | (2 << 4) | (1 << 2) | index; // Swap index and 0. (others remain)
shuffleMask &= ~(3 << (index * 2));
return context.AddIntrinsic(Intrinsic.X86Shufps, target, target, Const(shuffleMask));
}
}
// Vector Operand Templates
public static void EmitVectorUnaryOpSimd32(ArmEmitterContext context, Func1I vectorFunc)
{
OpCode32Simd op = (OpCode32Simd)context.CurrOp;
Operand m = GetVecA32(op.Qm);
Operand d = GetVecA32(op.Qd);
if (!op.Q) // Register swap: move relevant doubleword to destination side.
{
m = EmitMoveDoubleWordToSide(context, m, op.Vm, op.Vd);
}
Operand res = vectorFunc(m);
if (!op.Q) // Register insert.
{
res = EmitDoubleWordInsert(context, d, res, op.Vd);
}
context.Copy(d, res);
}
public static void EmitVectorUnaryOpF32(ArmEmitterContext context, Intrinsic inst32, Intrinsic inst64)
{
OpCode32Simd op = (OpCode32Simd)context.CurrOp;
Intrinsic inst = (op.Size & 1) != 0 ? inst64 : inst32;
EmitVectorUnaryOpSimd32(context, (m) => context.AddIntrinsic(inst, m));
}
public static void EmitVectorBinaryOpSimd32(ArmEmitterContext context, Func2I vectorFunc, int side = -1)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
Operand n = GetVecA32(op.Qn);
Operand m = GetVecA32(op.Qm);
Operand d = GetVecA32(op.Qd);
if (side == -1)
{
side = op.Vd;
}
if (!op.Q) // Register swap: move relevant doubleword to destination side.
{
n = EmitMoveDoubleWordToSide(context, n, op.Vn, side);
m = EmitMoveDoubleWordToSide(context, m, op.Vm, side);
}
Operand res = vectorFunc(n, m);
if (!op.Q) // Register insert.
{
if (side != op.Vd)
{
res = EmitMoveDoubleWordToSide(context, res, side, op.Vd);
}
res = EmitDoubleWordInsert(context, d, res, op.Vd);
}
context.Copy(d, res);
}
public static void EmitVectorBinaryOpF32(ArmEmitterContext context, Intrinsic inst32, Intrinsic inst64)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
Intrinsic inst = (op.Size & 1) != 0 ? inst64 : inst32;
EmitVectorBinaryOpSimd32(context, (n, m) => context.AddIntrinsic(inst, n, m));
}
public static void EmitVectorTernaryOpSimd32(ArmEmitterContext context, Func3I vectorFunc)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
Operand n = GetVecA32(op.Qn);
Operand m = GetVecA32(op.Qm);
Operand d = GetVecA32(op.Qd);
Operand initialD = d;
if (!op.Q) // Register swap: move relevant doubleword to destination side.
{
n = EmitMoveDoubleWordToSide(context, n, op.Vn, op.Vd);
m = EmitMoveDoubleWordToSide(context, m, op.Vm, op.Vd);
}
Operand res = vectorFunc(d, n, m);
if (!op.Q) // Register insert.
{
res = EmitDoubleWordInsert(context, initialD, res, op.Vd);
}
context.Copy(initialD, res);
}
public static void EmitVectorTernaryOpF32(ArmEmitterContext context, Intrinsic inst32pt1, Intrinsic inst64pt1, Intrinsic inst32pt2, Intrinsic inst64pt2)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
Intrinsic inst1 = (op.Size & 1) != 0 ? inst64pt1 : inst32pt1;
Intrinsic inst2 = (op.Size & 1) != 0 ? inst64pt2 : inst32pt2;
EmitVectorTernaryOpSimd32(context, (d, n, m) =>
{
Operand res = context.AddIntrinsic(inst1, n, m);
return res = context.AddIntrinsic(inst2, d, res);
});
}
public static void EmitScalarUnaryOpSimd32(ArmEmitterContext context, Func1I scalarFunc)
{
OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;
bool doubleSize = (op.Size & 1) != 0;
int shift = doubleSize ? 1 : 2;
Operand m = GetVecA32(op.Vm >> shift);
Operand d = GetVecA32(op.Vd >> shift);
m = EmitSwapScalar(context, m, op.Vm, doubleSize);
Operand res = scalarFunc(m);
// Insert scalar into vector.
res = EmitScalarInsert(context, d, res, op.Vd, doubleSize);
context.Copy(d, res);
}
public static void EmitScalarUnaryOpF32(ArmEmitterContext context, Intrinsic inst32, Intrinsic inst64)
{
OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;
Intrinsic inst = (op.Size & 1) != 0 ? inst64 : inst32;
EmitScalarUnaryOpSimd32(context, (m) => (inst == 0) ? m : context.AddIntrinsic(inst, m));
}
public static void EmitScalarBinaryOpSimd32(ArmEmitterContext context, Func2I scalarFunc)
{
OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;
bool doubleSize = (op.Size & 1) != 0;
int shift = doubleSize ? 1 : 2;
Operand n = GetVecA32(op.Vn >> shift);
Operand m = GetVecA32(op.Vm >> shift);
Operand d = GetVecA32(op.Vd >> shift);
n = EmitSwapScalar(context, n, op.Vn, doubleSize);
m = EmitSwapScalar(context, m, op.Vm, doubleSize);
Operand res = scalarFunc(n, m);
// Insert scalar into vector.
res = EmitScalarInsert(context, d, res, op.Vd, doubleSize);
context.Copy(d, res);
}
public static void EmitScalarBinaryOpF32(ArmEmitterContext context, Intrinsic inst32, Intrinsic inst64)
{
OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;
Intrinsic inst = (op.Size & 1) != 0 ? inst64 : inst32;
EmitScalarBinaryOpSimd32(context, (n, m) => context.AddIntrinsic(inst, n, m));
}
public static void EmitScalarTernaryOpSimd32(ArmEmitterContext context, Func3I scalarFunc)
{
OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;
bool doubleSize = (op.Size & 1) != 0;
int shift = doubleSize ? 1 : 2;
Operand n = GetVecA32(op.Vn >> shift);
Operand m = GetVecA32(op.Vm >> shift);
Operand d = GetVecA32(op.Vd >> shift);
Operand initialD = d;
n = EmitSwapScalar(context, n, op.Vn, doubleSize);
m = EmitSwapScalar(context, m, op.Vm, doubleSize);
d = EmitSwapScalar(context, d, op.Vd, doubleSize);
Operand res = scalarFunc(d, n, m);
// Insert scalar into vector.
res = EmitScalarInsert(context, initialD, res, op.Vd, doubleSize);
context.Copy(initialD, res);
}
public static void EmitScalarTernaryOpF32(ArmEmitterContext context, Intrinsic inst32pt1, Intrinsic inst64pt1, Intrinsic inst32pt2, Intrinsic inst64pt2)
{
OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;
bool doubleSize = (op.Size & 1) != 0;
int shift = doubleSize ? 1 : 2;
Intrinsic inst1 = doubleSize ? inst64pt1 : inst32pt1;
Intrinsic inst2 = doubleSize ? inst64pt2 : inst32pt2;
EmitScalarTernaryOpSimd32(context, (d, n, m) =>
{
Operand res = context.AddIntrinsic(inst1, n, m);
return context.AddIntrinsic(inst2, d, res);
});
}
// By Scalar
public static void EmitVectorByScalarOpSimd32(ArmEmitterContext context, Func2I vectorFunc)
{
OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;
Operand n = GetVecA32(op.Qn);
Operand d = GetVecA32(op.Qd);
int index = op.Vm & 3;
int dupeMask = (index << 6) | (index << 4) | (index << 2) | index;
Operand m = GetVecA32(op.Vm >> 2);
m = context.AddIntrinsic(Intrinsic.X86Shufps, m, m, Const(dupeMask));
if (!op.Q) // Register swap: move relevant doubleword to destination side.
{
n = EmitMoveDoubleWordToSide(context, n, op.Vn, op.Vd);
}
Operand res = vectorFunc(n, m);
if (!op.Q) // Register insert.
{
res = EmitDoubleWordInsert(context, d, res, op.Vd);
}
context.Copy(d, res);
}
public static void EmitVectorByScalarOpF32(ArmEmitterContext context, Intrinsic inst32, Intrinsic inst64)
{
OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;
Intrinsic inst = (op.Size & 1) != 0 ? inst64 : inst32;
EmitVectorByScalarOpSimd32(context, (n, m) => context.AddIntrinsic(inst, n, m));
}
public static void EmitVectorsByScalarOpSimd32(ArmEmitterContext context, Func3I vectorFunc)
{
OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;
Operand n = GetVecA32(op.Qn);
Operand d = GetVecA32(op.Qd);
Operand initialD = d;
int index = op.Vm & 3;
int dupeMask = (index << 6) | (index << 4) | (index << 2) | index;
Operand m = GetVecA32(op.Vm >> 2);
m = context.AddIntrinsic(Intrinsic.X86Shufps, m, m, Const(dupeMask));
if (!op.Q) // Register swap: move relevant doubleword to destination side.
{
n = EmitMoveDoubleWordToSide(context, n, op.Vn, op.Vd);
}
Operand res = vectorFunc(d, n, m);
if (!op.Q) // Register insert.
{
res = EmitDoubleWordInsert(context, initialD, res, op.Vd);
}
context.Copy(initialD, res);
}
public static void EmitVectorsByScalarOpF32(ArmEmitterContext context, Intrinsic inst32pt1, Intrinsic inst64pt1, Intrinsic inst32pt2, Intrinsic inst64pt2)
{
OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;
Intrinsic inst1 = (op.Size & 1) != 0 ? inst64pt1 : inst32pt1;
Intrinsic inst2 = (op.Size & 1) != 0 ? inst64pt2 : inst32pt2;
EmitVectorsByScalarOpSimd32(context, (d, n, m) =>
{
Operand res = context.AddIntrinsic(inst1, n, m);
return res = context.AddIntrinsic(inst2, d, res);
});
}
// Pairwise
public static void EmitSse2VectorPairwiseOpF32(ArmEmitterContext context, Intrinsic inst32)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
EmitVectorBinaryOpSimd32(context, (n, m) =>
{
Operand unpck = context.AddIntrinsic(Intrinsic.X86Unpcklps, n, m);
Operand part0 = unpck;
Operand part1 = context.AddIntrinsic(Intrinsic.X86Movhlps, unpck, unpck);
return context.AddIntrinsic(inst32, part0, part1);
}, 0);
}
public static void EmitSsse3VectorPairwiseOp32(ArmEmitterContext context, Intrinsic[] inst)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
EmitVectorBinaryOpSimd32(context, (n, m) =>
{
if (op.RegisterSize == RegisterSize.Simd64)
{
Operand zeroEvenMask = X86GetElements(context, ZeroMask, EvenMasks[op.Size]);
Operand zeroOddMask = X86GetElements(context, ZeroMask, OddMasks[op.Size]);
Operand mN = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, n, m); // m:n
Operand left = context.AddIntrinsic(Intrinsic.X86Pshufb, mN, zeroEvenMask); // 0:even from m:n
Operand right = context.AddIntrinsic(Intrinsic.X86Pshufb, mN, zeroOddMask); // 0:odd from m:n
return context.AddIntrinsic(inst[op.Size], left, right);
}
else if (op.Size < 3)
{
Operand oddEvenMask = X86GetElements(context, OddMasks[op.Size], EvenMasks[op.Size]);
Operand oddEvenN = context.AddIntrinsic(Intrinsic.X86Pshufb, n, oddEvenMask); // odd:even from n
Operand oddEvenM = context.AddIntrinsic(Intrinsic.X86Pshufb, m, oddEvenMask); // odd:even from m
Operand left = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, oddEvenN, oddEvenM);
Operand right = context.AddIntrinsic(Intrinsic.X86Punpckhqdq, oddEvenN, oddEvenM);
return context.AddIntrinsic(inst[op.Size], left, right);
}
else
{
Operand left = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, n, m);
Operand right = context.AddIntrinsic(Intrinsic.X86Punpckhqdq, n, m);
return context.AddIntrinsic(inst[3], left, right);
}
}, 0);
}
// Generic Functions
public static Operand EmitSoftFloatCallDefaultFpscr(

View file

@ -1,4 +1,5 @@
using ARMeilleure.Decoders;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.Translation;
using static ARMeilleure.Instructions.InstEmitSimdHelper32;
@ -9,7 +10,14 @@ namespace ARMeilleure.Instructions
{
public static void Vand_I(ArmEmitterContext context)
{
EmitVectorBinaryOpZx32(context, (op1, op2) => context.BitwiseAnd(op1, op2));
if (Optimizations.UseSse2)
{
EmitVectorBinaryOpF32(context, Intrinsic.X86Pand, Intrinsic.X86Pand);
}
else
{
EmitVectorBinaryOpZx32(context, (op1, op2) => context.BitwiseAnd(op1, op2));
}
}
public static void Vbif(ArmEmitterContext context)
@ -24,33 +32,64 @@ namespace ARMeilleure.Instructions
public static void Vbsl(ArmEmitterContext context)
{
EmitVectorTernaryOpZx32(context, (op1, op2, op3) =>
if (Optimizations.UseSse2)
{
return context.BitwiseExclusiveOr(
context.BitwiseAnd(op1,
context.BitwiseExclusiveOr(op2, op3)), op3);
});
EmitVectorTernaryOpSimd32(context, (d, n, m) =>
{
Operand res = context.AddIntrinsic(Intrinsic.X86Pxor, n, m);
res = context.AddIntrinsic(Intrinsic.X86Pand, res, d);
return context.AddIntrinsic(Intrinsic.X86Pxor, res, m);
});
}
else
{
EmitVectorTernaryOpZx32(context, (op1, op2, op3) =>
{
return context.BitwiseExclusiveOr(
context.BitwiseAnd(op1,
context.BitwiseExclusiveOr(op2, op3)), op3);
});
}
}
public static void Vorr_I(ArmEmitterContext context)
{
EmitVectorBinaryOpZx32(context, (op1, op2) => context.BitwiseOr(op1, op2));
if (Optimizations.UseSse2)
{
EmitVectorBinaryOpF32(context, Intrinsic.X86Por, Intrinsic.X86Por);
}
else
{
EmitVectorBinaryOpZx32(context, (op1, op2) => context.BitwiseOr(op1, op2));
}
}
private static void EmitBifBit(ArmEmitterContext context, bool notRm)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
EmitVectorTernaryOpZx32(context, (d, n, m) =>
if (Optimizations.UseSse2)
{
if (notRm)
EmitVectorTernaryOpSimd32(context, (d, n, m) =>
{
m = context.BitwiseNot(m);
}
return context.BitwiseExclusiveOr(
context.BitwiseAnd(m,
context.BitwiseExclusiveOr(d, n)), d);
});
Operand res = context.AddIntrinsic(Intrinsic.X86Pxor, n, d);
res = context.AddIntrinsic((notRm) ? Intrinsic.X86Pandn : Intrinsic.X86Pand, m, res);
return context.AddIntrinsic(Intrinsic.X86Pxor, d, res);
});
}
else
{
EmitVectorTernaryOpZx32(context, (d, n, m) =>
{
if (notRm)
{
m = context.BitwiseNot(m);
}
return context.BitwiseExclusiveOr(
context.BitwiseAnd(m,
context.BitwiseExclusiveOr(d, n)), d);
});
}
}
}
}

View file

@ -1,6 +1,7 @@
using ARMeilleure.Decoders;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.Translation;
using System;
using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper;
@ -11,6 +12,21 @@ namespace ARMeilleure.Instructions
{
static partial class InstEmit32
{
#region "Masks"
// Same as InstEmitSimdMove, as the instructions do the same thing.
private static readonly long[] _masksE0_Uzp = new long[]
{
13L << 56 | 09L << 48 | 05L << 40 | 01L << 32 | 12L << 24 | 08L << 16 | 04L << 8 | 00L << 0,
11L << 56 | 10L << 48 | 03L << 40 | 02L << 32 | 09L << 24 | 08L << 16 | 01L << 8 | 00L << 0
};
private static readonly long[] _masksE1_Uzp = new long[]
{
15L << 56 | 11L << 48 | 07L << 40 | 03L << 32 | 14L << 24 | 10L << 16 | 06L << 8 | 02L << 0,
15L << 56 | 14L << 48 | 07L << 40 | 06L << 32 | 13L << 24 | 12L << 16 | 05L << 8 | 04L << 0
};
#endregion
public static void Vmov_I(ArmEmitterContext context)
{
EmitVectorImmUnaryOp32(context, (op1) => op1);
@ -31,7 +47,7 @@ namespace ARMeilleure.Instructions
// To general purpose.
Operand value = context.VectorExtract(OperandType.I32, vec, op.Vn & 0x3);
SetIntA32(context, op.Rt, value);
}
}
else
{
// From general purpose.
@ -88,7 +104,7 @@ namespace ARMeilleure.Instructions
if (sameOwnerVec)
{
context.Copy(vec, context.VectorInsert(resultVec, highValue, vm1 & 3));
}
}
else
{
context.Copy(vec, resultVec);
@ -128,114 +144,201 @@ namespace ARMeilleure.Instructions
OpCode32SimdTbl op = (OpCode32SimdTbl)context.CurrOp;
bool extension = op.Opc == 1;
int elems = op.GetBytesCount() >> op.Size;
int length = op.Length + 1;
(int Qx, int Ix)[] tableTuples = new (int, int)[length];
for (int i = 0; i < length; i++)
if (Optimizations.UseSsse3)
{
(int vn, int en) = GetQuadwordAndSubindex(op.Vn + i, op.RegisterSize);
tableTuples[i] = (vn, en);
}
Operand d = GetVecA32(op.Qd);
Operand m = EmitMoveDoubleWordToSide(context, GetVecA32(op.Qm), op.Vm, 0);
int byteLength = length * 8;
Operand res;
Operand mask = X86GetAllElements(context, 0x0707070707070707L);
Operand res = GetVecA32(op.Qd);
Operand m = GetVecA32(op.Qm);
for (int index = 0; index < elems; index++)
{
Operand selectedIndex = context.ZeroExtend8(OperandType.I32, context.VectorExtract8(m, index + op.Im));
Operand inRange = context.ICompareLess(selectedIndex, Const(byteLength));
Operand elemRes = null; // Note: This is I64 for ease of calculation.
// TODO: Branching rather than conditional select.
// Get indexed byte.
// To simplify (ha) the IL, we get bytes from every vector and use a nested conditional select to choose the right result.
// This does have to extract `length` times for every element but certainly not as bad as it could be.
// Which vector number is the index on.
Operand vecIndex = context.ShiftRightUI(selectedIndex, Const(3));
// What should we shift by to extract it.
Operand subVecIndexShift = context.ShiftLeft(context.BitwiseAnd(selectedIndex, Const(7)), Const(3));
for (int i = 0; i < length; i++)
// Fast path for single register table.
{
(int qx, int ix) = tableTuples[i];
// Get the whole vector, we'll get a byte out of it.
Operand lookupResult;
if (qx == op.Qd)
{
// Result contains the current state of the vector.
lookupResult = context.VectorExtract(OperandType.I64, res, ix);
}
else
{
lookupResult = EmitVectorExtract32(context, qx, ix, 3, false); // I64
}
lookupResult = context.ShiftRightUI(lookupResult, subVecIndexShift); // Get the relevant byte from this vector.
Operand n = EmitMoveDoubleWordToSide(context, GetVecA32(op.Qn), op.Vn, 0);
if (i == 0)
{
elemRes = lookupResult; // First result is always default.
}
else
{
Operand isThisElem = context.ICompareEqual(vecIndex, Const(i));
elemRes = context.ConditionalSelect(isThisElem, lookupResult, elemRes);
}
Operand mMask = context.AddIntrinsic(Intrinsic.X86Pcmpgtb, m, mask);
mMask = context.AddIntrinsic(Intrinsic.X86Por, mMask, m);
res = context.AddIntrinsic(Intrinsic.X86Pshufb, n, mMask);
}
Operand fallback = (extension) ? context.ZeroExtend32(OperandType.I64, EmitVectorExtract32(context, op.Qd, index + op.Id, 0, false)) : Const(0L);
for (int index = 1; index < length; index++)
{
int newVn = (op.Vn + index) & 0x1F;
(int qn, int ind) = GetQuadwordAndSubindex(newVn, op.RegisterSize);
Operand ni = EmitMoveDoubleWordToSide(context, GetVecA32(qn), newVn, 0);
res = EmitVectorInsert(context, res, context.ConditionalSelect(inRange, elemRes, fallback), index + op.Id, 0);
Operand idxMask = X86GetAllElements(context, 0x0808080808080808L * index);
Operand mSubMask = context.AddIntrinsic(Intrinsic.X86Psubb, m, idxMask);
Operand mMask = context.AddIntrinsic(Intrinsic.X86Pcmpgtb, mSubMask, mask);
mMask = context.AddIntrinsic(Intrinsic.X86Por, mMask, mSubMask);
Operand res2 = context.AddIntrinsic(Intrinsic.X86Pshufb, ni, mMask);
res = context.AddIntrinsic(Intrinsic.X86Por, res, res2);
}
if (extension)
{
Operand idxMask = X86GetAllElements(context, (0x0808080808080808L * length) - 0x0101010101010101L);
Operand zeroMask = context.VectorZero();
Operand mPosMask = context.AddIntrinsic(Intrinsic.X86Pcmpgtb, m, idxMask);
Operand mNegMask = context.AddIntrinsic(Intrinsic.X86Pcmpgtb, zeroMask, m);
Operand mMask = context.AddIntrinsic(Intrinsic.X86Por, mPosMask, mNegMask);
Operand dMask = context.AddIntrinsic(Intrinsic.X86Pand, EmitMoveDoubleWordToSide(context, d, op.Vd, 0), mMask);
res = context.AddIntrinsic(Intrinsic.X86Por, res, dMask);
}
res = EmitMoveDoubleWordToSide(context, res, 0, op.Vd);
context.Copy(d, EmitDoubleWordInsert(context, d, res, op.Vd));
}
else
{
int elems = op.GetBytesCount() >> op.Size;
context.Copy(GetVecA32(op.Qd), res);
(int Qx, int Ix)[] tableTuples = new (int, int)[length];
for (int i = 0; i < length; i++)
{
tableTuples[i] = GetQuadwordAndSubindex(op.Vn + i, op.RegisterSize);
}
int byteLength = length * 8;
Operand res = GetVecA32(op.Qd);
Operand m = GetVecA32(op.Qm);
for (int index = 0; index < elems; index++)
{
Operand selectedIndex = context.ZeroExtend8(OperandType.I32, context.VectorExtract8(m, index + op.Im));
Operand inRange = context.ICompareLess(selectedIndex, Const(byteLength));
Operand elemRes = null; // Note: This is I64 for ease of calculation.
// TODO: Branching rather than conditional select.
// Get indexed byte.
// To simplify (ha) the IL, we get bytes from every vector and use a nested conditional select to choose the right result.
// This does have to extract `length` times for every element but certainly not as bad as it could be.
// Which vector number is the index on.
Operand vecIndex = context.ShiftRightUI(selectedIndex, Const(3));
// What should we shift by to extract it.
Operand subVecIndexShift = context.ShiftLeft(context.BitwiseAnd(selectedIndex, Const(7)), Const(3));
for (int i = 0; i < length; i++)
{
(int qx, int ix) = tableTuples[i];
// Get the whole vector, we'll get a byte out of it.
Operand lookupResult;
if (qx == op.Qd)
{
// Result contains the current state of the vector.
lookupResult = context.VectorExtract(OperandType.I64, res, ix);
}
else
{
lookupResult = EmitVectorExtract32(context, qx, ix, 3, false); // I64
}
lookupResult = context.ShiftRightUI(lookupResult, subVecIndexShift); // Get the relevant byte from this vector.
if (i == 0)
{
elemRes = lookupResult; // First result is always default.
}
else
{
Operand isThisElem = context.ICompareEqual(vecIndex, Const(i));
elemRes = context.ConditionalSelect(isThisElem, lookupResult, elemRes);
}
}
Operand fallback = (extension) ? context.ZeroExtend32(OperandType.I64, EmitVectorExtract32(context, op.Qd, index + op.Id, 0, false)) : Const(0L);
res = EmitVectorInsert(context, res, context.ConditionalSelect(inRange, elemRes, fallback), index + op.Id, 0);
}
context.Copy(GetVecA32(op.Qd), res);
}
}
public static void Vtrn(ArmEmitterContext context)
{
OpCode32SimdCmpZ op = (OpCode32SimdCmpZ)context.CurrOp;
int elems = op.GetBytesCount() >> op.Size;
int pairs = elems >> 1;
bool overlap = op.Qm == op.Qd;
Operand resD = GetVecA32(op.Qd);
Operand resM = GetVecA32(op.Qm);
for (int index = 0; index < pairs; index++)
if (Optimizations.UseSsse3)
{
int pairIndex = index << 1;
Operand d2 = EmitVectorExtract32(context, op.Qd, pairIndex + 1 + op.Id, op.Size, false);
Operand m1 = EmitVectorExtract32(context, op.Qm, pairIndex + op.Im, op.Size, false);
resD = EmitVectorInsert(context, resD, m1, pairIndex + 1 + op.Id, op.Size);
if (overlap)
EmitVectorShuffleOpSimd32(context, (m, d) =>
{
resM = resD;
}
Operand mask = null;
resM = EmitVectorInsert(context, resM, d2, pairIndex + op.Im, op.Size);
if (op.Size < 3)
{
long maskE0 = EvenMasks[op.Size];
long maskE1 = OddMasks[op.Size];
if (overlap)
{
resD = resM;
}
mask = X86GetScalar(context, maskE0);
mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);
}
if (op.Size < 3)
{
d = context.AddIntrinsic(Intrinsic.X86Pshufb, d, mask);
m = context.AddIntrinsic(Intrinsic.X86Pshufb, m, mask);
}
Operand resD = context.AddIntrinsic(X86PunpcklInstruction[op.Size], d, m);
Operand resM = context.AddIntrinsic(X86PunpckhInstruction[op.Size], d, m);
return (resM, resD);
});
}
context.Copy(GetVecA32(op.Qd), resD);
if (!overlap)
else
{
context.Copy(GetVecA32(op.Qm), resM);
int elems = op.GetBytesCount() >> op.Size;
int pairs = elems >> 1;
bool overlap = op.Qm == op.Qd;
Operand resD = GetVecA32(op.Qd);
Operand resM = GetVecA32(op.Qm);
for (int index = 0; index < pairs; index++)
{
int pairIndex = index << 1;
Operand d2 = EmitVectorExtract32(context, op.Qd, pairIndex + 1 + op.Id, op.Size, false);
Operand m1 = EmitVectorExtract32(context, op.Qm, pairIndex + op.Im, op.Size, false);
resD = EmitVectorInsert(context, resD, m1, pairIndex + 1 + op.Id, op.Size);
if (overlap)
{
resM = resD;
}
resM = EmitVectorInsert(context, resM, d2, pairIndex + op.Im, op.Size);
if (overlap)
{
resD = resM;
}
}
context.Copy(GetVecA32(op.Qd), resD);
if (!overlap)
{
context.Copy(GetVecA32(op.Qm), resM);
}
}
}
@ -243,44 +346,68 @@ namespace ARMeilleure.Instructions
{
OpCode32SimdCmpZ op = (OpCode32SimdCmpZ)context.CurrOp;
int elems = op.GetBytesCount() >> op.Size;
int pairs = elems >> 1;
bool overlap = op.Qm == op.Qd;
Operand resD = GetVecA32(op.Qd);
Operand resM = GetVecA32(op.Qm);
for (int index = 0; index < pairs; index++)
if (Optimizations.UseSse2)
{
int pairIndex = index << 1;
Operand dRowD = EmitVectorExtract32(context, op.Qd, index + op.Id, op.Size, false);
Operand mRowD = EmitVectorExtract32(context, op.Qm, index + op.Im, op.Size, false);
Operand dRowM = EmitVectorExtract32(context, op.Qd, index + op.Id + pairs, op.Size, false);
Operand mRowM = EmitVectorExtract32(context, op.Qm, index + op.Im + pairs, op.Size, false);
resD = EmitVectorInsert(context, resD, dRowD, pairIndex + op.Id, op.Size);
resD = EmitVectorInsert(context, resD, mRowD, pairIndex + 1 + op.Id, op.Size);
if (overlap)
EmitVectorShuffleOpSimd32(context, (m, d) =>
{
resM = resD;
}
if (op.RegisterSize == RegisterSize.Simd128)
{
Operand resD = context.AddIntrinsic(X86PunpcklInstruction[op.Size], d, m);
Operand resM = context.AddIntrinsic(X86PunpckhInstruction[op.Size], d, m);
resM = EmitVectorInsert(context, resM, dRowM, pairIndex + op.Im, op.Size);
resM = EmitVectorInsert(context, resM, mRowM, pairIndex + 1 + op.Im, op.Size);
return (resM, resD);
}
else
{
Operand res = context.AddIntrinsic(X86PunpcklInstruction[op.Size], d, m);
if (overlap)
{
resD = resM;
}
Operand resD = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, res, context.VectorZero());
Operand resM = context.AddIntrinsic(Intrinsic.X86Punpckhqdq, res, context.VectorZero());
return (resM, resD);
}
});
}
context.Copy(GetVecA32(op.Qd), resD);
if (!overlap)
else
{
context.Copy(GetVecA32(op.Qm), resM);
int elems = op.GetBytesCount() >> op.Size;
int pairs = elems >> 1;
bool overlap = op.Qm == op.Qd;
Operand resD = GetVecA32(op.Qd);
Operand resM = GetVecA32(op.Qm);
for (int index = 0; index < pairs; index++)
{
int pairIndex = index << 1;
Operand dRowD = EmitVectorExtract32(context, op.Qd, index + op.Id, op.Size, false);
Operand mRowD = EmitVectorExtract32(context, op.Qm, index + op.Im, op.Size, false);
Operand dRowM = EmitVectorExtract32(context, op.Qd, index + op.Id + pairs, op.Size, false);
Operand mRowM = EmitVectorExtract32(context, op.Qm, index + op.Im + pairs, op.Size, false);
resD = EmitVectorInsert(context, resD, dRowD, pairIndex + op.Id, op.Size);
resD = EmitVectorInsert(context, resD, mRowD, pairIndex + 1 + op.Id, op.Size);
if (overlap)
{
resM = resD;
}
resM = EmitVectorInsert(context, resM, dRowM, pairIndex + op.Im, op.Size);
resM = EmitVectorInsert(context, resM, mRowM, pairIndex + 1 + op.Im, op.Size);
if (overlap)
{
resD = resM;
}
}
context.Copy(GetVecA32(op.Qd), resD);
if (!overlap)
{
context.Copy(GetVecA32(op.Qm), resM);
}
}
}
@ -288,49 +415,135 @@ namespace ARMeilleure.Instructions
{
OpCode32SimdCmpZ op = (OpCode32SimdCmpZ)context.CurrOp;
int elems = op.GetBytesCount() >> op.Size;
int pairs = elems >> 1;
if (Optimizations.UseSsse3)
{
EmitVectorShuffleOpSimd32(context, (m, d) =>
{
if (op.RegisterSize == RegisterSize.Simd128)
{
Operand mask = null;
if (op.Size < 3)
{
long maskE0 = EvenMasks[op.Size];
long maskE1 = OddMasks[op.Size];
mask = X86GetScalar(context, maskE0);
mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);
d = context.AddIntrinsic(Intrinsic.X86Pshufb, d, mask);
m = context.AddIntrinsic(Intrinsic.X86Pshufb, m, mask);
}
Operand resD = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, d, m);
Operand resM = context.AddIntrinsic(Intrinsic.X86Punpckhqdq, d, m);
return (resM, resD);
}
else
{
Intrinsic punpcklInst = X86PunpcklInstruction[op.Size];
Operand res = context.AddIntrinsic(punpcklInst, d, m);
if (op.Size < 2)
{
long maskE0 = _masksE0_Uzp[op.Size];
long maskE1 = _masksE1_Uzp[op.Size];
Operand mask = X86GetScalar(context, maskE0);
mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);
res = context.AddIntrinsic(Intrinsic.X86Pshufb, res, mask);
}
Operand resD = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, res, context.VectorZero());
Operand resM = context.AddIntrinsic(Intrinsic.X86Punpckhqdq, res, context.VectorZero());
return (resM, resD);
}
});
}
else
{
int elems = op.GetBytesCount() >> op.Size;
int pairs = elems >> 1;
bool overlap = op.Qm == op.Qd;
Operand resD = GetVecA32(op.Qd);
Operand resM = GetVecA32(op.Qm);
for (int index = 0; index < elems; index++)
{
Operand dIns, mIns;
if (index >= pairs)
{
int pairIndex = index - pairs;
dIns = EmitVectorExtract32(context, op.Qm, (pairIndex << 1) + op.Im, op.Size, false);
mIns = EmitVectorExtract32(context, op.Qm, ((pairIndex << 1) | 1) + op.Im, op.Size, false);
}
else
{
dIns = EmitVectorExtract32(context, op.Qd, (index << 1) + op.Id, op.Size, false);
mIns = EmitVectorExtract32(context, op.Qd, ((index << 1) | 1) + op.Id, op.Size, false);
}
resD = EmitVectorInsert(context, resD, dIns, index + op.Id, op.Size);
if (overlap)
{
resM = resD;
}
resM = EmitVectorInsert(context, resM, mIns, index + op.Im, op.Size);
if (overlap)
{
resD = resM;
}
}
context.Copy(GetVecA32(op.Qd), resD);
if (!overlap)
{
context.Copy(GetVecA32(op.Qm), resM);
}
}
}
public static void EmitVectorShuffleOpSimd32(ArmEmitterContext context, Func<Operand, Operand, (Operand, Operand)> shuffleFunc)
{
OpCode32Simd op = (OpCode32Simd)context.CurrOp;
Operand m = GetVecA32(op.Qm);
Operand d = GetVecA32(op.Qd);
Operand initialM = m;
Operand initialD = d;
if (!op.Q) // Register swap: move relevant doubleword to side 0, for consistency.
{
m = EmitMoveDoubleWordToSide(context, m, op.Vm, 0);
d = EmitMoveDoubleWordToSide(context, d, op.Vd, 0);
}
(Operand resM, Operand resD) = shuffleFunc(m, d);
bool overlap = op.Qm == op.Qd;
Operand resD = GetVecA32(op.Qd);
Operand resM = GetVecA32(op.Qm);
for (int index = 0; index < elems; index++)
if (!op.Q) // Register insert.
{
Operand dIns, mIns;
if (index >= pairs)
{
int pind = index - pairs;
dIns = EmitVectorExtract32(context, op.Qm, (pind << 1) + op.Im, op.Size, false);
mIns = EmitVectorExtract32(context, op.Qm, ((pind << 1) | 1) + op.Im, op.Size, false);
}
else
{
dIns = EmitVectorExtract32(context, op.Qd, (index << 1) + op.Id, op.Size, false);
mIns = EmitVectorExtract32(context, op.Qd, ((index << 1) | 1) + op.Id, op.Size, false);
}
resD = EmitVectorInsert(context, resD, dIns, index + op.Id, op.Size);
if (overlap)
{
resM = resD;
}
resM = EmitVectorInsert(context, resM, mIns, index + op.Im, op.Size);
if (overlap)
{
resD = resM;
}
resM = EmitDoubleWordInsert(context, initialM, EmitMoveDoubleWordToSide(context, resM, 0, op.Vm), op.Vm);
resD = EmitDoubleWordInsert(context, overlap ? resM : initialD, EmitMoveDoubleWordToSide(context, resD, 0, op.Vd), op.Vd);
}
context.Copy(GetVecA32(op.Qd), resD);
if (!overlap)
{
context.Copy(GetVecA32(op.Qm), resM);
context.Copy(initialM, resM);
}
context.Copy(initialD, resD);
}
}
}

View file

@ -41,6 +41,7 @@ namespace ARMeilleure.IntermediateRepresentation
X86Divss,
X86Haddpd,
X86Haddps,
X86Insertps,
X86Maxpd,
X86Maxps,
X86Maxsd,
@ -51,6 +52,7 @@ namespace ARMeilleure.IntermediateRepresentation
X86Minss,
X86Movhlps,
X86Movlhps,
X86Movss,
X86Mulpd,
X86Mulps,
X86Mulsd,