Implement Fast Paths for most A32 SIMD instructions (#952)

* Begin work on A32 SIMD Intrinsics

* More instructions, some cleanup.

* Intrinsics for Move instructions (zip etc)

These pass the existing tests.

* Intrinsics for some of Cvt

While doing this I noticed that the conversion for int/fp was incorrect
in the slow path. I'll fix this in the original repo.

* Intrinsics for more Arithmetic instructions.

* Intrinsics for Vext

* Fix VEXT Intrinsic for double words.

* Use InsertPs to move scalar values.

* Cleanup, fix VPADD.f32 and VMIN signed integer.

* Cleanup, add SSE2 support for scalar insert.

Works similarly to the IR scalar insert, but obviously this one works
directly on V128.

* Minor cleanup.

* Enable intrinsic for FP64 to integer conversion.

* Address feedback apart from splitting out intrinsic float abs

Also: treat bad VREV encodings as undefined rather than throwing during translation.

* Move float abs to helper, fix bug with cvt

* Rename opc2 & 3 to match A32 docs, use ArgumentOutOfRangeException appropriately.

* Get the name of the variable at compile time rather than from a string literal.

* Use correct double sign mask.
jduncanator 2020-03-05 11:41:33 +11:00, committed via GitHub
parent d9ed827696
commit 68e15c1a74
12 changed files with 2077 additions and 400 deletions


@@ -52,6 +52,7 @@ namespace ARMeilleure.CodeGen.X86
            Add(Intrinsic.X86Divss,    new IntrinsicInfo(X86Instruction.Divss,    IntrinsicType.Binary));
            Add(Intrinsic.X86Haddpd,   new IntrinsicInfo(X86Instruction.Haddpd,   IntrinsicType.Binary));
            Add(Intrinsic.X86Haddps,   new IntrinsicInfo(X86Instruction.Haddps,   IntrinsicType.Binary));
            Add(Intrinsic.X86Insertps, new IntrinsicInfo(X86Instruction.Insertps, IntrinsicType.TernaryImm));
            Add(Intrinsic.X86Maxpd,    new IntrinsicInfo(X86Instruction.Maxpd,    IntrinsicType.Binary));
            Add(Intrinsic.X86Maxps,    new IntrinsicInfo(X86Instruction.Maxps,    IntrinsicType.Binary));
            Add(Intrinsic.X86Maxsd,    new IntrinsicInfo(X86Instruction.Maxsd,    IntrinsicType.Binary));
@@ -62,6 +63,7 @@ namespace ARMeilleure.CodeGen.X86
            Add(Intrinsic.X86Minss,    new IntrinsicInfo(X86Instruction.Minss,    IntrinsicType.Binary));
            Add(Intrinsic.X86Movhlps,  new IntrinsicInfo(X86Instruction.Movhlps,  IntrinsicType.Binary));
            Add(Intrinsic.X86Movlhps,  new IntrinsicInfo(X86Instruction.Movlhps,  IntrinsicType.Binary));
            Add(Intrinsic.X86Movss,    new IntrinsicInfo(X86Instruction.Movss,    IntrinsicType.Binary));
            Add(Intrinsic.X86Mulpd,    new IntrinsicInfo(X86Instruction.Mulpd,    IntrinsicType.Binary));
            Add(Intrinsic.X86Mulps,    new IntrinsicInfo(X86Instruction.Mulps,    IntrinsicType.Binary));
            Add(Intrinsic.X86Mulsd,    new IntrinsicInfo(X86Instruction.Mulsd,    IntrinsicType.Binary));


@@ -4,6 +4,12 @@
    {
        public OpCode32SimdRev(InstDescriptor inst, ulong address, int opCode) : base(inst, address, opCode)
        {
            if (Opc + Size >= 3)
            {
                Instruction = InstDescriptor.Undefined;
                return;
            }

            // Currently, this instruction is treated as though its opcode is the true size,
            // which lets us deal with reversing vectors on a single element basis (e.g. math magic an I64 rather than insert lots of I8s).
            int tempSize = Size;
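A note on the new guard: VREV64/32/16 map to Opc 0/1/2 and reverse a 64/32/16-bit region, so the element width (8 << Size) must be strictly smaller than the region (64 >> Opc), which is exactly the Opc + Size >= 3 check. A standalone sketch (illustrative only, not part of the commit) that enumerates the encodings:

    // Prints which VREV encodings the constructor above treats as undefined.
    for (int opc = 0; opc <= 2; opc++)
    {
        for (int size = 0; size <= 3; size++)
        {
            int regionBits = 64 >> opc;       // VREV64 / VREV32 / VREV16
            int elemBits = 8 << size;         // element width selected by Size
            bool undefined = opc + size >= 3; // same check as the constructor
            System.Console.WriteLine($"VREV{regionBits}.{elemBits}: {(undefined ? "UNDEFINED" : "valid")}");
        }
    }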


@@ -186,9 +186,7 @@ namespace ARMeilleure.Instructions
            {
                Operand res = context.AddIntrinsic(Intrinsic.X86Subss, GetVec(op.Rn), GetVec(op.Rm));

                res = EmitFloatAbs(context, res, true, false);

                context.Copy(GetVec(op.Rd), context.VectorZeroUpper96(res));
            }
@@ -196,9 +194,7 @@ namespace ARMeilleure.Instructions
            {
                Operand res = context.AddIntrinsic(Intrinsic.X86Subsd, GetVec(op.Rn), GetVec(op.Rm));

                res = EmitFloatAbs(context, res, false, false);

                context.Copy(GetVec(op.Rd), context.VectorZeroUpper64(res));
            }
@@ -226,9 +222,7 @@ namespace ARMeilleure.Instructions
            {
                Operand res = context.AddIntrinsic(Intrinsic.X86Subps, GetVec(op.Rn), GetVec(op.Rm));

                res = EmitFloatAbs(context, res, true, true);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
@@ -241,9 +235,7 @@ namespace ARMeilleure.Instructions
            {
                Operand res = context.AddIntrinsic(Intrinsic.X86Subpd, GetVec(op.Rn), GetVec(op.Rm));

                res = EmitFloatAbs(context, res, false, true);

                context.Copy(GetVec(op.Rd), res);
            }
@@ -267,17 +259,13 @@ namespace ARMeilleure.Instructions
                if (op.Size == 0)
                {
                    Operand res = EmitFloatAbs(context, GetVec(op.Rn), true, false);

                    context.Copy(GetVec(op.Rd), context.VectorZeroUpper96(res));
                }
                else /* if (op.Size == 1) */
                {
                    Operand res = EmitFloatAbs(context, GetVec(op.Rn), false, false);

                    context.Copy(GetVec(op.Rd), context.VectorZeroUpper64(res));
                }
@@ -299,11 +287,9 @@ namespace ARMeilleure.Instructions
                int sizeF = op.Size & 1;

                if (sizeF == 0)
                {
                    Operand res = EmitFloatAbs(context, GetVec(op.Rn), true, true);

                    if (op.RegisterSize == RegisterSize.Simd64)
                    {
@@ -314,9 +300,7 @@ namespace ARMeilleure.Instructions
                }
                else /* if (sizeF == 1) */
                {
                    Operand res = EmitFloatAbs(context, GetVec(op.Rn), false, true);

                    context.Copy(GetVec(op.Rd), res);
                }
@@ -3121,7 +3105,7 @@ namespace ARMeilleure.Instructions
            context.Copy(GetVec(op.Rd), res);
        }

        public static Operand EmitSse2VectorIsQNaNOpF(ArmEmitterContext context, Operand opF)
        {
            IOpCodeSimd op = (IOpCodeSimd)context.CurrOp;
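EmitFloatAbs folds the old mask-and-ANDN pattern into one helper. The identity it relies on: ANDN with -0.0 (only the sign bit set) clears the sign bit, which is Abs for IEEE floats. A minimal managed check of that identity (illustrative sketch, not the emitter code; the helper name below is mine):

    static float AbsViaAndn(float x)
    {
        int signBit = System.BitConverter.SingleToInt32Bits(-0.0f); // 0x80000000
        int bits = System.BitConverter.SingleToInt32Bits(x);
        return System.BitConverter.Int32BitsToSingle(~signBit & bits); // ANDNPS semantics: (~a) & b
    }

    System.Console.WriteLine(AbsViaAndn(-1.5f)); // 1.5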


@@ -15,7 +15,19 @@ namespace ARMeilleure.Instructions
    {
        public static void Vabs_S(ArmEmitterContext context)
        {
            OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;

            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarUnaryOpSimd32(context, (m) =>
                {
                    return EmitFloatAbs(context, m, (op.Size & 1) == 0, false);
                });
            }
            else
            {
                EmitScalarUnaryOpF32(context, (op1) => EmitUnaryMathCall(context, MathF.Abs, Math.Abs, op1));
            }
        }

        public static void Vabs_V(ArmEmitterContext context)
@@ -24,7 +36,17 @@ namespace ARMeilleure.Instructions
            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitVectorUnaryOpSimd32(context, (m) =>
                    {
                        return EmitFloatAbs(context, m, (op.Size & 1) == 0, true);
                    });
                }
                else
                {
                    EmitVectorUnaryOpF32(context, (op1) => EmitUnaryMathCall(context, MathF.Abs, Math.Abs, op1));
                }
            }
            else
            {
@@ -41,7 +63,11 @@ namespace ARMeilleure.Instructions
        public static void Vadd_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarBinaryOpF32(context, Intrinsic.X86Addss, Intrinsic.X86Addsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Add(op1, op2));
            }
@@ -53,7 +79,11 @@ namespace ARMeilleure.Instructions
        public static void Vadd_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Addps, Intrinsic.X86Addpd);
            }
            else if (Optimizations.FastFP)
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => context.Add(op1, op2));
            }
@@ -65,7 +95,15 @@ namespace ARMeilleure.Instructions
        public static void Vadd_I(ArmEmitterContext context)
        {
            if (Optimizations.UseSse2)
            {
                OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
                EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PaddInstruction[op.Size], op1, op2));
            }
            else
            {
                EmitVectorBinaryOpZx32(context, (op1, op2) => context.Add(op1, op2));
            }
        }

        public static void Vdup(ArmEmitterContext context)
@@ -126,38 +164,89 @@ namespace ARMeilleure.Instructions
            }
        }
private static (long, long) MaskHelperByteSequence(int start, int length, int startByte)
{
int end = start + length;
int b = startByte;
long result = 0;
long result2 = 0;
for (int i = 0; i < 8; i++)
{
result |= (long)((i >= end || i < start) ? 0x80 : b++) << (i * 8);
}
for (int i = 8; i < 16; i++)
{
result2 |= (long)((i >= end || i < start) ? 0x80 : b++) << ((i - 8) * 8);
}
return (result2, result);
}
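To see what the helper produces: bytes outside [start, start + length) become 0x80, which PSHUFB turns into zero, and bytes inside count up from startByte. For a doubleword VEXT with imm = 3 (elems = 8; values chosen for illustration), the two masks select bytes 3..7 of n and bytes 0..2 of m:

    // Standalone copy of MaskHelperByteSequence, printing the VEXT masks for imm = 3.
    static (long hi, long lo) Mask(int start, int length, int startByte)
    {
        int end = start + length, b = startByte;
        long lo = 0, hi = 0;
        for (int i = 0; i < 8; i++)  lo |= (long)((i >= end || i < start) ? 0x80 : b++) << (i * 8);
        for (int i = 8; i < 16; i++) hi |= (long)((i >= end || i < start) ? 0x80 : b++) << ((i - 8) * 8);
        return (hi, lo);
    }

    var n = Mask(0, 8 - 3, 3); // n supplies the low 5 result bytes, starting at its byte 3
    var m = Mask(8 - 3, 3, 0); // m supplies the top 3 result bytes, starting at its byte 0
    System.Console.WriteLine($"{n.hi:x16} {n.lo:x16}"); // 8080808080808080 8080800706050403
    System.Console.WriteLine($"{m.hi:x16} {m.lo:x16}"); // 8080808080808080 0201008080808080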
        public static void Vext(ArmEmitterContext context)
        {
            OpCode32SimdExt op = (OpCode32SimdExt)context.CurrOp;
            int elems = op.GetBytesCount();
            int byteOff = op.Immediate;

            if (Optimizations.UseSsse3)
            {
                EmitVectorBinaryOpSimd32(context, (n, m) =>
                {
                    // Writing low to high of d: start <imm> into n, overlap into m.
                    // Then rotate n down by <imm>, m up by (elems)-imm.
                    // Then OR them together for the result.

                    (long nMaskHigh, long nMaskLow) = MaskHelperByteSequence(0, elems - byteOff, byteOff);
                    (long mMaskHigh, long mMaskLow) = MaskHelperByteSequence(elems - byteOff, byteOff, 0);
                    Operand nMask, mMask;
                    if (!op.Q)
                    {
                        // Do the same operation to the bytes in the top doubleword too, as our target could be in either.
                        nMaskHigh = nMaskLow + 0x0808080808080808L;
                        mMaskHigh = mMaskLow + 0x0808080808080808L;
                    }
                    nMask = X86GetElements(context, nMaskHigh, nMaskLow);
                    mMask = X86GetElements(context, mMaskHigh, mMaskLow);
                    Operand nPart = context.AddIntrinsic(Intrinsic.X86Pshufb, n, nMask);
                    Operand mPart = context.AddIntrinsic(Intrinsic.X86Pshufb, m, mMask);

                    return context.AddIntrinsic(Intrinsic.X86Por, nPart, mPart);
                });
            }
            else
            {
                Operand res = GetVecA32(op.Qd);

                for (int index = 0; index < elems; index++)
                {
                    Operand extract;
                    if (byteOff >= elems)
                    {
                        extract = EmitVectorExtractZx32(context, op.Qm, op.Im + (byteOff - elems), op.Size);
                    }
                    else
                    {
                        extract = EmitVectorExtractZx32(context, op.Qn, op.In + byteOff, op.Size);
                    }
                    byteOff++;

                    res = EmitVectorInsert(context, res, extract, op.Id + index, op.Size);
                }

                context.Copy(GetVecA32(op.Qd), res);
            }
        }
        public static void Vmov_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarUnaryOpF32(context, 0, 0);
            }
            else
            {
                EmitScalarUnaryOpF32(context, (op1) => op1);
            }
        }

        public static void Vmovn(ArmEmitterContext context)
@@ -167,17 +256,83 @@ namespace ARMeilleure.Instructions
        public static void Vneg_S(ArmEmitterContext context)
        {
            OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;

            if (Optimizations.UseSse2)
            {
                EmitScalarUnaryOpSimd32(context, (m) =>
                {
                    if ((op.Size & 1) == 0)
                    {
                        Operand mask = X86GetScalar(context, -0f);
                        return context.AddIntrinsic(Intrinsic.X86Xorps, mask, m);
                    }
                    else
                    {
                        Operand mask = X86GetScalar(context, -0d);
                        return context.AddIntrinsic(Intrinsic.X86Xorpd, mask, m);
                    }
                });
            }
            else
            {
                EmitScalarUnaryOpF32(context, (op1) => context.Negate(op1));
            }
        }
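The negate fast path works because XOR with -0.0 flips just the sign bit, which is IEEE negation (it also maps -0.0 back to +0.0 and leaves NaN payloads intact). A one-liner check of the trick (illustrative sketch; the helper name is mine):

    static float NegViaXor(float x)
    {
        int bits = System.BitConverter.SingleToInt32Bits(x);
        return System.BitConverter.Int32BitsToSingle(bits ^ int.MinValue); // flip bit 31, as XORPS with -0f does
    }

    System.Console.WriteLine(NegViaXor(2.5f)); // -2.5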
        public static void Vnmul_S(ArmEmitterContext context)
        {
            OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;

            if (Optimizations.UseSse2)
            {
                EmitScalarBinaryOpSimd32(context, (n, m) =>
                {
                    if ((op.Size & 1) == 0)
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
                        Operand mask = X86GetScalar(context, -0f);
                        return context.AddIntrinsic(Intrinsic.X86Xorps, mask, res);
                    }
                    else
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
                        Operand mask = X86GetScalar(context, -0d);
                        return context.AddIntrinsic(Intrinsic.X86Xorpd, mask, res);
                    }
                });
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Negate(context.Multiply(op1, op2)));
            }
        }
        public static void Vnmla_S(ArmEmitterContext context)
        {
            OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;

            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarTernaryOpSimd32(context, (d, n, m) =>
                {
                    if ((op.Size & 1) == 0)
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
                        res = context.AddIntrinsic(Intrinsic.X86Addss, d, res);
                        Operand mask = X86GetScalar(context, -0f);
                        return context.AddIntrinsic(Intrinsic.X86Xorps, mask, res);
                    }
                    else
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
                        res = context.AddIntrinsic(Intrinsic.X86Addsd, d, res);
                        Operand mask = X86GetScalar(context, -0d);
                        return context.AddIntrinsic(Intrinsic.X86Xorpd, mask, res);
                    }
                });
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
@@ -195,7 +350,30 @@ namespace ARMeilleure.Instructions
        public static void Vnmls_S(ArmEmitterContext context)
        {
            OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;

            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarTernaryOpSimd32(context, (d, n, m) =>
                {
                    if ((op.Size & 1) == 0)
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
                        Operand mask = X86GetScalar(context, -0f);
                        d = context.AddIntrinsic(Intrinsic.X86Xorps, mask, d);
                        return context.AddIntrinsic(Intrinsic.X86Addss, d, res);
                    }
                    else
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
                        Operand mask = X86GetScalar(context, -0d);
                        d = context.AddIntrinsic(Intrinsic.X86Xorpd, mask, d);
                        return context.AddIntrinsic(Intrinsic.X86Addsd, d, res);
                    }
                });
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
@@ -213,9 +391,30 @@ namespace ARMeilleure.Instructions
        public static void Vneg_V(ArmEmitterContext context)
        {
            OpCode32Simd op = (OpCode32Simd)context.CurrOp;

            if (op.F)
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorUnaryOpSimd32(context, (m) =>
                    {
                        if ((op.Size & 1) == 0)
                        {
                            Operand mask = X86GetAllElements(context, -0f);
                            return context.AddIntrinsic(Intrinsic.X86Xorps, mask, m);
                        }
                        else
                        {
                            Operand mask = X86GetAllElements(context, -0d);
                            return context.AddIntrinsic(Intrinsic.X86Xorpd, mask, m);
                        }
                    });
                }
                else
                {
                    EmitVectorUnaryOpF32(context, (op1) => context.Negate(op1));
                }
            }
            else
            {
@@ -225,7 +424,11 @@ namespace ARMeilleure.Instructions
        public static void Vdiv_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarBinaryOpF32(context, Intrinsic.X86Divss, Intrinsic.X86Divsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Divide(op1, op2));
            }
@@ -240,69 +443,145 @@ namespace ARMeilleure.Instructions
        public static void Vmaxnm_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse41)
            {
                EmitSse41MaxMinNumOpF32(context, true, true);
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => EmitSoftFloatCall(context, SoftFloat32.FPMaxNum, SoftFloat64.FPMaxNum, op1, op2));
            }
        }

        public static void Vmaxnm_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse41)
            {
                EmitSse41MaxMinNumOpF32(context, true, false);
            }
            else
            {
                EmitVectorBinaryOpSx32(context, (op1, op2) => EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMaxNumFpscr, SoftFloat64.FPMaxNumFpscr, op1, op2));
            }
        }

        public static void Vminnm_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse41)
            {
                EmitSse41MaxMinNumOpF32(context, false, true);
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => EmitSoftFloatCall(context, SoftFloat32.FPMinNum, SoftFloat64.FPMinNum, op1, op2));
            }
        }

        public static void Vminnm_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse41)
            {
                EmitSse41MaxMinNumOpF32(context, false, false);
            }
            else
            {
                EmitVectorBinaryOpSx32(context, (op1, op2) => EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMinNumFpscr, SoftFloat64.FPMinNumFpscr, op1, op2));
            }
        }
        public static void Vmax_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Maxps, Intrinsic.X86Maxpd);
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMaxFpscr, SoftFloat64.FPMaxFpscr, op1, op2);
                });
            }
        }

        public static void Vmax_I(ArmEmitterContext context)
        {
            OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

            if (op.U)
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PmaxuInstruction[op.Size], op1, op2));
                }
                else
                {
                    EmitVectorBinaryOpZx32(context, (op1, op2) => context.ConditionalSelect(context.ICompareGreaterUI(op1, op2), op1, op2));
                }
            }
            else
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PmaxsInstruction[op.Size], op1, op2));
                }
                else
                {
                    EmitVectorBinaryOpSx32(context, (op1, op2) => context.ConditionalSelect(context.ICompareGreater(op1, op2), op1, op2));
                }
            }
        }

        public static void Vmin_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Minps, Intrinsic.X86Minpd);
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMinFpscr, SoftFloat64.FPMinFpscr, op1, op2);
                });
            }
        }

        public static void Vmin_I(ArmEmitterContext context)
        {
            OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

            if (op.U)
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PminuInstruction[op.Size], op1, op2));
                }
                else
                {
                    EmitVectorBinaryOpZx32(context, (op1, op2) => context.ConditionalSelect(context.ICompareLessUI(op1, op2), op1, op2));
                }
            }
            else
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PminsInstruction[op.Size], op1, op2));
                }
                else
                {
                    EmitVectorBinaryOpSx32(context, (op1, op2) => context.ConditionalSelect(context.ICompareLess(op1, op2), op1, op2));
                }
            }
        }
        public static void Vmul_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarBinaryOpF32(context, Intrinsic.X86Mulss, Intrinsic.X86Mulsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Multiply(op1, op2));
            }
@@ -317,7 +596,11 @@ namespace ARMeilleure.Instructions
        public static void Vmul_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd);
            }
            else if (Optimizations.FastFP)
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => context.Multiply(op1, op2));
            }
@@ -342,7 +625,11 @@ namespace ARMeilleure.Instructions
            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitVectorByScalarOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd);
                }
                else if (Optimizations.FastFP)
                {
                    EmitVectorByScalarOpF32(context, (op1, op2) => context.Multiply(op1, op2));
                }
@@ -359,7 +646,11 @@ namespace ARMeilleure.Instructions
        public static void Vmla_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarTernaryOpF32(context, Intrinsic.X86Mulss, Intrinsic.X86Mulsd, Intrinsic.X86Addss, Intrinsic.X86Addsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
@@ -377,7 +668,11 @@ namespace ARMeilleure.Instructions
        public static void Vmla_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorTernaryOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd, Intrinsic.X86Addps, Intrinsic.X86Addpd);
            }
            else if (Optimizations.FastFP)
            {
                EmitVectorTernaryOpF32(context, (op1, op2, op3) => context.Add(op1, context.Multiply(op2, op3)));
            }
@@ -401,7 +696,11 @@ namespace ARMeilleure.Instructions
            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitVectorsByScalarOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd, Intrinsic.X86Addps, Intrinsic.X86Addpd);
                }
                else if (Optimizations.FastFP)
                {
                    EmitVectorsByScalarOpF32(context, (op1, op2, op3) => context.Add(op1, context.Multiply(op2, op3)));
                }
@@ -418,7 +717,11 @@ namespace ARMeilleure.Instructions
        public static void Vmls_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarTernaryOpF32(context, Intrinsic.X86Mulss, Intrinsic.X86Mulsd, Intrinsic.X86Subss, Intrinsic.X86Subsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
@@ -436,7 +739,11 @@ namespace ARMeilleure.Instructions
        public static void Vmls_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorTernaryOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd, Intrinsic.X86Subps, Intrinsic.X86Subpd);
            }
            else if (Optimizations.FastFP)
            {
                EmitVectorTernaryOpF32(context, (op1, op2, op3) => context.Subtract(op1, context.Multiply(op2, op3)));
            }
@@ -460,7 +767,11 @@ namespace ARMeilleure.Instructions
            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitVectorsByScalarOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd, Intrinsic.X86Subps, Intrinsic.X86Subpd);
                }
                else if (Optimizations.FastFP)
                {
                    EmitVectorsByScalarOpF32(context, (op1, op2, op3) => context.Subtract(op1, context.Multiply(op2, op3)));
                }
@@ -477,58 +788,115 @@ namespace ARMeilleure.Instructions
        public static void Vpadd_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitSse2VectorPairwiseOpF32(context, Intrinsic.X86Addps);
            }
            else
            {
                EmitVectorPairwiseOpF32(context, (op1, op2) => context.Add(op1, op2));
            }
        }

        public static void Vpadd_I(ArmEmitterContext context)
        {
            OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

            if (Optimizations.UseSsse3)
            {
                EmitSsse3VectorPairwiseOp32(context, X86PaddInstruction);
            }
            else
            {
                EmitVectorPairwiseOpI32(context, (op1, op2) => context.Add(op1, op2), !op.U);
            }
        }
        public static void Vrev(ArmEmitterContext context)
        {
            OpCode32SimdRev op = (OpCode32SimdRev)context.CurrOp;

            if (Optimizations.UseSsse3)
            {
                EmitVectorUnaryOpSimd32(context, (op1) =>
                {
                    Operand mask;
                    switch (op.Size)
                    {
                        case 3:
                            // Rev64
                            switch (op.Opc)
                            {
                                case 0:
                                    mask = X86GetElements(context, 0x08090a0b0c0d0e0fL, 0x0001020304050607L);
                                    return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                                case 1:
                                    mask = X86GetElements(context, 0x09080b0a0d0c0f0eL, 0x0100030205040706L);
                                    return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                                case 2:
                                    return context.AddIntrinsic(Intrinsic.X86Shufps, op1, op1, Const(1 | (0 << 2) | (3 << 4) | (2 << 6)));
                            }
                            break;
                        case 2:
                            // Rev32
                            switch (op.Opc)
                            {
                                case 0:
                                    mask = X86GetElements(context, 0x0c0d0e0f_08090a0bL, 0x04050607_00010203L);
                                    return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                                case 1:
                                    mask = X86GetElements(context, 0x0d0c0f0e_09080b0aL, 0x05040706_01000302L);
                                    return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                            }
                            break;
                        case 1:
                            // Rev16
                            mask = X86GetElements(context, 0x0e0f_0c0d_0a0b_0809L, 0x0607_0405_0203_0001L);
                            return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                    }

                    throw new InvalidOperationException("Invalid VREV Opcode + Size combo."); // Should be unreachable.
                });
            }
            else
            {
                EmitVectorUnaryOpZx32(context, (op1) =>
                {
                    switch (op.Opc)
                    {
                        case 0:
                            switch (op.Size) // Swap bytes.
                            {
                                case 1:
                                    return InstEmitAluHelper.EmitReverseBytes16_32Op(context, op1);
                                case 2:
                                case 3:
                                    return context.ByteSwap(op1);
                            }
                            break;
                        case 1:
                            switch (op.Size)
                            {
                                case 2:
                                    return context.BitwiseOr(context.ShiftRightUI(context.BitwiseAnd(op1, Const(0xffff0000)), Const(16)),
                                                             context.ShiftLeft(context.BitwiseAnd(op1, Const(0x0000ffff)), Const(16)));
                                case 3:
                                    return context.BitwiseOr(
                                        context.BitwiseOr(context.ShiftRightUI(context.BitwiseAnd(op1, Const(0xffff000000000000ul)), Const(48)),
                                                          context.ShiftLeft(context.BitwiseAnd(op1, Const(0x000000000000fffful)), Const(48))),
                                        context.BitwiseOr(context.ShiftRightUI(context.BitwiseAnd(op1, Const(0x0000ffff00000000ul)), Const(16)),
                                                          context.ShiftLeft(context.BitwiseAnd(op1, Const(0x00000000ffff0000ul)), Const(16))));
                            }
                            break;
                        case 2:
                            // Swap upper and lower halves.
                            return context.BitwiseOr(context.ShiftRightUI(context.BitwiseAnd(op1, Const(0xffffffff00000000ul)), Const(32)),
                                                     context.ShiftLeft(context.BitwiseAnd(op1, Const(0x00000000fffffffful)), Const(32)));
                    }

                    throw new InvalidOperationException("Invalid VREV Opcode + Size combo."); // Should be unreachable.
                });
            }
        }
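Each SSSE3 case above encodes the reversal as a PSHUFB permutation: mask byte i holds the source index for destination byte i, read least-significant byte first from the X86GetElements pair. A managed sketch (illustrative only) applying the Rev64 byte mask (0x08090a0b0c0d0e0f, 0x0001020304050607):

    static byte[] Shuffle(byte[] src, byte[] mask) // PSHUFB-style: dst[i] = src[mask[i] & 0xF]
    {
        var dst = new byte[16];
        for (int i = 0; i < 16; i++) dst[i] = src[mask[i] & 0x0F];
        return dst;
    }

    byte[] mask = { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
    byte[] src = new byte[16];
    for (byte i = 0; i < 16; i++) src[i] = i;
    System.Console.WriteLine(string.Join(",", Shuffle(src, mask)));
    // 7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8 - each 64-bit lane reversed bytewise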
        public static void Vrecpe(ArmEmitterContext context)
@@ -537,10 +905,19 @@ namespace ARMeilleure.Instructions
            if (op.F)
            {
                int sizeF = op.Size & 1;

                if (Optimizations.FastFP && Optimizations.UseSse2 && sizeF == 0)
                {
                    EmitVectorUnaryOpF32(context, Intrinsic.X86Rcpps, 0);
                }
                else
                {
                    EmitVectorUnaryOpF32(context, (op1) =>
                    {
                        return EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPRecipEstimateFpscr, SoftFloat64.FPRecipEstimateFpscr, op1);
                    });
                }
            }
            else
            {
@@ -550,10 +927,39 @@ namespace ARMeilleure.Instructions
        public static void Vrecps(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
                bool single = (op.Size & 1) == 0;

                // (2 - (n*m))
                EmitVectorBinaryOpSimd32(context, (n, m) =>
                {
                    if (single)
                    {
                        Operand maskTwo = X86GetAllElements(context, 2f);
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulps, n, m);
                        return context.AddIntrinsic(Intrinsic.X86Subps, maskTwo, res);
                    }
                    else
                    {
                        Operand maskTwo = X86GetAllElements(context, 2d);
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulpd, n, m);
                        return context.AddIntrinsic(Intrinsic.X86Subpd, maskTwo, res);
                    }
                });
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCall(context, SoftFloat32.FPRecipStep, SoftFloat64.FPRecipStep, op1, op2);
                });
            }
        }
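The "(2 - (n*m))" comment is the VRECPS step: paired with the VRECPE estimate it forms one Newton-Raphson iteration for 1/d, x' = x * (2 - d*x). A quick numeric illustration (values chosen arbitrarily):

    double d = 3.0;
    double x = 0.3; // stand-in for the rough VRECPE estimate of 1/3
    for (int i = 0; i < 3; i++)
    {
        x *= 2.0 - d * x; // VRECPS supplies the (2 - d*x) factor
        System.Console.WriteLine(x); // 0.33, 0.3333, ... converging on 1/3
    }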
        public static void Vrsqrte(ArmEmitterContext context)
@@ -562,10 +968,19 @@ namespace ARMeilleure.Instructions
            if (op.F)
            {
                int sizeF = op.Size & 1;

                if (Optimizations.FastFP && Optimizations.UseSse2 && sizeF == 0)
                {
                    EmitVectorUnaryOpF32(context, Intrinsic.X86Rsqrtps, 0);
                }
                else
                {
                    EmitVectorUnaryOpF32(context, (op1) =>
                    {
                        return EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPRSqrtEstimateFpscr, SoftFloat64.FPRSqrtEstimateFpscr, op1);
                    });
                }
            }
            else
            {
@@ -575,10 +990,43 @@ namespace ARMeilleure.Instructions
        public static void Vrsqrts(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
                bool single = (op.Size & 1) == 0;

                // (3 - (n*m)) / 2
                EmitVectorBinaryOpSimd32(context, (n, m) =>
                {
                    if (single)
                    {
                        Operand maskHalf = X86GetAllElements(context, 0.5f);
                        Operand maskThree = X86GetAllElements(context, 3f);
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulps, n, m);
                        res = context.AddIntrinsic(Intrinsic.X86Subps, maskThree, res);
                        return context.AddIntrinsic(Intrinsic.X86Mulps, maskHalf, res);
                    }
                    else
                    {
                        Operand maskHalf = X86GetAllElements(context, 0.5d);
                        Operand maskThree = X86GetAllElements(context, 3d);
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulpd, n, m);
                        res = context.AddIntrinsic(Intrinsic.X86Subpd, maskThree, res);
                        return context.AddIntrinsic(Intrinsic.X86Mulpd, maskHalf, res);
                    }
                });
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCall(context, SoftFloat32.FPRSqrtStep, SoftFloat64.FPRSqrtStep, op1, op2);
                });
            }
        }
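Likewise "(3 - (n*m)) / 2" is the VRSQRTS step: with n = d*x and m = x it is the Newton-Raphson iteration for 1/sqrt(d), x' = x * (3 - d*x*x) / 2. Numerically (arbitrary starting estimate):

    double d = 2.0;
    double x = 0.7; // stand-in for the VRSQRTE estimate of 1/sqrt(2)
    for (int i = 0; i < 3; i++)
    {
        x = x * (3.0 - d * x * x) * 0.5;
        System.Console.WriteLine(x); // converges on 0.7071067811865475
    }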
        public static void Vsel(ArmEmitterContext context)
@@ -610,25 +1058,104 @@ namespace ARMeilleure.Instructions
        public static void Vsqrt_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarUnaryOpF32(context, Intrinsic.X86Sqrtss, Intrinsic.X86Sqrtsd);
            }
            else
            {
                EmitScalarUnaryOpF32(context, (op1) =>
                {
                    return EmitSoftFloatCall(context, SoftFloat32.FPSqrt, SoftFloat64.FPSqrt, op1);
                });
            }
        }

        public static void Vsub_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarBinaryOpF32(context, Intrinsic.X86Subss, Intrinsic.X86Subsd);
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Subtract(op1, op2));
            }
        }

        public static void Vsub_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Subps, Intrinsic.X86Subpd);
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => context.Subtract(op1, op2));
            }
        }

        public static void Vsub_I(ArmEmitterContext context)
        {
            if (Optimizations.UseSse2)
            {
                OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
                EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PsubInstruction[op.Size], op1, op2));
            }
            else
            {
                EmitVectorBinaryOpZx32(context, (op1, op2) => context.Subtract(op1, op2));
            }
        }
private static void EmitSse41MaxMinNumOpF32(ArmEmitterContext context, bool isMaxNum, bool scalar)
{
IOpCode32Simd op = (IOpCode32Simd)context.CurrOp;
Func<Operand, Operand, Operand> genericEmit = (n, m) =>
{
Operand nNum = context.Copy(n);
Operand mNum = context.Copy(m);
Operand nQNaNMask = InstEmit.EmitSse2VectorIsQNaNOpF(context, nNum);
Operand mQNaNMask = InstEmit.EmitSse2VectorIsQNaNOpF(context, mNum);
int sizeF = op.Size & 1;
if (sizeF == 0)
{
Operand negInfMask = X86GetAllElements(context, isMaxNum ? float.NegativeInfinity : float.PositiveInfinity);
Operand nMask = context.AddIntrinsic(Intrinsic.X86Andnps, mQNaNMask, nQNaNMask);
Operand mMask = context.AddIntrinsic(Intrinsic.X86Andnps, nQNaNMask, mQNaNMask);
nNum = context.AddIntrinsic(Intrinsic.X86Blendvps, nNum, negInfMask, nMask);
mNum = context.AddIntrinsic(Intrinsic.X86Blendvps, mNum, negInfMask, mMask);
return context.AddIntrinsic(isMaxNum ? Intrinsic.X86Maxps : Intrinsic.X86Minps, nNum, mNum);
}
else /* if (sizeF == 1) */
{
Operand negInfMask = X86GetAllElements(context, isMaxNum ? double.NegativeInfinity : double.PositiveInfinity);
Operand nMask = context.AddIntrinsic(Intrinsic.X86Andnpd, mQNaNMask, nQNaNMask);
Operand mMask = context.AddIntrinsic(Intrinsic.X86Andnpd, nQNaNMask, mQNaNMask);
nNum = context.AddIntrinsic(Intrinsic.X86Blendvpd, nNum, negInfMask, nMask);
mNum = context.AddIntrinsic(Intrinsic.X86Blendvpd, mNum, negInfMask, mMask);
return context.AddIntrinsic(isMaxNum ? Intrinsic.X86Maxpd : Intrinsic.X86Minpd, nNum, mNum);
}
};
if (scalar)
{
EmitScalarBinaryOpSimd32(context, genericEmit);
}
else
{
EmitVectorBinaryOpSimd32(context, genericEmit);
}
        }
    }
}
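The blend dance in EmitSse41MaxMinNumOpF32 implements the FPMaxNum/FPMinNum rule that a quiet NaN on exactly one side loses: that side is first replaced with minus (or plus) infinity so the plain max/min picks the real number. In scalar form (illustrative sketch of the semantics, not the emitter code; simplified to treat any NaN as quiet):

    static float MaxNum(float n, float m)
    {
        // A NaN on exactly one side becomes -Inf, so Max returns the other operand.
        if (float.IsNaN(n) && !float.IsNaN(m)) n = float.NegativeInfinity;
        else if (float.IsNaN(m) && !float.IsNaN(n)) m = float.NegativeInfinity;
        return System.Math.Max(n, m);
    }

    System.Console.WriteLine(MaxNum(float.NaN, 1.0f)); // 1
    System.Console.WriteLine(MaxNum(2.0f, float.NaN)); // 2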


@@ -5,6 +5,7 @@ using ARMeilleure.Translation;
using System;

using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper32;
using static ARMeilleure.IntermediateRepresentation.OperandHelper;
@@ -16,7 +17,14 @@ namespace ARMeilleure.Instructions
    {
        public static void Vceq_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitSse2CmpOpF32(context, CmpCondition.Equal, false);
            }
            else
            {
                EmitCmpOpF32(context, SoftFloat32.FPCompareEQFpscr, SoftFloat64.FPCompareEQFpscr, false);
            }
        }

        public static void Vceq_I(ArmEmitterContext context)
@@ -30,7 +38,14 @@ namespace ARMeilleure.Instructions
            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitSse2CmpOpF32(context, CmpCondition.Equal, true);
                }
                else
                {
                    EmitCmpOpF32(context, SoftFloat32.FPCompareEQFpscr, SoftFloat64.FPCompareEQFpscr, true);
                }
            }
            else
            {
@@ -40,7 +55,14 @@ namespace ARMeilleure.Instructions
        public static void Vcge_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitSse2CmpOpF32(context, CmpCondition.GreaterThanOrEqual, false);
            }
            else
            {
                EmitCmpOpF32(context, SoftFloat32.FPCompareGEFpscr, SoftFloat64.FPCompareGEFpscr, false);
            }
        }

        public static void Vcge_I(ArmEmitterContext context)
@@ -56,7 +78,14 @@ namespace ARMeilleure.Instructions
            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitSse2CmpOpF32(context, CmpCondition.GreaterThanOrEqual, true);
                }
                else
                {
                    EmitCmpOpF32(context, SoftFloat32.FPCompareGEFpscr, SoftFloat64.FPCompareGEFpscr, true);
                }
            }
            else
            {
@@ -66,7 +95,14 @@ namespace ARMeilleure.Instructions
        public static void Vcgt_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitSse2CmpOpF32(context, CmpCondition.GreaterThan, false);
            }
            else
            {
                EmitCmpOpF32(context, SoftFloat32.FPCompareGTFpscr, SoftFloat64.FPCompareGTFpscr, false);
            }
        }

        public static void Vcgt_I(ArmEmitterContext context)
@@ -82,7 +118,14 @@ namespace ARMeilleure.Instructions
            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitSse2CmpOpF32(context, CmpCondition.GreaterThan, true);
                }
                else
                {
                    EmitCmpOpF32(context, SoftFloat32.FPCompareGTFpscr, SoftFloat64.FPCompareGTFpscr, true);
                }
            }
            else
            {
@@ -96,7 +139,14 @@ namespace ARMeilleure.Instructions
            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitSse2CmpOpF32(context, CmpCondition.LessThanOrEqual, true);
                }
                else
                {
                    EmitCmpOpF32(context, SoftFloat32.FPCompareLEFpscr, SoftFloat64.FPCompareLEFpscr, true);
                }
            }
            else
            {
@@ -110,7 +160,14 @@ namespace ARMeilleure.Instructions
            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitSse2CmpOpF32(context, CmpCondition.LessThan, true);
                }
                else
                {
                    EmitCmpOpF32(context, SoftFloat32.FPCompareLTFpscr, SoftFloat64.FPCompareLTFpscr, true);
                }
            }
            else
            {
@@ -224,23 +281,77 @@ namespace ARMeilleure.Instructions
            OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;

            bool cmpWithZero = (op.Opc & 2) != 0;
            int sizeF = op.Size & 1;

            if (Optimizations.FastFP && (signalNaNs ? Optimizations.UseAvx : Optimizations.UseSse2))
            {
                CmpCondition cmpOrdered = signalNaNs ? CmpCondition.OrderedS : CmpCondition.OrderedQ;

                bool doubleSize = sizeF != 0;
                int shift = doubleSize ? 1 : 2;
                Operand m = GetVecA32(op.Vm >> shift);
                Operand n = GetVecA32(op.Vd >> shift);

                n = EmitSwapScalar(context, n, op.Vd, doubleSize);
                m = cmpWithZero ? context.VectorZero() : EmitSwapScalar(context, m, op.Vm, doubleSize);

                Operand lblNaN = Label();
                Operand lblEnd = Label();

                if (!doubleSize)
                {
                    Operand ordMask = context.AddIntrinsic(Intrinsic.X86Cmpss, n, m, Const((int)cmpOrdered));

                    Operand isOrdered = context.AddIntrinsicInt(Intrinsic.X86Cvtsi2si, ordMask);

                    context.BranchIfFalse(lblNaN, isOrdered);

                    Operand cf = context.AddIntrinsicInt(Intrinsic.X86Comissge, n, m);
                    Operand zf = context.AddIntrinsicInt(Intrinsic.X86Comisseq, n, m);
                    Operand nf = context.AddIntrinsicInt(Intrinsic.X86Comisslt, n, m);

                    EmitSetFPSCRFlags(context, nf, zf, cf, Const(0));
                }
                else
                {
                    Operand ordMask = context.AddIntrinsic(Intrinsic.X86Cmpsd, n, m, Const((int)cmpOrdered));

                    Operand isOrdered = context.AddIntrinsicLong(Intrinsic.X86Cvtsi2si, ordMask);

                    context.BranchIfFalse(lblNaN, isOrdered);

                    Operand cf = context.AddIntrinsicInt(Intrinsic.X86Comisdge, n, m);
                    Operand zf = context.AddIntrinsicInt(Intrinsic.X86Comisdeq, n, m);
                    Operand nf = context.AddIntrinsicInt(Intrinsic.X86Comisdlt, n, m);

                    EmitSetFPSCRFlags(context, nf, zf, cf, Const(0));
                }

                context.Branch(lblEnd);

                context.MarkLabel(lblNaN);

                EmitSetFPSCRFlags(context, Const(3));

                context.MarkLabel(lblEnd);
            }
            else
            {
                OperandType type = sizeF != 0 ? OperandType.FP64 : OperandType.FP32;

                Operand ne = ExtractScalar(context, type, op.Vd);
                Operand me;

                if (cmpWithZero)
                {
                    me = sizeF == 0 ? ConstF(0f) : ConstF(0d);
                }
                else
                {
                    me = ExtractScalar(context, type, op.Vm);
                }

                Delegate dlg = sizeF != 0
                    ? (Delegate)new _S32_F64_F64_Bool(SoftFloat64.FPCompare)
                    : (Delegate)new _S32_F32_F32_Bool(SoftFloat32.FPCompare);
@@ -269,5 +380,36 @@ namespace ARMeilleure.Instructions
            SetFpFlag(context, FPState.ZFlag, Extract(nzcv, 2));
            SetFpFlag(context, FPState.NFlag, Extract(nzcv, 3));
        }
private static void EmitSetFPSCRFlags(ArmEmitterContext context, Operand n, Operand z, Operand c, Operand v)
{
SetFpFlag(context, FPState.VFlag, v);
SetFpFlag(context, FPState.CFlag, c);
SetFpFlag(context, FPState.ZFlag, z);
SetFpFlag(context, FPState.NFlag, n);
}
private static void EmitSse2CmpOpF32(ArmEmitterContext context, CmpCondition cond, bool zero)
{
OpCode32Simd op = (OpCode32Simd)context.CurrOp;
int sizeF = op.Size & 1;
Intrinsic inst = (sizeF == 0) ? Intrinsic.X86Cmpps : Intrinsic.X86Cmppd;
if (zero)
{
EmitVectorUnaryOpSimd32(context, (m) =>
{
return context.AddIntrinsic(inst, m, context.VectorZero(), Const((int)cond));
});
}
else
{
EmitVectorBinaryOpSimd32(context, (n, m) =>
{
return context.AddIntrinsic(inst, n, m, Const((int)cond));
});
}
}
    }
}
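For reference, the FPSCR NZCV values the VCMP fast path produces: unordered inputs give 0011 (the Const(3) path), otherwise N = (n < m), Z = (n == m), C = (n >= m), V = 0, matching the Comiss*/Comisd* trio above. A scalar sketch of the mapping (illustrative only):

    static (int n, int z, int c, int v) VcmpFlags(double a, double b)
    {
        if (double.IsNaN(a) || double.IsNaN(b)) return (0, 0, 1, 1); // unordered
        return (a < b ? 1 : 0, a == b ? 1 : 0, a >= b ? 1 : 0, 0);
    }

    System.Console.WriteLine(VcmpFlags(1.0, 2.0));        // (1, 0, 0, 0)
    System.Console.WriteLine(VcmpFlags(2.0, 2.0));        // (0, 1, 1, 0)
    System.Console.WriteLine(VcmpFlags(double.NaN, 2.0)); // (0, 0, 1, 1)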


@@ -869,7 +869,7 @@ namespace ARMeilleure.Instructions
            }
        }

        public static Operand EmitSse2CvtDoubleToInt64OpF(ArmEmitterContext context, Operand opF, bool scalar)
        {
            Debug.Assert(opF.Type == OperandType.V128);


@@ -1,9 +1,11 @@
using ARMeilleure.Decoders;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.State;
using ARMeilleure.Translation;
using System;
using System.Diagnostics;
using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper32;
using static ARMeilleure.IntermediateRepresentation.OperandHelper;
@@ -63,22 +65,57 @@ namespace ARMeilleure.Instructions
            if (toInteger)
            {
                if (Optimizations.UseSse41)
                {
                    EmitSse41ConvertVector32(context, FPRoundingMode.TowardsZero, !unsigned);
                }
                else
                {
                    EmitVectorUnaryOpF32(context, (op1) =>
                    {
                        return EmitSaturateFloatToInt(context, op1, unsigned);
                    });
                }
            }
            else
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorUnaryOpSimd32(context, (n) =>
                    {
                        if (unsigned)
                        {
                            Operand mask = X86GetAllElements(context, 0x47800000);

                            Operand res = context.AddIntrinsic(Intrinsic.X86Psrld, n, Const(16));
                            res = context.AddIntrinsic(Intrinsic.X86Cvtdq2ps, res);
                            res = context.AddIntrinsic(Intrinsic.X86Mulps, res, mask);

                            Operand res2 = context.AddIntrinsic(Intrinsic.X86Pslld, n, Const(16));
                            res2 = context.AddIntrinsic(Intrinsic.X86Psrld, res2, Const(16));
                            res2 = context.AddIntrinsic(Intrinsic.X86Cvtdq2ps, res2);

                            return context.AddIntrinsic(Intrinsic.X86Addps, res, res2);
                        }
                        else
                        {
                            return context.AddIntrinsic(Intrinsic.X86Cvtdq2ps, n);
                        }
                    });
                }
                else
                {
                    if (unsigned)
                    {
                        EmitVectorUnaryOpZx32(context, (op1) => EmitFPConvert(context, op1, floatSize, false));
                    }
                    else
                    {
                        EmitVectorUnaryOpSx32(context, (op1) => EmitFPConvert(context, op1, floatSize, true));
                    }
                }
            }
        }
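The unsigned branch above exists because CVTDQ2PS only converts signed int32s. Splitting the value into 16-bit halves keeps both halves non-negative: the high half is converted and scaled by 65536.0f (that is the 0x47800000 constant), then the low half is added in. The same computation in plain C# (illustrative; the helper name is mine):

    static float UIntToFloat(uint x)
    {
        float hi = (x >> 16) * 65536.0f; // cvtdq2ps on (x >> 16), then mulps by 0x47800000
        float lo = x & 0xFFFF;           // cvtdq2ps on the masked low half
        return hi + lo;                  // addps
    }

    System.Console.WriteLine(UIntToFloat(0xFFFFFFFFu)); // 4.2949673E+09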
@@ -123,44 +160,51 @@ namespace ARMeilleure.Instructions
            bool unsigned = (op.Opc2 & 1) == 0;
            bool roundWithFpscr = op.Opc != 1;

            if (!roundWithFpscr && Optimizations.UseSse41)
            {
                EmitSse41ConvertInt32(context, FPRoundingMode.TowardsZero, !unsigned);
            }
            else
            {
                Operand toConvert = ExtractScalar(context, floatSize, op.Vm);

                Operand asInteger;

                // TODO: Fast Path.
                if (roundWithFpscr)
                {
                    // These need to get the FPSCR value, so it's worth noting we'd need to do a C# call at some point.
                    if (floatSize == OperandType.FP64)
                    {
                        if (unsigned)
                        {
                            asInteger = context.Call(new _U32_F64(SoftFallback.DoubleToUInt32), toConvert);
                        }
                        else
                        {
                            asInteger = context.Call(new _S32_F64(SoftFallback.DoubleToInt32), toConvert);
                        }
                    }
                    else
                    {
                        if (unsigned)
                        {
                            asInteger = context.Call(new _U32_F32(SoftFallback.FloatToUInt32), toConvert);
                        }
                        else
                        {
                            asInteger = context.Call(new _S32_F32(SoftFallback.FloatToInt32), toConvert);
                        }
                    }
                }
                else
                {
                    // Round towards zero.
                    asInteger = EmitSaturateFloatToInt(context, toConvert, unsigned);
                }

                InsertScalar(context, op.Vd, asInteger);
            }
        }
        else
        {
@@ -192,6 +236,26 @@ namespace ARMeilleure.Instructions
            return context.Call(dlg, n, Const((int)roundMode));
        }
private static FPRoundingMode RMToRoundMode(int rm)
{
FPRoundingMode roundMode;
switch (rm)
{
case 0b01:
roundMode = FPRoundingMode.ToNearest;
break;
case 0b10:
roundMode = FPRoundingMode.TowardsPlusInfinity;
break;
case 0b11:
roundMode = FPRoundingMode.TowardsMinusInfinity;
break;
default:
throw new ArgumentOutOfRangeException(nameof(rm));
}
return roundMode;
}
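RMToRoundMode converts the two rm bits into a rounding mode; rm == 0b00 (round away from zero) has no SSE round-control equivalent, which is why the callers below keep it on the slow path. The mapping, restated against .NET rounding (illustrative sketch):

    static double RoundRm(int rm, double x) => rm switch
    {
        0b00 => System.Math.Round(x, System.MidpointRounding.AwayFromZero), // no SSE encoding
        0b01 => System.Math.Round(x, System.MidpointRounding.ToEven),       // FPRoundingMode.ToNearest
        0b10 => System.Math.Ceiling(x),                                     // TowardsPlusInfinity
        0b11 => System.Math.Floor(x),                                       // TowardsMinusInfinity
        _ => throw new System.ArgumentOutOfRangeException(nameof(rm)),
    };

    System.Console.WriteLine(RoundRm(0b00, 2.5)); // 3
    System.Console.WriteLine(RoundRm(0b01, 2.5)); // 2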
        public static void Vcvt_R(ArmEmitterContext context)
        {
            OpCode32SimdCvtFI op = (OpCode32SimdCvtFI)context.CurrOp;
@@ -199,30 +263,38 @@ namespace ARMeilleure.Instructions
            OperandType floatSize = op.RegisterSize == RegisterSize.Int64 ? OperandType.FP64 : OperandType.FP32;

            bool unsigned = (op.Opc & 1) == 0;
            int rm = op.Opc2 & 3;

            if (Optimizations.UseSse41 && rm != 0b00)
            {
                EmitSse41ConvertInt32(context, RMToRoundMode(rm), !unsigned);
            }
            else
            {
                Operand toConvert = ExtractScalar(context, floatSize, op.Vm);

                switch (rm)
                {
                    case 0b00: // Away
                        toConvert = EmitRoundMathCall(context, MidpointRounding.AwayFromZero, toConvert);
                        break;
                    case 0b01: // Nearest
                        toConvert = EmitRoundMathCall(context, MidpointRounding.ToEven, toConvert);
                        break;
                    case 0b10: // Towards positive infinity
                        toConvert = EmitUnaryMathCall(context, MathF.Ceiling, Math.Ceiling, toConvert);
                        break;
                    case 0b11: // Towards negative infinity
                        toConvert = EmitUnaryMathCall(context, MathF.Floor, Math.Floor, toConvert);
                        break;
                }

                Operand asInteger;
                asInteger = EmitSaturateFloatToInt(context, toConvert, unsigned);

                InsertScalar(context, op.Vd, asInteger);
            }
        }
        public static void Vrint_RM(ArmEmitterContext context)
@@ -231,30 +303,59 @@ namespace ARMeilleure.Instructions
            OperandType floatSize = op.RegisterSize == RegisterSize.Int64 ? OperandType.FP64 : OperandType.FP32;

            int rm = op.Opc2 & 3;

            if (Optimizations.UseSse2 && rm != 0b00)
            {
                EmitScalarUnaryOpSimd32(context, (m) =>
                {
                    Intrinsic inst = (op.Size & 1) == 0 ? Intrinsic.X86Roundss : Intrinsic.X86Roundsd;

                    FPRoundingMode roundMode = RMToRoundMode(rm);

                    return context.AddIntrinsic(inst, m, Const(X86GetRoundControl(roundMode)));
                });
            }
            else
            {
                Operand toConvert = ExtractScalar(context, floatSize, op.Vm);

                switch (rm)
                {
                    case 0b00: // Away
                        toConvert = EmitRoundMathCall(context, MidpointRounding.AwayFromZero, toConvert);
                        break;
                    case 0b01: // Nearest
                        toConvert = EmitRoundMathCall(context, MidpointRounding.ToEven, toConvert);
                        break;
                    case 0b10: // Towards positive infinity
                        toConvert = EmitUnaryMathCall(context, MathF.Ceiling, Math.Ceiling, toConvert);
                        break;
                    case 0b11: // Towards negative infinity
                        toConvert = EmitUnaryMathCall(context, MathF.Floor, Math.Floor, toConvert);
                        break;
                }

                InsertScalar(context, op.Vd, toConvert);
            }
        }
public static void Vrint_Z(ArmEmitterContext context)
{
    IOpCodeSimd op = (IOpCodeSimd)context.CurrOp;

    if (Optimizations.UseSse2)
    {
        EmitScalarUnaryOpSimd32(context, (m) =>
        {
            Intrinsic inst = (op.Size & 1) == 0 ? Intrinsic.X86Roundss : Intrinsic.X86Roundsd;
            return context.AddIntrinsic(inst, m, Const(X86GetRoundControl(FPRoundingMode.TowardsZero)));
        });
    }
    else
    {
        EmitScalarUnaryOpF32(context, (op1) => EmitUnaryMathCall(context, MathF.Truncate, Math.Truncate, op1));
    }
}
private static Operand EmitFPConvert(ArmEmitterContext context, Operand value, OperandType type, bool signed)
@@ -270,5 +371,211 @@ namespace ARMeilleure.Instructions
        return context.ConvertToFPUI(type, value);
    }
}
private static void EmitSse41ConvertInt32(ArmEmitterContext context, FPRoundingMode roundMode, bool signed)
{
// A port of the similar round function in InstEmitSimdCvt.
OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;
bool doubleSize = (op.Size & 1) != 0;
int shift = doubleSize ? 1 : 2;
Operand n = GetVecA32(op.Vm >> shift);
n = EmitSwapScalar(context, n, op.Vm, doubleSize);
if (!doubleSize)
{
Operand nRes = context.AddIntrinsic(Intrinsic.X86Cmpss, n, n, Const((int)CmpCondition.OrderedQ));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, n);
nRes = context.AddIntrinsic(Intrinsic.X86Roundss, nRes, Const(X86GetRoundControl(roundMode)));
Operand zero = context.VectorZero();
Operand nCmp;
Operand nIntOrLong2 = null;
if (!signed)
{
nCmp = context.AddIntrinsic(Intrinsic.X86Cmpss, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
}
int fpMaxVal = 0x4F000000; // 2.14748365E9f (2147483648)
Operand fpMaxValMask = X86GetScalar(context, fpMaxVal);
Operand nIntOrLong = context.AddIntrinsicInt(Intrinsic.X86Cvtss2si, nRes);
if (!signed)
{
nRes = context.AddIntrinsic(Intrinsic.X86Subss, nRes, fpMaxValMask);
nCmp = context.AddIntrinsic(Intrinsic.X86Cmpss, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
nIntOrLong2 = context.AddIntrinsicInt(Intrinsic.X86Cvtss2si, nRes);
}
nRes = context.AddIntrinsic(Intrinsic.X86Cmpss, nRes, fpMaxValMask, Const((int)CmpCondition.NotLessThan));
Operand nInt = context.AddIntrinsicInt(Intrinsic.X86Cvtsi2si, nRes);
Operand dRes;
if (signed)
{
dRes = context.BitwiseExclusiveOr(nIntOrLong, nInt);
}
else
{
dRes = context.BitwiseExclusiveOr(nIntOrLong2, nInt);
dRes = context.Add(dRes, nIntOrLong);
}
InsertScalar(context, op.Vd, dRes);
}
else
{
Operand nRes = context.AddIntrinsic(Intrinsic.X86Cmpsd, n, n, Const((int)CmpCondition.OrderedQ));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, n);
nRes = context.AddIntrinsic(Intrinsic.X86Roundsd, nRes, Const(X86GetRoundControl(roundMode)));
Operand zero = context.VectorZero();
Operand nCmp;
Operand nIntOrLong2 = null;
if (!signed)
{
nCmp = context.AddIntrinsic(Intrinsic.X86Cmpsd, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
}
long fpMaxVal = 0x41E0000000000000L; // 2147483648.0000000d (2147483648)
Operand fpMaxValMask = X86GetScalar(context, fpMaxVal);
Operand nIntOrLong = context.AddIntrinsicInt(Intrinsic.X86Cvtsd2si, nRes);
if (!signed)
{
nRes = context.AddIntrinsic(Intrinsic.X86Subsd, nRes, fpMaxValMask);
nCmp = context.AddIntrinsic(Intrinsic.X86Cmpsd, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
nIntOrLong2 = context.AddIntrinsicInt(Intrinsic.X86Cvtsd2si, nRes);
}
nRes = context.AddIntrinsic(Intrinsic.X86Cmpsd, nRes, fpMaxValMask, Const((int)CmpCondition.NotLessThan));
Operand nLong = context.AddIntrinsicLong(Intrinsic.X86Cvtsi2si, nRes);
nLong = context.ConvertI64ToI32(nLong);
Operand dRes;
if (signed)
{
dRes = context.BitwiseExclusiveOr(nIntOrLong, nLong);
}
else
{
dRes = context.BitwiseExclusiveOr(nIntOrLong2, nLong);
dRes = context.Add(dRes, nIntOrLong);
}
InsertScalar(context, op.Vd, dRes);
}
}
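// Explanatory note (not part of the change): CVTSS2SI/CVTSD2SI return the
// "integer indefinite" value 0x80000000 for NaN or out-of-range inputs. The
// CMPSS/CMPSD of n against itself (OrderedQ) zeroes NaN inputs up front, and
// the final NotLessThan compare against 2^31 yields an all-ones mask for
// inputs that overflow, so the XOR turns 0x80000000 into 0x7FFFFFFF
// (int.MaxValue). Worked example, signed single: input 3e9f rounds to 3e9,
// CVTSS2SI gives 0x80000000, the mask is 0xFFFFFFFF, and
// 0x80000000 ^ 0xFFFFFFFF = 0x7FFFFFFF, the saturated A32 result.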
private static void EmitSse41ConvertVector32(ArmEmitterContext context, FPRoundingMode roundMode, bool signed)
{
OpCode32Simd op = (OpCode32Simd)context.CurrOp;
EmitVectorUnaryOpSimd32(context, (n) =>
{
int sizeF = op.Size & 1;
if (sizeF == 0)
{
Operand nRes = context.AddIntrinsic(Intrinsic.X86Cmpps, n, n, Const((int)CmpCondition.OrderedQ));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, n);
nRes = context.AddIntrinsic(Intrinsic.X86Roundps, nRes, Const(X86GetRoundControl(roundMode)));
Operand zero = context.VectorZero();
Operand nCmp;
if (!signed)
{
nCmp = context.AddIntrinsic(Intrinsic.X86Cmpps, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
}
Operand fpMaxValMask = X86GetAllElements(context, 0x4F000000); // 2.14748365E9f (2147483648)
Operand nInt = context.AddIntrinsic(Intrinsic.X86Cvtps2dq, nRes);
Operand nInt2 = null;
if (!signed)
{
nRes = context.AddIntrinsic(Intrinsic.X86Subps, nRes, fpMaxValMask);
nCmp = context.AddIntrinsic(Intrinsic.X86Cmpps, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
nInt2 = context.AddIntrinsic(Intrinsic.X86Cvtps2dq, nRes);
}
nRes = context.AddIntrinsic(Intrinsic.X86Cmpps, nRes, fpMaxValMask, Const((int)CmpCondition.NotLessThan));
if (signed)
{
return context.AddIntrinsic(Intrinsic.X86Pxor, nInt, nRes);
}
else
{
Operand dRes = context.AddIntrinsic(Intrinsic.X86Pxor, nInt2, nRes);
return context.AddIntrinsic(Intrinsic.X86Paddd, dRes, nInt);
}
}
else /* if (sizeF == 1) */
{
Operand nRes = context.AddIntrinsic(Intrinsic.X86Cmppd, n, n, Const((int)CmpCondition.OrderedQ));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, n);
nRes = context.AddIntrinsic(Intrinsic.X86Roundpd, nRes, Const(X86GetRoundControl(roundMode)));
Operand zero = context.VectorZero();
Operand nCmp;
if (!signed)
{
nCmp = context.AddIntrinsic(Intrinsic.X86Cmppd, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
}
Operand fpMaxValMask = X86GetAllElements(context, 0x43E0000000000000L); // 9.2233720368547760E18d (9223372036854775808)
Operand nLong = InstEmit.EmitSse2CvtDoubleToInt64OpF(context, nRes, false);
Operand nLong2 = null;
if (!signed)
{
nRes = context.AddIntrinsic(Intrinsic.X86Subpd, nRes, fpMaxValMask);
nCmp = context.AddIntrinsic(Intrinsic.X86Cmppd, nRes, zero, Const((int)CmpCondition.NotLessThanOrEqual));
nRes = context.AddIntrinsic(Intrinsic.X86Pand, nRes, nCmp);
nLong2 = InstEmit.EmitSse2CvtDoubleToInt64OpF(context, nRes, false);
}
nRes = context.AddIntrinsic(Intrinsic.X86Cmppd, nRes, fpMaxValMask, Const((int)CmpCondition.NotLessThan));
if (signed)
{
return context.AddIntrinsic(Intrinsic.X86Pxor, nLong, nRes);
}
else
{
Operand dRes = context.AddIntrinsic(Intrinsic.X86Pxor, nLong2, nRes);
return context.AddIntrinsic(Intrinsic.X86Paddq, dRes, nLong);
}
}
});
}
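// Explanatory note (not part of the change): CVTPS2DQ only produces signed
// results, so the unsigned path converts twice: once directly (nInt) and once
// after subtracting the signed maximum (nInt2), then recombines. Worked
// example, 32-bit lanes with input 3e9f: nInt is the indefinite value
// 0x80000000 (acting as a +2^31 addend), nInt2 converts 3e9 - 2^31 =
// 852516352 exactly, the final mask is zero, and PADDD gives
// 852516352 + 0x80000000 = 3000000000. Inputs at or above 2^32 end up as
// 0x7FFFFFFF + 0x80000000 = 0xFFFFFFFF, i.e. saturated uint.MaxValue.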
    }
}
View file
@@ -31,7 +31,7 @@ namespace ARMeilleure.Instructions
        15L << 56 | 14L << 48 | 13L << 40 | 12L << 32 | 07L << 24 | 06L << 16 | 05L << 8 | 04L << 0 // S
    };

    public static readonly long ZeroMask = 128L << 56 | 128L << 48 | 128L << 40 | 128L << 32 | 128L << 24 | 128L << 16 | 128L << 8 | 128L << 0;
#endregion

#region "X86 SSE Intrinsics"
@@ -1026,8 +1026,8 @@ namespace ARMeilleure.Instructions
        if (op.RegisterSize == RegisterSize.Simd64)
        {
            Operand zeroEvenMask = X86GetElements(context, ZeroMask, EvenMasks[op.Size]);
            Operand zeroOddMask  = X86GetElements(context, ZeroMask, OddMasks [op.Size]);

            Operand mN = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, n, m); // m:n
@@ -1467,6 +1467,21 @@ namespace ARMeilleure.Instructions
    return context.Call(dlg, op1, op2);
}
public static Operand EmitFloatAbs(ArmEmitterContext context, Operand value, bool single, bool vector)
{
Operand mask;
if (single)
{
mask = vector ? X86GetAllElements(context, -0f) : X86GetScalar(context, -0f);
}
else
{
mask = vector ? X86GetAllElements(context, -0d) : X86GetScalar(context, -0d);
}
return context.AddIntrinsic(single ? Intrinsic.X86Andnps : Intrinsic.X86Andnpd, mask, value);
}
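// Note (illustrative, not part of the change): ANDNPS/ANDNPD compute
// ~mask & value, and the mask here is -0.0 (only the sign bit set), so the
// result simply clears the sign bit: a branchless |x| for float or double.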
public static Operand EmitVectorExtractSx(ArmEmitterContext context, int reg, int index, int size)
{
    return EmitVectorExtract(context, reg, index, size, true);
View file
@@ -473,6 +473,446 @@ namespace ARMeilleure.Instructions
        context.Copy(GetVecA32(op.Qd), res);
    }
// Intrinsic Helpers
public static Operand EmitMoveDoubleWordToSide(ArmEmitterContext context, Operand input, int originalV, int targetV)
{
Debug.Assert(input.Type == OperandType.V128);
int originalSide = originalV & 1;
int targetSide = targetV & 1;
if (originalSide == targetSide)
{
return input;
}
if (targetSide == 1)
{
return context.AddIntrinsic(Intrinsic.X86Movlhps, input, input); // Low to high.
}
else
{
return context.AddIntrinsic(Intrinsic.X86Movhlps, input, input); // High to low.
}
}
public static Operand EmitDoubleWordInsert(ArmEmitterContext context, Operand target, Operand value, int targetV)
{
Debug.Assert(target.Type == OperandType.V128 && value.Type == OperandType.V128);
int targetSide = targetV & 1;
int shuffleMask = 2;
if (targetSide == 1)
{
return context.AddIntrinsic(Intrinsic.X86Shufpd, target, value, Const(shuffleMask));
}
else
{
return context.AddIntrinsic(Intrinsic.X86Shufpd, value, target, Const(shuffleMask));
}
}
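// Note (illustrative, not part of the change): SHUFPD with immediate 0b10
// takes element 0 from its first operand and element 1 from its second, so
// both branches keep the untouched doubleword of `target` and take the other
// doubleword from `value` (callers have already moved `value` onto the
// correct side).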
public static Operand EmitScalarInsert(ArmEmitterContext context, Operand target, Operand value, int reg, bool doubleWidth)
{
Debug.Assert(target.Type == OperandType.V128 && value.Type == OperandType.V128);
// Insert from index 0 in value to index in target.
int index = reg & (doubleWidth ? 1 : 3);
if (doubleWidth)
{
if (index == 1)
{
return context.AddIntrinsic(Intrinsic.X86Movlhps, target, value); // Low to high.
}
else
{
return context.AddIntrinsic(Intrinsic.X86Shufpd, value, target, Const(2)); // Low to low, keep high from original.
}
}
else
{
if (Optimizations.UseSse41)
{
return context.AddIntrinsic(Intrinsic.X86Insertps, target, value, Const(index << 4));
}
else
{
target = EmitSwapScalar(context, target, index, doubleWidth); // Swap value to replace into element 0.
target = context.AddIntrinsic(Intrinsic.X86Movss, target, value); // Move the value into element 0 of the vector.
return EmitSwapScalar(context, target, index, doubleWidth); // Swap new value back to the correct index.
}
}
}
public static Operand EmitSwapScalar(ArmEmitterContext context, Operand target, int reg, bool doubleWidth)
{
// Index into 0, 0 into index. This swap happens at the start of an A32 scalar op if required.
int index = reg & (doubleWidth ? 1 : 3);
if (index == 0) return target;
if (doubleWidth)
{
int shuffleMask = 1; // Swap top and bottom. (b0 = 1, b1 = 0)
return context.AddIntrinsic(Intrinsic.X86Shufpd, target, target, Const(shuffleMask));
}
else
{
int shuffleMask = (3 << 6) | (2 << 4) | (1 << 2) | index; // Swap index and 0. (others remain)
shuffleMask &= ~(3 << (index * 2));
return context.AddIntrinsic(Intrinsic.X86Shufps, target, target, Const(shuffleMask));
}
}
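// Worked example (illustrative, not part of the change): for index = 2 the
// single-width mask starts as (3 << 6) | (2 << 4) | (1 << 2) | 2 = 0xE6, and
// clearing the two bits at position index * 2 gives 0xC6 = 11_00_01_10b:
// lane 0 <- src[2], lane 1 <- src[1], lane 2 <- src[0], lane 3 <- src[3],
// i.e. lanes 0 and 2 swap while the others keep their values.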
// Vector Operand Templates
public static void EmitVectorUnaryOpSimd32(ArmEmitterContext context, Func1I vectorFunc)
{
OpCode32Simd op = (OpCode32Simd)context.CurrOp;
Operand m = GetVecA32(op.Qm);
Operand d = GetVecA32(op.Qd);
if (!op.Q) // Register swap: move relevant doubleword to destination side.
{
m = EmitMoveDoubleWordToSide(context, m, op.Vm, op.Vd);
}
Operand res = vectorFunc(m);
if (!op.Q) // Register insert.
{
res = EmitDoubleWordInsert(context, d, res, op.Vd);
}
context.Copy(d, res);
}
public static void EmitVectorUnaryOpF32(ArmEmitterContext context, Intrinsic inst32, Intrinsic inst64)
{
OpCode32Simd op = (OpCode32Simd)context.CurrOp;
Intrinsic inst = (op.Size & 1) != 0 ? inst64 : inst32;
EmitVectorUnaryOpSimd32(context, (m) => context.AddIntrinsic(inst, m));
}
public static void EmitVectorBinaryOpSimd32(ArmEmitterContext context, Func2I vectorFunc, int side = -1)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
Operand n = GetVecA32(op.Qn);
Operand m = GetVecA32(op.Qm);
Operand d = GetVecA32(op.Qd);
if (side == -1)
{
side = op.Vd;
}
if (!op.Q) // Register swap: move relevant doubleword to destination side.
{
n = EmitMoveDoubleWordToSide(context, n, op.Vn, side);
m = EmitMoveDoubleWordToSide(context, m, op.Vm, side);
}
Operand res = vectorFunc(n, m);
if (!op.Q) // Register insert.
{
if (side != op.Vd)
{
res = EmitMoveDoubleWordToSide(context, res, side, op.Vd);
}
res = EmitDoubleWordInsert(context, d, res, op.Vd);
}
context.Copy(d, res);
}
public static void EmitVectorBinaryOpF32(ArmEmitterContext context, Intrinsic inst32, Intrinsic inst64)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
Intrinsic inst = (op.Size & 1) != 0 ? inst64 : inst32;
EmitVectorBinaryOpSimd32(context, (n, m) => context.AddIntrinsic(inst, n, m));
}
public static void EmitVectorTernaryOpSimd32(ArmEmitterContext context, Func3I vectorFunc)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
Operand n = GetVecA32(op.Qn);
Operand m = GetVecA32(op.Qm);
Operand d = GetVecA32(op.Qd);
Operand initialD = d;
if (!op.Q) // Register swap: move relevant doubleword to destination side.
{
n = EmitMoveDoubleWordToSide(context, n, op.Vn, op.Vd);
m = EmitMoveDoubleWordToSide(context, m, op.Vm, op.Vd);
}
Operand res = vectorFunc(d, n, m);
if (!op.Q) // Register insert.
{
res = EmitDoubleWordInsert(context, initialD, res, op.Vd);
}
context.Copy(initialD, res);
}
public static void EmitVectorTernaryOpF32(ArmEmitterContext context, Intrinsic inst32pt1, Intrinsic inst64pt1, Intrinsic inst32pt2, Intrinsic inst64pt2)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
Intrinsic inst1 = (op.Size & 1) != 0 ? inst64pt1 : inst32pt1;
Intrinsic inst2 = (op.Size & 1) != 0 ? inst64pt2 : inst32pt2;
EmitVectorTernaryOpSimd32(context, (d, n, m) =>
{
Operand res = context.AddIntrinsic(inst1, n, m);
return context.AddIntrinsic(inst2, d, res);
});
}
public static void EmitScalarUnaryOpSimd32(ArmEmitterContext context, Func1I scalarFunc)
{
OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;
bool doubleSize = (op.Size & 1) != 0;
int shift = doubleSize ? 1 : 2;
Operand m = GetVecA32(op.Vm >> shift);
Operand d = GetVecA32(op.Vd >> shift);
m = EmitSwapScalar(context, m, op.Vm, doubleSize);
Operand res = scalarFunc(m);
// Insert scalar into vector.
res = EmitScalarInsert(context, d, res, op.Vd, doubleSize);
context.Copy(d, res);
}
public static void EmitScalarUnaryOpF32(ArmEmitterContext context, Intrinsic inst32, Intrinsic inst64)
{
OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;
Intrinsic inst = (op.Size & 1) != 0 ? inst64 : inst32;
EmitScalarUnaryOpSimd32(context, (m) => (inst == 0) ? m : context.AddIntrinsic(inst, m));
}
public static void EmitScalarBinaryOpSimd32(ArmEmitterContext context, Func2I scalarFunc)
{
OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;
bool doubleSize = (op.Size & 1) != 0;
int shift = doubleSize ? 1 : 2;
Operand n = GetVecA32(op.Vn >> shift);
Operand m = GetVecA32(op.Vm >> shift);
Operand d = GetVecA32(op.Vd >> shift);
n = EmitSwapScalar(context, n, op.Vn, doubleSize);
m = EmitSwapScalar(context, m, op.Vm, doubleSize);
Operand res = scalarFunc(n, m);
// Insert scalar into vector.
res = EmitScalarInsert(context, d, res, op.Vd, doubleSize);
context.Copy(d, res);
}
public static void EmitScalarBinaryOpF32(ArmEmitterContext context, Intrinsic inst32, Intrinsic inst64)
{
OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;
Intrinsic inst = (op.Size & 1) != 0 ? inst64 : inst32;
EmitScalarBinaryOpSimd32(context, (n, m) => context.AddIntrinsic(inst, n, m));
}
public static void EmitScalarTernaryOpSimd32(ArmEmitterContext context, Func3I scalarFunc)
{
OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;
bool doubleSize = (op.Size & 1) != 0;
int shift = doubleSize ? 1 : 2;
Operand n = GetVecA32(op.Vn >> shift);
Operand m = GetVecA32(op.Vm >> shift);
Operand d = GetVecA32(op.Vd >> shift);
Operand initialD = d;
n = EmitSwapScalar(context, n, op.Vn, doubleSize);
m = EmitSwapScalar(context, m, op.Vm, doubleSize);
d = EmitSwapScalar(context, d, op.Vd, doubleSize);
Operand res = scalarFunc(d, n, m);
// Insert scalar into vector.
res = EmitScalarInsert(context, initialD, res, op.Vd, doubleSize);
context.Copy(initialD, res);
}
public static void EmitScalarTernaryOpF32(ArmEmitterContext context, Intrinsic inst32pt1, Intrinsic inst64pt1, Intrinsic inst32pt2, Intrinsic inst64pt2)
{
OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;
bool doubleSize = (op.Size & 1) != 0;
int shift = doubleSize ? 1 : 2;
Intrinsic inst1 = doubleSize ? inst64pt1 : inst32pt1;
Intrinsic inst2 = doubleSize ? inst64pt2 : inst32pt2;
EmitScalarTernaryOpSimd32(context, (d, n, m) =>
{
Operand res = context.AddIntrinsic(inst1, n, m);
return context.AddIntrinsic(inst2, d, res);
});
}
// By Scalar
public static void EmitVectorByScalarOpSimd32(ArmEmitterContext context, Func2I vectorFunc)
{
OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;
Operand n = GetVecA32(op.Qn);
Operand d = GetVecA32(op.Qd);
int index = op.Vm & 3;
int dupeMask = (index << 6) | (index << 4) | (index << 2) | index;
Operand m = GetVecA32(op.Vm >> 2);
m = context.AddIntrinsic(Intrinsic.X86Shufps, m, m, Const(dupeMask));
if (!op.Q) // Register swap: move relevant doubleword to destination side.
{
n = EmitMoveDoubleWordToSide(context, n, op.Vn, op.Vd);
}
Operand res = vectorFunc(n, m);
if (!op.Q) // Register insert.
{
res = EmitDoubleWordInsert(context, d, res, op.Vd);
}
context.Copy(d, res);
}
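// Note (illustrative, not part of the change): for index = 3 the dupeMask
// above is (3 << 6) | (3 << 4) | (3 << 2) | 3 = 0xFF, so the SHUFPS
// broadcasts scalar lane 3 of m into all four lanes before the element-wise
// operation runs.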
public static void EmitVectorByScalarOpF32(ArmEmitterContext context, Intrinsic inst32, Intrinsic inst64)
{
OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;
Intrinsic inst = (op.Size & 1) != 0 ? inst64 : inst32;
EmitVectorByScalarOpSimd32(context, (n, m) => context.AddIntrinsic(inst, n, m));
}
public static void EmitVectorsByScalarOpSimd32(ArmEmitterContext context, Func3I vectorFunc)
{
OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;
Operand n = GetVecA32(op.Qn);
Operand d = GetVecA32(op.Qd);
Operand initialD = d;
int index = op.Vm & 3;
int dupeMask = (index << 6) | (index << 4) | (index << 2) | index;
Operand m = GetVecA32(op.Vm >> 2);
m = context.AddIntrinsic(Intrinsic.X86Shufps, m, m, Const(dupeMask));
if (!op.Q) // Register swap: move relevant doubleword to destination side.
{
n = EmitMoveDoubleWordToSide(context, n, op.Vn, op.Vd);
}
Operand res = vectorFunc(d, n, m);
if (!op.Q) // Register insert.
{
res = EmitDoubleWordInsert(context, initialD, res, op.Vd);
}
context.Copy(initialD, res);
}
public static void EmitVectorsByScalarOpF32(ArmEmitterContext context, Intrinsic inst32pt1, Intrinsic inst64pt1, Intrinsic inst32pt2, Intrinsic inst64pt2)
{
OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;
Intrinsic inst1 = (op.Size & 1) != 0 ? inst64pt1 : inst32pt1;
Intrinsic inst2 = (op.Size & 1) != 0 ? inst64pt2 : inst32pt2;
EmitVectorsByScalarOpSimd32(context, (d, n, m) =>
{
Operand res = context.AddIntrinsic(inst1, n, m);
return context.AddIntrinsic(inst2, d, res);
});
}
// Pairwise
public static void EmitSse2VectorPairwiseOpF32(ArmEmitterContext context, Intrinsic inst32)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
EmitVectorBinaryOpSimd32(context, (n, m) =>
{
Operand unpck = context.AddIntrinsic(Intrinsic.X86Unpcklps, n, m);
Operand part0 = unpck;
Operand part1 = context.AddIntrinsic(Intrinsic.X86Movhlps, unpck, unpck);
return context.AddIntrinsic(inst32, part0, part1);
}, 0);
}
public static void EmitSsse3VectorPairwiseOp32(ArmEmitterContext context, Intrinsic[] inst)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
EmitVectorBinaryOpSimd32(context, (n, m) =>
{
if (op.RegisterSize == RegisterSize.Simd64)
{
Operand zeroEvenMask = X86GetElements(context, ZeroMask, EvenMasks[op.Size]);
Operand zeroOddMask = X86GetElements(context, ZeroMask, OddMasks[op.Size]);
Operand mN = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, n, m); // m:n
Operand left = context.AddIntrinsic(Intrinsic.X86Pshufb, mN, zeroEvenMask); // 0:even from m:n
Operand right = context.AddIntrinsic(Intrinsic.X86Pshufb, mN, zeroOddMask); // 0:odd from m:n
return context.AddIntrinsic(inst[op.Size], left, right);
}
else if (op.Size < 3)
{
Operand oddEvenMask = X86GetElements(context, OddMasks[op.Size], EvenMasks[op.Size]);
Operand oddEvenN = context.AddIntrinsic(Intrinsic.X86Pshufb, n, oddEvenMask); // odd:even from n
Operand oddEvenM = context.AddIntrinsic(Intrinsic.X86Pshufb, m, oddEvenMask); // odd:even from m
Operand left = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, oddEvenN, oddEvenM);
Operand right = context.AddIntrinsic(Intrinsic.X86Punpckhqdq, oddEvenN, oddEvenM);
return context.AddIntrinsic(inst[op.Size], left, right);
}
else
{
Operand left = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, n, m);
Operand right = context.AddIntrinsic(Intrinsic.X86Punpckhqdq, n, m);
return context.AddIntrinsic(inst[3], left, right);
}
}, 0);
}
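// Explanatory note (not part of the change): the pairwise helpers reduce
// A32's horizontal "operate on adjacent pairs" semantics to one vertical op.
// The shuffles gather the even-indexed elements of m:n into one vector and
// the odd-indexed elements into another, so e.g. a single PADD of the two
// gathered vectors computes d[i] = src[2i] + src[2i + 1] for every pair.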
// Generic Functions

public static Operand EmitSoftFloatCallDefaultFpscr(
View file
@@ -1,4 +1,5 @@
using ARMeilleure.Decoders;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.Translation;

using static ARMeilleure.Instructions.InstEmitSimdHelper32;
@@ -9,7 +10,14 @@ namespace ARMeilleure.Instructions
{
    public static void Vand_I(ArmEmitterContext context)
    {
        if (Optimizations.UseSse2)
        {
            EmitVectorBinaryOpF32(context, Intrinsic.X86Pand, Intrinsic.X86Pand);
        }
        else
        {
            EmitVectorBinaryOpZx32(context, (op1, op2) => context.BitwiseAnd(op1, op2));
        }
    }
    public static void Vbif(ArmEmitterContext context)
@@ -24,33 +32,64 @@ namespace ARMeilleure.Instructions
    public static void Vbsl(ArmEmitterContext context)
    {
        if (Optimizations.UseSse2)
        {
            EmitVectorTernaryOpSimd32(context, (d, n, m) =>
            {
                Operand res = context.AddIntrinsic(Intrinsic.X86Pxor, n, m);
                res = context.AddIntrinsic(Intrinsic.X86Pand, res, d);
                return context.AddIntrinsic(Intrinsic.X86Pxor, res, m);
            });
        }
        else
        {
            EmitVectorTernaryOpZx32(context, (op1, op2, op3) =>
            {
                return context.BitwiseExclusiveOr(
                    context.BitwiseAnd(op1,
                    context.BitwiseExclusiveOr(op2, op3)), op3);
            });
        }
    }
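    // Explanatory note (not part of the change): the three-instruction fast
    // path uses the bit-select identity ((n ^ m) & d) ^ m: wherever a bit of
    // d is 1 the result bit comes from n, wherever it is 0 it comes from m,
    // which is exactly VBSL's semantics without any branching.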
    public static void Vorr_I(ArmEmitterContext context)
    {
        if (Optimizations.UseSse2)
        {
            EmitVectorBinaryOpF32(context, Intrinsic.X86Por, Intrinsic.X86Por);
        }
        else
        {
            EmitVectorBinaryOpZx32(context, (op1, op2) => context.BitwiseOr(op1, op2));
        }
    }
    private static void EmitBifBit(ArmEmitterContext context, bool notRm)
    {
        OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

        if (Optimizations.UseSse2)
        {
            EmitVectorTernaryOpSimd32(context, (d, n, m) =>
            {
                Operand res = context.AddIntrinsic(Intrinsic.X86Pxor, n, d);
                res = context.AddIntrinsic((notRm) ? Intrinsic.X86Pandn : Intrinsic.X86Pand, m, res);
                return context.AddIntrinsic(Intrinsic.X86Pxor, d, res);
            });
        }
        else
        {
            EmitVectorTernaryOpZx32(context, (d, n, m) =>
            {
                if (notRm)
                {
                    m = context.BitwiseNot(m);
                }
                return context.BitwiseExclusiveOr(
                    context.BitwiseAnd(m,
                    context.BitwiseExclusiveOr(d, n)), d);
            });
        }
    }
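    // Explanatory note (not part of the change): d ^ ((n ^ d) & m) copies
    // bits of n into d wherever the mask m is set, which is VBIT; using
    // PANDN instead of PAND inverts the mask selection, giving VBIF.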
    }
}
View file
@@ -1,6 +1,7 @@
using ARMeilleure.Decoders;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.Translation;
using System;

using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper;
@@ -11,6 +12,21 @@ namespace ARMeilleure.Instructions
{
    static partial class InstEmit32
    {
#region "Masks"
// Same as InstEmitSimdMove, as the instructions do the same thing.
private static readonly long[] _masksE0_Uzp = new long[]
{
13L << 56 | 09L << 48 | 05L << 40 | 01L << 32 | 12L << 24 | 08L << 16 | 04L << 8 | 00L << 0,
11L << 56 | 10L << 48 | 03L << 40 | 02L << 32 | 09L << 24 | 08L << 16 | 01L << 8 | 00L << 0
};
private static readonly long[] _masksE1_Uzp = new long[]
{
15L << 56 | 11L << 48 | 07L << 40 | 03L << 32 | 14L << 24 | 10L << 16 | 06L << 8 | 02L << 0,
15L << 56 | 14L << 48 | 07L << 40 | 06L << 32 | 13L << 24 | 12L << 16 | 05L << 8 | 04L << 0
};
#endregion
public static void Vmov_I(ArmEmitterContext context)
{
    EmitVectorImmUnaryOp32(context, (op1) => op1);
@@ -128,114 +144,201 @@ namespace ARMeilleure.Instructions
public static void Vtbl(ArmEmitterContext context)
{
    OpCode32SimdTbl op = (OpCode32SimdTbl)context.CurrOp;

    bool extension = op.Opc == 1;
    int length = op.Length + 1;

    if (Optimizations.UseSsse3)
    {
        Operand d = GetVecA32(op.Qd);
        Operand m = EmitMoveDoubleWordToSide(context, GetVecA32(op.Qm), op.Vm, 0);

        Operand res;
        Operand mask = X86GetAllElements(context, 0x0707070707070707L);

        // Fast path for single register table.
        {
            Operand n = EmitMoveDoubleWordToSide(context, GetVecA32(op.Qn), op.Vn, 0);

            Operand mMask = context.AddIntrinsic(Intrinsic.X86Pcmpgtb, m, mask);
            mMask = context.AddIntrinsic(Intrinsic.X86Por, mMask, m);

            res = context.AddIntrinsic(Intrinsic.X86Pshufb, n, mMask);
        }

        for (int index = 1; index < length; index++)
        {
            int newVn = (op.Vn + index) & 0x1F;
            (int qn, int ind) = GetQuadwordAndSubindex(newVn, op.RegisterSize);
            Operand ni = EmitMoveDoubleWordToSide(context, GetVecA32(qn), newVn, 0);

            Operand idxMask = X86GetAllElements(context, 0x0808080808080808L * index);
            Operand mSubMask = context.AddIntrinsic(Intrinsic.X86Psubb, m, idxMask);

            Operand mMask = context.AddIntrinsic(Intrinsic.X86Pcmpgtb, mSubMask, mask);
            mMask = context.AddIntrinsic(Intrinsic.X86Por, mMask, mSubMask);

            Operand res2 = context.AddIntrinsic(Intrinsic.X86Pshufb, ni, mMask);
            res = context.AddIntrinsic(Intrinsic.X86Por, res, res2);
        }

        if (extension)
        {
            Operand idxMask = X86GetAllElements(context, (0x0808080808080808L * length) - 0x0101010101010101L);
            Operand zeroMask = context.VectorZero();

            Operand mPosMask = context.AddIntrinsic(Intrinsic.X86Pcmpgtb, m, idxMask);
            Operand mNegMask = context.AddIntrinsic(Intrinsic.X86Pcmpgtb, zeroMask, m);

            Operand mMask = context.AddIntrinsic(Intrinsic.X86Por, mPosMask, mNegMask);

            Operand dMask = context.AddIntrinsic(Intrinsic.X86Pand, EmitMoveDoubleWordToSide(context, d, op.Vd, 0), mMask);

            res = context.AddIntrinsic(Intrinsic.X86Por, res, dMask);
        }

        res = EmitMoveDoubleWordToSide(context, res, 0, op.Vd);

        context.Copy(d, EmitDoubleWordInsert(context, d, res, op.Vd));
    }
    else
    {
        int elems = op.GetBytesCount() >> op.Size;

        (int Qx, int Ix)[] tableTuples = new (int, int)[length];
        for (int i = 0; i < length; i++)
        {
            tableTuples[i] = GetQuadwordAndSubindex(op.Vn + i, op.RegisterSize);
        }

        int byteLength = length * 8;

        Operand res = GetVecA32(op.Qd);
        Operand m = GetVecA32(op.Qm);

        for (int index = 0; index < elems; index++)
        {
            Operand selectedIndex = context.ZeroExtend8(OperandType.I32, context.VectorExtract8(m, index + op.Im));

            Operand inRange = context.ICompareLess(selectedIndex, Const(byteLength));
            Operand elemRes = null; // Note: This is I64 for ease of calculation.

            // TODO: Branching rather than conditional select.

            // Get indexed byte.
            // To simplify (ha) the il, we get bytes from every vector and use a nested conditional select to choose the right result.
            // This does have to extract `length` times for every element but certainly not as bad as it could be.

            // Which vector number is the index on.
            Operand vecIndex = context.ShiftRightUI(selectedIndex, Const(3));
            // What should we shift by to extract it.
            Operand subVecIndexShift = context.ShiftLeft(context.BitwiseAnd(selectedIndex, Const(7)), Const(3));

            for (int i = 0; i < length; i++)
            {
                (int qx, int ix) = tableTuples[i];

                // Get the whole vector, we'll get a byte out of it.
                Operand lookupResult;
                if (qx == op.Qd)
                {
                    // Result contains the current state of the vector.
                    lookupResult = context.VectorExtract(OperandType.I64, res, ix);
                }
                else
                {
                    lookupResult = EmitVectorExtract32(context, qx, ix, 3, false); // I64
                }

                lookupResult = context.ShiftRightUI(lookupResult, subVecIndexShift); // Get the relevant byte from this vector.

                if (i == 0)
                {
                    elemRes = lookupResult; // First result is always default.
                }
                else
                {
                    Operand isThisElem = context.ICompareEqual(vecIndex, Const(i));
                    elemRes = context.ConditionalSelect(isThisElem, lookupResult, elemRes);
                }
            }

            Operand fallback = (extension) ? context.ZeroExtend32(OperandType.I64, EmitVectorExtract32(context, op.Qd, index + op.Id, 0, false)) : Const(0L);

            res = EmitVectorInsert(context, res, context.ConditionalSelect(inRange, elemRes, fallback), index + op.Id, 0);
        }

        context.Copy(GetVecA32(op.Qd), res);
    }
}
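// Explanatory note (not part of the change): PSHUFB zeroes a result byte
// whenever bit 7 of its index byte is set. The PCMPGTB against 7 (the
// 0x0707...07 constant) produces an all-ones byte that is ORed into every
// out-of-range index, pushing it to 0x80 or above so it selects zero; the
// PSUBB of 8 * i then re-bases the indices for each successive table
// register, and the per-register results are simply ORed together.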
public static void Vtrn(ArmEmitterContext context)
{
    OpCode32SimdCmpZ op = (OpCode32SimdCmpZ)context.CurrOp;

    if (Optimizations.UseSsse3)
    {
        EmitVectorShuffleOpSimd32(context, (m, d) =>
        {
            if (op.Size < 3)
            {
                long maskE0 = EvenMasks[op.Size];
                long maskE1 = OddMasks[op.Size];

                Operand mask = X86GetScalar(context, maskE0);
                mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);

                d = context.AddIntrinsic(Intrinsic.X86Pshufb, d, mask);
                m = context.AddIntrinsic(Intrinsic.X86Pshufb, m, mask);
            }

            Operand resD = context.AddIntrinsic(X86PunpcklInstruction[op.Size], d, m);
            Operand resM = context.AddIntrinsic(X86PunpckhInstruction[op.Size], d, m);

            return (resM, resD);
        });
    }
    else
    {
        int elems = op.GetBytesCount() >> op.Size;
        int pairs = elems >> 1;

        bool overlap = op.Qm == op.Qd;

        Operand resD = GetVecA32(op.Qd);
        Operand resM = GetVecA32(op.Qm);

        for (int index = 0; index < pairs; index++)
        {
            int pairIndex = index << 1;
            Operand d2 = EmitVectorExtract32(context, op.Qd, pairIndex + 1 + op.Id, op.Size, false);
            Operand m1 = EmitVectorExtract32(context, op.Qm, pairIndex + op.Im, op.Size, false);

            resD = EmitVectorInsert(context, resD, m1, pairIndex + 1 + op.Id, op.Size);

            if (overlap)
            {
                resM = resD;
            }

            resM = EmitVectorInsert(context, resM, d2, pairIndex + op.Im, op.Size);

            if (overlap)
            {
                resD = resM;
            }
        }

        context.Copy(GetVecA32(op.Qd), resD);
        if (!overlap)
        {
            context.Copy(GetVecA32(op.Qm), resM);
        }
    }
}
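// Explanatory note (not part of the change): the PSHUFB pre-pass packs each
// vector's even-indexed elements into its low half and odd-indexed elements
// into its high half, so PUNPCKL produces d0, m0, d2, m2, ... and PUNPCKH
// produces d1, m1, d3, m3, ..., which are exactly the two transposed
// registers VTRN defines.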
@@ -243,44 +346,68 @@ namespace ARMeilleure.Instructions
public static void Vzip(ArmEmitterContext context)
{
    OpCode32SimdCmpZ op = (OpCode32SimdCmpZ)context.CurrOp;
    if (Optimizations.UseSse2)
    {
        EmitVectorShuffleOpSimd32(context, (m, d) =>
        {
            if (op.RegisterSize == RegisterSize.Simd128)
            {
                Operand resD = context.AddIntrinsic(X86PunpcklInstruction[op.Size], d, m);
                Operand resM = context.AddIntrinsic(X86PunpckhInstruction[op.Size], d, m);

                return (resM, resD);
            }
            else
            {
                Operand res = context.AddIntrinsic(X86PunpcklInstruction[op.Size], d, m);

                Operand resD = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, res, context.VectorZero());
                Operand resM = context.AddIntrinsic(Intrinsic.X86Punpckhqdq, res, context.VectorZero());
                return (resM, resD);
            }
        });
    }
    else
    {
        int elems = op.GetBytesCount() >> op.Size;
        int pairs = elems >> 1;

        bool overlap = op.Qm == op.Qd;

        Operand resD = GetVecA32(op.Qd);
        Operand resM = GetVecA32(op.Qm);

        for (int index = 0; index < pairs; index++)
        {
            int pairIndex = index << 1;

            Operand dRowD = EmitVectorExtract32(context, op.Qd, index + op.Id, op.Size, false);
            Operand mRowD = EmitVectorExtract32(context, op.Qm, index + op.Im, op.Size, false);

            Operand dRowM = EmitVectorExtract32(context, op.Qd, index + op.Id + pairs, op.Size, false);
            Operand mRowM = EmitVectorExtract32(context, op.Qm, index + op.Im + pairs, op.Size, false);

            resD = EmitVectorInsert(context, resD, dRowD, pairIndex + op.Id, op.Size);
            resD = EmitVectorInsert(context, resD, mRowD, pairIndex + 1 + op.Id, op.Size);

            if (overlap)
            {
                resM = resD;
            }

            resM = EmitVectorInsert(context, resM, dRowM, pairIndex + op.Im, op.Size);
            resM = EmitVectorInsert(context, resM, mRowM, pairIndex + 1 + op.Im, op.Size);

            if (overlap)
            {
                resD = resM;
            }
        }

        context.Copy(GetVecA32(op.Qd), resD);
        if (!overlap)
        {
            context.Copy(GetVecA32(op.Qm), resM);
        }
    }
}
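// Explanatory note (not part of the change): PUNPCKL* interleaves the low
// halves of d and m and PUNPCKH* the high halves, which is VZIP's element
// interleave directly; in the 64-bit case a single PUNPCKL zips the low
// doublewords and the PUNPCKLQDQ/PUNPCKHQDQ pair splits the result back into
// the two destination registers.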
@@ -288,49 +415,135 @@ namespace ARMeilleure.Instructions
public static void Vuzp(ArmEmitterContext context)
{
    OpCode32SimdCmpZ op = (OpCode32SimdCmpZ)context.CurrOp;
    if (Optimizations.UseSsse3)
    {
        EmitVectorShuffleOpSimd32(context, (m, d) =>
        {
            if (op.RegisterSize == RegisterSize.Simd128)
            {
                if (op.Size < 3)
                {
                    long maskE0 = EvenMasks[op.Size];
                    long maskE1 = OddMasks[op.Size];

                    Operand mask = X86GetScalar(context, maskE0);
                    mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);

                    d = context.AddIntrinsic(Intrinsic.X86Pshufb, d, mask);
                    m = context.AddIntrinsic(Intrinsic.X86Pshufb, m, mask);
                }

                Operand resD = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, d, m);
                Operand resM = context.AddIntrinsic(Intrinsic.X86Punpckhqdq, d, m);

                return (resM, resD);
            }
            else
            {
                Intrinsic punpcklInst = X86PunpcklInstruction[op.Size];

                Operand res = context.AddIntrinsic(punpcklInst, d, m);

                if (op.Size < 2)
                {
                    long maskE0 = _masksE0_Uzp[op.Size];
                    long maskE1 = _masksE1_Uzp[op.Size];

                    Operand mask = X86GetScalar(context, maskE0);
                    mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);

                    res = context.AddIntrinsic(Intrinsic.X86Pshufb, res, mask);
                }

                Operand resD = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, res, context.VectorZero());
                Operand resM = context.AddIntrinsic(Intrinsic.X86Punpckhqdq, res, context.VectorZero());

                return (resM, resD);
            }
        });
    }
    else
    {
        int elems = op.GetBytesCount() >> op.Size;
        int pairs = elems >> 1;

        bool overlap = op.Qm == op.Qd;

        Operand resD = GetVecA32(op.Qd);
        Operand resM = GetVecA32(op.Qm);

        for (int index = 0; index < elems; index++)
        {
            Operand dIns, mIns;
            if (index >= pairs)
            {
                int pairIndex = index - pairs;
                dIns = EmitVectorExtract32(context, op.Qm, (pairIndex << 1) + op.Im, op.Size, false);
                mIns = EmitVectorExtract32(context, op.Qm, ((pairIndex << 1) | 1) + op.Im, op.Size, false);
            }
            else
            {
                dIns = EmitVectorExtract32(context, op.Qd, (index << 1) + op.Id, op.Size, false);
                mIns = EmitVectorExtract32(context, op.Qd, ((index << 1) | 1) + op.Id, op.Size, false);
            }

            resD = EmitVectorInsert(context, resD, dIns, index + op.Id, op.Size);

            if (overlap)
            {
                resM = resD;
            }

            resM = EmitVectorInsert(context, resM, mIns, index + op.Im, op.Size);

            if (overlap)
            {
                resD = resM;
            }
        }

        context.Copy(GetVecA32(op.Qd), resD);
        if (!overlap)
        {
            context.Copy(GetVecA32(op.Qm), resM);
        }
    }
}
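// Explanatory note (not part of the change): after the even:odd PSHUFB
// gather, each source holds its even elements in the low quadword and its
// odd elements in the high one, so PUNPCKLQDQ concatenates even(d):even(m)
// and PUNPCKHQDQ concatenates odd(d):odd(m), which are exactly VUZP's two
// de-interleaved results.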
public static void EmitVectorShuffleOpSimd32(ArmEmitterContext context, Func<Operand, Operand, (Operand, Operand)> shuffleFunc)
{
    OpCode32Simd op = (OpCode32Simd)context.CurrOp;

    Operand m = GetVecA32(op.Qm);
    Operand d = GetVecA32(op.Qd);
    Operand initialM = m;
    Operand initialD = d;

    if (!op.Q) // Register swap: move relevant doubleword to side 0, for consistency.
    {
        m = EmitMoveDoubleWordToSide(context, m, op.Vm, 0);
        d = EmitMoveDoubleWordToSide(context, d, op.Vd, 0);
    }

    (Operand resM, Operand resD) = shuffleFunc(m, d);

    bool overlap = op.Qm == op.Qd;

    if (!op.Q) // Register insert.
    {
        resM = EmitDoubleWordInsert(context, initialM, EmitMoveDoubleWordToSide(context, resM, 0, op.Vm), op.Vm);
        resD = EmitDoubleWordInsert(context, overlap ? resM : initialD, EmitMoveDoubleWordToSide(context, resD, 0, op.Vd), op.Vd);
    }

    if (!overlap)
    {
        context.Copy(initialM, resM);
    }

    context.Copy(initialD, resD);
}
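// Note (illustrative, not part of the change): Vtrn/Vzip/Vuzp produce two
// result registers at once, so this template normalizes both doublewords to
// side 0, runs the shuffle, and reinserts each result on its own side. When
// Qd and Qm alias the same register, only d is written back and its insert
// is layered over resM, matching the overlap behavior of the slow paths.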
    }
}
View file
@@ -41,6 +41,7 @@ namespace ARMeilleure.IntermediateRepresentation
    X86Divss,
    X86Haddpd,
    X86Haddps,
    X86Insertps,
    X86Maxpd,
    X86Maxps,
    X86Maxsd,
@@ -51,6 +52,7 @@ namespace ARMeilleure.IntermediateRepresentation
    X86Minss,
    X86Movhlps,
    X86Movlhps,
    X86Movss,
    X86Mulpd,
    X86Mulps,
    X86Mulsd,