2018-05-12 01:10:27 +02:00
|
|
|
using ChocolArm64.State;
|
|
|
|
using ChocolArm64.Translation;
|
|
|
|
using System;
|
|
|
|
using System.Runtime.CompilerServices;
|
|
|
|
using System.Runtime.Intrinsics;
|
|
|
|
using System.Runtime.Intrinsics.X86;
|
|
|
|
|
2018-10-31 02:43:02 +01:00
|
|
|
namespace ChocolArm64.Instructions
|
2018-05-12 01:10:27 +02:00
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
static class VectorHelper
|
2018-05-12 01:10:27 +02:00
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
public static void EmitCall(ILEmitterCtx context, string name64, string name128)
{
    // Select the 64-bit or 128-bit helper according to the current op's SIMD register size.
    string mthdName = context.CurrOp.RegisterSize == RegisterSize.Simd64
        ? name64
        : name128;

    context.EmitCall(typeof(VectorHelper), mthdName);
}
|
|
|
|
|
2018-10-31 02:43:02 +01:00
|
|
|
public static void EmitCall(ILEmitterCtx context, string mthdName)
{
    // Emit a call to the named static method on this helper class.
    Type helperType = typeof(VectorHelper);

    context.EmitCall(helperType, mthdName);
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
// Saturating float -> int32 conversion (NaN maps to 0), as ARM FCVTZS requires.
public static int SatF32ToS32(float value)
{
    if (float.IsNaN(value)) return 0;

    // int.MaxValue rounds UP to 2^31 when converted to float, so the old
    // `value > int.MaxValue` test missed value == 2^31 exactly and fell
    // through to an out-of-range cast. Compare with >= against the
    // exactly-representable boundary instead.
    if (value >= 2147483648.0f) return int.MaxValue;

    // int.MinValue (-2^31) is exactly representable as a float.
    if (value < int.MinValue) return int.MinValue;

    return (int)value;
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
// Saturating float -> int64 conversion (NaN maps to 0).
public static long SatF32ToS64(float value)
{
    if (float.IsNaN(value)) return 0;

    // long.MaxValue rounds UP to 2^63 as a float; the old `>` test missed
    // value == 2^63 exactly, so use >= on the exact boundary.
    if (value >= 9223372036854775808.0f) return long.MaxValue;

    // long.MinValue (-2^63) is exactly representable as a float.
    if (value < long.MinValue) return long.MinValue;

    return (long)value;
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
// Saturating float -> uint32 conversion (NaN maps to 0).
public static uint SatF32ToU32(float value)
{
    if (float.IsNaN(value)) return 0;

    // uint.MaxValue rounds UP to 2^32 as a float; the old `>` test missed
    // value == 2^32 exactly, so use >= on the exact boundary.
    if (value >= 4294967296.0f) return uint.MaxValue;

    // Anything negative saturates to zero.
    if (value < uint.MinValue) return uint.MinValue;

    return (uint)value;
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
// Saturating float -> uint64 conversion (NaN maps to 0).
public static ulong SatF32ToU64(float value)
{
    if (float.IsNaN(value)) return 0;

    // ulong.MaxValue rounds UP to 2^64 as a float; the old `>` test missed
    // value == 2^64 exactly, so use >= on the exact boundary.
    if (value >= 18446744073709551616.0f) return ulong.MaxValue;

    // Anything negative saturates to zero.
    if (value < ulong.MinValue) return ulong.MinValue;

    return (ulong)value;
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
// Saturating double -> int32 conversion; NaN maps to 0.
// Both int bounds are exactly representable as doubles, so plain
// comparisons are sufficient here.
public static int SatF64ToS32(double value)
{
    if (double.IsNaN(value))
    {
        return 0;
    }

    if (value > int.MaxValue)
    {
        return int.MaxValue;
    }

    if (value < int.MinValue)
    {
        return int.MinValue;
    }

    return (int)value;
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
// Saturating double -> int64 conversion (NaN maps to 0).
public static long SatF64ToS64(double value)
{
    if (double.IsNaN(value)) return 0;

    // long.MaxValue rounds UP to 2^63 as a double; the old `>` test missed
    // value == 2^63 exactly, so use >= on the exact boundary.
    if (value >= 9223372036854775808.0) return long.MaxValue;

    // long.MinValue (-2^63) is exactly representable as a double.
    if (value < long.MinValue) return long.MinValue;

    return (long)value;
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
// Saturating double -> uint32 conversion; NaN maps to 0.
// Both uint bounds are exactly representable as doubles, so plain
// comparisons are sufficient here.
public static uint SatF64ToU32(double value)
{
    if (double.IsNaN(value))
    {
        return 0;
    }

    if (value > uint.MaxValue)
    {
        return uint.MaxValue;
    }

    if (value < uint.MinValue)
    {
        return uint.MinValue;
    }

    return (uint)value;
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
// Saturating double -> uint64 conversion (NaN maps to 0).
public static ulong SatF64ToU64(double value)
{
    if (double.IsNaN(value)) return 0;

    // ulong.MaxValue rounds UP to 2^64 as a double; the old `>` test missed
    // value == 2^64 exactly, so use >= on the exact boundary.
    if (value >= 18446744073709551616.0) return ulong.MaxValue;

    // Anything negative saturates to zero.
    if (value < ulong.MinValue) return ulong.MinValue;

    return (ulong)value;
}
|
|
|
|
|
2018-10-31 02:43:02 +01:00
|
|
|
// Rounds a double according to the FPCR rounding mode held in the thread state.
public static double Round(double value, CpuThreadState state)
{
    RoundMode mode = state.FPRoundingMode();

    if (mode == RoundMode.ToNearest)
    {
        return Math.Round(value);
    }
    else if (mode == RoundMode.TowardsPlusInfinity)
    {
        return Math.Ceiling(value);
    }
    else if (mode == RoundMode.TowardsMinusInfinity)
    {
        return Math.Floor(value);
    }
    else if (mode == RoundMode.TowardsZero)
    {
        return Math.Truncate(value);
    }

    // Unreachable for a well-formed rounding mode.
    throw new InvalidOperationException();
}
|
|
|
|
|
2018-10-31 02:43:02 +01:00
|
|
|
// Single-precision counterpart of Round: applies the FPCR rounding mode.
public static float RoundF(float value, CpuThreadState state)
{
    RoundMode mode = state.FPRoundingMode();

    if (mode == RoundMode.ToNearest)
    {
        return MathF.Round(value);
    }
    else if (mode == RoundMode.TowardsPlusInfinity)
    {
        return MathF.Ceiling(value);
    }
    else if (mode == RoundMode.TowardsMinusInfinity)
    {
        return MathF.Floor(value);
    }
    else if (mode == RoundMode.TowardsZero)
    {
        return MathF.Truncate(value);
    }

    // Unreachable for a well-formed rounding mode.
    throw new InvalidOperationException();
}
|
|
|
|
|
|
|
|
public static Vector128<float> Tbl1_V64(
    Vector128<float> vector,
    Vector128<float> tb0)
{
    // 64-bit TBL with one table register: only the low 8 indices are looked up.
    const int bytes = 8;

    return Tbl(vector, bytes, tb0);
}
|
|
|
|
|
|
|
|
public static Vector128<float> Tbl1_V128(
    Vector128<float> vector,
    Vector128<float> tb0)
{
    // 128-bit TBL with one table register: all 16 indices are looked up.
    const int bytes = 16;

    return Tbl(vector, bytes, tb0);
}
|
|
|
|
|
|
|
|
public static Vector128<float> Tbl2_V64(
    Vector128<float> vector,
    Vector128<float> tb0,
    Vector128<float> tb1)
{
    // 64-bit TBL with two table registers (32-byte table, low 8 lanes).
    const int bytes = 8;

    return Tbl(vector, bytes, tb0, tb1);
}
|
|
|
|
|
|
|
|
public static Vector128<float> Tbl2_V128(
    Vector128<float> vector,
    Vector128<float> tb0,
    Vector128<float> tb1)
{
    // 128-bit TBL with two table registers (32-byte table, all 16 lanes).
    const int bytes = 16;

    return Tbl(vector, bytes, tb0, tb1);
}
|
|
|
|
|
|
|
|
public static Vector128<float> Tbl3_V64(
    Vector128<float> vector,
    Vector128<float> tb0,
    Vector128<float> tb1,
    Vector128<float> tb2)
{
    // 64-bit TBL with three table registers (48-byte table, low 8 lanes).
    const int bytes = 8;

    return Tbl(vector, bytes, tb0, tb1, tb2);
}
|
|
|
|
|
|
|
|
public static Vector128<float> Tbl3_V128(
    Vector128<float> vector,
    Vector128<float> tb0,
    Vector128<float> tb1,
    Vector128<float> tb2)
{
    // 128-bit TBL with three table registers (48-byte table, all 16 lanes).
    const int bytes = 16;

    return Tbl(vector, bytes, tb0, tb1, tb2);
}
|
|
|
|
|
|
|
|
public static Vector128<float> Tbl4_V64(
    Vector128<float> vector,
    Vector128<float> tb0,
    Vector128<float> tb1,
    Vector128<float> tb2,
    Vector128<float> tb3)
{
    // 64-bit TBL with four table registers (64-byte table, low 8 lanes).
    const int bytes = 8;

    return Tbl(vector, bytes, tb0, tb1, tb2, tb3);
}
|
|
|
|
|
|
|
|
public static Vector128<float> Tbl4_V128(
    Vector128<float> vector,
    Vector128<float> tb0,
    Vector128<float> tb1,
    Vector128<float> tb2,
    Vector128<float> tb3)
{
    // 128-bit TBL with four table registers (64-byte table, all 16 lanes).
    const int bytes = 16;

    return Tbl(vector, bytes, tb0, tb1, tb2, tb3);
}
|
|
|
|
|
2018-10-31 02:43:02 +01:00
|
|
|
// Software implementation of the ARM TBL instruction: each of the low
// `bytes` lanes of `vector` selects a byte out of the concatenated table
// registers; selectors past the table (and lanes past `bytes`) yield zero.
private static Vector128<float> Tbl(Vector128<float> vector, int bytes, params Vector128<float>[] tb)
{
    // Flatten the table registers into one contiguous byte array.
    byte[] flat = new byte[tb.Length * 16];

    for (byte reg = 0; reg < tb.Length; reg++)
    {
        for (byte elem = 0; elem < 16; elem++)
        {
            flat[reg * 16 + elem] = (byte)VectorExtractIntZx(tb[reg], elem, 0);
        }
    }

    // Result starts zeroed; only in-range selectors write a byte.
    Vector128<float> result = new Vector128<float>();

    for (byte lane = 0; lane < bytes; lane++)
    {
        byte selector = (byte)VectorExtractIntZx(vector, lane, 0);

        if (selector < flat.Length)
        {
            result = VectorInsertInt(flat[selector], result, lane, 0);
        }
    }

    return result;
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
// Extracts the 64-bit lane at `index` and reinterprets its bits as a double.
public static double VectorExtractDouble(Vector128<float> vector, byte index)
{
    if (Sse41.IsSupported)
    {
        long bits = Sse41.Extract(Sse.StaticCast<float, long>(vector), index);

        return BitConverter.Int64BitsToDouble(bits);
    }

    if (Sse2.IsSupported)
    {
        // SSE2 fallback: zero-extended 64-bit extraction, then bit reinterpret.
        long bits = (long)VectorExtractIntZx(vector, index, 3);

        return BitConverter.Int64BitsToDouble(bits);
    }

    throw new PlatformNotSupportedException();
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
// Sign-extended element extraction; `size` is log2 of the element width in
// bytes (0 = byte, 1 = half, 2 = word, 3 = doubleword).
public static long VectorExtractIntSx(Vector128<float> vector, byte index, int size)
{
    if (Sse41.IsSupported)
    {
        switch (size)
        {
            case 0: return (sbyte)Sse41.Extract(Sse.StaticCast<float, byte>(vector), index);
            case 1: return (short)Sse2.Extract(Sse.StaticCast<float, ushort>(vector), index);
            case 2: return Sse41.Extract(Sse.StaticCast<float, int>(vector), index);
            case 3: return Sse41.Extract(Sse.StaticCast<float, long>(vector), index);
        }

        throw new ArgumentOutOfRangeException(nameof(size));
    }

    if (Sse2.IsSupported)
    {
        // SSE2 fallback: take the zero-extended value, then sign-extend by
        // casting through the appropriately sized signed type.
        switch (size)
        {
            case 0: return (sbyte)VectorExtractIntZx(vector, index, size);
            case 1: return (short)VectorExtractIntZx(vector, index, size);
            case 2: return (int)VectorExtractIntZx(vector, index, size);
            case 3: return (long)VectorExtractIntZx(vector, index, size);
        }

        throw new ArgumentOutOfRangeException(nameof(size));
    }

    throw new PlatformNotSupportedException();
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
// Zero-extended element extraction; `size` is log2 of the element width in
// bytes (0 = byte, 1 = half, 2 = word, 3 = doubleword).
public static ulong VectorExtractIntZx(Vector128<float> vector, byte index, int size)
{
    if (Sse41.IsSupported)
    {
        // SSE4.1 has direct extract instructions for every element width.
        if (size == 0)
        {
            return Sse41.Extract(Sse.StaticCast<float, byte>(vector), index);
        }
        else if (size == 1)
        {
            return Sse2.Extract(Sse.StaticCast<float, ushort>(vector), index);
        }
        else if (size == 2)
        {
            return Sse41.Extract(Sse.StaticCast<float, uint>(vector), index);
        }
        else if (size == 3)
        {
            return Sse41.Extract(Sse.StaticCast<float, ulong>(vector), index);
        }
        else
        {
            throw new ArgumentOutOfRangeException(nameof(size));
        }
    }
    else if (Sse2.IsSupported)
    {
        // SSE2 only has a 16-bit extract (PEXTRW), so build wider (or
        // narrower) elements out of 16-bit pieces. Map the element index to
        // the index of its first 16-bit lane.
        int shortIdx = size == 0
            ? index >> 1
            : index << (size - 1);

        ushort value = Sse2.Extract(Sse.StaticCast<float, ushort>(vector), (byte)shortIdx);

        if (size == 0)
        {
            // Pick the low or high byte of the 16-bit lane.
            return (byte)(value >> (index & 1) * 8);
        }
        else if (size == 1)
        {
            return value;
        }
        else if (size == 2 || size == 3)
        {
            ushort value1 = Sse2.Extract(Sse.StaticCast<float, ushort>(vector), (byte)(shortIdx + 1));

            if (size == 2)
            {
                // 32-bit element: combine two 16-bit lanes.
                return (uint)(value | (value1 << 16));
            }

            // 64-bit element: combine four 16-bit lanes.
            ushort value2 = Sse2.Extract(Sse.StaticCast<float, ushort>(vector), (byte)(shortIdx + 2));
            ushort value3 = Sse2.Extract(Sse.StaticCast<float, ushort>(vector), (byte)(shortIdx + 3));

            return ((ulong)value  <<  0) |
                   ((ulong)value1 << 16) |
                   ((ulong)value2 << 32) |
                   ((ulong)value3 << 48);
        }
        else
        {
            throw new ArgumentOutOfRangeException(nameof(size));
        }
    }

    throw new PlatformNotSupportedException();
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
// Extracts the 32-bit float lane at `index`.
public static float VectorExtractSingle(Vector128<float> vector, byte index)
{
    if (Sse41.IsSupported)
    {
        return Sse41.Extract(vector, index);
    }

    if (Sse2.IsSupported)
    {
        // SSE2 fallback: rebuild the 32-bit lane from its two 16-bit halves.
        Vector128<ushort> asUShort = Sse.StaticCast<float, ushort>(vector);

        int lo = Sse2.Extract(asUShort, (byte)(index * 2 + 0));
        int hi = Sse2.Extract(asUShort, (byte)(index * 2 + 1));

        return BitConverter.Int32BitsToSingle(lo | (hi << 16));
    }

    throw new PlatformNotSupportedException();
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
// Inserts a double into the 64-bit lane at `index` by reinterpreting its bits.
public static Vector128<float> VectorInsertDouble(double value, Vector128<float> vector, byte index)
{
    ulong bits = (ulong)BitConverter.DoubleToInt64Bits(value);

    // Size 3 = 64-bit element.
    return VectorInsertInt(bits, vector, index, 3);
}
|
|
|
|
|
|
|
|
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
2018-10-31 02:43:02 +01:00
|
|
|
public static Vector128<float> VectorInsertInt(ulong value, Vector128<float> vector, byte index, int size)
|
2018-05-12 01:10:27 +02:00
|
|
|
{
|
|
|
|
if (Sse41.IsSupported)
|
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
if (size == 0)
|
2018-05-12 01:10:27 +02:00
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
return Sse.StaticCast<byte, float>(Sse41.Insert(Sse.StaticCast<float, byte>(vector), (byte)value, index));
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
}
|
2018-10-31 02:43:02 +01:00
|
|
|
else if (size == 1)
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
return Sse.StaticCast<ushort, float>(Sse2.Insert(Sse.StaticCast<float, ushort>(vector), (ushort)value, index));
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
}
|
2018-10-31 02:43:02 +01:00
|
|
|
else if (size == 2)
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
return Sse.StaticCast<uint, float>(Sse41.Insert(Sse.StaticCast<float, uint>(vector), (uint)value, index));
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
}
|
2018-10-31 02:43:02 +01:00
|
|
|
else if (size == 3)
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
return Sse.StaticCast<ulong, float>(Sse41.Insert(Sse.StaticCast<float, ulong>(vector), value, index));
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
throw new ArgumentOutOfRangeException(nameof(size));
|
2018-05-12 01:10:27 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (Sse2.IsSupported)
|
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
Vector128<ushort> shortVector = Sse.StaticCast<float, ushort>(vector);
|
2018-05-12 01:10:27 +02:00
|
|
|
|
2018-10-31 02:43:02 +01:00
|
|
|
int shortIdx = size == 0
|
|
|
|
? index >> 1
|
|
|
|
: index << (size - 1);
|
2018-05-12 01:10:27 +02:00
|
|
|
|
2018-10-31 02:43:02 +01:00
|
|
|
if (size == 0)
|
2018-05-12 01:10:27 +02:00
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
ushort shortVal = Sse2.Extract(Sse.StaticCast<float, ushort>(vector), (byte)shortIdx);
|
2018-05-12 01:10:27 +02:00
|
|
|
|
2018-10-31 02:43:02 +01:00
|
|
|
int shift = (index & 1) * 8;
|
2018-05-12 01:10:27 +02:00
|
|
|
|
2018-10-31 02:43:02 +01:00
|
|
|
shortVal &= (ushort)(0xff00 >> shift);
|
2018-05-12 01:10:27 +02:00
|
|
|
|
2018-10-31 02:43:02 +01:00
|
|
|
shortVal |= (ushort)((byte)value << shift);
|
2018-05-12 01:10:27 +02:00
|
|
|
|
2018-10-31 02:43:02 +01:00
|
|
|
return Sse.StaticCast<ushort, float>(Sse2.Insert(shortVector, shortVal, (byte)shortIdx));
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
}
|
2018-10-31 02:43:02 +01:00
|
|
|
else if (size == 1)
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
return Sse.StaticCast<ushort, float>(Sse2.Insert(Sse.StaticCast<float, ushort>(vector), (ushort)value, index));
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
}
|
2018-10-31 02:43:02 +01:00
|
|
|
else if (size == 2 || size == 3)
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
shortVector = Sse2.Insert(shortVector, (ushort)(value >> 0), (byte)(shortIdx + 0));
|
|
|
|
shortVector = Sse2.Insert(shortVector, (ushort)(value >> 16), (byte)(shortIdx + 1));
|
2018-05-12 01:10:27 +02:00
|
|
|
|
2018-10-31 02:43:02 +01:00
|
|
|
if (size == 3)
|
2018-05-12 01:10:27 +02:00
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
shortVector = Sse2.Insert(shortVector, (ushort)(value >> 32), (byte)(shortIdx + 2));
|
|
|
|
shortVector = Sse2.Insert(shortVector, (ushort)(value >> 48), (byte)(shortIdx + 3));
|
2018-05-12 01:10:27 +02:00
|
|
|
}
|
|
|
|
|
2018-10-31 02:43:02 +01:00
|
|
|
return Sse.StaticCast<ushort, float>(shortVector);
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2018-10-31 02:43:02 +01:00
|
|
|
throw new ArgumentOutOfRangeException(nameof(size));
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
}
|
2018-05-12 01:10:27 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
throw new PlatformNotSupportedException();
|
|
|
|
}
|
|
|
|
|
|
|
|
//Inserts the 32-bit float 'value' into lane 'index' (expected 0-3) of
//'vector' and returns the updated vector; the other lanes are untouched.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<float> VectorInsertSingle(float value, Vector128<float> vector, byte index)
{
    if (Sse41.IsSupported)
    {
        //Note: The if/else if is necessary to enable the JIT to
        //produce a single INSERTPS instruction instead of the
        //jump table fallback.
        //The INSERTPS immediate encodes the destination lane in
        //bits [5:4]; the low 4 zero-mask bits stay clear so no
        //lane is zeroed.
        if (index == 0)
        {
            return Sse41.Insert(vector, value, 0x00);
        }
        else if (index == 1)
        {
            return Sse41.Insert(vector, value, 0x10);
        }
        else if (index == 2)
        {
            return Sse41.Insert(vector, value, 0x20);
        }
        else if (index == 3)
        {
            return Sse41.Insert(vector, value, 0x30);
        }
        else
        {
            throw new ArgumentOutOfRangeException(nameof(index));
        }
    }
    else if (Sse2.IsSupported)
    {
        //SSE2 fallback: reinterpret the float's raw bits and write them
        //as two 16-bit halves, since Sse2.Insert (PINSRW) only takes words.
        int intValue = BitConverter.SingleToInt32Bits(value);

        ushort low  = (ushort)(intValue >> 0);
        ushort high = (ushort)(intValue >> 16);

        Vector128<ushort> shortVector = Sse.StaticCast<float, ushort>(vector);

        //Note(review): unlike the SSE4.1 path, 'index' is not range
        //checked here; values > 3 would target out-of-range word lanes —
        //presumably callers always pass 0-3 (TODO confirm).
        shortVector = Sse2.Insert(shortVector, low,  (byte)(index * 2 + 0));
        shortVector = Sse2.Insert(shortVector, high, (byte)(index * 2 + 1));

        return Sse.StaticCast<ushort, float>(shortVector);
    }

    throw new PlatformNotSupportedException();
}
|
|
|
|
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
//Builds a scalar vector from 'value': lane 0 receives 'value' and lanes
//1-3 are zeroed, all in one SSE4.1 INSERTPS. Callers are expected to
//have checked Sse41.IsSupported.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<float> Sse41VectorInsertScalarSingle(float value, Vector128<float> vector)
{
    //Note: 0b1110 is the mask to zero the upper bits.
    return Sse41.Insert(vector, value, 0b1110);
}
|
|
|
|
|
|
|
|
//Returns an all-zero 128-bit vector viewed as 16 signed bytes.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<sbyte> VectorSByteZero()
{
    //No scalar fallback exists for this helper; bail out early.
    if (!Sse2.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse2.SetZeroVector128<sbyte>();
}
|
|
|
|
|
|
|
|
//Returns an all-zero 128-bit vector viewed as 8 signed 16-bit integers.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<short> VectorInt16Zero()
{
    //No scalar fallback exists for this helper; bail out early.
    if (!Sse2.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse2.SetZeroVector128<short>();
}
|
|
|
|
|
|
|
|
//Returns an all-zero 128-bit vector viewed as 4 signed 32-bit integers.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<int> VectorInt32Zero()
{
    //No scalar fallback exists for this helper; bail out early.
    if (!Sse2.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse2.SetZeroVector128<int>();
}
|
|
|
|
|
|
|
|
//Returns an all-zero 128-bit vector viewed as 2 signed 64-bit integers.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<long> VectorInt64Zero()
{
    //No scalar fallback exists for this helper; bail out early.
    if (!Sse2.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse2.SetZeroVector128<long>();
}
|
|
|
|
|
|
|
|
//Returns an all-zero 128-bit vector of 4 single-precision floats.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<float> VectorSingleZero()
{
    //No scalar fallback exists for this helper; bail out early.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.SetZeroVector128();
}
|
|
|
|
|
|
|
|
//Returns an all-zero 128-bit vector of 2 double-precision floats.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<double> VectorDoubleZero()
{
    //No scalar fallback exists for this helper; bail out early.
    if (!Sse2.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse2.SetZeroVector128<double>();
}
|
|
|
|
|
2018-08-15 04:54:12 +02:00
|
|
|
//Bit-reinterprets a float vector as sbyte lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<sbyte> VectorSingleToSByte(Vector128<float> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<float, sbyte>(vector);
}
|
|
|
|
|
|
|
|
//Bit-reinterprets a float vector as short lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<short> VectorSingleToInt16(Vector128<float> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<float, short>(vector);
}
|
|
|
|
|
|
|
|
//Bit-reinterprets a float vector as int lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<int> VectorSingleToInt32(Vector128<float> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<float, int>(vector);
}
|
|
|
|
|
|
|
|
//Bit-reinterprets a float vector as long lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<long> VectorSingleToInt64(Vector128<float> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<float, long>(vector);
}
|
|
|
|
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
//Bit-reinterprets a float vector as byte lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<byte> VectorSingleToByte(Vector128<float> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<float, byte>(vector);
}
|
|
|
|
|
|
|
|
//Bit-reinterprets a float vector as ushort lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<ushort> VectorSingleToUInt16(Vector128<float> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<float, ushort>(vector);
}
|
|
|
|
|
|
|
|
//Bit-reinterprets a float vector as uint lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<uint> VectorSingleToUInt32(Vector128<float> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<float, uint>(vector);
}
|
|
|
|
|
|
|
|
//Bit-reinterprets a float vector as ulong lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<ulong> VectorSingleToUInt64(Vector128<float> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<float, ulong>(vector);
}
|
|
|
|
|
2018-05-12 01:10:27 +02:00
|
|
|
//Bit-reinterprets a float vector as double lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<double> VectorSingleToDouble(Vector128<float> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<float, double>(vector);
}
|
|
|
|
|
|
|
|
//Bit-reinterprets an sbyte vector as float lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<float> VectorSByteToSingle(Vector128<sbyte> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<sbyte, float>(vector);
}
|
|
|
|
|
|
|
|
//Bit-reinterprets a short vector as float lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<float> VectorInt16ToSingle(Vector128<short> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<short, float>(vector);
}
|
|
|
|
|
|
|
|
//Bit-reinterprets an int vector as float lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<float> VectorInt32ToSingle(Vector128<int> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<int, float>(vector);
}
|
|
|
|
|
|
|
|
//Bit-reinterprets a long vector as float lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<float> VectorInt64ToSingle(Vector128<long> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<long, float>(vector);
}
|
|
|
|
|
Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics (#405)
* Optimize BIC, BSL, BIT, BIF, XTN, ZIP, DUP (Gp), FMADD (Scalar) and FCVT (Scalar) using SSE intrinsics, some CQ improvements
* Remove useless space
* Address PR feedback
* Revert EmitVectorZero32_128 changes
2018-09-27 04:30:21 +02:00
|
|
|
//Bit-reinterprets a byte vector as float lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<float> VectorByteToSingle(Vector128<byte> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<byte, float>(vector);
}
|
|
|
|
|
|
|
|
//Bit-reinterprets a ushort vector as float lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<float> VectorUInt16ToSingle(Vector128<ushort> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<ushort, float>(vector);
}
|
|
|
|
|
|
|
|
//Bit-reinterprets a uint vector as float lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<float> VectorUInt32ToSingle(Vector128<uint> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<uint, float>(vector);
}
|
|
|
|
|
|
|
|
//Bit-reinterprets a ulong vector as float lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<float> VectorUInt64ToSingle(Vector128<ulong> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<ulong, float>(vector);
}
|
|
|
|
|
2018-05-12 01:10:27 +02:00
|
|
|
//Bit-reinterprets a double vector as float lanes (no conversion, no copy).
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<float> VectorDoubleToSingle(Vector128<double> vector)
{
    //StaticCast requires SSE; there is no fallback path.
    if (!Sse.IsSupported)
    {
        throw new PlatformNotSupportedException();
    }

    return Sse.StaticCast<double, float>(vector);
}
|
|
|
|
}
|
2018-07-03 08:31:16 +02:00
|
|
|
}
|