Diffstat (limited to 'ARMeilleure/Instructions/InstEmitSimdMove32.cs')
-rw-r--r-- ARMeilleure/Instructions/InstEmitSimdMove32.cs | 469
1 file changed, 341 insertions(+), 128 deletions(-)
diff --git a/ARMeilleure/Instructions/InstEmitSimdMove32.cs b/ARMeilleure/Instructions/InstEmitSimdMove32.cs
index 3fd42cbf..17ff66b0 100644
--- a/ARMeilleure/Instructions/InstEmitSimdMove32.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdMove32.cs
@@ -1,6 +1,7 @@
using ARMeilleure.Decoders;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.Translation;
+using System;
using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper;
@@ -11,6 +12,21 @@ namespace ARMeilleure.Instructions
{
static partial class InstEmit32
{
+ #region "Masks"
+ // Same as InstEmitSimdMove, as the instructions do the same thing.
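+ // Each long packs eight Pshufb byte indices; pairs of them build the 128-bit masks that deinterleave
+ // even- and odd-indexed elements after an element-wise unpack.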
+ private static readonly long[] _masksE0_Uzp = new long[]
+ {
+ 13L << 56 | 09L << 48 | 05L << 40 | 01L << 32 | 12L << 24 | 08L << 16 | 04L << 8 | 00L << 0,
+ 11L << 56 | 10L << 48 | 03L << 40 | 02L << 32 | 09L << 24 | 08L << 16 | 01L << 8 | 00L << 0
+ };
+
+ private static readonly long[] _masksE1_Uzp = new long[]
+ {
+ 15L << 56 | 11L << 48 | 07L << 40 | 03L << 32 | 14L << 24 | 10L << 16 | 06L << 8 | 02L << 0,
+ 15L << 56 | 14L << 48 | 07L << 40 | 06L << 32 | 13L << 24 | 12L << 16 | 05L << 8 | 04L << 0
+ };
+ #endregion
+
public static void Vmov_I(ArmEmitterContext context)
{
EmitVectorImmUnaryOp32(context, (op1) => op1);
@@ -31,7 +47,7 @@ namespace ARMeilleure.Instructions
// To general purpose.
Operand value = context.VectorExtract(OperandType.I32, vec, op.Vn & 0x3);
SetIntA32(context, op.Rt, value);
- }
+ }
else
{
// From general purpose.
@@ -88,7 +104,7 @@ namespace ARMeilleure.Instructions
if (sameOwnerVec)
{
context.Copy(vec, context.VectorInsert(resultVec, highValue, vm1 & 3));
- }
+ }
else
{
context.Copy(vec, resultVec);
@@ -128,114 +144,201 @@ namespace ARMeilleure.Instructions
OpCode32SimdTbl op = (OpCode32SimdTbl)context.CurrOp;
bool extension = op.Opc == 1;
-
- int elems = op.GetBytesCount() >> op.Size;
-
int length = op.Length + 1;
- (int Qx, int Ix)[] tableTuples = new (int, int)[length];
- for (int i = 0; i < length; i++)
+ if (Optimizations.UseSsse3)
{
- (int vn, int en) = GetQuadwordAndSubindex(op.Vn + i, op.RegisterSize);
- tableTuples[i] = (vn, en);
- }
+ Operand d = GetVecA32(op.Qd);
+ Operand m = EmitMoveDoubleWordToSide(context, GetVecA32(op.Qm), op.Vm, 0);
- int byteLength = length * 8;
+ Operand res;
+ Operand mask = X86GetAllElements(context, 0x0707070707070707L);
- Operand res = GetVecA32(op.Qd);
- Operand m = GetVecA32(op.Qm);
+ // Fast path for single register table.
+ {
+ Operand n = EmitMoveDoubleWordToSide(context, GetVecA32(op.Qn), op.Vn, 0);
- for (int index = 0; index < elems; index++)
- {
- Operand selectedIndex = context.ZeroExtend8(OperandType.I32, context.VectorExtract8(m, index + op.Im));
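+ // Indices > 7 become 0xFF via Pcmpgtb; OR-ing with the indices themselves also preserves the sign bit
+ // of negative bytes, so Pshufb zeroes every out-of-range lane.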
+ Operand mMask = context.AddIntrinsic(Intrinsic.X86Pcmpgtb, m, mask);
+ mMask = context.AddIntrinsic(Intrinsic.X86Por, mMask, m);
+
+ res = context.AddIntrinsic(Intrinsic.X86Pshufb, n, mMask);
+ }
+
+ for (int index = 1; index < length; index++)
+ {
+ int newVn = (op.Vn + index) & 0x1F;
+ (int qn, int ind) = GetQuadwordAndSubindex(newVn, op.RegisterSize);
+ Operand ni = EmitMoveDoubleWordToSide(context, GetVecA32(qn), newVn, 0);
+
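+ // Rebase the indices so this table register covers bytes [8 * index, 8 * index + 7]; lanes that fall
+ // outside end up with the sign bit set and are zeroed by Pshufb, then OR'd into the accumulated result.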
+ Operand idxMask = X86GetAllElements(context, 0x0808080808080808L * index);
- Operand inRange = context.ICompareLess(selectedIndex, Const(byteLength));
- Operand elemRes = null; // Note: This is I64 for ease of calculation.
+ Operand mSubMask = context.AddIntrinsic(Intrinsic.X86Psubb, m, idxMask);
- // TODO: Branching rather than conditional select.
+ Operand mMask = context.AddIntrinsic(Intrinsic.X86Pcmpgtb, mSubMask, mask);
+ mMask = context.AddIntrinsic(Intrinsic.X86Por, mMask, mSubMask);
- // Get indexed byte.
- // To simplify (ha) the il, we get bytes from every vector and use a nested conditional select to choose the right result.
- // This does have to extract `length` times for every element but certainly not as bad as it could be.
+ Operand res2 = context.AddIntrinsic(Intrinsic.X86Pshufb, ni, mMask);
- // Which vector number is the index on.
- Operand vecIndex = context.ShiftRightUI(selectedIndex, Const(3));
- // What should we shift by to extract it.
- Operand subVecIndexShift = context.ShiftLeft(context.BitwiseAnd(selectedIndex, Const(7)), Const(3));
+ res = context.AddIntrinsic(Intrinsic.X86Por, res, res2);
+ }
+
+ if (extension)
+ {
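+ // VTBX (extension): lanes whose index falls outside the table keep the original destination byte.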
+ Operand idxMask = X86GetAllElements(context, (0x0808080808080808L * length) - 0x0101010101010101L);
+ Operand zeroMask = context.VectorZero();
+ Operand mPosMask = context.AddIntrinsic(Intrinsic.X86Pcmpgtb, m, idxMask);
+ Operand mNegMask = context.AddIntrinsic(Intrinsic.X86Pcmpgtb, zeroMask, m);
+
+ Operand mMask = context.AddIntrinsic(Intrinsic.X86Por, mPosMask, mNegMask);
+
+ Operand dMask = context.AddIntrinsic(Intrinsic.X86Pand, EmitMoveDoubleWordToSide(context, d, op.Vd, 0), mMask);
+
+ res = context.AddIntrinsic(Intrinsic.X86Por, res, dMask);
+ }
+
+ res = EmitMoveDoubleWordToSide(context, res, 0, op.Vd);
+
+ context.Copy(d, EmitDoubleWordInsert(context, d, res, op.Vd));
+ }
+ else
+ {
+ int elems = op.GetBytesCount() >> op.Size;
+
+ (int Qx, int Ix)[] tableTuples = new (int, int)[length];
for (int i = 0; i < length; i++)
{
- (int qx, int ix) = tableTuples[i];
- // Get the whole vector, we'll get a byte out of it.
- Operand lookupResult;
- if (qx == op.Qd)
- {
- // Result contains the current state of the vector.
- lookupResult = context.VectorExtract(OperandType.I64, res, ix);
- }
- else
- {
- lookupResult = EmitVectorExtract32(context, qx, ix, 3, false); // I64
- }
-
- lookupResult = context.ShiftRightUI(lookupResult, subVecIndexShift); // Get the relevant byte from this vector.
+ tableTuples[i] = GetQuadwordAndSubindex(op.Vn + i, op.RegisterSize);
+ }
- if (i == 0)
- {
- elemRes = lookupResult; // First result is always default.
- }
- else
+ int byteLength = length * 8;
+
+ Operand res = GetVecA32(op.Qd);
+ Operand m = GetVecA32(op.Qm);
+
+ for (int index = 0; index < elems; index++)
+ {
+ Operand selectedIndex = context.ZeroExtend8(OperandType.I32, context.VectorExtract8(m, index + op.Im));
+
+ Operand inRange = context.ICompareLess(selectedIndex, Const(byteLength));
+ Operand elemRes = null; // Note: This is I64 for ease of calculation.
+
+ // TODO: Branching rather than conditional select.
+
+ // Get indexed byte.
+ // To simplify (ha) the IL, we get bytes from every vector and use a nested conditional select to choose the right result.
+ // This does have to extract `length` times for every element, but it's certainly not as bad as it could be.
+
+ // Which vector number is the index on.
+ Operand vecIndex = context.ShiftRightUI(selectedIndex, Const(3));
+ // What should we shift by to extract it.
+ Operand subVecIndexShift = context.ShiftLeft(context.BitwiseAnd(selectedIndex, Const(7)), Const(3));
+
+ for (int i = 0; i < length; i++)
{
- Operand isThisElem = context.ICompareEqual(vecIndex, Const(i));
- elemRes = context.ConditionalSelect(isThisElem, lookupResult, elemRes);
+ (int qx, int ix) = tableTuples[i];
+ // Get the whole vector; we'll extract a byte from it.
+ Operand lookupResult;
+ if (qx == op.Qd)
+ {
+ // Result contains the current state of the vector.
+ lookupResult = context.VectorExtract(OperandType.I64, res, ix);
+ }
+ else
+ {
+ lookupResult = EmitVectorExtract32(context, qx, ix, 3, false); // I64
+ }
+
+ lookupResult = context.ShiftRightUI(lookupResult, subVecIndexShift); // Get the relevant byte from this vector.
+
+ if (i == 0)
+ {
+ elemRes = lookupResult; // First result is always default.
+ }
+ else
+ {
+ Operand isThisElem = context.ICompareEqual(vecIndex, Const(i));
+ elemRes = context.ConditionalSelect(isThisElem, lookupResult, elemRes);
+ }
}
- }
- Operand fallback = (extension) ? context.ZeroExtend32(OperandType.I64, EmitVectorExtract32(context, op.Qd, index + op.Id, 0, false)) : Const(0L);
+ Operand fallback = (extension) ? context.ZeroExtend32(OperandType.I64, EmitVectorExtract32(context, op.Qd, index + op.Id, 0, false)) : Const(0L);
- res = EmitVectorInsert(context, res, context.ConditionalSelect(inRange, elemRes, fallback), index + op.Id, 0);
- }
+ res = EmitVectorInsert(context, res, context.ConditionalSelect(inRange, elemRes, fallback), index + op.Id, 0);
+ }
- context.Copy(GetVecA32(op.Qd), res);
+ context.Copy(GetVecA32(op.Qd), res);
+ }
}
public static void Vtrn(ArmEmitterContext context)
{
OpCode32SimdCmpZ op = (OpCode32SimdCmpZ)context.CurrOp;
- int elems = op.GetBytesCount() >> op.Size;
- int pairs = elems >> 1;
+ if (Optimizations.UseSsse3)
+ {
+ EmitVectorShuffleOpSimd32(context, (m, d) =>
+ {
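+ // TRN: shuffle the even-indexed elements of each source into its low half and the odd-indexed
+ // into its high half, so the unpacks below interleave evens into resD and odds into resM.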
+ Operand mask = null;
- bool overlap = op.Qm == op.Qd;
+ if (op.Size < 3)
+ {
+ long maskE0 = EvenMasks[op.Size];
+ long maskE1 = OddMasks[op.Size];
+
+ mask = X86GetScalar(context, maskE0);
+
+ mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);
+
+ d = context.AddIntrinsic(Intrinsic.X86Pshufb, d, mask);
+ m = context.AddIntrinsic(Intrinsic.X86Pshufb, m, mask);
+ }
- Operand resD = GetVecA32(op.Qd);
- Operand resM = GetVecA32(op.Qm);
+ Operand resD = context.AddIntrinsic(X86PunpcklInstruction[op.Size], d, m);
+ Operand resM = context.AddIntrinsic(X86PunpckhInstruction[op.Size], d, m);
- for (int index = 0; index < pairs; index++)
+ return (resM, resD);
+ });
+ }
+ else
{
- int pairIndex = index << 1;
- Operand d2 = EmitVectorExtract32(context, op.Qd, pairIndex + 1 + op.Id, op.Size, false);
- Operand m1 = EmitVectorExtract32(context, op.Qm, pairIndex + op.Im, op.Size, false);
+ int elems = op.GetBytesCount() >> op.Size;
+ int pairs = elems >> 1;
- resD = EmitVectorInsert(context, resD, m1, pairIndex + 1 + op.Id, op.Size);
+ bool overlap = op.Qm == op.Qd;
- if (overlap)
+ Operand resD = GetVecA32(op.Qd);
+ Operand resM = GetVecA32(op.Qm);
+
+ for (int index = 0; index < pairs; index++)
{
- resM = resD;
- }
+ int pairIndex = index << 1;
+ Operand d2 = EmitVectorExtract32(context, op.Qd, pairIndex + 1 + op.Id, op.Size, false);
+ Operand m1 = EmitVectorExtract32(context, op.Qm, pairIndex + op.Im, op.Size, false);
- resM = EmitVectorInsert(context, resM, d2, pairIndex + op.Im, op.Size);
+ resD = EmitVectorInsert(context, resD, m1, pairIndex + 1 + op.Id, op.Size);
- if (overlap)
- {
- resD = resM;
+ if (overlap)
+ {
+ resM = resD;
+ }
+
+ resM = EmitVectorInsert(context, resM, d2, pairIndex + op.Im, op.Size);
+
+ if (overlap)
+ {
+ resD = resM;
+ }
}
- }
- context.Copy(GetVecA32(op.Qd), resD);
- if (!overlap)
- {
- context.Copy(GetVecA32(op.Qm), resM);
+ context.Copy(GetVecA32(op.Qd), resD);
+ if (!overlap)
+ {
+ context.Copy(GetVecA32(op.Qm), resM);
+ }
}
}
@@ -243,44 +346,68 @@ namespace ARMeilleure.Instructions
{
OpCode32SimdCmpZ op = (OpCode32SimdCmpZ)context.CurrOp;
- int elems = op.GetBytesCount() >> op.Size;
- int pairs = elems >> 1;
-
- bool overlap = op.Qm == op.Qd;
+ if (Optimizations.UseSse2)
+ {
+ EmitVectorShuffleOpSimd32(context, (m, d) =>
+ {
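+ // ZIP is exactly an x86 unpack: punpckl interleaves the low halves and punpckh the high halves.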
+ if (op.RegisterSize == RegisterSize.Simd128)
+ {
+ Operand resD = context.AddIntrinsic(X86PunpcklInstruction[op.Size], d, m);
+ Operand resM = context.AddIntrinsic(X86PunpckhInstruction[op.Size], d, m);
- Operand resD = GetVecA32(op.Qd);
- Operand resM = GetVecA32(op.Qm);
+ return (resM, resD);
+ }
+ else
+ {
+ Operand res = context.AddIntrinsic(X86PunpcklInstruction[op.Size], d, m);
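+ // The low unpack zips both doubleword operands at once; split the result into its low and high qwords.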
- for (int index = 0; index < pairs; index++)
+ Operand resD = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, res, context.VectorZero());
+ Operand resM = context.AddIntrinsic(Intrinsic.X86Punpckhqdq, res, context.VectorZero());
+ return (resM, resD);
+ }
+ });
+ }
+ else
{
- int pairIndex = index << 1;
- Operand dRowD = EmitVectorExtract32(context, op.Qd, index + op.Id, op.Size, false);
- Operand mRowD = EmitVectorExtract32(context, op.Qm, index + op.Im, op.Size, false);
+ int elems = op.GetBytesCount() >> op.Size;
+ int pairs = elems >> 1;
- Operand dRowM = EmitVectorExtract32(context, op.Qd, index + op.Id + pairs, op.Size, false);
- Operand mRowM = EmitVectorExtract32(context, op.Qm, index + op.Im + pairs, op.Size, false);
+ bool overlap = op.Qm == op.Qd;
- resD = EmitVectorInsert(context, resD, dRowD, pairIndex + op.Id, op.Size);
- resD = EmitVectorInsert(context, resD, mRowD, pairIndex + 1 + op.Id, op.Size);
+ Operand resD = GetVecA32(op.Qd);
+ Operand resM = GetVecA32(op.Qm);
- if (overlap)
+ for (int index = 0; index < pairs; index++)
{
- resM = resD;
- }
+ int pairIndex = index << 1;
+ Operand dRowD = EmitVectorExtract32(context, op.Qd, index + op.Id, op.Size, false);
+ Operand mRowD = EmitVectorExtract32(context, op.Qm, index + op.Im, op.Size, false);
- resM = EmitVectorInsert(context, resM, dRowM, pairIndex + op.Im, op.Size);
- resM = EmitVectorInsert(context, resM, mRowM, pairIndex + 1 + op.Im, op.Size);
+ Operand dRowM = EmitVectorExtract32(context, op.Qd, index + op.Id + pairs, op.Size, false);
+ Operand mRowM = EmitVectorExtract32(context, op.Qm, index + op.Im + pairs, op.Size, false);
- if (overlap)
- {
- resD = resM;
+ resD = EmitVectorInsert(context, resD, dRowD, pairIndex + op.Id, op.Size);
+ resD = EmitVectorInsert(context, resD, mRowD, pairIndex + 1 + op.Id, op.Size);
+
+ if (overlap)
+ {
+ resM = resD;
+ }
+
+ resM = EmitVectorInsert(context, resM, dRowM, pairIndex + op.Im, op.Size);
+ resM = EmitVectorInsert(context, resM, mRowM, pairIndex + 1 + op.Im, op.Size);
+
+ if (overlap)
+ {
+ resD = resM;
+ }
}
- }
- context.Copy(GetVecA32(op.Qd), resD);
- if (!overlap)
- {
- context.Copy(GetVecA32(op.Qm), resM);
+ context.Copy(GetVecA32(op.Qd), resD);
+ if (!overlap)
+ {
+ context.Copy(GetVecA32(op.Qm), resM);
+ }
}
}
@@ -288,49 +415,135 @@ namespace ARMeilleure.Instructions
{
OpCode32SimdCmpZ op = (OpCode32SimdCmpZ)context.CurrOp;
- int elems = op.GetBytesCount() >> op.Size;
- int pairs = elems >> 1;
+ if (Optimizations.UseSsse3)
+ {
+ EmitVectorShuffleOpSimd32(context, (m, d) =>
+ {
+ if (op.RegisterSize == RegisterSize.Simd128)
+ {
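+ // UZP: move even-indexed elements to the low qword and odd-indexed to the high qword of each
+ // source, then recombine matching qwords with punpcklqdq/punpckhqdq.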
+ Operand mask = null;
+
+ if (op.Size < 3)
+ {
+ long maskE0 = EvenMasks[op.Size];
+ long maskE1 = OddMasks[op.Size];
- bool overlap = op.Qm == op.Qd;
+ mask = X86GetScalar(context, maskE0);
+ mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);
+
+ d = context.AddIntrinsic(Intrinsic.X86Pshufb, d, mask);
+ m = context.AddIntrinsic(Intrinsic.X86Pshufb, m, mask);
+ }
+
+ Operand resD = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, d, m);
+ Operand resM = context.AddIntrinsic(Intrinsic.X86Punpckhqdq, d, m);
+
+ return (resM, resD);
+ }
+ else
+ {
+ Intrinsic punpcklInst = X86PunpcklInstruction[op.Size];
+
+ Operand res = context.AddIntrinsic(punpcklInst, d, m);
+
+ if (op.Size < 2)
+ {
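+ // Byte and halfword elements still need a byte shuffle to separate evens from odds;
+ // 32-bit elements already land in place after the unpack.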
+ long maskE0 = _masksE0_Uzp[op.Size];
+ long maskE1 = _masksE1_Uzp[op.Size];
+
+ Operand mask = X86GetScalar(context, maskE0);
+
+ mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);
- Operand resD = GetVecA32(op.Qd);
- Operand resM = GetVecA32(op.Qm);
+ res = context.AddIntrinsic(Intrinsic.X86Pshufb, res, mask);
+ }
- for (int index = 0; index < elems; index++)
+ Operand resD = context.AddIntrinsic(Intrinsic.X86Punpcklqdq, res, context.VectorZero());
+ Operand resM = context.AddIntrinsic(Intrinsic.X86Punpckhqdq, res, context.VectorZero());
+
+ return (resM, resD);
+ }
+ });
+ }
+ else
{
- Operand dIns, mIns;
- if (index >= pairs)
- {
- int pind = index - pairs;
- dIns = EmitVectorExtract32(context, op.Qm, (pind << 1) + op.Im, op.Size, false);
- mIns = EmitVectorExtract32(context, op.Qm, ((pind << 1) | 1) + op.Im, op.Size, false);
- }
- else
- {
- dIns = EmitVectorExtract32(context, op.Qd, (index << 1) + op.Id, op.Size, false);
- mIns = EmitVectorExtract32(context, op.Qd, ((index << 1) | 1) + op.Id, op.Size, false);
- }
+ int elems = op.GetBytesCount() >> op.Size;
+ int pairs = elems >> 1;
- resD = EmitVectorInsert(context, resD, dIns, index + op.Id, op.Size);
+ bool overlap = op.Qm == op.Qd;
- if (overlap)
+ Operand resD = GetVecA32(op.Qd);
+ Operand resM = GetVecA32(op.Qm);
+
+ for (int index = 0; index < elems; index++)
{
- resM = resD;
- }
+ Operand dIns, mIns;
+ if (index >= pairs)
+ {
+ int pairIndex = index - pairs;
+ dIns = EmitVectorExtract32(context, op.Qm, (pairIndex << 1) + op.Im, op.Size, false);
+ mIns = EmitVectorExtract32(context, op.Qm, ((pairIndex << 1) | 1) + op.Im, op.Size, false);
+ }
+ else
+ {
+ dIns = EmitVectorExtract32(context, op.Qd, (index << 1) + op.Id, op.Size, false);
+ mIns = EmitVectorExtract32(context, op.Qd, ((index << 1) | 1) + op.Id, op.Size, false);
+ }
+
+ resD = EmitVectorInsert(context, resD, dIns, index + op.Id, op.Size);
+
+ if (overlap)
+ {
+ resM = resD;
+ }
- resM = EmitVectorInsert(context, resM, mIns, index + op.Im, op.Size);
+ resM = EmitVectorInsert(context, resM, mIns, index + op.Im, op.Size);
- if (overlap)
+ if (overlap)
+ {
+ resD = resM;
+ }
+ }
+
+ context.Copy(GetVecA32(op.Qd), resD);
+ if (!overlap)
{
- resD = resM;
+ context.Copy(GetVecA32(op.Qm), resM);
}
}
+ }
+
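+ // Shared helper for the zip/unzip/transpose fast paths above: presents the shuffle function with both
+ // sources (doubleword operands moved to element 0 first), then writes the results back, re-inserting
+ // doubleword results into the untouched halves of their registers.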
+ public static void EmitVectorShuffleOpSimd32(ArmEmitterContext context, Func<Operand, Operand, (Operand, Operand)> shuffleFunc)
+ {
+ OpCode32Simd op = (OpCode32Simd)context.CurrOp;
+
+ Operand m = GetVecA32(op.Qm);
+ Operand d = GetVecA32(op.Qd);
+ Operand initialM = m;
+ Operand initialD = d;
+
+ if (!op.Q) // Register swap: move relevant doubleword to side 0, for consistency.
+ {
+ m = EmitMoveDoubleWordToSide(context, m, op.Vm, 0);
+ d = EmitMoveDoubleWordToSide(context, d, op.Vd, 0);
+ }
+
+ (Operand resM, Operand resD) = shuffleFunc(m, d);
+
+ bool overlap = op.Qm == op.Qd;
+
+ if (!op.Q) // Register insert.
+ {
+ resM = EmitDoubleWordInsert(context, initialM, EmitMoveDoubleWordToSide(context, resM, 0, op.Vm), op.Vm);
+ resD = EmitDoubleWordInsert(context, overlap ? resM : initialD, EmitMoveDoubleWordToSide(context, resD, 0, op.Vd), op.Vd);
+ }
- context.Copy(GetVecA32(op.Qd), resD);
if (!overlap)
{
- context.Copy(GetVecA32(op.Qm), resM);
+ context.Copy(initialM, resM);
}
+
+ context.Copy(initialD, resD);
}
}
}