path: root/ARMeilleure/Instructions
author    Valentin PONS <valx76@gmail.com>    2020-07-19 14:11:58 -0400
committer GitHub <noreply@github.com>    2020-07-19 15:11:58 -0300
commit    3af2ce74ecf76cc8d6fdb9ff19101bfee47af7dd (patch)
tree      eae6186fb83ae83da9541a984f6d74b394cda454 /ARMeilleure/Instructions
parent    9d65de74fcc19a0e088a53e5fa92710d919028cd (diff)
Implements some 32-bit instructions (VBIC, VTST, VSRA) (#1192)
* Added some 32-bit instructions: VBIC, VTST, VSRA
* Incremented the PTC version
* Added tests and fixed the implementation
* Fixed the VBIC immediate opcode mapping
* Nit.

Co-authored-by: gdkchan <gab.dark.100@gmail.com>
Co-authored-by: LDj3SNuD <dvitiello@gmail.com>
Co-authored-by: LDj3SNuD <35856442+LDj3SNuD@users.noreply.github.com>
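As a quick reference for readers of the diff, the per-element behaviour of the three new instructions can be modelled in scalar C#. This is an illustrative sketch written for this summary, not code from the commit:

// VBIC (register): Qd = Qn AND NOT Qm, bitwise per element.
static ulong Vbic(ulong n, ulong m) => n & ~m;

// VBIC (immediate): Qd = Qd AND NOT imm, with imm replicated across the vector.
static ulong VbicImm(ulong d, ulong imm) => d & ~imm;

// VTST: an element becomes all ones if the operands share any set bit, else zero.
static ulong Vtst(ulong n, ulong m) => (n & m) != 0 ? ulong.MaxValue : 0UL;

// VSRA: shift the source element right by an immediate and accumulate into the destination.
static long Vsra(long d, long m, int shift) => d + (m >> shift);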
Diffstat (limited to 'ARMeilleure/Instructions')
-rw-r--r--  ARMeilleure/Instructions/InstEmitSimdHelper32.cs   31
-rw-r--r--  ARMeilleure/Instructions/InstEmitSimdLogical32.cs  63
-rw-r--r--  ARMeilleure/Instructions/InstEmitSimdShift32.cs    21
-rw-r--r--  ARMeilleure/Instructions/InstName.cs                3
4 files changed, 114 insertions, 4 deletions
diff --git a/ARMeilleure/Instructions/InstEmitSimdHelper32.cs b/ARMeilleure/Instructions/InstEmitSimdHelper32.cs
index 9753af66..e045c601 100644
--- a/ARMeilleure/Instructions/InstEmitSimdHelper32.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdHelper32.cs
@@ -1,4 +1,4 @@
-using ARMeilleure.Decoders;
+using ARMeilleure.Decoders;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.Translation;
using System;
@@ -305,6 +305,35 @@ namespace ARMeilleure.Instructions
context.Copy(GetVecA32(op.Qd), res);
}
+ public static void EmitVectorImmBinaryQdQmOpZx32(ArmEmitterContext context, Func2I emit)
+ {
+ EmitVectorImmBinaryQdQmOpI32(context, emit, false);
+ }
+
+ public static void EmitVectorImmBinaryQdQmOpSx32(ArmEmitterContext context, Func2I emit)
+ {
+ EmitVectorImmBinaryQdQmOpI32(context, emit, true);
+ }
+
+ public static void EmitVectorImmBinaryQdQmOpI32(ArmEmitterContext context, Func2I emit, bool signed)
+ {
+ OpCode32SimdShImm op = (OpCode32SimdShImm)context.CurrOp;
+
+ Operand res = GetVecA32(op.Qd);
+
+ int elems = op.GetBytesCount() >> op.Size;
+
+ for (int index = 0; index < elems; index++)
+ {
+ Operand de = EmitVectorExtract32(context, op.Qd, op.Id + index, op.Size, signed);
+ Operand me = EmitVectorExtract32(context, op.Qm, op.Im + index, op.Size, signed);
+
+ res = EmitVectorInsert(context, res, emit(de, me), op.Id + index, op.Size);
+ }
+
+ context.Copy(GetVecA32(op.Qd), res);
+ }
+
public static void EmitVectorTernaryLongOpI32(ArmEmitterContext context, Func3I emit, bool signed)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
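The helper added above differs from the existing binary helpers in that Qd is read as the first operand as well as written, which is what an accumulating shift such as VSRA needs. A rough standalone C# sketch of the loop over plain arrays (names here are illustrative, not from the codebase):

using System;

// d holds the destination elements (also the first input); m holds the source elements.
static void EmitImmBinaryQdQmModel(long[] d, long[] m, Func<long, long, long> emit)
{
    for (int index = 0; index < d.Length; index++)
    {
        d[index] = emit(d[index], m[index]); // Qd[i] = emit(Qd[i], Qm[i])
    }
}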
diff --git a/ARMeilleure/Instructions/InstEmitSimdLogical32.cs b/ARMeilleure/Instructions/InstEmitSimdLogical32.cs
index 6505e834..2d6bf481 100644
--- a/ARMeilleure/Instructions/InstEmitSimdLogical32.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdLogical32.cs
@@ -15,7 +15,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.UseSse2)
{
- EmitVectorBinaryOpF32(context, Intrinsic.X86Pand, Intrinsic.X86Pand);
+ EmitVectorBinaryOpSimd32(context, (n, m) => context.AddIntrinsic(Intrinsic.X86Pand, n, m));
}
else
{
@@ -23,6 +23,54 @@ namespace ARMeilleure.Instructions
}
}
+ public static void Vbic_I(ArmEmitterContext context)
+ {
+ if (Optimizations.UseSse2)
+ {
+ EmitVectorBinaryOpSimd32(context, (n, m) => context.AddIntrinsic(Intrinsic.X86Pandn, m, n));
+ }
+ else
+ {
+ EmitVectorBinaryOpZx32(context, (op1, op2) => context.BitwiseAnd(op1, context.BitwiseNot(op2)));
+ }
+ }
+
+ public static void Vbic_II(ArmEmitterContext context)
+ {
+ OpCode32SimdImm op = (OpCode32SimdImm)context.CurrOp;
+
+ long immediate = op.Immediate;
+
+ // Replicate the immediate to fill the 64-bit doubleword, when the element size is < 64 bits.
+ switch (op.Size)
+ {
+ case 0: immediate *= 0x0101010101010101L; break;
+ case 1: immediate *= 0x0001000100010001L; break;
+ case 2: immediate *= 0x0000000100000001L; break;
+ }
+
+ Operand imm = Const(immediate);
+ Operand res = GetVecA32(op.Qd);
+
+ if (op.Q)
+ {
+ for (int elem = 0; elem < 2; elem++)
+ {
+ Operand de = EmitVectorExtractZx(context, op.Qd, elem, 3);
+
+ res = EmitVectorInsert(context, res, context.BitwiseAnd(de, context.BitwiseNot(imm)), elem, 3);
+ }
+ }
+ else
+ {
+ Operand de = EmitVectorExtractZx(context, op.Qd, op.Vd & 1, 3);
+
+ res = EmitVectorInsert(context, res, context.BitwiseAnd(de, context.BitwiseNot(imm)), op.Vd & 1, 3);
+ }
+
+ context.Copy(GetVecA32(op.Qd), res);
+ }
+
public static void Vbif(ArmEmitterContext context)
{
EmitBifBit(context, true);
@@ -59,7 +107,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.UseSse2)
{
- EmitVectorBinaryOpF32(context, Intrinsic.X86Pxor, Intrinsic.X86Pxor);
+ EmitVectorBinaryOpSimd32(context, (n, m) => context.AddIntrinsic(Intrinsic.X86Pxor, n, m));
}
else
{
@@ -71,7 +119,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.UseSse2)
{
- EmitVectorBinaryOpF32(context, Intrinsic.X86Por, Intrinsic.X86Por);
+ EmitVectorBinaryOpSimd32(context, (n, m) => context.AddIntrinsic(Intrinsic.X86Por, n, m));
}
else
{
@@ -115,6 +163,15 @@ namespace ARMeilleure.Instructions
context.Copy(GetVecA32(op.Qd), res);
}
+ public static void Vtst(ArmEmitterContext context)
+ {
+ EmitVectorBinaryOpZx32(context, (op1, op2) =>
+ {
+ Operand isZero = context.ICompareEqual(context.BitwiseAnd(op1, op2), Const(0));
+ return context.ConditionalSelect(isZero, Const(0), Const(-1));
+ });
+ }
+
private static void EmitBifBit(ArmEmitterContext context, bool notRm)
{
OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
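Two implementation details in this file are worth spelling out. For Vbic_I, the SSE path uses PANDN, which computes NOT(first operand) AND second, so the operands are passed as (m, n) to obtain the ARM semantics n AND NOT m. A scalar equivalence check (illustrative C#, not from the commit):

static ulong PandnModel(ulong a, ulong b) => ~a & b; // x86 PANDN semantics
static ulong VbicModel(ulong n, ulong m)  => n & ~m; // ARM VBIC semantics
// PandnModel(m, n) == VbicModel(n, m) for all inputs, hence the swapped argument order.

For Vbic_II, multiplying the immediate by a repeating constant replicates it across the 64-bit doubleword; for example, with byte elements an immediate of 0x3F becomes 0x3F * 0x0101010101010101 = 0x3F3F3F3F3F3F3F3F.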
diff --git a/ARMeilleure/Instructions/InstEmitSimdShift32.cs b/ARMeilleure/Instructions/InstEmitSimdShift32.cs
index b9055c30..215fe1e8 100644
--- a/ARMeilleure/Instructions/InstEmitSimdShift32.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdShift32.cs
@@ -129,6 +129,27 @@ namespace ARMeilleure.Instructions
EmitVectorUnaryNarrowOp32(context, (op1) => context.ShiftRightUI(op1, Const(shift)));
}
+ public static void Vsra(ArmEmitterContext context)
+ {
+ OpCode32SimdShImm op = (OpCode32SimdShImm)context.CurrOp;
+ int shift = GetImmShr(op);
+ int maxShift = (8 << op.Size) - 1;
+
+ if (op.U)
+ {
+ EmitVectorImmBinaryQdQmOpZx32(context, (op1, op2) =>
+ {
+ Operand shiftRes = shift > maxShift ? Const(op2.Type, 0) : context.ShiftRightUI(op2, Const(shift));
+
+ return context.Add(op1, shiftRes);
+ });
+ }
+ else
+ {
+ EmitVectorImmBinaryQdQmOpSx32(context, (op1, op2) => context.Add(op1, context.ShiftRightSI(op2, Const(Math.Min(maxShift, shift)))));
+ }
+ }
+
private static Operand EmitShlRegOp(ArmEmitterContext context, Operand op, Operand shiftLsB, int size, bool unsigned)
{
if (shiftLsB.Type == OperandType.I64)
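The clamping around the shift amount in Vsra exists because the encoded immediate can equal the element width (for example a shift of 8 on byte elements), which a plain shift in the IR would not handle the way the architecture requires. A scalar model of the intended behaviour, written for this summary only:

static ulong UsraModel(ulong acc, ulong val, int shift, int esize)
{
    // Unsigned: shifting by the full element width yields zero, then accumulate.
    ulong shifted = shift >= esize ? 0UL : val >> shift;
    return acc + shifted;
}

static long SsraModel(long acc, long val, int shift, int esize)
{
    // Signed: clamp the shift so the sign bit is replicated rather than over-shifted.
    return acc + (val >> System.Math.Min(shift, esize - 1));
}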
diff --git a/ARMeilleure/Instructions/InstName.cs b/ARMeilleure/Instructions/InstName.cs
index 28041874..d7283029 100644
--- a/ARMeilleure/Instructions/InstName.cs
+++ b/ARMeilleure/Instructions/InstName.cs
@@ -547,6 +547,7 @@ namespace ARMeilleure.Instructions
Vadd,
Vaddw,
Vand,
+ Vbic,
Vbif,
Vbit,
Vbsl,
@@ -611,10 +612,12 @@ namespace ARMeilleure.Instructions
Vrecps,
Vrsqrte,
Vrsqrts,
+ Vsra,
Vsub,
Vsubw,
Vtbl,
Vtrn,
+ Vtst,
Vuzp,
Vzip,
}