diff options
| author | gdkchan <gab.dark.100@gmail.com> | 2020-03-10 02:17:30 -0300 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2020-03-10 16:17:30 +1100 |
| commit | 89ccec197ec9a5db2bb308ef3e9178910d1ab7a8 (patch) | |
| tree | 3f487a86d3495feefd904d4cd7195d9c798c008b /ARMeilleure/Instructions | |
| parent | 08c0e3829bc96932d386de18647bde2768fe26ed (diff) | |
Implement VMOVL and VORR.I32 AArch32 SIMD instructions (#960)
* Implement VMOVL and VORR.I32 AArch32 SIMD instructions
* Rename <dt> to <size> on test description
* Rename Widen to Long and improve VMOVL implementation a bit
Diffstat (limited to 'ARMeilleure/Instructions')
| -rw-r--r-- | ARMeilleure/Instructions/InstEmitSimdLogical32.cs | 39 |
| -rw-r--r-- | ARMeilleure/Instructions/InstEmitSimdMove32.cs | 30 |
| -rw-r--r-- | ARMeilleure/Instructions/InstName.cs | 4 |
3 files changed, 72 insertions(+), 1 deletion(-)
```diff
diff --git a/ARMeilleure/Instructions/InstEmitSimdLogical32.cs b/ARMeilleure/Instructions/InstEmitSimdLogical32.cs
index fef40a17..3698f332 100644
--- a/ARMeilleure/Instructions/InstEmitSimdLogical32.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdLogical32.cs
@@ -2,7 +2,10 @@
 using ARMeilleure.IntermediateRepresentation;
 using ARMeilleure.Translation;
 
+using static ARMeilleure.Instructions.InstEmitHelper;
+using static ARMeilleure.Instructions.InstEmitSimdHelper;
 using static ARMeilleure.Instructions.InstEmitSimdHelper32;
+using static ARMeilleure.IntermediateRepresentation.OperandHelper;
 
 namespace ARMeilleure.Instructions
 {
@@ -64,6 +67,42 @@ namespace ARMeilleure.Instructions
             }
         }
 
+        public static void Vorr_II(ArmEmitterContext context)
+        {
+            OpCode32SimdImm op = (OpCode32SimdImm)context.CurrOp;
+
+            long immediate = op.Immediate;
+
+            // Replicate fields to fill the 64-bits, if size is < 64-bits.
+            switch (op.Size)
+            {
+                case 0: immediate *= 0x0101010101010101L; break;
+                case 1: immediate *= 0x0001000100010001L; break;
+                case 2: immediate *= 0x0000000100000001L; break;
+            }
+
+            Operand imm = Const(immediate);
+            Operand res = GetVecA32(op.Qd);
+
+            if (op.Q)
+            {
+                for (int elem = 0; elem < 2; elem++)
+                {
+                    Operand de = EmitVectorExtractZx(context, op.Qd, elem, 3);
+
+                    res = EmitVectorInsert(context, res, context.BitwiseOr(de, imm), elem, 3);
+                }
+            }
+            else
+            {
+                Operand de = EmitVectorExtractZx(context, op.Qd, op.Vd & 1, 3);
+
+                res = EmitVectorInsert(context, res, context.BitwiseOr(de, imm), op.Vd & 1, 3);
+            }
+
+            context.Copy(GetVecA32(op.Qd), res);
+        }
+
         private static void EmitBifBit(ArmEmitterContext context, bool notRm)
         {
             OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
diff --git a/ARMeilleure/Instructions/InstEmitSimdMove32.cs b/ARMeilleure/Instructions/InstEmitSimdMove32.cs
index 17ff66b0..f11f9cc5 100644
--- a/ARMeilleure/Instructions/InstEmitSimdMove32.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdMove32.cs
@@ -139,6 +139,36 @@ namespace ARMeilleure.Instructions
             }
         }
 
+        public static void Vmovl(ArmEmitterContext context)
+        {
+            OpCode32SimdLong op = (OpCode32SimdLong)context.CurrOp;
+
+            Operand res = context.VectorZero();
+
+            int elems = op.GetBytesCount() >> op.Size;
+
+            for (int index = 0; index < elems; index++)
+            {
+                Operand me = EmitVectorExtract32(context, op.Qm, op.Im + index, op.Size, !op.U);
+
+                if (op.Size == 2)
+                {
+                    if (op.U)
+                    {
+                        me = context.ZeroExtend32(OperandType.I64, me);
+                    }
+                    else
+                    {
+                        me = context.SignExtend32(OperandType.I64, me);
+                    }
+                }
+
+                res = EmitVectorInsert(context, res, me, index, op.Size + 1);
+            }
+
+            context.Copy(GetVecA32(op.Qd), res);
+        }
+
         public static void Vtbl(ArmEmitterContext context)
         {
             OpCode32SimdTbl op = (OpCode32SimdTbl)context.CurrOp;
diff --git a/ARMeilleure/Instructions/InstName.cs b/ARMeilleure/Instructions/InstName.cs
index 049c956d..69969e9f 100644
--- a/ARMeilleure/Instructions/InstName.cs
+++ b/ARMeilleure/Instructions/InstName.cs
@@ -81,7 +81,7 @@ namespace ARMeilleure.Instructions
         Sdiv,
         Smaddl,
         Smsubl,
-        Smul__,
+        Smulh,
         Smull,
         Smulw_,
         Ssat,
@@ -500,6 +500,7 @@ namespace ARMeilleure.Instructions
         Smlaw_,
         Smmla,
         Smmls,
+        Smul__,
         Smmul,
         Stl,
         Stlb,
@@ -560,6 +561,7 @@ namespace ARMeilleure.Instructions
         Vmla,
         Vmls,
         Vmov,
+        Vmovl,
         Vmovn,
         Vmrs,
         Vmsr,
```
