| | | |
|---|---|---|
| author | Wunk <wunkolo@gmail.com> | 2022-10-02 02:17:19 -0700 |
| committer | GitHub <noreply@github.com> | 2022-10-02 11:17:19 +0200 |
| commit | 45ce540b9b756f372840e923b73cfd7e3edd85f8 (patch) | |
| tree | 5908b97b09330f91c893c6c25a3a76519e8651de | /ARMeilleure/Instructions/InstEmitSimdShift.cs |
| parent | 96bf7f8522e38c36d792a6ac2173497c3674e920 (diff) | |
ARMeilleure: Add `gfni` acceleration (#3669)
* ARMeilleure: Add `GFNI` detection
This is intended for utilizing the `gf2p8affineqb` instruction
* ARMeilleure: Add `gf2p8affineqb`
Not using the VEX or EVEX form of this instruction is intentional. There
are `GFNI` chips that do not support AVX (so no VEX encoding), such as
Tremont (Lakefield) chips as well as Jasper Lake.
https://github.com/InstLatx64/InstLatx64/blob/13df339fe7150b114929f71b19a6b2fe72fc751e/GenuineIntel/GenuineIntel00806A1_Lakefield_LC_InstLatX64.txt#L1297-L1299
https://github.com/InstLatx64/InstLatx64/blob/13df339fe7150b114929f71b19a6b2fe72fc751e/GenuineIntel/GenuineIntel00906C0_JasperLake_InstLatX64.txt#L1252-L1254
* ARMeilleure: Add `gfni` acceleration of `Rbit_V`
Passes all `Rbit_V*` unit tests on my `i9-11900k`
* ARMeilleure: Add `gfni` acceleration of `S{l,r}i_V`
Also added a fast path for when the shift amount is greater than or equal
to the element size.
* ARMeilleure: Add `gfni` acceleration of `Shl_V` and `Sshr_V`
* ARMeilleure: Increment InternalVersion
* ARMeilleure: Fix Intrinsic and Assembler Table alignment
`gf2p8affineqb` is the longest instruction name I know of. It shouldn't
get any wider than this.
* ARMeilleure: Remove SSE2+SHA requirement for GFNI
* ARMeilleure: Add `X86GetGf2p8LogicalShiftLeft`
Used to generate the GF(2^8) 8x8 bit matrices that `gf2p8affineqb` uses to perform per-byte bit shifts (a sketch of such a helper follows this list).
* ARMeilleure: Append `FeatureInfo7Ecx` to `FeatureInfo` (a CPUID-based detection sketch follows this list)
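
For the detection side (the `GFNI` detection and `FeatureInfo7Ecx` bullets above): GFNI support is reported by CPUID leaf 7, sub-leaf 0, in ECX bit 8, which is the register a `FeatureInfo7Ecx` field would capture. The snippet below is a minimal standalone sketch of that check, assuming .NET's `X86Base.CpuId` intrinsic is available; the class and method names are illustrative and this is not the actual ARMeilleure `HardwareCapabilities` code.

```csharp
using System.Runtime.Intrinsics.X86;

static class GfniDetectionSketch
{
    // GFNI is advertised in CPUID.(EAX=07H, ECX=0):ECX, bit 8.
    public static bool SupportsGfni()
    {
        if (!X86Base.IsSupported)
        {
            return false; // Not an x86/x64 process.
        }

        (int maxLeaf, _, _, _) = X86Base.CpuId(0, 0);

        if (maxLeaf < 7)
        {
            return false; // CPU does not expose the extended feature leaf.
        }

        (_, _, int ecx, _) = X86Base.CpuId(7, 0);

        return (ecx & (1 << 8)) != 0;
    }
}
```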
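The `X86GetGf2p8LogicalShiftLeft` bullet is the core of the trick. `gf2p8affineqb` treats each 64-bit lane of the second operand as an 8x8 bit matrix and, for every source byte, sets result bit i to the parity of (matrix byte [7 - i] AND source byte), then XORs in the 8-bit immediate (zero in every path of this commit). Under that convention the identity matrix is `0x0102040810204080`, and sliding that constant by whole bytes slides the diagonal, so a single 64-bit shift of the constant produces the matrix for a per-byte logical shift. The sketch below is a plausible reconstruction under those assumptions, not the committed helper (which lives in ARMeilleure's SIMD helper code); the class name and the `BitReverse` constant for the `Rbit_V` path are mine.

```csharp
static class Gf2p8MatrixSketch
{
    // Identity matrix for gf2p8affineqb: result bit i of each byte is
    // parity(matrix byte [7 - i] & source byte), so byte [7 - i] must be (1 << i).
    private const ulong Identity = 0x0102040810204080UL;

    // Anti-diagonal matrix: result bit i = source bit (7 - i), i.e. a per-byte
    // bit reversal, which is what a GFNI fast path for Rbit_V needs.
    public const ulong BitReverse = 0x8040201008040201UL;

    // Positive shift: matrix for a logical left shift of each byte by `shift`.
    // Negative shift: matrix for a logical right shift of each byte by `-shift`.
    // Moving the constant by whole bytes moves the diagonal; rows that fall off
    // become zero, so the vacated result bits are zero-filled.
    public static ulong GetLogicalShiftLeft(int shift)
    {
        return shift >= 0
            ? Identity >> (shift * 8)
            : Identity << (-shift * 8);
    }
}
```

The `Sshr_V` hunk in the diff below builds on the same matrix: it ORs `0x80` into the low `shift` matrix bytes (which the right-shift matrix leaves as zero rows), so the vacated high bits of each result byte become copies of the source sign bit.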
Diffstat (limited to 'ARMeilleure/Instructions/InstEmitSimdShift.cs')
| | | |
|---|---|---|
| -rw-r--r-- | ARMeilleure/Instructions/InstEmitSimdShift.cs | 134 |

1 file changed, 129 insertions, 5 deletions
diff --git a/ARMeilleure/Instructions/InstEmitSimdShift.cs b/ARMeilleure/Instructions/InstEmitSimdShift.cs
index 146aeafa..cf3b51bd 100644
--- a/ARMeilleure/Instructions/InstEmitSimdShift.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdShift.cs
@@ -88,8 +88,35 @@ namespace ARMeilleure.Instructions
             OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;
             int shift = GetImmShl(op);
+            int eSize = 8 << op.Size;
-            if (Optimizations.UseSse2 && op.Size > 0)
+            if (shift >= eSize)
+            {
+                if ((op.RegisterSize == RegisterSize.Simd64))
+                {
+                    Operand res = context.VectorZeroUpper64(GetVec(op.Rd));
+
+                    context.Copy(GetVec(op.Rd), res);
+                }
+            }
+            else if (Optimizations.UseGfni && op.Size == 0)
+            {
+                Operand n = GetVec(op.Rn);
+
+                ulong bitMatrix = X86GetGf2p8LogicalShiftLeft(shift);
+
+                Operand vBitMatrix = X86GetElements(context, bitMatrix, bitMatrix);
+
+                Operand res = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, n, vBitMatrix, Const(0));
+
+                if (op.RegisterSize == RegisterSize.Simd64)
+                {
+                    res = context.VectorZeroUpper64(res);
+                }
+
+                context.Copy(GetVec(op.Rd), res);
+            }
+            else if (Optimizations.UseSse2 && op.Size > 0)
             {
                 Operand n = GetVec(op.Rn);
@@ -396,10 +423,40 @@ namespace ARMeilleure.Instructions
         {
             OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;
-            if (Optimizations.UseSse2 && op.Size > 0 && op.Size < 3)
+            int shift = GetImmShr(op);
+
+            if (Optimizations.UseGfni && op.Size == 0)
             {
-                int shift = GetImmShr(op);
+                Operand n = GetVec(op.Rn);
+                ulong bitMatrix;
+
+                if (shift < 8)
+                {
+                    bitMatrix = X86GetGf2p8LogicalShiftLeft(-shift);
+
+                    // Extend sign-bit
+                    bitMatrix |= 0x8080808080808080UL >> (64 - shift * 8);
+                }
+                else
+                {
+                    // Replicate sign-bit into all bits
+                    bitMatrix = 0x8080808080808080UL;
+                }
+
+                Operand vBitMatrix = X86GetElements(context, bitMatrix, bitMatrix);
+
+                Operand res = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, n, vBitMatrix, Const(0));
+
+                if (op.RegisterSize == RegisterSize.Simd64)
+                {
+                    res = context.VectorZeroUpper64(res);
+                }
+
+                context.Copy(GetVec(op.Rd), res);
+            }
+            else if (Optimizations.UseSse2 && op.Size > 0 && op.Size < 3)
+            {
                 Operand n = GetVec(op.Rn);
                 Intrinsic sraInst = X86PsraInstruction[op.Size];
@@ -929,10 +986,44 @@ namespace ARMeilleure.Instructions
             OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;
             int shift = GetImmShl(op);
+            int eSize = 8 << op.Size;
             ulong mask = shift != 0 ? ulong.MaxValue >> (64 - shift) : 0UL;
-            if (Optimizations.UseSse2 && op.Size > 0)
+            if (shift >= eSize)
+            {
+                if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
+                {
+                    Operand res = context.VectorZeroUpper64(GetVec(op.Rd));
+
+                    context.Copy(GetVec(op.Rd), res);
+                }
+            }
+            else if (Optimizations.UseGfni && op.Size == 0)
+            {
+                Operand d = GetVec(op.Rd);
+                Operand n = GetVec(op.Rn);
+
+                ulong bitMatrix = X86GetGf2p8LogicalShiftLeft(shift);
+
+                Operand vBitMatrix = X86GetElements(context, bitMatrix, bitMatrix);
+
+                Operand nShifted = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, n, vBitMatrix, Const(0));
+
+                Operand dMask = X86GetAllElements(context, (long)mask * _masks_SliSri[op.Size]);
+
+                Operand dMasked = context.AddIntrinsic(Intrinsic.X86Pand, d, dMask);
+
+                Operand res = context.AddIntrinsic(Intrinsic.X86Por, nShifted, dMasked);
+
+                if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
+                {
+                    res = context.VectorZeroUpper64(res);
+                }
+
+                context.Copy(d, res);
+            }
+            else if (Optimizations.UseSse2 && op.Size > 0)
             {
                 Operand d = GetVec(op.Rd);
                 Operand n = GetVec(op.Rn);
@@ -988,7 +1079,40 @@ namespace ARMeilleure.Instructions
             ulong mask = (ulong.MaxValue << (eSize - shift)) & (ulong.MaxValue >> (64 - eSize));
-            if (Optimizations.UseSse2 && op.Size > 0)
+            if (shift >= eSize)
+            {
+                if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
+                {
+                    Operand res = context.VectorZeroUpper64(GetVec(op.Rd));
+
+                    context.Copy(GetVec(op.Rd), res);
+                }
+            }
+            else if (Optimizations.UseGfni && op.Size == 0)
+            {
+                Operand d = GetVec(op.Rd);
+                Operand n = GetVec(op.Rn);
+
+                ulong bitMatrix = X86GetGf2p8LogicalShiftLeft(-shift);
+
+                Operand vBitMatrix = X86GetElements(context, bitMatrix, bitMatrix);
+
+                Operand nShifted = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, n, vBitMatrix, Const(0));
+
+                Operand dMask = X86GetAllElements(context, (long)mask * _masks_SliSri[op.Size]);
+
+                Operand dMasked = context.AddIntrinsic(Intrinsic.X86Pand, d, dMask);
+
+                Operand res = context.AddIntrinsic(Intrinsic.X86Por, nShifted, dMasked);
+
+                if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
+                {
+                    res = context.VectorZeroUpper64(res);
+                }
+
+                context.Copy(d, res);
+            }
+            else if (Optimizations.UseSse2 && op.Size > 0)
             {
                 Operand d = GetVec(op.Rd);
                 Operand n = GetVec(op.Rn);
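
To experiment with these matrices without GFNI hardware, the documented per-byte semantics of `gf2p8affineqb` can be modelled in a few lines of C#. The reference model below is an illustration only (the class and function names and the worked values are mine, not part of the commit); with it, `AffineByte(0x0102040810204080UL >> 24, 0x1B)` evaluates to `0xD8`, i.e. `0x1B << 3` truncated to a byte, which is the same matrix `GetLogicalShiftLeft(3)` from the sketch above would produce and the same result the GFNI shift paths compute for each vector element.

```csharp
using System.Numerics;

static class Gf2p8Reference
{
    // One byte lane of gf2p8affineqb with imm8 = 0:
    // result bit i = parity(matrix byte [7 - i] & source byte).
    public static byte AffineByte(ulong matrix, byte src)
    {
        byte result = 0;

        for (int i = 0; i < 8; i++)
        {
            byte row = (byte)(matrix >> ((7 - i) * 8));
            int parity = BitOperations.PopCount((uint)(row & src)) & 1;

            result |= (byte)(parity << i);
        }

        return result;
    }
}
```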
