| author | TSRBerry <20988865+TSRBerry@users.noreply.github.com> | 2023-06-28 01:18:19 +0200 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-06-28 01:18:19 +0200 |
| commit | fbaf62c2309f2987fa73a2022167ee3e81e31ea9 | |
| tree | 9d2ef3298843b551a544acc429f2269f101af935 /src/ARMeilleure/Instructions | |
| parent | b186ec9fc5684bd8fa831a1777f6e936b897c352 | |
Apply new naming rule to all projects except Vp9 (#5407)
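The rule applied here renames constants, including local `const` declarations, from camelCase to PascalCase; every hunk below is this one mechanical change. A minimal before/after sketch with hypothetical identifiers:

```csharp
// Before: local constants named like ordinary locals.
const int maxRetries = 3;

// After: the naming rule this commit applies requires PascalCase for constants.
const int MaxRetries = 3;
```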
Diffstat (limited to 'src/ARMeilleure/Instructions')
| -rw-r--r-- | src/ARMeilleure/Instructions/InstEmitSimdHelper.cs | 32 |
| -rw-r--r-- | src/ARMeilleure/Instructions/InstEmitSimdLogical.cs | 52 |
| -rw-r--r-- | src/ARMeilleure/Instructions/SoftFloat.cs | 66 |
3 files changed, 75 insertions, 75 deletions
diff --git a/src/ARMeilleure/Instructions/InstEmitSimdHelper.cs b/src/ARMeilleure/Instructions/InstEmitSimdHelper.cs
index 35052ad1..0b552740 100644
--- a/src/ARMeilleure/Instructions/InstEmitSimdHelper.cs
+++ b/src/ARMeilleure/Instructions/InstEmitSimdHelper.cs
@@ -1299,17 +1299,17 @@ namespace ARMeilleure.Instructions
             Debug.Assert((op.Size & 1) == 0 && op.RegisterSize == RegisterSize.Simd128);

-            const int sm0 = 0 << 6 | 0 << 4 | 0 << 2 | 0 << 0;
-            const int sm1 = 1 << 6 | 1 << 4 | 1 << 2 | 1 << 0;
-            const int sm2 = 2 << 6 | 2 << 4 | 2 << 2 | 2 << 0;
-            const int sm3 = 3 << 6 | 3 << 4 | 3 << 2 | 3 << 0;
+            const int SM0 = 0 << 6 | 0 << 4 | 0 << 2 | 0 << 0;
+            const int SM1 = 1 << 6 | 1 << 4 | 1 << 2 | 1 << 0;
+            const int SM2 = 2 << 6 | 2 << 4 | 2 << 2 | 2 << 0;
+            const int SM3 = 3 << 6 | 3 << 4 | 3 << 2 | 3 << 0;

             Operand nCopy = context.Copy(GetVec(op.Rn));

-            Operand part0 = context.AddIntrinsic(Intrinsic.X86Shufps, nCopy, nCopy, Const(sm0));
-            Operand part1 = context.AddIntrinsic(Intrinsic.X86Shufps, nCopy, nCopy, Const(sm1));
-            Operand part2 = context.AddIntrinsic(Intrinsic.X86Shufps, nCopy, nCopy, Const(sm2));
-            Operand part3 = context.AddIntrinsic(Intrinsic.X86Shufps, nCopy, nCopy, Const(sm3));
+            Operand part0 = context.AddIntrinsic(Intrinsic.X86Shufps, nCopy, nCopy, Const(SM0));
+            Operand part1 = context.AddIntrinsic(Intrinsic.X86Shufps, nCopy, nCopy, Const(SM1));
+            Operand part2 = context.AddIntrinsic(Intrinsic.X86Shufps, nCopy, nCopy, Const(SM2));
+            Operand part3 = context.AddIntrinsic(Intrinsic.X86Shufps, nCopy, nCopy, Const(SM3));

             Operand res = emit(emit(part0, part1), emit(part2, part3));
@@ -1340,13 +1340,13 @@ namespace ARMeilleure.Instructions
             if ((op.Size & 1) == 0)
             {
-                const int sm0 = 2 << 6 | 2 << 4 | 2 << 2 | 0 << 0;
-                const int sm1 = 2 << 6 | 2 << 4 | 2 << 2 | 1 << 0;
+                const int SM0 = 2 << 6 | 2 << 4 | 2 << 2 | 0 << 0;
+                const int SM1 = 2 << 6 | 2 << 4 | 2 << 2 | 1 << 0;

                 Operand zeroN = context.VectorZeroUpper64(n);

-                op0 = context.AddIntrinsic(Intrinsic.X86Pshufd, zeroN, Const(sm0));
-                op1 = context.AddIntrinsic(Intrinsic.X86Pshufd, zeroN, Const(sm1));
+                op0 = context.AddIntrinsic(Intrinsic.X86Pshufd, zeroN, Const(SM0));
+                op1 = context.AddIntrinsic(Intrinsic.X86Pshufd, zeroN, Const(SM1));
             }
             else /* if ((op.Size & 1) == 1) */
             {
@@ -1412,11 +1412,11 @@ namespace ARMeilleure.Instructions
             }
             else /* if (op.RegisterSize == RegisterSize.Simd128) */
             {
-                const int sm0 = 2 << 6 | 0 << 4 | 2 << 2 | 0 << 0;
-                const int sm1 = 3 << 6 | 1 << 4 | 3 << 2 | 1 << 0;
+                const int SM0 = 2 << 6 | 0 << 4 | 2 << 2 | 0 << 0;
+                const int SM1 = 3 << 6 | 1 << 4 | 3 << 2 | 1 << 0;

-                Operand part0 = context.AddIntrinsic(Intrinsic.X86Shufps, nCopy, mCopy, Const(sm0));
-                Operand part1 = context.AddIntrinsic(Intrinsic.X86Shufps, nCopy, mCopy, Const(sm1));
+                Operand part0 = context.AddIntrinsic(Intrinsic.X86Shufps, nCopy, mCopy, Const(SM0));
+                Operand part1 = context.AddIntrinsic(Intrinsic.X86Shufps, nCopy, mCopy, Const(SM1));

                 context.Copy(GetVec(op.Rd), emit(part0, part1));
             }
diff --git a/src/ARMeilleure/Instructions/InstEmitSimdLogical.cs b/src/ARMeilleure/Instructions/InstEmitSimdLogical.cs
index 97e3da67..ace8e4c5 100644
--- a/src/ARMeilleure/Instructions/InstEmitSimdLogical.cs
+++ b/src/ARMeilleure/Instructions/InstEmitSimdLogical.cs
@@ -408,7 +408,7 @@ namespace ARMeilleure.Instructions
             if (Optimizations.UseGfni)
             {
-                const long bitMatrix =
+                const long BitMatrix =
                     (0b10000000L << 56) |
                     (0b01000000L << 48) |
                     (0b00100000L << 40) |
@@ -418,7 +418,7 @@ namespace ARMeilleure.Instructions
                     (0b00000010L << 8) |
                     (0b00000001L << 0);

-                Operand vBitMatrix = X86GetAllElements(context, bitMatrix);
+                Operand vBitMatrix = X86GetAllElements(context, BitMatrix);

                 Operand res = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, GetVec(op.Rn), vBitMatrix, Const(0));
@@ -469,12 +469,12 @@ namespace ARMeilleure.Instructions
             Operand n = GetVec(op.Rn);

-            const long maskE0 = 06L << 56 | 07L << 48 | 04L << 40 | 05L << 32 | 02L << 24 | 03L << 16 | 00L << 8 | 01L << 0;
-            const long maskE1 = 14L << 56 | 15L << 48 | 12L << 40 | 13L << 32 | 10L << 24 | 11L << 16 | 08L << 8 | 09L << 0;
+            const long MaskE0 = 06L << 56 | 07L << 48 | 04L << 40 | 05L << 32 | 02L << 24 | 03L << 16 | 00L << 8 | 01L << 0;
+            const long MaskE1 = 14L << 56 | 15L << 48 | 12L << 40 | 13L << 32 | 10L << 24 | 11L << 16 | 08L << 8 | 09L << 0;

-            Operand mask = X86GetScalar(context, maskE0);
+            Operand mask = X86GetScalar(context, MaskE0);

-            mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);
+            mask = EmitVectorInsert(context, mask, Const(MaskE1), 1, 3);

             Operand res = context.AddIntrinsic(Intrinsic.X86Pshufb, n, mask);
@@ -503,21 +503,21 @@ namespace ARMeilleure.Instructions
             if (op.Size == 0)
             {
-                const long maskE0 = 04L << 56 | 05L << 48 | 06L << 40 | 07L << 32 | 00L << 24 | 01L << 16 | 02L << 8 | 03L << 0;
-                const long maskE1 = 12L << 56 | 13L << 48 | 14L << 40 | 15L << 32 | 08L << 24 | 09L << 16 | 10L << 8 | 11L << 0;
+                const long MaskE0 = 04L << 56 | 05L << 48 | 06L << 40 | 07L << 32 | 00L << 24 | 01L << 16 | 02L << 8 | 03L << 0;
+                const long MaskE1 = 12L << 56 | 13L << 48 | 14L << 40 | 15L << 32 | 08L << 24 | 09L << 16 | 10L << 8 | 11L << 0;

-                mask = X86GetScalar(context, maskE0);
+                mask = X86GetScalar(context, MaskE0);

-                mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);
+                mask = EmitVectorInsert(context, mask, Const(MaskE1), 1, 3);
             }
             else /* if (op.Size == 1) */
             {
-                const long maskE0 = 05L << 56 | 04L << 48 | 07L << 40 | 06L << 32 | 01L << 24 | 00L << 16 | 03L << 8 | 02L << 0;
-                const long maskE1 = 13L << 56 | 12L << 48 | 15L << 40 | 14L << 32 | 09L << 24 | 08L << 16 | 11L << 8 | 10L << 0;
+                const long MaskE0 = 05L << 56 | 04L << 48 | 07L << 40 | 06L << 32 | 01L << 24 | 00L << 16 | 03L << 8 | 02L << 0;
+                const long MaskE1 = 13L << 56 | 12L << 48 | 15L << 40 | 14L << 32 | 09L << 24 | 08L << 16 | 11L << 8 | 10L << 0;

-                mask = X86GetScalar(context, maskE0);
+                mask = X86GetScalar(context, MaskE0);

-                mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);
+                mask = EmitVectorInsert(context, mask, Const(MaskE1), 1, 3);
             }

             Operand res = context.AddIntrinsic(Intrinsic.X86Pshufb, n, mask);
@@ -547,30 +547,30 @@ namespace ARMeilleure.Instructions
             if (op.Size == 0)
             {
-                const long maskE0 = 00L << 56 | 01L << 48 | 02L << 40 | 03L << 32 | 04L << 24 | 05L << 16 | 06L << 8 | 07L << 0;
-                const long maskE1 = 08L << 56 | 09L << 48 | 10L << 40 | 11L << 32 | 12L << 24 | 13L << 16 | 14L << 8 | 15L << 0;
+                const long MaskE0 = 00L << 56 | 01L << 48 | 02L << 40 | 03L << 32 | 04L << 24 | 05L << 16 | 06L << 8 | 07L << 0;
+                const long MaskE1 = 08L << 56 | 09L << 48 | 10L << 40 | 11L << 32 | 12L << 24 | 13L << 16 | 14L << 8 | 15L << 0;

-                mask = X86GetScalar(context, maskE0);
+                mask = X86GetScalar(context, MaskE0);

-                mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);
+                mask = EmitVectorInsert(context, mask, Const(MaskE1), 1, 3);
             }
             else if (op.Size == 1)
             {
-                const long maskE0 = 01L << 56 | 00L << 48 | 03L << 40 | 02L << 32 | 05L << 24 | 04L << 16 | 07L << 8 | 06L << 0;
-                const long maskE1 = 09L << 56 | 08L << 48 | 11L << 40 | 10L << 32 | 13L << 24 | 12L << 16 | 15L << 8 | 14L << 0;
+                const long MaskE0 = 01L << 56 | 00L << 48 | 03L << 40 | 02L << 32 | 05L << 24 | 04L << 16 | 07L << 8 | 06L << 0;
+                const long MaskE1 = 09L << 56 | 08L << 48 | 11L << 40 | 10L << 32 | 13L << 24 | 12L << 16 | 15L << 8 | 14L << 0;

-                mask = X86GetScalar(context, maskE0);
+                mask = X86GetScalar(context, MaskE0);

-                mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);
+                mask = EmitVectorInsert(context, mask, Const(MaskE1), 1, 3);
             }
             else /* if (op.Size == 2) */
             {
-                const long maskE0 = 03L << 56 | 02L << 48 | 01L << 40 | 00L << 32 | 07L << 24 | 06L << 16 | 05L << 8 | 04L << 0;
-                const long maskE1 = 11L << 56 | 10L << 48 | 09L << 40 | 08L << 32 | 15L << 24 | 14L << 16 | 13L << 8 | 12L << 0;
+                const long MaskE0 = 03L << 56 | 02L << 48 | 01L << 40 | 00L << 32 | 07L << 24 | 06L << 16 | 05L << 8 | 04L << 0;
+                const long MaskE1 = 11L << 56 | 10L << 48 | 09L << 40 | 08L << 32 | 15L << 24 | 14L << 16 | 13L << 8 | 12L << 0;

-                mask = X86GetScalar(context, maskE0);
+                mask = X86GetScalar(context, MaskE0);

-                mask = EmitVectorInsert(context, mask, Const(maskE1), 1, 3);
+                mask = EmitVectorInsert(context, mask, Const(MaskE1), 1, 3);
             }

             Operand res = context.AddIntrinsic(Intrinsic.X86Pshufb, n, mask);
diff --git a/src/ARMeilleure/Instructions/SoftFloat.cs b/src/ARMeilleure/Instructions/SoftFloat.cs
index e8f44ce3..05975d04 100644
--- a/src/ARMeilleure/Instructions/SoftFloat.cs
+++ b/src/ARMeilleure/Instructions/SoftFloat.cs
@@ -175,10 +175,10 @@ namespace ARMeilleure.Instructions
         public static ushort FPRoundCv(double real, ExecutionContext context)
         {
-            const int minimumExp = -14;
+            const int MinimumExp = -14;

-            const int e = 5;
-            const int f = 10;
+            const int E = 5;
+            const int F = 10;

             bool sign;
             double mantissa;
@@ -208,15 +208,15 @@ namespace ARMeilleure.Instructions
                 exponent++;
             }

-            uint biasedExp = (uint)Math.Max(exponent - minimumExp + 1, 0);
+            uint biasedExp = (uint)Math.Max(exponent - MinimumExp + 1, 0);

             if (biasedExp == 0u)
             {
-                mantissa /= Math.Pow(2d, minimumExp - exponent);
+                mantissa /= Math.Pow(2d, MinimumExp - exponent);
             }

-            uint intMant = (uint)Math.Floor(mantissa * Math.Pow(2d, f));
-            double error = mantissa * Math.Pow(2d, f) - (double)intMant;
+            uint intMant = (uint)Math.Floor(mantissa * Math.Pow(2d, F));
+            double error = mantissa * Math.Pow(2d, F) - (double)intMant;

             if (biasedExp == 0u && (error != 0d || (context.Fpcr & FPCR.Ufe) != 0))
             {
@@ -256,12 +256,12 @@ namespace ARMeilleure.Instructions
             {
                 intMant++;

-                if (intMant == 1u << f)
+                if (intMant == 1u << F)
                 {
                     biasedExp = 1u;
                 }

-                if (intMant == 1u << (f + 1))
+                if (intMant == 1u << (F + 1))
                 {
                     biasedExp++;
                     intMant >>= 1;
@@ -272,7 +272,7 @@ namespace ARMeilleure.Instructions
             if ((context.Fpcr & FPCR.Ahp) == 0)
             {
-                if (biasedExp >= (1u << e) - 1u)
+                if (biasedExp >= (1u << E) - 1u)
                 {
                     resultBits = overflowToInf ? FPInfinity(sign) : FPMaxNormal(sign);
@@ -287,7 +287,7 @@ namespace ARMeilleure.Instructions
             }
             else
             {
-                if (biasedExp >= 1u << e)
+                if (biasedExp >= 1u << E)
                 {
                     resultBits = (ushort)((sign ? 1u : 0u) << 15 | 0x7FFFu);
@@ -354,10 +354,10 @@ namespace ARMeilleure.Instructions
         private static float FPRoundCv(double real, ExecutionContext context)
         {
-            const int minimumExp = -126;
+            const int MinimumExp = -126;

-            const int e = 8;
-            const int f = 23;
+            const int E = 8;
+            const int F = 23;

             bool sign;
             double mantissa;
@@ -387,22 +387,22 @@ namespace ARMeilleure.Instructions
                 exponent++;
             }

-            if ((context.Fpcr & FPCR.Fz) != 0 && exponent < minimumExp)
+            if ((context.Fpcr & FPCR.Fz) != 0 && exponent < MinimumExp)
             {
                 context.Fpsr |= FPSR.Ufc;

                 return SoftFloat32.FPZero(sign);
             }

-            uint biasedExp = (uint)Math.Max(exponent - minimumExp + 1, 0);
+            uint biasedExp = (uint)Math.Max(exponent - MinimumExp + 1, 0);

             if (biasedExp == 0u)
             {
-                mantissa /= Math.Pow(2d, minimumExp - exponent);
+                mantissa /= Math.Pow(2d, MinimumExp - exponent);
             }

-            uint intMant = (uint)Math.Floor(mantissa * Math.Pow(2d, f));
-            double error = mantissa * Math.Pow(2d, f) - (double)intMant;
+            uint intMant = (uint)Math.Floor(mantissa * Math.Pow(2d, F));
+            double error = mantissa * Math.Pow(2d, F) - (double)intMant;

             if (biasedExp == 0u && (error != 0d || (context.Fpcr & FPCR.Ufe) != 0))
             {
@@ -442,12 +442,12 @@ namespace ARMeilleure.Instructions
             {
                 intMant++;

-                if (intMant == 1u << f)
+                if (intMant == 1u << F)
                 {
                     biasedExp = 1u;
                 }

-                if (intMant == 1u << (f + 1))
+                if (intMant == 1u << (F + 1))
                 {
                     biasedExp++;
                     intMant >>= 1;
@@ -456,7 +456,7 @@ namespace ARMeilleure.Instructions
             float result;

-            if (biasedExp >= (1u << e) - 1u)
+            if (biasedExp >= (1u << E) - 1u)
             {
                 result = overflowToInf ? SoftFloat32.FPInfinity(sign) : SoftFloat32.FPMaxNormal(sign);
@@ -529,10 +529,10 @@ namespace ARMeilleure.Instructions
         private static double FPRoundCv(double real, ExecutionContext context)
         {
-            const int minimumExp = -1022;
+            const int MinimumExp = -1022;

-            const int e = 11;
-            const int f = 52;
+            const int E = 11;
+            const int F = 52;

             bool sign;
             double mantissa;
@@ -562,22 +562,22 @@ namespace ARMeilleure.Instructions
                 exponent++;
             }

-            if ((context.Fpcr & FPCR.Fz) != 0 && exponent < minimumExp)
+            if ((context.Fpcr & FPCR.Fz) != 0 && exponent < MinimumExp)
             {
                 context.Fpsr |= FPSR.Ufc;

                 return SoftFloat64.FPZero(sign);
             }

-            uint biasedExp = (uint)Math.Max(exponent - minimumExp + 1, 0);
+            uint biasedExp = (uint)Math.Max(exponent - MinimumExp + 1, 0);

             if (biasedExp == 0u)
             {
-                mantissa /= Math.Pow(2d, minimumExp - exponent);
+                mantissa /= Math.Pow(2d, MinimumExp - exponent);
             }

-            ulong intMant = (ulong)Math.Floor(mantissa * Math.Pow(2d, f));
-            double error = mantissa * Math.Pow(2d, f) - (double)intMant;
+            ulong intMant = (ulong)Math.Floor(mantissa * Math.Pow(2d, F));
+            double error = mantissa * Math.Pow(2d, F) - (double)intMant;

             if (biasedExp == 0u && (error != 0d || (context.Fpcr & FPCR.Ufe) != 0))
             {
@@ -617,12 +617,12 @@ namespace ARMeilleure.Instructions
             {
                 intMant++;

-                if (intMant == 1ul << f)
+                if (intMant == 1ul << F)
                 {
                     biasedExp = 1u;
                 }

-                if (intMant == 1ul << (f + 1))
+                if (intMant == 1ul << (F + 1))
                 {
                     biasedExp++;
                     intMant >>= 1;
@@ -631,7 +631,7 @@ namespace ARMeilleure.Instructions
             double result;

-            if (biasedExp >= (1u << e) - 1u)
+            if (biasedExp >= (1u << E) - 1u)
             {
                 result = overflowToInf ? SoftFloat64.FPInfinity(sign) : SoftFloat64.FPMaxNormal(sign);
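The `SM*` constants renamed in InstEmitSimdHelper.cs are SHUFPS/PSHUFD control bytes: four 2-bit fields, one per destination lane, each selecting a source lane. A self-contained sketch, not from the repository, using .NET's own x86 intrinsics rather than ARMeilleure's IR to show what two of these immediates compute:

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ShuffleDemo
{
    static void Main()
    {
        Vector128<float> v = Vector128.Create(10f, 11f, 12f, 13f);

        // 0 << 6 | 0 << 4 | 0 << 2 | 0 << 0: every destination lane reads
        // source lane 0, i.e. a broadcast (the first SM0 in the diff).
        const byte Splat0 = 0 << 6 | 0 << 4 | 0 << 2 | 0 << 0;

        // 2 << 6 | 0 << 4 | 2 << 2 | 0 << 0: lanes 0 and 2 from each operand,
        // packing the even elements (the pairwise SM0 in the diff).
        const byte Even = 2 << 6 | 0 << 4 | 2 << 2 | 0 << 0;

        if (Sse.IsSupported)
        {
            Console.WriteLine(Sse.Shuffle(v, v, Splat0)); // <10, 10, 10, 10>
            Console.WriteLine(Sse.Shuffle(v, v, Even));   // <10, 12, 10, 12>
        }
    }
}
```

With SHUFPS the low two fields select from the first operand and the high two from the second, which is why `2 << 6 | 0 << 4 | 2 << 2 | 0 << 0` packs the even lanes of a vector pair, as in the Simd128 pairwise path above.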

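The three `FPRoundCv` overloads in SoftFloat.cs share one algorithm, parameterized by the target format's exponent width `E` and fraction width `F` (5/10 for half, 8/23 for single, 11/52 for double), with `MinimumExp = 2 - 2^(E - 1)`. A simplified standalone sketch of the half-precision case, truncating instead of applying the FPCR rounding mode and omitting the zero/infinity/NaN, overflow, and status-flag handling the real code performs:

```csharp
using System;

class HalfRoundSketch
{
    // Same parameters as the half-precision FPRoundCv:
    // E = 5 exponent bits, F = 10 fraction bits, MinimumExp = 2 - 2^(E - 1) = -14.
    static ushort RoundToHalfBits(double real)
    {
        const int MinimumExp = -14;
        const int E = 5;
        const int F = 10;

        // Zero, infinity, and NaN are handled before this point in the real code.
        bool sign = real < 0d;
        double mantissa = Math.Abs(real);
        int exponent = 0;

        // Normalize so that mantissa lies in [1, 2).
        while (mantissa < 1d) { mantissa *= 2d; exponent--; }
        while (mantissa >= 2d) { mantissa /= 2d; exponent++; }

        uint biasedExp = (uint)Math.Max(exponent - MinimumExp + 1, 0);

        if (biasedExp == 0u)
        {
            // Subnormal result: denormalize the mantissa instead of biasing.
            mantissa /= Math.Pow(2d, MinimumExp - exponent);
        }

        // Truncate; FPRoundCv also derives an error term and rounds per FPCR.
        uint intMant = (uint)Math.Floor(mantissa * Math.Pow(2d, F));

        // Assemble sign | exponent | fraction (the implicit leading 1 is masked off).
        return (ushort)(((sign ? 1u : 0u) << (E + F)) | (biasedExp << F) | (intMant & ((1u << F) - 1u)));
    }

    static void Main()
    {
        // Agrees with the runtime's conversion when no rounding is needed: both print 0x3E00.
        Console.WriteLine($"0x{RoundToHalfBits(1.5):X4}");
        Console.WriteLine($"0x{BitConverter.HalfToUInt16Bits((Half)1.5):X4}");
    }
}
```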