Mirror of https://github.com/Ryujinx/Ryujinx.git, synced 2024-10-01 12:30:00 +02:00
parent d562ba37a0
commit 8c85bdf2ed
16 changed files with 450 additions and 163 deletions
@@ -75,6 +75,10 @@ namespace ARMeilleure.CodeGen.X86
             Add(X86Instruction.And, new InstructionInfo(0x00000021, 0x04000083, 0x04000081, BadOp, 0x00000023, InstructionFlags.None));
             Add(X86Instruction.Andnpd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f55, InstructionFlags.Vex | InstructionFlags.Prefix66));
             Add(X86Instruction.Andnps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f55, InstructionFlags.Vex));
+            Add(X86Instruction.Andpd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f54, InstructionFlags.Vex | InstructionFlags.Prefix66));
+            Add(X86Instruction.Andps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f54, InstructionFlags.Vex));
+            Add(X86Instruction.Blendvpd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x000f3815, InstructionFlags.Prefix66));
+            Add(X86Instruction.Blendvps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x000f3814, InstructionFlags.Prefix66));
             Add(X86Instruction.Bsr, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000fbd, InstructionFlags.None));
             Add(X86Instruction.Bswap, new InstructionInfo(0x00000fc8, BadOp, BadOp, BadOp, BadOp, InstructionFlags.RegOnly));
             Add(X86Instruction.Call, new InstructionInfo(0x020000ff, BadOp, BadOp, BadOp, BadOp, InstructionFlags.None));
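Reviewer note (not part of the diff): the packed opcode constants in this table are easier to read once the layout is clear. Judging from the entries themselves, the low bytes hold the x86 opcode and a high byte carries the ModRM reg extension for group opcodes (AND is /4 on 0x81/0x83, XOR is /6); the exact field layout here is inferred, not confirmed against the source. A minimal sketch under that assumption:

    // Hypothetical decoding of the packed values above, e.g. 0x04000083:
    // x86 group-1 opcode 0x83 with ModRM reg field /4 (AND); XOR uses /6.
    using System;

    class OpcodeFieldsDemo
    {
        static void Main()
        {
            uint packed = 0x04000083;

            uint opcode = packed & 0xffffff;  // opcode byte(s)
            uint regExt = (packed >> 24) & 7; // ModRM.reg extension (assumed position)

            Console.WriteLine($"opcode=0x{opcode:x2} /{regExt}"); // opcode=0x83 /4
        }
    }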
@@ -245,6 +249,8 @@ namespace ARMeilleure.CodeGen.X86
             Add(X86Instruction.Unpckhps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f15, InstructionFlags.Vex));
             Add(X86Instruction.Unpcklpd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f14, InstructionFlags.Vex | InstructionFlags.Prefix66));
             Add(X86Instruction.Unpcklps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f14, InstructionFlags.Vex));
+            Add(X86Instruction.Vblendvpd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x000f3a4b, InstructionFlags.Vex | InstructionFlags.Prefix66));
+            Add(X86Instruction.Vblendvps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x000f3a4a, InstructionFlags.Vex | InstructionFlags.Prefix66));
             Add(X86Instruction.Vpblendvb, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x000f3a4c, InstructionFlags.Vex | InstructionFlags.Prefix66));
             Add(X86Instruction.Xor, new InstructionInfo(0x00000031, 0x06000083, 0x06000081, BadOp, 0x00000033, InstructionFlags.None));
             Add(X86Instruction.Xorpd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f57, InstructionFlags.Vex | InstructionFlags.Prefix66));
@@ -336,7 +336,15 @@ namespace ARMeilleure.CodeGen.X86
             Debug.Assert(!dest.Type.IsInteger());
 
-            if (info.Inst == X86Instruction.Pblendvb && HardwareCapabilities.SupportsVexEncoding)
+            if (info.Inst == X86Instruction.Blendvpd && HardwareCapabilities.SupportsVexEncoding)
+            {
+                context.Assembler.WriteInstruction(X86Instruction.Vblendvpd, dest, src1, src2, src3);
+            }
+            else if (info.Inst == X86Instruction.Blendvps && HardwareCapabilities.SupportsVexEncoding)
+            {
+                context.Assembler.WriteInstruction(X86Instruction.Vblendvps, dest, src1, src2, src3);
+            }
+            else if (info.Inst == X86Instruction.Pblendvb && HardwareCapabilities.SupportsVexEncoding)
             {
                 context.Assembler.WriteInstruction(X86Instruction.Vpblendvb, dest, src1, src2, src3);
             }
@@ -1646,7 +1654,7 @@ namespace ARMeilleure.CodeGen.X86
             for (int offset = PageSize; offset < size; offset += PageSize)
             {
-                Operand memOp = new MemoryOperand(OperandType.I32, rsp, null, Multiplier.x1, -offset);;
+                Operand memOp = new MemoryOperand(OperandType.I32, rsp, null, Multiplier.x1, -offset);
 
                 context.Assembler.Mov(temp, memOp, OperandType.I32);
             }
@@ -19,6 +19,10 @@ namespace ARMeilleure.CodeGen.X86
             Add(Intrinsic.X86Addss, new IntrinsicInfo(X86Instruction.Addss, IntrinsicType.Binary));
             Add(Intrinsic.X86Andnpd, new IntrinsicInfo(X86Instruction.Andnpd, IntrinsicType.Binary));
             Add(Intrinsic.X86Andnps, new IntrinsicInfo(X86Instruction.Andnps, IntrinsicType.Binary));
+            Add(Intrinsic.X86Andpd, new IntrinsicInfo(X86Instruction.Andpd, IntrinsicType.Binary));
+            Add(Intrinsic.X86Andps, new IntrinsicInfo(X86Instruction.Andps, IntrinsicType.Binary));
+            Add(Intrinsic.X86Blendvpd, new IntrinsicInfo(X86Instruction.Blendvpd, IntrinsicType.Ternary));
+            Add(Intrinsic.X86Blendvps, new IntrinsicInfo(X86Instruction.Blendvps, IntrinsicType.Ternary));
             Add(Intrinsic.X86Cmppd, new IntrinsicInfo(X86Instruction.Cmppd, IntrinsicType.TernaryImm));
             Add(Intrinsic.X86Cmpps, new IntrinsicInfo(X86Instruction.Cmpps, IntrinsicType.TernaryImm));
             Add(Intrinsic.X86Cmpsd, new IntrinsicInfo(X86Instruction.Cmpsd, IntrinsicType.TernaryImm));
@@ -298,8 +298,11 @@ namespace ARMeilleure.CodeGen.X86
         {
             IntrinsicOperation intrinOp = (IntrinsicOperation)operation;
 
-            // PBLENDVB last operand is always implied to be XMM0 when VEX is not supported.
-            if (intrinOp.Intrinsic == Intrinsic.X86Pblendvb && !HardwareCapabilities.SupportsVexEncoding)
+            // BLENDVPD, BLENDVPS, PBLENDVB last operand is always implied to be XMM0 when VEX is not supported.
+            if ((intrinOp.Intrinsic == Intrinsic.X86Blendvpd ||
+                 intrinOp.Intrinsic == Intrinsic.X86Blendvps ||
+                 intrinOp.Intrinsic == Intrinsic.X86Pblendvb) &&
+                !HardwareCapabilities.SupportsVexEncoding)
             {
                 Operand xmm0 = Xmm(X86Register.Xmm0, OperandType.V128);
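Reviewer note: the legacy SSE4.1 encodings of BLENDVPD/BLENDVPS/PBLENDVB take the selector implicitly in XMM0 (only VEX adds a fourth explicit operand), which is why this pre-allocator change pins the mask there when VEX is unavailable. The selection rule itself — each result element comes from the second source where the mask element's sign bit is set — can be seen through .NET's own intrinsics; a small sketch:

    // BLENDVPS semantics via System.Runtime.Intrinsics (real .NET API).
    using System;
    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    class BlendvDemo
    {
        static void Main()
        {
            Vector128<float> left  = Vector128.Create(1f, 2f, 3f, 4f);
            Vector128<float> right = Vector128.Create(-1f, -2f, -3f, -4f);
            Vector128<float> mask  = Vector128.Create(0f, -0f, 0f, -0f); // sign bits: 0,1,0,1

            if (Sse41.IsSupported)
            {
                // Sign bit set -> take `right`, else `left`.
                Console.WriteLine(Sse41.BlendVariable(left, right, mask)); // <1, -2, 3, -4>
            }
        }
    }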
@@ -10,6 +10,10 @@ namespace ARMeilleure.CodeGen.X86
         And,
         Andnpd,
         Andnps,
+        Andpd,
+        Andps,
+        Blendvpd,
+        Blendvps,
         Bsr,
         Bswap,
         Call,
@@ -180,6 +184,8 @@ namespace ARMeilleure.CodeGen.X86
         Unpckhps,
         Unpcklpd,
         Unpcklps,
+        Vblendvpd,
+        Vblendvps,
         Vpblendvb,
         Xor,
         Xorpd,
@@ -1,12 +1,12 @@
-using System.Runtime.CompilerServices;
-
 namespace ARMeilleure.Common
 {
     static class BitUtils
     {
         private const int DeBrujinSequence = 0x77cb531;
 
-        private static int[] DeBrujinLbsLut;
+        private static readonly int[] DeBrujinLbsLut;
+
+        private static readonly sbyte[] HbsNibbleLut;
 
         static BitUtils()
         {
@@ -18,19 +18,27 @@ namespace ARMeilleure.Common
                 DeBrujinLbsLut[lutIndex] = index;
             }
 
+            HbsNibbleLut = new sbyte[] { -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 };
         }
 
-        [MethodImpl(MethodImplOptions.AggressiveInlining)]
-        public static int LowestBitSet(int value)
+        public static int CountBits(int value)
         {
-            if (value == 0)
+            int count = 0;
+
+            while (value != 0)
             {
-                return -1;
+                value &= ~(value & -value);
+
+                count++;
             }
 
-            int lsb = value & -value;
+            return count;
+        }
 
-            return DeBrujinLbsLut[(uint)(DeBrujinSequence * lsb) >> 27];
+        public static long FillWithOnes(int bits)
+        {
+            return bits == 64 ? -1L : (1L << bits) - 1;
         }
 
         public static int HighestBitSet(int value)
@@ -51,9 +59,22 @@ namespace ARMeilleure.Common
             return -1;
         }
 
-        private static readonly sbyte[] HbsNibbleLut = { -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 };
+        public static int HighestBitSetNibble(int value)
+        {
+            return HbsNibbleLut[value];
+        }
 
-        public static int HighestBitSetNibble(int value) => HbsNibbleLut[value & 0b1111];
+        public static int LowestBitSet(int value)
+        {
+            if (value == 0)
+            {
+                return -1;
+            }
+
+            int lsb = value & -value;
+
+            return DeBrujinLbsLut[(uint)(DeBrujinSequence * lsb) >> 27];
+        }
 
         public static long Replicate(long bits, int size)
         {
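Reviewer note: LowestBitSet (moved here unchanged) is the classic De Bruijn trick — value & -value isolates the lowest set bit, and multiplying by the De Bruijn constant makes the top five bits a unique index into a 32-entry table. A standalone sketch mirroring the code above:

    // Why the De Bruijn multiply works: each single-bit input maps to a
    // distinct top-5-bit pattern, so a 32-entry LUT inverts it.
    using System;

    class DeBruijnDemo
    {
        private const int DeBrujinSequence = 0x77cb531;

        static void Main()
        {
            int[] lut = new int[32];

            for (int index = 0; index < 32; index++)
            {
                uint lutIndex = (uint)(DeBrujinSequence * (1 << index)) >> 27;

                lut[lutIndex] = index;
            }

            int value = 0b10100000;   // lowest set bit is bit 5
            int lsb = value & -value; // isolates it: 0b100000

            Console.WriteLine(lut[(uint)(DeBrujinSequence * lsb) >> 27]); // 5
        }
    }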
@@ -67,25 +88,6 @@ namespace ARMeilleure.Common
             return output;
         }
 
-        public static int CountBits(int value)
-        {
-            int count = 0;
-
-            while (value != 0)
-            {
-                value &= ~(value & -value);
-
-                count++;
-            }
-
-            return count;
-        }
-
-        public static long FillWithOnes(int bits)
-        {
-            return bits == 64 ? -1L : (1L << bits) - 1;
-        }
-
         public static int RotateRight(int bits, int shift, int size)
         {
             return (int)RotateRight((uint)bits, shift, size);
@@ -1,10 +1,77 @@
 using ARMeilleure.Common;
-using System;
 
 namespace ARMeilleure.Decoders
 {
     static class DecoderHelper
     {
+        static DecoderHelper()
+        {
+            Imm8ToFP32Table = BuildImm8ToFP32Table();
+            Imm8ToFP64Table = BuildImm8ToFP64Table();
+        }
+
+        public static readonly uint[] Imm8ToFP32Table;
+        public static readonly ulong[] Imm8ToFP64Table;
+
+        private static uint[] BuildImm8ToFP32Table()
+        {
+            uint[] tbl = new uint[256];
+
+            for (int idx = 0; idx < 256; idx++)
+            {
+                tbl[idx] = ExpandImm8ToFP32((uint)idx);
+            }
+
+            return tbl;
+        }
+
+        private static ulong[] BuildImm8ToFP64Table()
+        {
+            ulong[] tbl = new ulong[256];
+
+            for (int idx = 0; idx < 256; idx++)
+            {
+                tbl[idx] = ExpandImm8ToFP64((ulong)idx);
+            }
+
+            return tbl;
+        }
+
+        // abcdefgh -> aBbbbbbc defgh000 00000000 00000000 (B = ~b)
+        private static uint ExpandImm8ToFP32(uint imm)
+        {
+            uint MoveBit(uint bits, int from, int to)
+            {
+                return ((bits >> from) & 1U) << to;
+            }
+
+            return MoveBit(imm, 7, 31) | MoveBit(~imm, 6, 30) |
+                   MoveBit(imm, 6, 29) | MoveBit( imm, 6, 28) |
+                   MoveBit(imm, 6, 27) | MoveBit( imm, 6, 26) |
+                   MoveBit(imm, 6, 25) | MoveBit( imm, 5, 24) |
+                   MoveBit(imm, 4, 23) | MoveBit( imm, 3, 22) |
+                   MoveBit(imm, 2, 21) | MoveBit( imm, 1, 20) |
+                   MoveBit(imm, 0, 19);
+        }
+
+        // abcdefgh -> aBbbbbbb bbcdefgh 00000000 00000000 00000000 00000000 00000000 00000000 (B = ~b)
+        private static ulong ExpandImm8ToFP64(ulong imm)
+        {
+            ulong MoveBit(ulong bits, int from, int to)
+            {
+                return ((bits >> from) & 1UL) << to;
+            }
+
+            return MoveBit(imm, 7, 63) | MoveBit(~imm, 6, 62) |
+                   MoveBit(imm, 6, 61) | MoveBit( imm, 6, 60) |
+                   MoveBit(imm, 6, 59) | MoveBit( imm, 6, 58) |
+                   MoveBit(imm, 6, 57) | MoveBit( imm, 6, 56) |
+                   MoveBit(imm, 6, 55) | MoveBit( imm, 6, 54) |
+                   MoveBit(imm, 5, 53) | MoveBit( imm, 4, 52) |
+                   MoveBit(imm, 3, 51) | MoveBit( imm, 2, 50) |
+                   MoveBit(imm, 1, 49) | MoveBit( imm, 0, 48);
+        }
+
         public struct BitMask
         {
             public long WMask;
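Reviewer note: a quick self-check of ExpandImm8ToFP32 against ARM's VFPExpandImm encoding — imm8 = 0x70 (abcdefgh = 0111 0000) should expand to 1.0f, i.e. 0x3f800000:

    // Standalone copy of the expansion above, applied to 0x70.
    using System;

    class Imm8Demo
    {
        static uint MoveBit(uint bits, int from, int to) => ((bits >> from) & 1U) << to;

        static uint ExpandImm8ToFP32(uint imm)
        {
            return MoveBit(imm, 7, 31) | MoveBit(~imm, 6, 30) |
                   MoveBit(imm, 6, 29) | MoveBit(imm, 6, 28) |
                   MoveBit(imm, 6, 27) | MoveBit(imm, 6, 26) |
                   MoveBit(imm, 6, 25) | MoveBit(imm, 5, 24) |
                   MoveBit(imm, 4, 23) | MoveBit(imm, 3, 22) |
                   MoveBit(imm, 2, 21) | MoveBit(imm, 1, 20) |
                   MoveBit(imm, 0, 19);
        }

        static void Main()
        {
            uint bits = ExpandImm8ToFP32(0x70);

            // 0x3f800000 = 1
            Console.WriteLine($"0x{bits:x8} = {BitConverter.Int32BitsToSingle((int)bits)}");
        }
    }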
@@ -62,34 +129,6 @@ namespace ARMeilleure.Decoders
             };
         }
 
-        public static long DecodeImm8Float(long imm, int size)
-        {
-            int e = 0, f = 0;
-
-            switch (size)
-            {
-                case 0: e = 8;  f = 23; break;
-                case 1: e = 11; f = 52; break;
-
-                default: throw new ArgumentOutOfRangeException(nameof(size));
-            }
-
-            long value = (imm & 0x3f) << f - 4;
-
-            long eBit = (imm >> 6) & 1;
-            long sBit = (imm >> 7) & 1;
-
-            if (eBit != 0)
-            {
-                value |= (1L << e - 3) - 1 << f + 2;
-            }
-
-            value |= (eBit ^ 1) << f + e - 1;
-            value |= sBit << f + e;
-
-            return value;
-        }
-
         public static long DecodeImm24_2(int opCode)
         {
             return ((long)opCode << 40) >> 38;
@@ -8,16 +8,8 @@ namespace ARMeilleure.Decoders
 
         public OpCodeSimdFmov(InstDescriptor inst, ulong address, int opCode) : base(inst, address, opCode)
         {
-            int imm5 = (opCode >> 5) & 0x1f;
             int type = (opCode >> 22) & 0x3;
 
-            if (imm5 != 0b00000 || type > 1)
-            {
-                Instruction = InstDescriptor.Undefined;
-
-                return;
-            }
-
             Size = type;
 
             long imm;
@@ -25,7 +17,14 @@ namespace ARMeilleure.Decoders
             Rd  = (opCode >>  0) & 0x1f;
             imm = (opCode >> 13) & 0xff;
 
-            Immediate = DecoderHelper.DecodeImm8Float(imm, type);
+            if (type == 0)
+            {
+                Immediate = (long)DecoderHelper.Imm8ToFP32Table[(int)imm];
+            }
+            else /* if (type == 1) */
+            {
+                Immediate = (long)DecoderHelper.Imm8ToFP64Table[(int)imm];
+            }
         }
     }
 }
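Reviewer note: a worked decode of the new table lookup. For FMOV S0, #1.0 — encoding 0x1E2E1000, worked out by hand, so treat it as illustrative — type extracts as 0 and imm8 as 0x70, so Immediate comes straight from Imm8ToFP32Table:

    // Field extraction exactly as the constructor above does it.
    using System;

    class FmovDecodeDemo
    {
        static void Main()
        {
            int opCode = 0x1E2E1000;

            int type = (opCode >> 22) & 0x3;  // 0 -> single precision
            long imm = (opCode >> 13) & 0xff; // 0x70 -> expands to 1.0f

            Console.WriteLine($"type={type} imm8=0x{imm:x2}");
        }
    }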
@@ -23,19 +23,19 @@ namespace ARMeilleure.Decoders
 
             if (modeHigh == 0b111)
             {
-                Size = modeLow != 0 ? op : 3;
-
                 switch (op | (modeLow << 1))
                 {
                     case 0:
                         // 64-bits Immediate.
                         // Transform abcd efgh into abcd efgh abcd efgh ...
+                        Size = 3;
                         imm = (long)((ulong)imm * 0x0101010101010101);
                         break;
 
                     case 1:
                         // 64-bits Immediate.
                         // Transform abcd efgh into aaaa aaaa bbbb bbbb ...
+                        Size = 3;
                         imm = (imm & 0xf0) >> 4 | (imm & 0x0f) << 4;
                         imm = (imm & 0xcc) >> 2 | (imm & 0x33) << 2;
                         imm = (imm & 0xaa) >> 1 | (imm & 0x55) << 1;
@@ -49,9 +49,16 @@ namespace ARMeilleure.Decoders
                         break;
 
                     case 2:
+                        // 2 x 32-bits floating point Immediate.
+                        Size = 0;
+                        imm = (long)DecoderHelper.Imm8ToFP32Table[(int)imm];
+                        imm |= imm << 32;
+                        break;
+
                     case 3:
-                        // Floating point Immediate.
-                        imm = DecoderHelper.DecodeImm8Float(imm, Size);
+                        // 64-bits floating point Immediate.
+                        Size = 1;
+                        imm = (long)DecoderHelper.Imm8ToFP64Table[(int)imm];
                         break;
                 }
             }
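Reviewer note: case 2 builds the 2x32-bit FMOV vector immediate by expanding imm8 once and mirroring the result into both halves of each 64-bit lane; a tiny sketch of that step:

    // The imm |= imm << 32 duplication from case 2 above, in isolation.
    using System;

    class VecImmDemo
    {
        static void Main()
        {
            long imm = 0x3f800000; // Imm8ToFP32Table[0x70], i.e. 1.0f

            imm |= imm << 32;      // duplicate into the high 32 bits

            Console.WriteLine($"0x{imm:x16}"); // 0x3f8000003f800000
        }
    }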
@@ -72,7 +79,7 @@ namespace ARMeilleure.Decoders
             }
             else
             {
-                // 8 bits without shift.
+                // 8-bits without shift.
                 Size = 0;
             }
 
@@ -268,7 +268,7 @@ namespace ARMeilleure.Instructions
         {
             if (setCarry)
             {
-                SetFlag(context, PState.CFlag, Const(0));;
+                SetFlag(context, PState.CFlag, Const(0));
             }
 
             return Const(0);
@@ -384,8 +384,7 @@ namespace ARMeilleure.Instructions
             }
             else
             {
-                OperandType type = sizeF != 0 ? OperandType.FP64
-                                              : OperandType.FP32;
+                OperandType type = sizeF != 0 ? OperandType.FP64 : OperandType.FP32;
 
                 Operand ne0 = context.VectorExtract(type, GetVec(op.Rn), 0);
                 Operand ne1 = context.VectorExtract(type, GetVec(op.Rn), 1);
@@ -455,6 +454,7 @@ namespace ARMeilleure.Instructions
             {
                 OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
 
+                Operand d = GetVec(op.Rd);
                 Operand a = GetVec(op.Ra);
                 Operand n = GetVec(op.Rn);
                 Operand m = GetVec(op.Rm);
@@ -462,18 +462,16 @@ namespace ARMeilleure.Instructions
                 if (op.Size == 0)
                 {
                     Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
 
                     res = context.AddIntrinsic(Intrinsic.X86Addss, a, res);
 
-                    context.Copy(GetVec(op.Rd), context.VectorZeroUpper96(res));
+                    context.Copy(d, context.VectorZeroUpper96(res));
                 }
                 else /* if (op.Size == 1) */
                 {
                     Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
 
                     res = context.AddIntrinsic(Intrinsic.X86Addsd, a, res);
 
-                    context.Copy(GetVec(op.Rd), context.VectorZeroUpper64(res));
+                    context.Copy(d, context.VectorZeroUpper64(res));
                 }
             }
             else
@@ -516,20 +514,34 @@ namespace ARMeilleure.Instructions
         }
 
         public static void Fmaxnm_S(ArmEmitterContext context)
+        {
+            if (Optimizations.FastFP && Optimizations.UseSse41)
+            {
+                EmitSse41MaxMinNumOpF(context, isMaxNum: true, scalar: true);
+            }
+            else
             {
                 EmitScalarBinaryOpF(context, (op1, op2) =>
                 {
                     return EmitSoftFloatCall(context, SoftFloat32.FPMaxNum, SoftFloat64.FPMaxNum, op1, op2);
                 });
             }
+        }
 
         public static void Fmaxnm_V(ArmEmitterContext context)
+        {
+            if (Optimizations.FastFP && Optimizations.UseSse41)
+            {
+                EmitSse41MaxMinNumOpF(context, isMaxNum: true, scalar: false);
+            }
+            else
             {
                 EmitVectorBinaryOpF(context, (op1, op2) =>
                 {
                     return EmitSoftFloatCall(context, SoftFloat32.FPMaxNum, SoftFloat64.FPMaxNum, op1, op2);
                 });
             }
+        }
 
         public static void Fmaxp_V(ArmEmitterContext context)
         {
@@ -577,20 +589,34 @@ namespace ARMeilleure.Instructions
         }
 
         public static void Fminnm_S(ArmEmitterContext context)
+        {
+            if (Optimizations.FastFP && Optimizations.UseSse41)
+            {
+                EmitSse41MaxMinNumOpF(context, isMaxNum: false, scalar: true);
+            }
+            else
             {
                 EmitScalarBinaryOpF(context, (op1, op2) =>
                 {
                     return EmitSoftFloatCall(context, SoftFloat32.FPMinNum, SoftFloat64.FPMinNum, op1, op2);
                 });
             }
+        }
 
         public static void Fminnm_V(ArmEmitterContext context)
+        {
+            if (Optimizations.FastFP && Optimizations.UseSse41)
+            {
+                EmitSse41MaxMinNumOpF(context, isMaxNum: false, scalar: false);
+            }
+            else
             {
                 EmitVectorBinaryOpF(context, (op1, op2) =>
                 {
                     return EmitSoftFloatCall(context, SoftFloat32.FPMinNum, SoftFloat64.FPMinNum, op1, op2);
                 });
             }
+        }
 
         public static void Fminp_V(ArmEmitterContext context)
         {
@@ -813,6 +839,7 @@ namespace ARMeilleure.Instructions
             {
                 OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
 
+                Operand d = GetVec(op.Rd);
                 Operand a = GetVec(op.Ra);
                 Operand n = GetVec(op.Rn);
                 Operand m = GetVec(op.Rm);
@@ -820,18 +847,16 @@ namespace ARMeilleure.Instructions
                 if (op.Size == 0)
                 {
                     Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
 
                     res = context.AddIntrinsic(Intrinsic.X86Subss, a, res);
 
-                    context.Copy(GetVec(op.Rd), context.VectorZeroUpper96(res));
+                    context.Copy(d, context.VectorZeroUpper96(res));
                 }
                 else /* if (op.Size == 1) */
                 {
                     Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
 
                     res = context.AddIntrinsic(Intrinsic.X86Subsd, a, res);
 
-                    context.Copy(GetVec(op.Rd), context.VectorZeroUpper64(res));
+                    context.Copy(d, context.VectorZeroUpper64(res));
                 }
             }
             else
@@ -1034,37 +1059,89 @@ namespace ARMeilleure.Instructions
         }
 
         public static void Fnmadd_S(ArmEmitterContext context) // Fused.
+        {
+            if (Optimizations.FastFP && Optimizations.UseSse2)
             {
                 OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
 
-            int sizeF = op.Size & 1;
+                Operand d = GetVec(op.Rd);
+                Operand a = GetVec(op.Ra);
+                Operand n = GetVec(op.Rn);
+                Operand m = GetVec(op.Rm);
 
-            OperandType type = sizeF != 0 ? OperandType.FP64 : OperandType.FP32;
+                if (op.Size == 0)
+                {
+                    Operand mask = X86GetScalar(context, -0f);
 
-            Operand ne = context.VectorExtract(type, GetVec(op.Rn), 0);
-            Operand me = context.VectorExtract(type, GetVec(op.Rm), 0);
-            Operand ae = context.VectorExtract(type, GetVec(op.Ra), 0);
+                    Operand aNeg = context.AddIntrinsic(Intrinsic.X86Xorps, mask, a);
 
-            Operand res = context.Subtract(context.Multiply(context.Negate(ne), me), ae);
+                    Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
+                    res = context.AddIntrinsic(Intrinsic.X86Subss, aNeg, res);
 
-            context.Copy(GetVec(op.Rd), context.VectorInsert(context.VectorZero(), res, 0));
+                    context.Copy(d, context.VectorZeroUpper96(res));
+                }
+                else /* if (op.Size == 1) */
+                {
+                    Operand mask = X86GetScalar(context, -0d);
+
+                    Operand aNeg = context.AddIntrinsic(Intrinsic.X86Xorpd, mask, a);
+
+                    Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
+                    res = context.AddIntrinsic(Intrinsic.X86Subsd, aNeg, res);
+
+                    context.Copy(d, context.VectorZeroUpper64(res));
+                }
+            }
+            else
+            {
+                EmitScalarTernaryRaOpF(context, (op1, op2, op3) =>
+                {
+                    return EmitSoftFloatCall(context, SoftFloat32.FPNegMulAdd, SoftFloat64.FPNegMulAdd, op1, op2, op3);
+                });
+            }
         }
 
         public static void Fnmsub_S(ArmEmitterContext context) // Fused.
+        {
+            if (Optimizations.FastFP && Optimizations.UseSse2)
             {
                 OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
 
-            int sizeF = op.Size & 1;
+                Operand d = GetVec(op.Rd);
+                Operand a = GetVec(op.Ra);
+                Operand n = GetVec(op.Rn);
+                Operand m = GetVec(op.Rm);
 
-            OperandType type = sizeF != 0 ? OperandType.FP64 : OperandType.FP32;
+                if (op.Size == 0)
+                {
+                    Operand mask = X86GetScalar(context, -0f);
 
-            Operand ne = context.VectorExtract(type, GetVec(op.Rn), 0);
-            Operand me = context.VectorExtract(type, GetVec(op.Rm), 0);
-            Operand ae = context.VectorExtract(type, GetVec(op.Ra), 0);
+                    Operand aNeg = context.AddIntrinsic(Intrinsic.X86Xorps, mask, a);
 
-            Operand res = context.Subtract(context.Multiply(ne, me), ae);
+                    Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
+                    res = context.AddIntrinsic(Intrinsic.X86Addss, aNeg, res);
 
-            context.Copy(GetVec(op.Rd), context.VectorInsert(context.VectorZero(), res, 0));
+                    context.Copy(d, context.VectorZeroUpper96(res));
+                }
+                else /* if (op.Size == 1) */
+                {
+                    Operand mask = X86GetScalar(context, -0d);
+
+                    Operand aNeg = context.AddIntrinsic(Intrinsic.X86Xorpd, mask, a);
+
+                    Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
+                    res = context.AddIntrinsic(Intrinsic.X86Addsd, aNeg, res);
+
+                    context.Copy(d, context.VectorZeroUpper64(res));
+                }
+            }
+            else
+            {
+                EmitScalarTernaryRaOpF(context, (op1, op2, op3) =>
+                {
+                    return EmitSoftFloatCall(context, SoftFloat32.FPNegMulSub, SoftFloat64.FPNegMulSub, op1, op2, op3);
+                });
+            }
         }
 
         public static void Fnmul_S(ArmEmitterContext context)
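Reviewer note: the xorps/xorpd against -0.0 just flips the sign bit of the addend. A scalar model (mine; it ignores rounding, fusing and NaN details) of what the two fast paths compute:

    // Fnmadd: Subss(aNeg, n*m) = -a - n*m; Fnmsub: Addss(aNeg, n*m) = -a + n*m.
    using System;

    class FnmaddModel
    {
        static double Fnmadd(double a, double n, double m) => -a - n * m;
        static double Fnmsub(double a, double n, double m) => -a + n * m;

        static void Main()
        {
            Console.WriteLine(Fnmadd(1.0, 2.0, 3.0)); // -7
            Console.WriteLine(Fnmsub(1.0, 2.0, 3.0)); // 5
        }
    }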
@@ -2067,9 +2144,7 @@ namespace ARMeilleure.Instructions
                 m = context.AddIntrinsic(Intrinsic.X86Psrldq, m, Const(8));
             }
 
-            Intrinsic movInst = op.Size == 0
-                ? Intrinsic.X86Pmovsxbw
-                : Intrinsic.X86Pmovsxwd;
+            Intrinsic movInst = op.Size == 0 ? Intrinsic.X86Pmovsxbw : Intrinsic.X86Pmovsxwd;
 
             n = context.AddIntrinsic(movInst, n);
             m = context.AddIntrinsic(movInst, m);
@@ -2694,9 +2769,7 @@ namespace ARMeilleure.Instructions
                 m = context.AddIntrinsic(Intrinsic.X86Psrldq, m, Const(8));
             }
 
-            Intrinsic movInst = op.Size == 0
-                ? Intrinsic.X86Pmovzxbw
-                : Intrinsic.X86Pmovzxwd;
+            Intrinsic movInst = op.Size == 0 ? Intrinsic.X86Pmovzxbw : Intrinsic.X86Pmovzxwd;
 
             n = context.AddIntrinsic(movInst, n);
             m = context.AddIntrinsic(movInst, m);
@@ -3011,6 +3084,98 @@ namespace ARMeilleure.Instructions
             context.Copy(GetVec(op.Rd), res);
         }
 
+        private static Operand EmitSse2VectorIsQNaNOpF(ArmEmitterContext context, Operand opF)
+        {
+            IOpCodeSimd op = (IOpCodeSimd)context.CurrOp;
+
+            if ((op.Size & 1) == 0)
+            {
+                const int QBit = 22;
+
+                Operand qMask = X86GetAllElements(context, 1 << QBit);
+
+                Operand mask1 = context.AddIntrinsic(Intrinsic.X86Cmpps, opF, opF, Const((int)CmpCondition.UnorderedQ));
+
+                Operand mask2 = context.AddIntrinsic(Intrinsic.X86Pand, opF, qMask);
+                mask2 = context.AddIntrinsic(Intrinsic.X86Cmpps, mask2, qMask, Const((int)CmpCondition.Equal));
+
+                return context.AddIntrinsic(Intrinsic.X86Andps, mask1, mask2);
+            }
+            else /* if ((op.Size & 1) == 1) */
+            {
+                const int QBit = 51;
+
+                Operand qMask = X86GetAllElements(context, 1L << QBit);
+
+                Operand mask1 = context.AddIntrinsic(Intrinsic.X86Cmppd, opF, opF, Const((int)CmpCondition.UnorderedQ));
+
+                Operand mask2 = context.AddIntrinsic(Intrinsic.X86Pand, opF, qMask);
+                mask2 = context.AddIntrinsic(Intrinsic.X86Cmppd, mask2, qMask, Const((int)CmpCondition.Equal));
+
+                return context.AddIntrinsic(Intrinsic.X86Andpd, mask1, mask2);
+            }
+        }
+
+        private static void EmitSse41MaxMinNumOpF(ArmEmitterContext context, bool isMaxNum, bool scalar)
+        {
+            OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
+
+            Operand d = GetVec(op.Rd);
+            Operand n = GetVec(op.Rn);
+            Operand m = GetVec(op.Rm);
+
+            Operand nQNaNMask = EmitSse2VectorIsQNaNOpF(context, n);
+            Operand mQNaNMask = EmitSse2VectorIsQNaNOpF(context, m);
+
+            Operand nNum = context.Copy(n);
+            Operand mNum = context.Copy(m);
+
+            int sizeF = op.Size & 1;
+
+            if (sizeF == 0)
+            {
+                Operand negInfMask = X86GetAllElements(context, isMaxNum ? float.NegativeInfinity : float.PositiveInfinity);
+
+                Operand nMask = context.AddIntrinsic(Intrinsic.X86Andnps, mQNaNMask, nQNaNMask);
+                Operand mMask = context.AddIntrinsic(Intrinsic.X86Andnps, nQNaNMask, mQNaNMask);
+
+                nNum = context.AddIntrinsic(Intrinsic.X86Blendvps, nNum, negInfMask, nMask);
+                mNum = context.AddIntrinsic(Intrinsic.X86Blendvps, mNum, negInfMask, mMask);
+
+                Operand res = context.AddIntrinsic(isMaxNum ? Intrinsic.X86Maxps : Intrinsic.X86Minps, nNum, mNum);
+
+                if (scalar)
+                {
+                    res = context.VectorZeroUpper96(res);
+                }
+                else if (op.RegisterSize == RegisterSize.Simd64)
+                {
+                    res = context.VectorZeroUpper64(res);
+                }
+
+                context.Copy(d, res);
+            }
+            else /* if (sizeF == 1) */
+            {
+                Operand negInfMask = X86GetAllElements(context, isMaxNum ? double.NegativeInfinity : double.PositiveInfinity);
+
+                Operand nMask = context.AddIntrinsic(Intrinsic.X86Andnpd, mQNaNMask, nQNaNMask);
+                Operand mMask = context.AddIntrinsic(Intrinsic.X86Andnpd, nQNaNMask, mQNaNMask);
+
+                nNum = context.AddIntrinsic(Intrinsic.X86Blendvpd, nNum, negInfMask, nMask);
+                mNum = context.AddIntrinsic(Intrinsic.X86Blendvpd, mNum, negInfMask, mMask);
+
+                Operand res = context.AddIntrinsic(isMaxNum ? Intrinsic.X86Maxpd : Intrinsic.X86Minpd, nNum, mNum);
+
+                if (scalar)
+                {
+                    res = context.VectorZeroUpper64(res);
+                }
+
+                context.Copy(d, res);
+            }
+        }
+
         private enum AddSub
         {
             None,
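Reviewer note: the blends substitute -infinity (or +infinity for min) into any lane where a quiet NaN appears on exactly one side, so the plain max/min then returns the numeric operand — which is the FMAXNM/FMINNM quiet-NaN rule. A scalar model (mine) of that behavior:

    // One-sided quiet NaN loses; a NaN on both sides still propagates.
    using System;

    class MaxNumModel
    {
        static float MaxNum(float n, float m)
        {
            if (float.IsNaN(n) && !float.IsNaN(m)) n = float.NegativeInfinity;
            if (float.IsNaN(m) && !float.IsNaN(n)) m = float.NegativeInfinity;

            return Math.Max(n, m);
        }

        static void Main()
        {
            Console.WriteLine(MaxNum(float.NaN, 2f)); // 2
            Console.WriteLine(MaxNum(3f, float.NaN)); // 3
        }
    }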
@@ -300,7 +300,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.FastFP && Optimizations.UseSse2)
             {
-                EmitCmpSseOrSse2OpF(context, CmpCondition.Equal, scalar: true);
+                EmitSse2CmpOpF(context, CmpCondition.Equal, scalar: true);
             }
             else
             {
@@ -312,7 +312,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.FastFP && Optimizations.UseSse2)
             {
-                EmitCmpSseOrSse2OpF(context, CmpCondition.Equal, scalar: false);
+                EmitSse2CmpOpF(context, CmpCondition.Equal, scalar: false);
             }
             else
             {
@@ -324,7 +324,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.FastFP && Optimizations.UseAvx)
             {
-                EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThanOrEqual, scalar: true);
+                EmitSse2CmpOpF(context, CmpCondition.GreaterThanOrEqual, scalar: true);
             }
             else
             {
@@ -336,7 +336,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.FastFP && Optimizations.UseAvx)
             {
-                EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThanOrEqual, scalar: false);
+                EmitSse2CmpOpF(context, CmpCondition.GreaterThanOrEqual, scalar: false);
             }
             else
             {
@@ -348,7 +348,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.FastFP && Optimizations.UseAvx)
             {
-                EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThan, scalar: true);
+                EmitSse2CmpOpF(context, CmpCondition.GreaterThan, scalar: true);
             }
             else
             {
@@ -360,7 +360,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.FastFP && Optimizations.UseAvx)
             {
-                EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThan, scalar: false);
+                EmitSse2CmpOpF(context, CmpCondition.GreaterThan, scalar: false);
             }
             else
             {
@@ -372,7 +372,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.FastFP && Optimizations.UseSse2)
             {
-                EmitCmpSseOrSse2OpF(context, CmpCondition.LessThanOrEqual, scalar: true);
+                EmitSse2CmpOpF(context, CmpCondition.LessThanOrEqual, scalar: true);
             }
             else
             {
@@ -384,7 +384,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.FastFP && Optimizations.UseSse2)
             {
-                EmitCmpSseOrSse2OpF(context, CmpCondition.LessThanOrEqual, scalar: false);
+                EmitSse2CmpOpF(context, CmpCondition.LessThanOrEqual, scalar: false);
             }
             else
             {
@@ -396,7 +396,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.FastFP && Optimizations.UseSse2)
             {
-                EmitCmpSseOrSse2OpF(context, CmpCondition.LessThan, scalar: true);
+                EmitSse2CmpOpF(context, CmpCondition.LessThan, scalar: true);
             }
             else
             {
@@ -408,7 +408,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.FastFP && Optimizations.UseSse2)
             {
-                EmitCmpSseOrSse2OpF(context, CmpCondition.LessThan, scalar: false);
+                EmitSse2CmpOpF(context, CmpCondition.LessThan, scalar: false);
             }
             else
             {
@@ -673,7 +673,7 @@ namespace ARMeilleure.Instructions
             context.Copy(GetVec(op.Rd), res);
         }
 
-        private static void EmitCmpSseOrSse2OpF(ArmEmitterContext context, CmpCondition cond, bool scalar)
+        private static void EmitSse2CmpOpF(ArmEmitterContext context, CmpCondition cond, bool scalar)
         {
             OpCodeSimd op = (OpCodeSimd)context.CurrOp;
 
@@ -907,7 +907,7 @@ namespace ARMeilleure.Instructions
 
             Operand res = context.VectorZero();
 
-            Operand me = EmitVectorExtract(context, op.Rm, op.Index, op.Size, signed);;
+            Operand me = EmitVectorExtract(context, op.Rm, op.Index, op.Size, signed);
 
             int elems = 8 >> op.Size;
 
@@ -939,7 +939,7 @@ namespace ARMeilleure.Instructions
 
             Operand res = context.VectorZero();
 
-            Operand me = EmitVectorExtract(context, op.Rm, op.Index, op.Size, signed);;
+            Operand me = EmitVectorExtract(context, op.Rm, op.Index, op.Size, signed);
 
             int elems = 8 >> op.Size;
 
@@ -1114,6 +1114,7 @@ namespace ARMeilleure.Instructions
         Equal              = 0, // Ordered, non-signaling.
         LessThan           = 1, // Ordered, signaling.
         LessThanOrEqual    = 2, // Ordered, signaling.
+        UnorderedQ         = 3, // Non-signaling.
         NotLessThan        = 5, // Unordered, signaling.
         NotLessThanOrEqual = 6, // Unordered, signaling.
         OrderedQ           = 7, // Non-signaling.
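Reviewer note: these values are the x86 CMPPS/CMPPD immediate predicates. The new UnorderedQ = 3 is what EmitSse2VectorIsQNaNOpF relies on — a value compared against itself is unordered exactly when it is NaN, which .NET's own intrinsics can demonstrate:

    // CompareUnordered(v, v) yields an all-ones lane wherever v is NaN.
    using System;
    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    class UnorderedDemo
    {
        static void Main()
        {
            Vector128<float> v = Vector128.Create(1f, float.NaN, 3f, float.NaN);

            if (Sse.IsSupported)
            {
                // All-ones lanes reinterpret as NaN when printed: <0, NaN, 0, NaN>
                Console.WriteLine(Sse.CompareUnordered(v, v));
            }
        }
    }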
@@ -177,7 +177,7 @@ namespace ARMeilleure.Instructions
 
             if (op.RegisterSize == RegisterSize.Simd64)
             {
-                nShifted = context.AddIntrinsic(Intrinsic.X86Movlhps, nShifted, context.VectorZero());
+                nShifted = context.VectorZeroUpper64(nShifted);
             }
 
             nShifted = context.AddIntrinsic(Intrinsic.X86Psrldq, nShifted, Const(op.Imm4));
@@ -188,7 +188,7 @@ namespace ARMeilleure.Instructions
 
             if (op.RegisterSize == RegisterSize.Simd64)
             {
-                mShifted = context.AddIntrinsic(Intrinsic.X86Movlhps, mShifted, context.VectorZero());
+                mShifted = context.VectorZeroUpper64(mShifted);
             }
 
             Operand res = context.AddIntrinsic(Intrinsic.X86Por, nShifted, mShifted);
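Reviewer note on the two hunks above: MOVLHPS dst, zero copies zero's low 64 bits into dst's upper half, which is exactly "zero the upper 64 bits" — so VectorZeroUpper64 is a behavior-preserving (and clearer) replacement. Sketch via .NET's Sse.MoveLowToHigh:

    using System;
    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    class ZeroUpperDemo
    {
        static void Main()
        {
            Vector128<float> v = Vector128.Create(1f, 2f, 3f, 4f);

            if (Sse.IsSupported)
            {
                // Low half of v kept, upper half replaced by zero: <1, 2, 0, 0>
                Console.WriteLine(Sse.MoveLowToHigh(v, Vector128<float>.Zero));
            }
        }
    }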
@@ -277,9 +277,10 @@ namespace ARMeilleure.Instructions
         {
             OpCodeSimd op = (OpCodeSimd)context.CurrOp;
 
+            Operand d = GetVec(op.Rd);
             Operand n = GetIntOrZR(context, op.Rn);
 
-            context.Copy(GetVec(op.Rd), EmitVectorInsert(context, GetVec(op.Rd), n, 1, 3));
+            context.Copy(d, EmitVectorInsert(context, d, n, 1, 3));
         }
 
         public static void Fmov_S(ArmEmitterContext context)
@@ -311,19 +312,33 @@ namespace ARMeilleure.Instructions
         {
             OpCodeSimdImm op = (OpCodeSimdImm)context.CurrOp;
 
+            if (Optimizations.UseSse2)
+            {
+                if (op.RegisterSize == RegisterSize.Simd128)
+                {
+                    context.Copy(GetVec(op.Rd), X86GetAllElements(context, op.Immediate));
+                }
+                else
+                {
+                    context.Copy(GetVec(op.Rd), X86GetScalar(context, op.Immediate));
+                }
+            }
+            else
+            {
                 Operand e = Const(op.Immediate);
 
                 Operand res = context.VectorZero();
 
-            int elems = op.RegisterSize == RegisterSize.Simd128 ? 4 : 2;
+                int elems = op.RegisterSize == RegisterSize.Simd128 ? 2 : 1;
 
-            for (int index = 0; index < (elems >> op.Size); index++)
+                for (int index = 0; index < elems; index++)
                 {
-                res = EmitVectorInsert(context, res, e, index, op.Size + 2);
+                    res = EmitVectorInsert(context, res, e, index, 3);
                 }
 
                 context.Copy(GetVec(op.Rd), res);
             }
+        }
 
         public static void Ins_Gp(ArmEmitterContext context)
         {
@@ -349,7 +364,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.UseSse2)
             {
-                EmitMoviMvni(context, not: false);
+                EmitSse2MoviMvni(context, not: false);
             }
             else
             {
@@ -361,7 +376,7 @@ namespace ARMeilleure.Instructions
         {
             if (Optimizations.UseSse2)
             {
-                EmitMoviMvni(context, not: true);
+                EmitSse2MoviMvni(context, not: true);
             }
             else
             {
@@ -430,13 +445,11 @@ namespace ARMeilleure.Instructions
             {
                 Operand d = GetVec(op.Rd);
 
-                Operand res = context.AddIntrinsic(Intrinsic.X86Movlhps, d, context.VectorZero());
-
-                Operand n = GetVec(op.Rn);
+                Operand res = context.VectorZeroUpper64(d);
 
                 Operand mask = X86GetAllElements(context, _masksE0_TrnUzpXtn[op.Size]);
 
-                Operand res2 = context.AddIntrinsic(Intrinsic.X86Pshufb, n, mask);
+                Operand res2 = context.AddIntrinsic(Intrinsic.X86Pshufb, GetVec(op.Rn), mask);
 
                 Intrinsic movInst = op.RegisterSize == RegisterSize.Simd128
                     ? Intrinsic.X86Movlhps
@@ -444,7 +457,7 @@ namespace ARMeilleure.Instructions
 
                 res = context.AddIntrinsic(movInst, res, res2);
 
-                context.Copy(GetVec(op.Rd), res);
+                context.Copy(d, res);
             }
             else
             {
@@ -452,7 +465,9 @@ namespace ARMeilleure.Instructions
 
                 int part = op.RegisterSize == RegisterSize.Simd128 ? elems : 0;
 
-                Operand res = part == 0 ? context.VectorZero() : context.Copy(GetVec(op.Rd));
+                Operand d = GetVec(op.Rd);
+
+                Operand res = part == 0 ? context.VectorZero() : context.Copy(d);
 
                 for (int index = 0; index < elems; index++)
                 {
@@ -461,7 +476,7 @@ namespace ARMeilleure.Instructions
                     res = EmitVectorInsert(context, res, ne, part + index, op.Size);
                 }
 
-                context.Copy(GetVec(op.Rd), res);
+                context.Copy(d, res);
             }
         }
 
@@ -475,7 +490,7 @@ namespace ARMeilleure.Instructions
             EmitVectorZip(context, part: 1);
         }
 
-        private static void EmitMoviMvni(ArmEmitterContext context, bool not)
+        private static void EmitSse2MoviMvni(ArmEmitterContext context, bool not)
         {
             OpCodeSimdImm op = (OpCodeSimdImm)context.CurrOp;
 
@@ -1089,8 +1089,6 @@ namespace ARMeilleure.Instructions
 
         public static float FPMulSub(float valueA, float value1, float value2)
         {
-            ExecutionContext context = NativeInterface.GetContext();
-
             value1 = value1.FPNeg();
 
             return FPMulAdd(valueA, value1, value2);
@@ -1138,6 +1136,21 @@ namespace ARMeilleure.Instructions
             return result;
         }
 
+        public static float FPNegMulAdd(float valueA, float value1, float value2)
+        {
+            valueA = valueA.FPNeg();
+            value1 = value1.FPNeg();
+
+            return FPMulAdd(valueA, value1, value2);
+        }
+
+        public static float FPNegMulSub(float valueA, float value1, float value2)
+        {
+            valueA = valueA.FPNeg();
+
+            return FPMulAdd(valueA, value1, value2);
+        }
+
         public static float FPRecipEstimate(float value)
         {
             ExecutionContext context = NativeInterface.GetContext();
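Reviewer note: a quick numeric check (mine; the real FPMulAdd handles rounding, NaNs and flags through the soft-float context, which this model elides) of the identities the new helpers encode:

    // FPNegMulAdd(a, n, m) = FPMulAdd(-a, -n, m) = -a + (-n * m);
    // FPNegMulSub(a, n, m) = FPMulAdd(-a,  n, m) = -a + ( n * m).
    using System;

    class NegMulDemo
    {
        static float FPMulAdd(float a, float x, float y) => a + x * y;

        static float FPNegMulAdd(float a, float x, float y) => FPMulAdd(-a, -x, y);
        static float FPNegMulSub(float a, float x, float y) => FPMulAdd(-a, x, y);

        static void Main()
        {
            Console.WriteLine(FPNegMulAdd(1f, 2f, 3f)); // -7
            Console.WriteLine(FPNegMulSub(1f, 2f, 3f)); // 5
        }
    }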
@@ -2196,6 +2209,21 @@ namespace ARMeilleure.Instructions
             return result;
         }
 
+        public static double FPNegMulAdd(double valueA, double value1, double value2)
+        {
+            valueA = valueA.FPNeg();
+            value1 = value1.FPNeg();
+
+            return FPMulAdd(valueA, value1, value2);
+        }
+
+        public static double FPNegMulSub(double valueA, double value1, double value2)
+        {
+            valueA = valueA.FPNeg();
+
+            return FPMulAdd(valueA, value1, value2);
+        }
+
         public static double FPRecipEstimate(double value)
         {
             ExecutionContext context = NativeInterface.GetContext();
@@ -8,6 +8,10 @@ namespace ARMeilleure.IntermediateRepresentation
         X86Addss,
         X86Andnpd,
         X86Andnps,
+        X86Andpd,
+        X86Andps,
+        X86Blendvpd,
+        X86Blendvps,
         X86Cmppd,
         X86Cmpps,
         X86Cmpsd,