early-access version 1536
@@ -440,13 +440,13 @@ void EmitX64::EmitVectorAnd(EmitContext& ctx, IR::Inst* inst) {
 static void ArithmeticShiftRightByte(EmitContext& ctx, BlockOfCode& code, const Xbyak::Xmm& result, u8 shift_amount) {
     if (code.HasAVX512_Icelake()) {
         // Do a logical shift right upon the 8x8 bit-matrix, but shift in
         // `0x80` bytes into the matrix to repeat the most significant bit.
-        const u64 zero_extend = ~(0xFFFFFFFFFFFFFFFF << (shift_amount * 8)) & 0x8080808080808080;
-        const u64 shift_matrix = (0x0102040810204080 >> (shift_amount * 8)) | zero_extend;
+        const u64 shift_matrix = shift_amount < 8
+                                     ? (0x0102040810204080 << (shift_amount * 8)) | (0x8080808080808080 >> (64 - shift_amount * 8))
+                                     : 0x8080808080808080;
         code.vgf2p8affineqb(result, result, code.MConst(xword_b, shift_matrix), 0);
         return;
     }
 
     const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
     code.punpckhbw(tmp, result);
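For context (not part of the diff): gf2p8affineqb applies, to every byte of its first source, the GF(2) affine transform whose result bit i is the parity of (matrix byte 7 - i AND source byte), XORed with bit i of the immediate. The scalar sketch below is purely illustrative, with helper names invented here; it checks that the matrix constructed by the patched branch reproduces a plain per-byte arithmetic shift right for shift amounts 1 through 8.

#include <cassert>
#include <cstdint>

// Parity of the set bits in one byte.
static int parity(uint8_t v) {
    v ^= v >> 4;
    v ^= v >> 2;
    v ^= v >> 1;
    return v & 1;
}

// Per-byte affine transform of gf2p8affineqb with imm8 = 0:
// result bit i = parity(matrix byte [7 - i] AND source byte).
static uint8_t affine_byte(uint64_t matrix, uint8_t src) {
    uint8_t result = 0;
    for (int i = 0; i < 8; ++i) {
        const uint8_t row = static_cast<uint8_t>(matrix >> ((7 - i) * 8));
        result |= static_cast<uint8_t>(parity(row & src) << i);
    }
    return result;
}

int main() {
    for (unsigned shift = 1; shift <= 8; ++shift) {
        // The matrix constructed by the patched branch.
        const uint64_t matrix = shift < 8
            ? (0x0102040810204080ULL << (shift * 8)) | (0x8080808080808080ULL >> (64 - shift * 8))
            : 0x8080808080808080ULL;
        for (unsigned x = 0; x < 256; ++x) {
            // Reference arithmetic shift right (relies on the usual arithmetic
            // shift of signed values); shifts of 7 or more leave only copies of the sign bit.
            const unsigned s = shift < 7 ? shift : 7;
            const uint8_t expected = static_cast<uint8_t>(static_cast<int8_t>(x) >> s);
            assert(affine_byte(matrix, static_cast<uint8_t>(x)) == expected);
        }
    }
    return 0;
}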
@@ -1465,20 +1465,21 @@ void EmitX64::EmitVectorLogicalShiftLeft8(EmitContext& ctx, IR::Inst* inst) {
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const u8 shift_amount = args[1].GetImmediateU8();
 
-    if (shift_amount == 1) {
+    if (shift_amount == 0) {
+        // do nothing
+    } else if (shift_amount >= 8) {
+        code.pxor(result, result);
+    } else if (shift_amount == 1) {
         code.paddb(result, result);
-    } else if (shift_amount > 0) {
-        if (code.HasAVX512_Icelake()) {
-            // Galois 8x8 identity matrix, bit-shifted by the shift-amount
-            const u64 shift_matrix = 0x0102040810204080 >> (shift_amount * 8);
-            code.vgf2p8affineqb(result, result, code.MConst(xword_b, shift_matrix), 0);
-        } else {
-            const u64 replicand = (0xFFULL << shift_amount) & 0xFF;
-            const u64 mask = Common::Replicate(replicand, Common::BitSize<u8>());
+    } else if (code.HasAVX512_Icelake()) {
+        const u64 shift_matrix = 0x0102040810204080 >> (shift_amount * 8);
+        code.vgf2p8affineqb(result, result, code.MConst(xword_b, shift_matrix), 0);
+    } else {
+        const u64 replicand = (0xFFULL << shift_amount) & 0xFF;
+        const u64 mask = Common::Replicate(replicand, Common::BitSize<u8>());
 
-            code.psllw(result, shift_amount);
-            code.pand(result, code.MConst(xword, mask, mask));
-        }
+        code.psllw(result, shift_amount);
+        code.pand(result, code.MConst(xword, mask, mask));
     }
 
     ctx.reg_alloc.DefineValue(inst, result);
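The non-GFNI fallback compensates for SSE having no per-byte shift: psllw shifts whole 16-bit lanes, and the pand with Common::Replicate of (0xFF << shift_amount) & 0xFF clears the bits that spilled from the low byte of each lane into its high byte. The sketch below is not part of the commit; it models a single 16-bit lane in scalar code (names invented here) for the shift amounts 2..7 that can actually reach this branch.

#include <cassert>
#include <cstdint>

// Scalar model of one 16-bit lane of the psllw + pand fallback. The byte mask
// removes the bits that the word-sized shift moved across the byte boundary.
static uint16_t shift_left_bytes_via_word(uint16_t lane, unsigned shift) {
    const uint16_t shifted = static_cast<uint16_t>(lane << shift);              // psllw
    const uint8_t byte_mask = static_cast<uint8_t>((0xFFu << shift) & 0xFF);    // replicand
    const uint16_t mask = static_cast<uint16_t>((byte_mask << 8) | byte_mask);  // Common::Replicate over the lane
    return static_cast<uint16_t>(shifted & mask);                               // pand
}

int main() {
    for (unsigned shift = 2; shift < 8; ++shift) {  // shifts 0, 1 and >= 8 are handled earlier
        for (unsigned lo = 0; lo < 256; ++lo) {
            for (unsigned hi = 0; hi < 256; ++hi) {
                const uint16_t lane = static_cast<uint16_t>((hi << 8) | lo);
                const uint16_t out = shift_left_bytes_via_word(lane, shift);
                // Each byte must come out exactly as if it had been shifted on its own.
                assert(static_cast<uint8_t>(out) == static_cast<uint8_t>(lo << shift));
                assert(static_cast<uint8_t>(out >> 8) == static_cast<uint8_t>(hi << shift));
            }
        }
    }
    return 0;
}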
@@ -1523,18 +1524,19 @@ void EmitX64::EmitVectorLogicalShiftRight8(EmitContext& ctx, IR::Inst* inst) {
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const u8 shift_amount = args[1].GetImmediateU8();
 
-    if (shift_amount > 0) {
-        if (code.HasAVX512_Icelake()) {
-            // Galois 8x8 identity matrix, bit-shifted by the shift-amount
-            const u64 shift_matrix = 0x0102040810204080 << (shift_amount * 8);
-            code.vgf2p8affineqb(result, result, code.MConst(xword_b, shift_matrix), 0);
-        } else {
-            const u64 replicand = 0xFEULL >> shift_amount;
-            const u64 mask = Common::Replicate(replicand, Common::BitSize<u8>());
+    if (shift_amount == 0) {
+        // Do nothing
+    } else if (shift_amount >= 8) {
+        code.pxor(result, result);
+    } else if (code.HasAVX512_Icelake()) {
+        const u64 shift_matrix = 0x0102040810204080 << (shift_amount * 8);
+        code.vgf2p8affineqb(result, result, code.MConst(xword_b, shift_matrix), 0);
+    } else {
+        const u64 replicand = 0xFEULL >> shift_amount;
+        const u64 mask = Common::Replicate(replicand, Common::BitSize<u8>());
 
-            code.psrlw(result, shift_amount);
-            code.pand(result, code.MConst(xword, mask, mask));
-        }
+        code.psrlw(result, shift_amount);
+        code.pand(result, code.MConst(xword, mask, mask));
     }
 
     ctx.reg_alloc.DefineValue(inst, result);
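The right-shift fallback mirrors this: psrlw plus a mask of Common::Replicate(0xFE >> shift_amount, 8). For the shift amounts 1..7 that reach the branch, 0xFE >> shift_amount equals 0xFF >> shift_amount, since bit 0 is discarded by the shift anyway. Again a purely illustrative scalar model of one 16-bit lane, not taken from the commit:

#include <cassert>
#include <cstdint>

int main() {
    for (unsigned shift = 1; shift < 8; ++shift) {
        // The mask byte used by the fallback; identical to 0xFF >> shift for shift >= 1.
        const uint8_t byte_mask = static_cast<uint8_t>(0xFEu >> shift);
        assert(byte_mask == (0xFFu >> shift));
        const uint16_t mask = static_cast<uint16_t>((byte_mask << 8) | byte_mask);  // Common::Replicate

        for (unsigned lo = 0; lo < 256; ++lo) {
            for (unsigned hi = 0; hi < 256; ++hi) {
                const uint16_t lane = static_cast<uint16_t>((hi << 8) | lo);
                const uint16_t out = static_cast<uint16_t>((lane >> shift) & mask);  // psrlw + pand
                // Each byte behaves as if it had been logically shifted on its own.
                assert(static_cast<uint8_t>(out) == static_cast<uint8_t>(lo >> shift));
                assert(static_cast<uint8_t>(out >> 8) == static_cast<uint8_t>(hi >> shift));
            }
        }
    }
    return 0;
}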
@@ -2768,12 +2770,8 @@ void EmitX64::EmitVectorReverseBits(EmitContext& ctx, IR::Inst* inst) {
 
     const Xbyak::Xmm data = ctx.reg_alloc.UseScratchXmm(args[0]);
 
-    if (code.HasAVX512_Icelake() && code.HasSSSE3()) {
-        // GFNI(vgf2p8affineqb) and SSSE3(pshuf)
-        // Reverse bits within bytes
+    if (code.HasAVX512_Icelake()) {
         code.vgf2p8affineqb(data, data, code.MConst(xword_b, 0x8040201008040201), 0);
-        // Reverse bytes within vector
-        code.pshufb(data, code.MConst(xword, 0x0001020304050607, 0x08090a0b0c0d0e0f));
     } else {
         const Xbyak::Xmm high_nibble_reg = ctx.reg_alloc.ScratchXmm();
         code.movdqa(high_nibble_reg, code.MConst(xword, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0));
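As this hunk reads, the retained vgf2p8affineqb with the 0x8040201008040201 matrix mirrors the bits inside each byte, while the dropped pshufb had additionally reversed the byte order of the whole register. The sketch below is not from the commit; it models both effects on a single 64-bit lane (helper names invented here) to show the difference.

#include <cassert>
#include <cstdint>

// Mirror the bits inside each byte of a 64-bit lane: the effect of
// vgf2p8affineqb with the 0x8040201008040201 matrix, modelled in scalar code.
static uint64_t reverse_bits_per_byte(uint64_t v) {
    uint64_t out = 0;
    for (int byte = 0; byte < 8; ++byte) {
        const uint8_t b = static_cast<uint8_t>(v >> (byte * 8));
        uint8_t rev = 0;
        for (int bit = 0; bit < 8; ++bit) {
            rev = static_cast<uint8_t>((rev << 1) | ((b >> bit) & 1));
        }
        out |= static_cast<uint64_t>(rev) << (byte * 8);
    }
    return out;
}

// Reverse the byte order of a 64-bit lane: the effect the dropped pshufb had
// (modelled on 64 bits here rather than the full 128-bit register).
static uint64_t reverse_bytes(uint64_t v) {
    uint64_t out = 0;
    for (int byte = 0; byte < 8; ++byte) {
        out = (out << 8) | ((v >> (byte * 8)) & 0xFF);
    }
    return out;
}

int main() {
    // Kept behaviour: only the bits within each byte are mirrored.
    assert(reverse_bits_per_byte(0x0000000000000001ULL) == 0x0000000000000080ULL);

    // Combining both steps would instead mirror the whole lane end to end.
    const uint64_t x = 0x0123456789ABCDEFULL;
    uint64_t full_reverse = 0;
    for (int bit = 0; bit < 64; ++bit) {
        full_reverse |= ((x >> bit) & 1) << (63 - bit);
    }
    assert(reverse_bytes(reverse_bits_per_byte(x)) == full_reverse);
    return 0;
}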