early-access version 1255
externals/dynarmic/src/ir_opt/a32_constant_memory_reads_pass.cpp (vendored, executable file, 78 lines)
@@ -0,0 +1,78 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#include <dynarmic/A32/config.h>

#include "frontend/ir/basic_block.h"
#include "frontend/ir/opcodes.h"
#include "ir_opt/passes.h"

namespace Dynarmic::Optimization {

void A32ConstantMemoryReads(IR::Block& block, A32::UserCallbacks* cb) {
    for (auto& inst : block) {
        switch (inst.GetOpcode()) {
        case IR::Opcode::A32SetCFlag: {
            const IR::Value arg = inst.GetArg(0);
            if (!arg.IsImmediate() && arg.GetInst()->GetOpcode() == IR::Opcode::A32GetCFlag) {
                inst.Invalidate();
            }
            break;
        }
        case IR::Opcode::A32ReadMemory8: {
            if (!inst.AreAllArgsImmediates()) {
                break;
            }

            const u32 vaddr = inst.GetArg(0).GetU32();
            if (cb->IsReadOnlyMemory(vaddr)) {
                const u8 value_from_memory = cb->MemoryRead8(vaddr);
                inst.ReplaceUsesWith(IR::Value{value_from_memory});
            }
            break;
        }
        case IR::Opcode::A32ReadMemory16: {
            if (!inst.AreAllArgsImmediates()) {
                break;
            }

            const u32 vaddr = inst.GetArg(0).GetU32();
            if (cb->IsReadOnlyMemory(vaddr)) {
                const u16 value_from_memory = cb->MemoryRead16(vaddr);
                inst.ReplaceUsesWith(IR::Value{value_from_memory});
            }
            break;
        }
        case IR::Opcode::A32ReadMemory32: {
            if (!inst.AreAllArgsImmediates()) {
                break;
            }

            const u32 vaddr = inst.GetArg(0).GetU32();
            if (cb->IsReadOnlyMemory(vaddr)) {
                const u32 value_from_memory = cb->MemoryRead32(vaddr);
                inst.ReplaceUsesWith(IR::Value{value_from_memory});
            }
            break;
        }
        case IR::Opcode::A32ReadMemory64: {
            if (!inst.AreAllArgsImmediates()) {
                break;
            }

            const u32 vaddr = inst.GetArg(0).GetU32();
            if (cb->IsReadOnlyMemory(vaddr)) {
                const u64 value_from_memory = cb->MemoryRead64(vaddr);
                inst.ReplaceUsesWith(IR::Value{value_from_memory});
            }
            break;
        }
        default:
            break;
        }
    }
}

} // namespace Dynarmic::Optimization
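Note: the pass above only rewrites uses of a folded load; the load instruction itself remains in the block until dead-code elimination runs. A minimal driver sketch (assumed caller code, not part of this commit):

    #include "frontend/ir/basic_block.h"
    #include "ir_opt/passes.h"

    inline void FoldRomReads(Dynarmic::IR::Block& block, Dynarmic::A32::UserCallbacks* cb) {
        Dynarmic::Optimization::A32ConstantMemoryReads(block, cb);  // replace uses of ROM loads with immediates
        Dynarmic::Optimization::DeadCodeElimination(block);         // erase the now-unused read instructions
    }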
externals/dynarmic/src/ir_opt/a32_get_set_elimination_pass.cpp (vendored, executable file, 219 lines)
@@ -0,0 +1,219 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#include <array>

#include "common/assert.h"
#include "common/common_types.h"
#include "frontend/A32/types.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/opcodes.h"
#include "frontend/ir/value.h"
#include "ir_opt/passes.h"

namespace Dynarmic::Optimization {

void A32GetSetElimination(IR::Block& block) {
    using Iterator = IR::Block::iterator;
    struct RegisterInfo {
        IR::Value register_value;
        bool set_instruction_present = false;
        Iterator last_set_instruction;
    };
    std::array<RegisterInfo, 15> reg_info;
    std::array<RegisterInfo, 64> ext_reg_singles_info;
    std::array<RegisterInfo, 32> ext_reg_doubles_info;
    std::array<RegisterInfo, 32> ext_reg_vector_double_info;
    std::array<RegisterInfo, 16> ext_reg_vector_quad_info;
    struct CpsrInfo {
        RegisterInfo n;
        RegisterInfo z;
        RegisterInfo c;
        RegisterInfo v;
        RegisterInfo ge;
    } cpsr_info;

    const auto do_set = [&block](RegisterInfo& info, IR::Value value, Iterator set_inst) {
        if (info.set_instruction_present) {
            info.last_set_instruction->Invalidate();
            block.Instructions().erase(info.last_set_instruction);
        }

        info.register_value = value;
        info.set_instruction_present = true;
        info.last_set_instruction = set_inst;
    };

    const auto do_get = [](RegisterInfo& info, Iterator get_inst) {
        if (info.register_value.IsEmpty()) {
            info.register_value = IR::Value(&*get_inst);
            return;
        }
        get_inst->ReplaceUsesWith(info.register_value);
    };

    for (auto inst = block.begin(); inst != block.end(); ++inst) {
        switch (inst->GetOpcode()) {
        case IR::Opcode::A32SetRegister: {
            const A32::Reg reg = inst->GetArg(0).GetA32RegRef();
            if (reg == A32::Reg::PC) {
                break;
            }
            const auto reg_index = static_cast<size_t>(reg);
            do_set(reg_info[reg_index], inst->GetArg(1), inst);
            break;
        }
        case IR::Opcode::A32GetRegister: {
            const A32::Reg reg = inst->GetArg(0).GetA32RegRef();
            ASSERT(reg != A32::Reg::PC);
            const size_t reg_index = static_cast<size_t>(reg);
            do_get(reg_info[reg_index], inst);
            break;
        }
        case IR::Opcode::A32SetExtendedRegister32: {
            const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
            const size_t reg_index = A32::RegNumber(reg);
            do_set(ext_reg_singles_info[reg_index], inst->GetArg(1), inst);

            ext_reg_doubles_info[reg_index / 2] = {};
            ext_reg_vector_double_info[reg_index / 2] = {};
            ext_reg_vector_quad_info[reg_index / 4] = {};
            break;
        }
        case IR::Opcode::A32GetExtendedRegister32: {
            const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
            const size_t reg_index = A32::RegNumber(reg);
            do_get(ext_reg_singles_info[reg_index], inst);

            ext_reg_doubles_info[reg_index / 2] = {};
            ext_reg_vector_double_info[reg_index / 2] = {};
            ext_reg_vector_quad_info[reg_index / 4] = {};
            break;
        }
        case IR::Opcode::A32SetExtendedRegister64: {
            const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
            const size_t reg_index = A32::RegNumber(reg);
            do_set(ext_reg_doubles_info[reg_index], inst->GetArg(1), inst);

            ext_reg_singles_info[reg_index * 2 + 0] = {};
            ext_reg_singles_info[reg_index * 2 + 1] = {};
            ext_reg_vector_double_info[reg_index] = {};
            ext_reg_vector_quad_info[reg_index / 2] = {};
            break;
        }
        case IR::Opcode::A32GetExtendedRegister64: {
            const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
            const size_t reg_index = A32::RegNumber(reg);
            do_get(ext_reg_doubles_info[reg_index], inst);

            ext_reg_singles_info[reg_index * 2 + 0] = {};
            ext_reg_singles_info[reg_index * 2 + 1] = {};
            ext_reg_vector_double_info[reg_index] = {};
            ext_reg_vector_quad_info[reg_index / 2] = {};
            break;
        }
        case IR::Opcode::A32SetVector: {
            const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
            const size_t reg_index = A32::RegNumber(reg);
            if (A32::IsDoubleExtReg(reg)) {
                do_set(ext_reg_vector_double_info[reg_index], inst->GetArg(1), inst);

                ext_reg_singles_info[reg_index * 2 + 0] = {};
                ext_reg_singles_info[reg_index * 2 + 1] = {};
                ext_reg_doubles_info[reg_index] = {};
                ext_reg_vector_quad_info[reg_index / 2] = {};
            } else {
                DEBUG_ASSERT(A32::IsQuadExtReg(reg));

                do_set(ext_reg_vector_quad_info[reg_index], inst->GetArg(1), inst);

                ext_reg_singles_info[reg_index * 4 + 0] = {};
                ext_reg_singles_info[reg_index * 4 + 1] = {};
                ext_reg_singles_info[reg_index * 4 + 2] = {};
                ext_reg_singles_info[reg_index * 4 + 3] = {};
                ext_reg_doubles_info[reg_index * 2 + 0] = {};
                ext_reg_doubles_info[reg_index * 2 + 1] = {};
                ext_reg_vector_double_info[reg_index * 2 + 0] = {};
                ext_reg_vector_double_info[reg_index * 2 + 1] = {};
            }
            break;
        }
        case IR::Opcode::A32GetVector: {
            const A32::ExtReg reg = inst->GetArg(0).GetA32ExtRegRef();
            const size_t reg_index = A32::RegNumber(reg);
            if (A32::IsDoubleExtReg(reg)) {
                do_get(ext_reg_vector_double_info[reg_index], inst);

                ext_reg_singles_info[reg_index * 2 + 0] = {};
                ext_reg_singles_info[reg_index * 2 + 1] = {};
                ext_reg_doubles_info[reg_index] = {};
                ext_reg_vector_quad_info[reg_index / 2] = {};
            } else {
                DEBUG_ASSERT(A32::IsQuadExtReg(reg));

                do_get(ext_reg_vector_quad_info[reg_index], inst);

                ext_reg_singles_info[reg_index * 4 + 0] = {};
                ext_reg_singles_info[reg_index * 4 + 1] = {};
                ext_reg_singles_info[reg_index * 4 + 2] = {};
                ext_reg_singles_info[reg_index * 4 + 3] = {};
                ext_reg_doubles_info[reg_index * 2 + 0] = {};
                ext_reg_doubles_info[reg_index * 2 + 1] = {};
                ext_reg_vector_double_info[reg_index * 2 + 0] = {};
                ext_reg_vector_double_info[reg_index * 2 + 1] = {};
            }
            break;
        }
        case IR::Opcode::A32SetNFlag: {
            do_set(cpsr_info.n, inst->GetArg(0), inst);
            break;
        }
        case IR::Opcode::A32GetNFlag: {
            do_get(cpsr_info.n, inst);
            break;
        }
        case IR::Opcode::A32SetZFlag: {
            do_set(cpsr_info.z, inst->GetArg(0), inst);
            break;
        }
        case IR::Opcode::A32GetZFlag: {
            do_get(cpsr_info.z, inst);
            break;
        }
        case IR::Opcode::A32SetCFlag: {
            do_set(cpsr_info.c, inst->GetArg(0), inst);
            break;
        }
        case IR::Opcode::A32GetCFlag: {
            do_get(cpsr_info.c, inst);
            break;
        }
        case IR::Opcode::A32SetVFlag: {
            do_set(cpsr_info.v, inst->GetArg(0), inst);
            break;
        }
        case IR::Opcode::A32GetVFlag: {
            do_get(cpsr_info.v, inst);
            break;
        }
        case IR::Opcode::A32SetGEFlags: {
            do_set(cpsr_info.ge, inst->GetArg(0), inst);
            break;
        }
        case IR::Opcode::A32GetGEFlags: {
            do_get(cpsr_info.ge, inst);
            break;
        }
        default: {
            if (inst->ReadsFromCPSR() || inst->WritesToCPSR()) {
                cpsr_info = {};
            }
            break;
        }
        }
    }
}

} // namespace Dynarmic::Optimization
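Note on the invalidation arithmetic above: it encodes the VFP/NEON register aliasing rules, where single-precision Si overlaps double D(i/2), which in turn overlaps quad Q(i/4), so a set or get at one width must drop cached values at the overlapping widths. A minimal sanity check of the index mapping (illustrative, not from this commit):

    static_assert(5 / 2 == 2 && 5 / 4 == 1, "S5 overlaps D2, which overlaps Q1");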
externals/dynarmic/src/ir_opt/a64_callback_config_pass.cpp (vendored, executable file, 57 lines)
@@ -0,0 +1,57 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2018 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#include <dynarmic/A64/config.h>

#include "frontend/A64/ir_emitter.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/microinstruction.h"
#include "frontend/ir/opcodes.h"
#include "ir_opt/passes.h"

namespace Dynarmic::Optimization {

void A64CallbackConfigPass(IR::Block& block, const A64::UserConfig& conf) {
    if (conf.hook_data_cache_operations) {
        return;
    }

    for (auto& inst : block) {
        if (inst.GetOpcode() != IR::Opcode::A64DataCacheOperationRaised) {
            continue;
        }

        const auto op = static_cast<A64::DataCacheOperation>(inst.GetArg(0).GetU64());
        if (op == A64::DataCacheOperation::ZeroByVA) {
            A64::IREmitter ir{block};
            ir.SetInsertionPoint(&inst);

            size_t bytes = 4 << static_cast<size_t>(conf.dczid_el0 & 0b1111);
            IR::U64 addr{inst.GetArg(1)};

            const IR::U128 zero_u128 = ir.ZeroExtendToQuad(ir.Imm64(0));
            while (bytes >= 16) {
                ir.WriteMemory128(addr, zero_u128);
                addr = ir.Add(addr, ir.Imm64(16));
                bytes -= 16;
            }

            while (bytes >= 8) {
                ir.WriteMemory64(addr, ir.Imm64(0));
                addr = ir.Add(addr, ir.Imm64(8));
                bytes -= 8;
            }

            while (bytes >= 4) {
                ir.WriteMemory32(addr, ir.Imm32(0));
                addr = ir.Add(addr, ir.Imm64(4));
                bytes -= 4;
            }
        }
        inst.Invalidate();
    }
}

} // namespace Dynarmic::Optimization
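Note: a worked instance of the size computation above. The low four bits of DCZID_EL0 give log2 of the DC ZVA block size in 4-byte words, so the common field value of 4 yields 4 << 4 = 64 bytes, which the first loop then covers with four 16-byte stores:

    static_assert((4u << 4) == 64, "DCZID_EL0 low bits = 4 implies a 64-byte DC ZVA block");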
externals/dynarmic/src/ir_opt/a64_get_set_elimination_pass.cpp (vendored, executable file, 155 lines)
@@ -0,0 +1,155 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#include <array>

#include "common/common_types.h"
#include "frontend/A64/types.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/opcodes.h"
#include "frontend/ir/value.h"
#include "ir_opt/passes.h"

namespace Dynarmic::Optimization {

void A64GetSetElimination(IR::Block& block) {
    using Iterator = IR::Block::iterator;

    enum class TrackingType {
        W, X,
        S, D, Q,
        SP, NZCV, NZCVRaw,
    };
    struct RegisterInfo {
        IR::Value register_value;
        TrackingType tracking_type;
        bool set_instruction_present = false;
        Iterator last_set_instruction;
    };
    std::array<RegisterInfo, 31> reg_info;
    std::array<RegisterInfo, 32> vec_info;
    RegisterInfo sp_info;
    RegisterInfo nzcv_info;

    const auto do_set = [&block](RegisterInfo& info, IR::Value value, Iterator set_inst, TrackingType tracking_type) {
        if (info.set_instruction_present) {
            info.last_set_instruction->Invalidate();
            block.Instructions().erase(info.last_set_instruction);
        }

        info.register_value = value;
        info.tracking_type = tracking_type;
        info.set_instruction_present = true;
        info.last_set_instruction = set_inst;
    };

    const auto do_get = [](RegisterInfo& info, Iterator get_inst, TrackingType tracking_type) {
        const auto do_nothing = [&] {
            info = {};
            info.register_value = IR::Value(&*get_inst);
            info.tracking_type = tracking_type;
        };

        if (info.register_value.IsEmpty()) {
            do_nothing();
            return;
        }

        if (info.tracking_type == tracking_type) {
            get_inst->ReplaceUsesWith(info.register_value);
            return;
        }

        do_nothing();
    };

    for (auto inst = block.begin(); inst != block.end(); ++inst) {
        switch (inst->GetOpcode()) {
        case IR::Opcode::A64GetW: {
            const size_t index = A64::RegNumber(inst->GetArg(0).GetA64RegRef());
            do_get(reg_info.at(index), inst, TrackingType::W);
            break;
        }
        case IR::Opcode::A64GetX: {
            const size_t index = A64::RegNumber(inst->GetArg(0).GetA64RegRef());
            do_get(reg_info.at(index), inst, TrackingType::X);
            break;
        }
        case IR::Opcode::A64GetS: {
            const size_t index = A64::VecNumber(inst->GetArg(0).GetA64VecRef());
            do_get(vec_info.at(index), inst, TrackingType::S);
            break;
        }
        case IR::Opcode::A64GetD: {
            const size_t index = A64::VecNumber(inst->GetArg(0).GetA64VecRef());
            do_get(vec_info.at(index), inst, TrackingType::D);
            break;
        }
        case IR::Opcode::A64GetQ: {
            const size_t index = A64::VecNumber(inst->GetArg(0).GetA64VecRef());
            do_get(vec_info.at(index), inst, TrackingType::Q);
            break;
        }
        case IR::Opcode::A64GetSP: {
            do_get(sp_info, inst, TrackingType::SP);
            break;
        }
        case IR::Opcode::A64GetNZCVRaw: {
            do_get(nzcv_info, inst, TrackingType::NZCVRaw);
            break;
        }
        case IR::Opcode::A64SetW: {
            const size_t index = A64::RegNumber(inst->GetArg(0).GetA64RegRef());
            do_set(reg_info.at(index), inst->GetArg(1), inst, TrackingType::W);
            break;
        }
        case IR::Opcode::A64SetX: {
            const size_t index = A64::RegNumber(inst->GetArg(0).GetA64RegRef());
            do_set(reg_info.at(index), inst->GetArg(1), inst, TrackingType::X);
            break;
        }
        case IR::Opcode::A64SetS: {
            const size_t index = A64::VecNumber(inst->GetArg(0).GetA64VecRef());
            do_set(vec_info.at(index), inst->GetArg(1), inst, TrackingType::S);
            break;
        }
        case IR::Opcode::A64SetD: {
            const size_t index = A64::VecNumber(inst->GetArg(0).GetA64VecRef());
            do_set(vec_info.at(index), inst->GetArg(1), inst, TrackingType::D);
            break;
        }
        case IR::Opcode::A64SetQ: {
            const size_t index = A64::VecNumber(inst->GetArg(0).GetA64VecRef());
            do_set(vec_info.at(index), inst->GetArg(1), inst, TrackingType::Q);
            break;
        }
        case IR::Opcode::A64SetSP: {
            do_set(sp_info, inst->GetArg(0), inst, TrackingType::SP);
            break;
        }
        case IR::Opcode::A64SetNZCV: {
            do_set(nzcv_info, inst->GetArg(0), inst, TrackingType::NZCV);
            break;
        }
        case IR::Opcode::A64SetNZCVRaw: {
            do_set(nzcv_info, inst->GetArg(0), inst, TrackingType::NZCVRaw);
            break;
        }
        default: {
            if (inst->ReadsFromCPSR() || inst->WritesToCPSR()) {
                nzcv_info = {};
            }
            if (inst->ReadsFromCoreRegister() || inst->WritesToCoreRegister()) {
                reg_info = {};
                vec_info = {};
                sp_info = {};
            }
            break;
        }
        }
    }
}

} // namespace Dynarmic::Optimization
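Note: unlike the A32 pass above, each cached entry here carries a TrackingType, so a value is forwarded only to a get of the same width; mixed-width accesses fall back to caching the get itself. An illustrative sequence (hypothetical IR, not from this commit):

    //     %0 = A64SetX X0, %value      ; cached with TrackingType::X
    //     %1 = A64GetW X0              ; widths differ, so do_get() takes the
    //                                  ; do_nothing() path and %1 stays live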
externals/dynarmic/src/ir_opt/a64_merge_interpret_blocks.cpp (vendored, executable file, 52 lines)
@@ -0,0 +1,52 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2018 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#include <boost/variant/get.hpp>
#include <dynarmic/A64/config.h>

#include "common/common_types.h"
#include "frontend/A64/location_descriptor.h"
#include "frontend/A64/translate/translate.h"
#include "frontend/ir/basic_block.h"
#include "ir_opt/passes.h"

namespace Dynarmic::Optimization {

void A64MergeInterpretBlocksPass(IR::Block& block, A64::UserCallbacks* cb) {
    const auto is_interpret_instruction = [cb](A64::LocationDescriptor location) {
        const u32 instruction = cb->MemoryReadCode(location.PC());

        IR::Block new_block{location};
        A64::TranslateSingleInstruction(new_block, location, instruction);

        if (!new_block.Instructions().empty())
            return false;

        const IR::Terminal terminal = new_block.GetTerminal();
        if (auto term = boost::get<IR::Term::Interpret>(&terminal)) {
            return term->next == location;
        }

        return false;
    };

    IR::Terminal terminal = block.GetTerminal();
    auto term = boost::get<IR::Term::Interpret>(&terminal);
    if (!term)
        return;

    A64::LocationDescriptor location{term->next};
    size_t num_instructions = 1;

    while (is_interpret_instruction(location.AdvancePC(static_cast<int>(num_instructions * 4)))) {
        num_instructions++;
    }

    term->num_instructions = num_instructions;
    block.ReplaceTerminal(terminal);
    block.CycleCount() += num_instructions - 1;
}

} // namespace Dynarmic::Optimization
externals/dynarmic/src/ir_opt/constant_propagation_pass.cpp (vendored, executable file, 541 lines)
@@ -0,0 +1,541 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#include <optional>

#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/safe_ops.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/ir_emitter.h"
#include "frontend/ir/opcodes.h"
#include "ir_opt/passes.h"

namespace Dynarmic::Optimization {

using Op = Dynarmic::IR::Opcode;

namespace {

// Tiny helper to avoid the need to store based off the opcode
// bit size all over the place within folding functions.
void ReplaceUsesWith(IR::Inst& inst, bool is_32_bit, u64 value) {
    if (is_32_bit) {
        inst.ReplaceUsesWith(IR::Value{static_cast<u32>(value)});
    } else {
        inst.ReplaceUsesWith(IR::Value{value});
    }
}

IR::Value Value(bool is_32_bit, u64 value) {
    return is_32_bit ? IR::Value{static_cast<u32>(value)} : IR::Value{value};
}

template <typename ImmFn>
bool FoldCommutative(IR::Inst& inst, bool is_32_bit, ImmFn imm_fn) {
    const auto lhs = inst.GetArg(0);
    const auto rhs = inst.GetArg(1);

    const bool is_lhs_immediate = lhs.IsImmediate();
    const bool is_rhs_immediate = rhs.IsImmediate();

    if (is_lhs_immediate && is_rhs_immediate) {
        const u64 result = imm_fn(lhs.GetImmediateAsU64(), rhs.GetImmediateAsU64());
        ReplaceUsesWith(inst, is_32_bit, result);
        return false;
    }

    if (is_lhs_immediate && !is_rhs_immediate) {
        const IR::Inst* rhs_inst = rhs.GetInstRecursive();
        if (rhs_inst->GetOpcode() == inst.GetOpcode() && rhs_inst->GetArg(1).IsImmediate()) {
            const u64 combined = imm_fn(lhs.GetImmediateAsU64(), rhs_inst->GetArg(1).GetImmediateAsU64());
            inst.SetArg(0, rhs_inst->GetArg(0));
            inst.SetArg(1, Value(is_32_bit, combined));
        } else {
            // Normalize
            inst.SetArg(0, rhs);
            inst.SetArg(1, lhs);
        }
    }

    if (!is_lhs_immediate && is_rhs_immediate) {
        const IR::Inst* lhs_inst = lhs.GetInstRecursive();
        if (lhs_inst->GetOpcode() == inst.GetOpcode() && lhs_inst->GetArg(1).IsImmediate()) {
            const u64 combined = imm_fn(rhs.GetImmediateAsU64(), lhs_inst->GetArg(1).GetImmediateAsU64());
            inst.SetArg(0, lhs_inst->GetArg(0));
            inst.SetArg(1, Value(is_32_bit, combined));
        }
    }

    return true;
}

void FoldAdd(IR::Inst& inst, bool is_32_bit) {
    const auto lhs = inst.GetArg(0);
    const auto rhs = inst.GetArg(1);
    const auto carry = inst.GetArg(2);

    if (lhs.IsImmediate() && !rhs.IsImmediate()) {
        // Normalize
        inst.SetArg(0, rhs);
        inst.SetArg(1, lhs);
        FoldAdd(inst, is_32_bit);
        return;
    }

    if (inst.HasAssociatedPseudoOperation()) {
        return;
    }

    if (!lhs.IsImmediate() && rhs.IsImmediate()) {
        const IR::Inst* lhs_inst = lhs.GetInstRecursive();
        if (lhs_inst->GetOpcode() == inst.GetOpcode() && lhs_inst->GetArg(1).IsImmediate() && lhs_inst->GetArg(2).IsImmediate()) {
            const u64 combined = rhs.GetImmediateAsU64() + lhs_inst->GetArg(1).GetImmediateAsU64() + lhs_inst->GetArg(2).GetU1();
            inst.SetArg(0, lhs_inst->GetArg(0));
            inst.SetArg(1, Value(is_32_bit, combined));
            return;
        }
    }

    if (inst.AreAllArgsImmediates()) {
        const u64 result = lhs.GetImmediateAsU64() + rhs.GetImmediateAsU64() + carry.GetU1();
        ReplaceUsesWith(inst, is_32_bit, result);
        return;
    }
}

// Folds AND operations based on the following:
//
// 1. imm_x & imm_y -> result
// 2. x & 0 -> 0
// 3. 0 & y -> 0
// 4. x & y -> y (where x has all bits set to 1)
// 5. x & y -> x (where y has all bits set to 1)
//
void FoldAND(IR::Inst& inst, bool is_32_bit) {
    if (FoldCommutative(inst, is_32_bit, [](u64 a, u64 b) { return a & b; })) {
        const auto rhs = inst.GetArg(1);
        if (rhs.IsZero()) {
            ReplaceUsesWith(inst, is_32_bit, 0);
        } else if (rhs.HasAllBitsSet()) {
            inst.ReplaceUsesWith(inst.GetArg(0));
        }
    }
}

// Folds byte reversal opcodes based on the following:
//
// 1. imm -> swap(imm)
//
void FoldByteReverse(IR::Inst& inst, Op op) {
    const auto operand = inst.GetArg(0);

    if (!operand.IsImmediate()) {
        return;
    }

    if (op == Op::ByteReverseWord) {
        const u32 result = Common::Swap32(static_cast<u32>(operand.GetImmediateAsU64()));
        inst.ReplaceUsesWith(IR::Value{result});
    } else if (op == Op::ByteReverseHalf) {
        const u16 result = Common::Swap16(static_cast<u16>(operand.GetImmediateAsU64()));
        inst.ReplaceUsesWith(IR::Value{result});
    } else {
        const u64 result = Common::Swap64(operand.GetImmediateAsU64());
        inst.ReplaceUsesWith(IR::Value{result});
    }
}

// Folds division operations based on the following:
//
// 1. x / 0 -> 0 (NOTE: This is an ARM-specific behavior defined in the architecture reference manual)
// 2. imm_x / imm_y -> result
// 3. x / 1 -> x
//
void FoldDivide(IR::Inst& inst, bool is_32_bit, bool is_signed) {
    const auto rhs = inst.GetArg(1);

    if (rhs.IsZero()) {
        ReplaceUsesWith(inst, is_32_bit, 0);
        return;
    }

    const auto lhs = inst.GetArg(0);
    if (lhs.IsImmediate() && rhs.IsImmediate()) {
        if (is_signed) {
            const s64 result = lhs.GetImmediateAsS64() / rhs.GetImmediateAsS64();
            ReplaceUsesWith(inst, is_32_bit, static_cast<u64>(result));
        } else {
            const u64 result = lhs.GetImmediateAsU64() / rhs.GetImmediateAsU64();
            ReplaceUsesWith(inst, is_32_bit, result);
        }
    } else if (rhs.IsUnsignedImmediate(1)) {
        inst.ReplaceUsesWith(IR::Value{lhs});
    }
}

// Folds EOR operations based on the following:
//
// 1. imm_x ^ imm_y -> result
// 2. x ^ 0 -> x
// 3. 0 ^ y -> y
//
void FoldEOR(IR::Inst& inst, bool is_32_bit) {
    if (FoldCommutative(inst, is_32_bit, [](u64 a, u64 b) { return a ^ b; })) {
        const auto rhs = inst.GetArg(1);
        if (rhs.IsZero()) {
            inst.ReplaceUsesWith(inst.GetArg(0));
        }
    }
}

void FoldLeastSignificantByte(IR::Inst& inst) {
    if (!inst.AreAllArgsImmediates()) {
        return;
    }

    const auto operand = inst.GetArg(0);
    inst.ReplaceUsesWith(IR::Value{static_cast<u8>(operand.GetImmediateAsU64())});
}

void FoldLeastSignificantHalf(IR::Inst& inst) {
    if (!inst.AreAllArgsImmediates()) {
        return;
    }

    const auto operand = inst.GetArg(0);
    inst.ReplaceUsesWith(IR::Value{static_cast<u16>(operand.GetImmediateAsU64())});
}

void FoldLeastSignificantWord(IR::Inst& inst) {
    if (!inst.AreAllArgsImmediates()) {
        return;
    }

    const auto operand = inst.GetArg(0);
    inst.ReplaceUsesWith(IR::Value{static_cast<u32>(operand.GetImmediateAsU64())});
}

void FoldMostSignificantBit(IR::Inst& inst) {
    if (!inst.AreAllArgsImmediates()) {
        return;
    }

    const auto operand = inst.GetArg(0);
    inst.ReplaceUsesWith(IR::Value{(operand.GetImmediateAsU64() >> 31) != 0});
}

void FoldMostSignificantWord(IR::Inst& inst) {
    IR::Inst* carry_inst = inst.GetAssociatedPseudoOperation(Op::GetCarryFromOp);

    if (!inst.AreAllArgsImmediates()) {
        return;
    }

    const auto operand = inst.GetArg(0);
    if (carry_inst) {
        carry_inst->ReplaceUsesWith(IR::Value{Common::Bit<31>(operand.GetImmediateAsU64())});
    }
    inst.ReplaceUsesWith(IR::Value{static_cast<u32>(operand.GetImmediateAsU64() >> 32)});
}

// Folds multiplication operations based on the following:
//
// 1. imm_x * imm_y -> result
// 2. x * 0 -> 0
// 3. 0 * y -> 0
// 4. x * 1 -> x
// 5. 1 * y -> y
//
void FoldMultiply(IR::Inst& inst, bool is_32_bit) {
    if (FoldCommutative(inst, is_32_bit, [](u64 a, u64 b) { return a * b; })) {
        const auto rhs = inst.GetArg(1);
        if (rhs.IsZero()) {
            ReplaceUsesWith(inst, is_32_bit, 0);
        } else if (rhs.IsUnsignedImmediate(1)) {
            inst.ReplaceUsesWith(inst.GetArg(0));
        }
    }
}

// Folds NOT operations if the contained value is an immediate.
void FoldNOT(IR::Inst& inst, bool is_32_bit) {
    const auto operand = inst.GetArg(0);

    if (!operand.IsImmediate()) {
        return;
    }

    const u64 result = ~operand.GetImmediateAsU64();
    ReplaceUsesWith(inst, is_32_bit, result);
}

// Folds OR operations based on the following:
//
// 1. imm_x | imm_y -> result
// 2. x | 0 -> x
// 3. 0 | y -> y
//
void FoldOR(IR::Inst& inst, bool is_32_bit) {
    if (FoldCommutative(inst, is_32_bit, [](u64 a, u64 b) { return a | b; })) {
        const auto rhs = inst.GetArg(1);
        if (rhs.IsZero()) {
            inst.ReplaceUsesWith(inst.GetArg(0));
        }
    }
}

bool FoldShifts(IR::Inst& inst) {
    IR::Inst* carry_inst = inst.GetAssociatedPseudoOperation(Op::GetCarryFromOp);

    // The 32-bit variants can contain 3 arguments, while the
    // 64-bit variants only contain 2.
    if (inst.NumArgs() == 3 && !carry_inst) {
        inst.SetArg(2, IR::Value(false));
    }

    const auto shift_amount = inst.GetArg(1);
    if (shift_amount.IsZero()) {
        if (carry_inst) {
            carry_inst->ReplaceUsesWith(inst.GetArg(2));
        }
        inst.ReplaceUsesWith(inst.GetArg(0));
        return false;
    }

    if (!inst.AreAllArgsImmediates() || carry_inst) {
        return false;
    }

    return true;
}

void FoldSignExtendXToWord(IR::Inst& inst) {
    if (!inst.AreAllArgsImmediates()) {
        return;
    }

    const s64 value = inst.GetArg(0).GetImmediateAsS64();
    inst.ReplaceUsesWith(IR::Value{static_cast<u32>(value)});
}

void FoldSignExtendXToLong(IR::Inst& inst) {
    if (!inst.AreAllArgsImmediates()) {
        return;
    }

    const s64 value = inst.GetArg(0).GetImmediateAsS64();
    inst.ReplaceUsesWith(IR::Value{static_cast<u64>(value)});
}

void FoldSub(IR::Inst& inst, bool is_32_bit) {
    if (!inst.AreAllArgsImmediates() || inst.HasAssociatedPseudoOperation()) {
        return;
    }

    const auto lhs = inst.GetArg(0);
    const auto rhs = inst.GetArg(1);
    const auto carry = inst.GetArg(2);

    const u64 result = lhs.GetImmediateAsU64() + (~rhs.GetImmediateAsU64()) + carry.GetU1();
    ReplaceUsesWith(inst, is_32_bit, result);
}

void FoldZeroExtendXToWord(IR::Inst& inst) {
    if (!inst.AreAllArgsImmediates()) {
        return;
    }

    const u64 value = inst.GetArg(0).GetImmediateAsU64();
    inst.ReplaceUsesWith(IR::Value{static_cast<u32>(value)});
}

void FoldZeroExtendXToLong(IR::Inst& inst) {
    if (!inst.AreAllArgsImmediates()) {
        return;
    }

    const u64 value = inst.GetArg(0).GetImmediateAsU64();
    inst.ReplaceUsesWith(IR::Value{value});
}
} // Anonymous namespace

void ConstantPropagation(IR::Block& block) {
    for (auto& inst : block) {
        const auto opcode = inst.GetOpcode();

        switch (opcode) {
        case Op::LeastSignificantWord:
            FoldLeastSignificantWord(inst);
            break;
        case Op::MostSignificantWord:
            FoldMostSignificantWord(inst);
            break;
        case Op::LeastSignificantHalf:
            FoldLeastSignificantHalf(inst);
            break;
        case Op::LeastSignificantByte:
            FoldLeastSignificantByte(inst);
            break;
        case Op::MostSignificantBit:
            FoldMostSignificantBit(inst);
            break;
        case Op::IsZero32:
            if (inst.AreAllArgsImmediates()) {
                inst.ReplaceUsesWith(IR::Value{inst.GetArg(0).GetU32() == 0});
            }
            break;
        case Op::IsZero64:
            if (inst.AreAllArgsImmediates()) {
                inst.ReplaceUsesWith(IR::Value{inst.GetArg(0).GetU64() == 0});
            }
            break;
        case Op::LogicalShiftLeft32:
            if (FoldShifts(inst)) {
                ReplaceUsesWith(inst, true, Safe::LogicalShiftLeft<u32>(inst.GetArg(0).GetU32(), inst.GetArg(1).GetU8()));
            }
            break;
        case Op::LogicalShiftLeft64:
            if (FoldShifts(inst)) {
                ReplaceUsesWith(inst, false, Safe::LogicalShiftLeft<u64>(inst.GetArg(0).GetU64(), inst.GetArg(1).GetU8()));
            }
            break;
        case Op::LogicalShiftRight32:
            if (FoldShifts(inst)) {
                ReplaceUsesWith(inst, true, Safe::LogicalShiftRight<u32>(inst.GetArg(0).GetU32(), inst.GetArg(1).GetU8()));
            }
            break;
        case Op::LogicalShiftRight64:
            if (FoldShifts(inst)) {
                ReplaceUsesWith(inst, false, Safe::LogicalShiftRight<u64>(inst.GetArg(0).GetU64(), inst.GetArg(1).GetU8()));
            }
            break;
        case Op::ArithmeticShiftRight32:
            if (FoldShifts(inst)) {
                ReplaceUsesWith(inst, true, Safe::ArithmeticShiftRight<u32>(inst.GetArg(0).GetU32(), inst.GetArg(1).GetU8()));
            }
            break;
        case Op::ArithmeticShiftRight64:
            if (FoldShifts(inst)) {
                ReplaceUsesWith(inst, false, Safe::ArithmeticShiftRight<u64>(inst.GetArg(0).GetU64(), inst.GetArg(1).GetU8()));
            }
            break;
        case Op::RotateRight32:
            if (FoldShifts(inst)) {
                ReplaceUsesWith(inst, true, Common::RotateRight<u32>(inst.GetArg(0).GetU32(), inst.GetArg(1).GetU8()));
            }
            break;
        case Op::RotateRight64:
            if (FoldShifts(inst)) {
                ReplaceUsesWith(inst, false, Common::RotateRight<u64>(inst.GetArg(0).GetU64(), inst.GetArg(1).GetU8()));
            }
            break;
        case Op::LogicalShiftLeftMasked32:
            if (inst.AreAllArgsImmediates()) {
                ReplaceUsesWith(inst, true, inst.GetArg(0).GetU32() << (inst.GetArg(1).GetU32() & 0x1f));
            }
            break;
        case Op::LogicalShiftLeftMasked64:
            if (inst.AreAllArgsImmediates()) {
                ReplaceUsesWith(inst, false, inst.GetArg(0).GetU64() << (inst.GetArg(1).GetU64() & 0x3f));
            }
            break;
        case Op::LogicalShiftRightMasked32:
            if (inst.AreAllArgsImmediates()) {
                ReplaceUsesWith(inst, true, inst.GetArg(0).GetU32() >> (inst.GetArg(1).GetU32() & 0x1f));
            }
            break;
        case Op::LogicalShiftRightMasked64:
            if (inst.AreAllArgsImmediates()) {
                ReplaceUsesWith(inst, false, inst.GetArg(0).GetU64() >> (inst.GetArg(1).GetU64() & 0x3f));
            }
            break;
        case Op::ArithmeticShiftRightMasked32:
            if (inst.AreAllArgsImmediates()) {
                ReplaceUsesWith(inst, true, static_cast<s32>(inst.GetArg(0).GetU32()) >> (inst.GetArg(1).GetU32() & 0x1f));
            }
            break;
        case Op::ArithmeticShiftRightMasked64:
            if (inst.AreAllArgsImmediates()) {
                ReplaceUsesWith(inst, false, static_cast<s64>(inst.GetArg(0).GetU64()) >> (inst.GetArg(1).GetU64() & 0x3f));
            }
            break;
        case Op::RotateRightMasked32:
            if (inst.AreAllArgsImmediates()) {
                ReplaceUsesWith(inst, true, Common::RotateRight<u32>(inst.GetArg(0).GetU32(), inst.GetArg(1).GetU32()));
            }
            break;
        case Op::RotateRightMasked64:
            if (inst.AreAllArgsImmediates()) {
                ReplaceUsesWith(inst, false, Common::RotateRight<u64>(inst.GetArg(0).GetU64(), inst.GetArg(1).GetU64()));
            }
            break;
        case Op::Add32:
        case Op::Add64:
            FoldAdd(inst, opcode == Op::Add32);
            break;
        case Op::Sub32:
        case Op::Sub64:
            FoldSub(inst, opcode == Op::Sub32);
            break;
        case Op::Mul32:
        case Op::Mul64:
            FoldMultiply(inst, opcode == Op::Mul32);
            break;
        case Op::SignedDiv32:
        case Op::SignedDiv64:
            FoldDivide(inst, opcode == Op::SignedDiv32, true);
            break;
        case Op::UnsignedDiv32:
        case Op::UnsignedDiv64:
            FoldDivide(inst, opcode == Op::UnsignedDiv32, false);
            break;
        case Op::And32:
        case Op::And64:
            FoldAND(inst, opcode == Op::And32);
            break;
        case Op::Eor32:
        case Op::Eor64:
            FoldEOR(inst, opcode == Op::Eor32);
            break;
        case Op::Or32:
        case Op::Or64:
            FoldOR(inst, opcode == Op::Or32);
            break;
        case Op::Not32:
        case Op::Not64:
            FoldNOT(inst, opcode == Op::Not32);
            break;
        case Op::SignExtendByteToWord:
        case Op::SignExtendHalfToWord:
            FoldSignExtendXToWord(inst);
            break;
        case Op::SignExtendByteToLong:
        case Op::SignExtendHalfToLong:
        case Op::SignExtendWordToLong:
            FoldSignExtendXToLong(inst);
            break;
        case Op::ZeroExtendByteToWord:
        case Op::ZeroExtendHalfToWord:
            FoldZeroExtendXToWord(inst);
            break;
        case Op::ZeroExtendByteToLong:
        case Op::ZeroExtendHalfToLong:
        case Op::ZeroExtendWordToLong:
            FoldZeroExtendXToLong(inst);
            break;
        case Op::ByteReverseWord:
        case Op::ByteReverseHalf:
        case Op::ByteReverseDual:
            FoldByteReverse(inst, opcode);
            break;
        default:
            break;
        }
    }
}

} // namespace Dynarmic::Optimization
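Note: FoldSub above evaluates a - b as a + ~b + carry, the two's-complement subtraction identity the IR uses (the carry supplies the +1 when no borrow is pending). A quick numeric check (illustrative values):

    static_assert(7u + ~3u + 1u == 4u, "a - b == a + ~b + 1 in two's complement");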
externals/dynarmic/src/ir_opt/dead_code_elimination_pass.cpp (vendored, executable file, 22 lines)
@@ -0,0 +1,22 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#include "common/iterator_util.h"
#include "frontend/ir/basic_block.h"
#include "ir_opt/passes.h"

namespace Dynarmic::Optimization {

void DeadCodeElimination(IR::Block& block) {
    // We iterate over the instructions in reverse order.
    // This is because removing an instruction reduces the number of uses for earlier instructions.
    for (auto& inst : Common::Reverse(block)) {
        if (!inst.HasUses() && !inst.MayHaveSideEffects()) {
            inst.Invalidate();
        }
    }
}

} // namespace Dynarmic::Optimization
externals/dynarmic/src/ir_opt/identity_removal_pass.cpp (vendored, executable file, 45 lines)
@@ -0,0 +1,45 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2020 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#include <vector>

#include "common/iterator_util.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/opcodes.h"
#include "ir_opt/passes.h"

namespace Dynarmic::Optimization {

void IdentityRemovalPass(IR::Block& block) {
    std::vector<IR::Inst*> to_invalidate;

    auto iter = block.begin();
    while (iter != block.end()) {
        IR::Inst& inst = *iter;

        const size_t num_args = inst.NumArgs();
        for (size_t i = 0; i < num_args; i++) {
            while (true) {
                IR::Value arg = inst.GetArg(i);
                if (!arg.IsIdentity())
                    break;
                inst.SetArg(i, arg.GetInst()->GetArg(0));
            }
        }

        if (inst.GetOpcode() == IR::Opcode::Identity || inst.GetOpcode() == IR::Opcode::Void) {
            iter = block.Instructions().erase(inst);
            to_invalidate.push_back(&inst);
        } else {
            ++iter;
        }
    }

    for (IR::Inst* inst : to_invalidate) {
        inst->Invalidate();
    }
}

} // namespace Dynarmic::Optimization
externals/dynarmic/src/ir_opt/ir_matcher.h (vendored, executable file, 127 lines)
@@ -0,0 +1,127 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2020 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

#include <optional>
#include <tuple>

#include <mp/metafunction/apply.h>
#include <mp/typelist/concat.h>
#include <mp/typelist/drop.h>
#include <mp/typelist/get.h>
#include <mp/typelist/head.h>
#include <mp/typelist/list.h>
#include <mp/typelist/prepend.h>

#include "frontend/ir/microinstruction.h"
#include "frontend/ir/opcodes.h"
#include "frontend/ir/value.h"

namespace Dynarmic::Optimization::IRMatcher {

struct CaptureValue {
    using ReturnType = std::tuple<IR::Value>;

    static std::optional<ReturnType> Match(IR::Value value) {
        return std::tuple(value);
    }
};

struct CaptureInst {
    using ReturnType = std::tuple<IR::Inst*>;

    static std::optional<ReturnType> Match(IR::Value value) {
        if (value.IsImmediate())
            return std::nullopt;
        return std::tuple(value.GetInstRecursive());
    }
};

struct CaptureUImm {
    using ReturnType = std::tuple<u64>;

    static std::optional<ReturnType> Match(IR::Value value) {
        return std::tuple(value.GetImmediateAsU64());
    }
};

struct CaptureSImm {
    using ReturnType = std::tuple<s64>;

    static std::optional<ReturnType> Match(IR::Value value) {
        return std::tuple(value.GetImmediateAsS64());
    }
};

template <u64 Value>
struct UImm {
    using ReturnType = std::tuple<>;

    static std::optional<std::tuple<>> Match(IR::Value value) {
        if (value.GetImmediateAsU64() == Value)
            return std::tuple();
        return std::nullopt;
    }
};

template <s64 Value>
struct SImm {
    using ReturnType = std::tuple<>;

    static std::optional<std::tuple<>> Match(IR::Value value) {
        if (value.GetImmediateAsS64() == Value)
            return std::tuple();
        return std::nullopt;
    }
};

template <IR::Opcode Opcode, typename... Args>
struct Inst {
public:
    using ReturnType = mp::concat<std::tuple<>, typename Args::ReturnType...>;

    static std::optional<ReturnType> Match(const IR::Inst& inst) {
        if (inst.GetOpcode() != Opcode)
            return std::nullopt;
        if (inst.HasAssociatedPseudoOperation())
            return std::nullopt;
        return MatchArgs<0>(inst);
    }

    static std::optional<ReturnType> Match(IR::Value value) {
        if (value.IsImmediate())
            return std::nullopt;
        return Match(*value.GetInstRecursive());
    }

private:
    template <size_t I>
    static auto MatchArgs(const IR::Inst& inst) -> std::optional<mp::apply<mp::concat, mp::prepend<mp::drop<I, mp::list<typename Args::ReturnType...>>, std::tuple<>>>> {
        if constexpr (I >= sizeof...(Args)) {
            return std::tuple();
        } else {
            using Arg = mp::get<I, mp::list<Args...>>;

            if (const auto arg = Arg::Match(inst.GetArg(I))) {
                if (const auto rest = MatchArgs<I + 1>(inst)) {
                    return std::tuple_cat(*arg, *rest);
                }
            }

            return std::nullopt;
        }
    }
};

inline bool IsSameInst(std::tuple<IR::Inst*, IR::Inst*> t) {
    return std::get<0>(t) == std::get<1>(t);
}

inline bool IsSameInst(std::tuple<IR::Inst*, IR::Inst*, IR::Inst*> t) {
    return std::get<0>(t) == std::get<1>(t) && std::get<0>(t) == std::get<2>(t);
}

} // namespace Dynarmic::Optimization::IRMatcher
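Note: a hedged usage sketch of the combinators above (the function name and context are illustrative, not part of this header). Inst patterns nest, and a successful match yields a tuple of all captures:

    #include <optional>

    #include "frontend/ir/value.h"
    #include "ir_opt/ir_matcher.h"

    // Recognizes Not32(Not32(x)) and returns the innermost value x.
    inline std::optional<Dynarmic::IR::Value> MatchDoubleNot(Dynarmic::IR::Value value) {
        using namespace Dynarmic::Optimization::IRMatcher;
        using Dynarmic::IR::Opcode;
        if (const auto match = Inst<Opcode::Not32, Inst<Opcode::Not32, CaptureValue>>::Match(value)) {
            const auto [inner] = *match;  // tuple of captures; here a single IR::Value
            return inner;
        }
        return std::nullopt;
    }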
externals/dynarmic/src/ir_opt/passes.h (vendored, executable file, 33 lines)
@@ -0,0 +1,33 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

namespace Dynarmic::A32 {
struct UserCallbacks;
}

namespace Dynarmic::A64 {
struct UserCallbacks;
struct UserConfig;
}

namespace Dynarmic::IR {
class Block;
}

namespace Dynarmic::Optimization {

void A32ConstantMemoryReads(IR::Block& block, A32::UserCallbacks* cb);
void A32GetSetElimination(IR::Block& block);
void A64CallbackConfigPass(IR::Block& block, const A64::UserConfig& conf);
void A64GetSetElimination(IR::Block& block);
void A64MergeInterpretBlocksPass(IR::Block& block, A64::UserCallbacks* cb);
void ConstantPropagation(IR::Block& block);
void DeadCodeElimination(IR::Block& block);
void IdentityRemovalPass(IR::Block& block);
void VerificationPass(const IR::Block& block);

} // namespace Dynarmic::Optimization
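Note: for context, a sketch of how a frontend might chain these passes after translating a block; the ordering here is an assumption for illustration, not something this header mandates:

    #include "frontend/ir/basic_block.h"
    #include "ir_opt/passes.h"

    inline void OptimizeA32Block(Dynarmic::IR::Block& block, Dynarmic::A32::UserCallbacks* cb) {
        namespace Opt = Dynarmic::Optimization;
        Opt::A32GetSetElimination(block);        // forward register values within the block
        Opt::ConstantPropagation(block);         // fold immediate-only computations
        Opt::A32ConstantMemoryReads(block, cb);  // fold loads from read-only memory
        Opt::IdentityRemovalPass(block);         // strip Identity chains left by earlier passes
        Opt::DeadCodeElimination(block);         // remove instructions with no remaining uses
        Opt::VerificationPass(block);            // debug sanity check of types and use counts
    }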
externals/dynarmic/src/ir_opt/verification_pass.cpp (vendored, executable file, 46 lines)
@@ -0,0 +1,46 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#include <cstdio>
#include <map>

#include "common/assert.h"
#include "common/common_types.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/microinstruction.h"
#include "frontend/ir/opcodes.h"
#include "frontend/ir/type.h"
#include "ir_opt/passes.h"

namespace Dynarmic::Optimization {

void VerificationPass(const IR::Block& block) {
    for (const auto& inst : block) {
        for (size_t i = 0; i < inst.NumArgs(); i++) {
            const IR::Type t1 = inst.GetArg(i).GetType();
            const IR::Type t2 = IR::GetArgTypeOf(inst.GetOpcode(), i);
            if (!IR::AreTypesCompatible(t1, t2)) {
                std::puts(IR::DumpBlock(block).c_str());
                ASSERT_FALSE("above block failed validation");
            }
        }
    }

    std::map<IR::Inst*, size_t> actual_uses;
    for (const auto& inst : block) {
        for (size_t i = 0; i < inst.NumArgs(); i++) {
            const auto arg = inst.GetArg(i);
            if (!arg.IsImmediate()) {
                actual_uses[arg.GetInst()]++;
            }
        }
    }

    for (const auto& pair : actual_uses) {
        ASSERT(pair.first->UseCount() == pair.second);
    }
}

} // namespace Dynarmic::Optimization