early-access version 1255
115
externals/dynarmic/include/dynarmic/A32/a32.h
vendored
Executable file
@@ -0,0 +1,115 @@
@@ -0,0 +1,115 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>

#include <dynarmic/A32/config.h>

namespace Dynarmic {
namespace A32 {

struct Context;

class Jit final {
public:
    explicit Jit(UserConfig conf);
    ~Jit();

    /**
     * Runs the emulated CPU.
     * Cannot be recursively called.
     */
    void Run();

    /**
     * Steps the emulated CPU.
     * Cannot be recursively called.
     */
    void Step();

    /**
     * Clears the code cache of all compiled code.
     * Can be called at any time. Halts execution if called within a callback.
     */
    void ClearCache();

    /**
     * Invalidate the code cache at a range of addresses.
     * @param start_address The starting address of the range to invalidate.
     * @param length The length (in bytes) of the range to invalidate.
     */
    void InvalidateCacheRange(std::uint32_t start_address, std::size_t length);

    /**
     * Reset CPU state to state at startup. Does not clear code cache.
     * Cannot be called from a callback.
     */
    void Reset();

    /**
     * Stops execution in Jit::Run.
     * Can only be called from a callback.
     */
    void HaltExecution();

    /**
     * HACK:
     * Exits execution from a callback; the callback must rewind the stack or
     * never return to dynarmic from its current stack.
     */
    void ExceptionalExit();

    /// HACK: Change processor ID.
    void ChangeProcessorID(std::size_t new_processor);

    /// View and modify registers.
    std::array<std::uint32_t, 16>& Regs();
    const std::array<std::uint32_t, 16>& Regs() const;
    std::array<std::uint32_t, 64>& ExtRegs();
    const std::array<std::uint32_t, 64>& ExtRegs() const;

    /// View and modify CPSR.
    std::uint32_t Cpsr() const;
    void SetCpsr(std::uint32_t value);

    /// View and modify FPSCR.
    std::uint32_t Fpscr() const;
    void SetFpscr(std::uint32_t value);

    Context SaveContext() const;
    void SaveContext(Context&) const;
    void LoadContext(const Context&);

    /// Clears exclusive state for this core.
    void ClearExclusiveState();

    /**
     * Returns true if Jit::Run was called but hasn't returned yet.
     * i.e.: We're in a callback.
     */
    bool IsExecuting() const {
        return is_executing;
    }

    /**
     * Debugging: Disassemble all compiled code.
     * @return A string containing disassembly of all host machine code produced.
     */
    std::string Disassemble() const;

private:
    bool is_executing = false;

    struct Impl;
    std::unique_ptr<Impl> impl;
};

} // namespace A32
} // namespace Dynarmic
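
A minimal sketch (not part of the commit) of how this A32 Jit interface is typically driven: construct a UserConfig pointing at a UserCallbacks implementation, seed PC and CPSR, then call Run(). The callbacks object here is any implementation of A32::UserCallbacks (one is sketched after config.h below); the start address and CPSR value 0x000001d0 are illustrative assumptions, not values mandated by the library.

#include <dynarmic/A32/a32.h>
#include <dynarmic/A32/config.h>

void RunGuest(Dynarmic::A32::UserCallbacks& env) {
    Dynarmic::A32::UserConfig config;
    config.callbacks = &env;

    Dynarmic::A32::Jit jit{config};

    jit.Regs()[15] = 0x00000000;  // R15 is the PC: start executing at guest address 0 (assumed)
    jit.SetCpsr(0x000001d0);      // assumed user-mode CPSR value; adjust for your guest

    // Run() returns once a callback halts execution, e.g. via HaltExecution()
    // when GetTicksRemaining() is exhausted or the guest reaches an SVC you handle.
    jit.Run();
}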
184
externals/dynarmic/include/dynarmic/A32/config.h
vendored
Executable file
@@ -0,0 +1,184 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>

#include <dynarmic/optimization_flags.h>

namespace Dynarmic {
class ExclusiveMonitor;
} // namespace Dynarmic

namespace Dynarmic {
namespace A32 {

using VAddr = std::uint32_t;

class Coprocessor;

enum class Exception {
    /// An UndefinedFault occurred due to executing an instruction with an unallocated encoding.
    UndefinedInstruction,
    /// An unpredictable instruction is to be executed. Implementation-defined behaviour should now happen.
    /// This behaviour is up to the user of this library to define.
    UnpredictableInstruction,
    /// A decode error occurred when decoding this instruction. This should never happen.
    DecodeError,
    /// A SEV instruction was executed. The event register of all PEs should be set. (Hint instruction.)
    SendEvent,
    /// A SEVL instruction was executed. The event register of the current PE should be set. (Hint instruction.)
    SendEventLocal,
    /// A WFI instruction was executed. You may now enter a low-power state. (Hint instruction.)
    WaitForInterrupt,
    /// A WFE instruction was executed. You may now enter a low-power state if the event register is clear. (Hint instruction.)
    WaitForEvent,
    /// A YIELD instruction was executed. (Hint instruction.)
    Yield,
    /// A BKPT instruction was executed.
    Breakpoint,
    /// A PLD instruction was executed. (Hint instruction.)
    PreloadData,
    /// A PLDW instruction was executed. (Hint instruction.)
    PreloadDataWithIntentToWrite,
};

/// These function pointers may be inserted into compiled code.
struct UserCallbacks {
    virtual ~UserCallbacks() = default;

    // All reads through this callback are 4-byte aligned.
    // Memory must be interpreted as little endian.
    virtual std::uint32_t MemoryReadCode(VAddr vaddr) { return MemoryRead32(vaddr); }

    // Reads through these callbacks may not be aligned.
    // Memory must be interpreted as if ENDIANSTATE == 0; endianness will be corrected by the JIT.
    virtual std::uint8_t MemoryRead8(VAddr vaddr) = 0;
    virtual std::uint16_t MemoryRead16(VAddr vaddr) = 0;
    virtual std::uint32_t MemoryRead32(VAddr vaddr) = 0;
    virtual std::uint64_t MemoryRead64(VAddr vaddr) = 0;

    // Writes through these callbacks may not be aligned.
    virtual void MemoryWrite8(VAddr vaddr, std::uint8_t value) = 0;
    virtual void MemoryWrite16(VAddr vaddr, std::uint16_t value) = 0;
    virtual void MemoryWrite32(VAddr vaddr, std::uint32_t value) = 0;
    virtual void MemoryWrite64(VAddr vaddr, std::uint64_t value) = 0;

    // Writes through these callbacks may not be aligned.
    virtual bool MemoryWriteExclusive8(VAddr /*vaddr*/, std::uint8_t /*value*/, std::uint8_t /*expected*/) { return false; }
    virtual bool MemoryWriteExclusive16(VAddr /*vaddr*/, std::uint16_t /*value*/, std::uint16_t /*expected*/) { return false; }
    virtual bool MemoryWriteExclusive32(VAddr /*vaddr*/, std::uint32_t /*value*/, std::uint32_t /*expected*/) { return false; }
    virtual bool MemoryWriteExclusive64(VAddr /*vaddr*/, std::uint64_t /*value*/, std::uint64_t /*expected*/) { return false; }

    // If this callback returns true, the JIT will assume MemoryRead* callbacks will always
    // return the same value at any point in time for this vaddr. The JIT may use this information
    // in optimizations.
    // A conservative implementation that always returns false is safe.
    virtual bool IsReadOnlyMemory(VAddr /*vaddr*/) { return false; }

    /// The interpreter must execute exactly num_instructions starting from PC.
    virtual void InterpreterFallback(VAddr pc, size_t num_instructions) = 0;

    // This callback is called whenever a SVC instruction is executed.
    virtual void CallSVC(std::uint32_t swi) = 0;

    virtual void ExceptionRaised(VAddr pc, Exception exception) = 0;

    // Timing-related callbacks
    // `ticks` ticks have passed
    virtual void AddTicks(std::uint64_t ticks) = 0;
    // How many more ticks am I allowed to execute?
    virtual std::uint64_t GetTicksRemaining() = 0;
};

struct UserConfig {
    UserCallbacks* callbacks;

    size_t processor_id = 0;
    ExclusiveMonitor* global_monitor = nullptr;

    /// This selects optimizations that can't otherwise be disabled by setting other
    /// configuration options. This includes:
    /// - IR optimizations
    /// - Block linking optimizations
    /// - RSB optimizations
    /// This is intended to be used for debugging.
    OptimizationFlag optimizations = all_safe_optimizations;

    bool HasOptimization(OptimizationFlag f) const {
        if (!unsafe_optimizations) {
            f &= all_safe_optimizations;
        }
        return (f & optimizations) != no_optimizations;
    }

    /// This enables unsafe optimizations that reduce emulation accuracy in favour of speed.
    /// For safety, in order to enable unsafe optimizations you have to set BOTH this flag
    /// AND the appropriate flag bits above.
    /// The preferred and tested mode for this library is with unsafe optimizations disabled.
    bool unsafe_optimizations = false;

    // Page Table
    // The page table is used for faster memory access. If an entry in the table is nullptr,
    // the JIT will fall back to calling the MemoryRead*/MemoryWrite* callbacks.
    static constexpr std::size_t PAGE_BITS = 12;
    static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS);
    std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>* page_table = nullptr;
    /// Determines if the pointer in the page_table shall be offset locally or globally.
    /// 'false' will access page_table[addr >> bits][addr & mask]
    /// 'true' will access page_table[addr >> bits][addr]
    /// Note: page_table[addr >> bits] will still be checked to verify active pages,
    /// so there might be wrongly faulted pages which map to nullptr.
    /// This can be avoided by carefully allocating the memory region.
    bool absolute_offset_page_table = false;
    /// Determines if we should detect misaligned memory accesses made via page_table.
    /// Accesses that straddle page boundaries will fall back to the relevant memory callback.
    /// This value should be the required access sizes this applies to ORed together.
    /// To detect any access, use: 8 | 16 | 32 | 64.
    std::uint8_t detect_misaligned_access_via_page_table = 0;
    /// Determines if the above option only triggers when the misalignment straddles a
    /// page boundary.
    bool only_detect_misalignment_via_page_table_on_page_boundary = false;

    // Fastmem Pointer
    // This should point to the beginning of a 4GB address space which is arranged just like
    // what you wish for emulated memory to be. If the host page faults on an address, the JIT
    // will fall back to calling the MemoryRead*/MemoryWrite* callbacks.
    void* fastmem_pointer = nullptr;
    /// Determines if instructions that pagefault should cause recompilation of that block
    /// with fastmem disabled.
    bool recompile_on_fastmem_failure = true;

    // Coprocessors
    std::array<std::shared_ptr<Coprocessor>, 16> coprocessors{};

    /// If true, hint instructions will cause ExceptionRaised to be called with the
    /// appropriate argument.
    bool hook_hint_instructions = false;

    /// This option relates to translation. Generally when we run into an unpredictable
    /// instruction the ExceptionRaised callback is called. If this is true, we define
    /// definite behaviour for some unpredictable instructions.
    bool define_unpredictable_behaviour = false;

    /// HACK:
    /// This tells the translator a wall clock will be used, thus allowing it
    /// to avoid writing certain unnecessary code only needed for cycle timers.
    bool wall_clock_cntpct = false;

    /// This option relates to the CPSR.E flag. Enabling this option disables modification
    /// of CPSR.E by the emulated program, forcing it to 0.
    /// NOTE: Calling Jit::SetCpsr with CPSR.E=1 while this option is enabled may result
    /// in unusual behavior.
    bool always_little_endian = false;
};

} // namespace A32
} // namespace Dynarmic
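
A minimal sketch (not part of the commit) of one way to satisfy the pure-virtual callbacks above: a flat 64 KiB little-endian RAM buffer and a simple tick budget. The class name MyEnvironment, the buffer size, and the empty SVC/exception handlers are assumptions for illustration only.

#include <array>
#include <cstdint>
#include <dynarmic/A32/config.h>

class MyEnvironment final : public Dynarmic::A32::UserCallbacks {
public:
    std::array<std::uint8_t, 0x10000> memory{};  // assumed flat guest RAM
    std::uint64_t ticks_left = 0;

    std::uint8_t MemoryRead8(Dynarmic::A32::VAddr vaddr) override {
        return memory[vaddr % memory.size()];
    }
    std::uint16_t MemoryRead16(Dynarmic::A32::VAddr vaddr) override {
        return std::uint16_t(MemoryRead8(vaddr)) | std::uint16_t(MemoryRead8(vaddr + 1)) << 8;
    }
    std::uint32_t MemoryRead32(Dynarmic::A32::VAddr vaddr) override {
        return std::uint32_t(MemoryRead16(vaddr)) | std::uint32_t(MemoryRead16(vaddr + 2)) << 16;
    }
    std::uint64_t MemoryRead64(Dynarmic::A32::VAddr vaddr) override {
        return std::uint64_t(MemoryRead32(vaddr)) | std::uint64_t(MemoryRead32(vaddr + 4)) << 32;
    }

    void MemoryWrite8(Dynarmic::A32::VAddr vaddr, std::uint8_t value) override {
        memory[vaddr % memory.size()] = value;
    }
    void MemoryWrite16(Dynarmic::A32::VAddr vaddr, std::uint16_t value) override {
        MemoryWrite8(vaddr, std::uint8_t(value));
        MemoryWrite8(vaddr + 1, std::uint8_t(value >> 8));
    }
    void MemoryWrite32(Dynarmic::A32::VAddr vaddr, std::uint32_t value) override {
        MemoryWrite16(vaddr, std::uint16_t(value));
        MemoryWrite16(vaddr + 2, std::uint16_t(value >> 16));
    }
    void MemoryWrite64(Dynarmic::A32::VAddr vaddr, std::uint64_t value) override {
        MemoryWrite32(vaddr, std::uint32_t(value));
        MemoryWrite32(vaddr + 4, std::uint32_t(value >> 32));
    }

    void InterpreterFallback(Dynarmic::A32::VAddr, size_t) override {}                 // not expected in this sketch
    void CallSVC(std::uint32_t) override {}                                            // guest SVCs are ignored here
    void ExceptionRaised(Dynarmic::A32::VAddr, Dynarmic::A32::Exception) override {}   // likewise ignored

    void AddTicks(std::uint64_t ticks) override {
        ticks_left = ticks >= ticks_left ? 0 : ticks_left - ticks;
    }
    std::uint64_t GetTicksRemaining() override { return ticks_left; }
};

An instance of this class is what UserConfig::callbacks would point at; the optional callbacks (MemoryReadCode, IsReadOnlyMemory, the exclusive writes) keep their safe defaults.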
45
externals/dynarmic/include/dynarmic/A32/context.h
vendored
Executable file
@@ -0,0 +1,45 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

#include <array>
#include <cstdint>
#include <memory>

namespace Dynarmic {
namespace A32 {

struct Context {
public:
    Context();
    ~Context();
    Context(const Context&);
    Context(Context&&) noexcept;
    Context& operator=(const Context&);
    Context& operator=(Context&&) noexcept;

    /// View and modify registers.
    std::array<std::uint32_t, 16>& Regs();
    const std::array<std::uint32_t, 16>& Regs() const;
    std::array<std::uint32_t, 64>& ExtRegs();
    const std::array<std::uint32_t, 64>& ExtRegs() const;

    /// View and modify CPSR.
    std::uint32_t Cpsr() const;
    void SetCpsr(std::uint32_t value);

    /// View and modify FPSCR.
    std::uint32_t Fpscr() const;
    void SetFpscr(std::uint32_t value);

private:
    friend class Jit;
    struct Impl;
    std::unique_ptr<Impl> impl;
};

} // namespace A32
} // namespace Dynarmic
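
A short sketch (not part of the commit) of how Context pairs with Jit::SaveContext/LoadContext from a32.h, e.g. when multiplexing several guest threads onto one Jit instance. The function name and the idea of per-thread Context objects are assumptions for illustration.

#include <dynarmic/A32/a32.h>
#include <dynarmic/A32/context.h>

void SwitchGuestThread(Dynarmic::A32::Jit& jit,
                       Dynarmic::A32::Context& current,
                       const Dynarmic::A32::Context& next) {
    jit.SaveContext(current);  // capture registers, CPSR and FPSCR of the running guest thread
    jit.LoadContext(next);     // restore the state previously saved for the next guest thread
}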
110
externals/dynarmic/include/dynarmic/A32/coprocessor.h
vendored
Executable file
@@ -0,0 +1,110 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

#include <cstdint>
#include <optional>
#include <variant>

#include <dynarmic/A32/coprocessor_util.h>

namespace Dynarmic {
namespace A32 {

class Jit;

class Coprocessor {
public:
    virtual ~Coprocessor() = default;

    struct Callback {
        /**
         * @param jit CPU state
         * @param user_arg Set to Callback::user_arg at runtime
         * @param arg0 Purpose of this argument depends on the type of callback.
         * @param arg1 Purpose of this argument depends on the type of callback.
         * @return Purpose of the return value depends on the type of callback.
         */
        std::uint64_t (*function)(Jit* jit, void* user_arg, std::uint32_t arg0, std::uint32_t arg1);
        /// If std::nullopt, function will be called with a user_arg parameter containing garbage.
        std::optional<void*> user_arg;
    };

    /**
     * std::monostate: a coprocessor exception will be compiled
     * Callback: a call to the Callback will be compiled
     * std::uint32_t*: a write/read to that memory address will be compiled
     */
    using CallbackOrAccessOneWord = std::variant<std::monostate, Callback, std::uint32_t*>;

    /**
     * std::monostate: a coprocessor exception will be compiled
     * Callback: a call to the Callback will be compiled
     * std::array<std::uint32_t*, 2>: a write/read to those memory addresses will be compiled
     */
    using CallbackOrAccessTwoWords = std::variant<std::monostate, Callback, std::array<std::uint32_t*, 2>>;

    /**
     * Called when compiling CDP or CDP2 for this coprocessor.
     * A return value of std::nullopt will cause a coprocessor exception to be compiled.
     * arg0, arg1 and the return value of the callback are ignored.
     */
    virtual std::optional<Callback> CompileInternalOperation(bool two, unsigned opc1, CoprocReg CRd, CoprocReg CRn, CoprocReg CRm, unsigned opc2) = 0;

    /**
     * Called when compiling MCR or MCR2 for this coprocessor.
     * A return value of std::monostate will cause a coprocessor exception to be compiled.
     * arg0 of the callback will contain the word sent to the coprocessor.
     * arg1 and the return value of the callback are ignored.
     */
    virtual CallbackOrAccessOneWord CompileSendOneWord(bool two, unsigned opc1, CoprocReg CRn, CoprocReg CRm, unsigned opc2) = 0;

    /**
     * Called when compiling MCRR or MCRR2 for this coprocessor.
     * A return value of std::monostate will cause a coprocessor exception to be compiled.
     * arg0 and arg1 of the callback will contain the words sent to the coprocessor.
     * The return value of the callback is ignored.
     */
    virtual CallbackOrAccessTwoWords CompileSendTwoWords(bool two, unsigned opc, CoprocReg CRm) = 0;

    /**
     * Called when compiling MRC or MRC2 for this coprocessor.
     * A return value of std::monostate will cause a coprocessor exception to be compiled.
     * The return value of the callback should contain the word from the coprocessor.
     * The low word of the return value will be stored in Rt.
     * arg0 and arg1 of the callback are ignored.
     */
    virtual CallbackOrAccessOneWord CompileGetOneWord(bool two, unsigned opc1, CoprocReg CRn, CoprocReg CRm, unsigned opc2) = 0;

    /**
     * Called when compiling MRRC or MRRC2 for this coprocessor.
     * A return value of std::monostate will cause a coprocessor exception to be compiled.
     * The return value of the callback should contain the words from the coprocessor.
     * The low word of the return value will be stored in Rt.
     * The high word of the return value will be stored in Rt2.
     * arg0 and arg1 of the callback are ignored.
     */
    virtual CallbackOrAccessTwoWords CompileGetTwoWords(bool two, unsigned opc, CoprocReg CRm) = 0;

    /**
     * Called when compiling LDC or LDC2 for this coprocessor.
     * A return value of std::nullopt will cause a coprocessor exception to be compiled.
     * arg0 of the callback will contain the start address.
     * arg1 and the return value of the callback are ignored.
     */
    virtual std::optional<Callback> CompileLoadWords(bool two, bool long_transfer, CoprocReg CRd, std::optional<std::uint8_t> option) = 0;

    /**
     * Called when compiling STC or STC2 for this coprocessor.
     * A return value of std::nullopt will cause a coprocessor exception to be compiled.
     * arg0 of the callback will contain the start address.
     * arg1 and the return value of the callback are ignored.
     */
    virtual std::optional<Callback> CompileStoreWords(bool two, bool long_transfer, CoprocReg CRd, std::optional<std::uint8_t> option) = 0;
};

} // namespace A32
} // namespace Dynarmic
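
A minimal sketch (not part of the commit) of a Coprocessor implementation that refuses every access, so the JIT compiles a coprocessor exception for each coprocessor instruction. A real CP15, for instance, would return a Callback or a std::uint32_t* for the registers it models. The class name StubCoprocessor is an assumption for illustration.

#include <cstdint>
#include <optional>
#include <dynarmic/A32/coprocessor.h>

class StubCoprocessor final : public Dynarmic::A32::Coprocessor {
public:
    std::optional<Callback> CompileInternalOperation(bool, unsigned, Dynarmic::A32::CoprocReg,
                                                     Dynarmic::A32::CoprocReg, Dynarmic::A32::CoprocReg,
                                                     unsigned) override {
        return std::nullopt;  // compile a coprocessor exception for CDP/CDP2
    }
    CallbackOrAccessOneWord CompileSendOneWord(bool, unsigned, Dynarmic::A32::CoprocReg,
                                               Dynarmic::A32::CoprocReg, unsigned) override {
        return std::monostate{};  // likewise for MCR/MCR2
    }
    CallbackOrAccessTwoWords CompileSendTwoWords(bool, unsigned, Dynarmic::A32::CoprocReg) override {
        return std::monostate{};  // MCRR/MCRR2
    }
    CallbackOrAccessOneWord CompileGetOneWord(bool, unsigned, Dynarmic::A32::CoprocReg,
                                              Dynarmic::A32::CoprocReg, unsigned) override {
        return std::monostate{};  // MRC/MRC2
    }
    CallbackOrAccessTwoWords CompileGetTwoWords(bool, unsigned, Dynarmic::A32::CoprocReg) override {
        return std::monostate{};  // MRRC/MRRC2
    }
    std::optional<Callback> CompileLoadWords(bool, bool, Dynarmic::A32::CoprocReg,
                                             std::optional<std::uint8_t>) override {
        return std::nullopt;  // LDC/LDC2
    }
    std::optional<Callback> CompileStoreWords(bool, bool, Dynarmic::A32::CoprocReg,
                                              std::optional<std::uint8_t>) override {
        return std::nullopt;  // STC/STC2
    }
};

// It would then be registered through the A32 UserConfig, e.g.:
//   config.coprocessors[15] = std::make_shared<StubCoprocessor>();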
16
externals/dynarmic/include/dynarmic/A32/coprocessor_util.h
vendored
Executable file
@@ -0,0 +1,16 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

namespace Dynarmic {
namespace A32 {

enum class CoprocReg {
    C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15
};

} // namespace A32
} // namespace Dynarmic
18
externals/dynarmic/include/dynarmic/A32/disassembler.h
vendored
Executable file
@@ -0,0 +1,18 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2016 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

#include <cstdint>
#include <string>

namespace Dynarmic {
namespace A32 {

std::string DisassembleArm(std::uint32_t instruction);
std::string DisassembleThumb16(std::uint16_t instruction);

} // namespace A32
} // namespace Dynarmic
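
A tiny sketch (not part of the commit) of calling the disassembler helpers above. The encoding 0xE0810002 is believed to decode to an ADD of r1 and r2 into r0; the exact formatting of the returned string is up to dynarmic.

#include <cstdio>
#include <string>
#include <dynarmic/A32/disassembler.h>

int main() {
    const std::string text = Dynarmic::A32::DisassembleArm(0xE0810002);  // assumed: add r0, r1, r2
    std::printf("%s\n", text.c_str());
}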
139
externals/dynarmic/include/dynarmic/A64/a64.h
vendored
Executable file
@@ -0,0 +1,139 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2018 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>

#include <dynarmic/A64/config.h>

namespace Dynarmic {
namespace A64 {

struct Context;

class Jit final {
public:
    explicit Jit(UserConfig conf);
    ~Jit();

    /**
     * Runs the emulated CPU.
     * Cannot be recursively called.
     */
    void Run();

    /**
     * Steps the emulated CPU for one instruction.
     * Cannot be recursively called.
     */
    void Step();

    /**
     * Clears the code cache of all compiled code.
     * Can be called at any time. Halts execution if called within a callback.
     */
    void ClearCache();

    /**
     * Invalidate the code cache at a range of addresses.
     * @param start_address The starting address of the range to invalidate.
     * @param length The length (in bytes) of the range to invalidate.
     */
    void InvalidateCacheRange(std::uint64_t start_address, std::size_t length);

    /**
     * Reset CPU state to state at startup. Does not clear code cache.
     * Cannot be called from a callback.
     */
    void Reset();

    /**
     * Stops execution in Jit::Run.
     * Can only be called from a callback.
     */
    void HaltExecution();

    /**
     * HACK:
     * Exits execution from a callback; the callback must rewind the stack or
     * never return to dynarmic from its current stack.
     */
    void ExceptionalExit();

    /// HACK: Change processor ID.
    void ChangeProcessorID(std::size_t new_processor);

    /// Read Stack Pointer
    std::uint64_t GetSP() const;
    /// Modify Stack Pointer
    void SetSP(std::uint64_t value);

    /// Read Program Counter
    std::uint64_t GetPC() const;
    /// Modify Program Counter
    void SetPC(std::uint64_t value);

    /// Read general-purpose register.
    std::uint64_t GetRegister(std::size_t index) const;
    /// Modify general-purpose register.
    void SetRegister(size_t index, std::uint64_t value);

    /// Read all general-purpose registers.
    std::array<std::uint64_t, 31> GetRegisters() const;
    /// Modify all general-purpose registers.
    void SetRegisters(const std::array<std::uint64_t, 31>& value);

    /// Read floating point and SIMD register.
    Vector GetVector(std::size_t index) const;
    /// Modify floating point and SIMD register.
    void SetVector(std::size_t index, Vector value);

    /// Read all floating point and SIMD registers.
    std::array<Vector, 32> GetVectors() const;
    /// Modify all floating point and SIMD registers.
    void SetVectors(const std::array<Vector, 32>& value);

    /// View FPCR.
    std::uint32_t GetFpcr() const;
    /// Modify FPCR.
    void SetFpcr(std::uint32_t value);

    /// View FPSR.
    std::uint32_t GetFpsr() const;
    /// Modify FPSR.
    void SetFpsr(std::uint32_t value);

    /// View PSTATE.
    std::uint32_t GetPstate() const;
    /// Modify PSTATE.
    void SetPstate(std::uint32_t value);

    /// Clears exclusive state for this core.
    void ClearExclusiveState();

    /**
     * Returns true if Jit::Run was called but hasn't returned yet.
     * i.e.: We're in a callback.
     */
    bool IsExecuting() const;

    /**
     * Debugging: Disassemble all compiled code.
     * @return A string containing disassembly of all host machine code produced.
     */
    std::string Disassemble() const;

private:
    struct Impl;
    std::unique_ptr<Impl> impl;
};

} // namespace A64
} // namespace Dynarmic
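
A minimal sketch (not part of the commit) of driving the A64 Jit declared above, mirroring the earlier A32 example. The env reference can be any A64::UserCallbacks implementation; the entry point, stack pointer value and X0 argument are illustrative assumptions.

#include <dynarmic/A64/a64.h>
#include <dynarmic/A64/config.h>

void RunGuest64(Dynarmic::A64::UserCallbacks& env) {
    Dynarmic::A64::UserConfig config;
    config.callbacks = &env;

    Dynarmic::A64::Jit jit{config};

    jit.SetPC(0x0000000000000000);  // assumed guest entry point
    jit.SetSP(0x0000000000010000);  // assumed top of a small guest stack
    jit.SetRegister(0, 42);         // pass an argument in X0

    jit.Run();                      // returns after a callback halts execution
}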
230
externals/dynarmic/include/dynarmic/A64/config.h
vendored
Executable file
@@ -0,0 +1,230 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2018 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>

#include <dynarmic/optimization_flags.h>

namespace Dynarmic {
class ExclusiveMonitor;
} // namespace Dynarmic

namespace Dynarmic {
namespace A64 {

using VAddr = std::uint64_t;

using Vector = std::array<std::uint64_t, 2>;
static_assert(sizeof(Vector) == sizeof(std::uint64_t) * 2, "Vector must be 128 bits in size");

enum class Exception {
    /// An UndefinedFault occurred due to executing an instruction with an unallocated encoding.
    UnallocatedEncoding,
    /// An UndefinedFault occurred due to executing an instruction containing a reserved value.
    ReservedValue,
    /// An unpredictable instruction is to be executed. Implementation-defined behaviour should now happen.
    /// This behaviour is up to the user of this library to define.
    /// Note: Constraints on unpredictable behaviour are specified in the ARMv8 ARM.
    UnpredictableInstruction,
    /// A WFI instruction was executed. You may now enter a low-power state. (Hint instruction.)
    WaitForInterrupt,
    /// A WFE instruction was executed. You may now enter a low-power state if the event register is clear. (Hint instruction.)
    WaitForEvent,
    /// A SEV instruction was executed. The event register of all PEs should be set. (Hint instruction.)
    SendEvent,
    /// A SEVL instruction was executed. The event register of the current PE should be set. (Hint instruction.)
    SendEventLocal,
    /// A YIELD instruction was executed. (Hint instruction.)
    Yield,
    /// A BRK instruction was executed.
    Breakpoint,
};

enum class DataCacheOperation {
    /// DC CISW
    CleanAndInvalidateBySetWay,
    /// DC CIVAC
    CleanAndInvalidateByVAToPoC,
    /// DC CSW
    CleanBySetWay,
    /// DC CVAC
    CleanByVAToPoC,
    /// DC CVAU
    CleanByVAToPoU,
    /// DC CVAP
    CleanByVAToPoP,
    /// DC ISW
    InvalidateBySetWay,
    /// DC IVAC
    InvalidateByVAToPoC,
    /// DC ZVA
    ZeroByVA,
};

struct UserCallbacks {
    virtual ~UserCallbacks() = default;

    // All reads through this callback are 4-byte aligned.
    // Memory must be interpreted as little endian.
    virtual std::uint32_t MemoryReadCode(VAddr vaddr) { return MemoryRead32(vaddr); }

    // Reads through these callbacks may not be aligned.
    virtual std::uint8_t MemoryRead8(VAddr vaddr) = 0;
    virtual std::uint16_t MemoryRead16(VAddr vaddr) = 0;
    virtual std::uint32_t MemoryRead32(VAddr vaddr) = 0;
    virtual std::uint64_t MemoryRead64(VAddr vaddr) = 0;
    virtual Vector MemoryRead128(VAddr vaddr) = 0;

    // Writes through these callbacks may not be aligned.
    virtual void MemoryWrite8(VAddr vaddr, std::uint8_t value) = 0;
    virtual void MemoryWrite16(VAddr vaddr, std::uint16_t value) = 0;
    virtual void MemoryWrite32(VAddr vaddr, std::uint32_t value) = 0;
    virtual void MemoryWrite64(VAddr vaddr, std::uint64_t value) = 0;
    virtual void MemoryWrite128(VAddr vaddr, Vector value) = 0;

    // Writes through these callbacks may not be aligned.
    virtual bool MemoryWriteExclusive8(VAddr /*vaddr*/, std::uint8_t /*value*/, std::uint8_t /*expected*/) { return false; }
    virtual bool MemoryWriteExclusive16(VAddr /*vaddr*/, std::uint16_t /*value*/, std::uint16_t /*expected*/) { return false; }
    virtual bool MemoryWriteExclusive32(VAddr /*vaddr*/, std::uint32_t /*value*/, std::uint32_t /*expected*/) { return false; }
    virtual bool MemoryWriteExclusive64(VAddr /*vaddr*/, std::uint64_t /*value*/, std::uint64_t /*expected*/) { return false; }
    virtual bool MemoryWriteExclusive128(VAddr /*vaddr*/, Vector /*value*/, Vector /*expected*/) { return false; }

    // If this callback returns true, the JIT will assume MemoryRead* callbacks will always
    // return the same value at any point in time for this vaddr. The JIT may use this information
    // in optimizations.
    // A conservative implementation that always returns false is safe.
    virtual bool IsReadOnlyMemory(VAddr /*vaddr*/) { return false; }

    /// The interpreter must execute exactly num_instructions starting from PC.
    virtual void InterpreterFallback(VAddr pc, size_t num_instructions) = 0;

    // This callback is called whenever a SVC instruction is executed.
    virtual void CallSVC(std::uint32_t swi) = 0;

    virtual void ExceptionRaised(VAddr pc, Exception exception) = 0;
    virtual void DataCacheOperationRaised(DataCacheOperation /*op*/, VAddr /*value*/) {}

    // Timing-related callbacks
    // `ticks` ticks have passed
    virtual void AddTicks(std::uint64_t ticks) = 0;
    // How many more ticks am I allowed to execute?
    virtual std::uint64_t GetTicksRemaining() = 0;
    // Get value in the emulated counter-timer physical count register.
    virtual std::uint64_t GetCNTPCT() = 0;
};

struct UserConfig {
    UserCallbacks* callbacks;

    size_t processor_id = 0;
    ExclusiveMonitor* global_monitor = nullptr;

    /// This selects optimizations that can't otherwise be disabled by setting other
    /// configuration options. This includes:
    /// - IR optimizations
    /// - Block linking optimizations
    /// - RSB optimizations
    /// This is intended to be used for debugging.
    OptimizationFlag optimizations = all_safe_optimizations;

    bool HasOptimization(OptimizationFlag f) const {
        if (!unsafe_optimizations) {
            f &= all_safe_optimizations;
        }
        return (f & optimizations) != no_optimizations;
    }

    /// This enables unsafe optimizations that reduce emulation accuracy in favour of speed.
    /// For safety, in order to enable unsafe optimizations you have to set BOTH this flag
    /// AND the appropriate flag bits above.
    /// The preferred and tested mode for this library is with unsafe optimizations disabled.
    bool unsafe_optimizations = false;

    /// When set to true, UserCallbacks::DataCacheOperationRaised will be called when any
    /// data cache instruction is executed. Notably DC ZVA will not implicitly do anything.
    /// When set to false, UserCallbacks::DataCacheOperationRaised will never be called.
    /// Executing DC ZVA in this mode will result in zeros being written to memory.
    bool hook_data_cache_operations = false;

    /// When set to true, UserCallbacks::ExceptionRaised will be called when any hint
    /// instruction is executed.
    bool hook_hint_instructions = false;

    /// Counter-timer frequency register. The value of the register is not interpreted by
    /// dynarmic.
    std::uint32_t cntfrq_el0 = 600000000;

    /// CTR_EL0<27:24> is log2 of the cache writeback granule in words.
    /// CTR_EL0<23:20> is log2 of the exclusives reservation granule in words.
    /// CTR_EL0<19:16> is log2 of the smallest data/unified cacheline in words.
    /// CTR_EL0<15:14> is the level 1 instruction cache policy.
    /// CTR_EL0<3:0> is log2 of the smallest instruction cacheline in words.
    std::uint32_t ctr_el0 = 0x8444c004;

    /// DCZID_EL0<3:0> is log2 of the block size in words.
    /// DCZID_EL0<4> is 0 if the DC ZVA instruction is permitted.
    std::uint32_t dczid_el0 = 4;

    /// Pointer to where TPIDRRO_EL0 is stored. This pointer will be inserted into
    /// emitted code.
    const std::uint64_t* tpidrro_el0 = nullptr;

    /// Pointer to where TPIDR_EL0 is stored. This pointer will be inserted into
    /// emitted code.
    const std::uint64_t* tpidr_el0 = nullptr;

    /// Pointer to the page table which we can use for direct page table access.
    /// If an entry in page_table is null, the relevant memory callback will be called.
    /// If page_table is nullptr, all memory accesses hit the memory callbacks.
    void** page_table = nullptr;
    /// Declares how many valid address bits there are in virtual addresses.
    /// Determines the size of page_table. Valid values are between 12 and 64 inclusive.
    /// This is only used if page_table is not nullptr.
    size_t page_table_address_space_bits = 36;
    /// Determines what happens if the guest accesses an entry that is off the end of the
    /// page table. If true, Dynarmic will silently mirror page_table's address space. If
    /// false, accessing memory outside of page_table bounds will result in a call to the
    /// relevant memory callback.
    /// This is only used if page_table is not nullptr.
    bool silently_mirror_page_table = true;
    /// Determines if the pointer in the page_table shall be offset locally or globally.
    /// 'false' will access page_table[addr >> bits][addr & mask]
    /// 'true' will access page_table[addr >> bits][addr]
    /// Note: page_table[addr >> bits] will still be checked to verify active pages,
    /// so there might be wrongly faulted pages which map to nullptr.
    /// This can be avoided by carefully allocating the memory region.
    bool absolute_offset_page_table = false;
    /// Determines if we should detect misaligned memory accesses made via page_table.
    /// Accesses that straddle page boundaries will fall back to the relevant memory callback.
    /// This value should be the required access sizes this applies to ORed together.
    /// To detect any access, use: 8 | 16 | 32 | 64 | 128.
    std::uint8_t detect_misaligned_access_via_page_table = 0;
    /// Determines if the above option only triggers when the misalignment straddles a
    /// page boundary.
    bool only_detect_misalignment_via_page_table_on_page_boundary = false;

    /// This option relates to translation. Generally when we run into an unpredictable
    /// instruction the ExceptionRaised callback is called. If this is true, we define
    /// definite behaviour for some unpredictable instructions.
    bool define_unpredictable_behaviour = false;

    /// HACK:
    /// This tells the translator a wall clock will be used, thus allowing it
    /// to avoid writing certain unnecessary code only needed for cycle timers.
    bool wall_clock_cntpct = false;

    // Determines whether AddTicks and GetTicksRemaining are called.
    // If false, execution will continue until soon after Jit::HaltExecution is called.
    // bool enable_ticks = true; // TODO
};

} // namespace A64
} // namespace Dynarmic
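
A sketch (not part of the commit) of wiring two A64 cores to one shared ExclusiveMonitor using the UserConfig fields above (processor_id, global_monitor). The env0/env1 references stand in for hypothetical UserCallbacks implementations; everything else comes from these headers.

#include <dynarmic/A64/a64.h>
#include <dynarmic/A64/config.h>
#include <dynarmic/exclusive_monitor.h>

void MakeTwoCores(Dynarmic::A64::UserCallbacks& env0, Dynarmic::A64::UserCallbacks& env1) {
    Dynarmic::ExclusiveMonitor monitor{2};  // one slot per emulated core

    Dynarmic::A64::UserConfig cfg0;
    cfg0.callbacks = &env0;
    cfg0.processor_id = 0;
    cfg0.global_monitor = &monitor;

    Dynarmic::A64::UserConfig cfg1 = cfg0;
    cfg1.callbacks = &env1;
    cfg1.processor_id = 1;

    Dynarmic::A64::Jit core0{cfg0};
    Dynarmic::A64::Jit core1{cfg1};

    // Exclusive load/store pairs executed on either core now arbitrate through the
    // shared monitor, as long as the monitor outlives both Jit objects.
}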
81
externals/dynarmic/include/dynarmic/exclusive_monitor.h
vendored
Executable file
@@ -0,0 +1,81 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2018 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

#include <atomic>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

namespace Dynarmic {

using VAddr = std::uint64_t;
using Vector = std::array<std::uint64_t, 2>;

class ExclusiveMonitor {
public:
    /// @param processor_count Maximum number of processors using this global
    ///                        exclusive monitor. Each processor must have a
    ///                        unique id.
    explicit ExclusiveMonitor(size_t processor_count);

    size_t GetProcessorCount() const;

    /// Marks a region containing [address, address+size) to be exclusive to
    /// processor processor_id.
    template <typename T, typename Function>
    T ReadAndMark(size_t processor_id, VAddr address, Function op) {
        static_assert(std::is_trivially_copyable_v<T>);
        const VAddr masked_address = address & RESERVATION_GRANULE_MASK;

        Lock();
        exclusive_addresses[processor_id] = masked_address;
        const T value = op();
        std::memcpy(exclusive_values[processor_id].data(), &value, sizeof(T));
        Unlock();
        return value;
    }

    /// Checks to see if processor processor_id has exclusive access to the
    /// specified region. If it does, executes the operation then clears
    /// the exclusive state for processors if their exclusive region(s)
    /// contain [address, address+size).
    template <typename T, typename Function>
    bool DoExclusiveOperation(size_t processor_id, VAddr address, Function op) {
        static_assert(std::is_trivially_copyable_v<T>);
        if (!CheckAndClear(processor_id, address)) {
            return false;
        }

        T saved_value;
        std::memcpy(&saved_value, exclusive_values[processor_id].data(), sizeof(T));
        const bool result = op(saved_value);

        Unlock();
        return result;
    }

    /// Unmark everything.
    void Clear();
    /// Unmark processor id
    void ClearProcessor(size_t processor_id);

private:
    bool CheckAndClear(size_t processor_id, VAddr address);

    void Lock();
    void Unlock();

    static constexpr VAddr RESERVATION_GRANULE_MASK = 0xFFFF'FFFF'FFFF'FFFFull;
    static constexpr VAddr INVALID_EXCLUSIVE_ADDRESS = 0xDEAD'DEAD'DEAD'DEADull;
    std::atomic_flag is_locked;
    std::vector<VAddr> exclusive_addresses;
    std::vector<Vector> exclusive_values;
};

} // namespace Dynarmic
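
A sketch (not part of the commit) of how the two template members above pair up when a frontend implements an exclusive load (ReadAndMark) followed by an exclusive store (DoExclusiveOperation). ReadMemory32/WriteMemory32 are hypothetical helpers standing in for the user's memory callbacks; the wrapper function names are likewise assumptions.

#include <cstdint>
#include <dynarmic/exclusive_monitor.h>

std::uint32_t ReadMemory32(Dynarmic::VAddr vaddr);         // assumed to exist elsewhere
void WriteMemory32(Dynarmic::VAddr vaddr, std::uint32_t);  // assumed to exist elsewhere

// LDXR-style load: record the reservation and the value that was read.
std::uint32_t ExclusiveLoad32(Dynarmic::ExclusiveMonitor& monitor, size_t core, Dynarmic::VAddr vaddr) {
    return monitor.ReadAndMark<std::uint32_t>(core, vaddr, [&] { return ReadMemory32(vaddr); });
}

// STXR-style store: only performed if the reservation is still held; returns true on success.
bool ExclusiveStore32(Dynarmic::ExclusiveMonitor& monitor, size_t core, Dynarmic::VAddr vaddr, std::uint32_t value) {
    return monitor.DoExclusiveOperation<std::uint32_t>(core, vaddr, [&](std::uint32_t /*saved*/) {
        WriteMemory32(vaddr, value);
        return true;
    });
}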
71
externals/dynarmic/include/dynarmic/optimization_flags.h
vendored
Executable file
@@ -0,0 +1,71 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2020 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#pragma once

#include <cstdint>

namespace Dynarmic {

enum class OptimizationFlag : std::uint32_t {
    /// This optimization avoids dispatcher lookups by allowing emitted basic blocks to jump
    /// directly to other basic blocks if the destination PC is predictable at JIT-time.
    /// This is a safe optimization.
    BlockLinking = 0x00000001,
    /// This optimization avoids dispatcher lookups by emulating a return stack buffer. This
    /// allows for function returns and syscall returns to be predicted at runtime.
    /// This is a safe optimization.
    ReturnStackBuffer = 0x00000002,
    /// This optimization enables a two-tiered dispatch system.
    /// A fast dispatcher (written in assembly) first does a look-up in a small MRU cache.
    /// If this fails, it falls back to the usual slower dispatcher.
    /// This is a safe optimization.
    FastDispatch = 0x00000004,
    /// This is an IR optimization. This optimization eliminates unnecessary emulated CPU state
    /// context lookups.
    /// This is a safe optimization.
    GetSetElimination = 0x00000008,
    /// This is an IR optimization. This optimization does constant propagation.
    /// This is a safe optimization.
    ConstProp = 0x00000010,
    /// This enables miscellaneous safe IR optimizations.
    MiscIROpt = 0x00000020,

    /// This is an UNSAFE optimization that reduces the accuracy of fused multiply-add operations.
    /// This unfuses fused instructions to improve performance on host CPUs without FMA support.
    Unsafe_UnfuseFMA = 0x00010000,
    /// This is an UNSAFE optimization that reduces the accuracy of certain floating-point instructions.
    /// This allows results of FRECPE and FRSQRTE to have **less** error than the spec allows.
    Unsafe_ReducedErrorFP = 0x00020000,
};

constexpr OptimizationFlag no_optimizations = static_cast<OptimizationFlag>(0);
constexpr OptimizationFlag all_safe_optimizations = static_cast<OptimizationFlag>(0x0000FFFF);

constexpr OptimizationFlag operator~(OptimizationFlag f) {
    return static_cast<OptimizationFlag>(~static_cast<std::uint32_t>(f));
}

constexpr OptimizationFlag operator|(OptimizationFlag f1, OptimizationFlag f2) {
    return static_cast<OptimizationFlag>(static_cast<std::uint32_t>(f1) | static_cast<std::uint32_t>(f2));
}

constexpr OptimizationFlag operator&(OptimizationFlag f1, OptimizationFlag f2) {
    return static_cast<OptimizationFlag>(static_cast<std::uint32_t>(f1) & static_cast<std::uint32_t>(f2));
}

constexpr OptimizationFlag operator|=(OptimizationFlag& result, OptimizationFlag f) {
    return result = (result | f);
}

constexpr OptimizationFlag operator&=(OptimizationFlag& result, OptimizationFlag f) {
    return result = (result & f);
}

constexpr bool operator!(OptimizationFlag f) {
    return f == no_optimizations;
}

} // namespace Dynarmic
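
A short sketch (not part of the commit) of composing these flags with the constexpr operators defined above, and of how UserConfig::HasOptimization in the A32/A64 config headers reads them. The function name and the particular flags being stripped are illustrative choices.

#include <dynarmic/A32/config.h>
#include <dynarmic/optimization_flags.h>

void ConfigureForDebugging(Dynarmic::A32::UserConfig& config) {
    using Dynarmic::OptimizationFlag;

    // Start from everything safe, then strip the dispatcher-related optimizations.
    config.optimizations = Dynarmic::all_safe_optimizations
                         & ~OptimizationFlag::BlockLinking
                         & ~OptimizationFlag::FastDispatch;

    // Unsafe_* flags only take effect when unsafe_optimizations is also set.
    config.unsafe_optimizations = false;

    if (config.HasOptimization(OptimizationFlag::ConstProp)) {
        // Constant propagation is still enabled in this configuration.
    }
}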