remove old files

mgthepro
2022-01-24 11:43:50 +01:00
parent 87def5b55b
commit ae416cfe9f
8874 changed files with 0 additions and 2090184 deletions

@@ -1,66 +0,0 @@
language: cpp
cache: ccache
matrix:
include:
- env: NAME="Linux Build" GCC_VERSION=7
os: linux
dist: trusty
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- gcc-7
- g++-7
- ninja-build
install: ./.travis/build-x86_64-linux/deps.sh
script: ./.travis/build-x86_64-linux/build.sh
- env: NAME="Linux Build" GCC_VERSION=8
os: linux
dist: trusty
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- gcc-8
- g++-8
- ninja-build
install: ./.travis/build-x86_64-linux/deps.sh
script: ./.travis/build-x86_64-linux/build.sh
- env: NAME="macOS Build"
os: osx
sudo: false
osx_image: xcode10.2
install: ./.travis/build-x86_64-macos/deps.sh
script: ./.travis/build-x86_64-macos/build.sh
- env: NAME="Test - Fuzz against Unicorn"
os: linux
dist: bionic
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- gcc-7
- g++-7
- llvm-dev
- ninja-build
install: ./.travis/test-with-unicorn-on-x86_64-linux/deps.sh
script: ./.travis/test-with-unicorn-on-x86_64-linux/build.sh
- env: NAME="Test - SSE3 only"
os: linux
dist: bionic
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- gcc-7
- g++-7
- llvm-dev
- ninja-build
install: ./.travis/sse3-only-on-x86_64-linux/deps.sh
script: ./.travis/sse3-only-on-x86_64-linux/build.sh

@@ -1,15 +0,0 @@
#!/bin/sh
set -e
set -x
export CC=gcc-7
export CXX=g++-7
export PKG_CONFIG_PATH=$HOME/.local/lib/pkgconfig:$PKG_CONFIG_PATH
export UNICORNDIR=$(pwd)/externals/unicorn
mkdir build && cd build
cmake .. -DBoost_INCLUDE_DIRS=${PWD}/../externals/ext-boost -DCMAKE_BUILD_TYPE=Release -DDYNARMIC_USE_LLVM=1 -DDYNARMIC_TESTS_USE_UNICORN=1 -DDYNARMIC_ENABLE_CPU_FEATURE_DETECTION=0 -G Ninja
ninja
./tests/dynarmic_tests --durations yes

@@ -1,18 +0,0 @@
#!/bin/sh
set -e
set -x
python3 --version
# TODO: This isn't ideal.
cd externals
git clone https://github.com/MerryMage/ext-boost
git clone https://github.com/MerryMage/unicorn
cd unicorn
UNICORN_ARCHS=aarch64,arm ./make.sh
cd ../..
mkdir -p $HOME/.local
curl -L https://cmake.org/files/v3.8/cmake-3.8.0-Linux-x86_64.tar.gz \
| tar -xz -C $HOME/.local --strip-components=1

@@ -1,15 +0,0 @@
#!/bin/sh
set -e
set -x
export CC=gcc-7
export CXX=g++-7
export PKG_CONFIG_PATH=$HOME/.local/lib/pkgconfig:$PKG_CONFIG_PATH
export UNICORNDIR=$(pwd)/externals/unicorn
mkdir build && cd build
cmake .. -DBoost_INCLUDE_DIRS=${PWD}/../externals/ext-boost -DCMAKE_BUILD_TYPE=Release -DDYNARMIC_USE_LLVM=1 -DDYNARMIC_TESTS_USE_UNICORN=1 -G Ninja
ninja
./tests/dynarmic_tests --durations yes

@@ -1,18 +0,0 @@
#!/bin/sh
set -e
set -x
python3 --version
# TODO: This isn't ideal.
cd externals
git clone https://github.com/MerryMage/ext-boost
git clone https://github.com/MerryMage/unicorn
cd unicorn
UNICORN_ARCHS="arm aarch64" ./make.sh
cd ../..
mkdir -p $HOME/.local
curl -L https://cmake.org/files/v3.8/cmake-3.8.0-Linux-x86_64.tar.gz \
| tar -xz -C $HOME/.local --strip-components=1

File diff suppressed because it is too large.

@@ -1,101 +0,0 @@
language: cpp
dist: trusty
sudo: false
os: linux
git:
depth: 1
env:
global:
- secure: |-
a1eovNn4uol9won7ghr67eD3/59oeESN+G9bWE+ecI1V6yRseG9whniGhIpC/YfMW/Qz5I
5sxSmFjaw9bxCISNwUIrL1O5x2AmRYTnFcXk4dFsUvlZg+WeF/aKyBYCNRM8C2ndbBmtAO
o1F2EwFbiso0EmtzhAPs19ujiVxkLn4=
matrix:
include:
# Documentation
- env: BUILD=Doc
sudo: required
# g++ 6 on Linux with C++14
- env: COMPILER=g++-6 BUILD=Debug STANDARD=14
compiler: gcc
addons:
apt:
update: true
sources:
- ubuntu-toolchain-r-test
packages:
- g++-6
- env: COMPILER=g++-6 BUILD=Release STANDARD=14
compiler: gcc
addons:
apt:
update: true
sources:
- ubuntu-toolchain-r-test
packages:
- g++-6
# g++ 8 on Linux with C++17
- env: COMPILER=g++-8 BUILD=Debug STANDARD=17
compiler: gcc
addons:
apt:
update: true
sources:
- ubuntu-toolchain-r-test
packages:
- g++-8
- env: COMPILER=g++-8 BUILD=Release STANDARD=17
compiler: gcc
addons:
apt:
update: true
sources:
- ubuntu-toolchain-r-test
packages:
- g++-8
# Apple clang on OS X with C++14
- env: BUILD=Debug STANDARD=14
compiler: clang
os: osx
- env: BUILD=Release STANDARD=14
compiler: clang
os: osx
# clang 6.0 on Linux with C++14 (builds the fuzzers as well)
- env: COMPILER=clang++-6.0 BUILD=Debug STANDARD=14 ENABLE_FUZZING=1
compiler: clang
addons:
apt:
update: true
packages:
- clang-6.0
sources:
- ubuntu-toolchain-r-test
- llvm-toolchain-trusty
- llvm-toolchain-trusty-6.0
# clang 4.0 on Linux with C++14
- env: COMPILER=clang++-4.0 BUILD=Debug STANDARD=11
compiler: clang
addons:
apt:
update: true
packages:
- clang-4.0
sources:
- ubuntu-toolchain-r-test
- llvm-toolchain-trusty
- llvm-toolchain-trusty-4.0
# g++ 4.8 on Linux with C++11
- env: COMPILER=g++-4.8 BUILD=Debug STANDARD=11
compiler: gcc
before_script:
- if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then export CXX=${COMPILER}; fi
- if [[ "${BUILD}" != "Doc" ]]; then ${CXX} --version; fi
script:
- support/travis-build.py

@@ -1,2 +0,0 @@
#include "os.h"
#warning "fmt/posix.h is deprecated; use fmt/os.h instead"

@@ -1,27 +0,0 @@
# Staticlib configuration for qmake builds
# For some reason qmake 3.1 fails to identify source dependencies and excludes format.cc and printf.cc
# from compilation, so it _MUST_ be called as qmake -nodepend.
# A workaround is implemented below: a custom compiler is defined which does not track dependencies.
TEMPLATE = lib
TARGET = fmt
QMAKE_EXT_CPP = .cc
CONFIG = staticlib warn_on c++11
FMT_SOURCES = \
../src/format.cc \
../src/posix.cc
fmt.name = libfmt
fmt.input = FMT_SOURCES
fmt.output = ${QMAKE_FILE_BASE}$$QMAKE_EXT_OBJ
fmt.clean = ${QMAKE_FILE_BASE}$$QMAKE_EXT_OBJ
fmt.depends = ${QMAKE_FILE_IN}
# QMAKE_RUN_CXX will not be expanded
fmt.commands = $$QMAKE_CXX -c $$QMAKE_CXXFLAGS $$QMAKE_CXXFLAGS_WARN_ON $$QMAKE_CXXFLAGS_RELEASE_WITH_DEBUGINFO $$QMAKE_CXXFLAGS_CXX11 ${QMAKE_FILE_IN}
fmt.variable_out = OBJECTS
fmt.CONFIG = no_dependencies no_link
QMAKE_EXTRA_COMPILERS += fmt

@@ -1,119 +0,0 @@
#!/usr/bin/env python
# Build the project on Travis CI.
from __future__ import print_function
import errno, os, shutil, subprocess, sys, urllib
from subprocess import call, check_call, Popen, PIPE, STDOUT
def rmtree_if_exists(dir):
  try:
    shutil.rmtree(dir)
  except OSError as e:
    # Only ignore "does not exist"; propagate any other error.
    if e.errno != errno.ENOENT:
      raise
def makedirs_if_not_exist(dir):
try:
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def install_dependencies():
branch = os.environ['TRAVIS_BRANCH']
if branch != 'master':
print('Branch: ' + branch)
exit(0) # Ignore non-master branches
check_call('curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key ' +
'| sudo apt-key add -', shell=True)
check_call('echo "deb https://deb.nodesource.com/node_0.10 precise main" ' +
'| sudo tee /etc/apt/sources.list.d/nodesource.list', shell=True)
check_call(['sudo', 'apt-get', 'update'])
check_call(['sudo', 'apt-get', 'install', 'python-virtualenv', 'nodejs'])
check_call(['sudo', 'npm', 'install', '-g', 'less@2.6.1', 'less-plugin-clean-css'])
deb_file = 'doxygen_1.8.6-2_amd64.deb'
urllib.urlretrieve('http://mirrors.kernel.org/ubuntu/pool/main/d/doxygen/' +
deb_file, deb_file)
check_call(['sudo', 'dpkg', '-i', deb_file])
fmt_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
build = os.environ['BUILD']
if build == 'Doc':
travis = 'TRAVIS' in os.environ
if travis:
install_dependencies()
sys.path.insert(0, os.path.join(fmt_dir, 'doc'))
import build
build.create_build_env()
html_dir = build.build_docs()
repo = 'fmtlib.github.io'
if travis and 'KEY' not in os.environ:
# Don't update the repo if building on Travis from an account that
# doesn't have push access.
print('Skipping update of ' + repo)
exit(0)
# Clone the fmtlib.github.io repo.
rmtree_if_exists(repo)
git_url = 'https://github.com/' if travis else 'git@github.com:'
check_call(['git', 'clone', git_url + 'fmtlib/{}.git'.format(repo)])
# Copy docs to the repo.
target_dir = os.path.join(repo, 'dev')
rmtree_if_exists(target_dir)
shutil.copytree(html_dir, target_dir, ignore=shutil.ignore_patterns('.*'))
if travis:
check_call(['git', 'config', '--global', 'user.name', 'amplbot'])
check_call(['git', 'config', '--global', 'user.email', 'viz@ampl.com'])
# Push docs to GitHub pages.
check_call(['git', 'add', '--all'], cwd=repo)
if call(['git', 'diff-index', '--quiet', 'HEAD'], cwd=repo):
check_call(['git', 'commit', '-m', 'Update documentation'], cwd=repo)
cmd = 'git push'
if travis:
cmd += ' https://$KEY@github.com/fmtlib/fmtlib.github.io.git master'
p = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT, cwd=repo)
# Print the output without the key.
print(p.communicate()[0].replace(os.environ['KEY'], '$KEY'))
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd)
exit(0)
standard = os.environ['STANDARD']
install_dir = os.path.join(fmt_dir, "_install")
build_dir = os.path.join(fmt_dir, "_build")
test_build_dir = os.path.join(fmt_dir, "_build_test")
# Configure the library.
makedirs_if_not_exist(build_dir)
cmake_flags = [
'-DCMAKE_INSTALL_PREFIX=' + install_dir, '-DCMAKE_BUILD_TYPE=' + build,
'-DCMAKE_CXX_STANDARD=' + standard
]
# Make sure the fuzzers still compile.
main_cmake_flags = list(cmake_flags)
if 'ENABLE_FUZZING' in os.environ:
main_cmake_flags += ['-DFMT_FUZZ=ON', '-DFMT_FUZZ_LINKMAIN=On']
check_call(['cmake', '-DFMT_DOC=OFF', '-DFMT_PEDANTIC=ON', '-DFMT_WERROR=ON', fmt_dir] +
main_cmake_flags, cwd=build_dir)
# Build the library.
check_call(['cmake', '--build','.'], cwd=build_dir)
# Test the library.
env = os.environ.copy()
env['CTEST_OUTPUT_ON_FAILURE'] = '1'
if call(['make', 'test'], env=env, cwd=build_dir):
with open(os.path.join(build_dir, 'Testing', 'Temporary', 'LastTest.log'), 'r') as f:
print(f.read())
sys.exit(-1)
# Install the library.
check_call(['make', 'install'], cwd=build_dir)
# Test installation.
makedirs_if_not_exist(test_build_dir)
check_call(['cmake', os.path.join(fmt_dir, "test", "find-package-test")] +
cmake_flags, cwd=test_build_dir)
check_call(['make', '-j4'], cwd=test_build_dir)

@@ -1,30 +0,0 @@
#!/usr/bin/env python
# Update the coverity branch from the master branch.
# It is not done automatically because Coverity Scan limits
# the number of submissions per day.
from __future__ import print_function
import shutil, tempfile
from subprocess import check_output, STDOUT
class Git:
def __init__(self, dir):
self.dir = dir
def __call__(self, *args):
output = check_output(['git'] + list(args), cwd=self.dir, stderr=STDOUT)
print(output)
return output
dir = tempfile.mkdtemp()
try:
git = Git(dir)
git('clone', '-b', 'coverity', 'git@github.com:fmtlib/fmt.git', dir)
output = git('merge', '-X', 'theirs', '--no-commit', 'origin/master')
if 'Fast-forward' not in output:
git('reset', 'HEAD', '.travis.yml')
git('checkout', '--', '.travis.yml')
git('commit', '-m', 'Update coverity branch')
git('push')
finally:
shutil.rmtree(dir)

@@ -1,58 +0,0 @@
// Formatting library for C++ - custom argument formatter tests
//
// Copyright (c) 2012 - present, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#include "fmt/format.h"
#include "gtest-extra.h"
// MSVC 2013 is known to be broken.
#if !FMT_MSC_VER || FMT_MSC_VER > 1800
// A custom argument formatter that doesn't print `-` for floating-point values
// rounded to 0.
class custom_arg_formatter
: public fmt::detail::arg_formatter<fmt::format_context::iterator, char> {
public:
using base = fmt::detail::arg_formatter<fmt::format_context::iterator, char>;
custom_arg_formatter(fmt::format_context& ctx,
fmt::format_parse_context* parse_ctx,
fmt::format_specs* s = nullptr,
const char* = nullptr)
: base(ctx, parse_ctx, s) {}
using base::operator();
iterator operator()(double value) {
// Comparing a float to 0.0 is safe.
if (round(value * pow(10, specs()->precision)) == 0.0) value = 0;
return base::operator()(value);
}
};
std::string custom_vformat(fmt::string_view format_str, fmt::format_args args) {
fmt::memory_buffer buffer;
fmt::detail::buffer<char>& base = buffer;
// Pass custom argument formatter as a template arg to vwrite.
fmt::vformat_to<custom_arg_formatter>(std::back_inserter(base), format_str,
args);
return std::string(buffer.data(), buffer.size());
}
template <typename... Args>
std::string custom_format(const char* format_str, const Args&... args) {
auto va = fmt::make_format_args(args...);
return custom_vformat(format_str, va);
}
TEST(CustomFormatterTest, Format) {
EXPECT_EQ("0.00", custom_format("{:.2f}", -.00001));
}
#endif

@@ -1,6 +0,0 @@
// Copyright (c) 2020 Vladimir Solontsov
// SPDX-License-Identifier: MIT
#include <fmt/core.h>
#include "gtest-extra.h"

@@ -1,152 +0,0 @@
// Copyright (c) 2019, Paul Dreik
// License: see LICENSE.rst in the fmt root directory
#include <fmt/chrono.h>
#include <cstdint>
#include <limits>
#include <stdexcept>
#include <type_traits>
#include <vector>
#include "fuzzer_common.h"
template <typename Item, typename Ratio>
void invoke_inner(fmt::string_view formatstring, const Item item) {
const std::chrono::duration<Item, Ratio> value(item);
try {
#if FMT_FUZZ_FORMAT_TO_STRING
std::string message = fmt::format(formatstring, value);
#else
fmt::memory_buffer buf;
fmt::format_to(buf, formatstring, value);
#endif
} catch (std::exception& /*e*/) {
}
}
// Item is the underlying type for duration (int, long etc)
template <typename Item>
void invoke_outer(const uint8_t* Data, size_t Size, const int scaling) {
// always use a fixed location of the data
using fmt_fuzzer::Nfixed;
constexpr auto N = sizeof(Item);
static_assert(N <= Nfixed, "fixed size is too small");
if (Size <= Nfixed + 1) {
return;
}
const Item item = fmt_fuzzer::assignFromBuf<Item>(Data);
// fast forward
Data += Nfixed;
Size -= Nfixed;
// Data is already allocated separately in libFuzzer so reading past
// the end will most likely be detected anyway
const auto formatstring = fmt::string_view(fmt_fuzzer::as_chars(Data), Size);
// doit_impl<Item,std::yocto>(buf.data(),item);
// doit_impl<Item,std::zepto>(buf.data(),item);
switch (scaling) {
case 1:
invoke_inner<Item, std::atto>(formatstring, item);
break;
case 2:
invoke_inner<Item, std::femto>(formatstring, item);
break;
case 3:
invoke_inner<Item, std::pico>(formatstring, item);
break;
case 4:
invoke_inner<Item, std::nano>(formatstring, item);
break;
case 5:
invoke_inner<Item, std::micro>(formatstring, item);
break;
case 6:
invoke_inner<Item, std::milli>(formatstring, item);
break;
case 7:
invoke_inner<Item, std::centi>(formatstring, item);
break;
case 8:
invoke_inner<Item, std::deci>(formatstring, item);
break;
case 9:
invoke_inner<Item, std::deca>(formatstring, item);
break;
case 10:
invoke_inner<Item, std::kilo>(formatstring, item);
break;
case 11:
invoke_inner<Item, std::mega>(formatstring, item);
break;
case 12:
invoke_inner<Item, std::giga>(formatstring, item);
break;
case 13:
invoke_inner<Item, std::tera>(formatstring, item);
break;
case 14:
invoke_inner<Item, std::peta>(formatstring, item);
break;
case 15:
invoke_inner<Item, std::exa>(formatstring, item);
}
// doit_impl<Item,std::zeta>(buf.data(),item);
// doit_impl<Item,std::yotta>(buf.data(),item);
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* Data, size_t Size) {
if (Size <= 4) {
return 0;
}
const auto representation = Data[0];
const auto scaling = Data[1];
Data += 2;
Size -= 2;
switch (representation) {
case 1:
invoke_outer<char>(Data, Size, scaling);
break;
case 2:
invoke_outer<unsigned char>(Data, Size, scaling);
break;
case 3:
invoke_outer<signed char>(Data, Size, scaling);
break;
case 4:
invoke_outer<short>(Data, Size, scaling);
break;
case 5:
invoke_outer<unsigned short>(Data, Size, scaling);
break;
case 6:
invoke_outer<int>(Data, Size, scaling);
break;
case 7:
invoke_outer<unsigned int>(Data, Size, scaling);
break;
case 8:
invoke_outer<long>(Data, Size, scaling);
break;
case 9:
invoke_outer<unsigned long>(Data, Size, scaling);
break;
case 10:
invoke_outer<float>(Data, Size, scaling);
break;
case 11:
invoke_outer<double>(Data, Size, scaling);
break;
case 12:
invoke_outer<long double>(Data, Size, scaling);
break;
default:
break;
}
return 0;
}

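The first two bytes of each input select the duration's representation type and SI ratio, the next Nfixed bytes carry the value, and the remainder is the format string. A minimal sketch, under those assumptions, of hand-crafting a reproducer input for this harness (the file name and chosen values are illustrative):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <fstream>
#include <vector>

int main() {
  constexpr std::size_t Nfixed = 16;  // assumed to match fmt_fuzzer::Nfixed
  std::vector<std::uint8_t> input;
  input.push_back(6);  // representation byte: case 6 selects int
  input.push_back(5);  // scaling byte: case 5 selects std::micro
  std::uint8_t slot[Nfixed] = {};  // fixed-size slot holding the item
  const int value = 1234;
  std::memcpy(slot, &value, sizeof(value));
  input.insert(input.end(), slot, slot + Nfixed);
  const char fmt[] = "{}";  // remainder of the input is the format string
  input.insert(input.end(), fmt, fmt + sizeof(fmt) - 1);
  std::ofstream out("repro.bin", std::ios::binary);
  out.write(reinterpret_cast<const char*>(input.data()),
            static_cast<std::streamsize>(input.size()));
}

A file written this way can be replayed through the standalone driver further below, which reads each argument file and feeds it to LLVMFuzzerTestOneInput.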
@@ -1,67 +0,0 @@
#ifndef FUZZER_COMMON_H
#define FUZZER_COMMON_H
// Copyright (c) 2019, Paul Dreik
// License: see LICENSE.rst in the fmt root directory
#include <cstdint> // std::uint8_t
#include <cstring> // memcpy
#include <type_traits> // trivially copyable
// One can format either to a string or to a buffer. The buffer is faster,
// but one may be interested in formatting to a string instead, to
// verify it works as intended. To avoid a combinatorial explosion,
// select this at compile time instead of dynamically from the fuzz data.
#define FMT_FUZZ_FORMAT_TO_STRING 0
// If fmt is given a buffer that is separately allocated, the chances
// that AddressSanitizer detects out-of-bounds reads are much higher.
// However, it slows down the fuzzing.
#define FMT_FUZZ_SEPARATE_ALLOCATION 1
// To let the fuzzer mutation be efficient at cross-pollinating
// between different types, use a fixed-size format.
// The same bit pattern,
// interpreted as another type,
// is likely interesting.
// For this, we must know the size of the largest possible type in use.
// There are some problems on Travis claiming Nfixed is not a constant
// expression, which seems to be an issue with older versions of libstdc++.
# include <algorithm>
namespace fmt_fuzzer {
constexpr auto Nfixed = std::max(sizeof(long double), sizeof(std::intmax_t));
}
#else
namespace fmt_fuzzer {
constexpr auto Nfixed = 16;
}
#endif
namespace fmt_fuzzer {
// view data as a C char pointer.
template <typename T> inline const char* as_chars(const T* data) {
return static_cast<const char*>(static_cast<const void*>(data));
}
// view data as a byte pointer
template <typename T> inline const std::uint8_t* as_bytes(const T* data) {
return static_cast<const std::uint8_t*>(static_cast<const void*>(data));
}
// blits bytes from Data to form an (assumed trivially copyable) object
// of type Item
template <class Item> inline Item assignFromBuf(const std::uint8_t* Data) {
Item item{};
std::memcpy(&item, Data, sizeof(Item));
return item;
}
// reads a boolean value by looking at the first byte from Data
template <> inline bool assignFromBuf<bool>(const std::uint8_t* Data) {
return !!Data[0];
}
} // namespace fmt_fuzzer
#endif // FUZZER_COMMON_H

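assignFromBuf goes through memcpy rather than a pointer cast because reading the raw bytes through an Item* would violate strict aliasing. A self-contained sketch of the same idiom (the byte values are illustrative and assume little-endian IEEE-754):

#include <cstdint>
#include <cstring>
#include <iostream>
#include <type_traits>

// memcpy into a local is well-defined where
// *reinterpret_cast<const T*>(bytes) is not.
template <class Item> Item blit(const std::uint8_t* data) {
  static_assert(std::is_trivially_copyable<Item>::value,
                "only trivially copyable types can be blitted");
  Item item{};
  std::memcpy(&item, data, sizeof(Item));
  return item;
}

int main() {
  const std::uint8_t bytes[] = {0, 0, 0, 0, 0, 0, 0xF0, 0x3F};
  std::cout << blit<double>(bytes) << '\n';  // prints 1 (i.e. 1.0)
}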
@@ -1,21 +0,0 @@
#include <cassert>
#include <fstream>
#include <sstream>
#include <vector>
#include "fuzzer_common.h"
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* Data, size_t Size);
int main(int argc, char* argv[]) {
for (int i = 1; i < argc; ++i) {
std::ifstream in(argv[i]);
assert(in);
in.seekg(0, std::ios_base::end);
const auto pos = in.tellg();
assert(pos >= 0);
in.seekg(0, std::ios_base::beg);
std::vector<char> buf(static_cast<size_t>(pos));
in.read(buf.data(), static_cast<long>(buf.size()));
assert(in.gcount() == pos);
LLVMFuzzerTestOneInput(fmt_fuzzer::as_bytes(buf.data()), buf.size());
}
}

@@ -1,128 +0,0 @@
// Copyright (c) 2019, Paul Dreik
// License: see LICENSE.rst in the fmt root directory
#include <fmt/chrono.h>
#include <fmt/core.h>
#include <cstdint>
#include <stdexcept>
#include <type_traits>
#include <vector>
#include "fuzzer_common.h"
template <typename Item1>
void invoke_fmt(const uint8_t* Data, size_t Size, unsigned int argsize) {
constexpr auto N1 = sizeof(Item1);
static_assert(N1 <= fmt_fuzzer::Nfixed, "Nfixed too small");
if (Size <= fmt_fuzzer::Nfixed) {
return;
}
const Item1 item1 = fmt_fuzzer::assignFromBuf<Item1>(Data);
Data += fmt_fuzzer::Nfixed;
Size -= fmt_fuzzer::Nfixed;
// how many chars should be used for the argument name?
if (argsize <= 0 || argsize >= Size) {
return;
}
// allocating buffers separately is slower, but increases chances
// of detecting memory errors
#if FMT_FUZZ_SEPARATE_ALLOCATION
std::vector<char> argnamebuffer(argsize + 1);
std::memcpy(argnamebuffer.data(), Data, argsize);
auto argname = argnamebuffer.data();
#else
auto argname = fmt_fuzzer::as_chars(Data);
#endif
Data += argsize;
Size -= argsize;
#if FMT_FUZZ_SEPARATE_ALLOCATION
// allocates as tightly as possible, making it easier to catch buffer overruns.
std::vector<char> fmtstringbuffer(Size);
std::memcpy(fmtstringbuffer.data(), Data, Size);
auto fmtstring = fmt::string_view(fmtstringbuffer.data(), Size);
#else
auto fmtstring = fmt::string_view(fmt_fuzzer::as_chars(Data), Size);
#endif
#if FMT_FUZZ_FORMAT_TO_STRING
std::string message = fmt::format(fmtstring, fmt::arg(argname, item1));
#else
fmt::memory_buffer outbuf;
fmt::format_to(outbuf, fmtstring, fmt::arg(argname, item1));
#endif
}
// for dynamic dispatching to an explicit instantiation
template <typename Callback> void invoke(int index, Callback callback) {
switch (index) {
case 0:
callback(bool{});
break;
case 1:
callback(char{});
break;
case 2:
using sc = signed char;
callback(sc{});
break;
case 3:
using uc = unsigned char;
callback(uc{});
break;
case 4:
callback(short{});
break;
case 5:
using us = unsigned short;
callback(us{});
break;
case 6:
callback(int{});
break;
case 7:
callback(unsigned{});
break;
case 8:
callback(long{});
break;
case 9:
using ul = unsigned long;
callback(ul{});
break;
case 10:
callback(float{});
break;
case 11:
callback(double{});
break;
case 12:
using LD = long double;
callback(LD{});
break;
}
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* Data, size_t Size) {
if (Size <= 3) {
return 0;
}
// switch types depending on the first byte of the input
const auto first = Data[0] & 0x0F;
const unsigned int second = (Data[0] & 0xF0) >> 4;
Data++;
Size--;
auto outerfcn = [=](auto param1) {
invoke_fmt<decltype(param1)>(Data, Size, second);
};
try {
invoke(first, outerfcn);
} catch (std::exception& /*e*/) {
}
return 0;
}

@@ -1,131 +0,0 @@
// Copyright (c) 2019, Paul Dreik
// License: see LICENSE.rst in the fmt root directory
#include <fmt/core.h>
#include <cstdint>
#include <stdexcept>
#include <type_traits>
#include <vector>
#include <fmt/chrono.h>
#include "fuzzer_common.h"
using fmt_fuzzer::Nfixed;
template <typename Item>
void invoke_fmt(const uint8_t* Data, size_t Size) {
constexpr auto N = sizeof(Item);
static_assert(N <= Nfixed, "Nfixed is too small");
if (Size <= Nfixed) {
return;
}
const Item item = fmt_fuzzer::assignFromBuf<Item>(Data);
Data += Nfixed;
Size -= Nfixed;
#if FMT_FUZZ_SEPARATE_ALLOCATION
// allocates as tightly as possible, making it easier to catch buffer overruns.
std::vector<char> fmtstringbuffer(Size);
std::memcpy(fmtstringbuffer.data(), Data, Size);
auto fmtstring = fmt::string_view(fmtstringbuffer.data(), Size);
#else
auto fmtstring = fmt::string_view(fmt_fuzzer::as_chars(Data), Size);
#endif
#if FMT_FUZZ_FORMAT_TO_STRING
std::string message = fmt::format(fmtstring, item);
#else
fmt::memory_buffer message;
fmt::format_to(message, fmtstring, item);
#endif
}
void invoke_fmt_time(const uint8_t* Data, size_t Size) {
using Item = std::time_t;
constexpr auto N = sizeof(Item);
static_assert(N <= Nfixed, "Nfixed too small");
if (Size <= Nfixed) {
return;
}
const Item item = fmt_fuzzer::assignFromBuf<Item>(Data);
Data += Nfixed;
Size -= Nfixed;
#if FMT_FUZZ_SEPARATE_ALLOCATION
// allocates as tightly as possible, making it easier to catch buffer overruns.
std::vector<char> fmtstringbuffer(Size);
std::memcpy(fmtstringbuffer.data(), Data, Size);
auto fmtstring = fmt::string_view(fmtstringbuffer.data(), Size);
#else
auto fmtstring = fmt::string_view(fmt_fuzzer::as_chars(Data), Size);
#endif
auto* b = std::localtime(&item);
if (b) {
#if FMT_FUZZ_FORMAT_TO_STRING
std::string message = fmt::format(fmtstring, *b);
#else
fmt::memory_buffer message;
fmt::format_to(message, fmtstring, *b);
#endif
}
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* Data, size_t Size) {
if (Size <= 3) {
return 0;
}
const auto first = Data[0];
Data++;
Size--;
try {
switch (first) {
case 0:
invoke_fmt<bool>(Data, Size);
break;
case 1:
invoke_fmt<char>(Data, Size);
break;
case 2:
invoke_fmt<unsigned char>(Data, Size);
break;
case 3:
invoke_fmt<signed char>(Data, Size);
break;
case 4:
invoke_fmt<short>(Data, Size);
break;
case 5:
invoke_fmt<unsigned short>(Data, Size);
break;
case 6:
invoke_fmt<int>(Data, Size);
break;
case 7:
invoke_fmt<unsigned int>(Data, Size);
break;
case 8:
invoke_fmt<long>(Data, Size);
break;
case 9:
invoke_fmt<unsigned long>(Data, Size);
break;
case 10:
invoke_fmt<float>(Data, Size);
break;
case 11:
invoke_fmt<double>(Data, Size);
break;
case 12:
invoke_fmt<long double>(Data, Size);
break;
case 13:
invoke_fmt_time(Data, Size);
break;
default:
break;
}
} catch (std::exception& /*e*/) {
}
return 0;
}

@@ -1,116 +0,0 @@
// Copyright (c) 2019, Paul Dreik
// License: see LICENSE.rst in the fmt root directory
#include <fmt/format.h>
#include <fmt/printf.h>
#include <cstdint>
#include <stdexcept>
#include "fuzzer_common.h"
using fmt_fuzzer::Nfixed;
template <typename Item1, typename Item2>
void invoke_fmt(const uint8_t* Data, size_t Size) {
constexpr auto N1 = sizeof(Item1);
constexpr auto N2 = sizeof(Item2);
static_assert(N1 <= Nfixed, "size1 exceeded");
static_assert(N2 <= Nfixed, "size2 exceeded");
if (Size <= Nfixed + Nfixed) {
return;
}
Item1 item1 = fmt_fuzzer::assignFromBuf<Item1>(Data);
Data += Nfixed;
Size -= Nfixed;
Item2 item2 = fmt_fuzzer::assignFromBuf<Item2>(Data);
Data += Nfixed;
Size -= Nfixed;
auto fmtstring = fmt::string_view(fmt_fuzzer::as_chars(Data), Size);
#if FMT_FUZZ_FORMAT_TO_STRING
std::string message = fmt::format(fmtstring, item1, item2);
#else
fmt::memory_buffer message;
fmt::format_to(message, fmtstring, item1, item2);
#endif
}
// for dynamic dispatching to an explicit instantiation
template <typename Callback> void invoke(int index, Callback callback) {
switch (index) {
case 0:
callback(bool{});
break;
case 1:
callback(char{});
break;
case 2:
using sc = signed char;
callback(sc{});
break;
case 3:
using uc = unsigned char;
callback(uc{});
break;
case 4:
callback(short{});
break;
case 5:
using us = unsigned short;
callback(us{});
break;
case 6:
callback(int{});
break;
case 7:
callback(unsigned{});
break;
case 8:
callback(long{});
break;
case 9:
using ul = unsigned long;
callback(ul{});
break;
case 10:
callback(float{});
break;
case 11:
callback(double{});
break;
case 12:
using LD = long double;
callback(LD{});
break;
case 13:
using ptr = void*;
callback(ptr{});
break;
}
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* Data, size_t Size) {
if (Size <= 3) {
return 0;
}
// switch types depending on the first byte of the input
const auto first = Data[0] & 0x0F;
const auto second = (Data[0] & 0xF0) >> 4;
Data++;
Size--;
auto outer = [=](auto param1) {
auto inner = [=](auto param2) {
invoke_fmt<decltype(param1), decltype(param2)>(Data, Size);
};
invoke(second, inner);
};
try {
invoke(first, outer);
} catch (std::exception& /*e*/) {
}
return 0;
}

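The nested lambdas above turn two runtime nibbles into a compile-time pair of types, instantiating invoke_fmt for every combination. A stripped-down sketch of that double-dispatch pattern, independent of fmt (all names here are illustrative):

#include <iostream>
#include <typeinfo>

// Maps a runtime index to a concrete type and passes a value of that
// type to the callback; the callback is instantiated once per case.
template <typename Callback> void pick(int index, Callback callback) {
  switch (index) {
  case 0: callback(int{}); break;
  case 1: callback(double{}); break;
  case 2: callback(char{}); break;
  }
}

template <typename A, typename B> void run_pair() {
  std::cout << typeid(A).name() << ", " << typeid(B).name() << '\n';
}

int main() {
  const int first = 1, second = 2;  // e.g. the two nibbles of Data[0]
  // Nesting two picks selects run_pair<double, char> at runtime.
  pick(first, [&](auto a) {
    pick(second, [&](auto b) { run_pair<decltype(a), decltype(b)>(); });
  });
}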
@@ -1,112 +0,0 @@
// Copyright (c) 2019, Paul Dreik
// License: see LICENSE.rst in the fmt root directory
#include <fmt/format.h>
#include <cstdint>
#include <stdexcept>
#include <type_traits>
#include "fuzzer_common.h"
constexpr auto Nfixed = fmt_fuzzer::Nfixed;
template <typename Item1, typename Item2>
void invoke_fmt(const uint8_t* Data, size_t Size) {
constexpr auto N1 = sizeof(Item1);
constexpr auto N2 = sizeof(Item2);
static_assert(N1 <= Nfixed, "size1 exceeded");
static_assert(N2 <= Nfixed, "size2 exceeded");
if (Size <= Nfixed + Nfixed) {
return;
}
const Item1 item1 = fmt_fuzzer::assignFromBuf<Item1>(Data);
Data += Nfixed;
Size -= Nfixed;
const Item2 item2 = fmt_fuzzer::assignFromBuf<Item2>(Data);
Data += Nfixed;
Size -= Nfixed;
auto fmtstring = fmt::string_view(fmt_fuzzer::as_chars(Data), Size);
#if FMT_FUZZ_FORMAT_TO_STRING
std::string message = fmt::format(fmtstring, item1, item2);
#else
fmt::memory_buffer message;
fmt::format_to(message, fmtstring, item1, item2);
#endif
}
// for dynamic dispatching to an explicit instantiation
template <typename Callback> void invoke(int index, Callback callback) {
switch (index) {
case 0:
callback(bool{});
break;
case 1:
callback(char{});
break;
case 2:
using sc = signed char;
callback(sc{});
break;
case 3:
using uc = unsigned char;
callback(uc{});
break;
case 4:
callback(short{});
break;
case 5:
using us = unsigned short;
callback(us{});
break;
case 6:
callback(int{});
break;
case 7:
callback(unsigned{});
break;
case 8:
callback(long{});
break;
case 9:
using ul = unsigned long;
callback(ul{});
break;
case 10:
callback(float{});
break;
case 11:
callback(double{});
break;
case 12:
using LD = long double;
callback(LD{});
break;
}
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* Data, size_t Size) {
if (Size <= 3) {
return 0;
}
// switch types depending on the first byte of the input
const auto first = Data[0] & 0x0F;
const auto second = (Data[0] & 0xF0) >> 4;
Data++;
Size--;
auto outer = [=](auto param1) {
auto inner = [=](auto param2) {
invoke_fmt<decltype(param1), decltype(param2)>(Data, Size);
};
invoke(second, inner);
};
try {
invoke(first, outer);
} catch (std::exception& /*e*/) {
}
return 0;
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -1,75 +0,0 @@
// Formatting library for C++ - Grisu tests
//
// Copyright (c) 2012 - present, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.
#include "fmt/format.h"
#include "gtest.h"
static bool reported_skipped;
#undef TEST
#define TEST(test_fixture, test_name) \
void test_fixture##test_name(); \
GTEST_TEST(test_fixture, test_name) { \
if (FMT_USE_GRISU) { \
test_fixture##test_name(); \
} else if (!reported_skipped) { \
reported_skipped = true; \
fmt::print("Skipping Grisu tests.\n"); \
} \
} \
void test_fixture##test_name()
TEST(GrisuTest, NaN) {
auto nan = std::numeric_limits<double>::quiet_NaN();
EXPECT_EQ("nan", fmt::format("{}", nan));
EXPECT_EQ("-nan", fmt::format("{}", -nan));
}
TEST(GrisuTest, Inf) {
auto inf = std::numeric_limits<double>::infinity();
EXPECT_EQ("inf", fmt::format("{}", inf));
EXPECT_EQ("-inf", fmt::format("{}", -inf));
}
TEST(GrisuTest, Zero) { EXPECT_EQ("0.0", fmt::format("{}", 0.0)); }
TEST(GrisuTest, Round) {
EXPECT_EQ("1.9156918820264798e-56",
fmt::format("{}", 1.9156918820264798e-56));
EXPECT_EQ("0.0000", fmt::format("{:.4f}", 7.2809479766055470e-15));
}
TEST(GrisuTest, Prettify) {
EXPECT_EQ("0.0001", fmt::format("{}", 1e-4));
EXPECT_EQ("1e-05", fmt::format("{}", 1e-5));
EXPECT_EQ("9.999e-05", fmt::format("{}", 9.999e-5));
EXPECT_EQ("10000000000.0", fmt::format("{}", 1e10));
EXPECT_EQ("100000000000.0", fmt::format("{}", 1e11));
EXPECT_EQ("12340000000.0", fmt::format("{}", 1234e7));
EXPECT_EQ("12.34", fmt::format("{}", 1234e-2));
EXPECT_EQ("0.001234", fmt::format("{}", 1234e-6));
EXPECT_EQ("0.1", fmt::format("{}", 0.1f));
EXPECT_EQ("0.10000000149011612", fmt::format("{}", double(0.1f)));
}
TEST(GrisuTest, ZeroPrecision) { EXPECT_EQ("1", fmt::format("{:.0}", 1.0)); }
TEST(GrisuTest, Fallback) {
EXPECT_EQ("1e+23", fmt::format("{}", 1e23));
EXPECT_EQ("9e-265", fmt::format("{}", 9e-265));
EXPECT_EQ("5.423717798060526e+125",
fmt::format("{}", 5.423717798060526e+125));
EXPECT_EQ("1.372371880954233e-288",
fmt::format("{}", 1.372371880954233e-288));
EXPECT_EQ("55388492.622190244", fmt::format("{}", 55388492.622190244));
EXPECT_EQ("2.2506787569811123e-253",
fmt::format("{}", 2.2506787569811123e-253));
EXPECT_EQ("1103618912042992.8", fmt::format("{}", 1103618912042992.8));
// pow(2, -25) - asymmetric boundaries:
EXPECT_EQ("2.9802322387695312e-08",
fmt::format("{}", 2.9802322387695312e-08));
}

@@ -1,232 +0,0 @@
// Copyright 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Author: wan@google.com (Zhanyong Wan)
//
// Utilities for testing Google Test itself and code that uses Google Test
// (e.g. frameworks built on top of Google Test).
#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_
#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_
#include "gtest/gtest.h"
namespace testing {
// This helper class can be used to mock out Google Test failure reporting
// so that we can test Google Test or code that builds on Google Test.
//
// An object of this class appends a TestPartResult object to the
// TestPartResultArray object given in the constructor whenever a Google Test
// failure is reported. It can either intercept only failures that are
// generated in the same thread that created this object or it can intercept
// all generated failures. The scope of this mock object can be controlled with
// the second argument to the two arguments constructor.
class GTEST_API_ ScopedFakeTestPartResultReporter
: public TestPartResultReporterInterface {
public:
// The two possible mocking modes of this object.
enum InterceptMode {
INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures.
INTERCEPT_ALL_THREADS // Intercepts all failures.
};
// The c'tor sets this object as the test part result reporter used
// by Google Test. The 'result' parameter specifies where to report the
// results. This reporter will only catch failures generated in the current
// thread. DEPRECATED
explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result);
// Same as above, but you can choose the interception scope of this object.
ScopedFakeTestPartResultReporter(InterceptMode intercept_mode,
TestPartResultArray* result);
// The d'tor restores the previous test part result reporter.
virtual ~ScopedFakeTestPartResultReporter();
// Appends the TestPartResult object to the TestPartResultArray
// received in the constructor.
//
// This method is from the TestPartResultReporterInterface
// interface.
virtual void ReportTestPartResult(const TestPartResult& result);
private:
void Init();
const InterceptMode intercept_mode_;
TestPartResultReporterInterface* old_reporter_;
TestPartResultArray* const result_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
};
namespace internal {
// A helper class for implementing EXPECT_FATAL_FAILURE() and
// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given
// TestPartResultArray contains exactly one failure that has the given
// type and contains the given substring. If that's not the case, a
// non-fatal failure will be generated.
class GTEST_API_ SingleFailureChecker {
public:
// The constructor remembers the arguments.
SingleFailureChecker(const TestPartResultArray* results,
TestPartResult::Type type,
const string& substr);
~SingleFailureChecker();
private:
const TestPartResultArray* const results_;
const TestPartResult::Type type_;
const string substr_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
};
} // namespace internal
} // namespace testing
// A set of macros for testing Google Test assertions or code that's expected
// to generate Google Test fatal failures. It verifies that the given
// statement will cause exactly one fatal Google Test failure with 'substr'
// being part of the failure message.
//
// There are two different versions of this macro. EXPECT_FATAL_FAILURE only
// affects and considers failures generated in the current thread and
// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
//
// The verification of the assertion is done correctly even when the statement
// throws an exception or aborts the current function.
//
// Known restrictions:
// - 'statement' cannot reference local non-static variables or
// non-static members of the current object.
// - 'statement' cannot return a value.
// - You cannot stream a failure message to this macro.
//
// Note that even though the implementations of the following two
// macros are much alike, we cannot refactor them to use a common
// helper macro, due to some peculiarity in how the preprocessor
// works. The AcceptsMacroThatExpandsToUnprotectedComma test in
// gtest_unittest.cc will fail to compile if we do that.
#define EXPECT_FATAL_FAILURE(statement, substr) \
do { \
class GTestExpectFatalFailureHelper {\
public:\
static void Execute() { statement; }\
};\
::testing::TestPartResultArray gtest_failures;\
::testing::internal::SingleFailureChecker gtest_checker(\
&gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
{\
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
::testing::ScopedFakeTestPartResultReporter:: \
INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
GTestExpectFatalFailureHelper::Execute();\
}\
} while (::testing::internal::AlwaysFalse())
#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
do { \
class GTestExpectFatalFailureHelper {\
public:\
static void Execute() { statement; }\
};\
::testing::TestPartResultArray gtest_failures;\
::testing::internal::SingleFailureChecker gtest_checker(\
&gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
{\
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
::testing::ScopedFakeTestPartResultReporter:: \
INTERCEPT_ALL_THREADS, &gtest_failures);\
GTestExpectFatalFailureHelper::Execute();\
}\
} while (::testing::internal::AlwaysFalse())
// A macro for testing Google Test assertions or code that's expected to
// generate Google Test non-fatal failures. It asserts that the given
// statement will cause exactly one non-fatal Google Test failure with 'substr'
// being part of the failure message.
//
// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only
// affects and considers failures generated in the current thread and
// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
//
// 'statement' is allowed to reference local variables and members of
// the current object.
//
// The verification of the assertion is done correctly even when the statement
// throws an exception or aborts the current function.
//
// Known restrictions:
// - You cannot stream a failure message to this macro.
//
// Note that even though the implementations of the following two
// macros are much alike, we cannot refactor them to use a common
// helper macro, due to some peculiarity in how the preprocessor
// works. If we do that, the code won't compile when the user gives
// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that
// expands to code containing an unprotected comma. The
// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc
// catches that.
//
// For the same reason, we have to write
// if (::testing::internal::AlwaysTrue()) { statement; }
// instead of
// GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
// to avoid an MSVC warning on unreachable code.
#define EXPECT_NONFATAL_FAILURE(statement, substr) \
do {\
::testing::TestPartResultArray gtest_failures;\
::testing::internal::SingleFailureChecker gtest_checker(\
&gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
(substr));\
{\
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
::testing::ScopedFakeTestPartResultReporter:: \
INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
if (::testing::internal::AlwaysTrue()) { statement; }\
}\
} while (::testing::internal::AlwaysFalse())
#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
do {\
::testing::TestPartResultArray gtest_failures;\
::testing::internal::SingleFailureChecker gtest_checker(\
&gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
(substr));\
{\
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \
&gtest_failures);\
if (::testing::internal::AlwaysTrue()) { statement; }\
}\
} while (::testing::internal::AlwaysFalse())
#endif // GTEST_INCLUDE_GTEST_GTEST_SPI_H_

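For context, a minimal usage sketch of the macros declared above, following standard Google Test conventions (test and function names are illustrative):

#include "gtest/gtest-spi.h"

// EXPECT_FATAL_FAILURE's statement cannot touch local non-static state,
// so the failing code lives in a free function.
static void fail_fatally() { FAIL() << "expected fatal failure"; }

TEST(SpiDemo, CatchesFatalFailure) {
  // Passes only if the statement produces exactly one fatal failure
  // whose message contains the given substring.
  EXPECT_FATAL_FAILURE(fail_fatally(), "expected fatal failure");
}

TEST(SpiDemo, CatchesNonFatalFailure) {
  EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "oops", "oops");
}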
File diff suppressed because it is too large.

@@ -1,3 +0,0 @@
// Additional translation unit for the header-only configuration test
#include "fmt/core.h"

@@ -1,109 +0,0 @@
// Formatting library for C++ - locale tests
//
// Copyright (c) 2012 - present, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.
#include "fmt/locale.h"
#include "gmock.h"
using fmt::detail::max_value;
#ifndef FMT_STATIC_THOUSANDS_SEPARATOR
template <typename Char> struct numpunct : std::numpunct<Char> {
protected:
Char do_decimal_point() const FMT_OVERRIDE { return '?'; }
std::string do_grouping() const FMT_OVERRIDE { return "\03"; }
Char do_thousands_sep() const FMT_OVERRIDE { return '~'; }
};
template <typename Char> struct no_grouping : std::numpunct<Char> {
protected:
Char do_decimal_point() const FMT_OVERRIDE { return '.'; }
std::string do_grouping() const FMT_OVERRIDE { return ""; }
Char do_thousands_sep() const FMT_OVERRIDE { return ','; }
};
template <typename Char> struct special_grouping : std::numpunct<Char> {
protected:
Char do_decimal_point() const FMT_OVERRIDE { return '.'; }
std::string do_grouping() const FMT_OVERRIDE { return "\03\02"; }
Char do_thousands_sep() const FMT_OVERRIDE { return ','; }
};
template <typename Char> struct small_grouping : std::numpunct<Char> {
protected:
Char do_decimal_point() const FMT_OVERRIDE { return '.'; }
std::string do_grouping() const FMT_OVERRIDE { return "\01"; }
Char do_thousands_sep() const FMT_OVERRIDE { return ','; }
};
TEST(LocaleTest, DoubleDecimalPoint) {
std::locale loc(std::locale(), new numpunct<char>());
EXPECT_EQ("1?23", fmt::format(loc, "{:L}", 1.23));
}
TEST(LocaleTest, Format) {
std::locale loc(std::locale(), new numpunct<char>());
EXPECT_EQ("1234567", fmt::format(std::locale(), "{:L}", 1234567));
EXPECT_EQ("1~234~567", fmt::format(loc, "{:L}", 1234567));
EXPECT_EQ("-1~234~567", fmt::format(loc, "{:L}", -1234567));
fmt::format_arg_store<fmt::format_context, int> as{1234567};
EXPECT_EQ("1~234~567", fmt::vformat(loc, "{:L}", fmt::format_args(as)));
std::string s;
fmt::format_to(std::back_inserter(s), loc, "{:L}", 1234567);
EXPECT_EQ("1~234~567", s);
std::locale no_grouping_loc(std::locale(), new no_grouping<char>());
EXPECT_EQ("1234567", fmt::format(no_grouping_loc, "{:L}", 1234567));
std::locale special_grouping_loc(std::locale(), new special_grouping<char>());
EXPECT_EQ("1,23,45,678", fmt::format(special_grouping_loc, "{:L}", 12345678));
EXPECT_EQ("12,345", fmt::format(special_grouping_loc, "{:L}", 12345));
std::locale small_grouping_loc(std::locale(), new small_grouping<char>());
EXPECT_EQ("4,2,9,4,9,6,7,2,9,5",
fmt::format(small_grouping_loc, "{:L}", max_value<uint32_t>()));
}
TEST(LocaleTest, FormatDefaultAlign) {
std::locale special_grouping_loc(std::locale(), new special_grouping<char>());
EXPECT_EQ(" 12,345", fmt::format(special_grouping_loc, "{:8L}", 12345));
}
TEST(LocaleTest, WFormat) {
std::locale loc(std::locale(), new numpunct<wchar_t>());
EXPECT_EQ(L"1234567", fmt::format(std::locale(), L"{:L}", 1234567));
EXPECT_EQ(L"1~234~567", fmt::format(loc, L"{:L}", 1234567));
fmt::format_arg_store<fmt::wformat_context, int> as{1234567};
EXPECT_EQ(L"1~234~567", fmt::vformat(loc, L"{:L}", fmt::wformat_args(as)));
EXPECT_EQ(L"1234567", fmt::format(std::locale("C"), L"{:L}", 1234567));
std::locale no_grouping_loc(std::locale(), new no_grouping<wchar_t>());
EXPECT_EQ(L"1234567", fmt::format(no_grouping_loc, L"{:L}", 1234567));
std::locale special_grouping_loc(std::locale(),
new special_grouping<wchar_t>());
EXPECT_EQ(L"1,23,45,678",
fmt::format(special_grouping_loc, L"{:L}", 12345678));
std::locale small_grouping_loc(std::locale(), new small_grouping<wchar_t>());
EXPECT_EQ(L"4,2,9,4,9,6,7,2,9,5",
fmt::format(small_grouping_loc, L"{:L}", max_value<uint32_t>()));
}
TEST(LocaleTest, DoubleFormatter) {
auto loc = std::locale(std::locale(), new special_grouping<char>());
auto f = fmt::formatter<int>();
auto parse_ctx = fmt::format_parse_context("L");
f.parse(parse_ctx);
char buf[10] = {};
fmt::basic_format_context<char*, char> format_ctx(
buf, {}, fmt::detail::locale_ref(loc));
*f.format(12345, format_ctx) = 0;
EXPECT_STREQ("12,345", buf);
}
#endif // FMT_STATIC_THOUSANDS_SEPARATOR

@@ -1,12 +0,0 @@
sudo: true
dist: bionic
language: cpp
compiler:
- gcc
- clang
addons:
apt:
packages:
- nasm
- yasm
- g++-multilib
- tcsh
script:
- make test

@@ -1,427 +0,0 @@
<?xml version="1.0" encoding="shift_jis"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="9.00"
Name="bf"
ProjectGUID="{654BD79B-59D3-4B10-BBAA-158BAB272828}"
TargetFrameworkVersion="0"
>
<Platforms>
<Platform
Name="Win32"
/>
<Platform
Name="x64"
/>
</Platforms>
<ToolFiles>
</ToolFiles>
<Configurations>
<Configuration
Name="Release|Win32"
OutputDirectory=".\Release"
IntermediateDirectory=".\Release"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\Release/bf.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\Release/bf.pch"
AssemblerListingLocation=".\Release/"
ObjectFile=".\Release/"
ProgramDataBaseFileName=".\Release/"
WarningLevel="4"
SuppressStartupBanner="true"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Release/bf.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
ProgramDatabaseFile=".\Release/bf.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Release/bf.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Debug|Win32"
OutputDirectory=".\Debug"
IntermediateDirectory=".\Debug"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\Debug/bf.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="1"
PrecompiledHeaderFile=".\Debug/bf.pch"
AssemblerListingLocation=".\Debug/"
ObjectFile=".\Debug/"
ProgramDataBaseFileName=".\Debug/"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="4"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Debug/bf.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
GenerateDebugInformation="true"
ProgramDatabaseFile=".\Debug/bf.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Debug/bf.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|x64"
OutputDirectory="$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\Release/bf.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\Release/bf.pch"
AssemblerListingLocation=".\Release/"
ObjectFile=".\Release/"
ProgramDataBaseFileName=".\Release/"
WarningLevel="4"
SuppressStartupBanner="true"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Release/bf.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
ProgramDatabaseFile=".\Release/bf.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Release/bf.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Debug|x64"
OutputDirectory="$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\Debug/bf.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="1"
PrecompiledHeaderFile=".\Debug/bf.pch"
AssemblerListingLocation=".\Debug/"
ObjectFile=".\Debug/"
ProgramDataBaseFileName=".\Debug/"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Debug/bf.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
GenerateDebugInformation="true"
ProgramDatabaseFile=".\Debug/bf.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Debug/bf.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<File
RelativePath="bf.cpp"
>
<FileConfiguration
Name="Release|Win32"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Debug|Win32"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Release|x64"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Debug|x64"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
</File>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

@@ -1,423 +0,0 @@
<?xml version="1.0" encoding="shift_jis"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="9.00"
Name="calc"
ProjectGUID="{5FDDFAA6-B947-491D-A17E-BBD863846579}"
TargetFrameworkVersion="0"
>
<Platforms>
<Platform
Name="Win32"
/>
<Platform
Name="x64"
/>
</Platforms>
<ToolFiles>
</ToolFiles>
<Configurations>
<Configuration
Name="Release|Win32"
OutputDirectory=".\Release"
IntermediateDirectory=".\Release"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\Release/calc.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\Release/calc.pch"
AssemblerListingLocation=".\Release/"
ObjectFile=".\Release/"
ProgramDataBaseFileName=".\Release/"
WarningLevel="4"
SuppressStartupBanner="true"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Release/calc.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
ProgramDatabaseFile=".\Release/calc.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Release/calc.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Debug|Win32"
OutputDirectory=".\Debug"
IntermediateDirectory=".\Debug"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\Debug/calc.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="1"
PrecompiledHeaderFile=".\Debug/calc.pch"
AssemblerListingLocation=".\Debug/"
ObjectFile=".\Debug/"
ProgramDataBaseFileName=".\Debug/"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="4"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Debug/calc.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
GenerateDebugInformation="true"
ProgramDatabaseFile=".\Debug/calc.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Debug/calc.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|x64"
OutputDirectory="$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\Release/calc.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\Release/calc.pch"
AssemblerListingLocation=".\Release/"
ObjectFile=".\Release/"
ProgramDataBaseFileName=".\Release/"
WarningLevel="4"
SuppressStartupBanner="true"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Release/calc.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
ProgramDatabaseFile=".\Release/calc.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Release/calc.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Debug|x64"
OutputDirectory="$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\Debug/calc.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="1"
PrecompiledHeaderFile=".\Debug/calc.pch"
AssemblerListingLocation=".\Debug/"
ObjectFile=".\Debug/"
ProgramDataBaseFileName=".\Debug/"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Debug/calc.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
GenerateDebugInformation="true"
ProgramDatabaseFile=".\Debug/calc.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Debug/calc.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<File
RelativePath="calc.cpp"
>
<FileConfiguration
Name="Release|Win32"
>
<Tool
Name="VCCLCompilerTool"
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Debug|Win32"
>
<Tool
Name="VCCLCompilerTool"
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Release|x64"
>
<Tool
Name="VCCLCompilerTool"
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Debug|x64"
>
<Tool
Name="VCCLCompilerTool"
PreprocessorDefinitions=""
/>
</FileConfiguration>
</File>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

View File

@@ -1,427 +0,0 @@
<?xml version="1.0" encoding="shift_jis"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="9.00"
Name="quantize"
ProjectGUID="{D06753BF-E1F3-4578-9B18-08673327F77C}"
TargetFrameworkVersion="0"
>
<Platforms>
<Platform
Name="Win32"
/>
<Platform
Name="x64"
/>
</Platforms>
<ToolFiles>
</ToolFiles>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory=".\Debug"
IntermediateDirectory=".\Debug"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\Debug/quantize.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="1"
PrecompiledHeaderFile=".\Debug/quantize.pch"
AssemblerListingLocation=".\Debug/"
ObjectFile=".\Debug/"
ProgramDataBaseFileName=".\Debug/"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="4"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Debug/quantize.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
GenerateDebugInformation="true"
ProgramDatabaseFile=".\Debug/quantize.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Debug/quantize.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory=".\Release"
IntermediateDirectory=".\Release"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\Release/quantize.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\Release/quantize.pch"
AssemblerListingLocation=".\Release/"
ObjectFile=".\Release/"
ProgramDataBaseFileName=".\Release/"
WarningLevel="4"
SuppressStartupBanner="true"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Release/quantize.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
ProgramDatabaseFile=".\Release/quantize.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Release/quantize.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Debug|x64"
OutputDirectory="$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\Debug/quantize.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="1"
PrecompiledHeaderFile=".\Debug/quantize.pch"
AssemblerListingLocation=".\Debug/"
ObjectFile=".\Debug/"
ProgramDataBaseFileName=".\Debug/"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Debug/quantize.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
GenerateDebugInformation="true"
ProgramDatabaseFile=".\Debug/quantize.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Debug/quantize.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|x64"
OutputDirectory="$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\Release/quantize.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\Release/quantize.pch"
AssemblerListingLocation=".\Release/"
ObjectFile=".\Release/"
ProgramDataBaseFileName=".\Release/"
WarningLevel="4"
SuppressStartupBanner="true"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Release/quantize.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
ProgramDatabaseFile=".\Release/quantize.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Release/quantize.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<File
RelativePath="quantize.cpp"
>
<FileConfiguration
Name="Debug|Win32"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Release|Win32"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Debug|x64"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Release|x64"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
</File>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

View File

@@ -1,427 +0,0 @@
<?xml version="1.0" encoding="shift_jis"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="9.00"
Name="test0"
ProjectGUID="{1CDE4D2A-BE3A-4B9B-B28F-524A23084A8E}"
TargetFrameworkVersion="0"
>
<Platforms>
<Platform
Name="Win32"
/>
<Platform
Name="x64"
/>
</Platforms>
<ToolFiles>
</ToolFiles>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory=".\Debug"
IntermediateDirectory=".\Debug"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\Debug/test0.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="1"
PrecompiledHeaderFile=".\Debug/test0.pch"
AssemblerListingLocation=".\Debug/"
ObjectFile=".\Debug/"
ProgramDataBaseFileName=".\Debug/"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="4"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Debug/test0.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
GenerateDebugInformation="true"
ProgramDatabaseFile=".\Debug/test0.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Debug/test0.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory=".\Release"
IntermediateDirectory=".\Release"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\Release/test0.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\Release/test0.pch"
AssemblerListingLocation=".\Release/"
ObjectFile=".\Release/"
ProgramDataBaseFileName=".\Release/"
WarningLevel="4"
SuppressStartupBanner="true"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Release/test0.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
ProgramDatabaseFile=".\Release/test0.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Release/test0.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Debug|x64"
OutputDirectory="$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\Debug/test0.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="1"
PrecompiledHeaderFile=".\Debug/test0.pch"
AssemblerListingLocation=".\Debug/"
ObjectFile=".\Debug/"
ProgramDataBaseFileName=".\Debug/"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Debug/test0.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
GenerateDebugInformation="true"
ProgramDatabaseFile=".\Debug/test0.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Debug/test0.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|x64"
OutputDirectory="$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\Release/test0.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\Release/test0.pch"
AssemblerListingLocation=".\Release/"
ObjectFile=".\Release/"
ProgramDataBaseFileName=".\Release/"
WarningLevel="4"
SuppressStartupBanner="true"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Release/test0.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
ProgramDatabaseFile=".\Release/test0.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Release/test0.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<File
RelativePath="test0.cpp"
>
<FileConfiguration
Name="Debug|Win32"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Release|Win32"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Debug|x64"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Release|x64"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
</File>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

View File

@@ -1,427 +0,0 @@
<?xml version="1.0" encoding="shift_jis"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="9.00"
Name="test_util"
ProjectGUID="{CFC9B272-FDA1-4C87-B4EF-CDCA9B57F4DD}"
TargetFrameworkVersion="0"
>
<Platforms>
<Platform
Name="Win32"
/>
<Platform
Name="x64"
/>
</Platforms>
<ToolFiles>
</ToolFiles>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory=".\Debug"
IntermediateDirectory=".\Debug"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\Debug/test_util.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="1"
PrecompiledHeaderFile=".\Debug/test_util.pch"
AssemblerListingLocation=".\Debug/"
ObjectFile=".\Debug/"
ProgramDataBaseFileName=".\Debug/"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="4"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Debug/test_util.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
GenerateDebugInformation="true"
ProgramDatabaseFile=".\Debug/test_util.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Debug/test_util.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Debug|x64"
OutputDirectory="$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\Debug/test_util.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="1"
PrecompiledHeaderFile=".\Debug/test_util.pch"
AssemblerListingLocation=".\Debug/"
ObjectFile=".\Debug/"
ProgramDataBaseFileName=".\Debug/"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Debug/test_util.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
GenerateDebugInformation="true"
ProgramDatabaseFile=".\Debug/test_util.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Debug/test_util.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory=".\Release"
IntermediateDirectory=".\Release"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\Release/test_util.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\Release/test_util.pch"
AssemblerListingLocation=".\Release/"
ObjectFile=".\Release/"
ProgramDataBaseFileName=".\Release/"
WarningLevel="4"
SuppressStartupBanner="true"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Release/test_util.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
ProgramDatabaseFile=".\Release/test_util.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Release/test_util.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|x64"
OutputDirectory="$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\Release/test_util.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\Release/test_util.pch"
AssemblerListingLocation=".\Release/"
ObjectFile=".\Release/"
ProgramDataBaseFileName=".\Release/"
WarningLevel="4"
SuppressStartupBanner="true"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Release/test_util.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
ProgramDatabaseFile=".\Release/test_util.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Release/test_util.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<File
RelativePath="test_util.cpp"
>
<FileConfiguration
Name="Debug|Win32"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Debug|x64"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Release|Win32"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Release|x64"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
</File>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

View File

@@ -1,427 +0,0 @@
<?xml version="1.0" encoding="shift_jis"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="9.00"
Name="toyvm"
ProjectGUID="{2E41C7AF-39FF-454C-B081-37445378DCB3}"
TargetFrameworkVersion="0"
>
<Platforms>
<Platform
Name="Win32"
/>
<Platform
Name="x64"
/>
</Platforms>
<ToolFiles>
</ToolFiles>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory=".\Debug"
IntermediateDirectory=".\Debug"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\Debug/toyvm.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="1"
PrecompiledHeaderFile=".\Debug/toyvm.pch"
AssemblerListingLocation=".\Debug/"
ObjectFile=".\Debug/"
ProgramDataBaseFileName=".\Debug/"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="4"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Debug/toyvm.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
GenerateDebugInformation="true"
ProgramDatabaseFile=".\Debug/toyvm.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Debug/toyvm.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory=".\Release"
IntermediateDirectory=".\Release"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TypeLibraryName=".\Release/toyvm.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\Release/toyvm.pch"
AssemblerListingLocation=".\Release/"
ObjectFile=".\Release/"
ProgramDataBaseFileName=".\Release/"
WarningLevel="4"
SuppressStartupBanner="true"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Release/toyvm.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
ProgramDatabaseFile=".\Release/toyvm.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Release/toyvm.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Debug|x64"
OutputDirectory="$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\Debug/toyvm.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="1"
PrecompiledHeaderFile=".\Debug/toyvm.pch"
AssemblerListingLocation=".\Debug/"
ObjectFile=".\Debug/"
ProgramDataBaseFileName=".\Debug/"
WarningLevel="4"
SuppressStartupBanner="true"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="_DEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Debug/toyvm.exe"
LinkIncremental="2"
SuppressStartupBanner="true"
GenerateDebugInformation="true"
ProgramDatabaseFile=".\Debug/toyvm.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Debug/toyvm.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|x64"
OutputDirectory="$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
InheritedPropertySheets="$(VCInstallDir)VCProjectDefaults\UpgradeFromVC60.vsprops"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="false"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
TypeLibraryName=".\Release/toyvm.tlb"
HeaderFileName=""
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
InlineFunctionExpansion="1"
AdditionalIncludeDirectories="../"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
StringPooling="true"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
PrecompiledHeaderFile=".\Release/toyvm.pch"
AssemblerListingLocation=".\Release/"
ObjectFile=".\Release/"
ProgramDataBaseFileName=".\Release/"
WarningLevel="4"
SuppressStartupBanner="true"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
PreprocessorDefinitions="NDEBUG"
Culture="1041"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
OutputFile=".\Release/toyvm.exe"
LinkIncremental="1"
SuppressStartupBanner="true"
ProgramDatabaseFile=".\Release/toyvm.pdb"
SubSystem="1"
RandomizedBaseAddress="1"
DataExecutionPrevention="0"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
SuppressStartupBanner="true"
OutputFile=".\Release/toyvm.bsc"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<File
RelativePath="toyvm.cpp"
>
<FileConfiguration
Name="Debug|Win32"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Release|Win32"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Debug|x64"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
<FileConfiguration
Name="Release|x64"
>
<Tool
Name="VCCLCompilerTool"
AdditionalIncludeDirectories=""
PreprocessorDefinitions=""
/>
</FileConfiguration>
</File>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

View File

@@ -1,115 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <dynarmic/A32/config.h>
namespace Dynarmic {
namespace A32 {
struct Context;
class Jit final {
public:
explicit Jit(UserConfig conf);
~Jit();
/**
* Runs the emulated CPU.
* Cannot be recursively called.
*/
void Run();
/**
* Steps the emulated CPU.
* Cannot be recursively called.
*/
void Step();
/**
* Clears the code cache of all compiled code.
* Can be called at any time. Halts execution if called within a callback.
*/
void ClearCache();
/**
* Invalidate the code cache at a range of addresses.
* @param start_address The starting address of the range to invalidate.
* @param length The length (in bytes) of the range to invalidate.
*/
void InvalidateCacheRange(std::uint32_t start_address, std::size_t length);
/**
* Reset CPU state to state at startup. Does not clear code cache.
* Cannot be called from a callback.
*/
void Reset();
/**
* Stops execution in Jit::Run.
* Can only be called from a callback.
*/
void HaltExecution();
/**
* HACK:
* Exits execution from a callback; the callback must rewind the stack or
* never return to dynarmic from its current stack.
*/
void ExceptionalExit();
/// HACK: Change processor ID.
void ChangeProcessorID(std::size_t new_processor);
/// View and modify registers.
std::array<std::uint32_t, 16>& Regs();
const std::array<std::uint32_t, 16>& Regs() const;
std::array<std::uint32_t, 64>& ExtRegs();
const std::array<std::uint32_t, 64>& ExtRegs() const;
/// View and modify CPSR.
std::uint32_t Cpsr() const;
void SetCpsr(std::uint32_t value);
/// View and modify FPSCR.
std::uint32_t Fpscr() const;
void SetFpscr(std::uint32_t value);
Context SaveContext() const;
void SaveContext(Context&) const;
void LoadContext(const Context&);
/// Clears exclusive state for this core.
void ClearExclusiveState();
/**
* Returns true if Jit::Run was called but hasn't returned yet.
* i.e.: We're in a callback.
*/
bool IsExecuting() const {
return is_executing;
}
/**
* Debugging: Disassemble all compiled code.
* @return A string containing disassembly of all host machine code produced.
*/
std::string Disassemble() const;
private:
bool is_executing = false;
struct Impl;
std::unique_ptr<Impl> impl;
};
} // namespace A32
} // namespace Dynarmic
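A minimal sketch of driving this interface, assuming env is any user-supplied A32::UserCallbacks implementation (see config.h below); the entry point and CPSR value are illustrative:

#include <dynarmic/A32/a32.h>
#include <dynarmic/A32/config.h>

void RunGuest(Dynarmic::A32::UserCallbacks& env) {
    Dynarmic::A32::UserConfig conf;
    conf.callbacks = &env;        // memory, timing and exception callbacks

    Dynarmic::A32::Jit jit{conf};
    jit.Regs()[15] = 0x00000000;  // r15 (PC): guest entry point (illustrative)
    jit.SetCpsr(0x000001d0);      // User mode, ARM state, A/I/F masked

    jit.Run();                    // returns once HaltExecution() is called from a callback
}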

View File

@@ -1,23 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
namespace Dynarmic {
namespace A32 {
enum class ArchVersion {
v3,
v4,
v4T,
v5TE,
v6K,
v6T2,
v7,
v8,
};
} // namespace A32
} // namespace Dynarmic
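A quick sketch of selecting a version, via the UserConfig struct from config.h below (v8 is the default):

Dynarmic::A32::UserConfig conf;
conf.arch_version = Dynarmic::A32::ArchVersion::v6K;  // e.g. an ARM11-class core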

View File

@@ -1,210 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <dynarmic/A32/arch_version.h>
#include <dynarmic/optimization_flags.h>
namespace Dynarmic {
class ExclusiveMonitor;
} // namespace Dynarmic
namespace Dynarmic {
namespace A32 {
using VAddr = std::uint32_t;
class Coprocessor;
enum class Exception {
/// An UndefinedFault occurred due to executing an instruction with an unallocated encoding
UndefinedInstruction,
/// An unpredictable instruction is to be executed. Implementation-defined behaviour should now happen.
/// This behaviour is up to the user of this library to define.
UnpredictableInstruction,
/// A decode error occurred when decoding this instruction. This should never happen.
DecodeError,
/// A SEV instruction was executed. The event register of all PEs should be set. (Hint instruction.)
SendEvent,
/// A SEVL instruction was executed. The event register of the current PE should be set. (Hint instruction.)
SendEventLocal,
/// A WFI instruction was executed. You may now enter a low-power state. (Hint instruction.)
WaitForInterrupt,
/// A WFE instruction was executed. You may now enter a low-power state if the event register is clear. (Hint instruction.)
WaitForEvent,
/// A YIELD instruction was executed. (Hint instruction.)
Yield,
/// A BKPT instruction was executed.
Breakpoint,
/// A PLD instruction was executed. (Hint instruction.)
PreloadData,
/// A PLDW instruction was executed. (Hint instruction.)
PreloadDataWithIntentToWrite,
/// A PLI instruction was executed. (Hint instruction.)
PreloadInstruction,
};
/// These function pointers may be inserted into compiled code.
struct UserCallbacks {
virtual ~UserCallbacks() = default;
// All reads through this callback are 4-byte aligned.
// Memory must be interpreted as little endian.
virtual std::uint32_t MemoryReadCode(VAddr vaddr) { return MemoryRead32(vaddr); }
// Reads through these callbacks may not be aligned.
// Memory must be interpreted as if ENDIANSTATE == 0; endianness will be corrected by the JIT.
virtual std::uint8_t MemoryRead8(VAddr vaddr) = 0;
virtual std::uint16_t MemoryRead16(VAddr vaddr) = 0;
virtual std::uint32_t MemoryRead32(VAddr vaddr) = 0;
virtual std::uint64_t MemoryRead64(VAddr vaddr) = 0;
// Writes through these callbacks may not be aligned.
virtual void MemoryWrite8(VAddr vaddr, std::uint8_t value) = 0;
virtual void MemoryWrite16(VAddr vaddr, std::uint16_t value) = 0;
virtual void MemoryWrite32(VAddr vaddr, std::uint32_t value) = 0;
virtual void MemoryWrite64(VAddr vaddr, std::uint64_t value) = 0;
// Writes through these callbacks may not be aligned.
virtual bool MemoryWriteExclusive8(VAddr /*vaddr*/, std::uint8_t /*value*/, std::uint8_t /*expected*/) { return false; }
virtual bool MemoryWriteExclusive16(VAddr /*vaddr*/, std::uint16_t /*value*/, std::uint16_t /*expected*/) { return false; }
virtual bool MemoryWriteExclusive32(VAddr /*vaddr*/, std::uint32_t /*value*/, std::uint32_t /*expected*/) { return false; }
virtual bool MemoryWriteExclusive64(VAddr /*vaddr*/, std::uint64_t /*value*/, std::uint64_t /*expected*/) { return false; }
// If this callback returns true, the JIT will assume MemoryRead* callbacks will always
// return the same value at any point in time for this vaddr. The JIT may use this information
// in optimizations.
// A conservative implementation that always returns false is safe.
virtual bool IsReadOnlyMemory(VAddr /* vaddr */) { return false; }
/// The interpreter must execute exactly num_instructions starting from PC.
virtual void InterpreterFallback(VAddr pc, size_t num_instructions) = 0;
// This callback is called whenever a SVC instruction is executed.
virtual void CallSVC(std::uint32_t swi) = 0;
virtual void ExceptionRaised(VAddr pc, Exception exception) = 0;
virtual void InstructionSynchronizationBarrierRaised() {}
// Timing-related callbacks
// Indicates that 'ticks' ticks have passed.
virtual void AddTicks(std::uint64_t ticks) = 0;
// How many more ticks am I allowed to execute?
virtual std::uint64_t GetTicksRemaining() = 0;
};
struct UserConfig {
UserCallbacks* callbacks;
size_t processor_id = 0;
ExclusiveMonitor* global_monitor = nullptr;
/// Select the architecture version to use.
/// There are minor behavioural differences between versions.
ArchVersion arch_version = ArchVersion::v8;
/// This selects optimizations that can't otherwise be disabled by setting other
/// configuration options. This includes:
/// - IR optimizations
/// - Block linking optimizations
/// - RSB optimizations
/// This is intended to be used for debugging.
OptimizationFlag optimizations = all_safe_optimizations;
bool HasOptimization(OptimizationFlag f) const {
if (!unsafe_optimizations) {
f &= all_safe_optimizations;
}
return (f & optimizations) != no_optimizations;
}
/// This enables unsafe optimizations that reduce emulation accuracy in favour of speed.
/// For safety, in order to enable unsafe optimizations you have to set BOTH this flag
/// AND the appropriate flag bits above.
/// The preferred and tested mode for this library is with unsafe optimizations disabled.
bool unsafe_optimizations = false;
// Page Table
// The page table is used for faster memory access. If an entry in the table is nullptr,
// the JIT will fall back to calling the MemoryRead*/MemoryWrite* callbacks.
static constexpr std::size_t PAGE_BITS = 12;
static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS);
std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>* page_table = nullptr;
/// Determines if the pointer in the page_table shall be offset locally or globally.
/// 'false' will access page_table[addr >> bits][addr & mask]
/// 'true' will access page_table[addr >> bits][addr]
/// Note: page_table[addr >> bits] will still be checked to verify active pages.
///       So there might be wrongly faulted pages which map to nullptr.
/// This can be avoided by carefully allocating the memory region.
bool absolute_offset_page_table = false;
/// Masks out the first N bits in host pointers from the page table.
/// The intention behind this is to allow users of Dynarmic to pack attributes in the
/// same integer and update the pointer attribute pair atomically.
/// If the configured value is 3, all pointers will be forcefully aligned to 8 bytes.
int page_table_pointer_mask_bits = 0;
/// Determines if we should detect memory accesses via page_table that are
/// misaligned. Accesses that straddle page boundaries will fall back to the relevant
/// memory callback.
/// This value should be the required access sizes this applies to ORed together.
/// To detect any access, use: 8 | 16 | 32 | 64.
std::uint8_t detect_misaligned_access_via_page_table = 0;
/// Determines if the above option only triggers when the misalignment straddles a
/// page boundary.
bool only_detect_misalignment_via_page_table_on_page_boundary = false;
// Fastmem Pointer
// This should point to the beginning of a 4GB address space which is arranged just like
// what you wish for emulated memory to be. If the host page faults on an address, the JIT
// will fall back to calling the MemoryRead*/MemoryWrite* callbacks.
void* fastmem_pointer = nullptr;
/// Determines if instructions that page-fault should cause recompilation of that block
/// with fastmem disabled.
bool recompile_on_fastmem_failure = true;
// Coprocessors
std::array<std::shared_ptr<Coprocessor>, 16> coprocessors{};
/// When set to true, UserCallbacks::InstructionSynchronizationBarrierRaised will be
/// called when an ISB instruction is executed.
/// When set to false, ISB will be treated as a NOP instruction.
bool hook_isb = false;
/// When enabled, hint instructions cause ExceptionRaised to be called with the
/// appropriate argument.
bool hook_hint_instructions = false;
/// This option relates to translation. Generally when we run into an unpredictable
/// instruction the ExceptionRaised callback is called. If this is true, we define
/// definite behaviour for some unpredictable instructions.
bool define_unpredictable_behaviour = false;
/// HACK:
/// This tells the translator a wall clock will be used, thus allowing it
/// to avoid writing certain unnecessary code only needed for cycle timers.
bool wall_clock_cntpct = false;
/// This option relates to the CPSR.E flag. Enabling this option disables modification
/// of CPSR.E by the emulated program, forcing it to 0.
/// NOTE: Calling Jit::SetCpsr with CPSR.E=1 while this option is enabled may result
/// in unusual behavior.
bool always_little_endian = false;
// Minimum size is about 8MiB. Maximum size is about 2GiB, as it is limited by
// the maximum length of an x64 jump.
size_t code_cache_size = 256 * 1024 * 1024; // bytes
// Determines the relative size of the near and far code caches. Must be smaller than
// code_cache_size.
size_t far_code_offset = 200 * 1024 * 1024; // bytes
};
} // namespace A32
} // namespace Dynarmic
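To make the callback contract concrete, here is a minimal sketch of a UserCallbacks implementation backed by a flat 64 KiB buffer. All names are hypothetical, bounds checking is elided, and the exclusive-write and IsReadOnlyMemory defaults are left untouched:

#include <array>
#include <cstring>

#include <dynarmic/A32/config.h>

class FlatMemoryEnv final : public Dynarmic::A32::UserCallbacks {
public:
    std::array<std::uint8_t, 64 * 1024> memory{};
    std::uint64_t ticks_left = 0;

    template<typename T>
    T Read(Dynarmic::A32::VAddr vaddr) {
        T value{};
        std::memcpy(&value, &memory[vaddr], sizeof(T));  // guest memory is little endian
        return value;
    }
    template<typename T>
    void Write(Dynarmic::A32::VAddr vaddr, T value) {
        std::memcpy(&memory[vaddr], &value, sizeof(T));
    }

    std::uint8_t MemoryRead8(Dynarmic::A32::VAddr v) override { return Read<std::uint8_t>(v); }
    std::uint16_t MemoryRead16(Dynarmic::A32::VAddr v) override { return Read<std::uint16_t>(v); }
    std::uint32_t MemoryRead32(Dynarmic::A32::VAddr v) override { return Read<std::uint32_t>(v); }
    std::uint64_t MemoryRead64(Dynarmic::A32::VAddr v) override { return Read<std::uint64_t>(v); }

    void MemoryWrite8(Dynarmic::A32::VAddr v, std::uint8_t x) override { Write(v, x); }
    void MemoryWrite16(Dynarmic::A32::VAddr v, std::uint16_t x) override { Write(v, x); }
    void MemoryWrite32(Dynarmic::A32::VAddr v, std::uint32_t x) override { Write(v, x); }
    void MemoryWrite64(Dynarmic::A32::VAddr v, std::uint64_t x) override { Write(v, x); }

    void InterpreterFallback(Dynarmic::A32::VAddr, std::size_t) override {}  // not handled in this sketch
    void CallSVC(std::uint32_t) override {}                                  // guest syscalls ignored here
    void ExceptionRaised(Dynarmic::A32::VAddr, Dynarmic::A32::Exception) override {}

    // Tick accounting: the JIT reports progress via AddTicks and asks
    // GetTicksRemaining for its remaining budget.
    void AddTicks(std::uint64_t ticks) override {
        ticks_left = ticks >= ticks_left ? 0 : ticks_left - ticks;
    }
    std::uint64_t GetTicksRemaining() override { return ticks_left; }
};

A Jit constructed over this environment runs until the tick budget in ticks_left is exhausted or HaltExecution() is called from one of the callbacks.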

View File

@@ -1,45 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <cstdint>
#include <memory>
namespace Dynarmic {
namespace A32 {
struct Context {
public:
Context();
~Context();
Context(const Context&);
Context(Context&&) noexcept;
Context& operator=(const Context&);
Context& operator=(Context&&) noexcept;
/// View and modify registers.
std::array<std::uint32_t, 16>& Regs();
const std::array<std::uint32_t, 16>& Regs() const;
std::array<std::uint32_t, 64>& ExtRegs();
const std::array<std::uint32_t, 64>& ExtRegs() const;
/// View and modify CPSR.
std::uint32_t Cpsr() const;
void SetCpsr(std::uint32_t value);
/// View and modify FPSCR.
std::uint32_t Fpscr() const;
void SetFpscr(std::uint32_t value);
private:
friend class Jit;
struct Impl;
std::unique_ptr<Impl> impl;
};
} // namespace A32
} // namespace Dynarmic
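A sketch of using Context for cooperative guest-thread switching; jit is an existing A32::Jit and next a hypothetical Context saved on an earlier switch:

Dynarmic::A32::Context current = jit.SaveContext();  // snapshot the running thread
jit.LoadContext(next);                               // install the next thread's state
jit.Run();                                           // execute until halted
jit.SaveContext(next);                               // write its updated state back
jit.LoadContext(current);                            // resume the original thread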

View File

@@ -1,110 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstdint>
#include <optional>
#include <variant>
#include <dynarmic/A32/coprocessor_util.h>
namespace Dynarmic {
namespace A32 {
class Jit;
class Coprocessor {
public:
virtual ~Coprocessor() = default;
struct Callback {
/**
* @param jit CPU state
* @param user_arg Set to Callback::user_arg at runtime
* @param arg0 Purpose of this argument depends on type of callback.
* @param arg1 Purpose of this argument depends on type of callback.
* @return Purpose of return value depends on type of callback.
*/
std::uint64_t (*function)(Jit* jit, void* user_arg, std::uint32_t arg0, std::uint32_t arg1);
/// If std::nullopt, the function will be called with a user_arg parameter containing garbage.
std::optional<void*> user_arg;
};
/**
* std::monostate: coprocessor exception will be compiled
* Callback: a call to the Callback will be compiled
* std::uint32_t*: a write/read to that memory address will be compiled
*/
using CallbackOrAccessOneWord = std::variant<std::monostate, Callback, std::uint32_t*>;
/**
* std::monostate: coprocessor exception will be compiled
* Callback: a call to the Callback will be compiled
* std::array<std::uint32_t*, 2>: a write/read to those memory addresses will be compiled
*/
using CallbackOrAccessTwoWords = std::variant<std::monostate, Callback, std::array<std::uint32_t*, 2>>;
/**
* Called when compiling CDP or CDP2 for this coprocessor.
* A return value of std::nullopt will cause a coprocessor exception to be compiled.
* arg0, arg1 and return value of callback are ignored.
*/
virtual std::optional<Callback> CompileInternalOperation(bool two, unsigned opc1, CoprocReg CRd, CoprocReg CRn, CoprocReg CRm, unsigned opc2) = 0;
/**
* Called when compiling MCR or MCR2 for this coprocessor.
* A return value of std::monostate will cause a coprocessor exception to be compiled.
* arg0 of the callback will contain the word sent to the coprocessor.
* arg1 and return value of the callback are ignored.
*/
virtual CallbackOrAccessOneWord CompileSendOneWord(bool two, unsigned opc1, CoprocReg CRn, CoprocReg CRm, unsigned opc2) = 0;
/**
* Called when compiling MCRR or MCRR2 for this coprocessor.
* A return value of std::monostate will cause a coprocessor exception to be compiled.
* arg0 and arg1 of the callback will contain the words sent to the coprocessor.
* The return value of the callback is ignored.
*/
virtual CallbackOrAccessTwoWords CompileSendTwoWords(bool two, unsigned opc, CoprocReg CRm) = 0;
/**
* Called when compiling MRC or MRC2 for this coprocessor.
* A return value of std::monostate will cause a coprocessor exception to be compiled.
* The return value of the callback should contain the word from the coprocessor.
* The low word of the return value will be stored in Rt.
* arg0 and arg1 of the callback are ignored.
*/
virtual CallbackOrAccessOneWord CompileGetOneWord(bool two, unsigned opc1, CoprocReg CRn, CoprocReg CRm, unsigned opc2) = 0;
/**
* Called when compiling MRRC or MRRC2 for this coprocessor.
* A return value of std::monostate will cause a coprocessor exception to be compiled.
* The return value of the callback should contain the words from the coprocessor.
* The low word of the return value will be stored in Rt.
* The high word of the return value will be stored in Rt2.
* arg0 and arg1 of the callback are ignored.
*/
virtual CallbackOrAccessTwoWords CompileGetTwoWords(bool two, unsigned opc, CoprocReg CRm) = 0;
/**
* Called when compiling LDC or LDC2 for this coprocessor.
* A return value of std::nullopt will cause a coprocessor exception to be compiled.
* arg0 of the callback will contain the start address.
* arg1 and return value of the callback are ignored.
*/
virtual std::optional<Callback> CompileLoadWords(bool two, bool long_transfer, CoprocReg CRd, std::optional<std::uint8_t> option) = 0;
/**
* Called when compiling STC or STC2 for this coprocessor.
* A return value of std::nullopt will cause a coprocessor exception to be compiled.
* arg0 of the callback will contain the start address.
* arg1 and return value of the callback are ignored.
*/
virtual std::optional<Callback> CompileStoreWords(bool two, bool long_transfer, CoprocReg CRd, std::optional<std::uint8_t> option) = 0;
};
} // namespace A32
} // namespace Dynarmic
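A minimal sketch of a Coprocessor implementation (hypothetical): it exposes a single word through c0 as a direct std::uint32_t* access, and compiles a coprocessor exception for everything else via std::monostate / std::nullopt:

#include <dynarmic/A32/coprocessor.h>

using Dynarmic::A32::CoprocReg;

class StubCoprocessor final : public Dynarmic::A32::Coprocessor {
public:
    std::uint32_t reg = 0;

    std::optional<Callback> CompileInternalOperation(bool, unsigned, CoprocReg, CoprocReg, CoprocReg, unsigned) override {
        return std::nullopt;  // CDP/CDP2: compile a coprocessor exception
    }
    CallbackOrAccessOneWord CompileSendOneWord(bool, unsigned, CoprocReg CRn, CoprocReg, unsigned) override {
        if (CRn == CoprocReg::C0)
            return &reg;      // MCR to c0 compiles to a direct store to `reg`
        return std::monostate{};
    }
    CallbackOrAccessTwoWords CompileSendTwoWords(bool, unsigned, CoprocReg) override {
        return std::monostate{};
    }
    CallbackOrAccessOneWord CompileGetOneWord(bool, unsigned, CoprocReg CRn, CoprocReg, unsigned) override {
        if (CRn == CoprocReg::C0)
            return &reg;      // MRC from c0 compiles to a direct load of `reg`
        return std::monostate{};
    }
    CallbackOrAccessTwoWords CompileGetTwoWords(bool, unsigned, CoprocReg) override {
        return std::monostate{};
    }
    std::optional<Callback> CompileLoadWords(bool, bool, CoprocReg, std::optional<std::uint8_t>) override {
        return std::nullopt;  // LDC/LDC2: compile a coprocessor exception
    }
    std::optional<Callback> CompileStoreWords(bool, bool, CoprocReg, std::optional<std::uint8_t>) override {
        return std::nullopt;  // STC/STC2: compile a coprocessor exception
    }
};

It would then be attached through the UserConfig, e.g. conf.coprocessors[15] = std::make_shared<StubCoprocessor>(); to stand in for cp15.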

View File

@@ -1,16 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
namespace Dynarmic {
namespace A32 {
enum class CoprocReg {
C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15
};
} // namespace A32
} // namespace Dynarmic

View File

@@ -1,18 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstdint>
#include <string>
namespace Dynarmic {
namespace A32 {
std::string DisassembleArm(std::uint32_t instruction);
std::string DisassembleThumb16(std::uint16_t instruction);
} // namespace A32
} // namespace Dynarmic
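A small usage sketch; the exact output text depends on the library's formatting:

#include <cstdio>
#include <dynarmic/A32/disassembler.h>

int main() {
    // 0xE0821003 encodes ADD r1, r2, r3 (condition AL).
    std::puts(Dynarmic::A32::DisassembleArm(0xE0821003).c_str());
}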

View File

@@ -1,139 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <dynarmic/A64/config.h>
namespace Dynarmic {
namespace A64 {
struct Context;
class Jit final {
public:
explicit Jit(UserConfig conf);
~Jit();
/**
* Runs the emulated CPU.
* Cannot be recursively called.
*/
void Run();
/**
* Step the emulated CPU for one instruction.
* Cannot be recursively called.
*/
void Step();
/**
* Clears the code cache of all compiled code.
* Can be called at any time. Halts execution if called within a callback.
*/
void ClearCache();
/**
* Invalidate the code cache at a range of addresses.
* @param start_address The starting address of the range to invalidate.
* @param length The length (in bytes) of the range to invalidate.
*/
void InvalidateCacheRange(std::uint64_t start_address, std::size_t length);
/**
* Reset CPU state to state at startup. Does not clear code cache.
* Cannot be called from a callback.
*/
void Reset();
/**
* Stops execution in Jit::Run.
* Can only be called from a callback.
*/
void HaltExecution();
/**
* HACK:
* Exits execution from a callback; the callback must rewind the stack or
* never return to dynarmic from its current stack.
*/
void ExceptionalExit();
/// HACK: Change processor ID.
void ChangeProcessorID(std::size_t new_processor);
/// Read Stack Pointer
std::uint64_t GetSP() const;
/// Modify Stack Pointer
void SetSP(std::uint64_t value);
/// Read Program Counter
std::uint64_t GetPC() const;
/// Modify Program Counter
void SetPC(std::uint64_t value);
/// Read general-purpose register.
std::uint64_t GetRegister(std::size_t index) const;
/// Modify general-purpose register.
void SetRegister(size_t index, std::uint64_t value);
/// Read all general-purpose registers.
std::array<std::uint64_t, 31> GetRegisters() const;
/// Modify all general-purpose registers.
void SetRegisters(const std::array<std::uint64_t, 31>& value);
/// Read floating point and SIMD register.
Vector GetVector(std::size_t index) const;
/// Modify floating point and SIMD register.
void SetVector(std::size_t index, Vector value);
/// Read all floating point and SIMD registers.
std::array<Vector, 32> GetVectors() const;
/// Modify all floating point and SIMD registers.
void SetVectors(const std::array<Vector, 32>& value);
/// View FPCR.
std::uint32_t GetFpcr() const;
/// Modify FPCR.
void SetFpcr(std::uint32_t value);
/// View FPSR.
std::uint32_t GetFpsr() const;
/// Modify FPSR.
void SetFpsr(std::uint32_t value);
/// View PSTATE
std::uint32_t GetPstate() const;
/// Modify PSTATE
void SetPstate(std::uint32_t value);
/// Clears exclusive state for this core.
void ClearExclusiveState();
/**
* Returns true if Jit::Run was called but hasn't returned yet.
* i.e.: We're in a callback.
*/
bool IsExecuting() const;
/**
* Debugging: Disassemble all compiled code.
* @return A string containing disassembly of all host machine code produced.
*/
std::string Disassemble() const;
private:
struct Impl;
std::unique_ptr<Impl> impl;
};
} // namespace A64
} // namespace Dynarmic
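The A64 interface is driven much like the A32 one. A minimal sketch, assuming env is a user-supplied A64::UserCallbacks implementation and that UserConfig carries a callbacks pointer like its A32 counterpart (see config.h below); addresses are illustrative:

#include <cstdint>

#include <dynarmic/A64/a64.h>
#include <dynarmic/A64/config.h>

std::uint64_t CallGuest(Dynarmic::A64::UserCallbacks& env, std::uint64_t entry, std::uint64_t arg) {
    Dynarmic::A64::UserConfig conf;
    conf.callbacks = &env;

    Dynarmic::A64::Jit jit{conf};
    jit.SetSP(0x10000000);      // guest stack top (illustrative)
    jit.SetPC(entry);           // guest entry point
    jit.SetRegister(0, arg);    // X0: first argument

    jit.Run();                  // returns once HaltExecution() is called
    return jit.GetRegister(0);  // X0: return value, per the AArch64 convention
}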

View File

@@ -1,258 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <dynarmic/optimization_flags.h>
namespace Dynarmic {
class ExclusiveMonitor;
} // namespace Dynarmic
namespace Dynarmic {
namespace A64 {
using VAddr = std::uint64_t;
using Vector = std::array<std::uint64_t, 2>;
static_assert(sizeof(Vector) == sizeof(std::uint64_t) * 2, "Vector must be 128 bits in size");
enum class Exception {
/// An UndefinedFault occurred due to executing an instruction with an unallocated encoding
UnallocatedEncoding,
/// An UndefinedFault occurred due to executing an instruction containing a reserved value
ReservedValue,
/// An unpredictable instruction is to be executed. Implementation-defined behaviour should now happen.
/// This behaviour is up to the user of this library to define.
/// Note: Constraints on unpredictable behaviour are specified in the ARMv8 ARM.
UnpredictableInstruction,
/// A WFI instruction was executed. You may now enter a low-power state. (Hint instruction.)
WaitForInterrupt,
/// A WFE instruction was executed. You may now enter a low-power state if the event register is clear. (Hint instruction.)
WaitForEvent,
/// A SEV instruction was executed. The event register of all PEs should be set. (Hint instruction.)
SendEvent,
/// A SEVL instruction was executed. The event register of the current PE should be set. (Hint instruction.)
SendEventLocal,
/// A YIELD instruction was executed. (Hint instruction.)
Yield,
/// A BRK instruction was executed.
Breakpoint,
};
enum class DataCacheOperation {
/// DC CISW
CleanAndInvalidateBySetWay,
/// DC CIVAC
CleanAndInvalidateByVAToPoC,
/// DC CSW
CleanBySetWay,
/// DC CVAC
CleanByVAToPoC,
/// DC CVAU
CleanByVAToPoU,
/// DC CVAP
CleanByVAToPoP,
/// DC ISW
InvalidateBySetWay,
/// DC IVAC
InvalidateByVAToPoC,
/// DC ZVA
ZeroByVA,
};
enum class InstructionCacheOperation {
/// IC IVAU
InvalidateByVAToPoU,
/// IC IALLU
InvalidateAllToPoU,
/// IC IALLUIS
InvalidateAllToPoUInnerSharable
};
struct UserCallbacks {
virtual ~UserCallbacks() = default;
// All reads through this callback are 4-byte aligned.
// Memory must be interpreted as little endian.
virtual std::uint32_t MemoryReadCode(VAddr vaddr) { return MemoryRead32(vaddr); }
// Reads through these callbacks may not be aligned.
virtual std::uint8_t MemoryRead8(VAddr vaddr) = 0;
virtual std::uint16_t MemoryRead16(VAddr vaddr) = 0;
virtual std::uint32_t MemoryRead32(VAddr vaddr) = 0;
virtual std::uint64_t MemoryRead64(VAddr vaddr) = 0;
virtual Vector MemoryRead128(VAddr vaddr) = 0;
// Writes through these callbacks may not be aligned.
virtual void MemoryWrite8(VAddr vaddr, std::uint8_t value) = 0;
virtual void MemoryWrite16(VAddr vaddr, std::uint16_t value) = 0;
virtual void MemoryWrite32(VAddr vaddr, std::uint32_t value) = 0;
virtual void MemoryWrite64(VAddr vaddr, std::uint64_t value) = 0;
virtual void MemoryWrite128(VAddr vaddr, Vector value) = 0;
// Writes through these callbacks may not be aligned.
virtual bool MemoryWriteExclusive8(VAddr /*vaddr*/, std::uint8_t /*value*/, std::uint8_t /*expected*/) { return false; }
virtual bool MemoryWriteExclusive16(VAddr /*vaddr*/, std::uint16_t /*value*/, std::uint16_t /*expected*/) { return false; }
virtual bool MemoryWriteExclusive32(VAddr /*vaddr*/, std::uint32_t /*value*/, std::uint32_t /*expected*/) { return false; }
virtual bool MemoryWriteExclusive64(VAddr /*vaddr*/, std::uint64_t /*value*/, std::uint64_t /*expected*/) { return false; }
virtual bool MemoryWriteExclusive128(VAddr /*vaddr*/, Vector /*value*/, Vector /*expected*/) { return false; }
// If this callback returns true, the JIT will assume MemoryRead* callbacks will always
// return the same value at any point in time for this vaddr. The JIT may use this information
// in optimizations.
// A conservative implementation that always returns false is safe.
virtual bool IsReadOnlyMemory(VAddr /*vaddr*/) { return false; }
/// The interpreter must execute exactly num_instructions instructions starting from pc.
virtual void InterpreterFallback(VAddr pc, size_t num_instructions) = 0;
// This callback is called whenever a SVC instruction is executed.
virtual void CallSVC(std::uint32_t swi) = 0;
virtual void ExceptionRaised(VAddr pc, Exception exception) = 0;
virtual void DataCacheOperationRaised(DataCacheOperation /*op*/, VAddr /*value*/) {}
virtual void InstructionCacheOperationRaised(InstructionCacheOperation /*op*/, VAddr /*value*/) {}
virtual void InstructionSynchronizationBarrierRaised() {}
// Timing-related callbacks
// Indicates that `ticks` ticks have passed.
virtual void AddTicks(std::uint64_t ticks) = 0;
// How many more ticks am I allowed to execute?
virtual std::uint64_t GetTicksRemaining() = 0;
// Get value in the emulated counter-timer physical count register.
virtual std::uint64_t GetCNTPCT() = 0;
};
struct UserConfig {
UserCallbacks* callbacks;
size_t processor_id = 0;
ExclusiveMonitor* global_monitor = nullptr;
/// This selects optimizations that can't otherwise be disabled by setting other
/// configuration options. This includes:
/// - IR optimizations
/// - Block linking optimizations
/// - RSB optimizations
/// This is intended to be used for debugging.
OptimizationFlag optimizations = all_safe_optimizations;
bool HasOptimization(OptimizationFlag f) const {
if (!unsafe_optimizations) {
f &= all_safe_optimizations;
}
return (f & optimizations) != no_optimizations;
}
/// This enables unsafe optimizations that reduce emulation accuracy in favour of speed.
/// For safety, in order to enable unsafe optimizations you have to set BOTH this flag
/// AND the appropriate flag bits above.
/// The preferred and tested mode for this library is with unsafe optimizations disabled.
bool unsafe_optimizations = false;
/// When set to true, UserCallbacks::DataCacheOperationRaised will be called when any
/// data cache instruction is executed. Notably DC ZVA will not implicitly do anything.
/// When set to false, UserCallbacks::DataCacheOperationRaised will never be called.
/// Executing DC ZVA in this mode will result in zeros being written to memory.
bool hook_data_cache_operations = false;
/// When set to true, UserCallbacks::InstructionSynchronizationBarrierRaised will be
/// called when an ISB instruction is executed.
/// When set to false, ISB will be treated as a NOP instruction.
bool hook_isb = false;
/// When set to true, UserCallbacks::ExceptionRaised will be called when any hint
/// instruction is executed.
bool hook_hint_instructions = false;
/// Counter-timer frequency register. The value of the register is not interpreted by
/// dynarmic.
std::uint32_t cntfrq_el0 = 600000000;
/// CTR_EL0<27:24> is log2 of the cache writeback granule in words.
/// CTR_EL0<23:20> is log2 of the exclusives reservation granule in words.
/// CTR_EL0<19:16> is log2 of the smallest data/unified cacheline in words.
/// CTR_EL0<15:14> is the level 1 instruction cache policy.
/// CTR_EL0<3:0> is log2 of the smallest instruction cacheline in words.
std::uint32_t ctr_el0 = 0x8444c004;
/// DCZID_EL0<3:0> is log2 of the block size in words
/// DCZID_EL0<4> is 0 if the DC ZVA instruction is permitted.
std::uint32_t dczid_el0 = 4;
/// Pointer to where TPIDRRO_EL0 is stored. This pointer will be inserted into
/// emitted code.
const std::uint64_t* tpidrro_el0 = nullptr;
/// Pointer to where TPIDR_EL0 is stored. This pointer will be inserted into
/// emitted code.
const std::uint64_t* tpidr_el0 = nullptr;
/// Pointer to the page table which we can use for direct page table access.
/// If an entry in page_table is null, the relevant memory callback will be called.
/// If page_table is nullptr, all memory accesses hit the memory callbacks.
void** page_table = nullptr;
/// Declares how many valid address bits are there in virtual addresses.
/// Determines the size of page_table. Valid values are between 12 and 64 inclusive.
/// This is only used if page_table is not nullptr.
size_t page_table_address_space_bits = 36;
/// Masks out the first N bits in host pointers from the page table.
/// The intention behind this is to allow users of Dynarmic to pack attributes in the
/// same integer and update the pointer attribute pair atomically.
/// If the configured value is 3, all pointers will be forcefully aligned to 8 bytes.
int page_table_pointer_mask_bits = 0;
/// Determines what happens if the guest accesses an entry that is off the end of the
/// page table. If true, Dynarmic will silently mirror page_table's address space. If
/// false, accessing memory outside of page_table bounds will result in a call to the
/// relevant memory callback.
/// This is only used if page_table is not nullptr.
bool silently_mirror_page_table = true;
/// Determines if the pointer in the page_table is offset locally or globally.
/// 'false' will access page_table[addr >> bits][addr & mask]
/// 'true' will access page_table[addr >> bits][addr]
/// Note: page_table[addr >> bits] will still be checked to verify active pages.
/// So there might be wrongly faulted pages which map to nullptr.
/// This can be avoided by carefully allocating the memory region.
bool absolute_offset_page_table = false;
/// Determines if we should detect memory accesses via page_table that are
/// misaligned. Accesses that straddle page boundaries will fall back to the
/// relevant memory callback.
/// This value should be the required access sizes this applies to ORed together.
/// To detect any access, use: 8 | 16 | 32 | 64 | 128.
std::uint8_t detect_misaligned_access_via_page_table = 0;
/// Determines if the above option only triggers when the misalignment straddles a
/// page boundary.
bool only_detect_misalignment_via_page_table_on_page_boundary = false;
/// This option relates to translation. Generally when we run into an unpredictable
/// instruction the ExceptionRaised callback is called. If this is true, we define
/// definite behaviour for some unpredictable instructions.
bool define_unpredictable_behaviour = false;
/// HACK:
/// This tells the translator a wall clock will be used, thus allowing it
/// to avoid writing certain unnecessary code only needed for cycle timers.
bool wall_clock_cntpct = false;
// Determines whether AddTicks and GetTicksRemaining are called.
// If false, execution will continue until soon after Jit::HaltExecution is called.
// bool enable_ticks = true; // TODO
// Minimum size is about 8MiB. Maximum size is about 2GiB. Maximum size is limited by
// the maximum length of an x64 jump.
size_t code_cache_size = 256 * 1024 * 1024; // bytes
// Determines the relative size of the near and far code caches. Must be smaller than
// code_cache_size.
size_t far_code_offset = 200 * 1024 * 1024; // bytes
};
} // namespace A64
} // namespace Dynarmic
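A minimal sketch of the pure-virtual surface above, assuming guest memory is a small flat buffer with no bounds checking; every name here (FlatCallbacks, mem, ticks_left) is illustrative, not part of this commit:
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <vector>
#include <dynarmic/A64/config.h>
struct FlatCallbacks final : Dynarmic::A64::UserCallbacks {
    using VAddr = Dynarmic::A64::VAddr;
    using Vector = Dynarmic::A64::Vector;
    std::vector<std::uint8_t> mem = std::vector<std::uint8_t>(64 * 1024);
    std::uint64_t ticks_left = 0;
    template<typename T> T Rd(VAddr v) { T t{}; std::memcpy(&t, &mem[v], sizeof t); return t; }
    template<typename T> void Wr(VAddr v, T t) { std::memcpy(&mem[v], &t, sizeof t); }
    std::uint8_t MemoryRead8(VAddr v) override { return Rd<std::uint8_t>(v); }
    std::uint16_t MemoryRead16(VAddr v) override { return Rd<std::uint16_t>(v); }
    std::uint32_t MemoryRead32(VAddr v) override { return Rd<std::uint32_t>(v); }
    std::uint64_t MemoryRead64(VAddr v) override { return Rd<std::uint64_t>(v); }
    Vector MemoryRead128(VAddr v) override { return {Rd<std::uint64_t>(v), Rd<std::uint64_t>(v + 8)}; }
    void MemoryWrite8(VAddr v, std::uint8_t x) override { Wr(v, x); }
    void MemoryWrite16(VAddr v, std::uint16_t x) override { Wr(v, x); }
    void MemoryWrite32(VAddr v, std::uint32_t x) override { Wr(v, x); }
    void MemoryWrite64(VAddr v, std::uint64_t x) override { Wr(v, x); }
    void MemoryWrite128(VAddr v, Vector x) override { Wr(v, x[0]); Wr(v + 8, x[1]); }
    void InterpreterFallback(VAddr, std::size_t) override { std::abort(); }  // not exercised in this sketch
    void CallSVC(std::uint32_t) override {}                                  // ignore guest syscalls
    void ExceptionRaised(VAddr, Dynarmic::A64::Exception) override { std::abort(); }
    void AddTicks(std::uint64_t t) override { ticks_left -= t < ticks_left ? t : ticks_left; }
    std::uint64_t GetTicksRemaining() override { return ticks_left; }
    std::uint64_t GetCNTPCT() override { return 0; }                         // no emulated timer
};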

View File

@@ -1,81 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <atomic>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>
namespace Dynarmic {
using VAddr = std::uint64_t;
using Vector = std::array<std::uint64_t, 2>;
class ExclusiveMonitor {
public:
/// @param processor_count Maximum number of processors using this global
/// exclusive monitor. Each processor must have a
/// unique id.
explicit ExclusiveMonitor(size_t processor_count);
size_t GetProcessorCount() const;
/// Marks a region containing [address, address+size) to be exclusive to
/// processor processor_id.
template <typename T, typename Function>
T ReadAndMark(size_t processor_id, VAddr address, Function op) {
static_assert(std::is_trivially_copyable_v<T>);
const VAddr masked_address = address & RESERVATION_GRANULE_MASK;
Lock();
exclusive_addresses[processor_id] = masked_address;
const T value = op();
std::memcpy(exclusive_values[processor_id].data(), &value, sizeof(T));
Unlock();
return value;
}
/// Checks to see if processor processor_id has exclusive access to the
/// specified region. If it does, executes the operation then clears
/// the exclusive state for processors if their exclusive region(s)
/// contain [address, address+size).
template <typename T, typename Function>
bool DoExclusiveOperation(size_t processor_id, VAddr address, Function op) {
static_assert(std::is_trivially_copyable_v<T>);
if (!CheckAndClear(processor_id, address)) {
return false;
}
T saved_value;
std::memcpy(&saved_value, exclusive_values[processor_id].data(), sizeof(T));
const bool result = op(saved_value);
Unlock();
return result;
}
/// Unmark everything.
void Clear();
/// Unmark processor processor_id.
void ClearProcessor(size_t processor_id);
private:
bool CheckAndClear(size_t processor_id, VAddr address);
void Lock();
void Unlock();
static constexpr VAddr RESERVATION_GRANULE_MASK = 0xFFFF'FFFF'FFFF'FFFFull;
static constexpr VAddr INVALID_EXCLUSIVE_ADDRESS = 0xDEAD'DEAD'DEAD'DEADull;
std::atomic_flag is_locked;
std::vector<VAddr> exclusive_addresses;
std::vector<Vector> exclusive_values;
};
} // namespace Dynarmic
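A sketch of how the two templates above pair up to emulate an LDXR/STXR sequence; ReadGuest32 and CompareAndSwapGuest32 are assumed guest-memory helpers, and the processor id, addr and new_value are illustrative:
Dynarmic::ExclusiveMonitor monitor{2};  // two emulated cores sharing one monitor
// LDXR: read memory and mark the reservation granule for core 0.
const std::uint32_t loaded = monitor.ReadAndMark<std::uint32_t>(0, addr, [&] {
    return ReadGuest32(addr);           // assumed accessor
});
// STXR: the operation runs only while core 0 still holds the reservation;
// op receives the value saved at ReadAndMark time as `expected`.
const bool stored = monitor.DoExclusiveOperation<std::uint32_t>(0, addr,
    [&](std::uint32_t expected) {
        return CompareAndSwapGuest32(addr, expected, new_value);  // assumed CAS helper
    });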

View File

@@ -1,74 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstdint>
namespace Dynarmic {
enum class OptimizationFlag : std::uint32_t {
/// This optimization avoids dispatcher lookups by allowing emitted basic blocks to jump
/// directly to other basic blocks if the destination PC is predictable at JIT-time.
/// This is a safe optimization.
BlockLinking = 0x00000001,
/// This optimization avoids dispatcher lookups by emulating a return stack buffer. This
/// allows for function returns and syscall returns to be predicted at runtime.
/// This is a safe optimization.
ReturnStackBuffer = 0x00000002,
/// This optimization enables a two-tiered dispatch system.
/// A fast dispatcher (written in assembly) first does a look-up in a small MRU cache.
/// If this fails, it falls back to the usual slower dispatcher.
/// This is a safe optimization.
FastDispatch = 0x00000004,
/// This is an IR optimization. This optimization eliminates unnecessary emulated CPU state
/// context lookups.
/// This is a safe optimization.
GetSetElimination = 0x00000008,
/// This is an IR optimization. This optimization does constant propagation.
/// This is a safe optimization.
ConstProp = 0x00000010,
/// This enables miscellaneous safe IR optimizations.
MiscIROpt = 0x00000020,
/// This is an UNSAFE optimization that reduces accuracy of fused multiply-add operations.
/// This unfuses fused instructions to improve performance on host CPUs without FMA support.
Unsafe_UnfuseFMA = 0x00010000,
/// This is an UNSAFE optimization that reduces accuracy of certain floating-point instructions.
/// This allows results of FRECPE and FRSQRTE to have **less** error than spec allows.
Unsafe_ReducedErrorFP = 0x00020000,
/// This is an UNSAFE optimization that causes floating-point instructions to not produce correct NaNs.
/// This may also result in inaccurate results when instructions are given certain special values.
Unsafe_InaccurateNaN = 0x00040000,
};
constexpr OptimizationFlag no_optimizations = static_cast<OptimizationFlag>(0);
constexpr OptimizationFlag all_safe_optimizations = static_cast<OptimizationFlag>(0x0000FFFF);
constexpr OptimizationFlag operator~(OptimizationFlag f) {
return static_cast<OptimizationFlag>(~static_cast<std::uint32_t>(f));
}
constexpr OptimizationFlag operator|(OptimizationFlag f1, OptimizationFlag f2) {
return static_cast<OptimizationFlag>(static_cast<std::uint32_t>(f1) | static_cast<std::uint32_t>(f2));
}
constexpr OptimizationFlag operator&(OptimizationFlag f1, OptimizationFlag f2) {
return static_cast<OptimizationFlag>(static_cast<std::uint32_t>(f1) & static_cast<std::uint32_t>(f2));
}
constexpr OptimizationFlag operator|=(OptimizationFlag& result, OptimizationFlag f) {
return result = (result | f);
}
constexpr OptimizationFlag operator&=(OptimizationFlag& result, OptimizationFlag f) {
return result = (result & f);
}
constexpr bool operator!(OptimizationFlag f) {
return f == no_optimizations;
}
} // namespace Dynarmic
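Given the operators above, a caller might compose a flag set like this (a sketch; the particular flag choices are arbitrary):
using Dynarmic::OptimizationFlag;
// Start from everything safe, then opt out of the two dispatcher shortcuts.
OptimizationFlag flags = Dynarmic::all_safe_optimizations;
flags &= ~OptimizationFlag::BlockLinking;
flags &= ~OptimizationFlag::FastDispatch;
// Unsafe bits only take effect when UserConfig::unsafe_optimizations is also
// set; see UserConfig::HasOptimization in the config headers above.
flags |= OptimizationFlag::Unsafe_UnfuseFMA;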

View File

@@ -1,383 +0,0 @@
add_library(dynarmic
../include/dynarmic/A32/a32.h
../include/dynarmic/A32/arch_version.h
../include/dynarmic/A32/config.h
../include/dynarmic/A32/coprocessor.h
../include/dynarmic/A32/coprocessor_util.h
../include/dynarmic/A32/disassembler.h
../include/dynarmic/A64/a64.h
../include/dynarmic/A64/config.h
../include/dynarmic/exclusive_monitor.h
../include/dynarmic/optimization_flags.h
common/assert.cpp
common/assert.h
common/bit_util.h
common/cast_util.h
common/common_types.h
common/crypto/aes.cpp
common/crypto/aes.h
common/crypto/crc32.cpp
common/crypto/crc32.h
common/crypto/sm4.cpp
common/crypto/sm4.h
common/fp/fpcr.h
common/fp/fpsr.h
common/fp/fused.cpp
common/fp/fused.h
common/fp/info.h
common/fp/mantissa_util.h
common/fp/op.h
common/fp/op/FPCompare.cpp
common/fp/op/FPCompare.h
common/fp/op/FPConvert.cpp
common/fp/op/FPConvert.h
common/fp/op/FPMulAdd.cpp
common/fp/op/FPMulAdd.h
common/fp/op/FPNeg.h
common/fp/op/FPRecipEstimate.cpp
common/fp/op/FPRecipEstimate.h
common/fp/op/FPRecipExponent.cpp
common/fp/op/FPRecipExponent.h
common/fp/op/FPRecipStepFused.cpp
common/fp/op/FPRecipStepFused.h
common/fp/op/FPRoundInt.cpp
common/fp/op/FPRoundInt.h
common/fp/op/FPRSqrtEstimate.cpp
common/fp/op/FPRSqrtEstimate.h
common/fp/op/FPRSqrtStepFused.cpp
common/fp/op/FPRSqrtStepFused.h
common/fp/op/FPToFixed.cpp
common/fp/op/FPToFixed.h
common/fp/process_exception.cpp
common/fp/process_exception.h
common/fp/process_nan.cpp
common/fp/process_nan.h
common/fp/rounding_mode.h
common/fp/unpacked.cpp
common/fp/unpacked.h
common/fp/util.h
common/intrusive_list.h
common/iterator_util.h
common/llvm_disassemble.cpp
common/llvm_disassemble.h
common/lut_from_list.h
common/macro_util.h
common/math_util.cpp
common/math_util.h
common/memory_pool.cpp
common/memory_pool.h
common/safe_ops.h
common/scope_exit.h
common/string_util.h
common/u128.cpp
common/u128.h
common/variant_util.h
frontend/A32/types.cpp
frontend/A32/types.h
frontend/A64/types.cpp
frontend/A64/types.h
frontend/decoder/decoder_detail.h
frontend/decoder/matcher.h
frontend/imm.cpp
frontend/imm.h
frontend/ir/basic_block.cpp
frontend/ir/basic_block.h
frontend/ir/cond.h
frontend/ir/ir_emitter.cpp
frontend/ir/ir_emitter.h
frontend/ir/location_descriptor.cpp
frontend/ir/location_descriptor.h
frontend/ir/microinstruction.cpp
frontend/ir/microinstruction.h
frontend/ir/opcodes.cpp
frontend/ir/opcodes.h
frontend/ir/opcodes.inc
frontend/ir/terminal.h
frontend/ir/type.cpp
frontend/ir/type.h
frontend/ir/value.cpp
frontend/ir/value.h
ir_opt/constant_propagation_pass.cpp
ir_opt/dead_code_elimination_pass.cpp
ir_opt/identity_removal_pass.cpp
ir_opt/ir_matcher.h
ir_opt/passes.h
ir_opt/verification_pass.cpp
)
if ("A32" IN_LIST DYNARMIC_FRONTENDS)
target_sources(dynarmic PRIVATE
frontend/A32/decoder/arm.h
frontend/A32/decoder/arm.inc
frontend/A32/decoder/asimd.h
frontend/A32/decoder/asimd.inc
frontend/A32/decoder/thumb16.h
frontend/A32/decoder/thumb16.inc
frontend/A32/decoder/thumb32.h
frontend/A32/decoder/thumb32.inc
frontend/A32/decoder/vfp.h
frontend/A32/decoder/vfp.inc
frontend/A32/disassembler/disassembler.h
frontend/A32/disassembler/disassembler_arm.cpp
frontend/A32/disassembler/disassembler_thumb.cpp
frontend/A32/FPSCR.h
frontend/A32/ir_emitter.cpp
frontend/A32/ir_emitter.h
frontend/A32/ITState.h
frontend/A32/location_descriptor.cpp
frontend/A32/location_descriptor.h
frontend/A32/PSR.h
frontend/A32/translate/conditional_state.cpp
frontend/A32/translate/conditional_state.h
frontend/A32/translate/impl/asimd_load_store_structures.cpp
frontend/A32/translate/impl/asimd_misc.cpp
frontend/A32/translate/impl/asimd_one_reg_modified_immediate.cpp
frontend/A32/translate/impl/asimd_three_regs.cpp
frontend/A32/translate/impl/asimd_two_regs_misc.cpp
frontend/A32/translate/impl/asimd_two_regs_scalar.cpp
frontend/A32/translate/impl/asimd_two_regs_shift.cpp
frontend/A32/translate/impl/barrier.cpp
frontend/A32/translate/impl/branch.cpp
frontend/A32/translate/impl/coprocessor.cpp
frontend/A32/translate/impl/crc32.cpp
frontend/A32/translate/impl/data_processing.cpp
frontend/A32/translate/impl/divide.cpp
frontend/A32/translate/impl/exception_generating.cpp
frontend/A32/translate/impl/extension.cpp
frontend/A32/translate/impl/hint.cpp
frontend/A32/translate/impl/load_store.cpp
frontend/A32/translate/impl/misc.cpp
frontend/A32/translate/impl/multiply.cpp
frontend/A32/translate/impl/packing.cpp
frontend/A32/translate/impl/parallel.cpp
frontend/A32/translate/impl/reversal.cpp
frontend/A32/translate/impl/saturated.cpp
frontend/A32/translate/impl/status_register_access.cpp
frontend/A32/translate/impl/synchronization.cpp
frontend/A32/translate/impl/thumb16.cpp
frontend/A32/translate/impl/thumb32_branch.cpp
frontend/A32/translate/impl/thumb32_control.cpp
frontend/A32/translate/impl/thumb32_data_processing_modified_immediate.cpp
frontend/A32/translate/impl/thumb32_data_processing_plain_binary_immediate.cpp
frontend/A32/translate/impl/thumb32_data_processing_register.cpp
frontend/A32/translate/impl/thumb32_data_processing_shifted_register.cpp
frontend/A32/translate/impl/thumb32_load_byte.cpp
frontend/A32/translate/impl/thumb32_load_halfword.cpp
frontend/A32/translate/impl/thumb32_load_store_dual.cpp
frontend/A32/translate/impl/thumb32_load_store_multiple.cpp
frontend/A32/translate/impl/thumb32_load_word.cpp
frontend/A32/translate/impl/thumb32_long_multiply.cpp
frontend/A32/translate/impl/thumb32_misc.cpp
frontend/A32/translate/impl/thumb32_multiply.cpp
frontend/A32/translate/impl/thumb32_parallel.cpp
frontend/A32/translate/impl/thumb32_store_single_data_item.cpp
frontend/A32/translate/impl/translate_arm.h
frontend/A32/translate/impl/translate_thumb.h
frontend/A32/translate/impl/vfp.cpp
frontend/A32/translate/translate.cpp
frontend/A32/translate/translate.h
frontend/A32/translate/translate_arm.cpp
frontend/A32/translate/translate_thumb.cpp
ir_opt/a32_constant_memory_reads_pass.cpp
ir_opt/a32_get_set_elimination_pass.cpp
)
endif()
if ("A64" IN_LIST DYNARMIC_FRONTENDS)
target_sources(dynarmic PRIVATE
frontend/A64/decoder/a64.h
frontend/A64/decoder/a64.inc
frontend/A64/ir_emitter.cpp
frontend/A64/ir_emitter.h
frontend/A64/location_descriptor.cpp
frontend/A64/location_descriptor.h
frontend/A64/translate/impl/branch.cpp
frontend/A64/translate/impl/data_processing_addsub.cpp
frontend/A64/translate/impl/data_processing_bitfield.cpp
frontend/A64/translate/impl/data_processing_conditional_compare.cpp
frontend/A64/translate/impl/data_processing_conditional_select.cpp
frontend/A64/translate/impl/data_processing_crc32.cpp
frontend/A64/translate/impl/data_processing_logical.cpp
frontend/A64/translate/impl/data_processing_multiply.cpp
frontend/A64/translate/impl/data_processing_pcrel.cpp
frontend/A64/translate/impl/data_processing_register.cpp
frontend/A64/translate/impl/data_processing_shift.cpp
frontend/A64/translate/impl/exception_generating.cpp
frontend/A64/translate/impl/floating_point_compare.cpp
frontend/A64/translate/impl/floating_point_conditional_compare.cpp
frontend/A64/translate/impl/floating_point_conditional_select.cpp
frontend/A64/translate/impl/floating_point_conversion_fixed_point.cpp
frontend/A64/translate/impl/floating_point_conversion_integer.cpp
frontend/A64/translate/impl/floating_point_data_processing_one_register.cpp
frontend/A64/translate/impl/floating_point_data_processing_three_register.cpp
frontend/A64/translate/impl/floating_point_data_processing_two_register.cpp
frontend/A64/translate/impl/impl.cpp
frontend/A64/translate/impl/impl.h
frontend/A64/translate/impl/load_store_exclusive.cpp
frontend/A64/translate/impl/load_store_load_literal.cpp
frontend/A64/translate/impl/load_store_multiple_structures.cpp
frontend/A64/translate/impl/load_store_no_allocate_pair.cpp
frontend/A64/translate/impl/load_store_register_immediate.cpp
frontend/A64/translate/impl/load_store_register_pair.cpp
frontend/A64/translate/impl/load_store_register_register_offset.cpp
frontend/A64/translate/impl/load_store_register_unprivileged.cpp
frontend/A64/translate/impl/load_store_single_structure.cpp
frontend/A64/translate/impl/move_wide.cpp
frontend/A64/translate/impl/simd_across_lanes.cpp
frontend/A64/translate/impl/simd_aes.cpp
frontend/A64/translate/impl/simd_copy.cpp
frontend/A64/translate/impl/simd_crypto_four_register.cpp
frontend/A64/translate/impl/simd_crypto_three_register.cpp
frontend/A64/translate/impl/simd_extract.cpp
frontend/A64/translate/impl/simd_modified_immediate.cpp
frontend/A64/translate/impl/simd_permute.cpp
frontend/A64/translate/impl/simd_scalar_pairwise.cpp
frontend/A64/translate/impl/simd_scalar_shift_by_immediate.cpp
frontend/A64/translate/impl/simd_scalar_three_same.cpp
frontend/A64/translate/impl/simd_scalar_two_register_misc.cpp
frontend/A64/translate/impl/simd_scalar_x_indexed_element.cpp
frontend/A64/translate/impl/simd_sha.cpp
frontend/A64/translate/impl/simd_sha512.cpp
frontend/A64/translate/impl/simd_shift_by_immediate.cpp
frontend/A64/translate/impl/simd_table_lookup.cpp
frontend/A64/translate/impl/simd_three_different.cpp
frontend/A64/translate/impl/simd_three_same.cpp
frontend/A64/translate/impl/simd_three_same_extra.cpp
frontend/A64/translate/impl/simd_two_register_misc.cpp
frontend/A64/translate/impl/simd_vector_x_indexed_element.cpp
frontend/A64/translate/impl/sys_dc.cpp
frontend/A64/translate/impl/sys_ic.cpp
frontend/A64/translate/impl/system.cpp
frontend/A64/translate/impl/system_flag_format.cpp
frontend/A64/translate/impl/system_flag_manipulation.cpp
frontend/A64/translate/translate.cpp
frontend/A64/translate/translate.h
ir_opt/a64_callback_config_pass.cpp
ir_opt/a64_get_set_elimination_pass.cpp
ir_opt/a64_merge_interpret_blocks.cpp
)
endif()
if (ARCHITECTURE STREQUAL "x86_64")
target_sources(dynarmic PRIVATE
backend/x64/abi.cpp
backend/x64/abi.h
backend/x64/block_of_code.cpp
backend/x64/block_of_code.h
backend/x64/block_range_information.cpp
backend/x64/block_range_information.h
backend/x64/callback.cpp
backend/x64/callback.h
backend/x64/constant_pool.cpp
backend/x64/constant_pool.h
backend/x64/devirtualize.h
backend/x64/emit_x64.cpp
backend/x64/emit_x64.h
backend/x64/emit_x64_aes.cpp
backend/x64/emit_x64_crc32.cpp
backend/x64/emit_x64_data_processing.cpp
backend/x64/emit_x64_floating_point.cpp
backend/x64/emit_x64_packed.cpp
backend/x64/emit_x64_saturation.cpp
backend/x64/emit_x64_sm4.cpp
backend/x64/emit_x64_vector.cpp
backend/x64/emit_x64_vector_floating_point.cpp
backend/x64/emit_x64_vector_saturation.cpp
backend/x64/exception_handler.h
backend/x64/exclusive_monitor.cpp
backend/x64/hostloc.cpp
backend/x64/hostloc.h
backend/x64/jitstate_info.h
backend/x64/oparg.h
backend/x64/perf_map.cpp
backend/x64/perf_map.h
backend/x64/reg_alloc.cpp
backend/x64/reg_alloc.h
)
if ("A32" IN_LIST DYNARMIC_FRONTENDS)
target_sources(dynarmic PRIVATE
backend/x64/a32_emit_x64.cpp
backend/x64/a32_emit_x64.h
backend/x64/a32_interface.cpp
backend/x64/a32_jitstate.cpp
backend/x64/a32_jitstate.h
)
endif()
if ("A64" IN_LIST DYNARMIC_FRONTENDS)
target_sources(dynarmic PRIVATE
backend/x64/a64_emit_x64.cpp
backend/x64/a64_emit_x64.h
backend/x64/a64_interface.cpp
backend/x64/a64_jitstate.cpp
backend/x64/a64_jitstate.h
)
endif()
if (WIN32)
target_sources(dynarmic PRIVATE backend/x64/exception_handler_windows.cpp)
elseif (APPLE)
find_path(MACH_EXC_DEFS_DIR "mach/mach_exc.defs")
if (NOT MACH_EXC_DEFS_DIR)
message(WARNING "macOS fastmem disabled: unable to find mach/mach_exc.defs")
target_sources(dynarmic PRIVATE backend/x64/exception_handler_generic.cpp)
else()
message(STATUS "mach/mach_exc.defs location: ${MACH_EXC_DEFS_DIR}")
execute_process(
COMMAND
mkdir -p "${CMAKE_CURRENT_SOURCE_DIR}/backend/x64/mig"
COMMAND
mig
-arch x86_64
-user "${CMAKE_CURRENT_SOURCE_DIR}/backend/x64/mig/mach_exc_user.c"
-header "${CMAKE_CURRENT_SOURCE_DIR}/backend/x64/mig/mach_exc_user.h"
-server "${CMAKE_CURRENT_SOURCE_DIR}/backend/x64/mig/mach_exc_server.c"
-sheader "${CMAKE_CURRENT_SOURCE_DIR}/backend/x64/mig/mach_exc_server.h"
"${MACH_EXC_DEFS_DIR}/mach/mach_exc.defs"
)
target_sources(dynarmic PRIVATE
backend/x64/exception_handler_macos.cpp
backend/x64/mig/mach_exc_server.c
backend/x64/mig/mach_exc_server.h
)
endif()
elseif (UNIX)
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
target_link_libraries(dynarmic PUBLIC rt)
endif()
target_sources(dynarmic PRIVATE backend/x64/exception_handler_posix.cpp)
else()
target_sources(dynarmic PRIVATE backend/x64/exception_handler_generic.cpp)
endif()
else()
message(FATAL_ERROR "Unsupported architecture")
endif()
include(CreateDirectoryGroups)
create_target_directory_groups(dynarmic)
target_include_directories(dynarmic
PUBLIC ../include
PRIVATE .)
target_compile_options(dynarmic PRIVATE ${DYNARMIC_CXX_FLAGS})
target_link_libraries(dynarmic
PRIVATE
boost
fmt::fmt
mp
tsl::robin_map
xbyak
$<$<BOOL:DYNARMIC_USE_LLVM>:${llvm_libs}>
)
if (DYNARMIC_ENABLE_CPU_FEATURE_DETECTION)
target_compile_definitions(dynarmic PRIVATE DYNARMIC_ENABLE_CPU_FEATURE_DETECTION=1)
endif()
if (DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT)
target_compile_definitions(dynarmic PRIVATE DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT=1)
endif()
if (CMAKE_SYSTEM_NAME STREQUAL "Windows")
target_compile_definitions(dynarmic PRIVATE FMT_USE_WINDOWS_H=0)
endif()
# Disable this as it relies on a non-standard feature
target_compile_definitions(dynarmic PRIVATE FMT_USE_USER_DEFINED_LITERALS=0)

File diff suppressed because it is too large Load Diff

View File

@@ -1,141 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <optional>
#include <set>
#include <tuple>
#include <tsl/robin_map.h>
#include <dynarmic/A32/a32.h>
#include <dynarmic/A32/config.h>
#include "backend/x64/a32_jitstate.h"
#include "backend/x64/block_range_information.h"
#include "backend/x64/emit_x64.h"
#include "frontend/A32/location_descriptor.h"
#include "frontend/ir/terminal.h"
namespace Dynarmic::Backend::X64 {
class RegAlloc;
struct A32EmitContext final : public EmitContext {
A32EmitContext(const A32::UserConfig& conf, RegAlloc& reg_alloc, IR::Block& block);
A32::LocationDescriptor Location() const;
A32::LocationDescriptor EndLocation() const;
bool IsSingleStep() const;
FP::FPCR FPCR(bool fpcr_controlled = true) const override;
bool HasOptimization(OptimizationFlag flag) const override {
return conf.HasOptimization(flag);
}
const A32::UserConfig& conf;
};
class A32EmitX64 final : public EmitX64 {
public:
A32EmitX64(BlockOfCode& code, A32::UserConfig conf, A32::Jit* jit_interface);
~A32EmitX64() override;
/**
* Emit host machine code for a basic block with intermediate representation `block`.
* @note block is modified.
*/
BlockDescriptor Emit(IR::Block& block);
void ClearCache() override;
void InvalidateCacheRanges(const boost::icl::interval_set<u32>& ranges);
void ChangeProcessorID(size_t value) {
conf.processor_id = value;
}
protected:
A32::UserConfig conf;
A32::Jit* jit_interface;
BlockRangeInformation<u32> block_ranges;
void EmitCondPrelude(const A32EmitContext& ctx);
struct FastDispatchEntry {
u64 location_descriptor = 0xFFFF'FFFF'FFFF'FFFFull;
const void* code_ptr = nullptr;
};
static_assert(sizeof(FastDispatchEntry) == 0x10);
static constexpr u64 fast_dispatch_table_mask = 0xFFFF0;
static constexpr size_t fast_dispatch_table_size = 0x10000;
std::array<FastDispatchEntry, fast_dispatch_table_size> fast_dispatch_table;
void ClearFastDispatchTable();
std::map<std::tuple<size_t, int, int>, void(*)()> read_fallbacks;
std::map<std::tuple<size_t, int, int>, void(*)()> write_fallbacks;
void GenFastmemFallbacks();
const void* terminal_handler_pop_rsb_hint;
const void* terminal_handler_fast_dispatch_hint = nullptr;
FastDispatchEntry& (*fast_dispatch_table_lookup)(u64) = nullptr;
void GenTerminalHandlers();
// Microinstruction emitters
#define OPCODE(...)
#define A32OPC(name, type, ...) void EmitA32##name(A32EmitContext& ctx, IR::Inst* inst);
#define A64OPC(...)
#include "frontend/ir/opcodes.inc"
#undef OPCODE
#undef A32OPC
#undef A64OPC
// Helpers
std::string LocationDescriptorToFriendlyName(const IR::LocationDescriptor&) const override;
// Fastmem information
using DoNotFastmemMarker = std::tuple<IR::LocationDescriptor, std::ptrdiff_t>;
struct FastmemPatchInfo {
u64 resume_rip;
u64 callback;
DoNotFastmemMarker marker;
};
tsl::robin_map<u64, FastmemPatchInfo> fastmem_patch_info;
std::set<DoNotFastmemMarker> do_not_fastmem;
std::optional<DoNotFastmemMarker> ShouldFastmem(A32EmitContext& ctx, IR::Inst* inst) const;
FakeCall FastmemCallback(u64 rip);
// Memory access helpers
template<std::size_t bitsize, auto callback>
void ReadMemory(A32EmitContext& ctx, IR::Inst* inst);
template<std::size_t bitsize, auto callback>
void WriteMemory(A32EmitContext& ctx, IR::Inst* inst);
template<std::size_t bitsize, auto callback>
void ExclusiveReadMemory(A32EmitContext& ctx, IR::Inst* inst);
template<std::size_t bitsize, auto callback>
void ExclusiveWriteMemory(A32EmitContext& ctx, IR::Inst* inst);
// Terminal instruction emitters
void EmitSetUpperLocationDescriptor(IR::LocationDescriptor new_location, IR::LocationDescriptor old_location);
void EmitTerminalImpl(IR::Term::Interpret terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::ReturnToDispatch terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::LinkBlock terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::LinkBlockFast terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::PopRSBHint terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::FastDispatchHint terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::If terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::CheckBit terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::CheckHalt terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
// Patching
void Unpatch(const IR::LocationDescriptor& target_desc) override;
void EmitPatchJg(const IR::LocationDescriptor& target_desc, CodePtr target_code_ptr = nullptr) override;
void EmitPatchJmp(const IR::LocationDescriptor& target_desc, CodePtr target_code_ptr = nullptr) override;
void EmitPatchMovRcx(CodePtr target_code_ptr = nullptr) override;
};
} // namespace Dynarmic::Backend::X64
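The fast-dispatch constants above describe a direct-mapped cache keyed on the location-descriptor hash. A standalone sketch of the lookup the generated code performs (same constants, plain C++ for illustration only):
#include <array>
#include <cstdint>
struct Entry {                        // mirrors FastDispatchEntry: 0x10 bytes
    std::uint64_t location_descriptor;
    const void* code_ptr;
};
static_assert(sizeof(Entry) == 0x10);
Entry& Lookup(std::array<Entry, 0x10000>& table, std::uint64_t hash) {
    // Masking with 0xFFFF0 keeps 16 bits of the hash as a byte offset that
    // is already aligned to the 0x10-byte entry size.
    const std::uint64_t byte_offset = hash & 0xFFFF0;
    return table[byte_offset / sizeof(Entry)];
}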

View File

@@ -1,350 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <functional>
#include <memory>
#include <boost/icl/interval_set.hpp>
#include <fmt/format.h>
#include <dynarmic/A32/a32.h>
#include <dynarmic/A32/context.h>
#include "backend/x64/a32_emit_x64.h"
#include "backend/x64/a32_jitstate.h"
#include "backend/x64/block_of_code.h"
#include "backend/x64/callback.h"
#include "backend/x64/devirtualize.h"
#include "backend/x64/jitstate_info.h"
#include "common/assert.h"
#include "common/cast_util.h"
#include "common/common_types.h"
#include "common/llvm_disassemble.h"
#include "common/scope_exit.h"
#include "frontend/A32/translate/translate.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/location_descriptor.h"
#include "ir_opt/passes.h"
namespace Dynarmic::A32 {
using namespace Backend::X64;
static RunCodeCallbacks GenRunCodeCallbacks(A32::UserCallbacks* cb, CodePtr (*LookupBlock)(void* lookup_block_arg), void* arg) {
return RunCodeCallbacks{
std::make_unique<ArgCallback>(LookupBlock, reinterpret_cast<u64>(arg)),
std::make_unique<ArgCallback>(Devirtualize<&A32::UserCallbacks::AddTicks>(cb)),
std::make_unique<ArgCallback>(Devirtualize<&A32::UserCallbacks::GetTicksRemaining>(cb)),
};
}
static std::function<void(BlockOfCode&)> GenRCP(const A32::UserConfig& conf) {
return [conf](BlockOfCode& code) {
if (conf.page_table) {
code.mov(code.r14, Common::BitCast<u64>(conf.page_table));
}
if (conf.fastmem_pointer) {
code.mov(code.r13, Common::BitCast<u64>(conf.fastmem_pointer));
}
};
}
struct Jit::Impl {
Impl(Jit* jit, A32::UserConfig conf)
: block_of_code(GenRunCodeCallbacks(conf.callbacks, &GetCurrentBlockThunk, this), JitStateInfo{jit_state}, conf.code_cache_size, conf.far_code_offset, GenRCP(conf))
, emitter(block_of_code, conf, jit)
, conf(std::move(conf))
, jit_interface(jit)
{}
A32JitState jit_state;
BlockOfCode block_of_code;
A32EmitX64 emitter;
A32::UserConfig conf;
// Requests made during execution to invalidate the cache are queued up here.
size_t invalid_cache_generation = 0;
boost::icl::interval_set<u32> invalid_cache_ranges;
bool invalidate_entire_cache = false;
void Execute() {
const CodePtr current_codeptr = [this]{
// RSB optimization
const u32 new_rsb_ptr = (jit_state.rsb_ptr - 1) & A32JitState::RSBPtrMask;
if (jit_state.GetUniqueHash() == jit_state.rsb_location_descriptors[new_rsb_ptr]) {
jit_state.rsb_ptr = new_rsb_ptr;
return reinterpret_cast<CodePtr>(jit_state.rsb_codeptrs[new_rsb_ptr]);
}
return GetCurrentBlock();
}();
block_of_code.RunCode(&jit_state, current_codeptr);
}
void Step() {
block_of_code.StepCode(&jit_state, GetCurrentSingleStep());
}
void ExceptionalExit() {
ClearExclusiveState();
if (!conf.wall_clock_cntpct) {
const s64 ticks = jit_state.cycles_to_run - jit_state.cycles_remaining;
conf.callbacks->AddTicks(ticks);
}
PerformCacheInvalidation();
}
void ChangeProcessorID(size_t value) {
conf.processor_id = value;
emitter.ChangeProcessorID(value);
}
void ClearExclusiveState() {
jit_state.exclusive_state = 0;
}
std::string Disassemble(const IR::LocationDescriptor& descriptor) {
auto block = GetBasicBlock(descriptor);
std::string result = fmt::format("address: {}\nsize: {} bytes\n", block.entrypoint, block.size);
result += Common::DisassembleX64(block.entrypoint, reinterpret_cast<const char*>(block.entrypoint) + block.size);
return result;
}
void PerformCacheInvalidation() {
if (invalidate_entire_cache) {
jit_state.ResetRSB();
block_of_code.ClearCache();
emitter.ClearCache();
invalid_cache_ranges.clear();
invalidate_entire_cache = false;
invalid_cache_generation++;
return;
}
if (invalid_cache_ranges.empty()) {
return;
}
jit_state.ResetRSB();
emitter.InvalidateCacheRanges(invalid_cache_ranges);
invalid_cache_ranges.clear();
invalid_cache_generation++;
}
void RequestCacheInvalidation() {
if (jit_interface->is_executing) {
jit_state.halt_requested = true;
return;
}
PerformCacheInvalidation();
}
private:
Jit* jit_interface;
static CodePtr GetCurrentBlockThunk(void* this_voidptr) {
Jit::Impl& this_ = *static_cast<Jit::Impl*>(this_voidptr);
return this_.GetCurrentBlock();
}
IR::LocationDescriptor GetCurrentLocation() const {
return IR::LocationDescriptor{jit_state.GetUniqueHash()};
}
CodePtr GetCurrentBlock() {
return GetBasicBlock(GetCurrentLocation()).entrypoint;
}
CodePtr GetCurrentSingleStep() {
return GetBasicBlock(A32::LocationDescriptor{GetCurrentLocation()}.SetSingleStepping(true)).entrypoint;
}
A32EmitX64::BlockDescriptor GetBasicBlock(IR::LocationDescriptor descriptor) {
auto block = emitter.GetBasicBlock(descriptor);
if (block)
return *block;
constexpr size_t MINIMUM_REMAINING_CODESIZE = 1 * 1024 * 1024;
if (block_of_code.SpaceRemaining() < MINIMUM_REMAINING_CODESIZE) {
invalidate_entire_cache = true;
PerformCacheInvalidation();
}
IR::Block ir_block = A32::Translate(A32::LocationDescriptor{descriptor}, [this](u32 vaddr) { return conf.callbacks->MemoryReadCode(vaddr); }, {conf.arch_version, conf.define_unpredictable_behaviour, conf.hook_hint_instructions});
if (conf.HasOptimization(OptimizationFlag::GetSetElimination)) {
Optimization::A32GetSetElimination(ir_block);
Optimization::DeadCodeElimination(ir_block);
}
if (conf.HasOptimization(OptimizationFlag::ConstProp)) {
Optimization::A32ConstantMemoryReads(ir_block, conf.callbacks);
Optimization::ConstantPropagation(ir_block);
Optimization::DeadCodeElimination(ir_block);
}
Optimization::VerificationPass(ir_block);
return emitter.Emit(ir_block);
}
};
Jit::Jit(UserConfig conf) : impl(std::make_unique<Impl>(this, std::move(conf))) {}
Jit::~Jit() = default;
void Jit::Run() {
ASSERT(!is_executing);
is_executing = true;
SCOPE_EXIT { this->is_executing = false; };
impl->jit_state.halt_requested = false;
impl->Execute();
impl->PerformCacheInvalidation();
}
void Jit::Step() {
ASSERT(!is_executing);
is_executing = true;
SCOPE_EXIT { this->is_executing = false; };
impl->jit_state.halt_requested = true;
impl->Step();
impl->PerformCacheInvalidation();
}
void Jit::ClearCache() {
impl->invalidate_entire_cache = true;
impl->RequestCacheInvalidation();
}
void Jit::InvalidateCacheRange(std::uint32_t start_address, std::size_t length) {
impl->invalid_cache_ranges.add(boost::icl::discrete_interval<u32>::closed(start_address, static_cast<u32>(start_address + length - 1)));
impl->RequestCacheInvalidation();
}
void Jit::Reset() {
ASSERT(!is_executing);
impl->jit_state = {};
}
void Jit::HaltExecution() {
impl->jit_state.halt_requested = true;
}
void Jit::ExceptionalExit() {
impl->ExceptionalExit();
is_executing = false;
}
void Jit::ClearExclusiveState() {
impl->ClearExclusiveState();
}
void Jit::ChangeProcessorID(size_t new_processor) {
impl->ChangeProcessorID(new_processor);
}
std::array<u32, 16>& Jit::Regs() {
return impl->jit_state.Reg;
}
const std::array<u32, 16>& Jit::Regs() const {
return impl->jit_state.Reg;
}
std::array<u32, 64>& Jit::ExtRegs() {
return impl->jit_state.ExtReg;
}
const std::array<u32, 64>& Jit::ExtRegs() const {
return impl->jit_state.ExtReg;
}
u32 Jit::Cpsr() const {
return impl->jit_state.Cpsr();
}
void Jit::SetCpsr(u32 value) {
return impl->jit_state.SetCpsr(value);
}
u32 Jit::Fpscr() const {
return impl->jit_state.Fpscr();
}
void Jit::SetFpscr(u32 value) {
return impl->jit_state.SetFpscr(value);
}
Context Jit::SaveContext() const {
Context ctx;
SaveContext(ctx);
return ctx;
}
struct Context::Impl {
A32JitState jit_state;
size_t invalid_cache_generation;
};
Context::Context() : impl(std::make_unique<Context::Impl>()) { impl->jit_state.ResetRSB(); }
Context::~Context() = default;
Context::Context(const Context& ctx) : impl(std::make_unique<Context::Impl>(*ctx.impl)) {}
Context::Context(Context&& ctx) noexcept : impl(std::move(ctx.impl)) {}
Context& Context::operator=(const Context& ctx) {
*impl = *ctx.impl;
return *this;
}
Context& Context::operator=(Context&& ctx) noexcept {
impl = std::move(ctx.impl);
return *this;
}
std::array<std::uint32_t, 16>& Context::Regs() {
return impl->jit_state.Reg;
}
const std::array<std::uint32_t, 16>& Context::Regs() const {
return impl->jit_state.Reg;
}
std::array<std::uint32_t, 64>& Context::ExtRegs() {
return impl->jit_state.ExtReg;
}
const std::array<std::uint32_t, 64>& Context::ExtRegs() const {
return impl->jit_state.ExtReg;
}
std::uint32_t Context::Cpsr() const {
return impl->jit_state.Cpsr();
}
void Context::SetCpsr(std::uint32_t value) {
impl->jit_state.SetCpsr(value);
}
std::uint32_t Context::Fpscr() const {
return impl->jit_state.Fpscr();
}
void Context::SetFpscr(std::uint32_t value) {
return impl->jit_state.SetFpscr(value);
}
void Jit::SaveContext(Context& ctx) const {
ctx.impl->jit_state.TransferJitState(impl->jit_state, false);
ctx.impl->invalid_cache_generation = impl->invalid_cache_generation;
}
void Jit::LoadContext(const Context& ctx) {
bool reset_rsb = ctx.impl->invalid_cache_generation != impl->invalid_cache_generation;
impl->jit_state.TransferJitState(ctx.impl->jit_state, reset_rsb);
}
std::string Jit::Disassemble() const {
return Common::DisassembleX64(impl->block_of_code.GetCodeBegin(), impl->block_of_code.getCurr());
}
} // namespace Dynarmic::A32
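A usage sketch for the Context round-trip implemented above; jit is an A32::Jit and the register write is arbitrary:
Dynarmic::A32::Context ctx = jit.SaveContext();  // snapshot Reg, ExtReg, CPSR, FPSCR
jit.Regs()[0] = 42;                              // perturb guest state
jit.Run();                                       // ...execute some guest code...
jit.LoadContext(ctx);                            // restore; per LoadContext above, the
                                                 // RSB is reset only if the code cache
                                                 // was invalidated since the snapshot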

View File

@@ -1,204 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "backend/x64/a32_jitstate.h"
#include "backend/x64/block_of_code.h"
#include "backend/x64/nzcv_util.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "frontend/A32/location_descriptor.h"
namespace Dynarmic::Backend::X64 {
/**
* CPSR Bits
* =========
*
* ARM CPSR flags
* --------------
* N bit 31 Negative flag
* Z bit 30 Zero flag
* C bit 29 Carry flag
* V bit 28 oVerflow flag
* Q bit 27 Saturation flag
* IT[1:0] bits 25-26 If-Then execution state (lower 2 bits)
* J bit 24 Jazelle instruction set flag
* GE bits 16-19 Greater than or Equal flags
* IT[7:2] bits 10-15 If-Then execution state (upper 6 bits)
* E bit 9 Data Endianness flag
* A bit 8 Disable imprecise Aborts
* I bit 7 Disable IRQ interrupts
* F bit 6 Disable FIQ interrupts
* T bit 5 Thumb instruction set flag
* M bits 0-4 Processor Mode bits
*
* x64 LAHF+SETO flags
* -------------------
* SF bit 15 Sign flag
* ZF bit 14 Zero flag
* AF bit 12 Auxiliary flag
* PF bit 10 Parity flag
* CF bit 8 Carry flag
* OF bit 0 Overflow flag
*/
u32 A32JitState::Cpsr() const {
DEBUG_ASSERT((cpsr_q & ~1) == 0);
DEBUG_ASSERT((cpsr_jaifm & ~0x010001DF) == 0);
u32 cpsr = 0;
// NZCV flags
cpsr |= NZCV::FromX64(cpsr_nzcv);
// Q flag
cpsr |= cpsr_q ? 1 << 27 : 0;
// GE flags
cpsr |= Common::Bit<31>(cpsr_ge) ? 1 << 19 : 0;
cpsr |= Common::Bit<23>(cpsr_ge) ? 1 << 18 : 0;
cpsr |= Common::Bit<15>(cpsr_ge) ? 1 << 17 : 0;
cpsr |= Common::Bit<7>(cpsr_ge) ? 1 << 16 : 0;
// E flag, T flag
cpsr |= Common::Bit<1>(upper_location_descriptor) ? 1 << 9 : 0;
cpsr |= Common::Bit<0>(upper_location_descriptor) ? 1 << 5 : 0;
// IT state
cpsr |= static_cast<u32>(upper_location_descriptor & 0b11111100'00000000);
cpsr |= static_cast<u32>(upper_location_descriptor & 0b00000011'00000000) << 17;
// Other flags
cpsr |= cpsr_jaifm;
return cpsr;
}
void A32JitState::SetCpsr(u32 cpsr) {
// NZCV flags
cpsr_nzcv = NZCV::ToX64(cpsr);
// Q flag
cpsr_q = Common::Bit<27>(cpsr) ? 1 : 0;
// GE flags
cpsr_ge = 0;
cpsr_ge |= Common::Bit<19>(cpsr) ? 0xFF000000 : 0;
cpsr_ge |= Common::Bit<18>(cpsr) ? 0x00FF0000 : 0;
cpsr_ge |= Common::Bit<17>(cpsr) ? 0x0000FF00 : 0;
cpsr_ge |= Common::Bit<16>(cpsr) ? 0x000000FF : 0;
upper_location_descriptor &= 0xFFFF0000;
// E flag, T flag
upper_location_descriptor |= Common::Bit<9>(cpsr) ? 2 : 0;
upper_location_descriptor |= Common::Bit<5>(cpsr) ? 1 : 0;
// IT state
upper_location_descriptor |= (cpsr >> 0) & 0b11111100'00000000;
upper_location_descriptor |= (cpsr >> 17) & 0b00000011'00000000;
// Other flags
cpsr_jaifm = cpsr & 0x010001DF;
}
void A32JitState::ResetRSB() {
rsb_location_descriptors.fill(0xFFFFFFFFFFFFFFFFull);
rsb_codeptrs.fill(0);
}
/**
* Comparing MXCSR and FPSCR
* =========================
*
* SSE MXCSR exception flags
* -------------------------
* PE bit 5 Precision Flag
* UE bit 4 Underflow Flag
* OE bit 3 Overflow Flag
* ZE bit 2 Divide By Zero Flag
* DE bit 1 Denormal Flag // Appears to only be set when MXCSR.DAZ = 0
* IE bit 0 Invalid Operation Flag
*
* VFP FPSCR cumulative exception bits
* -----------------------------------
* IDC bit 7 Input Denormal cumulative exception bit // Only ever set when FPSCR.FTZ = 1
* IXC bit 4 Inexact cumulative exception bit
* UFC bit 3 Underflow cumulative exception bit
* OFC bit 2 Overflow cumulative exception bit
* DZC bit 1 Division by Zero cumulative exception bit
* IOC bit 0 Invalid Operation cumulative exception bit
*
* SSE MSCSR exception masks
* -------------------------
* PM bit 12 Precision Mask
* UM bit 11 Underflow Mask
* OM bit 10 Overflow Mask
* ZM bit 9 Divide By Zero Mask
* DM bit 8 Denormal Mask
* IM bit 7 Invalid Operation Mask
*
* VFP FPSCR exception trap enables
* --------------------------------
* IDE bit 15 Input Denormal exception trap enable
* IXE bit 12 Inexact exception trap enable
* UFE bit 11 Underflow exception trap enable
* OFE bit 10 Overflow exception trap enable
* DZE bit 9 Division by Zero exception trap enable
* IOE bit 8 Invalid Operation exception trap enable
*
* SSE MXCSR mode bits
* -------------------
* FZ bit 15 Flush To Zero
* DAZ bit 6 Denormals Are Zero
* RN bits 13-14 Round to {0 = Nearest, 1 = Negative, 2 = Positive, 3 = Zero}
*
* VFP FPSCR mode bits
* -------------------
* AHP bit 26 Alternate half-precision
* DN bit 25 Default NaN
* FZ bit 24 Flush to Zero
* RMode bits 22-23 Round to {0 = Nearest, 1 = Positive, 2 = Negative, 3 = Zero}
* Stride bits 20-21 Vector stride
* Len bits 16-18 Vector length
*/
// NZCV; QC (ASIMD only), AHP; DN, FZ, RMode, Stride; SBZP; Len; trap enables; cumulative bits
constexpr u32 FPSCR_MODE_MASK = A32::LocationDescriptor::FPSCR_MODE_MASK;
constexpr u32 FPSCR_NZCV_MASK = 0xF0000000;
u32 A32JitState::Fpscr() const {
DEBUG_ASSERT((fpsr_nzcv & ~FPSCR_NZCV_MASK) == 0);
const u32 fpcr_mode = static_cast<u32>(upper_location_descriptor) & FPSCR_MODE_MASK;
const u32 mxcsr = guest_MXCSR | asimd_MXCSR;
u32 FPSCR = fpcr_mode | fpsr_nzcv;
FPSCR |= (mxcsr & 0b0000000000001); // IOC = IE
FPSCR |= (mxcsr & 0b0000000111100) >> 1; // IXC, UFC, OFC, DZC = PE, UE, OE, ZE
FPSCR |= fpsr_exc;
return FPSCR;
}
void A32JitState::SetFpscr(u32 FPSCR) {
// Ensure that only upper half of upper_location_descriptor is used for FPSCR bits.
static_assert((FPSCR_MODE_MASK & 0xFFFF0000) == FPSCR_MODE_MASK);
upper_location_descriptor &= 0x0000FFFF;
upper_location_descriptor |= FPSCR & FPSCR_MODE_MASK;
fpsr_nzcv = FPSCR & FPSCR_NZCV_MASK;
guest_MXCSR = 0x00001f80;
asimd_MXCSR = 0x00009fc0;
// RMode
const std::array<u32, 4> MXCSR_RMode {0x0, 0x4000, 0x2000, 0x6000};
guest_MXCSR |= MXCSR_RMode[(FPSCR >> 22) & 0x3];
// Cumulative flags IDC, IOC, IXC, UFC, OFC, DZC
fpsr_exc = FPSCR & 0x9F;
if (Common::Bit<24>(FPSCR)) {
// VFP Flush to Zero
guest_MXCSR |= (1 << 15); // SSE Flush to Zero
guest_MXCSR |= (1 << 6); // SSE Denormals are Zero
}
}
} // namespace Dynarmic::Backend::X64
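To make the GE packing above concrete: each of the four CPSR.GE bits is widened to a full byte of cpsr_ge so emitted SIMD code can use it directly as a byte mask. A standalone sketch of the same packing:
#include <cstdint>
// Pack CPSR<19:16> (the GE flags) into byte masks, as SetCpsr does above.
std::uint32_t PackGE(std::uint32_t cpsr) {
    std::uint32_t ge = 0;
    ge |= (cpsr & (1u << 19)) ? 0xFF000000 : 0;
    ge |= (cpsr & (1u << 18)) ? 0x00FF0000 : 0;
    ge |= (cpsr & (1u << 17)) ? 0x0000FF00 : 0;
    ge |= (cpsr & (1u << 16)) ? 0x000000FF : 0;
    return ge;
}
// e.g. GE = 0b1010 packs to 0xFF00FF00; bits 31 and 15 of that value then
// recover GE[3] and GE[1], matching the reads in Cpsr() above.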

View File

@@ -1,110 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <xbyak.h>
#include "common/common_types.h"
namespace Dynarmic::Backend::X64 {
class BlockOfCode;
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4324) // Structure was padded due to alignment specifier
#endif
struct A32JitState {
using ProgramCounterType = u32;
A32JitState() { ResetRSB(); }
std::array<u32, 16> Reg{}; // Current register file.
// TODO: Mode-specific register sets unimplemented.
u32 upper_location_descriptor = 0;
u32 cpsr_ge = 0;
u32 cpsr_q = 0;
u32 cpsr_nzcv = 0;
u32 cpsr_jaifm = 0;
u32 Cpsr() const;
void SetCpsr(u32 cpsr);
alignas(16) std::array<u32, 64> ExtReg{}; // Extension registers.
static constexpr size_t SpillCount = 64;
alignas(16) std::array<std::array<u64, 2>, SpillCount> spill{}; // Spill.
static Xbyak::Address GetSpillLocationFromIndex(size_t i) {
using namespace Xbyak::util;
return xword[r15 + offsetof(A32JitState, spill) + i * sizeof(u64) * 2];
}
// For internal use (See: BlockOfCode::RunCode)
u32 guest_MXCSR = 0x00001f80;
u32 asimd_MXCSR = 0x00009fc0;
u32 save_host_MXCSR = 0;
s64 cycles_to_run = 0;
s64 cycles_remaining = 0;
bool halt_requested = false;
bool check_bit = false;
// Exclusive state
u32 exclusive_state = 0;
static constexpr size_t RSBSize = 8; // MUST be a power of 2.
static constexpr size_t RSBPtrMask = RSBSize - 1;
u32 rsb_ptr = 0;
std::array<u64, RSBSize> rsb_location_descriptors;
std::array<u64, RSBSize> rsb_codeptrs;
void ResetRSB();
u32 fpsr_exc = 0;
u32 fpsr_qc = 0; // Dummy value
u32 fpsr_nzcv = 0;
u32 Fpscr() const;
void SetFpscr(u32 FPSCR);
u64 GetUniqueHash() const noexcept {
return (static_cast<u64>(upper_location_descriptor) << 32) | (static_cast<u64>(Reg[15]));
}
void TransferJitState(const A32JitState& src, bool reset_rsb) {
Reg = src.Reg;
upper_location_descriptor = src.upper_location_descriptor;
cpsr_ge = src.cpsr_ge;
cpsr_q = src.cpsr_q;
cpsr_nzcv = src.cpsr_nzcv;
cpsr_jaifm = src.cpsr_jaifm;
ExtReg = src.ExtReg;
guest_MXCSR = src.guest_MXCSR;
asimd_MXCSR = src.asimd_MXCSR;
fpsr_exc = src.fpsr_exc;
fpsr_qc = src.fpsr_qc;
fpsr_nzcv = src.fpsr_nzcv;
exclusive_state = 0;
if (reset_rsb) {
ResetRSB();
} else {
rsb_ptr = src.rsb_ptr;
rsb_location_descriptors = src.rsb_location_descriptors;
rsb_codeptrs = src.rsb_codeptrs;
}
}
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif
using CodePtr = const void*;
} // namespace Dynarmic::Backend::X64
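The unique hash above is just the 32-bit upper location descriptor concatenated with R15 (the PC); a worked example with arbitrary values:
// Thumb bit set in the upper location descriptor, PC = 0x1000:
const std::uint32_t upper = 0x00000001;
const std::uint32_t pc    = 0x00001000;
const std::uint64_t hash  = (std::uint64_t{upper} << 32) | pc;
// hash == 0x0000'0001'0000'1000 -- the key used by the RSB entries and the
// dispatcher lookups above.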

File diff suppressed because it is too large Load Diff

View File

@@ -1,126 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <map>
#include <tuple>
#include <dynarmic/A64/a64.h>
#include <dynarmic/A64/config.h>
#include "backend/x64/a64_jitstate.h"
#include "backend/x64/block_range_information.h"
#include "backend/x64/emit_x64.h"
#include "frontend/A64/location_descriptor.h"
#include "frontend/ir/terminal.h"
namespace Dynarmic::Backend::X64 {
class RegAlloc;
struct A64EmitContext final : public EmitContext {
A64EmitContext(const A64::UserConfig& conf, RegAlloc& reg_alloc, IR::Block& block);
A64::LocationDescriptor Location() const;
bool IsSingleStep() const;
FP::FPCR FPCR(bool fpcr_controlled = true) const override;
bool HasOptimization(OptimizationFlag flag) const override {
return conf.HasOptimization(flag);
}
const A64::UserConfig& conf;
};
class A64EmitX64 final : public EmitX64 {
public:
A64EmitX64(BlockOfCode& code, A64::UserConfig conf, A64::Jit* jit_interface);
~A64EmitX64() override;
/**
* Emit host machine code for a basic block with intermediate representation `block`.
* @note block is modified.
*/
BlockDescriptor Emit(IR::Block& block);
void ClearCache() override;
void InvalidateCacheRanges(const boost::icl::interval_set<u64>& ranges);
void ChangeProcessorID(size_t value) {
conf.processor_id = value;
}
protected:
A64::UserConfig conf;
A64::Jit* jit_interface;
BlockRangeInformation<u64> block_ranges;
struct FastDispatchEntry {
u64 location_descriptor = 0xFFFF'FFFF'FFFF'FFFFull;
const void* code_ptr = nullptr;
};
static_assert(sizeof(FastDispatchEntry) == 0x10);
static constexpr u64 fast_dispatch_table_mask = 0xFFFFF0;
static constexpr size_t fast_dispatch_table_size = 0x100000;
std::array<FastDispatchEntry, fast_dispatch_table_size> fast_dispatch_table;
void ClearFastDispatchTable();
void (*memory_read_128)();
void (*memory_write_128)();
void GenMemory128Accessors();
std::map<std::tuple<size_t, int, int>, void(*)()> read_fallbacks;
std::map<std::tuple<size_t, int, int>, void(*)()> write_fallbacks;
void GenFastmemFallbacks();
const void* terminal_handler_pop_rsb_hint;
const void* terminal_handler_fast_dispatch_hint = nullptr;
FastDispatchEntry& (*fast_dispatch_table_lookup)(u64) = nullptr;
void GenTerminalHandlers();
template<std::size_t bitsize>
void EmitDirectPageTableMemoryRead(A64EmitContext& ctx, IR::Inst* inst);
template<std::size_t bitsize>
void EmitDirectPageTableMemoryWrite(A64EmitContext& ctx, IR::Inst* inst);
template<std::size_t bitsize, auto callback>
void EmitExclusiveReadMemory(A64EmitContext& ctx, IR::Inst* inst);
template<std::size_t bitsize, auto callback>
void EmitExclusiveWriteMemory(A64EmitContext& ctx, IR::Inst* inst);
// Microinstruction emitters
void EmitPushRSB(EmitContext& ctx, IR::Inst* inst);
#define OPCODE(...)
#define A32OPC(...)
#define A64OPC(name, type, ...) void EmitA64##name(A64EmitContext& ctx, IR::Inst* inst);
#include "frontend/ir/opcodes.inc"
#undef OPCODE
#undef A32OPC
#undef A64OPC
// Helpers
std::string LocationDescriptorToFriendlyName(const IR::LocationDescriptor&) const override;
// Terminal instruction emitters
void EmitTerminalImpl(IR::Term::Interpret terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::ReturnToDispatch terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::LinkBlock terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::LinkBlockFast terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::PopRSBHint terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::FastDispatchHint terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::If terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::CheckBit terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
void EmitTerminalImpl(IR::Term::CheckHalt terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
// Patching
void Unpatch(const IR::LocationDescriptor& target_desc) override;
void EmitPatchJg(const IR::LocationDescriptor& target_desc, CodePtr target_code_ptr = nullptr) override;
void EmitPatchJmp(const IR::LocationDescriptor& target_desc, CodePtr target_code_ptr = nullptr) override;
void EmitPatchMovRcx(CodePtr target_code_ptr = nullptr) override;
};
} // namespace Dynarmic::Backend::X64

View File

@@ -1,425 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <cstring>
#include <memory>
#include <boost/icl/interval_set.hpp>
#include <dynarmic/A64/a64.h>
#include "backend/x64/a64_emit_x64.h"
#include "backend/x64/a64_jitstate.h"
#include "backend/x64/block_of_code.h"
#include "backend/x64/devirtualize.h"
#include "backend/x64/jitstate_info.h"
#include "common/assert.h"
#include "common/llvm_disassemble.h"
#include "common/scope_exit.h"
#include "frontend/A64/translate/translate.h"
#include "frontend/ir/basic_block.h"
#include "ir_opt/passes.h"
namespace Dynarmic::A64 {
using namespace Backend::X64;
static RunCodeCallbacks GenRunCodeCallbacks(A64::UserCallbacks* cb, CodePtr (*LookupBlock)(void* lookup_block_arg), void* arg) {
return RunCodeCallbacks{
std::make_unique<ArgCallback>(LookupBlock, reinterpret_cast<u64>(arg)),
std::make_unique<ArgCallback>(Devirtualize<&A64::UserCallbacks::AddTicks>(cb)),
std::make_unique<ArgCallback>(Devirtualize<&A64::UserCallbacks::GetTicksRemaining>(cb)),
};
}
static std::function<void(BlockOfCode&)> GenRCP(const A64::UserConfig& conf) {
return [conf](BlockOfCode& code) {
if (conf.page_table) {
code.mov(code.r14, Common::BitCast<u64>(conf.page_table));
}
};
}
struct Jit::Impl final {
public:
Impl(Jit* jit, UserConfig conf)
: conf(conf)
, block_of_code(GenRunCodeCallbacks(conf.callbacks, &GetCurrentBlockThunk, this), JitStateInfo{jit_state}, conf.code_cache_size, conf.far_code_offset, GenRCP(conf))
, emitter(block_of_code, conf, jit)
{
ASSERT(conf.page_table_address_space_bits >= 12 && conf.page_table_address_space_bits <= 64);
}
~Impl() = default;
void Run() {
ASSERT(!is_executing);
is_executing = true;
SCOPE_EXIT { this->is_executing = false; };
jit_state.halt_requested = false;
// TODO: Check code alignment
const CodePtr current_code_ptr = [this]{
// RSB optimization: if the return stack buffer predicts the current location, jump straight to its cached code pointer.
const u32 new_rsb_ptr = (jit_state.rsb_ptr - 1) & A64JitState::RSBPtrMask;
if (jit_state.GetUniqueHash() == jit_state.rsb_location_descriptors[new_rsb_ptr]) {
jit_state.rsb_ptr = new_rsb_ptr;
return reinterpret_cast<CodePtr>(jit_state.rsb_codeptrs[new_rsb_ptr]);
}
return GetCurrentBlock();
}();
block_of_code.RunCode(&jit_state, current_code_ptr);
PerformRequestedCacheInvalidation();
}
void Step() {
ASSERT(!is_executing);
is_executing = true;
SCOPE_EXIT { this->is_executing = false; };
jit_state.halt_requested = true;
block_of_code.StepCode(&jit_state, GetCurrentSingleStep());
PerformRequestedCacheInvalidation();
}
void ExceptionalExit() {
ClearExclusiveState();
if (!conf.wall_clock_cntpct) {
const s64 ticks = jit_state.cycles_to_run - jit_state.cycles_remaining;
conf.callbacks->AddTicks(ticks);
}
PerformRequestedCacheInvalidation();
is_executing = false;
}
void ChangeProcessorID(size_t value) {
conf.processor_id = value;
emitter.ChangeProcessorID(value);
}
void ClearCache() {
invalidate_entire_cache = true;
RequestCacheInvalidation();
}
void InvalidateCacheRange(u64 start_address, size_t length) {
const auto end_address = static_cast<u64>(start_address + length - 1);
const auto range = boost::icl::discrete_interval<u64>::closed(start_address, end_address);
invalid_cache_ranges.add(range);
RequestCacheInvalidation();
}
void Reset() {
ASSERT(!is_executing);
jit_state = {};
}
void HaltExecution() {
jit_state.halt_requested = true;
}
u64 GetSP() const {
return jit_state.sp;
}
void SetSP(u64 value) {
jit_state.sp = value;
}
u64 GetPC() const {
return jit_state.pc;
}
void SetPC(u64 value) {
jit_state.pc = value;
}
u64 GetRegister(size_t index) const {
if (index == 31)
return GetSP();
return jit_state.reg.at(index);
}
void SetRegister(size_t index, u64 value) {
if (index == 31)
return SetSP(value);
jit_state.reg.at(index) = value;
}
std::array<u64, 31> GetRegisters() const {
return jit_state.reg;
}
void SetRegisters(const std::array<u64, 31>& value) {
jit_state.reg = value;
}
Vector GetVector(size_t index) const {
return {jit_state.vec.at(index * 2), jit_state.vec.at(index * 2 + 1)};
}
void SetVector(size_t index, Vector value) {
jit_state.vec.at(index * 2) = value[0];
jit_state.vec.at(index * 2 + 1) = value[1];
}
std::array<Vector, 32> GetVectors() const {
std::array<Vector, 32> ret;
static_assert(sizeof(ret) == sizeof(jit_state.vec));
std::memcpy(ret.data(), jit_state.vec.data(), sizeof(jit_state.vec));
return ret;
}
void SetVectors(const std::array<Vector, 32>& value) {
static_assert(sizeof(value) == sizeof(jit_state.vec));
std::memcpy(jit_state.vec.data(), value.data(), sizeof(jit_state.vec));
}
u32 GetFpcr() const {
return jit_state.GetFpcr();
}
void SetFpcr(u32 value) {
jit_state.SetFpcr(value);
}
u32 GetFpsr() const {
return jit_state.GetFpsr();
}
void SetFpsr(u32 value) {
jit_state.SetFpsr(value);
}
u32 GetPstate() const {
return jit_state.GetPstate();
}
void SetPstate(u32 value) {
jit_state.SetPstate(value);
}
void ClearExclusiveState() {
jit_state.exclusive_state = 0;
}
bool IsExecuting() const {
return is_executing;
}
std::string Disassemble() const {
return Common::DisassembleX64(block_of_code.GetCodeBegin(), block_of_code.getCurr());
}
private:
static CodePtr GetCurrentBlockThunk(void* thisptr) {
Jit::Impl* this_ = static_cast<Jit::Impl*>(thisptr);
return this_->GetCurrentBlock();
}
IR::LocationDescriptor GetCurrentLocation() const {
return IR::LocationDescriptor{jit_state.GetUniqueHash()};
}
CodePtr GetCurrentBlock() {
return GetBlock(GetCurrentLocation());
}
CodePtr GetCurrentSingleStep() {
return GetBlock(A64::LocationDescriptor{GetCurrentLocation()}.SetSingleStepping(true));
}
CodePtr GetBlock(IR::LocationDescriptor current_location) {
if (auto block = emitter.GetBasicBlock(current_location))
return block->entrypoint;
constexpr size_t MINIMUM_REMAINING_CODESIZE = 1 * 1024 * 1024;
if (block_of_code.SpaceRemaining() < MINIMUM_REMAINING_CODESIZE) {
// Immediately evacuate cache
invalidate_entire_cache = true;
PerformRequestedCacheInvalidation();
}
// JIT Compile
const auto get_code = [this](u64 vaddr) { return conf.callbacks->MemoryReadCode(vaddr); };
IR::Block ir_block = A64::Translate(A64::LocationDescriptor{current_location}, get_code,
{conf.define_unpredictable_behaviour, conf.wall_clock_cntpct});
Optimization::A64CallbackConfigPass(ir_block, conf);
if (conf.HasOptimization(OptimizationFlag::GetSetElimination)) {
Optimization::A64GetSetElimination(ir_block);
Optimization::DeadCodeElimination(ir_block);
}
if (conf.HasOptimization(OptimizationFlag::ConstProp)) {
Optimization::ConstantPropagation(ir_block);
Optimization::DeadCodeElimination(ir_block);
}
if (conf.HasOptimization(OptimizationFlag::MiscIROpt)) {
Optimization::A64MergeInterpretBlocksPass(ir_block, conf.callbacks);
}
Optimization::VerificationPass(ir_block);
return emitter.Emit(ir_block).entrypoint;
}
void RequestCacheInvalidation() {
if (is_executing) {
jit_state.halt_requested = true;
return;
}
PerformRequestedCacheInvalidation();
}
void PerformRequestedCacheInvalidation() {
if (!invalidate_entire_cache && invalid_cache_ranges.empty()) {
return;
}
jit_state.ResetRSB();
if (invalidate_entire_cache) {
block_of_code.ClearCache();
emitter.ClearCache();
} else {
emitter.InvalidateCacheRanges(invalid_cache_ranges);
}
invalid_cache_ranges.clear();
invalidate_entire_cache = false;
}
bool is_executing = false;
UserConfig conf;
A64JitState jit_state;
BlockOfCode block_of_code;
A64EmitX64 emitter;
bool invalidate_entire_cache = false;
boost::icl::interval_set<u64> invalid_cache_ranges;
};
Jit::Jit(UserConfig conf)
: impl(std::make_unique<Jit::Impl>(this, conf)) {}
Jit::~Jit() = default;
void Jit::Run() {
impl->Run();
}
void Jit::Step() {
impl->Step();
}
void Jit::ClearCache() {
impl->ClearCache();
}
void Jit::InvalidateCacheRange(u64 start_address, size_t length) {
impl->InvalidateCacheRange(start_address, length);
}
void Jit::Reset() {
impl->Reset();
}
void Jit::HaltExecution() {
impl->HaltExecution();
}
void Jit::ExceptionalExit() {
impl->ExceptionalExit();
}
void Jit::ChangeProcessorID(size_t new_processor) {
impl->ChangeProcessorID(new_processor);
}
u64 Jit::GetSP() const {
return impl->GetSP();
}
void Jit::SetSP(u64 value) {
impl->SetSP(value);
}
u64 Jit::GetPC() const {
return impl->GetPC();
}
void Jit::SetPC(u64 value) {
impl->SetPC(value);
}
u64 Jit::GetRegister(size_t index) const {
return impl->GetRegister(index);
}
void Jit::SetRegister(size_t index, u64 value) {
impl->SetRegister(index, value);
}
std::array<u64, 31> Jit::GetRegisters() const {
return impl->GetRegisters();
}
void Jit::SetRegisters(const std::array<u64, 31>& value) {
impl->SetRegisters(value);
}
Vector Jit::GetVector(size_t index) const {
return impl->GetVector(index);
}
void Jit::SetVector(size_t index, Vector value) {
impl->SetVector(index, value);
}
std::array<Vector, 32> Jit::GetVectors() const {
return impl->GetVectors();
}
void Jit::SetVectors(const std::array<Vector, 32>& value) {
impl->SetVectors(value);
}
u32 Jit::GetFpcr() const {
return impl->GetFpcr();
}
void Jit::SetFpcr(u32 value) {
impl->SetFpcr(value);
}
u32 Jit::GetFpsr() const {
return impl->GetFpsr();
}
void Jit::SetFpsr(u32 value) {
impl->SetFpsr(value);
}
u32 Jit::GetPstate() const {
return impl->GetPstate();
}
void Jit::SetPstate(u32 value) {
impl->SetPstate(value);
}
void Jit::ClearExclusiveState() {
impl->ClearExclusiveState();
}
bool Jit::IsExecuting() const {
return impl->IsExecuting();
}
std::string Jit::Disassemble() const {
return impl->Disassemble();
}
} // namespace Dynarmic::A64

View File

@@ -1,114 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "backend/x64/a64_jitstate.h"
#include "common/bit_util.h"
#include "frontend/A64/location_descriptor.h"
namespace Dynarmic::Backend::X64 {
/**
* Comparing MXCSR and FPCR
* ========================
*
 * SSE MXCSR exception masks
* -------------------------
* PM bit 12 Precision Mask
* UM bit 11 Underflow Mask
* OM bit 10 Overflow Mask
* ZM bit 9 Divide By Zero Mask
* DM bit 8 Denormal Mask
* IM bit 7 Invalid Operation Mask
*
* A64 FPCR exception trap enables
* -------------------------------
* IDE bit 15 Input Denormal exception trap enable
* IXE bit 12 Inexact exception trap enable
* UFE bit 11 Underflow exception trap enable
* OFE bit 10 Overflow exception trap enable
* DZE bit 9 Division by Zero exception trap enable
* IOE bit 8 Invalid Operation exception trap enable
*
* SSE MXCSR mode bits
* -------------------
* FZ bit 15 Flush To Zero
* DAZ bit 6 Denormals Are Zero
* RN bits 13-14 Round to {0 = Nearest, 1 = Negative, 2 = Positive, 3 = Zero}
*
* A64 FPCR mode bits
* ------------------
* AHP bit 26 Alternative half-precision
* DN bit 25 Default NaN
* FZ bit 24 Flush to Zero
* RMode bits 22-23 Round to {0 = Nearest, 1 = Positive, 2 = Negative, 3 = Zero}
* FZ16 bit 19 Flush to Zero for half-precision
*/
constexpr u32 FPCR_MASK = 0x07C89F00;
u32 A64JitState::GetFpcr() const {
return fpcr;
}
void A64JitState::SetFpcr(u32 value) {
fpcr = value & FPCR_MASK;
asimd_MXCSR &= 0x0000003D;
guest_MXCSR &= 0x0000003D;
asimd_MXCSR |= 0x00001f80;
guest_MXCSR |= 0x00001f80; // Mask all exceptions
// RMode
const std::array<u32, 4> MXCSR_RMode {0x0, 0x4000, 0x2000, 0x6000};
guest_MXCSR |= MXCSR_RMode[(value >> 22) & 0x3];
if (Common::Bit<24>(value)) {
guest_MXCSR |= (1 << 15); // SSE Flush to Zero
guest_MXCSR |= (1 << 6); // SSE Denormals are Zero
}
}
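// Worked sketch (not part of the original file; ExampleFpcrToMxcsr is hypothetical):
// FPCR.RMode encodes {0 = Nearest, 1 = Positive, 2 = Negative, 3 = Zero} while
// MXCSR.RC encodes {0 = Nearest, 1 = Negative, 2 = Positive, 3 = Zero}, which is
// why the lookup table above swaps its middle entries.
static u32 ExampleFpcrToMxcsr(u32 fpcr_value) {
    constexpr std::array<u32, 4> MXCSR_RMode{0x0, 0x4000, 0x2000, 0x6000};
    u32 mxcsr = 0x00001f80;                          // all exceptions masked
    mxcsr |= MXCSR_RMode[(fpcr_value >> 22) & 0x3];  // RMode = 0b01 selects 0x4000 (round up)
    return mxcsr;                                    // e.g. 0x00400000 -> 0x00005f80
}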
/**
* Comparing MXCSR and FPSR
* ========================
*
* SSE MXCSR exception flags
* -------------------------
* PE bit 5 Precision Flag
* UE bit 4 Underflow Flag
* OE bit 3 Overflow Flag
* ZE bit 2 Divide By Zero Flag
* DE bit 1 Denormal Flag // Appears to only be set when MXCSR.DAZ = 0
* IE bit 0 Invalid Operation Flag
*
* A64 FPSR cumulative exception bits
* ----------------------------------
* QC bit 27 Cumulative saturation bit
* IDC bit 7 Input Denormal cumulative exception bit // Only ever set when FPCR.FTZ = 1
* IXC bit 4 Inexact cumulative exception bit
* UFC bit 3 Underflow cumulative exception bit
* OFC bit 2 Overflow cumulative exception bit
* DZC bit 1 Division by Zero cumulative exception bit
* IOC bit 0 Invalid Operation cumulative exception bit
*/
u32 A64JitState::GetFpsr() const {
const u32 mxcsr = guest_MXCSR | asimd_MXCSR;
u32 fpsr = 0;
fpsr |= (mxcsr & 0b0000000000001); // IOC = IE
fpsr |= (mxcsr & 0b0000000111100) >> 1; // IXC, UFC, OFC, DZC = PE, UE, OE, ZE
fpsr |= fpsr_exc;
fpsr |= (fpsr_qc == 0 ? 0 : 1) << 27;
return fpsr;
}
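// Worked example (a sketch, not in the original): with IE (bit 0) and PE (bit 5)
// set, e.g. mxcsr = 0x1fa1, the two OR lines above produce
// fpsr = 0b1 | (0x20 >> 1) = 0x11, i.e. IOC and IXC, before fpsr_exc and
// fpsr_qc are merged in.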
void A64JitState::SetFpsr(u32 value) {
guest_MXCSR &= ~0x0000003D;
asimd_MXCSR &= ~0x0000003D;
fpsr_qc = (value >> 27) & 1;
fpsr_exc = value & 0x9F;
}
} // namespace Dynarmic::Backend::X64

View File

@@ -1,96 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <xbyak.h>
#include "backend/x64/nzcv_util.h"
#include "common/common_types.h"
#include "frontend/A64/location_descriptor.h"
namespace Dynarmic::Backend::X64 {
class BlockOfCode;
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4324) // Structure was padded due to alignment specifier
#endif
struct A64JitState {
using ProgramCounterType = u64;
A64JitState() { ResetRSB(); }
std::array<u64, 31> reg{};
u64 sp = 0;
u64 pc = 0;
u32 cpsr_nzcv = 0;
u32 GetPstate() const {
return NZCV::FromX64(cpsr_nzcv);
}
void SetPstate(u32 new_pstate) {
cpsr_nzcv = NZCV::ToX64(new_pstate);
}
alignas(16) std::array<u64, 64> vec{}; // Extension registers.
static constexpr size_t SpillCount = 64;
alignas(16) std::array<std::array<u64, 2>, SpillCount> spill{}; // Spill.
static Xbyak::Address GetSpillLocationFromIndex(size_t i) {
using namespace Xbyak::util;
return xword[r15 + offsetof(A64JitState, spill) + i * sizeof(u64) * 2];
}
// For internal use (See: BlockOfCode::RunCode)
u32 guest_MXCSR = 0x00001f80;
u32 asimd_MXCSR = 0x00009fc0;
u32 save_host_MXCSR = 0;
s64 cycles_to_run = 0;
s64 cycles_remaining = 0;
bool halt_requested = false;
bool check_bit = false;
// Exclusive state
static constexpr u64 RESERVATION_GRANULE_MASK = 0xFFFF'FFFF'FFFF'FFF0ull;
u8 exclusive_state = 0;
static constexpr size_t RSBSize = 8; // MUST be a power of 2.
static constexpr size_t RSBPtrMask = RSBSize - 1;
u32 rsb_ptr = 0;
std::array<u64, RSBSize> rsb_location_descriptors;
std::array<u64, RSBSize> rsb_codeptrs;
void ResetRSB() {
rsb_location_descriptors.fill(0xFFFFFFFFFFFFFFFFull);
rsb_codeptrs.fill(0);
}
u32 fpsr_exc = 0;
u32 fpsr_qc = 0;
u32 fpcr = 0;
u32 GetFpcr() const;
u32 GetFpsr() const;
void SetFpcr(u32 value);
void SetFpsr(u32 value);
u64 GetUniqueHash() const noexcept {
const u64 fpcr_u64 = static_cast<u64>(fpcr & A64::LocationDescriptor::fpcr_mask) << A64::LocationDescriptor::fpcr_shift;
const u64 pc_u64 = pc & A64::LocationDescriptor::pc_mask;
return pc_u64 | fpcr_u64;
}
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif
using CodePtr = const void*;
} // namespace Dynarmic::Backend::X64

View File

@@ -1,134 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <algorithm>
#include <vector>
#include <xbyak.h>
#include "backend/x64/abi.h"
#include "backend/x64/block_of_code.h"
#include "common/common_types.h"
#include "common/iterator_util.h"
namespace Dynarmic::Backend::X64 {
constexpr size_t XMM_SIZE = 16;
struct FrameInfo {
size_t stack_subtraction;
size_t xmm_offset;
size_t frame_offset;
};
static FrameInfo CalculateFrameInfo(size_t num_gprs, size_t num_xmms, size_t frame_size) {
// We are initially 8 byte aligned because the return address is pushed onto an aligned stack after a call.
const size_t rsp_alignment = (num_gprs % 2 == 0) ? 8 : 0;
const size_t total_xmm_size = num_xmms * XMM_SIZE;
if (frame_size & 0xF) {
frame_size += 0x10 - (frame_size & 0xF);
}
return {
rsp_alignment + total_xmm_size + frame_size + ABI_SHADOW_SPACE,
frame_size + ABI_SHADOW_SPACE,
ABI_SHADOW_SPACE,
};
}
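// Worked example (a sketch, not in the original): on Win64 with the 18
// ABI_ALL_CALLEE_SAVE locations (8 GPRs, 10 XMMs) and frame_size = 0:
// rsp_alignment = 8 (even GPR count), total_xmm_size = 160 and ABI_SHADOW_SPACE
// = 32, so stack_subtraction = 200. After the call rsp % 16 == 8, and
// (8 + 8 * 8 + 200) % 16 == 0, so the emitted code sees a 16-byte-aligned
// stack with XMM saves starting at rsp + xmm_offset = rsp + 32.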
template<typename RegisterArrayT>
void ABI_PushRegistersAndAdjustStack(BlockOfCode& code, size_t frame_size, const RegisterArrayT& regs) {
using namespace Xbyak::util;
const size_t num_gprs = std::count_if(regs.begin(), regs.end(), HostLocIsGPR);
const size_t num_xmms = std::count_if(regs.begin(), regs.end(), HostLocIsXMM);
FrameInfo frame_info = CalculateFrameInfo(num_gprs, num_xmms, frame_size);
for (HostLoc gpr : regs) {
if (HostLocIsGPR(gpr)) {
code.push(HostLocToReg64(gpr));
}
}
if (frame_info.stack_subtraction != 0) {
code.sub(rsp, u32(frame_info.stack_subtraction));
}
size_t xmm_offset = frame_info.xmm_offset;
for (HostLoc xmm : regs) {
if (HostLocIsXMM(xmm)) {
if (code.HasAVX()) {
code.vmovaps(code.xword[rsp + xmm_offset], HostLocToXmm(xmm));
} else {
code.movaps(code.xword[rsp + xmm_offset], HostLocToXmm(xmm));
}
xmm_offset += XMM_SIZE;
}
}
}
template<typename RegisterArrayT>
void ABI_PopRegistersAndAdjustStack(BlockOfCode& code, size_t frame_size, const RegisterArrayT& regs) {
using namespace Xbyak::util;
const size_t num_gprs = std::count_if(regs.begin(), regs.end(), HostLocIsGPR);
const size_t num_xmms = std::count_if(regs.begin(), regs.end(), HostLocIsXMM);
FrameInfo frame_info = CalculateFrameInfo(num_gprs, num_xmms, frame_size);
size_t xmm_offset = frame_info.xmm_offset;
for (HostLoc xmm : regs) {
if (HostLocIsXMM(xmm)) {
if (code.HasAVX()) {
code.vmovaps(HostLocToXmm(xmm), code.xword[rsp + xmm_offset]);
} else {
code.movaps(HostLocToXmm(xmm), code.xword[rsp + xmm_offset]);
}
xmm_offset += XMM_SIZE;
}
}
if (frame_info.stack_subtraction != 0) {
code.add(rsp, u32(frame_info.stack_subtraction));
}
for (HostLoc gpr : Common::Reverse(regs)) {
if (HostLocIsGPR(gpr)) {
code.pop(HostLocToReg64(gpr));
}
}
}
void ABI_PushCalleeSaveRegistersAndAdjustStack(BlockOfCode& code, size_t frame_size) {
ABI_PushRegistersAndAdjustStack(code, frame_size, ABI_ALL_CALLEE_SAVE);
}
void ABI_PopCalleeSaveRegistersAndAdjustStack(BlockOfCode& code, size_t frame_size) {
ABI_PopRegistersAndAdjustStack(code, frame_size, ABI_ALL_CALLEE_SAVE);
}
void ABI_PushCallerSaveRegistersAndAdjustStack(BlockOfCode& code, size_t frame_size) {
ABI_PushRegistersAndAdjustStack(code, frame_size, ABI_ALL_CALLER_SAVE);
}
void ABI_PopCallerSaveRegistersAndAdjustStack(BlockOfCode& code, size_t frame_size) {
ABI_PopRegistersAndAdjustStack(code, frame_size, ABI_ALL_CALLER_SAVE);
}
void ABI_PushCallerSaveRegistersAndAdjustStackExcept(BlockOfCode& code, HostLoc exception) {
std::vector<HostLoc> regs;
std::remove_copy(ABI_ALL_CALLER_SAVE.begin(), ABI_ALL_CALLER_SAVE.end(), std::back_inserter(regs), exception);
ABI_PushRegistersAndAdjustStack(code, 0, regs);
}
void ABI_PopCallerSaveRegistersAndAdjustStackExcept(BlockOfCode& code, HostLoc exception) {
std::vector<HostLoc> regs;
std::remove_copy(ABI_ALL_CALLER_SAVE.begin(), ABI_ALL_CALLER_SAVE.end(), std::back_inserter(regs), exception);
ABI_PopRegistersAndAdjustStack(code, 0, regs);
}
} // namespace Dynarmic::Backend::X64

View File

@@ -1,123 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include "backend/x64/hostloc.h"
namespace Dynarmic::Backend::X64 {
class BlockOfCode;
#ifdef _WIN32
constexpr HostLoc ABI_RETURN = HostLoc::RAX;
constexpr HostLoc ABI_PARAM1 = HostLoc::RCX;
constexpr HostLoc ABI_PARAM2 = HostLoc::RDX;
constexpr HostLoc ABI_PARAM3 = HostLoc::R8;
constexpr HostLoc ABI_PARAM4 = HostLoc::R9;
constexpr std::array<HostLoc, 13> ABI_ALL_CALLER_SAVE = {
HostLoc::RAX,
HostLoc::RCX,
HostLoc::RDX,
HostLoc::R8,
HostLoc::R9,
HostLoc::R10,
HostLoc::R11,
HostLoc::XMM0,
HostLoc::XMM1,
HostLoc::XMM2,
HostLoc::XMM3,
HostLoc::XMM4,
HostLoc::XMM5,
};
constexpr std::array<HostLoc, 18> ABI_ALL_CALLEE_SAVE = {
HostLoc::RBX,
HostLoc::RSI,
HostLoc::RDI,
HostLoc::RBP,
HostLoc::R12,
HostLoc::R13,
HostLoc::R14,
HostLoc::R15,
HostLoc::XMM6,
HostLoc::XMM7,
HostLoc::XMM8,
HostLoc::XMM9,
HostLoc::XMM10,
HostLoc::XMM11,
HostLoc::XMM12,
HostLoc::XMM13,
HostLoc::XMM14,
HostLoc::XMM15,
};
constexpr size_t ABI_SHADOW_SPACE = 32; // bytes
#else
constexpr HostLoc ABI_RETURN = HostLoc::RAX;
constexpr HostLoc ABI_PARAM1 = HostLoc::RDI;
constexpr HostLoc ABI_PARAM2 = HostLoc::RSI;
constexpr HostLoc ABI_PARAM3 = HostLoc::RDX;
constexpr HostLoc ABI_PARAM4 = HostLoc::RCX;
constexpr std::array<HostLoc, 25> ABI_ALL_CALLER_SAVE = {
HostLoc::RAX,
HostLoc::RCX,
HostLoc::RDX,
HostLoc::RDI,
HostLoc::RSI,
HostLoc::R8,
HostLoc::R9,
HostLoc::R10,
HostLoc::R11,
HostLoc::XMM0,
HostLoc::XMM1,
HostLoc::XMM2,
HostLoc::XMM3,
HostLoc::XMM4,
HostLoc::XMM5,
HostLoc::XMM6,
HostLoc::XMM7,
HostLoc::XMM8,
HostLoc::XMM9,
HostLoc::XMM10,
HostLoc::XMM11,
HostLoc::XMM12,
HostLoc::XMM13,
HostLoc::XMM14,
HostLoc::XMM15,
};
constexpr std::array<HostLoc, 6> ABI_ALL_CALLEE_SAVE = {
HostLoc::RBX,
HostLoc::RBP,
HostLoc::R12,
HostLoc::R13,
HostLoc::R14,
HostLoc::R15,
};
constexpr size_t ABI_SHADOW_SPACE = 0; // bytes
#endif
static_assert(ABI_ALL_CALLER_SAVE.size() + ABI_ALL_CALLEE_SAVE.size() == 31, "Invalid total number of registers");
void ABI_PushCalleeSaveRegistersAndAdjustStack(BlockOfCode& code, size_t frame_size = 0);
void ABI_PopCalleeSaveRegistersAndAdjustStack(BlockOfCode& code, size_t frame_size = 0);
void ABI_PushCallerSaveRegistersAndAdjustStack(BlockOfCode& code, size_t frame_size = 0);
void ABI_PopCallerSaveRegistersAndAdjustStack(BlockOfCode& code, size_t frame_size = 0);
void ABI_PushCallerSaveRegistersAndAdjustStackExcept(BlockOfCode& code, HostLoc exception);
void ABI_PopCallerSaveRegistersAndAdjustStackExcept(BlockOfCode& code, HostLoc exception);
} // namespace Dynarmic::Backend::X64

View File

@@ -1,419 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <array>
#include <cstring>
#include <xbyak.h>
#include "backend/x64/a32_jitstate.h"
#include "backend/x64/abi.h"
#include "backend/x64/block_of_code.h"
#include "backend/x64/perf_map.h"
#include "common/assert.h"
#include "common/bit_util.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#endif
namespace Dynarmic::Backend::X64 {
#ifdef _WIN32
const Xbyak::Reg64 BlockOfCode::ABI_RETURN = Xbyak::util::rax;
const Xbyak::Reg64 BlockOfCode::ABI_PARAM1 = Xbyak::util::rcx;
const Xbyak::Reg64 BlockOfCode::ABI_PARAM2 = Xbyak::util::rdx;
const Xbyak::Reg64 BlockOfCode::ABI_PARAM3 = Xbyak::util::r8;
const Xbyak::Reg64 BlockOfCode::ABI_PARAM4 = Xbyak::util::r9;
const std::array<Xbyak::Reg64, 4> BlockOfCode::ABI_PARAMS = {BlockOfCode::ABI_PARAM1, BlockOfCode::ABI_PARAM2, BlockOfCode::ABI_PARAM3, BlockOfCode::ABI_PARAM4};
#else
const Xbyak::Reg64 BlockOfCode::ABI_RETURN = Xbyak::util::rax;
const Xbyak::Reg64 BlockOfCode::ABI_RETURN2 = Xbyak::util::rdx;
const Xbyak::Reg64 BlockOfCode::ABI_PARAM1 = Xbyak::util::rdi;
const Xbyak::Reg64 BlockOfCode::ABI_PARAM2 = Xbyak::util::rsi;
const Xbyak::Reg64 BlockOfCode::ABI_PARAM3 = Xbyak::util::rdx;
const Xbyak::Reg64 BlockOfCode::ABI_PARAM4 = Xbyak::util::rcx;
const Xbyak::Reg64 BlockOfCode::ABI_PARAM5 = Xbyak::util::r8;
const Xbyak::Reg64 BlockOfCode::ABI_PARAM6 = Xbyak::util::r9;
const std::array<Xbyak::Reg64, 6> BlockOfCode::ABI_PARAMS = {BlockOfCode::ABI_PARAM1, BlockOfCode::ABI_PARAM2, BlockOfCode::ABI_PARAM3, BlockOfCode::ABI_PARAM4, BlockOfCode::ABI_PARAM5, BlockOfCode::ABI_PARAM6};
#endif
namespace {
constexpr size_t CONSTANT_POOL_SIZE = 2 * 1024 * 1024;
class CustomXbyakAllocator : public Xbyak::Allocator {
public:
#ifdef DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT
bool useProtect() const override { return false; }
#endif
};
// This is threadsafe as Xbyak::Allocator does not contain any state; it is a pure interface.
CustomXbyakAllocator s_allocator;
#ifdef DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT
void ProtectMemory(const void* base, size_t size, bool is_executable) {
#ifdef _WIN32
DWORD oldProtect = 0;
VirtualProtect(const_cast<void*>(base), size, is_executable ? PAGE_EXECUTE_READ : PAGE_READWRITE, &oldProtect);
#else
static const size_t pageSize = sysconf(_SC_PAGESIZE);
const size_t iaddr = reinterpret_cast<size_t>(base);
const size_t roundAddr = iaddr & ~(pageSize - static_cast<size_t>(1));
const int mode = is_executable ? (PROT_READ | PROT_EXEC) : (PROT_READ | PROT_WRITE);
mprotect(reinterpret_cast<void*>(roundAddr), size + (iaddr - roundAddr), mode);
#endif
}
#endif
} // anonymous namespace
BlockOfCode::BlockOfCode(RunCodeCallbacks cb, JitStateInfo jsi, size_t total_code_size, size_t far_code_offset, std::function<void(BlockOfCode&)> rcp)
: Xbyak::CodeGenerator(total_code_size, nullptr, &s_allocator)
, cb(std::move(cb))
, jsi(jsi)
, far_code_offset(far_code_offset)
, constant_pool(*this, CONSTANT_POOL_SIZE)
{
EnableWriting();
GenRunCode(rcp);
}
void BlockOfCode::PreludeComplete() {
prelude_complete = true;
near_code_begin = getCurr();
far_code_begin = getCurr() + far_code_offset;
ClearCache();
DisableWriting();
}
void BlockOfCode::EnableWriting() {
#ifdef DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT
ProtectMemory(getCode(), maxSize_, false);
#endif
}
void BlockOfCode::DisableWriting() {
#ifdef DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT
ProtectMemory(getCode(), maxSize_, true);
#endif
}
void BlockOfCode::ClearCache() {
ASSERT(prelude_complete);
in_far_code = false;
near_code_ptr = near_code_begin;
far_code_ptr = far_code_begin;
SetCodePtr(near_code_begin);
}
size_t BlockOfCode::SpaceRemaining() const {
ASSERT(prelude_complete);
const u8* current_near_ptr = in_far_code ? reinterpret_cast<const u8*>(near_code_ptr) : getCode<const u8*>();
const u8* current_far_ptr = in_far_code ? getCode<const u8*>() : reinterpret_cast<const u8*>(far_code_ptr);
if (current_near_ptr >= far_code_begin)
return 0;
if (current_far_ptr >= &top_[maxSize_])
return 0;
return std::min(reinterpret_cast<const u8*>(far_code_begin) - current_near_ptr, &top_[maxSize_] - current_far_ptr);
}
void BlockOfCode::RunCode(void* jit_state, CodePtr code_ptr) const {
run_code(jit_state, code_ptr);
}
void BlockOfCode::StepCode(void* jit_state, CodePtr code_ptr) const {
step_code(jit_state, code_ptr);
}
void BlockOfCode::ReturnFromRunCode(bool mxcsr_already_exited) {
size_t index = 0;
if (mxcsr_already_exited)
index |= MXCSR_ALREADY_EXITED;
jmp(return_from_run_code[index]);
}
void BlockOfCode::ForceReturnFromRunCode(bool mxcsr_already_exited) {
size_t index = FORCE_RETURN;
if (mxcsr_already_exited)
index |= MXCSR_ALREADY_EXITED;
jmp(return_from_run_code[index]);
}
void BlockOfCode::GenRunCode(std::function<void(BlockOfCode&)> rcp) {
align();
run_code = getCurr<RunCodeFuncType>();
// This serves two purposes:
// 1. It saves all the registers that we, as the callee, need to save.
// 2. It aligns the stack so that the code the JIT emits can assume
// that the stack is appropriately aligned for CALLs.
ABI_PushCalleeSaveRegistersAndAdjustStack(*this);
mov(r15, ABI_PARAM1);
mov(rbx, ABI_PARAM2); // save temporarily in non-volatile register
cb.GetTicksRemaining->EmitCall(*this);
mov(qword[r15 + jsi.offsetof_cycles_to_run], ABI_RETURN);
mov(qword[r15 + jsi.offsetof_cycles_remaining], ABI_RETURN);
rcp(*this);
SwitchMxcsrOnEntry();
jmp(rbx);
align();
step_code = getCurr<RunCodeFuncType>();
ABI_PushCalleeSaveRegistersAndAdjustStack(*this);
mov(r15, ABI_PARAM1);
mov(qword[r15 + jsi.offsetof_cycles_to_run], 1);
mov(qword[r15 + jsi.offsetof_cycles_remaining], 1);
rcp(*this);
SwitchMxcsrOnEntry();
jmp(ABI_PARAM2);
align();
// Dispatcher loop
Xbyak::Label return_to_caller, return_to_caller_mxcsr_already_exited;
align();
return_from_run_code[0] = getCurr<const void*>();
cmp(qword[r15 + jsi.offsetof_cycles_remaining], 0);
jng(return_to_caller);
cb.LookupBlock->EmitCall(*this);
jmp(ABI_RETURN);
align();
return_from_run_code[MXCSR_ALREADY_EXITED] = getCurr<const void*>();
cmp(qword[r15 + jsi.offsetof_cycles_remaining], 0);
jng(return_to_caller_mxcsr_already_exited);
SwitchMxcsrOnEntry();
cb.LookupBlock->EmitCall(*this);
jmp(ABI_RETURN);
align();
return_from_run_code[FORCE_RETURN] = getCurr<const void*>();
L(return_to_caller);
SwitchMxcsrOnExit();
// fallthrough
return_from_run_code[MXCSR_ALREADY_EXITED | FORCE_RETURN] = getCurr<const void*>();
L(return_to_caller_mxcsr_already_exited);
cb.AddTicks->EmitCall(*this, [this](RegList param) {
mov(param[0], qword[r15 + jsi.offsetof_cycles_to_run]);
sub(param[0], qword[r15 + jsi.offsetof_cycles_remaining]);
});
ABI_PopCalleeSaveRegistersAndAdjustStack(*this);
ret();
PerfMapRegister(run_code, getCurr(), "dynarmic_dispatcher");
}
void BlockOfCode::SwitchMxcsrOnEntry() {
stmxcsr(dword[r15 + jsi.offsetof_save_host_MXCSR]);
ldmxcsr(dword[r15 + jsi.offsetof_guest_MXCSR]);
}
void BlockOfCode::SwitchMxcsrOnExit() {
stmxcsr(dword[r15 + jsi.offsetof_guest_MXCSR]);
ldmxcsr(dword[r15 + jsi.offsetof_save_host_MXCSR]);
}
void BlockOfCode::EnterStandardASIMD() {
stmxcsr(dword[r15 + jsi.offsetof_guest_MXCSR]);
ldmxcsr(dword[r15 + jsi.offsetof_asimd_MXCSR]);
}
void BlockOfCode::LeaveStandardASIMD() {
stmxcsr(dword[r15 + jsi.offsetof_asimd_MXCSR]);
ldmxcsr(dword[r15 + jsi.offsetof_guest_MXCSR]);
}
void BlockOfCode::UpdateTicks() {
cb.AddTicks->EmitCall(*this, [this](RegList param) {
mov(param[0], qword[r15 + jsi.offsetof_cycles_to_run]);
sub(param[0], qword[r15 + jsi.offsetof_cycles_remaining]);
});
cb.GetTicksRemaining->EmitCall(*this);
mov(qword[r15 + jsi.offsetof_cycles_to_run], ABI_RETURN);
mov(qword[r15 + jsi.offsetof_cycles_remaining], ABI_RETURN);
}
void BlockOfCode::LookupBlock() {
cb.LookupBlock->EmitCall(*this);
}
Xbyak::Address BlockOfCode::MConst(const Xbyak::AddressFrame& frame, u64 lower, u64 upper) {
return constant_pool.GetConstant(frame, lower, upper);
}
void BlockOfCode::SwitchToFarCode() {
ASSERT(prelude_complete);
ASSERT(!in_far_code);
in_far_code = true;
near_code_ptr = getCurr();
SetCodePtr(far_code_ptr);
ASSERT_MSG(near_code_ptr < far_code_begin, "Near code has overwritten far code!");
}
void BlockOfCode::SwitchToNearCode() {
ASSERT(prelude_complete);
ASSERT(in_far_code);
in_far_code = false;
far_code_ptr = getCurr();
SetCodePtr(near_code_ptr);
}
CodePtr BlockOfCode::GetCodeBegin() const {
return near_code_begin;
}
size_t BlockOfCode::GetTotalCodeSize() const {
return maxSize_;
}
void* BlockOfCode::AllocateFromCodeSpace(size_t alloc_size) {
if (size_ + alloc_size >= maxSize_) {
throw Xbyak::Error(Xbyak::ERR_CODE_IS_TOO_BIG);
}
void* ret = getCurr<void*>();
size_ += alloc_size;
memset(ret, 0, alloc_size);
return ret;
}
void BlockOfCode::SetCodePtr(CodePtr code_ptr) {
// The "size" defines where top_, the insertion point, is.
size_t required_size = reinterpret_cast<const u8*>(code_ptr) - getCode();
setSize(required_size);
}
void BlockOfCode::EnsurePatchLocationSize(CodePtr begin, size_t size) {
size_t current_size = getCurr<const u8*>() - reinterpret_cast<const u8*>(begin);
ASSERT(current_size <= size);
nop(size - current_size);
}
bool BlockOfCode::HasSSSE3() const {
return DoesCpuSupport(Xbyak::util::Cpu::tSSSE3);
}
bool BlockOfCode::HasSSE41() const {
return DoesCpuSupport(Xbyak::util::Cpu::tSSE41);
}
bool BlockOfCode::HasSSE42() const {
return DoesCpuSupport(Xbyak::util::Cpu::tSSE42);
}
bool BlockOfCode::HasPCLMULQDQ() const {
return DoesCpuSupport(Xbyak::util::Cpu::tPCLMULQDQ);
}
bool BlockOfCode::HasAVX() const {
return DoesCpuSupport(Xbyak::util::Cpu::tAVX);
}
bool BlockOfCode::HasF16C() const {
return DoesCpuSupport(Xbyak::util::Cpu::tF16C);
}
bool BlockOfCode::HasAESNI() const {
return DoesCpuSupport(Xbyak::util::Cpu::tAESNI);
}
bool BlockOfCode::HasLZCNT() const {
return DoesCpuSupport(Xbyak::util::Cpu::tLZCNT);
}
bool BlockOfCode::HasBMI1() const {
return DoesCpuSupport(Xbyak::util::Cpu::tBMI1);
}
bool BlockOfCode::HasBMI2() const {
return DoesCpuSupport(Xbyak::util::Cpu::tBMI2);
}
bool BlockOfCode::HasFastBMI2() const {
if (DoesCpuSupport(Xbyak::util::Cpu::tBMI2)) {
// BMI2 instructions such as pdep and pext have been very slow on AMD CPUs up until Zen 3.
// Check for Zen 3 or newer by its family (0x19).
// See also: https://en.wikichip.org/wiki/amd/cpuid
if (DoesCpuSupport(Xbyak::util::Cpu::tAMD)) {
std::array<u32, 4> data{};
cpu_info.getCpuid(1, data.data());
const u32 family_base = Common::Bits< 8, 11>(data[0]);
const u32 family_extended = Common::Bits<20, 27>(data[0]);
const u32 family = family_base + family_extended;
return family >= 0x19;
}
return true;
}
return false;
}
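// Worked example (a sketch, not in the original): a Zen 3 part reports CPUID
// leaf 1 EAX = 0x00A20F10, giving family_base = 0xF (bits 8-11) and
// family_extended = 0xA (bits 20-27), so family = 0xF + 0xA = 0x19 and the
// pdep/pext fast path is taken.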
bool BlockOfCode::HasFMA() const {
return DoesCpuSupport(Xbyak::util::Cpu::tFMA);
}
bool BlockOfCode::HasAVX2() const {
return DoesCpuSupport(Xbyak::util::Cpu::tAVX2);
}
bool BlockOfCode::HasAVX512_Skylake() const {
// The feature set formerly known as AVX3.2. (Introduced with Skylake.)
return DoesCpuSupport(Xbyak::util::Cpu::tAVX512F)
&& DoesCpuSupport(Xbyak::util::Cpu::tAVX512CD)
&& DoesCpuSupport(Xbyak::util::Cpu::tAVX512BW)
&& DoesCpuSupport(Xbyak::util::Cpu::tAVX512DQ)
&& DoesCpuSupport(Xbyak::util::Cpu::tAVX512VL);
}
bool BlockOfCode::HasAVX512_Icelake() const {
return DoesCpuSupport(Xbyak::util::Cpu::tAVX512F)
&& DoesCpuSupport(Xbyak::util::Cpu::tAVX512CD)
&& DoesCpuSupport(Xbyak::util::Cpu::tAVX512BW)
&& DoesCpuSupport(Xbyak::util::Cpu::tAVX512DQ)
&& DoesCpuSupport(Xbyak::util::Cpu::tAVX512VL)
&& DoesCpuSupport(Xbyak::util::Cpu::tAVX512_VPOPCNTDQ)
&& DoesCpuSupport(Xbyak::util::Cpu::tAVX512_VNNI)
&& DoesCpuSupport(Xbyak::util::Cpu::tGFNI)
&& DoesCpuSupport(Xbyak::util::Cpu::tVAES)
&& DoesCpuSupport(Xbyak::util::Cpu::tAVX512_VBMI2)
&& DoesCpuSupport(Xbyak::util::Cpu::tAVX512_BITALG)
&& DoesCpuSupport(Xbyak::util::Cpu::tVPCLMULQDQ);
}
bool BlockOfCode::HasAVX512_BITALG() const {
return DoesCpuSupport(Xbyak::util::Cpu::tAVX512_BITALG);
}
bool BlockOfCode::DoesCpuSupport([[maybe_unused]] Xbyak::util::Cpu::Type type) const {
#ifdef DYNARMIC_ENABLE_CPU_FEATURE_DETECTION
return cpu_info.has(type);
#else
return false;
#endif
}
} // namespace Dynarmic::Backend::X64

View File

@@ -1,191 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <functional>
#include <memory>
#include <type_traits>
#include <xbyak.h>
#include <xbyak_util.h>
#include "backend/x64/callback.h"
#include "backend/x64/constant_pool.h"
#include "backend/x64/jitstate_info.h"
#include "common/cast_util.h"
#include "common/common_types.h"
namespace Dynarmic::Backend::X64 {
using CodePtr = const void*;
struct RunCodeCallbacks {
std::unique_ptr<Callback> LookupBlock;
std::unique_ptr<Callback> AddTicks;
std::unique_ptr<Callback> GetTicksRemaining;
};
class BlockOfCode final : public Xbyak::CodeGenerator {
public:
BlockOfCode(RunCodeCallbacks cb, JitStateInfo jsi, size_t total_code_size, size_t far_code_offset, std::function<void(BlockOfCode&)> rcp);
BlockOfCode(const BlockOfCode&) = delete;
/// Call when external emitters have finished emitting their preludes.
void PreludeComplete();
/// Change permissions to RW. This is required to support systems with W^X enforced.
void EnableWriting();
/// Change permissions to RX. This is required to support systems with W^X enforced.
void DisableWriting();
/// Clears this block of code and resets code pointer to beginning.
void ClearCache();
/// Calculates how much space is remaining to use. This is the minimum of near code and far code.
size_t SpaceRemaining() const;
/// Runs emulated code from code_ptr.
void RunCode(void* jit_state, CodePtr code_ptr) const;
/// Runs emulated code from code_ptr for a single cycle.
void StepCode(void* jit_state, CodePtr code_ptr) const;
/// Code emitter: Returns to dispatcher
void ReturnFromRunCode(bool mxcsr_already_exited = false);
/// Code emitter: Returns to dispatcher, forces return to host
void ForceReturnFromRunCode(bool mxcsr_already_exited = false);
/// Code emitter: Makes guest MXCSR the current MXCSR
void SwitchMxcsrOnEntry();
/// Code emitter: Makes saved host MXCSR the current MXCSR
void SwitchMxcsrOnExit();
/// Code emitter: Enter standard ASIMD MXCSR region
void EnterStandardASIMD();
/// Code emitter: Leave standard ASIMD MXCSR region
void LeaveStandardASIMD();
/// Code emitter: Updates cycles remaining by calling cb.AddTicks and cb.GetTicksRemaining
/// @note this clobbers ABI caller-save registers
void UpdateTicks();
/// Code emitter: Performs a block lookup based on current state
/// @note this clobbers ABI caller-save registers
void LookupBlock();
/// Code emitter: Calls the function
template <typename FunctionPointer>
void CallFunction(FunctionPointer fn) {
static_assert(std::is_pointer_v<FunctionPointer> && std::is_function_v<std::remove_pointer_t<FunctionPointer>>,
"Supplied type must be a pointer to a function");
const u64 address = reinterpret_cast<u64>(fn);
const u64 distance = address - (getCurr<u64>() + 5);
if (distance >= 0x0000000080000000ULL && distance < 0xFFFFFFFF80000000ULL) {
// Far call
mov(rax, address);
call(rax);
} else {
call(fn);
}
}
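    // Illustrative note (a sketch, not in the original): the range check above asks
    // whether `distance`, viewed as a signed 64-bit offset, falls outside
    // [-2^31, 2^31), i.e. outside what a rel32 call can encode. For example, code
    // emitted at 0x7fff'0000'1000 calling a function at 0x0000'5555'0000'0000 is
    // out of rel32 range, so the mov rax / call rax sequence is used instead.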
/// Code emitter: Calls the lambda. Lambda must not have any captures.
template <typename Lambda>
void CallLambda(Lambda l) {
CallFunction(Common::FptrCast(l));
}
Xbyak::Address MConst(const Xbyak::AddressFrame& frame, u64 lower, u64 upper = 0);
/// Far code sits far away from the near code. Execution remains primarily in near code.
/// "Cold" / Rarely executed instructions sit in far code, so the CPU doesn't fetch them unless necessary.
void SwitchToFarCode();
void SwitchToNearCode();
CodePtr GetCodeBegin() const;
size_t GetTotalCodeSize() const;
const void* GetReturnFromRunCodeAddress() const {
return return_from_run_code[0];
}
const void* GetForceReturnFromRunCodeAddress() const {
return return_from_run_code[FORCE_RETURN];
}
void int3() { db(0xCC); }
/// Allocate memory of `size` bytes from the same block of memory the code is in.
/// This is useful for objects that need to be placed close to or within code.
/// The lifetime of this memory is the same as the code around it.
void* AllocateFromCodeSpace(size_t size);
void SetCodePtr(CodePtr code_ptr);
void EnsurePatchLocationSize(CodePtr begin, size_t size);
// ABI registers
#ifdef _WIN32
static const Xbyak::Reg64 ABI_RETURN;
static const Xbyak::Reg64 ABI_PARAM1;
static const Xbyak::Reg64 ABI_PARAM2;
static const Xbyak::Reg64 ABI_PARAM3;
static const Xbyak::Reg64 ABI_PARAM4;
static const std::array<Xbyak::Reg64, 4> ABI_PARAMS;
#else
static const Xbyak::Reg64 ABI_RETURN;
static const Xbyak::Reg64 ABI_RETURN2;
static const Xbyak::Reg64 ABI_PARAM1;
static const Xbyak::Reg64 ABI_PARAM2;
static const Xbyak::Reg64 ABI_PARAM3;
static const Xbyak::Reg64 ABI_PARAM4;
static const Xbyak::Reg64 ABI_PARAM5;
static const Xbyak::Reg64 ABI_PARAM6;
static const std::array<Xbyak::Reg64, 6> ABI_PARAMS;
#endif
JitStateInfo GetJitStateInfo() const { return jsi; }
bool HasSSSE3() const;
bool HasSSE41() const;
bool HasSSE42() const;
bool HasPCLMULQDQ() const;
bool HasAVX() const;
bool HasF16C() const;
bool HasAESNI() const;
bool HasLZCNT() const;
bool HasBMI1() const;
bool HasBMI2() const;
bool HasFastBMI2() const;
bool HasFMA() const;
bool HasAVX2() const;
bool HasAVX512_Skylake() const;
bool HasAVX512_Icelake() const;
bool HasAVX512_BITALG() const;
private:
RunCodeCallbacks cb;
JitStateInfo jsi;
size_t far_code_offset;
bool prelude_complete = false;
CodePtr near_code_begin = nullptr;
CodePtr far_code_begin = nullptr;
ConstantPool constant_pool;
bool in_far_code = false;
CodePtr near_code_ptr;
CodePtr far_code_ptr;
using RunCodeFuncType = void(*)(void*, CodePtr);
RunCodeFuncType run_code = nullptr;
RunCodeFuncType step_code = nullptr;
static constexpr size_t MXCSR_ALREADY_EXITED = 1 << 0;
static constexpr size_t FORCE_RETURN = 1 << 1;
std::array<const void*, 4> return_from_run_code;
void GenRunCode(std::function<void(BlockOfCode&)> rcp);
Xbyak::util::Cpu cpu_info;
bool DoesCpuSupport(Xbyak::util::Cpu::Type type) const;
};
} // namespace Dynarmic::Backend::X64

View File

@@ -1,43 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <boost/icl/interval_map.hpp>
#include <boost/icl/interval_set.hpp>
#include <tsl/robin_set.h>
#include "backend/x64/block_range_information.h"
#include "common/common_types.h"
namespace Dynarmic::Backend::X64 {
template <typename ProgramCounterType>
void BlockRangeInformation<ProgramCounterType>::AddRange(boost::icl::discrete_interval<ProgramCounterType> range, IR::LocationDescriptor location) {
block_ranges.add(std::make_pair(range, std::set<IR::LocationDescriptor>{location}));
}
template <typename ProgramCounterType>
void BlockRangeInformation<ProgramCounterType>::ClearCache() {
block_ranges.clear();
}
template <typename ProgramCounterType>
tsl::robin_set<IR::LocationDescriptor> BlockRangeInformation<ProgramCounterType>::InvalidateRanges(const boost::icl::interval_set<ProgramCounterType>& ranges) {
tsl::robin_set<IR::LocationDescriptor> erase_locations;
for (auto invalidate_interval : ranges) {
auto pair = block_ranges.equal_range(invalidate_interval);
for (auto it = pair.first; it != pair.second; ++it) {
for (const auto &descriptor : it->second) {
erase_locations.insert(descriptor);
}
}
}
// TODO: EFFICIENCY: Remove ranges that are to be erased.
return erase_locations;
}
template class BlockRangeInformation<u32>;
template class BlockRangeInformation<u64>;
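// Usage sketch (hypothetical `loc`; not part of the original file): invalidating a
// guest range that touches a recorded block surfaces its location descriptor:
//     BlockRangeInformation<u32> info;
//     info.AddRange(boost::icl::discrete_interval<u32>::closed(0x1000, 0x100F), loc);
//     boost::icl::interval_set<u32> dirty;
//     dirty.add(boost::icl::discrete_interval<u32>::closed(0x1008, 0x1008));
//     const auto to_erase = info.InvalidateRanges(dirty);  // yields {loc}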
} // namespace Dynarmic::Backend::X64

View File

@@ -1,29 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <set>
#include <boost/icl/interval_map.hpp>
#include <boost/icl/interval_set.hpp>
#include <tsl/robin_set.h>
#include "frontend/ir/location_descriptor.h"
namespace Dynarmic::Backend::X64 {
template <typename ProgramCounterType>
class BlockRangeInformation {
public:
void AddRange(boost::icl::discrete_interval<ProgramCounterType> range, IR::LocationDescriptor location);
void ClearCache();
tsl::robin_set<IR::LocationDescriptor> InvalidateRanges(const boost::icl::interval_set<ProgramCounterType>& ranges);
private:
boost::icl::interval_map<ProgramCounterType, std::set<IR::LocationDescriptor>> block_ranges;
};
} // namespace Dynarmic::Backend::X64

View File

@@ -1,40 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "backend/x64/callback.h"
#include "backend/x64/block_of_code.h"
namespace Dynarmic::Backend::X64 {
Callback::~Callback() = default;
void SimpleCallback::EmitCall(BlockOfCode& code, std::function<void(RegList)> l) const {
l({code.ABI_PARAM1, code.ABI_PARAM2, code.ABI_PARAM3, code.ABI_PARAM4});
code.CallFunction(fn);
}
void SimpleCallback::EmitCallWithReturnPointer(BlockOfCode& code, std::function<void(Xbyak::Reg64, RegList)> l) const {
l(code.ABI_PARAM1, {code.ABI_PARAM2, code.ABI_PARAM3, code.ABI_PARAM4});
code.CallFunction(fn);
}
void ArgCallback::EmitCall(BlockOfCode& code, std::function<void(RegList)> l) const {
l({code.ABI_PARAM2, code.ABI_PARAM3, code.ABI_PARAM4});
code.mov(code.ABI_PARAM1, arg);
code.CallFunction(fn);
}
void ArgCallback::EmitCallWithReturnPointer(BlockOfCode& code, std::function<void(Xbyak::Reg64, RegList)> l) const {
#if defined(WIN32) && !defined(__MINGW64__)
l(code.ABI_PARAM2, {code.ABI_PARAM3, code.ABI_PARAM4});
code.mov(code.ABI_PARAM1, arg);
#else
l(code.ABI_PARAM1, {code.ABI_PARAM3, code.ABI_PARAM4});
code.mov(code.ABI_PARAM2, arg);
#endif
code.CallFunction(fn);
}
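// Usage sketch (mirrors the call sites in block_of_code.cpp; not part of the
// original file): ArgCallback loads its bound argument into the first parameter
// register itself, so callers only fill the remaining registers:
//     cb.AddTicks->EmitCall(code, [&](RegList param) {
//         code.mov(param[0], qword[r15 + jsi.offsetof_cycles_to_run]);
//         code.sub(param[0], qword[r15 + jsi.offsetof_cycles_remaining]);
//     });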
} // namespace Dynarmic::Backend::X64

View File

@@ -1,54 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <functional>
#include <vector>
#include <xbyak.h>
#include "common/common_types.h"
namespace Dynarmic::Backend::X64 {
using RegList = std::vector<Xbyak::Reg64>;
class BlockOfCode;
class Callback {
public:
virtual ~Callback();
virtual void EmitCall(BlockOfCode& code, std::function<void(RegList)> fn = [](RegList){}) const = 0;
virtual void EmitCallWithReturnPointer(BlockOfCode& code, std::function<void(Xbyak::Reg64, RegList)> fn) const = 0;
};
class SimpleCallback final : public Callback {
public:
template <typename Function>
SimpleCallback(Function fn) : fn(reinterpret_cast<void(*)()>(fn)) {}
void EmitCall(BlockOfCode& code, std::function<void(RegList)> fn = [](RegList){}) const override;
void EmitCallWithReturnPointer(BlockOfCode& code, std::function<void(Xbyak::Reg64, RegList)> fn) const override;
private:
void (*fn)();
};
class ArgCallback final : public Callback {
public:
template <typename Function>
ArgCallback(Function fn, u64 arg) : fn(reinterpret_cast<void(*)()>(fn)), arg(arg) {}
void EmitCall(BlockOfCode& code, std::function<void(RegList)> fn = [](RegList){}) const override;
void EmitCallWithReturnPointer(BlockOfCode& code, std::function<void(Xbyak::Reg64, RegList)> fn) const override;
private:
void (*fn)();
u64 arg;
};
} // namespace Dynarmic::Backend::X64

View File

@@ -1,34 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <cstring>
#include "backend/x64/block_of_code.h"
#include "backend/x64/constant_pool.h"
#include "common/assert.h"
namespace Dynarmic::Backend::X64 {
ConstantPool::ConstantPool(BlockOfCode& code, size_t size) : code(code), pool_size(size) {
code.int3();
code.align(align_size);
pool_begin = reinterpret_cast<u8*>(code.AllocateFromCodeSpace(size));
current_pool_ptr = pool_begin;
}
Xbyak::Address ConstantPool::GetConstant(const Xbyak::AddressFrame& frame, u64 lower, u64 upper) {
const auto constant = std::make_tuple(lower, upper);
auto iter = constant_info.find(constant);
if (iter == constant_info.end()) {
ASSERT(static_cast<size_t>(current_pool_ptr - pool_begin) < pool_size);
std::memcpy(current_pool_ptr, &lower, sizeof(u64));
std::memcpy(current_pool_ptr + sizeof(u64), &upper, sizeof(u64));
iter = constant_info.emplace(constant, current_pool_ptr).first;
current_pool_ptr += align_size;
}
return frame[code.rip + iter->second];
}
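// Usage sketch (not part of the original file): callers reach the pool through
// BlockOfCode::MConst, which returns a RIP-relative address:
//     code.movaps(xmm0, code.MConst(code.xword, 0x8000'0000'8000'0000));
// Requesting the same (lower, upper) pair again reuses the existing slot.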
} // namespace Dynarmic::Backend::X64

View File

@@ -1,40 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <map>
#include <tuple>
#include <xbyak.h>
#include "common/common_types.h"
namespace Dynarmic::Backend::X64 {
class BlockOfCode;
/// ConstantPool allocates a block of memory from BlockOfCode.
/// It places constants into this block of memory, returning the address
/// of the memory location where the constant is placed. If the constant
/// already exists, its memory location is reused.
class ConstantPool final {
public:
ConstantPool(BlockOfCode& code, size_t size);
Xbyak::Address GetConstant(const Xbyak::AddressFrame& frame, u64 lower, u64 upper = 0);
private:
static constexpr size_t align_size = 16; // bytes
std::map<std::tuple<u64, u64>, void*> constant_info;
BlockOfCode& code;
size_t pool_size;
u8* pool_begin;
u8* current_pool_ptr;
};
} // namespace Dynarmic::Backend::X64

View File

@@ -1,81 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstring>
#include <utility>
#include <mp/traits/function_info.h>
#include "backend/x64/callback.h"
#include "common/cast_util.h"
#include "common/common_types.h"
namespace Dynarmic {
namespace Backend::X64 {
namespace impl {
template <typename FunctionType, FunctionType mfp>
struct ThunkBuilder;
template <typename C, typename R, typename... Args, R(C::*mfp)(Args...)>
struct ThunkBuilder<R(C::*)(Args...), mfp> {
static R Thunk(C* this_, Args... args) {
return (this_->*mfp)(std::forward<Args>(args)...);
}
};
} // namespace impl
template<auto mfp>
ArgCallback DevirtualizeGeneric(mp::class_type<decltype(mfp)>* this_) {
return ArgCallback{&impl::ThunkBuilder<decltype(mfp), mfp>::Thunk, reinterpret_cast<u64>(this_)};
}
template<auto mfp>
ArgCallback DevirtualizeWindows(mp::class_type<decltype(mfp)>* this_) {
static_assert(sizeof(mfp) == 8);
return ArgCallback{Common::BitCast<u64>(mfp), reinterpret_cast<u64>(this_)};
}
template<auto mfp>
ArgCallback DevirtualizeItanium(mp::class_type<decltype(mfp)>* this_) {
struct MemberFunctionPointer {
/// For a non-virtual function, this is a simple function pointer.
/// For a virtual function, it is (1 + virtual table offset in bytes).
u64 ptr;
/// The required adjustment to `this`, prior to the call.
u64 adj;
} mfp_struct = Common::BitCast<MemberFunctionPointer>(mfp);
static_assert(sizeof(MemberFunctionPointer) == 16);
static_assert(sizeof(MemberFunctionPointer) == sizeof(mfp));
u64 fn_ptr = mfp_struct.ptr;
u64 this_ptr = reinterpret_cast<u64>(this_) + mfp_struct.adj;
if (mfp_struct.ptr & 1) {
u64 vtable = Common::BitCastPointee<u64>(this_ptr);
fn_ptr = Common::BitCastPointee<u64>(vtable + fn_ptr - 1);
}
return ArgCallback{fn_ptr, this_ptr};
}
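// Illustrative sketch (not in the original): for
//     struct Base { virtual u64 Tick(); };
// the Itanium ABI encodes &Base::Tick as MemberFunctionPointer{ptr = 1, adj = 0}
// (vtable byte offset 0 with the low bit set to mark "virtual"), so the branch
// above loads the object's vtable and fetches the real entry point from it.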
template<auto mfp>
ArgCallback Devirtualize(mp::class_type<decltype(mfp)>* this_) {
#if defined(__APPLE__) || defined(linux) || defined(__linux) || defined(__linux__)
return DevirtualizeItanium<mfp>(this_);
#elif defined(__MINGW64__)
return DevirtualizeItanium<mfp>(this_);
#elif defined(_WIN32)
return DevirtualizeWindows<mfp>(this_);
#else
return DevirtualizeGeneric<mfp>(this_);
#endif
}
} // namespace Backend::X64
} // namespace Dynarmic

View File

@@ -1,340 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <iterator>
#include <tsl/robin_set.h>
#include "backend/x64/block_of_code.h"
#include "backend/x64/emit_x64.h"
#include "backend/x64/nzcv_util.h"
#include "backend/x64/perf_map.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
#include "common/variant_util.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/microinstruction.h"
#include "frontend/ir/opcodes.h"
// TODO: Have ARM flags in host flags and not have them use up GPR registers unless necessary.
// TODO: Actually implement that proper instruction selector you've always wanted, sweetheart.
namespace Dynarmic::Backend::X64 {
using namespace Xbyak::util;
EmitContext::EmitContext(RegAlloc& reg_alloc, IR::Block& block)
: reg_alloc(reg_alloc), block(block) {}
size_t EmitContext::GetInstOffset(IR::Inst* inst) const {
return static_cast<size_t>(std::distance(block.begin(), IR::Block::iterator(inst)));
}
void EmitContext::EraseInstruction(IR::Inst* inst) {
block.Instructions().erase(inst);
inst->ClearArgs();
}
EmitX64::EmitX64(BlockOfCode& code) : code(code) {
exception_handler.Register(code);
}
EmitX64::~EmitX64() = default;
std::optional<EmitX64::BlockDescriptor> EmitX64::GetBasicBlock(IR::LocationDescriptor descriptor) const {
const auto iter = block_descriptors.find(descriptor);
if (iter == block_descriptors.end()) {
return std::nullopt;
}
return iter->second;
}
void EmitX64::EmitVoid(EmitContext&, IR::Inst*) {
}
void EmitX64::EmitBreakpoint(EmitContext&, IR::Inst*) {
code.int3();
}
void EmitX64::EmitIdentity(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
if (!args[0].IsImmediate()) {
ctx.reg_alloc.DefineValue(inst, args[0]);
}
}
void EmitX64::PushRSBHelper(Xbyak::Reg64 loc_desc_reg, Xbyak::Reg64 index_reg, IR::LocationDescriptor target) {
using namespace Xbyak::util;
const auto iter = block_descriptors.find(target);
CodePtr target_code_ptr = iter != block_descriptors.end()
? iter->second.entrypoint
: code.GetReturnFromRunCodeAddress();
code.mov(index_reg.cvt32(), dword[r15 + code.GetJitStateInfo().offsetof_rsb_ptr]);
code.mov(loc_desc_reg, target.Value());
patch_information[target].mov_rcx.emplace_back(code.getCurr());
EmitPatchMovRcx(target_code_ptr);
code.mov(qword[r15 + index_reg * 8 + code.GetJitStateInfo().offsetof_rsb_location_descriptors], loc_desc_reg);
code.mov(qword[r15 + index_reg * 8 + code.GetJitStateInfo().offsetof_rsb_codeptrs], rcx);
code.add(index_reg.cvt32(), 1);
code.and_(index_reg.cvt32(), u32(code.GetJitStateInfo().rsb_ptr_mask));
code.mov(dword[r15 + code.GetJitStateInfo().offsetof_rsb_ptr], index_reg.cvt32());
}
void EmitX64::EmitPushRSB(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ASSERT(args[0].IsImmediate());
const u64 unique_hash_of_target = args[0].GetImmediateU64();
ctx.reg_alloc.ScratchGpr(HostLoc::RCX);
const Xbyak::Reg64 loc_desc_reg = ctx.reg_alloc.ScratchGpr();
const Xbyak::Reg64 index_reg = ctx.reg_alloc.ScratchGpr();
PushRSBHelper(loc_desc_reg, index_reg, IR::LocationDescriptor{unique_hash_of_target});
}
void EmitX64::EmitGetCarryFromOp(EmitContext&, IR::Inst*) {
ASSERT_MSG(false, "should never happen");
}
void EmitX64::EmitGetOverflowFromOp(EmitContext&, IR::Inst*) {
ASSERT_MSG(false, "should never happen");
}
void EmitX64::EmitGetGEFromOp(EmitContext&, IR::Inst*) {
ASSERT_MSG(false, "should never happen");
}
void EmitX64::EmitGetUpperFromOp(EmitContext&, IR::Inst*) {
ASSERT_MSG(false, "should never happen");
}
void EmitX64::EmitGetLowerFromOp(EmitContext&, IR::Inst*) {
ASSERT_MSG(false, "should never happen");
}
void EmitX64::EmitGetNZCVFromOp(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const int bitsize = [&]{
switch (args[0].GetType()) {
case IR::Type::U8:
return 8;
case IR::Type::U16:
return 16;
case IR::Type::U32:
return 32;
case IR::Type::U64:
return 64;
default:
UNREACHABLE();
}
}();
const Xbyak::Reg64 nzcv = ctx.reg_alloc.ScratchGpr(HostLoc::RAX);
const Xbyak::Reg value = ctx.reg_alloc.UseGpr(args[0]).changeBit(bitsize);
code.cmp(value, 0);
code.lahf();
code.seto(code.al);
ctx.reg_alloc.DefineValue(inst, nzcv);
}
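// Illustrative note (a sketch, not in the original): cmp sets the host flags from
// the operand, lahf copies SF/ZF/CF into AH (bits 15/14/8 of rax) and seto al
// records OF in bit 0, matching the host-flag layout built by
// EmitNZCVFromPackedFlags below; e.g. a negative input yields
// rax & 0xC101 == 0x8000 (only N set).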
void EmitX64::EmitNZCVFromPackedFlags(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
if (args[0].IsImmediate()) {
const Xbyak::Reg32 nzcv = ctx.reg_alloc.ScratchGpr().cvt32();
u32 value = 0;
value |= Common::Bit<31>(args[0].GetImmediateU32()) ? (1 << 15) : 0;
value |= Common::Bit<30>(args[0].GetImmediateU32()) ? (1 << 14) : 0;
value |= Common::Bit<29>(args[0].GetImmediateU32()) ? (1 << 8) : 0;
value |= Common::Bit<28>(args[0].GetImmediateU32()) ? (1 << 0) : 0;
code.mov(nzcv, value);
ctx.reg_alloc.DefineValue(inst, nzcv);
} else if (code.HasFastBMI2()) {
const Xbyak::Reg32 nzcv = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
const Xbyak::Reg32 tmp = ctx.reg_alloc.ScratchGpr().cvt32();
code.shr(nzcv, 28);
code.mov(tmp, NZCV::x64_mask);
code.pdep(nzcv, nzcv, tmp);
ctx.reg_alloc.DefineValue(inst, nzcv);
} else {
const Xbyak::Reg32 nzcv = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
code.shr(nzcv, 28);
code.imul(nzcv, nzcv, NZCV::to_x64_multiplier);
code.and_(nzcv, NZCV::x64_mask);
ctx.reg_alloc.DefineValue(inst, nzcv);
}
}
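// Worked example (a sketch; constants inferred from the immediate path above,
// where N/Z/C/V land in bits 15/14/8/0): assuming NZCV::x64_mask == 0xC101 and
// NZCV::to_x64_multiplier == 0x1081, packed flags 0xA0000000 (N and C) shift
// down to 0b1010, and 0xA * 0x1081 == 0xA50A, masked to 0x8100. The BMI2 path
// computes the same in one step: pdep(0b1010, 0xC101) == 0x8100.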
void EmitX64::EmitAddCycles(size_t cycles) {
ASSERT(cycles < std::numeric_limits<u32>::max());
code.sub(qword[r15 + code.GetJitStateInfo().offsetof_cycles_remaining], static_cast<u32>(cycles));
}
Xbyak::Label EmitX64::EmitCond(IR::Cond cond) {
Xbyak::Label pass;
code.mov(eax, dword[r15 + code.GetJitStateInfo().offsetof_cpsr_nzcv]);
// sahf restores SF, ZF, CF
// add al, 0x7F restores OF
switch (cond) {
case IR::Cond::EQ: //z
code.sahf();
code.jz(pass);
break;
case IR::Cond::NE: //!z
code.sahf();
code.jnz(pass);
break;
case IR::Cond::CS: //c
code.sahf();
code.jc(pass);
break;
case IR::Cond::CC: //!c
code.sahf();
code.jnc(pass);
break;
case IR::Cond::MI: //n
code.sahf();
code.js(pass);
break;
case IR::Cond::PL: //!n
code.sahf();
code.jns(pass);
break;
case IR::Cond::VS: //v
code.add(al, 0x7F);
code.jo(pass);
break;
case IR::Cond::VC: //!v
code.add(al, 0x7F);
code.jno(pass);
break;
case IR::Cond::HI: //c & !z
code.sahf();
code.cmc();
code.ja(pass);
break;
case IR::Cond::LS: //!c | z
code.sahf();
code.cmc();
code.jna(pass);
break;
case IR::Cond::GE: // n == v
code.add(al, 0x7F);
code.sahf();
code.jge(pass);
break;
case IR::Cond::LT: // n != v
code.add(al, 0x7F);
code.sahf();
code.jl(pass);
break;
case IR::Cond::GT: // !z & (n == v)
code.add(al, 0x7F);
code.sahf();
code.jg(pass);
break;
case IR::Cond::LE: // z | (n != v)
code.add(al, 0x7F);
code.sahf();
code.jle(pass);
break;
default:
ASSERT_MSG(false, "Unknown cond {}", static_cast<size_t>(cond));
break;
}
return pass;
}
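// Worked example (a sketch, not in the original): for GE with cpsr_nzcv = 0x8000
// (N = 1, V = 0), `add al, 0x7F` leaves OF = 0 (al was 0) and `sahf` sets SF = 1
// from AH = 0x80; jge requires SF == OF, so the branch to `pass` is correctly
// not taken, matching N != V.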
EmitX64::BlockDescriptor EmitX64::RegisterBlock(const IR::LocationDescriptor& descriptor, CodePtr entrypoint, CodePtr entrypoint_far, size_t size) {
PerfMapRegister(entrypoint, code.getCurr(), LocationDescriptorToFriendlyName(descriptor));
code.SwitchToFarCode();
PerfMapRegister(entrypoint_far, code.getCurr(), LocationDescriptorToFriendlyName(descriptor) + "_far");
code.SwitchToNearCode();
Patch(descriptor, entrypoint);
BlockDescriptor block_desc{entrypoint, size};
block_descriptors.emplace(descriptor.Value(), block_desc);
return block_desc;
}
void EmitX64::EmitTerminal(IR::Terminal terminal, IR::LocationDescriptor initial_location, bool is_single_step) {
Common::VisitVariant<void>(terminal, [this, initial_location, is_single_step](auto x) {
using T = std::decay_t<decltype(x)>;
if constexpr (!std::is_same_v<T, IR::Term::Invalid>) {
this->EmitTerminalImpl(x, initial_location, is_single_step);
} else {
ASSERT_MSG(false, "Invalid terminal");
}
});
}
void EmitX64::Patch(const IR::LocationDescriptor& target_desc, CodePtr target_code_ptr) {
const CodePtr save_code_ptr = code.getCurr();
const PatchInformation& patch_info = patch_information[target_desc];
for (CodePtr location : patch_info.jg) {
code.SetCodePtr(location);
EmitPatchJg(target_desc, target_code_ptr);
}
for (CodePtr location : patch_info.jmp) {
code.SetCodePtr(location);
EmitPatchJmp(target_desc, target_code_ptr);
}
for (CodePtr location : patch_info.mov_rcx) {
code.SetCodePtr(location);
EmitPatchMovRcx(target_code_ptr);
}
code.SetCodePtr(save_code_ptr);
}
void EmitX64::Unpatch(const IR::LocationDescriptor& target_desc) {
Patch(target_desc, nullptr);
}
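// Illustrative note, not original dynarmic source: unpatching is patching
// with a null target; the architecture-specific EmitPatch* overrides are
// expected to emit a fallback (typically a link back to the dispatcher)
// when handed a nullptr, so invalidated blocks unlink cleanly.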
void EmitX64::ClearCache() {
block_descriptors.clear();
patch_information.clear();
PerfMapClear();
}
void EmitX64::InvalidateBasicBlocks(const tsl::robin_set<IR::LocationDescriptor>& locations) {
code.EnableWriting();
SCOPE_EXIT { code.DisableWriting(); };
for (const auto &descriptor : locations) {
const auto it = block_descriptors.find(descriptor);
if (it == block_descriptors.end()) {
continue;
}
if (patch_information.count(descriptor)) {
Unpatch(descriptor);
}
block_descriptors.erase(it);
}
}
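// Illustrative note, not original dynarmic source: EnableWriting and the
// SCOPE_EXIT'd DisableWriting bracket the unpatching because on platforms
// that enforce W^X the code buffer is not writable while it is executable;
// the scope guard flips the protection back even on early exit.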
} // namespace Dynarmic::Backend::X64

View File

@@ -1,131 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <optional>
#include <string>
#include <type_traits>
#include <vector>
#include <tsl/robin_map.h>
#include <tsl/robin_set.h>
#include <xbyak_util.h>
#include "backend/x64/exception_handler.h"
#include "backend/x64/reg_alloc.h"
#include "common/bit_util.h"
#include "common/fp/fpcr.h"
#include "frontend/ir/location_descriptor.h"
#include "frontend/ir/terminal.h"
namespace Dynarmic::IR {
class Block;
class Inst;
} // namespace Dynarmic::IR
namespace Dynarmic {
enum class OptimizationFlag : u32;
} // namespace Dynarmic
namespace Dynarmic::Backend::X64 {
class BlockOfCode;
using A64FullVectorWidth = std::integral_constant<size_t, 128>;
// Array alias that always sizes itself according to the given type T
// relative to the size of a vector register. e.g. T = u32 would result
// in a std::array<u32, 4>.
template <typename T>
using VectorArray = std::array<T, A64FullVectorWidth::value / Common::BitSize<T>()>;
template <typename T>
using HalfVectorArray = std::array<T, A64FullVectorWidth::value / Common::BitSize<T>() / 2>;
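// Illustrative compile-time check, not original dynarmic source: for the
// 128-bit A64 vector width these aliases size themselves as documented in
// the comment above.
static_assert(std::is_same_v<VectorArray<u32>, std::array<u32, 4>>);
static_assert(std::is_same_v<HalfVectorArray<u16>, std::array<u16, 4>>);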
struct EmitContext {
EmitContext(RegAlloc& reg_alloc, IR::Block& block);
size_t GetInstOffset(IR::Inst* inst) const;
void EraseInstruction(IR::Inst* inst);
virtual FP::FPCR FPCR(bool fpcr_controlled = true) const = 0;
virtual bool HasOptimization(OptimizationFlag flag) const = 0;
RegAlloc& reg_alloc;
IR::Block& block;
};
class EmitX64 {
public:
struct BlockDescriptor {
CodePtr entrypoint; // Entrypoint of emitted code
size_t size; // Length in bytes of emitted code
};
explicit EmitX64(BlockOfCode& code);
virtual ~EmitX64();
/// Looks up an emitted host block in the cache.
std::optional<BlockDescriptor> GetBasicBlock(IR::LocationDescriptor descriptor) const;
/// Empties the entire cache.
virtual void ClearCache();
/// Invalidates a selection of basic blocks.
void InvalidateBasicBlocks(const tsl::robin_set<IR::LocationDescriptor>& locations);
protected:
// Microinstruction emitters
#define OPCODE(name, type, ...) void Emit##name(EmitContext& ctx, IR::Inst* inst);
#define A32OPC(...)
#define A64OPC(...)
#include "frontend/ir/opcodes.inc"
#undef OPCODE
#undef A32OPC
#undef A64OPC
// Helpers
virtual std::string LocationDescriptorToFriendlyName(const IR::LocationDescriptor&) const = 0;
void EmitAddCycles(size_t cycles);
Xbyak::Label EmitCond(IR::Cond cond);
BlockDescriptor RegisterBlock(const IR::LocationDescriptor& location_descriptor, CodePtr entrypoint, CodePtr entrypoint_far, size_t size);
void PushRSBHelper(Xbyak::Reg64 loc_desc_reg, Xbyak::Reg64 index_reg, IR::LocationDescriptor target);
// Terminal instruction emitters
void EmitTerminal(IR::Terminal terminal, IR::LocationDescriptor initial_location, bool is_single_step);
virtual void EmitTerminalImpl(IR::Term::Interpret terminal, IR::LocationDescriptor initial_location, bool is_single_step) = 0;
virtual void EmitTerminalImpl(IR::Term::ReturnToDispatch terminal, IR::LocationDescriptor initial_location, bool is_single_step) = 0;
virtual void EmitTerminalImpl(IR::Term::LinkBlock terminal, IR::LocationDescriptor initial_location, bool is_single_step) = 0;
virtual void EmitTerminalImpl(IR::Term::LinkBlockFast terminal, IR::LocationDescriptor initial_location, bool is_single_step) = 0;
virtual void EmitTerminalImpl(IR::Term::PopRSBHint terminal, IR::LocationDescriptor initial_location, bool is_single_step) = 0;
virtual void EmitTerminalImpl(IR::Term::FastDispatchHint terminal, IR::LocationDescriptor initial_location, bool is_single_step) = 0;
virtual void EmitTerminalImpl(IR::Term::If terminal, IR::LocationDescriptor initial_location, bool is_single_step) = 0;
virtual void EmitTerminalImpl(IR::Term::CheckBit terminal, IR::LocationDescriptor initial_location, bool is_single_step) = 0;
virtual void EmitTerminalImpl(IR::Term::CheckHalt terminal, IR::LocationDescriptor initial_location, bool is_single_step) = 0;
// Patching
struct PatchInformation {
std::vector<CodePtr> jg;
std::vector<CodePtr> jmp;
std::vector<CodePtr> mov_rcx;
};
void Patch(const IR::LocationDescriptor& target_desc, CodePtr target_code_ptr);
virtual void Unpatch(const IR::LocationDescriptor& target_desc);
virtual void EmitPatchJg(const IR::LocationDescriptor& target_desc, CodePtr target_code_ptr = nullptr) = 0;
virtual void EmitPatchJmp(const IR::LocationDescriptor& target_desc, CodePtr target_code_ptr = nullptr) = 0;
virtual void EmitPatchMovRcx(CodePtr target_code_ptr = nullptr) = 0;
// State
BlockOfCode& code;
ExceptionHandler exception_handler;
tsl::robin_map<IR::LocationDescriptor, BlockDescriptor> block_descriptors;
tsl::robin_map<IR::LocationDescriptor, PatchInformation> patch_information;
};
} // namespace Dynarmic::Backend::X64

View File

@@ -1,107 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "backend/x64/abi.h"
#include "backend/x64/block_of_code.h"
#include "backend/x64/emit_x64.h"
#include "common/common_types.h"
#include "common/crypto/aes.h"
#include "frontend/ir/microinstruction.h"
namespace Dynarmic::Backend::X64 {
using namespace Xbyak::util;
namespace AES = Common::Crypto::AES;
using AESFn = void(AES::State&, const AES::State&);
static void EmitAESFunction(RegAlloc::ArgumentInfo args, EmitContext& ctx, BlockOfCode& code, IR::Inst* inst, AESFn fn) {
constexpr u32 stack_space = static_cast<u32>(sizeof(AES::State)) * 2;
const Xbyak::Xmm input = ctx.reg_alloc.UseXmm(args[0]);
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
ctx.reg_alloc.EndOfAllocScope();
ctx.reg_alloc.HostCall(nullptr);
code.sub(rsp, stack_space + ABI_SHADOW_SPACE);
code.lea(code.ABI_PARAM1, ptr[rsp + ABI_SHADOW_SPACE]);
code.lea(code.ABI_PARAM2, ptr[rsp + ABI_SHADOW_SPACE + sizeof(AES::State)]);
code.movaps(xword[code.ABI_PARAM2], input);
code.CallFunction(fn);
code.movaps(result, xword[rsp + ABI_SHADOW_SPACE]);
code.add(rsp, stack_space + ABI_SHADOW_SPACE);
ctx.reg_alloc.DefineValue(inst, result);
}
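// Illustrative note, not original dynarmic source: the software fallback
// reserves stack space for two AES::State buffers, points ABI_PARAM1 at the
// output slot and ABI_PARAM2 at a copy of the input, and keeps
// ABI_SHADOW_SPACE below them for calling conventions (Win64) that require
// a shadow area.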
void EmitX64::EmitAESDecryptSingleRound(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
if (code.HasAESNI()) {
const Xbyak::Xmm data = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm zero = ctx.reg_alloc.ScratchXmm();
code.pxor(zero, zero);
code.aesdeclast(data, zero);
ctx.reg_alloc.DefineValue(inst, data);
return;
}
EmitAESFunction(args, ctx, code, inst, AES::DecryptSingleRound);
}
void EmitX64::EmitAESEncryptSingleRound(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
if (code.HasAESNI()) {
const Xbyak::Xmm data = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm zero = ctx.reg_alloc.ScratchXmm();
code.pxor(zero, zero);
code.aesenclast(data, zero);
ctx.reg_alloc.DefineValue(inst, data);
return;
}
EmitAESFunction(args, ctx, code, inst, AES::EncryptSingleRound);
}
void EmitX64::EmitAESInverseMixColumns(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
if (code.HasAESNI()) {
const Xbyak::Xmm data = ctx.reg_alloc.UseScratchXmm(args[0]);
code.aesimc(data, data);
ctx.reg_alloc.DefineValue(inst, data);
return;
}
EmitAESFunction(args, ctx, code, inst, AES::InverseMixColumns);
}
void EmitX64::EmitAESMixColumns(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
if (code.HasAESNI()) {
const Xbyak::Xmm data = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm zero = ctx.reg_alloc.ScratchXmm();
code.pxor(zero, zero);
code.aesdeclast(data, zero);
code.aesenc(data, zero);
ctx.reg_alloc.DefineValue(inst, data);
return;
}
EmitAESFunction(args, ctx, code, inst, AES::MixColumns);
}
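// Illustrative note, not original dynarmic source: x64 has no MixColumns
// instruction, so the AES-NI path composes two that exist, using an
// all-zero round key: aesdeclast applies InvShiftRows then InvSubBytes, and
// aesenc applies ShiftRows, SubBytes, then MixColumns. The byte permutation
// (ShiftRows) commutes with the per-byte substitution (SubBytes), so the
// inverse pairs cancel and only MixColumns remains.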
} // namespace Dynarmic::Backend::X64

View File

@@ -1,148 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <array>
#include <climits>
#include "backend/x64/block_of_code.h"
#include "backend/x64/emit_x64.h"
#include "common/crypto/crc32.h"
#include "frontend/ir/microinstruction.h"
namespace Dynarmic::Backend::X64 {
using namespace Xbyak::util;
namespace CRC32 = Common::Crypto::CRC32;
static void EmitCRC32Castagnoli(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, const int data_size) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
if (code.HasSSE42()) {
const Xbyak::Reg32 crc = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
const Xbyak::Reg value = ctx.reg_alloc.UseGpr(args[1]).changeBit(data_size);
code.crc32(crc, value);
ctx.reg_alloc.DefineValue(inst, crc);
return;
}
ctx.reg_alloc.HostCall(inst, args[0], args[1], {});
code.mov(code.ABI_PARAM3, data_size / CHAR_BIT);
code.CallFunction(&CRC32::ComputeCRC32Castagnoli);
}
static void EmitCRC32ISO(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, const int data_size) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
if (code.HasPCLMULQDQ() && data_size < 32) {
const Xbyak::Reg32 crc = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
const Xbyak::Reg64 value = ctx.reg_alloc.UseScratchGpr(args[1]);
const Xbyak::Xmm xmm_value = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm xmm_const = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm xmm_tmp = ctx.reg_alloc.ScratchXmm();
code.movdqa(xmm_const, code.MConst(xword, 0xb4e5b025'f7011641, 0x00000001'DB710641));
code.movzx(value.cvt32(), value.changeBit(data_size));
code.xor_(value.cvt32(), crc);
code.movd(xmm_tmp, value.cvt32());
code.pslldq(xmm_tmp, (64 - data_size) / 8);
if (code.HasAVX()) {
code.vpclmulqdq(xmm_value, xmm_tmp, xmm_const, 0x00);
code.pclmulqdq(xmm_value, xmm_const, 0x10);
code.pxor(xmm_value, xmm_tmp);
} else {
code.movdqa(xmm_value, xmm_tmp);
code.pclmulqdq(xmm_value, xmm_const, 0x00);
code.pclmulqdq(xmm_value, xmm_const, 0x10);
code.pxor(xmm_value, xmm_tmp);
}
code.pextrd(crc, xmm_value, 2);
ctx.reg_alloc.DefineValue(inst, crc);
return;
}
if (code.HasPCLMULQDQ() && data_size == 32) {
const Xbyak::Reg32 crc = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
const Xbyak::Reg32 value = ctx.reg_alloc.UseGpr(args[1]).cvt32();
const Xbyak::Xmm xmm_value = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm xmm_const = ctx.reg_alloc.ScratchXmm();
code.movdqa(xmm_const, code.MConst(xword, 0xb4e5b025'f7011641, 0x00000001'DB710641));
code.xor_(crc, value);
code.shl(crc.cvt64(), 32);
code.movq(xmm_value, crc.cvt64());
code.pclmulqdq(xmm_value, xmm_const, 0x00);
code.pclmulqdq(xmm_value, xmm_const, 0x10);
code.pextrd(crc, xmm_value, 2);
ctx.reg_alloc.DefineValue(inst, crc);
return;
}
if (code.HasPCLMULQDQ() && data_size == 64) {
const Xbyak::Reg32 crc = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
const Xbyak::Reg64 value = ctx.reg_alloc.UseGpr(args[1]);
const Xbyak::Xmm xmm_value = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm xmm_const = ctx.reg_alloc.ScratchXmm();
code.movdqa(xmm_const, code.MConst(xword, 0xb4e5b025'f7011641, 0x00000001'DB710641));
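// Illustrative note, not original dynarmic source: the `mov crc, crc` below
// is not a no-op; a 32-bit mov zero-extends, clearing the upper half of the
// register before the 64-bit xor.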
code.mov(crc, crc);
code.xor_(crc.cvt64(), value);
code.movq(xmm_value, crc.cvt64());
code.pclmulqdq(xmm_value, xmm_const, 0x00);
code.pclmulqdq(xmm_value, xmm_const, 0x10);
code.pextrd(crc, xmm_value, 2);
ctx.reg_alloc.DefineValue(inst, crc);
return;
}
ctx.reg_alloc.HostCall(inst, args[0], args[1], {});
code.mov(code.ABI_PARAM3, data_size / CHAR_BIT);
code.CallFunction(&CRC32::ComputeCRC32ISO);
}
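// Illustrative note, not original dynarmic source: the PCLMULQDQ paths
// appear to implement a folded Barrett reduction over the bit-reflected
// CRC-32 polynomial (0x1DB710641, the low quadword of the loaded constant),
// with the high quadword holding a precomputed folding constant. Two
// carry-less multiplies reduce (input ^ crc) modulo the polynomial and the
// 32-bit remainder lands in doubleword 2 of xmm_value, which pextrd
// extracts.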
void EmitX64::EmitCRC32Castagnoli8(EmitContext& ctx, IR::Inst* inst) {
EmitCRC32Castagnoli(code, ctx, inst, 8);
}
void EmitX64::EmitCRC32Castagnoli16(EmitContext& ctx, IR::Inst* inst) {
EmitCRC32Castagnoli(code, ctx, inst, 16);
}
void EmitX64::EmitCRC32Castagnoli32(EmitContext& ctx, IR::Inst* inst) {
EmitCRC32Castagnoli(code, ctx, inst, 32);
}
void EmitX64::EmitCRC32Castagnoli64(EmitContext& ctx, IR::Inst* inst) {
EmitCRC32Castagnoli(code, ctx, inst, 64);
}
void EmitX64::EmitCRC32ISO8(EmitContext& ctx, IR::Inst* inst) {
EmitCRC32ISO(code, ctx, inst, 8);
}
void EmitX64::EmitCRC32ISO16(EmitContext& ctx, IR::Inst* inst) {
EmitCRC32ISO(code, ctx, inst, 16);
}
void EmitX64::EmitCRC32ISO32(EmitContext& ctx, IR::Inst* inst) {
EmitCRC32ISO(code, ctx, inst, 32);
}
void EmitX64::EmitCRC32ISO64(EmitContext& ctx, IR::Inst* inst) {
EmitCRC32ISO(code, ctx, inst, 64);
}
} // namespace Dynarmic::Backend::X64

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,712 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "backend/x64/block_of_code.h"
#include "backend/x64/emit_x64.h"
#include "frontend/ir/microinstruction.h"
#include "frontend/ir/opcodes.h"
namespace Dynarmic::Backend::X64 {
using namespace Xbyak::util;
void EmitX64::EmitPackedAddU8(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
code.paddb(xmm_a, xmm_b);
if (ge_inst) {
const Xbyak::Xmm xmm_ge = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm ones = ctx.reg_alloc.ScratchXmm();
code.pcmpeqb(ones, ones);
code.movdqa(xmm_ge, xmm_a);
code.pminub(xmm_ge, xmm_b);
code.pcmpeqb(xmm_ge, xmm_b);
code.pxor(xmm_ge, ones);
ctx.reg_alloc.DefineValue(ge_inst, xmm_ge);
ctx.EraseInstruction(ge_inst);
}
ctx.reg_alloc.DefineValue(inst, xmm_a);
}
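// Illustrative addition, not original dynarmic source: with 8-bit
// wraparound, b <= a + b holds exactly when the addition did not carry, so
// the pminub/pcmpeqb/pxor sequence above marks the lanes whose unsigned sum
// overflowed, which is what the GE bits report for a packed unsigned add:
namespace {
constexpr bool PackedAddU8Carries(unsigned a, unsigned b) {
return ((a + b) & 0xFFu) < b;  // wrapped sum below an addend <=> carry out
}
static_assert(PackedAddU8Carries(0xFFu, 0x01u));
static_assert(!PackedAddU8Carries(0x7Fu, 0x01u));
}  // namespace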
void EmitX64::EmitPackedAddS8(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
if (ge_inst) {
const Xbyak::Xmm saturated_sum = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm xmm_ge = ctx.reg_alloc.ScratchXmm();
code.pxor(xmm_ge, xmm_ge);
code.movdqa(saturated_sum, xmm_a);
code.paddsb(saturated_sum, xmm_b);
code.pcmpgtb(xmm_ge, saturated_sum);
code.pcmpeqb(saturated_sum, saturated_sum);
code.pxor(xmm_ge, saturated_sum);
ctx.reg_alloc.DefineValue(ge_inst, xmm_ge);
ctx.EraseInstruction(ge_inst);
}
code.paddb(xmm_a, xmm_b);
ctx.reg_alloc.DefineValue(inst, xmm_a);
}
void EmitX64::EmitPackedAddU16(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
code.paddw(xmm_a, xmm_b);
if (ge_inst) {
if (code.HasSSE41()) {
const Xbyak::Xmm xmm_ge = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm ones = ctx.reg_alloc.ScratchXmm();
code.pcmpeqb(ones, ones);
code.movdqa(xmm_ge, xmm_a);
code.pminuw(xmm_ge, xmm_b);
code.pcmpeqw(xmm_ge, xmm_b);
code.pxor(xmm_ge, ones);
ctx.reg_alloc.DefineValue(ge_inst, xmm_ge);
ctx.EraseInstruction(ge_inst);
} else {
const Xbyak::Xmm tmp_a = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm tmp_b = ctx.reg_alloc.ScratchXmm();
// !(b <= a+b) == b > a+b
code.movdqa(tmp_a, xmm_a);
code.movdqa(tmp_b, xmm_b);
code.paddw(tmp_a, code.MConst(xword, 0x80008000));
code.paddw(tmp_b, code.MConst(xword, 0x80008000));
code.pcmpgtw(tmp_b, tmp_a); // *Signed* comparison!
ctx.reg_alloc.DefineValue(ge_inst, tmp_b);
ctx.EraseInstruction(ge_inst);
}
}
ctx.reg_alloc.DefineValue(inst, xmm_a);
}
void EmitX64::EmitPackedAddS16(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
if (ge_inst) {
const Xbyak::Xmm saturated_sum = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm xmm_ge = ctx.reg_alloc.ScratchXmm();
code.pxor(xmm_ge, xmm_ge);
code.movdqa(saturated_sum, xmm_a);
code.paddsw(saturated_sum, xmm_b);
code.pcmpgtw(xmm_ge, saturated_sum);
code.pcmpeqw(saturated_sum, saturated_sum);
code.pxor(xmm_ge, saturated_sum);
ctx.reg_alloc.DefineValue(ge_inst, xmm_ge);
ctx.EraseInstruction(ge_inst);
}
code.paddw(xmm_a, xmm_b);
ctx.reg_alloc.DefineValue(inst, xmm_a);
}
void EmitX64::EmitPackedSubU8(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
if (ge_inst) {
const Xbyak::Xmm xmm_ge = ctx.reg_alloc.ScratchXmm();
code.movdqa(xmm_ge, xmm_a);
code.pmaxub(xmm_ge, xmm_b);
code.pcmpeqb(xmm_ge, xmm_a);
ctx.reg_alloc.DefineValue(ge_inst, xmm_ge);
ctx.EraseInstruction(ge_inst);
}
code.psubb(xmm_a, xmm_b);
ctx.reg_alloc.DefineValue(inst, xmm_a);
}
void EmitX64::EmitPackedSubS8(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
if (ge_inst) {
const Xbyak::Xmm saturated_sum = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm xmm_ge = ctx.reg_alloc.ScratchXmm();
code.pxor(xmm_ge, xmm_ge);
code.movdqa(saturated_sum, xmm_a);
code.psubsb(saturated_sum, xmm_b);
code.pcmpgtb(xmm_ge, saturated_sum);
code.pcmpeqb(saturated_sum, saturated_sum);
code.pxor(xmm_ge, saturated_sum);
ctx.reg_alloc.DefineValue(ge_inst, xmm_ge);
ctx.EraseInstruction(ge_inst);
}
code.psubb(xmm_a, xmm_b);
ctx.reg_alloc.DefineValue(inst, xmm_a);
}
void EmitX64::EmitPackedSubU16(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
if (!ge_inst) {
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
code.psubw(xmm_a, xmm_b);
ctx.reg_alloc.DefineValue(inst, xmm_a);
return;
}
if (code.HasSSE41()) {
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
const Xbyak::Xmm xmm_ge = ctx.reg_alloc.ScratchXmm();
code.movdqa(xmm_ge, xmm_a);
code.pmaxuw(xmm_ge, xmm_b); // Requires SSE 4.1
code.pcmpeqw(xmm_ge, xmm_a);
code.psubw(xmm_a, xmm_b);
ctx.reg_alloc.DefineValue(ge_inst, xmm_ge);
ctx.EraseInstruction(ge_inst);
ctx.reg_alloc.DefineValue(inst, xmm_a);
return;
}
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseScratchXmm(args[1]);
const Xbyak::Xmm xmm_ge = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm ones = ctx.reg_alloc.ScratchXmm();
// (a >= b) == !(b > a)
code.pcmpeqb(ones, ones);
code.paddw(xmm_a, code.MConst(xword, 0x80008000));
code.paddw(xmm_b, code.MConst(xword, 0x80008000));
code.movdqa(xmm_ge, xmm_b);
code.pcmpgtw(xmm_ge, xmm_a); // *Signed* comparison!
code.pxor(xmm_ge, ones);
code.psubw(xmm_a, xmm_b);
ctx.reg_alloc.DefineValue(ge_inst, xmm_ge);
ctx.EraseInstruction(ge_inst);
ctx.reg_alloc.DefineValue(inst, xmm_a);
}
void EmitX64::EmitPackedSubS16(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
if (ge_inst) {
const Xbyak::Xmm saturated_diff = ctx.reg_alloc.ScratchXmm();
const Xbyak::Xmm xmm_ge = ctx.reg_alloc.ScratchXmm();
code.pxor(xmm_ge, xmm_ge);
code.movdqa(saturated_diff, xmm_a);
code.psubsw(saturated_diff, xmm_b);
code.pcmpgtw(xmm_ge, saturated_diff);
code.pcmpeqw(saturated_diff, saturated_diff);
code.pxor(xmm_ge, saturated_diff);
ctx.reg_alloc.DefineValue(ge_inst, xmm_ge);
ctx.EraseInstruction(ge_inst);
}
code.psubw(xmm_a, xmm_b);
ctx.reg_alloc.DefineValue(inst, xmm_a);
}
void EmitX64::EmitPackedHalvingAddU8(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
if (args[0].IsInXmm() || args[1].IsInXmm()) {
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseScratchXmm(args[1]);
const Xbyak::Xmm ones = ctx.reg_alloc.ScratchXmm();
// Since,
// pavg(a, b) == (a + b + 1) >> 1
// Therefore,
// ~pavg(~a, ~b) == (a + b) >> 1
code.pcmpeqb(ones, ones);
code.pxor(xmm_a, ones);
code.pxor(xmm_b, ones);
code.pavgb(xmm_a, xmm_b);
code.pxor(xmm_a, ones);
ctx.reg_alloc.DefineValue(inst, xmm_a);
} else {
const Xbyak::Reg32 reg_a = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
const Xbyak::Reg32 reg_b = ctx.reg_alloc.UseGpr(args[1]).cvt32();
const Xbyak::Reg32 xor_a_b = ctx.reg_alloc.ScratchGpr().cvt32();
const Xbyak::Reg32 and_a_b = reg_a;
const Xbyak::Reg32 result = reg_a;
// This relies on the equality x+y == ((x&y) << 1) + (x^y).
// Note that x^y always contains the LSB of the result.
// Since we want to calculate (x+y)/2, we can instead calculate (x&y) + ((x^y)>>1).
// We mask by 0x7F to remove the LSB so that it doesn't leak into the field below.
code.mov(xor_a_b, reg_a);
code.and_(and_a_b, reg_b);
code.xor_(xor_a_b, reg_b);
code.shr(xor_a_b, 1);
code.and_(xor_a_b, 0x7F7F7F7F);
code.add(result, xor_a_b);
ctx.reg_alloc.DefineValue(inst, result);
}
}
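// Illustrative addition, not original dynarmic source: the GPR path uses
// x + y == ((x & y) << 1) + (x ^ y), so (x + y) / 2 == (x & y) + ((x ^ y) >> 1)
// once the bits shifted across byte boundaries are masked off:
namespace {
constexpr unsigned HalvingAddU8x4(unsigned x, unsigned y) {
return (x & y) + (((x ^ y) >> 1) & 0x7F7F7F7Fu);
}
static_assert(HalvingAddU8x4(0xFF01FF01u, 0x01FF01FFu) == 0x80808080u);
static_assert(HalvingAddU8x4(0x000000FFu, 0x000000FFu) == 0x000000FFu);
}  // namespace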
void EmitX64::EmitPackedHalvingAddU16(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
if (args[0].IsInXmm() || args[1].IsInXmm()) {
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
code.movdqa(tmp, xmm_a);
code.pand(xmm_a, xmm_b);
code.pxor(tmp, xmm_b);
code.psrlw(tmp, 1);
code.paddw(xmm_a, tmp);
ctx.reg_alloc.DefineValue(inst, xmm_a);
} else {
const Xbyak::Reg32 reg_a = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
const Xbyak::Reg32 reg_b = ctx.reg_alloc.UseGpr(args[1]).cvt32();
const Xbyak::Reg32 xor_a_b = ctx.reg_alloc.ScratchGpr().cvt32();
const Xbyak::Reg32 and_a_b = reg_a;
const Xbyak::Reg32 result = reg_a;
// This relies on the equality x+y == ((x&y) << 1) + (x^y).
// Note that x^y always contains the LSB of the result.
// Since we want to calculate (x+y)/2, we can instead calculate (x&y) + ((x^y)>>1).
// We mask by 0x7FFF to remove the LSB so that it doesn't leak into the field below.
code.mov(xor_a_b, reg_a);
code.and_(and_a_b, reg_b);
code.xor_(xor_a_b, reg_b);
code.shr(xor_a_b, 1);
code.and_(xor_a_b, 0x7FFF7FFF);
code.add(result, xor_a_b);
ctx.reg_alloc.DefineValue(inst, result);
}
}
void EmitX64::EmitPackedHalvingAddS8(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Reg32 reg_a = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
const Xbyak::Reg32 reg_b = ctx.reg_alloc.UseGpr(args[1]).cvt32();
const Xbyak::Reg32 xor_a_b = ctx.reg_alloc.ScratchGpr().cvt32();
const Xbyak::Reg32 and_a_b = reg_a;
const Xbyak::Reg32 result = reg_a;
const Xbyak::Reg32 carry = ctx.reg_alloc.ScratchGpr().cvt32();
// This relies on the equality x+y == ((x&y) << 1) + (x^y).
// Note that x^y always contains the LSB of the result.
// Since we want to calculate (x+y)/2, we can instead calculate (x&y) + ((x^y)>>1).
// We mask by 0x7F to remove the LSB so that it doesn't leak into the field below.
// carry propagates the sign bit from (x^y)>>1 upwards by one.
code.mov(xor_a_b, reg_a);
code.and_(and_a_b, reg_b);
code.xor_(xor_a_b, reg_b);
code.mov(carry, xor_a_b);
code.and_(carry, 0x80808080);
code.shr(xor_a_b, 1);
code.and_(xor_a_b, 0x7F7F7F7F);
code.add(result, xor_a_b);
code.xor_(result, carry);
ctx.reg_alloc.DefineValue(inst, result);
}
void EmitX64::EmitPackedHalvingAddS16(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
// This relies on the equality x+y == ((x&y) << 1) + (x^y).
// Note that x^y always contains the LSB of the result.
// Since we want to calculate (x+y)/2, we can instead calculate (x&y) + ((x^y)>>>1).
// The arithmetic shift right makes this signed.
code.movdqa(tmp, xmm_a);
code.pand(xmm_a, xmm_b);
code.pxor(tmp, xmm_b);
code.psraw(tmp, 1);
code.paddw(xmm_a, tmp);
ctx.reg_alloc.DefineValue(inst, xmm_a);
}
void EmitX64::EmitPackedHalvingSubU8(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Reg32 minuend = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
const Xbyak::Reg32 subtrahend = ctx.reg_alloc.UseScratchGpr(args[1]).cvt32();
// This relies on the equality x-y == (x^y) - (((x^y)&y) << 1).
// Note that x^y always contains the LSB of the result.
// Since we want to calculate (x-y)/2, we can instead calculate ((x^y)>>1) - ((x^y)&y).
code.xor_(minuend, subtrahend);
code.and_(subtrahend, minuend);
code.shr(minuend, 1);
// At this point,
// minuend := (a^b) >> 1
// subtrahend := (a^b) & b
// We must now perform a partitioned subtraction.
// We can do this because minuend contains 7 bit fields.
// We use the extra bit in minuend as a bit to borrow from; we set this bit.
// We invert this bit at the end as this tells us if that bit was borrowed from.
code.or_(minuend, 0x80808080);
code.sub(minuend, subtrahend);
code.xor_(minuend, 0x80808080);
// minuend now contains the desired result.
ctx.reg_alloc.DefineValue(inst, minuend);
}
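// Illustrative addition, not original dynarmic source: the borrow-seeding
// trick above, written out as a constexpr sketch over four packed bytes:
namespace {
constexpr unsigned HalvingSubU8x4(unsigned a, unsigned b) {
unsigned minuend = a ^ b;
const unsigned subtrahend = minuend & b;
minuend >>= 1;
minuend |= 0x80808080u;  // seed a borrow bit at the top of each field
minuend -= subtrahend;
minuend ^= 0x80808080u;  // a flipped top bit records that a borrow occurred
return minuend;
}
static_assert(HalvingSubU8x4(0x00FF0080u, 0x00010001u) == 0x007F003Fu);
}  // namespace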
void EmitX64::EmitPackedHalvingSubS8(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Reg32 minuend = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
const Xbyak::Reg32 subtrahend = ctx.reg_alloc.UseScratchGpr(args[1]).cvt32();
const Xbyak::Reg32 carry = ctx.reg_alloc.ScratchGpr().cvt32();
// This relies on the equality x-y == (x^y) - (((x^y)&y) << 1).
// Note that x^y always contains the LSB of the result.
// Since we want to calculate (x-y)/2, we can instead calculate ((x^y)>>1) - ((x^y)&y).
code.xor_(minuend, subtrahend);
code.and_(subtrahend, minuend);
code.mov(carry, minuend);
code.and_(carry, 0x80808080);
code.shr(minuend, 1);
// At this point,
// minuend := (a^b) >> 1
// subtrahend := (a^b) & b
// carry := (a^b) & 0x80808080
// We must now perform a partitioned subtraction.
// We can do this because minuend contains 7 bit fields.
// We use the extra bit in minuend as a bit to borrow from; we set this bit.
// We invert this bit at the end as this tells us if that bit was borrowed from.
// We then sign extend the result into this bit.
code.or_(minuend, 0x80808080);
code.sub(minuend, subtrahend);
code.xor_(minuend, 0x80808080);
code.xor_(minuend, carry);
ctx.reg_alloc.DefineValue(inst, minuend);
}
void EmitX64::EmitPackedHalvingSubU16(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm minuend = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm subtrahend = ctx.reg_alloc.UseScratchXmm(args[1]);
// This relies on the equality x-y == (x^y) - (((x^y)&y) << 1).
// Note that x^y always contains the LSB of the result.
// Since we want to calculate (x-y)/2, we can instead calculate ((x^y)>>1) - ((x^y)&y).
code.pxor(minuend, subtrahend);
code.pand(subtrahend, minuend);
code.psrlw(minuend, 1);
// At this point,
// minuend := (a^b) >> 1
// subtrahend := (a^b) & b
code.psubw(minuend, subtrahend);
ctx.reg_alloc.DefineValue(inst, minuend);
}
void EmitX64::EmitPackedHalvingSubS16(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm minuend = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm subtrahend = ctx.reg_alloc.UseScratchXmm(args[1]);
// This relies on the equality x-y == (x^y) - (((x^y)&y) << 1).
// Note that x^y always contains the LSB of the result.
// Since we want to calculate (x-y)/2, we can instead calculate ((x^y)>>>1) - ((x^y)&y).
code.pxor(minuend, subtrahend);
code.pand(subtrahend, minuend);
code.psraw(minuend, 1);
// At this point,
// minuend := (a^b) >>> 1
// subtrahend := (a^b) & b
code.psubw(minuend, subtrahend);
ctx.reg_alloc.DefineValue(inst, minuend);
}
static void EmitPackedSubAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, bool hi_is_sum, bool is_signed, bool is_halving) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
const Xbyak::Reg32 reg_a_hi = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
const Xbyak::Reg32 reg_b_hi = ctx.reg_alloc.UseScratchGpr(args[1]).cvt32();
const Xbyak::Reg32 reg_a_lo = ctx.reg_alloc.ScratchGpr().cvt32();
const Xbyak::Reg32 reg_b_lo = ctx.reg_alloc.ScratchGpr().cvt32();
Xbyak::Reg32 reg_sum, reg_diff;
if (is_signed) {
code.movsx(reg_a_lo, reg_a_hi.cvt16());
code.movsx(reg_b_lo, reg_b_hi.cvt16());
code.sar(reg_a_hi, 16);
code.sar(reg_b_hi, 16);
} else {
code.movzx(reg_a_lo, reg_a_hi.cvt16());
code.movzx(reg_b_lo, reg_b_hi.cvt16());
code.shr(reg_a_hi, 16);
code.shr(reg_b_hi, 16);
}
if (hi_is_sum) {
code.sub(reg_a_lo, reg_b_hi);
code.add(reg_a_hi, reg_b_lo);
reg_diff = reg_a_lo;
reg_sum = reg_a_hi;
} else {
code.add(reg_a_lo, reg_b_hi);
code.sub(reg_a_hi, reg_b_lo);
reg_diff = reg_a_hi;
reg_sum = reg_a_lo;
}
if (ge_inst) {
// The reg_b registers are no longer required.
const Xbyak::Reg32 ge_sum = reg_b_hi;
const Xbyak::Reg32 ge_diff = reg_b_lo;
code.mov(ge_sum, reg_sum);
code.mov(ge_diff, reg_diff);
if (!is_signed) {
code.shl(ge_sum, 15);
code.sar(ge_sum, 31);
} else {
code.not_(ge_sum);
code.sar(ge_sum, 31);
}
code.not_(ge_diff);
code.sar(ge_diff, 31);
code.and_(ge_sum, hi_is_sum ? 0xFFFF0000 : 0x0000FFFF);
code.and_(ge_diff, hi_is_sum ? 0x0000FFFF : 0xFFFF0000);
code.or_(ge_sum, ge_diff);
ctx.reg_alloc.DefineValue(ge_inst, ge_sum);
ctx.EraseInstruction(ge_inst);
}
if (is_halving) {
code.shl(reg_a_lo, 15);
code.shr(reg_a_hi, 1);
} else {
code.shl(reg_a_lo, 16);
}
// reg_a_lo now contains the low word and reg_a_hi now contains the high word.
// Merge them.
code.shld(reg_a_hi, reg_a_lo, 16);
ctx.reg_alloc.DefineValue(inst, reg_a_hi);
}
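// Illustrative note, not original dynarmic source: this helper covers the
// whole ASX/SAX family. The halfwords are widened into separate 32-bit
// registers so the cross add and subtract cannot interfere. For the GE
// bits, shl 15 / sar 31 broadcasts bit 16 of the 17-bit unsigned sum (its
// carry out), while not_ / sar 31 broadcasts "value >= 0", the signed GE
// condition; shld then stitches the two halfword results back into one
// word.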
void EmitX64::EmitPackedAddSubU16(EmitContext& ctx, IR::Inst* inst) {
EmitPackedSubAdd(code, ctx, inst, true, false, false);
}
void EmitX64::EmitPackedAddSubS16(EmitContext& ctx, IR::Inst* inst) {
EmitPackedSubAdd(code, ctx, inst, true, true, false);
}
void EmitX64::EmitPackedSubAddU16(EmitContext& ctx, IR::Inst* inst) {
EmitPackedSubAdd(code, ctx, inst, false, false, false);
}
void EmitX64::EmitPackedSubAddS16(EmitContext& ctx, IR::Inst* inst) {
EmitPackedSubAdd(code, ctx, inst, false, true, false);
}
void EmitX64::EmitPackedHalvingAddSubU16(EmitContext& ctx, IR::Inst* inst) {
EmitPackedSubAdd(code, ctx, inst, true, false, true);
}
void EmitX64::EmitPackedHalvingAddSubS16(EmitContext& ctx, IR::Inst* inst) {
EmitPackedSubAdd(code, ctx, inst, true, true, true);
}
void EmitX64::EmitPackedHalvingSubAddU16(EmitContext& ctx, IR::Inst* inst) {
EmitPackedSubAdd(code, ctx, inst, false, false, true);
}
void EmitX64::EmitPackedHalvingSubAddS16(EmitContext& ctx, IR::Inst* inst) {
EmitPackedSubAdd(code, ctx, inst, false, true, true);
}
static void EmitPackedOperation(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, void (Xbyak::CodeGenerator::*fn)(const Xbyak::Mmx& mmx, const Xbyak::Operand&)) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
(code.*fn)(xmm_a, xmm_b);
ctx.reg_alloc.DefineValue(inst, xmm_a);
}
void EmitX64::EmitPackedSaturatedAddU8(EmitContext& ctx, IR::Inst* inst) {
EmitPackedOperation(code, ctx, inst, &Xbyak::CodeGenerator::paddusb);
}
void EmitX64::EmitPackedSaturatedAddS8(EmitContext& ctx, IR::Inst* inst) {
EmitPackedOperation(code, ctx, inst, &Xbyak::CodeGenerator::paddsb);
}
void EmitX64::EmitPackedSaturatedSubU8(EmitContext& ctx, IR::Inst* inst) {
EmitPackedOperation(code, ctx, inst, &Xbyak::CodeGenerator::psubusb);
}
void EmitX64::EmitPackedSaturatedSubS8(EmitContext& ctx, IR::Inst* inst) {
EmitPackedOperation(code, ctx, inst, &Xbyak::CodeGenerator::psubsb);
}
void EmitX64::EmitPackedSaturatedAddU16(EmitContext& ctx, IR::Inst* inst) {
EmitPackedOperation(code, ctx, inst, &Xbyak::CodeGenerator::paddusw);
}
void EmitX64::EmitPackedSaturatedAddS16(EmitContext& ctx, IR::Inst* inst) {
EmitPackedOperation(code, ctx, inst, &Xbyak::CodeGenerator::paddsw);
}
void EmitX64::EmitPackedSaturatedSubU16(EmitContext& ctx, IR::Inst* inst) {
EmitPackedOperation(code, ctx, inst, &Xbyak::CodeGenerator::psubusw);
}
void EmitX64::EmitPackedSaturatedSubS16(EmitContext& ctx, IR::Inst* inst) {
EmitPackedOperation(code, ctx, inst, &Xbyak::CodeGenerator::psubsw);
}
void EmitX64::EmitPackedAbsDiffSumS8(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseScratchXmm(args[1]);
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
// TODO: Optimize with zero-extension detection
code.movaps(tmp, code.MConst(xword, 0xFFFFFFFF));
code.pand(xmm_a, tmp);
code.pand(xmm_b, tmp);
code.psadbw(xmm_a, xmm_b);
ctx.reg_alloc.DefineValue(inst, xmm_a);
}
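// Illustrative note, not original dynarmic source: psadbw sums absolute
// byte differences across eight bytes per 64-bit lane; masking both inputs
// to their low four bytes first makes it compute exactly the four-byte
// absolute-difference sum this IR op needs, the other bytes contributing
// zero.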
void EmitX64::EmitPackedSelect(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const size_t num_args_in_xmm = args[0].IsInXmm() + args[1].IsInXmm() + args[2].IsInXmm();
if (num_args_in_xmm >= 2) {
const Xbyak::Xmm ge = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm to = ctx.reg_alloc.UseXmm(args[1]);
const Xbyak::Xmm from = ctx.reg_alloc.UseScratchXmm(args[2]);
code.pand(from, ge);
code.pandn(ge, to);
code.por(from, ge);
ctx.reg_alloc.DefineValue(inst, from);
} else if (code.HasBMI1()) {
const Xbyak::Reg32 ge = ctx.reg_alloc.UseGpr(args[0]).cvt32();
const Xbyak::Reg32 to = ctx.reg_alloc.UseScratchGpr(args[1]).cvt32();
const Xbyak::Reg32 from = ctx.reg_alloc.UseScratchGpr(args[2]).cvt32();
code.and_(from, ge);
code.andn(to, ge, to);
code.or_(from, to);
ctx.reg_alloc.DefineValue(inst, from);
} else {
const Xbyak::Reg32 ge = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
const Xbyak::Reg32 to = ctx.reg_alloc.UseGpr(args[1]).cvt32();
const Xbyak::Reg32 from = ctx.reg_alloc.UseScratchGpr(args[2]).cvt32();
code.and_(from, ge);
code.not_(ge);
code.and_(ge, to);
code.or_(from, ge);
ctx.reg_alloc.DefineValue(inst, from);
}
}
} // namespace Dynarmic::Backend::X64

View File

@@ -1,315 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <limits>
#include <mp/traits/integer_of_size.h>
#include "backend/x64/block_of_code.h"
#include "backend/x64/emit_x64.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/microinstruction.h"
#include "frontend/ir/opcodes.h"
namespace Dynarmic::Backend::X64 {
using namespace Xbyak::util;
namespace {
enum class Op {
Add,
Sub,
};
template<Op op, size_t size>
void EmitSignedSaturatedOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
Xbyak::Reg result = ctx.reg_alloc.UseScratchGpr(args[0]).changeBit(size);
Xbyak::Reg addend = ctx.reg_alloc.UseGpr(args[1]).changeBit(size);
Xbyak::Reg overflow = ctx.reg_alloc.ScratchGpr().changeBit(size);
constexpr u64 int_max = static_cast<u64>(std::numeric_limits<mp::signed_integer_of_size<size>>::max());
if constexpr (size < 64) {
code.xor_(overflow.cvt32(), overflow.cvt32());
code.bt(result.cvt32(), size - 1);
code.adc(overflow.cvt32(), int_max);
} else {
code.mov(overflow, int_max);
code.bt(result, 63);
code.adc(overflow, 0);
}
// overflow now contains 0x7F... if a was positive, or 0x80... if a was negative
if constexpr (op == Op::Add) {
code.add(result, addend);
} else {
code.sub(result, addend);
}
if constexpr (size == 8) {
code.cmovo(result.cvt32(), overflow.cvt32());
} else {
code.cmovo(result, overflow);
}
if (overflow_inst) {
code.seto(overflow.cvt8());
ctx.reg_alloc.DefineValue(overflow_inst, overflow);
ctx.EraseInstruction(overflow_inst);
}
ctx.reg_alloc.DefineValue(inst, result);
}
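// Illustrative addition, not original dynarmic source: the xor_/bt/adc
// sequence computes int_max + sign(first operand), which is the value the
// operation saturates to on overflow, since a signed add or sub can only
// overflow toward the first operand's sign. For size == 32:
constexpr u32 SaturationValueForLhs32(u32 a) {
return 0x7FFFFFFFu + (a >> 31);  // 0x7FFFFFFF if a >= 0, else 0x80000000
}
static_assert(SaturationValueForLhs32(0x00000001u) == 0x7FFFFFFFu);
static_assert(SaturationValueForLhs32(0x80000000u) == 0x80000000u);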
template<Op op, size_t size>
void EmitUnsignedSaturatedOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
Xbyak::Reg op_result = ctx.reg_alloc.UseScratchGpr(args[0]).changeBit(size);
Xbyak::Reg addend = ctx.reg_alloc.UseScratchGpr(args[1]).changeBit(size);
constexpr u64 boundary = op == Op::Add ? std::numeric_limits<mp::unsigned_integer_of_size<size>>::max() : 0;
if constexpr (op == Op::Add) {
code.add(op_result, addend);
} else {
code.sub(op_result, addend);
}
code.mov(addend, boundary);
if constexpr (size == 8) {
code.cmovae(addend.cvt32(), op_result.cvt32());
} else {
code.cmovae(addend, op_result);
}
if (overflow_inst) {
const Xbyak::Reg overflow = ctx.reg_alloc.ScratchGpr();
code.setb(overflow.cvt8());
ctx.reg_alloc.DefineValue(overflow_inst, overflow);
ctx.EraseInstruction(overflow_inst);
}
ctx.reg_alloc.DefineValue(inst, addend);
}
} // anonymous namespace
void EmitX64::EmitSignedSaturatedAdd8(EmitContext& ctx, IR::Inst* inst) {
EmitSignedSaturatedOp<Op::Add, 8>(code, ctx, inst);
}
void EmitX64::EmitSignedSaturatedAdd16(EmitContext& ctx, IR::Inst* inst) {
EmitSignedSaturatedOp<Op::Add, 16>(code, ctx, inst);
}
void EmitX64::EmitSignedSaturatedAdd32(EmitContext& ctx, IR::Inst* inst) {
EmitSignedSaturatedOp<Op::Add, 32>(code, ctx, inst);
}
void EmitX64::EmitSignedSaturatedAdd64(EmitContext& ctx, IR::Inst* inst) {
EmitSignedSaturatedOp<Op::Add, 64>(code, ctx, inst);
}
void EmitX64::EmitSignedSaturatedDoublingMultiplyReturnHigh16(EmitContext& ctx, IR::Inst* inst) {
const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Reg32 x = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
const Xbyak::Reg32 y = ctx.reg_alloc.UseScratchGpr(args[1]).cvt32();
const Xbyak::Reg32 tmp = ctx.reg_alloc.ScratchGpr().cvt32();
code.movsx(x, x.cvt16());
code.movsx(y, y.cvt16());
code.imul(x, y);
code.lea(y, ptr[x.cvt64() + x.cvt64()]);
code.mov(tmp, x);
code.shr(tmp, 15);
code.xor_(y, x);
code.mov(y, 0x7FFF);
code.cmovns(y, tmp);
if (overflow_inst) {
code.sets(tmp.cvt8());
ctx.reg_alloc.DefineValue(overflow_inst, tmp);
ctx.EraseInstruction(overflow_inst);
}
ctx.reg_alloc.DefineValue(inst, y);
}
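// Illustrative note, not original dynarmic source: the mov directly after
// xor_ is not a bug. mov does not modify flags, so cmovns still sees SF
// from `xor_(y, x)`, whose sign bit is bit31(2*x) ^ bit31(x), i.e. set
// exactly when doubling the Q15 product overflows. y is preloaded with the
// saturated 0x7FFF and replaced by the true high half (x >> 15) when no
// overflow occurred; sets captures the same condition for the Q flag.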
void EmitX64::EmitSignedSaturatedDoublingMultiplyReturnHigh32(EmitContext& ctx, IR::Inst* inst) {
const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Reg64 x = ctx.reg_alloc.UseScratchGpr(args[0]);
const Xbyak::Reg64 y = ctx.reg_alloc.UseScratchGpr(args[1]);
const Xbyak::Reg64 tmp = ctx.reg_alloc.ScratchGpr();
code.movsxd(x, x.cvt32());
code.movsxd(y, y.cvt32());
code.imul(x, y);
code.lea(y, ptr[x + x]);
code.mov(tmp, x);
code.shr(tmp, 31);
code.xor_(y, x);
code.mov(y.cvt32(), 0x7FFFFFFF);
code.cmovns(y.cvt32(), tmp.cvt32());
if (overflow_inst) {
code.sets(tmp.cvt8());
ctx.reg_alloc.DefineValue(overflow_inst, tmp);
ctx.EraseInstruction(overflow_inst);
}
ctx.reg_alloc.DefineValue(inst, y);
}
void EmitX64::EmitSignedSaturatedSub8(EmitContext& ctx, IR::Inst* inst) {
EmitSignedSaturatedOp<Op::Sub, 8>(code, ctx, inst);
}
void EmitX64::EmitSignedSaturatedSub16(EmitContext& ctx, IR::Inst* inst) {
EmitSignedSaturatedOp<Op::Sub, 16>(code, ctx, inst);
}
void EmitX64::EmitSignedSaturatedSub32(EmitContext& ctx, IR::Inst* inst) {
EmitSignedSaturatedOp<Op::Sub, 32>(code, ctx, inst);
}
void EmitX64::EmitSignedSaturatedSub64(EmitContext& ctx, IR::Inst* inst) {
EmitSignedSaturatedOp<Op::Sub, 64>(code, ctx, inst);
}
void EmitX64::EmitSignedSaturation(EmitContext& ctx, IR::Inst* inst) {
const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const size_t N = args[1].GetImmediateU8();
ASSERT(N >= 1 && N <= 32);
if (N == 32) {
if (overflow_inst) {
const auto no_overflow = IR::Value(false);
overflow_inst->ReplaceUsesWith(no_overflow);
}
// TODO: DefineValue directly on Argument
const Xbyak::Reg64 result = ctx.reg_alloc.ScratchGpr();
const Xbyak::Reg64 source = ctx.reg_alloc.UseGpr(args[0]);
code.mov(result.cvt32(), source.cvt32());
ctx.reg_alloc.DefineValue(inst, result);
return;
}
const u32 mask = (1u << N) - 1;
const u32 positive_saturated_value = (1u << (N - 1)) - 1;
const u32 negative_saturated_value = 1u << (N - 1);
const Xbyak::Reg32 result = ctx.reg_alloc.ScratchGpr().cvt32();
const Xbyak::Reg32 reg_a = ctx.reg_alloc.UseGpr(args[0]).cvt32();
const Xbyak::Reg32 overflow = ctx.reg_alloc.ScratchGpr().cvt32();
// After the lea below, overflow contains a value between 0 and mask exactly
// when reg_a was between negative_saturated_value and positive_saturated_value.
code.lea(overflow, code.ptr[reg_a.cvt64() + negative_saturated_value]);
// Put the appropriate saturated value in result
code.mov(result, reg_a);
code.sar(result, 31);
code.xor_(result, positive_saturated_value);
// Do the saturation
code.cmp(overflow, mask);
code.cmovbe(result, reg_a);
if (overflow_inst) {
code.seta(overflow.cvt8());
ctx.reg_alloc.DefineValue(overflow_inst, overflow);
ctx.EraseInstruction(overflow_inst);
}
ctx.reg_alloc.DefineValue(inst, result);
}
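// Illustrative addition, not original dynarmic source: the lea/cmp pair is
// an unsigned range check. Adding 2^(N-1) maps the signed interval
// [-2^(N-1), 2^(N-1) - 1] onto [0, 2^N - 1], so a single unsigned compare
// against mask decides whether saturation is needed:
namespace {
constexpr bool FitsInSignedN(u32 value, u32 n) {
return value + (1u << (n - 1)) <= (1u << n) - 1;  // u32 wraparound intended
}
static_assert(FitsInSignedN(0x0000007Fu, 8));   //  127 fits in s8
static_assert(!FitsInSignedN(0x00000080u, 8));  //  128 does not
static_assert(FitsInSignedN(0xFFFFFF80u, 8));   // -128 fits in s8
}  // namespace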
void EmitX64::EmitUnsignedSaturatedAdd8(EmitContext& ctx, IR::Inst* inst) {
EmitUnsignedSaturatedOp<Op::Add, 8>(code, ctx, inst);
}
void EmitX64::EmitUnsignedSaturatedAdd16(EmitContext& ctx, IR::Inst* inst) {
EmitUnsignedSaturatedOp<Op::Add, 16>(code, ctx, inst);
}
void EmitX64::EmitUnsignedSaturatedAdd32(EmitContext& ctx, IR::Inst* inst) {
EmitUnsignedSaturatedOp<Op::Add, 32>(code, ctx, inst);
}
void EmitX64::EmitUnsignedSaturatedAdd64(EmitContext& ctx, IR::Inst* inst) {
EmitUnsignedSaturatedOp<Op::Add, 64>(code, ctx, inst);
}
void EmitX64::EmitUnsignedSaturatedSub8(EmitContext& ctx, IR::Inst* inst) {
EmitUnsignedSaturatedOp<Op::Sub, 8>(code, ctx, inst);
}
void EmitX64::EmitUnsignedSaturatedSub16(EmitContext& ctx, IR::Inst* inst) {
EmitUnsignedSaturatedOp<Op::Sub, 16>(code, ctx, inst);
}
void EmitX64::EmitUnsignedSaturatedSub32(EmitContext& ctx, IR::Inst* inst) {
EmitUnsignedSaturatedOp<Op::Sub, 32>(code, ctx, inst);
}
void EmitX64::EmitUnsignedSaturatedSub64(EmitContext& ctx, IR::Inst* inst) {
EmitUnsignedSaturatedOp<Op::Sub, 64>(code, ctx, inst);
}
void EmitX64::EmitUnsignedSaturation(EmitContext& ctx, IR::Inst* inst) {
const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const size_t N = args[1].GetImmediateU8();
ASSERT(N <= 31);
const u32 saturated_value = (1u << N) - 1;
const Xbyak::Reg32 result = ctx.reg_alloc.ScratchGpr().cvt32();
const Xbyak::Reg32 reg_a = ctx.reg_alloc.UseGpr(args[0]).cvt32();
const Xbyak::Reg32 overflow = ctx.reg_alloc.ScratchGpr().cvt32();
// Pseudocode: result = clamp(reg_a, 0, saturated_value);
code.xor_(overflow, overflow);
code.cmp(reg_a, saturated_value);
code.mov(result, saturated_value);
code.cmovle(result, overflow);
code.cmovbe(result, reg_a);
if (overflow_inst) {
code.seta(overflow.cvt8());
ctx.reg_alloc.DefineValue(overflow_inst, overflow);
ctx.EraseInstruction(overflow_inst);
}
ctx.reg_alloc.DefineValue(inst, result);
}
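// Illustrative note, not original dynarmic source: both clamp edges hang
// off the single cmp above. cmovle (signed) zeroes the preloaded
// saturated_value whenever reg_a is not above the range in the signed
// sense, then cmovbe (unsigned) restores reg_a itself for genuinely
// in-range values, so only negative inputs remain clamped to zero.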
} // namespace Dynarmic::Backend::X64

View File

@@ -1,20 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "backend/x64/block_of_code.h"
#include "backend/x64/emit_x64.h"
#include "common/crypto/sm4.h"
#include "frontend/ir/microinstruction.h"
namespace Dynarmic::Backend::X64 {
void EmitX64::EmitSM4AccessSubstitutionBox(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
ctx.reg_alloc.HostCall(inst, args[0]);
code.CallFunction(&Common::Crypto::SM4::AccessSubstitutionBox);
}
} // namespace Dynarmic::Backend::X64

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,324 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "backend/x64/block_of_code.h"
#include "backend/x64/emit_x64.h"
#include "common/common_types.h"
#include "frontend/ir/microinstruction.h"
#include "frontend/ir/opcodes.h"
namespace Dynarmic::Backend::X64 {
using namespace Xbyak::util;
namespace {
void EmitVectorSaturatedNative(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, void (Xbyak::CodeGenerator::*saturated_fn)(const Xbyak::Mmx& mmx, const Xbyak::Operand&), void (Xbyak::CodeGenerator::*unsaturated_fn)(const Xbyak::Mmx& mmx, const Xbyak::Operand&), void (Xbyak::CodeGenerator::*sub_fn)(const Xbyak::Mmx& mmx, const Xbyak::Operand&)) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm addend = ctx.reg_alloc.UseXmm(args[1]);
const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
code.movaps(xmm0, result);
(code.*saturated_fn)(result, addend);
(code.*unsaturated_fn)(xmm0, addend);
(code.*sub_fn)(xmm0, result);
if (code.HasSSE41()) {
code.ptest(xmm0, xmm0);
} else {
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
code.pxor(tmp, tmp);
code.pcmpeqw(xmm0, tmp);
code.pmovmskb(overflow.cvt32(), xmm0);
code.xor_(overflow.cvt32(), 0xFFFF);
code.test(overflow.cvt32(), overflow.cvt32());
}
code.setnz(overflow);
code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
ctx.reg_alloc.DefineValue(inst, result);
}
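// Illustrative note, not original dynarmic source: saturation is detected
// by performing the operation twice, once saturating into `result` and once
// wrapping into xmm0, then subtracting one from the other; any lane where
// the two disagree has saturated, and the setnz result is OR-ed into the
// sticky FPSR.QC byte in the JIT state.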
enum class Op {
Add,
Sub,
};
template<Op op, size_t esize>
void EmitVectorSignedSaturated(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
static_assert(esize == 32 || esize == 64);
constexpr u64 msb_mask = esize == 32 ? 0x8000000080000000 : 0x8000000000000000;
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm arg = ctx.reg_alloc.UseXmm(args[1]);
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
// TODO AVX-512: vpternlog, vpsraq
// TODO AVX2 implementation
code.movaps(xmm0, result);
code.movaps(tmp, result);
if constexpr (op == Op::Add) {
if constexpr (esize == 32) {
code.paddd(result, arg);
} else {
code.paddq(result, arg);
}
} else {
if constexpr (esize == 32) {
code.psubd(result, arg);
} else {
code.psubq(result, arg);
}
}
code.pxor(tmp, result);
code.pxor(xmm0, arg);
if constexpr (op == Op::Add) {
code.pandn(xmm0, tmp);
} else {
code.pand(xmm0, tmp);
}
code.movaps(tmp, result);
code.psrad(tmp, 31);
if constexpr (esize == 64) {
code.pshufd(tmp, tmp, 0b11110101);
}
code.pxor(tmp, code.MConst(xword, msb_mask, msb_mask));
if (code.HasSSE41()) {
code.ptest(xmm0, code.MConst(xword, msb_mask, msb_mask));
} else {
if constexpr (esize == 32) {
code.movmskps(overflow.cvt32(), xmm0);
} else {
code.movmskpd(overflow.cvt32(), xmm0);
}
code.test(overflow.cvt32(), overflow.cvt32());
}
code.setnz(overflow);
code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
if (code.HasSSE41()) {
if constexpr (esize == 32) {
code.blendvps(result, tmp);
} else {
code.blendvpd(result, tmp);
}
ctx.reg_alloc.DefineValue(inst, result);
} else {
code.psrad(xmm0, 31);
if constexpr (esize == 64) {
code.pshufd(xmm0, xmm0, 0b11110101);
}
code.pand(tmp, xmm0);
code.pandn(xmm0, result);
code.por(tmp, xmm0);
ctx.reg_alloc.DefineValue(inst, tmp);
}
}
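// Illustrative note, not original dynarmic source: for x +/- y, a lane
// overflows iff x and y agree in sign (disagree, for subtraction) while the
// wrapped result's sign differs from x's; that predicate is accumulated in
// xmm0's sign bits. tmp holds the saturation value
// msb_mask ^ broadcast(sign(result)), i.e. 0x7FFF... when the wrapped
// result went negative and 0x8000... when it went positive, and blendv (or
// the pand/pandn/por fallback) substitutes it in the overflowing lanes.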
} // anonymous namespace
void EmitX64::EmitVectorSignedSaturatedAdd8(EmitContext& ctx, IR::Inst* inst) {
EmitVectorSaturatedNative(code, ctx, inst, &Xbyak::CodeGenerator::paddsb, &Xbyak::CodeGenerator::paddb, &Xbyak::CodeGenerator::psubb);
}
void EmitX64::EmitVectorSignedSaturatedAdd16(EmitContext& ctx, IR::Inst* inst) {
EmitVectorSaturatedNative(code, ctx, inst, &Xbyak::CodeGenerator::paddsw, &Xbyak::CodeGenerator::paddw, &Xbyak::CodeGenerator::psubw);
}
void EmitX64::EmitVectorSignedSaturatedAdd32(EmitContext& ctx, IR::Inst* inst) {
EmitVectorSignedSaturated<Op::Add, 32>(code, ctx, inst);
}
void EmitX64::EmitVectorSignedSaturatedAdd64(EmitContext& ctx, IR::Inst* inst) {
EmitVectorSignedSaturated<Op::Add, 64>(code, ctx, inst);
}
void EmitX64::EmitVectorSignedSaturatedSub8(EmitContext& ctx, IR::Inst* inst) {
EmitVectorSaturatedNative(code, ctx, inst, &Xbyak::CodeGenerator::psubsb, &Xbyak::CodeGenerator::psubb, &Xbyak::CodeGenerator::psubb);
}
void EmitX64::EmitVectorSignedSaturatedSub16(EmitContext& ctx, IR::Inst* inst) {
EmitVectorSaturatedNative(code, ctx, inst, &Xbyak::CodeGenerator::psubsw, &Xbyak::CodeGenerator::psubw, &Xbyak::CodeGenerator::psubw);
}
void EmitX64::EmitVectorSignedSaturatedSub32(EmitContext& ctx, IR::Inst* inst) {
EmitVectorSignedSaturated<Op::Sub, 32>(code, ctx, inst);
}
void EmitX64::EmitVectorSignedSaturatedSub64(EmitContext& ctx, IR::Inst* inst) {
EmitVectorSignedSaturated<Op::Sub, 64>(code, ctx, inst);
}
void EmitX64::EmitVectorUnsignedSaturatedAdd8(EmitContext& ctx, IR::Inst* inst) {
EmitVectorSaturatedNative(code, ctx, inst, &Xbyak::CodeGenerator::paddusb, &Xbyak::CodeGenerator::paddb, &Xbyak::CodeGenerator::psubb);
}
void EmitX64::EmitVectorUnsignedSaturatedAdd16(EmitContext& ctx, IR::Inst* inst) {
EmitVectorSaturatedNative(code, ctx, inst, &Xbyak::CodeGenerator::paddusw, &Xbyak::CodeGenerator::paddw, &Xbyak::CodeGenerator::psubw);
}
void EmitX64::EmitVectorUnsignedSaturatedAdd32(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm addend = ctx.reg_alloc.UseXmm(args[1]);
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
// TODO AVX2, AVX-512: vpternlog
code.movaps(tmp, result);
code.movaps(xmm0, result);
code.pxor(xmm0, addend);
code.pand(tmp, addend);
code.paddd(result, addend);
code.psrld(xmm0, 1);
code.paddd(tmp, xmm0);
code.psrad(tmp, 31);
code.por(result, tmp);
if (code.HasSSE41()) {
code.ptest(tmp, tmp);
} else {
code.movmskps(overflow.cvt32(), tmp);
code.test(overflow.cvt32(), overflow.cvt32());
}
code.setnz(overflow);
code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
ctx.reg_alloc.DefineValue(inst, result);
}
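// Illustrative addition, not original dynarmic source: the carry out of an
// unsigned 32-bit add is recovered without a wider type via
// a + b == 2*(a & b) + (a ^ b): bit 31 of (a & b) + ((a ^ b) >> 1) is bit 32
// of the true sum. psrad 31 broadcasts it into a per-lane mask and por
// saturates the carrying lanes to all-ones:
namespace {
constexpr bool AddU32CarriesOut(u32 a, u32 b) {
return (((a & b) + ((a ^ b) >> 1)) >> 31) != 0;
}
static_assert(AddU32CarriesOut(0xFFFFFFFFu, 0x00000001u));
static_assert(!AddU32CarriesOut(0x7FFFFFFFu, 0x00000001u));
}  // namespace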
void EmitX64::EmitVectorUnsignedSaturatedAdd64(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm addend = ctx.reg_alloc.UseXmm(args[1]);
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
// TODO AVX2, AVX-512: vpternlog
code.movaps(tmp, result);
code.movaps(xmm0, result);
code.pxor(xmm0, addend);
code.pand(tmp, addend);
code.paddq(result, addend);
code.psrlq(xmm0, 1);
code.paddq(tmp, xmm0);
code.psrad(tmp, 31);
code.pshufd(tmp, tmp, 0b11110101);
code.por(result, tmp);
if (code.HasSSE41()) {
code.ptest(tmp, tmp);
} else {
code.movmskpd(overflow.cvt32(), tmp);
code.test(overflow.cvt32(), overflow.cvt32());
}
code.setnz(overflow);
code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
ctx.reg_alloc.DefineValue(inst, result);
}
void EmitX64::EmitVectorUnsignedSaturatedSub8(EmitContext& ctx, IR::Inst* inst) {
EmitVectorSaturatedNative(code, ctx, inst, &Xbyak::CodeGenerator::psubusb, &Xbyak::CodeGenerator::psubb, &Xbyak::CodeGenerator::psubb);
}
void EmitX64::EmitVectorUnsignedSaturatedSub16(EmitContext& ctx, IR::Inst* inst) {
EmitVectorSaturatedNative(code, ctx, inst, &Xbyak::CodeGenerator::psubusw, &Xbyak::CodeGenerator::psubw, &Xbyak::CodeGenerator::psubw);
}
void EmitX64::EmitVectorUnsignedSaturatedSub32(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm subtrahend = ctx.reg_alloc.UseXmm(args[1]);
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
// TODO AVX2, AVX-512: vpternlog
code.movaps(tmp, result);
code.movaps(xmm0, subtrahend);
code.pxor(tmp, subtrahend);
code.psubd(result, subtrahend);
code.pand(xmm0, tmp);
code.psrld(tmp, 1);
code.psubd(tmp, xmm0);
code.psrad(tmp, 31);
if (code.HasSSE41()) {
code.ptest(tmp, tmp);
} else {
code.movmskps(overflow.cvt32(), tmp);
code.test(overflow.cvt32(), overflow.cvt32());
}
code.setnz(overflow);
code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
code.pandn(tmp, result);
ctx.reg_alloc.DefineValue(inst, tmp);
}
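// Illustrative note, not original dynarmic source: the mirrored identity
// a - b == (a ^ b) - 2*((a ^ b) & b) means the sign of
// ((a ^ b) >> 1) - ((a ^ b) & b) is the borrow of a - b; psrad 31
// broadcasts it into a per-lane mask and the final pandn clears the
// borrowing lanes to zero, the unsigned floor.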
void EmitX64::EmitVectorUnsignedSaturatedSub64(EmitContext& ctx, IR::Inst* inst) {
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
const Xbyak::Xmm subtrahend = ctx.reg_alloc.UseXmm(args[1]);
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
// TODO AVX2, AVX-512: vpternlog
code.movaps(tmp, result);
code.movaps(xmm0, subtrahend);
code.pxor(tmp, subtrahend);
code.psubq(result, subtrahend);
code.pand(xmm0, tmp);
code.psrlq(tmp, 1);
code.psubq(tmp, xmm0);
code.psrad(tmp, 31);
code.pshufd(tmp, tmp, 0b11110101);
if (code.HasSSE41()) {
code.ptest(tmp, tmp);
} else {
code.movmskpd(overflow.cvt32(), tmp);
code.test(overflow.cvt32(), overflow.cvt32());
}
code.setnz(overflow);
code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
code.pandn(tmp, result);
ctx.reg_alloc.DefineValue(inst, tmp);
}
} // namespace Dynarmic::Backend::X64

View File

@@ -1,37 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <functional>
#include <memory>
#include "common/common_types.h"
namespace Dynarmic::Backend::X64 {
class BlockOfCode;
struct FakeCall {
u64 call_rip;
u64 ret_rip;
};
class ExceptionHandler final {
public:
ExceptionHandler();
~ExceptionHandler();
void Register(BlockOfCode& code);
bool SupportsFastmem() const noexcept;
void SetFastmemCallback(std::function<FakeCall(u64)> cb);
private:
struct Impl;
std::unique_ptr<Impl> impl;
};
} // namespace Dynarmic::Backend::X64

View File

@@ -1,28 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "backend/x64/exception_handler.h"
namespace Dynarmic::Backend::X64 {
struct ExceptionHandler::Impl final {
};
ExceptionHandler::ExceptionHandler() = default;
ExceptionHandler::~ExceptionHandler() = default;
void ExceptionHandler::Register(BlockOfCode&) {
// Do nothing
}
bool ExceptionHandler::SupportsFastmem() const noexcept {
return false;
}
void ExceptionHandler::SetFastmemCallback(std::function<FakeCall(u64)>) {
// Do nothing
}
} // namespace Dynarmic::Backend::X64

View File

@@ -1,230 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2019 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "backend/x64/exception_handler.h"
#include <mach/mach.h>
#include <mach/message.h>
#include <cstring>
#include <functional>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>
#include <fmt/format.h>
#include "backend/x64/block_of_code.h"
#include "common/assert.h"
#include "common/cast_util.h"
#include "common/common_types.h"
#define mig_external extern "C"
#include "backend/x64/mig/mach_exc_server.h"
namespace Dynarmic::Backend::X64 {
namespace {
struct CodeBlockInfo {
u64 code_begin, code_end;
std::function<FakeCall(u64)> cb;
};
struct MachMessage {
mach_msg_header_t head;
char data[2048]; ///< Arbitrary size
};
class MachHandler final {
public:
MachHandler();
~MachHandler();
kern_return_t HandleRequest(x86_thread_state64_t* thread_state);
void AddCodeBlock(CodeBlockInfo info);
void RemoveCodeBlock(u64 rip);
private:
auto FindCodeBlockInfo(u64 rip) {
return std::find_if(code_block_infos.begin(), code_block_infos.end(), [&](const auto& x) { return x.code_begin <= rip && x.code_end > rip; });
}
std::vector<CodeBlockInfo> code_block_infos;
std::mutex code_block_infos_mutex;
std::thread thread;
mach_port_t server_port;
void MessagePump();
};
MachHandler::MachHandler() {
#define KCHECK(x) ASSERT_MSG((x) == KERN_SUCCESS, "dynarmic: macOS MachHandler: init failure at {}", #x)
KCHECK(mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &server_port));
KCHECK(mach_port_insert_right(mach_task_self(), server_port, server_port, MACH_MSG_TYPE_MAKE_SEND));
KCHECK(task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS, server_port, EXCEPTION_STATE | MACH_EXCEPTION_CODES, x86_THREAD_STATE64));
// The below doesn't actually work, and I'm not sure why; as a result we get a spurious error message upon shutdown.
mach_port_t prev;
KCHECK(mach_port_request_notification(mach_task_self(), server_port, MACH_NOTIFY_PORT_DESTROYED, 0, server_port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &prev));
#undef KCHECK
thread = std::thread(&MachHandler::MessagePump, this);
}
MachHandler::~MachHandler() {
mach_port_destroy(mach_task_self(), server_port);
thread.join();
}
void MachHandler::MessagePump() {
mach_msg_return_t mr;
MachMessage request;
MachMessage reply;
while (true) {
mr = mach_msg(&request.head, MACH_RCV_MSG | MACH_RCV_LARGE, 0, sizeof(request), server_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
if (mr != MACH_MSG_SUCCESS) {
fmt::print(stderr, "dynarmic: macOS MachHandler: Failed to receive mach message. error: {:#08x} ({})\n", mr, mach_error_string(mr));
return;
}
if (!mach_exc_server(&request.head, &reply.head)) {
fmt::print(stderr, "dynarmic: macOS MachHandler: Unexpected mach message\n");
return;
}
mr = mach_msg(&reply.head, MACH_SEND_MSG, reply.head.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
if (mr != MACH_MSG_SUCCESS) {
fmt::print(stderr, "dynarmic: macOS MachHandler: Failed to send mach message. error: {:#08x} ({})\n", mr, mach_error_string(mr));
return;
}
}
}
kern_return_t MachHandler::HandleRequest(x86_thread_state64_t* ts) {
std::lock_guard<std::mutex> guard(code_block_infos_mutex);
const auto iter = FindCodeBlockInfo(ts->__rip);
if (iter == code_block_infos.end()) {
fmt::print(stderr, "dynarmic: macOS MachHandler: Exception was not in registered code blocks (rip {:#016x})\n", ts->__rip);
return KERN_FAILURE;
}
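// Synthesize a call at the faulting instruction: push the callback's
// ret_rip as a return address and resume execution at its call_rip.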
FakeCall fc = iter->cb(ts->__rip);
ts->__rsp -= sizeof(u64);
*Common::BitCast<u64*>(ts->__rsp) = fc.ret_rip;
ts->__rip = fc.call_rip;
return KERN_SUCCESS;
}
void MachHandler::AddCodeBlock(CodeBlockInfo cbi) {
std::lock_guard<std::mutex> guard(code_block_infos_mutex);
if (auto iter = FindCodeBlockInfo(cbi.code_begin); iter != code_block_infos.end()) {
code_block_infos.erase(iter);
}
code_block_infos.push_back(cbi);
}
void MachHandler::RemoveCodeBlock(u64 rip) {
std::lock_guard<std::mutex> guard(code_block_infos_mutex);
const auto iter = FindCodeBlockInfo(rip);
if (iter == code_block_infos.end()) {
return;
}
code_block_infos.erase(iter);
}
MachHandler mach_handler;
} // anonymous namespace
mig_external kern_return_t catch_mach_exception_raise(mach_port_t, mach_port_t, mach_port_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t) {
fmt::print(stderr, "dynarmic: Unexpected mach message: mach_exception_raise\n");
return KERN_FAILURE;
}
mig_external kern_return_t catch_mach_exception_raise_state_identity(mach_port_t, mach_port_t, mach_port_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, int*, thread_state_t, mach_msg_type_number_t, thread_state_t, mach_msg_type_number_t*) {
fmt::print(stderr, "dynarmic: Unexpected mach message: mach_exception_raise_state_identity\n");
return KERN_FAILURE;
}
mig_external kern_return_t catch_mach_exception_raise_state(
mach_port_t /*exception_port*/,
exception_type_t exception,
const mach_exception_data_t /*code*/, // code[0] is as per kern_return.h, code[1] is rip.
mach_msg_type_number_t /*codeCnt*/,
int* flavor,
const thread_state_t old_state,
mach_msg_type_number_t old_stateCnt,
thread_state_t new_state,
mach_msg_type_number_t* new_stateCnt
) {
if (!flavor || !new_stateCnt) {
fmt::print(stderr, "dynarmic: catch_mach_exception_raise_state: Invalid arguments.\n");
return KERN_INVALID_ARGUMENT;
}
if (*flavor != x86_THREAD_STATE64 || old_stateCnt != x86_THREAD_STATE64_COUNT || *new_stateCnt < x86_THREAD_STATE64_COUNT) {
fmt::print(stderr, "dynarmic: catch_mach_exception_raise_state: Unexpected flavor.\n");
return KERN_INVALID_ARGUMENT;
}
if (exception != EXC_BAD_ACCESS) {
fmt::print(stderr, "dynarmic: catch_mach_exception_raise_state: Unexpected exception type.\n");
return KERN_FAILURE;
}
x86_thread_state64_t* ts = reinterpret_cast<x86_thread_state64_t*>(new_state);
std::memcpy(ts, reinterpret_cast<const x86_thread_state64_t*>(old_state), sizeof(x86_thread_state64_t));
*new_stateCnt = x86_THREAD_STATE64_COUNT;
return mach_handler.HandleRequest(ts);
}
struct ExceptionHandler::Impl final {
Impl(BlockOfCode& code)
: code_begin(Common::BitCast<u64>(code.getCode()))
, code_end(code_begin + code.GetTotalCodeSize())
{}
void SetCallback(std::function<FakeCall(u64)> cb) {
CodeBlockInfo cbi;
cbi.code_begin = code_begin;
cbi.code_end = code_end;
cbi.cb = cb;
mach_handler.AddCodeBlock(cbi);
}
~Impl() {
mach_handler.RemoveCodeBlock(code_begin);
}
private:
u64 code_begin, code_end;
};
ExceptionHandler::ExceptionHandler() = default;
ExceptionHandler::~ExceptionHandler() = default;
void ExceptionHandler::Register(BlockOfCode& code) {
impl = std::make_unique<Impl>(code);
}
bool ExceptionHandler::SupportsFastmem() const noexcept {
return static_cast<bool>(impl);
}
void ExceptionHandler::SetFastmemCallback(std::function<FakeCall(u64)> cb) {
impl->SetCallback(cb);
}
} // namespace Dynarmic::Backend::X64

View File

@@ -1,207 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2019 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "backend/x64/exception_handler.h"
#include <algorithm>
#include <cstring>
#include <functional>
#include <memory>
#include <mutex>
#include <vector>
#include <signal.h>
#ifdef __APPLE__
#include <sys/ucontext.h>
#else
#include <ucontext.h>
#endif
#include "backend/x64/block_of_code.h"
#include "common/assert.h"
#include "common/cast_util.h"
#include "common/common_types.h"
namespace Dynarmic::Backend::X64 {
namespace {
struct CodeBlockInfo {
u64 code_begin, code_end;
std::function<FakeCall(u64)> cb;
};
class SigHandler {
public:
SigHandler();
~SigHandler();
void AddCodeBlock(CodeBlockInfo info);
void RemoveCodeBlock(u64 rip);
bool SupportsFastmem() const { return supports_fast_mem; }
private:
auto FindCodeBlockInfo(u64 rip) {
return std::find_if(code_block_infos.begin(), code_block_infos.end(), [&](const auto& x) { return x.code_begin <= rip && x.code_end > rip; });
}
bool supports_fast_mem = true;
void* signal_stack_memory = nullptr;
std::vector<CodeBlockInfo> code_block_infos;
std::mutex code_block_infos_mutex;
struct sigaction old_sa_segv;
struct sigaction old_sa_bus;
static void SigAction(int sig, siginfo_t* info, void* raw_context);
};
SigHandler sig_handler;
SigHandler::SigHandler() {
constexpr size_t signal_stack_size = std::max(SIGSTKSZ, 2 * 1024 * 1024);
signal_stack_memory = std::malloc(signal_stack_size);
stack_t signal_stack;
signal_stack.ss_sp = signal_stack_memory;
signal_stack.ss_size = signal_stack_size;
signal_stack.ss_flags = 0;
if (sigaltstack(&signal_stack, nullptr) != 0) {
fmt::print(stderr, "dynarmic: POSIX SigHandler: init failure at sigaltstack\n");
supports_fast_mem = false;
return;
}
struct sigaction sa;
sa.sa_handler = nullptr;
sa.sa_sigaction = &SigHandler::SigAction;
sa.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART;
sigemptyset(&sa.sa_mask);
if (sigaction(SIGSEGV, &sa, &old_sa_segv) != 0) {
fmt::print(stderr, "dynarmic: POSIX SigHandler: could not set SIGSEGV handler\n");
supports_fast_mem = false;
return;
}
#ifdef __APPLE__
if (sigaction(SIGBUS, &sa, &old_sa_bus) != 0) {
fmt::print(stderr, "dynarmic: POSIX SigHandler: could not set SIGBUS handler\n");
supports_fast_mem = false;
return;
}
#endif
}
SigHandler::~SigHandler() {
std::free(signal_stack_memory);
}
void SigHandler::AddCodeBlock(CodeBlockInfo cbi) {
std::lock_guard<std::mutex> guard(code_block_infos_mutex);
if (auto iter = FindCodeBlockInfo(cbi.code_begin); iter != code_block_infos.end()) {
code_block_infos.erase(iter);
}
code_block_infos.push_back(cbi);
}
void SigHandler::RemoveCodeBlock(u64 rip) {
std::lock_guard<std::mutex> guard(code_block_infos_mutex);
const auto iter = FindCodeBlockInfo(rip);
if (iter == code_block_infos.end()) {
return;
}
code_block_infos.erase(iter);
}
void SigHandler::SigAction(int sig, siginfo_t* info, void* raw_context) {
ASSERT(sig == SIGSEGV || sig == SIGBUS);
#if defined(__APPLE__)
#define CTX_RIP (((ucontext_t*)raw_context)->uc_mcontext->__ss.__rip)
#define CTX_RSP (((ucontext_t*)raw_context)->uc_mcontext->__ss.__rsp)
#elif defined(__linux__)
#define CTX_RIP (((ucontext_t*)raw_context)->uc_mcontext.gregs[REG_RIP])
#define CTX_RSP (((ucontext_t*)raw_context)->uc_mcontext.gregs[REG_RSP])
#elif defined(__FreeBSD__)
#define CTX_RIP (((ucontext_t*)raw_context)->uc_mcontext.mc_rip)
#define CTX_RSP (((ucontext_t*)raw_context)->uc_mcontext.mc_rsp)
#else
#error "Unknown platform"
#endif
{
std::lock_guard<std::mutex> guard(sig_handler.code_block_infos_mutex);
const auto iter = sig_handler.FindCodeBlockInfo(CTX_RIP);
if (iter != sig_handler.code_block_infos.end()) {
FakeCall fc = iter->cb(CTX_RIP);
CTX_RSP -= sizeof(u64);
*Common::BitCast<u64*>(CTX_RSP) = fc.ret_rip;
CTX_RIP = fc.call_rip;
return;
}
}
fmt::print(stderr, "dynarmic: POSIX SigHandler: Exception was not in registered code blocks (rip {:#016x})\n", CTX_RIP);
struct sigaction* retry_sa = sig == SIGSEGV ? &sig_handler.old_sa_segv : &sig_handler.old_sa_bus;
if (retry_sa->sa_flags & SA_SIGINFO) {
retry_sa->sa_sigaction(sig, info, raw_context);
return;
}
if (retry_sa->sa_handler == SIG_DFL) {
signal(sig, SIG_DFL);
return;
}
if (retry_sa->sa_handler == SIG_IGN) {
return;
}
retry_sa->sa_handler(sig);
}
} // anonymous namespace
struct ExceptionHandler::Impl final {
Impl(BlockOfCode& code)
: code_begin(Common::BitCast<u64>(code.getCode()))
, code_end(code_begin + code.GetTotalCodeSize())
{}
void SetCallback(std::function<FakeCall(u64)> cb) {
CodeBlockInfo cbi;
cbi.code_begin = code_begin;
cbi.code_end = code_end;
cbi.cb = cb;
sig_handler.AddCodeBlock(cbi);
}
~Impl() {
sig_handler.RemoveCodeBlock(code_begin);
}
private:
u64 code_begin, code_end;
};
ExceptionHandler::ExceptionHandler() = default;
ExceptionHandler::~ExceptionHandler() = default;
void ExceptionHandler::Register(BlockOfCode& code) {
impl = std::make_unique<Impl>(code);
}
bool ExceptionHandler::SupportsFastmem() const noexcept {
return static_cast<bool>(impl) && sig_handler.SupportsFastmem();
}
void ExceptionHandler::SetFastmemCallback(std::function<FakeCall(u64)> cb) {
impl->SetCallback(cb);
}
} // namespace Dynarmic::Backend::X64

View File

@@ -1,262 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <cstring>
#include <vector>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include "backend/x64/block_of_code.h"
#include "backend/x64/exception_handler.h"
#include "common/assert.h"
#include "common/cast_util.h"
#include "common/common_types.h"
#include "common/safe_ops.h"
using UBYTE = u8;
enum UNWIND_REGISTER_CODES {
UWRC_RAX,
UWRC_RCX,
UWRC_RDX,
UWRC_RBX,
UWRC_RSP,
UWRC_RBP,
UWRC_RSI,
UWRC_RDI,
UWRC_R8,
UWRC_R9,
UWRC_R10,
UWRC_R11,
UWRC_R12,
UWRC_R13,
UWRC_R14,
UWRC_R15,
};
enum UNWIND_OPCODE {
UWOP_PUSH_NONVOL = 0,
UWOP_ALLOC_LARGE = 1,
UWOP_ALLOC_SMALL = 2,
UWOP_SET_FPREG = 3,
UWOP_SAVE_NONVOL = 4,
UWOP_SAVE_NONVOL_FAR = 5,
UWOP_SAVE_XMM128 = 8,
UWOP_SAVE_XMM128_FAR = 9,
UWOP_PUSH_MACHFRAME = 10,
};
union UNWIND_CODE {
struct {
UBYTE CodeOffset;
UBYTE UnwindOp : 4;
UBYTE OpInfo : 4;
} code;
USHORT FrameOffset;
};
// UNWIND_INFO is a tail-padded structure
struct UNWIND_INFO {
UBYTE Version : 3;
UBYTE Flags : 5;
UBYTE SizeOfProlog;
UBYTE CountOfCodes;
UBYTE FrameRegister : 4;
UBYTE FrameOffset : 4;
// UNWIND_CODE UnwindCode[];
// With Flags == 0 there are no additional fields.
// OPTIONAL UNW_EXCEPTION_INFO ExceptionInfo;
};
struct UNW_EXCEPTION_INFO {
ULONG ExceptionHandler;
// OPTIONAL ARBITRARY HandlerData;
};
namespace Dynarmic::Backend::X64 {
struct PrologueInformation {
std::vector<UNWIND_CODE> unwind_code;
size_t number_of_unwind_code_entries;
u8 prolog_size;
};
static PrologueInformation GetPrologueInformation() {
PrologueInformation ret;
const auto next_entry = [&]() -> UNWIND_CODE& {
ret.unwind_code.emplace_back();
return ret.unwind_code.back();
};
const auto push_nonvol = [&](u8 offset, UNWIND_REGISTER_CODES reg) {
auto& entry = next_entry();
entry.code.CodeOffset = offset;
entry.code.UnwindOp = UWOP_PUSH_NONVOL;
entry.code.OpInfo = reg;
};
const auto alloc_large = [&](u8 offset, size_t size) {
ASSERT(size % 8 == 0);
auto& entry = next_entry();
entry.code.CodeOffset = offset;
entry.code.UnwindOp = UWOP_ALLOC_LARGE;
if (size / 8 <= 0xFFFF) {
// OpInfo == 0: the size is recorded scaled by 8 in one extra slot.
entry.code.OpInfo = 0;
auto& size_entry = next_entry();
size_entry.FrameOffset = static_cast<USHORT>(size / 8);
} else {
// OpInfo == 1: the unscaled size is recorded in two extra slots.
entry.code.OpInfo = 1;
auto& size_entry_1 = next_entry();
size_entry_1.FrameOffset = static_cast<USHORT>(size);
auto& size_entry_2 = next_entry();
size_entry_2.FrameOffset = static_cast<USHORT>(size >> 16);
}
};
const auto save_xmm128 = [&](u8 offset, u8 reg, size_t frame_offset) {
ASSERT(frame_offset % 16 == 0);
auto& entry = next_entry();
entry.code.CodeOffset = offset;
entry.code.UnwindOp = UWOP_SAVE_XMM128;
entry.code.OpInfo = reg;
auto& offset_entry = next_entry();
offset_entry.FrameOffset = static_cast<USHORT>(frame_offset / 16);
};
// This is a list of operations that occur in the prologue.
// The debugger uses this information to retrieve register values and
// to calculate the size of the stack frame.
ret.prolog_size = 89;
save_xmm128(89, 15, 0xB0); // +050 44 0F 29 BC 24 B0 00 00 00 movaps xmmword ptr [rsp+0B0h],xmm15
save_xmm128(80, 14, 0xA0); // +047 44 0F 29 B4 24 A0 00 00 00 movaps xmmword ptr [rsp+0A0h],xmm14
save_xmm128(71, 13, 0x90); // +03E 44 0F 29 AC 24 90 00 00 00 movaps xmmword ptr [rsp+90h],xmm13
save_xmm128(62, 12, 0x80); // +035 44 0F 29 A4 24 80 00 00 00 movaps xmmword ptr [rsp+80h],xmm12
save_xmm128(53, 11, 0x70); // +02F 44 0F 29 5C 24 70 movaps xmmword ptr [rsp+70h],xmm11
save_xmm128(47, 10, 0x60); // +029 44 0F 29 54 24 60 movaps xmmword ptr [rsp+60h],xmm10
save_xmm128(41, 9, 0x50); // +023 44 0F 29 4C 24 50 movaps xmmword ptr [rsp+50h],xmm9
save_xmm128(35, 8, 0x40); // +01D 44 0F 29 44 24 40 movaps xmmword ptr [rsp+40h],xmm8
save_xmm128(29, 7, 0x30); // +018 0F 29 7C 24 30 movaps xmmword ptr [rsp+30h],xmm7
save_xmm128(24, 6, 0x20); // +013 0F 29 74 24 20 movaps xmmword ptr [rsp+20h],xmm6
alloc_large(19, 0xC8); // +00C 48 81 EC C8 00 00 00 sub rsp,0C8h
push_nonvol(12, UWRC_R15); // +00A 41 57 push r15
push_nonvol(10, UWRC_R14); // +008 41 56 push r14
push_nonvol(8, UWRC_R13); // +006 41 55 push r13
push_nonvol(6, UWRC_R12); // +004 41 54 push r12
push_nonvol(4, UWRC_RBP); // +003 55 push rbp
push_nonvol(3, UWRC_RDI); // +002 57 push rdi
push_nonvol(2, UWRC_RSI); // +001 56 push rsi
push_nonvol(1, UWRC_RBX); // +000 53 push rbx
ret.number_of_unwind_code_entries = ret.unwind_code.size();
// The Windows API requires the size of the unwind_code array
// to be a multiple of two for alignment reasons.
if (ret.unwind_code.size() % 2 == 1) {
auto& last_entry = next_entry();
last_entry.FrameOffset = 0;
}
ASSERT(ret.unwind_code.size() % 2 == 0);
return ret;
}
struct ExceptionHandler::Impl final {
Impl(BlockOfCode& code) {
const auto prolog_info = GetPrologueInformation();
code.align(16);
const u8* exception_handler_without_cb = code.getCurr<u8*>();
code.mov(code.eax, static_cast<u32>(ExceptionContinueSearch));
code.ret();
code.align(16);
const u8* exception_handler_with_cb = code.getCurr<u8*>();
// Our 3rd argument is a PCONTEXT.
// If not within our codeblock, ignore this exception.
code.mov(code.rax, Safe::Negate(Common::BitCast<u64>(code.getCode())));
code.add(code.rax, code.qword[code.ABI_PARAM3 + Xbyak::RegExp(offsetof(CONTEXT, Rip))]);
code.cmp(code.rax, static_cast<u32>(code.GetTotalCodeSize()));
code.ja(exception_handler_without_cb);
code.sub(code.rsp, 8);
code.mov(code.ABI_PARAM1, Common::BitCast<u64>(&cb));
code.mov(code.ABI_PARAM2, code.ABI_PARAM3);
code.CallLambda(
[](const std::function<FakeCall(u64)>& cb_, PCONTEXT ctx){
FakeCall fc = cb_(ctx->Rip);
ctx->Rsp -= sizeof(u64);
*Common::BitCast<u64*>(ctx->Rsp) = fc.ret_rip;
ctx->Rip = fc.call_rip;
}
);
code.add(code.rsp, 8);
code.mov(code.eax, static_cast<u32>(ExceptionContinueExecution));
code.ret();
exception_handler_without_cb_offset = static_cast<ULONG>(exception_handler_without_cb - code.getCode<u8*>());
exception_handler_with_cb_offset = static_cast<ULONG>(exception_handler_with_cb - code.getCode<u8*>());
code.align(16);
UNWIND_INFO* unwind_info = static_cast<UNWIND_INFO*>(code.AllocateFromCodeSpace(sizeof(UNWIND_INFO)));
unwind_info->Version = 1;
unwind_info->Flags = UNW_FLAG_EHANDLER;
unwind_info->SizeOfProlog = prolog_info.prolog_size;
unwind_info->CountOfCodes = static_cast<UBYTE>(prolog_info.number_of_unwind_code_entries);
unwind_info->FrameRegister = 0; // No frame register present
unwind_info->FrameOffset = 0; // Unused because FrameRegister == 0
// UNWIND_INFO::UnwindCode field:
const size_t size_of_unwind_code = sizeof(UNWIND_CODE) * prolog_info.unwind_code.size();
UNWIND_CODE* unwind_code = static_cast<UNWIND_CODE*>(code.AllocateFromCodeSpace(size_of_unwind_code));
memcpy(unwind_code, prolog_info.unwind_code.data(), size_of_unwind_code);
// UNWIND_INFO::ExceptionInfo field:
except_info = static_cast<UNW_EXCEPTION_INFO*>(code.AllocateFromCodeSpace(sizeof(UNW_EXCEPTION_INFO)));
except_info->ExceptionHandler = exception_handler_without_cb_offset;
code.align(16);
rfuncs = static_cast<RUNTIME_FUNCTION*>(code.AllocateFromCodeSpace(sizeof(RUNTIME_FUNCTION)));
rfuncs->BeginAddress = static_cast<DWORD>(0);
rfuncs->EndAddress = static_cast<DWORD>(code.GetTotalCodeSize());
rfuncs->UnwindData = static_cast<DWORD>(reinterpret_cast<u8*>(unwind_info) - code.getCode());
RtlAddFunctionTable(rfuncs, 1, reinterpret_cast<DWORD64>(code.getCode()));
}
void SetCallback(std::function<FakeCall(u64)> new_cb) {
cb = new_cb;
except_info->ExceptionHandler = cb ? exception_handler_with_cb_offset : exception_handler_without_cb_offset;
}
~Impl() {
RtlDeleteFunctionTable(rfuncs);
}
private:
RUNTIME_FUNCTION* rfuncs;
std::function<FakeCall(u64)> cb;
UNW_EXCEPTION_INFO* except_info;
ULONG exception_handler_without_cb_offset;
ULONG exception_handler_with_cb_offset;
};
ExceptionHandler::ExceptionHandler() = default;
ExceptionHandler::~ExceptionHandler() = default;
void ExceptionHandler::Register(BlockOfCode& code) {
impl = std::make_unique<Impl>(code);
}
bool ExceptionHandler::SupportsFastmem() const noexcept {
return static_cast<bool>(impl);
}
void ExceptionHandler::SetFastmemCallback(std::function<FakeCall(u64)> cb) {
impl->SetCallback(cb);
}
} // namespace Dynarmic::Backend::X64

View File

@@ -1,59 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <algorithm>
#include <dynarmic/exclusive_monitor.h>
#include "common/assert.h"
namespace Dynarmic {
ExclusiveMonitor::ExclusiveMonitor(size_t processor_count) :
exclusive_addresses(processor_count, INVALID_EXCLUSIVE_ADDRESS), exclusive_values(processor_count) {
Unlock();
}
size_t ExclusiveMonitor::GetProcessorCount() const {
return exclusive_addresses.size();
}
void ExclusiveMonitor::Lock() {
while (is_locked.test_and_set(std::memory_order_acquire)) {}
}
void ExclusiveMonitor::Unlock() {
is_locked.clear(std::memory_order_release);
}
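// Note: on success the monitor is left locked, presumably so the caller can
// complete the exclusive write before calling Unlock() itself.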
bool ExclusiveMonitor::CheckAndClear(size_t processor_id, VAddr address) {
const VAddr masked_address = address & RESERVATION_GRANULE_MASK;
Lock();
if (exclusive_addresses[processor_id] != masked_address) {
Unlock();
return false;
}
for (VAddr& other_address : exclusive_addresses) {
if (other_address == masked_address) {
other_address = INVALID_EXCLUSIVE_ADDRESS;
}
}
return true;
}
void ExclusiveMonitor::Clear() {
Lock();
std::fill(exclusive_addresses.begin(), exclusive_addresses.end(), INVALID_EXCLUSIVE_ADDRESS);
Unlock();
}
void ExclusiveMonitor::ClearProcessor(size_t processor_id) {
Lock();
exclusive_addresses[processor_id] = INVALID_EXCLUSIVE_ADDRESS;
Unlock();
}
} // namespace Dynarmic

View File

@@ -1,22 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <xbyak.h>
#include "backend/x64/hostloc.h"
namespace Dynarmic::Backend::X64 {
Xbyak::Reg64 HostLocToReg64(HostLoc loc) {
ASSERT(HostLocIsGPR(loc));
return Xbyak::Reg64(static_cast<int>(loc));
}
Xbyak::Xmm HostLocToXmm(HostLoc loc) {
ASSERT(HostLocIsXMM(loc));
return Xbyak::Xmm(static_cast<int>(loc) - static_cast<int>(HostLoc::XMM0));
}
} // namespace Dynarmic::Backend::X64

View File

@@ -1,124 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <xbyak.h>
#include "common/assert.h"
#include "common/common_types.h"
namespace Dynarmic::Backend::X64 {
enum class HostLoc {
// Ordering of the registers is intentional. See also: HostLocToX64.
RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI, R8, R9, R10, R11, R12, R13, R14, R15,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15,
CF, PF, AF, ZF, SF, OF,
FirstSpill,
};
constexpr size_t NonSpillHostLocCount = static_cast<size_t>(HostLoc::FirstSpill);
inline bool HostLocIsGPR(HostLoc reg) {
return reg >= HostLoc::RAX && reg <= HostLoc::R15;
}
inline bool HostLocIsXMM(HostLoc reg) {
return reg >= HostLoc::XMM0 && reg <= HostLoc::XMM15;
}
inline bool HostLocIsRegister(HostLoc reg) {
return HostLocIsGPR(reg) || HostLocIsXMM(reg);
}
inline bool HostLocIsFlag(HostLoc reg) {
return reg >= HostLoc::CF && reg <= HostLoc::OF;
}
inline HostLoc HostLocRegIdx(int idx) {
ASSERT(idx >= 0 && idx <= 15);
return static_cast<HostLoc>(idx);
}
inline HostLoc HostLocXmmIdx(int idx) {
ASSERT(idx >= 0 && idx <= 15);
return static_cast<HostLoc>(static_cast<size_t>(HostLoc::XMM0) + idx);
}
inline HostLoc HostLocSpill(size_t i) {
return static_cast<HostLoc>(static_cast<size_t>(HostLoc::FirstSpill) + i);
}
inline bool HostLocIsSpill(HostLoc reg) {
return reg >= HostLoc::FirstSpill;
}
inline size_t HostLocBitWidth(HostLoc loc) {
if (HostLocIsGPR(loc))
return 64;
if (HostLocIsXMM(loc))
return 128;
if (HostLocIsSpill(loc))
return 128;
if (HostLocIsFlag(loc))
return 1;
UNREACHABLE();
}
using HostLocList = std::initializer_list<HostLoc>;
// RSP is preserved for function calls
// R15 contains the JitState pointer
const HostLocList any_gpr = {
HostLoc::RAX,
HostLoc::RBX,
HostLoc::RCX,
HostLoc::RDX,
HostLoc::RSI,
HostLoc::RDI,
HostLoc::RBP,
HostLoc::R8,
HostLoc::R9,
HostLoc::R10,
HostLoc::R11,
HostLoc::R12,
HostLoc::R13,
HostLoc::R14,
};
// XMM0 is reserved for use by instructions that implicitly use it as an argument
const HostLocList any_xmm = {
HostLoc::XMM1,
HostLoc::XMM2,
HostLoc::XMM3,
HostLoc::XMM4,
HostLoc::XMM5,
HostLoc::XMM6,
HostLoc::XMM7,
HostLoc::XMM8,
HostLoc::XMM9,
HostLoc::XMM10,
HostLoc::XMM11,
HostLoc::XMM12,
HostLoc::XMM13,
HostLoc::XMM14,
HostLoc::XMM15,
};
Xbyak::Reg64 HostLocToReg64(HostLoc loc);
Xbyak::Xmm HostLocToXmm(HostLoc loc);
template <typename JitStateType>
Xbyak::Address SpillToOpArg(HostLoc loc) {
ASSERT(HostLocIsSpill(loc));
size_t i = static_cast<size_t>(loc) - static_cast<size_t>(HostLoc::FirstSpill);
ASSERT_MSG(i < JitStateType::SpillCount, "Spill index greater than number of available spill locations");
return JitStateType::GetSpillLocationFromIndex(i);
}
} // namespace Dynarmic::Backend::X64

View File

@@ -1,43 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstddef>
namespace Dynarmic::Backend::X64 {
struct JitStateInfo {
template <typename JitStateType>
JitStateInfo(const JitStateType&)
: offsetof_cycles_remaining(offsetof(JitStateType, cycles_remaining))
, offsetof_cycles_to_run(offsetof(JitStateType, cycles_to_run))
, offsetof_save_host_MXCSR(offsetof(JitStateType, save_host_MXCSR))
, offsetof_guest_MXCSR(offsetof(JitStateType, guest_MXCSR))
, offsetof_asimd_MXCSR(offsetof(JitStateType, asimd_MXCSR))
, offsetof_rsb_ptr(offsetof(JitStateType, rsb_ptr))
, rsb_ptr_mask(JitStateType::RSBPtrMask)
, offsetof_rsb_location_descriptors(offsetof(JitStateType, rsb_location_descriptors))
, offsetof_rsb_codeptrs(offsetof(JitStateType, rsb_codeptrs))
, offsetof_cpsr_nzcv(offsetof(JitStateType, cpsr_nzcv))
, offsetof_fpsr_exc(offsetof(JitStateType, fpsr_exc))
, offsetof_fpsr_qc(offsetof(JitStateType, fpsr_qc))
{}
const size_t offsetof_cycles_remaining;
const size_t offsetof_cycles_to_run;
const size_t offsetof_save_host_MXCSR;
const size_t offsetof_guest_MXCSR;
const size_t offsetof_asimd_MXCSR;
const size_t offsetof_rsb_ptr;
const size_t rsb_ptr_mask;
const size_t offsetof_rsb_location_descriptors;
const size_t offsetof_rsb_codeptrs;
const size_t offsetof_cpsr_nzcv;
const size_t offsetof_fpsr_exc;
const size_t offsetof_fpsr_qc;
};
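// Illustrative use: the emitter addresses JIT state through r15, e.g.
//   code.or_(code.byte[code.r15 + jsi.offsetof_fpsr_qc], overflow);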
} // namespace Dynarmic::Backend::X64

View File

@@ -1,53 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include "common/common_types.h"
#include "common/bit_util.h"
namespace Dynarmic::Backend::X64::NZCV {
constexpr u32 arm_mask = 0xF000'0000;
constexpr u32 x64_mask = 0xC101;
constexpr size_t x64_n_flag_bit = 15;
constexpr size_t x64_z_flag_bit = 14;
constexpr size_t x64_c_flag_bit = 8;
constexpr size_t x64_v_flag_bit = 0;
/// This is a constant used to create the x64 flags format from the ARM format.
/// NZCV * multiplier: NZCV0NZCV000NZCV
/// x64_flags format: NZ-----C-------V
constexpr u32 to_x64_multiplier = 0x1081;
/// This is a constant used to create the ARM format from the x64 flags format.
constexpr u32 from_x64_multiplier = 0x1021'0000;
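/// Worked example: ToX64(0xF000'0000) == 0xC101 (all four flags set), and
/// FromX64(0xC101) == 0xF000'0000 round-trips it.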
inline u32 ToX64(u32 nzcv) {
/* Naive implementation:
u32 x64_flags = 0;
x64_flags |= Common::Bit<31>(nzcv) ? 1 << 15 : 0;
x64_flags |= Common::Bit<30>(nzcv) ? 1 << 14 : 0;
x64_flags |= Common::Bit<29>(nzcv) ? 1 << 8 : 0;
x64_flags |= Common::Bit<28>(nzcv) ? 1 : 0;
return x64_flags;
*/
return ((nzcv >> 28) * to_x64_multiplier) & x64_mask;
}
inline u32 FromX64(u32 x64_flags) {
/* Naive implementation:
u32 nzcv = 0;
nzcv |= Common::Bit<15>(x64_flags) ? 1 << 31 : 0;
nzcv |= Common::Bit<14>(x64_flags) ? 1 << 30 : 0;
nzcv |= Common::Bit<8>(x64_flags) ? 1 << 29 : 0;
nzcv |= Common::Bit<0>(x64_flags) ? 1 << 28 : 0;
return nzcv;
*/
return ((x64_flags & x64_mask) * from_x64_multiplier) & arm_mask;
}
} // namespace Dynarmic::Backend::X64::NZCV

View File

@@ -1,77 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <xbyak.h>
#include "common/assert.h"
namespace Dynarmic::Backend::X64 {
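// A small tagged union over Xbyak operand kinds, letting an emitter accept
// either a register or a memory address through a single parameter type.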
struct OpArg {
OpArg() : type(Type::Operand), inner_operand() {}
/* implicit */ OpArg(const Xbyak::Address& address) : type(Type::Address), inner_address(address) {}
/* implicit */ OpArg(const Xbyak::Reg& reg) : type(Type::Reg), inner_reg(reg) {}
Xbyak::Operand& operator*() {
switch (type) {
case Type::Address:
return inner_address;
case Type::Operand:
return inner_operand;
case Type::Reg:
return inner_reg;
}
UNREACHABLE();
}
void setBit(int bits) {
switch (type) {
case Type::Address:
inner_address.setBit(bits);
return;
case Type::Operand:
inner_operand.setBit(bits);
return;
case Type::Reg:
switch (bits) {
case 8:
inner_reg = inner_reg.cvt8();
return;
case 16:
inner_reg = inner_reg.cvt16();
return;
case 32:
inner_reg = inner_reg.cvt32();
return;
case 64:
inner_reg = inner_reg.cvt64();
return;
default:
ASSERT_MSG(false, "Invalid bits");
return;
}
}
UNREACHABLE();
}
private:
enum class Type {
Operand,
Address,
Reg,
};
Type type;
union {
Xbyak::Operand inner_operand;
Xbyak::Address inner_address;
Xbyak::Reg inner_reg;
};
};
} // namespace Dynarmic::Backend::X64

View File

@@ -1,95 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <cstddef>
#include <string>
#include "backend/x64/perf_map.h"
#ifdef __linux__
#include <cstdio>
#include <cstdlib>
#include <mutex>
#include <sys/types.h>
#include <unistd.h>
#include <fmt/format.h>
#include "common/common_types.h"
namespace Dynarmic::Backend::X64 {
namespace {
std::mutex mutex;
std::FILE* file = nullptr;
void OpenFile() {
const char* perf_dir = std::getenv("PERF_BUILDID_DIR");
if (!perf_dir) {
file = nullptr;
return;
}
const pid_t pid = getpid();
const std::string filename = fmt::format("{:s}/perf-{:d}.map", perf_dir, pid);
file = std::fopen(filename.c_str(), "w");
if (!file) {
return;
}
std::setvbuf(file, nullptr, _IONBF, 0);
}
} // anonymous namespace
namespace detail {
void PerfMapRegister(const void* start, const void* end, std::string_view friendly_name) {
if (start == end) {
// Nothing to register
return;
}
std::lock_guard guard{mutex};
if (!file) {
OpenFile();
if (!file) {
return;
}
}
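// One "<start-address> <size> <symbol name>" line per region, following
// perf's JIT map convention.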
const std::string line = fmt::format("{:016x} {:016x} {:s}\n", reinterpret_cast<u64>(start), reinterpret_cast<u64>(end) - reinterpret_cast<u64>(start), friendly_name);
std::fwrite(line.data(), sizeof *line.data(), line.size(), file);
}
} // namespace detail
void PerfMapClear() {
std::lock_guard guard{mutex};
if (!file) {
return;
}
std::fclose(file);
file = nullptr;
OpenFile();
}
} // namespace Dynarmic::Backend::X64
#else
namespace Dynarmic::Backend::X64 {
namespace detail {
void PerfMapRegister(const void*, const void*, std::string_view) {}
} // namespace detail
void PerfMapClear() {}
} // namespace Dynarmic::Backend::X64
#endif

View File

@@ -1,25 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <string_view>
#include "common/cast_util.h"
namespace Dynarmic::Backend::X64 {
namespace detail {
void PerfMapRegister(const void* start, const void* end, std::string_view friendly_name);
} // namespace detail
template<typename T>
void PerfMapRegister(T start, const void* end, std::string_view friendly_name) {
detail::PerfMapRegister(Common::BitCast<const void*>(start), end, friendly_name);
}
void PerfMapClear();
} // namespace Dynarmic::Backend::X64

View File

@@ -1,696 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <algorithm>
#include <numeric>
#include <utility>
#include <fmt/ostream.h>
#include <xbyak.h>
#include "backend/x64/abi.h"
#include "backend/x64/reg_alloc.h"
#include "common/assert.h"
namespace Dynarmic::Backend::X64 {
#define MAYBE_AVX(OPCODE, ...) \
[&] { \
if (code.HasAVX()) { \
code.v##OPCODE(__VA_ARGS__); \
} else { \
code.OPCODE(__VA_ARGS__); \
} \
}()
static bool CanExchange(HostLoc a, HostLoc b) {
return HostLocIsGPR(a) && HostLocIsGPR(b);
}
// Minimum number of bits required to represent a type
static size_t GetBitWidth(IR::Type type) {
switch (type) {
case IR::Type::A32Reg:
case IR::Type::A32ExtReg:
case IR::Type::A64Reg:
case IR::Type::A64Vec:
case IR::Type::CoprocInfo:
case IR::Type::Cond:
case IR::Type::Void:
case IR::Type::Table:
ASSERT_FALSE("Type {} cannot be represented at runtime", type);
case IR::Type::Opaque:
ASSERT_FALSE("Not a concrete type");
case IR::Type::U1:
return 8;
case IR::Type::U8:
return 8;
case IR::Type::U16:
return 16;
case IR::Type::U32:
return 32;
case IR::Type::U64:
return 64;
case IR::Type::U128:
return 128;
case IR::Type::NZCVFlags:
return 32; // TODO: Update to 16 when flags optimization is done
}
UNREACHABLE();
}
static bool IsValuelessType(IR::Type type) {
switch (type) {
case IR::Type::Table:
return true;
default:
return false;
}
}
bool HostLocInfo::IsLocked() const {
return is_being_used_count > 0;
}
bool HostLocInfo::IsEmpty() const {
return is_being_used_count == 0 && values.empty();
}
bool HostLocInfo::IsLastUse() const {
return is_being_used_count == 0 && current_references == 1 && accumulated_uses + 1 == total_uses;
}
void HostLocInfo::ReadLock() {
ASSERT(!is_scratch);
is_being_used_count++;
}
void HostLocInfo::WriteLock() {
ASSERT(is_being_used_count == 0);
is_being_used_count++;
is_scratch = true;
}
void HostLocInfo::AddArgReference() {
current_references++;
ASSERT(accumulated_uses + current_references <= total_uses);
}
void HostLocInfo::ReleaseOne() {
is_being_used_count--;
is_scratch = false;
if (current_references == 0)
return;
accumulated_uses++;
current_references--;
if (current_references == 0)
ReleaseAll();
}
void HostLocInfo::ReleaseAll() {
accumulated_uses += current_references;
current_references = 0;
ASSERT(total_uses == std::accumulate(values.begin(), values.end(), size_t(0), [](size_t sum, IR::Inst* inst) { return sum + inst->UseCount(); }));
if (total_uses == accumulated_uses) {
values.clear();
accumulated_uses = 0;
total_uses = 0;
max_bit_width = 0;
}
is_being_used_count = 0;
is_scratch = false;
}
bool HostLocInfo::ContainsValue(const IR::Inst* inst) const {
return std::find(values.begin(), values.end(), inst) != values.end();
}
size_t HostLocInfo::GetMaxBitWidth() const {
return max_bit_width;
}
void HostLocInfo::AddValue(IR::Inst* inst) {
values.push_back(inst);
total_uses += inst->UseCount();
max_bit_width = std::max(max_bit_width, GetBitWidth(inst->GetType()));
}
IR::Type Argument::GetType() const {
return value.GetType();
}
bool Argument::IsImmediate() const {
return value.IsImmediate();
}
bool Argument::IsVoid() const {
return GetType() == IR::Type::Void;
}
bool Argument::FitsInImmediateU32() const {
if (!IsImmediate())
return false;
const u64 imm = value.GetImmediateAsU64();
return imm < 0x100000000;
}
bool Argument::FitsInImmediateS32() const {
if (!IsImmediate())
return false;
const s64 imm = static_cast<s64>(value.GetImmediateAsU64());
return -s64(0x80000000) <= imm && imm <= s64(0x7FFFFFFF);
}
bool Argument::GetImmediateU1() const {
return value.GetU1();
}
u8 Argument::GetImmediateU8() const {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm < 0x100);
return u8(imm);
}
u16 Argument::GetImmediateU16() const {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm < 0x10000);
return u16(imm);
}
u32 Argument::GetImmediateU32() const {
const u64 imm = value.GetImmediateAsU64();
ASSERT(imm < 0x100000000);
return u32(imm);
}
u64 Argument::GetImmediateS32() const {
ASSERT(FitsInImmediateS32());
return value.GetImmediateAsU64();
}
u64 Argument::GetImmediateU64() const {
return value.GetImmediateAsU64();
}
IR::Cond Argument::GetImmediateCond() const {
ASSERT(IsImmediate() && GetType() == IR::Type::Cond);
return value.GetCond();
}
bool Argument::IsInGpr() const {
if (IsImmediate())
return false;
return HostLocIsGPR(*reg_alloc.ValueLocation(value.GetInst()));
}
bool Argument::IsInXmm() const {
if (IsImmediate())
return false;
return HostLocIsXMM(*reg_alloc.ValueLocation(value.GetInst()));
}
bool Argument::IsInMemory() const {
if (IsImmediate())
return false;
return HostLocIsSpill(*reg_alloc.ValueLocation(value.GetInst()));
}
RegAlloc::RegAlloc(BlockOfCode& code, size_t num_spills, std::function<Xbyak::Address(HostLoc)> spill_to_addr, std::vector<HostLoc> gpr_order, std::vector<HostLoc> xmm_order)
: gpr_order(gpr_order)
, xmm_order(xmm_order)
, hostloc_info(NonSpillHostLocCount + num_spills)
, code(code)
, spill_to_addr(std::move(spill_to_addr))
{}
RegAlloc::ArgumentInfo RegAlloc::GetArgumentInfo(IR::Inst* inst) {
ArgumentInfo ret = {Argument{*this}, Argument{*this}, Argument{*this}, Argument{*this}};
for (size_t i = 0; i < inst->NumArgs(); i++) {
const IR::Value arg = inst->GetArg(i);
ret[i].value = arg;
if (!arg.IsImmediate() && !IsValuelessType(arg.GetType())) {
ASSERT_MSG(ValueLocation(arg.GetInst()), "argument must already be defined");
LocInfo(*ValueLocation(arg.GetInst())).AddArgReference();
}
}
return ret;
}
Xbyak::Reg64 RegAlloc::UseGpr(Argument& arg) {
ASSERT(!arg.allocated);
arg.allocated = true;
return HostLocToReg64(UseImpl(arg.value, gpr_order));
}
Xbyak::Xmm RegAlloc::UseXmm(Argument& arg) {
ASSERT(!arg.allocated);
arg.allocated = true;
return HostLocToXmm(UseImpl(arg.value, xmm_order));
}
OpArg RegAlloc::UseOpArg(Argument& arg) {
return UseGpr(arg);
}
void RegAlloc::Use(Argument& arg, HostLoc host_loc) {
ASSERT(!arg.allocated);
arg.allocated = true;
UseImpl(arg.value, {host_loc});
}
Xbyak::Reg64 RegAlloc::UseScratchGpr(Argument& arg) {
ASSERT(!arg.allocated);
arg.allocated = true;
return HostLocToReg64(UseScratchImpl(arg.value, gpr_order));
}
Xbyak::Xmm RegAlloc::UseScratchXmm(Argument& arg) {
ASSERT(!arg.allocated);
arg.allocated = true;
return HostLocToXmm(UseScratchImpl(arg.value, xmm_order));
}
void RegAlloc::UseScratch(Argument& arg, HostLoc host_loc) {
ASSERT(!arg.allocated);
arg.allocated = true;
UseScratchImpl(arg.value, {host_loc});
}
void RegAlloc::DefineValue(IR::Inst* inst, const Xbyak::Reg& reg) {
ASSERT(reg.getKind() == Xbyak::Operand::XMM || reg.getKind() == Xbyak::Operand::REG);
const auto hostloc = static_cast<HostLoc>(reg.getIdx() + static_cast<size_t>(reg.getKind() == Xbyak::Operand::XMM ? HostLoc::XMM0 : HostLoc::RAX));
DefineValueImpl(inst, hostloc);
}
void RegAlloc::DefineValue(IR::Inst* inst, Argument& arg) {
ASSERT(!arg.allocated);
arg.allocated = true;
DefineValueImpl(inst, arg.value);
}
void RegAlloc::Release(const Xbyak::Reg& reg) {
ASSERT(reg.getKind() == Xbyak::Operand::XMM || reg.getKind() == Xbyak::Operand::REG);
const auto hostloc = static_cast<HostLoc>(reg.getIdx() + static_cast<size_t>(reg.getKind() == Xbyak::Operand::XMM ? HostLoc::XMM0 : HostLoc::RAX));
LocInfo(hostloc).ReleaseOne();
}
Xbyak::Reg64 RegAlloc::ScratchGpr() {
return HostLocToReg64(ScratchImpl(gpr_order));
}
Xbyak::Reg64 RegAlloc::ScratchGpr(HostLoc desired_location) {
return HostLocToReg64(ScratchImpl({desired_location}));
}
Xbyak::Xmm RegAlloc::ScratchXmm() {
return HostLocToXmm(ScratchImpl(xmm_order));
}
Xbyak::Xmm RegAlloc::ScratchXmm(HostLoc desired_location) {
return HostLocToXmm(ScratchImpl({desired_location}));
}
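// Returns a host location from desired_locations containing use_value,
// moving or exchanging values as required, and read-locks it for the
// duration of the current instruction.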
HostLoc RegAlloc::UseImpl(IR::Value use_value, const std::vector<HostLoc>& desired_locations) {
if (use_value.IsImmediate()) {
return LoadImmediate(use_value, ScratchImpl(desired_locations));
}
const IR::Inst* use_inst = use_value.GetInst();
const HostLoc current_location = *ValueLocation(use_inst);
const size_t max_bit_width = LocInfo(current_location).GetMaxBitWidth();
const bool can_use_current_location = std::find(desired_locations.begin(), desired_locations.end(), current_location) != desired_locations.end();
if (can_use_current_location) {
LocInfo(current_location).ReadLock();
return current_location;
}
if (LocInfo(current_location).IsLocked()) {
return UseScratchImpl(use_value, desired_locations);
}
const HostLoc destination_location = SelectARegister(desired_locations);
if (max_bit_width > HostLocBitWidth(destination_location)) {
return UseScratchImpl(use_value, desired_locations);
} else if (CanExchange(destination_location, current_location)) {
Exchange(destination_location, current_location);
} else {
MoveOutOfTheWay(destination_location);
Move(destination_location, current_location);
}
LocInfo(destination_location).ReadLock();
return destination_location;
}
HostLoc RegAlloc::UseScratchImpl(IR::Value use_value, const std::vector<HostLoc>& desired_locations) {
if (use_value.IsImmediate()) {
return LoadImmediate(use_value, ScratchImpl(desired_locations));
}
const IR::Inst* use_inst = use_value.GetInst();
const HostLoc current_location = *ValueLocation(use_inst);
const size_t bit_width = GetBitWidth(use_inst->GetType());
const bool can_use_current_location = std::find(desired_locations.begin(), desired_locations.end(), current_location) != desired_locations.end();
if (can_use_current_location && !LocInfo(current_location).IsLocked()) {
if (!LocInfo(current_location).IsLastUse()) {
MoveOutOfTheWay(current_location);
}
LocInfo(current_location).WriteLock();
return current_location;
}
const HostLoc destination_location = SelectARegister(desired_locations);
MoveOutOfTheWay(destination_location);
CopyToScratch(bit_width, destination_location, current_location);
LocInfo(destination_location).WriteLock();
return destination_location;
}
HostLoc RegAlloc::ScratchImpl(const std::vector<HostLoc>& desired_locations) {
const HostLoc location = SelectARegister(desired_locations);
MoveOutOfTheWay(location);
LocInfo(location).WriteLock();
return location;
}
void RegAlloc::HostCall(IR::Inst* result_def, std::optional<Argument::copyable_reference> arg0,
std::optional<Argument::copyable_reference> arg1,
std::optional<Argument::copyable_reference> arg2,
std::optional<Argument::copyable_reference> arg3) {
constexpr size_t args_count = 4;
constexpr std::array<HostLoc, args_count> args_hostloc = { ABI_PARAM1, ABI_PARAM2, ABI_PARAM3, ABI_PARAM4 };
const std::array<std::optional<Argument::copyable_reference>, args_count> args = { arg0, arg1, arg2, arg3 };
static const std::vector<HostLoc> other_caller_save = [args_hostloc]() {
std::vector<HostLoc> ret(ABI_ALL_CALLER_SAVE.begin(), ABI_ALL_CALLER_SAVE.end());
ret.erase(std::find(ret.begin(), ret.end(), ABI_RETURN));
for (auto hostloc : args_hostloc) {
ret.erase(std::find(ret.begin(), ret.end(), hostloc));
}
return ret;
}();
ScratchGpr(ABI_RETURN);
if (result_def) {
DefineValueImpl(result_def, ABI_RETURN);
}
for (size_t i = 0; i < args_count; i++) {
if (args[i]) {
UseScratch(*args[i], args_hostloc[i]);
#if defined(__llvm__) && !defined(_WIN32)
// LLVM puts the burden of zero-extension of 8 and 16 bit values on the caller instead of the callee
const Xbyak::Reg64 reg = HostLocToReg64(args_hostloc[i]);
switch (args[i]->get().GetType()) {
case IR::Type::U8:
code.movzx(reg.cvt32(), reg.cvt8());
break;
case IR::Type::U16:
code.movzx(reg.cvt32(), reg.cvt16());
break;
default:
break; // Nothing needs to be done
}
#endif
}
}
for (size_t i = 0; i < args_count; i++) {
if (!args[i]) {
// TODO: Force spill
ScratchGpr(args_hostloc[i]);
}
}
for (HostLoc caller_saved : other_caller_save) {
ScratchImpl({caller_saved});
}
}
void RegAlloc::EndOfAllocScope() {
for (auto& iter : hostloc_info) {
iter.ReleaseAll();
}
}
void RegAlloc::AssertNoMoreUses() {
ASSERT(std::all_of(hostloc_info.begin(), hostloc_info.end(), [](const auto& i) { return i.IsEmpty(); }));
}
HostLoc RegAlloc::SelectARegister(const std::vector<HostLoc>& desired_locations) const {
std::vector<HostLoc> candidates = desired_locations;
// Find all locations that have not been allocated.
const auto allocated_locs = std::partition(candidates.begin(), candidates.end(), [this](auto loc) {
return !this->LocInfo(loc).IsLocked();
});
candidates.erase(allocated_locs, candidates.end());
ASSERT_MSG(!candidates.empty(), "All candidate registers have already been allocated");
// Selects the best location out of the available locations.
// TODO: Actually do LRU or something. Currently we just try to pick something without a value if possible.
std::partition(candidates.begin(), candidates.end(), [this](auto loc) {
return this->LocInfo(loc).IsEmpty();
});
return candidates.front();
}
std::optional<HostLoc> RegAlloc::ValueLocation(const IR::Inst* value) const {
for (size_t i = 0; i < hostloc_info.size(); i++) {
if (hostloc_info[i].ContainsValue(value)) {
return static_cast<HostLoc>(i);
}
}
return std::nullopt;
}
void RegAlloc::DefineValueImpl(IR::Inst* def_inst, HostLoc host_loc) {
ASSERT_MSG(!ValueLocation(def_inst), "def_inst has already been defined");
LocInfo(host_loc).AddValue(def_inst);
}
void RegAlloc::DefineValueImpl(IR::Inst* def_inst, const IR::Value& use_inst) {
ASSERT_MSG(!ValueLocation(def_inst), "def_inst has already been defined");
if (use_inst.IsImmediate()) {
const HostLoc location = ScratchImpl(gpr_order);
DefineValueImpl(def_inst, location);
LoadImmediate(use_inst, location);
return;
}
ASSERT_MSG(ValueLocation(use_inst.GetInst()), "use_inst must already be defined");
const HostLoc location = *ValueLocation(use_inst.GetInst());
DefineValueImpl(def_inst, location);
}
HostLoc RegAlloc::LoadImmediate(IR::Value imm, HostLoc host_loc) {
ASSERT_MSG(imm.IsImmediate(), "imm is not an immediate");
if (HostLocIsGPR(host_loc)) {
const Xbyak::Reg64 reg = HostLocToReg64(host_loc);
const u64 imm_value = imm.GetImmediateAsU64();
if (imm_value == 0) {
code.xor_(reg.cvt32(), reg.cvt32());
} else {
code.mov(reg, imm_value);
}
return host_loc;
}
if (HostLocIsXMM(host_loc)) {
const Xbyak::Xmm reg = HostLocToXmm(host_loc);
const u64 imm_value = imm.GetImmediateAsU64();
if (imm_value == 0) {
MAYBE_AVX(xorps, reg, reg);
} else {
MAYBE_AVX(movaps, reg, code.MConst(code.xword, imm_value));
}
return host_loc;
}
UNREACHABLE();
}
void RegAlloc::Move(HostLoc to, HostLoc from) {
const size_t bit_width = LocInfo(from).GetMaxBitWidth();
ASSERT(LocInfo(to).IsEmpty() && !LocInfo(from).IsLocked());
ASSERT(bit_width <= HostLocBitWidth(to));
if (LocInfo(from).IsEmpty()) {
return;
}
EmitMove(bit_width, to, from);
LocInfo(to) = std::exchange(LocInfo(from), {});
}
void RegAlloc::CopyToScratch(size_t bit_width, HostLoc to, HostLoc from) {
ASSERT(LocInfo(to).IsEmpty() && !LocInfo(from).IsEmpty());
EmitMove(bit_width, to, from);
}
void RegAlloc::Exchange(HostLoc a, HostLoc b) {
ASSERT(!LocInfo(a).IsLocked() && !LocInfo(b).IsLocked());
ASSERT(LocInfo(a).GetMaxBitWidth() <= HostLocBitWidth(b));
ASSERT(LocInfo(b).GetMaxBitWidth() <= HostLocBitWidth(a));
if (LocInfo(a).IsEmpty()) {
Move(a, b);
return;
}
if (LocInfo(b).IsEmpty()) {
Move(b, a);
return;
}
EmitExchange(a, b);
std::swap(LocInfo(a), LocInfo(b));
}
void RegAlloc::MoveOutOfTheWay(HostLoc reg) {
ASSERT(!LocInfo(reg).IsLocked());
if (!LocInfo(reg).IsEmpty()) {
SpillRegister(reg);
}
}
void RegAlloc::SpillRegister(HostLoc loc) {
ASSERT_MSG(HostLocIsRegister(loc), "Only registers can be spilled");
ASSERT_MSG(!LocInfo(loc).IsEmpty(), "There is no need to spill unoccupied registers");
ASSERT_MSG(!LocInfo(loc).IsLocked(), "Registers that have been allocated must not be spilt");
const HostLoc new_loc = FindFreeSpill();
Move(new_loc, loc);
}
HostLoc RegAlloc::FindFreeSpill() const {
for (size_t i = static_cast<size_t>(HostLoc::FirstSpill); i < hostloc_info.size(); i++) {
const auto loc = static_cast<HostLoc>(i);
if (LocInfo(loc).IsEmpty()) {
return loc;
}
}
ASSERT_FALSE("All spill locations are full");
}
HostLocInfo& RegAlloc::LocInfo(HostLoc loc) {
ASSERT(loc != HostLoc::RSP && loc != HostLoc::R15);
return hostloc_info[static_cast<size_t>(loc)];
}
const HostLocInfo& RegAlloc::LocInfo(HostLoc loc) const {
ASSERT(loc != HostLoc::RSP && loc != HostLoc::R15);
return hostloc_info[static_cast<size_t>(loc)];
}
void RegAlloc::EmitMove(size_t bit_width, HostLoc to, HostLoc from) {
if (HostLocIsXMM(to) && HostLocIsXMM(from)) {
MAYBE_AVX(movaps, HostLocToXmm(to), HostLocToXmm(from));
} else if (HostLocIsGPR(to) && HostLocIsGPR(from)) {
ASSERT(bit_width != 128);
if (bit_width == 64) {
code.mov(HostLocToReg64(to), HostLocToReg64(from));
} else {
code.mov(HostLocToReg64(to).cvt32(), HostLocToReg64(from).cvt32());
}
} else if (HostLocIsXMM(to) && HostLocIsGPR(from)) {
ASSERT(bit_width != 128);
if (bit_width == 64) {
MAYBE_AVX(movq, HostLocToXmm(to), HostLocToReg64(from));
} else {
MAYBE_AVX(movd, HostLocToXmm(to), HostLocToReg64(from).cvt32());
}
} else if (HostLocIsGPR(to) && HostLocIsXMM(from)) {
ASSERT(bit_width != 128);
if (bit_width == 64) {
MAYBE_AVX(movq, HostLocToReg64(to), HostLocToXmm(from));
} else {
MAYBE_AVX(movd, HostLocToReg64(to).cvt32(), HostLocToXmm(from));
}
} else if (HostLocIsXMM(to) && HostLocIsSpill(from)) {
const Xbyak::Address spill_addr = spill_to_addr(from);
ASSERT(spill_addr.getBit() >= bit_width);
switch (bit_width) {
case 128:
MAYBE_AVX(movaps, HostLocToXmm(to), spill_addr);
break;
case 64:
MAYBE_AVX(movsd, HostLocToXmm(to), spill_addr);
break;
case 32:
case 16:
case 8:
MAYBE_AVX(movss, HostLocToXmm(to), spill_addr);
break;
default:
UNREACHABLE();
}
} else if (HostLocIsSpill(to) && HostLocIsXMM(from)) {
const Xbyak::Address spill_addr = spill_to_addr(to);
ASSERT(spill_addr.getBit() >= bit_width);
switch (bit_width) {
case 128:
MAYBE_AVX(movaps, spill_addr, HostLocToXmm(from));
break;
case 64:
MAYBE_AVX(movsd, spill_addr, HostLocToXmm(from));
break;
case 32:
case 16:
case 8:
MAYBE_AVX(movss, spill_addr, HostLocToXmm(from));
break;
default:
UNREACHABLE();
}
} else if (HostLocIsGPR(to) && HostLocIsSpill(from)) {
ASSERT(bit_width != 128);
if (bit_width == 64) {
code.mov(HostLocToReg64(to), spill_to_addr(from));
} else {
code.mov(HostLocToReg64(to).cvt32(), spill_to_addr(from));
}
} else if (HostLocIsSpill(to) && HostLocIsGPR(from)) {
ASSERT(bit_width != 128);
if (bit_width == 64) {
code.mov(spill_to_addr(to), HostLocToReg64(from));
} else {
code.mov(spill_to_addr(to), HostLocToReg64(from).cvt32());
}
} else {
ASSERT_FALSE("Invalid RegAlloc::EmitMove");
}
}
void RegAlloc::EmitExchange(HostLoc a, HostLoc b) {
if (HostLocIsGPR(a) && HostLocIsGPR(b)) {
code.xchg(HostLocToReg64(a), HostLocToReg64(b));
} else if (HostLocIsXMM(a) && HostLocIsXMM(b)) {
ASSERT_FALSE("Check your code: Exchanging XMM registers is unnecessary");
} else {
ASSERT_FALSE("Invalid RegAlloc::EmitExchange");
}
}
} // namespace Dynarmic::Backend::X64

View File

@@ -1,168 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <array>
#include <functional>
#include <optional>
#include <utility>
#include <vector>
#include <xbyak.h>
#include "backend/x64/block_of_code.h"
#include "backend/x64/hostloc.h"
#include "backend/x64/oparg.h"
#include "common/common_types.h"
#include "frontend/ir/cond.h"
#include "frontend/ir/microinstruction.h"
#include "frontend/ir/value.h"
namespace Dynarmic::Backend::X64 {
class RegAlloc;
struct HostLocInfo {
public:
bool IsLocked() const;
bool IsEmpty() const;
bool IsLastUse() const;
void ReadLock();
void WriteLock();
void AddArgReference();
void ReleaseOne();
void ReleaseAll();
bool ContainsValue(const IR::Inst* inst) const;
size_t GetMaxBitWidth() const;
void AddValue(IR::Inst* inst);
private:
// Current instruction state
size_t is_being_used_count = 0;
bool is_scratch = false;
// Block state
size_t current_references = 0;
size_t accumulated_uses = 0;
size_t total_uses = 0;
// Value state
std::vector<IR::Inst*> values;
size_t max_bit_width = 0;
};
struct Argument {
public:
using copyable_reference = std::reference_wrapper<Argument>;
IR::Type GetType() const;
bool IsImmediate() const;
bool IsVoid() const;
bool FitsInImmediateU32() const;
bool FitsInImmediateS32() const;
bool GetImmediateU1() const;
u8 GetImmediateU8() const;
u16 GetImmediateU16() const;
u32 GetImmediateU32() const;
u64 GetImmediateS32() const;
u64 GetImmediateU64() const;
IR::Cond GetImmediateCond() const;
/// Is this value currently in a GPR?
bool IsInGpr() const;
/// Is this value currently in a XMM?
bool IsInXmm() const;
/// Is this value currently in memory?
bool IsInMemory() const;
private:
friend class RegAlloc;
explicit Argument(RegAlloc& reg_alloc) : reg_alloc(reg_alloc) {}
bool allocated = false;
RegAlloc& reg_alloc;
IR::Value value;
};
class RegAlloc final {
public:
using ArgumentInfo = std::array<Argument, IR::max_arg_count>;
explicit RegAlloc(BlockOfCode& code, size_t num_spills, std::function<Xbyak::Address(HostLoc)> spill_to_addr, std::vector<HostLoc> gpr_order, std::vector<HostLoc> xmm_order);
ArgumentInfo GetArgumentInfo(IR::Inst* inst);
Xbyak::Reg64 UseGpr(Argument& arg);
Xbyak::Xmm UseXmm(Argument& arg);
OpArg UseOpArg(Argument& arg);
void Use(Argument& arg, HostLoc host_loc);
Xbyak::Reg64 UseScratchGpr(Argument& arg);
Xbyak::Xmm UseScratchXmm(Argument& arg);
void UseScratch(Argument& arg, HostLoc host_loc);
void DefineValue(IR::Inst* inst, const Xbyak::Reg& reg);
void DefineValue(IR::Inst* inst, Argument& arg);
void Release(const Xbyak::Reg& reg);
Xbyak::Reg64 ScratchGpr();
Xbyak::Reg64 ScratchGpr(HostLoc desired_location);
Xbyak::Xmm ScratchXmm();
Xbyak::Xmm ScratchXmm(HostLoc desired_location);
void HostCall(IR::Inst* result_def = nullptr,
std::optional<Argument::copyable_reference> arg0 = {},
std::optional<Argument::copyable_reference> arg1 = {},
std::optional<Argument::copyable_reference> arg2 = {},
std::optional<Argument::copyable_reference> arg3 = {});
// TODO: Values in host flags
void EndOfAllocScope();
void AssertNoMoreUses();
private:
friend struct Argument;
std::vector<HostLoc> gpr_order;
std::vector<HostLoc> xmm_order;
HostLoc SelectARegister(const std::vector<HostLoc>& desired_locations) const;
std::optional<HostLoc> ValueLocation(const IR::Inst* value) const;
HostLoc UseImpl(IR::Value use_value, const std::vector<HostLoc>& desired_locations);
HostLoc UseScratchImpl(IR::Value use_value, const std::vector<HostLoc>& desired_locations);
HostLoc ScratchImpl(const std::vector<HostLoc>& desired_locations);
void DefineValueImpl(IR::Inst* def_inst, HostLoc host_loc);
void DefineValueImpl(IR::Inst* def_inst, const IR::Value& use_inst);
HostLoc LoadImmediate(IR::Value imm, HostLoc host_loc);
void Move(HostLoc to, HostLoc from);
void CopyToScratch(size_t bit_width, HostLoc to, HostLoc from);
void Exchange(HostLoc a, HostLoc b);
void MoveOutOfTheWay(HostLoc reg);
void SpillRegister(HostLoc loc);
HostLoc FindFreeSpill() const;
std::vector<HostLocInfo> hostloc_info;
HostLocInfo& LocInfo(HostLoc loc);
const HostLocInfo& LocInfo(HostLoc loc) const;
BlockOfCode& code;
std::function<Xbyak::Address(HostLoc)> spill_to_addr;
void EmitMove(size_t bit_width, HostLoc to, HostLoc from);
void EmitExchange(HostLoc a, HostLoc b);
};
} // namespace Dynarmic::Backend::X64

View File

@@ -1,21 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <cstdio>
#include <exception>
#include <fmt/format.h>
#include "common/assert.h"
namespace Dynarmic::Common {
[[noreturn]] void Terminate(fmt::string_view msg, fmt::format_args args) {
fmt::print(stderr, "dynarmic assertion failed: ");
fmt::vprint(stderr, msg, args);
std::terminate();
}
} // namespace Dynarmic::Common

View File

@@ -1,69 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <fmt/format.h>
#include "common/unlikely.h"
namespace Dynarmic::Common {
[[noreturn]] void Terminate(fmt::string_view msg, fmt::format_args args);
namespace detail {
template <typename... Ts>
[[noreturn]] void TerminateHelper(fmt::string_view msg, Ts... args) {
Terminate(msg, fmt::make_format_args(args...));
}
} // namespace detail
} // namespace Dynarmic::Common
#if defined(NDEBUG)
#if defined(__clang__) || defined(__GNUC__)
#define UNREACHABLE() __builtin_unreachable()
#define ASSUME(expr) [&]{ if (!(expr)) __builtin_unreachable(); }()
#elif defined(_MSC_VER)
#define UNREACHABLE() __assume(0)
#define ASSUME(expr) __assume(expr)
#else
#define UNREACHABLE() ASSERT_FALSE("Unreachable code!")
#define ASSUME(expr)
#endif
#else
#define UNREACHABLE() ASSERT_FALSE("Unreachable code!")
#define ASSUME(expr)
#endif
#ifdef DYNARMIC_IGNORE_ASSERTS
#define ASSERT(expr) ASSUME(expr)
#define ASSERT_MSG(expr, ...) ASSUME(expr)
#define ASSERT_FALSE(...) UNREACHABLE()
#else
#define ASSERT(expr) \
[&]{ \
if (UNLIKELY(!(expr))) { \
::Dynarmic::Common::detail::TerminateHelper(#expr); \
} \
}()
#define ASSERT_MSG(expr, ...) \
[&]{ \
if (UNLIKELY(!(expr))) { \
::Dynarmic::Common::detail::TerminateHelper(#expr "\nMessage: " __VA_ARGS__); \
} \
}()
#define ASSERT_FALSE(...) ::Dynarmic::Common::detail::TerminateHelper("false\nMessage: " __VA_ARGS__)
#endif
#if defined(NDEBUG) || defined(DYNARMIC_IGNORE_ASSERTS)
#define DEBUG_ASSERT(expr) ASSUME(expr)
#define DEBUG_ASSERT_MSG(expr, ...) ASSUME(expr)
#else
#define DEBUG_ASSERT(expr) ASSERT(expr)
#define DEBUG_ASSERT_MSG(expr, ...) ASSERT_MSG(expr, __VA_ARGS__)
#endif

View File

@@ -1,248 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <bitset>
#include <climits>
#include <cstddef>
#include <type_traits>
#include "common/assert.h"
#include "common/common_types.h"
namespace Dynarmic::Common {
/// The size of a type in terms of bits
template<typename T>
constexpr size_t BitSize() {
return sizeof(T) * CHAR_BIT;
}
template <typename T>
constexpr T Ones(size_t count) {
ASSERT_MSG(count <= BitSize<T>(), "count larger than bitsize of T");
if (count == BitSize<T>())
return static_cast<T>(~static_cast<T>(0));
return ~(static_cast<T>(~static_cast<T>(0)) << count);
}
/// Extract bits [begin_bit, end_bit] inclusive from value of type T.
template<typename T>
constexpr T Bits(const size_t begin_bit, const size_t end_bit, const T value) {
ASSERT_MSG(begin_bit <= end_bit, "invalid bit range (position of beginning bit cannot be greater than that of end bit)");
ASSERT_MSG(begin_bit < BitSize<T>(), "begin_bit must be smaller than size of T");
ASSERT_MSG(end_bit < BitSize<T>(), "end_bit must be smaller than size of T");
return (value >> begin_bit) & Ones<T>(end_bit - begin_bit + 1);
}
/// Extract bits [begin_bit, end_bit] inclusive from value of type T.
template<size_t begin_bit, size_t end_bit, typename T>
constexpr T Bits(const T value) {
static_assert(begin_bit <= end_bit, "invalid bit range (position of beginning bit cannot be greater than that of end bit)");
static_assert(begin_bit < BitSize<T>(), "begin_bit must be smaller than size of T");
static_assert(end_bit < BitSize<T>(), "end_bit must be smaller than size of T");
return (value >> begin_bit) & Ones<T>(end_bit - begin_bit + 1);
}
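// e.g. Bits<4, 7>(u8{0xAB}) == 0xA: shift right by begin_bit, then mask with
// Ones<u8>(4) == 0xF to keep the four bits of the inclusive range.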
/// Create a mask of type T for bits [begin_bit, end_bit] inclusive.
template<size_t begin_bit, size_t end_bit, typename T>
constexpr T Mask() {
static_assert(begin_bit <= end_bit, "invalid bit range (position of beginning bit cannot be greater than that of end bit)");
static_assert(begin_bit < BitSize<T>(), "begin_bit must be smaller than size of T");
static_assert(end_bit < BitSize<T>(), "end_bit must be smaller than size of T");
return Ones<T>(end_bit - begin_bit + 1) << begin_bit;
}
/// Clears bits [begin_bit, end_bit] inclusive of value of type T.
template<size_t begin_bit, size_t end_bit, typename T>
constexpr T ClearBits(const T value) {
return value & ~Mask<begin_bit, end_bit, T>();
}
/// Modifies bits [begin_bit, end_bit] inclusive of value of type T.
template<size_t begin_bit, size_t end_bit, typename T>
constexpr T ModifyBits(const T value, const T new_bits) {
return ClearBits<begin_bit, end_bit, T>(value) | ((new_bits << begin_bit) & Mask<begin_bit, end_bit, T>());
}
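// e.g. ModifyBits<8, 15>(u32{0xAABBCCDD}, u32{0xEE}) == 0xAABBEEDD: the mask
// 0x0000FF00 clears byte 1, then (new_bits << 8) & mask writes 0xEE into it.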
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4554)
#endif
/// Extracts a single bit at bit_position from value of type T.
template<typename T>
inline bool Bit(size_t bit_position, const T value) {
ASSERT_MSG(bit_position < BitSize<T>(), "bit_position must be smaller than size of T");
return ((value >> bit_position) & 1) != 0;
}
/// Extracts a single bit at bit_position from value of type T.
template<size_t bit_position, typename T>
constexpr bool Bit(const T value) {
static_assert(bit_position < BitSize<T>(), "bit_position must be smaller than size of T");
return Bit<T>(bit_position, value);
}
/// Clears a single bit at bit_position from value of type T.
template<typename T>
inline T ClearBit(size_t bit_position, const T value) {
ASSERT_MSG(bit_position < BitSize<T>(), "bit_position must be smaller than size of T");
return value & ~(static_cast<T>(1) << bit_position);
}
/// Clears a single bit at bit_position from value of type T.
template<size_t bit_position, typename T>
constexpr T ClearBit(const T value) {
static_assert(bit_position < BitSize<T>(), "bit_position must be smaller than size of T");
return ClearBit<T>(bit_position, value);
}
/// Modifies a single bit at bit_position from value of type T.
template<typename T>
inline T ModifyBit(size_t bit_position, const T value, bool new_bit) {
ASSERT_MSG(bit_position < BitSize<T>(), "bit_position must be smaller than size of T");
return ClearBit<T>(bit_position, value) | (static_cast<T>(new_bit) << bit_position);
}
/// Modifies a single bit at bit_position from value of type T.
template<size_t bit_position, typename T>
constexpr T ModifyBit(const T value, bool new_bit) {
static_assert(bit_position < BitSize<T>(), "bit_position must be smaller than size of T");
return ModifyBit<T>(bit_position, value, new_bit);
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif
/// Sign-extends a value that has bit_count bits to the full bitwidth of type T.
template<size_t bit_count, typename T>
constexpr T SignExtend(const T value) {
static_assert(bit_count <= BitSize<T>(), "bit_count larger than bitsize of T");
    constexpr T mask = static_cast<T>(bit_count == BitSize<T>() ? ~0ULL : (1ULL << bit_count) - 1);  // avoid UB: 1ULL << 64 is undefined
const bool signbit = Bit<bit_count - 1, T>(value);
if (signbit) {
return value | ~mask;
}
return value;
}
/// Sign-extends a value that has bit_count bits to the full bitwidth of type T.
template<typename T>
inline T SignExtend(const size_t bit_count, const T value) {
ASSERT_MSG(bit_count <= BitSize<T>(), "bit_count larger than bitsize of T");
    const T mask = static_cast<T>(bit_count == BitSize<T>() ? ~0ULL : (1ULL << bit_count) - 1);  // avoid UB: 1ULL << 64 is undefined
const bool signbit = Bit<T>(bit_count - 1, value);
if (signbit) {
return value | ~mask;
}
return value;
}
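// Worked example: SignExtend<4>(u32{0b1010}) == 0xFFFFFFFA. The mask is 0xF,
// bit 3 (the sign bit of a 4-bit value) is set, so the upper 28 bits are
// filled with ones; SignExtend<4>(u32{0b0101}) returns 0b0101 unchanged.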
template <typename Integral>
inline size_t BitCount(Integral value) {
return std::bitset<BitSize<Integral>()>(value).count();
}
template <typename T>
constexpr size_t CountLeadingZeros(T value) {
auto x = static_cast<std::make_unsigned_t<T>>(value);
size_t result = BitSize<T>();
while (x != 0) {
x >>= 1;
result--;
}
return result;
}
template <typename T>
constexpr int HighestSetBit(T value) {
auto x = static_cast<std::make_unsigned_t<T>>(value);
int result = -1;
while (x != 0) {
x >>= 1;
result++;
}
return result;
}
template <typename T>
constexpr size_t LowestSetBit(T value) {
auto x = static_cast<std::make_unsigned_t<T>>(value);
if (x == 0)
return BitSize<T>();
size_t result = 0;
while ((x & 1) == 0) {
x >>= 1;
result++;
}
return result;
}
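// e.g. for u8{0b0001'0000}: CountLeadingZeros == 3, HighestSetBit == 4 and
// LowestSetBit == 4. For a zero input, HighestSetBit returns -1 while the
// other two return BitSize<T>().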
template <typename T>
constexpr bool MostSignificantBit(T value) {
return Bit<BitSize<T>() - 1, T>(value);
}
template <typename T>
inline T Replicate(T value, size_t element_size) {
ASSERT_MSG(BitSize<T>() % element_size == 0, "bitsize of T not divisible by element_size");
if (element_size == BitSize<T>())
return value;
return Replicate(value | (value << element_size), element_size * 2);
}
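// e.g. Replicate(u32{0xAB}, 8) == 0xABABABAB: each recursive step doubles the
// element width (8 -> 16 -> 32) until it fills the whole type.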
template <typename T>
constexpr T RotateRight(T value, size_t amount) {
amount %= BitSize<T>();
if (amount == 0) {
return value;
}
auto x = static_cast<std::make_unsigned_t<T>>(value);
return static_cast<T>((x >> amount) | (x << (BitSize<T>() - amount)));
}
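// e.g. RotateRight(u8{0b0000'0001}, 1) == 0b1000'0000; the initial modulo also
// makes RotateRight(x, BitSize<T>()) a no-op instead of an out-of-range shift.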
constexpr u32 SwapHalves32(u32 value) {
return ((value & 0xFFFF0000U) >> 16) |
((value & 0x0000FFFFU) << 16);
}
constexpr u16 SwapBytes16(u16 value) {
return static_cast<u16>(u32{value} >> 8 | u32{value} << 8);
}
constexpr u32 SwapBytes32(u32 value) {
return ((value & 0xFF000000U) >> 24) |
((value & 0x00FF0000U) >> 8) |
((value & 0x0000FF00U) << 8) |
((value & 0x000000FFU) << 24);
}
constexpr u64 SwapBytes64(u64 value) {
return ((value & 0xFF00000000000000ULL) >> 56) |
((value & 0x00FF000000000000ULL) >> 40) |
((value & 0x0000FF0000000000ULL) >> 24) |
((value & 0x000000FF00000000ULL) >> 8) |
((value & 0x00000000FF000000ULL) << 8) |
((value & 0x0000000000FF0000ULL) << 24) |
((value & 0x000000000000FF00ULL) << 40) |
((value & 0x00000000000000FFULL) << 56);
}
} // namespace Dynarmic::Common

View File

@@ -1,45 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstring>
#include <type_traits>
#include <mp/traits/function_info.h>
namespace Dynarmic::Common {
/// Reinterpret objects of one type as another by bit-casting between object representations.
template <class Dest, class Source>
inline Dest BitCast(const Source& source) noexcept {
static_assert(sizeof(Dest) == sizeof(Source), "size of destination and source objects must be equal");
    static_assert(std::is_trivially_copyable_v<Dest>, "destination type must be trivially copyable");
    static_assert(std::is_trivially_copyable_v<Source>, "source type must be trivially copyable");
std::aligned_storage_t<sizeof(Dest), alignof(Dest)> dest;
std::memcpy(&dest, &source, sizeof(dest));
return reinterpret_cast<Dest&>(dest);
}
/// Reinterpret the pointee of a pointer-like object as another type by bit-casting between object representations.
/// Note: this does not verify that the pointee is large enough to read sizeof(Dest) bytes from.
template <class Dest, class SourcePtr>
inline Dest BitCastPointee(const SourcePtr source) noexcept {
static_assert(sizeof(SourcePtr) == sizeof(void*), "source pointer must have size of a pointer");
    static_assert(std::is_trivially_copyable_v<Dest>, "destination type must be trivially copyable");
std::aligned_storage_t<sizeof(Dest), alignof(Dest)> dest;
std::memcpy(&dest, BitCast<void*>(source), sizeof(dest));
return reinterpret_cast<Dest&>(dest);
}
/// Cast a lambda into an equivalent function pointer.
template <class Function>
inline auto FptrCast(Function f) noexcept {
return static_cast<mp::equivalent_function_type<Function>*>(f);
}
} // namespace Dynarmic::Common
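// A minimal usage sketch; the values are hypothetical and the pointee read
// assumes a little-endian host:
//
//     const u32 bits = BitCast<u32>(1.0f);                // bits == 0x3F800000
//     const u64 word = 0x0123456789ABCDEFULL;
//     const u32 low = BitCastPointee<u32>(&word);         // low == 0x89ABCDEF on little-endian
//     auto* fptr = FptrCast([](int x) { return x + 1; }); // int (*)(int), via mp::equivalent_function_type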

View File

@@ -1,28 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstddef>
#include <cstdint>
using u8 = std::uint8_t;
using u16 = std::uint16_t;
using u32 = std::uint32_t;
using u64 = std::uint64_t;
using uptr = std::uintptr_t;
using s8 = std::int8_t;
using s16 = std::int16_t;
using s32 = std::int32_t;
using s64 = std::int64_t;
using sptr = std::intptr_t;
using size_t = std::size_t;
using f32 = float;
using f64 = double;
static_assert(sizeof(f32) == sizeof(u32), "f32 must be 32 bits wide");
static_assert(sizeof(f64) == sizeof(u64), "f64 must be 64 bits wide");

View File

@@ -1,181 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include <array>
#include "common/common_types.h"
#include "common/crypto/aes.h"
namespace Dynarmic::Common::Crypto::AES {
using SubstitutionTable = std::array<u8, 256>;
// See section 5.1.1 Figure 7 in FIPS 197
constexpr SubstitutionTable substitution_box{{
// 0 1 2 3 4 5 6 7 8 9 A B C D E F
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
}};
// See section 5.3.2 Figure 14 in FIPS 197
constexpr SubstitutionTable inverse_substitution_box{{
// 0 1 2 3 4 5 6 7 8 9 A B C D E F
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D
}};
// See section 4.2.1 in FIPS 197.
static constexpr u8 xtime(u8 x) {
return static_cast<u8>((x << 1) ^ (((x >> 7) & 1) * 0x1B));
}
// Multiplication in the AES Galois field, GF(2^8). See section 4.2 in FIPS 197.
static constexpr u8 Multiply(u8 x, u8 y) {
return static_cast<u8>(((y & 1) * x) ^
((y >> 1 & 1) * xtime(x)) ^
((y >> 2 & 1) * xtime(xtime(x))) ^
((y >> 3 & 1) * xtime(xtime(xtime(x)))) ^
((y >> 4 & 1) * xtime(xtime(xtime(xtime(x))))));
}
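// These helpers can be checked against the worked example in section 4.2.1 of
// FIPS 197, where {57} * {13} == {fe}:
static_assert(xtime(0x57) == 0xAE, "xtime is doubling in GF(2^8)");
static_assert(Multiply(0x57, 0x13) == 0xFE, "GF(2^8) product from FIPS 197");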
static void ShiftRows(State& out_state, const State& state) {
// Move zeroth row over
out_state[0] = state[0];
out_state[4] = state[4];
out_state[8] = state[8];
out_state[12] = state[12];
// Rotate first row 1 column left.
u8 temp = state[1];
out_state[1] = state[5];
out_state[5] = state[9];
out_state[9] = state[13];
out_state[13] = temp;
// Rotate second row 2 columns left
temp = state[2];
out_state[2] = state[10];
out_state[10] = temp;
temp = state[6];
out_state[6] = state[14];
out_state[14] = temp;
// Rotate third row 3 columns left
temp = state[3];
out_state[3] = state[15];
out_state[15] = state[11];
out_state[11] = state[7];
out_state[7] = temp;
}
static void InverseShiftRows(State& out_state, const State& state) {
// Move zeroth row over
out_state[0] = state[0];
out_state[4] = state[4];
out_state[8] = state[8];
out_state[12] = state[12];
// Rotate first row 1 column right.
u8 temp = state[13];
out_state[13] = state[9];
out_state[9] = state[5];
out_state[5] = state[1];
out_state[1] = temp;
// Rotate second row 2 columns right
temp = state[2];
out_state[2] = state[10];
out_state[10] = temp;
temp = state[6];
out_state[6] = state[14];
out_state[14] = temp;
// Rotate third row 3 columns right
temp = state[3];
out_state[3] = state[7];
out_state[7] = state[11];
out_state[11] = state[15];
out_state[15] = temp;
}
static void SubBytes(State& state, const SubstitutionTable& table) {
for (size_t i = 0; i < 4; i++) {
for (size_t j = 0; j < 4; j++) {
state[4 * i + j] = table[state[4 * i + j]];
}
}
}
void DecryptSingleRound(State& out_state, const State& state) {
InverseShiftRows(out_state, state);
SubBytes(out_state, inverse_substitution_box);
}
void EncryptSingleRound(State& out_state, const State& state) {
ShiftRows(out_state, state);
SubBytes(out_state, substitution_box);
}
void MixColumns(State& out_state, const State& state) {
for (size_t i = 0; i < out_state.size(); i += 4) {
const u8 a = state[i];
const u8 b = state[i + 1];
const u8 c = state[i + 2];
const u8 d = state[i + 3];
const u8 tmp = a ^ b ^ c ^ d;
out_state[i + 0] = a ^ xtime(a ^ b) ^ tmp;
out_state[i + 1] = b ^ xtime(b ^ c) ^ tmp;
out_state[i + 2] = c ^ xtime(c ^ d) ^ tmp;
out_state[i + 3] = d ^ xtime(d ^ a) ^ tmp;
}
}
void InverseMixColumns(State& out_state, const State& state) {
for (size_t i = 0; i < out_state.size(); i += 4) {
const u8 a = state[i];
const u8 b = state[i + 1];
const u8 c = state[i + 2];
const u8 d = state[i + 3];
out_state[i + 0] = Multiply(a, 0x0E) ^ Multiply(b, 0x0B) ^ Multiply(c, 0x0D) ^ Multiply(d, 0x09);
out_state[i + 1] = Multiply(a, 0x09) ^ Multiply(b, 0x0E) ^ Multiply(c, 0x0B) ^ Multiply(d, 0x0D);
out_state[i + 2] = Multiply(a, 0x0D) ^ Multiply(b, 0x09) ^ Multiply(c, 0x0E) ^ Multiply(d, 0x0B);
out_state[i + 3] = Multiply(a, 0x0B) ^ Multiply(b, 0x0D) ^ Multiply(c, 0x09) ^ Multiply(d, 0x0E);
}
}
} // namespace Dynarmic::Common::Crypto::AES
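// Minimal round-trip sketch (hypothetical check; this assumes State is the
// 16-byte std::array<u8, 16> declared in common/crypto/aes.h):
//
//     State in{}, mixed{}, out{};
//     in[0] = 0xDB; in[1] = 0x13; in[2] = 0x53; in[3] = 0x45;
//     MixColumns(mixed, in);           // first column becomes 8E 4D A1 BC (the well-known db 13 53 45 test column)
//     InverseMixColumns(out, mixed);   // out == in: the two transforms are inverses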

Some files were not shown because too many files have changed in this diff.