remove obsolete files

mgthepro
2022-07-31 12:49:57 +02:00
parent 117076fd71
commit de846fb71e
3631 changed files with 0 additions and 9433291 deletions


@@ -1,2 +0,0 @@
#include "xchar.h"
#warning fmt/locale.h is deprecated, include fmt/format.h or fmt/xchar.h instead


@@ -1,856 +0,0 @@
// Formatting library for C++ - the standard API implementation
//
// Copyright (c) 2012 - present, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.
#ifndef FMT_FORMAT_
#define FMT_FORMAT_
#include <algorithm>
#include <cassert>
#include <variant>
#include "fmt/format.h"
// This implementation verifies the correctness of the standard API proposed in
// P0645 Text Formatting and is optimized for copy-pasting from the paper, not
// for efficiency or readability. An efficient implementation should not use
// std::variant and should store packed argument type tags separately from
// values in basic_format_args for small number of arguments.
namespace std {
template<class T>
constexpr bool Integral = is_integral_v<T>;
template <class O>
using iter_difference_t = ptrdiff_t;
}
// https://fmt.dev/Text%20Formatting.html#format.syn
namespace std {
// [format.error], class format_error
class format_error;
// [format.formatter], formatter
template<class charT> class basic_format_parse_context;
using format_parse_context = basic_format_parse_context<char>;
using wformat_parse_context = basic_format_parse_context<wchar_t>;
template<class Out, class charT> class basic_format_context;
using format_context = basic_format_context<
/* unspecified */ fmt::detail::buffer_appender<char>, char>;
using wformat_context = basic_format_context<
/* unspecified */ fmt::detail::buffer_appender<wchar_t>, wchar_t>;
template<class T, class charT = char> struct formatter {
formatter() = delete;
};
// [format.arguments], arguments
template<class Context> class basic_format_arg;
template<class Visitor, class Context>
/* see below */ auto visit_format_arg(Visitor&& vis, basic_format_arg<Context> arg);
template<class Context, class... Args> struct format_arg_store; // exposition only
template<class Context> class basic_format_args;
using format_args = basic_format_args<format_context>;
using wformat_args = basic_format_args<wformat_context>;
template<class Out, class charT>
using format_args_t = basic_format_args<basic_format_context<Out, charT>>;
template<class Context = format_context, class... Args>
format_arg_store<Context, Args...>
make_format_args(const Args&... args);
template<class... Args>
format_arg_store<wformat_context, Args...>
make_wformat_args(const Args&... args);
// [format.functions], formatting functions
template<class... Args>
string format(string_view fmt, const Args&... args);
template<class... Args>
wstring format(wstring_view fmt, const Args&... args);
string vformat(string_view fmt, format_args args);
wstring vformat(wstring_view fmt, wformat_args args);
template<class Out, class... Args>
Out format_to(Out out, string_view fmt, const Args&... args);
template<class Out, class... Args>
Out format_to(Out out, wstring_view fmt, const Args&... args);
template<class Out>
Out vformat_to(Out out, string_view fmt, format_args_t<fmt::type_identity_t<Out>, char> args);
template<class Out>
Out vformat_to(Out out, wstring_view fmt, format_args_t<fmt::type_identity_t<Out>, wchar_t> args);
template<class Out>
struct format_to_n_result {
Out out;
iter_difference_t<Out> size;
};
template<class Out, class... Args>
format_to_n_result<Out> format_to_n(Out out, iter_difference_t<Out> n,
string_view fmt, const Args&... args);
template<class Out, class... Args>
format_to_n_result<Out> format_to_n(Out out, iter_difference_t<Out> n,
wstring_view fmt, const Args&... args);
template<class... Args>
size_t formatted_size(string_view fmt, const Args&... args);
template<class... Args>
size_t formatted_size(wstring_view fmt, const Args&... args);
}
// https://fmt.dev/Text%20Formatting.html#format.error
namespace std {
class format_error : public runtime_error {
public:
explicit format_error(const string& what_arg) : runtime_error(what_arg) {}
explicit format_error(const char* what_arg) : runtime_error(what_arg) {}
};
}
namespace std {
namespace detail {
struct error_handler {
// This function is intentionally not constexpr to give a compile-time error.
void on_error(const char* message) {
throw std::format_error(message);
}
};
}
}
// https://fmt.dev/Text%20Formatting.html#format.parse_context
namespace std {
template<class charT>
class basic_format_parse_context {
public:
using char_type = charT;
using const_iterator = typename basic_string_view<charT>::const_iterator;
using iterator = const_iterator;
private:
iterator begin_; // exposition only
iterator end_; // exposition only
enum indexing { unknown, manual, automatic }; // exposition only
indexing indexing_; // exposition only
size_t next_arg_id_; // exposition only
size_t num_args_; // exposition only
public:
explicit constexpr basic_format_parse_context(basic_string_view<charT> fmt,
size_t num_args = 0) noexcept;
basic_format_parse_context(const basic_format_parse_context&) = delete;
basic_format_parse_context& operator=(const basic_format_parse_context&) = delete;
constexpr const_iterator begin() const noexcept;
constexpr const_iterator end() const noexcept;
constexpr void advance_to(const_iterator it);
constexpr size_t next_arg_id();
constexpr void check_arg_id(size_t id);
// Implementation detail:
constexpr void check_arg_id(fmt::string_view) {}
detail::error_handler error_handler() const { return {}; }
void on_error(const char* msg) { error_handler().on_error(msg); }
};
}
namespace std {
template<class charT>
/* explicit */ constexpr basic_format_parse_context<charT>::
basic_format_parse_context(basic_string_view<charT> fmt,
size_t num_args) noexcept
: begin_(fmt.begin()), end_(fmt.end()), indexing_(unknown), next_arg_id_(0), num_args_(num_args) {}
template<class charT>
constexpr typename basic_format_parse_context<charT>::const_iterator basic_format_parse_context<charT>::begin() const noexcept { return begin_; }
template<class charT>
constexpr typename basic_format_parse_context<charT>::const_iterator basic_format_parse_context<charT>::end() const noexcept { return end_; }
template<class charT>
constexpr void basic_format_parse_context<charT>::advance_to(typename basic_format_parse_context<charT>::iterator it) { begin_ = it; }
template<class charT>
constexpr size_t basic_format_parse_context<charT>::next_arg_id() {
if (indexing_ == manual)
throw format_error("manual to automatic indexing");
if (indexing_ == unknown)
indexing_ = automatic;
return next_arg_id_++;
}
template<class charT>
constexpr void basic_format_parse_context<charT>::check_arg_id(size_t id) {
// clang doesn't support __builtin_is_constant_evaluated yet
//if (!(!__builtin_is_constant_evaluated() || id < num_args_))
// throw format_error("invalid index is out of range");
if (indexing_ == automatic)
throw format_error("automatic to manual indexing");
if (indexing_ == unknown)
indexing_ = manual;
}
}
// https://fmt.dev/Text%20Formatting.html#format.context
namespace std {
template<class Out, class charT>
class basic_format_context {
basic_format_args<basic_format_context> args_; // exposition only
Out out_; // exposition only
public:
using iterator = Out;
using char_type = charT;
template<class T> using formatter_type = formatter<T, charT>;
basic_format_arg<basic_format_context> arg(size_t id) const;
iterator out();
void advance_to(iterator it);
// Implementation details:
using format_arg = basic_format_arg<basic_format_context>;
basic_format_context(Out out, basic_format_args<basic_format_context> args, fmt::detail::locale_ref)
: args_(args), out_(out) {}
detail::error_handler error_handler() const { return {}; }
basic_format_arg<basic_format_context> arg(fmt::basic_string_view<charT>) const {
return {}; // unused: named arguments are not supported yet
}
void on_error(const char* msg) { error_handler().on_error(msg); }
};
}
namespace std {
template<class O, class charT>
basic_format_arg<basic_format_context<O, charT>> basic_format_context<O, charT>::arg(size_t id) const { return args_.get(id); }
template<class O, class charT>
typename basic_format_context<O, charT>::iterator basic_format_context<O, charT>::out() { return out_; }
template<class O, class charT>
void basic_format_context<O, charT>::advance_to(typename basic_format_context<O, charT>::iterator it) { out_ = it; }
}
namespace std {
namespace detail {
template <typename T>
constexpr bool is_standard_integer_v =
std::is_same_v<T, signed char> ||
std::is_same_v<T, short int> ||
std::is_same_v<T, int> ||
std::is_same_v<T, long int> ||
std::is_same_v<T, long long int>;
template <typename T>
constexpr bool is_standard_unsigned_integer_v =
std::is_same_v<T, unsigned char> ||
std::is_same_v<T, unsigned short int> ||
std::is_same_v<T, unsigned int> ||
std::is_same_v<T, unsigned long int> ||
std::is_same_v<T, unsigned long long int>;
template <typename T, typename Char> struct formatter;
}
}
// https://fmt.dev/Text%20Formatting.html#format.arg
namespace std {
template<class Context>
class basic_format_arg {
public:
class handle;
private:
using char_type = typename Context::char_type; // exposition only
variant<monostate, bool, char_type,
int, unsigned int, long long int, unsigned long long int,
double, long double,
const char_type*, basic_string_view<char_type>,
const void*, handle> value; // exposition only
template<typename T,
typename = enable_if_t<
std::is_same_v<T, bool> ||
std::is_same_v<T, char_type> ||
(std::is_same_v<T, char> && std::is_same_v<char_type, wchar_t>) ||
detail::is_standard_integer_v<T> ||
detail::is_standard_unsigned_integer_v<T> ||
sizeof(typename Context::template formatter_type<T>().format(declval<const T&>(), declval<Context&>())) != 0
>> explicit basic_format_arg(const T& v) noexcept; // exposition only
explicit basic_format_arg(float n) noexcept; // exposition only
explicit basic_format_arg(double n) noexcept; // exposition only
explicit basic_format_arg(long double n) noexcept; // exposition only
explicit basic_format_arg(const char_type* s); // exposition only
template<class traits>
explicit basic_format_arg(
basic_string_view<char_type, traits> s) noexcept; // exposition only
template<class traits, class Allocator>
explicit basic_format_arg(
const basic_string<char_type, traits, Allocator>& s) noexcept; // exposition only
explicit basic_format_arg(nullptr_t) noexcept; // exposition only
template<class T, typename = enable_if_t<is_void_v<T>>>
explicit basic_format_arg(const T* p) noexcept; // exposition only
// Fails due to a bug in clang
//template<class Visitor, class Ctx>
// friend auto visit_format_arg(Visitor&& vis,
// basic_format_arg<Ctx> arg); // exposition only
friend auto get_value(basic_format_arg arg) {
return arg.value;
}
template <typename T, typename Char> friend struct detail::formatter;
template<class Ctx, class... Args>
friend format_arg_store<Ctx, Args...>
make_format_args(const Args&... args); // exposition only
public:
basic_format_arg() noexcept;
explicit operator bool() const noexcept;
};
}
namespace std {
template<class Context>
basic_format_arg<Context>::basic_format_arg() noexcept {}
template<class Context>
template<class T, typename> /* explicit */ basic_format_arg<Context>::basic_format_arg(const T& v) noexcept {
if constexpr (std::is_same_v<T, bool> || std::is_same_v<T, char_type>)
value = v;
else if constexpr (std::is_same_v<T, char> && std::is_same_v<char_type, wchar_t>)
value = static_cast<wchar_t>(v);
else if constexpr (detail::is_standard_integer_v<T> && sizeof(T) <= sizeof(int))
value = static_cast<int>(v);
else if constexpr (detail::is_standard_unsigned_integer_v<T> && sizeof(T) <= sizeof(unsigned))
value = static_cast<unsigned>(v);
else if constexpr (detail::is_standard_integer_v<T>)
value = static_cast<long long int>(v);
else if constexpr (detail::is_standard_unsigned_integer_v<T>)
value = static_cast<unsigned long long int>(v);
else if constexpr (sizeof(typename Context::template formatter_type<T>().format(declval<const T&>(), declval<Context&>())) != 0)
value = handle(v);
}
template<class Context>
/* explicit */ basic_format_arg<Context>::basic_format_arg(float n) noexcept
: value(static_cast<double>(n)) {}
template<class Context>
/* explicit */ basic_format_arg<Context>::basic_format_arg(double n) noexcept
: value(n) {}
template<class Context>
/* explicit */ basic_format_arg<Context>::basic_format_arg(long double n) noexcept
: value(n) {}
template<class Context>
/* explicit */ basic_format_arg<Context>::basic_format_arg(const typename basic_format_arg<Context>::char_type* s)
: value(s) {
assert(s != nullptr);
}
template<class Context>
template<class traits>
/* explicit */ basic_format_arg<Context>::basic_format_arg(basic_string_view<char_type, traits> s) noexcept
: value(s) {}
template<class Context>
template<class traits, class Allocator>
/* explicit */ basic_format_arg<Context>::basic_format_arg(
const basic_string<char_type, traits, Allocator>& s) noexcept
: value(basic_string_view<char_type>(s.data(), s.size())) {}
template<class Context>
/* explicit */ basic_format_arg<Context>::basic_format_arg(nullptr_t) noexcept
: value(static_cast<const void*>(nullptr)) {}
template<class Context>
template<class T, typename> /* explicit */ basic_format_arg<Context>::basic_format_arg(const T* p) noexcept
: value(p) {}
template<class Context>
/* explicit */ basic_format_arg<Context>::operator bool() const noexcept {
return !holds_alternative<monostate>(value);
}
}
namespace std {
template<class Context>
class basic_format_arg<Context>::handle {
const void* ptr_; // exposition only
void (*format_)(basic_format_parse_context<char_type>&,
Context&, const void*); // exposition only
template<class T> explicit handle(const T& val) noexcept; // exposition only
friend class basic_format_arg<Context>; // exposition only
public:
void format(basic_format_parse_context<char_type>&, Context& ctx) const;
};
}
namespace std {
template<class Context>
template<class T> /* explicit */ basic_format_arg<Context>::handle::handle(const T& val) noexcept
: ptr_(&val), format_([](basic_format_parse_context<char_type>& parse_ctx, Context& format_ctx, const void* ptr) {
typename Context::template formatter_type<T> f;
parse_ctx.advance_to(f.parse(parse_ctx));
format_ctx.advance_to(f.format(*static_cast<const T*>(ptr), format_ctx));
}) {}
template<class Context>
void basic_format_arg<Context>::handle::format(basic_format_parse_context<char_type>& parse_ctx, Context& format_ctx) const {
format_(parse_ctx, format_ctx, ptr_);
}
// https://fmt.dev/Text%20Formatting.html#format.visit
template<class Visitor, class Context>
auto visit_format_arg(Visitor&& vis, basic_format_arg<Context> arg) {
return visit(vis, get_value(arg));
}
}
// https://fmt.dev/Text%20Formatting.html#format.store
namespace std {
template<class Context, class... Args>
struct format_arg_store { // exposition only
array<basic_format_arg<Context>, sizeof...(Args)> args;
};
}
// https://fmt.dev/Text%20Formatting.html#format.basic_args
namespace std {
template<class Context>
class basic_format_args {
size_t size_; // exposition only
const basic_format_arg<Context>* data_; // exposition only
public:
basic_format_args() noexcept;
template<class... Args>
basic_format_args(const format_arg_store<Context, Args...>& store) noexcept;
basic_format_arg<Context> get(size_t i) const noexcept;
};
}
namespace std {
template<class Context>
basic_format_args<Context>::basic_format_args() noexcept : size_(0) {}
template<class Context>
template<class... Args>
basic_format_args<Context>::basic_format_args(const format_arg_store<Context, Args...>& store) noexcept
: size_(sizeof...(Args)), data_(store.args.data()) {}
template<class Context>
basic_format_arg<Context> basic_format_args<Context>::get(size_t i) const noexcept {
return i < size_ ? data_[i] : basic_format_arg<Context>();
}
}
namespace std {
// https://fmt.dev/Text%20Formatting.html#format.make_args
template<class Context /*= format_context*/, class... Args>
format_arg_store<Context, Args...> make_format_args(const Args&... args) {
return {basic_format_arg<Context>(args)...};
}
// https://fmt.dev/Text%20Formatting.html#format.make_wargs
template<class... Args>
format_arg_store<wformat_context, Args...> make_wformat_args(const Args&... args) {
return make_format_args<wformat_context>(args...);
}
}
namespace std {
namespace detail {
template <typename OutputIt, typename Char>
class arg_formatter
: public fmt::detail::arg_formatter_base<OutputIt, Char, error_handler> {
private:
using char_type = Char;
using base = fmt::detail::arg_formatter_base<OutputIt, Char, error_handler>;
using format_context = std::basic_format_context<OutputIt, Char>;
using parse_context = basic_format_parse_context<Char>;
parse_context* parse_ctx_;
format_context& ctx_;
public:
using iterator = OutputIt;
using format_specs = typename base::format_specs;
/**
\rst
Constructs an argument formatter object.
*ctx* is a reference to the formatting context,
*spec* contains format specifier information for standard argument types.
\endrst
*/
arg_formatter(format_context& ctx, parse_context* parse_ctx = nullptr, fmt::format_specs* spec = nullptr)
: base(ctx.out(), spec, {}), parse_ctx_(parse_ctx), ctx_(ctx) {}
using base::operator();
/** Formats an argument of a user-defined type. */
iterator operator()(typename std::basic_format_arg<format_context>::handle handle) {
handle.format(*parse_ctx_, ctx_);
return this->out();
}
iterator operator()(monostate) {
throw format_error("");
}
};
template <typename Context>
inline fmt::detail::type get_type(basic_format_arg<Context> arg) {
return visit_format_arg([&] (auto val) {
using char_type = typename Context::char_type;
using T = decltype(val);
if (std::is_same_v<T, monostate>)
return fmt::detail::type::none_type;
if (std::is_same_v<T, bool>)
return fmt::detail::type::bool_type;
if (std::is_same_v<T, char_type>)
return fmt::detail::type::char_type;
if (std::is_same_v<T, int>)
return fmt::detail::type::int_type;
if (std::is_same_v<T, unsigned int>)
return fmt::detail::type::uint_type;
if (std::is_same_v<T, long long int>)
return fmt::detail::type::long_long_type;
if (std::is_same_v<T, unsigned long long int>)
return fmt::detail::type::ulong_long_type;
if (std::is_same_v<T, double>)
return fmt::detail::type::double_type;
if (std::is_same_v<T, long double>)
return fmt::detail::type::long_double_type;
if (std::is_same_v<T, const char_type*>)
return fmt::detail::type::cstring_type;
if (std::is_same_v<T, basic_string_view<char_type>>)
return fmt::detail::type::string_type;
if (std::is_same_v<T, const void*>)
return fmt::detail::type::pointer_type;
assert(get_value(arg).index() == 12);
return fmt::detail::type::custom_type;
}, arg);
}
template <typename Context>
class custom_formatter {
private:
using parse_context = basic_format_parse_context<typename Context::char_type>;
parse_context& parse_ctx_;
Context& format_ctx_;
public:
custom_formatter(parse_context& parse_ctx, Context& ctx) : parse_ctx_(parse_ctx), format_ctx_(ctx) {}
bool operator()(typename basic_format_arg<Context>::handle h) const {
h.format(parse_ctx_, format_ctx_);
return true;
}
template <typename T> bool operator()(T) const { return false; }
};
template <typename ArgFormatter, typename Char, typename Context>
struct format_handler : detail::error_handler {
using iterator = typename ArgFormatter::iterator;
format_handler(iterator out, basic_string_view<Char> str,
basic_format_args<Context> format_args,
fmt::detail::locale_ref loc)
: parse_ctx(str), context(out, format_args, loc) {}
void on_text(const Char* begin, const Char* end) {
auto size = fmt::detail::to_unsigned(end - begin);
auto out = context.out();
auto&& it = fmt::detail::reserve(out, size);
it = std::copy_n(begin, size, it);
context.advance_to(out);
}
int on_arg_id() { return parse_ctx.next_arg_id(); }
int on_arg_id(unsigned id) { return parse_ctx.check_arg_id(id), id; }
int on_arg_id(fmt::basic_string_view<Char>) { return 0; }
void on_replacement_field(int id, const Char* p) {
auto arg = context.arg(id);
parse_ctx.advance_to(parse_ctx.begin() + (p - &*parse_ctx.begin()));
custom_formatter<Context> f(parse_ctx, context);
if (!visit_format_arg(f, arg))
context.advance_to(visit_format_arg(ArgFormatter(context, &parse_ctx), arg));
}
const Char* on_format_specs(int id, const Char* begin, const Char* end) {
auto arg = context.arg(id);
parse_ctx.advance_to(parse_ctx.begin() + (begin - &*parse_ctx.begin()));
custom_formatter<Context> f(parse_ctx, context);
if (visit_format_arg(f, arg)) return &*parse_ctx.begin();
fmt::basic_format_specs<Char> specs;
using fmt::detail::specs_handler;
using parse_context = basic_format_parse_context<Char>;
fmt::detail::specs_checker<specs_handler<parse_context, Context>> handler(
specs_handler<parse_context, Context>(specs, parse_ctx, context), get_type(arg));
begin = parse_format_specs(begin, end, handler);
if (begin == end || *begin != '}') on_error("missing '}' in format string");
parse_ctx.advance_to(parse_ctx.begin() + (begin - &*parse_ctx.begin()));
context.advance_to(visit_format_arg(ArgFormatter(context, &parse_ctx, &specs), arg));
return begin;
}
basic_format_parse_context<Char> parse_ctx;
Context context;
};
template <typename T, typename Char>
struct formatter {
// Parses format specifiers stopping either at the end of the range or at the
// terminating '}'.
template <typename ParseContext>
FMT_CONSTEXPR typename ParseContext::iterator parse(ParseContext& ctx) {
namespace detail = fmt::detail;
typedef detail::dynamic_specs_handler<ParseContext> handler_type;
auto type = detail::mapped_type_constant<T, fmt::buffer_context<Char>>::value;
detail::specs_checker<handler_type> handler(handler_type(specs_, ctx),
type);
auto it = parse_format_specs(ctx.begin(), ctx.end(), handler);
auto type_spec = specs_.type;
auto eh = ctx.error_handler();
switch (type) {
case detail::type::none_type:
FMT_ASSERT(false, "invalid argument type");
break;
case detail::type::int_type:
case detail::type::uint_type:
case detail::type::long_long_type:
case detail::type::ulong_long_type:
case detail::type::bool_type:
handle_int_type_spec(type_spec,
detail::int_type_checker<decltype(eh)>(eh));
break;
case detail::type::char_type:
handle_char_specs(
&specs_, detail::char_specs_checker<decltype(eh)>(type_spec, eh));
break;
case detail::type::double_type:
case detail::type::long_double_type:
detail::parse_float_type_spec(specs_, eh);
break;
case detail::type::cstring_type:
detail::handle_cstring_type_spec(
type_spec, detail::cstring_type_checker<decltype(eh)>(eh));
break;
case detail::type::string_type:
detail::check_string_type_spec(type_spec, eh);
break;
case detail::type::pointer_type:
detail::check_pointer_type_spec(type_spec, eh);
break;
case detail::type::custom_type:
// Custom format specifiers should be checked in parse functions of
// formatter specializations.
break;
}
return it;
}
template <typename FormatContext>
auto format(const T& val, FormatContext& ctx) -> decltype(ctx.out()) {
fmt::detail::handle_dynamic_spec<fmt::detail::width_checker>(
specs_.width, specs_.width_ref, ctx);
fmt::detail::handle_dynamic_spec<fmt::detail::precision_checker>(
specs_.precision, specs_.precision_ref, ctx);
using af = arg_formatter<typename FormatContext::iterator,
typename FormatContext::char_type>;
return visit_format_arg(af(ctx, nullptr, &specs_),
basic_format_arg<FormatContext>(val));
}
private:
fmt::detail::dynamic_format_specs<Char> specs_;
};
} // namespace detail
// https://fmt.dev/Text%20Formatting.html#format.functions
template<class... Args>
string format(string_view fmt, const Args&... args) {
return vformat(fmt, make_format_args(args...));
}
template<class... Args>
wstring format(wstring_view fmt, const Args&... args) {
return vformat(fmt, make_wformat_args(args...));
}
string vformat(string_view fmt, format_args args) {
fmt::memory_buffer mbuf;
fmt::detail::buffer<char>& buf = mbuf;
using af = detail::arg_formatter<fmt::format_context::iterator, char>;
detail::format_handler<af, char, format_context>
h(fmt::detail::buffer_appender<char>(buf), fmt, args, {});
fmt::detail::parse_format_string<false>(fmt::to_string_view(fmt), h);
return to_string(mbuf);
}
wstring vformat(wstring_view fmt, wformat_args args);
template<class Out, class... Args>
Out format_to(Out out, string_view fmt, const Args&... args) {
using context = basic_format_context<Out, decltype(fmt)::value_type>;
return vformat_to(out, fmt, make_format_args<context>(args...));
}
template<class Out, class... Args>
Out format_to(Out out, wstring_view fmt, const Args&... args) {
using context = basic_format_context<Out, decltype(fmt)::value_type>;
return vformat_to(out, fmt, make_format_args<context>(args...));
}
template<class Out>
Out vformat_to(Out out, string_view fmt, format_args_t<fmt::type_identity_t<Out>, char> args) {
using af = detail::arg_formatter<Out, char>;
detail::format_handler<af, char, basic_format_context<Out, char>>
h(out, fmt, args, {});
fmt::detail::parse_format_string<false>(fmt::to_string_view(fmt), h);
return h.context.out();
}
template<class Out>
Out vformat_to(Out out, wstring_view fmt, format_args_t<fmt::type_identity_t<Out>, wchar_t> args);
template<class Out, class... Args>
format_to_n_result<Out> format_to_n(Out out, iter_difference_t<Out> n,
string_view fmt, const Args&... args);
template<class Out, class... Args>
format_to_n_result<Out> format_to_n(Out out, iter_difference_t<Out> n,
wstring_view fmt, const Args&... args);
template<class... Args>
size_t formatted_size(string_view fmt, const Args&... args);
template<class... Args>
size_t formatted_size(wstring_view fmt, const Args&... args);
#define charT char
template<> struct formatter<charT, charT> : detail::formatter<charT, charT> {};
template<> struct formatter<char, wchar_t>;
template<> struct formatter<charT*, charT> : detail::formatter<const charT*, charT> {};
template<> struct formatter<const charT*, charT> : detail::formatter<const charT*, charT> {};
template<size_t N> struct formatter<const charT[N], charT>
: detail::formatter<std::basic_string_view<charT>, charT> {};
template<class traits, class Allocator>
struct formatter<basic_string<charT, traits, Allocator>, charT>
: detail::formatter<std::basic_string_view<charT>, charT> {};
template<class traits>
struct formatter<basic_string_view<charT, traits>, charT>
: detail::formatter<std::basic_string_view<charT>, charT> {};
template <> struct formatter<nullptr_t, charT> : detail::formatter<const void*, charT> {};
template <> struct formatter<void*, charT> : detail::formatter<const void*, charT> {};
template <> struct formatter<const void*, charT> : detail::formatter<const void*, charT> {};
template <> struct formatter<bool, charT> : detail::formatter<bool, charT> {};
template <> struct formatter<signed char, charT> : detail::formatter<int, charT> {};
template <> struct formatter<short, charT> : detail::formatter<int, charT> {};
template <> struct formatter<int, charT> : detail::formatter<int, charT> {};
template <> struct formatter<long, charT>
: detail::formatter<std::conditional_t<sizeof(long) == sizeof(int), int, long long>, charT> {};
template <> struct formatter<long long, charT> : detail::formatter<long long, charT> {};
template <> struct formatter<unsigned char, charT> : detail::formatter<unsigned int, charT> {};
template <> struct formatter<unsigned short, charT> : detail::formatter<unsigned int, charT> {};
template <> struct formatter<unsigned int, charT> : detail::formatter<unsigned int, charT> {};
template <> struct formatter<unsigned long, charT>
: detail::formatter<std::conditional_t<sizeof(long) == sizeof(int), unsigned, unsigned long long>, charT> {};
template <> struct formatter<unsigned long long, charT> : detail::formatter<unsigned long long, charT> {};
template <> struct formatter<float, charT> : detail::formatter<double, charT> {};
template <> struct formatter<double, charT> : detail::formatter<double, charT> {};
template <> struct formatter<long double, charT> : detail::formatter<long double, charT> {};
#undef charT
#define charT wchar_t
template<> struct formatter<charT, charT> : detail::formatter<charT, charT> {};
template<> struct formatter<char, wchar_t> : detail::formatter<charT, charT> {};
template<> struct formatter<charT*, charT> : detail::formatter<const charT*, charT> {};
template<> struct formatter<const charT*, charT> : detail::formatter<const charT*, charT> {};
template<size_t N> struct formatter<const charT[N], charT>
: detail::formatter<std::basic_string_view<charT>, charT> {};
template<class traits, class Allocator>
struct formatter<std::basic_string<charT, traits, Allocator>, charT>
: detail::formatter<std::basic_string_view<charT>, charT> {};
template<class traits>
struct formatter<std::basic_string_view<charT, traits>, charT>
: detail::formatter<std::basic_string_view<charT>, charT> {};
template <> struct formatter<nullptr_t, charT> : detail::formatter<const void*, charT> {};
template <> struct formatter<void*, charT> : detail::formatter<const void*, charT> {};
template <> struct formatter<const void*, charT> : detail::formatter<const void*, charT> {};
template <> struct formatter<bool, charT> : detail::formatter<bool, charT> {};
template <> struct formatter<signed char, charT> : detail::formatter<int, charT> {};
template <> struct formatter<short, charT> : detail::formatter<int, charT> {};
template <> struct formatter<int, charT> : detail::formatter<int, charT> {};
template <> struct formatter<long, charT>
: detail::formatter<std::conditional_t<sizeof(long) == sizeof(int), int, long long>, charT> {};
template <> struct formatter<long long, charT> : detail::formatter<long long, charT> {};
template <> struct formatter<unsigned char, charT> : detail::formatter<unsigned int, charT> {};
template <> struct formatter<unsigned short, charT> : detail::formatter<unsigned int, charT> {};
template <> struct formatter<unsigned int, charT> : detail::formatter<unsigned int, charT> {};
template <> struct formatter<unsigned long, charT>
: detail::formatter<std::conditional_t<sizeof(long) == sizeof(int), unsigned, unsigned long long>, charT> {};
template <> struct formatter<unsigned long long, charT> : detail::formatter<unsigned long long, charT> {};
template <> struct formatter<float, charT> : detail::formatter<double, charT> {};
template <> struct formatter<double, charT> : detail::formatter<double, charT> {};
template <> struct formatter<long double, charT> : detail::formatter<long double, charT> {};
#undef charT
template<> struct formatter<const wchar_t, char> {
formatter() = delete;
};
}
#endif // FMT_FORMAT_


@@ -1,161 +0,0 @@
#include <format>
#include "gtest/gtest.h"
TEST(std_format_test, escaping) {
using namespace std;
string s = format("{0}-{{", 8); // s == "8-{"
EXPECT_EQ(s, "8-{");
}
TEST(std_format_test, indexing) {
using namespace std;
string s0 = format("{} to {}", "a", "b"); // OK: automatic indexing
string s1 = format("{1} to {0}", "a", "b"); // OK: manual indexing
EXPECT_EQ(s0, "a to b");
EXPECT_EQ(s1, "b to a");
// Error: mixing automatic and manual indexing
EXPECT_THROW(string s2 = format("{0} to {}", "a", "b"), std::format_error);
// Error: mixing automatic and manual indexing
EXPECT_THROW(string s3 = format("{} to {1}", "a", "b"), std::format_error);
}
TEST(std_format_test, alignment) {
using namespace std;
char c = 120;
string s0 = format("{:6}", 42); // s0 == "    42"
string s1 = format("{:6}", 'x'); // s1 == "x     "
string s2 = format("{:*<6}", 'x'); // s2 == "x*****"
string s3 = format("{:*>6}", 'x'); // s3 == "*****x"
string s4 = format("{:*^6}", 'x'); // s4 == "**x***"
// Error: '=' with charT and no integer presentation type
EXPECT_THROW(string s5 = format("{:=6}", 'x'), std::format_error);
string s6 = format("{:6d}", c); // s6 == "   120"
string s7 = format("{:6}", true); // s7 == "true  "
EXPECT_EQ(s0, "    42");
EXPECT_EQ(s1, "x     ");
EXPECT_EQ(s2, "x*****");
EXPECT_EQ(s3, "*****x");
EXPECT_EQ(s4, "**x***");
EXPECT_EQ(s6, "   120");
EXPECT_EQ(s7, "true  ");
}
TEST(std_format_test, float) {
using namespace std;
double inf = numeric_limits<double>::infinity();
double nan = numeric_limits<double>::quiet_NaN();
string s0 = format("{0:} {0:+} {0:-} {0: }", 1); // s0 == "1 +1 1 1"
string s1 = format("{0:} {0:+} {0:-} {0: }", -1); // s1 == "-1 -1 -1 -1"
string s2 =
format("{0:} {0:+} {0:-} {0: }", inf); // s2 == "inf +inf inf inf"
string s3 =
format("{0:} {0:+} {0:-} {0: }", nan); // s3 == "nan +nan nan nan"
EXPECT_EQ(s0, "1 +1 1 1");
EXPECT_EQ(s1, "-1 -1 -1 -1");
EXPECT_EQ(s2, "inf +inf inf inf");
EXPECT_EQ(s3, "nan +nan nan nan");
}
TEST(std_format_test, int) {
using namespace std;
string s0 = format("{}", 42); // s0 == "42"
string s1 = format("{0:b} {0:d} {0:o} {0:x}", 42); // s1 == "101010 42 52 2a"
string s2 = format("{0:#x} {0:#X}", 42); // s2 == "0x2a 0X2A"
string s3 = format("{:L}", 1234); // s3 == "1234" (depends on the locale)
EXPECT_EQ(s0, "42");
EXPECT_EQ(s1, "101010 42 52 2a");
EXPECT_EQ(s2, "0x2a 0X2A");
EXPECT_EQ(s3, "1234");
}
#include <format>
enum color { red, green, blue };
const char* color_names[] = {"red", "green", "blue"};
template <> struct std::formatter<color> : std::formatter<const char*> {
auto format(color c, format_context& ctx) {
return formatter<const char*>::format(color_names[c], ctx);
}
};
struct err {};
TEST(std_format_test, formatter) {
std::string s0 = std::format("{}", 42); // OK: library-provided formatter
// std::string s1 = std::format("{}", L"foo"); // Ill-formed: disabled
// formatter
std::string s2 = std::format("{}", red); // OK: user-provided formatter
// std::string s3 = std::format("{}", err{}); // Ill-formed: disabled
// formatter
EXPECT_EQ(s0, "42");
EXPECT_EQ(s2, "red");
}
struct S {
int value;
};
template <> struct std::formatter<S> {
size_t width_arg_id = 0;
// Parses a width argument id in the format { <digit> }.
constexpr auto parse(format_parse_context& ctx) {
constexpr auto is_ascii_digit = [](const char c) {
return c >= '0' && c <= '9';
};
auto iter = ctx.begin();
// auto get_char = [&]() { return iter != ctx.end() ? *iter : 0; };
auto get_char = [&]() { return iter != ctx.end() ? *iter : '\0'; };
if (get_char() != '{') return iter;
++iter;
char c = get_char();
if (!is_ascii_digit(c) || (++iter, get_char()) != '}')
throw format_error("invalid format");
width_arg_id = fmt::detail::to_unsigned(c - '0');
ctx.check_arg_id(width_arg_id);
return ++iter;
}
// Formats S with width given by the argument width_arg_id.
auto format(S s, format_context& ctx) {
int width = visit_format_arg(
[](auto value) -> int {
using type = decltype(value);
if constexpr (!is_integral_v<type> || is_same_v<type, bool>)
throw format_error("width is not integral");
// else if (value < 0 || value > numeric_limits<int>::max())
else if (fmt::detail::is_negative(value) ||
value > numeric_limits<int>::max())
throw format_error("invalid width");
else
return static_cast<int>(value);
},
ctx.arg(width_arg_id));
return format_to(ctx.out(), "{0:{1}}", s.value, width);
}
};
TEST(std_format_test, parsing) {
std::string s = std::format("{0:{1}}", S{42}, 10); // s == "        42"
EXPECT_EQ(s, "        42");
}
#if FMT_USE_INT128
template <> struct std::formatter<__int128_t> : std::formatter<long long> {
auto format(__int128_t n, format_context& ctx) {
// Format as a long long since we only want to check if it is possible to
// specialize formatter for __int128_t.
return formatter<long long>::format(static_cast<long long>(n), ctx);
}
};
TEST(std_format_test, int128) {
__int128_t n = 42;
auto s = std::format("{}", n);
EXPECT_EQ(s, "42");
}
#endif // FMT_USE_INT128


@@ -1,6 +0,0 @@
// This file is part of the mcl project.
// Copyright (c) 2022 merryhime
// SPDX-License-Identifier: MIT
#define CATCH_CONFIG_MAIN
#include "catch2/catch.hpp"


@@ -1,19 +0,0 @@
language: cpp
os: linux
matrix:
  - compiler: clang
    env: CXX=clang
    dist: bionic
  - compiler: g++-8
    env: CXX=g++-8
    addons:
      apt:
        sources:
          - ubuntu-toolchain-r-test
        packages:
          - g++-8
script:
  - $CXX --version
  - $CXX -I./include -std=c++17 -Wall -Wextra -Wcast-qual -pedantic -pedantic-errors -Werror tests/all_tests.cpp


@@ -1,12 +0,0 @@
Copyright (C) 2017 MerryMage
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.


@@ -1,122 +0,0 @@
mp
===
A small, 0BSD-licensed metaprogramming library for C++17.
This is intended to be a lightweight and easy to understand implementation of a subset of useful metaprogramming utilities.
Usage
-----
Just add the `include` directory to your include path. That's it.
`typelist`
----------
An `mp::list` is a list of types. This set of headers provides metafunctions for manipulating lists of types.
### Constructor
* `mp::list`: Constructs a list.
* `mp::lift_sequence`: Lifts a value sequence into a list. Intended for use on `std::integer_sequence`.
### Element access
* `mp::get`: Gets a numbered element of a list.
* `mp::head`: Gets the first element of a list.
* `mp::tail`: Gets all-but-the-first-element as a list.
### Properties
* `mp::length`: Gets the length of a list.
* `mp::contains`: Determines if this list contains a specified element.
### Modifiers
* `mp::append`: Constructs a list with the provided elements appended to it.
* `mp::prepend`: Constructs a list with the provided elements prepended to it.
### Operations
* `mp::concat`: Concatenates multiple lists together.
* `mp::cartesian_product`: Construct a list containing the [cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) of the provided lists.
### Conversions
* `mp::lower_to_tuple`: This operation only works on a list solely containing metavalues. Results in a `std::tuple` with equivalent values.
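For illustration, here is a minimal sketch of the typelist utilities in use. It is not taken from the repository; the file name is made up, but the header paths match the `#include` lines that appear in the library's own headers.

```cpp
// example.cpp -- illustrative sketch only.
#include <type_traits>

#include <mp/typelist/append.h>
#include <mp/typelist/concat.h>
#include <mp/typelist/list.h>

using AB  = mp::list<int, bool>;
using ABC = mp::append<AB, char>;                     // list<int, bool, char>
using All = mp::concat<AB, mp::list<float, double>>;  // list<int, bool, float, double>

static_assert(std::is_same_v<ABC, mp::list<int, bool, char>>);
static_assert(std::is_same_v<All, mp::list<int, bool, float, double>>);

int main() {}
```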
`metavalue`
-----------
A metavalue is an instantiation of the `std::integral_constant` template.
### Constants
* `mp::true_type`: Aliases to [`std::true_type`](https://en.cppreference.com/w/cpp/types/integral_constant)
* `mp::false_type`: Aliases to [`std::false_type`](https://en.cppreference.com/w/cpp/types/integral_constant)
### Constructor
* `mp::value`: Aliases to [`std::integral_constant`](https://en.cppreference.com/w/cpp/types/integral_constant)
* `mp::bool_value`: Aliases to [`std::bool_constant`](https://en.cppreference.com/w/cpp/types/integral_constant)
* `mp::size_value`: Constructs a metavalue with a value of type `std::size_t`
* `mp::lift_value`: Lifts a value of arbitrary type to become a metavalue
### Conversions
* `mp::value_cast`
### Operations
* `mp::value_equal`: Compares value equality, ignores type. Use `std::is_same` for strict comparison.
* `mp::logic_if`: Like `std::conditional` but takes a boolean metavalue as its first argument.
* `mp::bit_not`: Bitwise not
* `mp::bit_and`: Bitwise and
* `mp::bit_or`: Bitwise or
* `mp::bit_xor`: Bitwise xor
* `mp::logic_not`: Logical not
* `mp::logic_and`: Logical conjunction (no short-circuiting, always results in an `mp::bool_value`)
* `mp::logic_or`: Logical disjunction (no short-circuiting, always results in an `mp::bool_value`)
* `mp::conjunction`: Logical conjunction (with short-circuiting, preserves type)
* `mp::disjunction`: Logical disjunction (with short-circuiting, preserves type)
* `mp::sum`: Sum of values
* `mp::product`: Product of values
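As a rough illustration of how these compose, here is a sketch (not from the repository; the `<mp/metavalue/sum.h>` and `<mp/metavalue/logic_and.h>` paths are assumed from the directory layout rather than confirmed by this diff):

```cpp
// Illustrative sketch; the sum.h and logic_and.h paths are assumed.
#include <type_traits>

#include <mp/metavalue/lift_value.h>
#include <mp/metavalue/logic_and.h>
#include <mp/metavalue/sum.h>
#include <mp/metavalue/value.h>

using two   = mp::lift_value<2>;  // std::integral_constant<int, 2>
using three = mp::lift_value<3>;

static_assert(mp::sum_v<two, three> == 5);
static_assert(std::is_same_v<mp::sum<two, three>, std::integral_constant<int, 5>>);
static_assert(mp::logic_and_v<mp::true_type, mp::bool_value<true>>);

int main() {}
```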
`metafunction`
--------------
* `std::void_t`: Always returns `void`.
* `mp::identity`: Identity metafunction. Can be used to establish a non-deduced context. See also C++20 `std::type_identity`.
* `mp::apply`: Invoke a provided metafunction with arguments specified in a list.
* `mp::map`: Apply a provided metafunction to each element of a list.
* `mp::bind`: Curry a metafunction. A macro `MM_MP_BIND` is provided to make this a little prettier.
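A short sketch of `apply`, `map` and `bind` working together (illustrative only; `ptr` is a local helper alias, not part of the library, and the header paths mirror the includes used by the library's own headers):

```cpp
// Illustrative sketch; `ptr` is a local helper, not part of mp.
#include <tuple>
#include <type_traits>

#include <mp/metafunction/apply.h>
#include <mp/metafunction/bind.h>
#include <mp/metafunction/map.h>
#include <mp/typelist/append.h>
#include <mp/typelist/list.h>

using L = mp::list<int, bool, char>;

// apply: hand every element of L to std::tuple.
static_assert(std::is_same_v<mp::apply<std::tuple, L>, std::tuple<int, bool, char>>);

// map: apply a metafunction to each element of L.
template<class T> using ptr = T*;
static_assert(std::is_same_v<mp::map<ptr, L>, mp::list<int*, bool*, char*>>);

// bind: fix the first argument of `append`, then map the result over L.
static_assert(std::is_same_v<
    mp::map<MM_MP_BIND(mp::append, mp::list<double>), L>,
    mp::list<mp::list<double, int>, mp::list<double, bool>, mp::list<double, char>>>);

int main() {}
```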
`traits`
--------
Type traits not in the standard library.
### `function_info`
* `mp::parameter_count_v`: Number of parameters a function has
* `mp::parameter_list`: Get a typelist of the parameter types
* `mp::get_parameter`: Get the type of a parameter by index
* `mp::equivalent_function_type`: Get an equivalent function type (for MFPs this does not include the class)
* `mp::equivalent_function_type_with_class`: Get an equivalent function type with explicit `this` argument (MFPs only)
* `mp::return_type`: Return type of the function
* `mp::class_type`: Only valid for member function pointer types. Gets the class the member function is associated with.
### `integer_of_size`
* `mp::signed_integer_of_size`: Gets a signed integer of the specified bit-size (if it exists)
* `mp::unsigned_integer_of_size`: Gets an unsigned integer of the specified bit-size (if it exists)
### Misc
* `mp::is_instance_of_template`: Checks if a type is an instance of a template class.
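An illustrative sketch of the traits in use (the `mp/traits/...` header paths are an assumption based on the section names above; they are not spelled out in this diff):

```cpp
// Illustrative sketch; the mp/traits/*.h paths are assumed, not shown in this diff.
#include <cstdint>
#include <string>
#include <type_traits>

#include <mp/traits/function_info.h>
#include <mp/traits/integer_of_size.h>
#include <mp/traits/is_instance_of_template.h>
#include <mp/typelist/list.h>

int add(int, long);  // declaration only; used below in unevaluated contexts

static_assert(mp::parameter_count_v<decltype(&add)> == 2);
static_assert(std::is_same_v<mp::get_parameter<decltype(&add), 1>, long>);
static_assert(std::is_same_v<mp::return_type<decltype(&add)>, int>);
static_assert(std::is_same_v<mp::parameter_list<decltype(&add)>, mp::list<int, long>>);

static_assert(std::is_same_v<mp::unsigned_integer_of_size<16>, std::uint16_t>);
static_assert(std::is_same_v<mp::signed_integer_of_size<64>, std::int64_t>);

static_assert(mp::is_instance_of_template_v<std::basic_string, std::string>);
static_assert(!mp::is_instance_of_template_v<std::basic_string, int>);

int main() {}
```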
License
-------
Please see [LICENSE-0BSD](LICENSE-0BSD).


@@ -1,26 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
namespace mp {
namespace detail {
template<template<class...> class F, class L>
struct apply_impl;
template<template<class...> class F, template<class...> class LT, class... Es>
struct apply_impl<F, LT<Es...>> {
using type = F<Es...>;
};
} // namespace detail
/// Invokes metafunction F where the arguments are all the members of list L
template<template<class...> class F, class L>
using apply = typename detail::apply_impl<F, L>::type;
} // namespace mp


@@ -1,19 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
namespace mp {
/// Binds the first sizeof...(A) arguments of metafunction F with arguments A
template<template<class...> class F, class... As>
struct bind {
template<class... Rs>
using type = F<As..., Rs...>;
};
} // namespace mp
#define MM_MP_BIND(...) ::mp::bind<__VA_ARGS__>::template type


@@ -1,23 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
namespace mp {
namespace detail {
template<class T>
struct identity_impl {
using type = T;
};
} // namespace detail
/// Identity metafunction
template<class T>
using identity = typename detail::identity_impl<T>::type;
} // namespace mp


@@ -1,26 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
namespace mp {
namespace detail {
template<template<class...> class F, class L>
struct map_impl;
template<template<class...> class F, template<class...> class LT, class... Es>
struct map_impl<F, LT<Es...>> {
using type = LT<F<Es>...>;
};
} // namespace detail
/// Applies each element of list L to metafunction F
template<template<class...> class F, class L>
using map = typename detail::map_impl<F, L>::type;
} // namespace mp


@@ -1,20 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/lift_value.h>
namespace mp {
/// Bitwise and of metavalues Vs
template<class... Vs>
using bit_and = lift_value<(Vs::value & ...)>;
/// Bitwise and of metavalues Vs
template<class... Vs>
constexpr auto bit_and_v = (Vs::value & ...);
} // namespace mp


@@ -1,20 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/lift_value.h>
namespace mp {
/// Bitwise not of metavalue V
template<class V>
using bit_not = lift_value<~V::value>;
/// Bitwise not of metavalue V
template<class V>
constexpr auto bit_not_v = ~V::value;
} // namespace mp


@@ -1,20 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/lift_value.h>
namespace mp {
/// Bitwise or of metavalues Vs
template<class... Vs>
using bit_or = lift_value<(Vs::value | ...)>;
/// Bitwise or of metavalues Vs
template<class... Vs>
constexpr auto bit_or_v = (Vs::value | ...);
} // namespace mp


@@ -1,20 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/lift_value.h>
namespace mp {
/// Bitwise xor of metavalues Vs
template<class... Vs>
using bit_xor = lift_value<(Vs::value ^ ...)>;
/// Bitwise xor of metavalues Vs
template<class... Vs>
constexpr auto bit_xor_v = (Vs::value ^ ...);
} // namespace mp


@@ -1,43 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/value.h>
#include <mp/metavalue/logic_if.h>
namespace mp {
namespace detail {
template<class...>
struct conjunction_impl;
template<>
struct conjunction_impl<> {
using type = false_type;
};
template<class V>
struct conjunction_impl<V> {
using type = V;
};
template<class V1, class... Vs>
struct conjunction_impl<V1, Vs...> {
using type = logic_if<V1, typename conjunction_impl<Vs...>::type, V1>;
};
} // namespace detail
/// Conjunction of metavalues Vs with short-circuiting and type preservation.
template<class... Vs>
using conjunction = typename detail::conjunction_impl<Vs...>::type;
/// Conjunction of metavalues Vs with short-circuiting and type preservation.
template<class... Vs>
constexpr auto conjunction_v = conjunction<Vs...>::value;
} // namespace mp


@@ -1,43 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/value.h>
#include <mp/metavalue/logic_if.h>
namespace mp {
namespace detail {
template<class...>
struct disjunction_impl;
template<>
struct disjunction_impl<> {
using type = false_type;
};
template<class V>
struct disjunction_impl<V> {
using type = V;
};
template<class V1, class... Vs>
struct disjunction_impl<V1, Vs...> {
using type = logic_if<V1, V1, typename disjunction_impl<Vs...>::type>;
};
} // namespace detail
/// Disjunction of metavalues Vs with short-circuiting and type preservation.
template<class... Vs>
using disjunction = typename detail::disjunction_impl<Vs...>::type;
/// Disjunction of metavalues Vs with short-circuiting and type preservation.
template<class... Vs>
constexpr auto disjunction_v = disjunction<Vs...>::value;
} // namespace mp


@@ -1,16 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <type_traits>
namespace mp {
/// Lifts a value into a type (a metavalue)
template<auto V>
using lift_value = std::integral_constant<decltype(V), V>;
} // namespace mp


@@ -1,20 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/value.h>
namespace mp {
/// Logical conjunction of metavalues Vs without short-circuiting or type preservation.
template<class... Vs>
using logic_and = bool_value<(true && ... && Vs::value)>;
/// Logical conjunction of metavalues Vs without short-circuiting or type preservation.
template<class... Vs>
constexpr bool logic_and_v = (true && ... && Vs::value);
} // namespace mp


@@ -1,21 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <type_traits>
#include <mp/metavalue/value.h>
namespace mp {
/// Conditionally select between types T and F based on boolean metavalue V
template<class V, class T, class F>
using logic_if = std::conditional_t<bool(V::value), T, F>;
/// Conditionally select between metavalues T and F based on boolean metavalue V
template<class V, class TV, class FV>
constexpr auto logic_if_v = logic_if<V, TV, FV>::value;
} // namespace mp


@@ -1,20 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/value.h>
namespace mp {
/// Logical negation of metavalue V.
template<class V>
using logic_not = bool_value<!bool(V::value)>;
/// Logical negation of metavalue V.
template<class V>
constexpr bool logic_not_v = !bool(V::value);
} // namespace mp


@@ -1,20 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/value.h>
namespace mp {
/// Logical disjunction of metavalues Vs without short-circuiting or type preservation.
template<class... Vs>
using logic_or = bool_value<(false || ... || Vs::value)>;
/// Logical disjunction of metavalues Vs without short-circuiting or type preservation.
template<class... Vs>
constexpr bool logic_or_v = (false || ... || Vs::value);
} // namespace mp


@@ -1,20 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/lift_value.h>
namespace mp {
/// Product of metavalues Vs
template<class... Vs>
using product = lift_value<(Vs::value * ...)>;
/// Product of metavalues Vs
template<class... Vs>
constexpr auto product_v = (Vs::value * ...);
} // namespace mp


@@ -1,20 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/lift_value.h>
namespace mp {
/// Sum of metavalues Vs
template<class... Vs>
using sum = lift_value<(Vs::value + ...)>;
/// Sum of metavalues Vs
template<class... Vs>
constexpr auto sum_v = (Vs::value + ...);
} // namespace mp


@@ -1,31 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstddef>
#include <type_traits>
namespace mp {
/// A metavalue (of type VT and value v).
template<class VT, VT v>
using value = std::integral_constant<VT, v>;
/// A metavalue of type std::size_t (and value v).
template<std::size_t v>
using size_value = value<std::size_t, v>;
/// A metavalue of type bool (and value v). (Aliases to std::bool_constant.)
template<bool v>
using bool_value = value<bool, v>;
/// true metavalue (Aliases to std::true_type).
using true_type = bool_value<true>;
/// false metavalue (Aliases to std::false_type).
using false_type = bool_value<false>;
} // namespace mp


@@ -1,16 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <type_traits>
namespace mp {
/// Casts a metavalue from one type to another
template<class T, class V>
using value_cast = std::integral_constant<T, static_cast<T>(V::value)>;
} // namespace mp


@@ -1,16 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <type_traits>
namespace mp {
/// Do two metavalues contain the same value?
template<class V1, class V2>
using value_equal = std::bool_constant<V1::value == V2::value>;
} // namespace mp


@@ -1,20 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/lift_value.h>
namespace mp {
/// Metafunction that returns the number of arguments it has
template<typename... Ts>
using argument_count = lift_value<sizeof...(Ts)>;
/// Metafunction that returns the number of arguments it has
template<typename... Ts>
constexpr auto argument_count_v = sizeof...(Ts);
} // namespace mp


@@ -1,71 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstddef>
#include <tuple>
#include <mp/typelist/list.h>
namespace mp {
template<class F>
struct function_info : function_info<decltype(&F::operator())> {};
template<class R, class... As>
struct function_info<R(As...)> {
using return_type = R;
using parameter_list = list<As...>;
static constexpr std::size_t parameter_count = sizeof...(As);
using equivalent_function_type = R(As...);
template<std::size_t I>
struct parameter {
static_assert(I < parameter_count, "Non-existent parameter");
using type = std::tuple_element_t<I, std::tuple<As...>>;
};
};
template<class R, class... As>
struct function_info<R(*)(As...)> : function_info<R(As...)> {};
template<class C, class R, class... As>
struct function_info<R(C::*)(As...)> : function_info<R(As...)> {
using class_type = C;
using equivalent_function_type_with_class = R(C*, As...);
};
template<class C, class R, class... As>
struct function_info<R(C::*)(As...) const> : function_info<R(As...)> {
using class_type = C;
using equivalent_function_type_with_class = R(C*, As...);
};
template<class F>
constexpr size_t parameter_count_v = function_info<F>::parameter_count;
template<class F>
using parameter_list = typename function_info<F>::parameter_list;
template<class F, std::size_t I>
using get_parameter = typename function_info<F>::template parameter<I>::type;
template<class F>
using equivalent_function_type = typename function_info<F>::equivalent_function_type;
template<class F>
using equivalent_function_type_with_class = typename function_info<F>::equivalent_function_type_with_class;
template<class F>
using return_type = typename function_info<F>::return_type;
template<class F>
using class_type = typename function_info<F>::class_type;
} // namespace mp


@@ -1,50 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstddef>
#include <cstdint>
namespace mp {
namespace detail {
template<std::size_t size>
struct integer_of_size_impl{};
template<>
struct integer_of_size_impl<8> {
using unsigned_type = std::uint8_t;
using signed_type = std::int8_t;
};
template<>
struct integer_of_size_impl<16> {
using unsigned_type = std::uint16_t;
using signed_type = std::int16_t;
};
template<>
struct integer_of_size_impl<32> {
using unsigned_type = std::uint32_t;
using signed_type = std::int32_t;
};
template<>
struct integer_of_size_impl<64> {
using unsigned_type = std::uint64_t;
using signed_type = std::int64_t;
};
} // namespace detail
template<std::size_t size>
using unsigned_integer_of_size = typename detail::integer_of_size_impl<size>::unsigned_type;
template<std::size_t size>
using signed_integer_of_size = typename detail::integer_of_size_impl<size>::signed_type;
} // namespace mp


@@ -1,23 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/value.h>
namespace mp {
/// Is type T an instance of template class C?
template <template <class...> class, class>
struct is_instance_of_template : false_type {};
template <template <class...> class C, class... As>
struct is_instance_of_template<C, C<As...>> : true_type {};
/// Is type T an instance of template class C?
template<template <class...> class C, class T>
constexpr bool is_instance_of_template_v = is_instance_of_template<C, T>::value;
} // namespace mp


@@ -1,26 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
namespace mp {
namespace detail {
template<class... L>
struct append_impl;
template<template<class...> class LT, class... E1s, class... E2s>
struct append_impl<LT<E1s...>, E2s...> {
using type = LT<E1s..., E2s...>;
};
} // namespace detail
/// Append items E to list L
template<class L, class... Es>
using append = typename detail::append_impl<L, Es...>::type;
} // namespace mp


@@ -1,50 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metafunction/bind.h>
#include <mp/metafunction/map.h>
#include <mp/typelist/append.h>
#include <mp/typelist/concat.h>
#include <mp/typelist/list.h>
namespace mp {
namespace detail {
template<class... Ls>
struct cartesian_product_impl;
template<class RL>
struct cartesian_product_impl<RL> {
using type = RL;
};
template<template<class...> class LT, class... REs, class... E2s>
struct cartesian_product_impl<LT<REs...>, LT<E2s...>> {
using type = concat<
map<MM_MP_BIND(append, REs), list<E2s...>>...
>;
};
template<class RL, class L2, class L3, class... Ls>
struct cartesian_product_impl<RL, L2, L3, Ls...> {
using type = typename cartesian_product_impl<
typename cartesian_product_impl<RL, L2>::type,
L3,
Ls...
>::type;
};
} // namespace detail
/// Produces the cartesian product of a set of lists
/// For example:
/// cartesian_product<list<A, B>, list<D, E>> == list<list<A, D>, list<A, E>, list<B, D>, list<B, E>>
template<typename L1, typename... Ls>
using cartesian_product = typename detail::cartesian_product_impl<map<list, L1>, Ls...>::type;
} // namespace mp
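// A two-list sketch of the example above (assumes <type_traits> is included):
//
//   static_assert(std::is_same_v<
//       mp::cartesian_product<mp::list<int, bool>, mp::list<double, float>>,
//       mp::list<mp::list<int, double>, mp::list<int, float>,
//                mp::list<bool, double>, mp::list<bool, float>>>);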

View File

@@ -1,56 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/typelist/list.h>
namespace mp {
namespace detail {
template<class... Ls>
struct concat_impl;
template<>
struct concat_impl<> {
using type = list<>;
};
template<class L>
struct concat_impl<L> {
using type = L;
};
template<template<class...> class LT, class... E1s, class... E2s, class... Ls>
struct concat_impl<LT<E1s...>, LT<E2s...>, Ls...> {
using type = typename concat_impl<LT<E1s..., E2s...>, Ls...>::type;
};
template<template<class...> class LT,
class... E1s, class... E2s, class... E3s, class... E4s, class... E5s, class... E6s, class... E7s, class... E8s,
class... E9s, class... E10s, class... E11s, class... E12s, class... E13s, class... E14s, class... E15s, class... E16s,
class... Ls>
struct concat_impl<
LT<E1s...>, LT<E2s...>, LT<E3s...>, LT<E4s...>, LT<E5s...>, LT<E6s...>, LT<E7s...>, LT<E8s...>,
LT<E9s...>, LT<E10s...>, LT<E11s...>, LT<E12s...>, LT<E13s...>, LT<E14s...>, LT<E15s...>, LT<E16s...>,
Ls...>
{
using type = typename concat_impl<
LT<
E1s..., E2s..., E3s..., E4s..., E5s..., E6s..., E7s..., E8s...,
E9s..., E10s..., E11s..., E12s..., E13s..., E14s..., E15s..., E16s...
>,
Ls...
>::type;
};
} // namespace detail
/// Concatenate lists together
template<class... Ls>
using concat = typename detail::concat_impl<Ls...>::type;
} // namespace mp
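// A minimal usage sketch (mirrors the typelist tests later in this commit;
// assumes <type_traits> is included):
//
//   static_assert(std::is_same_v<mp::concat<mp::list<int, bool>, mp::list<double>>,
//                                mp::list<int, bool, double>>);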

View File

@@ -1,25 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metavalue/value.h>
namespace mp {
/// Does list L contain an element which is same as type T?
template<class L, class T>
struct contains;
template<template<class...> class LT, class... Ts, class T>
struct contains<LT<Ts...>, T>
: bool_value<(false || ... || std::is_same_v<Ts, T>)>
{};
/// Does list L contain an element which is same as type T?
template<class L, class T>
constexpr bool contains_v = contains<L, T>::value;
} // namespace mp
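// A minimal usage sketch (mirrors the typelist tests later in this commit;
// assumes <mp/typelist/list.h> is included):
//
//   static_assert(mp::contains_v<mp::list<double, int>, int>);
//   static_assert(!mp::contains_v<mp::list<double>, int>);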

View File

@@ -1,34 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstddef>
#include <type_traits>
namespace mp {
namespace detail {
template<size_t N, class L>
struct drop_impl;
template<size_t N, template<class...> class LT>
struct drop_impl<N, LT<>> {
using type = LT<>;
};
template<size_t N, template<class...> class LT, class E1, class... Es>
struct drop_impl<N, LT<E1, Es...>> {
using type = std::conditional_t<N == 0, LT<E1, Es...>, typename drop_impl<N - 1, LT<Es...>>::type>;
};
} // namespace detail
/// Drops the first N elements of list L
template<std::size_t N, class L>
using drop = typename detail::drop_impl<N, L>::type;
} // namespace mp
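// A minimal usage sketch (mirrors the typelist tests later in this commit;
// assumes <mp/typelist/list.h> is included):
//
//   static_assert(std::is_same_v<mp::drop<3, mp::list<int, int, int, double>>,
//                                mp::list<double>>);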

View File

@@ -1,19 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstddef>
#include <tuple>
#include <mp/metafunction/apply.h>
namespace mp {
/// Get element I from list L
template<std::size_t I, class L>
using get = std::tuple_element_t<I, apply<std::tuple, L>>;
} // namespace mp
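// A minimal usage sketch (mirrors the typelist tests later in this commit;
// assumes <type_traits> and <mp/typelist/list.h> are included):
//
//   static_assert(std::is_same_v<mp::get<1, mp::list<int, double>>, double>);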

View File

@@ -1,26 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
namespace mp {
namespace detail {
template<class L>
struct head_impl;
template<template<class...> class LT, class E1, class... Es>
struct head_impl<LT<E1, Es...>> {
using type = E1;
};
} // namespace detail
/// Gets the head/car/first element of list L
template<class L>
using head = typename detail::head_impl<L>::type;
} // namespace mp
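// A minimal usage sketch (mirrors the typelist tests later in this commit;
// assumes <type_traits> and <mp/typelist/list.h> are included):
//
//   static_assert(std::is_same_v<mp::head<mp::list<int, double>>, int>);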

View File

@@ -1,21 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <mp/metafunction/apply.h>
#include <mp/misc/argument_count.h>
namespace mp {
/// Length of list L
template<class L>
using length = apply<argument_count, L>;
/// Length of list L
template<class L>
constexpr auto length_v = length<L>::value;
} // namespace mp
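// A minimal usage sketch (mirrors the typelist tests later in this commit;
// assumes <mp/typelist/list.h> is included):
//
//   static_assert(mp::length_v<mp::list<>> == 0);
//   static_assert(mp::length_v<mp::list<int, int, int>> == 3);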

View File

@@ -1,30 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <type_traits>
#include <mp/typelist/list.h>
namespace mp {
namespace detail {
template<class VL>
struct lift_sequence_impl;
template<class T, template <class, T...> class VLT, T... values>
struct lift_sequence_impl<VLT<T, values...>> {
using type = list<std::integral_constant<T, values>...>;
};
} // namespace detail
/// Lifts values in value list VL to create a type list.
template<class VL>
using lift_sequence = typename detail::lift_sequence_impl<VL>::type;
} // namespace mp
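// A minimal usage sketch (adapted from the typelist tests later in this
// commit; assumes <utility> is included for std::make_index_sequence):
//
//   static_assert(std::is_same_v<
//       mp::lift_sequence<std::make_index_sequence<2>>,
//       mp::list<std::integral_constant<std::size_t, 0>,
//                std::integral_constant<std::size_t, 1>>>);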

View File

@@ -1,14 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
namespace mp {
/// Contains a list of types
template<class... E>
struct list {};
} // namespace mp

View File

@@ -1,25 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <tuple>
namespace mp {
/// Converts a list of metavalues to a tuple.
template<class L>
struct lower_to_tuple;
template<template<class...> class LT, class... Es>
struct lower_to_tuple<LT<Es...>> {
static constexpr auto value = std::make_tuple(static_cast<typename Es::value_type>(Es::value)...);
};
/// Converts a list of metavalues to a tuple.
template<class L>
constexpr auto lower_to_tuple_v = lower_to_tuple<L>::value;
} // namespace mp
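// A minimal usage sketch (mirrors the typelist tests later in this commit;
// assumes <type_traits> and <mp/typelist/list.h> are included):
//
//   static_assert(mp::lower_to_tuple_v<mp::list<std::true_type, std::false_type>>
//                 == std::make_tuple(true, false));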

View File

@@ -1,26 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
namespace mp {
namespace detail {
template<class... L>
struct prepend_impl;
template<template<class...> class LT, class... E1s, class... E2s>
struct prepend_impl<LT<E1s...>, E2s...> {
using type = LT<E2s..., E1s...>;
};
} // namespace detail
/// Prepend items E to list L
template<class L, class... Es>
using prepend = typename detail::prepend_impl<L, Es...>::type;
} // namespace mp
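// A minimal usage sketch (mirrors the typelist tests later in this commit;
// assumes <type_traits> and <mp/typelist/list.h> are included):
//
//   static_assert(std::is_same_v<mp::prepend<mp::list<int, int>, double>,
//                                mp::list<double, int, int>>);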

View File

@@ -1,26 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2017 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
namespace mp {
namespace detail {
template<class L>
struct tail_impl;
template<template<class...> class LT, class E1, class... Es>
struct tail_impl<LT<E1, Es...>> {
using type = LT<Es...>;
};
} // namespace detail
/// Gets the tail/cdr/all-but-the-first elements of list L
template<class L>
using tail = typename detail::tail_impl<L>::type;
} // namespace mp
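// A minimal usage sketch (mirrors the typelist tests later in this commit;
// assumes <type_traits> and <mp/typelist/list.h> are included):
//
//   static_assert(std::is_same_v<mp::tail<mp::list<int, double>>, mp::list<double>>);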

View File

@@ -1,12 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "metavalue_tests.h"
#include "traits_tests.h"
#include "typelist_tests.h"
int main() {
return 0;
}

View File

@@ -1,93 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <type_traits>
#include <mp/metavalue/bit_and.h>
#include <mp/metavalue/bit_not.h>
#include <mp/metavalue/bit_or.h>
#include <mp/metavalue/bit_xor.h>
#include <mp/metavalue/conjunction.h>
#include <mp/metavalue/disjunction.h>
#include <mp/metavalue/lift_value.h>
#include <mp/metavalue/logic_and.h>
#include <mp/metavalue/logic_not.h>
#include <mp/metavalue/logic_or.h>
#include <mp/metavalue/product.h>
#include <mp/metavalue/sum.h>
#include <mp/metavalue/value.h>
#include <mp/metavalue/value_cast.h>
#include <mp/metavalue/value_equal.h>
using namespace mp;
// bit_and
static_assert(bit_and<lift_value<3>, lift_value<1>>::value == 1);
// bit_not
static_assert(bit_not<lift_value<0>>::value == ~0);
// bit_or
static_assert(bit_or<lift_value<1>, lift_value<3>>::value == 3);
// bit_xor
static_assert(bit_xor<lift_value<1>, lift_value<3>>::value == 2);
// conjunction
static_assert(std::is_same_v<conjunction<std::true_type>, std::true_type>);
static_assert(std::is_same_v<conjunction<std::true_type, lift_value<0>>, lift_value<0>>);
static_assert(std::is_same_v<conjunction<std::true_type, lift_value<42>, std::true_type>, std::true_type>);
// disjunction
static_assert(std::is_same_v<disjunction<std::true_type>, std::true_type>);
static_assert(std::is_same_v<disjunction<std::false_type, lift_value<0>>, lift_value<0>>);
static_assert(std::is_same_v<disjunction<std::false_type, lift_value<42>, std::true_type>, lift_value<42>>);
// lift_value
static_assert(std::is_same_v<lift_value<3>, std::integral_constant<int, 3>>);
static_assert(std::is_same_v<lift_value<false>, std::false_type>);
// logic_and
static_assert(std::is_same_v<logic_and<>, std::true_type>);
static_assert(std::is_same_v<logic_and<std::true_type>, std::true_type>);
static_assert(std::is_same_v<logic_and<lift_value<1>>, std::true_type>);
static_assert(std::is_same_v<logic_and<std::true_type, std::false_type>, std::false_type>);
// logic_not
static_assert(std::is_same_v<logic_not<std::false_type>, std::true_type>);
// logic_or
static_assert(std::is_same_v<logic_or<>, std::false_type>);
static_assert(std::is_same_v<logic_or<std::true_type>, std::true_type>);
static_assert(std::is_same_v<logic_or<lift_value<0>>, std::false_type>);
static_assert(std::is_same_v<logic_or<std::true_type, std::false_type>, std::true_type>);
// product
static_assert(product<lift_value<1>, lift_value<2>, lift_value<3>, lift_value<4>>::value == 24);
// sum
static_assert(sum<lift_value<1>, lift_value<2>, lift_value<3>, lift_value<4>>::value == 10);
// value_cast
static_assert(std::is_same_v<value_cast<int, std::true_type>, std::integral_constant<int, 1>>);
// value_equal
static_assert(std::is_same_v<value_equal<std::true_type, std::integral_constant<int, 1>>, std::true_type>);

View File

@@ -1,42 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <tuple>
#include <type_traits>
#include <mp/traits/function_info.h>
#include <mp/traits/is_instance_of_template.h>
using namespace mp;
// function_info
struct Bar {
int frob(double a) { return a; }
};
static_assert(parameter_count_v<void()> == 0);
static_assert(parameter_count_v<void(int, int, int)> == 3);
static_assert(std::is_same_v<get_parameter<void(*)(bool, int, double), 2>, double>);
static_assert(std::is_same_v<equivalent_function_type<void(*)(bool, int, double)>, void(bool, int, double)>);
static_assert(std::is_same_v<return_type<void(*)(bool, int, double)>, void>);
static_assert(std::is_same_v<equivalent_function_type<decltype(&Bar::frob)>, int(double)>);
static_assert(std::is_same_v<class_type<decltype(&Bar::frob)>, Bar>);
// is_instance_of_template
template<class, class...>
class Foo {};
template<class, class>
class Pair {};
static_assert(is_instance_of_template_v<std::tuple, std::tuple<int, bool>>);
static_assert(!is_instance_of_template_v<std::tuple, bool>);
static_assert(is_instance_of_template_v<Foo, Foo<bool>>);
static_assert(is_instance_of_template_v<Pair, Pair<bool, int>>);
static_assert(!is_instance_of_template_v<Pair, Foo<bool, int>>);

View File

@@ -1,113 +0,0 @@
/* This file is part of the mp project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstddef>
#include <tuple>
#include <type_traits>
#include <utility>
#include <mp/metavalue/value.h>
#include <mp/typelist/append.h>
#include <mp/typelist/cartesian_product.h>
#include <mp/typelist/concat.h>
#include <mp/typelist/contains.h>
#include <mp/typelist/drop.h>
#include <mp/typelist/get.h>
#include <mp/typelist/head.h>
#include <mp/typelist/length.h>
#include <mp/typelist/lift_sequence.h>
#include <mp/typelist/list.h>
#include <mp/typelist/lower_to_tuple.h>
#include <mp/typelist/prepend.h>
#include <mp/typelist/tail.h>
using namespace mp;
// append
static_assert(std::is_same_v<append<list<int, bool>, double>, list<int, bool, double>>);
static_assert(std::is_same_v<append<list<>, int, int>, list<int, int>>);
// cartesian_product
static_assert(
std::is_same_v<
cartesian_product<list<int, bool>, list<double, float>, list<char, unsigned>>,
list<
list<int, double, char>,
list<int, double, unsigned>,
list<int, float, char>,
list<int, float, unsigned>,
list<bool, double, char>,
list<bool, double, unsigned>,
list<bool, float, char>,
list<bool, float, unsigned>
>
>
);
// concat
static_assert(std::is_same_v<concat<list<int, bool>, list<double>>, list<int, bool, double>>);
static_assert(std::is_same_v<concat<list<>, list<int>, list<int>>, list<int, int>>);
// contains
static_assert(contains_v<list<int>, int>);
static_assert(!contains_v<list<>, int>);
static_assert(!contains_v<list<double>, int>);
static_assert(contains_v<list<double, int>, int>);
// drop
static_assert(std::is_same_v<list<>, drop<3, list<int, int>>>);
static_assert(std::is_same_v<list<>, drop<3, list<int, int, int>>>);
static_assert(std::is_same_v<list<int>, drop<3, list<int, int, int, int>>>);
static_assert(std::is_same_v<list<double>, drop<3, list<int, int, int, double>>>);
static_assert(std::is_same_v<list<int, double, bool>, drop<0, list<int, double, bool>>>);
// get
static_assert(std::is_same_v<get<0, list<int, double>>, int>);
static_assert(std::is_same_v<get<1, list<int, double>>, double>);
// head
static_assert(std::is_same_v<head<list<int, double>>, int>);
static_assert(std::is_same_v<head<list<int>>, int>);
// length
static_assert(length_v<list<>> == 0);
static_assert(length_v<list<int>> == 1);
static_assert(length_v<list<int, int, int>> == 3);
// lift_sequence
static_assert(
std::is_same_v<
lift_sequence<std::make_index_sequence<3>>,
list<size_value<0>, size_value<1>, size_value<2>>
>
);
// lower_to_tuple
static_assert(lower_to_tuple_v<list<size_value<0>, size_value<1>, size_value<2>>> == std::tuple<std::size_t, std::size_t, std::size_t>(0, 1, 2));
static_assert(lower_to_tuple_v<list<std::true_type, std::false_type>> == std::make_tuple(true, false));
// prepend
static_assert(std::is_same_v<prepend<list<int, int>, double>, list<double, int, int>>);
static_assert(std::is_same_v<prepend<list<>, double>, list<double>>);
static_assert(std::is_same_v<prepend<list<int>, double, bool>, list<double, bool, int>>);
// tail
static_assert(std::is_same_v<tail<list<int, double>>, list<double>>);
static_assert(std::is_same_v<tail<list<int>>, list<>>);

View File

@@ -1,52 +0,0 @@
add_library(vixl
vixl/src/aarch64/abi-aarch64.h
vixl/src/aarch64/assembler-aarch64.cc
vixl/src/aarch64/assembler-aarch64.h
vixl/src/aarch64/constants-aarch64.h
vixl/src/aarch64/cpu-aarch64.cc
vixl/src/aarch64/cpu-aarch64.h
vixl/src/aarch64/cpu-features-auditor-aarch64.cc
vixl/src/aarch64/cpu-features-auditor-aarch64.h
vixl/src/aarch64/decoder-aarch64.cc
vixl/src/aarch64/decoder-aarch64.h
vixl/src/aarch64/decoder-constants-aarch64.h
vixl/src/aarch64/disasm-aarch64.cc
vixl/src/aarch64/disasm-aarch64.h
vixl/src/aarch64/instructions-aarch64.cc
vixl/src/aarch64/instructions-aarch64.h
vixl/src/aarch64/logic-aarch64.cc
vixl/src/aarch64/macro-assembler-aarch64.cc
vixl/src/aarch64/macro-assembler-aarch64.h
vixl/src/aarch64/operands-aarch64.cc
vixl/src/aarch64/operands-aarch64.h
vixl/src/aarch64/pointer-auth-aarch64.cc
vixl/src/aarch64/simulator-aarch64.cc
vixl/src/aarch64/simulator-aarch64.h
vixl/src/aarch64/simulator-constants-aarch64.h
vixl/src/assembler-base-vixl.h
vixl/src/code-buffer-vixl.cc
vixl/src/code-buffer-vixl.h
vixl/src/code-generation-scopes-vixl.h
vixl/src/compiler-intrinsics-vixl.cc
vixl/src/compiler-intrinsics-vixl.h
vixl/src/cpu-features.cc
vixl/src/cpu-features.h
vixl/src/globals-vixl.h
vixl/src/invalset-vixl.h
vixl/src/macro-assembler-interface.h
vixl/src/platform-vixl.h
vixl/src/pool-manager-impl.h
vixl/src/pool-manager.h
vixl/src/utils-vixl.cc
vixl/src/utils-vixl.h
)
target_include_directories(vixl PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vixl/src)
target_compile_definitions(vixl PUBLIC VIXL_INCLUDE_TARGET_AARCH64=1)
if (MSVC)
target_compile_options(vixl PUBLIC /Zc:__cplusplus)
endif()
if (UNIX AND NOT APPLE)
target_compile_definitions(vixl PUBLIC VIXL_CODE_BUFFER_MMAP=1)
else()
target_compile_definitions(vixl PUBLIC VIXL_CODE_BUFFER_MALLOC=1)
endif()

View File

@@ -1,54 +0,0 @@
# Copyright 2016, VIXL authors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ARM Limited nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
BasedOnStyle: Google
# We keep two empty lines between functions in `.cc` files.
MaxEmptyLinesToKeep: 2
# Either fit all arguments on the same line, or have one per line.
# Ideally we would like to allow grouping them when it makes sense. But
# `clang-format` cannot know what 'makes sense'.
BinPackArguments: false
BinPackParameters: false
PenaltyBreakBeforeFirstCallParameter: 500
PenaltyBreakString: 100
# Do not format comments that contain the `NOLINT` pragma for `cpplint.py`.
CommentPragmas: NOLINT
# Order of #include directives. clang-format will stop at the first rule that
# matches so the order in which they are declared is important.
IncludeCategories:
- Regex: '".*aarch32.*"'
Priority: 3
- Regex: '".*aarch64.*"'
Priority: 3
- Regex: '<.*>'
Priority: 1
- Regex: '".*"'
Priority: 2

View File

@@ -1,33 +0,0 @@
---
# We use the clang-tidy defaults and the Google styles as a baseline, with a
# few changes specific to VIXL:
# -clang-analyzer-security.insecureAPI.rand:
# This warns against the use of mrand48 (etc) and suggests replacing them
# with arc4random. However, we are using these to drive tests and debug
# tools, and we need the ability to manually seed the generator. This is
# not possible with arc4random, and we do not need particularly robust
# random numbers, so we continue to use mrand48.
# -google-readability-todo:
# We don't put names on TODOs.
# -google-readability-function-size:
# There are cases where we need (or generate) very long functions,
# particularly involving macro-generated encoding tables and so on.
# -google-build-using-namespace:
# We do this in internal contexts (typically in .cc files), but clang-tidy
# cannot tell the difference.
# -google-explicit-constructor:
# We follow this rule, but have some exceptions that are annotated using
# cpplint's NOLINT format.
#
# TODO: The following _should_ be enabled, but currently show several failures:
# google-readability-braces-around-statements
# google-readability-namespace-comments
# google-readability-casting
#
# TODO: Also consider enabling other rules, such as bugprone-* and cert-*.
Checks: '-clang-analyzer-security.insecureAPI.rand,google-*,-google-readability-todo,-google-readability-function-size,-google-build-using-namespace,-google-explicit-constructor,-google-readability-braces-around-statements,-google-readability-namespace-comments,-google-readability-casting'
HeaderFilterRegex: '\.h$'
AnalyzeTemporaryDtors: false
CheckOptions:
...

View File

@@ -1,7 +0,0 @@
# ignore python compiled object
*.pyc
*.html
.sconsign.dblite
log/
obj/
tools/.cached_lint_results.pkl

View File

@@ -1,4 +0,0 @@
[gerrit]
host=review.linaro.org
port=29418
project=arm/vixl

View File

@@ -1,38 +0,0 @@
# Vim YouCompleteMe completion configuration.
#
# See doc/topics/ycm.md for details.
import os
import platform
repo_root = os.path.dirname(os.path.abspath(__file__))
# Paths in the compilation flags must be absolute to allow ycm to find them from
# any working directory.
def AbsolutePath(path):
return os.path.join(repo_root, path)
flags = [
'-I', AbsolutePath('src'),
'-I', AbsolutePath('test'),
'-DVIXL_DEBUG',
'-Wall',
'-Werror',
'-Wextra',
'-pedantic',
'-Wno-newline-eof',
'-Wwrite-strings',
'-std=c++11',
'-x', 'c++'
]
if platform.machine() != 'aarch64':
flags.append('-DVIXL_INCLUDE_SIMULATOR_AARCH64')
def FlagsForFile(filename, **kwargs):
return {
'flags': flags,
'do_cache': True
}

View File

@@ -1,8 +0,0 @@
# Below is a list of people and organisations that have contributed to the VIXL
# project. Entries should be added to the list as:
#
# Name/Organization <email address>
ARM Ltd. <*@arm.com>
Google Inc. <*@google.com>
Linaro <*@linaro.org>

View File

@@ -1,38 +0,0 @@
# Stop cpplint from looking for CPPLINT.cfg outside of vixl.
set noparent
filter=+build/class
filter=+build/deprecated
filter=+build/forward_decl
filter=+build/include_order
filter=+build/printf_format
filter=+build/storage_class
filter=+legal/copyright
filter=+readability/boost
filter=+readability/braces
filter=+readability/casting
filter=+readability/constructors
filter=+readability/fn_size
filter=+readability/function
filter=+readability/multiline_comment
filter=+readability/multiline_string
filter=+readability/streams
filter=+readability/utf8
filter=+runtime/arrays
filter=+runtime/casting
filter=+runtime/deprecated_fn
filter=+runtime/explicit
filter=+runtime/int
filter=+runtime/memset
filter=+runtime/mutex
filter=+runtime/nonconf
filter=+runtime/printf
filter=+runtime/printf_format
filter=+runtime/references
filter=+runtime/rtti
filter=+runtime/sizeof
filter=+runtime/string
filter=+runtime/virtual
filter=+runtime/vlog
# cpplint.py enables these filters in reversed order.
filter=-
linelength=80

View File

@@ -1,30 +0,0 @@
LICENCE
=======
The software in this repository is covered by the following licence.
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,226 +0,0 @@
VIXL: Armv8 Runtime Code Generation Library, Version 5.1.0
==========================================================
Contents:
* Overview
* Licence
* Requirements
* Known limitations
* Bug reports
* Usage
Overview
========
VIXL contains three components.
1. Programmatic **assemblers** to generate A64, A32 or T32 code at runtime. The
assemblers abstract some of the constraints of each ISA; for example, most
instructions support any immediate.
2. **Disassemblers** that can print any instruction emitted by the assemblers.
3. A **simulator** that can simulate any instruction emitted by the A64
assembler. The simulator allows generated code to be run on another
architecture without the need for a full ISA model.
The VIXL git repository can be found [on 'https://git.linaro.org'][vixl].
Changes from previous versions of VIXL can be found in the
[Changelog](doc/changelog.md).
Licence
=======
This software is covered by the licence described in the [LICENCE](LICENCE)
file.
Requirements
============
To build VIXL the following software is required:
1. Python 2.7
2. SCons 2.0
3. GCC 4.8+ or Clang 4.0+
A 64-bit host machine is required, implementing an LP64 data model. VIXL has
been tested using GCC on AArch64 Debian, GCC and Clang on amd64 Ubuntu
systems.
To run the linter and code formatting stages of the tests, the following
software is also required:
1. Git
2. [Google's `cpplint.py`][cpplint]
3. clang-format-4.0
4. clang-tidy-4.0
Refer to the 'Usage' section for details.
Note that in Ubuntu 18.04, clang-tidy-4.0 will only work if the clang-4.0
package is also installed.
Known Limitations
=================
VIXL was developed for JavaScript engines so a number of features from A64 were
deemed unnecessary:
* Limited rounding mode support for floating point.
* Limited support for synchronisation instructions.
* Limited support for system instructions.
* A few miscellaneous integer and floating point instructions are missing.
The VIXL simulator supports only those instructions that the VIXL assembler can
generate. The `doc` directory contains a
[list of supported A64 instructions](doc/aarch64/supported-instructions-aarch64.md).
The VIXL simulator was developed to run on 64-bit amd64 platforms. Whilst it
builds and mostly works for 32-bit x86 platforms, there are a number of
floating-point operations which do not work correctly, and a number of tests
fail as a result.
Debug Builds
------------
Your project's build system must define `VIXL_DEBUG` (e.g. `-DVIXL_DEBUG`)
when using a VIXL library that has been built with debug enabled.
Some classes defined in VIXL header files contain fields that are only present
in debug builds, so if `VIXL_DEBUG` is defined when the library is built, but
not defined for the header files included in your project, you will see runtime
failures.
Exclusive-Access Instructions
-----------------------------
All exclusive-access instructions are supported, but the simulator cannot
accurately simulate their behaviour as described in the ARMv8 Architecture
Reference Manual.
* A local monitor is simulated, so simulated exclusive loads and stores execute
as expected in a single-threaded environment.
* The global monitor is simulated by occasionally causing exclusive-access
instructions to fail regardless of the local monitor state.
* Load-acquire, store-release semantics are approximated by issuing a host
memory barrier after loads or before stores. The built-in
`__sync_synchronize()` is used for this purpose.
The simulator tries to be strict, and implements the following restrictions that
the ARMv8 ARM allows:
* A pair of load-/store-exclusive instructions will only succeed if they have
the same address and access size.
* Most of the time, cache-maintenance operations or explicit memory accesses
will clear the exclusive monitor.
* To ensure that simulated code does not depend on this behaviour, the
exclusive monitor will sometimes be left intact after these instructions.
Instructions affected by these limitations:
`stxrb`, `stxrh`, `stxr`, `ldxrb`, `ldxrh`, `ldxr`, `stxp`, `ldxp`, `stlxrb`,
`stlxrh`, `stlxr`, `ldaxrb`, `ldaxrh`, `ldaxr`, `stlxp`, `ldaxp`, `stlrb`,
`stlrh`, `stlr`, `ldarb`, `ldarh`, `ldar`, `clrex`.
Security Considerations
-----------------------
VIXL allows callers to generate any code they want. The generated code is
arbitrary, and can therefore call back into any other component in the process.
As with any self-modifying code, vulnerabilities in the client or in VIXL itself
could lead to arbitrary code generation.
For performance reasons, VIXL's Assembler only performs debug-mode checking of
instruction operands (such as immediate field encodability). This can minimise
code-generation overheads for advanced compilers that already model instructions
accurately, and might consider the Assembler's checks to be redundant. The
Assembler should only be used directly where encodability is independently
checked, and where fine control over all generated code is required.
The MacroAssembler synthesises multiple-instruction sequences to support _some_
unencodable operand combinations. The MacroAssembler can provide a useful safety
check in cases where the Assembler's precision is not required; an unexpected
unencodable operand should result in a macro with the correct behaviour, rather
than an invalid instruction.
In general, the MacroAssembler handles operands which are likely to vary with
user-supplied data, but does not usually handle inputs which are likely to be
easily covered by tests. For example, move-immediate arguments are likely to be
data-dependent, but register types (e.g. `x` vs `w`) are not.
We recommend that _all_ users use the MacroAssembler, using `ExactAssemblyScope`
to invoke the Assembler when specific instruction sequences are required. This
approach is recommended even in cases where a compiler can model the
instructions precisely, because, subject to the limitations described above, it
offers an additional layer of protection against logic bugs in instruction
selection.
Bug reports
===========
Bug reports may be sent to vixl@arm.com. Please provide any steps required to
recreate a bug, along with build environment and host system information.
Usage
=====
Running all Tests
-----------------
The helper script `tools/test.py` will build and run every test that is provided
with VIXL, in both release and debug mode. It is a useful script for verifying
that all of VIXL's dependencies are in place and that VIXL is working as it
should.
By default, the `tools/test.py` script runs a linter to check that the source
code conforms with the code style guide, and to detect several common errors
that the compiler may not warn about. This is most useful for VIXL developers.
The linter has the following dependencies:
1. Git must be installed, and the VIXL project must be in a valid Git
repository, such as one produced using `git clone`.
2. `cpplint.py`, [as provided by Google][cpplint], must be available (and
executable) on the `PATH`.
It is possible to tell `tools/test.py` to skip the linter stage by passing
`--nolint`. This removes the dependency on `cpplint.py` and Git. The `--nolint`
option is implied if the VIXL project is a snapshot (with no `.git` directory).
Additionally, `tools/test.py` tests code formatting using `clang-format-4.0`,
and performs static analysis using `clang-tidy-4.0`. If you don't have these
tools, disable the test using `--noclang-format` or `--noclang-tidy`,
respectively.
Also note that the tests for the tracing features depend upon external `diff`
and `sed` tools. If these tools are not available in `PATH`, these tests will
fail.
Getting Started
---------------
We have separate guides for introducing VIXL, depending on what architecture you
are targeting. A guide for working with AArch32 can be found
[here][getting-started-aarch32], while the AArch64 guide is
[here][getting-started-aarch64]. Example source code is provided in the
[examples](examples) directory. You can build examples with either `scons
aarch32_examples` or `scons aarch64_examples` from the root directory, or use
`scons --help` to get a detailed list of available build targets.
[cpplint]: http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
"Google's cpplint.py script."
[vixl]: https://git.linaro.org/arm/vixl.git
"The VIXL repository at 'https://git.linaro.org'."
[getting-started-aarch32]: doc/aarch32/getting-started-aarch32.md
"Introduction to VIXL for AArch32."
[getting-started-aarch64]: doc/aarch64/getting-started-aarch64.md
"Introduction to VIXL for AArch64."

View File

@@ -1,597 +0,0 @@
# Copyright 2015, VIXL authors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ARM Limited nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import glob
import itertools
import os
from os.path import join
import platform
import subprocess
import sys
from collections import OrderedDict
root_dir = os.path.dirname(File('SConstruct').rfile().abspath)
sys.path.insert(0, join(root_dir, 'tools'))
import config
import util
from SCons.Errors import UserError
Help('''
Build system for the VIXL project.
See README.md for documentation and details about the build system.
''')
# We track top-level targets to automatically generate help and alias them.
class VIXLTargets:
def __init__(self):
self.targets = []
self.help_messages = []
def Add(self, target, help_message):
self.targets.append(target)
self.help_messages.append(help_message)
def Help(self):
res = ""
for i in range(len(self.targets)):
res += '\t{0:<{1}}{2:<{3}}\n'.format(
'scons ' + self.targets[i],
len('scons ') + max(map(len, self.targets)),
' : ' + self.help_messages[i],
len(' : ') + max(map(len, self.help_messages)))
return res
top_level_targets = VIXLTargets()
# Build options ----------------------------------------------------------------
# Store all the options in a dictionary.
# The SConstruct will check the build variables and construct the build
# environment as appropriate.
options = {
'all' : { # Unconditionally processed.
'CCFLAGS' : ['-Wall',
'-Werror',
'-fdiagnostics-show-option',
'-Wextra',
'-Wredundant-decls',
'-pedantic',
'-Wwrite-strings',
'-Wunused',
'-Wno-missing-noreturn'],
'CPPPATH' : [config.dir_src_vixl]
},
# 'build_option:value' : {
# 'environment_key' : 'values to append'
# },
'mode:debug' : {
'CCFLAGS' : ['-DVIXL_DEBUG', '-O0']
},
'mode:release' : {
'CCFLAGS' : ['-O3'],
},
'simulator:aarch64' : {
'CCFLAGS' : ['-DVIXL_INCLUDE_SIMULATOR_AARCH64'],
},
'symbols:on' : {
'CCFLAGS' : ['-g'],
'LINKFLAGS' : ['-g']
},
'negative_testing:on' : {
'CCFLAGS' : ['-DVIXL_NEGATIVE_TESTING']
},
'code_buffer_allocator:mmap' : {
'CCFLAGS' : ['-DVIXL_CODE_BUFFER_MMAP']
},
'code_buffer_allocator:malloc' : {
'CCFLAGS' : ['-DVIXL_CODE_BUFFER_MALLOC']
},
'ubsan:on' : {
'CCFLAGS': ['-fsanitize=undefined'],
'LINKFLAGS': ['-fsanitize=undefined']
}
}
# A `DefaultVariable` has a default value that depends on elements not known
# when variables are first evaluated.
# Each `DefaultVariable` has a handler that will compute the default value for
# the given environment.
def modifiable_flags_handler(env):
env['modifiable_flags'] = \
'on' if 'mode' in env and env['mode'] == 'debug' else 'off'
def symbols_handler(env):
env['symbols'] = 'on' if 'mode' in env and env['mode'] == 'debug' else 'off'
def Is32BitHost(env):
return env['host_arch'] in ['aarch32', 'i386']
def IsAArch64Host(env):
return env['host_arch'] == 'aarch64'
def CanTargetA32(env):
return 'a32' in env['target']
def CanTargetT32(env):
return 't32' in env['target']
def CanTargetAArch32(env):
return CanTargetA32(env) or CanTargetT32(env)
def CanTargetA64(env):
return 'a64' in env['target']
def CanTargetAArch64(env):
return CanTargetA64(env)
# By default, include the simulator only if AArch64 is targeted and we are not
# building VIXL natively for AArch64.
def simulator_handler(env):
if not IsAArch64Host(env) and CanTargetAArch64(env):
env['simulator'] = 'aarch64'
else:
env['simulator'] = 'none'
# 'mmap' is required for use with 'mprotect', which is needed for the tests
# (when running natively), so we use it by default where we can.
def code_buffer_allocator_handler(env):
directives = util.GetCompilerDirectives(env)
if '__linux__' in directives:
env['code_buffer_allocator'] = 'mmap'
else:
env['code_buffer_allocator'] = 'malloc'
# A validator checks the consistency of provided options against the environment.
def default_validator(env):
pass
def simulator_validator(env):
if env['simulator'] == 'aarch64' and not CanTargetAArch64(env):
raise UserError('Building an AArch64 simulator implies that VIXL targets '
'AArch64. Set `target` to include `aarch64` or `a64`.')
# Default variables may depend on each other, so we need this dictionary
# to be ordered.
vars_default_handlers = OrderedDict({
# variable_name : [ 'default val', 'handler', 'validator']
'symbols' : [ 'mode==debug', symbols_handler, default_validator ],
'modifiable_flags' : [ 'mode==debug', modifiable_flags_handler, default_validator],
'simulator' : [ 'on if the target architectures include AArch64 but '
'the host is not AArch64, else off',
simulator_handler, simulator_validator ],
'code_buffer_allocator' : [ 'mmap with __linux__, malloc otherwise',
code_buffer_allocator_handler, default_validator ]
})
def DefaultVariable(name, help, allowed_values):
help = '%s (%s)' % (help, '|'.join(allowed_values))
default_value = vars_default_handlers[name][0]
def validator(name, value, env):
if value != default_value and value not in allowed_values:
raise UserError('Invalid value for option {name}: {value}. '
'Valid values are: {allowed_values}'.format(
name=name, value=value, allowed_values=allowed_values))
return (name, help, default_value, validator)
def SortListVariable(iterator):
# Previously this code relied on the order of items in a list
# converted from a set. However in Python 3 the order changes each run.
# Here we do a custom partial sort to ensure that the build directory
# name is stable, the same across Python 2 and 3, and the same as the
# old code.
result = list(sorted(iterator))
result = sorted(result, key=lambda x: x == 't32', reverse=True)
result = sorted(result, key=lambda x: x == 'a32', reverse=True)
result = sorted(result, key=lambda x: x == 'a64', reverse=True)
return result
def AliasedListVariable(name, help, default_value, allowed_values, aliasing):
help = '%s (all|auto|comma-separated list) (any combination from [%s])' % \
(help, ', '.join(allowed_values))
def validator(name, value, env):
# Here list has been converted to space separated strings.
if value == '': return # auto
for v in value.split():
if v not in allowed_values:
raise UserError('Invalid value for %s: %s' % (name, value))
def converter(value):
if value == 'auto': return []
if value == 'all':
translated = [aliasing[v] for v in allowed_values]
return SortListVariable(itertools.chain.from_iterable(translated))
# The validator is run later hence the get.
translated = [aliasing.get(v, v) for v in value.split(',')]
return SortListVariable(itertools.chain.from_iterable(translated))
return (name, help, default_value, validator, converter)
vars = Variables()
# Define command line build options.
vars.AddVariables(
AliasedListVariable('target', 'Target ISA/Architecture', 'auto',
['aarch32', 'a32', 't32', 'aarch64', 'a64'],
{'aarch32' : ['a32', 't32'],
'a32' : ['a32'], 't32' : ['t32'],
'aarch64' : ['a64'], 'a64' : ['a64']}),
EnumVariable('mode', 'Build mode',
'release', allowed_values=config.build_options_modes),
EnumVariable('ubsan', 'Enable undefined behavior checks',
'off', allowed_values=['on', 'off']),
EnumVariable('negative_testing',
'Enable negative testing (needs exceptions)',
'off', allowed_values=['on', 'off']),
DefaultVariable('symbols', 'Include debugging symbols in the binaries',
['on', 'off']),
DefaultVariable('simulator', 'Simulators to include', ['aarch64', 'none']),
DefaultVariable('code_buffer_allocator',
'Configure the allocation mechanism in the CodeBuffer',
['malloc', 'mmap']),
('std',
'C++ standard. The standards tested are: %s.' % \
', '.join(config.tested_cpp_standards),
config.tested_cpp_standards[0]),
('compiler_wrapper', 'Command to prefix to the C and C++ compiler (e.g. ccache)', '')
)
# We use 'variant directories' to avoid recompiling multiple times when build
# options are changed, different build paths are used depending on the options
# set. These are the options that should be reflected in the build directory
# path.
options_influencing_build_path = [
'target', 'mode', 'symbols', 'compiler', 'std', 'simulator', 'negative_testing',
'code_buffer_allocator'
]
# Build helpers ----------------------------------------------------------------
def RetrieveEnvironmentVariables(env):
for key in ['CC', 'CXX', 'AR', 'RANLIB', 'LD']:
if os.getenv(key): env[key] = os.getenv(key)
if os.getenv('LD_LIBRARY_PATH'): env['LIBPATH'] = os.getenv('LD_LIBRARY_PATH')
if os.getenv('CCFLAGS'):
env.Append(CCFLAGS = os.getenv('CCFLAGS').split())
if os.getenv('CXXFLAGS'):
env.Append(CXXFLAGS = os.getenv('CXXFLAGS').split())
if os.getenv('LINKFLAGS'):
env.Append(LINKFLAGS = os.getenv('LINKFLAGS').split())
# The architecture targeted by default will depend on the compiler being
# used. 'host_arch' is extracted from the compiler while 'target' can be
# set by the user.
# By default, we target both AArch32 and AArch64 unless the compiler targets a
# 32-bit architecture. At the moment, we cannot build VIXL's AArch64 support on
# a 32-bit platform.
# TODO: Port VIXL to build on a 32-bit platform.
def target_handler(env):
# Auto detect
if Is32BitHost(env):
# We use list(set(...)) to keep the same order as if it was specified as
# an option.
env['target'] = SortListVariable(['a32', 't32'])
else:
env['target'] = SortListVariable(['a64', 'a32', 't32'])
def target_validator(env):
# TODO: Port VIXL64 to work on a 32-bit platform.
if Is32BitHost(env) and CanTargetAArch64(env):
raise UserError('Building VIXL for AArch64 in 32-bit is not supported. Set '
'`target` to `aarch32`')
# The target option is handled differently from the rest.
def ProcessTargetOption(env):
if env['target'] == []: target_handler(env)
if 'a32' in env['target']: env['CCFLAGS'] += ['-DVIXL_INCLUDE_TARGET_A32']
if 't32' in env['target']: env['CCFLAGS'] += ['-DVIXL_INCLUDE_TARGET_T32']
if 'a64' in env['target']: env['CCFLAGS'] += ['-DVIXL_INCLUDE_TARGET_A64']
target_validator(env)
def ProcessBuildOptions(env):
# 'all' is unconditionally processed.
if 'all' in options:
for var in options['all']:
if var in env and env[var]:
env[var] += options['all'][var]
else:
env[var] = options['all'][var]
# The target option *must* be processed before the options defined in
# vars_default_handlers.
ProcessTargetOption(env)
# Other build options must match 'option:value'
env_dict = env.Dictionary()
# First apply the default variables handlers in order.
for key, value in vars_default_handlers.items():
default = value[0]
handler = value[1]
if env_dict.get(key) == default:
handler(env_dict)
# Second, run the series of validators, to check for errors.
for _, value in vars_default_handlers.items():
validator = value[2]
validator(env)
for key in env_dict.keys():
# Then update the environment according to the value of the variable.
key_val_couple = key + ':%s' % env_dict[key]
if key_val_couple in options:
for var in options[key_val_couple]:
env[var] += options[key_val_couple][var]
def ConfigureEnvironmentForCompiler(env):
compiler = util.CompilerInformation(env)
if compiler == 'clang':
# These warnings only work for Clang.
# -Wimplicit-fallthrough only works when compiling the code base as C++11 or
# newer. The compiler does not complain if the option is passed when
# compiling earlier C++ standards.
env.Append(CPPFLAGS = ['-Wimplicit-fallthrough', '-Wshorten-64-to-32'])
# The '-Wunreachable-code' flag breaks builds for clang 3.4.
if compiler != 'clang-3.4':
env.Append(CPPFLAGS = ['-Wunreachable-code'])
if env['ubsan'] == 'on':
env.Append(LINKFLAGS = ['-fuse-ld=lld'])
# GCC 4.8 has a bug which produces a warning saying that an anonymous Operand
# object might be used uninitialized:
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=57045
# The bug does not seem to appear in GCC 4.7, or in debug builds with GCC 4.8.
if env['mode'] == 'release':
if compiler == 'gcc-4.8':
env.Append(CPPFLAGS = ['-Wno-maybe-uninitialized'])
# GCC 6 and higher is able to detect throwing from inside a destructor and
# reports a warning. However, if negative testing is enabled then assertions
# will throw exceptions.
if env['negative_testing'] == 'on' and env['mode'] == 'debug' \
and compiler >= 'gcc-6':
env.Append(CPPFLAGS = ['-Wno-terminate'])
# Suggest missing override keywords on methods.
if compiler >= 'gcc-5':
env.Append(CPPFLAGS = ['-Wsuggest-override'])
elif compiler >= 'clang-3.6':
env.Append(CPPFLAGS = ['-Winconsistent-missing-override'])
def ConfigureEnvironment(env):
RetrieveEnvironmentVariables(env)
env['compiler'] = env['CXX']
if env['compiler_wrapper'] != '':
env['CXX'] = env['compiler_wrapper'] + ' ' + env['CXX']
env['CC'] = env['compiler_wrapper'] + ' ' + env['CC']
env['host_arch'] = util.GetHostArch(env)
ProcessBuildOptions(env)
if 'std' in env:
env.Append(CPPFLAGS = ['-std=' + env['std']])
std_path = env['std']
ConfigureEnvironmentForCompiler(env)
def TargetBuildDir(env):
# Build-time option values are embedded in the build path to avoid requiring a
# full build when an option changes.
build_dir = config.dir_build
for option in options_influencing_build_path:
option_value = ''.join(env[option]) if option in env else ''
build_dir = join(build_dir, option + '_'+ option_value)
return build_dir
def PrepareVariantDir(location, build_dir):
location_build_dir = join(build_dir, location)
VariantDir(location_build_dir, location)
return location_build_dir
def VIXLLibraryTarget(env):
build_dir = TargetBuildDir(env)
# Create a link to the latest build directory.
# Use `-r` to avoid failure when `latest` exists and is a directory.
subprocess.check_call(["rm", "-rf", config.dir_build_latest])
util.ensure_dir(build_dir)
subprocess.check_call(["ln", "-s", build_dir, config.dir_build_latest])
# Source files are in `src` and in `src/aarch64/`.
variant_dir_vixl = PrepareVariantDir(join('src'), build_dir)
sources = [Glob(join(variant_dir_vixl, '*.cc'))]
if CanTargetAArch32(env):
variant_dir_aarch32 = PrepareVariantDir(join('src', 'aarch32'), build_dir)
sources.append(Glob(join(variant_dir_aarch32, '*.cc')))
if CanTargetAArch64(env):
variant_dir_aarch64 = PrepareVariantDir(join('src', 'aarch64'), build_dir)
sources.append(Glob(join(variant_dir_aarch64, '*.cc')))
return env.Library(join(build_dir, 'vixl'), sources)
# Build ------------------------------------------------------------------------
# The VIXL library, built by default.
env = Environment(variables = vars,
BUILDERS = {
'Markdown': Builder(action = 'markdown $SOURCE > $TARGET',
suffix = '.html')
}, ENV = os.environ)
# Abort the build if any command line option is unknown or invalid.
unknown_build_options = vars.UnknownVariables()
if unknown_build_options:
print('Unknown build options: ' + str(unknown_build_options.keys()))
Exit(1)
if env['negative_testing'] == 'on' and env['mode'] != 'debug':
print('negative_testing only works in debug mode')
Exit(1)
ConfigureEnvironment(env)
Help(vars.GenerateHelpText(env))
libvixl = VIXLLibraryTarget(env)
Default(libvixl)
env.Alias('libvixl', libvixl)
top_level_targets.Add('', 'Build the VIXL library.')
# Common test code.
test_build_dir = PrepareVariantDir('test', TargetBuildDir(env))
test_objects = [env.Object(Glob(join(test_build_dir, '*.cc')))]
# AArch32 support
if CanTargetAArch32(env):
# The examples.
aarch32_example_names = util.ListCCFilesWithoutExt(config.dir_aarch32_examples)
aarch32_examples_build_dir = PrepareVariantDir('examples/aarch32', TargetBuildDir(env))
aarch32_example_targets = []
for example in aarch32_example_names:
prog = env.Program(join(aarch32_examples_build_dir, example),
join(aarch32_examples_build_dir, example + '.cc'),
LIBS=[libvixl])
aarch32_example_targets.append(prog)
env.Alias('aarch32_examples', aarch32_example_targets)
top_level_targets.Add('aarch32_examples', 'Build the examples for AArch32.')
# The benchmarks
aarch32_benchmark_names = util.ListCCFilesWithoutExt(config.dir_aarch32_benchmarks)
aarch32_benchmarks_build_dir = PrepareVariantDir('benchmarks/aarch32', TargetBuildDir(env))
aarch32_benchmark_targets = []
for bench in aarch32_benchmark_names:
prog = env.Program(join(aarch32_benchmarks_build_dir, bench),
join(aarch32_benchmarks_build_dir, bench + '.cc'),
LIBS=[libvixl])
aarch32_benchmark_targets.append(prog)
env.Alias('aarch32_benchmarks', aarch32_benchmark_targets)
top_level_targets.Add('aarch32_benchmarks', 'Build the benchmarks for AArch32.')
# The tests.
test_aarch32_build_dir = PrepareVariantDir(join('test', 'aarch32'), TargetBuildDir(env))
test_objects.append(env.Object(
Glob(join(test_aarch32_build_dir, '*.cc')),
CPPPATH = env['CPPPATH'] + [config.dir_tests],
CCFLAGS = [flag for flag in env['CCFLAGS'] if flag != '-O3']))
# AArch64 support
if CanTargetAArch64(env):
# The benchmarks.
aarch64_benchmark_names = util.ListCCFilesWithoutExt(config.dir_aarch64_benchmarks)
aarch64_benchmarks_build_dir = PrepareVariantDir('benchmarks/aarch64', TargetBuildDir(env))
aarch64_benchmark_targets = []
bench_utils = env.Object(join(aarch64_benchmarks_build_dir, 'bench-utils.o'),
join(aarch64_benchmarks_build_dir, 'bench-utils.cc'))
for bench in aarch64_benchmark_names:
if bench != 'bench-utils':
prog = env.Program(join(aarch64_benchmarks_build_dir, bench),
[join(aarch64_benchmarks_build_dir, bench + '.cc'), bench_utils],
LIBS=[libvixl])
aarch64_benchmark_targets.append(prog)
env.Alias('aarch64_benchmarks', aarch64_benchmark_targets)
top_level_targets.Add('aarch64_benchmarks', 'Build the benchmarks for AArch64.')
# The examples.
aarch64_example_names = util.ListCCFilesWithoutExt(config.dir_aarch64_examples)
aarch64_examples_build_dir = PrepareVariantDir('examples/aarch64', TargetBuildDir(env))
aarch64_example_targets = []
for example in aarch64_example_names:
prog = env.Program(join(aarch64_examples_build_dir, example),
join(aarch64_examples_build_dir, example + '.cc'),
LIBS=[libvixl])
aarch64_example_targets.append(prog)
env.Alias('aarch64_examples', aarch64_example_targets)
top_level_targets.Add('aarch64_examples', 'Build the examples for AArch64.')
# The tests.
test_aarch64_build_dir = PrepareVariantDir(join('test', 'aarch64'), TargetBuildDir(env))
test_objects.append(env.Object(
Glob(join(test_aarch64_build_dir, '*.cc')),
CPPPATH = env['CPPPATH'] + [config.dir_tests],
CCFLAGS = [flag for flag in env['CCFLAGS'] if flag != '-O3']))
# The test requires building the example files with specific options, so we
# create a separate variant dir for the example objects built this way.
test_aarch64_examples_vdir = join(TargetBuildDir(env), 'test', 'aarch64', 'test_examples')
VariantDir(test_aarch64_examples_vdir, '.')
test_aarch64_examples_obj = env.Object(
[Glob(join(test_aarch64_examples_vdir, join('test', 'aarch64', 'examples', '*.cc'))),
Glob(join(test_aarch64_examples_vdir, join('examples/aarch64', '*.cc')))],
CCFLAGS = env['CCFLAGS'] + ['-DTEST_EXAMPLES'],
CPPPATH = env['CPPPATH'] + [config.dir_aarch64_examples] + [config.dir_tests])
test_objects.append(test_aarch64_examples_obj)
test = env.Program(join(test_build_dir, 'test-runner'), test_objects,
LIBS=[libvixl])
env.Alias('tests', test)
top_level_targets.Add('tests', 'Build the tests.')
env.Alias('all', top_level_targets.targets)
top_level_targets.Add('all', 'Build all the targets above.')
Help('\n\nAvailable top level targets:\n' + top_level_targets.Help())
extra_targets = VIXLTargets()
# Build documentation
doc = [
env.Markdown('README.md'),
env.Markdown('doc/changelog.md'),
env.Markdown('doc/aarch32/getting-started-aarch32.md'),
env.Markdown('doc/aarch32/design/code-generation-aarch32.md'),
env.Markdown('doc/aarch32/design/literal-pool-aarch32.md'),
env.Markdown('doc/aarch64/supported-instructions-aarch64.md'),
env.Markdown('doc/aarch64/getting-started-aarch64.md'),
env.Markdown('doc/aarch64/topics/ycm.md'),
env.Markdown('doc/aarch64/topics/extending-the-disassembler.md'),
env.Markdown('doc/aarch64/topics/index.md'),
]
env.Alias('doc', doc)
extra_targets.Add('doc', 'Convert documentation to HTML (requires the '
'`markdown` program).')
Help('\nAvailable extra targets:\n' + extra_targets.Help())

View File

@@ -1,30 +0,0 @@
Versioning
==========
Since version 3.0.0, VIXL uses [Semantic Versioning 2.0.0][semver].
Briefly:
- Backwards-incompatible changes update the _major_ version.
- New features update the _minor_ version.
- Bug fixes update the _patch_ version.
Why 3.0.0?
----------
VIXL was originally released as 1.x using snapshot releases. When we moved VIXL
into Linaro, we started working directly on `master` and stopped tagging
named releases. However, we informally called this "VIXL 2", so we are skipping
2.0.0 to avoid potential confusion.
Using `master`
--------------
Users who want to take the latest development version of VIXL can still take
commits from `master`. Our day-to-day development process hasn't changed and
these commits should still pass their own tests. However, note that commits not
explicitly tagged with a given version should be considered to be unversioned,
with no backwards-compatibility guarantees.
[semver]: https://semver.org/spec/v2.0.0.html
"Semantic Versioning 2.0.0 Specification"

View File

@@ -1,108 +0,0 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>  // for atoi() and exit()
#include <sys/time.h>
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"
using namespace vixl;
using namespace vixl::aarch32;
#ifdef VIXL_DEBUG
static const int kDefaultIterationsCount = 10000;
#else
static const int kDefaultIterationsCount = 100000;
#endif
// This program focuses on the emission of branches and veneers.
void benchmark(int iterations, InstructionSet isa) {
const int buffer_size = 256 * KBytes;
timeval start;
gettimeofday(&start, NULL);
MacroAssembler masm(buffer_size);
masm.UseInstructionSet(isa);
#define __ masm.
Label target_1, target_2, target_3, target_4;
for (int i = 0; i < iterations; i++) {
__ B(&target_1);
}
__ Bind(&target_1);
for (int i = 0; i < iterations; i++) {
__ B(eq, &target_2);
}
__ Bind(&target_2);
for (int i = 0; i < iterations; i++) {
__ Bl(&target_3);
}
__ Bind(&target_3);
for (int i = 0; i < iterations; i++) {
__ Blx(&target_4);
}
__ Bind(&target_4);
masm.FinalizeCode();
timeval end;
gettimeofday(&end, NULL);
double delta = (end.tv_sec - start.tv_sec) +
static_cast<double>(end.tv_usec - start.tv_usec) / 1000000;
printf("%s: time for %d iterations: %gs\n",
isa == T32 ? "T32" : "A32",
iterations,
delta);
}
int main(int argc, char* argv[]) {
int iterations = 0;
switch (argc) {
case 1:
iterations = kDefaultIterationsCount;
break;
case 2:
iterations = atoi(argv[1]);
break;
default:
printf("Usage: %s [#iterations]\n", argv[0]);
exit(1);
}
#ifdef VIXL_INCLUDE_TARGET_A32
benchmark(iterations, A32);
#endif
#ifdef VIXL_INCLUDE_TARGET_T32
benchmark(iterations, T32);
#endif
return 0;
}

View File

@@ -1,98 +0,0 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"
using namespace vixl;
using namespace vixl::aarch32;
static const int kDefaultIterationCount = 100000;
// This program focuses on emitting branch instructions targeting a label bound
// very closely. Here the MacroAssembler is used. This exercises label binding
// and patching mechanisms, as well as the veneer resolving mechanisms for
// branches not requiring veneers.
void benchmark(int iterations, InstructionSet isa) {
const int buffer_size = 256 * KBytes;
timeval start;
gettimeofday(&start, NULL);
MacroAssembler masm(buffer_size);
masm.UseInstructionSet(isa);
#define __ masm.
for (int i = 0; i < iterations; i++) {
Label target;
__ B(&target);
__ B(eq, &target);
__ Bl(&target);
__ Blx(&target);
__ Bind(&target);
}
masm.FinalizeCode();
timeval end;
gettimeofday(&end, NULL);
double delta = (end.tv_sec - start.tv_sec) +
static_cast<double>(end.tv_usec - start.tv_usec) / 1000000;
printf("%s: time for %d iterations: %gs\n",
isa == T32 ? "T32" : "A32",
iterations,
delta);
}
int main(int argc, char* argv[]) {
int iterations = 0;
switch (argc) {
case 1:
iterations = kDefaultIterationCount;
break;
case 2:
iterations = atoi(argv[1]);
break;
default:
printf("Usage: %s [#iterations]\n", argv[0]);
exit(1);
}
#ifdef VIXL_INCLUDE_TARGET_A32
benchmark(iterations, A32);
#endif
#ifdef VIXL_INCLUDE_TARGET_T32
benchmark(iterations, T32);
#endif
return 0;
}

View File

@@ -1,93 +0,0 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"
using namespace vixl;
using namespace vixl::aarch32;
static const unsigned kDefaultInstructionCount = 100000;
// This program focuses on emitting simple instructions.
//
// This code will emit a given number of 'add r0, r1, r2' in a buffer.
// This code therefore focuses on Emit and Operand.
void benchmark(unsigned instructions, InstructionSet isa) {
const unsigned buffer_size = 256 * KBytes;
timeval start;
gettimeofday(&start, NULL);
MacroAssembler masm(buffer_size);
masm.UseInstructionSet(isa);
#define __ masm.
for (unsigned i = 0; i < instructions; ++i) {
__ Add(r0, r1, Operand(r2));
}
masm.FinalizeCode();
timeval end;
gettimeofday(&end, NULL);
double delta = (end.tv_sec - start.tv_sec) +
static_cast<double>(end.tv_usec - start.tv_usec) / 1000000;
printf("%s: time for %d instructions: %gs\n",
isa == T32 ? "T32" : "A32",
instructions,
delta);
}
int main(int argc, char* argv[]) {
unsigned instructions = 0;
switch (argc) {
case 1:
instructions = kDefaultInstructionCount;
break;
case 2:
instructions = atoi(argv[1]);
break;
default:
printf("Usage: %s [#instructions]\n", argv[0]);
exit(1);
}
#ifdef VIXL_INCLUDE_TARGET_A32
benchmark(instructions, A32);
#endif
#ifdef VIXL_INCLUDE_TARGET_T32
benchmark(instructions, T32);
#endif
return 0;
}

View File

@@ -1,105 +0,0 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"
using namespace vixl;
using namespace vixl::aarch32;
static const int kDefaultIterationCount = 1000;
static const int kDefaultLiteralCount = 100;
// This program focuses on emitting literal loads and forcing literal pool
// emission in between. Here the MacroAssembler is used. This exercises the
// literal pool emission and management mechanisms.
void benchmark(int iterations, int literals, InstructionSet isa) {
const int buffer_size = 256 * KBytes;
timeval start;
gettimeofday(&start, NULL);
MacroAssembler masm(buffer_size);
masm.UseInstructionSet(isa);
#define __ masm.
// Load a number of distinct literals, for a number of iterations, forcing
// pool emission in between.
for (int i = 0; i < iterations; i++) {
for (int j = 0; j < literals; j++) {
__ Ldr(r0, j);
__ FinalizeCode();
}
}
timeval end;
gettimeofday(&end, NULL);
double delta = (end.tv_sec - start.tv_sec) +
static_cast<double>(end.tv_usec - start.tv_usec) / 1000000;
printf("%s: time for %d iterations: %gs\n",
isa == T32 ? "T32" : "A32",
iterations,
delta);
}
int main(int argc, char* argv[]) {
int iterations = 0;
int literals = 0;
switch (argc) {
case 1:
iterations = kDefaultIterationCount;
literals = kDefaultLiteralCount;
break;
case 2:
iterations = atoi(argv[1]);
literals = kDefaultLiteralCount;
break;
case 3:
iterations = atoi(argv[1]);
literals = atoi(argv[2]);
break;
default:
printf("Usage: %s [#iterations] [#literals]\n", argv[0]);
exit(1);
}
#ifdef VIXL_INCLUDE_TARGET_A32
benchmark(iterations, literals, A32);
#endif
#ifdef VIXL_INCLUDE_TARGET_T32
benchmark(iterations, literals, T32);
#endif
return 0;
}

View File

@@ -1,78 +0,0 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "globals-vixl.h"
#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "bench-utils.h"
using namespace vixl;
using namespace vixl::aarch64;
// This program focuses on the emission of branches and veneers.
int main(int argc, char* argv[]) {
BenchCLI cli(argc, argv);
if (cli.ShouldExitEarly()) return cli.GetExitCode();
const size_t buffer_size = 256 * KBytes;
const size_t instructions_per_iteration = 4;
const size_t max_buffer_iterations =
buffer_size / (instructions_per_iteration * kInstructionSize);
MacroAssembler masm(buffer_size);
BenchTimer timer;
size_t iterations = 0;
do {
masm.Reset();
Label target_1, target_2, target_3, target_4;
for (size_t i = 0; i < max_buffer_iterations; i++) {
masm.B(&target_1);
}
masm.Bind(&target_1);
for (size_t i = 0; i < max_buffer_iterations; i++) {
masm.B(eq, &target_2);
}
masm.Bind(&target_2);
for (size_t i = 0; i < max_buffer_iterations; i++) {
masm.Cbz(x2, &target_3);
}
masm.Bind(&target_3);
for (size_t i = 0; i < max_buffer_iterations; i++) {
masm.Tbz(x3, 2, &target_4);
}
masm.Bind(&target_4);
masm.FinalizeCode();
iterations++;
} while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));
cli.PrintResults(iterations, timer.GetElapsedSeconds());
return cli.GetExitCode();
}

View File

@@ -1,66 +0,0 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "globals-vixl.h"
#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "bench-utils.h"
using namespace vixl;
using namespace vixl::aarch64;
// Bind many branches to the same label, like bench-branch.cc but with a single
// label. This stresses the label-linking mechanisms.
int main(int argc, char* argv[]) {
BenchCLI cli(argc, argv);
if (cli.ShouldExitEarly()) return cli.GetExitCode();
const size_t buffer_size = 256 * KBytes;
const size_t buffer_instruction_count = buffer_size / kInstructionSize;
MacroAssembler masm(buffer_size);
BenchTimer timer;
size_t iterations = 0;
do {
masm.Reset();
{
ExactAssemblyScope scope(&masm, buffer_size);
Label target;
for (size_t i = 0; i < buffer_instruction_count; ++i) {
masm.b(&target);
}
masm.bind(&target);
}
masm.FinalizeCode();
iterations++;
} while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));
cli.PrintResults(iterations, timer.GetElapsedSeconds());
return cli.GetExitCode();
}

View File

@@ -1,70 +0,0 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "globals-vixl.h"
#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "bench-utils.h"
using namespace vixl;
using namespace vixl::aarch64;
// This program focuses on emitting branch instructions targeting a label bound
// very closely. Here the MacroAssembler is used. This exercises label binding
// and patching mechanisms, as well as the veneer resolving mechanisms for
// branches not requiring veneers.
int main(int argc, char* argv[]) {
BenchCLI cli(argc, argv);
if (cli.ShouldExitEarly()) return cli.GetExitCode();
const size_t buffer_size = 256 * KBytes;
const size_t instructions_per_iteration = 4;
const size_t max_buffer_iterations =
buffer_size / (instructions_per_iteration * kInstructionSize);
MacroAssembler masm(buffer_size);
BenchTimer timer;
size_t iterations = 0;
do {
masm.Reset();
for (size_t i = 0; i < max_buffer_iterations; ++i) {
Label target;
masm.B(&target);
masm.B(eq, &target);
masm.Cbz(x2, &target);
masm.Tbz(x3, 2, &target);
masm.Bind(&target);
}
masm.FinalizeCode();
iterations++;
} while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));
cli.PrintResults(iterations, timer.GetElapsedSeconds());
return cli.GetExitCode();
}

View File

@@ -1,71 +0,0 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "globals-vixl.h"
#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "bench-utils.h"
using namespace vixl;
using namespace vixl::aarch64;
// This program focuses on emitting branch instructions.
//
// This code will emit a given number of branch immediate to the next
// instructions in a fixed size buffer, looping over the buffer if necessary.
// This code therefore focuses on Emit and label binding/patching.
int main(int argc, char* argv[]) {
BenchCLI cli(argc, argv);
if (cli.ShouldExitEarly()) return cli.GetExitCode();
const size_t buffer_size = 256 * KBytes;
const size_t buffer_instruction_count = buffer_size / kInstructionSize;
MacroAssembler masm(buffer_size);
// We emit a branch to the next instruction.
BenchTimer timer;
size_t iterations = 0;
do {
masm.Reset();
{
ExactAssemblyScope scope(&masm, buffer_size);
for (size_t i = 0; i < buffer_instruction_count; ++i) {
Label target;
masm.b(&target);
masm.bind(&target);
}
}
masm.FinalizeCode();
iterations++;
} while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));
cli.PrintResults(iterations, timer.GetElapsedSeconds());
return cli.GetExitCode();
}

View File

@@ -1,67 +0,0 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "globals-vixl.h"
#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "bench-utils.h"
using namespace vixl;
using namespace vixl::aarch64;
// This program focuses on emitting simple instructions.
//
// This code will emit a given number of 'add x0, x1, x2' in a fixed size
// buffer, looping over the buffer if necessary. This code therefore focuses
// on Emit and Operand.
int main(int argc, char* argv[]) {
BenchCLI cli(argc, argv);
if (cli.ShouldExitEarly()) return cli.GetExitCode();
const size_t buffer_size = 256 * KBytes;
const size_t buffer_instruction_count = buffer_size / kInstructionSize;
MacroAssembler masm(buffer_size);
BenchTimer timer;
size_t iterations = 0;
do {
masm.Reset();
{
ExactAssemblyScope scope(&masm, buffer_size);
for (size_t i = 0; i < buffer_instruction_count; ++i) {
masm.add(x0, x1, Operand(x2));
}
}
masm.FinalizeCode();
iterations++;
} while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));
cli.PrintResults(iterations, timer.GetElapsedSeconds());
return cli.GetExitCode();
}

View File

@@ -1,93 +0,0 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "globals-vixl.h"
#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "bench-utils.h"
using namespace vixl;
using namespace vixl::aarch64;
// Like PrintDisassembler, but to prevent the I/O overhead from dominating the
// benchmark, don't actually print anything.
class BenchDisassembler : public Disassembler {
public:
BenchDisassembler() : Disassembler(), generated_chars_(0) {}
size_t GetGeneratedCharCount() const { return generated_chars_; }
protected:
virtual void ProcessOutput(const Instruction* instr) VIXL_OVERRIDE {
USE(instr);
generated_chars_ += strlen(GetOutput());
}
size_t generated_chars_;
};
// This program measures the performance of the disassembler, using the same
// code sequence used in bench-mixed-masm.cc.
int main(int argc, char* argv[]) {
BenchCLI cli(argc, argv);
if (cli.ShouldExitEarly()) return cli.GetExitCode();
const size_t buffer_size = 256 * KBytes;
MacroAssembler masm(buffer_size);
masm.SetCPUFeatures(CPUFeatures::All());
BenchCodeGenerator generator(&masm);
masm.Reset();
generator.Generate(buffer_size);
masm.FinalizeCode();
const Instruction* start =
masm.GetBuffer()->GetStartAddress<const Instruction*>();
const Instruction* end =
masm.GetBuffer()->GetEndAddress<const Instruction*>();
Decoder decoder;
BenchDisassembler disasm;
decoder.AppendVisitor(&disasm);
BenchTimer timer;
size_t iterations = 0;
size_t generated_chars = 0;
do {
decoder.Decode(start, end);
generated_chars += disasm.GetGeneratedCharCount();
iterations++;
} while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));
printf("Disassembled %" PRIu64 " characters.\n",
static_cast<uint64_t>(generated_chars));
cli.PrintResults(iterations, timer.GetElapsedSeconds());
return cli.GetExitCode();
}

View File

@@ -1,62 +0,0 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "globals-vixl.h"
#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "bench-utils.h"
using namespace vixl;
using namespace vixl::aarch64;
// This program measures code generation time using the MacroAssembler. It aims
// to be representative of realistic usage, but is not based on real traces.
int main(int argc, char* argv[]) {
BenchCLI cli(argc, argv);
if (cli.ShouldExitEarly()) return cli.GetExitCode();
const size_t buffer_size = 256 * KBytes;
MacroAssembler masm(buffer_size);
masm.SetCPUFeatures(CPUFeatures::All());
BenchCodeGenerator generator(&masm);
BenchTimer timer;
size_t iterations = 0;
do {
masm.Reset();
generator.Generate(buffer_size);
masm.FinalizeCode();
iterations++;
} while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));
cli.PrintResults(iterations, timer.GetElapsedSeconds());
return cli.GetExitCode();
}

View File

@@ -1,96 +0,0 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "globals-vixl.h"
#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"
#include "bench-utils.h"
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
using namespace vixl;
using namespace vixl::aarch64;
// Like PrintDisassembler, but to prevent the I/O overhead from dominating the
// benchmark, don't actually print anything.
class BenchDisassembler : public Disassembler {
public:
BenchDisassembler() : Disassembler(), generated_chars_(0) {}
size_t GetGeneratedCharCount() const { return generated_chars_; }
protected:
virtual void ProcessOutput(const Instruction* instr) VIXL_OVERRIDE {
USE(instr);
generated_chars_ += strlen(GetOutput());
}
size_t generated_chars_;
};
// This program measures the performance of the simulator, using the same
// code sequence used in bench-mixed-masm.cc.
int main(int argc, char* argv[]) {
BenchCLI cli(argc, argv);
if (cli.ShouldExitEarly()) return cli.GetExitCode();
const size_t buffer_size = 256 * KBytes;
MacroAssembler masm(buffer_size);
masm.SetCPUFeatures(CPUFeatures::All());
BenchCodeGenerator generator(&masm);
masm.Reset();
generator.Generate(buffer_size);
masm.FinalizeCode();
const Instruction* start =
masm.GetBuffer()->GetStartAddress<const Instruction*>();
Decoder decoder;
Simulator simulator(&decoder);
simulator.SetCPUFeatures(CPUFeatures::All());
BenchTimer timer;
size_t iterations = 0;
do {
simulator.RunFrom(start);
iterations++;
} while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));
cli.PrintResults(iterations, timer.GetElapsedSeconds());
return cli.GetExitCode();
}
#else // VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
printf("This benchmark requires AArch64 simulator support.\n");
return EXIT_FAILURE;
}
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64

View File

@@ -1,399 +0,0 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <vector>
#include "globals-vixl.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "bench-utils.h"
using namespace vixl;
using namespace vixl::aarch64;
#define __ masm_->
const Register BenchCodeGenerator::scratch = x28;
Register BenchCodeGenerator::PickR(unsigned size_in_bits) {
// Only select caller-saved registers [x0, x15].
return Register(static_cast<unsigned>(GetRandomBits(4)), size_in_bits);
}
VRegister BenchCodeGenerator::PickV(unsigned size_in_bits) {
// Only select caller-saved registers [v0, v7] or [v16, v31].
// The resulting distribution is not uniform.
unsigned code = static_cast<unsigned>(GetRandomBits(5));
if (code < 16) code &= 0x7; // [v8, v15] -> [v0, v7]
return VRegister(code, size_in_bits);
}
uint64_t BenchCodeGenerator::GetRandomBits(int bits) {
VIXL_ASSERT((bits >= 0) && (bits <= 64));
uint64_t result = 0;
while (bits >= 32) {
// For big chunks, call jrand48 directly.
result = (result << 32) | jrand48(rand_state_); // [-2^31, 2^31]
bits -= 32;
}
if (bits == 0) return result;
// We often only want a few bits at a time, so use stored entropy to avoid
// frequent calls to jrand48.
if (bits > rnd_bits_) {
// We want more bits than we have.
result = (result << rnd_bits_) | rnd_;
bits -= rnd_bits_;
rnd_ = static_cast<uint32_t>(jrand48(rand_state_)); // [-2^31, 2^31]
rnd_bits_ = 32;
}
VIXL_ASSERT(bits <= rnd_bits_);
result = (result << bits) | (rnd_ % (UINT32_C(1) << bits));
rnd_ >>= bits;
rnd_bits_ -= bits;
return result;
}
unsigned BenchCodeGenerator::PickRSize() {
return PickBool() ? kWRegSize : kXRegSize;
}
unsigned BenchCodeGenerator::PickFPSize() {
uint64_t entropy = GetRandomBits(4);
// Doubles and floats are common in most languages, so use half-precision
// types only rarely.
if (entropy == 0) return kHRegSize;
return ((entropy & 1) == 0) ? kSRegSize : kDRegSize;
}
void BenchCodeGenerator::Generate(size_t min_size_in_bytes) {
Label start;
__ Bind(&start);
call_depth_++;
GeneratePrologue();
while (masm_->GetSizeOfCodeGeneratedSince(&start) < min_size_in_bytes) {
GenerateArbitrarySequence();
}
GenerateEpilogue();
call_depth_--;
// Make sure that any labels (created by GenerateBranchSequence) are bound
// before we exit.
if (call_depth_ == 0) BindAllPendingLabels();
}
void BenchCodeGenerator::GeneratePrologue() {
// Construct a normal frame.
VIXL_ASSERT(masm_->StackPointer().Is(sp));
__ Push(lr, x29); // x29 is the frame pointer (fp).
__ Mov(x29, sp);
VIXL_ASSERT(call_depth_ > 0);
if (call_depth_ == 1) {
__ Push(scratch, xzr);
// Claim space to use for load and stores.
// - We need at least 4 * kQRegSize bytes for Ld4/St4.
// - The architecture requires that we allocate a multiple of 16 bytes.
// - There is no hard upper limit, but the Simulator has a limited stack
// space.
__ Claim((4 * kQRegSize) + (16 * GetRandomBits(3)));
__ Mov(scratch, sp);
}
}
void BenchCodeGenerator::GenerateEpilogue() {
VIXL_ASSERT(call_depth_ > 0);
if (call_depth_ == 1) {
__ Sub(sp, x29, 2 * kXRegSizeInBytes); // Drop the scratch space.
__ Pop(xzr, scratch);
}
__ Pop(x29, lr);
__ Ret();
}
void BenchCodeGenerator::GenerateArbitrarySequence() {
// Bind pending labels, and remove them from the list.
// Recently-linked labels are much more likely to be bound than old ones. This
// should produce a mix of long- (veneered) and short-range branches.
uint32_t bind_mask = static_cast<uint32_t>(
GetRandomBits(8) | (GetRandomBits(7) << 1) | (GetRandomBits(6) << 2));
BindPendingLabels(bind_mask);
// If we are at the top call level (call_depth_ == 1), generate nested calls
// 1/4 of the time, and halve the chance for each call level below that.
VIXL_ASSERT(call_depth_ > 0);
if (GetRandomBits(call_depth_ + 1) == 0) {
GenerateCallReturnSequence();
return;
}
// These weightings should be roughly representative of real functions.
switch (GetRandomBits(4)) {
case 0x0:
case 0x1:
GenerateTrivialSequence();
return;
case 0x2:
case 0x3:
case 0x4:
case 0x5:
GenerateOperandSequence();
return;
case 0x6:
case 0x7:
case 0x8:
GenerateMemOperandSequence();
return;
case 0x9:
case 0xa:
case 0xb:
GenerateImmediateSequence();
return;
case 0xc:
case 0xd:
GenerateBranchSequence();
return;
case 0xe:
GenerateFPSequence();
return;
case 0xf:
GenerateNEONSequence();
return;
}
}
void BenchCodeGenerator::GenerateTrivialSequence() {
unsigned size = PickRSize();
__ Asr(PickR(size), PickR(size), 4);
__ Bfi(PickR(size), PickR(size), 5, 14);
__ Bfc(PickR(size), 5, 14);
__ Cinc(PickR(size), PickR(size), ge);
__ Cinv(PickR(size), PickR(size), ne);
__ Cls(PickR(size), PickR(size));
__ Cneg(PickR(size), PickR(size), lt);
__ Mrs(PickX(), NZCV);
__ Nop();
__ Mul(PickR(size), PickR(size), PickR(size));
__ Rbit(PickR(size), PickR(size));
__ Rev(PickR(size), PickR(size));
__ Sdiv(PickR(size), PickR(size), PickR(size));
if (!labels_.empty()) {
__ Adr(PickX(), labels_.begin()->target);
}
}
void BenchCodeGenerator::GenerateOperandSequence() {
unsigned size = PickRSize();
// The cast to Operand is normally implicit for simple registers, but we
// explicitly specify it in every case here to ensure that the benchmark does
// what we expect.
__ And(PickR(size), PickR(size), Operand(PickR(size)));
__ Bics(PickR(size), PickR(size), Operand(PickR(size)));
__ Orr(PickR(size), PickR(size), Operand(PickR(size)));
__ Eor(PickR(size), PickR(size), Operand(PickR(size)));
__ Tst(PickR(size), Operand(PickR(size)));
__ Eon(PickR(size), PickR(size), Operand(PickR(size)));
__ Cmp(PickR(size), Operand(PickR(size)));
__ Negs(PickR(size), Operand(PickR(size)));
__ Mvn(PickR(size), Operand(PickR(size)));
__ Ccmp(PickR(size), Operand(PickR(size)), NoFlag, eq);
__ Ccmn(PickR(size), Operand(PickR(size)), NoFlag, eq);
__ Csel(PickR(size), Operand(PickR(size)), Operand(PickR(size)), lt);
{
// Ensure that `claim` doesn't alias any PickR().
UseScratchRegisterScope temps(masm_);
Register claim = temps.AcquireX();
// We should only claim a 16-byte-aligned amount, since we're using the
// system stack pointer.
__ Mov(claim, GetRandomBits(4) * 16);
__ Claim(Operand(claim));
// Also claim a bit more, so we can store at sp+claim.
__ Claim(Operand(32));
__ Poke(PickR(size), Operand(claim));
__ Peek(PickR(size), Operand(8));
__ Poke(PickR(size), Operand(16));
__ Peek(PickR(size), Operand(claim.W(), UXTW));
__ Drop(Operand(32));
__ Drop(Operand(claim));
}
}
void BenchCodeGenerator::GenerateMemOperandSequence() {
unsigned size = PickRSize();
RegList store_list = GetRandomBits(16); // Restrict to [x0, x15].
__ StoreCPURegList(CPURegList(CPURegister::kRegister, size, store_list),
MemOperand(scratch));
RegList load_list = GetRandomBits(16); // Restrict to [x0, x15].
__ LoadCPURegList(CPURegList(CPURegister::kRegister, size, load_list),
MemOperand(scratch));
__ Str(PickX(), MemOperand(scratch));
__ Strb(PickW(), MemOperand(scratch, 42));
__ Strh(PickW(), MemOperand(scratch, 42, PostIndex));
__ Ldrsw(PickX(), MemOperand(scratch, -42, PreIndex));
__ Ldr(PickR(size), MemOperand(scratch, 19)); // Translated to ldur.
__ Push(PickX(), PickX());
// Ensure unique registers (in [x0, x15]) for Pop.
__ Pop(Register(static_cast<int>(GetRandomBits(2)) + 0, kWRegSize),
Register(static_cast<int>(GetRandomBits(2)) + 4, kWRegSize),
Register(static_cast<int>(GetRandomBits(2)) + 8, kWRegSize),
Register(static_cast<int>(GetRandomBits(2)) + 12, kWRegSize));
}
void BenchCodeGenerator::GenerateImmediateSequence() {
unsigned size = PickRSize();
__ And(PickR(size), PickR(size), GetRandomBits(size));
__ Sub(PickR(size), PickR(size), GetRandomBits(size));
__ Mov(PickR(size), GetRandomBits(size));
__ Movk(PickX(), GetRandomBits(16), static_cast<int>(GetRandomBits(2)) * 16);
}
void BenchCodeGenerator::BindPendingLabels(uint64_t bind_mask) {
if (bind_mask == 0) return;
// The labels we bind here jump back to just after each branch that refers
// to them. This allows a simple, linear execution path, whilst still
// benchmarking long-range labels.
//
// Ensure that code falling through into this sequence does not jump
// back to an earlier point in the execution path.
Label done;
__ B(&done);
std::list<LabelPair>::iterator it = labels_.begin();
while ((it != labels_.end()) && (bind_mask != 0)) {
if ((bind_mask & 1) != 0) {
// Bind the label and jump back to its source.
__ Bind(it->target);
__ B(it->cont);
delete it->target;
delete it->cont;
it = labels_.erase(it);
} else {
++it; // Don't bind this one.
}
bind_mask >>= 1;
}
__ Bind(&done);
}
void BenchCodeGenerator::BindAllPendingLabels() {
while (!labels_.empty()) {
// BindPendingLables generates a branch over each block of bound labels.
// This will be repeated for each call here, but the effect is minimal and
// (empirically) we rarely accumulate more than 64 pending labels anyway.
BindPendingLabels(UINT64_MAX);
}
}
void BenchCodeGenerator::GenerateBranchSequence() {
{
LabelPair pair = {new Label(), new Label()};
__ B(lt, pair.target);
__ Bind(pair.cont);
labels_.push_front(pair);
}
{
LabelPair pair = {new Label(), new Label()};
__ Tbz(PickX(),
static_cast<int>(GetRandomBits(kXRegSizeLog2)),
pair.target);
__ Bind(pair.cont);
labels_.push_front(pair);
}
{
LabelPair pair = {new Label(), new Label()};
__ Cbz(PickX(), pair.target);
__ Bind(pair.cont);
labels_.push_front(pair);
}
}
void BenchCodeGenerator::GenerateCallReturnSequence() {
Label fn, done;
if (PickBool()) {
__ Bl(&fn);
} else {
Register reg = PickX();
__ Adr(reg, &fn);
__ Blr(reg);
}
__ B(&done);
__ Bind(&fn);
// Recurse with a randomised (but fairly small) minimum size.
Generate(GetRandomBits(8));
__ Bind(&done);
}
void BenchCodeGenerator::GenerateFPSequence() {
unsigned size = PickFPSize();
unsigned other_size = PickBool() ? size * 2 : size / 2;
if (other_size < kHRegSize) other_size = kDRegSize;
if (other_size > kDRegSize) other_size = kHRegSize;
__ Fadd(PickV(size), PickV(size), PickV(size));
__ Fmul(PickV(size), PickV(size), PickV(size));
__ Fcvt(PickV(other_size), PickV(size));
__ Fjcvtzs(PickW(), PickD());
__ Fccmp(PickV(size), PickV(size), NCVFlag, pl);
__ Fdiv(PickV(size), PickV(size), PickV(size));
__ Fmov(PickV(size), 1.25 * GetRandomBits(2));
__ Fmsub(PickV(size), PickV(size), PickV(size), PickV(size));
__ Frintn(PickV(size), PickV(size));
}
void BenchCodeGenerator::GenerateNEONSequence() {
__ And(PickV().V16B(), PickV().V16B(), PickV().V16B());
__ Sqrshl(PickV().V8H(), PickV().V8H(), PickV().V8H());
__ Umull(PickV().V2D(), PickV().V2S(), PickV().V2S());
__ Sqdmlal2(PickV().V4S(), PickV().V8H(), PickV().V8H());
// For structured loads and stores, we have to specify sequential (wrapped)
// registers, so start with [v16, v31] and allow them to wrap in to the
// [v0, v7] range.
VRegister vt(16 + static_cast<unsigned>(GetRandomBits(4)), kQRegSize);
VRegister vt2((vt.GetCode() + 1) % kNumberOfVRegisters, kQRegSize);
VRegister vt3((vt.GetCode() + 2) % kNumberOfVRegisters, kQRegSize);
VRegister vt4((vt.GetCode() + 3) % kNumberOfVRegisters, kQRegSize);
VIXL_ASSERT(!kCalleeSavedV.IncludesAliasOf(vt));
VIXL_ASSERT(!kCalleeSavedV.IncludesAliasOf(vt2));
VIXL_ASSERT(!kCalleeSavedV.IncludesAliasOf(vt3));
VIXL_ASSERT(!kCalleeSavedV.IncludesAliasOf(vt4));
__ Ld3(vt.V4S(), vt2.V4S(), vt3.V4S(), MemOperand(scratch));
__ St4(vt.V16B(), vt2.V16B(), vt3.V16B(), vt4.V16B(), MemOperand(scratch));
__ Fmaxv(PickV().H(), PickV().V8H());
__ Fminp(PickV().V4S(), PickV().V4S(), PickV().V4S());
}

View File

@@ -1,267 +0,0 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_BENCH_UTILS_H_
#define VIXL_AARCH64_BENCH_UTILS_H_
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <list>
#include <vector>
#include "globals-vixl.h"
#include "aarch64/macro-assembler-aarch64.h"
class BenchTimer {
public:
BenchTimer() { gettimeofday(&start_, NULL); }
double GetElapsedSeconds() const {
timeval elapsed = GetElapsed();
double sec = elapsed.tv_sec;
double usec = elapsed.tv_usec;
return sec + (usec / 1000000.0);
}
bool HasRunFor(uint32_t seconds) {
timeval elapsed = GetElapsed();
VIXL_ASSERT(elapsed.tv_sec >= 0);
return static_cast<uint64_t>(elapsed.tv_sec) >= seconds;
}
private:
timeval GetElapsed() const {
VIXL_ASSERT(timerisset(&start_));
timeval now, elapsed;
gettimeofday(&now, NULL);
timersub(&now, &start_, &elapsed);
return elapsed;
}
timeval start_;
};
// Provide a standard command-line interface for all benchmarks.
class BenchCLI {
public:
// Set default values.
BenchCLI(int argc, char* argv[])
: run_time_(kDefaultRunTime), status_(kRunBenchmark) {
for (int i = 1; i < argc; i++) {
if ((strcmp(argv[i], "-h") == 0) || (strcmp(argv[i], "--help") == 0)) {
PrintUsage(argv[0]);
status_ = kExitSuccess;
return;
}
}
// Use the default run time.
if (argc == 1) return;
if (argc != 2) {
if (argc > 0) PrintUsage(argv[0]);
status_ = kExitFailure;
return;
}
char* end;
unsigned long run_time = strtoul(argv[1], &end, 0); // NOLINT(runtime/int)
if ((end == argv[1]) || (run_time > UINT32_MAX)) {
PrintUsage(argv[0]);
status_ = kExitFailure;
return;
}
run_time_ = static_cast<uint32_t>(run_time);
}
void PrintUsage(char* name) {
printf("USAGE: %s [OPTIONS]... [RUN_TIME]\n", name);
printf("\n");
printf("Run a single VIXL benchmark for approximately RUN_TIME seconds,\n");
printf("or %" PRIu32 " seconds if unspecified.\n", kDefaultRunTime);
printf("\n");
#ifdef VIXL_DEBUG
printf("This is a DEBUG build. VIXL's assertions will be enabled, and\n");
printf("extra debug information may be printed. The benchmark results\n");
printf("are not representative of expected VIXL deployments.\n");
printf("\n");
#endif
printf("OPTIONS:\n");
printf("\n");
printf(" -h, --help\n");
printf(" Print this help message.\n");
}
void PrintResults(uint64_t iterations, double elapsed_seconds) {
double score = iterations / elapsed_seconds;
printf("%g iteration%s per second (%" PRIu64 " / %g)",
score,
(score == 1.0) ? "" : "s",
iterations,
elapsed_seconds);
#ifdef VIXL_DEBUG
printf(" [Warning: DEBUG build]");
#endif
printf("\n");
}
bool ShouldExitEarly() const {
switch (status_) {
case kRunBenchmark:
return false;
case kExitFailure:
case kExitSuccess:
return true;
}
VIXL_UNREACHABLE();
return true;
}
int GetExitCode() const {
switch (status_) {
case kExitFailure:
return EXIT_FAILURE;
case kExitSuccess:
case kRunBenchmark:
return EXIT_SUCCESS;
}
VIXL_UNREACHABLE();
return EXIT_FAILURE;
}
uint32_t GetRunTimeInSeconds() const { return run_time_; }
private:
static const uint32_t kDefaultRunTime = 5;
uint32_t run_time_;
enum { kRunBenchmark, kExitSuccess, kExitFailure } status_;
};
// Generate random, but valid (and simulatable) instruction sequences.
//
// The effect of the generated code is meaningless, but not harmful. That is,
// it will not abort, callee-saved registers are properly preserved and so on.
// It is possible to call it as a `void fn(void)` function.
class BenchCodeGenerator {
public:
explicit BenchCodeGenerator(vixl::aarch64::MacroAssembler* masm)
: masm_(masm), rnd_(0), rnd_bits_(0), call_depth_(0) {
// Arbitrarily initialise rand_state_ using the behaviour of srand48(42).
rand_state_[2] = 0;
rand_state_[1] = 42;
rand_state_[0] = 0x330e;
}
void Generate(size_t min_size_in_bytes);
private:
void GeneratePrologue();
void GenerateEpilogue();
// Arbitrarily pick one of the other Generate*Sequence() functions.
// TODO: Consider allowing this to be biased, so that a benchmark can focus on
// a subset of sequences.
void GenerateArbitrarySequence();
// Instructions with a trivial pass-through to Emit().
void GenerateTrivialSequence();
// Instructions using the Operand and MemOperand abstractions. These have a
// run-time cost, and many common VIXL APIs use them.
void GenerateOperandSequence();
void GenerateMemOperandSequence();
// Generate instructions taking immediates that require analysis (and may
// result in multiple instructions per macro).
void GenerateImmediateSequence();
// Immediate-offset and register branches. This also (necessarily) covers adr.
void GenerateBranchSequence();
// Generate nested, conventional (blr+ret) calls.
void GenerateCallReturnSequence();
void GenerateFPSequence();
void GenerateNEONSequence();
// To exercise veneer pools, GenerateBranchSequence links labels that are
// expected to be bound later. This helper binds them.
// The Nth youngest label is bound if bit <N> is set in `bind_mask`. That
// means that this helper can bind at most 64 pending labels.
void BindPendingLabels(uint64_t bind_mask);
// As above, but unconditionally bind all pending labels (even if there are
// more than 64 of them).
void BindAllPendingLabels();
// Argument selection helpers. These only return caller-saved registers.
uint64_t GetRandomBits(int bits);
bool PickBool() { return GetRandomBits(1) != 0; }
unsigned PickRSize();
unsigned PickFPSize();
vixl::aarch64::Register PickR(unsigned size_in_bits);
vixl::aarch64::VRegister PickV(
unsigned size_in_bits = vixl::aarch64::kQRegSize);
vixl::aarch64::Register PickW() { return PickR(vixl::aarch64::kWRegSize); }
vixl::aarch64::Register PickX() { return PickR(vixl::aarch64::kXRegSize); }
vixl::aarch64::VRegister PickH() { return PickV(vixl::aarch64::kHRegSize); }
vixl::aarch64::VRegister PickS() { return PickV(vixl::aarch64::kSRegSize); }
vixl::aarch64::VRegister PickD() { return PickV(vixl::aarch64::kDRegSize); }
vixl::aarch64::MacroAssembler* masm_;
// State for *rand48(), used to randomise code generation.
unsigned short rand_state_[3]; // NOLINT(runtime/int)
uint32_t rnd_;
int rnd_bits_;
// The generator can produce nested calls. The probability of it doing this is
// influenced by the current call depth, so we have to track it here.
int call_depth_;
struct LabelPair {
// We can't copy labels, so we have to allocate them dynamically to store
// them in a std::list.
vixl::aarch64::Label* target;
vixl::aarch64::Label* cont;
};
std::list<LabelPair> labels_;
// Some sequences need a scratch area. Space for this is allocated on the
// stack, and stored in this register.
static const vixl::aarch64::Register scratch;
};
#endif // VIXL_AARCH64_BENCH_UTILS_H_

View File

@@ -1,239 +0,0 @@
Getting Started with VIXL for AArch32
=====================================
This guide will show you how to use the VIXL framework for AArch32. We will see
how to set up the VIXL assembler and generate some code. We will also go into
details on a few useful features provided by VIXL and see how to run the
generated code.
The source code of the example developed in this guide can be found in the
`examples/aarch32` directory (`examples/aarch32/getting-started.cc`).
Creating the macro assembler.
-----------------------------
First of all you need to make sure that the header files for the assembler are
included. You should have the following lines at the beginning of your source
file:
// You may use <cstdint> if using C++11 or later.
extern "C" {
#include <stdint.h>
}
#include <cstdio>
#include <string>
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"
In our case, those files are included by "examples.h".
All VIXL components are declared in the `vixl::aarch32` namespace, so let's add
this to the beginning of the file for convenience (once again, done in
"examples.h"):
using namespace vixl::aarch32;
Now we are ready to create and initialise the different components.
First of all we need to create a macro assembler object.
MacroAssembler masm;
Generating some code.
---------------------
We are now ready to generate some code. The macro assembler provides methods
for all the instructions that you can use. As it's a macro assembler,
the instructions that you tell it to generate may not directly map to a single
hardware instruction. Instead, it can produce a short sequence of instructions
that has the same effect.
Before looking at how to generate some code, let's introduce a simple but handy
macro:
#define __ masm->
It allows us to write `__ Mov(r0, 42);` instead of `masm->Mov(r0, 42);` to
generate code.
Now we are going to write a C++ function to generate our first assembly
code fragment.
void GenerateDemo(MacroAssembler *masm) {
__ Ldr(r1, 0x12345678);
__ And(r0, r0, r1);
__ Bx(lr);
}
The generated code corresponds to a function with the following C prototype:
uint32_t demo(uint32_t x);
This function doesn't perform any useful operation. It loads the value
0x12345678 into r1 and performs a bitwise `and` operation with
the function's argument (stored in r0). The result of this `and` operation
is returned by the function in r0.
Now in our program main function, we only need to create a label to represent
the entry point of the assembly function and to call `GenerateDemo` to
generate the code.
Label demo;
masm.Bind(&demo);
GenerateDemo(&masm);
masm.FinalizeCode();
Now we are going to learn a bit more on a couple of interesting VIXL features
which are used in this example.
### Label
VIXL's assembler provides a mechanism to represent labels with `Label` objects.
They are easy to use: simply create the C++ object and bind it to a location in
the generated instruction stream.
Creating a label is easy, since you only need to define the variable and bind it
to a location using the macro assembler.
Label my_label; // Create the label object.
__ Bind(&my_label); // Bind it to the current location.
The target of a branch using a label will be the address to which it has been
bound. For example, let's consider the following code fragment:
Label foo;
__ B(&foo); // Branch to foo.
__ Mov(r0, 42);
__ Bind(&foo); // Actual address of foo is here.
__ Mov(r1, 0xc001);
If we run this code fragment, the `Mov(r0, 42)` will never be executed since
the first thing this code does is to jump to `foo`, which corresponds to the
`Mov(r1, 0xc001)` instruction.
When working with labels you need to know that they are only to be used for
local branches, and should be passed around with care. The major reason is
that they cannot safely be passed or returned by value because this can trigger
multiple constructor and destructor calls. The destructor has assertions
to check that we don't try to branch to a label that hasn't been bound.
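For example, a code-generation helper can take the labels it needs by pointer, so that a
single `Label` object is shared between the code that branches to it and the code that
binds it (a minimal sketch; the registers and conditions are only illustrative):
void GenerateCountdown(MacroAssembler* masm, Label* loop, Label* done) {
  __ Cmp(r1, 0);
  __ B(eq, done);    // Forward branch; the caller binds *done later.
  __ Sub(r1, r1, 1);
  __ B(loop);        // Backward branch to the already-bound *loop.
}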
### Literal Pool
On AArch32 instructions are 16 or 32 bits long, thus immediate values encoded in
the instructions have limited size. If you want to load a constant bigger than
this limit you have two possibilities:
1. Use multiple instructions to load the constant in multiple steps. This
solution is already handled in VIXL. For instance you can write:
`__ Mov(r0, 0x12345678);`
The previous instruction would not be legal since the immediate value is too
big. However, VIXL's macro assembler will automatically rewrite this line into
multiple instructions to efficiently generate the value, ultimately setting 'r0'
to the correct value.
2. Store the constant in memory and load this value from the memory. The value
needs to be written near the code that will load it since we use a PC-relative
offset to indicate the address of this value. This solution has the advantage
of making the value easily modifiable at run-time; since it does not reside
in the instruction stream, it doesn't require cache maintenance when updated.
VIXL also provides a way to do this:
`__ Ldr(r0, 0x12345678);`
The assembler will store the immediate value in a "literal pool", a set of
constants embedded in the code. VIXL will emit the literal pool when needed.
The literal pool is emitted regularly, such that it is within range of the
instructions that refer to it. However, you can force the literal pool to be
emitted using `masm.EmitLiteralPool()`. It generates a branch to skip the
pool.
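As a minimal sketch, using only the calls named in this guide:
MacroAssembler masm;
// Adds a 32-bit constant to the literal pool.
masm.Ldr(r0, 0x12345678);
// Emits the pool at this point; a branch is generated to skip over it.
masm.EmitLiteralPool();
// ... a long straight-line sequence with no natural emission point ...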
Running the code.
-----------------
We first need to run a few operations to get executable code. The
`ExecutableMemory` helper takes care of it:
byte* code = masm.GetBuffer()->GetStartAddress<byte*>();
uint32_t code_size = masm.GetBuffer()->GetSizeInBytes();
ExecutableMemory memory(code, code_size);
Then we compute a pointer to the copied function, taking the instruction set in
use (A32 or T32) into account:
uint32_t (*demo_function)(uint32_t) =
memory.GetEntryPoint<uint32_t (*)(uint32_t)>(demo, masm.GetInstructionSetInUse());
Now, we can call this function pointer exactly as if it were a pointer on a C
function:
uint32_t input_value = 0x89abcdef;
uint32_t output_value = (*demo_function)(input_value);
A little trace:
printf("native: abs(%08x) = %08x\n", input_value, output_value);
The example shown in this tutorial is very simple, because the goal was to
demonstrate the basics of the VIXL framework. There are more complex code
examples in the VIXL `examples/aarch32` directory showing more features of both the
macro assembler and the AArch32 architecture.
Disassembling the generated code.
---------------------------------
Once you have generated something with the macro-assembler, you may want to
disassemble it.
First, you must include iostream.
#include <iostream>
And the disassembler header file:
#include "aarch32/disasm-aarch32.h"
Then you have to define the PC used for disassembly (the value used to display
the addresses, not the actual location of the instructions):
uint32_t display_pc = 0x1000;
Or, if you are running on a 32-bit host, you can use the real address:
uint32_t display_pc = reinterpret_cast<uintptr_t>(masm.GetBuffer()->GetStartAddress<byte*>());
Then you can disassemble the macro assembler's buffer:
PrintDisassembler disasm(std::cout, display_pc);
disasm.DisassembleA32Buffer(
masm.GetBuffer()->GetOffsetAddress<uint32_t*>(0), masm.GetCursorOffset());
If you generated T32 code instead of A32 code, you must use
`DisassembleT32Buffer`. Warning: if your buffer contains data, or mixes T32 and
A32 code, the result won't be accurate (everything will be disassembled as
either T32 or A32 code).
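For example, T32 code can be disassembled with a call of the following form (a minimal
sketch, assuming the T32 buffer is addressed as 16-bit `uint16_t` units):
PrintDisassembler disasm(std::cout, display_pc);
disasm.DisassembleT32Buffer(
    masm.GetBuffer()->GetOffsetAddress<uint16_t*>(0), masm.GetCursorOffset());
(The example output below was produced from A32 code.)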
Example of disassembly:
0x00001000 e30f0fff mov r0, #65535
0x00001004 e34f0fff movt r0, #65535
0x00001008 e3041567 mov r1, #17767
0x0000100c e3401123 movt r1, #291
0x00001010 e3a02000 mov r2, #0
0x00001014 e7c2001f bfc r0, #0, #3
0x00001018 e7d4081f bfc r0, #16, #5
0x0000101c e7c72011 bfi r2, r1, #0, #8
0x00001020 e7df2811 bfi r2, r1, #16, #16
0x00001024 e1000070 hlt 0

View File

@@ -1,207 +0,0 @@
Getting Started with VIXL AArch64
=================================
This guide will show you how to use the VIXL framework for AArch64. We will see
how to set up the VIXL assembler and generate some code. We will also go into
details on a few useful features provided by VIXL and see how to run the
generated code in the VIXL simulator.
The source code of the example developed in this guide can be found in the
`examples/aarch64` directory (`examples/aarch64/getting-started.cc`).
Creating the macro assembler and the simulator.
-----------------------------------------------
First of all you need to make sure that the header files for the assembler and
the simulator are included. You should have the following lines at the beginning
of your source file:
#include "aarch64/simulator-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
All VIXL components are declared in the `vixl::aarch64` namespace, so let's add
this to the beginning of the file for convenience:
using namespace vixl::aarch64;
Creating a macro assembler is as simple as
MacroAssembler masm;
VIXL's assembler will generate some code at run-time, and this code needs to
be stored in a buffer. By default the assembler will automatically manage
the code buffer. However, constructors are available that allow manual management
of the code buffer.
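For example, the following minimal sketch assumes a constructor overload that takes a
caller-owned buffer and its capacity; the assembler then writes into that buffer instead
of allocating its own:
const size_t kBufferSize = 4096;
vixl::byte buffer[kBufferSize];  // Caller-owned code buffer.
MacroAssembler masm_in_buffer(buffer, kBufferSize);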
We also need to set up the simulator. The simulator uses a Decoder object to
read and decode the instructions from the code buffer. We need to create a
decoder and bind our simulator to this decoder.
Decoder decoder;
Simulator simulator(&decoder);
Generating some code.
---------------------
We are now ready to generate some code. The macro assembler provides methods
for all the instructions that you can use. As it's a macro assembler,
the instructions that you tell it to generate may not directly map to a single
hardware instruction. Instead, it can produce a short sequence of instructions
that has the same effect.
For instance, the hardware `add` instruction can only take a 12-bit immediate
optionally shifted by 12, but the macro assembler can generate one or more
instructions to handle any 64-bit immediate. For example, `Add(x0, x0, -1)`
will be turned into `Sub(x0, x0, 1)`.
Before looking at how to generate some code, let's introduce a simple but handy
macro:
#define __ masm->
It allows us to write `__ Mov(x0, 42);` instead of `masm->Mov(x0, 42);` to
generate code.
Now we are going to write a C++ function to generate our first assembly
code fragment.
void GenerateDemoFunction(MacroAssembler *masm) {
__ Ldr(x1, 0x1122334455667788);
__ And(x0, x0, x1);
__ Ret();
}
The generated code corresponds to a function with the following C prototype:
uint64_t demo_function(uint64_t x);
This function doesn't perform any useful operation. It loads the value
0x1122334455667788 into x1 and performs a bitwise `and` operation with
the function's argument (stored in x0). The result of this `and` operation
is returned by the function in x0.
Now in our program main function, we only need to create a label to represent
the entry point of the assembly function and to call `GenerateDemoFunction` to
generate the code.
Label demo_function;
masm.Bind(&demo_function);
GenerateDemoFunction(&masm);
masm.FinalizeCode();
Now we are going to learn a bit more on a couple of interesting VIXL features
which are used in this example.
### Label
VIXL's assembler provides a mechanism to represent labels with `Label` objects.
They are easy to use: simply create the C++ object and bind it to a location in
the generated instruction stream.
Creating a label is easy, since you only need to define the variable and bind it
to a location using the macro assembler.
Label my_label; // Create the label object.
__ Bind(&my_label); // Bind it to the current location.
The target of a branch using a label will be the address to which it has been
bound. For example, let's consider the following code fragment:
Label foo;
__ B(&foo); // Branch to foo.
__ Mov(x0, 42);
__ Bind(&foo); // Actual address of foo is here.
__ Mov(x1, 0xc001);
If we run this code fragment, the `Mov(x0, 42)` will never be executed since
the first thing this code does is to jump to `foo`, which corresponds to the
`Mov(x1, 0xc001)` instruction.
When working with labels you need to know that they are only to be used for
local branches, and should be passed around with care. There are two reasons
for this:
- They can't safely be passed or returned by value because this can trigger
multiple constructor and destructor calls. The destructor has assertions
to check that we don't try to branch to a label that hasn't been bound.
- The `B` instruction does not branch to labels which are out of range of the
branch. The `B` instruction has a range of 2^28 bytes, but other variants
(such as conditional or `CBZ`-like branches) have smaller ranges. Confining
them to local ranges doesn't mean that we won't hit these limits, but it
makes the lifetime of the labels much shorter and eases the debugging of
these kinds of issues.
### Literal Pool
On ARMv8 instructions are 32 bits long, thus immediate values encoded in the
instructions have limited size. If you want to load a constant bigger than this
limit you have two possibilities:
1. Use multiple instructions to load the constant in multiple steps. This
solution is already handled in VIXL. For instance you can write:
`__ Mov(x0, 0x1122334455667788);`
The previous instruction would not be legal since the immediate value is too
big. However, VIXL's macro assembler will automatically rewrite this line into
multiple instructions to efficiently generate the value.
2. Store the constant in memory and load this value from the memory. The value
needs to be written near the code that will load it since we use a PC-relative
offset to indicate the address of this value. This solution has the advantage
of making the value easily modifiable at run-time; since it does not reside
in the instruction stream, it doesn't require cache maintenance when updated.
VIXL also provides a way to do this:
`__ Ldr(x0, 0x1122334455667788);`
The assembler will store the immediate value in a "literal pool", a set of
constants embedded in the code. VIXL will emit literal pools after natural
breaks in the control flow, such as unconditional branches or return
instructions.
Literal pools are emitted regularly, such that they are within range of the
instructions that refer to them. However, you can force a literal pool to be
emitted using `masm.EmitLiteralPool()`.
Running the code in the simulator.
----------------------------------
Now we are going to see how to use the simulator to run the code that we
generated previously.
Use the simulator to assign a value to the registers. Our previous code example
uses the register x0 as an input, so let's set the value of this register.
simulator.WriteXRegister(0, 0x8899aabbccddeeff);
Now we can jump to the "entry" label to execute the code:
simulator.RunFrom(entry.target());
When the execution has finished and the simulator has returned, you can inspect
the value of the registers after the execution. For instance:
printf("x0 = %" PRIx64 "\n", simulator.ReadXRegister(0));
The example shown in this tutorial is very simple, because the goal was to
demonstrate the basics of the VIXL framework. There are more complex code
examples in the VIXL `examples/aarch64` directory showing more features of both the
macro assembler and the ARMv8 architecture.
Extras
------
In addition to this document and the [examples](/examples/aarch64), you can find
documentation and guides on various topics that may be helpful
[here](/doc/aarch64/topics/index.md).

View File

@@ -1,60 +0,0 @@
Extending the disassembler
==========================
The output of the disassembler can be extended and customized. This may be
useful for example to add comments and annotations to the disassembly, print
aliases for register names, or add an offset to disassembled addresses.
The general procedure to achieve this is to create a sub-class of
`Disassembler` and override the appropriate virtual functions.
The `Disassembler` class provides virtual methods that implement how specific
disassembly elements are printed. See
[src/aarch64/disasm-aarch64.h](/src/aarch64/disasm-aarch64.h) for details.
These include functions like:
virtual void AppendRegisterNameToOutput(const Instruction* instr,
const CPURegister& reg);
virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr,
int64_t offset);
virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
const void* addr);
They can be overridden for example to use different register names and annotate
code addresses.
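For instance, the following minimal sketch prints `x16` and `x17` using their
intra-procedure-call aliases, in the spirit of the example output shown further below;
the `AppendToOutput` helper and the `CPURegister` accessors used here are assumptions
about the available API:
class AliasRegisterDisassembler : public vixl::aarch64::Disassembler {
 protected:
  virtual void AppendRegisterNameToOutput(
      const vixl::aarch64::Instruction* instr,
      const vixl::aarch64::CPURegister& reg) VIXL_OVERRIDE {
    // Use "ip0"/"ip1" for x16/x17 and the default names for everything else.
    if (reg.IsRegister() && (reg.GetCode() == 16)) {
      AppendToOutput("ip0");
    } else if (reg.IsRegister() && (reg.GetCode() == 17)) {
      AppendToOutput("ip1");
    } else {
      Disassembler::AppendRegisterNameToOutput(instr, reg);
    }
  }
};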
More complex modifications can be performed by overriding the visitor functions
of the disassembler. The VIXL `Decoder` uses a visitor pattern implementation,
so the `Disassembler` (as a sub-class of `DecoderVisitor`) must provide a
visitor function for each sub-type of instructions. The complete list of
visitors is defined by the macro `VISITOR_LIST` in
[src/aarch64/decoder-aarch64.h](/src/aarch64/decoder-aarch64.h).
The [/examples/custom-disassembler.h](/examples/custom-disassembler.h) and
[/examples/custom-disassembler.cc](/examples/custom-disassembler.cc) example
files show how the methods can be overridden to use different register names,
map code addresses, annotate code addresses, and add comments:
VIXL disasm 0x7fff04cb05e0: add x10, x16, x17
custom disasm -0x8: add x10, ip0, ip1 // add/sub to x10
VIXL disasm 0x7fff04cb05e4: cbz x10, #+0x28 (addr 0x7fff04cb060c)
custom disasm -0x4: cbz x10, #+0x28 (addr 0x24 ; label: somewhere)
VIXL disasm 0x7fff04cb05e8: add x11, x16, x17
custom disasm 0x0: add x11, ip0, ip1
VIXL disasm 0x7fff04cb05ec: add w5, w6, w30
custom disasm 0x4: add w5, w6, w30
VIXL disasm 0x7fff04cb05f0: tbz w10, #2, #-0x10 (addr 0x7fff04cb05e0)
custom disasm 0x8: tbz w10, #2, #-0x10 (addr -0x8)
One can refer to the implementation of visitor functions for the `Disassembler`
(in [src/aarch64/disasm-aarch64.cc](/src/aarch64/disasm-aarch64.cc)) or even
for the `Simulator`
(in [src/aarch64/simulator-aarch64.cc](/src/aarch64/simulator-aarch64.cc))
to see how to extract information from instructions.

View File

@@ -1,8 +0,0 @@
We will try to add documentation for topics that may be useful to VIXL users. If
you think of any topic that may be useful and is not listed here, please contact
us at <vixl@arm.com>.
You can also have a look at the ['getting started' page](../getting-started-aarch64.md).
* [Extending and customizing the disassembler](extending-the-disassembler.md)
* [Using VIM YouCompleteMe with VIXL](ycm.md)

View File

@@ -1,231 +0,0 @@
AArch64 Simulator state trace
=============================
The AArch64 Simulator can be configured to produce traces of instruction
execution, register contents, and memory accesses. The trace is designed to be
intuitive for human readers, but this document describes the format of the
trace, so that post-processing tools can confidently parse the output.
In VIXL's own test runner, the trace is controlled by the `--trace*` options.
Run `test-runner --help` for details.
Basic structure
---------------
Executed instructions show the address, the encoding of the instruction and the
disassembly (as produced by VIXL's Disassembler). For example:
0x00007fbe2a6a9044 d299d200 mov x0, #0xce90
The first field is the address of the instruction, with exactly 16 hexadecimal
characters and a leading 0x, and is followed by two spaces. The second field is
the instruction encoding, with exactly eight hexadecimal characters (and no
leading 0x). This is followed by two _tab_ characters, and the instruction
disassembly. The following regular expression can be used to capture each field:
(0x[0-9a-f]{16})  ([0-9a-f]{8})\t\t(.*)
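As an illustration, a hypothetical post-processing tool could apply this expression to
each line with `std::regex` (a minimal sketch; the two-space and two-tab separators
follow the description above):
#include <iostream>
#include <regex>
#include <string>

int main() {
  // Address, two spaces, encoding, two tabs, disassembly.
  const std::regex kInstructionLine("(0x[0-9a-f]{16})  ([0-9a-f]{8})\t\t(.*)");
  const std::string line = "0x00007fbe2a6a9044  d299d200\t\tmov x0, #0xce90";
  std::smatch fields;
  if (std::regex_match(line, fields, kInstructionLine)) {
    std::cout << "address:     " << fields[1] << "\n"
              << "encoding:    " << fields[2] << "\n"
              << "disassembly: " << fields[3] << "\n";
  }
  return 0;
}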
Following each instruction are zero or more lines of state update. Most notably,
these represent the register state updates and memory accesses that occurred
during simulation of the instruction. All of these lines begin with a '#'
character, so that they can be easily identified, and filtered if necessary. For
example:
0x00007fd2221c907c 8b82200e add x14, x0, x2, asr #8
# x14: 0xfffedcba98765432
0x00007fd2221c9080 0b81200f add w15, w0, w1, asr #8
# w15: 0xff89abcd
Note that the Simulator uses these state update lines to describe its initial
state. As a result, there will be state trace output before the first simulated
instruction, and parsers need to be tolerant of this.
Note that padding white space is used liberally to keep values vertically
aligned throughout the trace (as shown with the write to `w15` in the example
above). Similarly, some compound values are split into parts using the C++14
literal separator (`'`) character. Refer to the "Memory accesses" section
(below) for examples.
Ordering
--------
VIXL guarantees that each instruction is printed before its associated state
trace.
State trace must be interpreted sequentially, line by line. VIXL avoids updating
the same register more than once (because it makes the trace hard for humans to
read), but this can occur in some situations, and should be supported by
parsers.
The state is intended to be consistent with architectural execution at the start
of each instruction and at the end of the whole trace, but no such guarantees
are made about the traced state _between_ instructions. VIXL prioritises
human-readability when choosing the ordering of state updates.
If simulated registers are modified externally, for example using
`WriteRegister` from C++ code, their state will (by default) be logged
immediately. In the full trace, it will appear as though the (runtime) call or
return instruction modified the state. This is consistent with the guarantees
above, but it can result in single instructions appearing to generate a large
number of state updates.
There is no upper limit on the number of state update lines that any one
instruction can generate.
Whole register trace
--------------------
The simplest form of state trace has the form "`REG: VALUE`", meaning that
the register `REG` has the specified value, and any high-order bits in aliased
registers are set to zero.
0x00007fd2221c907c 8b82200e add x14, x0, x2, asr #8
# x14: 0xfffedcba98765432
Note that to correctly track state, parsers need to be aware of architectural
register aliasing rules. Also, VIXL uses some standard register aliases, such as
`lr` (`x30`). To avoid misinterpreting a register alias (and thereby potentially
missing an aliased register update), some tools may need to treat an
unrecognised register name as an error.
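For illustration, a hypothetical trace-parsing helper might canonicalise general-purpose
register names like this (the alias handling shown is deliberately partial):
#include <string>

// Map a traced general-purpose register name onto the 64-bit register it
// aliases, so that a write to "w5" is recorded against "x5".
std::string CanonicalXRegister(const std::string& name) {
  if (name == "lr") return "x30";
  if (!name.empty() && ((name[0] == 'w') || (name[0] == 'x'))) {
    return "x" + name.substr(1);
  }
  return name;  // sp, FP/SIMD and SVE registers are handled elsewhere.
}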
This trace format attempts to represent _architectural_ register writes.
However, this is not strictly checked or enforced.
`VALUE` is always shown in hexadecimal (raw bits) form, with a leading `0x` and
enough digits to exactly fill `REG`. `VALUE` may also include annotations (for
example to show FP arithmetic values) in parentheses. These annotations are for
the benefit of human readers, and parsers may ignore them.
Note that SVE registers _always_ use the partial register trace format,
described below, so a plain `z` or `p` register will never be used in a whole
register trace. This is true even if the vector length is configured to 16
bytes.
Partial register trace
----------------------
Sometimes, VIXL needs to show _part_ of a register without implying that the
rest of the register is zeroed. A partial register value is indicated by a bit
range in angled brackets after the register name: "`REG<MSB:LSB>: VALUE`".
This format is used for stores, for example.
SVE register updates are split across multiple lines, and therefore always use
the partial register trace format. For example (with a 384-bit VL):
0x00007fb1978da044 04214000 index z0.b, #0, #1
# z0<383:256>: 0x2f2e2d2c2b2a29282726252423222120
# z0<255:128>: 0x1f1e1d1c1b1a19181716151413121110
# z0<127:0>: 0x0f0e0d0c0b0a09080706050403020100
Note that VIXL will omit whole lines where they are unnecessary, for example if
they have no active (predicated) lanes. Parsers should not assume that every
part of a register will appear in such cases.
The `VALUE` has the same format as in the whole register trace, except in the
case of SVE `p` registers (as described below).
SVE `p` registers
-----------------
For `p` registers, we try to keep the lanes vertically aligned with the
corresponding parts of the `z` registers that they affect. To do this, we use a
binary format, with a leading `0b`, and spaces between each digit. For example:
0x00007f66e539b0b8 04f54607 index z7.d, x16, #-11
# z7<127:0>: 0x00000000000000150000000000000020
0x00007f66e539b0bc 25d8e3a7 ptrue p7.d, all
# p7<15:0>: 0b 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1
Memory accesses
---------------
The pattern for a memory access is "`VALUE OP ADDRESS`", where:
- `VALUE` is a hexadecimal value, with visual separators (') between
structure components,
- `OP` is `"->"` for a store, or `"<-"` for a load,
- `ADDRESS` is the (hexadecimal) address of the access.
Accesses shown in this style are always contiguous, and with little-endian
semantics. However, a given instruction might have multiple lines of memory
access trace, particularly if the instruction performs non-contiguous accesses.
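As a small illustration, a hypothetical post-processing helper can classify an access
line purely from the direction of the arrow:
#include <string>

enum class AccessKind { kLoad, kStore, kNotAnAccess };

// "VALUE -> ADDRESS" is a store; "VALUE <- ADDRESS" is a load.
AccessKind ClassifyAccess(const std::string& line) {
  if (line.find(" -> ") != std::string::npos) return AccessKind::kStore;
  if (line.find(" <- ") != std::string::npos) return AccessKind::kLoad;
  return AccessKind::kNotAnAccess;
}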
In the case of simple accesses, the `VALUE` is shared with register value trace:
0x00007f3835372058 e400e401 st1b { z1.b }, p1, [x0]
# z1<127:0>: 0xd4d7dadde0e3e6e9eceff2f5f8fbfe01 -> 0x000055d170298e90
Sign-extending loads show the whole resulting register value, with the (smaller)
access represented on a separate line. This makes the (differing) values in the
register and in memory unambiguous, without parsers needing to understand the
instruction set:
0x00007f47922d0068 79800306 ldrsh x6, [x24]
# x6: 0xffffffffffff8080
# ╙─ 0x8080 <- 0x00007fffbc197708
Some instructions access several different memory locations. In these cases,
each access is given its own line, with the highest lane index first so that
(for contiguous accesses) the lowest address ends up at the bottom:
0x00007fa6001e9060 e4217c0a st2b { z10.b, z11.b }, p7, [x0, x1]
# z10<127:0>: 0x0f0e0d0c0b0a09080706050403020100
# z11<127:0>: 0x1f1e1d1c1b1a19181716151413121110
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙─ 0x10'00 -> 0x00007ffe485d2f90
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙─── 0x11'01 -> 0x00007ffe485d2f92
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙───── 0x12'02 -> 0x00007ffe485d2f94
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙─────── 0x13'03 -> 0x00007ffe485d2f96
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙───────── 0x14'04 -> 0x00007ffe485d2f98
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙─────────── 0x15'05 -> 0x00007ffe485d2f9a
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙───────────── 0x16'06 -> 0x00007ffe485d2f9c
# ║ ║ ║ ║ ║ ║ ║ ║ ╙─────────────── 0x17'07 -> 0x00007ffe485d2f9e
# ║ ║ ║ ║ ║ ║ ║ ╙───────────────── 0x18'08 -> 0x00007ffe485d2fa0
# ║ ║ ║ ║ ║ ║ ╙─────────────────── 0x19'09 -> 0x00007ffe485d2fa2
# ║ ║ ║ ║ ║ ╙───────────────────── 0x1a'0a -> 0x00007ffe485d2fa4
# ║ ║ ║ ║ ╙─────────────────────── 0x1b'0b -> 0x00007ffe485d2fa6
# ║ ║ ║ ╙───────────────────────── 0x1c'0c -> 0x00007ffe485d2fa8
# ║ ║ ╙─────────────────────────── 0x1d'0d -> 0x00007ffe485d2faa
# ║ ╙───────────────────────────── 0x1e'0e -> 0x00007ffe485d2fac
# ╙─────────────────────────────── 0x1f'0f -> 0x00007ffe485d2fae
The line-drawing characters are encoded as UTF-8 (as is this document). There is
currently no locale handling in VIXL, so this is not configurable. However,
since these annotations are for the benefit of human readers, parsers can safely
ignore them, and treat the whole trace as an ASCII byte stream (ignoring 8-bit
characters). This is useful in situations where UTF-8 handling carries an
unacceptable performance cost.
In the future, VIXL may offer an option to avoid printing these annotations, so
that the trace is restricted to single-byte characters.
Floating-point value annotations
--------------------------------
Some floating-point operations produce register trace that annotates the raw
values with the corresponding FP arithmetic values. This is for the benefit of
human readers (and has limited precision). Such annotations follow the `VALUE`
in parentheses.
Scalar form:
# s1: 0x3f800000 (1.000) <- 0x00007ffdc64d2314
Vector form, updating all S lanes using a load:
# v16: 0x1211100f0e0d0c0b0a09080706050403 (4.577e-28, 1.739e-30, 6.598e-33, 2.502e-35)
# ╙─ 0x06050403 <- 0x00007ffe56fd7863
# ╙───────── 0x0a090807 <- 0x00007ffe56fd7867
# ╙───────────────── 0x0e0d0c0b <- 0x00007ffe56fd786b
# ╙───────────────────────── 0x1211100f <- 0x00007ffe56fd786f
Vector form, updating a single S lane using a load:
# v2: 0x03020100040302017ff0f0027f80f000 (..., 1.540e-36, ...)
# ╙───────────────── 0x04030201 <- 0x00007ffc7b2e3ca1
Vector form, replicating a single struct load to all S lanes:
# v15: 0x100f0e0d100f0e0d100f0e0d100f0e0d (2.821e-29, 2.821e-29, 2.821e-29, 2.821e-29)
# v16: 0x14131211141312111413121114131211 (7.425e-27, 7.425e-27, 7.425e-27, 7.425e-27)
# v17: 0x18171615181716151817161518171615 (1.953e-24, 1.953e-24, 1.953e-24, 1.953e-24)
# ╙───────╨───────╨───────╨─ 0x18171615'14131211'100f0e0d <- 0x00007ffdd64d847d

View File

@@ -1,9 +0,0 @@
VIM YouCompleteMe for VIXL
==========================
[YouCompleteMe](https://github.com/Valloric/YouCompleteMe) is a code completion
engine for VIM. VIXL includes a `.ycm_extra_conf.py` to configure YCM to work in
the VIXL repository.
All you need to do to get things working is to [install YCM](https://github.com/Valloric/YouCompleteMe#full-installation-guide),
preferably with semantic completion for C-family languages.

View File

@@ -1,124 +0,0 @@
VIXL Change Log
===============
* 1.13
+ Improve code formatting and add tests using clang-format.
+ Fix bugs in disassembly of unallocated instruction encodings.
+ Fix some execution trace bugs, and add tests.
+ Other small bug fixes and improvements.
* 1.12
+ Bug fixes for toolchain compatibility.
* 1.11
+ Fix bug in simulation of add with carry.
+ Fix use-after-free bug in Literal handling.
+ Build system updates for Android.
+ Add option to run test.py under Valgrind.
+ Other small bug fixes and improvements.
* 1.10
+ Improved support for externally managed literals.
+ Reworked build and test infrastructure.
+ Other small bug fixes and improvements.
* 1.9
+ Improved compatibility with Android build system.
+ Improved compatibility with Clang toolchain.
+ Added support for `umulh` instruction.
+ Added support for `fcmpe` and `fccmpe` instructions.
+ Other small bug fixes and improvements.
* 1.8
+ Complete NEON instruction set support.
+ Support long branches using veneers.
+ Improved handling of literal pools.
+ Support some `ic` and `dc` cache op instructions.
+ Support CRC32 instructions.
+ Support half-precision floating point instructions.
+ MacroAssembler support for `bfm`, `ubfm` and `sbfm`.
+ Other small bug fixes and improvements.
* 1.7
+ Added support for `prfm` prefetch instructions.
+ Added support for all `frint` instruction variants.
+ Add support for disassembling as an offset from a given address.
+ Fixed the disassembly of `movz` and `movn`.
+ Provide static helpers for immediate generation.
+ Provide helpers to create CPURegList from list unions or intersections.
+ Improved register value tracing.
+ Multithreading test fixes.
+ Other small bug fixes and build system improvements.
* 1.6
+ Make literal pool management the responsibility of the macro assembler.
+ Move code buffer management out of the Assembler.
+ Support `ldrsw` for literals.
+ Support binding a label to a specific offset.
+ Add macro assembler support for load/store pair with arbitrary offset.
+ Support Peek and Poke for CPURegLists.
+ Fix disassembly of branch targets.
+ Fix Decoder visitor insertion order.
+ Separate Decoder visitors into const and non-const variants.
+ Fix simulator for branches to tagged addresses.
+ Add a VIM YouCompleteMe configuration file.
+ Other small bug fixes and build system improvements.
* 1.5
+ Tagged pointer support.
+ Implement support for exclusive access instructions.
+ Implement support for `adrp` instruction.
+ Faster code for logical immediate identification.
+ Generate better code for immediates passed to shift-capable instructions.
+ Allow explicit use of unscaled-offset loads and stores.
+ Build and test infrastructure improvements.
+ Corrected computation of cache line size.
+ Fix simulation of `extr` instruction.
+ Fixed a bug when moving kWMinInt to a register.
+ Other small bug fixes.
* 1.4
+ Added support for `frintm`.
+ Fixed simulation of `frintn` and `frinta` for corner cases.
+ Added more tests for floating point instruction simulation.
+ Modified `CalleeSave()` and `CalleeRestore()` to push general purpose
registers before floating point registers on the stack.
+ Fixed Printf for mixed argument types, and use on real hardware.
+ Improved compatibility with some 32-bit compilers.
* 1.3
+ Address inaccuracies in the simulated floating point instructions.
+ Implement Default-NaN floating point mode.
+ Introduce `UseScratchRegisterScope` for controlling the use of temporary
registers.
+ Enable building VIXL on 32-bit hosts.
+ Other small bug fixes and improvements.
* 1.2
+ Added support for `fmadd`, `fnmadd`, `fnmsub`, `fminnm`, `fmaxnm`,
`frinta`, `fcvtau` and `fcvtas`.
+ Added support for assembling and disassembling `isb`, `dsb` and `dmb`.
+ Added support for automatic inversion of compare instructions when using
negative immediates.
+ Added support for using `movn` when generating immediates.
+ Added explicit flag-setting 'S' instructions, and removed
`SetFlags` and `LeaveFlags` arguments.
+ Added support for `Movk` in macro assembler.
+ Added support for W register parameters to `Tbz` and `Tbnz`.
+ Added support for using immediate operands with `Csel`.
+ Added new debugger syntax for memory inspection.
+ Fixed `smull`, `fmsub` and `sdiv` simulation.
+ Fixed sign extension for W->X conversions using `sxtb`, `sxth` and `sxtw`.
+ Prevented code generation for certain side-effect free operations,
such as `add r, r, #0`, in the macro assembler.
+ Other small bug fixes.
* 1.1
+ Improved robustness of instruction decoder and disassembler.
+ Added support for double-to-float conversions using `fcvt`.
+ Added support for more fixed-point to floating-point conversions (`ucvtf`
and `scvtf`).
+ Added instruction statistics collection class `instrument-a64.cc`.
* 1.0
+ Initial release.

View File

@@ -1,71 +0,0 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "examples.h"
using namespace vixl;
using namespace vixl::aarch32;
#define __ masm->
void GenerateAbs(MacroAssembler* masm) {
// int32_t abs(int32_t x)
// Argument location:
// x -> r0
__ Cmp(r0, 0);
// If r0 is negative, negate r0.
__ Rsb(mi, r0, r0, 0);
__ Bx(lr);
}
#ifndef TEST_EXAMPLES
int main() {
MacroAssembler masm(A32);
// Generate the code for the example function.
Label abs;
masm.Bind(&abs);
GenerateAbs(&masm);
masm.FinalizeCode();
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
// There is no simulator defined for VIXL AArch32.
printf("This example cannot be simulated\n");
#else
byte* code = masm.GetBuffer()->GetStartAddress<byte*>();
uint32_t code_size = masm.GetSizeOfCodeGenerated();
ExecutableMemory memory(code, code_size);
// Run the example function.
int32_t (*abs_function)(int32_t) =
memory.GetEntryPoint<int32_t (*)(int32_t)>(abs,
masm.GetInstructionSetInUse());
int32_t input_value = -42;
int32_t output_value = (*abs_function)(input_value);
printf("native: abs(%d) = %d\n", input_value, output_value);
#endif
return 0;
}
#endif // TEST_EXAMPLES

View File

@@ -1,151 +0,0 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <iostream>
#include <map>
#include <string>
#include "aarch32/constants-aarch32.h"
#include "aarch32/disasm-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"
#define __ masm.
namespace vixl {
namespace aarch32 {
// Example of disassembly customization.
class CustomStream : public Disassembler::DisassemblerStream {
std::map<Location::Offset, const char*> symbols_;
public:
CustomStream() : Disassembler::DisassemblerStream(std::cout) {}
std::map<Location::Offset, const char*>& GetSymbols() { return symbols_; }
virtual DisassemblerStream& operator<<(const Disassembler::PrintLabel& label)
VIXL_OVERRIDE {
std::map<Location::Offset, const char*>::iterator symbol =
symbols_.find(label.GetLocation());
// If the label was named, print the name instead of the address.
if (symbol != symbols_.end()) {
os() << symbol->second;
return *this;
}
os() << label;
return *this;
}
virtual DisassemblerStream& operator<<(const Register reg) VIXL_OVERRIDE {
// Print all the core registers with an upper-case letter instead of the
// default lower-case.
os() << "R" << reg.GetCode();
return *this;
}
};
class CustomDisassembler : public PrintDisassembler {
public:
explicit CustomDisassembler(CustomStream* stream)
: PrintDisassembler(stream) {}
CustomStream* GetStream() const {
return reinterpret_cast<CustomStream*>(&os());
}
virtual void PrintCodeAddress(uint32_t pc) VIXL_OVERRIDE {
// If the address matches a label, then print the label. Otherwise, print
// nothing.
std::map<Location::Offset, const char*>::iterator symbol =
GetStream()->GetSymbols().find(pc);
if (symbol != GetStream()->GetSymbols().end()) {
os().os() << symbol->second << ":" << std::endl;
}
// Add indentation for instructions.
os() << " ";
}
virtual void PrintOpcode16(uint32_t opcode) VIXL_OVERRIDE {
// Do not print opcodes.
USE(opcode);
}
virtual void PrintOpcode32(uint32_t opcode) VIXL_OVERRIDE {
// Do not print opcodes.
USE(opcode);
}
};
class NamedLabel : public Label {
CustomStream* stream_;
const char* name_;
public:
NamedLabel(CustomStream* stream, const char* name)
: stream_(stream), name_(name) {}
~NamedLabel() {
if (IsBound()) {
stream_->GetSymbols().insert(
std::pair<Location::Offset, const char*>(GetLocation(), name_));
}
}
};
void RunCustomDisassemblerTest() {
CustomStream stream;
MacroAssembler masm;
{
NamedLabel loop(&stream, "loop");
NamedLabel end(&stream, "end");
__ Mov(r0, 0);
__ Mov(r1, 0);
__ Bind(&loop);
__ Cmp(r1, 20);
__ B(gt, &end);
__ Add(r0, r0, r1);
__ Add(r1, r1, 1);
__ B(&loop);
__ Bind(&end);
__ Bx(lr);
__ FinalizeCode();
}
std::cout << "Custom disassembly:" << std::endl;
CustomDisassembler custom_disassembler(&stream);
custom_disassembler
.DisassembleA32Buffer(masm.GetBuffer()->GetOffsetAddress<uint32_t*>(0),
masm.GetBuffer()->GetSizeInBytes());
std::cout << std::endl;
std::cout << "Standard disassembly:" << std::endl;
PrintDisassembler print_disassembler(std::cout);
print_disassembler
.DisassembleA32Buffer(masm.GetBuffer()->GetOffsetAddress<uint32_t*>(0),
masm.GetBuffer()->GetSizeInBytes());
}
} // namespace aarch32
} // namespace vixl
#ifndef TEST_EXAMPLES
int main() {
vixl::aarch32::RunCustomDisassemblerTest();
return 0;
}
#endif // TEST_EXAMPLES

View File

@@ -1,325 +0,0 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The example assumes support for ELF binaries.
#ifdef __linux__
extern "C" {
#include <elf.h>
#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
}
#include <cerrno>
#include <iostream>
#include <map>
#include <string>
#include "globals-vixl.h"
#include "aarch32/disasm-aarch32.h"
#include "aarch32/instructions-aarch32.h"
class Symbol {
Elf32_Addr addr_;
int32_t offset_;
uint32_t size_;
int section_;
std::string name_;
public:
Symbol(const char* name,
Elf32_Addr addr,
int32_t offset,
uint32_t size,
int section)
: addr_(addr),
offset_(offset),
size_(size),
section_(section),
name_(name) {}
Symbol(const Symbol& ref)
: addr_(ref.addr_),
offset_(ref.offset_),
size_(ref.size_),
section_(ref.section_),
name_(ref.name_) {}
Elf32_Addr GetAddress() const { return addr_; }
Elf32_Addr GetMemoryAddress() const { return (addr_ & ~1) + offset_; }
uint32_t GetSize() const { return size_; }
const std::string& GetName() const { return name_; }
int GetSection() const { return section_; }
};
class SymbolTable : public std::map<Elf32_Addr, Symbol> {
public:
void insert(const Symbol& sym) {
VIXL_ASSERT(find(sym.GetAddress()) == end());
std::map<Elf32_Addr, Symbol>::insert(
std::make_pair(sym.GetMemoryAddress(), sym));
}
};
class SectionLocator {
const Elf32_Shdr* shdr_;
int nsections_;
const char* shstrtab_;
public:
explicit SectionLocator(const Elf32_Ehdr* ehdr) {
shdr_ = reinterpret_cast<const Elf32_Shdr*>(
reinterpret_cast<const char*>(ehdr) + ehdr->e_shoff);
// shstrtab holds the section names as an offset in the file.
shstrtab_ =
reinterpret_cast<const char*>(ehdr) + shdr_[ehdr->e_shstrndx].sh_offset;
nsections_ = ehdr->e_shnum;
}
const Elf32_Shdr* Locate(Elf32_Word type,
const std::string& section_name) const {
for (int shnum = 1; shnum < nsections_; shnum++) {
if ((shdr_[shnum].sh_type == type) &&
std::string(shstrtab_ + shdr_[shnum].sh_name) == section_name) {
return &shdr_[shnum];
}
}
return NULL;
}
};
template <typename VISITOR>
void LocateSymbols(const Elf32_Ehdr* ehdr,
const Elf32_Shdr* symtab,
const Elf32_Shdr* strtab,
VISITOR* visitor) {
if ((symtab != NULL) && (strtab != NULL)) {
const Elf32_Shdr* shdr = reinterpret_cast<const Elf32_Shdr*>(
reinterpret_cast<const char*>(ehdr) + ehdr->e_shoff);
const char* symnames =
reinterpret_cast<const char*>(ehdr) + strtab->sh_offset;
VIXL_CHECK(symnames != NULL);
int nsym = symtab->sh_size / symtab->sh_entsize;
const Elf32_Sym* sym = reinterpret_cast<const Elf32_Sym*>(
reinterpret_cast<const char*>(ehdr) + symtab->sh_offset);
for (int snum = 0; snum < nsym; snum++) {
if ((sym[snum].st_shndx > 0) && (sym[snum].st_shndx < ehdr->e_shnum) &&
(sym[snum].st_value != 0) &&
(shdr[sym[snum].st_shndx].sh_type == SHT_PROGBITS) &&
((ELF32_ST_BIND(sym[snum].st_info) == STB_LOCAL) ||
(ELF32_ST_BIND(sym[snum].st_info) == STB_GLOBAL)) &&
(ELF32_ST_TYPE(sym[snum].st_info) == STT_FUNC)) {
visitor->visit(symnames + sym[snum].st_name, sym[snum]);
}
}
}
}
class DynamicSymbolVisitor {
SymbolTable* symbols_;
public:
explicit DynamicSymbolVisitor(SymbolTable* symbols) : symbols_(symbols) {}
void visit(const char* symname, const Elf32_Sym& sym) {
symbols_->insert(
Symbol(symname, sym.st_value, 0, sym.st_size, sym.st_shndx));
}
};
class StaticSymbolVisitor {
const Elf32_Ehdr* ehdr_;
const Elf32_Shdr* shdr_;
SymbolTable* symbols_;
public:
StaticSymbolVisitor(const Elf32_Ehdr* ehdr, SymbolTable* symbols)
: ehdr_(ehdr),
shdr_(reinterpret_cast<const Elf32_Shdr*>(
reinterpret_cast<const char*>(ehdr) + ehdr->e_shoff)),
symbols_(symbols) {}
void visit(const char* symname, const Elf32_Sym& sym) {
if (ehdr_->e_type == ET_REL) {
symbols_->insert(Symbol(symname,
sym.st_value,
shdr_[sym.st_shndx].sh_offset,
sym.st_size,
sym.st_shndx));
} else {
symbols_->insert(
Symbol(symname,
sym.st_value,
shdr_[sym.st_shndx].sh_offset - shdr_[sym.st_shndx].sh_addr,
sym.st_size,
sym.st_shndx));
}
}
};
void usage() {
std::cout << "usage: disasm-a32 <file>\n"
"where <file> is an ELF ARM binaryfile, either an executable, "
"a shared object, or an object file."
<< std::endl;
}
int main(int argc, char** argv) {
const int kErrorNotARMELF32 = -1;
const int kErrorArguments = -2;
if (argc < 2) {
usage();
return kErrorArguments;
}
const char* filename = argv[1];
struct stat sb;
if (lstat(filename, &sb) == -1) {
std::cerr << "Cannot stat this file" << filename << std::endl;
return errno;
}
if (S_ISLNK(sb.st_mode)) {
static char linkname[4096];
filename = realpath(argv[1], linkname);
if (lstat(linkname, &sb) == -1) {
std::cerr << "Cannot stat this file: " << linkname << std::endl;
return errno;
}
}
int elf_in;
if ((elf_in = open(filename, O_RDONLY)) < 0) {
std::cerr << "Cannot open: " << argv[1];
if (filename != argv[1]) std::cerr << " aka " << filename;
std::cerr << std::endl;
return errno;
}
char* base_addr;
VIXL_CHECK((base_addr = reinterpret_cast<char*>(
mmap(0, sb.st_size, PROT_READ, MAP_PRIVATE, elf_in, 0))) !=
0);
const Elf32_Ehdr* ehdr = reinterpret_cast<const Elf32_Ehdr*>(base_addr);
if ((ehdr->e_ident[0] != 0x7f) || (ehdr->e_ident[1] != 'E') ||
(ehdr->e_ident[2] != 'L') || (ehdr->e_ident[3] != 'F') ||
(ehdr->e_ehsize != sizeof(Elf32_Ehdr))) {
std::cerr << "This file is not an 32-bit ELF file." << std::endl;
munmap(base_addr, sb.st_size);
return kErrorNotARMELF32;
}
if (ehdr->e_machine != EM_ARM) {
std::cerr << "This file is not using the ARM isa." << std::endl;
munmap(base_addr, sb.st_size);
return kErrorNotARMELF32;
}
// shstrtab holds the section names as an offset in the file.
const Elf32_Shdr* shdr =
reinterpret_cast<const Elf32_Shdr*>(base_addr + ehdr->e_shoff);
SectionLocator section_locator(ehdr);
SymbolTable symbol_names;
// Traverse the dynamic symbols defined in any text section
DynamicSymbolVisitor dynamic_visitor(&symbol_names);
LocateSymbols(ehdr,
section_locator.Locate(SHT_DYNSYM, ".dynsym"),
section_locator.Locate(SHT_STRTAB, ".dynstr"),
&dynamic_visitor);
// Traverse the static symbols defined in any text section
StaticSymbolVisitor static_visitor(ehdr, &symbol_names);
LocateSymbols(ehdr,
section_locator.Locate(SHT_SYMTAB, ".symtab"),
section_locator.Locate(SHT_STRTAB, ".strtab"),
&static_visitor);
vixl::aarch32::PrintDisassembler dis(std::cout, 0);
for (SymbolTable::iterator sres = symbol_names.begin();
sres != symbol_names.end();
sres++) {
const Symbol& symbol = sres->second;
uint32_t func_addr = symbol.GetAddress();
uint32_t func_size = symbol.GetSize();
if (func_size == 0) {
SymbolTable::iterator next_func = sres;
next_func++;
if (next_func == symbol_names.end()) {
const Elf32_Shdr& shndx = shdr[sres->second.GetSection()];
func_size = (shndx.sh_offset + shndx.sh_size) - sres->first;
} else {
func_size = next_func->first - sres->first;
}
}
std::cout << "--- " << symbol.GetName() << ":" << std::endl;
if ((func_addr & 1) == 1) {
func_addr &= ~1;
dis.SetCodeAddress(func_addr);
dis.DisassembleT32Buffer(reinterpret_cast<uint16_t*>(
base_addr + symbol.GetMemoryAddress()),
func_size);
} else {
dis.SetCodeAddress(func_addr);
dis.DisassembleA32Buffer(reinterpret_cast<uint32_t*>(
base_addr + symbol.GetMemoryAddress()),
func_size);
}
}
munmap(base_addr, sb.st_size);
return 0;
}
#else
#include "globals-vixl.h"
// TODO: Implement this example for macOS.
int main(void) {
VIXL_WARNING("This example has not been implemented for macOS.");
return 0;
}
#endif // __linux__

View File

@@ -1,96 +0,0 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_EXAMPLE_EXAMPLES_H_
#define VIXL_EXAMPLE_EXAMPLES_H_
extern "C" {
#include <stdint.h>
#ifndef VIXL_INCLUDE_SIMULATOR_AARCH32
#include <sys/mman.h>
#endif
}
#include <cstdio>
#include <string>
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"
#ifndef VIXL_INCLUDE_SIMULATOR_AARCH32
class ExecutableMemory {
public:
ExecutableMemory(const byte* code_start, size_t size)
: size_(size),
buffer_(reinterpret_cast<byte*>(mmap(NULL,
size,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_SHARED | MAP_ANONYMOUS,
-1,
0))) {
VIXL_ASSERT(reinterpret_cast<intptr_t>(buffer_) != -1);
memcpy(buffer_, code_start, size_);
__builtin___clear_cache(buffer_, buffer_ + size_);
}
~ExecutableMemory() { munmap(buffer_, size_); }
template <typename T>
T GetEntryPoint(const Label& entry_point, InstructionSet isa) const {
int32_t location = entry_point.GetLocation();
if (isa == T32) location += 1;
return GetOffsetAddress<T>(location);
}
protected:
template <typename T>
T GetOffsetAddress(int32_t offset) const {
VIXL_ASSERT((offset >= 0) && (static_cast<size_t>(offset) <= size_));
T function_address;
byte* buffer_address = buffer_ + offset;
memcpy(&function_address, &buffer_address, sizeof(T));
return function_address;
}
private:
size_t size_;
byte* buffer_;
};
#endif
// Generate a function with the following prototype:
// int32_t abs(int32_t x)
//
// The generated function computes the absolute value of an integer.
void GenerateAbs(vixl::aarch32::MacroAssembler* masm);
// Generate a function with the following prototype:
// uint32_t demo_function(uint32_t x)
//
// This is the example used in doc/getting-started-aarch32.md
void GenerateDemo(vixl::aarch32::MacroAssembler* masm);
#endif // VIXL_EXAMPLE_EXAMPLES_H_

View File

@@ -1,73 +0,0 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "examples.h"
using namespace vixl;
using namespace vixl::aarch32;
#define __ masm->
void GenerateDemo(MacroAssembler* masm) {
// uint32_t demo(uint32_t x)
// Load a constant in r1 using the literal pool.
__ Ldr(r1, 0x12345678);
__ And(r0, r0, r1);
__ Bx(lr);
}
#ifndef TEST_EXAMPLES
int main() {
MacroAssembler masm;
// Generate the code for the example function.
Label demo;
// Tell the macro assembler that the label "demo" refer to the current
// location in the buffer.
masm.Bind(&demo);
GenerateDemo(&masm);
// Ensure that everything is generated and that the generated buffer is
// ready to use.
masm.FinalizeCode();
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
// There is no simulator defined for VIXL AArch32.
#else
byte* code = masm.GetBuffer()->GetStartAddress<byte*>();
uint32_t code_size = masm.GetSizeOfCodeGenerated();
ExecutableMemory memory(code, code_size);
// Run the example function.
uint32_t (*demo_function)(uint32_t) =
memory
.GetEntryPoint<uint32_t (*)(uint32_t)>(demo,
masm.GetInstructionSetInUse());
uint32_t input_value = 0x89abcdef;
uint32_t output_value = (*demo_function)(input_value);
printf("native: demo(0x%08x) = 0x%08x\n", input_value, output_value);
#endif
return 0;
}
#endif // TEST_EXAMPLES

View File

@@ -1,222 +0,0 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "examples.h"
using namespace vixl;
using namespace vixl::aarch32;
#define __ masm->
void GenerateMandelBrot(MacroAssembler* masm) {
const QRegister kCReal = q0;
const QRegister kCImag = q1;
const QRegister kCRealStep = q13;
const QRegister kCImagStep = q14;
const QRegister kModSqLimit = q15;
// Save register values.
__ Push(RegisterList(r4, r5, r6));
__ Vmov(F32, kCRealStep, 0.125);
__ Vmov(F32, kCImagStep, 0.0625);
const Register kZero = r2;
__ Mov(kZero, 0);
const DRegister kStars = d6;
const DRegister kSpaces = d7;
// Output characters - packed 4 characters into 32 bits.
__ Vmov(I8, kStars, '*');
__ Vmov(I8, kSpaces, ' ');
const DRegisterLane kNegTwo = DRegisterLane(d7, 1);
__ Vmov(s15, -2.0);
// Imaginary part of c.
__ Vdup(Untyped32, kCImag, kNegTwo);
// Max modulus squared.
__ Vmov(F32, kModSqLimit, 4.0);
// Height of output in characters.
__ Mov(r4, 64);
// String length will be 129, so we need 132 bytes of space.
const uint32_t kStringLength = 132;
// Make space for our string.
__ Sub(sp, sp, kStringLength);
// Set up a starting pointer for the string.
const Register kStringPtr = r6;
__ Mov(kStringPtr, sp);
// Loop over imaginary values of c from -2 to 2, taking
// 64 equally spaced values in the range.
{
Label c_imag_loop;
__ Bind(&c_imag_loop);
// Real part of c.
// Store 4 equally spaced values in q0 (kCReal) to use SIMD.
__ Vmov(s0, -2.0);
__ Vmov(s1, -1.96875);
__ Vmov(s2, -1.9375);
__ Vmov(s3, -1.90625);
// Width of output in terms of sets of 4 characters - twice that
// of height to compensate for ratio of character height to width.
__ Mov(r5, 32);
const Register kWriteCursor = r3;
// Set a cursor ready to write the next line.
__ Mov(kWriteCursor, kStringPtr);
// Loop over real values of c from -2 to 2, processing
// 4 different values simultaneously using SIMD.
{
const QRegister kFlags = q2;
const DRegister kLowerFlags = d4;
Label c_real_loop;
__ Bind(&c_real_loop);
// Get number of iterations.
__ Add(r1, r0, 1);
// Perform the iterations of z(n+1) = zn^2 + c using SIMD.
// If the result is that c is in the set, the element of
// kFlags will be 0, else ~0.
{
const QRegister kZReal = q8;
const QRegister kZImag = q9;
// Real part of z.
__ Vmov(F32, kZReal, 0.0);
// Imaginary part of z.
__ Vmov(F32, kZImag, 0.0);
__ Vmov(F32, kFlags, 0.0);
Label iterative_formula_start, iterative_formula_end;
__ Bind(&iterative_formula_start);
__ Subs(r1, r1, 1);
__ B(le, &iterative_formula_end);
// z(n+1) = zn^2 + c.
// re(z(n+1)) = re(c) + re(zn)^2 - im(zn)^2.
// im(z(n+1)) = im(c) + 2 * re(zn) * im(zn)
__ Vmul(F32, q10, kZReal, kZImag); // re(zn) * im(zn)
__ Vmul(F32, kZReal, kZReal, kZReal); // re(zn)^2
__ Vadd(F32, kZReal, kCReal, kZReal); // re(c) + re(zn)^2
__ Vmls(F32, kZReal, kZImag, kZImag); // re(c) + re(zn)^2 - im(zn)^2
__ Vmov(F32, kZImag, kCImag); // im(c)
__ Vmls(F32, kZImag, q10, kNegTwo); // im(c) + 2 * re(zn) * im(zn)
__ Vmul(F32, q10, kZReal, kZReal); // re(z(n+1))^2
__ Vmla(F32, q10, kZImag, kZImag); // re(z(n+1))^2 + im(z(n+1))^2
__ Vcgt(F32, q10, q10, kModSqLimit); // |z(n+1)|^2 > 4 ? ~0 : 0
__ Vorr(F32, kFlags, kFlags, q10); // (~0/0) | above result
__ B(&iterative_formula_start);
__ Bind(&iterative_formula_end);
}
// Narrow twice so that each mask is 8 bits, packed into
// a single 32 bit register s4.
// kLowerFlags is the lower half of kFlags, so the second narrow will
// be working on the results of the first to halve the size of each
// representation again.
__ Vmovn(I32, kLowerFlags, kFlags);
__ Vmovn(I16, kLowerFlags, kFlags);
// '*' if in set, ' ' if not.
__ Vbsl(Untyped32, kLowerFlags, kSpaces, kStars);
// Add this to the string.
__ Vst1(Untyped32,
NeonRegisterList(kLowerFlags, 0),
AlignedMemOperand(kWriteCursor, k32BitAlign, PostIndex));
// Increase real part of c.
__ Vadd(F32, kCReal, kCReal, kCRealStep);
__ Subs(r5, r5, 1);
__ B(ne, &c_real_loop);
}
// Put terminating character.
__ Strb(kZero, MemOperand(kWriteCursor));
// Print the string.
__ Printf("%s\n", kStringPtr);
// Increase imaginary part of c.
__ Vadd(F32, kCImag, kCImag, kCImagStep);
__ Subs(r4, r4, 1);
__ B(ne, &c_imag_loop);
}
// Restore stack pointer.
__ Add(sp, sp, kStringLength);
// Restore register values.
__ Pop(RegisterList(r4, r5, r6));
__ Bx(lr);
}
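// Illustrative sketch added for clarity; neither this helper nor its name is
// part of the original example. It is a scalar C++ version of the per-point
// escape test that the NEON loop above performs on four real values of c at
// a time: a point is treated as inside the set if |z|^2 never exceeds 4
// during the iterations (mirroring the sticky Vorr accumulation into kFlags).
static bool MandelbrotPointReference(float c_real, float c_imag,
                                     uint32_t iterations) {
  float z_real = 0.0f;
  float z_imag = 0.0f;
  bool escaped = false;
  for (uint32_t i = 0; i < iterations; i++) {
    // z(n+1) = zn^2 + c.
    float next_real = c_real + z_real * z_real - z_imag * z_imag;
    float next_imag = c_imag + 2.0f * z_real * z_imag;
    z_real = next_real;
    z_imag = next_imag;
    escaped = escaped || ((z_real * z_real + z_imag * z_imag) > 4.0f);
  }
  return !escaped;
}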
#ifndef TEST_EXAMPLES
int main() {
MacroAssembler masm;
// Generate the code for the example function.
Label mandelbrot;
masm.Bind(&mandelbrot);
GenerateMandelBrot(&masm);
masm.FinalizeCode();
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
// There is no simulator defined for VIXL AArch32.
printf("This example cannot be simulated\n");
#else
byte* code = masm.GetBuffer()->GetStartAddress<byte*>();
uint32_t code_size = masm.GetSizeOfCodeGenerated();
ExecutableMemory memory(code, code_size);
// Run the example function.
double (*mandelbrot_func)(uint32_t) =
memory.GetEntryPoint<double (*)(uint32_t)>(mandelbrot,
masm.GetInstructionSetInUse());
uint32_t iterations = 1000;
(*mandelbrot_func)(iterations);
#endif
return 0;
}
#endif // TEST_EXAMPLES


@@ -1,110 +0,0 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "examples.h"
using namespace vixl;
using namespace vixl::aarch32;
#define __ masm->
void GenerateApproximatePi(MacroAssembler* masm) {
// double ApproximatePi(uint32_t iterations)
// Very rough approximation of pi
// pi/4 = 1 - 1/3 + 1/5 - 1/7 + ... + (-1)^n / (2n + 1)
__ Cmp(r0, 0);
__ Bx(eq, lr);
__ Vpush(Untyped64, DRegisterList(d8, 8));
__ Vldr(d0, 1.0);
__ Vldr(d1, 3.0);
__ Vldr(d2, 5.0);
__ Vldr(d3, 7.0);
__ Vmov(d4, 8.0);
__ Vmov(d5, 1.0);
__ Vmov(I64, d10, 0); // d10 = 0.0;
__ Vmov(I64, d11, 0); // d11 = 0.0;
__ Vmov(I64, d12, 0); // d12 = 0.0;
__ Vmov(I64, d13, 0); // d13 = 0.0
Label loop;
__ Bind(&loop);
__ Vdiv(F64, d6, d5, d0);
__ Vdiv(F64, d7, d5, d1);
__ Vdiv(F64, d8, d5, d2);
__ Vdiv(F64, d9, d5, d3);
__ Vadd(F64, d10, d10, d6);
__ Vadd(F64, d11, d11, d7);
__ Vadd(F64, d12, d12, d8);
__ Vadd(F64, d13, d13, d9);
__ Vadd(F64, d0, d0, d4);
__ Vadd(F64, d1, d1, d4);
__ Vadd(F64, d2, d2, d4);
__ Vadd(F64, d3, d3, d4);
__ Subs(r0, r0, 1);
__ B(ne, &loop);
__ Vmov(F64, d4, 4.0);
__ Vadd(F64, d10, d10, d12);
__ Vadd(F64, d11, d11, d13);
__ Vsub(F64, d10, d10, d11);
__ Vmul(F64, d0, d10, d4);
__ Vpop(Untyped64, DRegisterList(d8, 8));
__ Bx(lr);
}
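// Illustrative sketch added for clarity; neither this helper nor its name is
// part of the original example. It is a scalar C++ version of the series the
// generated code evaluates: the four partial sums mirror the d10-d13
// accumulators, and each loop iteration consumes four terms of
// pi/4 = 1 - 1/3 + 1/5 - 1/7 + ...
static double ApproximatePiReference(uint32_t iterations) {
  double d0 = 1.0, d1 = 3.0, d2 = 5.0, d3 = 7.0;  // Current denominators.
  double s0 = 0.0, s1 = 0.0, s2 = 0.0, s3 = 0.0;  // Partial sums.
  for (uint32_t i = 0; i < iterations; i++) {
    s0 += 1.0 / d0;  // 1/1 + 1/9  + ...
    s1 += 1.0 / d1;  // 1/3 + 1/11 + ...
    s2 += 1.0 / d2;  // 1/5 + 1/13 + ...
    s3 += 1.0 / d3;  // 1/7 + 1/15 + ...
    d0 += 8.0;
    d1 += 8.0;
    d2 += 8.0;
    d3 += 8.0;
  }
  // Positive terms land in s0 and s2, negative ones in s1 and s3.
  return 4.0 * ((s0 + s2) - (s1 + s3));
}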
#ifndef TEST_EXAMPLES
int main() {
MacroAssembler masm;
// Generate the code for the example function.
Label pi_approx;
masm.Bind(&pi_approx);
GenerateApproximatePi(&masm);
masm.FinalizeCode();
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
// There is no simulator defined for VIXL AArch32.
printf("This example cannot be simulated\n");
#else
byte* code = masm.GetBuffer()->GetStartAddress<byte*>();
uint32_t code_size = masm.GetSizeOfCodeGenerated();
ExecutableMemory memory(code, code_size);
// Run the example function.
double (*pi_function)(uint32_t) =
memory.GetEntryPoint<double (*)(uint32_t)>(pi_approx,
masm.GetInstructionSetInUse());
uint32_t repeat = 10000000;
double output_value = (*pi_function)(repeat);
printf("native: pi_approx(%u) = %3.10f\n", repeat, output_value);
#endif
return 0;
}
#endif // TEST_EXAMPLES


@@ -1,74 +0,0 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "examples.h"
using namespace vixl;
using namespace vixl::aarch64;
#define __ masm->
void GenerateAbs(MacroAssembler* masm) {
// int64_t abs(int64_t x)
// Argument location:
// x -> x0
// This example uses a conditional instruction (cneg) to compute the
// absolute value of an integer.
__ Cmp(x0, 0);
__ Cneg(x0, x0, mi);
__ Ret();
}
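// Illustrative sketch added for clarity; neither this helper nor its name is
// part of the original example. It is the C++ equivalent of the Cmp/Cneg pair
// above: Cneg negates its operand only when the 'mi' (negative) condition is
// set by the comparison with zero.
static int64_t AbsReference(int64_t x) {
  return (x < 0) ? -x : x;
}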
#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
MacroAssembler masm;
Decoder decoder;
Simulator simulator(&decoder);
// Generate the code for the example function.
Label abs;
masm.Bind(&abs);
GenerateAbs(&masm);
masm.FinalizeCode();
// Run the example function.
int64_t input_value = -42;
simulator.WriteXRegister(0, input_value);
simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&abs));
printf("abs(%" PRId64 ") = %" PRId64 "\n",
input_value,
simulator.ReadXRegister(0));
return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES


@@ -1,161 +0,0 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "examples.h"
using namespace vixl;
using namespace vixl::aarch64;
// Macro to compute the number of elements in a vector.
#define ARRAY_SIZE(Array) (sizeof(Array) / sizeof((Array)[0]))
#define __ masm->
/*
* This example adds two vectors with 1-byte elements using NEON instructions,
* and returns the results in the first vector.
*/
void GenerateAdd2Vectors(MacroAssembler* masm) {
// void add2_vectors(uint8_t *vec_a, const uint8_t *vec_b, unsigned size)
// Argument locations:
// vec_a (pointer) -> x0
// vec_b (pointer) -> x1
// size (integer) -> w2
// Result returned in vec_a.
Label loop16, loopr, end;
// Loop to add vector elements in 16-byte chunks.
__ Bind(&loop16);
// Handle vectors smaller than 16 bytes in the remainder loop.
__ Cmp(w2, 16);
__ B(lo, &loopr);
__ Sub(w2, w2, 16);
// Add vectors in 16-byte chunks.
__ Ld1(v0.V16B(), MemOperand(x0));
__ Ld1(v1.V16B(), MemOperand(x1, 16, PostIndex));
__ Add(v0.V16B(), v0.V16B(), v1.V16B());
__ St1(v0.V16B(), MemOperand(x0, 16, PostIndex));
__ B(&loop16);
// Loop to add the remaining vector elements.
__ Bind(&loopr);
// If there are no more vector elements to process, then exit.
__ Cbz(w2, &end);
__ Sub(w2, w2, 1);
// Add remaining vector elements in 1-byte chunks.
__ Ldrb(w5, MemOperand(x0));
__ Ldrb(w6, MemOperand(x1, 1, PostIndex));
__ Add(w5, w5, w6);
__ Strb(w5, MemOperand(x0, 1, PostIndex));
__ B(&loopr);
__ Bind(&end);
__ Ret();
}
void PrintVector(const uint8_t* vec, unsigned num) {
unsigned i;
printf("( ");
if (num > 0) {
for (i = 0; i < num - 1; ++i) {
printf("%d, ", vec[i]);
}
printf("%d", vec[i]);
}
printf(" )\n");
}
#ifndef TEST_EXAMPLES
int main(void) {
MacroAssembler masm;
// Generate native code for the example function.
Label add2_vectors;
masm.Bind(&add2_vectors);
GenerateAdd2Vectors(&masm);
masm.FinalizeCode();
// Initialize input data for the example function.
// clang-format off
uint8_t vec_a[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20};
uint8_t vec_b[] = {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36};
// clang-format on
uint8_t vec_c[ARRAY_SIZE(vec_a)];
// Check whether the number of elements in both vectors match.
VIXL_CHECK(ARRAY_SIZE(vec_a) == ARRAY_SIZE(vec_b));
// Compute the result in C.
for (unsigned i = 0; i < ARRAY_SIZE(vec_a); i++) {
vec_c[i] = vec_a[i] + vec_b[i];
}
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
uintptr_t vec_a_addr = reinterpret_cast<uintptr_t>(vec_a);
uintptr_t vec_b_addr = reinterpret_cast<uintptr_t>(vec_b);
// Configure register environment in the simulator.
Decoder decoder;
Simulator simulator(&decoder);
simulator.WriteXRegister(0, vec_a_addr);
simulator.WriteXRegister(1, vec_b_addr);
simulator.WriteXRegister(2, ARRAY_SIZE(vec_a));
PrintVector(vec_a, ARRAY_SIZE(vec_a));
printf(" +\n");
PrintVector(vec_b, ARRAY_SIZE(vec_b));
// Run the example function in the simulator.
simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&add2_vectors));
printf(" =\n");
PrintVector(vec_a, ARRAY_SIZE(vec_a));
// Check that the computed value in NEON matches the C version.
for (unsigned i = 0; i < ARRAY_SIZE(vec_a); i++) {
VIXL_CHECK(vec_c[i] == vec_a[i]);
}
#else
USE(vec_c);
// Placeholder to run test natively.
printf("Running tests natively is not supported yet.\n");
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
return 0;
}
#endif // TEST_EXAMPLES


@@ -1,77 +0,0 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "examples.h"
using namespace vixl;
using namespace vixl::aarch64;
#define __ masm->
void GenerateAdd3Double(MacroAssembler* masm) {
// double add3_double(double x, double y, double z)
// Argument locations:
// x -> d0
// y -> d1
// z -> d2
__ Fadd(d0, d0, d1); // d0 <- x + y
__ Fadd(d0, d0, d2); // d0 <- d0 + z
// The return value is already in d0.
__ Ret();
}
#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
MacroAssembler masm;
Decoder decoder;
Simulator simulator(&decoder);
// Generate the code for the example function.
Label add3_double;
masm.Bind(&add3_double);
GenerateAdd3Double(&masm);
masm.FinalizeCode();
// Run the example function.
double a = 498.36547;
double b = 23.369;
double c = 7964.697954;
simulator.WriteDRegister(0, a);
simulator.WriteDRegister(1, b);
simulator.WriteDRegister(2, c);
simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&add3_double));
printf("%f + %f + %f = %f\n", a, b, c, simulator.ReadDRegister(0));
return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES


@@ -1,90 +0,0 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "examples.h"
using namespace vixl;
using namespace vixl::aarch64;
#define __ masm->
void GenerateAdd4Double(MacroAssembler* masm) {
// double Add4Double(uint64_t a, double b, uint64_t c, double d)
// Argument locations:
// a -> x0
// b -> d0
// c -> x1
// d -> d1
// Turn 'a' and 'c' into double values.
__ Ucvtf(d2, x0);
__ Ucvtf(d3, x1);
// Add everything together.
__ Fadd(d0, d0, d1);
__ Fadd(d2, d2, d3);
__ Fadd(d0, d0, d2);
// The return value is in d0.
__ Ret();
}
#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
MacroAssembler masm;
Decoder decoder;
Simulator simulator(&decoder);
// Generate the code for the example function.
Label add4_double;
masm.Bind(&add4_double);
GenerateAdd4Double(&masm);
masm.FinalizeCode();
// Run the example function.
uint64_t a = 21;
double b = 987.3654;
uint64_t c = 4387;
double d = 36.698754;
simulator.WriteXRegister(0, a);
simulator.WriteDRegister(0, b);
simulator.WriteXRegister(1, c);
simulator.WriteDRegister(1, d);
simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&add4_double));
// clang-format off
printf("%" PRIu64 " + %f + %" PRIu64 " + %f = %f\n",
a, b, c, d, simulator.ReadDRegister(0));
// clang-format on
return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES


@@ -1,106 +0,0 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "examples.h"
using namespace vixl;
using namespace vixl::aarch64;
#define __ masm->
void GenerateCheckBounds(MacroAssembler* masm) {
// uint64_t check_bounds(uint64_t value, uint64_t low, uint64_t high)
// Argument locations:
// value -> x0
// low -> x1
// high -> x2
// First we compare 'value' with the 'low' bound. If x1 <= x0 the N flag will
// be cleared. This configuration can be checked with the 'pl' condition.
__ Cmp(x0, x1);
// Now we will compare 'value' and 'high' (x0 and x2) but only if the 'pl'
// condition is verified. If the condition is not verified, we will clear
// all the flags except the carry one (C flag).
__ Ccmp(x0, x2, CFlag, pl);
// We set x0 to 1 only if the 'ls' condition is satisfied.
// 'ls' performs the following test: !(C==1 && Z==0). If the previous
// comparison has been skipped we have C==1 and Z==0, so the 'ls' test
// will fail and x0 will be set to 0.
// Otherwise if the previous comparison occurred, x0 will be set to 1
// only if x0 is less than or equal to x2.
__ Cset(x0, ls);
__ Ret();
}
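// Illustrative sketch added for clarity; neither this helper nor its name is
// part of the original example. It is a C++ version of the test implemented
// by the Cmp/Ccmp/Cset sequence above, for the value ranges used in this
// example.
static uint64_t CheckBoundsReference(uint64_t value, uint64_t low,
                                     uint64_t high) {
  return ((value >= low) && (value <= high)) ? 1 : 0;
}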
#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
void run_function(Simulator* simulator,
Instruction* function,
uint64_t value,
uint64_t low,
uint64_t high) {
simulator->WriteXRegister(0, value);
simulator->WriteXRegister(1, low);
simulator->WriteXRegister(2, high);
simulator->RunFrom(function);
printf("%" PRIu64 " %s between %" PRIu64 " and %" PRIu64 "\n",
value,
simulator->ReadXRegister(0) ? "is" : "is not",
low,
high);
simulator->ResetState();
}
int main(void) {
MacroAssembler masm;
Decoder decoder;
Simulator simulator(&decoder);
// Generate the code for the example function.
Label check_bounds;
masm.Bind(&check_bounds);
GenerateCheckBounds(&masm);
masm.FinalizeCode();
// Run the example function.
Instruction* function = masm.GetLabelAddress<Instruction*>(&check_bounds);
run_function(&simulator, function, 546, 50, 1000);
run_function(&simulator, function, 62, 100, 200);
run_function(&simulator, function, 200, 100, 200);
return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES


@@ -1,53 +0,0 @@
// Copyright 2020, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "examples.h"
using namespace vixl;
using namespace vixl::aarch64;
// Demonstrate the use of VIXL's CPU feature detection, printing the features
// that VIXL detects.
#ifndef TEST_EXAMPLES
int main() {
// Simple native deployments should initialise CPU features using
// `InferFromOS()`. If the host is not AArch64, this returns an empty set of
// features.
std::cout << "==== CPUFeatures::InferFromOS() ====\n";
std::cout << CPUFeatures::InferFromOS() << "\n";
// VIXL assumes support for FP, NEON and CRC32 by default. These features were
// implemented before the CPUFeatures mechanism.
std::cout << "==== CPUFeatures::AArch64LegacyBaseline() ====\n";
std::cout << CPUFeatures::AArch64LegacyBaseline() << "\n";
// Retrieve a list of all supported CPU features.
std::cout << "==== CPUFeatures::All() ====\n";
std::cout << CPUFeatures::All() << "\n";
return 0;
}
#endif


@@ -1,106 +0,0 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "examples.h"
using namespace vixl;
using namespace vixl::aarch64;
#define __ masm->
void GenerateCrc32(MacroAssembler* masm) {
// uint32_t crc32(const char *msg, size_t length)
// Argument location:
// msg (pointer) -> x0
// length (integer) -> x1
// This example computes a CRC-32 checksum (using the CRC32B instruction) on
// an input array of a given size and returns the resulting checksum in w0.
Label loop, end;
// Move input array to temp register so we can re-use w0 as return register.
__ Mov(x2, x0);
// Initial remainder for the checksum. If length=0, then this value will be
// returned.
__ Mov(w0, 0xffffffff);
// Loop for iterating through the array, starting at msg[0].
__ Bind(&loop);
// If no more elements to process, then exit function.
__ Cbz(x1, &end);
__ Sub(x1, x1, 1);
// Compute checksum for msg[i].
__ Ldrb(w3, MemOperand(x2, 1, PostIndex));
__ Crc32b(w0, w0, w3);
__ B(&loop);
__ Bind(&end);
__ Ret();
}
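// Illustrative sketch added for clarity; neither this helper nor its name is
// part of the original example. It is a bitwise C++ version of the checksum
// the generated code computes: CRC32B uses the reflected CRC-32 polynomial
// 0xEDB88320, and, like the code above, this sketch starts from an all-ones
// remainder and applies no final inversion, so it should produce matching
// checksums.
static uint32_t Crc32Reference(const char* msg, size_t length) {
  uint32_t crc = 0xffffffff;
  for (size_t i = 0; i < length; i++) {
    crc ^= static_cast<uint8_t>(msg[i]);
    for (int bit = 0; bit < 8; bit++) {
      // Shift one bit and conditionally apply the reflected polynomial,
      // based on the bit shifted out.
      crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0u);
    }
  }
  return crc;
}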
#ifndef TEST_EXAMPLES
void RunExample(const char* msg) {
MacroAssembler masm;
// Generate the code for the example function.
Label func;
masm.Bind(&func);
GenerateCrc32(&masm);
masm.FinalizeCode();
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
// Run example function in the simulator.
uintptr_t msg_addr = reinterpret_cast<uintptr_t>(msg);
size_t msg_size = strlen(msg);
Decoder decoder;
Simulator simulator(&decoder);
simulator.WriteXRegister(0, msg_addr);
simulator.WriteXRegister(1, msg_size);
simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&func));
printf("crc32(\"%s\")=0x%x\n", msg, simulator.ReadWRegister(0));
#else
// Run example function natively.
printf("Not yet implemented.\n");
USE(msg);
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
}
int main(void) {
RunExample("Hello World!");
RunExample("do");
RunExample("1");
RunExample("");
return 0;
}
#endif // TEST_EXAMPLES


@@ -1,193 +0,0 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "custom-disassembler.h"
#include "examples.h"
using namespace vixl;
using namespace vixl::aarch64;
#define __ masm->
// We override this method to specify how register names should be disassembled.
void CustomDisassembler::AppendRegisterNameToOutput(const Instruction* instr,
const CPURegister& reg) {
USE(instr);
if (reg.IsRegister()) {
switch (reg.GetCode()) {
case 16:
AppendToOutput(reg.Is64Bits() ? "ip0" : "wip0");
return;
case 17:
AppendToOutput(reg.Is64Bits() ? "ip1" : "wip1");
return;
case 30:
AppendToOutput(reg.Is64Bits() ? "lr" : "w30");
return;
case kSPRegInternalCode:
AppendToOutput(reg.Is64Bits() ? "x_stack_pointer" : "w_stack_pointer");
return;
case 31:
AppendToOutput(reg.Is64Bits() ? "x_zero_reg" : "w_zero_reg");
return;
default:
// Fall through.
break;
}
}
// Print other register names as usual.
Disassembler::AppendRegisterNameToOutput(instr, reg);
}
static const char* FakeLookupTargetDescription(const void* address) {
USE(address);
// We fake looking up the address.
static int i = 0;
const char* desc = NULL;
if (i == 0) {
desc = "label: somewhere";
} else if (i == 2) {
desc = "label: somewhere else";
}
i++;
return desc;
}
// We override this method to add a description to addresses that we know about.
// In this example we fake looking up a description, but in practice one could
// for example use a table mapping addresses to function names.
void CustomDisassembler::AppendCodeRelativeCodeAddressToOutput(
const Instruction* instr, const void* addr) {
USE(instr);
// Print the address.
int64_t rel_addr = CodeRelativeAddress(addr);
if (rel_addr >= 0) {
AppendToOutput("(addr 0x%" PRIx64, rel_addr);
} else {
AppendToOutput("(addr -0x%" PRIx64, -rel_addr);
}
// If available, print a description of the address.
const char* address_desc = FakeLookupTargetDescription(addr);
if (address_desc != NULL) {
Disassembler::AppendToOutput(" ; %s", address_desc);
}
AppendToOutput(")");
}
// We override this method to add a comment to this type of instruction. Helpers
// from the vixl::Instruction class can be used to analyse the instruction being
// disassembled.
void CustomDisassembler::VisitAddSubShifted(const Instruction* instr) {
vixl::aarch64::Disassembler::VisitAddSubShifted(instr);
if (instr->GetRd() == 10) {
AppendToOutput(" // add/sub to x10");
}
ProcessOutput(instr);
}
void GenerateCustomDisassemblerTestCode(MacroAssembler* masm) {
// Generate some code to illustrate how the modified disassembler changes the
// disassembly output.
Label begin, end;
__ Bind(&begin);
__ Add(x10, x16, x17);
__ Cbz(x10, &end);
__ Add(x11, ip0, ip1);
__ Add(w5, w6, w30);
__ Tbz(x10, 2, &begin);
__ Tbnz(x10, 3, &begin);
__ Br(x30);
__ Br(lr);
__ Fadd(d30, d16, d17);
__ Push(xzr, xzr);
__ Pop(x16, x20);
__ Bind(&end);
}
void TestCustomDisassembler() {
MacroAssembler masm;
// Generate the code.
Label code_start, code_end;
masm.Bind(&code_start);
GenerateCustomDisassemblerTestCode(&masm);
masm.Bind(&code_end);
masm.FinalizeCode();
Instruction* instr_start = masm.GetLabelAddress<Instruction*>(&code_start);
Instruction* instr_end = masm.GetLabelAddress<Instruction*>(&code_end);
// Instantiate a standard disassembler, our custom disassembler, and register
// them with a decoder.
Decoder decoder;
Disassembler disasm;
CustomDisassembler custom_disasm;
decoder.AppendVisitor(&disasm);
decoder.AppendVisitor(&custom_disasm);
// In our custom disassembler, disassemble as if the base address was -0x8.
// Note that this can also be achieved with
// custom_disasm.MapCodeAddress(0x0, instr_start + 2 * kInstructionSize);
// Users may generally want to map the start address to 0x0. Mapping to a
// negative offset can be used to focus on the section of the
// disassembly at address 0x0.
custom_disasm.MapCodeAddress(-0x8, instr_start);
// Iterate through the instructions to show the difference in the disassembly.
Instruction* instr;
for (instr = instr_start; instr < instr_end; instr += kInstructionSize) {
decoder.Decode(instr);
printf("\n");
printf("VIXL disasm\t %p:\t%s\n",
reinterpret_cast<void*>(instr),
disasm.GetOutput());
int64_t rel_addr =
custom_disasm.CodeRelativeAddress(reinterpret_cast<void*>(instr));
char rel_addr_sign_char = ' ';
if (rel_addr < 0) {
rel_addr_sign_char = '-';
rel_addr = -rel_addr;
}
printf("custom disasm\t%c0x%" PRIx64 ":\t%s\n",
rel_addr_sign_char,
rel_addr,
custom_disasm.GetOutput());
}
}
#ifndef TEST_EXAMPLES
int main() {
TestCustomDisassembler();
return 0;
}
#endif


@@ -1,56 +0,0 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_EXAMPLES_CUSTOM_DISASSEMBLER_H_
#define VIXL_EXAMPLES_CUSTOM_DISASSEMBLER_H_
#include "aarch64/disasm-aarch64.h"
void TestCustomDisassembler();
// We want to change three things in the disassembly:
// - Add comments to some add/sub instructions.
// - Use aliases for register names.
// - Add descriptions for code addresses.
class CustomDisassembler : public vixl::aarch64::Disassembler {
public:
CustomDisassembler() : vixl::aarch64::Disassembler() {}
virtual ~CustomDisassembler() {}
virtual void VisitAddSubShifted(const vixl::aarch64::Instruction* instr)
VIXL_OVERRIDE;
protected:
virtual void AppendRegisterNameToOutput(
const vixl::aarch64::Instruction* instr,
const vixl::aarch64::CPURegister& reg) VIXL_OVERRIDE;
virtual void AppendCodeRelativeCodeAddressToOutput(
const vixl::aarch64::Instruction* instr, const void* addr) VIXL_OVERRIDE;
};
#endif


@@ -1,134 +0,0 @@
// Copyright 2020, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "code-buffer-vixl.h"
#include "aarch64/decoder-aarch64.h"
#include "aarch64/disasm-aarch64.h"
// This example is interactive, and isn't tested systematically.
#ifndef TEST_EXAMPLES
using namespace vixl;
using namespace vixl::aarch64;
void PrintUsage(char const* name) {
printf("Usage: %s [OPTION]... <INSTRUCTION>...\n", name);
printf("\n");
printf("Disassemble ad-hoc A64 instructions.\n");
printf("\n");
printf(
"Options:\n"
" --start-at <address>\n"
" Start disassembling from <address> Any signed 64-bit value\n"
" accepted by strtoll can be specified. The address is printed\n"
" alongside each instruction, and it is also used to decode\n"
" PC-relative offsets.\n"
"\n"
" Defaults to 0.\n"
"\n");
printf(
"<instruction>\n"
" A hexadecimal representation of an A64 instruction. The leading '0x'\n"
" (or '0X') is optional.\n"
"\n"
" Multiple instructions can be provided; they will be disassembled as\n"
" if they were read sequentially from memory.\n"
"\n");
printf("Examples:\n");
printf(" $ %s d2824685\n", name);
printf(" 0x0000000000000000: d2824685 movz x5, #0x1234\n");
printf("\n");
printf(" $ %s --start-at -4 0x10fffe85 0xd61f00a0\n", name);
printf(" -0x0000000000000004: 10fffe85 adr x5, #-0x30 (addr -0x34)\n");
printf(" 0x0000000000000000: d61f00a0 br x5\n");
}
Instr ParseInstr(char const* arg) {
// TODO: Error handling for out-of-range inputs.
return (Instr)strtoul(arg, NULL, 16);
}
int64_t ParseInt64(char const* arg) {
// TODO: Error handling for out-of-range inputs.
return (int64_t)strtoll(arg, NULL, 0);
}
int main(int argc, char* argv[]) {
for (int i = 1; i < argc; i++) {
char const* arg = argv[i];
if ((strcmp(arg, "--help") == 0) || (strcmp(arg, "-h") == 0)) {
PrintUsage(argv[0]);
return 0;
}
}
// Assume an address of 0, unless otherwise specified.
int64_t start_address = 0;
// Allocate space for one instruction per argument.
CodeBuffer buffer((argc - 1) * kInstructionSize);
bool expect_start_at = false;
for (int i = 1; i < argc; i++) {
char* arg = argv[i];
if (expect_start_at) {
start_address = ParseInt64(arg);
expect_start_at = false;
} else if (strcmp(arg, "--start-at") == 0) {
expect_start_at = true;
} else {
// Assume that everything else is an instruction.
buffer.Emit(ParseInstr(arg));
}
}
buffer.SetClean();
if (expect_start_at) {
printf("No address given. Use: --start-at <address>\n");
return 1;
}
if (buffer.GetSizeInBytes() == 0) {
printf("Nothing to disassemble.\n");
return 0;
}
// Disassemble the buffer.
const Instruction* start = buffer.GetStartAddress<Instruction*>();
const Instruction* end = buffer.GetEndAddress<Instruction*>();
vixl::aarch64::PrintDisassembler disasm(stdout);
disasm.PrintSignedAddresses(true);
disasm.MapCodeAddress(start_address, start);
disasm.DisassembleBuffer(start, end);
return 0;
}
#endif // TEST_EXAMPLES

Some files were not shown because too many files have changed in this diff.