remove obsolete files

mgthepro
2022-07-31 12:49:57 +02:00
parent 117076fd71
commit de846fb71e
3631 changed files with 0 additions and 9433291 deletions


@@ -1,79 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#define _USE_MATH_DEFINES
#include <algorithm>
#include <array>
#include <cmath>
#include <vector>
#include "audio_core/algorithm/filter.h"
#include "common/common_types.h"
namespace AudioCore {
Filter Filter::LowPass(double cutoff, double Q) {
const double w0 = 2.0 * M_PI * cutoff;
const double sin_w0 = std::sin(w0);
const double cos_w0 = std::cos(w0);
const double alpha = sin_w0 / (2 * Q);
const double a0 = 1 + alpha;
const double a1 = -2.0 * cos_w0;
const double a2 = 1 - alpha;
const double b0 = 0.5 * (1 - cos_w0);
const double b1 = 1.0 * (1 - cos_w0);
const double b2 = 0.5 * (1 - cos_w0);
return {a0, a1, a2, b0, b1, b2};
}
Filter::Filter() : Filter(1.0, 0.0, 0.0, 1.0, 0.0, 0.0) {}
Filter::Filter(double a0_, double a1_, double a2_, double b0_, double b1_, double b2_)
: a1(a1_ / a0_), a2(a2_ / a0_), b0(b0_ / a0_), b1(b1_ / a0_), b2(b2_ / a0_) {}
void Filter::Process(std::vector<s16>& signal) {
const std::size_t num_frames = signal.size() / 2;
for (std::size_t i = 0; i < num_frames; i++) {
std::rotate(in.begin(), in.end() - 1, in.end());
std::rotate(out.begin(), out.end() - 1, out.end());
for (std::size_t ch = 0; ch < channel_count; ch++) {
in[0][ch] = signal[i * channel_count + ch];
out[0][ch] = b0 * in[0][ch] + b1 * in[1][ch] + b2 * in[2][ch] - a1 * out[1][ch] -
a2 * out[2][ch];
signal[i * 2 + ch] = static_cast<s16>(std::clamp(out[0][ch], -32768.0, 32767.0));
}
}
}
/// Calculates the appropriate Q for each biquad in a cascading filter.
/// @param total_count The total number of biquads to be cascaded.
/// @param index 0-index of the biquad to calculate the Q value for.
static double CascadingBiquadQ(std::size_t total_count, std::size_t index) {
const auto pole =
M_PI * static_cast<double>(2 * index + 1) / (4.0 * static_cast<double>(total_count));
return 1.0 / (2.0 * std::cos(pole));
}
CascadingFilter CascadingFilter::LowPass(double cutoff, std::size_t cascade_size) {
std::vector<Filter> cascade(cascade_size);
for (std::size_t i = 0; i < cascade_size; i++) {
cascade[i] = Filter::LowPass(cutoff, CascadingBiquadQ(cascade_size, i));
}
return CascadingFilter{std::move(cascade)};
}
CascadingFilter::CascadingFilter() = default;
CascadingFilter::CascadingFilter(std::vector<Filter> filters_) : filters(std::move(filters_)) {}
void CascadingFilter::Process(std::vector<s16>& signal) {
for (auto& filter : filters) {
filter.Process(signal);
}
}
} // namespace AudioCore
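
For what it's worth, CascadingBiquadQ above reproduces the classic Butterworth section Q values: with two cascaded biquads it yields roughly 0.5412 and 1.3066. A minimal standalone sketch (independent of the yuzu build; a local pi constant stands in for M_PI) that prints them:

#include <cmath>
#include <cstdio>

int main() {
    constexpr double pi = 3.14159265358979323846;
    constexpr int total_count = 2; // two cascaded biquads, i.e. a 4th-order filter
    for (int index = 0; index < total_count; ++index) {
        // Same formula as CascadingBiquadQ: poles spaced evenly on the unit circle.
        const double pole = pi * (2 * index + 1) / (4.0 * total_count);
        std::printf("Q[%d] = %.4f\n", index, 1.0 / (2.0 * std::cos(pole)));
    }
    // Prints roughly Q[0] = 0.5412 and Q[1] = 1.3066, the textbook Butterworth section Qs.
    return 0;
}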


@@ -1,61 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "common/common_types.h"
namespace AudioCore {
/// Digital biquad filter:
///
///          b0 + b1 z^-1 + b2 z^-2
/// H(z) = ------------------------
///          a0 + a1 z^-1 + a2 z^-2
class Filter {
public:
/// Creates a low-pass filter.
/// @param cutoff Determines the cutoff frequency. A value from 0.0 to 1.0.
/// @param Q Determines the quality factor of this filter.
static Filter LowPass(double cutoff, double Q = 0.7071);
/// Passthrough filter.
Filter();
Filter(double a0_, double a1_, double a2_, double b0_, double b1_, double b2_);
void Process(std::vector<s16>& signal);
private:
static constexpr std::size_t channel_count = 2;
/// Coefficients are in normalized form (a0 = 1.0).
double a1, a2, b0, b1, b2;
/// Input History
std::array<std::array<double, channel_count>, 3> in{};
/// Output History
std::array<std::array<double, channel_count>, 3> out{};
};
/// Cascade filters to build up higher-order filters from lower-order ones.
class CascadingFilter {
public:
/// Creates a cascading low-pass filter.
/// @param cutoff Determines the cutoff frequency. A value from 0.0 to 1.0.
/// @param cascade_size Number of biquads in cascade.
static CascadingFilter LowPass(double cutoff, std::size_t cascade_size);
/// Passthrough.
CascadingFilter();
explicit CascadingFilter(std::vector<Filter> filters_);
void Process(std::vector<s16>& signal);
private:
std::vector<Filter> filters;
};
} // namespace AudioCore
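
A hypothetical usage sketch of the API declared above (the buffer contents and cutoff value are made up for illustration; Process() filters the interleaved stereo buffer in place):

#include <vector>
#include "audio_core/algorithm/filter.h"
#include "common/common_types.h"

void FilterExample() {
    // 4th-order low-pass built from two cascaded biquads; the cutoff is the normalized
    // frequency described in LowPass()'s documentation above.
    AudioCore::CascadingFilter low_pass = AudioCore::CascadingFilter::LowPass(0.25, 2);
    std::vector<s16> stereo_signal(2 * 2400, 0); // 2400 interleaved L/R frames (silence here)
    low_pass.Process(stereo_signal);             // filtered in place
}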


@@ -1,232 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#define _USE_MATH_DEFINES
#include <algorithm>
#include <climits>
#include <cmath>
#include <vector>
#include "audio_core/algorithm/interpolate.h"
#include "common/common_types.h"
#include "common/logging/log.h"
namespace AudioCore {
constexpr std::array<s16, 512> curve_lut0{
6600, 19426, 6722, 3, 6479, 19424, 6845, 9, 6359, 19419, 6968, 15, 6239,
19412, 7093, 22, 6121, 19403, 7219, 28, 6004, 19391, 7345, 34, 5888, 19377,
7472, 41, 5773, 19361, 7600, 48, 5659, 19342, 7728, 55, 5546, 19321, 7857,
62, 5434, 19298, 7987, 69, 5323, 19273, 8118, 77, 5213, 19245, 8249, 84,
5104, 19215, 8381, 92, 4997, 19183, 8513, 101, 4890, 19148, 8646, 109, 4785,
19112, 8780, 118, 4681, 19073, 8914, 127, 4579, 19031, 9048, 137, 4477, 18988,
9183, 147, 4377, 18942, 9318, 157, 4277, 18895, 9454, 168, 4179, 18845, 9590,
179, 4083, 18793, 9726, 190, 3987, 18738, 9863, 202, 3893, 18682, 10000, 215,
3800, 18624, 10137, 228, 3709, 18563, 10274, 241, 3618, 18500, 10411, 255, 3529,
18436, 10549, 270, 3441, 18369, 10687, 285, 3355, 18300, 10824, 300, 3269, 18230,
10962, 317, 3186, 18157, 11100, 334, 3103, 18082, 11238, 351, 3022, 18006, 11375,
369, 2942, 17927, 11513, 388, 2863, 17847, 11650, 408, 2785, 17765, 11788, 428,
2709, 17681, 11925, 449, 2635, 17595, 12062, 471, 2561, 17507, 12198, 494, 2489,
17418, 12334, 517, 2418, 17327, 12470, 541, 2348, 17234, 12606, 566, 2280, 17140,
12741, 592, 2213, 17044, 12876, 619, 2147, 16946, 13010, 647, 2083, 16846, 13144,
675, 2020, 16745, 13277, 704, 1958, 16643, 13409, 735, 1897, 16539, 13541, 766,
1838, 16434, 13673, 798, 1780, 16327, 13803, 832, 1723, 16218, 13933, 866, 1667,
16109, 14062, 901, 1613, 15998, 14191, 937, 1560, 15885, 14318, 975, 1508, 15772,
14445, 1013, 1457, 15657, 14571, 1052, 1407, 15540, 14695, 1093, 1359, 15423, 14819,
1134, 1312, 15304, 14942, 1177, 1266, 15185, 15064, 1221, 1221, 15064, 15185, 1266,
1177, 14942, 15304, 1312, 1134, 14819, 15423, 1359, 1093, 14695, 15540, 1407, 1052,
14571, 15657, 1457, 1013, 14445, 15772, 1508, 975, 14318, 15885, 1560, 937, 14191,
15998, 1613, 901, 14062, 16109, 1667, 866, 13933, 16218, 1723, 832, 13803, 16327,
1780, 798, 13673, 16434, 1838, 766, 13541, 16539, 1897, 735, 13409, 16643, 1958,
704, 13277, 16745, 2020, 675, 13144, 16846, 2083, 647, 13010, 16946, 2147, 619,
12876, 17044, 2213, 592, 12741, 17140, 2280, 566, 12606, 17234, 2348, 541, 12470,
17327, 2418, 517, 12334, 17418, 2489, 494, 12198, 17507, 2561, 471, 12062, 17595,
2635, 449, 11925, 17681, 2709, 428, 11788, 17765, 2785, 408, 11650, 17847, 2863,
388, 11513, 17927, 2942, 369, 11375, 18006, 3022, 351, 11238, 18082, 3103, 334,
11100, 18157, 3186, 317, 10962, 18230, 3269, 300, 10824, 18300, 3355, 285, 10687,
18369, 3441, 270, 10549, 18436, 3529, 255, 10411, 18500, 3618, 241, 10274, 18563,
3709, 228, 10137, 18624, 3800, 215, 10000, 18682, 3893, 202, 9863, 18738, 3987,
190, 9726, 18793, 4083, 179, 9590, 18845, 4179, 168, 9454, 18895, 4277, 157,
9318, 18942, 4377, 147, 9183, 18988, 4477, 137, 9048, 19031, 4579, 127, 8914,
19073, 4681, 118, 8780, 19112, 4785, 109, 8646, 19148, 4890, 101, 8513, 19183,
4997, 92, 8381, 19215, 5104, 84, 8249, 19245, 5213, 77, 8118, 19273, 5323,
69, 7987, 19298, 5434, 62, 7857, 19321, 5546, 55, 7728, 19342, 5659, 48,
7600, 19361, 5773, 41, 7472, 19377, 5888, 34, 7345, 19391, 6004, 28, 7219,
19403, 6121, 22, 7093, 19412, 6239, 15, 6968, 19419, 6359, 9, 6845, 19424,
6479, 3, 6722, 19426, 6600};
constexpr std::array<s16, 512> curve_lut1{
-68, 32639, 69, -5, -200, 32630, 212, -15, -328, 32613, 359, -26, -450,
32586, 512, -36, -568, 32551, 669, -47, -680, 32507, 832, -58, -788, 32454,
1000, -69, -891, 32393, 1174, -80, -990, 32323, 1352, -92, -1084, 32244, 1536,
-103, -1173, 32157, 1724, -115, -1258, 32061, 1919, -128, -1338, 31956, 2118, -140,
-1414, 31844, 2322, -153, -1486, 31723, 2532, -167, -1554, 31593, 2747, -180, -1617,
31456, 2967, -194, -1676, 31310, 3192, -209, -1732, 31157, 3422, -224, -1783, 30995,
3657, -240, -1830, 30826, 3897, -256, -1874, 30649, 4143, -272, -1914, 30464, 4393,
-289, -1951, 30272, 4648, -307, -1984, 30072, 4908, -325, -2014, 29866, 5172, -343,
-2040, 29652, 5442, -362, -2063, 29431, 5716, -382, -2083, 29203, 5994, -403, -2100,
28968, 6277, -424, -2114, 28727, 6565, -445, -2125, 28480, 6857, -468, -2133, 28226,
7153, -490, -2139, 27966, 7453, -514, -2142, 27700, 7758, -538, -2142, 27428, 8066,
-563, -2141, 27151, 8378, -588, -2136, 26867, 8694, -614, -2130, 26579, 9013, -641,
-2121, 26285, 9336, -668, -2111, 25987, 9663, -696, -2098, 25683, 9993, -724, -2084,
25375, 10326, -753, -2067, 25063, 10662, -783, -2049, 24746, 11000, -813, -2030, 24425,
11342, -844, -2009, 24100, 11686, -875, -1986, 23771, 12033, -907, -1962, 23438, 12382,
-939, -1937, 23103, 12733, -972, -1911, 22764, 13086, -1005, -1883, 22422, 13441, -1039,
-1855, 22077, 13798, -1072, -1825, 21729, 14156, -1107, -1795, 21380, 14516, -1141, -1764,
21027, 14877, -1176, -1732, 20673, 15239, -1211, -1700, 20317, 15602, -1246, -1667, 19959,
15965, -1282, -1633, 19600, 16329, -1317, -1599, 19239, 16694, -1353, -1564, 18878, 17058,
-1388, -1530, 18515, 17423, -1424, -1495, 18151, 17787, -1459, -1459, 17787, 18151, -1495,
-1424, 17423, 18515, -1530, -1388, 17058, 18878, -1564, -1353, 16694, 19239, -1599, -1317,
16329, 19600, -1633, -1282, 15965, 19959, -1667, -1246, 15602, 20317, -1700, -1211, 15239,
20673, -1732, -1176, 14877, 21027, -1764, -1141, 14516, 21380, -1795, -1107, 14156, 21729,
-1825, -1072, 13798, 22077, -1855, -1039, 13441, 22422, -1883, -1005, 13086, 22764, -1911,
-972, 12733, 23103, -1937, -939, 12382, 23438, -1962, -907, 12033, 23771, -1986, -875,
11686, 24100, -2009, -844, 11342, 24425, -2030, -813, 11000, 24746, -2049, -783, 10662,
25063, -2067, -753, 10326, 25375, -2084, -724, 9993, 25683, -2098, -696, 9663, 25987,
-2111, -668, 9336, 26285, -2121, -641, 9013, 26579, -2130, -614, 8694, 26867, -2136,
-588, 8378, 27151, -2141, -563, 8066, 27428, -2142, -538, 7758, 27700, -2142, -514,
7453, 27966, -2139, -490, 7153, 28226, -2133, -468, 6857, 28480, -2125, -445, 6565,
28727, -2114, -424, 6277, 28968, -2100, -403, 5994, 29203, -2083, -382, 5716, 29431,
-2063, -362, 5442, 29652, -2040, -343, 5172, 29866, -2014, -325, 4908, 30072, -1984,
-307, 4648, 30272, -1951, -289, 4393, 30464, -1914, -272, 4143, 30649, -1874, -256,
3897, 30826, -1830, -240, 3657, 30995, -1783, -224, 3422, 31157, -1732, -209, 3192,
31310, -1676, -194, 2967, 31456, -1617, -180, 2747, 31593, -1554, -167, 2532, 31723,
-1486, -153, 2322, 31844, -1414, -140, 2118, 31956, -1338, -128, 1919, 32061, -1258,
-115, 1724, 32157, -1173, -103, 1536, 32244, -1084, -92, 1352, 32323, -990, -80,
1174, 32393, -891, -69, 1000, 32454, -788, -58, 832, 32507, -680, -47, 669,
32551, -568, -36, 512, 32586, -450, -26, 359, 32613, -328, -15, 212, 32630,
-200, -5, 69, 32639, -68};
constexpr std::array<s16, 512> curve_lut2{
3195, 26287, 3329, -32, 3064, 26281, 3467, -34, 2936, 26270, 3608, -38, 2811,
26253, 3751, -42, 2688, 26230, 3897, -46, 2568, 26202, 4046, -50, 2451, 26169,
4199, -54, 2338, 26130, 4354, -58, 2227, 26085, 4512, -63, 2120, 26035, 4673,
-67, 2015, 25980, 4837, -72, 1912, 25919, 5004, -76, 1813, 25852, 5174, -81,
1716, 25780, 5347, -87, 1622, 25704, 5522, -92, 1531, 25621, 5701, -98, 1442,
25533, 5882, -103, 1357, 25440, 6066, -109, 1274, 25342, 6253, -115, 1193, 25239,
6442, -121, 1115, 25131, 6635, -127, 1040, 25018, 6830, -133, 967, 24899, 7027,
-140, 897, 24776, 7227, -146, 829, 24648, 7430, -153, 764, 24516, 7635, -159,
701, 24379, 7842, -166, 641, 24237, 8052, -174, 583, 24091, 8264, -181, 526,
23940, 8478, -187, 472, 23785, 8695, -194, 420, 23626, 8914, -202, 371, 23462,
9135, -209, 324, 23295, 9358, -215, 279, 23123, 9583, -222, 236, 22948, 9809,
-230, 194, 22769, 10038, -237, 154, 22586, 10269, -243, 117, 22399, 10501, -250,
81, 22208, 10735, -258, 47, 22015, 10970, -265, 15, 21818, 11206, -271, -16,
21618, 11444, -277, -44, 21415, 11684, -283, -71, 21208, 11924, -290, -97, 20999,
12166, -296, -121, 20786, 12409, -302, -143, 20571, 12653, -306, -163, 20354, 12898,
-311, -183, 20134, 13143, -316, -201, 19911, 13389, -321, -218, 19686, 13635, -325,
-234, 19459, 13882, -328, -248, 19230, 14130, -332, -261, 18998, 14377, -335, -273,
18765, 14625, -337, -284, 18531, 14873, -339, -294, 18295, 15121, -341, -302, 18057,
15369, -341, -310, 17817, 15617, -341, -317, 17577, 15864, -340, -323, 17335, 16111,
-340, -328, 17092, 16357, -338, -332, 16848, 16603, -336, -336, 16603, 16848, -332,
-338, 16357, 17092, -328, -340, 16111, 17335, -323, -340, 15864, 17577, -317, -341,
15617, 17817, -310, -341, 15369, 18057, -302, -341, 15121, 18295, -294, -339, 14873,
18531, -284, -337, 14625, 18765, -273, -335, 14377, 18998, -261, -332, 14130, 19230,
-248, -328, 13882, 19459, -234, -325, 13635, 19686, -218, -321, 13389, 19911, -201,
-316, 13143, 20134, -183, -311, 12898, 20354, -163, -306, 12653, 20571, -143, -302,
12409, 20786, -121, -296, 12166, 20999, -97, -290, 11924, 21208, -71, -283, 11684,
21415, -44, -277, 11444, 21618, -16, -271, 11206, 21818, 15, -265, 10970, 22015,
47, -258, 10735, 22208, 81, -250, 10501, 22399, 117, -243, 10269, 22586, 154,
-237, 10038, 22769, 194, -230, 9809, 22948, 236, -222, 9583, 23123, 279, -215,
9358, 23295, 324, -209, 9135, 23462, 371, -202, 8914, 23626, 420, -194, 8695,
23785, 472, -187, 8478, 23940, 526, -181, 8264, 24091, 583, -174, 8052, 24237,
641, -166, 7842, 24379, 701, -159, 7635, 24516, 764, -153, 7430, 24648, 829,
-146, 7227, 24776, 897, -140, 7027, 24899, 967, -133, 6830, 25018, 1040, -127,
6635, 25131, 1115, -121, 6442, 25239, 1193, -115, 6253, 25342, 1274, -109, 6066,
25440, 1357, -103, 5882, 25533, 1442, -98, 5701, 25621, 1531, -92, 5522, 25704,
1622, -87, 5347, 25780, 1716, -81, 5174, 25852, 1813, -76, 5004, 25919, 1912,
-72, 4837, 25980, 2015, -67, 4673, 26035, 2120, -63, 4512, 26085, 2227, -58,
4354, 26130, 2338, -54, 4199, 26169, 2451, -50, 4046, 26202, 2568, -46, 3897,
26230, 2688, -42, 3751, 26253, 2811, -38, 3608, 26270, 2936, -34, 3467, 26281,
3064, -32, 3329, 26287, 3195};
std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16> input, double ratio) {
if (input.size() < 2)
return {};
if (ratio <= 0) {
LOG_ERROR(Audio, "Nonsensical interpolation ratio {}", ratio);
return input;
}
const s32 step{static_cast<s32>(ratio * 0x8000)};
const std::array<s16, 512>& lut = [step] {
if (step > 0xaaaa) {
return curve_lut0;
}
if (step <= 0x8000) {
return curve_lut1;
}
return curve_lut2;
}();
const std::size_t num_frames{input.size() / 2};
std::vector<s16> output;
output.reserve(static_cast<std::size_t>(static_cast<double>(input.size()) / ratio +
InterpolationState::taps));
for (std::size_t frame{}; frame < num_frames; ++frame) {
const std::size_t lut_index{(state.fraction >> 8) * InterpolationState::taps};
std::rotate(state.history.begin(), state.history.end() - 1, state.history.end());
state.history[0][0] = input[frame * 2 + 0];
state.history[0][1] = input[frame * 2 + 1];
while (state.position <= 1.0) {
const s32 left{state.history[0][0] * lut[lut_index + 0] +
state.history[1][0] * lut[lut_index + 1] +
state.history[2][0] * lut[lut_index + 2] +
state.history[3][0] * lut[lut_index + 3]};
const s32 right{state.history[0][1] * lut[lut_index + 0] +
state.history[1][1] * lut[lut_index + 1] +
state.history[2][1] * lut[lut_index + 2] +
state.history[3][1] * lut[lut_index + 3]};
const s32 new_offset{state.fraction + step};
state.fraction = new_offset & 0x7fff;
output.emplace_back(static_cast<s16>(std::clamp(left >> 15, SHRT_MIN, SHRT_MAX)));
output.emplace_back(static_cast<s16>(std::clamp(right >> 15, SHRT_MIN, SHRT_MAX)));
state.position += ratio;
}
state.position -= 1.0;
}
return output;
}
void Resample(s32* output, const s32* input, s32 pitch, s32& fraction, std::size_t sample_count) {
const std::array<s16, 512>& lut = [pitch] {
if (pitch > 0xaaaa) {
return curve_lut0;
}
if (pitch <= 0x8000) {
return curve_lut1;
}
return curve_lut2;
}();
std::size_t index{};
for (std::size_t i = 0; i < sample_count; i++) {
const std::size_t lut_index{(static_cast<std::size_t>(fraction) >> 8) * 4};
const auto l0 = lut[lut_index + 0];
const auto l1 = lut[lut_index + 1];
const auto l2 = lut[lut_index + 2];
const auto l3 = lut[lut_index + 3];
const auto s0 = static_cast<s32>(input[index + 0]);
const auto s1 = static_cast<s32>(input[index + 1]);
const auto s2 = static_cast<s32>(input[index + 2]);
const auto s3 = static_cast<s32>(input[index + 3]);
output[i] = (l0 * s0 + l1 * s1 + l2 * s2 + l3 * s3) >> 15;
fraction += pitch;
index += (fraction >> 15);
fraction &= 0x7fff;
}
}
} // namespace AudioCore


@@ -1,43 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "common/common_types.h"
namespace AudioCore {
struct InterpolationState {
static constexpr std::size_t taps{4};
static constexpr std::size_t history_size{taps * 2 - 1};
std::array<std::array<s16, 2>, history_size> history{};
double position{};
s32 fraction{};
};
/// Interpolates input signal to produce output signal.
/// @param input The signal to interpolate.
/// @param ratio Interpolation ratio.
/// ratio > 1.0 results in fewer output samples.
/// ratio < 1.0 results in more output samples.
/// @returns Output signal.
std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16> input, double ratio);
/// Interpolates input signal to produce output signal.
/// @param input The signal to interpolate.
/// @param input_rate The sample rate of input.
/// @param output_rate The desired sample rate of the output.
/// @returns Output signal.
inline std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16> input,
u32 input_rate, u32 output_rate) {
const double ratio = static_cast<double>(input_rate) / static_cast<double>(output_rate);
return Interpolate(state, std::move(input), ratio);
}
/// Nintendo Switch's DSP resampling algorithm. Operates on a single channel.
void Resample(s32* output, const s32* input, s32 pitch, s32& fraction, std::size_t sample_count);
} // namespace AudioCore
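
A hypothetical usage sketch of the rate-converting overload above (the sample data is placeholder silence; the InterpolationState must be kept alive across calls so the 4-tap history carries over between consecutive buffers):

#include <utility>
#include <vector>
#include "audio_core/algorithm/interpolate.h"
#include "common/common_types.h"

void InterpolateExample() {
    AudioCore::InterpolationState state{};
    // One 10 ms stereo buffer at 32 kHz (320 frames, interleaved L/R), silence for illustration.
    std::vector<s16> input(320 * 2, 0);
    // ratio = 32000 / 48000 < 1.0, so the output contains more samples than the input.
    std::vector<s16> output = AudioCore::Interpolate(state, std::move(input), 32000, 48000);
}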


@@ -1,62 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/audio_out.h"
#include "audio_core/sink.h"
#include "audio_core/sink_details.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/settings.h"
namespace AudioCore {
/// Returns the stream format from the specified number of channels
static Stream::Format ChannelsToStreamFormat(u32 num_channels) {
switch (num_channels) {
case 1:
return Stream::Format::Mono16;
case 2:
return Stream::Format::Stereo16;
case 6:
return Stream::Format::Multi51Channel16;
}
UNIMPLEMENTED_MSG("Unimplemented num_channels={}", num_channels);
return {};
}
StreamPtr AudioOut::OpenStream(Core::Timing::CoreTiming& core_timing, u32 sample_rate,
u32 num_channels, std::string&& name,
Stream::ReleaseCallback&& release_callback) {
if (!sink) {
sink = CreateSinkFromID(Settings::values.sink_id.GetValue(),
Settings::values.audio_device_id.GetValue());
}
return std::make_shared<Stream>(
core_timing, sample_rate, ChannelsToStreamFormat(num_channels), std::move(release_callback),
sink->AcquireSinkStream(sample_rate, num_channels, name), std::move(name));
}
std::vector<Buffer::Tag> AudioOut::GetTagsAndReleaseBuffers(StreamPtr stream,
std::size_t max_count) {
return stream->GetTagsAndReleaseBuffers(max_count);
}
std::vector<Buffer::Tag> AudioOut::GetTagsAndReleaseBuffers(StreamPtr stream) {
return stream->GetTagsAndReleaseBuffers();
}
void AudioOut::StartStream(StreamPtr stream) {
stream->Play();
}
void AudioOut::StopStream(StreamPtr stream) {
stream->Stop();
}
bool AudioOut::QueueBuffer(StreamPtr stream, Buffer::Tag tag, std::vector<s16>&& data) {
return stream->QueueBuffer(std::make_shared<Buffer>(tag, std::move(data)));
}
} // namespace AudioCore


@@ -1,49 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include <string>
#include <vector>
#include "audio_core/buffer.h"
#include "audio_core/sink.h"
#include "audio_core/stream.h"
#include "common/common_types.h"
namespace Core::Timing {
class CoreTiming;
}
namespace AudioCore {
/**
* Represents an audio playback interface, used to open and play audio streams
*/
class AudioOut {
public:
/// Opens a new audio stream
StreamPtr OpenStream(Core::Timing::CoreTiming& core_timing, u32 sample_rate, u32 num_channels,
std::string&& name, Stream::ReleaseCallback&& release_callback);
/// Returns a vector of recently released buffers specified by tag for the specified stream
std::vector<Buffer::Tag> GetTagsAndReleaseBuffers(StreamPtr stream, std::size_t max_count);
/// Returns a vector of all recently released buffers specified by tag for the specified stream
std::vector<Buffer::Tag> GetTagsAndReleaseBuffers(StreamPtr stream);
/// Starts an audio stream for playback
void StartStream(StreamPtr stream);
/// Stops an audio stream that is currently playing
void StopStream(StreamPtr stream);
/// Queues a buffer into the specified audio stream, returns true on success
bool QueueBuffer(StreamPtr stream, Buffer::Tag tag, std::vector<s16>&& data);
private:
SinkPtr sink;
};
} // namespace AudioCore
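
A rough sketch of the intended call sequence (hedged: the CoreTiming reference comes from the running emulator core, the tag value is arbitrary, and the release callback is written as a plain no-argument lambda, assumed to be compatible with Stream::ReleaseCallback):

#include <utility>
#include <vector>
#include "audio_core/audio_out.h"
#include "core/core_timing.h"

void AudioOutExample(Core::Timing::CoreTiming& core_timing) {
    AudioCore::AudioOut audio_out;
    AudioCore::StreamPtr stream = audio_out.OpenStream(
        core_timing, 48000, 2, "ExampleStream", [] { /* buffers consumed; queue more */ });
    audio_out.StartStream(stream);
    std::vector<s16> samples(240 * 2, 0); // one 5 ms stereo frame of silence
    audio_out.QueueBuffer(stream, /*tag=*/1, std::move(samples));
    // Later, released buffer tags can be collected with GetTagsAndReleaseBuffers(stream).
}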


@@ -1,339 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <limits>
#include <vector>
#include "audio_core/audio_out.h"
#include "audio_core/audio_renderer.h"
#include "audio_core/common.h"
#include "audio_core/info_updater.h"
#include "audio_core/voice_context.h"
#include "common/logging/log.h"
#include "common/settings.h"
#include "core/core_timing.h"
#include "core/memory.h"
namespace {
[[nodiscard]] static constexpr s16 ClampToS16(s32 value) {
return static_cast<s16>(std::clamp(value, s32{std::numeric_limits<s16>::min()},
s32{std::numeric_limits<s16>::max()}));
}
[[nodiscard]] static constexpr s16 Mix2To1(s16 l_channel, s16 r_channel) {
// Mix 50% from left and 50% from right channel
constexpr float l_mix_amount = 50.0f / 100.0f;
constexpr float r_mix_amount = 50.0f / 100.0f;
return ClampToS16(static_cast<s32>((static_cast<float>(l_channel) * l_mix_amount) +
(static_cast<float>(r_channel) * r_mix_amount)));
}
[[maybe_unused, nodiscard]] static constexpr std::tuple<s16, s16> Mix6To2(
s16 fl_channel, s16 fr_channel, s16 fc_channel, [[maybe_unused]] s16 lf_channel, s16 bl_channel,
s16 br_channel) {
// The front channels are mixed at 36.94%, the center channel at 26.12%, and the back
// channels at 36.94%
constexpr float front_mix_amount = 36.94f / 100.0f;
constexpr float center_mix_amount = 26.12f / 100.0f;
constexpr float back_mix_amount = 36.94f / 100.0f;
// Mix 50% from left and 50% from right channel
const auto left = front_mix_amount * static_cast<float>(fl_channel) +
center_mix_amount * static_cast<float>(fc_channel) +
back_mix_amount * static_cast<float>(bl_channel);
const auto right = front_mix_amount * static_cast<float>(fr_channel) +
center_mix_amount * static_cast<float>(fc_channel) +
back_mix_amount * static_cast<float>(br_channel);
return {ClampToS16(static_cast<s32>(left)), ClampToS16(static_cast<s32>(right))};
}
[[nodiscard]] static constexpr std::tuple<s16, s16> Mix6To2WithCoefficients(
s16 fl_channel, s16 fr_channel, s16 fc_channel, s16 lf_channel, s16 bl_channel, s16 br_channel,
const std::array<float_le, 4>& coeff) {
const auto left =
static_cast<float>(fl_channel) * coeff[0] + static_cast<float>(fc_channel) * coeff[1] +
static_cast<float>(lf_channel) * coeff[2] + static_cast<float>(bl_channel) * coeff[3];
const auto right =
static_cast<float>(fr_channel) * coeff[0] + static_cast<float>(fc_channel) * coeff[1] +
static_cast<float>(lf_channel) * coeff[2] + static_cast<float>(br_channel) * coeff[3];
return {ClampToS16(static_cast<s32>(left)), ClampToS16(static_cast<s32>(right))};
}
} // namespace
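// Illustrative sanity check (hypothetical sample values) of the 50/50 stereo-to-mono
// downmix above:
static_assert(Mix2To1(1000, 3000) == 2000);
static_assert(Mix2To1(32767, 32767) == 32767); // ClampToS16 keeps the result within s16 range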
namespace AudioCore {
constexpr s32 NUM_BUFFERS = 2;
AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_,
AudioCommon::AudioRendererParameter params,
Stream::ReleaseCallback&& release_callback,
std::size_t instance_number)
: worker_params{params}, memory_pool_info(params.effect_count + params.voice_count * 4),
voice_context(params.voice_count), effect_context(params.effect_count), mix_context(),
sink_context(params.sink_count), splitter_context(),
voices(params.voice_count), memory{memory_},
command_generator(worker_params, voice_context, mix_context, splitter_context, effect_context,
memory),
core_timing{core_timing_} {
behavior_info.SetUserRevision(params.revision);
splitter_context.Initialize(behavior_info, params.splitter_count,
params.num_splitter_send_channels);
mix_context.Initialize(behavior_info, params.submix_count + 1, params.effect_count);
audio_out = std::make_unique<AudioCore::AudioOut>();
stream = audio_out->OpenStream(
core_timing, params.sample_rate, AudioCommon::STREAM_NUM_CHANNELS,
fmt::format("AudioRenderer-Instance{}", instance_number), std::move(release_callback));
process_event = Core::Timing::CreateEvent(
fmt::format("AudioRenderer-Instance{}-Process", instance_number),
[this](std::uintptr_t, std::chrono::nanoseconds) { ReleaseAndQueueBuffers(); });
for (s32 i = 0; i < NUM_BUFFERS; ++i) {
QueueMixedBuffer(i);
}
}
AudioRenderer::~AudioRenderer() = default;
Result AudioRenderer::Start() {
audio_out->StartStream(stream);
ReleaseAndQueueBuffers();
return ResultSuccess;
}
Result AudioRenderer::Stop() {
audio_out->StopStream(stream);
return ResultSuccess;
}
u32 AudioRenderer::GetSampleRate() const {
return worker_params.sample_rate;
}
u32 AudioRenderer::GetSampleCount() const {
return worker_params.sample_count;
}
u32 AudioRenderer::GetMixBufferCount() const {
return worker_params.mix_buffer_count;
}
Stream::State AudioRenderer::GetStreamState() const {
return stream->GetState();
}
Result AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params,
std::vector<u8>& output_params) {
std::scoped_lock lock{mutex};
InfoUpdater info_updater{input_params, output_params, behavior_info};
if (!info_updater.UpdateBehaviorInfo(behavior_info)) {
LOG_ERROR(Audio, "Failed to update behavior info input parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (!info_updater.UpdateMemoryPools(memory_pool_info)) {
LOG_ERROR(Audio, "Failed to update memory pool parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (!info_updater.UpdateVoiceChannelResources(voice_context)) {
LOG_ERROR(Audio, "Failed to update voice channel resource parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (!info_updater.UpdateVoices(voice_context, memory_pool_info, 0)) {
LOG_ERROR(Audio, "Failed to update voice parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
// TODO(ogniK): Deal with stopped audio renderer but updates still taking place
if (!info_updater.UpdateEffects(effect_context, true)) {
LOG_ERROR(Audio, "Failed to update effect parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (behavior_info.IsSplitterSupported()) {
if (!info_updater.UpdateSplitterInfo(splitter_context)) {
LOG_ERROR(Audio, "Failed to update splitter parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
}
const auto mix_result = info_updater.UpdateMixes(mix_context, worker_params.mix_buffer_count,
splitter_context, effect_context);
if (mix_result.IsError()) {
LOG_ERROR(Audio, "Failed to update mix parameters");
return mix_result;
}
// TODO(ogniK): Sinks
if (!info_updater.UpdateSinks(sink_context)) {
LOG_ERROR(Audio, "Failed to update sink parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
// TODO(ogniK): Performance buffer
if (!info_updater.UpdatePerformanceBuffer()) {
LOG_ERROR(Audio, "Failed to update performance buffer parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (!info_updater.UpdateErrorInfo(behavior_info)) {
LOG_ERROR(Audio, "Failed to update error info");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (behavior_info.IsElapsedFrameCountSupported()) {
if (!info_updater.UpdateRendererInfo(elapsed_frame_count)) {
LOG_ERROR(Audio, "Failed to update renderer info");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
}
// TODO(ogniK): Statistics
if (!info_updater.WriteOutputHeader()) {
LOG_ERROR(Audio, "Failed to write output header");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
// TODO(ogniK): Check when all sections are implemented
if (!info_updater.CheckConsumedSize()) {
LOG_ERROR(Audio, "Audio buffers were not consumed!");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
return ResultSuccess;
}
void AudioRenderer::QueueMixedBuffer(Buffer::Tag tag) {
command_generator.PreCommand();
// Clear mix buffers before our next operation
command_generator.ClearMixBuffers();
// If the splitter is not in use, sort our mixes
if (!splitter_context.UsingSplitter()) {
mix_context.SortInfo();
}
// Sort our voices
voice_context.SortInfo();
// Handle samples
command_generator.GenerateVoiceCommands();
command_generator.GenerateSubMixCommands();
command_generator.GenerateFinalMixCommands();
command_generator.PostCommand();
// Base sample size
std::size_t BUFFER_SIZE{worker_params.sample_count};
// Samples, making sure to clear
std::vector<s16> buffer(BUFFER_SIZE * stream->GetNumChannels(), 0);
if (sink_context.InUse()) {
const auto stream_channel_count = stream->GetNumChannels();
const auto buffer_offsets = sink_context.OutputBuffers();
const auto channel_count = buffer_offsets.size();
const auto& final_mix = mix_context.GetFinalMixInfo();
const auto& in_params = final_mix.GetInParams();
std::vector<std::span<s32>> mix_buffers(channel_count);
for (std::size_t i = 0; i < channel_count; i++) {
mix_buffers[i] =
command_generator.GetMixBuffer(in_params.buffer_offset + buffer_offsets[i]);
}
for (std::size_t i = 0; i < BUFFER_SIZE; i++) {
if (channel_count == 1) {
const auto sample = ClampToS16(mix_buffers[0][i]);
// Place sample in all channels
for (u32 channel = 0; channel < stream_channel_count; channel++) {
buffer[i * stream_channel_count + channel] = sample;
}
if (stream_channel_count == 6) {
// The output stream has an LFE channel, mute it!
buffer[i * stream_channel_count + 3] = 0;
}
} else if (channel_count == 2) {
const auto l_sample = ClampToS16(mix_buffers[0][i]);
const auto r_sample = ClampToS16(mix_buffers[1][i]);
if (stream_channel_count == 1) {
buffer[i * stream_channel_count + 0] = Mix2To1(l_sample, r_sample);
} else if (stream_channel_count == 2) {
buffer[i * stream_channel_count + 0] = l_sample;
buffer[i * stream_channel_count + 1] = r_sample;
} else if (stream_channel_count == 6) {
buffer[i * stream_channel_count + 0] = l_sample;
buffer[i * stream_channel_count + 1] = r_sample;
// Combine the left and right channels into the center channel
buffer[i * stream_channel_count + 2] = Mix2To1(l_sample, r_sample);
buffer[i * stream_channel_count + 4] = l_sample;
buffer[i * stream_channel_count + 5] = r_sample;
}
} else if (channel_count == 6) {
const auto fl_sample = ClampToS16(mix_buffers[0][i]);
const auto fr_sample = ClampToS16(mix_buffers[1][i]);
const auto fc_sample = ClampToS16(mix_buffers[2][i]);
const auto lf_sample = ClampToS16(mix_buffers[3][i]);
const auto bl_sample = ClampToS16(mix_buffers[4][i]);
const auto br_sample = ClampToS16(mix_buffers[5][i]);
if (stream_channel_count == 1) {
// Games seem to ignore the center channel half the time; we use the front left
// and right channels for mixing, as that's where the majority of the audio goes
buffer[i * stream_channel_count + 0] = Mix2To1(fl_sample, fr_sample);
} else if (stream_channel_count == 2) {
// Mix all channels into 2 channels
const auto [left, right] = Mix6To2WithCoefficients(
fl_sample, fr_sample, fc_sample, lf_sample, bl_sample, br_sample,
sink_context.GetDownmixCoefficients());
buffer[i * stream_channel_count + 0] = left;
buffer[i * stream_channel_count + 1] = right;
} else if (stream_channel_count == 6) {
// Pass through
buffer[i * stream_channel_count + 0] = fl_sample;
buffer[i * stream_channel_count + 1] = fr_sample;
buffer[i * stream_channel_count + 2] = fc_sample;
buffer[i * stream_channel_count + 3] = lf_sample;
buffer[i * stream_channel_count + 4] = bl_sample;
buffer[i * stream_channel_count + 5] = br_sample;
}
}
}
}
audio_out->QueueBuffer(stream, tag, std::move(buffer));
elapsed_frame_count++;
voice_context.UpdateStateByDspShared();
}
void AudioRenderer::ReleaseAndQueueBuffers() {
if (!stream->IsPlaying()) {
return;
}
{
std::scoped_lock lock{mutex};
const auto released_buffers{audio_out->GetTagsAndReleaseBuffers(stream)};
for (const auto& tag : released_buffers) {
QueueMixedBuffer(tag);
}
}
const f32 sample_rate = static_cast<f32>(GetSampleRate());
const f32 sample_count = static_cast<f32>(GetSampleCount());
const f32 consume_rate = sample_rate / (sample_count * (sample_count / 240));
const s32 ms = (1000 / static_cast<s32>(consume_rate)) - 1;
const std::chrono::milliseconds next_event_time(std::max(ms / NUM_BUFFERS, 1));
core_timing.ScheduleEvent(next_event_time, process_event, {});
}
} // namespace AudioCore
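
To make the scheduling arithmetic in ReleaseAndQueueBuffers concrete, here is the same computation worked through with typical audren parameters (48 kHz, 240 samples per audio frame); the numbers are illustrative, chosen as an assumption rather than taken from the file above:

constexpr float sample_rate = 48000.0f;
constexpr float sample_count = 240.0f;
// consume_rate = 48000 / (240 * (240 / 240)) = 200 mixed buffers per second.
constexpr float consume_rate = sample_rate / (sample_count * (sample_count / 240));
// ms = 1000 / 200 - 1 = 4 ms per mixed buffer.
constexpr int ms = (1000 / static_cast<int>(consume_rate)) - 1;
// With NUM_BUFFERS == 2, the processing event is re-scheduled roughly every 2 ms.
static_assert(static_cast<int>(consume_rate) == 200 && ms / 2 == 2);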


@@ -1,78 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <memory>
#include <mutex>
#include <vector>
#include "audio_core/behavior_info.h"
#include "audio_core/command_generator.h"
#include "audio_core/common.h"
#include "audio_core/effect_context.h"
#include "audio_core/memory_pool.h"
#include "audio_core/mix_context.h"
#include "audio_core/sink_context.h"
#include "audio_core/splitter_context.h"
#include "audio_core/stream.h"
#include "audio_core/voice_context.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/result.h"
namespace Core::Timing {
class CoreTiming;
}
namespace Core::Memory {
class Memory;
}
namespace AudioCore {
using DSPStateHolder = std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>;
class AudioOut;
class AudioRenderer {
public:
AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_,
AudioCommon::AudioRendererParameter params,
Stream::ReleaseCallback&& release_callback, std::size_t instance_number);
~AudioRenderer();
[[nodiscard]] Result UpdateAudioRenderer(const std::vector<u8>& input_params,
std::vector<u8>& output_params);
[[nodiscard]] Result Start();
[[nodiscard]] Result Stop();
void QueueMixedBuffer(Buffer::Tag tag);
void ReleaseAndQueueBuffers();
[[nodiscard]] u32 GetSampleRate() const;
[[nodiscard]] u32 GetSampleCount() const;
[[nodiscard]] u32 GetMixBufferCount() const;
[[nodiscard]] Stream::State GetStreamState() const;
private:
BehaviorInfo behavior_info{};
AudioCommon::AudioRendererParameter worker_params;
std::vector<ServerMemoryPoolInfo> memory_pool_info;
VoiceContext voice_context;
EffectContext effect_context;
MixContext mix_context;
SinkContext sink_context;
SplitterContext splitter_context;
std::vector<VoiceState> voices;
std::unique_ptr<AudioOut> audio_out;
StreamPtr stream;
Core::Memory::Memory& memory;
CommandGenerator command_generator;
std::size_t elapsed_frame_count{};
Core::Timing::CoreTiming& core_timing;
std::shared_ptr<Core::Timing::EventType> process_event;
std::mutex mutex;
};
} // namespace AudioCore


@@ -1,104 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <cstring>
#include "audio_core/behavior_info.h"
#include "audio_core/common.h"
#include "common/logging/log.h"
namespace AudioCore {
BehaviorInfo::BehaviorInfo() : process_revision(AudioCommon::CURRENT_PROCESS_REVISION) {}
BehaviorInfo::~BehaviorInfo() = default;
bool BehaviorInfo::UpdateOutput(std::vector<u8>& buffer, std::size_t offset) {
if (!AudioCommon::CanConsumeBuffer(buffer.size(), offset, sizeof(OutParams))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
OutParams params{};
std::memcpy(params.errors.data(), errors.data(), sizeof(ErrorInfo) * errors.size());
params.error_count = static_cast<u32_le>(error_count);
std::memcpy(buffer.data() + offset, &params, sizeof(OutParams));
return true;
}
void BehaviorInfo::ClearError() {
error_count = 0;
}
void BehaviorInfo::UpdateFlags(u64_le dest_flags) {
flags = dest_flags;
}
void BehaviorInfo::SetUserRevision(u32_le revision) {
user_revision = revision;
}
u32_le BehaviorInfo::GetUserRevision() const {
return user_revision;
}
u32_le BehaviorInfo::GetProcessRevision() const {
return process_revision;
}
bool BehaviorInfo::IsAdpcmLoopContextBugFixed() const {
return AudioCommon::IsRevisionSupported(2, user_revision);
}
bool BehaviorInfo::IsSplitterSupported() const {
return AudioCommon::IsRevisionSupported(2, user_revision);
}
bool BehaviorInfo::IsLongSizePreDelaySupported() const {
return AudioCommon::IsRevisionSupported(3, user_revision);
}
bool BehaviorInfo::IsAudioRendererProcessingTimeLimit80PercentSupported() const {
return AudioCommon::IsRevisionSupported(5, user_revision);
}
bool BehaviorInfo::IsAudioRendererProcessingTimeLimit75PercentSupported() const {
return AudioCommon::IsRevisionSupported(4, user_revision);
}
bool BehaviorInfo::IsAudioRendererProcessingTimeLimit70PercentSupported() const {
return AudioCommon::IsRevisionSupported(1, user_revision);
}
bool BehaviorInfo::IsElapsedFrameCountSupported() const {
return AudioCommon::IsRevisionSupported(5, user_revision);
}
bool BehaviorInfo::IsMemoryPoolForceMappingEnabled() const {
return (flags & 1) != 0;
}
bool BehaviorInfo::IsFlushVoiceWaveBuffersSupported() const {
return AudioCommon::IsRevisionSupported(5, user_revision);
}
bool BehaviorInfo::IsVoicePlayedSampleCountResetAtLoopPointSupported() const {
return AudioCommon::IsRevisionSupported(5, user_revision);
}
bool BehaviorInfo::IsVoicePitchAndSrcSkippedSupported() const {
return AudioCommon::IsRevisionSupported(5, user_revision);
}
bool BehaviorInfo::IsMixInParameterDirtyOnlyUpdateSupported() const {
return AudioCommon::IsRevisionSupported(7, user_revision);
}
bool BehaviorInfo::IsSplitterBugFixed() const {
return AudioCommon::IsRevisionSupported(5, user_revision);
}
void BehaviorInfo::CopyErrorInfo(BehaviorInfo::OutParams& dst) {
dst.error_count = static_cast<u32>(error_count);
std::copy(errors.begin(), errors.begin() + error_count, dst.errors.begin());
}
} // namespace AudioCore


@@ -1,71 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
namespace AudioCore {
class BehaviorInfo {
public:
struct ErrorInfo {
u32_le result{};
INSERT_PADDING_WORDS(1);
u64_le result_info{};
};
static_assert(sizeof(ErrorInfo) == 0x10, "ErrorInfo is an invalid size");
struct InParams {
u32_le revision{};
u32_le padding{};
u64_le flags{};
};
static_assert(sizeof(InParams) == 0x10, "InParams is an invalid size");
struct OutParams {
std::array<ErrorInfo, 10> errors{};
u32_le error_count{};
INSERT_PADDING_BYTES(12);
};
static_assert(sizeof(OutParams) == 0xb0, "OutParams is an invalid size");
explicit BehaviorInfo();
~BehaviorInfo();
bool UpdateOutput(std::vector<u8>& buffer, std::size_t offset);
void ClearError();
void UpdateFlags(u64_le dest_flags);
void SetUserRevision(u32_le revision);
[[nodiscard]] u32_le GetUserRevision() const;
[[nodiscard]] u32_le GetProcessRevision() const;
[[nodiscard]] bool IsAdpcmLoopContextBugFixed() const;
[[nodiscard]] bool IsSplitterSupported() const;
[[nodiscard]] bool IsLongSizePreDelaySupported() const;
[[nodiscard]] bool IsAudioRendererProcessingTimeLimit80PercentSupported() const;
[[nodiscard]] bool IsAudioRendererProcessingTimeLimit75PercentSupported() const;
[[nodiscard]] bool IsAudioRendererProcessingTimeLimit70PercentSupported() const;
[[nodiscard]] bool IsElapsedFrameCountSupported() const;
[[nodiscard]] bool IsMemoryPoolForceMappingEnabled() const;
[[nodiscard]] bool IsFlushVoiceWaveBuffersSupported() const;
[[nodiscard]] bool IsVoicePlayedSampleCountResetAtLoopPointSupported() const;
[[nodiscard]] bool IsVoicePitchAndSrcSkippedSupported() const;
[[nodiscard]] bool IsMixInParameterDirtyOnlyUpdateSupported() const;
[[nodiscard]] bool IsSplitterBugFixed() const;
void CopyErrorInfo(OutParams& dst);
private:
u32_le process_revision{};
u32_le user_revision{};
u64_le flags{};
std::array<ErrorInfo, 10> errors{};
std::size_t error_count{};
};
} // namespace AudioCore


@@ -1,44 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include <vector>
#include "common/common_types.h"
namespace AudioCore {
/**
* Represents a buffer of audio samples to be played in an audio stream
*/
class Buffer {
public:
using Tag = u64;
Buffer(Tag tag_, std::vector<s16>&& samples_) : tag{tag_}, samples{std::move(samples_)} {}
/// Returns the raw audio data for the buffer
std::vector<s16>& GetSamples() {
return samples;
}
/// Returns the raw audio data for the buffer
const std::vector<s16>& GetSamples() const {
return samples;
}
/// Returns the buffer tag; this is provided by the game to the audout service
Tag GetTag() const {
return tag;
}
private:
Tag tag;
std::vector<s16> samples;
};
using BufferPtr = std::shared_ptr<Buffer>;
} // namespace AudioCore


@@ -1,77 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include "audio_core/codec.h"
namespace AudioCore::Codec {
std::vector<s16> DecodeADPCM(const u8* const data, std::size_t size, const ADPCM_Coeff& coeff,
ADPCMState& state) {
// GC-ADPCM with scale factor and variable coefficients.
// Frames are 8 bytes long containing 14 samples each.
// Samples are 4 bits (one nibble) long.
constexpr std::size_t FRAME_LEN = 8;
constexpr std::size_t SAMPLES_PER_FRAME = 14;
static constexpr std::array<int, 16> SIGNED_NIBBLES{
0, 1, 2, 3, 4, 5, 6, 7, -8, -7, -6, -5, -4, -3, -2, -1,
};
const std::size_t sample_count = (size / FRAME_LEN) * SAMPLES_PER_FRAME;
const std::size_t ret_size =
sample_count % 2 == 0 ? sample_count : sample_count + 1; // Ensure multiple of two.
std::vector<s16> ret(ret_size);
int yn1 = state.yn1, yn2 = state.yn2;
const std::size_t NUM_FRAMES =
(sample_count + (SAMPLES_PER_FRAME - 1)) / SAMPLES_PER_FRAME; // Round up.
for (std::size_t framei = 0; framei < NUM_FRAMES; framei++) {
const int frame_header = data[framei * FRAME_LEN];
const int scale = 1 << (frame_header & 0xF);
const int idx = (frame_header >> 4) & 0x7;
// Coefficients are fixed point with 11 bits fractional part.
const int coef1 = coeff[idx * 2 + 0];
const int coef2 = coeff[idx * 2 + 1];
// Decodes an audio sample. One nibble produces one sample.
const auto decode_sample = [&](const int nibble) -> s16 {
const int xn = nibble * scale;
// We first transform everything into 11 bit fixed point, perform the second order
// digital filter, then transform back.
// 0x400 == 0.5 in 11 bit fixed point.
// Filter: y[n] = x[n] + 0.5 + c1 * y[n-1] + c2 * y[n-2]
int val = ((xn << 11) + 0x400 + coef1 * yn1 + coef2 * yn2) >> 11;
// Clamp to output range.
val = std::clamp<s32>(val, -32768, 32767);
// Advance output feedback.
yn2 = yn1;
yn1 = val;
return static_cast<s16>(val);
};
std::size_t outputi = framei * SAMPLES_PER_FRAME;
std::size_t datai = framei * FRAME_LEN + 1;
for (std::size_t i = 0; i < SAMPLES_PER_FRAME && outputi < sample_count; i += 2) {
const s16 sample1 = decode_sample(SIGNED_NIBBLES[data[datai] >> 4]);
ret[outputi] = sample1;
outputi++;
const s16 sample2 = decode_sample(SIGNED_NIBBLES[data[datai] & 0xF]);
ret[outputi] = sample2;
outputi++;
datai++;
}
}
state.yn1 = static_cast<s16>(yn1);
state.yn2 = static_cast<s16>(yn2);
return ret;
}
} // namespace AudioCore::Codec


@@ -1,43 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "common/common_types.h"
namespace AudioCore::Codec {
enum class PcmFormat : u32 {
Invalid = 0,
Int8 = 1,
Int16 = 2,
Int24 = 3,
Int32 = 4,
PcmFloat = 5,
Adpcm = 6,
};
/// See: Codec::DecodeADPCM
struct ADPCMState {
// Two historical samples from the previously processed buffer,
// required for ADPCM decoding
s16 yn1; ///< y[n-1]
s16 yn2; ///< y[n-2]
};
using ADPCM_Coeff = std::array<s16, 16>;
/**
* @param data Pointer to buffer that contains ADPCM data to decode
* @param size Size of buffer in bytes
* @param coeff ADPCM coefficients
* @param state ADPCM state, this is updated with new state
* @return Decoded stereo signed PCM16 data, sample_count in length
*/
std::vector<s16> DecodeADPCM(const u8* data, std::size_t size, const ADPCM_Coeff& coeff,
ADPCMState& state);
}; // namespace AudioCore::Codec
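
A hypothetical decode call against the API above (the frame bytes and coefficients are made-up placeholders; a real title supplies its 16 coefficients alongside the sound data, and the state carries the y[n-1]/y[n-2] history across buffers):

#include <array>
#include <vector>
#include "audio_core/codec.h"

void DecodeExample() {
    // One 8-byte GC-ADPCM frame: a header byte (scale / coefficient index) plus 7 data
    // bytes holding 14 nibbles, i.e. 14 samples.
    const std::array<u8, 8> frame{0x04, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde};
    AudioCore::Codec::ADPCM_Coeff coeff{}; // placeholder: all-zero coefficients
    AudioCore::Codec::ADPCMState state{};  // zeroed decoder history
    const std::vector<s16> pcm =
        AudioCore::Codec::DecodeADPCM(frame.data(), frame.size(), coeff, state);
    // pcm.size() == 14 here; state.yn1 / state.yn2 are updated for the next buffer.
}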

File diff suppressed because it is too large


@@ -1,110 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <span>
#include "audio_core/common.h"
#include "audio_core/voice_context.h"
#include "common/common_types.h"
namespace Core::Memory {
class Memory;
}
namespace AudioCore {
class MixContext;
class SplitterContext;
class ServerSplitterDestinationData;
class ServerMixInfo;
class EffectContext;
class EffectBase;
struct AuxInfoDSP;
struct I3dl2ReverbParams;
struct I3dl2ReverbState;
using MixVolumeBuffer = std::array<float, AudioCommon::MAX_MIX_BUFFERS>;
class CommandGenerator {
public:
explicit CommandGenerator(AudioCommon::AudioRendererParameter& worker_params_,
VoiceContext& voice_context_, MixContext& mix_context_,
SplitterContext& splitter_context_, EffectContext& effect_context_,
Core::Memory::Memory& memory_);
~CommandGenerator();
void ClearMixBuffers();
void GenerateVoiceCommands();
void GenerateVoiceCommand(ServerVoiceInfo& voice_info);
void GenerateSubMixCommands();
void GenerateFinalMixCommands();
void PreCommand();
void PostCommand();
[[nodiscard]] std::span<s32> GetChannelMixBuffer(s32 channel);
[[nodiscard]] std::span<const s32> GetChannelMixBuffer(s32 channel) const;
[[nodiscard]] std::span<s32> GetMixBuffer(std::size_t index);
[[nodiscard]] std::span<const s32> GetMixBuffer(std::size_t index) const;
[[nodiscard]] std::size_t GetMixChannelBufferOffset(s32 channel) const;
[[nodiscard]] std::size_t GetTotalMixBufferCount() const;
private:
void GenerateDataSourceCommand(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 channel);
void GenerateBiquadFilterCommandForVoice(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
s32 mix_buffer_count, s32 channel);
void GenerateVolumeRampCommand(float last_volume, float current_volume, s32 channel,
s32 node_id);
void GenerateVoiceMixCommand(const MixVolumeBuffer& mix_volumes,
const MixVolumeBuffer& last_mix_volumes, VoiceState& dsp_state,
s32 mix_buffer_offset, s32 mix_buffer_count, s32 voice_index,
s32 node_id);
void GenerateSubMixCommand(ServerMixInfo& mix_info);
void GenerateMixCommands(ServerMixInfo& mix_info);
void GenerateMixCommand(std::size_t output_offset, std::size_t input_offset, float volume,
s32 node_id);
void GenerateFinalMixCommand();
void GenerateBiquadFilterCommand(s32 mix_buffer, const BiquadFilterParameter& params,
std::array<s64, 2>& state, std::size_t input_offset,
std::size_t output_offset, s32 sample_count, s32 node_id);
void GenerateDepopPrepareCommand(VoiceState& dsp_state, std::size_t mix_buffer_count,
std::size_t mix_buffer_offset);
void GenerateDepopForMixBuffersCommand(std::size_t mix_buffer_count,
std::size_t mix_buffer_offset, s32 sample_rate);
void GenerateEffectCommand(ServerMixInfo& mix_info);
void GenerateI3dl2ReverbEffectCommand(s32 mix_buffer_offset, EffectBase* info, bool enabled);
void GenerateBiquadFilterEffectCommand(s32 mix_buffer_offset, EffectBase* info, bool enabled);
void GenerateAuxCommand(s32 mix_buffer_offset, EffectBase* info, bool enabled);
[[nodiscard]] ServerSplitterDestinationData* GetDestinationData(s32 splitter_id, s32 index);
s32 WriteAuxBuffer(AuxInfoDSP& dsp_info, VAddr send_buffer, u32 max_samples,
std::span<const s32> data, u32 sample_count, u32 write_offset,
u32 write_count);
s32 ReadAuxBuffer(AuxInfoDSP& recv_info, VAddr recv_buffer, u32 max_samples,
std::span<s32> out_data, u32 sample_count, u32 read_offset, u32 read_count);
void InitializeI3dl2Reverb(I3dl2ReverbParams& info, I3dl2ReverbState& state,
std::vector<u8>& work_buffer);
void UpdateI3dl2Reverb(I3dl2ReverbParams& info, I3dl2ReverbState& state, bool should_clear);
// DSP Code
template <typename T>
s32 DecodePcm(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 sample_start_offset,
s32 sample_end_offset, s32 sample_count, s32 channel, std::size_t mix_offset);
s32 DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 sample_start_offset,
s32 sample_end_offset, s32 sample_count, s32 channel, std::size_t mix_offset);
void DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, std::span<s32> output,
VoiceState& dsp_state, s32 channel, s32 target_sample_rate,
s32 sample_count, s32 node_id);
AudioCommon::AudioRendererParameter& worker_params;
VoiceContext& voice_context;
MixContext& mix_context;
SplitterContext& splitter_context;
EffectContext& effect_context;
Core::Memory::Memory& memory;
std::vector<s32> mix_buffer{};
std::vector<s32> sample_buffer{};
std::vector<s32> depop_buffer{};
bool dumping_frame{false};
};
} // namespace AudioCore


@@ -1,132 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/result.h"
namespace AudioCommon {
namespace Audren {
constexpr Result ERR_INVALID_PARAMETERS{ErrorModule::Audio, 41};
constexpr Result ERR_SPLITTER_SORT_FAILED{ErrorModule::Audio, 43};
} // namespace Audren
constexpr u8 BASE_REVISION = '0';
constexpr u32_le CURRENT_PROCESS_REVISION =
Common::MakeMagic('R', 'E', 'V', static_cast<u8>(BASE_REVISION + 0xA));
constexpr std::size_t MAX_MIX_BUFFERS = 24;
constexpr std::size_t MAX_BIQUAD_FILTERS = 2;
constexpr std::size_t MAX_CHANNEL_COUNT = 6;
constexpr std::size_t MAX_WAVE_BUFFERS = 4;
constexpr std::size_t MAX_SAMPLE_HISTORY = 4;
constexpr u32 STREAM_SAMPLE_RATE = 48000;
constexpr u32 STREAM_NUM_CHANNELS = 2;
constexpr s32 NO_SPLITTER = -1;
constexpr s32 NO_MIX = 0x7fffffff;
constexpr s32 NO_FINAL_MIX = std::numeric_limits<s32>::min();
constexpr s32 FINAL_MIX = 0;
constexpr s32 NO_EFFECT_ORDER = -1;
constexpr std::size_t TEMP_MIX_BASE_SIZE = 0x3f00; // TODO(ogniK): Work out this constant
// Any size checks seem to take the sample history into account,
// so the constant ends up being 0x3f04; the extra 4 bytes are most
// likely the sample history.
constexpr std::size_t TOTAL_TEMP_MIX_SIZE = TEMP_MIX_BASE_SIZE + AudioCommon::MAX_SAMPLE_HISTORY;
constexpr f32 I3DL2REVERB_MAX_LEVEL = 5000.0f;
constexpr f32 I3DL2REVERB_MIN_REFLECTION_DURATION = 0.02f;
constexpr std::size_t I3DL2REVERB_TAPS = 20;
constexpr std::size_t I3DL2REVERB_DELAY_LINE_COUNT = 4;
using Fractional = s32;
template <typename T>
constexpr Fractional ToFractional(T x) {
return static_cast<Fractional>(x * static_cast<T>(0x4000));
}
constexpr Fractional MultiplyFractional(Fractional lhs, Fractional rhs) {
return static_cast<Fractional>(static_cast<s64>(lhs) * rhs >> 14);
}
constexpr s32 FractionalToFixed(Fractional x) {
const auto s = x & (1 << 13);
return static_cast<s32>(x >> 14) + s;
}
constexpr s32 CalculateDelaySamples(s32 sample_rate_khz, float time) {
return FractionalToFixed(MultiplyFractional(ToFractional(sample_rate_khz), ToFractional(time)));
}
static constexpr u32 VersionFromRevision(u32_le rev) {
// "REV7" -> 7
return ((rev >> 24) & 0xff) - 0x30;
}
static constexpr bool IsRevisionSupported(u32 required, u32_le user_revision) {
const auto base = VersionFromRevision(user_revision);
return required <= base;
}
static constexpr bool IsValidRevision(u32_le revision) {
const auto base = VersionFromRevision(revision);
constexpr auto max_rev = VersionFromRevision(CURRENT_PROCESS_REVISION);
return base <= max_rev;
}
static constexpr bool CanConsumeBuffer(std::size_t size, std::size_t offset, std::size_t required) {
if (offset > size) {
return false;
}
if (size < required) {
return false;
}
if ((size - offset) < required) {
return false;
}
return true;
}
struct UpdateDataSizes {
u32_le behavior{};
u32_le memory_pool{};
u32_le voice{};
u32_le voice_channel_resource{};
u32_le effect{};
u32_le mixer{};
u32_le sink{};
u32_le performance{};
u32_le splitter{};
u32_le render_info{};
INSERT_PADDING_WORDS(4);
};
static_assert(sizeof(UpdateDataSizes) == 0x38, "UpdateDataSizes is an invalid size");
struct UpdateDataHeader {
u32_le revision{};
UpdateDataSizes size{};
u32_le total_size{};
};
static_assert(sizeof(UpdateDataHeader) == 0x40, "UpdateDataHeader is an invalid size");
struct AudioRendererParameter {
u32_le sample_rate;
u32_le sample_count;
u32_le mix_buffer_count;
u32_le submix_count;
u32_le voice_count;
u32_le sink_count;
u32_le effect_count;
u32_le performance_frame_count;
u8 is_voice_drop_enabled;
u8 unknown_21;
u8 unknown_22;
u8 execution_mode;
u32_le splitter_count;
u32_le num_splitter_send_channels;
u32_le unknown_30;
u32_le revision;
};
static_assert(sizeof(AudioRendererParameter) == 52, "AudioRendererParameter is an invalid size");
} // namespace AudioCommon
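
A couple of illustrative checks of the revision helpers above (assuming Common::MakeMagic packs its first argument into the lowest byte, which is what the shift by 24 in VersionFromRevision implies):

// "REVA" is CURRENT_PROCESS_REVISION (BASE_REVISION + 0xA); a user revision of "REV7"
// therefore decodes to version 7 and supports every feature gated on revision <= 7.
static_assert(AudioCommon::VersionFromRevision(Common::MakeMagic('R', 'E', 'V', '7')) == 7);
static_assert(AudioCommon::IsRevisionSupported(5, Common::MakeMagic('R', 'E', 'V', '7')));
static_assert(AudioCommon::IsValidRevision(Common::MakeMagic('R', 'E', 'V', '7')));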


@@ -1,249 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <atomic>
#include <cstring>
#include "audio_core/cubeb_sink.h"
#include "audio_core/stream.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/ring_buffer.h"
#include "common/settings.h"
#ifdef _WIN32
#include <objbase.h>
#endif
namespace AudioCore {
class CubebSinkStream final : public SinkStream {
public:
CubebSinkStream(cubeb* ctx_, u32 sample_rate, u32 num_channels_, cubeb_devid output_device,
const std::string& name)
: ctx{ctx_}, num_channels{std::min(num_channels_, 6u)} {
cubeb_stream_params params{};
params.rate = sample_rate;
params.channels = num_channels;
params.format = CUBEB_SAMPLE_S16NE;
params.prefs = CUBEB_STREAM_PREF_PERSIST;
switch (num_channels) {
case 1:
params.layout = CUBEB_LAYOUT_MONO;
break;
case 2:
params.layout = CUBEB_LAYOUT_STEREO;
break;
case 6:
params.layout = CUBEB_LAYOUT_3F2_LFE;
break;
}
u32 minimum_latency{};
if (cubeb_get_min_latency(ctx, &params, &minimum_latency) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "Error getting minimum latency");
}
if (cubeb_stream_init(ctx, &stream_backend, name.c_str(), nullptr, nullptr, output_device,
&params, std::max(512u, minimum_latency),
&CubebSinkStream::DataCallback, &CubebSinkStream::StateCallback,
this) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "Error initializing cubeb stream");
return;
}
if (cubeb_stream_start(stream_backend) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "Error starting cubeb stream");
return;
}
}
~CubebSinkStream() override {
if (!ctx) {
return;
}
if (cubeb_stream_stop(stream_backend) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "Error stopping cubeb stream");
}
cubeb_stream_destroy(stream_backend);
}
void EnqueueSamples(u32 source_num_channels, const std::vector<s16>& samples) override {
if (source_num_channels > num_channels) {
// Downmix 6 channels to 2
ASSERT_MSG(source_num_channels == 6, "Channel count must be 6");
std::vector<s16> buf;
buf.reserve(samples.size() * num_channels / source_num_channels);
for (std::size_t i = 0; i < samples.size(); i += source_num_channels) {
// Downmixing implementation taken from the ATSC standard
const s16 left{samples[i + 0]};
const s16 right{samples[i + 1]};
const s16 center{samples[i + 2]};
const s16 surround_left{samples[i + 4]};
const s16 surround_right{samples[i + 5]};
// Not used in the ATSC reference implementation
[[maybe_unused]] const s16 low_frequency_effects{samples[i + 3]};
constexpr s32 clev{707}; // center mixing level coefficient
constexpr s32 slev{707}; // surround mixing level coefficient
buf.push_back(static_cast<s16>(left + (clev * center / 1000) +
(slev * surround_left / 1000)));
buf.push_back(static_cast<s16>(right + (clev * center / 1000) +
(slev * surround_right / 1000)));
}
queue.Push(buf);
return;
}
queue.Push(samples);
}
std::size_t SamplesInQueue(u32 channel_count) const override {
if (!ctx)
return 0;
return queue.Size() / channel_count;
}
void Flush() override {
should_flush = true;
}
u32 GetNumChannels() const {
return num_channels;
}
private:
std::vector<std::string> device_list;
cubeb* ctx{};
cubeb_stream* stream_backend{};
u32 num_channels{};
Common::RingBuffer<s16, 0x10000> queue;
    std::array<s16, 6> last_frame{}; // sized for the maximum of 6 channels copied in DataCallback
std::atomic<bool> should_flush{};
static long DataCallback(cubeb_stream* stream, void* user_data, const void* input_buffer,
void* output_buffer, long num_frames);
static void StateCallback(cubeb_stream* stream, void* user_data, cubeb_state state);
};
CubebSink::CubebSink(std::string_view target_device_name) {
// Cubeb requires COM to be initialized on the thread calling cubeb_init on Windows
#ifdef _WIN32
com_init_result = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
#endif
if (cubeb_init(&ctx, "yuzu", nullptr) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "cubeb_init failed");
return;
}
if (target_device_name != auto_device_name && !target_device_name.empty()) {
cubeb_device_collection collection;
if (cubeb_enumerate_devices(ctx, CUBEB_DEVICE_TYPE_OUTPUT, &collection) != CUBEB_OK) {
LOG_WARNING(Audio_Sink, "Audio output device enumeration not supported");
} else {
const auto collection_end{collection.device + collection.count};
const auto device{
std::find_if(collection.device, collection_end, [&](const cubeb_device_info& info) {
return info.friendly_name != nullptr &&
target_device_name == info.friendly_name;
})};
if (device != collection_end) {
output_device = device->devid;
}
cubeb_device_collection_destroy(ctx, &collection);
}
}
}
CubebSink::~CubebSink() {
if (!ctx) {
return;
}
for (auto& sink_stream : sink_streams) {
sink_stream.reset();
}
cubeb_destroy(ctx);
#ifdef _WIN32
if (SUCCEEDED(com_init_result)) {
CoUninitialize();
}
#endif
}
SinkStream& CubebSink::AcquireSinkStream(u32 sample_rate, u32 num_channels,
const std::string& name) {
sink_streams.push_back(
std::make_unique<CubebSinkStream>(ctx, sample_rate, num_channels, output_device, name));
return *sink_streams.back();
}
long CubebSinkStream::DataCallback([[maybe_unused]] cubeb_stream* stream, void* user_data,
[[maybe_unused]] const void* input_buffer, void* output_buffer,
long num_frames) {
auto* impl = static_cast<CubebSinkStream*>(user_data);
auto* buffer = static_cast<u8*>(output_buffer);
if (!impl) {
return {};
}
const std::size_t num_channels = impl->GetNumChannels();
const std::size_t samples_to_write = num_channels * num_frames;
const std::size_t samples_written = impl->queue.Pop(buffer, samples_to_write);
if (samples_written >= num_channels) {
std::memcpy(&impl->last_frame[0], buffer + (samples_written - num_channels) * sizeof(s16),
num_channels * sizeof(s16));
}
// Fill the rest of the frames with last_frame
for (std::size_t i = samples_written; i < samples_to_write; i += num_channels) {
std::memcpy(buffer + i * sizeof(s16), &impl->last_frame[0], num_channels * sizeof(s16));
}
return num_frames;
}
void CubebSinkStream::StateCallback([[maybe_unused]] cubeb_stream* stream,
[[maybe_unused]] void* user_data,
[[maybe_unused]] cubeb_state state) {}
std::vector<std::string> ListCubebSinkDevices() {
std::vector<std::string> device_list;
cubeb* ctx;
if (cubeb_init(&ctx, "yuzu Device Enumerator", nullptr) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "cubeb_init failed");
return {};
}
cubeb_device_collection collection;
if (cubeb_enumerate_devices(ctx, CUBEB_DEVICE_TYPE_OUTPUT, &collection) != CUBEB_OK) {
LOG_WARNING(Audio_Sink, "Audio output device enumeration not supported");
} else {
for (std::size_t i = 0; i < collection.count; i++) {
const cubeb_device_info& device = collection.device[i];
if (device.friendly_name) {
device_list.emplace_back(device.friendly_name);
}
}
cubeb_device_collection_destroy(ctx, &collection);
}
cubeb_destroy(ctx);
return device_list;
}
} // namespace AudioCore
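
The 5.1-to-stereo path in EnqueueSamples above can be read in isolation; the following standalone sketch reproduces the same fixed-point 0.707 mixing levels on an interleaved L, R, C, LFE, SL, SR frame layout. It is illustrative only (hypothetical function name, and like the original it does not clamp against overflow).

#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<int16_t> DownmixToStereo(const std::vector<int16_t>& in) {
    constexpr int32_t clev = 707; // center mixing level, roughly -3 dB
    constexpr int32_t slev = 707; // surround mixing level, roughly -3 dB
    std::vector<int16_t> out;
    out.reserve(in.size() / 3);
    for (std::size_t i = 0; i + 5 < in.size(); i += 6) {
        const int32_t left = in[i + 0];
        const int32_t right = in[i + 1];
        const int32_t center = in[i + 2];
        const int32_t surround_left = in[i + 4];
        const int32_t surround_right = in[i + 5]; // in[i + 3] is the LFE and is dropped
        out.push_back(static_cast<int16_t>(left + clev * center / 1000 + slev * surround_left / 1000));
        out.push_back(static_cast<int16_t>(right + clev * center / 1000 + slev * surround_right / 1000));
    }
    return out;
}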

View File

@@ -1,35 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <string>
#include <vector>
#include <cubeb/cubeb.h>
#include "audio_core/sink.h"
namespace AudioCore {
class CubebSink final : public Sink {
public:
explicit CubebSink(std::string_view device_id);
~CubebSink() override;
SinkStream& AcquireSinkStream(u32 sample_rate, u32 num_channels,
const std::string& name) override;
private:
cubeb* ctx{};
cubeb_devid output_device{};
std::vector<SinkStreamPtr> sink_streams;
#ifdef _WIN32
u32 com_init_result = 0;
#endif
};
std::vector<std::string> ListCubebSinkDevices();
} // namespace AudioCore

View File

@@ -1,107 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <cstring>
#include "audio_core/delay_line.h"
namespace AudioCore {
DelayLineBase::DelayLineBase() = default;
DelayLineBase::~DelayLineBase() = default;
void DelayLineBase::Initialize(s32 max_delay_, float* src_buffer) {
buffer = src_buffer;
buffer_end = buffer + max_delay_;
max_delay = max_delay_;
output = buffer;
SetDelay(max_delay_);
Clear();
}
void DelayLineBase::SetDelay(s32 new_delay) {
if (max_delay < new_delay) {
return;
}
delay = new_delay;
input = (buffer + ((output - buffer) + new_delay) % (max_delay + 1));
}
s32 DelayLineBase::GetDelay() const {
return delay;
}
s32 DelayLineBase::GetMaxDelay() const {
return max_delay;
}
f32 DelayLineBase::TapOut(s32 last_sample) {
const float* ptr = input - (last_sample + 1);
if (ptr < buffer) {
ptr += (max_delay + 1);
}
return *ptr;
}
f32 DelayLineBase::Tick(f32 sample) {
*(input++) = sample;
const auto out_sample = *(output++);
if (buffer_end < input) {
input = buffer;
}
if (buffer_end < output) {
output = buffer;
}
return out_sample;
}
float* DelayLineBase::GetInput() {
return input;
}
const float* DelayLineBase::GetInput() const {
return input;
}
f32 DelayLineBase::GetOutputSample() const {
return *output;
}
void DelayLineBase::Clear() {
std::memset(buffer, 0, sizeof(float) * max_delay);
}
void DelayLineBase::Reset() {
buffer = nullptr;
buffer_end = nullptr;
max_delay = 0;
input = nullptr;
output = nullptr;
delay = 0;
}
DelayLineAllPass::DelayLineAllPass() = default;
DelayLineAllPass::~DelayLineAllPass() = default;
void DelayLineAllPass::Initialize(u32 delay_, float coefficient_, f32* src_buffer) {
    DelayLineBase::Initialize(delay_, src_buffer);
    SetCoefficient(coefficient_);
}
void DelayLineAllPass::SetCoefficient(float coefficient_) {
    coefficient = coefficient_;
}
f32 DelayLineAllPass::Tick(f32 sample) {
const auto temp = sample - coefficient * *output;
return coefficient * temp + DelayLineBase::Tick(temp);
}
void DelayLineAllPass::Reset() {
coefficient = 0.0f;
DelayLineBase::Reset();
}
} // namespace AudioCore
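
The delay lines above operate on a caller-provided raw buffer; a minimal self-contained sketch of the same circular write/read idea, backed by a std::vector instead, might look like the following (hypothetical class, not the original API).

#include <cstddef>
#include <vector>

class SimpleDelayLine {
public:
    explicit SimpleDelayLine(std::size_t delay) : buffer(delay + 1, 0.0f) {}

    // Write one sample and return the sample written `delay` ticks earlier.
    float Tick(float sample) {
        buffer[write_pos] = sample;
        write_pos = (write_pos + 1) % buffer.size();
        return buffer[write_pos]; // oldest remaining sample in the line
    }

private:
    std::vector<float> buffer;
    std::size_t write_pos{};
};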

View File

@@ -1,49 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_types.h"
namespace AudioCore {
class DelayLineBase {
public:
DelayLineBase();
~DelayLineBase();
void Initialize(s32 max_delay_, float* src_buffer);
void SetDelay(s32 new_delay);
s32 GetDelay() const;
s32 GetMaxDelay() const;
f32 TapOut(s32 last_sample);
f32 Tick(f32 sample);
float* GetInput();
const float* GetInput() const;
f32 GetOutputSample() const;
void Clear();
void Reset();
protected:
float* buffer{nullptr};
float* buffer_end{nullptr};
s32 max_delay{};
float* input{nullptr};
float* output{nullptr};
s32 delay{};
};
class DelayLineAllPass final : public DelayLineBase {
public:
DelayLineAllPass();
~DelayLineAllPass();
    void Initialize(u32 delay, float coefficient_, f32* src_buffer);
    void SetCoefficient(float coefficient_);
f32 Tick(f32 sample);
void Reset();
private:
float coefficient{};
};
} // namespace AudioCore

View File

@@ -1,320 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include "audio_core/effect_context.h"
namespace AudioCore {
namespace {
bool ValidChannelCountForEffect(s32 channel_count) {
return channel_count == 1 || channel_count == 2 || channel_count == 4 || channel_count == 6;
}
} // namespace
EffectContext::EffectContext(std::size_t effect_count_) : effect_count(effect_count_) {
effects.reserve(effect_count);
std::generate_n(std::back_inserter(effects), effect_count,
[] { return std::make_unique<EffectStubbed>(); });
}
EffectContext::~EffectContext() = default;
std::size_t EffectContext::GetCount() const {
return effect_count;
}
EffectBase* EffectContext::GetInfo(std::size_t i) {
return effects.at(i).get();
}
EffectBase* EffectContext::RetargetEffect(std::size_t i, EffectType effect) {
switch (effect) {
case EffectType::Invalid:
effects[i] = std::make_unique<EffectStubbed>();
break;
case EffectType::BufferMixer:
effects[i] = std::make_unique<EffectBufferMixer>();
break;
case EffectType::Aux:
effects[i] = std::make_unique<EffectAuxInfo>();
break;
case EffectType::Delay:
effects[i] = std::make_unique<EffectDelay>();
break;
case EffectType::Reverb:
effects[i] = std::make_unique<EffectReverb>();
break;
case EffectType::I3dl2Reverb:
effects[i] = std::make_unique<EffectI3dl2Reverb>();
break;
case EffectType::BiquadFilter:
effects[i] = std::make_unique<EffectBiquadFilter>();
break;
default:
ASSERT_MSG(false, "Unimplemented effect {}", effect);
effects[i] = std::make_unique<EffectStubbed>();
}
return GetInfo(i);
}
const EffectBase* EffectContext::GetInfo(std::size_t i) const {
return effects.at(i).get();
}
EffectStubbed::EffectStubbed() : EffectBase(EffectType::Invalid) {}
EffectStubbed::~EffectStubbed() = default;
void EffectStubbed::Update([[maybe_unused]] EffectInfo::InParams& in_params) {}
void EffectStubbed::UpdateForCommandGeneration() {}
EffectBase::EffectBase(EffectType effect_type_) : effect_type(effect_type_) {}
EffectBase::~EffectBase() = default;
UsageState EffectBase::GetUsage() const {
return usage;
}
EffectType EffectBase::GetType() const {
return effect_type;
}
bool EffectBase::IsEnabled() const {
return enabled;
}
s32 EffectBase::GetMixID() const {
return mix_id;
}
s32 EffectBase::GetProcessingOrder() const {
return processing_order;
}
std::vector<u8>& EffectBase::GetWorkBuffer() {
return work_buffer;
}
const std::vector<u8>& EffectBase::GetWorkBuffer() const {
return work_buffer;
}
EffectI3dl2Reverb::EffectI3dl2Reverb() : EffectGeneric(EffectType::I3dl2Reverb) {}
EffectI3dl2Reverb::~EffectI3dl2Reverb() = default;
void EffectI3dl2Reverb::Update(EffectInfo::InParams& in_params) {
auto& params = GetParams();
const auto* reverb_params = reinterpret_cast<I3dl2ReverbParams*>(in_params.raw.data());
if (!ValidChannelCountForEffect(reverb_params->max_channels)) {
ASSERT_MSG(false, "Invalid reverb max channel count {}", reverb_params->max_channels);
return;
}
const auto last_status = params.status;
mix_id = in_params.mix_id;
processing_order = in_params.processing_order;
params = *reverb_params;
if (!ValidChannelCountForEffect(reverb_params->channel_count)) {
params.channel_count = params.max_channels;
}
enabled = in_params.is_enabled;
if (last_status != ParameterStatus::Updated) {
params.status = last_status;
}
if (in_params.is_new || skipped) {
usage = UsageState::Initialized;
params.status = ParameterStatus::Initialized;
skipped = in_params.buffer_address == 0 || in_params.buffer_size == 0;
if (!skipped) {
auto& cur_work_buffer = GetWorkBuffer();
// Has two buffers internally
cur_work_buffer.resize(in_params.buffer_size * 2);
std::fill(cur_work_buffer.begin(), cur_work_buffer.end(), 0);
}
}
}
void EffectI3dl2Reverb::UpdateForCommandGeneration() {
if (enabled) {
usage = UsageState::Running;
} else {
usage = UsageState::Stopped;
}
GetParams().status = ParameterStatus::Updated;
}
I3dl2ReverbState& EffectI3dl2Reverb::GetState() {
return state;
}
const I3dl2ReverbState& EffectI3dl2Reverb::GetState() const {
return state;
}
EffectBiquadFilter::EffectBiquadFilter() : EffectGeneric(EffectType::BiquadFilter) {}
EffectBiquadFilter::~EffectBiquadFilter() = default;
void EffectBiquadFilter::Update(EffectInfo::InParams& in_params) {
auto& params = GetParams();
const auto* biquad_params = reinterpret_cast<BiquadFilterParams*>(in_params.raw.data());
mix_id = in_params.mix_id;
processing_order = in_params.processing_order;
params = *biquad_params;
enabled = in_params.is_enabled;
}
void EffectBiquadFilter::UpdateForCommandGeneration() {
if (enabled) {
usage = UsageState::Running;
} else {
usage = UsageState::Stopped;
}
GetParams().status = ParameterStatus::Updated;
}
EffectAuxInfo::EffectAuxInfo() : EffectGeneric(EffectType::Aux) {}
EffectAuxInfo::~EffectAuxInfo() = default;
void EffectAuxInfo::Update(EffectInfo::InParams& in_params) {
const auto* aux_params = reinterpret_cast<AuxInfo*>(in_params.raw.data());
mix_id = in_params.mix_id;
processing_order = in_params.processing_order;
GetParams() = *aux_params;
enabled = in_params.is_enabled;
if (in_params.is_new || skipped) {
skipped = aux_params->send_buffer_info == 0 || aux_params->return_buffer_info == 0;
if (skipped) {
return;
}
        // There are two AuxInfos of identical size: the first is managed by the CPU, the second
        // by the DSP. All we care about is managing the DSP one.
send_info = aux_params->send_buffer_info + sizeof(AuxInfoDSP);
send_buffer = aux_params->send_buffer_info + (sizeof(AuxInfoDSP) * 2);
recv_info = aux_params->return_buffer_info + sizeof(AuxInfoDSP);
recv_buffer = aux_params->return_buffer_info + (sizeof(AuxInfoDSP) * 2);
}
}
void EffectAuxInfo::UpdateForCommandGeneration() {
if (enabled) {
usage = UsageState::Running;
} else {
usage = UsageState::Stopped;
}
}
VAddr EffectAuxInfo::GetSendInfo() const {
return send_info;
}
VAddr EffectAuxInfo::GetSendBuffer() const {
return send_buffer;
}
VAddr EffectAuxInfo::GetRecvInfo() const {
return recv_info;
}
VAddr EffectAuxInfo::GetRecvBuffer() const {
return recv_buffer;
}
EffectDelay::EffectDelay() : EffectGeneric(EffectType::Delay) {}
EffectDelay::~EffectDelay() = default;
void EffectDelay::Update(EffectInfo::InParams& in_params) {
const auto* delay_params = reinterpret_cast<DelayParams*>(in_params.raw.data());
auto& params = GetParams();
if (!ValidChannelCountForEffect(delay_params->max_channels)) {
return;
}
const auto last_status = params.status;
mix_id = in_params.mix_id;
processing_order = in_params.processing_order;
params = *delay_params;
if (!ValidChannelCountForEffect(delay_params->channels)) {
params.channels = params.max_channels;
}
enabled = in_params.is_enabled;
if (last_status != ParameterStatus::Updated) {
params.status = last_status;
}
if (in_params.is_new || skipped) {
usage = UsageState::Initialized;
params.status = ParameterStatus::Initialized;
skipped = in_params.buffer_address == 0 || in_params.buffer_size == 0;
}
}
void EffectDelay::UpdateForCommandGeneration() {
if (enabled) {
usage = UsageState::Running;
} else {
usage = UsageState::Stopped;
}
GetParams().status = ParameterStatus::Updated;
}
EffectBufferMixer::EffectBufferMixer() : EffectGeneric(EffectType::BufferMixer) {}
EffectBufferMixer::~EffectBufferMixer() = default;
void EffectBufferMixer::Update(EffectInfo::InParams& in_params) {
mix_id = in_params.mix_id;
processing_order = in_params.processing_order;
GetParams() = *reinterpret_cast<BufferMixerParams*>(in_params.raw.data());
enabled = in_params.is_enabled;
}
void EffectBufferMixer::UpdateForCommandGeneration() {
if (enabled) {
usage = UsageState::Running;
} else {
usage = UsageState::Stopped;
}
}
EffectReverb::EffectReverb() : EffectGeneric(EffectType::Reverb) {}
EffectReverb::~EffectReverb() = default;
void EffectReverb::Update(EffectInfo::InParams& in_params) {
const auto* reverb_params = reinterpret_cast<ReverbParams*>(in_params.raw.data());
auto& params = GetParams();
if (!ValidChannelCountForEffect(reverb_params->max_channels)) {
return;
}
const auto last_status = params.status;
mix_id = in_params.mix_id;
processing_order = in_params.processing_order;
params = *reverb_params;
if (!ValidChannelCountForEffect(reverb_params->channels)) {
params.channels = params.max_channels;
}
enabled = in_params.is_enabled;
if (last_status != ParameterStatus::Updated) {
params.status = last_status;
}
if (in_params.is_new || skipped) {
usage = UsageState::Initialized;
params.status = ParameterStatus::Initialized;
skipped = in_params.buffer_address == 0 || in_params.buffer_size == 0;
}
}
void EffectReverb::UpdateForCommandGeneration() {
if (enabled) {
usage = UsageState::Running;
} else {
usage = UsageState::Stopped;
}
GetParams().status = ParameterStatus::Updated;
}
} // namespace AudioCore
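
Each Update() above reinterprets the 0xa0-byte raw parameter blob as a typed struct. Below is a hedged standalone sketch of that pattern using memcpy, with a size guard and a hypothetical parameter type that are not in the original code.

#include <array>
#include <cstdint>
#include <cstring>

struct DemoEffectParams {
    uint16_t max_channels;
    uint16_t channel_count;
};

bool ReadEffectParams(const std::array<uint8_t, 0xa0>& raw, DemoEffectParams& out) {
    static_assert(sizeof(DemoEffectParams) <= 0xa0, "parameter blob is too small");
    std::memcpy(&out, raw.data(), sizeof(DemoEffectParams)); // memcpy avoids strict-aliasing issues
    return out.channel_count <= out.max_channels;            // mirror the channel-count validation
}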

View File

@@ -1,349 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <memory>
#include <vector>
#include "audio_core/common.h"
#include "audio_core/delay_line.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
namespace AudioCore {
enum class EffectType : u8 {
Invalid = 0,
BufferMixer = 1,
Aux = 2,
Delay = 3,
Reverb = 4,
I3dl2Reverb = 5,
BiquadFilter = 6,
};
enum class UsageStatus : u8 {
Invalid = 0,
New = 1,
Initialized = 2,
Used = 3,
Removed = 4,
};
enum class UsageState {
Invalid = 0,
Initialized = 1,
Running = 2,
Stopped = 3,
};
enum class ParameterStatus : u8 {
Initialized = 0,
Updating = 1,
Updated = 2,
};
struct BufferMixerParams {
std::array<s8, AudioCommon::MAX_MIX_BUFFERS> input{};
std::array<s8, AudioCommon::MAX_MIX_BUFFERS> output{};
std::array<float_le, AudioCommon::MAX_MIX_BUFFERS> volume{};
s32_le count{};
};
static_assert(sizeof(BufferMixerParams) == 0x94, "BufferMixerParams is an invalid size");
struct AuxInfoDSP {
u32_le read_offset{};
u32_le write_offset{};
u32_le remaining{};
INSERT_PADDING_WORDS(13);
};
static_assert(sizeof(AuxInfoDSP) == 0x40, "AuxInfoDSP is an invalid size");
struct AuxInfo {
std::array<s8, AudioCommon::MAX_MIX_BUFFERS> input_mix_buffers{};
std::array<s8, AudioCommon::MAX_MIX_BUFFERS> output_mix_buffers{};
u32_le count{};
s32_le sample_rate{};
s32_le sample_count{};
s32_le mix_buffer_count{};
u64_le send_buffer_info{};
u64_le send_buffer_base{};
u64_le return_buffer_info{};
u64_le return_buffer_base{};
};
static_assert(sizeof(AuxInfo) == 0x60, "AuxInfo is an invalid size");
struct I3dl2ReverbParams {
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> input{};
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> output{};
u16_le max_channels{};
u16_le channel_count{};
INSERT_PADDING_BYTES(1);
u32_le sample_rate{};
f32 room_hf{};
f32 hf_reference{};
f32 decay_time{};
f32 hf_decay_ratio{};
f32 room{};
f32 reflection{};
f32 reverb{};
f32 diffusion{};
f32 reflection_delay{};
f32 reverb_delay{};
f32 density{};
f32 dry_gain{};
ParameterStatus status{};
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(I3dl2ReverbParams) == 0x4c, "I3dl2ReverbParams is an invalid size");
struct BiquadFilterParams {
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> input{};
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> output{};
    std::array<s16_le, 3> numerator{};
    std::array<s16_le, 2> denominator{};
s8 channel_count{};
ParameterStatus status{};
};
static_assert(sizeof(BiquadFilterParams) == 0x18, "BiquadFilterParams is an invalid size");
struct DelayParams {
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> input{};
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> output{};
u16_le max_channels{};
u16_le channels{};
s32_le max_delay{};
s32_le delay{};
s32_le sample_rate{};
s32_le gain{};
s32_le feedback_gain{};
s32_le out_gain{};
s32_le dry_gain{};
s32_le channel_spread{};
s32_le low_pass{};
ParameterStatus status{};
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(DelayParams) == 0x38, "DelayParams is an invalid size");
struct ReverbParams {
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> input{};
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> output{};
u16_le max_channels{};
u16_le channels{};
s32_le sample_rate{};
s32_le mode0{};
s32_le mode0_gain{};
s32_le pre_delay{};
s32_le mode1{};
s32_le mode1_gain{};
s32_le decay{};
s32_le hf_decay_ratio{};
s32_le coloration{};
s32_le reverb_gain{};
s32_le out_gain{};
s32_le dry_gain{};
ParameterStatus status{};
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(ReverbParams) == 0x44, "ReverbParams is an invalid size");
class EffectInfo {
public:
struct InParams {
EffectType type{};
u8 is_new{};
u8 is_enabled{};
INSERT_PADDING_BYTES(1);
s32_le mix_id{};
u64_le buffer_address{};
u64_le buffer_size{};
s32_le processing_order{};
INSERT_PADDING_BYTES(4);
union {
std::array<u8, 0xa0> raw;
};
};
static_assert(sizeof(InParams) == 0xc0, "InParams is an invalid size");
struct OutParams {
UsageStatus status{};
INSERT_PADDING_BYTES(15);
};
static_assert(sizeof(OutParams) == 0x10, "OutParams is an invalid size");
};
struct AuxAddress {
VAddr send_dsp_info{};
VAddr send_buffer_base{};
VAddr return_dsp_info{};
VAddr return_buffer_base{};
};
class EffectBase {
public:
explicit EffectBase(EffectType effect_type_);
virtual ~EffectBase();
virtual void Update(EffectInfo::InParams& in_params) = 0;
virtual void UpdateForCommandGeneration() = 0;
[[nodiscard]] UsageState GetUsage() const;
[[nodiscard]] EffectType GetType() const;
[[nodiscard]] bool IsEnabled() const;
[[nodiscard]] s32 GetMixID() const;
[[nodiscard]] s32 GetProcessingOrder() const;
[[nodiscard]] std::vector<u8>& GetWorkBuffer();
[[nodiscard]] const std::vector<u8>& GetWorkBuffer() const;
protected:
UsageState usage{UsageState::Invalid};
EffectType effect_type{};
s32 mix_id{};
s32 processing_order{};
bool enabled = false;
std::vector<u8> work_buffer{};
};
template <typename T>
class EffectGeneric : public EffectBase {
public:
explicit EffectGeneric(EffectType effect_type_) : EffectBase(effect_type_) {}
T& GetParams() {
return internal_params;
}
const T& GetParams() const {
return internal_params;
}
private:
T internal_params{};
};
class EffectStubbed : public EffectBase {
public:
explicit EffectStubbed();
~EffectStubbed() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
};
struct I3dl2ReverbState {
f32 lowpass_0{};
f32 lowpass_1{};
f32 lowpass_2{};
DelayLineBase early_delay_line{};
std::array<u32, AudioCommon::I3DL2REVERB_TAPS> early_tap_steps{};
f32 early_gain{};
f32 late_gain{};
u32 early_to_late_taps{};
std::array<DelayLineBase, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT> fdn_delay_line{};
std::array<DelayLineAllPass, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT> decay_delay_line0{};
std::array<DelayLineAllPass, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT> decay_delay_line1{};
f32 last_reverb_echo{};
DelayLineBase center_delay_line{};
std::array<std::array<f32, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT>, 3> lpf_coefficients{};
std::array<f32, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT> shelf_filter{};
f32 dry_gain{};
};
class EffectI3dl2Reverb : public EffectGeneric<I3dl2ReverbParams> {
public:
explicit EffectI3dl2Reverb();
~EffectI3dl2Reverb() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
I3dl2ReverbState& GetState();
const I3dl2ReverbState& GetState() const;
private:
bool skipped = false;
I3dl2ReverbState state{};
};
class EffectBiquadFilter : public EffectGeneric<BiquadFilterParams> {
public:
explicit EffectBiquadFilter();
~EffectBiquadFilter() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
};
class EffectAuxInfo : public EffectGeneric<AuxInfo> {
public:
explicit EffectAuxInfo();
~EffectAuxInfo() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
[[nodiscard]] VAddr GetSendInfo() const;
[[nodiscard]] VAddr GetSendBuffer() const;
[[nodiscard]] VAddr GetRecvInfo() const;
[[nodiscard]] VAddr GetRecvBuffer() const;
private:
VAddr send_info{};
VAddr send_buffer{};
VAddr recv_info{};
VAddr recv_buffer{};
bool skipped = false;
AuxAddress addresses{};
};
class EffectDelay : public EffectGeneric<DelayParams> {
public:
explicit EffectDelay();
~EffectDelay() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
private:
bool skipped = false;
};
class EffectBufferMixer : public EffectGeneric<BufferMixerParams> {
public:
explicit EffectBufferMixer();
~EffectBufferMixer() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
};
class EffectReverb : public EffectGeneric<ReverbParams> {
public:
explicit EffectReverb();
~EffectReverb() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
private:
bool skipped = false;
};
class EffectContext {
public:
explicit EffectContext(std::size_t effect_count_);
~EffectContext();
[[nodiscard]] std::size_t GetCount() const;
[[nodiscard]] EffectBase* GetInfo(std::size_t i);
[[nodiscard]] EffectBase* RetargetEffect(std::size_t i, EffectType effect);
[[nodiscard]] const EffectBase* GetInfo(std::size_t i) const;
private:
std::size_t effect_count{};
std::vector<std::unique_ptr<EffectBase>> effects;
};
} // namespace AudioCore
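
The header above pins every guest-visible layout with explicit padding and a static_assert on its size; a standalone sketch of that convention, using a hypothetical struct rather than one of the original parameter blocks:

#include <cstdint>

struct ExampleInParams {
    uint8_t type;
    uint8_t is_new;
    uint8_t is_enabled;
    uint8_t padding;       // keeps mix_id 4-byte aligned
    int32_t mix_id;
    uint64_t buffer_address;
};
static_assert(sizeof(ExampleInParams) == 0x10, "ExampleInParams must match the guest layout");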

View File

@@ -1,511 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/behavior_info.h"
#include "audio_core/effect_context.h"
#include "audio_core/info_updater.h"
#include "audio_core/memory_pool.h"
#include "audio_core/mix_context.h"
#include "audio_core/sink_context.h"
#include "audio_core/splitter_context.h"
#include "audio_core/voice_context.h"
#include "common/logging/log.h"
namespace AudioCore {
InfoUpdater::InfoUpdater(const std::vector<u8>& in_params_, std::vector<u8>& out_params_,
BehaviorInfo& behavior_info_)
: in_params(in_params_), out_params(out_params_), behavior_info(behavior_info_) {
ASSERT(
AudioCommon::CanConsumeBuffer(in_params.size(), 0, sizeof(AudioCommon::UpdateDataHeader)));
std::memcpy(&input_header, in_params.data(), sizeof(AudioCommon::UpdateDataHeader));
output_header.total_size = sizeof(AudioCommon::UpdateDataHeader);
}
InfoUpdater::~InfoUpdater() = default;
bool InfoUpdater::UpdateBehaviorInfo(BehaviorInfo& in_behavior_info) {
if (input_header.size.behavior != sizeof(BehaviorInfo::InParams)) {
LOG_ERROR(Audio, "Behavior info is an invalid size, expecting 0x{:X} but got 0x{:X}",
sizeof(BehaviorInfo::InParams), input_header.size.behavior);
return false;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset,
sizeof(BehaviorInfo::InParams))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
BehaviorInfo::InParams behavior_in{};
std::memcpy(&behavior_in, in_params.data() + input_offset, sizeof(BehaviorInfo::InParams));
input_offset += sizeof(BehaviorInfo::InParams);
// Make sure it's an audio revision we can actually support
if (!AudioCommon::IsValidRevision(behavior_in.revision)) {
LOG_ERROR(Audio, "Invalid input revision, revision=0x{:08X}", behavior_in.revision);
return false;
}
// Make sure that our behavior info revision matches the input
if (in_behavior_info.GetUserRevision() != behavior_in.revision) {
LOG_ERROR(Audio,
"User revision differs from input revision, expecting 0x{:08X} but got 0x{:08X}",
in_behavior_info.GetUserRevision(), behavior_in.revision);
return false;
}
// Update behavior info flags
in_behavior_info.ClearError();
in_behavior_info.UpdateFlags(behavior_in.flags);
return true;
}
bool InfoUpdater::UpdateMemoryPools(std::vector<ServerMemoryPoolInfo>& memory_pool_info) {
const auto memory_pool_count = memory_pool_info.size();
const auto total_memory_pool_in = sizeof(ServerMemoryPoolInfo::InParams) * memory_pool_count;
const auto total_memory_pool_out = sizeof(ServerMemoryPoolInfo::OutParams) * memory_pool_count;
if (input_header.size.memory_pool != total_memory_pool_in) {
LOG_ERROR(Audio, "Memory pools are an invalid size, expecting 0x{:X} but got 0x{:X}",
total_memory_pool_in, input_header.size.memory_pool);
return false;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, total_memory_pool_in)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::vector<ServerMemoryPoolInfo::InParams> mempool_in(memory_pool_count);
std::vector<ServerMemoryPoolInfo::OutParams> mempool_out(memory_pool_count);
std::memcpy(mempool_in.data(), in_params.data() + input_offset, total_memory_pool_in);
input_offset += total_memory_pool_in;
// Update our memory pools
for (std::size_t i = 0; i < memory_pool_count; i++) {
if (!memory_pool_info[i].Update(mempool_in[i], mempool_out[i])) {
LOG_ERROR(Audio, "Failed to update memory pool {}!", i);
return false;
}
}
if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset,
sizeof(BehaviorInfo::InParams))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(out_params.data() + output_offset, mempool_out.data(), total_memory_pool_out);
output_offset += total_memory_pool_out;
output_header.size.memory_pool = static_cast<u32>(total_memory_pool_out);
return true;
}
bool InfoUpdater::UpdateVoiceChannelResources(VoiceContext& voice_context) {
const auto voice_count = voice_context.GetVoiceCount();
const auto voice_size = voice_count * sizeof(VoiceChannelResource::InParams);
std::vector<VoiceChannelResource::InParams> resources_in(voice_count);
if (input_header.size.voice_channel_resource != voice_size) {
LOG_ERROR(Audio, "VoiceChannelResource is an invalid size, expecting 0x{:X} but got 0x{:X}",
voice_size, input_header.size.voice_channel_resource);
return false;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, voice_size)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(resources_in.data(), in_params.data() + input_offset, voice_size);
input_offset += voice_size;
// Update our channel resources
for (std::size_t i = 0; i < voice_count; i++) {
// Grab our channel resource
auto& resource = voice_context.GetChannelResource(i);
resource.Update(resources_in[i]);
}
return true;
}
bool InfoUpdater::UpdateVoices(VoiceContext& voice_context,
[[maybe_unused]] std::vector<ServerMemoryPoolInfo>& memory_pool_info,
[[maybe_unused]] VAddr audio_codec_dsp_addr) {
const auto voice_count = voice_context.GetVoiceCount();
std::vector<VoiceInfo::InParams> voice_in(voice_count);
std::vector<VoiceInfo::OutParams> voice_out(voice_count);
const auto voice_in_size = voice_count * sizeof(VoiceInfo::InParams);
const auto voice_out_size = voice_count * sizeof(VoiceInfo::OutParams);
if (input_header.size.voice != voice_in_size) {
LOG_ERROR(Audio, "Voices are an invalid size, expecting 0x{:X} but got 0x{:X}",
voice_in_size, input_header.size.voice);
return false;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, voice_in_size)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(voice_in.data(), in_params.data() + input_offset, voice_in_size);
input_offset += voice_in_size;
// Set all voices to not be in use
for (std::size_t i = 0; i < voice_count; i++) {
voice_context.GetInfo(i).GetInParams().in_use = false;
}
// Update our voices
for (std::size_t i = 0; i < voice_count; i++) {
auto& voice_in_params = voice_in[i];
const auto channel_count = static_cast<std::size_t>(voice_in_params.channel_count);
// Skip if it's not currently in use
if (!voice_in_params.is_in_use) {
continue;
}
// Voice states for each channel
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT> voice_states{};
ASSERT(static_cast<std::size_t>(voice_in_params.id) < voice_count);
// Grab our current voice info
auto& voice_info = voice_context.GetInfo(static_cast<std::size_t>(voice_in_params.id));
ASSERT(channel_count <= AudioCommon::MAX_CHANNEL_COUNT);
// Get all our channel voice states
for (std::size_t channel = 0; channel < channel_count; channel++) {
voice_states[channel] =
&voice_context.GetState(voice_in_params.voice_channel_resource_ids[channel]);
}
if (voice_in_params.is_new) {
// Default our values for our voice
voice_info.Initialize();
// Zero out our voice states
for (std::size_t channel = 0; channel < channel_count; channel++) {
std::memset(voice_states[channel], 0, sizeof(VoiceState));
}
}
// Update our voice
voice_info.UpdateParameters(voice_in_params, behavior_info);
// TODO(ogniK): Handle mapping errors with behavior info based on in params response
// Update our wave buffers
voice_info.UpdateWaveBuffers(voice_in_params, voice_states, behavior_info);
voice_info.WriteOutStatus(voice_out[i], voice_in_params, voice_states);
}
if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset, voice_out_size)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(out_params.data() + output_offset, voice_out.data(), voice_out_size);
output_offset += voice_out_size;
output_header.size.voice = static_cast<u32>(voice_out_size);
return true;
}
bool InfoUpdater::UpdateEffects(EffectContext& effect_context, bool is_active) {
const auto effect_count = effect_context.GetCount();
std::vector<EffectInfo::InParams> effect_in(effect_count);
std::vector<EffectInfo::OutParams> effect_out(effect_count);
const auto total_effect_in = effect_count * sizeof(EffectInfo::InParams);
const auto total_effect_out = effect_count * sizeof(EffectInfo::OutParams);
if (input_header.size.effect != total_effect_in) {
LOG_ERROR(Audio, "Effects are an invalid size, expecting 0x{:X} but got 0x{:X}",
total_effect_in, input_header.size.effect);
return false;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, total_effect_in)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(effect_in.data(), in_params.data() + input_offset, total_effect_in);
input_offset += total_effect_in;
// Update effects
for (std::size_t i = 0; i < effect_count; i++) {
auto* info = effect_context.GetInfo(i);
if (effect_in[i].type != info->GetType()) {
info = effect_context.RetargetEffect(i, effect_in[i].type);
}
info->Update(effect_in[i]);
if ((!is_active && info->GetUsage() != UsageState::Initialized) ||
info->GetUsage() == UsageState::Stopped) {
effect_out[i].status = UsageStatus::Removed;
} else {
effect_out[i].status = UsageStatus::Used;
}
}
if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset, total_effect_out)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(out_params.data() + output_offset, effect_out.data(), total_effect_out);
output_offset += total_effect_out;
output_header.size.effect = static_cast<u32>(total_effect_out);
return true;
}
bool InfoUpdater::UpdateSplitterInfo(SplitterContext& splitter_context) {
std::size_t start_offset = input_offset;
std::size_t bytes_read{};
// Update splitter context
if (!splitter_context.Update(in_params, input_offset, bytes_read)) {
LOG_ERROR(Audio, "Failed to update splitter context!");
return false;
}
const auto consumed = input_offset - start_offset;
if (input_header.size.splitter != consumed) {
LOG_ERROR(Audio, "Splitters is an invalid size, expecting 0x{:X} but got 0x{:X}",
bytes_read, input_header.size.splitter);
return false;
}
return true;
}
Result InfoUpdater::UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
SplitterContext& splitter_context, EffectContext& effect_context) {
std::vector<MixInfo::InParams> mix_in_params;
if (!behavior_info.IsMixInParameterDirtyOnlyUpdateSupported()) {
// If we're not dirty, get ALL mix in parameters
const auto context_mix_count = mix_context.GetCount();
const auto total_mix_in = context_mix_count * sizeof(MixInfo::InParams);
if (input_header.size.mixer != total_mix_in) {
LOG_ERROR(Audio, "Mixer is an invalid size, expecting 0x{:X} but got 0x{:X}",
total_mix_in, input_header.size.mixer);
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, total_mix_in)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
mix_in_params.resize(context_mix_count);
std::memcpy(mix_in_params.data(), in_params.data() + input_offset, total_mix_in);
input_offset += total_mix_in;
} else {
// Only update the "dirty" mixes
MixInfo::DirtyHeader dirty_header{};
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset,
sizeof(MixInfo::DirtyHeader))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
std::memcpy(&dirty_header, in_params.data() + input_offset, sizeof(MixInfo::DirtyHeader));
input_offset += sizeof(MixInfo::DirtyHeader);
const auto total_mix_in =
dirty_header.mixer_count * sizeof(MixInfo::InParams) + sizeof(MixInfo::DirtyHeader);
if (input_header.size.mixer != total_mix_in) {
LOG_ERROR(Audio, "Mixer is an invalid size, expecting 0x{:X} but got 0x{:X}",
total_mix_in, input_header.size.mixer);
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (dirty_header.mixer_count != 0) {
mix_in_params.resize(dirty_header.mixer_count);
std::memcpy(mix_in_params.data(), in_params.data() + input_offset,
mix_in_params.size() * sizeof(MixInfo::InParams));
input_offset += mix_in_params.size() * sizeof(MixInfo::InParams);
}
}
// Get our total input count
const auto mix_count = mix_in_params.size();
if (!behavior_info.IsMixInParameterDirtyOnlyUpdateSupported()) {
// Only verify our buffer count if we're not dirty
std::size_t total_buffer_count{};
for (std::size_t i = 0; i < mix_count; i++) {
const auto& in = mix_in_params[i];
total_buffer_count += in.buffer_count;
if (static_cast<std::size_t>(in.dest_mix_id) > mix_count &&
in.dest_mix_id != AudioCommon::NO_MIX && in.mix_id != AudioCommon::FINAL_MIX) {
LOG_ERROR(
Audio,
"Invalid mix destination, mix_id={:X}, dest_mix_id={:X}, mix_buffer_count={:X}",
in.mix_id, in.dest_mix_id, mix_buffer_count);
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
}
if (total_buffer_count > mix_buffer_count) {
LOG_ERROR(Audio,
"Too many mix buffers used! mix_buffer_count={:X}, requesting_buffers={:X}",
mix_buffer_count, total_buffer_count);
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
}
if (mix_buffer_count == 0) {
LOG_ERROR(Audio, "No mix buffers!");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
bool should_sort = false;
for (std::size_t i = 0; i < mix_count; i++) {
const auto& mix_in = mix_in_params[i];
std::size_t target_mix{};
if (behavior_info.IsMixInParameterDirtyOnlyUpdateSupported()) {
target_mix = mix_in.mix_id;
} else {
            // Games without dirty-only update support just use i instead of the actual mix_id
target_mix = i;
}
auto& mix_info = mix_context.GetInfo(target_mix);
auto& mix_info_params = mix_info.GetInParams();
if (mix_info_params.in_use != mix_in.in_use) {
mix_info_params.in_use = mix_in.in_use;
mix_info.ResetEffectProcessingOrder();
should_sort = true;
}
if (mix_in.in_use) {
should_sort |= mix_info.Update(mix_context.GetEdgeMatrix(), mix_in, behavior_info,
splitter_context, effect_context);
}
}
if (should_sort && behavior_info.IsSplitterSupported()) {
// Sort our splitter data
if (!mix_context.TsortInfo(splitter_context)) {
return AudioCommon::Audren::ERR_SPLITTER_SORT_FAILED;
}
}
    // TODO(ogniK): Sort when splitter is supported
return ResultSuccess;
}
bool InfoUpdater::UpdateSinks(SinkContext& sink_context) {
const auto sink_count = sink_context.GetCount();
std::vector<SinkInfo::InParams> sink_in_params(sink_count);
const auto total_sink_in = sink_count * sizeof(SinkInfo::InParams);
if (input_header.size.sink != total_sink_in) {
LOG_ERROR(Audio, "Sinks are an invalid size, expecting 0x{:X} but got 0x{:X}",
                  total_sink_in, input_header.size.sink);
return false;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, total_sink_in)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(sink_in_params.data(), in_params.data() + input_offset, total_sink_in);
input_offset += total_sink_in;
// TODO(ogniK): Properly update sinks
if (!sink_in_params.empty()) {
sink_context.UpdateMainSink(sink_in_params[0]);
}
output_header.size.sink = static_cast<u32>(0x20 * sink_count);
output_offset += 0x20 * sink_count;
return true;
}
bool InfoUpdater::UpdatePerformanceBuffer() {
output_header.size.performance = 0x10;
output_offset += 0x10;
return true;
}
bool InfoUpdater::UpdateErrorInfo([[maybe_unused]] BehaviorInfo& in_behavior_info) {
    const auto total_behavior_info_out = sizeof(BehaviorInfo::OutParams);
    if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset, total_behavior_info_out)) {
        LOG_ERROR(Audio, "Buffer is an invalid size!");
        return false;
    }
    BehaviorInfo::OutParams behavior_info_out{};
    behavior_info.CopyErrorInfo(behavior_info_out);
    std::memcpy(out_params.data() + output_offset, &behavior_info_out, total_behavior_info_out);
    output_offset += total_behavior_info_out;
    output_header.size.behavior = total_behavior_info_out;
return true;
}
struct RendererInfo {
    u64_le elapsed_frame_count{};
INSERT_PADDING_WORDS(2);
};
static_assert(sizeof(RendererInfo) == 0x10, "RendererInfo is an invalid size");
bool InfoUpdater::UpdateRendererInfo(std::size_t elapsed_frame_count) {
const auto total_renderer_info_out = sizeof(RendererInfo);
if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset, total_renderer_info_out)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
RendererInfo out{};
    out.elapsed_frame_count = elapsed_frame_count;
std::memcpy(out_params.data() + output_offset, &out, total_renderer_info_out);
output_offset += total_renderer_info_out;
output_header.size.render_info = total_renderer_info_out;
return true;
}
bool InfoUpdater::CheckConsumedSize() const {
if (output_offset != out_params.size()) {
LOG_ERROR(Audio, "Output is not consumed! Consumed {}, but requires {}. {} bytes remaining",
output_offset, out_params.size(), out_params.size() - output_offset);
return false;
}
/*if (input_offset != in_params.size()) {
LOG_ERROR(Audio, "Input is not consumed!");
return false;
}*/
return true;
}
bool InfoUpdater::WriteOutputHeader() {
if (!AudioCommon::CanConsumeBuffer(out_params.size(), 0,
sizeof(AudioCommon::UpdateDataHeader))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
output_header.revision = AudioCommon::CURRENT_PROCESS_REVISION;
const auto& sz = output_header.size;
output_header.total_size += sz.behavior + sz.memory_pool + sz.voice +
sz.voice_channel_resource + sz.effect + sz.mixer + sz.sink +
sz.performance + sz.splitter + sz.render_info;
std::memcpy(out_params.data(), &output_header, sizeof(AudioCommon::UpdateDataHeader));
return true;
}
} // namespace AudioCore
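
Every Update* routine above follows the same consume-and-advance pattern: bounds-check the remaining bytes, memcpy into a typed struct, then move the offset forward. A minimal standalone sketch of that pattern (a hypothetical helper, not the original AudioCommon::CanConsumeBuffer):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

template <typename T>
bool Consume(const std::vector<uint8_t>& in, std::size_t& offset, T& out) {
    if (offset > in.size() || in.size() - offset < sizeof(T)) {
        return false; // not enough bytes left in the update buffer
    }
    std::memcpy(&out, in.data() + offset, sizeof(T));
    offset += sizeof(T);
    return true;
}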

View File

@@ -1,57 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <vector>
#include "audio_core/common.h"
#include "common/common_types.h"
namespace AudioCore {
class BehaviorInfo;
class ServerMemoryPoolInfo;
class VoiceContext;
class EffectContext;
class MixContext;
class SinkContext;
class SplitterContext;
class InfoUpdater {
public:
// TODO(ogniK): Pass process handle when we support it
InfoUpdater(const std::vector<u8>& in_params_, std::vector<u8>& out_params_,
BehaviorInfo& behavior_info_);
~InfoUpdater();
bool UpdateBehaviorInfo(BehaviorInfo& in_behavior_info);
bool UpdateMemoryPools(std::vector<ServerMemoryPoolInfo>& memory_pool_info);
bool UpdateVoiceChannelResources(VoiceContext& voice_context);
bool UpdateVoices(VoiceContext& voice_context,
std::vector<ServerMemoryPoolInfo>& memory_pool_info,
VAddr audio_codec_dsp_addr);
bool UpdateEffects(EffectContext& effect_context, bool is_active);
bool UpdateSplitterInfo(SplitterContext& splitter_context);
Result UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
SplitterContext& splitter_context, EffectContext& effect_context);
bool UpdateSinks(SinkContext& sink_context);
bool UpdatePerformanceBuffer();
bool UpdateErrorInfo(BehaviorInfo& in_behavior_info);
bool UpdateRendererInfo(std::size_t elapsed_frame_count);
bool CheckConsumedSize() const;
bool WriteOutputHeader();
private:
const std::vector<u8>& in_params;
std::vector<u8>& out_params;
BehaviorInfo& behavior_info;
AudioCommon::UpdateDataHeader input_header{};
AudioCommon::UpdateDataHeader output_header{};
std::size_t input_offset{sizeof(AudioCommon::UpdateDataHeader)};
std::size_t output_offset{sizeof(AudioCommon::UpdateDataHeader)};
};
} // namespace AudioCore

View File

@@ -1,60 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/memory_pool.h"
#include "common/logging/log.h"
namespace AudioCore {
ServerMemoryPoolInfo::ServerMemoryPoolInfo() = default;
ServerMemoryPoolInfo::~ServerMemoryPoolInfo() = default;
bool ServerMemoryPoolInfo::Update(const InParams& in_params, OutParams& out_params) {
// Our state does not need to be changed
if (in_params.state != State::RequestAttach && in_params.state != State::RequestDetach) {
return true;
}
// Address or size is null
if (in_params.address == 0 || in_params.size == 0) {
LOG_ERROR(Audio, "Memory pool address or size is zero! address={:X}, size={:X}",
in_params.address, in_params.size);
return false;
}
// Address or size is not aligned
if ((in_params.address % 0x1000) != 0 || (in_params.size % 0x1000) != 0) {
LOG_ERROR(Audio, "Memory pool address or size is not aligned! address={:X}, size={:X}",
in_params.address, in_params.size);
return false;
}
if (in_params.state == State::RequestAttach) {
cpu_address = in_params.address;
size = in_params.size;
used = true;
out_params.state = State::Attached;
} else {
// Unexpected address
if (cpu_address != in_params.address) {
LOG_ERROR(Audio, "Memory pool address differs! Expecting {:X} but address is {:X}",
cpu_address, in_params.address);
return false;
}
if (size != in_params.size) {
LOG_ERROR(Audio, "Memory pool size differs! Expecting {:X} but size is {:X}", size,
in_params.size);
return false;
}
cpu_address = 0;
size = 0;
used = false;
out_params.state = State::Detached;
}
return true;
}
} // namespace AudioCore
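
The attach/detach handling above reduces to a small state machine; the following standalone sketch shows just the state transition, without the address and alignment checks (hypothetical types, with values matching the State enum declared in the header).

#include <cstdint>

enum class PoolState : uint32_t {
    RequestDetach = 2,
    Detached = 3,
    RequestAttach = 4,
    Attached = 5,
};

PoolState Transition(PoolState requested, PoolState current) {
    switch (requested) {
    case PoolState::RequestAttach:
        return PoolState::Attached;
    case PoolState::RequestDetach:
        return PoolState::Detached;
    default:
        return current; // any other request leaves the pool untouched
    }
}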

View File

@@ -1,51 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
namespace AudioCore {
class ServerMemoryPoolInfo {
public:
ServerMemoryPoolInfo();
~ServerMemoryPoolInfo();
enum class State : u32_le {
Invalid = 0x0,
        Acquired = 0x1,
RequestDetach = 0x2,
Detached = 0x3,
RequestAttach = 0x4,
Attached = 0x5,
Released = 0x6,
};
struct InParams {
u64_le address{};
u64_le size{};
State state{};
INSERT_PADDING_WORDS(3);
};
static_assert(sizeof(InParams) == 0x20, "InParams are an invalid size");
struct OutParams {
State state{};
INSERT_PADDING_WORDS(3);
};
static_assert(sizeof(OutParams) == 0x10, "OutParams are an invalid size");
bool Update(const InParams& in_params, OutParams& out_params);
private:
    // There's another entry here, the DSP address; since we're not talking to the DSP, we can
    // just reuse the address provided by the guest without needing to remap it.
u64_le cpu_address{};
u64_le size{};
bool used{};
};
} // namespace AudioCore

View File

@@ -1,297 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include "audio_core/behavior_info.h"
#include "audio_core/common.h"
#include "audio_core/effect_context.h"
#include "audio_core/mix_context.h"
#include "audio_core/splitter_context.h"
namespace AudioCore {
MixContext::MixContext() = default;
MixContext::~MixContext() = default;
void MixContext::Initialize(const BehaviorInfo& behavior_info, std::size_t mix_count,
std::size_t effect_count) {
info_count = mix_count;
infos.resize(info_count);
auto& final_mix = GetInfo(AudioCommon::FINAL_MIX);
final_mix.GetInParams().mix_id = AudioCommon::FINAL_MIX;
sorted_info.reserve(infos.size());
for (auto& info : infos) {
sorted_info.push_back(&info);
}
for (auto& info : infos) {
info.SetEffectCount(effect_count);
}
// Only initialize our edge matrix and node states if splitters are supported
if (behavior_info.IsSplitterSupported()) {
node_states.Initialize(mix_count);
edge_matrix.Initialize(mix_count);
}
}
void MixContext::UpdateDistancesFromFinalMix() {
// Set all distances to be invalid
for (std::size_t i = 0; i < info_count; i++) {
GetInfo(i).GetInParams().final_mix_distance = AudioCommon::NO_FINAL_MIX;
}
for (std::size_t i = 0; i < info_count; i++) {
auto& info = GetInfo(i);
auto& in_params = info.GetInParams();
// Populate our sorted info
sorted_info[i] = &info;
if (!in_params.in_use) {
continue;
}
auto mix_id = in_params.mix_id;
        // Declared outside the loop so the final value can be used after it
s32 distance_to_final_mix{AudioCommon::FINAL_MIX};
for (; distance_to_final_mix < static_cast<s32>(info_count); distance_to_final_mix++) {
if (mix_id == AudioCommon::FINAL_MIX) {
// If we're at the final mix, we're done
break;
} else if (mix_id == AudioCommon::NO_MIX) {
// If we have no more mix ids, we're done
distance_to_final_mix = AudioCommon::NO_FINAL_MIX;
break;
} else {
const auto& dest_mix = GetInfo(mix_id);
const auto dest_mix_distance = dest_mix.GetInParams().final_mix_distance;
if (dest_mix_distance == AudioCommon::NO_FINAL_MIX) {
// If our current mix isn't pointing to a final mix, follow through
mix_id = dest_mix.GetInParams().dest_mix_id;
} else {
// Our current mix + 1 = final distance
distance_to_final_mix = dest_mix_distance + 1;
break;
}
}
}
// If we're out of range for our distance, mark it as no final mix
if (distance_to_final_mix >= static_cast<s32>(info_count)) {
distance_to_final_mix = AudioCommon::NO_FINAL_MIX;
}
in_params.final_mix_distance = distance_to_final_mix;
}
}
void MixContext::CalcMixBufferOffset() {
s32 offset{};
for (std::size_t i = 0; i < info_count; i++) {
auto& info = GetSortedInfo(i);
auto& in_params = info.GetInParams();
if (in_params.in_use) {
// Only update if in use
in_params.buffer_offset = offset;
offset += in_params.buffer_count;
}
}
}
void MixContext::SortInfo() {
// Get the distance to the final mix
UpdateDistancesFromFinalMix();
// Sort based on the distance to the final mix
std::sort(sorted_info.begin(), sorted_info.end(),
[](const ServerMixInfo* lhs, const ServerMixInfo* rhs) {
return lhs->GetInParams().final_mix_distance >
rhs->GetInParams().final_mix_distance;
});
// Calculate the mix buffer offset
CalcMixBufferOffset();
}
bool MixContext::TsortInfo(SplitterContext& splitter_context) {
    // If we're not using splitters, just calculate the mix buffer offset
if (!splitter_context.UsingSplitter()) {
CalcMixBufferOffset();
return true;
}
// Sort our node states
if (!node_states.Tsort(edge_matrix)) {
return false;
}
// Get our sorted list
const auto sorted_list = node_states.GetIndexList();
std::size_t info_id{};
for (auto itr = sorted_list.rbegin(); itr != sorted_list.rend(); ++itr) {
// Set our sorted info
sorted_info[info_id++] = &GetInfo(*itr);
}
// Calculate the mix buffer offset
CalcMixBufferOffset();
return true;
}
std::size_t MixContext::GetCount() const {
return info_count;
}
ServerMixInfo& MixContext::GetInfo(std::size_t i) {
ASSERT(i < info_count);
return infos.at(i);
}
const ServerMixInfo& MixContext::GetInfo(std::size_t i) const {
ASSERT(i < info_count);
return infos.at(i);
}
ServerMixInfo& MixContext::GetSortedInfo(std::size_t i) {
ASSERT(i < info_count);
return *sorted_info.at(i);
}
const ServerMixInfo& MixContext::GetSortedInfo(std::size_t i) const {
ASSERT(i < info_count);
return *sorted_info.at(i);
}
ServerMixInfo& MixContext::GetFinalMixInfo() {
return infos.at(AudioCommon::FINAL_MIX);
}
const ServerMixInfo& MixContext::GetFinalMixInfo() const {
return infos.at(AudioCommon::FINAL_MIX);
}
EdgeMatrix& MixContext::GetEdgeMatrix() {
return edge_matrix;
}
const EdgeMatrix& MixContext::GetEdgeMatrix() const {
return edge_matrix;
}
ServerMixInfo::ServerMixInfo() {
Cleanup();
}
ServerMixInfo::~ServerMixInfo() = default;
const ServerMixInfo::InParams& ServerMixInfo::GetInParams() const {
return in_params;
}
ServerMixInfo::InParams& ServerMixInfo::GetInParams() {
return in_params;
}
bool ServerMixInfo::Update(EdgeMatrix& edge_matrix, const MixInfo::InParams& mix_in,
BehaviorInfo& behavior_info, SplitterContext& splitter_context,
EffectContext& effect_context) {
in_params.volume = mix_in.volume;
in_params.sample_rate = mix_in.sample_rate;
in_params.buffer_count = mix_in.buffer_count;
in_params.in_use = mix_in.in_use;
in_params.mix_id = mix_in.mix_id;
in_params.node_id = mix_in.node_id;
for (std::size_t i = 0; i < mix_in.mix_volume.size(); i++) {
std::copy(mix_in.mix_volume[i].begin(), mix_in.mix_volume[i].end(),
in_params.mix_volume[i].begin());
}
bool require_sort = false;
if (behavior_info.IsSplitterSupported()) {
require_sort = UpdateConnection(edge_matrix, mix_in, splitter_context);
} else {
in_params.dest_mix_id = mix_in.dest_mix_id;
in_params.splitter_id = AudioCommon::NO_SPLITTER;
}
ResetEffectProcessingOrder();
const auto effect_count = effect_context.GetCount();
for (std::size_t i = 0; i < effect_count; i++) {
auto* effect_info = effect_context.GetInfo(i);
if (effect_info->GetMixID() == in_params.mix_id) {
effect_processing_order[effect_info->GetProcessingOrder()] = static_cast<s32>(i);
}
}
// TODO(ogniK): Update effect processing order
return require_sort;
}
bool ServerMixInfo::HasAnyConnection() const {
return in_params.splitter_id != AudioCommon::NO_SPLITTER ||
in_params.mix_id != AudioCommon::NO_MIX;
}
void ServerMixInfo::Cleanup() {
in_params.volume = 0.0f;
in_params.sample_rate = 0;
in_params.buffer_count = 0;
in_params.in_use = false;
in_params.mix_id = AudioCommon::NO_MIX;
in_params.node_id = 0;
in_params.buffer_offset = 0;
in_params.dest_mix_id = AudioCommon::NO_MIX;
in_params.splitter_id = AudioCommon::NO_SPLITTER;
    std::memset(in_params.mix_volume.data(), 0, sizeof(in_params.mix_volume));
}
void ServerMixInfo::SetEffectCount(std::size_t count) {
effect_processing_order.resize(count);
ResetEffectProcessingOrder();
}
void ServerMixInfo::ResetEffectProcessingOrder() {
for (auto& order : effect_processing_order) {
order = AudioCommon::NO_EFFECT_ORDER;
}
}
s32 ServerMixInfo::GetEffectOrder(std::size_t i) const {
return effect_processing_order.at(i);
}
bool ServerMixInfo::UpdateConnection(EdgeMatrix& edge_matrix, const MixInfo::InParams& mix_in,
SplitterContext& splitter_context) {
// Mixes are identical
if (in_params.dest_mix_id == mix_in.dest_mix_id &&
in_params.splitter_id == mix_in.splitter_id &&
((in_params.splitter_id == AudioCommon::NO_SPLITTER) ||
!splitter_context.GetInfo(in_params.splitter_id).HasNewConnection())) {
return false;
}
// Remove current edges for mix id
edge_matrix.RemoveEdges(in_params.mix_id);
if (mix_in.dest_mix_id != AudioCommon::NO_MIX) {
// If we have a valid destination mix id, set our edge matrix
edge_matrix.Connect(in_params.mix_id, mix_in.dest_mix_id);
} else if (mix_in.splitter_id != AudioCommon::NO_SPLITTER) {
        // Walk our splitter's linked destinations and set our edges
auto& splitter_info = splitter_context.GetInfo(mix_in.splitter_id);
const auto length = splitter_info.GetLength();
for (s32 i = 0; i < length; i++) {
const auto* splitter_destination =
splitter_context.GetDestinationData(mix_in.splitter_id, i);
if (splitter_destination == nullptr) {
continue;
}
if (splitter_destination->ValidMixId()) {
edge_matrix.Connect(in_params.mix_id, splitter_destination->GetMixId());
}
}
}
in_params.dest_mix_id = mix_in.dest_mix_id;
in_params.splitter_id = mix_in.splitter_id;
return true;
}
} // namespace AudioCore
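
UpdateDistancesFromFinalMix above walks each mix's destination chain until it reaches the final mix or hits a dead end. A simplified standalone sketch of that walk (hypothetical types and constants, with the final mix assumed to be id 0 as in AudioCommon::FINAL_MIX):

#include <cstddef>
#include <vector>

constexpr int FINAL_MIX = 0;
constexpr int NO_MIX = -1;

// dest[i] holds the destination mix id of mix i.
int DistanceToFinalMix(const std::vector<int>& dest, int mix_id) {
    for (int distance = 0; distance <= static_cast<int>(dest.size()); ++distance) {
        if (mix_id == FINAL_MIX) {
            return distance; // reached the final mix
        }
        if (mix_id < 0 || static_cast<std::size_t>(mix_id) >= dest.size()) {
            return NO_MIX; // dangling or missing destination: unreachable
        }
        mix_id = dest[static_cast<std::size_t>(mix_id)];
    }
    return NO_MIX; // visited more nodes than exist, so there must be a cycle
}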

View File

@@ -1,113 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "audio_core/common.h"
#include "audio_core/splitter_context.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
namespace AudioCore {
class BehaviorInfo;
class EffectContext;
class MixInfo {
public:
struct DirtyHeader {
u32_le magic{};
u32_le mixer_count{};
INSERT_PADDING_BYTES(0x18);
};
static_assert(sizeof(DirtyHeader) == 0x20, "MixInfo::DirtyHeader is an invalid size");
struct InParams {
float_le volume{};
s32_le sample_rate{};
s32_le buffer_count{};
bool in_use{};
INSERT_PADDING_BYTES(3);
s32_le mix_id{};
s32_le effect_count{};
u32_le node_id{};
INSERT_PADDING_WORDS(2);
std::array<std::array<float_le, AudioCommon::MAX_MIX_BUFFERS>, AudioCommon::MAX_MIX_BUFFERS>
mix_volume{};
s32_le dest_mix_id{};
s32_le splitter_id{};
INSERT_PADDING_WORDS(1);
};
static_assert(sizeof(MixInfo::InParams) == 0x930, "MixInfo::InParams is an invalid size");
};
class ServerMixInfo {
public:
struct InParams {
float volume{};
s32 sample_rate{};
s32 buffer_count{};
bool in_use{};
s32 mix_id{};
u32 node_id{};
std::array<std::array<float_le, AudioCommon::MAX_MIX_BUFFERS>, AudioCommon::MAX_MIX_BUFFERS>
mix_volume{};
s32 dest_mix_id{};
s32 splitter_id{};
s32 buffer_offset{};
s32 final_mix_distance{};
};
ServerMixInfo();
~ServerMixInfo();
[[nodiscard]] const ServerMixInfo::InParams& GetInParams() const;
[[nodiscard]] ServerMixInfo::InParams& GetInParams();
bool Update(EdgeMatrix& edge_matrix, const MixInfo::InParams& mix_in,
BehaviorInfo& behavior_info, SplitterContext& splitter_context,
EffectContext& effect_context);
[[nodiscard]] bool HasAnyConnection() const;
void Cleanup();
void SetEffectCount(std::size_t count);
void ResetEffectProcessingOrder();
[[nodiscard]] s32 GetEffectOrder(std::size_t i) const;
private:
std::vector<s32> effect_processing_order;
InParams in_params{};
bool UpdateConnection(EdgeMatrix& edge_matrix, const MixInfo::InParams& mix_in,
SplitterContext& splitter_context);
};
class MixContext {
public:
MixContext();
~MixContext();
void Initialize(const BehaviorInfo& behavior_info, std::size_t mix_count,
std::size_t effect_count);
void SortInfo();
bool TsortInfo(SplitterContext& splitter_context);
[[nodiscard]] std::size_t GetCount() const;
[[nodiscard]] ServerMixInfo& GetInfo(std::size_t i);
[[nodiscard]] const ServerMixInfo& GetInfo(std::size_t i) const;
[[nodiscard]] ServerMixInfo& GetSortedInfo(std::size_t i);
[[nodiscard]] const ServerMixInfo& GetSortedInfo(std::size_t i) const;
[[nodiscard]] ServerMixInfo& GetFinalMixInfo();
[[nodiscard]] const ServerMixInfo& GetFinalMixInfo() const;
[[nodiscard]] EdgeMatrix& GetEdgeMatrix();
[[nodiscard]] const EdgeMatrix& GetEdgeMatrix() const;
private:
void CalcMixBufferOffset();
void UpdateDistancesFromFinalMix();
NodeStates node_states{};
EdgeMatrix edge_matrix{};
std::size_t info_count{};
std::vector<ServerMixInfo> infos{};
std::vector<ServerMixInfo*> sorted_info{};
};
} // namespace AudioCore
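// Rough sketch (an assumption for illustration, not MixContext's real SortInfo/TsortInfo code)
// of what topologically sorting the mix graph produces: an order in which every mix is processed
// before the mix it feeds, ending at the final mix. Kahn's algorithm over a boolean adjacency
// matrix is used here purely to show the idea.
#include <cstddef>
#include <vector>

namespace {
std::vector<std::size_t> SketchTopologicalOrder(const std::vector<std::vector<bool>>& edges) {
    const std::size_t count = edges.size();
    std::vector<std::size_t> in_degree(count, 0);
    for (std::size_t from = 0; from < count; from++) {
        for (std::size_t to = 0; to < count; to++) {
            if (edges[from][to]) {
                in_degree[to]++;
            }
        }
    }
    std::vector<std::size_t> ready;
    for (std::size_t i = 0; i < count; i++) {
        if (in_degree[i] == 0) {
            ready.push_back(i);
        }
    }
    std::vector<std::size_t> order;
    while (!ready.empty()) {
        const std::size_t node = ready.back();
        ready.pop_back();
        order.push_back(node);
        for (std::size_t to = 0; to < count; to++) {
            if (edges[node][to] && --in_degree[to] == 0) {
                ready.push_back(to);
            }
        }
    }
    return order; // order.size() < count would indicate a cycle in the mix graph.
}
} // namespace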

View File

@@ -1,32 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "audio_core/sink.h"
namespace AudioCore {
class NullSink final : public Sink {
public:
explicit NullSink(std::string_view) {}
~NullSink() override = default;
SinkStream& AcquireSinkStream(u32 /*sample_rate*/, u32 /*num_channels*/,
const std::string& /*name*/) override {
return null_sink_stream;
}
private:
struct NullSinkStreamImpl final : SinkStream {
void EnqueueSamples(u32 /*num_channels*/, const std::vector<s16>& /*samples*/) override {}
std::size_t SamplesInQueue(u32 /*num_channels*/) const override {
return 0;
}
void Flush() override {}
} null_sink_stream;
};
} // namespace AudioCore

View File

@@ -1,93 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/effect/effect_aux_info.h"
namespace AudioCore::AudioRenderer {
void AuxInfo::Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion1& in_params,
const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion1*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
std::memcpy(params, in_specific, sizeof(ParameterVersion1));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
if (buffer_unmapped || in_params.is_new) {
const bool send_unmapped{!pool_mapper.TryAttachBuffer(
error_info, workbuffers[0], in_specific->send_buffer_info_address,
sizeof(AuxBufferInfo) + in_specific->count_max * sizeof(s32))};
const bool return_unmapped{!pool_mapper.TryAttachBuffer(
error_info, workbuffers[1], in_specific->return_buffer_info_address,
sizeof(AuxBufferInfo) + in_specific->count_max * sizeof(s32))};
buffer_unmapped = send_unmapped || return_unmapped;
if (!buffer_unmapped) {
auto send{workbuffers[0].GetReference(false)};
send_buffer_info = send + sizeof(AuxInfoDsp);
send_buffer = send + sizeof(AuxBufferInfo);
auto ret{workbuffers[1].GetReference(false)};
return_buffer_info = ret + sizeof(AuxInfoDsp);
return_buffer = ret + sizeof(AuxBufferInfo);
}
} else {
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
}
void AuxInfo::Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion2& in_params,
const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion2*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion2*>(parameter.data())};
std::memcpy(params, in_specific, sizeof(ParameterVersion2));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
if (buffer_unmapped || in_params.is_new) {
const bool send_unmapped{!pool_mapper.TryAttachBuffer(
error_info, workbuffers[0], params->send_buffer_info_address,
sizeof(AuxBufferInfo) + params->count_max * sizeof(s32))};
const bool return_unmapped{!pool_mapper.TryAttachBuffer(
error_info, workbuffers[1], params->return_buffer_info_address,
sizeof(AuxBufferInfo) + params->count_max * sizeof(s32))};
buffer_unmapped = send_unmapped || return_unmapped;
if (!buffer_unmapped) {
auto send{workbuffers[0].GetReference(false)};
send_buffer_info = send + sizeof(AuxInfoDsp);
send_buffer = send + sizeof(AuxBufferInfo);
auto ret{workbuffers[1].GetReference(false)};
return_buffer_info = ret + sizeof(AuxInfoDsp);
return_buffer = ret + sizeof(AuxBufferInfo);
}
} else {
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
}
void AuxInfo::UpdateForCommandGeneration() {
if (enabled) {
usage_state = UsageState::Enabled;
} else {
usage_state = UsageState::Disabled;
}
}
void AuxInfo::InitializeResultState(EffectResultState& result_state) {}
void AuxInfo::UpdateResultState(EffectResultState& cpu_state, EffectResultState& dsp_state) {}
CpuAddr AuxInfo::GetWorkbuffer(s32 index) {
return workbuffers[index].GetReference(true);
}
} // namespace AudioCore::AudioRenderer
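// Small sketch of the workbuffer size requested when attaching the aux send/return buffers
// above: a fixed AuxBufferInfo header followed by count_max 32-bit samples. The 0x80 header size
// mirrors the static_assert in the matching header; the count_max value in the static_assert is
// just an example.
#include <cstddef>
#include <cstdint>

namespace {
constexpr std::size_t SketchAuxWorkbufferSize(std::uint32_t count_max) {
    constexpr std::size_t aux_buffer_info_size = 0x80; // sizeof(AuxInfo::AuxBufferInfo)
    return aux_buffer_info_size + count_max * sizeof(std::int32_t);
}
static_assert(SketchAuxWorkbufferSize(240) == 0x80 + 240 * 4);
} // namespace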

View File

@@ -1,123 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include "audio_core/common/common.h"
#include "audio_core/renderer/effect/effect_info_base.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
/**
* Auxiliary Buffer used for Aux commands.
* Send and return buffers are available (names from the game's perspective).
* Send is read by the host, containing a buffer of samples to be used for whatever purpose.
* Return is written by the host, writing a mix buffer back to the game.
* This allows the game to use pre-processed samples, skipping the rest of the render processing,
* and to examine or modify what the audio renderer has generated.
*/
class AuxInfo : public EffectInfoBase {
public:
struct ParameterVersion1 {
/* 0x00 */ std::array<s8, MaxMixBuffers> inputs;
/* 0x18 */ std::array<s8, MaxMixBuffers> outputs;
/* 0x30 */ u32 mix_buffer_count;
/* 0x34 */ u32 sample_rate;
/* 0x38 */ u32 count_max;
/* 0x3C */ u32 mix_buffer_count_max;
/* 0x40 */ CpuAddr send_buffer_info_address;
/* 0x48 */ CpuAddr send_buffer_address;
/* 0x50 */ CpuAddr return_buffer_info_address;
/* 0x58 */ CpuAddr return_buffer_address;
/* 0x60 */ u32 mix_buffer_sample_size;
/* 0x64 */ u32 sample_count;
/* 0x68 */ u32 mix_buffer_sample_count;
};
static_assert(sizeof(ParameterVersion1) <= sizeof(EffectInfoBase::InParameterVersion1),
"AuxInfo::ParameterVersion1 has the wrong size!");
struct ParameterVersion2 {
/* 0x00 */ std::array<s8, MaxMixBuffers> inputs;
/* 0x18 */ std::array<s8, MaxMixBuffers> outputs;
/* 0x30 */ u32 mix_buffer_count;
/* 0x34 */ u32 sample_rate;
/* 0x38 */ u32 count_max;
/* 0x3C */ u32 mix_buffer_count_max;
/* 0x40 */ CpuAddr send_buffer_info_address;
/* 0x48 */ CpuAddr send_buffer_address;
/* 0x50 */ CpuAddr return_buffer_info_address;
/* 0x58 */ CpuAddr return_buffer_address;
/* 0x60 */ u32 mix_buffer_sample_size;
/* 0x64 */ u32 sample_count;
/* 0x68 */ u32 mix_buffer_sample_count;
};
static_assert(sizeof(ParameterVersion2) <= sizeof(EffectInfoBase::InParameterVersion2),
"AuxInfo::ParameterVersion2 has the wrong size!");
struct AuxInfoDsp {
/* 0x00 */ u32 read_offset;
/* 0x04 */ u32 write_offset;
/* 0x08 */ u32 lost_sample_count;
/* 0x0C */ u32 total_sample_count;
/* 0x10 */ char unk10[0x30];
};
static_assert(sizeof(AuxInfoDsp) == 0x40, "AuxInfo::AuxInfoDsp has the wrong size!");
struct AuxBufferInfo {
/* 0x00 */ AuxInfoDsp cpu_info;
/* 0x40 */ AuxInfoDsp dsp_info;
};
static_assert(sizeof(AuxBufferInfo) == 0x80, "AuxInfo::AuxBufferInfo has the wrong size!");
/**
* Update the info with new parameters, version 1.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion1& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info with new parameters, version 2.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion2& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info after command generation. Usually only changes its state.
*/
void UpdateForCommandGeneration() override;
/**
* Initialize a new result state. Version 2 only, unused.
*
* @param result_state - Result state to initialize.
*/
void InitializeResultState(EffectResultState& result_state) override;
/**
* Update the host-side state with the ADSP-side state. Version 2 only, unused.
*
* @param cpu_state - Host-side result state to update.
* @param dsp_state - AudioRenderer-side result state to update from.
*/
void UpdateResultState(EffectResultState& cpu_state, EffectResultState& dsp_state) override;
/**
* Get a workbuffer assigned to this effect with the given index.
*
* @param index - Workbuffer index.
* @return Address of the buffer.
*/
CpuAddr GetWorkbuffer(s32 index) override;
};
} // namespace AudioCore::AudioRenderer
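// Illustrative sketch of consuming a shared aux buffer with an AuxInfoDsp-style read offset.
// Only the wrap-around arithmetic is the point; the function, its parameters, and the assumption
// of a non-empty shared buffer are illustrative, not the renderer's actual aux command code.
#include <cstddef>
#include <cstdint>
#include <vector>

namespace {
void SketchReadAux(const std::vector<std::int32_t>& shared, std::uint32_t& read_offset,
                   std::int32_t* out, std::size_t sample_count) {
    const std::size_t capacity = shared.size();
    for (std::size_t i = 0; i < sample_count; i++) {
        out[i] = shared[read_offset];
        read_offset = static_cast<std::uint32_t>((read_offset + 1) % capacity);
    }
}
} // namespace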

View File

@@ -1,52 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/effect/effect_biquad_filter_info.h"
namespace AudioCore::AudioRenderer {
void BiquadFilterInfo::Update(BehaviorInfo::ErrorInfo& error_info,
const InParameterVersion1& in_params, const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion1*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
std::memcpy(params, in_specific, sizeof(ParameterVersion1));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
void BiquadFilterInfo::Update(BehaviorInfo::ErrorInfo& error_info,
const InParameterVersion2& in_params, const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion2*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion2*>(parameter.data())};
std::memcpy(params, in_specific, sizeof(ParameterVersion2));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
void BiquadFilterInfo::UpdateForCommandGeneration() {
if (enabled) {
usage_state = UsageState::Enabled;
} else {
usage_state = UsageState::Disabled;
}
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
params->state = ParameterState::Updated;
}
void BiquadFilterInfo::InitializeResultState(EffectResultState& result_state) {}
void BiquadFilterInfo::UpdateResultState(EffectResultState& cpu_state,
EffectResultState& dsp_state) {}
} // namespace AudioCore::AudioRenderer

View File

@@ -1,79 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include "audio_core/common/common.h"
#include "audio_core/renderer/effect/effect_info_base.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
class BiquadFilterInfo : public EffectInfoBase {
public:
struct ParameterVersion1 {
/* 0x00 */ std::array<s8, MaxChannels> inputs;
/* 0x06 */ std::array<s8, MaxChannels> outputs;
/* 0x0C */ std::array<s16, 3> b;
/* 0x12 */ std::array<s16, 2> a;
/* 0x16 */ s8 channel_count;
/* 0x17 */ ParameterState state;
};
static_assert(sizeof(ParameterVersion1) <= sizeof(EffectInfoBase::InParameterVersion1),
"BiquadFilterInfo::ParameterVersion1 has the wrong size!");
struct ParameterVersion2 {
/* 0x00 */ std::array<s8, MaxChannels> inputs;
/* 0x06 */ std::array<s8, MaxChannels> outputs;
/* 0x0C */ std::array<s16, 3> b;
/* 0x12 */ std::array<s16, 2> a;
/* 0x16 */ s8 channel_count;
/* 0x17 */ ParameterState state;
};
static_assert(sizeof(ParameterVersion2) <= sizeof(EffectInfoBase::InParameterVersion2),
"BiquadFilterInfo::ParameterVersion2 has the wrong size!");
/**
* Update the info with new parameters, version 1.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion1& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info with new parameters, version 2.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion2& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info after command generation. Usually only changes its state.
*/
void UpdateForCommandGeneration() override;
/**
* Initialize a new result state. Version 2 only, unused.
*
* @param result_state - Result state to initialize.
*/
void InitializeResultState(EffectResultState& result_state) override;
/**
* Update the host-side state with the ADSP-side state. Version 2 only, unused.
*
* @param cpu_state - Host-side result state to update.
* @param dsp_state - AudioRenderer-side result state to update from.
*/
void UpdateResultState(EffectResultState& cpu_state, EffectResultState& dsp_state) override;
};
} // namespace AudioCore::AudioRenderer
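// Minimal sketch of stepping a direct-form-1 biquad with s16 coefficient arrays shaped like the
// b/a members above. Treating the coefficients as signed Q14 fixed point is an assumption made
// for illustration; only the difference equation is the point:
//   y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]
#include <array>
#include <cstdint>

namespace {
std::int32_t SketchBiquadStep(const std::array<std::int16_t, 3>& b,
                              const std::array<std::int16_t, 2>& a,
                              std::array<std::int32_t, 2>& x_hist,
                              std::array<std::int32_t, 2>& y_hist, std::int32_t x) {
    const std::int64_t acc = static_cast<std::int64_t>(b[0]) * x +
                             static_cast<std::int64_t>(b[1]) * x_hist[0] +
                             static_cast<std::int64_t>(b[2]) * x_hist[1] -
                             static_cast<std::int64_t>(a[0]) * y_hist[0] -
                             static_cast<std::int64_t>(a[1]) * y_hist[1];
    const std::int32_t y = static_cast<std::int32_t>(acc >> 14); // undo the assumed Q14 scale
    x_hist[1] = x_hist[0];
    x_hist[0] = x;
    y_hist[1] = y_hist[0];
    y_hist[0] = y;
    return y;
}
} // namespace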

View File

@@ -1,49 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/effect/effect_buffer_mixer_info.h"
namespace AudioCore::AudioRenderer {
void BufferMixerInfo::Update(BehaviorInfo::ErrorInfo& error_info,
const InParameterVersion1& in_params, const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion1*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
std::memcpy(params, in_specific, sizeof(ParameterVersion1));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
void BufferMixerInfo::Update(BehaviorInfo::ErrorInfo& error_info,
const InParameterVersion2& in_params, const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion2*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion2*>(parameter.data())};
std::memcpy(params, in_specific, sizeof(ParameterVersion2));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
void BufferMixerInfo::UpdateForCommandGeneration() {
if (enabled) {
usage_state = UsageState::Enabled;
} else {
usage_state = UsageState::Disabled;
}
}
void BufferMixerInfo::InitializeResultState(EffectResultState& result_state) {}
void BufferMixerInfo::UpdateResultState(EffectResultState& cpu_state,
EffectResultState& dsp_state) {}
} // namespace AudioCore::AudioRenderer

View File

@@ -1,75 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include "audio_core/common/common.h"
#include "audio_core/renderer/effect/effect_info_base.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
class BufferMixerInfo : public EffectInfoBase {
public:
struct ParameterVersion1 {
/* 0x00 */ std::array<s8, MaxMixBuffers> inputs;
/* 0x18 */ std::array<s8, MaxMixBuffers> outputs;
/* 0x30 */ std::array<f32, MaxMixBuffers> volumes;
/* 0x90 */ u32 mix_count;
};
static_assert(sizeof(ParameterVersion1) <= sizeof(EffectInfoBase::InParameterVersion1),
"BufferMixerInfo::ParameterVersion1 has the wrong size!");
struct ParameterVersion2 {
/* 0x00 */ std::array<s8, MaxMixBuffers> inputs;
/* 0x18 */ std::array<s8, MaxMixBuffers> outputs;
/* 0x30 */ std::array<f32, MaxMixBuffers> volumes;
/* 0x90 */ u32 mix_count;
};
static_assert(sizeof(ParameterVersion2) <= sizeof(EffectInfoBase::InParameterVersion2),
"BufferMixerInfo::ParameterVersion2 has the wrong size!");
/**
* Update the info with new parameters, version 1.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion1& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info with new parameters, version 2.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion2& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info after command generation. Usually only changes its state.
*/
void UpdateForCommandGeneration() override;
/**
* Initialize a new result state. Version 2 only, unused.
*
* @param result_state - Result state to initialize.
*/
void InitializeResultState(EffectResultState& result_state) override;
/**
* Update the host-side state with the ADSP-side state. Version 2 only, unused.
*
* @param cpu_state - Host-side result state to update.
* @param dsp_state - AudioRenderer-side result state to update from.
*/
void UpdateResultState(EffectResultState& cpu_state, EffectResultState& dsp_state) override;
};
} // namespace AudioCore::AudioRenderer

View File

@@ -1,82 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/effect/effect_aux_info.h"
#include "audio_core/renderer/effect/effect_capture_info.h"
namespace AudioCore::AudioRenderer {
void CaptureInfo::Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion1& in_params,
const PoolMapper& pool_mapper) {
auto in_specific{
reinterpret_cast<const AuxInfo::ParameterVersion1*>(in_params.specific.data())};
auto params{reinterpret_cast<AuxInfo::ParameterVersion1*>(parameter.data())};
std::memcpy(params, in_specific, sizeof(AuxInfo::ParameterVersion1));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
if (buffer_unmapped || in_params.is_new) {
buffer_unmapped = !pool_mapper.TryAttachBuffer(
error_info, workbuffers[0], in_specific->send_buffer_info_address,
in_specific->count_max * sizeof(s32) + sizeof(AuxInfo::AuxBufferInfo));
if (!buffer_unmapped) {
const auto send_address{workbuffers[0].GetReference(false)};
send_buffer_info = send_address + sizeof(AuxInfo::AuxInfoDsp);
send_buffer = send_address + sizeof(AuxInfo::AuxBufferInfo);
return_buffer_info = 0;
return_buffer = 0;
}
} else {
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
}
void CaptureInfo::Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion2& in_params,
const PoolMapper& pool_mapper) {
auto in_specific{
reinterpret_cast<const AuxInfo::ParameterVersion2*>(in_params.specific.data())};
auto params{reinterpret_cast<AuxInfo::ParameterVersion2*>(parameter.data())};
std::memcpy(params, in_specific, sizeof(AuxInfo::ParameterVersion2));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
if (buffer_unmapped || in_params.is_new) {
buffer_unmapped = !pool_mapper.TryAttachBuffer(
error_info, workbuffers[0], params->send_buffer_info_address,
params->count_max * sizeof(s32) + sizeof(AuxInfo::AuxBufferInfo));
if (!buffer_unmapped) {
const auto send_address{workbuffers[0].GetReference(false)};
send_buffer_info = send_address + sizeof(AuxInfo::AuxInfoDsp);
send_buffer = send_address + sizeof(AuxInfo::AuxBufferInfo);
return_buffer_info = 0;
return_buffer = 0;
}
} else {
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
}
void CaptureInfo::UpdateForCommandGeneration() {
if (enabled) {
usage_state = UsageState::Enabled;
} else {
usage_state = UsageState::Disabled;
}
}
void CaptureInfo::InitializeResultState(EffectResultState& result_state) {}
void CaptureInfo::UpdateResultState(EffectResultState& cpu_state, EffectResultState& dsp_state) {}
CpuAddr CaptureInfo::GetWorkbuffer(s32 index) {
return workbuffers[index].GetReference(true);
}
} // namespace AudioCore::AudioRenderer

View File

@@ -1,65 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include "audio_core/common/common.h"
#include "audio_core/renderer/effect/effect_info_base.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
class CaptureInfo : public EffectInfoBase {
public:
/**
* Update the info with new parameters, version 1.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion1& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info with new parameters, version 2.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion2& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info after command generation. Usually only changes its state.
*/
void UpdateForCommandGeneration() override;
/**
* Initialize a new result state. Version 2 only, unused.
*
* @param result_state - Result state to initialize.
*/
void InitializeResultState(EffectResultState& result_state) override;
/**
* Update the host-side state with the ADSP-side state. Version 2 only, unused.
*
* @param cpu_state - Host-side result state to update.
* @param dsp_state - AudioRenderer-side result state to update from.
*/
void UpdateResultState(EffectResultState& cpu_state, EffectResultState& dsp_state) override;
/**
* Get a workbuffer assigned to this effect with the given index.
*
* @param index - Workbuffer index.
* @return Address of the buffer.
*/
CpuAddr GetWorkbuffer(s32 index) override;
};
} // namespace AudioCore::AudioRenderer

View File

@@ -1,93 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/effect/effect_delay_info.h"
namespace AudioCore::AudioRenderer {
void DelayInfo::Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion1& in_params,
const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion1*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
if (IsChannelCountValid(in_specific->channel_count_max)) {
const auto old_state{params->state};
std::memcpy(params, in_specific, sizeof(ParameterVersion1));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
if (!IsChannelCountValid(in_specific->channel_count)) {
params->channel_count = params->channel_count_max;
}
if (!IsChannelCountValid(in_specific->channel_count) ||
old_state != ParameterState::Updated) {
params->state = old_state;
}
if (buffer_unmapped || in_params.is_new) {
usage_state = UsageState::New;
params->state = ParameterState::Initialized;
buffer_unmapped = !pool_mapper.TryAttachBuffer(
error_info, workbuffers[0], in_params.workbuffer, in_params.workbuffer_size);
return;
}
}
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
void DelayInfo::Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion2& in_params,
const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion1*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
if (IsChannelCountValid(in_specific->channel_count_max)) {
const auto old_state{params->state};
std::memcpy(params, in_specific, sizeof(ParameterVersion1));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
if (!IsChannelCountValid(in_specific->channel_count)) {
params->channel_count = params->channel_count_max;
}
if (!IsChannelCountValid(in_specific->channel_count) ||
old_state != ParameterState::Updated) {
params->state = old_state;
}
if (buffer_unmapped || in_params.is_new) {
usage_state = UsageState::New;
params->state = ParameterState::Initialized;
buffer_unmapped = !pool_mapper.TryAttachBuffer(
error_info, workbuffers[0], in_params.workbuffer, in_params.workbuffer_size);
return;
}
}
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
void DelayInfo::UpdateForCommandGeneration() {
if (enabled) {
usage_state = UsageState::Enabled;
} else {
usage_state = UsageState::Disabled;
}
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
params->state = ParameterState::Updated;
}
void DelayInfo::InitializeResultState(EffectResultState& result_state) {}
void DelayInfo::UpdateResultState(EffectResultState& cpu_state, EffectResultState& dsp_state) {}
CpuAddr DelayInfo::GetWorkbuffer(s32 index) {
return GetSingleBuffer(index);
}
} // namespace AudioCore::AudioRenderer

View File

@@ -1,135 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "audio_core/common/common.h"
#include "audio_core/renderer/effect/effect_info_base.h"
#include "common/common_types.h"
#include "common/fixed_point.h"
namespace AudioCore::AudioRenderer {
class DelayInfo : public EffectInfoBase {
public:
struct ParameterVersion1 {
/* 0x00 */ std::array<s8, MaxChannels> inputs;
/* 0x06 */ std::array<s8, MaxChannels> outputs;
/* 0x0C */ u16 channel_count_max;
/* 0x0E */ u16 channel_count;
/* 0x10 */ u32 delay_time_max;
/* 0x14 */ u32 delay_time;
/* 0x18 */ Common::FixedPoint<18, 14> sample_rate;
/* 0x1C */ Common::FixedPoint<18, 14> in_gain;
/* 0x20 */ Common::FixedPoint<18, 14> feedback_gain;
/* 0x24 */ Common::FixedPoint<18, 14> wet_gain;
/* 0x28 */ Common::FixedPoint<18, 14> dry_gain;
/* 0x2C */ Common::FixedPoint<18, 14> channel_spread;
/* 0x30 */ Common::FixedPoint<18, 14> lowpass_amount;
/* 0x34 */ ParameterState state;
};
static_assert(sizeof(ParameterVersion1) <= sizeof(EffectInfoBase::InParameterVersion1),
"DelayInfo::ParameterVersion1 has the wrong size!");
struct ParameterVersion2 {
/* 0x00 */ std::array<s8, MaxChannels> inputs;
/* 0x06 */ std::array<s8, MaxChannels> outputs;
/* 0x0C */ s16 channel_count_max;
/* 0x0E */ s16 channel_count;
/* 0x10 */ s32 delay_time_max;
/* 0x14 */ s32 delay_time;
/* 0x18 */ s32 sample_rate;
/* 0x1C */ s32 in_gain;
/* 0x20 */ s32 feedback_gain;
/* 0x24 */ s32 wet_gain;
/* 0x28 */ s32 dry_gain;
/* 0x2C */ s32 channel_spread;
/* 0x30 */ s32 lowpass_amount;
/* 0x34 */ ParameterState state;
};
static_assert(sizeof(ParameterVersion2) <= sizeof(EffectInfoBase::InParameterVersion2),
"DelayInfo::ParameterVersion2 has the wrong size!");
struct DelayLine {
Common::FixedPoint<50, 14> Read() const {
return buffer[buffer_pos];
}
void Write(const Common::FixedPoint<50, 14> value) {
buffer[buffer_pos] = value;
buffer_pos = static_cast<u32>((buffer_pos + 1) % buffer.size());
}
s32 sample_count_max{};
s32 sample_count{};
std::vector<Common::FixedPoint<50, 14>> buffer{};
u32 buffer_pos{};
Common::FixedPoint<18, 14> decay_rate{};
};
struct State {
/* 0x000 */ std::array<s32, 8> unk_000;
/* 0x020 */ std::array<DelayLine, MaxChannels> delay_lines;
/* 0x0B0 */ Common::FixedPoint<18, 14> feedback_gain;
/* 0x0B4 */ Common::FixedPoint<18, 14> delay_feedback_gain;
/* 0x0B8 */ Common::FixedPoint<18, 14> delay_feedback_cross_gain;
/* 0x0BC */ Common::FixedPoint<18, 14> lowpass_gain;
/* 0x0C0 */ Common::FixedPoint<18, 14> lowpass_feedback_gain;
/* 0x0C4 */ std::array<Common::FixedPoint<50, 14>, MaxChannels> lowpass_z;
};
static_assert(sizeof(State) <= sizeof(EffectInfoBase::State),
"DelayInfo::State has the wrong size!");
/**
* Update the info with new parameters, version 1.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion1& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info with new parameters, version 2.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion2& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info after command generation. Usually only changes its state.
*/
void UpdateForCommandGeneration() override;
/**
* Initialize a new result state. Version 2 only, unused.
*
* @param result_state - Result state to initialize.
*/
void InitializeResultState(EffectResultState& result_state) override;
/**
* Update the host-side state with the ADSP-side state. Version 2 only, unused.
*
* @param cpu_state - Host-side result state to update.
* @param dsp_state - AudioRenderer-side result state to update from.
*/
void UpdateResultState(EffectResultState& cpu_state, EffectResultState& dsp_state) override;
/**
* Get a workbuffer assigned to this effect with the given index.
*
* @param index - Workbuffer index.
* @return Address of the buffer.
*/
CpuAddr GetWorkbuffer(s32 index) override;
};
} // namespace AudioCore::AudioRenderer
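// Toy, self-contained echo illustrating the Read/Write ring-buffer pattern of
// DelayInfo::DelayLine above, using plain float instead of Common::FixedPoint. The feedback
// wiring here is an illustrative assumption, not the renderer's actual delay effect.
#include <cstddef>
#include <vector>

namespace {
struct SketchDelayLine {
    explicit SketchDelayLine(std::size_t delay_samples) : buffer(delay_samples, 0.0f) {}
    float Process(float in, float feedback_gain) {
        const float delayed = buffer[pos];          // Read the oldest sample.
        buffer[pos] = in + delayed * feedback_gain; // Write input plus decayed feedback.
        pos = (pos + 1) % buffer.size();
        return delayed;
    }
    std::vector<float> buffer;
    std::size_t pos{};
};
} // namespace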

View File

@@ -1,94 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/effect/effect_i3dl2_info.h"
namespace AudioCore::AudioRenderer {
void I3dl2ReverbInfo::Update(BehaviorInfo::ErrorInfo& error_info,
const InParameterVersion1& in_params, const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion1*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
if (IsChannelCountValid(in_specific->channel_count_max)) {
const auto old_state{params->state};
std::memcpy(params, in_specific, sizeof(ParameterVersion1));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
if (!IsChannelCountValid(in_specific->channel_count)) {
params->channel_count = params->channel_count_max;
}
if (!IsChannelCountValid(in_specific->channel_count) ||
old_state != ParameterState::Updated) {
params->state = old_state;
}
if (buffer_unmapped || in_params.is_new) {
usage_state = UsageState::New;
params->state = ParameterState::Initialized;
buffer_unmapped = !pool_mapper.TryAttachBuffer(
error_info, workbuffers[0], in_params.workbuffer, in_params.workbuffer_size);
return;
}
}
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
void I3dl2ReverbInfo::Update(BehaviorInfo::ErrorInfo& error_info,
const InParameterVersion2& in_params, const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion1*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
if (IsChannelCountValid(in_specific->channel_count_max)) {
const auto old_state{params->state};
std::memcpy(params, in_specific, sizeof(ParameterVersion1));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
if (!IsChannelCountValid(in_specific->channel_count)) {
params->channel_count = params->channel_count_max;
}
if (!IsChannelCountValid(in_specific->channel_count) ||
old_state != ParameterState::Updated) {
params->state = old_state;
}
if (buffer_unmapped || in_params.is_new) {
usage_state = UsageState::New;
params->state = ParameterState::Initialized;
buffer_unmapped = !pool_mapper.TryAttachBuffer(
error_info, workbuffers[0], in_params.workbuffer, in_params.workbuffer_size);
return;
}
}
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
void I3dl2ReverbInfo::UpdateForCommandGeneration() {
if (enabled) {
usage_state = UsageState::Enabled;
} else {
usage_state = UsageState::Disabled;
}
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
params->state = ParameterState::Updated;
}
void I3dl2ReverbInfo::InitializeResultState(EffectResultState& result_state) {}
void I3dl2ReverbInfo::UpdateResultState(EffectResultState& cpu_state,
EffectResultState& dsp_state) {}
CpuAddr I3dl2ReverbInfo::GetWorkbuffer(s32 index) {
return GetSingleBuffer(index);
}
} // namespace AudioCore::AudioRenderer

View File

@@ -1,200 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "audio_core/common/common.h"
#include "audio_core/renderer/effect/effect_info_base.h"
#include "common/common_types.h"
#include "common/fixed_point.h"
namespace AudioCore::AudioRenderer {
class I3dl2ReverbInfo : public EffectInfoBase {
public:
struct ParameterVersion1 {
/* 0x00 */ std::array<s8, MaxChannels> inputs;
/* 0x06 */ std::array<s8, MaxChannels> outputs;
/* 0x0C */ u16 channel_count_max;
/* 0x0E */ u16 channel_count;
/* 0x10 */ char unk10[0x4];
/* 0x14 */ u32 sample_rate;
/* 0x18 */ f32 room_HF_gain;
/* 0x1C */ f32 reference_HF;
/* 0x20 */ f32 late_reverb_decay_time;
/* 0x24 */ f32 late_reverb_HF_decay_ratio;
/* 0x28 */ f32 room_gain;
/* 0x2C */ f32 reflection_gain;
/* 0x30 */ f32 reverb_gain;
/* 0x34 */ f32 late_reverb_diffusion;
/* 0x38 */ f32 reflection_delay;
/* 0x3C */ f32 late_reverb_delay_time;
/* 0x40 */ f32 late_reverb_density;
/* 0x44 */ f32 dry_gain;
/* 0x48 */ ParameterState state;
/* 0x49 */ char unk49[0x3];
};
static_assert(sizeof(ParameterVersion1) <= sizeof(EffectInfoBase::InParameterVersion1),
"I3dl2ReverbInfo::ParameterVersion1 has the wrong size!");
struct ParameterVersion2 {
/* 0x00 */ std::array<s8, MaxChannels> inputs;
/* 0x06 */ std::array<s8, MaxChannels> outputs;
/* 0x0C */ u16 channel_count_max;
/* 0x0E */ u16 channel_count;
/* 0x10 */ char unk10[0x4];
/* 0x14 */ u32 sample_rate;
/* 0x18 */ f32 room_HF_gain;
/* 0x1C */ f32 reference_HF;
/* 0x20 */ f32 late_reverb_decay_time;
/* 0x24 */ f32 late_reverb_HF_decay_ratio;
/* 0x28 */ f32 room_gain;
/* 0x2C */ f32 reflection_gain;
/* 0x30 */ f32 reverb_gain;
/* 0x34 */ f32 late_reverb_diffusion;
/* 0x38 */ f32 reflection_delay;
/* 0x3C */ f32 late_reverb_delay_time;
/* 0x40 */ f32 late_reverb_density;
/* 0x44 */ f32 dry_gain;
/* 0x48 */ ParameterState state;
/* 0x49 */ char unk49[0x3];
};
static_assert(sizeof(ParameterVersion2) <= sizeof(EffectInfoBase::InParameterVersion2),
"I3dl2ReverbInfo::ParameterVersion2 has the wrong size!");
static constexpr u32 MaxDelayLines = 4;
static constexpr u32 MaxDelayTaps = 20;
struct I3dl2DelayLine {
void Initialize(const s32 delay_time) {
max_delay = delay_time;
buffer.resize(delay_time + 1, 0);
buffer_end = &buffer[delay_time];
output = &buffer[0];
SetDelay(delay_time);
wet_gain = 0.0f;
}
void SetDelay(const s32 delay_time) {
if (max_delay < delay_time) {
return;
}
delay = delay_time;
input = &buffer[(output - buffer.data() + delay) % (max_delay + 1)];
}
Common::FixedPoint<50, 14> Tick(const Common::FixedPoint<50, 14> sample) {
Write(sample);
auto out_sample{Read()};
output++;
if (output >= buffer_end) {
output = buffer.data();
}
return out_sample;
}
Common::FixedPoint<50, 14> Read() {
return *output;
}
void Write(const Common::FixedPoint<50, 14> sample) {
*(input++) = sample;
if (input >= buffer_end) {
input = buffer.data();
}
}
Common::FixedPoint<50, 14> TapOut(const s32 index) {
auto out{input - (index + 1)};
if (out < buffer.data()) {
out += max_delay + 1;
}
return *out;
}
std::vector<Common::FixedPoint<50, 14>> buffer{};
Common::FixedPoint<50, 14>* buffer_end{};
s32 max_delay{};
Common::FixedPoint<50, 14>* input{};
Common::FixedPoint<50, 14>* output{};
s32 delay{};
f32 wet_gain{};
};
struct State {
f32 lowpass_0;
f32 lowpass_1;
f32 lowpass_2;
I3dl2DelayLine early_delay_line;
std::array<s32, MaxDelayTaps> early_tap_steps;
f32 early_gain;
f32 late_gain;
s32 early_to_late_taps;
std::array<I3dl2DelayLine, MaxDelayLines> fdn_delay_lines;
std::array<I3dl2DelayLine, MaxDelayLines> decay_delay_lines0;
std::array<I3dl2DelayLine, MaxDelayLines> decay_delay_lines1;
f32 last_reverb_echo;
I3dl2DelayLine center_delay_line;
std::array<std::array<f32, 3>, MaxDelayLines> lowpass_coeff;
std::array<f32, MaxDelayLines> shelf_filter;
f32 dry_gain;
};
static_assert(sizeof(State) <= sizeof(EffectInfoBase::State),
"I3dl2ReverbInfo::State is too large!");
/**
* Update the info with new parameters, version 1.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion1& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info with new parameters, version 2.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion2& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info after command generation. Usually only changes its state.
*/
void UpdateForCommandGeneration() override;
/**
* Initialize a new result state. Version 2 only, unused.
*
* @param result_state - Result state to initialize.
*/
void InitializeResultState(EffectResultState& result_state) override;
/**
* Update the host-side state with the ADSP-side state. Version 2 only, unused.
*
* @param cpu_state - Host-side result state to update.
* @param dsp_state - AudioRenderer-side result state to update from.
*/
void UpdateResultState(EffectResultState& cpu_state, EffectResultState& dsp_state) override;
/**
* Get a workbuffer assigned to this effect with the given index.
*
* @param index - Workbuffer index.
* @return Address of the buffer.
*/
CpuAddr GetWorkbuffer(s32 index) override;
};
} // namespace AudioCore::AudioRenderer

View File

@@ -1,81 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/effect/effect_light_limiter_info.h"
namespace AudioCore::AudioRenderer {
void LightLimiterInfo::Update(BehaviorInfo::ErrorInfo& error_info,
const InParameterVersion1& in_params, const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion1*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
std::memcpy(params, in_specific, sizeof(ParameterVersion1));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
if (buffer_unmapped || in_params.is_new) {
usage_state = UsageState::New;
params->state = ParameterState::Initialized;
buffer_unmapped = !pool_mapper.TryAttachBuffer(
error_info, workbuffers[0], in_params.workbuffer, in_params.workbuffer_size);
} else {
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
}
void LightLimiterInfo::Update(BehaviorInfo::ErrorInfo& error_info,
const InParameterVersion2& in_params, const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion1*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
std::memcpy(params, in_specific, sizeof(ParameterVersion1));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
if (buffer_unmapped || in_params.is_new) {
usage_state = UsageState::New;
params->state = ParameterState::Initialized;
buffer_unmapped = !pool_mapper.TryAttachBuffer(
error_info, workbuffers[0], in_params.workbuffer, in_params.workbuffer_size);
} else {
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
}
void LightLimiterInfo::UpdateForCommandGeneration() {
if (enabled) {
usage_state = UsageState::Enabled;
} else {
usage_state = UsageState::Disabled;
}
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
params->state = ParameterState::Updated;
params->statistics_reset_required = false;
}
void LightLimiterInfo::InitializeResultState(EffectResultState& result_state) {
auto result_state_{reinterpret_cast<StatisticsInternal*>(result_state.state.data())};
result_state_->channel_max_sample.fill(0);
result_state_->channel_compression_gain_min.fill(1.0f);
}
void LightLimiterInfo::UpdateResultState(EffectResultState& cpu_state,
EffectResultState& dsp_state) {
auto cpu_statistics{reinterpret_cast<StatisticsInternal*>(cpu_state.state.data())};
auto dsp_statistics{reinterpret_cast<StatisticsInternal*>(dsp_state.state.data())};
*cpu_statistics = *dsp_statistics;
}
CpuAddr LightLimiterInfo::GetWorkbuffer(s32 index) {
return GetSingleBuffer(index);
}
} // namespace AudioCore::AudioRenderer

View File

@@ -1,133 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "audio_core/common/common.h"
#include "audio_core/renderer/effect/effect_info_base.h"
#include "common/common_types.h"
#include "common/fixed_point.h"
namespace AudioCore::AudioRenderer {
class LightLimiterInfo : public EffectInfoBase {
public:
struct ParameterVersion1 {
/* 0x00 */ std::array<s8, MaxChannels> inputs;
/* 0x06 */ std::array<s8, MaxChannels> outputs;
/* 0x0C */ u16 channel_count_max;
/* 0x0E */ u16 channel_count;
/* 0x10 */ u32 sample_rate;
/* 0x14 */ s32 look_ahead_time_max;
/* 0x18 */ s32 attack_time;
/* 0x1C */ s32 release_time;
/* 0x20 */ s32 look_ahead_time;
/* 0x24 */ f32 attack_coeff;
/* 0x28 */ f32 release_coeff;
/* 0x2C */ f32 threshold;
/* 0x30 */ f32 input_gain;
/* 0x34 */ f32 output_gain;
/* 0x38 */ s32 look_ahead_samples_min;
/* 0x3C */ s32 look_ahead_samples_max;
/* 0x40 */ ParameterState state;
/* 0x41 */ bool statistics_enabled;
/* 0x42 */ bool statistics_reset_required;
/* 0x43 */ char unk43[0x1];
};
static_assert(sizeof(ParameterVersion1) <= sizeof(EffectInfoBase::InParameterVersion1),
"LightLimiterInfo::ParameterVersion1 has the wrong size!");
struct ParameterVersion2 {
/* 0x00 */ std::array<s8, MaxChannels> inputs;
/* 0x06 */ std::array<s8, MaxChannels> outputs;
/* 0x0C */ u16 channel_count_max;
/* 0x0E */ u16 channel_count;
/* 0x10 */ u32 sample_rate;
/* 0x14 */ s32 look_ahead_time_max;
/* 0x18 */ s32 attack_time;
/* 0x1C */ s32 release_time;
/* 0x20 */ s32 look_ahead_time;
/* 0x24 */ f32 attack_coeff;
/* 0x28 */ f32 release_coeff;
/* 0x2C */ f32 threshold;
/* 0x30 */ f32 input_gain;
/* 0x34 */ f32 output_gain;
/* 0x38 */ s32 look_ahead_samples_min;
/* 0x3C */ s32 look_ahead_samples_max;
/* 0x40 */ ParameterState state;
/* 0x41 */ bool statistics_enabled;
/* 0x42 */ bool statistics_reset_required;
/* 0x43 */ char unk43[0x1];
};
static_assert(sizeof(ParameterVersion2) <= sizeof(EffectInfoBase::InParameterVersion2),
"LightLimiterInfo::ParameterVersion2 has the wrong size!");
struct State {
std::array<Common::FixedPoint<49, 15>, MaxChannels> samples_average;
std::array<Common::FixedPoint<49, 15>, MaxChannels> compression_gain;
std::array<s32, MaxChannels> look_ahead_sample_offsets;
std::array<std::vector<Common::FixedPoint<49, 15>>, MaxChannels> look_ahead_sample_buffers;
};
static_assert(sizeof(State) <= sizeof(EffectInfoBase::State),
"LightLimiterInfo::State has the wrong size!");
struct StatisticsInternal {
/* 0x00 */ std::array<f32, MaxChannels> channel_max_sample;
/* 0x18 */ std::array<f32, MaxChannels> channel_compression_gain_min;
};
static_assert(sizeof(StatisticsInternal) == 0x30,
"LightLimiterInfo::StatisticsInternal has the wrong size!");
/**
* Update the info with new parameters, version 1.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion1& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info with new parameters, version 2.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion2& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info after command generation. Usually only changes its state.
*/
void UpdateForCommandGeneration() override;
/**
* Initialize a new limiter statistics result state. Version 2 only.
*
* @param result_state - Result state to initialize.
*/
void InitializeResultState(EffectResultState& result_state) override;
/**
* Update the host-side limiter statistics with the ADSP-side one. Version 2 only.
*
* @param cpu_state - Host-side result state to update.
* @param dsp_state - AudioRenderer-side result state to update from.
*/
void UpdateResultState(EffectResultState& cpu_state, EffectResultState& dsp_state) override;
/**
* Get a workbuffer assigned to this effect with the given index.
*
* @param index - Workbuffer index.
* @return Address of the buffer.
*/
CpuAddr GetWorkbuffer(s32 index) override;
};
} // namespace AudioCore::AudioRenderer
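// Generic one-pole attack/release smoothing, shown only to illustrate how attack_coeff and
// release_coeff style parameters are typically used. The renderer's actual limiter command is
// not part of this file, so treat this as a textbook sketch, not the real algorithm.
namespace {
float SketchLimiterGain(float input_abs, float threshold, float prev_gain, float attack_coeff,
                        float release_coeff) {
    // Target gain pulls peaks back down to the threshold; 1.0 means no reduction is needed.
    const float target = input_abs > threshold ? threshold / input_abs : 1.0f;
    // Attack when the gain must drop quickly, release when it may recover slowly.
    const float coeff = target < prev_gain ? attack_coeff : release_coeff;
    return prev_gain + coeff * (target - prev_gain);
}
} // namespace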

View File

@@ -1,93 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/effect/effect_reverb_info.h"
namespace AudioCore::AudioRenderer {
void ReverbInfo::Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion1& in_params,
const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion1*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
if (IsChannelCountValid(in_specific->channel_count_max)) {
const auto old_state{params->state};
std::memcpy(params, in_specific, sizeof(ParameterVersion1));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
if (!IsChannelCountValid(in_specific->channel_count)) {
params->channel_count = params->channel_count_max;
}
if (!IsChannelCountValid(in_specific->channel_count) ||
old_state != ParameterState::Updated) {
params->state = old_state;
}
if (buffer_unmapped || in_params.is_new) {
usage_state = UsageState::New;
params->state = ParameterState::Initialized;
buffer_unmapped = !pool_mapper.TryAttachBuffer(
error_info, workbuffers[0], in_params.workbuffer, in_params.workbuffer_size);
return;
}
}
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
void ReverbInfo::Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion2& in_params,
const PoolMapper& pool_mapper) {
auto in_specific{reinterpret_cast<const ParameterVersion2*>(in_params.specific.data())};
auto params{reinterpret_cast<ParameterVersion2*>(parameter.data())};
if (IsChannelCountValid(in_specific->channel_count_max)) {
const auto old_state{params->state};
std::memcpy(params, in_specific, sizeof(ParameterVersion2));
mix_id = in_params.mix_id;
process_order = in_params.process_order;
enabled = in_params.enabled;
if (!IsChannelCountValid(in_specific->channel_count)) {
params->channel_count = params->channel_count_max;
}
if (!IsChannelCountValid(in_specific->channel_count) ||
old_state != ParameterState::Updated) {
params->state = old_state;
}
if (buffer_unmapped || in_params.is_new) {
usage_state = UsageState::New;
params->state = ParameterState::Initialized;
buffer_unmapped = !pool_mapper.TryAttachBuffer(
error_info, workbuffers[0], in_params.workbuffer, in_params.workbuffer_size);
return;
}
}
error_info.error_code = ResultSuccess;
error_info.address = CpuAddr(0);
}
void ReverbInfo::UpdateForCommandGeneration() {
if (enabled) {
usage_state = UsageState::Enabled;
} else {
usage_state = UsageState::Disabled;
}
auto params{reinterpret_cast<ParameterVersion1*>(parameter.data())};
params->state = ParameterState::Updated;
}
void ReverbInfo::InitializeResultState(EffectResultState& result_state) {}
void ReverbInfo::UpdateResultState(EffectResultState& cpu_state, EffectResultState& dsp_state) {}
CpuAddr ReverbInfo::GetWorkbuffer(s32 index) {
return GetSingleBuffer(index);
}
} // namespace AudioCore::AudioRenderer

View File

@@ -1,190 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "audio_core/common/common.h"
#include "audio_core/renderer/effect/effect_info_base.h"
#include "common/common_types.h"
#include "common/fixed_point.h"
namespace AudioCore::AudioRenderer {
class ReverbInfo : public EffectInfoBase {
public:
struct ParameterVersion1 {
/* 0x00 */ std::array<s8, MaxChannels> inputs;
/* 0x06 */ std::array<s8, MaxChannels> outputs;
/* 0x0C */ u16 channel_count_max;
/* 0x0E */ u16 channel_count;
/* 0x10 */ u32 sample_rate;
/* 0x14 */ u32 early_mode;
/* 0x18 */ s32 early_gain;
/* 0x1C */ s32 pre_delay;
/* 0x20 */ s32 late_mode;
/* 0x24 */ s32 late_gain;
/* 0x28 */ s32 decay_time;
/* 0x2C */ s32 high_freq_decay_ratio;
/* 0x30 */ s32 colouration;
/* 0x34 */ s32 base_gain;
/* 0x38 */ s32 wet_gain;
/* 0x3C */ s32 dry_gain;
/* 0x40 */ ParameterState state;
};
static_assert(sizeof(ParameterVersion1) <= sizeof(EffectInfoBase::InParameterVersion1),
"ReverbInfo::ParameterVersion1 has the wrong size!");
struct ParameterVersion2 {
/* 0x00 */ std::array<s8, MaxChannels> inputs;
/* 0x06 */ std::array<s8, MaxChannels> outputs;
/* 0x0C */ u16 channel_count_max;
/* 0x0E */ u16 channel_count;
/* 0x10 */ u32 sample_rate;
/* 0x14 */ u32 early_mode;
/* 0x18 */ s32 early_gain;
/* 0x1C */ s32 pre_delay;
/* 0x20 */ s32 late_mode;
/* 0x24 */ s32 late_gain;
/* 0x28 */ s32 decay_time;
/* 0x2C */ s32 high_freq_decay_ratio;
/* 0x30 */ s32 colouration;
/* 0x34 */ s32 base_gain;
/* 0x38 */ s32 wet_gain;
/* 0x3C */ s32 dry_gain;
/* 0x40 */ ParameterState state;
};
static_assert(sizeof(ParameterVersion2) <= sizeof(EffectInfoBase::InParameterVersion2),
"ReverbInfo::ParameterVersion2 has the wrong size!");
static constexpr u32 MaxDelayLines = 4;
static constexpr u32 MaxDelayTaps = 10;
static constexpr u32 NumEarlyModes = 5;
static constexpr u32 NumLateModes = 5;
struct ReverbDelayLine {
void Initialize(const s32 delay_time, const f32 decay_rate) {
buffer.resize(delay_time + 1, 0);
buffer_end = &buffer[delay_time];
output = &buffer[0];
decay = decay_rate;
sample_count_max = delay_time;
SetDelay(delay_time);
}
void SetDelay(const s32 delay_time) {
if (sample_count_max < delay_time) {
return;
}
sample_count = delay_time;
input = &buffer[(output - buffer.data() + sample_count) % (sample_count_max + 1)];
}
Common::FixedPoint<50, 14> Tick(const Common::FixedPoint<50, 14> sample) {
Write(sample);
auto out_sample{Read()};
output++;
if (output >= buffer_end) {
output = buffer.data();
}
return out_sample;
}
Common::FixedPoint<50, 14> Read() {
return *output;
}
void Write(const Common::FixedPoint<50, 14> sample) {
*(input++) = sample;
if (input >= buffer_end) {
input = buffer.data();
}
}
Common::FixedPoint<50, 14> TapOut(const s32 index) {
auto out{input - (index + 1)};
if (out < buffer.data()) {
out += sample_count;
}
return *out;
}
s32 sample_count{};
s32 sample_count_max{};
std::vector<Common::FixedPoint<50, 14>> buffer{};
Common::FixedPoint<50, 14>* buffer_end{};
Common::FixedPoint<50, 14>* input{};
Common::FixedPoint<50, 14>* output{};
Common::FixedPoint<50, 14> decay{};
};
struct State {
ReverbDelayLine pre_delay_line;
ReverbDelayLine center_delay_line;
std::array<s32, MaxDelayTaps> early_delay_times;
std::array<Common::FixedPoint<50, 14>, MaxDelayTaps> early_gains;
s32 pre_delay_time;
std::array<ReverbDelayLine, MaxDelayLines> decay_delay_lines;
std::array<ReverbDelayLine, MaxDelayLines> fdn_delay_lines;
std::array<Common::FixedPoint<50, 14>, MaxDelayLines> hf_decay_gain;
std::array<Common::FixedPoint<50, 14>, MaxDelayLines> hf_decay_prev_gain;
std::array<Common::FixedPoint<50, 14>, MaxDelayLines> prev_feedback_output;
};
static_assert(sizeof(State) <= sizeof(EffectInfoBase::State),
"ReverbInfo::State is too large!");
/**
* Update the info with new parameters, version 1.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion1& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info with new parameters, version 2.
*
* @param error_info - Used to write call result code.
* @param in_params - New parameters to update the info with.
* @param pool_mapper - Pool for mapping buffers.
*/
void Update(BehaviorInfo::ErrorInfo& error_info, const InParameterVersion2& in_params,
const PoolMapper& pool_mapper) override;
/**
* Update the info after command generation. Usually only changes its state.
*/
void UpdateForCommandGeneration() override;
/**
* Initialize a new result state. Version 2 only, unused.
*
* @param result_state - Result state to initialize.
*/
void InitializeResultState(EffectResultState& result_state) override;
/**
* Update the host-side state with the ADSP-side state. Version 2 only, unused.
*
* @param cpu_state - Host-side result state to update.
* @param dsp_state - AudioRenderer-side result state to update from.
*/
void UpdateResultState(EffectResultState& cpu_state, EffectResultState& dsp_state) override;
/**
* Get a workbuffer assigned to this effect with the given index.
*
* @param index - Workbuffer index.
* @return Address of the buffer.
*/
CpuAddr GetWorkbuffer(s32 index) override;
};
} // namespace AudioCore::AudioRenderer

View File

@@ -1,160 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <atomic>
#include <cstring>
#include "audio_core/sdl2_sink.h"
#include "audio_core/stream.h"
#include "common/assert.h"
#include "common/logging/log.h"
//#include "common/settings.h"
// Ignore -Wimplicit-fallthrough due to https://github.com/libsdl-org/SDL/issues/4307
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wimplicit-fallthrough"
#endif
#include <SDL.h>
#ifdef __clang__
#pragma clang diagnostic pop
#endif
namespace AudioCore {
class SDLSinkStream final : public SinkStream {
public:
SDLSinkStream(u32 sample_rate, u32 num_channels_, const std::string& output_device)
: num_channels{std::min(num_channels_, 6u)} {
SDL_AudioSpec spec;
spec.freq = sample_rate;
spec.channels = static_cast<u8>(num_channels);
spec.format = AUDIO_S16SYS;
spec.samples = 4096;
spec.callback = nullptr;
SDL_AudioSpec obtained;
if (output_device.empty()) {
dev = SDL_OpenAudioDevice(nullptr, 0, &spec, &obtained, 0);
} else {
dev = SDL_OpenAudioDevice(output_device.c_str(), 0, &spec, &obtained, 0);
}
if (dev == 0) {
LOG_CRITICAL(Audio_Sink, "Error opening sdl audio device: {}", SDL_GetError());
return;
}
SDL_PauseAudioDevice(dev, 0);
}
~SDLSinkStream() override {
if (dev == 0) {
return;
}
SDL_CloseAudioDevice(dev);
}
void EnqueueSamples(u32 source_num_channels, const std::vector<s16>& samples) override {
if (source_num_channels > num_channels) {
// Downmix 6 channels to 2
ASSERT_MSG(source_num_channels == 6, "Channel count must be 6");
std::vector<s16> buf;
buf.reserve(samples.size() * num_channels / source_num_channels);
for (std::size_t i = 0; i < samples.size(); i += source_num_channels) {
// Downmixing implementation taken from the ATSC standard
const s16 left{samples[i + 0]};
const s16 right{samples[i + 1]};
const s16 center{samples[i + 2]};
const s16 surround_left{samples[i + 4]};
const s16 surround_right{samples[i + 5]};
// Not used in the ATSC reference implementation
[[maybe_unused]] const s16 low_frequency_effects{samples[i + 3]};
constexpr s32 clev{707}; // center mixing level coefficient
constexpr s32 slev{707}; // surround mixing level coefficient
buf.push_back(static_cast<s16>(left + (clev * center / 1000) +
(slev * surround_left / 1000)));
buf.push_back(static_cast<s16>(right + (clev * center / 1000) +
(slev * surround_right / 1000)));
}
int ret = SDL_QueueAudio(dev, static_cast<const void*>(buf.data()),
static_cast<u32>(buf.size() * sizeof(s16)));
if (ret < 0)
LOG_WARNING(Audio_Sink, "Could not queue audio buffer: {}", SDL_GetError());
return;
}
int ret = SDL_QueueAudio(dev, static_cast<const void*>(samples.data()),
static_cast<u32>(samples.size() * sizeof(s16)));
if (ret < 0)
LOG_WARNING(Audio_Sink, "Could not queue audio buffer: {}", SDL_GetError());
}
std::size_t SamplesInQueue(u32 channel_count) const override {
if (dev == 0)
return 0;
return SDL_GetQueuedAudioSize(dev) / (channel_count * sizeof(s16));
}
void Flush() override {
should_flush = true;
}
u32 GetNumChannels() const {
return num_channels;
}
private:
SDL_AudioDeviceID dev = 0;
u32 num_channels{};
std::atomic<bool> should_flush{};
};
SDLSink::SDLSink(std::string_view target_device_name) {
if (!SDL_WasInit(SDL_INIT_AUDIO)) {
if (SDL_InitSubSystem(SDL_INIT_AUDIO) < 0) {
LOG_CRITICAL(Audio_Sink, "SDL_InitSubSystem audio failed: {}", SDL_GetError());
return;
}
}
if (target_device_name != auto_device_name && !target_device_name.empty()) {
output_device = target_device_name;
} else {
output_device.clear();
}
}
SDLSink::~SDLSink() = default;
SinkStream& SDLSink::AcquireSinkStream(u32 sample_rate, u32 num_channels, const std::string&) {
sink_streams.push_back(
std::make_unique<SDLSinkStream>(sample_rate, num_channels, output_device));
return *sink_streams.back();
}
std::vector<std::string> ListSDLSinkDevices() {
std::vector<std::string> device_list;
if (!SDL_WasInit(SDL_INIT_AUDIO)) {
if (SDL_InitSubSystem(SDL_INIT_AUDIO) < 0) {
LOG_CRITICAL(Audio_Sink, "SDL_InitSubSystem audio failed: {}", SDL_GetError());
return {};
}
}
const int device_count = SDL_GetNumAudioDevices(0);
for (int i = 0; i < device_count; ++i) {
device_list.emplace_back(SDL_GetAudioDeviceName(i, 0));
}
return device_list;
}
} // namespace AudioCore

View File

@@ -1,28 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <string>
#include <vector>
#include "audio_core/sink.h"
namespace AudioCore {
class SDLSink final : public Sink {
public:
explicit SDLSink(std::string_view device_id);
~SDLSink() override;
SinkStream& AcquireSinkStream(u32 sample_rate, u32 num_channels,
const std::string& name) override;
private:
std::string output_device;
std::vector<SinkStreamPtr> sink_streams;
};
std::vector<std::string> ListSDLSinkDevices();
} // namespace AudioCore

View File

@@ -1,30 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include <string>
#include "audio_core/sink_stream.h"
#include "common/common_types.h"
namespace AudioCore {
constexpr char auto_device_name[] = "auto";
/**
* This class is an interface for an audio sink. An audio sink accepts samples in stereo signed
* PCM16 format to be output. Sinks *do not* handle resampling and expect the correct sample rate.
* They are dumb outputs.
*/
class Sink {
public:
virtual ~Sink() = default;
virtual SinkStream& AcquireSinkStream(u32 sample_rate, u32 num_channels,
const std::string& name) = 0;
};
using SinkPtr = std::unique_ptr<Sink>;
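// Usage sketch (illustrative, not part of this header): create a sink via the factory in
// sink_details.h and pull a stream from it.
//     SinkPtr sink = CreateSinkFromID("sdl2", "auto");
//     SinkStream& stream = sink->AcquireSinkStream(48000, 2, "AudioOut");
//     stream.EnqueueSamples(2, samples);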
} // namespace AudioCore

View File

@@ -1,47 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/sink_context.h"
namespace AudioCore {
SinkContext::SinkContext(std::size_t sink_count_) : sink_count{sink_count_} {}
SinkContext::~SinkContext() = default;
std::size_t SinkContext::GetCount() const {
return sink_count;
}
void SinkContext::UpdateMainSink(const SinkInfo::InParams& in) {
ASSERT(in.type == SinkTypes::Device);
if (in.device.down_matrix_enabled) {
downmix_coefficients = in.device.down_matrix_coef;
} else {
downmix_coefficients = {
1.0f, // front
0.707f, // center
0.0f, // lfe
0.707f, // back
};
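// 0.707f ~= -3 dB (half power); the LFE coefficient defaults to zero, dropping that channel.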
}
in_use = in.in_use;
use_count = in.device.input_count;
buffers = in.device.input;
}
bool SinkContext::InUse() const {
return in_use;
}
std::vector<u8> SinkContext::OutputBuffers() const {
std::vector<u8> buffer_ret(use_count);
std::memcpy(buffer_ret.data(), buffers.data(), use_count);
return buffer_ret;
}
const DownmixCoefficients& SinkContext::GetDownmixCoefficients() const {
return downmix_coefficients;
}
} // namespace AudioCore

View File

@@ -1,95 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "audio_core/common.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
namespace AudioCore {
using DownmixCoefficients = std::array<float_le, 4>;
enum class SinkTypes : u8 {
Invalid = 0,
Device = 1,
Circular = 2,
};
enum class SinkSampleFormat : u32_le {
None = 0,
Pcm8 = 1,
Pcm16 = 2,
Pcm24 = 3,
Pcm32 = 4,
PcmFloat = 5,
Adpcm = 6,
};
class SinkInfo {
public:
struct CircularBufferIn {
u64_le address;
u32_le size;
u32_le input_count;
u32_le sample_count;
u32_le previous_position;
SinkSampleFormat sample_format;
std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input;
bool in_use;
INSERT_PADDING_BYTES_NOINIT(5);
};
static_assert(sizeof(CircularBufferIn) == 0x28,
"SinkInfo::CircularBufferIn is in invalid size");
struct DeviceIn {
std::array<u8, 255> device_name;
INSERT_PADDING_BYTES_NOINIT(1);
s32_le input_count;
std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input;
INSERT_PADDING_BYTES_NOINIT(1);
bool down_matrix_enabled;
DownmixCoefficients down_matrix_coef;
};
static_assert(sizeof(DeviceIn) == 0x11c, "SinkInfo::DeviceIn is an invalid size");
struct InParams {
SinkTypes type{};
bool in_use{};
INSERT_PADDING_BYTES(2);
u32_le node_id{};
INSERT_PADDING_WORDS(6);
union {
// std::array<u8, 0x120> raw{};
DeviceIn device;
CircularBufferIn circular_buffer;
};
};
static_assert(sizeof(InParams) == 0x140, "SinkInfo::InParams is an invalid size!");
};
class SinkContext {
public:
explicit SinkContext(std::size_t sink_count_);
~SinkContext();
[[nodiscard]] std::size_t GetCount() const;
void UpdateMainSink(const SinkInfo::InParams& in);
[[nodiscard]] bool InUse() const;
[[nodiscard]] std::vector<u8> OutputBuffers() const;
[[nodiscard]] const DownmixCoefficients& GetDownmixCoefficients() const;
private:
bool in_use{false};
s32 use_count{};
std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> buffers{};
std::size_t sink_count{};
DownmixCoefficients downmix_coefficients{};
};
} // namespace AudioCore

View File

@@ -1,90 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "audio_core/null_sink.h"
#include "audio_core/sink_details.h"
#ifdef HAVE_CUBEB
#include "audio_core/cubeb_sink.h"
#endif
#ifdef HAVE_SDL2
#include "audio_core/sdl2_sink.h"
#endif
#include "common/logging/log.h"
namespace AudioCore {
namespace {
struct SinkDetails {
using FactoryFn = std::unique_ptr<Sink> (*)(std::string_view);
using ListDevicesFn = std::vector<std::string> (*)();
/// Name for this sink.
const char* id;
/// A method to call to construct an instance of this type of sink.
FactoryFn factory;
/// A method to call to list available devices.
ListDevicesFn list_devices;
};
// sink_details is ordered in terms of desirability, with the best choice at the top.
constexpr SinkDetails sink_details[] = {
#ifdef HAVE_CUBEB
SinkDetails{"cubeb",
[](std::string_view device_id) -> std::unique_ptr<Sink> {
return std::make_unique<CubebSink>(device_id);
},
&ListCubebSinkDevices},
#endif
#ifdef HAVE_SDL2
SinkDetails{"sdl2",
[](std::string_view device_id) -> std::unique_ptr<Sink> {
return std::make_unique<SDLSink>(device_id);
},
&ListSDLSinkDevices},
#endif
SinkDetails{"null",
[](std::string_view device_id) -> std::unique_ptr<Sink> {
return std::make_unique<NullSink>(device_id);
},
[] { return std::vector<std::string>{"null"}; }},
};
const SinkDetails& GetSinkDetails(std::string_view sink_id) {
auto iter =
std::find_if(std::begin(sink_details), std::end(sink_details),
[sink_id](const auto& sink_detail) { return sink_detail.id == sink_id; });
if (sink_id == "auto" || iter == std::end(sink_details)) {
if (sink_id != "auto") {
LOG_ERROR(Audio, "AudioCore::SelectSink given invalid sink_id {}", sink_id);
}
// Auto-select.
// sink_details is ordered in terms of desirability, with the best choice at the front.
iter = std::begin(sink_details);
}
return *iter;
}
} // Anonymous namespace
std::vector<const char*> GetSinkIDs() {
std::vector<const char*> sink_ids(std::size(sink_details));
std::transform(std::begin(sink_details), std::end(sink_details), std::begin(sink_ids),
[](const auto& sink) { return sink.id; });
return sink_ids;
}
std::vector<std::string> GetDeviceListForSink(std::string_view sink_id) {
return GetSinkDetails(sink_id).list_devices();
}
std::unique_ptr<Sink> CreateSinkFromID(std::string_view sink_id, std::string_view device_id) {
return GetSinkDetails(sink_id).factory(device_id);
}
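// Usage sketch (illustrative): an unknown id, or "auto", falls back to the first compiled-in
// backend, which is the most desirable one.
//     std::unique_ptr<Sink> sink = CreateSinkFromID("auto", "");
//     const std::vector<std::string> devices = GetDeviceListForSink("cubeb");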
} // namespace AudioCore

View File

@@ -1,23 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <string>
#include <string_view>
#include <vector>
namespace AudioCore {
class Sink;
/// Retrieves the IDs for all available audio sinks.
std::vector<const char*> GetSinkIDs();
/// Gets the list of devices for a particular sink identified by the given ID.
std::vector<std::string> GetDeviceListForSink(std::string_view sink_id);
/// Creates an audio sink identified by the given device ID.
std::unique_ptr<Sink> CreateSinkFromID(std::string_view sink_id, std::string_view device_id);
} // namespace AudioCore

View File

@@ -1,35 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include <vector>
#include "common/common_types.h"
namespace AudioCore {
/**
* Accepts samples in stereo signed PCM16 format to be output. Sinks *do not* handle resampling and
* expect the correct sample rate. They are dumb outputs.
*/
class SinkStream {
public:
virtual ~SinkStream() = default;
/**
* Feed stereo samples to sink.
* @param num_channels Number of channels used.
* @param samples Samples in interleaved stereo PCM16 format.
*/
virtual void EnqueueSamples(u32 num_channels, const std::vector<s16>& samples) = 0;
virtual std::size_t SamplesInQueue(u32 num_channels) const = 0;
virtual void Flush() = 0;
};
using SinkStreamPtr = std::unique_ptr<SinkStream>;
} // namespace AudioCore

View File

@@ -1,616 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/behavior_info.h"
#include "audio_core/splitter_context.h"
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
namespace AudioCore {
ServerSplitterDestinationData::ServerSplitterDestinationData(s32 id_) : id{id_} {}
ServerSplitterDestinationData::~ServerSplitterDestinationData() = default;
void ServerSplitterDestinationData::Update(SplitterInfo::InDestinationParams& header) {
// Logged as errors, though these are not actually failure states
if (header.magic != SplitterMagic::DataHeader) {
LOG_ERROR(Audio, "Splitter destination header is invalid!");
return;
}
// Incorrect splitter id
if (header.splitter_id != id) {
LOG_ERROR(Audio, "Splitter destination ids do not match!");
return;
}
mix_id = header.mix_id;
// Copy our mix volumes
std::copy(header.mix_volumes.begin(), header.mix_volumes.end(), current_mix_volumes.begin());
if (!in_use && header.in_use) {
// Update mix volumes
std::copy(current_mix_volumes.begin(), current_mix_volumes.end(), last_mix_volumes.begin());
needs_update = false;
}
in_use = header.in_use;
}
ServerSplitterDestinationData* ServerSplitterDestinationData::GetNextDestination() {
return next;
}
const ServerSplitterDestinationData* ServerSplitterDestinationData::GetNextDestination() const {
return next;
}
void ServerSplitterDestinationData::SetNextDestination(ServerSplitterDestinationData* dest) {
next = dest;
}
bool ServerSplitterDestinationData::ValidMixId() const {
return GetMixId() != AudioCommon::NO_MIX;
}
s32 ServerSplitterDestinationData::GetMixId() const {
return mix_id;
}
bool ServerSplitterDestinationData::IsConfigured() const {
return in_use && ValidMixId();
}
float ServerSplitterDestinationData::GetMixVolume(std::size_t i) const {
ASSERT(i < AudioCommon::MAX_MIX_BUFFERS);
return current_mix_volumes.at(i);
}
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
ServerSplitterDestinationData::CurrentMixVolumes() const {
return current_mix_volumes;
}
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
ServerSplitterDestinationData::LastMixVolumes() const {
return last_mix_volumes;
}
void ServerSplitterDestinationData::MarkDirty() {
needs_update = true;
}
void ServerSplitterDestinationData::UpdateInternalState() {
if (in_use && needs_update) {
std::copy(current_mix_volumes.begin(), current_mix_volumes.end(), last_mix_volumes.begin());
}
needs_update = false;
}
ServerSplitterInfo::ServerSplitterInfo(s32 id_) : id(id_) {}
ServerSplitterInfo::~ServerSplitterInfo() = default;
void ServerSplitterInfo::InitializeInfos() {
send_length = 0;
head = nullptr;
new_connection = true;
}
void ServerSplitterInfo::ClearNewConnectionFlag() {
new_connection = false;
}
std::size_t ServerSplitterInfo::Update(SplitterInfo::InInfoPrams& header) {
if (header.send_id != id) {
return 0;
}
sample_rate = header.sample_rate;
new_connection = true;
// We need to update the size here due to the splitter bug being present and providing an
// incorrect size. We're supposed to also update the header here, but we just ignore it and continue
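// The consumed size works out to (header.length + 2) * sizeof(s32_le) bytes:
// (length - 1) trailing resource ids plus three extra words.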
return (sizeof(s32_le) * (header.length - 1)) + (sizeof(s32_le) * 3);
}
ServerSplitterDestinationData* ServerSplitterInfo::GetHead() {
return head;
}
const ServerSplitterDestinationData* ServerSplitterInfo::GetHead() const {
return head;
}
ServerSplitterDestinationData* ServerSplitterInfo::GetData(std::size_t depth) {
auto* current_head = head;
for (std::size_t i = 0; i < depth; i++) {
if (current_head == nullptr) {
return nullptr;
}
current_head = current_head->GetNextDestination();
}
return current_head;
}
const ServerSplitterDestinationData* ServerSplitterInfo::GetData(std::size_t depth) const {
auto* current_head = head;
for (std::size_t i = 0; i < depth; i++) {
if (current_head == nullptr) {
return nullptr;
}
current_head = current_head->GetNextDestination();
}
return current_head;
}
bool ServerSplitterInfo::HasNewConnection() const {
return new_connection;
}
s32 ServerSplitterInfo::GetLength() const {
return send_length;
}
void ServerSplitterInfo::SetHead(ServerSplitterDestinationData* new_head) {
head = new_head;
}
void ServerSplitterInfo::SetHeadDepth(s32 length) {
send_length = length;
}
SplitterContext::SplitterContext() = default;
SplitterContext::~SplitterContext() = default;
void SplitterContext::Initialize(BehaviorInfo& behavior_info, std::size_t _info_count,
std::size_t _data_count) {
if (!behavior_info.IsSplitterSupported() || _data_count == 0 || _info_count == 0) {
Setup(0, 0, false);
return;
}
// Only initialize if we're using splitters
Setup(_info_count, _data_count, behavior_info.IsSplitterBugFixed());
}
bool SplitterContext::Update(const std::vector<u8>& input, std::size_t& input_offset,
std::size_t& bytes_read) {
const auto UpdateOffsets = [&](std::size_t read) {
input_offset += read;
bytes_read += read;
};
if (info_count == 0 || data_count == 0) {
bytes_read = 0;
return true;
}
if (!AudioCommon::CanConsumeBuffer(input.size(), input_offset,
sizeof(SplitterInfo::InHeader))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
SplitterInfo::InHeader header{};
std::memcpy(&header, input.data() + input_offset, sizeof(SplitterInfo::InHeader));
UpdateOffsets(sizeof(SplitterInfo::InHeader));
if (header.magic != SplitterMagic::SplitterHeader) {
LOG_ERROR(Audio, "Invalid header magic! Expecting {:X} but got {:X}",
SplitterMagic::SplitterHeader, header.magic);
return false;
}
// Clear all connections
for (auto& info : infos) {
info.ClearNewConnectionFlag();
}
UpdateInfo(input, input_offset, bytes_read, header.info_count);
UpdateData(input, input_offset, bytes_read, header.data_count);
const auto aligned_bytes_read = Common::AlignUp(bytes_read, 16);
input_offset += aligned_bytes_read - bytes_read;
bytes_read = aligned_bytes_read;
return true;
}
bool SplitterContext::UsingSplitter() const {
return info_count > 0 && data_count > 0;
}
ServerSplitterInfo& SplitterContext::GetInfo(std::size_t i) {
ASSERT(i < info_count);
return infos.at(i);
}
const ServerSplitterInfo& SplitterContext::GetInfo(std::size_t i) const {
ASSERT(i < info_count);
return infos.at(i);
}
ServerSplitterDestinationData& SplitterContext::GetData(std::size_t i) {
ASSERT(i < data_count);
return datas.at(i);
}
const ServerSplitterDestinationData& SplitterContext::GetData(std::size_t i) const {
ASSERT(i < data_count);
return datas.at(i);
}
ServerSplitterDestinationData* SplitterContext::GetDestinationData(std::size_t info,
std::size_t data) {
ASSERT(info < info_count);
auto& cur_info = GetInfo(info);
return cur_info.GetData(data);
}
const ServerSplitterDestinationData* SplitterContext::GetDestinationData(std::size_t info,
std::size_t data) const {
ASSERT(info < info_count);
const auto& cur_info = GetInfo(info);
return cur_info.GetData(data);
}
void SplitterContext::UpdateInternalState() {
if (data_count == 0) {
return;
}
for (auto& data : datas) {
data.UpdateInternalState();
}
}
std::size_t SplitterContext::GetInfoCount() const {
return info_count;
}
std::size_t SplitterContext::GetDataCount() const {
return data_count;
}
void SplitterContext::Setup(std::size_t info_count_, std::size_t data_count_,
bool is_splitter_bug_fixed) {
info_count = info_count_;
data_count = data_count_;
for (std::size_t i = 0; i < info_count; i++) {
auto& splitter = infos.emplace_back(static_cast<s32>(i));
splitter.InitializeInfos();
}
for (std::size_t i = 0; i < data_count; i++) {
datas.emplace_back(static_cast<s32>(i));
}
bug_fixed = is_splitter_bug_fixed;
}
bool SplitterContext::UpdateInfo(const std::vector<u8>& input, std::size_t& input_offset,
std::size_t& bytes_read, s32 in_splitter_count) {
const auto UpdateOffsets = [&](std::size_t read) {
input_offset += read;
bytes_read += read;
};
for (s32 i = 0; i < in_splitter_count; i++) {
if (!AudioCommon::CanConsumeBuffer(input.size(), input_offset,
sizeof(SplitterInfo::InInfoPrams))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
SplitterInfo::InInfoPrams header{};
std::memcpy(&header, input.data() + input_offset, sizeof(SplitterInfo::InInfoPrams));
// Logged as errors; these don't actually cause a bailout for some reason
if (header.magic != SplitterMagic::InfoHeader) {
LOG_ERROR(Audio, "Bad splitter data header");
break;
}
if (header.send_id < 0 || static_cast<std::size_t>(header.send_id) >= info_count) {
LOG_ERROR(Audio, "Bad splitter data id");
break;
}
UpdateOffsets(sizeof(SplitterInfo::InInfoPrams));
auto& info = GetInfo(header.send_id);
if (!RecomposeDestination(info, header, input, input_offset)) {
LOG_ERROR(Audio, "Failed to recompose destination for splitter!");
return false;
}
const std::size_t read = info.Update(header);
bytes_read += read;
input_offset += read;
}
return true;
}
bool SplitterContext::UpdateData(const std::vector<u8>& input, std::size_t& input_offset,
std::size_t& bytes_read, s32 in_data_count) {
const auto UpdateOffsets = [&](std::size_t read) {
input_offset += read;
bytes_read += read;
};
for (s32 i = 0; i < in_data_count; i++) {
if (!AudioCommon::CanConsumeBuffer(input.size(), input_offset,
sizeof(SplitterInfo::InDestinationParams))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
SplitterInfo::InDestinationParams header{};
std::memcpy(&header, input.data() + input_offset,
sizeof(SplitterInfo::InDestinationParams));
UpdateOffsets(sizeof(SplitterInfo::InDestinationParams));
// Logged as errors; these don't actually cause a bailout for some reason
if (header.magic != SplitterMagic::DataHeader) {
LOG_ERROR(Audio, "Bad splitter data header");
break;
}
if (header.splitter_id < 0 || static_cast<std::size_t>(header.splitter_id) >= data_count) {
LOG_ERROR(Audio, "Bad splitter data id");
break;
}
GetData(header.splitter_id).Update(header);
}
return true;
}
bool SplitterContext::RecomposeDestination(ServerSplitterInfo& info,
SplitterInfo::InInfoPrams& header,
const std::vector<u8>& input,
const std::size_t& input_offset) {
// Clear our current destinations
auto* current_head = info.GetHead();
while (current_head != nullptr) {
auto* next_head = current_head->GetNextDestination();
current_head->SetNextDestination(nullptr);
current_head = next_head;
}
info.SetHead(nullptr);
s32 size = header.length;
// If the splitter bug is present, calculate fixed size
if (!bug_fixed) {
if (info_count > 0) {
const auto factor = data_count / info_count;
size = std::min(header.length, static_cast<s32>(factor));
} else {
size = 0;
}
}
if (size < 1) {
LOG_ERROR(Audio, "Invalid splitter info size! size={:X}", size);
return true;
}
auto* start_head = &GetData(header.resource_id_base);
current_head = start_head;
std::vector<s32_le> resource_ids(size - 1);
if (!AudioCommon::CanConsumeBuffer(input.size(), input_offset,
resource_ids.size() * sizeof(s32_le))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(resource_ids.data(), input.data() + input_offset,
resource_ids.size() * sizeof(s32_le));
for (auto resource_id : resource_ids) {
auto* head = &GetData(resource_id);
current_head->SetNextDestination(head);
current_head = head;
}
info.SetHead(start_head);
info.SetHeadDepth(size);
return true;
}
NodeStates::NodeStates() = default;
NodeStates::~NodeStates() = default;
void NodeStates::Initialize(std::size_t node_count_) {
// Setup our work parameters
node_count = node_count_;
was_node_found.resize(node_count);
was_node_completed.resize(node_count);
index_list.resize(node_count);
index_stack.Reset(node_count * node_count);
}
bool NodeStates::Tsort(EdgeMatrix& edge_matrix) {
return DepthFirstSearch(edge_matrix);
}
std::size_t NodeStates::GetIndexPos() const {
return index_pos;
}
const std::vector<s32>& NodeStates::GetIndexList() const {
return index_list;
}
void NodeStates::PushTsortResult(s32 index) {
ASSERT(index < static_cast<s32>(node_count));
index_list[index_pos++] = index;
}
bool NodeStates::DepthFirstSearch(EdgeMatrix& edge_matrix) {
ResetState();
for (std::size_t i = 0; i < node_count; i++) {
const auto node_id = static_cast<s32>(i);
// If we don't have a state, send to our index stack for work
if (GetState(i) == NodeStates::State::NoState) {
index_stack.push(node_id);
}
// While we have work to do in our stack
while (index_stack.Count() > 0) {
// Get the current node
const auto current_stack_index = index_stack.top();
// Check if we've seen the node yet
const auto index_state = GetState(current_stack_index);
if (index_state == NodeStates::State::NoState) {
// Mark the node as seen
UpdateState(NodeStates::State::InFound, current_stack_index);
} else if (index_state == NodeStates::State::InFound) {
// We've seen this node before, mark it as completed
UpdateState(NodeStates::State::InCompleted, current_stack_index);
// Update our index list
PushTsortResult(current_stack_index);
// Pop the stack
index_stack.pop();
continue;
} else if (index_state == NodeStates::State::InCompleted) {
// If our node is already sorted, clear it
index_stack.pop();
continue;
}
const auto edge_node_count = edge_matrix.GetNodeCount();
for (s32 j = 0; j < static_cast<s32>(edge_node_count); j++) {
// Check if our node is connected to our edge matrix
if (!edge_matrix.Connected(current_stack_index, j)) {
continue;
}
// Check if our node exists
const auto node_state = GetState(j);
if (node_state == NodeStates::State::NoState) {
// Add more work
index_stack.push(j);
} else if (node_state == NodeStates::State::InFound) {
ASSERT_MSG(false, "Node start marked as found");
ResetState();
return false;
}
}
}
}
return true;
}
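// Usage sketch (illustrative): topologically order a three-node chain 0 -> 1 -> 2.
//     EdgeMatrix edges;
//     edges.Initialize(3);
//     edges.Connect(0, 1);
//     edges.Connect(1, 2);
//     NodeStates states;
//     states.Initialize(3);
//     if (states.Tsort(edges)) {
//         // GetIndexList() now holds every node, pushed in the order it was completed
//         // (post-order of the search).
//     }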
void NodeStates::ResetState() {
// Reset to the start of our index stack
index_pos = 0;
for (std::size_t i = 0; i < node_count; i++) {
// Mark all nodes as not found
was_node_found[i] = false;
// Mark all nodes as uncompleted
was_node_completed[i] = false;
// Mark all indexes as invalid
index_list[i] = -1;
}
}
void NodeStates::UpdateState(NodeStates::State state, std::size_t i) {
switch (state) {
case NodeStates::State::NoState:
was_node_found[i] = false;
was_node_completed[i] = false;
break;
case NodeStates::State::InFound:
was_node_found[i] = true;
was_node_completed[i] = false;
break;
case NodeStates::State::InCompleted:
was_node_found[i] = false;
was_node_completed[i] = true;
break;
}
}
NodeStates::State NodeStates::GetState(std::size_t i) {
ASSERT(i < node_count);
if (was_node_found[i]) {
// If our node exists in our found list
return NodeStates::State::InFound;
} else if (was_node_completed[i]) {
// If node is in the completed list
return NodeStates::State::InCompleted;
} else {
// If in neither
return NodeStates::State::NoState;
}
}
NodeStates::Stack::Stack() = default;
NodeStates::Stack::~Stack() = default;
void NodeStates::Stack::Reset(std::size_t size) {
// Mark our stack as empty
stack.resize(size);
stack_size = size;
stack_pos = 0;
std::fill(stack.begin(), stack.end(), 0);
}
void NodeStates::Stack::push(s32 val) {
ASSERT(stack_pos < stack_size);
stack[stack_pos++] = val;
}
std::size_t NodeStates::Stack::Count() const {
return stack_pos;
}
s32 NodeStates::Stack::top() const {
ASSERT(stack_pos > 0);
return stack[stack_pos - 1];
}
s32 NodeStates::Stack::pop() {
ASSERT(stack_pos > 0);
stack_pos--;
return stack[stack_pos];
}
EdgeMatrix::EdgeMatrix() = default;
EdgeMatrix::~EdgeMatrix() = default;
void EdgeMatrix::Initialize(std::size_t _node_count) {
node_count = _node_count;
edge_matrix.resize(node_count * node_count);
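// Row-major adjacency matrix: the edge (a, b) lives at index a * node_count + b.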
}
bool EdgeMatrix::Connected(s32 a, s32 b) {
return GetState(a, b);
}
void EdgeMatrix::Connect(s32 a, s32 b) {
SetState(a, b, true);
}
void EdgeMatrix::Disconnect(s32 a, s32 b) {
SetState(a, b, false);
}
void EdgeMatrix::RemoveEdges(s32 edge) {
for (std::size_t i = 0; i < node_count; i++) {
SetState(edge, static_cast<s32>(i), false);
}
}
std::size_t EdgeMatrix::GetNodeCount() const {
return node_count;
}
void EdgeMatrix::SetState(s32 a, s32 b, bool state) {
ASSERT(InRange(a, b));
edge_matrix.at(a * node_count + b) = state;
}
bool EdgeMatrix::GetState(s32 a, s32 b) {
ASSERT(InRange(a, b));
return edge_matrix.at(a * node_count + b);
}
bool EdgeMatrix::InRange(s32 a, s32 b) const {
const std::size_t pos = a * node_count + b;
return pos < (node_count * node_count);
}
} // namespace AudioCore

View File

@@ -1,218 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <stack>
#include <vector>
#include "audio_core/common.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
namespace AudioCore {
class BehaviorInfo;
class EdgeMatrix {
public:
EdgeMatrix();
~EdgeMatrix();
void Initialize(std::size_t _node_count);
bool Connected(s32 a, s32 b);
void Connect(s32 a, s32 b);
void Disconnect(s32 a, s32 b);
void RemoveEdges(s32 edge);
std::size_t GetNodeCount() const;
private:
void SetState(s32 a, s32 b, bool state);
bool GetState(s32 a, s32 b);
bool InRange(s32 a, s32 b) const;
std::vector<bool> edge_matrix{};
std::size_t node_count{};
};
class NodeStates {
public:
enum class State {
NoState = 0,
InFound = 1,
InCompleted = 2,
};
// Looks to be a fixed size stack. Placed within the NodeStates class based on symbols
class Stack {
public:
Stack();
~Stack();
void Reset(std::size_t size);
void push(s32 val);
std::size_t Count() const;
s32 top() const;
s32 pop();
private:
std::vector<s32> stack{};
std::size_t stack_size{};
std::size_t stack_pos{};
};
NodeStates();
~NodeStates();
void Initialize(std::size_t node_count_);
bool Tsort(EdgeMatrix& edge_matrix);
std::size_t GetIndexPos() const;
const std::vector<s32>& GetIndexList() const;
private:
void PushTsortResult(s32 index);
bool DepthFirstSearch(EdgeMatrix& edge_matrix);
void ResetState();
void UpdateState(State state, std::size_t i);
State GetState(std::size_t i);
std::size_t node_count{};
std::vector<bool> was_node_found{};
std::vector<bool> was_node_completed{};
std::size_t index_pos{};
std::vector<s32> index_list{};
Stack index_stack{};
};
enum class SplitterMagic : u32_le {
SplitterHeader = Common::MakeMagic('S', 'N', 'D', 'H'),
DataHeader = Common::MakeMagic('S', 'N', 'D', 'D'),
InfoHeader = Common::MakeMagic('S', 'N', 'D', 'I'),
};
class SplitterInfo {
public:
struct InHeader {
SplitterMagic magic{};
s32_le info_count{};
s32_le data_count{};
INSERT_PADDING_WORDS(5);
};
static_assert(sizeof(InHeader) == 0x20, "SplitterInfo::InHeader is an invalid size");
struct InInfoPrams {
SplitterMagic magic{};
s32_le send_id{};
s32_le sample_rate{};
s32_le length{};
s32_le resource_id_base{};
};
static_assert(sizeof(InInfoPrams) == 0x14, "SplitterInfo::InInfoPrams is an invalid size");
struct InDestinationParams {
SplitterMagic magic{};
s32_le splitter_id{};
std::array<float_le, AudioCommon::MAX_MIX_BUFFERS> mix_volumes{};
s32_le mix_id{};
bool in_use{};
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(InDestinationParams) == 0x70,
"SplitterInfo::InDestinationParams is an invalid size");
};
class ServerSplitterDestinationData {
public:
explicit ServerSplitterDestinationData(s32 id_);
~ServerSplitterDestinationData();
void Update(SplitterInfo::InDestinationParams& header);
ServerSplitterDestinationData* GetNextDestination();
const ServerSplitterDestinationData* GetNextDestination() const;
void SetNextDestination(ServerSplitterDestinationData* dest);
bool ValidMixId() const;
s32 GetMixId() const;
bool IsConfigured() const;
float GetMixVolume(std::size_t i) const;
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>& CurrentMixVolumes() const;
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>& LastMixVolumes() const;
void MarkDirty();
void UpdateInternalState();
private:
bool needs_update{};
bool in_use{};
s32 id{};
s32 mix_id{};
std::array<float, AudioCommon::MAX_MIX_BUFFERS> current_mix_volumes{};
std::array<float, AudioCommon::MAX_MIX_BUFFERS> last_mix_volumes{};
ServerSplitterDestinationData* next = nullptr;
};
class ServerSplitterInfo {
public:
explicit ServerSplitterInfo(s32 id_);
~ServerSplitterInfo();
void InitializeInfos();
void ClearNewConnectionFlag();
std::size_t Update(SplitterInfo::InInfoPrams& header);
ServerSplitterDestinationData* GetHead();
const ServerSplitterDestinationData* GetHead() const;
ServerSplitterDestinationData* GetData(std::size_t depth);
const ServerSplitterDestinationData* GetData(std::size_t depth) const;
bool HasNewConnection() const;
s32 GetLength() const;
void SetHead(ServerSplitterDestinationData* new_head);
void SetHeadDepth(s32 length);
private:
s32 sample_rate{};
s32 id{};
s32 send_length{};
ServerSplitterDestinationData* head = nullptr;
bool new_connection{};
};
class SplitterContext {
public:
SplitterContext();
~SplitterContext();
void Initialize(BehaviorInfo& behavior_info, std::size_t splitter_count,
std::size_t data_count);
bool Update(const std::vector<u8>& input, std::size_t& input_offset, std::size_t& bytes_read);
bool UsingSplitter() const;
ServerSplitterInfo& GetInfo(std::size_t i);
const ServerSplitterInfo& GetInfo(std::size_t i) const;
ServerSplitterDestinationData& GetData(std::size_t i);
const ServerSplitterDestinationData& GetData(std::size_t i) const;
ServerSplitterDestinationData* GetDestinationData(std::size_t info, std::size_t data);
const ServerSplitterDestinationData* GetDestinationData(std::size_t info,
std::size_t data) const;
void UpdateInternalState();
std::size_t GetInfoCount() const;
std::size_t GetDataCount() const;
private:
void Setup(std::size_t info_count, std::size_t data_count, bool is_splitter_bug_fixed);
bool UpdateInfo(const std::vector<u8>& input, std::size_t& input_offset,
std::size_t& bytes_read, s32 in_splitter_count);
bool UpdateData(const std::vector<u8>& input, std::size_t& input_offset,
std::size_t& bytes_read, s32 in_data_count);
bool RecomposeDestination(ServerSplitterInfo& info, SplitterInfo::InInfoPrams& header,
const std::vector<u8>& input, const std::size_t& input_offset);
std::vector<ServerSplitterInfo> infos{};
std::vector<ServerSplitterDestinationData> datas{};
std::size_t info_count{};
std::size_t data_count{};
bool bug_fixed{};
};
} // namespace AudioCore

View File

@@ -1,187 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <cmath>
#include "audio_core/sink.h"
#include "audio_core/sink_details.h"
#include "audio_core/sink_stream.h"
#include "audio_core/stream.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/settings.h"
#include "core/core_timing.h"
namespace AudioCore {
constexpr std::size_t MaxAudioBufferCount{32};
u32 Stream::GetNumChannels() const {
switch (format) {
case Format::Mono16:
return 1;
case Format::Stereo16:
return 2;
case Format::Multi51Channel16:
return 6;
}
UNIMPLEMENTED_MSG("Unimplemented format={}", static_cast<u32>(format));
return {};
}
Stream::Stream(Core::Timing::CoreTiming& core_timing_, u32 sample_rate_, Format format_,
ReleaseCallback&& release_callback_, SinkStream& sink_stream_, std::string&& name_)
: sample_rate{sample_rate_}, format{format_}, release_callback{std::move(release_callback_)},
sink_stream{sink_stream_}, core_timing{core_timing_}, name{std::move(name_)} {
release_event =
Core::Timing::CreateEvent(name, [this](std::uintptr_t, std::chrono::nanoseconds ns_late) {
ReleaseActiveBuffer(ns_late);
});
}
void Stream::Play() {
state = State::Playing;
PlayNextBuffer();
}
void Stream::Stop() {
state = State::Stopped;
UNIMPLEMENTED();
}
bool Stream::Flush() {
const bool had_buffers = !queued_buffers.empty();
while (!queued_buffers.empty()) {
queued_buffers.pop();
}
return had_buffers;
}
void Stream::SetVolume(float volume) {
game_volume = volume;
}
Stream::State Stream::GetState() const {
return state;
}
std::chrono::nanoseconds Stream::GetBufferReleaseNS(const Buffer& buffer) const {
const std::size_t num_samples{buffer.GetSamples().size() / GetNumChannels()};
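// e.g. a stereo buffer of 9600 samples at 48000 Hz -> 4800 frames -> 100,000,000 ns (100 ms).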
return std::chrono::nanoseconds((static_cast<u64>(num_samples) * 1000000000ULL) / sample_rate);
}
static void VolumeAdjustSamples(std::vector<s16>& samples, float game_volume) {
const float volume{std::clamp(Settings::Volume() - (1.0f - game_volume), 0.0f, 1.0f)};
if (volume == 1.0f) {
return;
}
// Perceived volume is not the same as the volume level
const float volume_scale_factor = (0.85f * ((volume * volume) - volume)) + volume;
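// Blends squared and linear volume (0.85*v^2 + 0.15*v); e.g. volume 0.5 scales samples by 0.2875.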
for (auto& sample : samples) {
sample = static_cast<s16>(sample * volume_scale_factor);
}
}
void Stream::PlayNextBuffer(std::chrono::nanoseconds ns_late) {
#ifndef _WIN32
auto now = std::chrono::steady_clock::now();
auto duration = now.time_since_epoch();
auto nanoseconds = std::chrono::duration_cast<std::chrono::nanoseconds>(duration);
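// Measure how late this callback actually fired relative to the last estimate so scheduling
// drift does not accumulate (non-Windows only; see expected_cb_time below).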
if (nanoseconds > expected_cb_time) {
ns_late = nanoseconds - expected_cb_time;
}
#endif
if (!IsPlaying()) {
// Ensure we are in playing state before playing the next buffer
sink_stream.Flush();
return;
}
if (active_buffer) {
// Do not queue a new buffer if we are already playing a buffer
return;
}
if (queued_buffers.empty()) {
// No queued buffers - we are effectively paused
sink_stream.Flush();
return;
}
active_buffer = queued_buffers.front();
queued_buffers.pop();
auto& samples = active_buffer->GetSamples();
VolumeAdjustSamples(samples, game_volume);
sink_stream.EnqueueSamples(GetNumChannels(), samples);
played_samples += samples.size();
const auto buffer_release_ns = GetBufferReleaseNS(*active_buffer);
// If ns_late exceeds the buffer's playback duration, ignore the delay
if (ns_late > buffer_release_ns) {
ns_late = {};
}
#ifndef _WIN32
expected_cb_time = nanoseconds + (buffer_release_ns - ns_late);
#endif
core_timing.ScheduleEvent(buffer_release_ns - ns_late, release_event, {});
}
void Stream::ReleaseActiveBuffer(std::chrono::nanoseconds ns_late) {
ASSERT(active_buffer);
released_buffers.push(std::move(active_buffer));
release_callback();
PlayNextBuffer(ns_late);
}
bool Stream::QueueBuffer(BufferPtr&& buffer) {
if (queued_buffers.size() < MaxAudioBufferCount) {
queued_buffers.push(std::move(buffer));
PlayNextBuffer();
return true;
}
return false;
}
bool Stream::ContainsBuffer([[maybe_unused]] Buffer::Tag tag) const {
UNIMPLEMENTED();
return {};
}
std::vector<Buffer::Tag> Stream::GetTagsAndReleaseBuffers(std::size_t max_count) {
std::vector<Buffer::Tag> tags;
for (std::size_t count = 0; count < max_count && !released_buffers.empty(); ++count) {
if (released_buffers.front()) {
tags.push_back(released_buffers.front()->GetTag());
} else {
ASSERT_MSG(false, "Invalid tag in released_buffers!");
}
released_buffers.pop();
}
return tags;
}
std::vector<Buffer::Tag> Stream::GetTagsAndReleaseBuffers() {
std::vector<Buffer::Tag> tags;
tags.reserve(released_buffers.size());
while (!released_buffers.empty()) {
if (released_buffers.front()) {
tags.push_back(released_buffers.front()->GetTag());
} else {
ASSERT_MSG(false, "Invalid tag in released_buffers!");
}
released_buffers.pop();
}
return tags;
}
} // namespace AudioCore

View File

@@ -1,133 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <chrono>
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include <queue>
#include "audio_core/buffer.h"
#include "common/common_types.h"
namespace Core::Timing {
class CoreTiming;
struct EventType;
} // namespace Core::Timing
namespace AudioCore {
class SinkStream;
/**
 * Represents an audio stream, which is a sequence of queued buffers, to be output by AudioOut
*/
class Stream {
public:
/// Audio format of the stream
enum class Format {
Mono16,
Stereo16,
Multi51Channel16,
};
/// Current state of the stream
enum class State {
Stopped,
Playing,
};
/// Callback function type, used to change guest state on a buffer being released
using ReleaseCallback = std::function<void()>;
Stream(Core::Timing::CoreTiming& core_timing_, u32 sample_rate_, Format format_,
ReleaseCallback&& release_callback_, SinkStream& sink_stream_, std::string&& name_);
/// Plays the audio stream
void Play();
/// Stops the audio stream
void Stop();
/// Queues a buffer into the audio stream, returns true on success
bool QueueBuffer(BufferPtr&& buffer);
/// Flush audio buffers
bool Flush();
/// Returns true if the audio stream contains a buffer with the specified tag
[[nodiscard]] bool ContainsBuffer(Buffer::Tag tag) const;
/// Returns a vector of recently released buffers specified by tag
[[nodiscard]] std::vector<Buffer::Tag> GetTagsAndReleaseBuffers(std::size_t max_count);
/// Returns a vector of all recently released buffers specified by tag
[[nodiscard]] std::vector<Buffer::Tag> GetTagsAndReleaseBuffers();
void SetVolume(float volume);
[[nodiscard]] float GetVolume() const {
return game_volume;
}
/// Returns true if the stream is currently playing
[[nodiscard]] bool IsPlaying() const {
return state == State::Playing;
}
/// Returns the number of queued buffers
[[nodiscard]] std::size_t GetQueueSize() const {
return queued_buffers.size();
}
/// Gets the sample rate
[[nodiscard]] u32 GetSampleRate() const {
return sample_rate;
}
/// Gets the number of samples played so far
[[nodiscard]] u64 GetPlayedSampleCount() const {
return played_samples;
}
/// Gets the number of channels
[[nodiscard]] u32 GetNumChannels() const;
/// Get the state
[[nodiscard]] State GetState() const;
private:
/// Plays the next queued buffer in the audio stream, starting playback if necessary
void PlayNextBuffer(std::chrono::nanoseconds ns_late = {});
/// Releases the actively playing buffer, signalling that it has been completed
void ReleaseActiveBuffer(std::chrono::nanoseconds ns_late = {});
/// Gets the number of core cycles when the specified buffer will be released
[[nodiscard]] std::chrono::nanoseconds GetBufferReleaseNS(const Buffer& buffer) const;
u32 sample_rate; ///< Sample rate of the stream
u64 played_samples{}; ///< The current played sample count
Format format; ///< Format of the stream
float game_volume = 1.0f; ///< The volume the game currently has set
ReleaseCallback release_callback; ///< Buffer release callback for the stream
State state{State::Stopped}; ///< Playback state of the stream
std::shared_ptr<Core::Timing::EventType>
release_event; ///< Core timing release event for the stream
BufferPtr active_buffer; ///< Actively playing buffer in the stream
std::queue<BufferPtr> queued_buffers; ///< Buffers queued to be played in the stream
std::queue<BufferPtr> released_buffers; ///< Buffers recently released from the stream
SinkStream& sink_stream; ///< Output sink for the stream
Core::Timing::CoreTiming& core_timing; ///< Core timing instance.
std::string name; ///< Name of the stream, must be unique
#ifndef _WIN32
std::chrono::nanoseconds expected_cb_time = {}; ///< Estimated time of next callback
#endif
};
using StreamPtr = std::shared_ptr<Stream>;
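// Usage sketch (illustrative): the AudioOut service drives a Stream roughly like this.
//     auto stream = std::make_shared<Stream>(core_timing, 48000, Stream::Format::Stereo16,
//                                            std::move(on_release), sink_stream, "IAudioOut");
//     stream->Play();
//     stream->QueueBuffer(std::move(buffer));           // returns false once 32 buffers are queued
//     const auto released = stream->GetTagsAndReleaseBuffers(16);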
} // namespace AudioCore

View File

@@ -1,68 +0,0 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include "audio_core/time_stretch.h"
#include "common/logging/log.h"
namespace AudioCore {
TimeStretcher::TimeStretcher(u32 sample_rate, u32 channel_count) : m_sample_rate{sample_rate} {
m_sound_touch.setChannels(channel_count);
m_sound_touch.setSampleRate(sample_rate);
m_sound_touch.setPitch(1.0);
m_sound_touch.setTempo(1.0);
}
void TimeStretcher::Clear() {
m_sound_touch.clear();
}
void TimeStretcher::Flush() {
m_sound_touch.flush();
}
std::size_t TimeStretcher::Process(const s16* in, std::size_t num_in, s16* out,
std::size_t num_out) {
const double time_delta = static_cast<double>(num_out) / m_sample_rate; // seconds
// We were given num_in samples, and num_out samples were requested from us.
double current_ratio = static_cast<double>(num_in) / static_cast<double>(num_out);
const double max_latency = 0.25; // seconds
const double max_backlog = m_sample_rate * max_latency;
const double backlog_fullness = m_sound_touch.numSamples() / max_backlog;
if (backlog_fullness > 4.0) {
// Too many samples in backlog: Don't push anymore on
num_in = 0;
}
// We ideally want the backlog to be about 50% full.
// This gives some headroom both ways to prevent underflow and overflow.
// We tweak current_ratio to encourage this.
constexpr double tweak_time_scale = 0.05; // seconds
const double tweak_correction = (backlog_fullness - 0.5) * (time_delta / tweak_time_scale);
current_ratio *= std::pow(1.0 + 2.0 * tweak_correction, tweak_correction < 0 ? 3.0 : 1.0);
// This low-pass filter smooths out variance in the calculated stretch ratio.
// The time-scale determines how responsive this filter is.
constexpr double lpf_time_scale = 0.712; // seconds
const double lpf_gain = 1.0 - std::exp(-time_delta / lpf_time_scale);
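// One-pole smoothing: e.g. with num_out = 2048 frames at 48 kHz, time_delta ~= 0.0427 s and
// lpf_gain ~= 1 - exp(-0.0427 / 0.712) ~= 0.058, so roughly 6% of the remaining error is
// corrected per call.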
m_stretch_ratio += lpf_gain * (current_ratio - m_stretch_ratio);
// Place a lower limit of 5% speed. When a game boots up, there will be
// many silence samples. These do not need to be timestretched.
m_stretch_ratio = std::max(m_stretch_ratio, 0.05);
m_sound_touch.setTempo(m_stretch_ratio);
LOG_TRACE(Audio, "{:5}/{:5} ratio:{:0.6f} backlog:{:0.6f}", num_in, num_out, m_stretch_ratio,
backlog_fullness);
m_sound_touch.putSamples(in, static_cast<u32>(num_in));
return m_sound_touch.receiveSamples(out, static_cast<u32>(num_out));
}
} // namespace AudioCore

View File

@@ -1,34 +0,0 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <cstddef>
#include <SoundTouch.h>
#include "common/common_types.h"
namespace AudioCore {
class TimeStretcher {
public:
TimeStretcher(u32 sample_rate, u32 channel_count);
/// @param in Input sample buffer
/// @param num_in Number of input frames in `in`
/// @param out Output sample buffer
/// @param num_out Desired number of output frames in `out`
/// @returns Actual number of frames written to `out`
std::size_t Process(const s16* in, std::size_t num_in, s16* out, std::size_t num_out);
void Clear();
void Flush();
private:
u32 m_sample_rate;
soundtouch::SoundTouch m_sound_touch;
double m_stretch_ratio = 1.0;
};
} // namespace AudioCore

View File

@@ -1,579 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include "audio_core/behavior_info.h"
#include "audio_core/voice_context.h"
#include "core/memory.h"
namespace AudioCore {
ServerVoiceChannelResource::ServerVoiceChannelResource(s32 id_) : id(id_) {}
ServerVoiceChannelResource::~ServerVoiceChannelResource() = default;
bool ServerVoiceChannelResource::InUse() const {
return in_use;
}
float ServerVoiceChannelResource::GetCurrentMixVolumeAt(std::size_t i) const {
ASSERT(i < AudioCommon::MAX_MIX_BUFFERS);
return mix_volume.at(i);
}
float ServerVoiceChannelResource::GetLastMixVolumeAt(std::size_t i) const {
ASSERT(i < AudioCommon::MAX_MIX_BUFFERS);
return last_mix_volume.at(i);
}
void ServerVoiceChannelResource::Update(VoiceChannelResource::InParams& in_params) {
in_use = in_params.in_use;
// Update our mix volumes only if it's in use
if (in_params.in_use) {
mix_volume = in_params.mix_volume;
}
}
void ServerVoiceChannelResource::UpdateLastMixVolumes() {
last_mix_volume = mix_volume;
}
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
ServerVoiceChannelResource::GetCurrentMixVolume() const {
return mix_volume;
}
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
ServerVoiceChannelResource::GetLastMixVolume() const {
return last_mix_volume;
}
ServerVoiceInfo::ServerVoiceInfo() {
Initialize();
}
ServerVoiceInfo::~ServerVoiceInfo() = default;
void ServerVoiceInfo::Initialize() {
in_params.in_use = false;
in_params.node_id = 0;
in_params.id = 0;
in_params.current_playstate = ServerPlayState::Stop;
in_params.priority = 255;
in_params.sample_rate = 0;
in_params.sample_format = SampleFormat::Invalid;
in_params.channel_count = 0;
in_params.pitch = 0.0f;
in_params.volume = 0.0f;
in_params.last_volume = 0.0f;
in_params.biquad_filter.fill({});
in_params.wave_buffer_count = 0;
in_params.wave_buffer_head = 0;
in_params.mix_id = AudioCommon::NO_MIX;
in_params.splitter_info_id = AudioCommon::NO_SPLITTER;
in_params.additional_params_address = 0;
in_params.additional_params_size = 0;
in_params.is_new = false;
out_params.played_sample_count = 0;
out_params.wave_buffer_consumed = 0;
in_params.voice_drop_flag = false;
in_params.buffer_mapped = true;
in_params.wave_buffer_flush_request_count = 0;
in_params.was_biquad_filter_enabled.fill(false);
for (auto& wave_buffer : in_params.wave_buffer) {
wave_buffer.start_sample_offset = 0;
wave_buffer.end_sample_offset = 0;
wave_buffer.is_looping = false;
wave_buffer.end_of_stream = false;
wave_buffer.buffer_address = 0;
wave_buffer.buffer_size = 0;
wave_buffer.context_address = 0;
wave_buffer.context_size = 0;
wave_buffer.sent_to_dsp = true;
}
stored_samples.clear();
}
void ServerVoiceInfo::UpdateParameters(const VoiceInfo::InParams& voice_in,
BehaviorInfo& behavior_info) {
in_params.in_use = voice_in.is_in_use;
in_params.id = voice_in.id;
in_params.node_id = voice_in.node_id;
in_params.last_playstate = in_params.current_playstate;
switch (voice_in.play_state) {
case PlayState::Paused:
in_params.current_playstate = ServerPlayState::Paused;
break;
case PlayState::Stopped:
if (in_params.current_playstate != ServerPlayState::Stop) {
in_params.current_playstate = ServerPlayState::RequestStop;
}
break;
case PlayState::Started:
in_params.current_playstate = ServerPlayState::Play;
break;
default:
ASSERT_MSG(false, "Unknown playstate {}", voice_in.play_state);
break;
}
in_params.priority = voice_in.priority;
in_params.sorting_order = voice_in.sorting_order;
in_params.sample_rate = voice_in.sample_rate;
in_params.sample_format = voice_in.sample_format;
in_params.channel_count = voice_in.channel_count;
in_params.pitch = voice_in.pitch;
in_params.volume = voice_in.volume;
in_params.biquad_filter = voice_in.biquad_filter;
in_params.wave_buffer_count = voice_in.wave_buffer_count;
in_params.wave_buffer_head = voice_in.wave_buffer_head;
if (behavior_info.IsFlushVoiceWaveBuffersSupported()) {
const auto in_request_count = in_params.wave_buffer_flush_request_count;
const auto voice_request_count = voice_in.wave_buffer_flush_request_count;
in_params.wave_buffer_flush_request_count =
static_cast<u8>(in_request_count + voice_request_count);
}
in_params.mix_id = voice_in.mix_id;
if (behavior_info.IsSplitterSupported()) {
in_params.splitter_info_id = voice_in.splitter_info_id;
} else {
in_params.splitter_info_id = AudioCommon::NO_SPLITTER;
}
std::memcpy(in_params.voice_channel_resource_id.data(),
voice_in.voice_channel_resource_ids.data(),
sizeof(s32) * in_params.voice_channel_resource_id.size());
if (behavior_info.IsVoicePlayedSampleCountResetAtLoopPointSupported()) {
in_params.behavior_flags.is_played_samples_reset_at_loop_point =
voice_in.behavior_flags.is_played_samples_reset_at_loop_point;
} else {
in_params.behavior_flags.is_played_samples_reset_at_loop_point.Assign(0);
}
if (behavior_info.IsVoicePitchAndSrcSkippedSupported()) {
in_params.behavior_flags.is_pitch_and_src_skipped =
voice_in.behavior_flags.is_pitch_and_src_skipped;
} else {
in_params.behavior_flags.is_pitch_and_src_skipped.Assign(0);
}
if (voice_in.is_voice_drop_flag_clear_requested) {
in_params.voice_drop_flag = false;
}
if (in_params.additional_params_address != voice_in.additional_params_address ||
in_params.additional_params_size != voice_in.additional_params_size) {
in_params.additional_params_address = voice_in.additional_params_address;
in_params.additional_params_size = voice_in.additional_params_size;
// TODO(ogniK): Reattach buffer, do we actually need to? Maybe just signal to the DSP that
// our context is new
}
}
void ServerVoiceInfo::UpdateWaveBuffers(
const VoiceInfo::InParams& voice_in,
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states,
BehaviorInfo& behavior_info) {
if (voice_in.is_new) {
// Initialize our wave buffers
for (auto& wave_buffer : in_params.wave_buffer) {
wave_buffer.start_sample_offset = 0;
wave_buffer.end_sample_offset = 0;
wave_buffer.is_looping = false;
wave_buffer.end_of_stream = false;
wave_buffer.buffer_address = 0;
wave_buffer.buffer_size = 0;
wave_buffer.context_address = 0;
wave_buffer.context_size = 0;
wave_buffer.loop_start_sample = 0;
wave_buffer.loop_end_sample = 0;
wave_buffer.sent_to_dsp = true;
}
// Mark all our wave buffers as invalid
for (std::size_t channel = 0; channel < static_cast<std::size_t>(in_params.channel_count);
channel++) {
for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; ++i) {
voice_states[channel]->is_wave_buffer_valid[i] = false;
}
}
}
// Update our wave buffers
for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
// Assume that we have at least 1 channel voice state
const auto have_valid_wave_buffer = voice_states[0]->is_wave_buffer_valid[i];
UpdateWaveBuffer(in_params.wave_buffer[i], voice_in.wave_buffer[i], in_params.sample_format,
have_valid_wave_buffer, behavior_info);
}
}
void ServerVoiceInfo::UpdateWaveBuffer(ServerWaveBuffer& out_wavebuffer,
const WaveBuffer& in_wave_buffer, SampleFormat sample_format,
bool is_buffer_valid,
[[maybe_unused]] BehaviorInfo& behavior_info) {
if (!is_buffer_valid && out_wavebuffer.sent_to_dsp && out_wavebuffer.buffer_address != 0) {
out_wavebuffer.buffer_address = 0;
out_wavebuffer.buffer_size = 0;
}
if (!in_wave_buffer.sent_to_server || !in_params.buffer_mapped) {
// Validate sample offset sizings
if (sample_format == SampleFormat::Pcm16) {
const s64 buffer_size = static_cast<s64>(in_wave_buffer.buffer_size);
const s64 start = sizeof(s16) * in_wave_buffer.start_sample_offset;
const s64 end = sizeof(s16) * in_wave_buffer.end_sample_offset;
if (0 > start || start > buffer_size || 0 > end || end > buffer_size) {
// TODO(ogniK): Write error info
LOG_ERROR(Audio,
"PCM16 wavebuffer has an invalid size. Buffer has size 0x{:08X}, but "
"offsets were "
"{:08X} - 0x{:08X}",
buffer_size, sizeof(s16) * in_wave_buffer.start_sample_offset,
sizeof(s16) * in_wave_buffer.end_sample_offset);
return;
}
} else if (sample_format == SampleFormat::Adpcm) {
const s64 buffer_size = static_cast<s64>(in_wave_buffer.buffer_size);
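// DSP ADPCM packs 14 samples into each 8-byte frame (one header byte plus seven payload
// bytes, two 4-bit nibbles per byte); the /14, *8 and nibble rounding below convert a
// sample offset into a byte offset.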
const s64 start_frames = in_wave_buffer.start_sample_offset / 14;
const s64 start_extra = in_wave_buffer.start_sample_offset % 14 == 0
? 0
: (in_wave_buffer.start_sample_offset % 14) / 2 + 1 +
(in_wave_buffer.start_sample_offset % 2);
const s64 start = start_frames * 8 + start_extra;
const s64 end_frames = in_wave_buffer.end_sample_offset / 14;
const s64 end_extra = in_wave_buffer.end_sample_offset % 14 == 0
? 0
: (in_wave_buffer.end_sample_offset % 14) / 2 + 1 +
(in_wave_buffer.end_sample_offset % 2);
const s64 end = end_frames * 8 + end_extra;
if (in_wave_buffer.start_sample_offset < 0 || start > buffer_size ||
in_wave_buffer.end_sample_offset < 0 || end > buffer_size) {
LOG_ERROR(Audio,
"ADPMC wavebuffer has an invalid size. Buffer has size 0x{:08X}, but "
"offsets were "
"{:08X} - 0x{:08X}",
in_wave_buffer.buffer_size, start, end);
return;
}
}
// TODO(ogniK): ADPCM Size error
out_wavebuffer.sent_to_dsp = false;
out_wavebuffer.start_sample_offset = in_wave_buffer.start_sample_offset;
out_wavebuffer.end_sample_offset = in_wave_buffer.end_sample_offset;
out_wavebuffer.is_looping = in_wave_buffer.is_looping;
out_wavebuffer.end_of_stream = in_wave_buffer.end_of_stream;
out_wavebuffer.buffer_address = in_wave_buffer.buffer_address;
out_wavebuffer.buffer_size = in_wave_buffer.buffer_size;
out_wavebuffer.context_address = in_wave_buffer.context_address;
out_wavebuffer.context_size = in_wave_buffer.context_size;
out_wavebuffer.loop_start_sample = in_wave_buffer.loop_start_sample;
out_wavebuffer.loop_end_sample = in_wave_buffer.loop_end_sample;
in_params.buffer_mapped =
in_wave_buffer.buffer_address != 0 && in_wave_buffer.buffer_size != 0;
// TODO(ogniK): Pool mapper attachment
// TODO(ogniK): IsAdpcmLoopContextBugFixed
if (sample_format == SampleFormat::Adpcm && in_wave_buffer.context_address != 0 &&
in_wave_buffer.context_size != 0 && behavior_info.IsAdpcmLoopContextBugFixed()) {
} else {
out_wavebuffer.context_address = 0;
out_wavebuffer.context_size = 0;
}
}
}
void ServerVoiceInfo::WriteOutStatus(
VoiceInfo::OutParams& voice_out, VoiceInfo::InParams& voice_in,
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states) {
if (voice_in.is_new || in_params.is_new) {
in_params.is_new = true;
voice_out.wave_buffer_consumed = 0;
voice_out.played_sample_count = 0;
voice_out.voice_dropped = false;
} else {
const auto& state = voice_states[0];
voice_out.wave_buffer_consumed = state->wave_buffer_consumed;
voice_out.played_sample_count = state->played_sample_count;
voice_out.voice_dropped = state->voice_dropped;
}
}
const ServerVoiceInfo::InParams& ServerVoiceInfo::GetInParams() const {
return in_params;
}
ServerVoiceInfo::InParams& ServerVoiceInfo::GetInParams() {
return in_params;
}
const ServerVoiceInfo::OutParams& ServerVoiceInfo::GetOutParams() const {
return out_params;
}
ServerVoiceInfo::OutParams& ServerVoiceInfo::GetOutParams() {
return out_params;
}
bool ServerVoiceInfo::ShouldSkip() const {
// TODO(ogniK): Handle unmapped wave buffers or parameters
return !in_params.in_use || in_params.wave_buffer_count == 0 || !in_params.buffer_mapped ||
in_params.voice_drop_flag;
}
bool ServerVoiceInfo::UpdateForCommandGeneration(VoiceContext& voice_context) {
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT> dsp_voice_states{};
if (in_params.is_new) {
ResetResources(voice_context);
in_params.last_volume = in_params.volume;
in_params.is_new = false;
}
const s32 channel_count = in_params.channel_count;
for (s32 i = 0; i < channel_count; i++) {
const auto channel_resource = in_params.voice_channel_resource_id[i];
dsp_voice_states[i] =
&voice_context.GetDspSharedState(static_cast<std::size_t>(channel_resource));
}
return UpdateParametersForCommandGeneration(dsp_voice_states);
}
void ServerVoiceInfo::ResetResources(VoiceContext& voice_context) {
const s32 channel_count = in_params.channel_count;
for (s32 i = 0; i < channel_count; i++) {
const auto channel_resource = in_params.voice_channel_resource_id[i];
auto& dsp_state =
voice_context.GetDspSharedState(static_cast<std::size_t>(channel_resource));
dsp_state = {};
voice_context.GetChannelResource(static_cast<std::size_t>(channel_resource))
.UpdateLastMixVolumes();
}
}
bool ServerVoiceInfo::UpdateParametersForCommandGeneration(
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states) {
const s32 channel_count = in_params.channel_count;
if (in_params.wave_buffer_flush_request_count > 0) {
FlushWaveBuffers(in_params.wave_buffer_flush_request_count, dsp_voice_states,
channel_count);
in_params.wave_buffer_flush_request_count = 0;
}
switch (in_params.current_playstate) {
case ServerPlayState::Play: {
for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
if (!in_params.wave_buffer[i].sent_to_dsp) {
for (s32 channel = 0; channel < channel_count; channel++) {
dsp_voice_states[channel]->is_wave_buffer_valid[i] = true;
}
in_params.wave_buffer[i].sent_to_dsp = true;
}
}
in_params.should_depop = false;
return HasValidWaveBuffer(dsp_voice_states[0]);
}
case ServerPlayState::Paused:
case ServerPlayState::Stop: {
in_params.should_depop = in_params.last_playstate == ServerPlayState::Play;
return in_params.should_depop;
}
case ServerPlayState::RequestStop: {
for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
in_params.wave_buffer[i].sent_to_dsp = true;
for (s32 channel = 0; channel < channel_count; channel++) {
auto* dsp_state = dsp_voice_states[channel];
if (dsp_state->is_wave_buffer_valid[i]) {
dsp_state->wave_buffer_index =
(dsp_state->wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
dsp_state->wave_buffer_consumed++;
}
dsp_state->is_wave_buffer_valid[i] = false;
}
}
for (s32 channel = 0; channel < channel_count; channel++) {
auto* dsp_state = dsp_voice_states[channel];
dsp_state->offset = 0;
dsp_state->played_sample_count = 0;
dsp_state->fraction = 0;
dsp_state->sample_history.fill(0);
dsp_state->context = {};
}
in_params.current_playstate = ServerPlayState::Stop;
in_params.should_depop = in_params.last_playstate == ServerPlayState::Play;
return in_params.should_depop;
}
default:
ASSERT_MSG(false, "Invalid playstate {}", in_params.current_playstate);
}
return false;
}
void ServerVoiceInfo::FlushWaveBuffers(
u8 flush_count, std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states,
s32 channel_count) {
auto wave_head = in_params.wave_buffer_head;
for (u8 i = 0; i < flush_count; i++) {
in_params.wave_buffer[wave_head].sent_to_dsp = true;
for (s32 channel = 0; channel < channel_count; channel++) {
auto* dsp_state = dsp_voice_states[channel];
dsp_state->wave_buffer_consumed++;
dsp_state->is_wave_buffer_valid[wave_head] = false;
dsp_state->wave_buffer_index =
(dsp_state->wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
}
wave_head = (wave_head + 1) % AudioCommon::MAX_WAVE_BUFFERS;
}
}
bool ServerVoiceInfo::HasValidWaveBuffer(const VoiceState* state) const {
const auto& valid_wb = state->is_wave_buffer_valid;
return std::find(valid_wb.begin(), valid_wb.end(), true) != valid_wb.end();
}
void ServerVoiceInfo::SetWaveBufferCompleted(VoiceState& dsp_state,
const ServerWaveBuffer& wave_buffer) {
dsp_state.is_wave_buffer_valid[dsp_state.wave_buffer_index] = false;
dsp_state.wave_buffer_consumed++;
dsp_state.wave_buffer_index = (dsp_state.wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
dsp_state.loop_count = 0;
if (wave_buffer.end_of_stream) {
dsp_state.played_sample_count = 0;
}
}
VoiceContext::VoiceContext(std::size_t voice_count_) : voice_count{voice_count_} {
for (std::size_t i = 0; i < voice_count; i++) {
voice_channel_resources.emplace_back(static_cast<s32>(i));
sorted_voice_info.push_back(&voice_info.emplace_back());
voice_states.emplace_back();
dsp_voice_states.emplace_back();
}
}
VoiceContext::~VoiceContext() {
sorted_voice_info.clear();
}
std::size_t VoiceContext::GetVoiceCount() const {
return voice_count;
}
ServerVoiceChannelResource& VoiceContext::GetChannelResource(std::size_t i) {
ASSERT(i < voice_count);
return voice_channel_resources.at(i);
}
const ServerVoiceChannelResource& VoiceContext::GetChannelResource(std::size_t i) const {
ASSERT(i < voice_count);
return voice_channel_resources.at(i);
}
VoiceState& VoiceContext::GetState(std::size_t i) {
ASSERT(i < voice_count);
return voice_states.at(i);
}
const VoiceState& VoiceContext::GetState(std::size_t i) const {
ASSERT(i < voice_count);
return voice_states.at(i);
}
VoiceState& VoiceContext::GetDspSharedState(std::size_t i) {
ASSERT(i < voice_count);
return dsp_voice_states.at(i);
}
const VoiceState& VoiceContext::GetDspSharedState(std::size_t i) const {
ASSERT(i < voice_count);
return dsp_voice_states.at(i);
}
ServerVoiceInfo& VoiceContext::GetInfo(std::size_t i) {
ASSERT(i < voice_count);
return voice_info.at(i);
}
const ServerVoiceInfo& VoiceContext::GetInfo(std::size_t i) const {
ASSERT(i < voice_count);
return voice_info.at(i);
}
ServerVoiceInfo& VoiceContext::GetSortedInfo(std::size_t i) {
ASSERT(i < voice_count);
return *sorted_voice_info.at(i);
}
const ServerVoiceInfo& VoiceContext::GetSortedInfo(std::size_t i) const {
ASSERT(i < voice_count);
return *sorted_voice_info.at(i);
}
s32 VoiceContext::DecodePcm16(s32* output_buffer, ServerWaveBuffer* wave_buffer, s32 channel,
s32 channel_count, s32 buffer_offset, s32 sample_count,
Core::Memory::Memory& memory) {
if (wave_buffer->buffer_address == 0) {
return 0;
}
if (wave_buffer->buffer_size == 0) {
return 0;
}
if (wave_buffer->end_sample_offset < wave_buffer->start_sample_offset) {
return 0;
}
const auto samples_remaining =
(wave_buffer->end_sample_offset - wave_buffer->start_sample_offset) - buffer_offset;
const auto start_offset = (wave_buffer->start_sample_offset + buffer_offset) * channel_count;
const auto buffer_pos = wave_buffer->buffer_address + start_offset;
s16* buffer_data = reinterpret_cast<s16*>(memory.GetPointer(buffer_pos));
const auto samples_processed = std::min(sample_count, samples_remaining);
// Fast path
if (channel_count == 1) {
for (std::ptrdiff_t i = 0; i < samples_processed; i++) {
output_buffer[i] = buffer_data[i];
}
} else {
for (std::ptrdiff_t i = 0; i < samples_processed; i++) {
output_buffer[i] = buffer_data[i * channel_count + channel];
}
}
return samples_processed;
}
void VoiceContext::SortInfo() {
for (std::size_t i = 0; i < voice_count; i++) {
sorted_voice_info[i] = &voice_info[i];
}
std::sort(sorted_voice_info.begin(), sorted_voice_info.end(),
[](const ServerVoiceInfo* lhs, const ServerVoiceInfo* rhs) {
const auto& lhs_in = lhs->GetInParams();
const auto& rhs_in = rhs->GetInParams();
// Sort by priority
if (lhs_in.priority != rhs_in.priority) {
return lhs_in.priority > rhs_in.priority;
} else {
// If the priorities match, sort by sorting order
return lhs_in.sorting_order > rhs_in.sorting_order;
}
});
}
void VoiceContext::UpdateStateByDspShared() {
voice_states = dsp_voice_states;
}
} // namespace AudioCore
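
A note on the PCM16 decode path above: samples are stored interleaved, so sample i of channel ch sits at index i * channel_count + ch, and the mono case degenerates to a straight copy. Below is a minimal standalone sketch of that de-interleave step, using a plain std::vector instead of guest memory; the function name is illustrative and not part of the emulator.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// De-interleaves one channel out of an interleaved s16 stream, mirroring the
// fast/slow paths of VoiceContext::DecodePcm16 (sketch only; no wave buffer bounds).
std::vector<int32_t> ExtractChannel(const std::vector<int16_t>& interleaved,
                                    std::size_t channel, std::size_t channel_count) {
    const std::size_t frames = interleaved.size() / channel_count;
    std::vector<int32_t> out(frames);
    if (channel_count == 1) {
        // Fast path: mono data is already contiguous.
        std::copy(interleaved.begin(), interleaved.end(), out.begin());
    } else {
        for (std::size_t i = 0; i < frames; ++i) {
            out[i] = interleaved[i * channel_count + channel];
        }
    }
    return out;
}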

View File

@@ -1,302 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include "audio_core/algorithm/interpolate.h"
#include "audio_core/codec.h"
#include "audio_core/common.h"
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
namespace Core::Memory {
class Memory;
}
namespace AudioCore {
class BehaviorInfo;
class VoiceContext;
enum class SampleFormat : u8 {
Invalid = 0,
Pcm8 = 1,
Pcm16 = 2,
Pcm24 = 3,
Pcm32 = 4,
PcmFloat = 5,
Adpcm = 6,
};
enum class PlayState : u8 {
Started = 0,
Stopped = 1,
Paused = 2,
};
enum class ServerPlayState {
Play = 0,
Stop = 1,
RequestStop = 2,
Paused = 3,
};
struct BiquadFilterParameter {
bool enabled{};
INSERT_PADDING_BYTES(1);
std::array<s16, 3> numerator{};
std::array<s16, 2> denominator{};
};
static_assert(sizeof(BiquadFilterParameter) == 0xc, "BiquadFilterParameter is an invalid size");
struct WaveBuffer {
u64_le buffer_address{};
u64_le buffer_size{};
s32_le start_sample_offset{};
s32_le end_sample_offset{};
u8 is_looping{};
u8 end_of_stream{};
u8 sent_to_server{};
INSERT_PADDING_BYTES(1);
s32 loop_count{};
u64 context_address{};
u64 context_size{};
u32 loop_start_sample{};
u32 loop_end_sample{};
};
static_assert(sizeof(WaveBuffer) == 0x38, "WaveBuffer is an invalid size");
struct ServerWaveBuffer {
VAddr buffer_address{};
std::size_t buffer_size{};
s32 start_sample_offset{};
s32 end_sample_offset{};
bool is_looping{};
bool end_of_stream{};
VAddr context_address{};
std::size_t context_size{};
s32 loop_count{};
u32 loop_start_sample{};
u32 loop_end_sample{};
bool sent_to_dsp{true};
};
struct BehaviorFlags {
BitField<0, 1, u16> is_played_samples_reset_at_loop_point;
BitField<1, 1, u16> is_pitch_and_src_skipped;
};
static_assert(sizeof(BehaviorFlags) == 0x4, "BehaviorFlags is an invalid size");
struct ADPCMContext {
u16 header;
s16 yn1;
s16 yn2;
};
static_assert(sizeof(ADPCMContext) == 0x6, "ADPCMContext is an invalid size");
struct VoiceState {
s64 played_sample_count;
s32 offset;
s32 wave_buffer_index;
std::array<bool, AudioCommon::MAX_WAVE_BUFFERS> is_wave_buffer_valid;
s32 wave_buffer_consumed;
std::array<s32, AudioCommon::MAX_SAMPLE_HISTORY> sample_history;
s32 fraction;
VAddr context_address;
Codec::ADPCM_Coeff coeff;
ADPCMContext context;
std::array<s64, 2> biquad_filter_state;
std::array<s32, AudioCommon::MAX_MIX_BUFFERS> previous_samples;
u32 external_context_size;
bool is_external_context_used;
bool voice_dropped;
s32 loop_count;
};
class VoiceChannelResource {
public:
struct InParams {
s32_le id{};
std::array<float_le, AudioCommon::MAX_MIX_BUFFERS> mix_volume{};
bool in_use{};
INSERT_PADDING_BYTES(11);
};
static_assert(sizeof(InParams) == 0x70, "InParams is an invalid size");
};
class ServerVoiceChannelResource {
public:
explicit ServerVoiceChannelResource(s32 id_);
~ServerVoiceChannelResource();
bool InUse() const;
float GetCurrentMixVolumeAt(std::size_t i) const;
float GetLastMixVolumeAt(std::size_t i) const;
void Update(VoiceChannelResource::InParams& in_params);
void UpdateLastMixVolumes();
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>& GetCurrentMixVolume() const;
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>& GetLastMixVolume() const;
private:
s32 id{};
std::array<float, AudioCommon::MAX_MIX_BUFFERS> mix_volume{};
std::array<float, AudioCommon::MAX_MIX_BUFFERS> last_mix_volume{};
bool in_use{};
};
class VoiceInfo {
public:
struct InParams {
s32_le id{};
u32_le node_id{};
u8 is_new{};
u8 is_in_use{};
PlayState play_state{};
SampleFormat sample_format{};
s32_le sample_rate{};
s32_le priority{};
s32_le sorting_order{};
s32_le channel_count{};
float_le pitch{};
float_le volume{};
std::array<BiquadFilterParameter, 2> biquad_filter{};
s32_le wave_buffer_count{};
s16_le wave_buffer_head{};
INSERT_PADDING_BYTES(6);
u64_le additional_params_address{};
u64_le additional_params_size{};
s32_le mix_id{};
s32_le splitter_info_id{};
std::array<WaveBuffer, 4> wave_buffer{};
std::array<u32_le, 6> voice_channel_resource_ids{};
// TODO(ogniK): Remaining flags
u8 is_voice_drop_flag_clear_requested{};
u8 wave_buffer_flush_request_count{};
INSERT_PADDING_BYTES(2);
BehaviorFlags behavior_flags{};
INSERT_PADDING_BYTES(16);
};
static_assert(sizeof(InParams) == 0x170, "InParams is an invalid size");
struct OutParams {
u64_le played_sample_count{};
u32_le wave_buffer_consumed{};
u8 voice_dropped{};
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(OutParams) == 0x10, "OutParams is an invalid size");
};
class ServerVoiceInfo {
public:
struct InParams {
bool in_use{};
bool is_new{};
bool should_depop{};
SampleFormat sample_format{};
s32 sample_rate{};
s32 channel_count{};
s32 id{};
s32 node_id{};
s32 mix_id{};
ServerPlayState current_playstate{};
ServerPlayState last_playstate{};
s32 priority{};
s32 sorting_order{};
float pitch{};
float volume{};
float last_volume{};
std::array<BiquadFilterParameter, AudioCommon::MAX_BIQUAD_FILTERS> biquad_filter{};
s32 wave_buffer_count{};
s16 wave_buffer_head{};
INSERT_PADDING_BYTES(2);
BehaviorFlags behavior_flags{};
VAddr additional_params_address{};
std::size_t additional_params_size{};
std::array<ServerWaveBuffer, AudioCommon::MAX_WAVE_BUFFERS> wave_buffer{};
std::array<s32, AudioCommon::MAX_CHANNEL_COUNT> voice_channel_resource_id{};
s32 splitter_info_id{};
u8 wave_buffer_flush_request_count{};
bool voice_drop_flag{};
bool buffer_mapped{};
std::array<bool, AudioCommon::MAX_BIQUAD_FILTERS> was_biquad_filter_enabled{};
};
struct OutParams {
s64 played_sample_count{};
s32 wave_buffer_consumed{};
};
ServerVoiceInfo();
~ServerVoiceInfo();
void Initialize();
void UpdateParameters(const VoiceInfo::InParams& voice_in, BehaviorInfo& behavior_info);
void UpdateWaveBuffers(const VoiceInfo::InParams& voice_in,
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states,
BehaviorInfo& behavior_info);
void UpdateWaveBuffer(ServerWaveBuffer& out_wavebuffer, const WaveBuffer& in_wave_buffer,
SampleFormat sample_format, bool is_buffer_valid,
BehaviorInfo& behavior_info);
void WriteOutStatus(VoiceInfo::OutParams& voice_out, VoiceInfo::InParams& voice_in,
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states);
const InParams& GetInParams() const;
InParams& GetInParams();
const OutParams& GetOutParams() const;
OutParams& GetOutParams();
bool ShouldSkip() const;
bool UpdateForCommandGeneration(VoiceContext& voice_context);
void ResetResources(VoiceContext& voice_context);
bool UpdateParametersForCommandGeneration(
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states);
void FlushWaveBuffers(u8 flush_count,
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states,
s32 channel_count);
void SetWaveBufferCompleted(VoiceState& dsp_state, const ServerWaveBuffer& wave_buffer);
private:
std::vector<s16> stored_samples;
InParams in_params{};
OutParams out_params{};
bool HasValidWaveBuffer(const VoiceState* state) const;
};
class VoiceContext {
public:
explicit VoiceContext(std::size_t voice_count_);
~VoiceContext();
std::size_t GetVoiceCount() const;
ServerVoiceChannelResource& GetChannelResource(std::size_t i);
const ServerVoiceChannelResource& GetChannelResource(std::size_t i) const;
VoiceState& GetState(std::size_t i);
const VoiceState& GetState(std::size_t i) const;
VoiceState& GetDspSharedState(std::size_t i);
const VoiceState& GetDspSharedState(std::size_t i) const;
ServerVoiceInfo& GetInfo(std::size_t i);
const ServerVoiceInfo& GetInfo(std::size_t i) const;
ServerVoiceInfo& GetSortedInfo(std::size_t i);
const ServerVoiceInfo& GetSortedInfo(std::size_t i) const;
s32 DecodePcm16(s32* output_buffer, ServerWaveBuffer* wave_buffer, s32 channel,
s32 channel_count, s32 buffer_offset, s32 sample_count,
Core::Memory::Memory& memory);
void SortInfo();
void UpdateStateByDspShared();
private:
std::size_t voice_count{};
std::vector<ServerVoiceChannelResource> voice_channel_resources{};
std::vector<VoiceState> voice_states{};
std::vector<VoiceState> dsp_voice_states{};
std::vector<ServerVoiceInfo> voice_info{};
std::vector<ServerVoiceInfo*> sorted_voice_info{};
};
} // namespace AudioCore

View File

@@ -1,19 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/logging/log.h"
#include "core/frontend/applets/mii.h"
namespace Core::Frontend {
MiiApplet::~MiiApplet() = default;
void DefaultMiiApplet::ShowMii(
const MiiParameters& parameters,
const std::function<void(const Core::Frontend::MiiParameters& parameters)> callback) const {
LOG_INFO(Service_HID, "(STUBBED) called");
callback(parameters);
}
} // namespace Core::Frontend

View File

@@ -1,35 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <functional>
#include "core/hle/result.h"
#include "core/hle/service/mii/mii_manager.h"
namespace Core::Frontend {
struct MiiParameters {
bool is_editable;
Service::Mii::MiiInfo mii_data{};
};
class MiiApplet {
public:
virtual ~MiiApplet();
virtual void ShowMii(const MiiParameters& parameters,
const std::function<void(const Core::Frontend::MiiParameters& parameters)>
callback) const = 0;
};
class DefaultMiiApplet final : public MiiApplet {
public:
void ShowMii(const MiiParameters& parameters,
const std::function<void(const Core::Frontend::MiiParameters& parameters)>
callback) const override;
};
} // namespace Core::Frontend

View File

@@ -1,29 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hardware_interrupt_manager.h"
#include "core/hle/service/nvdrv/nvdrv_interface.h"
#include "core/hle/service/sm/sm.h"
namespace Core::Hardware {
InterruptManager::InterruptManager(Core::System& system_in) : system(system_in) {
gpu_interrupt_event = Core::Timing::CreateEvent(
"GPUInterrupt", [this](std::uintptr_t message, std::chrono::nanoseconds) {
auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv");
const u32 syncpt = static_cast<u32>(message >> 32);
const u32 value = static_cast<u32>(message);
nvdrv->SignalGPUInterruptSyncpt(syncpt, value);
});
}
InterruptManager::~InterruptManager() = default;
void InterruptManager::GPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
const u64 msg = (static_cast<u64>(syncpoint_id) << 32ULL) | value;
system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{10}, gpu_interrupt_event, msg);
}
} // namespace Core::Hardware
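
The deferral trick here is to pack both halves of the interrupt into the timing event's single u64 message: the syncpoint id goes in the upper 32 bits and the value in the lower 32, and the callback splits them back apart. A self-contained sketch of that round trip, with illustrative values and no yuzu types:

#include <cassert>
#include <cstdint>

int main() {
    const std::uint32_t syncpoint_id = 5;
    const std::uint32_t value = 0xDEADBEEF;
    // Pack, as in InterruptManager::GPUInterruptSyncpt.
    const std::uint64_t msg = (static_cast<std::uint64_t>(syncpoint_id) << 32) | value;
    // Unpack, as in the "GPUInterrupt" event callback.
    const auto unpacked_id = static_cast<std::uint32_t>(msg >> 32);
    const auto unpacked_value = static_cast<std::uint32_t>(msg);
    assert(unpacked_id == syncpoint_id && unpacked_value == value);
    return 0;
}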

View File

@@ -1,32 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include "common/common_types.h"
namespace Core {
class System;
}
namespace Core::Timing {
struct EventType;
}
namespace Core::Hardware {
class InterruptManager {
public:
explicit InterruptManager(Core::System& system);
~InterruptManager();
void GPUInterruptSyncpt(u32 syncpoint_id, u32 value);
private:
Core::System& system;
std::shared_ptr<Core::Timing::EventType> gpu_interrupt_event;
};
} // namespace Core::Hardware

View File

@@ -1,99 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <list>
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/result.h"
namespace Kernel {
class KPageLinkedList final {
public:
class Node final {
public:
constexpr Node(u64 addr_, std::size_t num_pages_) : addr{addr_}, num_pages{num_pages_} {}
constexpr u64 GetAddress() const {
return addr;
}
constexpr std::size_t GetNumPages() const {
return num_pages;
}
constexpr std::size_t GetSize() const {
return GetNumPages() * PageSize;
}
private:
u64 addr{};
std::size_t num_pages{};
};
public:
KPageLinkedList() = default;
KPageLinkedList(u64 address, u64 num_pages) {
ASSERT(AddBlock(address, num_pages).IsSuccess());
}
constexpr std::list<Node>& Nodes() {
return nodes;
}
constexpr const std::list<Node>& Nodes() const {
return nodes;
}
std::size_t GetNumPages() const {
std::size_t num_pages = 0;
for (const Node& node : nodes) {
num_pages += node.GetNumPages();
}
return num_pages;
}
bool IsEqual(KPageLinkedList& other) const {
auto this_node = nodes.begin();
auto other_node = other.nodes.begin();
while (this_node != nodes.end() && other_node != other.nodes.end()) {
if (this_node->GetAddress() != other_node->GetAddress() ||
this_node->GetNumPages() != other_node->GetNumPages()) {
return false;
}
this_node = std::next(this_node);
other_node = std::next(other_node);
}
return this_node == nodes.end() && other_node == other.nodes.end();
}
ResultCode AddBlock(u64 address, u64 num_pages) {
if (!num_pages) {
return ResultSuccess;
}
if (!nodes.empty()) {
const auto node = nodes.back();
if (node.GetAddress() + node.GetNumPages() * PageSize == address) {
address = node.GetAddress();
num_pages += node.GetNumPages();
nodes.pop_back();
}
}
nodes.push_back({address, num_pages});
return ResultSuccess;
}
bool Empty() const {
return nodes.empty();
}
private:
std::list<Node> nodes;
};
} // namespace Kernel
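
AddBlock above only grows the list when it has to: if the new block begins exactly where the tail node ends, the tail is popped and re-added as one larger node. A hypothetical usage fragment under that assumption (PageSize of 0x1000 comes from the kernel memory types header included above; ASSERT is the common assert macro used throughout; the function name is made up):

// Illustrative only: contiguous blocks coalesce into the tail node.
void CoalescingExample() {
    Kernel::KPageLinkedList list;
    ASSERT(list.AddBlock(0x8000'0000, 4).IsSuccess()); // pages [0x8000'0000, 0x8000'4000)
    ASSERT(list.AddBlock(0x8000'4000, 2).IsSuccess()); // contiguous with the tail: merged in place
    ASSERT(list.AddBlock(0x9000'0000, 1).IsSuccess()); // gap: appended as a second node
    // At this point list.GetNumPages() == 7 and list.Nodes().size() == 2.
}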

View File

@@ -1,101 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/frontend/applets/mii.h"
#include "core/hle/service/am/am.h"
#include "core/hle/service/am/applets/applet_mii.h"
#include "core/reporter.h"
namespace Service::AM::Applets {
Mii::Mii(Core::System& system_, LibraryAppletMode applet_mode_,
const Core::Frontend::MiiApplet& frontend_)
: Applet{system_, applet_mode_}, frontend{frontend_}, system{system_} {}
Mii::~Mii() = default;
void Mii::Initialize() {
is_complete = false;
const auto storage = broker.PopNormalDataToApplet();
ASSERT(storage != nullptr);
const auto data = storage->GetData();
ASSERT(data.size() == sizeof(MiiAppletInput));
std::memcpy(&input_data, data.data(), sizeof(MiiAppletInput));
}
bool Mii::TransactionComplete() const {
return is_complete;
}
ResultCode Mii::GetStatus() const {
return ResultSuccess;
}
void Mii::ExecuteInteractive() {
UNREACHABLE_MSG("Unexpected interactive applet data!");
}
void Mii::Execute() {
if (is_complete) {
return;
}
const auto callback = [this](const Core::Frontend::MiiParameters& parameters) {
DisplayCompleted(parameters);
};
switch (input_data.applet_mode) {
case MiiAppletMode::ShowMiiEdit: {
Service::Mii::MiiManager manager;
Core::Frontend::MiiParameters params{
.is_editable = false,
.mii_data = input_data.mii_char_info.mii_data,
};
frontend.ShowMii(params, callback);
break;
}
case MiiAppletMode::EditMii: {
Service::Mii::MiiManager manager;
Core::Frontend::MiiParameters params{
.is_editable = true,
.mii_data = input_data.mii_char_info.mii_data,
};
frontend.ShowMii(params, callback);
break;
}
case MiiAppletMode::CreateMii: {
Service::Mii::MiiManager manager;
Core::Frontend::MiiParameters params{
.is_editable = true,
.mii_data = manager.BuildDefault(0),
};
frontend.ShowMii(params, callback);
break;
}
default:
UNIMPLEMENTED_MSG("Unimplemented LibAppletMiiEdit mode={:02X}!", input_data.applet_mode);
}
}
void Mii::DisplayCompleted(const Core::Frontend::MiiParameters& parameters) {
is_complete = true;
std::vector<u8> reply(sizeof(AppletOutputForCharInfoEditing));
output_data = {
.result = ResultSuccess,
.mii_data = parameters.mii_data,
};
std::memcpy(reply.data(), &output_data, sizeof(AppletOutputForCharInfoEditing));
broker.PushNormalDataFromApplet(std::make_shared<IStorage>(system, std::move(reply)));
broker.SignalStateChanged();
}
} // namespace Service::AM::Applets

View File

@@ -1,90 +0,0 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include "core/hle/result.h"
#include "core/hle/service/am/applets/applets.h"
#include "core/hle/service/mii/mii_manager.h"
namespace Core {
class System;
}
namespace Service::AM::Applets {
// This is nn::mii::AppletMode
enum class MiiAppletMode : u32 {
ShowMiiEdit = 0,
AppendMii = 1,
AppendMiiImage = 2,
UpdateMiiImage = 3,
CreateMii = 4,
EditMii = 5,
};
struct MiiCharInfo {
Service::Mii::MiiInfo mii_data{};
INSERT_PADDING_BYTES(0x28);
};
static_assert(sizeof(MiiCharInfo) == 0x80, "MiiCharInfo has incorrect size.");
// This is nn::mii::AppletInput
struct MiiAppletInput {
s32 version{};
MiiAppletMode applet_mode{};
u32 special_mii_key_code{};
union {
std::array<Common::UUID, 8> valid_uuid;
MiiCharInfo mii_char_info;
};
Common::UUID used_uuid;
INSERT_PADDING_BYTES(0x64);
};
static_assert(sizeof(MiiAppletInput) == 0x100, "MiiAppletInput has incorrect size.");
// This is nn::mii::AppletOutput
struct MiiAppletOutput {
ResultCode result{ResultSuccess};
s32 index{};
INSERT_PADDING_BYTES(0x18);
};
static_assert(sizeof(MiiAppletOutput) == 0x20, "MiiAppletOutput has incorrect size.");
// This is nn::mii::AppletOutputForCharInfoEditing
struct AppletOutputForCharInfoEditing {
ResultCode result{ResultSuccess};
Service::Mii::MiiInfo mii_data{};
INSERT_PADDING_BYTES(0x24);
};
static_assert(sizeof(AppletOutputForCharInfoEditing) == 0x80,
"AppletOutputForCharInfoEditing has incorrect size.");
class Mii final : public Applet {
public:
explicit Mii(Core::System& system_, LibraryAppletMode applet_mode_,
const Core::Frontend::MiiApplet& frontend_);
~Mii() override;
void Initialize() override;
bool TransactionComplete() const override;
ResultCode GetStatus() const override;
void ExecuteInteractive() override;
void Execute() override;
void DisplayCompleted(const Core::Frontend::MiiParameters& parameters);
private:
const Core::Frontend::MiiApplet& frontend;
MiiAppletInput input_data{};
AppletOutputForCharInfoEditing output_data{};
bool is_complete = false;
Core::System& system;
};
} // namespace Service::AM::Applets

View File

@@ -1,38 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "core/hle/service/nvdrv/syncpoint_manager.h"
#include "video_core/gpu.h"
namespace Service::Nvidia {
SyncpointManager::SyncpointManager(Tegra::GPU& gpu_) : gpu{gpu_} {}
SyncpointManager::~SyncpointManager() = default;
u32 SyncpointManager::RefreshSyncpoint(u32 syncpoint_id) {
syncpoints[syncpoint_id].min = gpu.GetSyncpointValue(syncpoint_id);
return GetSyncpointMin(syncpoint_id);
}
u32 SyncpointManager::AllocateSyncpoint() {
for (u32 syncpoint_id = 1; syncpoint_id < MaxSyncPoints; syncpoint_id++) {
if (!syncpoints[syncpoint_id].is_allocated) {
syncpoints[syncpoint_id].is_allocated = true;
return syncpoint_id;
}
}
ASSERT_MSG(false, "No more available syncpoints!");
return {};
}
u32 SyncpointManager::IncreaseSyncpoint(u32 syncpoint_id, u32 value) {
for (u32 index = 0; index < value; ++index) {
syncpoints[syncpoint_id].max.fetch_add(1, std::memory_order_relaxed);
}
return GetSyncpointMax(syncpoint_id);
}
} // namespace Service::Nvidia

View File

@@ -1,84 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <atomic>
#include "common/common_types.h"
#include "core/hle/service/nvdrv/nvdata.h"
namespace Tegra {
class GPU;
}
namespace Service::Nvidia {
class SyncpointManager final {
public:
explicit SyncpointManager(Tegra::GPU& gpu_);
~SyncpointManager();
/**
* Returns true if the specified syncpoint is expired for the given value.
* @param syncpoint_id Syncpoint ID to check.
* @param value Value to check against the specified syncpoint.
* @returns True if the specified syncpoint is expired for the given value, otherwise False.
*/
bool IsSyncpointExpired(u32 syncpoint_id, u32 value) const {
return (GetSyncpointMax(syncpoint_id) - value) >= (GetSyncpointMin(syncpoint_id) - value);
}
/**
* Gets the lower bound for the specified syncpoint.
* @param syncpoint_id Syncpoint ID to get the lower bound for.
* @returns The lower bound for the specified syncpoint.
*/
u32 GetSyncpointMin(u32 syncpoint_id) const {
return syncpoints.at(syncpoint_id).min.load(std::memory_order_relaxed);
}
/**
* Gets the upper bound for the specified syncpoint.
* @param syncpoint_id Syncpoint ID to get the upper bound for.
* @returns The upper bound for the specified syncpoint.
*/
u32 GetSyncpointMax(u32 syncpoint_id) const {
return syncpoints.at(syncpoint_id).max.load(std::memory_order_relaxed);
}
/**
* Refreshes the minimum value for the specified syncpoint.
* @param syncpoint_id Syncpoint ID to be refreshed.
* @returns The new syncpoint minimum value.
*/
u32 RefreshSyncpoint(u32 syncpoint_id);
/**
* Allocates a new syncpoint.
* @returns The syncpoint ID for the newly allocated syncpoint.
*/
u32 AllocateSyncpoint();
/**
* Increases the maximum value for the specified syncpoint.
* @param syncpoint_id Syncpoint ID to be increased.
* @param value Value to increase the specified syncpoint by.
* @returns The new syncpoint maximum value.
*/
u32 IncreaseSyncpoint(u32 syncpoint_id, u32 value);
private:
struct Syncpoint {
std::atomic<u32> min;
std::atomic<u32> max;
std::atomic<bool> is_allocated;
};
std::array<Syncpoint, MaxSyncPoints> syncpoints{};
Tegra::GPU& gpu;
};
} // namespace Service::Nvidia
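
IsSyncpointExpired above is deliberately written in unsigned 32-bit arithmetic: subtracting the fence value from both bounds re-bases the window so the comparison stays correct even after the hardware counters wrap past 0xFFFFFFFF. A standalone sketch of the same predicate with a plain case and a wraparound case (the values are illustrative):

#include <cassert>
#include <cstdint>

// Same predicate as SyncpointManager::IsSyncpointExpired, applied to raw values.
bool IsExpired(std::uint32_t min, std::uint32_t max, std::uint32_t value) {
    return (max - value) >= (min - value);
}

int main() {
    assert(IsExpired(12, 20, 10));   // min already past the fence value: expired
    assert(!IsExpired(12, 20, 15));  // fence value still ahead of min: pending
    // Wraparound: the counters passed 0xFFFFFFFF, so min/max are small again,
    // yet the old fence value 0xFFFFFFF0 is still correctly reported as expired.
    assert(IsExpired(0x00000005, 0x00000010, 0xFFFFFFF0));
    return 0;
}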

View File

@@ -1,206 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/service/kernel_helpers.h"
#include "core/hle/service/nvflinger/buffer_queue.h"
namespace Service::NVFlinger {
BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_,
KernelHelpers::ServiceContext& service_context_)
: id(id_), layer_id(layer_id_), service_context{service_context_} {
buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent");
}
BufferQueue::~BufferQueue() {
service_context.CloseEvent(buffer_wait_event);
}
void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) {
ASSERT(slot < buffer_slots);
LOG_WARNING(Service, "Adding graphics buffer {}", slot);
{
std::unique_lock lock{free_buffers_mutex};
free_buffers.push_back(slot);
}
free_buffers_condition.notify_one();
buffers[slot] = {
.slot = slot,
.status = Buffer::Status::Free,
.igbp_buffer = igbp_buffer,
.transform = {},
.crop_rect = {},
.swap_interval = 0,
.multi_fence = {},
};
buffer_wait_event->GetWritableEvent().Signal();
}
std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::DequeueBuffer(u32 width,
u32 height) {
// Wait for first request before trying to dequeue
{
std::unique_lock lock{free_buffers_mutex};
free_buffers_condition.wait(lock, [this] { return !free_buffers.empty() || !is_connect; });
}
if (!is_connect) {
// Buffer was disconnected while the thread was blocked; this is most likely due to
// emulation being stopped
return std::nullopt;
}
std::unique_lock lock{free_buffers_mutex};
auto f_itr = free_buffers.begin();
auto slot = buffers.size();
while (f_itr != free_buffers.end()) {
const Buffer& buffer = buffers[*f_itr];
if (buffer.status == Buffer::Status::Free && buffer.igbp_buffer.width == width &&
buffer.igbp_buffer.height == height) {
slot = *f_itr;
free_buffers.erase(f_itr);
break;
}
++f_itr;
}
if (slot == buffers.size()) {
return std::nullopt;
}
buffers[slot].status = Buffer::Status::Dequeued;
return {{buffers[slot].slot, &buffers[slot].multi_fence}};
}
const IGBPBuffer& BufferQueue::RequestBuffer(u32 slot) const {
ASSERT(slot < buffers.size());
ASSERT(buffers[slot].status == Buffer::Status::Dequeued);
ASSERT(buffers[slot].slot == slot);
return buffers[slot].igbp_buffer;
}
void BufferQueue::QueueBuffer(u32 slot, BufferTransformFlags transform,
const Common::Rectangle<int>& crop_rect, u32 swap_interval,
Service::Nvidia::MultiFence& multi_fence) {
ASSERT(slot < buffers.size());
ASSERT(buffers[slot].status == Buffer::Status::Dequeued);
ASSERT(buffers[slot].slot == slot);
buffers[slot].status = Buffer::Status::Queued;
buffers[slot].transform = transform;
buffers[slot].crop_rect = crop_rect;
buffers[slot].swap_interval = swap_interval;
buffers[slot].multi_fence = multi_fence;
std::unique_lock lock{queue_sequence_mutex};
queue_sequence.push_back(slot);
}
void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& multi_fence) {
ASSERT(slot < buffers.size());
ASSERT(buffers[slot].status != Buffer::Status::Free);
ASSERT(buffers[slot].slot == slot);
buffers[slot].status = Buffer::Status::Free;
buffers[slot].multi_fence = multi_fence;
buffers[slot].swap_interval = 0;
{
std::unique_lock lock{free_buffers_mutex};
free_buffers.push_back(slot);
}
free_buffers_condition.notify_one();
buffer_wait_event->GetWritableEvent().Signal();
}
std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() {
std::unique_lock lock{queue_sequence_mutex};
std::size_t buffer_slot = buffers.size();
// Iterate to find a queued buffer matching the requested slot.
while (buffer_slot == buffers.size() && !queue_sequence.empty()) {
const auto slot = static_cast<std::size_t>(queue_sequence.front());
ASSERT(slot < buffers.size());
if (buffers[slot].status == Buffer::Status::Queued) {
ASSERT(buffers[slot].slot == slot);
buffer_slot = slot;
}
queue_sequence.pop_front();
}
if (buffer_slot == buffers.size()) {
return std::nullopt;
}
buffers[buffer_slot].status = Buffer::Status::Acquired;
return {{buffers[buffer_slot]}};
}
void BufferQueue::ReleaseBuffer(u32 slot) {
ASSERT(slot < buffers.size());
ASSERT(buffers[slot].status == Buffer::Status::Acquired);
ASSERT(buffers[slot].slot == slot);
buffers[slot].status = Buffer::Status::Free;
{
std::unique_lock lock{free_buffers_mutex};
free_buffers.push_back(slot);
}
free_buffers_condition.notify_one();
buffer_wait_event->GetWritableEvent().Signal();
}
void BufferQueue::Connect() {
std::unique_lock lock{queue_sequence_mutex};
queue_sequence.clear();
is_connect = true;
}
void BufferQueue::Disconnect() {
buffers.fill({});
{
std::unique_lock lock{queue_sequence_mutex};
queue_sequence.clear();
}
buffer_wait_event->GetWritableEvent().Signal();
is_connect = false;
free_buffers_condition.notify_one();
}
u32 BufferQueue::Query(QueryType type) {
LOG_WARNING(Service, "(STUBBED) called type={}", type);
switch (type) {
case QueryType::NativeWindowFormat:
return static_cast<u32>(PixelFormat::RGBA8888);
case QueryType::NativeWindowWidth:
case QueryType::NativeWindowHeight:
break;
case QueryType::NativeWindowMinUndequeuedBuffers:
return 0;
case QueryType::NativeWindowConsumerUsageBits:
return 0;
}
UNIMPLEMENTED_MSG("Unimplemented query type={}", type);
return 0;
}
Kernel::KWritableEvent& BufferQueue::GetWritableBufferWaitEvent() {
return buffer_wait_event->GetWritableEvent();
}
Kernel::KReadableEvent& BufferQueue::GetBufferWaitEvent() {
return buffer_wait_event->GetReadableEvent();
}
} // namespace Service::NVFlinger

View File

@@ -1,154 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <condition_variable>
#include <list>
#include <mutex>
#include <optional>
#include "common/common_funcs.h"
#include "common/math_util.h"
#include "common/swap.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/service/nvdrv/nvdata.h"
namespace Kernel {
class KernelCore;
class KEvent;
class KReadableEvent;
class KWritableEvent;
} // namespace Kernel
namespace Service::KernelHelpers {
class ServiceContext;
} // namespace Service::KernelHelpers
namespace Service::NVFlinger {
constexpr u32 buffer_slots = 0x40;
struct IGBPBuffer {
u32_le magic;
u32_le width;
u32_le height;
u32_le stride;
u32_le format;
u32_le usage;
INSERT_PADDING_WORDS(1);
u32_le index;
INSERT_PADDING_WORDS(3);
u32_le gpu_buffer_id;
INSERT_PADDING_WORDS(6);
u32_le external_format;
INSERT_PADDING_WORDS(10);
u32_le nvmap_handle;
u32_le offset;
INSERT_PADDING_WORDS(60);
};
static_assert(sizeof(IGBPBuffer) == 0x16C, "IGBPBuffer has wrong size");
class BufferQueue final {
public:
enum class QueryType {
NativeWindowWidth = 0,
NativeWindowHeight = 1,
NativeWindowFormat = 2,
/// The minimum number of buffers that must remain un-dequeued after a buffer has been
/// queued
NativeWindowMinUndequeuedBuffers = 3,
/// The consumer gralloc usage bits currently set by the consumer
NativeWindowConsumerUsageBits = 10,
};
explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_,
KernelHelpers::ServiceContext& service_context_);
~BufferQueue();
enum class BufferTransformFlags : u32 {
/// No transform flags are set
Unset = 0x00,
/// Flip source image horizontally (around the vertical axis)
FlipH = 0x01,
/// Flip source image vertically (around the horizontal axis)
FlipV = 0x02,
/// Rotate source image 90 degrees clockwise
Rotate90 = 0x04,
/// Rotate source image 180 degrees
Rotate180 = 0x03,
/// Rotate source image 270 degrees clockwise
Rotate270 = 0x07,
};
enum class PixelFormat : u32 {
RGBA8888 = 1,
RGBX8888 = 2,
RGB888 = 3,
RGB565 = 4,
BGRA8888 = 5,
RGBA5551 = 6,
RRGBA4444 = 7,
};
struct Buffer {
enum class Status { Free = 0, Queued = 1, Dequeued = 2, Acquired = 3 };
u32 slot;
Status status = Status::Free;
IGBPBuffer igbp_buffer;
BufferTransformFlags transform;
Common::Rectangle<int> crop_rect;
u32 swap_interval;
Service::Nvidia::MultiFence multi_fence;
};
void SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer);
std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> DequeueBuffer(u32 width,
u32 height);
const IGBPBuffer& RequestBuffer(u32 slot) const;
void QueueBuffer(u32 slot, BufferTransformFlags transform,
const Common::Rectangle<int>& crop_rect, u32 swap_interval,
Service::Nvidia::MultiFence& multi_fence);
void CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& multi_fence);
std::optional<std::reference_wrapper<const Buffer>> AcquireBuffer();
void ReleaseBuffer(u32 slot);
void Connect();
void Disconnect();
u32 Query(QueryType type);
u32 GetId() const {
return id;
}
bool IsConnected() const {
return is_connect;
}
Kernel::KWritableEvent& GetWritableBufferWaitEvent();
Kernel::KReadableEvent& GetBufferWaitEvent();
private:
BufferQueue(const BufferQueue&) = delete;
u32 id{};
u64 layer_id{};
std::atomic_bool is_connect{};
std::list<u32> free_buffers;
std::array<Buffer, buffer_slots> buffers;
std::list<u32> queue_sequence;
Kernel::KEvent* buffer_wait_event{};
std::mutex free_buffers_mutex;
std::condition_variable free_buffers_condition;
std::mutex queue_sequence_mutex;
KernelHelpers::ServiceContext& service_context;
};
} // namespace Service::NVFlinger
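
Each Buffer walks a fixed status cycle: Free, then Dequeued when the producer claims a slot, Queued when it submits a frame, Acquired when NVFlinger takes it for composition, and back to Free on release. A hypothetical producer-side fragment against the interface above (error handling, fence waiting and a real crop rectangle are omitted; that Common::Rectangle default-constructs to an empty rect is an assumption here):

// Illustrative only: push one frame through the queue.
void PresentOneFrame(Service::NVFlinger::BufferQueue& queue, u32 width, u32 height,
                     Service::Nvidia::MultiFence& fence) {
    // Free -> Dequeued (may block on free_buffers_condition until a slot is free).
    const auto slot = queue.DequeueBuffer(width, height);
    if (!slot) {
        return; // disconnected, or no free buffer matches the requested dimensions
    }
    const auto& igbp = queue.RequestBuffer(slot->first); // inspect the claimed buffer
    (void)igbp;                                          // ... render into it ...
    // Dequeued -> Queued.
    queue.QueueBuffer(slot->first,
                      Service::NVFlinger::BufferQueue::BufferTransformFlags::Unset,
                      Common::Rectangle<int>{}, /*swap_interval=*/1, fence);
    // The consumer later calls AcquireBuffer() and ReleaseBuffer(slot->first),
    // moving the buffer through Acquired and back to Free.
}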

View File

@@ -1,637 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <cstring>
#include <limits>
#include <utility>
#include <vector>
#include "common/error.h"
#ifdef _WIN32
#include <winsock2.h>
#include <ws2tcpip.h>
#elif YUZU_UNIX
#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <netdb.h>
#include <netinet/in.h>
#include <poll.h>
#include <sys/socket.h>
#include <unistd.h>
#else
#error "Unimplemented platform"
#endif
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/settings.h"
#include "core/network/network.h"
#include "core/network/network_interface.h"
#include "core/network/sockets.h"
namespace Network {
namespace {
#ifdef _WIN32
using socklen_t = int;
void Initialize() {
WSADATA wsa_data;
(void)WSAStartup(MAKEWORD(2, 2), &wsa_data);
}
void Finalize() {
WSACleanup();
}
sockaddr TranslateFromSockAddrIn(SockAddrIn input) {
sockaddr_in result;
#if YUZU_UNIX
result.sin_len = sizeof(result);
#endif
switch (static_cast<Domain>(input.family)) {
case Domain::INET:
result.sin_family = AF_INET;
break;
default:
UNIMPLEMENTED_MSG("Unhandled sockaddr family={}", input.family);
result.sin_family = AF_INET;
break;
}
result.sin_port = htons(input.portno);
auto& ip = result.sin_addr.S_un.S_un_b;
ip.s_b1 = input.ip[0];
ip.s_b2 = input.ip[1];
ip.s_b3 = input.ip[2];
ip.s_b4 = input.ip[3];
sockaddr addr;
std::memcpy(&addr, &result, sizeof(addr));
return addr;
}
LINGER MakeLinger(bool enable, u32 linger_value) {
ASSERT(linger_value <= std::numeric_limits<u_short>::max());
LINGER value;
value.l_onoff = enable ? 1 : 0;
value.l_linger = static_cast<u_short>(linger_value);
return value;
}
bool EnableNonBlock(SOCKET fd, bool enable) {
u_long value = enable ? 1 : 0;
return ioctlsocket(fd, FIONBIO, &value) != SOCKET_ERROR;
}
Errno TranslateNativeError(int e) {
switch (e) {
case WSAEBADF:
return Errno::BADF;
case WSAEINVAL:
return Errno::INVAL;
case WSAEMFILE:
return Errno::MFILE;
case WSAENOTCONN:
return Errno::NOTCONN;
case WSAEWOULDBLOCK:
return Errno::AGAIN;
case WSAECONNREFUSED:
return Errno::CONNREFUSED;
case WSAEHOSTUNREACH:
return Errno::HOSTUNREACH;
case WSAENETDOWN:
return Errno::NETDOWN;
case WSAENETUNREACH:
return Errno::NETUNREACH;
default:
return Errno::OTHER;
}
}
#elif YUZU_UNIX // ^ _WIN32 v YUZU_UNIX
using SOCKET = int;
using WSAPOLLFD = pollfd;
using ULONG = u64;
constexpr SOCKET INVALID_SOCKET = -1;
constexpr SOCKET SOCKET_ERROR = -1;
constexpr int SD_RECEIVE = SHUT_RD;
constexpr int SD_SEND = SHUT_WR;
constexpr int SD_BOTH = SHUT_RDWR;
void Initialize() {}
void Finalize() {}
sockaddr TranslateFromSockAddrIn(SockAddrIn input) {
sockaddr_in result;
switch (static_cast<Domain>(input.family)) {
case Domain::INET:
result.sin_family = AF_INET;
break;
default:
UNIMPLEMENTED_MSG("Unhandled sockaddr family={}", input.family);
result.sin_family = AF_INET;
break;
}
result.sin_port = htons(input.portno);
result.sin_addr.s_addr = input.ip[0] | input.ip[1] << 8 | input.ip[2] << 16 | input.ip[3] << 24;
sockaddr addr;
std::memcpy(&addr, &result, sizeof(addr));
return addr;
}
int WSAPoll(WSAPOLLFD* fds, ULONG nfds, int timeout) {
return poll(fds, static_cast<nfds_t>(nfds), timeout);
}
int closesocket(SOCKET fd) {
return close(fd);
}
linger MakeLinger(bool enable, u32 linger_value) {
linger value;
value.l_onoff = enable ? 1 : 0;
value.l_linger = linger_value;
return value;
}
bool EnableNonBlock(int fd, bool enable) {
int flags = fcntl(fd, F_GETFL);
if (flags == -1) {
return false;
}
if (enable) {
flags |= O_NONBLOCK;
} else {
flags &= ~O_NONBLOCK;
}
return fcntl(fd, F_SETFL, flags) == 0;
}
Errno TranslateNativeError(int e) {
switch (e) {
case EBADF:
return Errno::BADF;
case EINVAL:
return Errno::INVAL;
case EMFILE:
return Errno::MFILE;
case ENOTCONN:
return Errno::NOTCONN;
case EAGAIN:
return Errno::AGAIN;
case ECONNREFUSED:
return Errno::CONNREFUSED;
case EHOSTUNREACH:
return Errno::HOSTUNREACH;
case ENETDOWN:
return Errno::NETDOWN;
case ENETUNREACH:
return Errno::NETUNREACH;
default:
return Errno::OTHER;
}
}
#endif
Errno GetAndLogLastError() {
#ifdef _WIN32
int e = WSAGetLastError();
#else
int e = errno;
#endif
const Errno err = TranslateNativeError(e);
if (err == Errno::AGAIN) {
return err;
}
LOG_ERROR(Network, "Socket operation error: {}", Common::NativeErrorToString(e));
return err;
}
int TranslateDomain(Domain domain) {
switch (domain) {
case Domain::INET:
return AF_INET;
default:
UNIMPLEMENTED_MSG("Unimplemented domain={}", domain);
return 0;
}
}
int TranslateType(Type type) {
switch (type) {
case Type::STREAM:
return SOCK_STREAM;
case Type::DGRAM:
return SOCK_DGRAM;
default:
UNIMPLEMENTED_MSG("Unimplemented type={}", type);
return 0;
}
}
int TranslateProtocol(Protocol protocol) {
switch (protocol) {
case Protocol::TCP:
return IPPROTO_TCP;
case Protocol::UDP:
return IPPROTO_UDP;
default:
UNIMPLEMENTED_MSG("Unimplemented protocol={}", protocol);
return 0;
}
}
SockAddrIn TranslateToSockAddrIn(sockaddr input_) {
sockaddr_in input;
std::memcpy(&input, &input_, sizeof(input));
SockAddrIn result;
switch (input.sin_family) {
case AF_INET:
result.family = Domain::INET;
break;
default:
UNIMPLEMENTED_MSG("Unhandled sockaddr family={}", input.sin_family);
result.family = Domain::INET;
break;
}
result.portno = ntohs(input.sin_port);
result.ip = TranslateIPv4(input.sin_addr);
return result;
}
short TranslatePollEvents(PollEvents events) {
short result = 0;
if (True(events & PollEvents::In)) {
events &= ~PollEvents::In;
result |= POLLIN;
}
if (True(events & PollEvents::Pri)) {
events &= ~PollEvents::Pri;
#ifdef _WIN32
LOG_WARNING(Service, "Winsock doesn't support POLLPRI");
#else
result |= POLLPRI;
#endif
}
if (True(events & PollEvents::Out)) {
events &= ~PollEvents::Out;
result |= POLLOUT;
}
UNIMPLEMENTED_IF_MSG((u16)events != 0, "Unhandled guest events=0x{:x}", (u16)events);
return result;
}
PollEvents TranslatePollRevents(short revents) {
PollEvents result{};
const auto translate = [&result, &revents](short host, PollEvents guest) {
if ((revents & host) != 0) {
revents &= static_cast<short>(~host);
result |= guest;
}
};
translate(POLLIN, PollEvents::In);
translate(POLLPRI, PollEvents::Pri);
translate(POLLOUT, PollEvents::Out);
translate(POLLERR, PollEvents::Err);
translate(POLLHUP, PollEvents::Hup);
UNIMPLEMENTED_IF_MSG(revents != 0, "Unhandled host revents=0x{:x}", revents);
return result;
}
template <typename T>
Errno SetSockOpt(SOCKET fd, int option, T value) {
const int result =
setsockopt(fd, SOL_SOCKET, option, reinterpret_cast<const char*>(&value), sizeof(value));
if (result != SOCKET_ERROR) {
return Errno::SUCCESS;
}
return GetAndLogLastError();
}
} // Anonymous namespace
NetworkInstance::NetworkInstance() {
Initialize();
}
NetworkInstance::~NetworkInstance() {
Finalize();
}
std::optional<IPv4Address> GetHostIPv4Address() {
const std::string& selected_network_interface = Settings::values.network_interface.GetValue();
const auto network_interfaces = Network::GetAvailableNetworkInterfaces();
if (network_interfaces.size() == 0) {
LOG_ERROR(Network, "GetAvailableNetworkInterfaces returned no interfaces");
return {};
}
const auto res =
std::ranges::find_if(network_interfaces, [&selected_network_interface](const auto& iface) {
return iface.name == selected_network_interface;
});
if (res != network_interfaces.end()) {
char ip_addr[16] = {};
ASSERT(inet_ntop(AF_INET, &res->ip_address, ip_addr, sizeof(ip_addr)) != nullptr);
return TranslateIPv4(res->ip_address);
} else {
LOG_ERROR(Network, "Couldn't find selected interface \"{}\"", selected_network_interface);
return {};
}
}
std::pair<s32, Errno> Poll(std::vector<PollFD>& pollfds, s32 timeout) {
const size_t num = pollfds.size();
std::vector<WSAPOLLFD> host_pollfds(pollfds.size());
std::transform(pollfds.begin(), pollfds.end(), host_pollfds.begin(), [](PollFD fd) {
WSAPOLLFD result;
result.fd = fd.socket->fd;
result.events = TranslatePollEvents(fd.events);
result.revents = 0;
return result;
});
const int result = WSAPoll(host_pollfds.data(), static_cast<ULONG>(num), timeout);
if (result == 0) {
ASSERT(std::all_of(host_pollfds.begin(), host_pollfds.end(),
[](WSAPOLLFD fd) { return fd.revents == 0; }));
return {0, Errno::SUCCESS};
}
for (size_t i = 0; i < num; ++i) {
pollfds[i].revents = TranslatePollRevents(host_pollfds[i].revents);
}
if (result > 0) {
return {result, Errno::SUCCESS};
}
ASSERT(result == SOCKET_ERROR);
return {-1, GetAndLogLastError()};
}
Socket::~Socket() {
if (fd == INVALID_SOCKET) {
return;
}
(void)closesocket(fd);
fd = INVALID_SOCKET;
}
Socket::Socket(Socket&& rhs) noexcept : fd{std::exchange(rhs.fd, INVALID_SOCKET)} {}
Errno Socket::Initialize(Domain domain, Type type, Protocol protocol) {
fd = socket(TranslateDomain(domain), TranslateType(type), TranslateProtocol(protocol));
if (fd != INVALID_SOCKET) {
return Errno::SUCCESS;
}
return GetAndLogLastError();
}
std::pair<Socket::AcceptResult, Errno> Socket::Accept() {
sockaddr addr;
socklen_t addrlen = sizeof(addr);
const SOCKET new_socket = accept(fd, &addr, &addrlen);
if (new_socket == INVALID_SOCKET) {
return {AcceptResult{}, GetAndLogLastError()};
}
AcceptResult result;
result.socket = std::make_unique<Socket>();
result.socket->fd = new_socket;
ASSERT(addrlen == sizeof(sockaddr_in));
result.sockaddr_in = TranslateToSockAddrIn(addr);
return {std::move(result), Errno::SUCCESS};
}
Errno Socket::Connect(SockAddrIn addr_in) {
const sockaddr host_addr_in = TranslateFromSockAddrIn(addr_in);
if (connect(fd, &host_addr_in, sizeof(host_addr_in)) != SOCKET_ERROR) {
return Errno::SUCCESS;
}
return GetAndLogLastError();
}
std::pair<SockAddrIn, Errno> Socket::GetPeerName() {
sockaddr addr;
socklen_t addrlen = sizeof(addr);
if (getpeername(fd, &addr, &addrlen) == SOCKET_ERROR) {
return {SockAddrIn{}, GetAndLogLastError()};
}
ASSERT(addrlen == sizeof(sockaddr_in));
return {TranslateToSockAddrIn(addr), Errno::SUCCESS};
}
std::pair<SockAddrIn, Errno> Socket::GetSockName() {
sockaddr addr;
socklen_t addrlen = sizeof(addr);
if (getsockname(fd, &addr, &addrlen) == SOCKET_ERROR) {
return {SockAddrIn{}, GetAndLogLastError()};
}
ASSERT(addrlen == sizeof(sockaddr_in));
return {TranslateToSockAddrIn(addr), Errno::SUCCESS};
}
Errno Socket::Bind(SockAddrIn addr) {
const sockaddr addr_in = TranslateFromSockAddrIn(addr);
if (bind(fd, &addr_in, sizeof(addr_in)) != SOCKET_ERROR) {
return Errno::SUCCESS;
}
return GetAndLogLastError();
}
Errno Socket::Listen(s32 backlog) {
if (listen(fd, backlog) != SOCKET_ERROR) {
return Errno::SUCCESS;
}
return GetAndLogLastError();
}
Errno Socket::Shutdown(ShutdownHow how) {
int host_how = 0;
switch (how) {
case ShutdownHow::RD:
host_how = SD_RECEIVE;
break;
case ShutdownHow::WR:
host_how = SD_SEND;
break;
case ShutdownHow::RDWR:
host_how = SD_BOTH;
break;
default:
UNIMPLEMENTED_MSG("Unimplemented flag how={}", how);
return Errno::SUCCESS;
}
if (shutdown(fd, host_how) != SOCKET_ERROR) {
return Errno::SUCCESS;
}
return GetAndLogLastError();
}
std::pair<s32, Errno> Socket::Recv(int flags, std::vector<u8>& message) {
ASSERT(flags == 0);
ASSERT(message.size() < static_cast<size_t>(std::numeric_limits<int>::max()));
const auto result =
recv(fd, reinterpret_cast<char*>(message.data()), static_cast<int>(message.size()), 0);
if (result != SOCKET_ERROR) {
return {static_cast<s32>(result), Errno::SUCCESS};
}
return {-1, GetAndLogLastError()};
}
std::pair<s32, Errno> Socket::RecvFrom(int flags, std::vector<u8>& message, SockAddrIn* addr) {
ASSERT(flags == 0);
ASSERT(message.size() < static_cast<size_t>(std::numeric_limits<int>::max()));
sockaddr addr_in{};
socklen_t addrlen = sizeof(addr_in);
socklen_t* const p_addrlen = addr ? &addrlen : nullptr;
sockaddr* const p_addr_in = addr ? &addr_in : nullptr;
const auto result = recvfrom(fd, reinterpret_cast<char*>(message.data()),
static_cast<int>(message.size()), 0, p_addr_in, p_addrlen);
if (result != SOCKET_ERROR) {
if (addr) {
ASSERT(addrlen == sizeof(addr_in));
*addr = TranslateToSockAddrIn(addr_in);
}
return {static_cast<s32>(result), Errno::SUCCESS};
}
return {-1, GetAndLogLastError()};
}
std::pair<s32, Errno> Socket::Send(const std::vector<u8>& message, int flags) {
ASSERT(message.size() < static_cast<size_t>(std::numeric_limits<int>::max()));
ASSERT(flags == 0);
const auto result = send(fd, reinterpret_cast<const char*>(message.data()),
static_cast<int>(message.size()), 0);
if (result != SOCKET_ERROR) {
return {static_cast<s32>(result), Errno::SUCCESS};
}
return {-1, GetAndLogLastError()};
}
std::pair<s32, Errno> Socket::SendTo(u32 flags, const std::vector<u8>& message,
const SockAddrIn* addr) {
ASSERT(flags == 0);
const sockaddr* to = nullptr;
const int tolen = addr ? sizeof(sockaddr) : 0;
sockaddr host_addr_in;
if (addr) {
host_addr_in = TranslateFromSockAddrIn(*addr);
to = &host_addr_in;
}
const auto result = sendto(fd, reinterpret_cast<const char*>(message.data()),
static_cast<int>(message.size()), 0, to, tolen);
if (result != SOCKET_ERROR) {
return {static_cast<s32>(result), Errno::SUCCESS};
}
return {-1, GetAndLogLastError()};
}
Errno Socket::Close() {
[[maybe_unused]] const int result = closesocket(fd);
ASSERT(result == 0);
fd = INVALID_SOCKET;
return Errno::SUCCESS;
}
Errno Socket::SetLinger(bool enable, u32 linger) {
return SetSockOpt(fd, SO_LINGER, MakeLinger(enable, linger));
}
Errno Socket::SetReuseAddr(bool enable) {
return SetSockOpt<u32>(fd, SO_REUSEADDR, enable ? 1 : 0);
}
Errno Socket::SetKeepAlive(bool enable) {
return SetSockOpt<u32>(fd, SO_KEEPALIVE, enable ? 1 : 0);
}
Errno Socket::SetBroadcast(bool enable) {
return SetSockOpt<u32>(fd, SO_BROADCAST, enable ? 1 : 0);
}
Errno Socket::SetSndBuf(u32 value) {
return SetSockOpt(fd, SO_SNDBUF, value);
}
Errno Socket::SetRcvBuf(u32 value) {
return SetSockOpt(fd, SO_RCVBUF, value);
}
Errno Socket::SetSndTimeo(u32 value) {
return SetSockOpt(fd, SO_SNDTIMEO, value);
}
Errno Socket::SetRcvTimeo(u32 value) {
return SetSockOpt(fd, SO_RCVTIMEO, value);
}
Errno Socket::SetNonBlock(bool enable) {
if (EnableNonBlock(fd, enable)) {
return Errno::SUCCESS;
}
return GetAndLogLastError();
}
bool Socket::IsOpened() const {
return fd != INVALID_SOCKET;
}
} // namespace Network

View File

@@ -1,117 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <optional>
#include "common/common_funcs.h"
#include "common/common_types.h"
#ifdef _WIN32
#include <winsock2.h>
#elif YUZU_UNIX
#include <netinet/in.h>
#endif
namespace Network {
class Socket;
/// Error code for network functions
enum class Errno {
SUCCESS,
BADF,
INVAL,
MFILE,
NOTCONN,
AGAIN,
CONNREFUSED,
HOSTUNREACH,
NETDOWN,
NETUNREACH,
OTHER,
};
/// Address families
enum class Domain {
INET, ///< Address family for IPv4
};
/// Socket types
enum class Type {
STREAM,
DGRAM,
RAW,
SEQPACKET,
};
/// Protocol values for sockets
enum class Protocol {
ICMP,
TCP,
UDP,
};
/// Shutdown mode
enum class ShutdownHow {
RD,
WR,
RDWR,
};
/// Array of IPv4 address
using IPv4Address = std::array<u8, 4>;
/// Cross-platform sockaddr structure
struct SockAddrIn {
Domain family;
IPv4Address ip;
u16 portno;
};
/// Cross-platform poll fd structure
enum class PollEvents : u16 {
// Using Pascal case because IN is a macro on Windows.
In = 1 << 0,
Pri = 1 << 1,
Out = 1 << 2,
Err = 1 << 3,
Hup = 1 << 4,
Nval = 1 << 5,
};
DECLARE_ENUM_FLAG_OPERATORS(PollEvents);
struct PollFD {
Socket* socket;
PollEvents events;
PollEvents revents;
};
class NetworkInstance {
public:
explicit NetworkInstance();
~NetworkInstance();
};
#ifdef _WIN32
constexpr IPv4Address TranslateIPv4(in_addr addr) {
auto& bytes = addr.S_un.S_un_b;
return IPv4Address{bytes.s_b1, bytes.s_b2, bytes.s_b3, bytes.s_b4};
}
#elif YUZU_UNIX
constexpr IPv4Address TranslateIPv4(in_addr addr) {
const u32 bytes = addr.s_addr;
return IPv4Address{static_cast<u8>(bytes), static_cast<u8>(bytes >> 8),
static_cast<u8>(bytes >> 16), static_cast<u8>(bytes >> 24)};
}
#endif
/// @brief Returns host's IPv4 address
/// @return human ordered IPv4 address (e.g. 192.168.0.1) as an array
std::optional<IPv4Address> GetHostIPv4Address();
} // namespace Network
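
Poll in the matching .cpp takes these portable PollFD/PollEvents values and translates them into the host pollfd set, so callers never touch platform headers directly. A hypothetical caller waiting up to one second for a socket to become readable follows; it assumes Poll is declared alongside the types above, and the function name and timeout are illustrative.

#include <utility>
#include <vector>
#include "core/network/network.h"
#include "core/network/sockets.h"

// Illustrative only: returns true once data is ready to be received on the socket.
bool WaitReadable(Network::Socket& sock) {
    Network::PollFD poll_fd{
        .socket = &sock,
        .events = Network::PollEvents::In,
        .revents = {},
    };
    std::vector<Network::PollFD> fds{poll_fd};
    const auto [num_ready, err] = Network::Poll(fds, /*timeout=*/1000);
    return err == Network::Errno::SUCCESS && num_ready > 0 &&
           True(fds[0].revents & Network::PollEvents::In);
}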

View File

@@ -1,209 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <fstream>
#include <sstream>
#include <vector>
#include "common/bit_cast.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/settings.h"
#include "common/string_util.h"
#include "core/network/network_interface.h"
#ifdef _WIN32
#include <iphlpapi.h>
#else
#include <cerrno>
#include <ifaddrs.h>
#include <net/if.h>
#endif
namespace Network {
#ifdef _WIN32
std::vector<NetworkInterface> GetAvailableNetworkInterfaces() {
std::vector<IP_ADAPTER_ADDRESSES> adapter_addresses;
DWORD ret = ERROR_BUFFER_OVERFLOW;
DWORD buf_size = 0;
// retry up to 5 times
for (int i = 0; i < 5 && ret == ERROR_BUFFER_OVERFLOW; i++) {
ret = GetAdaptersAddresses(
AF_INET, GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_INCLUDE_GATEWAYS,
nullptr, adapter_addresses.data(), &buf_size);
if (ret != ERROR_BUFFER_OVERFLOW) {
break;
}
adapter_addresses.resize((buf_size / sizeof(IP_ADAPTER_ADDRESSES)) + 1);
}
if (ret != NO_ERROR) {
LOG_ERROR(Network, "Failed to get network interfaces with GetAdaptersAddresses");
return {};
}
std::vector<NetworkInterface> result;
for (auto current_address = adapter_addresses.data(); current_address != nullptr;
current_address = current_address->Next) {
if (current_address->FirstUnicastAddress == nullptr ||
current_address->FirstUnicastAddress->Address.lpSockaddr == nullptr) {
continue;
}
if (current_address->OperStatus != IfOperStatusUp) {
continue;
}
const auto ip_addr = Common::BitCast<struct sockaddr_in>(
*current_address->FirstUnicastAddress->Address.lpSockaddr)
.sin_addr;
ULONG mask = 0;
if (ConvertLengthToIpv4Mask(current_address->FirstUnicastAddress->OnLinkPrefixLength,
&mask) != NO_ERROR) {
LOG_ERROR(Network, "Failed to convert IPv4 prefix length to subnet mask");
continue;
}
struct in_addr gateway = {.S_un{.S_addr{0}}};
if (current_address->FirstGatewayAddress != nullptr &&
current_address->FirstGatewayAddress->Address.lpSockaddr != nullptr) {
gateway = Common::BitCast<struct sockaddr_in>(
*current_address->FirstGatewayAddress->Address.lpSockaddr)
.sin_addr;
}
result.emplace_back(NetworkInterface{
.name{Common::UTF16ToUTF8(std::wstring{current_address->FriendlyName})},
.ip_address{ip_addr},
.subnet_mask = in_addr{.S_un{.S_addr{mask}}},
.gateway = gateway});
}
return result;
}
#else
std::vector<NetworkInterface> GetAvailableNetworkInterfaces() {
struct ifaddrs* ifaddr = nullptr;
if (getifaddrs(&ifaddr) != 0) {
LOG_ERROR(Network, "Failed to get network interfaces with getifaddrs: {}",
std::strerror(errno));
return {};
}
std::vector<NetworkInterface> result;
for (auto ifa = ifaddr; ifa != nullptr; ifa = ifa->ifa_next) {
if (ifa->ifa_addr == nullptr || ifa->ifa_netmask == nullptr) {
continue;
}
if (ifa->ifa_addr->sa_family != AF_INET) {
continue;
}
if ((ifa->ifa_flags & IFF_UP) == 0 || (ifa->ifa_flags & IFF_LOOPBACK) != 0) {
continue;
}
u32 gateway{};
std::ifstream file{"/proc/net/route"};
if (!file.is_open()) {
LOG_ERROR(Network, "Failed to open \"/proc/net/route\"");
result.emplace_back(NetworkInterface{
.name{ifa->ifa_name},
.ip_address{Common::BitCast<struct sockaddr_in>(*ifa->ifa_addr).sin_addr},
.subnet_mask{Common::BitCast<struct sockaddr_in>(*ifa->ifa_netmask).sin_addr},
.gateway{in_addr{.s_addr = gateway}}});
continue;
}
// ignore header
file.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
bool gateway_found = false;
for (std::string line; std::getline(file, line);) {
std::istringstream iss{line};
std::string iface_name;
iss >> iface_name;
if (iface_name != ifa->ifa_name) {
continue;
}
iss >> std::hex;
u32 dest{};
iss >> dest;
if (dest != 0) {
// not the default route
continue;
}
iss >> gateway;
u16 flags{};
iss >> flags;
// flag RTF_GATEWAY (defined in <linux/route.h>)
if ((flags & 0x2) == 0) {
continue;
}
gateway_found = true;
break;
}
if (!gateway_found) {
gateway = 0;
}
result.emplace_back(NetworkInterface{
.name{ifa->ifa_name},
.ip_address{Common::BitCast<struct sockaddr_in>(*ifa->ifa_addr).sin_addr},
.subnet_mask{Common::BitCast<struct sockaddr_in>(*ifa->ifa_netmask).sin_addr},
.gateway{in_addr{.s_addr = gateway}}});
}
freeifaddrs(ifaddr);
return result;
}
#endif
std::optional<NetworkInterface> GetSelectedNetworkInterface() {
const auto& selected_network_interface = Settings::values.network_interface.GetValue();
const auto network_interfaces = Network::GetAvailableNetworkInterfaces();
if (network_interfaces.size() == 0) {
LOG_ERROR(Network, "GetAvailableNetworkInterfaces returned no interfaces");
return std::nullopt;
}
const auto res =
std::ranges::find_if(network_interfaces, [&selected_network_interface](const auto& iface) {
return iface.name == selected_network_interface;
});
if (res == network_interfaces.end()) {
LOG_ERROR(Network, "Couldn't find selected interface \"{}\"", selected_network_interface);
return std::nullopt;
}
return *res;
}
} // namespace Network
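The default-gateway lookup above can be reduced to a small, Linux-only standalone sketch; the helper name DefaultGatewayFor and the interface name "eth0" are illustrative assumptions, not yuzu code:

#include <cstdint>
#include <fstream>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>

// Returns the default-route gateway for the named interface as a raw 32-bit value
// in network byte order, or 0 if no default route with RTF_GATEWAY is found.
std::uint32_t DefaultGatewayFor(const std::string& iface) {
    std::ifstream file{"/proc/net/route"};
    if (!file.is_open()) {
        return 0;
    }
    // Skip the header line.
    file.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
    for (std::string line; std::getline(file, line);) {
        std::istringstream iss{line};
        std::string name;
        std::uint32_t dest{};
        std::uint32_t gateway{};
        std::uint16_t flags{};
        iss >> name >> std::hex >> dest >> gateway >> flags;
        // Columns are Iface, Destination, Gateway, Flags; the default route has
        // destination 0.0.0.0 and the RTF_GATEWAY flag (0x2) set.
        if (name == iface && dest == 0 && (flags & 0x2) != 0) {
            return gateway;
        }
    }
    return 0;
}

int main() {
    std::cout << std::hex << DefaultGatewayFor("eth0") << '\n';
    return 0;
}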

View File

@@ -1,28 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <optional>
#include <string>
#include <vector>
#ifdef _WIN32
#include <winsock2.h>
#else
#include <netinet/in.h>
#endif
namespace Network {
struct NetworkInterface {
std::string name;
struct in_addr ip_address;
struct in_addr subnet_mask;
struct in_addr gateway;
};
std::vector<NetworkInterface> GetAvailableNetworkInterfaces();
std::optional<NetworkInterface> GetSelectedNetworkInterface();
} // namespace Network

View File

@@ -1,94 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include <utility>
#if defined(_WIN32)
#elif !YUZU_UNIX
#error "Platform not implemented"
#endif
#include "common/common_types.h"
#include "core/network/network.h"
// TODO: C++20 Replace std::vector usages with std::span
namespace Network {
class Socket {
public:
struct AcceptResult {
std::unique_ptr<Socket> socket;
SockAddrIn sockaddr_in;
};
explicit Socket() = default;
~Socket();
Socket(const Socket&) = delete;
Socket& operator=(const Socket&) = delete;
Socket(Socket&& rhs) noexcept;
// Avoid closing sockets implicitly
Socket& operator=(Socket&&) noexcept = delete;
Errno Initialize(Domain domain, Type type, Protocol protocol);
Errno Close();
std::pair<AcceptResult, Errno> Accept();
Errno Connect(SockAddrIn addr_in);
std::pair<SockAddrIn, Errno> GetPeerName();
std::pair<SockAddrIn, Errno> GetSockName();
Errno Bind(SockAddrIn addr);
Errno Listen(s32 backlog);
Errno Shutdown(ShutdownHow how);
std::pair<s32, Errno> Recv(int flags, std::vector<u8>& message);
std::pair<s32, Errno> RecvFrom(int flags, std::vector<u8>& message, SockAddrIn* addr);
std::pair<s32, Errno> Send(const std::vector<u8>& message, int flags);
std::pair<s32, Errno> SendTo(u32 flags, const std::vector<u8>& message, const SockAddrIn* addr);
Errno SetLinger(bool enable, u32 linger);
Errno SetReuseAddr(bool enable);
Errno SetKeepAlive(bool enable);
Errno SetBroadcast(bool enable);
Errno SetSndBuf(u32 value);
Errno SetRcvBuf(u32 value);
Errno SetSndTimeo(u32 value);
Errno SetRcvTimeo(u32 value);
Errno SetNonBlock(bool enable);
bool IsOpened() const;
#if defined(_WIN32)
SOCKET fd = INVALID_SOCKET;
#elif YUZU_UNIX
int fd = -1;
#endif
};
std::pair<s32, Errno> Poll(std::vector<PollFD>& poll_fds, s32 timeout);
} // namespace Network
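For reference, a minimal POSIX sketch of the underlying calls that Initialize, Bind, Listen and Close wrap on Unix builds; this is illustrative only (the port number is arbitrary) and not the yuzu implementation:

#include <cstdio>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main() {
    const int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); // Socket::Initialize
    if (fd < 0) {
        std::perror("socket");
        return 1;
    }
    sockaddr_in addr{};
    addr.sin_family = AF_INET;
    addr.sin_port = htons(12345);
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    if (bind(fd, reinterpret_cast<const sockaddr*>(&addr), sizeof(addr)) != 0 || // Socket::Bind
        listen(fd, 1) != 0) {                                                    // Socket::Listen
        std::perror("bind/listen");
    }
    close(fd); // Socket::Close
    return 0;
}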

View File

@@ -1,27 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <catch2/catch.hpp>
#include "core/network/network.h"
#include "core/network/sockets.h"
TEST_CASE("Network::Errors", "[core]") {
Network::NetworkInstance network_instance; // initialize network
Network::Socket socks[2];
for (Network::Socket& sock : socks) {
REQUIRE(sock.Initialize(Network::Domain::INET, Network::Type::STREAM,
Network::Protocol::TCP) == Network::Errno::SUCCESS);
}
Network::SockAddrIn addr{
Network::Domain::INET,
{127, 0, 0, 1},
1, // hopefully nobody running this test has something listening on port 1
};
REQUIRE(socks[0].Connect(addr) == Network::Errno::CONNREFUSED);
std::vector<u8> message{1, 2, 3, 4};
REQUIRE(socks[1].Recv(0, message).second == Network::Errno::NOTCONN);
}

View File

@@ -1,310 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <fstream>
#include <vector>
#include "common/assert.h"
#include "common/settings.h"
#include "video_core/command_classes/codecs/codec.h"
#include "video_core/command_classes/codecs/h264.h"
#include "video_core/command_classes/codecs/vp8.h"
#include "video_core/command_classes/codecs/vp9.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
extern "C" {
#include <libavutil/opt.h>
#ifdef LIBVA_FOUND
// for querying VAAPI driver information
#include <libavutil/hwcontext_vaapi.h>
#endif
}
namespace Tegra {
namespace {
constexpr AVPixelFormat PREFERRED_GPU_FMT = AV_PIX_FMT_NV12;
constexpr AVPixelFormat PREFERRED_CPU_FMT = AV_PIX_FMT_YUV420P;
constexpr std::array PREFERRED_GPU_DECODERS = {
AV_HWDEVICE_TYPE_CUDA,
#ifdef _WIN32
AV_HWDEVICE_TYPE_D3D11VA,
AV_HWDEVICE_TYPE_DXVA2,
#elif defined(__unix__)
AV_HWDEVICE_TYPE_VAAPI,
AV_HWDEVICE_TYPE_VDPAU,
#endif
// last resort for Linux Flatpak (w/ NVIDIA)
AV_HWDEVICE_TYPE_VULKAN,
};
void AVPacketDeleter(AVPacket* ptr) {
av_packet_free(&ptr);
}
using AVPacketPtr = std::unique_ptr<AVPacket, decltype(&AVPacketDeleter)>;
AVPixelFormat GetGpuFormat(AVCodecContext* av_codec_ctx, const AVPixelFormat* pix_fmts) {
for (const AVPixelFormat* p = pix_fmts; *p != AV_PIX_FMT_NONE; ++p) {
if (*p == av_codec_ctx->pix_fmt) {
return av_codec_ctx->pix_fmt;
}
}
LOG_INFO(Service_NVDRV, "Could not find compatible GPU AV format, falling back to CPU");
av_buffer_unref(&av_codec_ctx->hw_device_ctx);
av_codec_ctx->pix_fmt = PREFERRED_CPU_FMT;
return PREFERRED_CPU_FMT;
}
// Lists all of the hardware device contexts (hwcontexts) currently available in FFmpeg
std::vector<AVHWDeviceType> ListSupportedContexts() {
std::vector<AVHWDeviceType> contexts{};
AVHWDeviceType current_device_type = AV_HWDEVICE_TYPE_NONE;
do {
current_device_type = av_hwdevice_iterate_types(current_device_type);
contexts.push_back(current_device_type);
} while (current_device_type != AV_HWDEVICE_TYPE_NONE);
return contexts;
}
} // namespace
void AVFrameDeleter(AVFrame* ptr) {
av_frame_free(&ptr);
}
Codec::Codec(GPU& gpu_, const NvdecCommon::NvdecRegisters& regs)
: gpu(gpu_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(gpu)),
vp8_decoder(std::make_unique<Decoder::VP8>(gpu)),
vp9_decoder(std::make_unique<Decoder::VP9>(gpu)) {}
Codec::~Codec() {
if (!initialized) {
return;
}
// Free libav memory
avcodec_free_context(&av_codec_ctx);
av_buffer_unref(&av_gpu_decoder);
}
bool Codec::CreateGpuAvDevice() {
static constexpr auto HW_CONFIG_METHOD = AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX;
static const auto supported_contexts = ListSupportedContexts();
for (const auto& type : PREFERRED_GPU_DECODERS) {
if (std::none_of(supported_contexts.begin(), supported_contexts.end(),
[&type](const auto& context) { return context == type; })) {
LOG_DEBUG(Service_NVDRV, "{} explicitly unsupported", av_hwdevice_get_type_name(type));
continue;
}
// Avoid memory leak from not cleaning up after av_hwdevice_ctx_create
av_buffer_unref(&av_gpu_decoder);
const int hwdevice_res = av_hwdevice_ctx_create(&av_gpu_decoder, type, nullptr, nullptr, 0);
if (hwdevice_res < 0) {
LOG_DEBUG(Service_NVDRV, "{} av_hwdevice_ctx_create failed {}",
av_hwdevice_get_type_name(type), hwdevice_res);
continue;
}
#ifdef LIBVA_FOUND
if (type == AV_HWDEVICE_TYPE_VAAPI) {
// We need to determine whether this is an impersonated VAAPI driver
AVHWDeviceContext* hwctx =
static_cast<AVHWDeviceContext*>(static_cast<void*>(av_gpu_decoder->data));
AVVAAPIDeviceContext* vactx = static_cast<AVVAAPIDeviceContext*>(hwctx->hwctx);
const char* vendor_name = vaQueryVendorString(vactx->display);
if (strstr(vendor_name, "VDPAU backend")) {
// VDPAU-impersonated VAAPI implementations are very buggy, so we need to skip them
LOG_DEBUG(Service_NVDRV, "Skipping VDPAU impersonated VAAPI driver");
continue;
} else {
// According to some user testing, certain VAAPI drivers (Intel?) can be buggy,
// so log the driver name, which may help the developers/supporters
LOG_DEBUG(Service_NVDRV, "Using VAAPI driver: {}", vendor_name);
}
}
#endif
for (int i = 0;; i++) {
const AVCodecHWConfig* config = avcodec_get_hw_config(av_codec, i);
if (!config) {
LOG_DEBUG(Service_NVDRV, "{} decoder does not support device type {}.",
av_codec->name, av_hwdevice_get_type_name(type));
break;
}
if ((config->methods & HW_CONFIG_METHOD) != 0 && config->device_type == type) {
#if defined(__unix__)
// Some Linux decoding backends are reported to crash with this config method
// TODO(ameerj): Properly support this method
if ((config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX) != 0) {
// skip zero-copy decoders, we don't currently support them
LOG_DEBUG(Service_NVDRV, "Skipping decoder {} with unsupported capability {}.",
av_hwdevice_get_type_name(type), config->methods);
continue;
}
#endif
LOG_INFO(Service_NVDRV, "Using {} GPU decoder", av_hwdevice_get_type_name(type));
av_codec_ctx->pix_fmt = config->pix_fmt;
return true;
}
}
}
return false;
}
void Codec::InitializeAvCodecContext() {
av_codec_ctx = avcodec_alloc_context3(av_codec);
av_opt_set(av_codec_ctx->priv_data, "tune", "zerolatency", 0);
}
void Codec::InitializeGpuDecoder() {
if (!CreateGpuAvDevice()) {
av_buffer_unref(&av_gpu_decoder);
return;
}
auto* hw_device_ctx = av_buffer_ref(av_gpu_decoder);
ASSERT_MSG(hw_device_ctx, "av_buffer_ref failed");
av_codec_ctx->hw_device_ctx = hw_device_ctx;
av_codec_ctx->get_format = GetGpuFormat;
}
void Codec::Initialize() {
const AVCodecID codec = [&] {
switch (current_codec) {
case NvdecCommon::VideoCodec::H264:
return AV_CODEC_ID_H264;
case NvdecCommon::VideoCodec::VP8:
return AV_CODEC_ID_VP8;
case NvdecCommon::VideoCodec::VP9:
return AV_CODEC_ID_VP9;
default:
UNIMPLEMENTED_MSG("Unknown codec {}", current_codec);
return AV_CODEC_ID_NONE;
}
}();
av_codec = avcodec_find_decoder(codec);
InitializeAvCodecContext();
if (Settings::values.nvdec_emulation.GetValue() == Settings::NvdecEmulation::GPU) {
InitializeGpuDecoder();
}
if (const int res = avcodec_open2(av_codec_ctx, av_codec, nullptr); res < 0) {
LOG_ERROR(Service_NVDRV, "avcodec_open2() Failed with result {}", res);
avcodec_free_context(&av_codec_ctx);
av_buffer_unref(&av_gpu_decoder);
return;
}
if (!av_codec_ctx->hw_device_ctx) {
LOG_INFO(Service_NVDRV, "Using FFmpeg software decoding");
}
initialized = true;
}
void Codec::SetTargetCodec(NvdecCommon::VideoCodec codec) {
if (current_codec != codec) {
current_codec = codec;
LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", GetCurrentCodecName());
}
}
void Codec::Decode() {
const bool is_first_frame = !initialized;
if (is_first_frame) {
Initialize();
}
if (!initialized) {
return;
}
bool vp9_hidden_frame = false;
const auto& frame_data = [&]() {
switch (current_codec) {
case Tegra::NvdecCommon::VideoCodec::H264:
return h264_decoder->ComposeFrame(state, is_first_frame);
case Tegra::NvdecCommon::VideoCodec::VP8:
return vp8_decoder->ComposeFrame(state);
case Tegra::NvdecCommon::VideoCodec::VP9:
vp9_decoder->ComposeFrame(state);
vp9_hidden_frame = vp9_decoder->WasFrameHidden();
return vp9_decoder->GetFrameBytes();
default:
ASSERT(false);
return std::vector<u8>{};
}
}();
AVPacketPtr packet{av_packet_alloc(), AVPacketDeleter};
if (!packet) {
LOG_ERROR(Service_NVDRV, "av_packet_alloc failed");
return;
}
packet->data = const_cast<u8*>(frame_data.data());
packet->size = static_cast<s32>(frame_data.size());
if (const int res = avcodec_send_packet(av_codec_ctx, packet.get()); res != 0) {
LOG_DEBUG(Service_NVDRV, "avcodec_send_packet error {}", res);
return;
}
// Only receive/store visible frames
if (vp9_hidden_frame) {
return;
}
AVFramePtr initial_frame{av_frame_alloc(), AVFrameDeleter};
AVFramePtr final_frame{nullptr, AVFrameDeleter};
ASSERT_MSG(initial_frame, "av_frame_alloc initial_frame failed");
if (const int ret = avcodec_receive_frame(av_codec_ctx, initial_frame.get()); ret) {
LOG_DEBUG(Service_NVDRV, "avcodec_receive_frame error {}", ret);
return;
}
if (initial_frame->width == 0 || initial_frame->height == 0) {
LOG_WARNING(Service_NVDRV, "Zero width or height in frame");
return;
}
if (av_codec_ctx->hw_device_ctx) {
final_frame = AVFramePtr{av_frame_alloc(), AVFrameDeleter};
ASSERT_MSG(final_frame, "av_frame_alloc final_frame failed");
// Can't use AV_PIX_FMT_YUV420P and share code with software decoding in vic.cpp
// because Intel drivers crash unless using AV_PIX_FMT_NV12
final_frame->format = PREFERRED_GPU_FMT;
const int ret = av_hwframe_transfer_data(final_frame.get(), initial_frame.get(), 0);
ASSERT_MSG(!ret, "av_hwframe_transfer_data error {}", ret);
} else {
final_frame = std::move(initial_frame);
}
if (final_frame->format != PREFERRED_CPU_FMT && final_frame->format != PREFERRED_GPU_FMT) {
UNIMPLEMENTED_MSG("Unexpected video format: {}", final_frame->format);
return;
}
av_frames.push(std::move(final_frame));
if (av_frames.size() > 10) {
LOG_TRACE(Service_NVDRV, "av_frames.push overflow dropped frame");
av_frames.pop();
}
}
AVFramePtr Codec::GetCurrentFrame() {
// Sometimes VIC will request more frames than have been decoded.
// In this case, return a nullptr and don't overwrite the previous frame data.
if (av_frames.empty()) {
return AVFramePtr{nullptr, AVFrameDeleter};
}
AVFramePtr frame = std::move(av_frames.front());
av_frames.pop();
return frame;
}
NvdecCommon::VideoCodec Codec::GetCurrentCodec() const {
return current_codec;
}
std::string_view Codec::GetCurrentCodecName() const {
switch (current_codec) {
case NvdecCommon::VideoCodec::None:
return "None";
case NvdecCommon::VideoCodec::H264:
return "H264";
case NvdecCommon::VideoCodec::VP8:
return "VP8";
case NvdecCommon::VideoCodec::H265:
return "H265";
case NvdecCommon::VideoCodec::VP9:
return "VP9";
default:
return "Unknown";
}
}
} // namespace Tegra
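To make the send/receive flow of Codec::Decode easier to follow in isolation, here is a hedged, self-contained FFmpeg software-decoding sketch of the same pattern (decoder lookup, packet submission, frame draining); the empty bitstream buffer is a placeholder, so a real stream would be needed before any frames appear:

extern "C" {
#include <libavcodec/avcodec.h>
}
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    const AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    AVCodecContext* ctx = avcodec_alloc_context3(codec);
    if (!codec || !ctx || avcodec_open2(ctx, codec, nullptr) < 0) {
        std::fprintf(stderr, "failed to open decoder\n");
        return 1;
    }
    AVPacket* packet = av_packet_alloc();
    AVFrame* frame = av_frame_alloc();
    if (!packet || !frame) {
        return 1;
    }
    std::vector<std::uint8_t> bitstream; // placeholder: a real Annex B stream would go here
    if (!bitstream.empty()) {
        packet->data = bitstream.data();
        packet->size = static_cast<int>(bitstream.size());
    }
    // Mirrors Codec::Decode: submit one packet, then drain any decoded frames.
    if (avcodec_send_packet(ctx, packet) == 0) {
        while (avcodec_receive_frame(ctx, frame) == 0) {
            std::printf("decoded %dx%d frame\n", frame->width, frame->height);
        }
    }
    av_frame_free(&frame);
    av_packet_free(&packet);
    avcodec_free_context(&ctx);
    return 0;
}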

View File

@@ -1,81 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include <string_view>
#include <queue>
#include "video_core/command_classes/nvdec_common.h"
extern "C" {
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
#include <libavcodec/avcodec.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
}
namespace Tegra {
class GPU;
void AVFrameDeleter(AVFrame* ptr);
using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>;
namespace Decoder {
class H264;
class VP8;
class VP9;
} // namespace Decoder
class Codec {
public:
explicit Codec(GPU& gpu, const NvdecCommon::NvdecRegisters& regs);
~Codec();
/// Initializes the codec for the current video stream; sets the initialized flag on success
void Initialize();
/// Sets NVDEC video stream codec
void SetTargetCodec(NvdecCommon::VideoCodec codec);
/// Call decoders to construct headers, decode AVFrame with ffmpeg
void Decode();
/// Returns next decoded frame
[[nodiscard]] AVFramePtr GetCurrentFrame();
/// Returns the value of current_codec
[[nodiscard]] NvdecCommon::VideoCodec GetCurrentCodec() const;
/// Return name of the current codec
[[nodiscard]] std::string_view GetCurrentCodecName() const;
private:
void InitializeAvCodecContext();
void InitializeGpuDecoder();
bool CreateGpuAvDevice();
bool initialized{};
NvdecCommon::VideoCodec current_codec{NvdecCommon::VideoCodec::None};
const AVCodec* av_codec{nullptr};
AVCodecContext* av_codec_ctx{nullptr};
AVBufferRef* av_gpu_decoder{nullptr};
GPU& gpu;
const NvdecCommon::NvdecRegisters& state;
std::unique_ptr<Decoder::H264> h264_decoder;
std::unique_ptr<Decoder::VP8> vp8_decoder;
std::unique_ptr<Decoder::VP9> vp9_decoder;
std::queue<AVFramePtr> av_frames{};
};
} // namespace Tegra
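The AVFramePtr alias above is an instance of the custom-deleter std::unique_ptr pattern; a small self-contained illustration of the same idea using std::FILE instead of FFmpeg types (the file name is arbitrary):

#include <cstdio>
#include <memory>

// Frees the managed resource through a plain function, exactly as AVFrameDeleter does.
void FileDeleter(std::FILE* f) {
    if (f != nullptr) {
        std::fclose(f);
    }
}
using FilePtr = std::unique_ptr<std::FILE, decltype(&FileDeleter)>;

int main() {
    FilePtr file{std::fopen("example.txt", "w"), FileDeleter};
    if (file) {
        std::fputs("hello\n", file.get());
    }
    // FileDeleter runs automatically here, like AVFrameDeleter for decoded frames.
    return 0;
}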

View File

@@ -1,277 +0,0 @@
// SPDX-FileCopyrightText: Ryujinx Team and Contributors
// SPDX-License-Identifier: MIT
#include <array>
#include <bit>
#include "common/settings.h"
#include "video_core/command_classes/codecs/h264.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
namespace Tegra::Decoder {
namespace {
// ZigZag LUTs from libavcodec.
constexpr std::array<u8, 64> zig_zag_direct{
0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48,
41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23,
30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63,
};
constexpr std::array<u8, 16> zig_zag_scan{
0 + 0 * 4, 1 + 0 * 4, 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 2 + 0 * 4, 3 + 0 * 4, 2 + 1 * 4,
1 + 2 * 4, 0 + 3 * 4, 1 + 3 * 4, 2 + 2 * 4, 3 + 1 * 4, 3 + 2 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
} // Anonymous namespace
H264::H264(GPU& gpu_) : gpu(gpu_) {}
H264::~H264() = default;
const std::vector<u8>& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& state,
bool is_first_frame) {
H264DecoderContext context;
gpu.MemoryManager().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext));
const s64 frame_number = context.h264_parameter_set.frame_number.Value();
if (!is_first_frame && frame_number != 0) {
frame.resize(context.stream_len);
gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
return frame;
}
// Encode header
H264BitWriter writer{};
writer.WriteU(1, 24);
writer.WriteU(0, 1);
writer.WriteU(3, 2);
writer.WriteU(7, 5);
writer.WriteU(100, 8);
writer.WriteU(0, 8);
writer.WriteU(31, 8);
writer.WriteUe(0);
const u32 chroma_format_idc =
static_cast<u32>(context.h264_parameter_set.chroma_format_idc.Value());
writer.WriteUe(chroma_format_idc);
if (chroma_format_idc == 3) {
writer.WriteBit(false);
}
writer.WriteUe(0);
writer.WriteUe(0);
writer.WriteBit(false); // QpprimeYZeroTransformBypassFlag
writer.WriteBit(false); // Scaling matrix present flag
writer.WriteUe(static_cast<u32>(context.h264_parameter_set.log2_max_frame_num_minus4.Value()));
const auto order_cnt_type =
static_cast<u32>(context.h264_parameter_set.pic_order_cnt_type.Value());
writer.WriteUe(order_cnt_type);
if (order_cnt_type == 0) {
writer.WriteUe(context.h264_parameter_set.log2_max_pic_order_cnt_lsb_minus4);
} else if (order_cnt_type == 1) {
writer.WriteBit(context.h264_parameter_set.delta_pic_order_always_zero_flag != 0);
writer.WriteSe(0);
writer.WriteSe(0);
writer.WriteUe(0);
}
const s32 pic_height = context.h264_parameter_set.frame_height_in_map_units /
(context.h264_parameter_set.frame_mbs_only_flag ? 1 : 2);
// TODO (ameerj): Where do we get this number? It seems to be particular to each stream.
const auto nvdec_decoding = Settings::values.nvdec_emulation.GetValue();
const bool uses_gpu_decoding = nvdec_decoding == Settings::NvdecEmulation::GPU;
const u32 max_num_ref_frames = uses_gpu_decoding ? 6u : 16u;
writer.WriteUe(max_num_ref_frames);
writer.WriteBit(false);
writer.WriteUe(context.h264_parameter_set.pic_width_in_mbs - 1);
writer.WriteUe(pic_height - 1);
writer.WriteBit(context.h264_parameter_set.frame_mbs_only_flag != 0);
if (!context.h264_parameter_set.frame_mbs_only_flag) {
writer.WriteBit(context.h264_parameter_set.flags.mbaff_frame.Value() != 0);
}
writer.WriteBit(context.h264_parameter_set.flags.direct_8x8_inference.Value() != 0);
writer.WriteBit(false); // Frame cropping flag
writer.WriteBit(false); // VUI parameter present flag
writer.End();
// H264 PPS
writer.WriteU(1, 24);
writer.WriteU(0, 1);
writer.WriteU(3, 2);
writer.WriteU(8, 5);
writer.WriteUe(0);
writer.WriteUe(0);
writer.WriteBit(context.h264_parameter_set.entropy_coding_mode_flag != 0);
writer.WriteBit(false);
writer.WriteUe(0);
writer.WriteUe(context.h264_parameter_set.num_refidx_l0_default_active);
writer.WriteUe(context.h264_parameter_set.num_refidx_l1_default_active);
writer.WriteBit(context.h264_parameter_set.flags.weighted_pred.Value() != 0);
writer.WriteU(static_cast<s32>(context.h264_parameter_set.weighted_bipred_idc.Value()), 2);
s32 pic_init_qp = static_cast<s32>(context.h264_parameter_set.pic_init_qp_minus26.Value());
writer.WriteSe(pic_init_qp);
writer.WriteSe(0);
s32 chroma_qp_index_offset =
static_cast<s32>(context.h264_parameter_set.chroma_qp_index_offset.Value());
writer.WriteSe(chroma_qp_index_offset);
writer.WriteBit(context.h264_parameter_set.deblocking_filter_control_present_flag != 0);
writer.WriteBit(context.h264_parameter_set.flags.constrained_intra_pred.Value() != 0);
writer.WriteBit(context.h264_parameter_set.redundant_pic_cnt_present_flag != 0);
writer.WriteBit(context.h264_parameter_set.transform_8x8_mode_flag != 0);
writer.WriteBit(true);
for (s32 index = 0; index < 6; index++) {
writer.WriteBit(true);
std::span<const u8> matrix{context.weight_scale};
writer.WriteScalingList(matrix, index * 16, 16);
}
if (context.h264_parameter_set.transform_8x8_mode_flag) {
for (s32 index = 0; index < 2; index++) {
writer.WriteBit(true);
std::span<const u8> matrix{context.weight_scale_8x8};
writer.WriteScalingList(matrix, index * 64, 64);
}
}
s32 chroma_qp_index_offset2 =
static_cast<s32>(context.h264_parameter_set.second_chroma_qp_index_offset.Value());
writer.WriteSe(chroma_qp_index_offset2);
writer.End();
const auto& encoded_header = writer.GetByteArray();
frame.resize(encoded_header.size() + context.stream_len);
std::memcpy(frame.data(), encoded_header.data(), encoded_header.size());
gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset,
frame.data() + encoded_header.size(), context.stream_len);
return frame;
}
H264BitWriter::H264BitWriter() = default;
H264BitWriter::~H264BitWriter() = default;
void H264BitWriter::WriteU(s32 value, s32 value_sz) {
WriteBits(value, value_sz);
}
void H264BitWriter::WriteSe(s32 value) {
WriteExpGolombCodedInt(value);
}
void H264BitWriter::WriteUe(u32 value) {
WriteExpGolombCodedUInt(value);
}
void H264BitWriter::End() {
WriteBit(true);
Flush();
}
void H264BitWriter::WriteBit(bool state) {
WriteBits(state ? 1 : 0, 1);
}
void H264BitWriter::WriteScalingList(std::span<const u8> list, s32 start, s32 count) {
std::vector<u8> scan(count);
if (count == 16) {
std::memcpy(scan.data(), zig_zag_scan.data(), scan.size());
} else {
std::memcpy(scan.data(), zig_zag_direct.data(), scan.size());
}
u8 last_scale = 8;
for (s32 index = 0; index < count; index++) {
const u8 value = list[start + scan[index]];
const s32 delta_scale = static_cast<s32>(value - last_scale);
WriteSe(delta_scale);
last_scale = value;
}
}
std::vector<u8>& H264BitWriter::GetByteArray() {
return byte_array;
}
const std::vector<u8>& H264BitWriter::GetByteArray() const {
return byte_array;
}
void H264BitWriter::WriteBits(s32 value, s32 bit_count) {
s32 value_pos = 0;
s32 remaining = bit_count;
while (remaining > 0) {
s32 copy_size = remaining;
const s32 free_bits = GetFreeBufferBits();
if (copy_size > free_bits) {
copy_size = free_bits;
}
const s32 mask = (1 << copy_size) - 1;
const s32 src_shift = (bit_count - value_pos) - copy_size;
const s32 dst_shift = (buffer_size - buffer_pos) - copy_size;
buffer |= ((value >> src_shift) & mask) << dst_shift;
value_pos += copy_size;
buffer_pos += copy_size;
remaining -= copy_size;
}
}
void H264BitWriter::WriteExpGolombCodedInt(s32 value) {
const s32 sign = value <= 0 ? 0 : 1;
if (value < 0) {
value = -value;
}
value = (value << 1) - sign;
WriteExpGolombCodedUInt(value);
}
void H264BitWriter::WriteExpGolombCodedUInt(u32 value) {
const s32 size = 32 - std::countl_zero(value + 1);
WriteBits(1, size);
value -= (1U << (size - 1)) - 1;
WriteBits(static_cast<s32>(value), size - 1);
}
s32 H264BitWriter::GetFreeBufferBits() {
if (buffer_pos == buffer_size) {
Flush();
}
return buffer_size - buffer_pos;
}
void H264BitWriter::Flush() {
if (buffer_pos == 0) {
return;
}
byte_array.push_back(static_cast<u8>(buffer));
buffer = 0;
buffer_pos = 0;
}
} // namespace Tegra::Decoder
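Because the header composition above leans heavily on Exp-Golomb coding (clause 9.1 of the H.264 specification), here is a standalone sketch of the unsigned ue(v) encoding that WriteUe/WriteExpGolombCodedUInt implement, emitting the code word as a text string for easy inspection; EncodeUe is an illustrative helper name:

#include <bit>
#include <cstdint>
#include <cstdio>
#include <string>

// Encodes value as an unsigned Exp-Golomb code word: (size - 1) zero bits followed by the
// size-bit binary form of (value + 1), where size = floor(log2(value + 1)) + 1.
std::string EncodeUe(std::uint32_t value) {
    const int size = 32 - std::countl_zero(value + 1);
    std::string bits(static_cast<std::size_t>(size - 1), '0');
    for (int i = size - 1; i >= 0; --i) {
        bits.push_back((((value + 1) >> i) & 1) != 0 ? '1' : '0');
    }
    return bits;
}

int main() {
    // Expected output: ue(0) = "1", ue(1) = "010", ue(2) = "011", ue(5) = "00110"
    for (const std::uint32_t v : {0u, 1u, 2u, 5u}) {
        std::printf("ue(%u) = %s\n", v, EncodeUe(v).c_str());
    }
    return 0;
}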

View File

@@ -1,173 +0,0 @@
// SPDX-FileCopyrightText: Ryujinx Team and Contributors
// SPDX-License-Identifier: MIT
#pragma once
#include <span>
#include <vector>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/command_classes/nvdec_common.h"
namespace Tegra {
class GPU;
namespace Decoder {
class H264BitWriter {
public:
H264BitWriter();
~H264BitWriter();
/// The following Write methods are based on clause 9.1 in the H.264 specification.
/// WriteSe and WriteUe write in the Exp-Golomb-coded syntax
void WriteU(s32 value, s32 value_sz);
void WriteSe(s32 value);
void WriteUe(u32 value);
/// Finalize the bitstream
void End();
/// Appends a single bit to the stream, with a value equal to the state parameter
void WriteBit(bool state);
/// Based on section 7.3.2.1.1.1 and Table 7-4 in the H.264 specification
/// Writes the scaling matrices of the stream
void WriteScalingList(std::span<const u8> list, s32 start, s32 count);
/// Return the bitstream as a vector.
[[nodiscard]] std::vector<u8>& GetByteArray();
[[nodiscard]] const std::vector<u8>& GetByteArray() const;
private:
void WriteBits(s32 value, s32 bit_count);
void WriteExpGolombCodedInt(s32 value);
void WriteExpGolombCodedUInt(u32 value);
[[nodiscard]] s32 GetFreeBufferBits();
void Flush();
s32 buffer_size{8};
s32 buffer{};
s32 buffer_pos{};
std::vector<u8> byte_array;
};
class H264 {
public:
explicit H264(GPU& gpu);
~H264();
/// Compose the H264 frame for FFmpeg decoding
[[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state,
bool is_first_frame = false);
private:
std::vector<u8> frame;
GPU& gpu;
struct H264ParameterSet {
s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00
s32 delta_pic_order_always_zero_flag; ///< 0x04
s32 frame_mbs_only_flag; ///< 0x08
u32 pic_width_in_mbs; ///< 0x0C
u32 frame_height_in_map_units; ///< 0x10
union { ///< 0x14
BitField<0, 2, u32> tile_format;
BitField<2, 3, u32> gob_height;
};
u32 entropy_coding_mode_flag; ///< 0x18
s32 pic_order_present_flag; ///< 0x1C
s32 num_refidx_l0_default_active; ///< 0x20
s32 num_refidx_l1_default_active; ///< 0x24
s32 deblocking_filter_control_present_flag; ///< 0x28
s32 redundant_pic_cnt_present_flag; ///< 0x2C
u32 transform_8x8_mode_flag; ///< 0x30
u32 pitch_luma; ///< 0x34
u32 pitch_chroma; ///< 0x38
u32 luma_top_offset; ///< 0x3C
u32 luma_bot_offset; ///< 0x40
u32 luma_frame_offset; ///< 0x44
u32 chroma_top_offset; ///< 0x48
u32 chroma_bot_offset; ///< 0x4C
u32 chroma_frame_offset; ///< 0x50
u32 hist_buffer_size; ///< 0x54
union { ///< 0x58
union {
BitField<0, 1, u64> mbaff_frame;
BitField<1, 1, u64> direct_8x8_inference;
BitField<2, 1, u64> weighted_pred;
BitField<3, 1, u64> constrained_intra_pred;
BitField<4, 1, u64> ref_pic;
BitField<5, 1, u64> field_pic;
BitField<6, 1, u64> bottom_field;
BitField<7, 1, u64> second_field;
} flags;
BitField<8, 4, u64> log2_max_frame_num_minus4;
BitField<12, 2, u64> chroma_format_idc;
BitField<14, 2, u64> pic_order_cnt_type;
BitField<16, 6, s64> pic_init_qp_minus26;
BitField<22, 5, s64> chroma_qp_index_offset;
BitField<27, 5, s64> second_chroma_qp_index_offset;
BitField<32, 2, u64> weighted_bipred_idc;
BitField<34, 7, u64> curr_pic_idx;
BitField<41, 5, u64> curr_col_idx;
BitField<46, 16, u64> frame_number;
BitField<62, 1, u64> frame_surfaces;
BitField<63, 1, u64> output_memory_layout;
};
};
static_assert(sizeof(H264ParameterSet) == 0x60, "H264ParameterSet is an invalid size");
struct H264DecoderContext {
INSERT_PADDING_WORDS_NOINIT(18); ///< 0x0000
u32 stream_len; ///< 0x0048
INSERT_PADDING_WORDS_NOINIT(3); ///< 0x004C
H264ParameterSet h264_parameter_set; ///< 0x0058
INSERT_PADDING_WORDS_NOINIT(66); ///< 0x00B8
std::array<u8, 0x60> weight_scale; ///< 0x01C0
std::array<u8, 0x80> weight_scale_8x8; ///< 0x0220
};
static_assert(sizeof(H264DecoderContext) == 0x2A0, "H264DecoderContext is an invalid size");
#define ASSERT_POSITION(field_name, position) \
static_assert(offsetof(H264ParameterSet, field_name) == position, \
"Field " #field_name " has invalid position")
ASSERT_POSITION(log2_max_pic_order_cnt_lsb_minus4, 0x00);
ASSERT_POSITION(delta_pic_order_always_zero_flag, 0x04);
ASSERT_POSITION(frame_mbs_only_flag, 0x08);
ASSERT_POSITION(pic_width_in_mbs, 0x0C);
ASSERT_POSITION(frame_height_in_map_units, 0x10);
ASSERT_POSITION(tile_format, 0x14);
ASSERT_POSITION(entropy_coding_mode_flag, 0x18);
ASSERT_POSITION(pic_order_present_flag, 0x1C);
ASSERT_POSITION(num_refidx_l0_default_active, 0x20);
ASSERT_POSITION(num_refidx_l1_default_active, 0x24);
ASSERT_POSITION(deblocking_filter_control_present_flag, 0x28);
ASSERT_POSITION(redundant_pic_cnt_present_flag, 0x2C);
ASSERT_POSITION(transform_8x8_mode_flag, 0x30);
ASSERT_POSITION(pitch_luma, 0x34);
ASSERT_POSITION(pitch_chroma, 0x38);
ASSERT_POSITION(luma_top_offset, 0x3C);
ASSERT_POSITION(luma_bot_offset, 0x40);
ASSERT_POSITION(luma_frame_offset, 0x44);
ASSERT_POSITION(chroma_top_offset, 0x48);
ASSERT_POSITION(chroma_bot_offset, 0x4C);
ASSERT_POSITION(chroma_frame_offset, 0x50);
ASSERT_POSITION(hist_buffer_size, 0x54);
ASSERT_POSITION(flags, 0x58);
#undef ASSERT_POSITION
#define ASSERT_POSITION(field_name, position) \
static_assert(offsetof(H264DecoderContext, field_name) == position, \
"Field " #field_name " has invalid position")
ASSERT_POSITION(stream_len, 0x48);
ASSERT_POSITION(h264_parameter_set, 0x58);
ASSERT_POSITION(weight_scale, 0x1C0);
#undef ASSERT_POSITION
};
} // namespace Decoder
} // namespace Tegra
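The ASSERT_POSITION macro above is simply offsetof checked at compile time; a tiny self-contained illustration of the same layout-verification idea with a made-up structure (all names below are hypothetical):

#include <cstddef>
#include <cstdint>

// A hypothetical fixed-layout structure: each field's offset is documented and then
// verified at compile time, so accidental reordering or padding changes fail the build.
struct ExampleHeader {
    std::uint32_t magic;   ///< 0x00
    std::uint32_t length;  ///< 0x04
    std::uint32_t version; ///< 0x08
};
static_assert(offsetof(ExampleHeader, magic) == 0x00, "Field magic has invalid position");
static_assert(offsetof(ExampleHeader, length) == 0x04, "Field length has invalid position");
static_assert(offsetof(ExampleHeader, version) == 0x08, "Field version has invalid position");
static_assert(sizeof(ExampleHeader) == 0x0C, "ExampleHeader is an invalid size");

int main() {
    return 0;
}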

View File

@@ -1,53 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <vector>
#include "video_core/command_classes/codecs/vp8.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
namespace Tegra::Decoder {
VP8::VP8(GPU& gpu_) : gpu(gpu_) {}
VP8::~VP8() = default;
const std::vector<u8>& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& state) {
VP8PictureInfo info;
gpu.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));
const bool is_key_frame = info.key_frame == 1u;
const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size);
const size_t header_size = is_key_frame ? 10u : 3u;
frame.resize(header_size + bitstream_size);
// Based on page 30 of the VP8 specification.
// https://datatracker.ietf.org/doc/rfc6386/
frame[0] = is_key_frame ? 0u : 1u; // 1-bit frame type (0: keyframe, 1: interframe).
frame[0] |= static_cast<u8>((info.version & 7u) << 1u); // 3-bit version number
frame[0] |= static_cast<u8>(1u << 4u); // 1-bit show_frame flag
// The next 19-bits are the first partition size
frame[0] |= static_cast<u8>((info.first_part_size & 7u) << 5u);
frame[1] = static_cast<u8>((info.first_part_size & 0x7f8u) >> 3u);
frame[2] = static_cast<u8>((info.first_part_size & 0x7f800u) >> 11u);
if (is_key_frame) {
frame[3] = 0x9du;
frame[4] = 0x01u;
frame[5] = 0x2au;
// TODO(ameerj): Horizontal/Vertical Scale
// 16 bits: (2 bits Horizontal Scale << 14) | Width (14 bits)
frame[6] = static_cast<u8>(info.frame_width & 0xff);
frame[7] = static_cast<u8>(((info.frame_width >> 8) & 0x3f));
// 16 bits: (2 bits Vertical Scale << 14) | Height (14 bits)
frame[8] = static_cast<u8>(info.frame_height & 0xff);
frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f));
}
const u64 bitstream_offset = state.frame_bitstream_offset;
gpu.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);
return frame;
}
} // namespace Tegra::Decoder
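A self-contained sketch of the 3-byte VP8 frame tag packing performed above (RFC 6386, section 9.1); PackVp8FrameTag and the sample values are illustrative only:

#include <array>
#include <cstdint>
#include <cstdio>

// Packs frame type, version, show_frame and the 19-bit first partition size into the
// 3-byte frame tag, mirroring the header composition in VP8::ComposeFrame.
std::array<std::uint8_t, 3> PackVp8FrameTag(bool key_frame, std::uint32_t version,
                                             bool show_frame, std::uint32_t first_part_size) {
    std::array<std::uint8_t, 3> tag{};
    tag[0] = static_cast<std::uint8_t>((key_frame ? 0u : 1u) | ((version & 7u) << 1) |
                                       ((show_frame ? 1u : 0u) << 4) |
                                       ((first_part_size & 7u) << 5));
    tag[1] = static_cast<std::uint8_t>((first_part_size >> 3) & 0xffu);
    tag[2] = static_cast<std::uint8_t>((first_part_size >> 11) & 0xffu);
    return tag;
}

int main() {
    const auto tag = PackVp8FrameTag(true, 3, true, 0x1234);
    std::printf("%02x %02x %02x\n", static_cast<unsigned>(tag[0]), static_cast<unsigned>(tag[1]),
                static_cast<unsigned>(tag[2]));
    return 0;
}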

View File

@@ -1,73 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/command_classes/nvdec_common.h"
namespace Tegra {
class GPU;
namespace Decoder {
class VP8 {
public:
explicit VP8(GPU& gpu);
~VP8();
/// Compose the VP8 frame for FFmpeg decoding
[[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state);
private:
std::vector<u8> frame;
GPU& gpu;
struct VP8PictureInfo {
INSERT_PADDING_WORDS_NOINIT(14);
u16 frame_width; // actual frame width
u16 frame_height; // actual frame height
u8 key_frame;
u8 version;
union {
u8 raw;
BitField<0, 2, u8> tile_format;
BitField<2, 3, u8> gob_height;
BitField<5, 3, u8> reserverd_surface_format;
};
u8 error_conceal_on; // 1: error conceal on; 0: off
u32 first_part_size; // the size of the first partition (frame header and mb header partition)
u32 hist_buffer_size; // in units of 256
u32 vld_buffer_size; // in units of 1
// Current frame buffers
std::array<u32, 2> frame_stride; // [y_c]
u32 luma_top_offset; // offset of luma top field in units of 256
u32 luma_bot_offset; // offset of luma bottom field in units of 256
u32 luma_frame_offset; // offset of luma frame in units of 256
u32 chroma_top_offset; // offset of chroma top field in units of 256
u32 chroma_bot_offset; // offset of chroma bottom field in units of 256
u32 chroma_frame_offset; // offset of chroma frame in units of 256
INSERT_PADDING_BYTES_NOINIT(0x1c); // NvdecDisplayParams
// Decode picture buffer related
s8 current_output_memory_layout;
// output NV12/NV24 setting. index 0: golden; 1: altref; 2: last
std::array<s8, 3> output_memory_layout;
u8 segmentation_feature_data_update;
INSERT_PADDING_BYTES_NOINIT(3);
// ucode return result
u32 result_value;
std::array<u32, 8> partition_offset;
INSERT_PADDING_WORDS_NOINIT(3);
};
static_assert(sizeof(VP8PictureInfo) == 0xc0, "PictureInfo is an invalid size");
};
} // namespace Decoder
} // namespace Tegra

View File

@@ -1,946 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm> // for std::copy
#include <numeric>
#include "common/assert.h"
#include "video_core/command_classes/codecs/vp9.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
namespace Tegra::Decoder {
namespace {
constexpr u32 diff_update_probability = 252;
constexpr u32 frame_sync_code = 0x498342;
// Default compressed header probabilities once frame context resets
constexpr Vp9EntropyProbs default_probs{
.y_mode_prob{
65, 32, 18, 144, 162, 194, 41, 51, 98, 132, 68, 18, 165, 217, 196, 45, 40, 78,
173, 80, 19, 176, 240, 193, 64, 35, 46, 221, 135, 38, 194, 248, 121, 96, 85, 29,
},
.partition_prob{
199, 122, 141, 0, 147, 63, 159, 0, 148, 133, 118, 0, 121, 104, 114, 0,
174, 73, 87, 0, 92, 41, 83, 0, 82, 99, 50, 0, 53, 39, 39, 0,
177, 58, 59, 0, 68, 26, 63, 0, 52, 79, 25, 0, 17, 14, 12, 0,
222, 34, 30, 0, 72, 16, 44, 0, 58, 32, 12, 0, 10, 7, 6, 0,
},
.coef_probs{
195, 29, 183, 84, 49, 136, 8, 42, 71, 0, 0, 0, 0, 0, 0, 0, 0, 0,
31, 107, 169, 35, 99, 159, 17, 82, 140, 8, 66, 114, 2, 44, 76, 1, 19, 32,
40, 132, 201, 29, 114, 187, 13, 91, 157, 7, 75, 127, 3, 58, 95, 1, 28, 47,
69, 142, 221, 42, 122, 201, 15, 91, 159, 6, 67, 121, 1, 42, 77, 1, 17, 31,
102, 148, 228, 67, 117, 204, 17, 82, 154, 6, 59, 114, 2, 39, 75, 1, 15, 29,
156, 57, 233, 119, 57, 212, 58, 48, 163, 29, 40, 124, 12, 30, 81, 3, 12, 31,
191, 107, 226, 124, 117, 204, 25, 99, 155, 0, 0, 0, 0, 0, 0, 0, 0, 0,
29, 148, 210, 37, 126, 194, 8, 93, 157, 2, 68, 118, 1, 39, 69, 1, 17, 33,
41, 151, 213, 27, 123, 193, 3, 82, 144, 1, 58, 105, 1, 32, 60, 1, 13, 26,
59, 159, 220, 23, 126, 198, 4, 88, 151, 1, 66, 114, 1, 38, 71, 1, 18, 34,
114, 136, 232, 51, 114, 207, 11, 83, 155, 3, 56, 105, 1, 33, 65, 1, 17, 34,
149, 65, 234, 121, 57, 215, 61, 49, 166, 28, 36, 114, 12, 25, 76, 3, 16, 42,
214, 49, 220, 132, 63, 188, 42, 65, 137, 0, 0, 0, 0, 0, 0, 0, 0, 0,
85, 137, 221, 104, 131, 216, 49, 111, 192, 21, 87, 155, 2, 49, 87, 1, 16, 28,
89, 163, 230, 90, 137, 220, 29, 100, 183, 10, 70, 135, 2, 42, 81, 1, 17, 33,
108, 167, 237, 55, 133, 222, 15, 97, 179, 4, 72, 135, 1, 45, 85, 1, 19, 38,
124, 146, 240, 66, 124, 224, 17, 88, 175, 4, 58, 122, 1, 36, 75, 1, 18, 37,
141, 79, 241, 126, 70, 227, 66, 58, 182, 30, 44, 136, 12, 34, 96, 2, 20, 47,
229, 99, 249, 143, 111, 235, 46, 109, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0,
82, 158, 236, 94, 146, 224, 25, 117, 191, 9, 87, 149, 3, 56, 99, 1, 33, 57,
83, 167, 237, 68, 145, 222, 10, 103, 177, 2, 72, 131, 1, 41, 79, 1, 20, 39,
99, 167, 239, 47, 141, 224, 10, 104, 178, 2, 73, 133, 1, 44, 85, 1, 22, 47,
127, 145, 243, 71, 129, 228, 17, 93, 177, 3, 61, 124, 1, 41, 84, 1, 21, 52,
157, 78, 244, 140, 72, 231, 69, 58, 184, 31, 44, 137, 14, 38, 105, 8, 23, 61,
125, 34, 187, 52, 41, 133, 6, 31, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0,
37, 109, 153, 51, 102, 147, 23, 87, 128, 8, 67, 101, 1, 41, 63, 1, 19, 29,
31, 154, 185, 17, 127, 175, 6, 96, 145, 2, 73, 114, 1, 51, 82, 1, 28, 45,
23, 163, 200, 10, 131, 185, 2, 93, 148, 1, 67, 111, 1, 41, 69, 1, 14, 24,
29, 176, 217, 12, 145, 201, 3, 101, 156, 1, 69, 111, 1, 39, 63, 1, 14, 23,
57, 192, 233, 25, 154, 215, 6, 109, 167, 3, 78, 118, 1, 48, 69, 1, 21, 29,
202, 105, 245, 108, 106, 216, 18, 90, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0,
33, 172, 219, 64, 149, 206, 14, 117, 177, 5, 90, 141, 2, 61, 95, 1, 37, 57,
33, 179, 220, 11, 140, 198, 1, 89, 148, 1, 60, 104, 1, 33, 57, 1, 12, 21,
30, 181, 221, 8, 141, 198, 1, 87, 145, 1, 58, 100, 1, 31, 55, 1, 12, 20,
32, 186, 224, 7, 142, 198, 1, 86, 143, 1, 58, 100, 1, 31, 55, 1, 12, 22,
57, 192, 227, 20, 143, 204, 3, 96, 154, 1, 68, 112, 1, 42, 69, 1, 19, 32,
212, 35, 215, 113, 47, 169, 29, 48, 105, 0, 0, 0, 0, 0, 0, 0, 0, 0,
74, 129, 203, 106, 120, 203, 49, 107, 178, 19, 84, 144, 4, 50, 84, 1, 15, 25,
71, 172, 217, 44, 141, 209, 15, 102, 173, 6, 76, 133, 2, 51, 89, 1, 24, 42,
64, 185, 231, 31, 148, 216, 8, 103, 175, 3, 74, 131, 1, 46, 81, 1, 18, 30,
65, 196, 235, 25, 157, 221, 5, 105, 174, 1, 67, 120, 1, 38, 69, 1, 15, 30,
65, 204, 238, 30, 156, 224, 7, 107, 177, 2, 70, 124, 1, 42, 73, 1, 18, 34,
225, 86, 251, 144, 104, 235, 42, 99, 181, 0, 0, 0, 0, 0, 0, 0, 0, 0,
85, 175, 239, 112, 165, 229, 29, 136, 200, 12, 103, 162, 6, 77, 123, 2, 53, 84,
75, 183, 239, 30, 155, 221, 3, 106, 171, 1, 74, 128, 1, 44, 76, 1, 17, 28,
73, 185, 240, 27, 159, 222, 2, 107, 172, 1, 75, 127, 1, 42, 73, 1, 17, 29,
62, 190, 238, 21, 159, 222, 2, 107, 172, 1, 72, 122, 1, 40, 71, 1, 18, 32,
61, 199, 240, 27, 161, 226, 4, 113, 180, 1, 76, 129, 1, 46, 80, 1, 23, 41,
7, 27, 153, 5, 30, 95, 1, 16, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0,
50, 75, 127, 57, 75, 124, 27, 67, 108, 10, 54, 86, 1, 33, 52, 1, 12, 18,
43, 125, 151, 26, 108, 148, 7, 83, 122, 2, 59, 89, 1, 38, 60, 1, 17, 27,
23, 144, 163, 13, 112, 154, 2, 75, 117, 1, 50, 81, 1, 31, 51, 1, 14, 23,
18, 162, 185, 6, 123, 171, 1, 78, 125, 1, 51, 86, 1, 31, 54, 1, 14, 23,
15, 199, 227, 3, 150, 204, 1, 91, 146, 1, 55, 95, 1, 30, 53, 1, 11, 20,
19, 55, 240, 19, 59, 196, 3, 52, 105, 0, 0, 0, 0, 0, 0, 0, 0, 0,
41, 166, 207, 104, 153, 199, 31, 123, 181, 14, 101, 152, 5, 72, 106, 1, 36, 52,
35, 176, 211, 12, 131, 190, 2, 88, 144, 1, 60, 101, 1, 36, 60, 1, 16, 28,
28, 183, 213, 8, 134, 191, 1, 86, 142, 1, 56, 96, 1, 30, 53, 1, 12, 20,
20, 190, 215, 4, 135, 192, 1, 84, 139, 1, 53, 91, 1, 28, 49, 1, 11, 20,
13, 196, 216, 2, 137, 192, 1, 86, 143, 1, 57, 99, 1, 32, 56, 1, 13, 24,
211, 29, 217, 96, 47, 156, 22, 43, 87, 0, 0, 0, 0, 0, 0, 0, 0, 0,
78, 120, 193, 111, 116, 186, 46, 102, 164, 15, 80, 128, 2, 49, 76, 1, 18, 28,
71, 161, 203, 42, 132, 192, 10, 98, 150, 3, 69, 109, 1, 44, 70, 1, 18, 29,
57, 186, 211, 30, 140, 196, 4, 93, 146, 1, 62, 102, 1, 38, 65, 1, 16, 27,
47, 199, 217, 14, 145, 196, 1, 88, 142, 1, 57, 98, 1, 36, 62, 1, 15, 26,
26, 219, 229, 5, 155, 207, 1, 94, 151, 1, 60, 104, 1, 36, 62, 1, 16, 28,
233, 29, 248, 146, 47, 220, 43, 52, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100, 163, 232, 179, 161, 222, 63, 142, 204, 37, 113, 174, 26, 89, 137, 18, 68, 97,
85, 181, 230, 32, 146, 209, 7, 100, 164, 3, 71, 121, 1, 45, 77, 1, 18, 30,
65, 187, 230, 20, 148, 207, 2, 97, 159, 1, 68, 116, 1, 40, 70, 1, 14, 29,
40, 194, 227, 8, 147, 204, 1, 94, 155, 1, 65, 112, 1, 39, 66, 1, 14, 26,
16, 208, 228, 3, 151, 207, 1, 98, 160, 1, 67, 117, 1, 41, 74, 1, 17, 31,
17, 38, 140, 7, 34, 80, 1, 17, 29, 0, 0, 0, 0, 0, 0, 0, 0, 0,
37, 75, 128, 41, 76, 128, 26, 66, 116, 12, 52, 94, 2, 32, 55, 1, 10, 16,
50, 127, 154, 37, 109, 152, 16, 82, 121, 5, 59, 85, 1, 35, 54, 1, 13, 20,
40, 142, 167, 17, 110, 157, 2, 71, 112, 1, 44, 72, 1, 27, 45, 1, 11, 17,
30, 175, 188, 9, 124, 169, 1, 74, 116, 1, 48, 78, 1, 30, 49, 1, 11, 18,
10, 222, 223, 2, 150, 194, 1, 83, 128, 1, 48, 79, 1, 27, 45, 1, 11, 17,
36, 41, 235, 29, 36, 193, 10, 27, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0,
85, 165, 222, 177, 162, 215, 110, 135, 195, 57, 113, 168, 23, 83, 120, 10, 49, 61,
85, 190, 223, 36, 139, 200, 5, 90, 146, 1, 60, 103, 1, 38, 65, 1, 18, 30,
72, 202, 223, 23, 141, 199, 2, 86, 140, 1, 56, 97, 1, 36, 61, 1, 16, 27,
55, 218, 225, 13, 145, 200, 1, 86, 141, 1, 57, 99, 1, 35, 61, 1, 13, 22,
15, 235, 212, 1, 132, 184, 1, 84, 139, 1, 57, 97, 1, 34, 56, 1, 14, 23,
181, 21, 201, 61, 37, 123, 10, 38, 71, 0, 0, 0, 0, 0, 0, 0, 0, 0,
47, 106, 172, 95, 104, 173, 42, 93, 159, 18, 77, 131, 4, 50, 81, 1, 17, 23,
62, 147, 199, 44, 130, 189, 28, 102, 154, 18, 75, 115, 2, 44, 65, 1, 12, 19,
55, 153, 210, 24, 130, 194, 3, 93, 146, 1, 61, 97, 1, 31, 50, 1, 10, 16,
49, 186, 223, 17, 148, 204, 1, 96, 142, 1, 53, 83, 1, 26, 44, 1, 11, 17,
13, 217, 212, 2, 136, 180, 1, 78, 124, 1, 50, 83, 1, 29, 49, 1, 14, 23,
197, 13, 247, 82, 17, 222, 25, 17, 162, 0, 0, 0, 0, 0, 0, 0, 0, 0,
126, 186, 247, 234, 191, 243, 176, 177, 234, 104, 158, 220, 66, 128, 186, 55, 90, 137,
111, 197, 242, 46, 158, 219, 9, 104, 171, 2, 65, 125, 1, 44, 80, 1, 17, 91,
104, 208, 245, 39, 168, 224, 3, 109, 162, 1, 79, 124, 1, 50, 102, 1, 43, 102,
84, 220, 246, 31, 177, 231, 2, 115, 180, 1, 79, 134, 1, 55, 77, 1, 60, 79,
43, 243, 240, 8, 180, 217, 1, 115, 166, 1, 84, 121, 1, 51, 67, 1, 16, 6,
},
.switchable_interp_prob{235, 162, 36, 255, 34, 3, 149, 144},
.inter_mode_prob{
2, 173, 34, 0, 7, 145, 85, 0, 7, 166, 63, 0, 7, 94,
66, 0, 8, 64, 46, 0, 17, 81, 31, 0, 25, 29, 30, 0,
},
.intra_inter_prob{9, 102, 187, 225},
.comp_inter_prob{9, 102, 187, 225, 0},
.single_ref_prob{33, 16, 77, 74, 142, 142, 172, 170, 238, 247},
.comp_ref_prob{50, 126, 123, 221, 226},
.tx_32x32_prob{3, 136, 37, 5, 52, 13},
.tx_16x16_prob{20, 152, 15, 101},
.tx_8x8_prob{100, 66},
.skip_probs{192, 128, 64},
.joints{32, 64, 96},
.sign{128, 128},
.classes{
224, 144, 192, 168, 192, 176, 192, 198, 198, 245,
216, 128, 176, 160, 176, 176, 192, 198, 198, 208,
},
.class_0{216, 208},
.prob_bits{
136, 140, 148, 160, 176, 192, 224, 234, 234, 240,
136, 140, 148, 160, 176, 192, 224, 234, 234, 240,
},
.class_0_fr{128, 128, 64, 96, 112, 64, 128, 128, 64, 96, 112, 64},
.fr{64, 96, 64, 64, 96, 64},
.class_0_hp{160, 160},
.high_precision{128, 128},
};
constexpr std::array<u8, 256> norm_lut{
0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
constexpr std::array<u8, 254> map_lut{
20, 21, 22, 23, 24, 25, 0, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
1, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 2, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 3, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
73, 4, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 5, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 6, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
108, 109, 7, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 8, 122, 123, 124,
125, 126, 127, 128, 129, 130, 131, 132, 133, 9, 134, 135, 136, 137, 138, 139, 140, 141, 142,
143, 144, 145, 10, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 11, 158, 159,
160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 12, 170, 171, 172, 173, 174, 175, 176, 177,
178, 179, 180, 181, 13, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 14, 194,
195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 15, 206, 207, 208, 209, 210, 211, 212,
213, 214, 215, 216, 217, 16, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 17,
230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 18, 242, 243, 244, 245, 246, 247,
248, 249, 250, 251, 252, 253, 19,
};
// 6.2.14 Tile size calculation
[[nodiscard]] s32 CalcMinLog2TileCols(s32 frame_width) {
const s32 sb64_cols = (frame_width + 63) / 64;
s32 min_log2 = 0;
while ((64 << min_log2) < sb64_cols) {
min_log2++;
}
return min_log2;
}
[[nodiscard]] s32 CalcMaxLog2TileCols(s32 frame_width) {
const s32 sb64_cols = (frame_width + 63) / 64;
s32 max_log2 = 1;
while ((sb64_cols >> max_log2) >= 4) {
max_log2++;
}
return max_log2 - 1;
}
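// Worked example of the two helpers above: for a 1920-pixel-wide frame,
// sb64_cols = (1920 + 63) / 64 = 31, so CalcMinLog2TileCols returns 0
// (64 << 0 is already >= 31) and CalcMaxLog2TileCols returns 2
// (31 >> 1 = 15 and 31 >> 2 = 7 are both >= 4, but 31 >> 3 = 3 is not).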
// Recenters probability. Based on section 6.3.6 of VP9 Specification
[[nodiscard]] s32 RecenterNonNeg(s32 new_prob, s32 old_prob) {
if (new_prob > old_prob * 2) {
return new_prob;
}
if (new_prob >= old_prob) {
return (new_prob - old_prob) * 2;
}
return (old_prob - new_prob) * 2 - 1;
}
// Adjusts old_prob depending on new_prob. Based on section 6.3.5 of VP9 Specification
[[nodiscard]] s32 RemapProbability(s32 new_prob, s32 old_prob) {
new_prob--;
old_prob--;
std::size_t index{};
if (old_prob * 2 <= 0xff) {
index = static_cast<std::size_t>(std::max(0, RecenterNonNeg(new_prob, old_prob) - 1));
} else {
index = static_cast<std::size_t>(
std::max(0, RecenterNonNeg(0xff - 1 - new_prob, 0xff - 1 - old_prob) - 1));
}
return static_cast<s32>(map_lut[index]);
}
} // Anonymous namespace
VP9::VP9(GPU& gpu_) : gpu{gpu_} {}
VP9::~VP9() = default;
void VP9::WriteProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob) {
const bool update = new_prob != old_prob;
writer.Write(update, diff_update_probability);
if (update) {
WriteProbabilityDelta(writer, new_prob, old_prob);
}
}
template <typename T, std::size_t N>
void VP9::WriteProbabilityUpdate(VpxRangeEncoder& writer, const std::array<T, N>& new_prob,
const std::array<T, N>& old_prob) {
for (std::size_t offset = 0; offset < new_prob.size(); ++offset) {
WriteProbabilityUpdate(writer, new_prob[offset], old_prob[offset]);
}
}
template <typename T, std::size_t N>
void VP9::WriteProbabilityUpdateAligned4(VpxRangeEncoder& writer, const std::array<T, N>& new_prob,
const std::array<T, N>& old_prob) {
for (std::size_t offset = 0; offset < new_prob.size(); offset += 4) {
WriteProbabilityUpdate(writer, new_prob[offset + 0], old_prob[offset + 0]);
WriteProbabilityUpdate(writer, new_prob[offset + 1], old_prob[offset + 1]);
WriteProbabilityUpdate(writer, new_prob[offset + 2], old_prob[offset + 2]);
}
}
void VP9::WriteProbabilityDelta(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob) {
const int delta = RemapProbability(new_prob, old_prob);
EncodeTermSubExp(writer, delta);
}
void VP9::EncodeTermSubExp(VpxRangeEncoder& writer, s32 value) {
if (WriteLessThan(writer, value, 16)) {
writer.Write(value, 4);
} else if (WriteLessThan(writer, value, 32)) {
writer.Write(value - 16, 4);
} else if (WriteLessThan(writer, value, 64)) {
writer.Write(value - 32, 5);
} else {
value -= 64;
constexpr s32 size = 8;
const s32 mask = (1 << size) - 191;
const s32 delta = value - mask;
if (delta < 0) {
writer.Write(value, size - 1);
} else {
writer.Write(delta / 2 + mask, size - 1);
writer.Write(delta & 1, 1);
}
}
}
bool VP9::WriteLessThan(VpxRangeEncoder& writer, s32 value, s32 test) {
const bool is_lt = value < test;
writer.Write(!is_lt);
return is_lt;
}
void VP9::WriteCoefProbabilityUpdate(VpxRangeEncoder& writer, s32 tx_mode,
const std::array<u8, 1728>& new_prob,
const std::array<u8, 1728>& old_prob) {
constexpr u32 block_bytes = 2 * 2 * 6 * 6 * 3;
const auto needs_update = [&](u32 base_index) {
return !std::equal(new_prob.begin() + base_index,
new_prob.begin() + base_index + block_bytes,
old_prob.begin() + base_index);
};
for (u32 block_index = 0; block_index < 4; block_index++) {
const u32 base_index = block_index * block_bytes;
const bool update = needs_update(base_index);
writer.Write(update);
if (update) {
u32 index = base_index;
for (s32 i = 0; i < 2; i++) {
for (s32 j = 0; j < 2; j++) {
for (s32 k = 0; k < 6; k++) {
for (s32 l = 0; l < 6; l++) {
if (k != 0 || l < 3) {
WriteProbabilityUpdate(writer, new_prob[index + 0],
old_prob[index + 0]);
WriteProbabilityUpdate(writer, new_prob[index + 1],
old_prob[index + 1]);
WriteProbabilityUpdate(writer, new_prob[index + 2],
old_prob[index + 2]);
}
index += 3;
}
}
}
}
}
if (block_index == static_cast<u32>(tx_mode)) {
break;
}
}
}
void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob) {
const bool update = new_prob != old_prob;
writer.Write(update, diff_update_probability);
if (update) {
writer.Write(new_prob >> 1, 7);
}
}
Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state) {
PictureInfo picture_info;
gpu.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
Vp9PictureInfo vp9_info = picture_info.Convert();
InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy);
// surface_luma_offset[0:3] contains the address of the reference frame offsets in the following
// order: last, golden, altref, current.
std::copy(state.surface_luma_offset.begin(), state.surface_luma_offset.begin() + 4,
vp9_info.frame_offsets.begin());
return vp9_info;
}
void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) {
EntropyProbs entropy;
gpu.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
entropy.Convert(dst);
}
Vp9FrameContainer VP9::GetCurrentFrame(const NvdecCommon::NvdecRegisters& state) {
Vp9FrameContainer current_frame{};
{
gpu.SyncGuestHost();
current_frame.info = GetVp9PictureInfo(state);
current_frame.bit_stream.resize(current_frame.info.bitstream_size);
gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(),
current_frame.info.bitstream_size);
}
if (!next_frame.bit_stream.empty()) {
Vp9FrameContainer temp{
.info = current_frame.info,
.bit_stream = std::move(current_frame.bit_stream),
};
next_frame.info.show_frame = current_frame.info.last_frame_shown;
current_frame.info = next_frame.info;
current_frame.bit_stream = std::move(next_frame.bit_stream);
next_frame = std::move(temp);
} else {
next_frame.info = current_frame.info;
next_frame.bit_stream = current_frame.bit_stream;
}
return current_frame;
}
std::vector<u8> VP9::ComposeCompressedHeader() {
VpxRangeEncoder writer{};
const bool update_probs = !current_frame_info.is_key_frame && current_frame_info.show_frame;
if (!current_frame_info.lossless) {
if (static_cast<u32>(current_frame_info.transform_mode) >= 3) {
writer.Write(3, 2);
writer.Write(current_frame_info.transform_mode == 4);
} else {
writer.Write(current_frame_info.transform_mode, 2);
}
}
if (current_frame_info.transform_mode == 4) {
// tx_mode_probs() in the spec
WriteProbabilityUpdate(writer, current_frame_info.entropy.tx_8x8_prob,
prev_frame_probs.tx_8x8_prob);
WriteProbabilityUpdate(writer, current_frame_info.entropy.tx_16x16_prob,
prev_frame_probs.tx_16x16_prob);
WriteProbabilityUpdate(writer, current_frame_info.entropy.tx_32x32_prob,
prev_frame_probs.tx_32x32_prob);
if (update_probs) {
prev_frame_probs.tx_8x8_prob = current_frame_info.entropy.tx_8x8_prob;
prev_frame_probs.tx_16x16_prob = current_frame_info.entropy.tx_16x16_prob;
prev_frame_probs.tx_32x32_prob = current_frame_info.entropy.tx_32x32_prob;
}
}
// read_coef_probs() in the spec
WriteCoefProbabilityUpdate(writer, current_frame_info.transform_mode,
current_frame_info.entropy.coef_probs, prev_frame_probs.coef_probs);
// read_skip_probs() in the spec
WriteProbabilityUpdate(writer, current_frame_info.entropy.skip_probs,
prev_frame_probs.skip_probs);
if (update_probs) {
prev_frame_probs.coef_probs = current_frame_info.entropy.coef_probs;
prev_frame_probs.skip_probs = current_frame_info.entropy.skip_probs;
}
if (!current_frame_info.intra_only) {
// read_inter_probs() in the spec
WriteProbabilityUpdateAligned4(writer, current_frame_info.entropy.inter_mode_prob,
prev_frame_probs.inter_mode_prob);
if (current_frame_info.interp_filter == 4) {
// read_interp_filter_probs() in the spec
WriteProbabilityUpdate(writer, current_frame_info.entropy.switchable_interp_prob,
prev_frame_probs.switchable_interp_prob);
if (update_probs) {
prev_frame_probs.switchable_interp_prob =
current_frame_info.entropy.switchable_interp_prob;
}
}
// read_is_inter_probs() in the spec
WriteProbabilityUpdate(writer, current_frame_info.entropy.intra_inter_prob,
prev_frame_probs.intra_inter_prob);
// frame_reference_mode() in the spec
if ((current_frame_info.ref_frame_sign_bias[1] & 1) !=
(current_frame_info.ref_frame_sign_bias[2] & 1) ||
(current_frame_info.ref_frame_sign_bias[1] & 1) !=
(current_frame_info.ref_frame_sign_bias[3] & 1)) {
if (current_frame_info.reference_mode >= 1) {
writer.Write(1, 1);
writer.Write(current_frame_info.reference_mode == 2);
} else {
writer.Write(0, 1);
}
}
// frame_reference_mode_probs() in the spec
if (current_frame_info.reference_mode == 2) {
WriteProbabilityUpdate(writer, current_frame_info.entropy.comp_inter_prob,
prev_frame_probs.comp_inter_prob);
if (update_probs) {
prev_frame_probs.comp_inter_prob = current_frame_info.entropy.comp_inter_prob;
}
}
if (current_frame_info.reference_mode != 1) {
WriteProbabilityUpdate(writer, current_frame_info.entropy.single_ref_prob,
prev_frame_probs.single_ref_prob);
if (update_probs) {
prev_frame_probs.single_ref_prob = current_frame_info.entropy.single_ref_prob;
}
}
if (current_frame_info.reference_mode != 0) {
WriteProbabilityUpdate(writer, current_frame_info.entropy.comp_ref_prob,
prev_frame_probs.comp_ref_prob);
if (update_probs) {
prev_frame_probs.comp_ref_prob = current_frame_info.entropy.comp_ref_prob;
}
}
// read_y_mode_probs
for (std::size_t index = 0; index < current_frame_info.entropy.y_mode_prob.size();
++index) {
WriteProbabilityUpdate(writer, current_frame_info.entropy.y_mode_prob[index],
prev_frame_probs.y_mode_prob[index]);
}
// read_partition_probs
WriteProbabilityUpdateAligned4(writer, current_frame_info.entropy.partition_prob,
prev_frame_probs.partition_prob);
// mv_probs
for (s32 i = 0; i < 3; i++) {
WriteMvProbabilityUpdate(writer, current_frame_info.entropy.joints[i],
prev_frame_probs.joints[i]);
}
if (update_probs) {
prev_frame_probs.inter_mode_prob = current_frame_info.entropy.inter_mode_prob;
prev_frame_probs.intra_inter_prob = current_frame_info.entropy.intra_inter_prob;
prev_frame_probs.y_mode_prob = current_frame_info.entropy.y_mode_prob;
prev_frame_probs.partition_prob = current_frame_info.entropy.partition_prob;
prev_frame_probs.joints = current_frame_info.entropy.joints;
}
for (s32 i = 0; i < 2; i++) {
WriteMvProbabilityUpdate(writer, current_frame_info.entropy.sign[i],
prev_frame_probs.sign[i]);
for (s32 j = 0; j < 10; j++) {
const int index = i * 10 + j;
WriteMvProbabilityUpdate(writer, current_frame_info.entropy.classes[index],
prev_frame_probs.classes[index]);
}
WriteMvProbabilityUpdate(writer, current_frame_info.entropy.class_0[i],
prev_frame_probs.class_0[i]);
for (s32 j = 0; j < 10; j++) {
const int index = i * 10 + j;
WriteMvProbabilityUpdate(writer, current_frame_info.entropy.prob_bits[index],
prev_frame_probs.prob_bits[index]);
}
}
for (s32 i = 0; i < 2; i++) {
for (s32 j = 0; j < 2; j++) {
for (s32 k = 0; k < 3; k++) {
const int index = i * 2 * 3 + j * 3 + k;
WriteMvProbabilityUpdate(writer, current_frame_info.entropy.class_0_fr[index],
prev_frame_probs.class_0_fr[index]);
}
}
for (s32 j = 0; j < 3; j++) {
const int index = i * 3 + j;
WriteMvProbabilityUpdate(writer, current_frame_info.entropy.fr[index],
prev_frame_probs.fr[index]);
}
}
if (current_frame_info.allow_high_precision_mv) {
for (s32 index = 0; index < 2; index++) {
WriteMvProbabilityUpdate(writer, current_frame_info.entropy.class_0_hp[index],
prev_frame_probs.class_0_hp[index]);
WriteMvProbabilityUpdate(writer, current_frame_info.entropy.high_precision[index],
prev_frame_probs.high_precision[index]);
}
}
// save previous probs
if (update_probs) {
prev_frame_probs.sign = current_frame_info.entropy.sign;
prev_frame_probs.classes = current_frame_info.entropy.classes;
prev_frame_probs.class_0 = current_frame_info.entropy.class_0;
prev_frame_probs.prob_bits = current_frame_info.entropy.prob_bits;
prev_frame_probs.class_0_fr = current_frame_info.entropy.class_0_fr;
prev_frame_probs.fr = current_frame_info.entropy.fr;
prev_frame_probs.class_0_hp = current_frame_info.entropy.class_0_hp;
prev_frame_probs.high_precision = current_frame_info.entropy.high_precision;
}
}
writer.End();
return writer.GetBuffer();
}
VpxBitStreamWriter VP9::ComposeUncompressedHeader() {
VpxBitStreamWriter uncomp_writer{};
uncomp_writer.WriteU(2, 2); // Frame marker.
uncomp_writer.WriteU(0, 2); // Profile.
uncomp_writer.WriteBit(false); // Show existing frame.
uncomp_writer.WriteBit(!current_frame_info.is_key_frame); // Frame type (0 = key frame).
uncomp_writer.WriteBit(current_frame_info.show_frame); // Show frame.
uncomp_writer.WriteBit(current_frame_info.error_resilient_mode); // Error resilient mode.
if (current_frame_info.is_key_frame) {
uncomp_writer.WriteU(frame_sync_code, 24);
uncomp_writer.WriteU(0, 3); // Color space.
uncomp_writer.WriteU(0, 1); // Color range.
uncomp_writer.WriteU(current_frame_info.frame_size.width - 1, 16);
uncomp_writer.WriteU(current_frame_info.frame_size.height - 1, 16);
uncomp_writer.WriteBit(false); // Render and frame size different.
// Reset context
prev_frame_probs = default_probs;
swap_ref_indices = false;
loop_filter_ref_deltas.fill(0);
loop_filter_mode_deltas.fill(0);
frame_ctxs.fill(default_probs);
// Intra-only: the frame can be reconstructed without referencing other frames
current_frame_info.intra_only = true;
} else {
if (!current_frame_info.show_frame) {
uncomp_writer.WriteBit(current_frame_info.intra_only);
} else {
current_frame_info.intra_only = false;
}
if (!current_frame_info.error_resilient_mode) {
uncomp_writer.WriteU(0, 2); // Reset frame context.
}
const auto& curr_offsets = current_frame_info.frame_offsets;
const auto& next_offsets = next_frame.info.frame_offsets;
const bool ref_frames_different = curr_offsets[1] != curr_offsets[2];
const bool next_references_swap =
(next_offsets[1] == curr_offsets[2]) || (next_offsets[2] == curr_offsets[1]);
const bool needs_ref_swap = ref_frames_different && next_references_swap;
if (needs_ref_swap) {
swap_ref_indices = !swap_ref_indices;
}
union {
u32 raw;
BitField<0, 1, u32> refresh_last;
BitField<1, 1, u32> refresh_golden;
BitField<2, 1, u32> refresh_alt;
} refresh_frame_flags;
refresh_frame_flags.raw = 0;
for (u32 index = 0; index < 3; ++index) {
// Mark a reference slot for refresh when the next frame uses the current frame in that slot
if (curr_offsets[3] == next_offsets[index]) {
refresh_frame_flags.raw |= 1u << index;
}
}
if (swap_ref_indices) {
const u32 temp = refresh_frame_flags.refresh_golden;
refresh_frame_flags.refresh_golden.Assign(refresh_frame_flags.refresh_alt.Value());
refresh_frame_flags.refresh_alt.Assign(temp);
}
if (current_frame_info.intra_only) {
uncomp_writer.WriteU(frame_sync_code, 24);
uncomp_writer.WriteU(refresh_frame_flags.raw, 8);
uncomp_writer.WriteU(current_frame_info.frame_size.width - 1, 16);
uncomp_writer.WriteU(current_frame_info.frame_size.height - 1, 16);
uncomp_writer.WriteBit(false); // Render and frame size different.
} else {
const bool swap_indices = needs_ref_swap ^ swap_ref_indices;
const auto ref_frame_index = swap_indices ? std::array{0, 2, 1} : std::array{0, 1, 2};
uncomp_writer.WriteU(refresh_frame_flags.raw, 8);
for (size_t index = 1; index < 4; index++) {
uncomp_writer.WriteU(ref_frame_index[index - 1], 3);
uncomp_writer.WriteU(current_frame_info.ref_frame_sign_bias[index], 1);
}
uncomp_writer.WriteBit(true); // Frame size with refs.
uncomp_writer.WriteBit(false); // Render and frame size different.
uncomp_writer.WriteBit(current_frame_info.allow_high_precision_mv);
uncomp_writer.WriteBit(current_frame_info.interp_filter == 4);
if (current_frame_info.interp_filter != 4) {
uncomp_writer.WriteU(current_frame_info.interp_filter, 2);
}
}
}
if (!current_frame_info.error_resilient_mode) {
uncomp_writer.WriteBit(true); // Refresh frame context (TODO: determine where this flag comes from).
uncomp_writer.WriteBit(true); // Frame parallel decoding mode.
}
int frame_ctx_idx = 0;
if (!current_frame_info.show_frame) {
frame_ctx_idx = 1;
}
uncomp_writer.WriteU(frame_ctx_idx, 2); // Frame context index.
prev_frame_probs = frame_ctxs[frame_ctx_idx]; // reference probabilities for compressed header
frame_ctxs[frame_ctx_idx] = current_frame_info.entropy;
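// The new entropy values are stored into the selected context slot so that later frames
// referencing this context start from them.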
uncomp_writer.WriteU(current_frame_info.first_level, 6);
uncomp_writer.WriteU(current_frame_info.sharpness_level, 3);
uncomp_writer.WriteBit(current_frame_info.mode_ref_delta_enabled);
if (current_frame_info.mode_ref_delta_enabled) {
// check if ref deltas are different, update accordingly
std::array<bool, 4> update_loop_filter_ref_deltas;
std::array<bool, 2> update_loop_filter_mode_deltas;
bool loop_filter_delta_update = false;
for (std::size_t index = 0; index < current_frame_info.ref_deltas.size(); index++) {
const s8 old_deltas = loop_filter_ref_deltas[index];
const s8 new_deltas = current_frame_info.ref_deltas[index];
const bool differing_delta = old_deltas != new_deltas;
update_loop_filter_ref_deltas[index] = differing_delta;
loop_filter_delta_update |= differing_delta;
}
for (std::size_t index = 0; index < current_frame_info.mode_deltas.size(); index++) {
const s8 old_deltas = loop_filter_mode_deltas[index];
const s8 new_deltas = current_frame_info.mode_deltas[index];
const bool differing_delta = old_deltas != new_deltas;
update_loop_filter_mode_deltas[index] = differing_delta;
loop_filter_delta_update |= differing_delta;
}
uncomp_writer.WriteBit(loop_filter_delta_update);
if (loop_filter_delta_update) {
for (std::size_t index = 0; index < current_frame_info.ref_deltas.size(); index++) {
uncomp_writer.WriteBit(update_loop_filter_ref_deltas[index]);
if (update_loop_filter_ref_deltas[index]) {
uncomp_writer.WriteS(current_frame_info.ref_deltas[index], 6);
}
}
for (std::size_t index = 0; index < current_frame_info.mode_deltas.size(); index++) {
uncomp_writer.WriteBit(update_loop_filter_mode_deltas[index]);
if (update_loop_filter_mode_deltas[index]) {
uncomp_writer.WriteS(current_frame_info.mode_deltas[index], 6);
}
}
// save new deltas
loop_filter_ref_deltas = current_frame_info.ref_deltas;
loop_filter_mode_deltas = current_frame_info.mode_deltas;
}
}
uncomp_writer.WriteU(current_frame_info.base_q_index, 8);
uncomp_writer.WriteDeltaQ(current_frame_info.y_dc_delta_q);
uncomp_writer.WriteDeltaQ(current_frame_info.uv_dc_delta_q);
uncomp_writer.WriteDeltaQ(current_frame_info.uv_ac_delta_q);
ASSERT(!current_frame_info.segment_enabled);
uncomp_writer.WriteBit(false); // Segmentation enabled (TODO).
const s32 min_tile_cols_log2 = CalcMinLog2TileCols(current_frame_info.frame_size.width);
const s32 max_tile_cols_log2 = CalcMaxLog2TileCols(current_frame_info.frame_size.width);
const s32 tile_cols_log2_diff = current_frame_info.log2_tile_cols - min_tile_cols_log2;
const s32 tile_cols_log2_inc_mask = (1 << tile_cols_log2_diff) - 1;
// If it's less than the maximum, we need to add an extra 0 on the bitstream
// to indicate that it should stop reading.
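// For example (a sketch): with min_tile_cols_log2 = 0 and log2_tile_cols = 1, the diff is 1,
// so the writer emits the increment bit '1' followed by the terminating '0'. At the maximum
// no terminating bit is needed.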
if (current_frame_info.log2_tile_cols < max_tile_cols_log2) {
uncomp_writer.WriteU(tile_cols_log2_inc_mask << 1, tile_cols_log2_diff + 1);
} else {
uncomp_writer.WriteU(tile_cols_log2_inc_mask, tile_cols_log2_diff);
}
const bool tile_rows_log2_is_nonzero = current_frame_info.log2_tile_rows != 0;
uncomp_writer.WriteBit(tile_rows_log2_is_nonzero);
if (tile_rows_log2_is_nonzero) {
uncomp_writer.WriteBit(current_frame_info.log2_tile_rows > 1);
}
return uncomp_writer;
}
void VP9::ComposeFrame(const NvdecCommon::NvdecRegisters& state) {
std::vector<u8> bitstream;
{
Vp9FrameContainer curr_frame = GetCurrentFrame(state);
current_frame_info = curr_frame.info;
bitstream = std::move(curr_frame.bit_stream);
}
// The uncompressed header routine sets PrevProb parameters needed for the compressed header
auto uncomp_writer = ComposeUncompressedHeader();
std::vector<u8> compressed_header = ComposeCompressedHeader();
uncomp_writer.WriteU(static_cast<s32>(compressed_header.size()), 16);
uncomp_writer.Flush();
std::vector<u8> uncompressed_header = uncomp_writer.GetByteArray();
// Write headers and frame to buffer
frame.resize(uncompressed_header.size() + compressed_header.size() + bitstream.size());
std::copy(uncompressed_header.begin(), uncompressed_header.end(), frame.begin());
std::copy(compressed_header.begin(), compressed_header.end(),
frame.begin() + uncompressed_header.size());
std::copy(bitstream.begin(), bitstream.end(),
frame.begin() + uncompressed_header.size() + compressed_header.size());
}
VpxRangeEncoder::VpxRangeEncoder() {
Write(false);
}
VpxRangeEncoder::~VpxRangeEncoder() = default;
void VpxRangeEncoder::Write(s32 value, s32 value_size) {
for (s32 bit = value_size - 1; bit >= 0; bit--) {
Write(((value >> bit) & 1) != 0);
}
}
void VpxRangeEncoder::Write(bool bit) {
Write(bit, half_probability);
}
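// A sketch of the boolean coder below: the current range is split in proportion to
// 'probability' (out of 256), low_value and range are renormalized using norm_lut, and any
// carry is propagated backwards through already-written 0xff bytes before the next byte is
// emitted.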
void VpxRangeEncoder::Write(bool bit, s32 probability) {
u32 local_range = range;
const u32 split = 1 + (((local_range - 1) * static_cast<u32>(probability)) >> 8);
local_range = split;
if (bit) {
low_value += split;
local_range = range - split;
}
s32 shift = static_cast<s32>(norm_lut[local_range]);
local_range <<= shift;
count += shift;
if (count >= 0) {
const s32 offset = shift - count;
if (((low_value << (offset - 1)) >> 31) != 0) {
const s32 current_pos = static_cast<s32>(base_stream.GetPosition());
base_stream.Seek(-1, Common::SeekOrigin::FromCurrentPos);
while (PeekByte() == 0xff) {
base_stream.WriteByte(0);
base_stream.Seek(-2, Common::SeekOrigin::FromCurrentPos);
}
base_stream.WriteByte(static_cast<u8>((PeekByte() + 1)));
base_stream.Seek(current_pos, Common::SeekOrigin::SetOrigin);
}
base_stream.WriteByte(static_cast<u8>((low_value >> (24 - offset))));
low_value <<= offset;
shift = count;
low_value &= 0xffffff;
count -= 8;
}
low_value <<= shift;
range = local_range;
}
void VpxRangeEncoder::End() {
for (std::size_t index = 0; index < 32; ++index) {
Write(false);
}
}
u8 VpxRangeEncoder::PeekByte() {
const u8 value = base_stream.ReadByte();
base_stream.Seek(-1, Common::SeekOrigin::FromCurrentPos);
return value;
}
VpxBitStreamWriter::VpxBitStreamWriter() = default;
VpxBitStreamWriter::~VpxBitStreamWriter() = default;
void VpxBitStreamWriter::WriteU(u32 value, u32 value_size) {
WriteBits(value, value_size);
}
void VpxBitStreamWriter::WriteS(s32 value, u32 value_size) {
const bool sign = value < 0;
if (sign) {
value = -value;
}
WriteBits(static_cast<u32>(value << 1) | (sign ? 1 : 0), value_size + 1);
}
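// delta_q values are written as a presence bit followed by the raw 4-bit value when non-zero.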
void VpxBitStreamWriter::WriteDeltaQ(u32 value) {
const bool delta_coded = value != 0;
WriteBit(delta_coded);
if (delta_coded) {
WriteBits(value, 4);
}
}
void VpxBitStreamWriter::WriteBits(u32 value, u32 bit_count) {
s32 value_pos = 0;
s32 remaining = bit_count;
while (remaining > 0) {
s32 copy_size = remaining;
const s32 free = GetFreeBufferBits();
if (copy_size > free) {
copy_size = free;
}
const s32 mask = (1 << copy_size) - 1;
const s32 src_shift = (bit_count - value_pos) - copy_size;
const s32 dst_shift = (buffer_size - buffer_pos) - copy_size;
buffer |= ((value >> src_shift) & mask) << dst_shift;
value_pos += copy_size;
buffer_pos += copy_size;
remaining -= copy_size;
}
}
void VpxBitStreamWriter::WriteBit(bool state) {
WriteBits(state ? 1 : 0, 1);
}
s32 VpxBitStreamWriter::GetFreeBufferBits() {
if (buffer_pos == buffer_size) {
Flush();
}
return buffer_size - buffer_pos;
}
void VpxBitStreamWriter::Flush() {
if (buffer_pos == 0) {
return;
}
byte_array.push_back(static_cast<u8>(buffer));
buffer = 0;
buffer_pos = 0;
}
std::vector<u8>& VpxBitStreamWriter::GetByteArray() {
return byte_array;
}
const std::vector<u8>& VpxBitStreamWriter::GetByteArray() const {
return byte_array;
}
} // namespace Tegra::Decoder

@@ -1,192 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "common/common_types.h"
#include "common/stream.h"
#include "video_core/command_classes/codecs/vp9_types.h"
#include "video_core/command_classes/nvdec_common.h"
namespace Tegra {
class GPU;
namespace Decoder {
/// The VpxRangeEncoder and VpxBitStreamWriter classes are used to compose the
/// VP9 header bitstreams.
class VpxRangeEncoder {
public:
VpxRangeEncoder();
~VpxRangeEncoder();
VpxRangeEncoder(const VpxRangeEncoder&) = delete;
VpxRangeEncoder& operator=(const VpxRangeEncoder&) = delete;
VpxRangeEncoder(VpxRangeEncoder&&) = default;
VpxRangeEncoder& operator=(VpxRangeEncoder&&) = default;
/// Writes the rightmost value_size bits from value into the stream
void Write(s32 value, s32 value_size);
/// Writes a single bit with half probability
void Write(bool bit);
/// Writes a bit to the base_stream encoded with probability
void Write(bool bit, s32 probability);
/// Signal the end of the bitstream
void End();
[[nodiscard]] std::vector<u8>& GetBuffer() {
return base_stream.GetBuffer();
}
[[nodiscard]] const std::vector<u8>& GetBuffer() const {
return base_stream.GetBuffer();
}
private:
u8 PeekByte();
Common::Stream base_stream{};
u32 low_value{};
u32 range{0xff};
s32 count{-24};
s32 half_probability{128};
};
class VpxBitStreamWriter {
public:
VpxBitStreamWriter();
~VpxBitStreamWriter();
VpxBitStreamWriter(const VpxBitStreamWriter&) = delete;
VpxBitStreamWriter& operator=(const VpxBitStreamWriter&) = delete;
VpxBitStreamWriter(VpxBitStreamWriter&&) = default;
VpxBitStreamWriter& operator=(VpxBitStreamWriter&&) = default;
/// Write an unsigned integer value
void WriteU(u32 value, u32 value_size);
/// Write a signed integer value
void WriteS(s32 value, u32 value_size);
/// Based on 6.2.10 of VP9 Spec, writes a delta coded value
void WriteDeltaQ(u32 value);
/// Write a single bit.
void WriteBit(bool state);
/// Pushes current buffer into buffer_array, resets buffer
void Flush();
/// Returns byte_array
[[nodiscard]] std::vector<u8>& GetByteArray();
/// Returns const byte_array
[[nodiscard]] const std::vector<u8>& GetByteArray() const;
private:
/// Write bit_count bits from value into buffer
void WriteBits(u32 value, u32 bit_count);
/// Gets next available position in buffer, invokes Flush() if buffer is full
s32 GetFreeBufferBits();
s32 buffer_size{8};
s32 buffer{};
s32 buffer_pos{};
std::vector<u8> byte_array;
};
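// A rough usage sketch (mirroring VP9::ComposeFrame below): the uncompressed header is built
// with a VpxBitStreamWriter and the compressed header with a VpxRangeEncoder; the final frame
// is laid out as
//   uncompressed header || compressed header || raw bitstream
// with the compressed header size appended to the uncompressed header as a 16-bit field.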
class VP9 {
public:
explicit VP9(GPU& gpu_);
~VP9();
VP9(const VP9&) = delete;
VP9& operator=(const VP9&) = delete;
VP9(VP9&&) = default;
VP9& operator=(VP9&&) = delete;
/// Composes the VP9 frame from the GPU state information.
/// Based on the official VP9 spec documentation
void ComposeFrame(const NvdecCommon::NvdecRegisters& state);
/// Returns true if the most recent frame was a hidden frame.
[[nodiscard]] bool WasFrameHidden() const {
return !current_frame_info.show_frame;
}
/// Returns a const reference to the composed frame data.
[[nodiscard]] const std::vector<u8>& GetFrameBytes() const {
return frame;
}
private:
/// Generates compressed header probability updates in the bitstream writer
template <typename T, std::size_t N>
void WriteProbabilityUpdate(VpxRangeEncoder& writer, const std::array<T, N>& new_prob,
const std::array<T, N>& old_prob);
/// Generates compressed header probability updates in the bitstream writer
/// If probs are not equal, WriteProbabilityDelta is invoked
void WriteProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);
/// Generates compressed header probability deltas in the bitstream writer
void WriteProbabilityDelta(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);
/// Inverse of 6.3.4 Decode term subexp
void EncodeTermSubExp(VpxRangeEncoder& writer, s32 value);
/// Writes if the value is less than the test value
bool WriteLessThan(VpxRangeEncoder& writer, s32 value, s32 test);
/// Writes probability updates for the Coef probabilities
void WriteCoefProbabilityUpdate(VpxRangeEncoder& writer, s32 tx_mode,
const std::array<u8, 1728>& new_prob,
const std::array<u8, 1728>& old_prob);
/// Write probabilities for 4-byte aligned structures
template <typename T, std::size_t N>
void WriteProbabilityUpdateAligned4(VpxRangeEncoder& writer, const std::array<T, N>& new_prob,
const std::array<T, N>& old_prob);
/// Write motion vector probability updates. 6.3.17 in the spec
void WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);
/// Returns VP9 information from NVDEC provided offset and size
[[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state);
/// Read and convert NVDEC provided entropy probs to Vp9EntropyProbs struct
void InsertEntropy(u64 offset, Vp9EntropyProbs& dst);
/// Returns frame to be decoded after buffering
[[nodiscard]] Vp9FrameContainer GetCurrentFrame(const NvdecCommon::NvdecRegisters& state);
/// Use NVDEC provided information to compose the headers for the current frame
[[nodiscard]] std::vector<u8> ComposeCompressedHeader();
[[nodiscard]] VpxBitStreamWriter ComposeUncompressedHeader();
GPU& gpu;
std::vector<u8> frame;
std::array<s8, 4> loop_filter_ref_deltas{};
std::array<s8, 2> loop_filter_mode_deltas{};
Vp9FrameContainer next_frame{};
std::array<Vp9EntropyProbs, 4> frame_ctxs{};
bool swap_ref_indices{};
Vp9PictureInfo current_frame_info{};
Vp9EntropyProbs prev_frame_probs{};
};
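// Typical usage (a sketch, based on how the codec layer drives this class): call
// ComposeFrame(state) for each NVDEC execute, feed GetFrameBytes() to the decoder, and use
// WasFrameHidden() to skip presenting hidden frames.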
} // namespace Decoder
} // namespace Tegra

@@ -1,306 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "common/common_funcs.h"
#include "common/common_types.h"
namespace Tegra {
class GPU;
namespace Decoder {
struct Vp9FrameDimensions {
s16 width;
s16 height;
s16 luma_pitch;
s16 chroma_pitch;
};
static_assert(sizeof(Vp9FrameDimensions) == 0x8, "Vp9 Vp9FrameDimensions is an invalid size");
enum class FrameFlags : u32 {
IsKeyFrame = 1 << 0,
LastFrameIsKeyFrame = 1 << 1,
FrameSizeChanged = 1 << 2,
ErrorResilientMode = 1 << 3,
LastShowFrame = 1 << 4,
IntraOnly = 1 << 5,
};
DECLARE_ENUM_FLAG_OPERATORS(FrameFlags)
enum class TxSize {
Tx4x4 = 0, // 4x4 transform
Tx8x8 = 1, // 8x8 transform
Tx16x16 = 2, // 16x16 transform
Tx32x32 = 3, // 32x32 transform
TxSizes = 4
};
enum class TxMode {
Only4X4 = 0, // Only 4x4 transform used
Allow8X8 = 1, // Allow block transform size up to 8x8
Allow16X16 = 2, // Allow block transform size up to 16x16
Allow32X32 = 3, // Allow block transform size up to 32x32
TxModeSelect = 4, // Transform specified for each block
TxModes = 5
};
struct Segmentation {
u8 enabled;
u8 update_map;
u8 temporal_update;
u8 abs_delta;
std::array<u32, 8> feature_mask;
std::array<std::array<s16, 4>, 8> feature_data;
};
static_assert(sizeof(Segmentation) == 0x64, "Segmentation is an invalid size");
struct LoopFilter {
u8 mode_ref_delta_enabled;
std::array<s8, 4> ref_deltas;
std::array<s8, 2> mode_deltas;
};
static_assert(sizeof(LoopFilter) == 0x7, "LoopFilter is an invalid size");
struct Vp9EntropyProbs {
std::array<u8, 36> y_mode_prob; ///< 0x0000
std::array<u8, 64> partition_prob; ///< 0x0024
std::array<u8, 1728> coef_probs; ///< 0x0064
std::array<u8, 8> switchable_interp_prob; ///< 0x0724
std::array<u8, 28> inter_mode_prob; ///< 0x072C
std::array<u8, 4> intra_inter_prob; ///< 0x0748
std::array<u8, 5> comp_inter_prob; ///< 0x074C
std::array<u8, 10> single_ref_prob; ///< 0x0751
std::array<u8, 5> comp_ref_prob; ///< 0x075B
std::array<u8, 6> tx_32x32_prob; ///< 0x0760
std::array<u8, 4> tx_16x16_prob; ///< 0x0766
std::array<u8, 2> tx_8x8_prob; ///< 0x076A
std::array<u8, 3> skip_probs; ///< 0x076C
std::array<u8, 3> joints; ///< 0x076F
std::array<u8, 2> sign; ///< 0x0772
std::array<u8, 20> classes; ///< 0x0774
std::array<u8, 2> class_0; ///< 0x0788
std::array<u8, 20> prob_bits; ///< 0x078A
std::array<u8, 12> class_0_fr; ///< 0x079E
std::array<u8, 6> fr; ///< 0x07AA
std::array<u8, 2> class_0_hp; ///< 0x07B0
std::array<u8, 2> high_precision; ///< 0x07B2
};
static_assert(sizeof(Vp9EntropyProbs) == 0x7B4, "Vp9EntropyProbs is an invalid size");
struct Vp9PictureInfo {
u32 bitstream_size;
std::array<u64, 4> frame_offsets;
std::array<s8, 4> ref_frame_sign_bias;
s32 base_q_index;
s32 y_dc_delta_q;
s32 uv_dc_delta_q;
s32 uv_ac_delta_q;
s32 transform_mode;
s32 interp_filter;
s32 reference_mode;
s32 log2_tile_cols;
s32 log2_tile_rows;
std::array<s8, 4> ref_deltas;
std::array<s8, 2> mode_deltas;
Vp9EntropyProbs entropy;
Vp9FrameDimensions frame_size;
u8 first_level;
u8 sharpness_level;
bool is_key_frame;
bool intra_only;
bool last_frame_was_key;
bool error_resilient_mode;
bool last_frame_shown;
bool show_frame;
bool lossless;
bool allow_high_precision_mv;
bool segment_enabled;
bool mode_ref_delta_enabled;
};
struct Vp9FrameContainer {
Vp9PictureInfo info{};
std::vector<u8> bit_stream;
};
struct PictureInfo {
INSERT_PADDING_WORDS_NOINIT(12); ///< 0x00
u32 bitstream_size; ///< 0x30
INSERT_PADDING_WORDS_NOINIT(5); ///< 0x34
Vp9FrameDimensions last_frame_size; ///< 0x48
Vp9FrameDimensions golden_frame_size; ///< 0x50
Vp9FrameDimensions alt_frame_size; ///< 0x58
Vp9FrameDimensions current_frame_size; ///< 0x60
FrameFlags vp9_flags; ///< 0x68
std::array<s8, 4> ref_frame_sign_bias; ///< 0x6C
u8 first_level; ///< 0x70
u8 sharpness_level; ///< 0x71
u8 base_q_index; ///< 0x72
u8 y_dc_delta_q; ///< 0x73
u8 uv_ac_delta_q; ///< 0x74
u8 uv_dc_delta_q; ///< 0x75
u8 lossless; ///< 0x76
u8 tx_mode; ///< 0x77
u8 allow_high_precision_mv; ///< 0x78
u8 interp_filter; ///< 0x79
u8 reference_mode; ///< 0x7A
INSERT_PADDING_BYTES_NOINIT(3); ///< 0x7B
u8 log2_tile_cols; ///< 0x7E
u8 log2_tile_rows; ///< 0x7F
Segmentation segmentation; ///< 0x80
LoopFilter loop_filter; ///< 0xE4
INSERT_PADDING_BYTES_NOINIT(21); ///< 0xEB
[[nodiscard]] Vp9PictureInfo Convert() const {
return {
.bitstream_size = bitstream_size,
.frame_offsets{},
.ref_frame_sign_bias = ref_frame_sign_bias,
.base_q_index = base_q_index,
.y_dc_delta_q = y_dc_delta_q,
.uv_dc_delta_q = uv_dc_delta_q,
.uv_ac_delta_q = uv_ac_delta_q,
.transform_mode = tx_mode,
.interp_filter = interp_filter,
.reference_mode = reference_mode,
.log2_tile_cols = log2_tile_cols,
.log2_tile_rows = log2_tile_rows,
.ref_deltas = loop_filter.ref_deltas,
.mode_deltas = loop_filter.mode_deltas,
.entropy{},
.frame_size = current_frame_size,
.first_level = first_level,
.sharpness_level = sharpness_level,
.is_key_frame = True(vp9_flags & FrameFlags::IsKeyFrame),
.intra_only = True(vp9_flags & FrameFlags::IntraOnly),
.last_frame_was_key = True(vp9_flags & FrameFlags::LastFrameIsKeyFrame),
.error_resilient_mode = True(vp9_flags & FrameFlags::ErrorResilientMode),
.last_frame_shown = True(vp9_flags & FrameFlags::LastShowFrame),
.show_frame = true,
.lossless = lossless != 0,
.allow_high_precision_mv = allow_high_precision_mv != 0,
.segment_enabled = segmentation.enabled != 0,
.mode_ref_delta_enabled = loop_filter.mode_ref_delta_enabled != 0,
};
}
};
static_assert(sizeof(PictureInfo) == 0x100, "PictureInfo is an invalid size");
struct EntropyProbs {
INSERT_PADDING_BYTES_NOINIT(1024); ///< 0x0000
std::array<u8, 28> inter_mode_prob; ///< 0x0400
std::array<u8, 4> intra_inter_prob; ///< 0x041C
INSERT_PADDING_BYTES_NOINIT(80); ///< 0x0420
std::array<u8, 2> tx_8x8_prob; ///< 0x0470
std::array<u8, 4> tx_16x16_prob; ///< 0x0472
std::array<u8, 6> tx_32x32_prob; ///< 0x0476
std::array<u8, 4> y_mode_prob_e8; ///< 0x047C
std::array<std::array<u8, 8>, 4> y_mode_prob_e0e7; ///< 0x0480
INSERT_PADDING_BYTES_NOINIT(64); ///< 0x04A0
std::array<u8, 64> partition_prob; ///< 0x04E0
INSERT_PADDING_BYTES_NOINIT(10); ///< 0x0520
std::array<u8, 8> switchable_interp_prob; ///< 0x052A
std::array<u8, 5> comp_inter_prob; ///< 0x0532
std::array<u8, 3> skip_probs; ///< 0x0537
INSERT_PADDING_BYTES_NOINIT(1); ///< 0x053A
std::array<u8, 3> joints; ///< 0x053B
std::array<u8, 2> sign; ///< 0x053E
std::array<u8, 2> class_0; ///< 0x0540
std::array<u8, 6> fr; ///< 0x0542
std::array<u8, 2> class_0_hp; ///< 0x0548
std::array<u8, 2> high_precision; ///< 0x054A
std::array<u8, 20> classes; ///< 0x054C
std::array<u8, 12> class_0_fr; ///< 0x0560
std::array<u8, 20> pred_bits; ///< 0x056C
std::array<u8, 10> single_ref_prob; ///< 0x0580
std::array<u8, 5> comp_ref_prob; ///< 0x058A
INSERT_PADDING_BYTES_NOINIT(17); ///< 0x058F
std::array<u8, 2304> coef_probs; ///< 0x05A0
void Convert(Vp9EntropyProbs& fc) {
fc.inter_mode_prob = inter_mode_prob;
fc.intra_inter_prob = intra_inter_prob;
fc.tx_8x8_prob = tx_8x8_prob;
fc.tx_16x16_prob = tx_16x16_prob;
fc.tx_32x32_prob = tx_32x32_prob;
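// Rebuild the 4 x 9 y_mode probability rows: entries 0-7 of each row come from
// y_mode_prob_e0e7 and the 9th entry comes from y_mode_prob_e8.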
for (std::size_t i = 0; i < 4; i++) {
for (std::size_t j = 0; j < 9; j++) {
fc.y_mode_prob[j + 9 * i] = j < 8 ? y_mode_prob_e0e7[i][j] : y_mode_prob_e8[i];
}
}
fc.partition_prob = partition_prob;
fc.switchable_interp_prob = switchable_interp_prob;
fc.comp_inter_prob = comp_inter_prob;
fc.skip_probs = skip_probs;
fc.joints = joints;
fc.sign = sign;
fc.class_0 = class_0;
fc.fr = fr;
fc.class_0_hp = class_0_hp;
fc.high_precision = high_precision;
fc.classes = classes;
fc.class_0_fr = class_0_fr;
fc.prob_bits = pred_bits;
fc.single_ref_prob = single_ref_prob;
fc.comp_ref_prob = comp_ref_prob;
// Repack the coefficient probabilities, skipping every 4th element as it goes unused (2304 -> 1728)
for (std::size_t i = 0; i < coef_probs.size(); i += 4) {
const std::size_t j = i - i / 4;
fc.coef_probs[j] = coef_probs[i];
fc.coef_probs[j + 1] = coef_probs[i + 1];
fc.coef_probs[j + 2] = coef_probs[i + 2];
}
}
};
static_assert(sizeof(EntropyProbs) == 0xEA0, "EntropyProbs is an invalid size");
enum class Ref { Last, Golden, AltRef };
struct RefPoolElement {
s64 frame{};
Ref ref{};
bool refresh{};
};
#define ASSERT_POSITION(field_name, position) \
static_assert(offsetof(Vp9EntropyProbs, field_name) == position, \
"Field " #field_name " has invalid position")
ASSERT_POSITION(partition_prob, 0x0024);
ASSERT_POSITION(switchable_interp_prob, 0x0724);
ASSERT_POSITION(sign, 0x0772);
ASSERT_POSITION(class_0_fr, 0x079E);
ASSERT_POSITION(high_precision, 0x07B2);
#undef ASSERT_POSITION
#define ASSERT_POSITION(field_name, position) \
static_assert(offsetof(PictureInfo, field_name) == position, \
"Field " #field_name " has invalid position")
ASSERT_POSITION(bitstream_size, 0x30);
ASSERT_POSITION(last_frame_size, 0x48);
ASSERT_POSITION(first_level, 0x70);
ASSERT_POSITION(segmentation, 0x80);
ASSERT_POSITION(loop_filter, 0xE4);
#undef ASSERT_POSITION
#define ASSERT_POSITION(field_name, position) \
static_assert(offsetof(EntropyProbs, field_name) == position, \
"Field " #field_name " has invalid position")
ASSERT_POSITION(inter_mode_prob, 0x400);
ASSERT_POSITION(tx_8x8_prob, 0x470);
ASSERT_POSITION(partition_prob, 0x4E0);
ASSERT_POSITION(class_0, 0x540);
ASSERT_POSITION(class_0_fr, 0x560);
ASSERT_POSITION(coef_probs, 0x5A0);
#undef ASSERT_POSITION
} // namespace Decoder
} // namespace Tegra

@@ -1,29 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "video_core/command_classes/host1x.h"
#include "video_core/gpu.h"
Tegra::Host1x::Host1x(GPU& gpu_) : gpu(gpu_) {}
Tegra::Host1x::~Host1x() = default;
void Tegra::Host1x::ProcessMethod(Method method, u32 argument) {
switch (method) {
case Method::LoadSyncptPayload32:
syncpoint_value = argument;
break;
case Method::WaitSyncpt:
case Method::WaitSyncpt32:
Execute(argument);
break;
default:
UNIMPLEMENTED_MSG("Host1x method 0x{:X}", static_cast<u32>(method));
break;
}
}
void Tegra::Host1x::Execute(u32 data) {
gpu.WaitFence(data, syncpoint_value);
}

@@ -1,34 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_types.h"
namespace Tegra {
class GPU;
class Nvdec;
class Host1x {
public:
enum class Method : u32 {
WaitSyncpt = 0x8,
LoadSyncptPayload32 = 0x4e,
WaitSyncpt32 = 0x50,
};
explicit Host1x(GPU& gpu);
~Host1x();
/// Handles a method write: stores the syncpoint payload, or invokes Execute() for wait methods
void ProcessMethod(Method method, u32 argument);
private:
/// For Host1x, Execute() waits on the syncpoint value previously written into the state
void Execute(u32 data);
u32 syncpoint_value{};
GPU& gpu;
};
} // namespace Tegra

@@ -1,47 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "video_core/command_classes/nvdec.h"
#include "video_core/gpu.h"
namespace Tegra {
#define NVDEC_REG_INDEX(field_name) \
(offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64))
Nvdec::Nvdec(GPU& gpu_) : gpu(gpu_), state{}, codec(std::make_unique<Codec>(gpu, state)) {}
Nvdec::~Nvdec() = default;
void Nvdec::ProcessMethod(u32 method, u32 argument) {
state.reg_array[method] = static_cast<u64>(argument) << 8;
switch (method) {
case NVDEC_REG_INDEX(set_codec_id):
codec->SetTargetCodec(static_cast<NvdecCommon::VideoCodec>(argument));
break;
case NVDEC_REG_INDEX(execute):
Execute();
break;
}
}
AVFramePtr Nvdec::GetFrame() {
return codec->GetCurrentFrame();
}
void Nvdec::Execute() {
switch (codec->GetCurrentCodec()) {
case NvdecCommon::VideoCodec::H264:
case NvdecCommon::VideoCodec::VP8:
case NvdecCommon::VideoCodec::VP9:
codec->Decode();
break;
default:
UNIMPLEMENTED_MSG("Codec {}", codec->GetCurrentCodecName());
break;
}
}
} // namespace Tegra

@@ -1,33 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include <vector>
#include "common/common_types.h"
#include "video_core/command_classes/codecs/codec.h"
namespace Tegra {
class GPU;
class Nvdec {
public:
explicit Nvdec(GPU& gpu);
~Nvdec();
/// Writes the method argument into the register state; invokes Execute() when the execute register is written
void ProcessMethod(u32 method, u32 argument);
/// Return most recently decoded frame
[[nodiscard]] AVFramePtr GetFrame();
private:
/// Invoke codec to decode a frame
void Execute();
GPU& gpu;
NvdecCommon::NvdecRegisters state;
std::unique_ptr<Codec> codec;
};
} // namespace Tegra

@@ -1,97 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
namespace Tegra::NvdecCommon {
enum class VideoCodec : u64 {
None = 0x0,
H264 = 0x3,
VP8 = 0x5,
H265 = 0x7,
VP9 = 0x9,
};
// NVDEC should use a 32-bit address space, but it is mapped to 64-bit;
// doubling the sizes here compensates for that.
struct NvdecRegisters {
static constexpr std::size_t NUM_REGS = 0x178;
union {
struct {
INSERT_PADDING_WORDS_NOINIT(256); ///< 0x0000
VideoCodec set_codec_id; ///< 0x0400
INSERT_PADDING_WORDS_NOINIT(126); ///< 0x0408
u64 execute; ///< 0x0600
INSERT_PADDING_WORDS_NOINIT(126); ///< 0x0608
struct { ///< 0x0800
union {
BitField<0, 3, VideoCodec> codec;
BitField<4, 1, u64> gp_timer_on;
BitField<13, 1, u64> mb_timer_on;
BitField<14, 1, u64> intra_frame_pslc;
BitField<17, 1, u64> all_intra_frame;
};
} control_params;
u64 picture_info_offset; ///< 0x0808
u64 frame_bitstream_offset; ///< 0x0810
u64 frame_number; ///< 0x0818
u64 h264_slice_data_offsets; ///< 0x0820
u64 h264_mv_dump_offset; ///< 0x0828
INSERT_PADDING_WORDS_NOINIT(6); ///< 0x0830
u64 frame_stats_offset; ///< 0x0848
u64 h264_last_surface_luma_offset; ///< 0x0850
u64 h264_last_surface_chroma_offset; ///< 0x0858
std::array<u64, 17> surface_luma_offset; ///< 0x0860
std::array<u64, 17> surface_chroma_offset; ///< 0x08E8
INSERT_PADDING_WORDS_NOINIT(68); ///< 0x0970
u64 vp8_prob_data_offset; ///< 0x0A80
u64 vp8_header_partition_buf_offset; ///< 0x0A88
INSERT_PADDING_WORDS_NOINIT(60); ///< 0x0A90
u64 vp9_entropy_probs_offset; ///< 0x0B80
u64 vp9_backward_updates_offset; ///< 0x0B88
u64 vp9_last_frame_segmap_offset; ///< 0x0B90
u64 vp9_curr_frame_segmap_offset; ///< 0x0B98
INSERT_PADDING_WORDS_NOINIT(2); ///< 0x0BA0
u64 vp9_last_frame_mvs_offset; ///< 0x0BA8
u64 vp9_curr_frame_mvs_offset; ///< 0x0BB0
INSERT_PADDING_WORDS_NOINIT(2); ///< 0x0BB8
};
std::array<u64, NUM_REGS> reg_array;
};
};
static_assert(sizeof(NvdecRegisters) == (0xBC0), "NvdecRegisters is incorrect size");
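// Example (a sketch): an NVDEC method write to index 0x101 stores (argument << 8) into
// reg_array[0x101], so picture_info_offset ends up holding the full 40-bit GPU address.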
#define ASSERT_REG_POSITION(field_name, position) \
static_assert(offsetof(NvdecRegisters, field_name) == position * sizeof(u64), \
"Field " #field_name " has invalid position")
ASSERT_REG_POSITION(set_codec_id, 0x80);
ASSERT_REG_POSITION(execute, 0xC0);
ASSERT_REG_POSITION(control_params, 0x100);
ASSERT_REG_POSITION(picture_info_offset, 0x101);
ASSERT_REG_POSITION(frame_bitstream_offset, 0x102);
ASSERT_REG_POSITION(frame_number, 0x103);
ASSERT_REG_POSITION(h264_slice_data_offsets, 0x104);
ASSERT_REG_POSITION(frame_stats_offset, 0x109);
ASSERT_REG_POSITION(h264_last_surface_luma_offset, 0x10A);
ASSERT_REG_POSITION(h264_last_surface_chroma_offset, 0x10B);
ASSERT_REG_POSITION(surface_luma_offset, 0x10C);
ASSERT_REG_POSITION(surface_chroma_offset, 0x11D);
ASSERT_REG_POSITION(vp8_prob_data_offset, 0x150);
ASSERT_REG_POSITION(vp8_header_partition_buf_offset, 0x151);
ASSERT_REG_POSITION(vp9_entropy_probs_offset, 0x170);
ASSERT_REG_POSITION(vp9_backward_updates_offset, 0x171);
ASSERT_REG_POSITION(vp9_last_frame_segmap_offset, 0x172);
ASSERT_REG_POSITION(vp9_curr_frame_segmap_offset, 0x173);
ASSERT_REG_POSITION(vp9_last_frame_mvs_offset, 0x175);
ASSERT_REG_POSITION(vp9_curr_frame_mvs_offset, 0x176);
#undef ASSERT_REG_POSITION
} // namespace Tegra::NvdecCommon

@@ -1,43 +0,0 @@
// SPDX-FileCopyrightText: Ryujinx Team and Contributors
// SPDX-License-Identifier: MIT
#include <algorithm>
#include "sync_manager.h"
#include "video_core/gpu.h"
namespace Tegra {
SyncptIncrManager::SyncptIncrManager(GPU& gpu_) : gpu(gpu_) {}
SyncptIncrManager::~SyncptIncrManager() = default;
void SyncptIncrManager::Increment(u32 id) {
increments.emplace_back(0, 0, id, true);
IncrementAllDone();
}
u32 SyncptIncrManager::IncrementWhenDone(u32 class_id, u32 id) {
const u32 handle = current_id++;
increments.emplace_back(handle, class_id, id);
return handle;
}
void SyncptIncrManager::SignalDone(u32 handle) {
const auto done_incr =
std::find_if(increments.begin(), increments.end(),
[handle](const SyncptIncr& incr) { return incr.id == handle; });
if (done_incr != increments.cend()) {
done_incr->complete = true;
}
IncrementAllDone();
}
void SyncptIncrManager::IncrementAllDone() {
std::size_t done_count = 0;
for (; done_count < increments.size(); ++done_count) {
if (!increments[done_count].complete) {
break;
}
gpu.IncrementSyncPoint(increments[done_count].syncpt_id);
}
increments.erase(increments.begin(), increments.begin() + done_count);
}
} // namespace Tegra

@@ -1,47 +0,0 @@
// SPDX-FileCopyrightText: Ryujinx Team and Contributors
// SPDX-License-Identifier: MIT
#pragma once
#include <mutex>
#include <vector>
#include "common/common_types.h"
namespace Tegra {
class GPU;
struct SyncptIncr {
u32 id;
u32 class_id;
u32 syncpt_id;
bool complete;
SyncptIncr(u32 id_, u32 class_id_, u32 syncpt_id_, bool done = false)
: id(id_), class_id(class_id_), syncpt_id(syncpt_id_), complete(done) {}
};
class SyncptIncrManager {
public:
explicit SyncptIncrManager(GPU& gpu);
~SyncptIncrManager();
/// Adds an already-completed increment for the given syncpoint id and flushes all completed increments
void Increment(u32 id);
/// Returns a handle to increment later
u32 IncrementWhenDone(u32 class_id, u32 id);
/// Marks the increment associated with the handle as complete, then flushes completed increments
void SignalDone(u32 handle);
/// Increment all sequential pending increments that are already done.
void IncrementAllDone();
private:
std::vector<SyncptIncr> increments;
std::mutex increment_lock;
u32 current_id{};
GPU& gpu;
};
} // namespace Tegra

@@ -1,238 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <array>
extern "C" {
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
#include <libswscale/swscale.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
}
#include "common/assert.h"
#include "common/bit_field.h"
#include "common/logging/log.h"
#include "video_core/command_classes/nvdec.h"
#include "video_core/command_classes/vic.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
#include "video_core/textures/decoders.h"
namespace Tegra {
namespace {
enum class VideoPixelFormat : u64_le {
RGBA8 = 0x1f,
BGRA8 = 0x20,
RGBX8 = 0x23,
YUV420 = 0x44,
};
} // Anonymous namespace
union VicConfig {
u64_le raw{};
BitField<0, 7, VideoPixelFormat> pixel_format;
BitField<7, 2, u64_le> chroma_loc_horiz;
BitField<9, 2, u64_le> chroma_loc_vert;
BitField<11, 4, u64_le> block_linear_kind;
BitField<15, 4, u64_le> block_linear_height_log2;
BitField<32, 14, u64_le> surface_width_minus1;
BitField<46, 14, u64_le> surface_height_minus1;
};
Vic::Vic(GPU& gpu_, std::shared_ptr<Nvdec> nvdec_processor_)
: gpu(gpu_),
nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {}
Vic::~Vic() = default;
void Vic::ProcessMethod(Method method, u32 argument) {
LOG_DEBUG(HW_GPU, "Vic method 0x{:X}", static_cast<u32>(method));
const u64 arg = static_cast<u64>(argument) << 8;
switch (method) {
case Method::Execute:
Execute();
break;
case Method::SetConfigStructOffset:
config_struct_address = arg;
break;
case Method::SetOutputSurfaceLumaOffset:
output_surface_luma_address = arg;
break;
case Method::SetOutputSurfaceChromaOffset:
output_surface_chroma_address = arg;
break;
default:
break;
}
}
void Vic::Execute() {
if (output_surface_luma_address == 0) {
LOG_ERROR(Service_NVDRV, "VIC Luma address not set.");
return;
}
const VicConfig config{gpu.MemoryManager().Read<u64>(config_struct_address + 0x20)};
const AVFramePtr frame_ptr = nvdec_processor->GetFrame();
const auto* frame = frame_ptr.get();
if (!frame) {
return;
}
const u64 surface_width = config.surface_width_minus1 + 1;
const u64 surface_height = config.surface_height_minus1 + 1;
if (static_cast<u64>(frame->width) != surface_width ||
static_cast<u64>(frame->height) != surface_height) {
// TODO: Properly support multiple video streams with differing frame dimensions
LOG_WARNING(Service_NVDRV, "Frame dimensions {}x{} don't match surface dimensions {}x{}",
frame->width, frame->height, surface_width, surface_height);
}
switch (config.pixel_format) {
case VideoPixelFormat::RGBA8:
case VideoPixelFormat::BGRA8:
case VideoPixelFormat::RGBX8:
WriteRGBFrame(frame, config);
break;
case VideoPixelFormat::YUV420:
WriteYUVFrame(frame, config);
break;
default:
UNIMPLEMENTED_MSG("Unknown video pixel format {:X}", config.pixel_format.Value());
break;
}
}
void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
LOG_TRACE(Service_NVDRV, "Writing RGB Frame");
if (!scaler_ctx || frame->width != scaler_width || frame->height != scaler_height) {
const AVPixelFormat target_format = [pixel_format = config.pixel_format]() {
switch (pixel_format) {
case VideoPixelFormat::RGBA8:
return AV_PIX_FMT_RGBA;
case VideoPixelFormat::BGRA8:
return AV_PIX_FMT_BGRA;
case VideoPixelFormat::RGBX8:
return AV_PIX_FMT_RGB0;
default:
return AV_PIX_FMT_RGBA;
}
}();
sws_freeContext(scaler_ctx);
// Frames are decoded into either YUV420 or NV12 formats. Convert to desired RGB format
scaler_ctx = sws_getContext(frame->width, frame->height,
static_cast<AVPixelFormat>(frame->format), frame->width,
frame->height, target_format, 0, nullptr, nullptr, nullptr);
scaler_width = frame->width;
scaler_height = frame->height;
converted_frame_buffer.reset();
}
if (!converted_frame_buffer) {
const size_t frame_size = frame->width * frame->height * 4;
converted_frame_buffer = AVMallocPtr{static_cast<u8*>(av_malloc(frame_size)), av_free};
}
const std::array<int, 4> converted_stride{frame->width * 4, frame->height * 4, 0, 0};
u8* const converted_frame_buf_addr{converted_frame_buffer.get()};
sws_scale(scaler_ctx, frame->data, frame->linesize, 0, frame->height, &converted_frame_buf_addr,
converted_stride.data());
// Use the minimum of surface/frame dimensions to avoid buffer overflow.
const u32 surface_width = static_cast<u32>(config.surface_width_minus1) + 1;
const u32 surface_height = static_cast<u32>(config.surface_height_minus1) + 1;
const u32 width = std::min(surface_width, static_cast<u32>(frame->width));
const u32 height = std::min(surface_height, static_cast<u32>(frame->height));
const u32 blk_kind = static_cast<u32>(config.block_linear_kind);
if (blk_kind != 0) {
// swizzle pitch linear to block linear
const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
luma_buffer.resize(size);
Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(),
converted_frame_buf_addr, block_height, 0, 0);
gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
} else {
// send pitch linear frame
const size_t linear_size = width * height * 4;
gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
linear_size);
}
}
void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
LOG_TRACE(Service_NVDRV, "Writing YUV420 Frame");
const std::size_t surface_width = config.surface_width_minus1 + 1;
const std::size_t surface_height = config.surface_height_minus1 + 1;
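// Round the output pitch up to the next multiple of 256 bytes; this aligned width is used as
// the destination stride for the luma and chroma copies below.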
const std::size_t aligned_width = (surface_width + 0xff) & ~0xffUL;
// Use the minimum of surface/frame dimensions to avoid buffer overflow.
const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->width));
const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->height));
const auto stride = static_cast<size_t>(frame->linesize[0]);
luma_buffer.resize(aligned_width * surface_height);
chroma_buffer.resize(aligned_width * surface_height / 2);
// Populate luma buffer
const u8* luma_src = frame->data[0];
for (std::size_t y = 0; y < frame_height; ++y) {
const std::size_t src = y * stride;
const std::size_t dst = y * aligned_width;
for (std::size_t x = 0; x < frame_width; ++x) {
luma_buffer[dst + x] = luma_src[src + x];
}
}
gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
luma_buffer.size());
// Chroma
const std::size_t half_height = frame_height / 2;
const auto half_stride = static_cast<size_t>(frame->linesize[1]);
switch (frame->format) {
case AV_PIX_FMT_YUV420P: {
// Frame from FFmpeg software
// Populate chroma buffer from both channels with interleaving.
const std::size_t half_width = frame_width / 2;
const u8* chroma_b_src = frame->data[1];
const u8* chroma_r_src = frame->data[2];
for (std::size_t y = 0; y < half_height; ++y) {
const std::size_t src = y * half_stride;
const std::size_t dst = y * aligned_width;
for (std::size_t x = 0; x < half_width; ++x) {
chroma_buffer[dst + x * 2] = chroma_b_src[src + x];
chroma_buffer[dst + x * 2 + 1] = chroma_r_src[src + x];
}
}
break;
}
case AV_PIX_FMT_NV12: {
// Frame from VA-API hardware
// This is already interleaved so just copy
const u8* chroma_src = frame->data[1];
for (std::size_t y = 0; y < half_height; ++y) {
const std::size_t src = y * stride;
const std::size_t dst = y * aligned_width;
for (std::size_t x = 0; x < frame_width; ++x) {
chroma_buffer[dst + x] = chroma_src[src + x];
}
}
break;
}
default:
ASSERT(false);
break;
}
gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
chroma_buffer.size());
}
} // namespace Tegra

@@ -1,61 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include <vector>
#include "common/common_types.h"
struct SwsContext;
namespace Tegra {
class GPU;
class Nvdec;
union VicConfig;
class Vic {
public:
enum class Method : u32 {
Execute = 0xc0,
SetControlParams = 0x1c1,
SetConfigStructOffset = 0x1c2,
SetOutputSurfaceLumaOffset = 0x1c8,
SetOutputSurfaceChromaOffset = 0x1c9,
SetOutputSurfaceChromaUnusedOffset = 0x1ca
};
explicit Vic(GPU& gpu, std::shared_ptr<Nvdec> nvdec_processor);
~Vic();
/// Write to the device state.
void ProcessMethod(Method method, u32 argument);
private:
void Execute();
void WriteRGBFrame(const AVFrame* frame, const VicConfig& config);
void WriteYUVFrame(const AVFrame* frame, const VicConfig& config);
GPU& gpu;
std::shared_ptr<Tegra::Nvdec> nvdec_processor;
/// Avoid reallocation of the following buffers every frame, as their
/// size does not change during a stream
using AVMallocPtr = std::unique_ptr<u8, decltype(&av_free)>;
AVMallocPtr converted_frame_buffer;
std::vector<u8> luma_buffer;
std::vector<u8> chroma_buffer;
GPUVAddr config_struct_address{};
GPUVAddr output_surface_luma_address{};
GPUVAddr output_surface_chroma_address{};
SwsContext* scaler_ctx{};
s32 scaler_width{};
s32 scaler_height{};
};
} // namespace Tegra

@@ -1,53 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "video_core/vulkan_common/vulkan_wrapper.h"
#include <filesystem>
#include <fstream>
#include "common/fs/fs.h"
#include "common/fs/path_util.h"
#include "common/logging/log.h"
#include "video_core/vulkan_common/vulkan_instance.h"
#include "video_core/vulkan_common/vulkan_library.h"
#include "yuzu/check_vulkan.h"
#include "yuzu/uisettings.h"
constexpr char TEMP_FILE_NAME[] = "vulkan_check";
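// A note on the mechanism: the temporary file acts as a crash canary. It is created before
// attempting Vulkan initialization and removed afterwards, so if a previous run crashed while
// initializing Vulkan the leftover file marks Vulkan as broken on the next launch.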
bool CheckVulkan() {
if (UISettings::values.has_broken_vulkan) {
return true;
}
LOG_DEBUG(Frontend, "Checking presence of Vulkan");
const auto fs_config_loc = Common::FS::GetYuzuPath(Common::FS::YuzuPath::ConfigDir);
const auto temp_file_loc = fs_config_loc / TEMP_FILE_NAME;
if (std::filesystem::exists(temp_file_loc)) {
LOG_WARNING(Frontend, "Detected recovery from previous failed Vulkan initialization");
UISettings::values.has_broken_vulkan = true;
std::filesystem::remove(temp_file_loc);
return false;
}
std::ofstream temp_file_handle(temp_file_loc);
temp_file_handle.close();
try {
Vulkan::vk::InstanceDispatch dld;
const Common::DynamicLibrary library = Vulkan::OpenLibrary();
const Vulkan::vk::Instance instance =
Vulkan::CreateInstance(library, dld, VK_API_VERSION_1_0);
} catch (const Vulkan::vk::Exception& exception) {
LOG_ERROR(Frontend, "Failed to initialize Vulkan: {}", exception.what());
// Don't set has_broken_vulkan to true here: we care when loading Vulkan crashes the
// application, not when we can handle it.
}
std::filesystem::remove(temp_file_loc);
return true;
}

@@ -1,6 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
bool CheckVulkan();